diff stringlengths 41–2.03M | msg stringlengths 1–1.5k ⌀ | repo stringlengths 5–40 | sha stringlengths 40–40 | time stringlengths 20–20 |
---|---|---|---|---|
mmm a / src / metric . jl <nl> ppp b / src / metric . jl <nl> end <nl> ACE <nl> <nl> Calculates the averaged cross - entropy ( logloss ) for classification . <nl> + <nl> + # Arguments : <nl> + * ` eps : : Float64 ` : Prevents returning ` Inf ` if ` p = 0 ` . <nl> " " " <nl> type ACE < : AbstractEvalMetric <nl> ace_sum : : Float64 <nl> n_sample : : Int <nl> + eps : : Float64 <nl> <nl> - ACE ( ) = new ( 0 . 0 , 0 ) <nl> + ACE ( eps = 1 . 0e - 8 ) = new ( 0 . 0 , 0 , eps ) <nl> end <nl> <nl> function get ( metric : : ACE ) <nl> end <nl> <nl> function _update_single_output ( metric : : ACE , label : : NDArray , pred : : NDArray ) <nl> @ nd_as_jl ro = ( label , pred ) begin <nl> + eps = metric . eps <nl> # Samples are stored in the last dimension <nl> @ assert size ( label , ndims ( label ) ) = = size ( pred , ndims ( pred ) ) <nl> - if ndims ( pred ) = = 4 <nl> + if size ( label ) = = size ( pred ) # simply calculate the cross entropy of the probabilities <nl> + for ( q , p ) in zip ( pred , label ) <nl> + # p = = true probability <nl> + # q = = " unnatural " probability <nl> + metric . ace_sum + = p * log ( q + eps ) <nl> + metric . n_sample + = 1 <nl> + end <nl> + elseif ndims ( pred ) = = 4 <nl> labels = reshape ( label , size ( pred , 1 , 2 ) . . . , 1 , size ( pred , 4 ) ) <nl> for sample in 1 : size ( labels , 4 ) <nl> for j in 1 : size ( labels , 2 ) <nl> function _update_single_output ( metric : : ACE , label : : NDArray , pred : : NDArray ) <nl> # Since we can only target labels right now this is the only thing we can do . <nl> target = Int ( labels [ i , j , 1 , sample ] ) + 1 # klasses are 0 . . . k - 1 = > julia indexing <nl> p_k = pred [ i , j , target , sample ] <nl> - metric . ace_sum + = log ( p_k ) <nl> + metric . ace_sum + = log ( p_k + eps ) <nl> metric . n_sample + = 1 <nl> end <nl> end <nl> function _update_single_output ( metric : : ACE , label : : NDArray , pred : : NDArray ) <nl> for sample in 1 : size ( label , 1 ) <nl> target = Int ( label [ sample ] ) + 1 # 0 - based indexing = > 1 - based indexing <nl> p_k = pred [ target , sample ] <nl> - metric . ace_sum + = log ( p_k ) <nl> + metric . ace_sum + = log ( p_k + eps ) <nl> metric . n_sample + = 1 <nl> end <nl> else <nl> This can be used to quantify the influence of different classes on the overall l <nl> type MultiACE < : AbstractEvalMetric <nl> aces : : Vector { Float64 } <nl> counts : : Vector { Int } <nl> + eps : : Float64 <nl> <nl> - MultiACE ( nclasses ) = new ( Base . zeros ( nclasses ) , Base . zeros ( Int , nclasses ) ) <nl> + MultiACE ( nclasses , eps = 1 . 0e - 8 ) = new ( Base . zeros ( nclasses ) , Base . zeros ( Int , nclasses ) , eps ) <nl> end <nl> <nl> function get ( metric : : MultiACE ) <nl> end <nl> <nl> function _update_single_output ( metric : : MultiACE , label : : NDArray , pred : : NDArray ) <nl> @ nd_as_jl ro = ( label , pred ) begin <nl> + eps = metric . eps <nl> # Samples are stored in the last dimension <nl> @ assert size ( label , ndims ( label ) ) = = size ( pred , ndims ( pred ) ) <nl> - <nl> - if ndims ( pred ) = = 4 <nl> + @ assert size ( metric . aces ) = = size ( metric . counts ) <nl> + if size ( label ) = = size ( pred ) # simply calculate the cross entropy of the probabilities <nl> + for k in 1 : length ( metric . aces ) <nl> + kpred = view ( pred , ntuple ( d - > : , ndims ( pred ) - 2 ) . . . , k , : ) <nl> + klabel = view ( label , ntuple ( d - > : , ndims ( label ) - 2 ) . . . 
, k , : ) <nl> + for ( q , p ) in zip ( kpred , klabel ) <nl> + # p = = true probability <nl> + # q = = " unnatural " probability <nl> + metric . aces [ k ] + = p * log ( q + eps ) <nl> + metric . counts [ k ] + = 1 <nl> + end <nl> + end <nl> + elseif ndims ( pred ) = = 4 <nl> labels = reshape ( label , size ( pred , 1 , 2 ) . . . , 1 , size ( pred , 4 ) ) <nl> for sample in 1 : size ( labels , 4 ) <nl> for j in 1 : size ( labels , 2 ) <nl> function _update_single_output ( metric : : MultiACE , label : : NDArray , pred : : NDA <nl> target = Int ( labels [ i , j , 1 , sample ] ) + 1 # klasses are 0 . . . k - 1 = > julia indexing <nl> p_k = pred [ i , j , target , sample ] <nl> <nl> - metric . aces [ target ] + = log ( p_k ) <nl> + metric . aces [ target ] + = log ( p_k + eps ) <nl> metric . counts [ target ] + = 1 <nl> end <nl> end <nl> function _update_single_output ( metric : : MultiACE , label : : NDArray , pred : : NDA <nl> for sample in 1 : size ( label , 1 ) <nl> target = Int ( label [ sample ] ) + 1 <nl> p_k = pred [ target , sample ] <nl> - metric . aces [ target ] + = log ( p_k ) <nl> + metric . aces [ target ] + = log ( p_k + eps ) <nl> metric . counts [ target ] + = 1 <nl> end <nl> else <nl> mmm a / test / unittest / metric . jl <nl> ppp b / test / unittest / metric . jl <nl> function generate_probs ( n , m ) <nl> <nl> # Normalize : ensure each column sums to 1 <nl> for j = 1 : m <nl> - colsum = sum ( result [ : , j ] ) <nl> - for i = 1 : n <nl> - result [ i , j ] / = colsum <nl> - end <nl> + colsum = sum ( result [ : , j ] ) <nl> + for i = 1 : n <nl> + result [ i , j ] / = colsum <nl> + end <nl> end <nl> result <nl> end <nl> end <nl> <nl> function loglikelihood { T < : AbstractFloat } ( labels : : Vector { T } , probs : : Array { T , 2 } ) <nl> LL = 0 . 0 <nl> + eps = 1 . 0e - 8 <nl> for i = 1 : size ( labels , 1 ) <nl> - LL + = log ( probs [ Int ( labels [ i ] ) + 1 , i ] ) # labels are zero - based <nl> + LL + = log ( probs [ Int ( labels [ i ] ) + 1 , i ] + eps ) # labels are zero - based <nl> end <nl> LL / size ( labels , 1 ) <nl> end <nl> | Update ACE metric ( ) | apache/incubator-mxnet | 1781290bfb6d67876db453c652bb3d924eaba13f | 2017-03-29T21:08:36Z |
mmm a / spec / chromium - spec . js <nl> ppp b / spec / chromium - spec . js <nl> describe ( ' chromium feature ' , function ( ) { <nl> } ) <nl> b = window . open ( ' ' , ' __proto__ ' ) <nl> } ) <nl> + <nl> + it ( ' does not throw an exception when the features include webPreferences ' , function ( ) { <nl> + let b <nl> + assert . doesNotThrow ( function ( ) { <nl> + b = window . open ( ' ' , ' ' , ' webPreferences = ' ) <nl> + } ) <nl> + b . close ( ) <nl> + } ) <nl> } ) <nl> <nl> describe ( ' window . opener ' , function ( ) { <nl> | Add spec for webPreferences in features string | electron/electron | 7726c7c6c4eb6cf037eef207958705578ad2271a | 2017-04-26T17:56:53Z |
mmm a / jstests / sharding / features2 . js <nl> ppp b / jstests / sharding / features2 . js <nl> <nl> / / features2 . js <nl> <nl> s = new ShardingTest ( " features2 " , 2 , 1 , 1 ) ; <nl> + <nl> + / / The counts and the tests for " on - num - shards " only works for previous assumptions in balancer <nl> + / / behavior and assumes migrations do not occur during count ( ) commands . <nl> + s . stopBalancer ( ) <nl> + <nl> s . adminCommand ( { enablesharding : " test " } ) ; <nl> <nl> a = s . _connections [ 0 ] . getDB ( " test " ) ; <nl> | buildbot features2 . js intermittent issue - balancer assumptions and splitting assumptions have changed | mongodb/mongo | 982f933d491c977e5d9f708e6ed9945cea2940f4 | 2011-12-30T20:30:34Z |
mmm a / dlib / matrix / matrix_la . h <nl> ppp b / dlib / matrix / matrix_la . h <nl> namespace dlib <nl> / / compute the first column <nl> for ( long r = 1 ; r < A . nr ( ) ; + + r ) <nl> { <nl> - if ( L ( 0 , 0 ) > eps * A ( r , 0 ) ) <nl> + / / if ( L ( 0 , 0 ) > 0 ) <nl> + if ( L ( 0 , 0 ) > eps * std : : abs ( A ( r , 0 ) ) ) <nl> L ( r , 0 ) = A ( r , 0 ) / L ( 0 , 0 ) ; <nl> else <nl> return L ; <nl> namespace dlib <nl> { <nl> temp - = L ( r , i ) * L ( c , i ) ; <nl> } <nl> - if ( L ( c , c ) > eps * temp ) <nl> + <nl> + / / if ( L ( c , c ) > 0 ) <nl> + if ( L ( c , c ) > eps * std : : abs ( temp ) ) <nl> L ( r , c ) = temp / L ( c , c ) ; <nl> else <nl> return L ; <nl> | Added missing std : : abs ( ) calls . | davisking/dlib | 265aaf47ad5d63b412d26ad649578d38c63628ea | 2010-10-31T20:02:23Z |
mmm a / folly / Conv . h <nl> ppp b / folly / Conv . h <nl> template < class Tgt , class Src > <nl> typename std : : enable_if < <nl> std : : is_enum < Src > : : value & & IsSomeString < Tgt > : : value > : : type <nl> toAppend ( Src value , Tgt * result ) { <nl> - toAppend ( to_underlying_type ( value ) , result ) ; <nl> + toAppend ( to_underlying ( value ) , result ) ; <nl> } <nl> <nl> template < class Src > <nl> typename std : : enable_if < std : : is_enum < Src > : : value , size_t > : : type <nl> estimateSpaceNeeded ( Src value ) { <nl> - return estimateSpaceNeeded ( to_underlying_type ( value ) ) ; <nl> + return estimateSpaceNeeded ( to_underlying ( value ) ) ; <nl> } <nl> <nl> / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> typename std : : enable_if < <nl> ! std : : is_convertible < Tgt , StringPiece > : : value , <nl> Expected < Tgt , ConversionCode > > : : type <nl> tryTo ( const Src & value ) { <nl> - return tryTo < Tgt > ( to_underlying_type ( value ) ) ; <nl> + return tryTo < Tgt > ( to_underlying ( value ) ) ; <nl> } <nl> <nl> template < class Tgt , class Src > <nl> typename std : : enable_if < <nl> ! std : : is_convertible < Tgt , StringPiece > : : value , <nl> Tgt > : : type <nl> to ( const Src & value ) { <nl> - return to < Tgt > ( to_underlying_type ( value ) ) ; <nl> + return to < Tgt > ( to_underlying ( value ) ) ; <nl> } <nl> <nl> template < class Tgt , class Src > <nl> mmm a / folly / DynamicConverter . h <nl> ppp b / folly / DynamicConverter . h <nl> struct DynamicConstructor < <nl> C , <nl> typename std : : enable_if < std : : is_enum < C > : : value > : : type > { <nl> static dynamic construct ( const C & x ) { <nl> - return dynamic ( to_underlying_type ( x ) ) ; <nl> + return dynamic ( to_underlying ( x ) ) ; <nl> } <nl> } ; <nl> <nl> mmm a / folly / Utility . h <nl> ppp b / folly / Utility . h <nl> constexpr auto to_unsigned ( T const & t ) - > typename std : : make_unsigned < T > : : type { <nl> } <nl> <nl> template < class E > <nl> - constexpr std : : underlying_type_t < E > to_underlying_type ( E e ) noexcept { <nl> + constexpr std : : underlying_type_t < E > to_underlying ( E e ) noexcept { <nl> static_assert ( std : : is_enum < E > : : value , " not an enum type " ) ; <nl> return static_cast < std : : underlying_type_t < E > > ( e ) ; <nl> } <nl> mmm a / folly / hash / Hash . h <nl> ppp b / folly / hash / Hash . h <nl> struct IsAvalanchingHasher < hasher < std : : string > , K > : std : : true_type { } ; <nl> template < typename T > <nl> struct hasher < T , std : : enable_if_t < std : : is_enum < T > : : value > > { <nl> size_t operator ( ) ( T key ) const noexcept { <nl> - return Hash ( ) ( to_underlying_type ( key ) ) ; <nl> + return Hash ( ) ( to_underlying ( key ) ) ; <nl> } <nl> } ; <nl> <nl> | Rename to_underlying_type to to_underlying | facebook/folly | 92c4c7e23dde895236f2dfea9c363902f902108f | 2019-06-13T02:03:44Z |
deleted file mode 100644 <nl> index 2cc5ab49fbf8 . . 000000000000 <nl> mmm a / doc / release - notes - 14060 . md <nl> ppp / dev / null <nl> <nl> - Configuration <nl> mmmmmmmmmmmm - - <nl> - <nl> - The outbound message high water mark of the ZMQ PUB sockets are now <nl> - configurable via the options : <nl> - <nl> - ` - zmqpubhashtxhwm = n ` <nl> - <nl> - ` - zmqpubhashblockhwm = n ` <nl> - <nl> - ` - zmqpubrawblockhwm = n ` <nl> - <nl> - ` - zmqpubrawtxhwm = n ` <nl> - <nl> - Each high water mark value must be an integer greater than or equal to 0 . <nl> - The high water mark limits the maximum number of messages that ZMQ will <nl> - queue in memory for any single subscriber . A value of 0 means no limit . <nl> - When not specified , the default value continues to be 1000 . <nl> - When a ZMQ PUB socket reaches its high water mark for a subscriber , then <nl> - additional messages to the subscriber are dropped until the number of <nl> - queued messages again falls below the high water mark value . <nl> deleted file mode 100644 <nl> index bb8c0a623eed . . 000000000000 <nl> mmm a / doc / release - notes - 14477 . md <nl> ppp / dev / null <nl> <nl> - Miscellaneous RPC changes <nl> mmmmmmmmmmmm - <nl> - <nl> - - ` getaddressinfo ` now reports ` solvable ` , a boolean indicating whether all information necessary for signing is present in the wallet ( ignoring private keys ) . <nl> - - ` getaddressinfo ` , ` listunspent ` , and ` scantxoutset ` have a new output field ` desc ` , an output descriptor that encapsulates all signing information and key paths for the address ( only available when ` solvable ` is true for ` getaddressinfo ` and ` listunspent ` ) . <nl> deleted file mode 100644 <nl> index 38d76fee4605 . . 000000000000 <nl> mmm a / doc / release - notes - 14565 . md <nl> ppp / dev / null <nl> <nl> - Low - level RPC changes <nl> mmmmmmmmmmmmmmmmmmmmm - <nl> - <nl> - The ` importmulti ` RPC will now contain a new per - request ` warnings ` field with strings <nl> - that explain when fields are being ignored or inconsistant , if any . <nl> deleted file mode 100644 <nl> index 75faad99060f . . 000000000000 <nl> mmm a / doc / release - notes - pr13381 . md <nl> ppp / dev / null <nl> <nl> - RPC importprivkey : new label behavior <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - Previously , ` importprivkey ` automatically added the default empty label <nl> - ( " " ) to all addresses associated with the imported private key . Now it <nl> - defaults to using any existing label for those addresses . For example : <nl> - <nl> - - Old behavior : you import a watch - only address with the label " cold <nl> - wallet " . Later , you import the corresponding private key using the <nl> - default settings . The address ' s label is changed from " cold wallet " <nl> - to " " . <nl> - <nl> - - New behavior : you import a watch - only address with the label " cold <nl> - wallet " . Later , you import the corresponding private key using the <nl> - default settings . The address ' s label remains " cold wallet " . <nl> - <nl> - In both the previous and current case , if you directly specify a label <nl> - during the import , that label will override whatever previous label the <nl> - addresses may have had . Also in both cases , if none of the addresses <nl> - previously had a label , they will still receive the default empty label <nl> - ( " " ) . Examples : <nl> - <nl> - - You import a watch - only address with the label " temporary " . 
Later you <nl> - import the corresponding private key with the label " final " . The <nl> - address ' s label will be changed to " final " . <nl> - <nl> - - You use the default settings to import a private key for an address that <nl> - was not previously in the wallet . Its addresses will receive the default <nl> - empty label ( " " ) . <nl> mmm a / doc / release - notes . md <nl> ppp b / doc / release - notes . md <nl> Configuration option changes <nl> configuration file . Recognized sections are ` [ test ] ` , ` [ main ] ` , and <nl> ` [ regtest ] ` . <nl> <nl> + - Four new options are available for configuring the maximum number of <nl> + messages that ZMQ will queue in memory ( the " high water mark " ) before <nl> + dropping additional messages . The default value is 1 , 000 , the same as <nl> + was used for previous releases . See the [ ZMQ <nl> + documentation ] ( https : / / github . com / bitcoin / bitcoin / blob / master / doc / zmq . md # usage ) <nl> + for details . <nl> + <nl> - The ` enablebip61 ` option ( introduced in Bitcoin Core 0 . 17 . 0 ) is <nl> used to toggle sending of BIP 61 reject messages . Reject messages have no use <nl> case on the P2P network and are only logged for debugging by most network <nl> in the Low - level Changes section below . <nl> P2SH - P2WPKH , and P2SH - P2WSH . Requests for P2WSH and P2SH - P2WSH accept <nl> an additional ` witnessscript ` parameter . <nl> <nl> + - The ` importmulti ` RPC now returns an additional ` warnings ` field for <nl> + each request with an array of strings explaining when fields are being <nl> + ignored or are inconsistent , if there are any . <nl> + <nl> + - The ` getaddressinfo ` RPC now returns an additional ` solvable ` boolean <nl> + field when Bitcoin Core knows enough about the address ' s scriptPubKey , <nl> + optional redeemScript , and optional witnessScript in order for the <nl> + wallet to be able to generate an unsigned input spending funds sent to <nl> + that address . <nl> + <nl> + - The ` getaddressinfo ` , ` listunspent ` , and ` scantxoutset ` RPCs now <nl> + return an additional ` desc ` field that contains an output descriptor <nl> + containing all key paths and signing information for the address <nl> + ( except for the private key ) . The ` desc ` field is only returned for <nl> + ` getaddressinfo ` and ` listunspent ` when the address is solvable . <nl> + <nl> + - The ` importprivkey ` RPC will preserve previously - set labels for <nl> + addresses or public keys corresponding to the private key being <nl> + imported . For example , if you imported a watch - only address with the <nl> + label " cold wallet " in earlier releases of Bitcoin Core , subsequently <nl> + importing the private key would default to resetting the address ' s <nl> + label to the default empty - string label ( " " ) . In this release , the <nl> + previous label of " cold wallet " will be retained . If you optionally <nl> + specify any label besides the default when calling ` importprivkey ` , <nl> + the new label will be applied to the address . <nl> + <nl> - See the [ Mining ] ( # mining ) section for changes to ` getblocktemplate ` . <nl> <nl> Graphical User Interface ( GUI ) <nl> | Release notes : integrate detached release notes | bitcoin/bitcoin | f3d7d75e4e80bcd2c6058babb732c9c6cc7522c6 | 2019-01-03T01:27:07Z |
mmm a / src / a64 / code - stubs - a64 . cc <nl> ppp b / src / a64 / code - stubs - a64 . cc <nl> void RecordWriteStub : : Generate ( MacroAssembler * masm ) { <nl> <nl> <nl> void StoreArrayLiteralElementStub : : Generate ( MacroAssembler * masm ) { <nl> - / / TODO ( all ) : Possible optimisations in this function : <nl> - / / 1 . Merge CheckFastElements and CheckFastSmiElements , so that the map <nl> - / / bitfield is loaded only once . <nl> - / / 2 . Refactor the Ldr / Add sequence at the start of fast_elements and <nl> - / / smi_element . <nl> - <nl> / / x0 value element value to store <nl> / / x3 index_smi element index as smi <nl> / / sp [ 0 ] array_index_smi array literal index in function as smi <nl> void StoreArrayLiteralElementStub : : Generate ( MacroAssembler * masm ) { <nl> __ Ldr ( array_map , FieldMemOperand ( array , JSObject : : kMapOffset ) ) ; <nl> <nl> Label double_elements , smi_element , fast_elements , slow_elements ; <nl> - __ CheckFastElements ( array_map , x10 , & double_elements ) ; <nl> + Register bitfield2 = x10 ; <nl> + __ Ldrb ( bitfield2 , FieldMemOperand ( array_map , Map : : kBitField2Offset ) ) ; <nl> + <nl> + / / Jump if array ' s ElementsKind is not FAST * _SMI_ELEMENTS , FAST_ELEMENTS or <nl> + / / FAST_HOLEY_ELEMENTS . <nl> + STATIC_ASSERT ( FAST_SMI_ELEMENTS = = 0 ) ; <nl> + STATIC_ASSERT ( FAST_HOLEY_SMI_ELEMENTS = = 1 ) ; <nl> + STATIC_ASSERT ( FAST_ELEMENTS = = 2 ) ; <nl> + STATIC_ASSERT ( FAST_HOLEY_ELEMENTS = = 3 ) ; <nl> + __ Cmp ( bitfield2 , Map : : kMaximumBitField2FastHoleyElementValue ) ; <nl> + __ B ( hi , & double_elements ) ; <nl> + <nl> __ JumpIfSmi ( value , & smi_element ) ; <nl> - __ CheckFastSmiElements ( array_map , x10 , & fast_elements ) ; <nl> + <nl> + / / Jump if array ' s ElementsKind is not FAST_ELEMENTS or FAST_HOLEY_ELEMENTS . <nl> + __ Tbnz ( bitfield2 , MaskToBit ( FAST_ELEMENTS < < Map : : kElementsKindShift ) , <nl> + & fast_elements ) ; <nl> <nl> / / Store into the array literal requires an elements transition . Call into <nl> / / the runtime . <nl> void InternalArrayConstructorStub : : Generate ( MacroAssembler * masm ) { <nl> __ Ldr ( x10 , FieldMemOperand ( constructor , <nl> JSFunction : : kPrototypeOrInitialMapOffset ) ) ; <nl> <nl> - / / TODO ( jbramley ) : Add a helper function to read elements kind from an <nl> - / / existing map . <nl> - / / Load the map ' s " bit field 2 " into result . <nl> - __ Ldr ( kind , FieldMemOperand ( x10 , Map : : kBitField2Offset ) ) ; <nl> - / / Retrieve elements_kind from bit field 2 . <nl> - __ Ubfx ( kind , kind , Map : : kElementsKindShift , Map : : kElementsKindBitCount ) ; <nl> + / / Retrieve elements_kind from map . <nl> + __ LoadElementsKindFromMap ( kind , x10 ) ; <nl> <nl> if ( FLAG_debug_code ) { <nl> Label done ; <nl> mmm a / src / a64 / lithium - a64 . cc <nl> ppp b / src / a64 / lithium - a64 . cc <nl> LInstruction * LChunkBuilder : : DoChange ( HChange * instr ) { <nl> <nl> <nl> LInstruction * LChunkBuilder : : DoCheckValue ( HCheckValue * instr ) { <nl> - / / We only need a temp register if the target is in new space , but we can ' t <nl> - / / dereference the handle to test that here . <nl> - / / TODO ( all ) : Check these constraints . The temp register is not always used . 
<nl> - LOperand * value = UseRegister ( instr - > value ( ) ) ; <nl> - LOperand * temp = TempRegister ( ) ; <nl> - return AssignEnvironment ( new ( zone ( ) ) LCheckValue ( value , temp ) ) ; <nl> + LOperand * value = UseRegisterAtStart ( instr - > value ( ) ) ; <nl> + return AssignEnvironment ( new ( zone ( ) ) LCheckValue ( value ) ) ; <nl> } <nl> <nl> <nl> LInstruction * LChunkBuilder : : DoStringCharCodeAt ( HStringCharCodeAt * instr ) { <nl> <nl> <nl> LInstruction * LChunkBuilder : : DoStringCharFromCode ( HStringCharFromCode * instr ) { <nl> - / / TODO ( all ) use at start and remove assert in codegen <nl> LOperand * char_code = UseRegister ( instr - > value ( ) ) ; <nl> LOperand * context = UseAny ( instr - > context ( ) ) ; <nl> LStringCharFromCode * result = <nl> LInstruction * LChunkBuilder : : DoSub ( HSub * instr ) { <nl> ASSERT ( instr - > right ( ) - > representation ( ) . Equals ( instr - > representation ( ) ) ) ; <nl> LOperand * left ; <nl> if ( instr - > left ( ) - > IsConstant ( ) & & <nl> - ( HConstant : : cast ( instr - > left ( ) ) - > Integer32Value ( ) = = 0 ) ) { <nl> + ( HConstant : : cast ( instr - > left ( ) ) - > Integer32Value ( ) = = 0 ) ) { <nl> left = UseConstant ( instr - > left ( ) ) ; <nl> } else { <nl> left = UseRegisterAtStart ( instr - > left ( ) ) ; <nl> mmm a / src / a64 / lithium - a64 . h <nl> ppp b / src / a64 / lithium - a64 . h <nl> class LCheckSmi V8_FINAL : public LTemplateInstruction < 1 , 1 , 0 > { <nl> } ; <nl> <nl> <nl> - class LCheckValue V8_FINAL : public LTemplateInstruction < 0 , 1 , 1 > { <nl> + class LCheckValue V8_FINAL : public LTemplateInstruction < 0 , 1 , 0 > { <nl> public : <nl> - LCheckValue ( LOperand * value , LOperand * temp ) { <nl> + explicit LCheckValue ( LOperand * value ) { <nl> inputs_ [ 0 ] = value ; <nl> - temps_ [ 0 ] = temp ; <nl> } <nl> <nl> LOperand * value ( ) { return inputs_ [ 0 ] ; } <nl> - LOperand * temp ( ) { return temps_ [ 0 ] ; } <nl> <nl> DECLARE_CONCRETE_INSTRUCTION ( CheckValue , " check - value " ) <nl> DECLARE_HYDROGEN_ACCESSOR ( CheckValue ) <nl> mmm a / src / a64 / lithium - codegen - a64 . cc <nl> ppp b / src / a64 / lithium - codegen - a64 . cc <nl> void LCodeGen : : DoCheckValue ( LCheckValue * instr ) { <nl> Handle < HeapObject > object = instr - > hydrogen ( ) - > object ( ) . handle ( ) ; <nl> AllowDeferredHandleDereference smi_check ; <nl> if ( isolate ( ) - > heap ( ) - > InNewSpace ( * object ) ) { <nl> - Register temp = ToRegister ( instr - > temp ( ) ) ; <nl> + UseScratchRegisterScope temps ( masm ( ) ) ; <nl> + Register temp = temps . AcquireX ( ) ; <nl> Handle < Cell > cell = isolate ( ) - > factory ( ) - > NewCell ( object ) ; <nl> __ Mov ( temp , Operand ( Handle < Object > ( cell ) ) ) ; <nl> __ Ldr ( temp , FieldMemOperand ( temp , Cell : : kValueOffset ) ) ; <nl> mmm a / src / a64 / macro - assembler - a64 . cc <nl> ppp b / src / a64 / macro - assembler - a64 . cc <nl> void MacroAssembler : : TestMapBitfield ( Register object , uint64_t mask ) { <nl> } <nl> <nl> <nl> - void MacroAssembler : : LoadElementsKind ( Register result , Register object ) { <nl> - / / Load map . <nl> - __ Ldr ( result , FieldMemOperand ( object , HeapObject : : kMapOffset ) ) ; <nl> + void MacroAssembler : : LoadElementsKindFromMap ( Register result , Register map ) { <nl> / / Load the map ' s " bit field 2 " . 
<nl> - __ Ldrb ( result , FieldMemOperand ( result , Map : : kBitField2Offset ) ) ; <nl> + __ Ldrb ( result , FieldMemOperand ( map , Map : : kBitField2Offset ) ) ; <nl> / / Retrieve elements_kind from bit field 2 . <nl> __ Ubfx ( result , result , Map : : kElementsKindShift , Map : : kElementsKindBitCount ) ; <nl> } <nl> void MacroAssembler : : CheckFastObjectElements ( Register map , <nl> } <nl> <nl> <nl> - void MacroAssembler : : CheckFastSmiElements ( Register map , <nl> - Register scratch , <nl> - Label * fail ) { <nl> - STATIC_ASSERT ( FAST_SMI_ELEMENTS = = 0 ) ; <nl> - STATIC_ASSERT ( FAST_HOLEY_SMI_ELEMENTS = = 1 ) ; <nl> - Ldrb ( scratch , FieldMemOperand ( map , Map : : kBitField2Offset ) ) ; <nl> - Cmp ( scratch , Map : : kMaximumBitField2FastHoleySmiElementValue ) ; <nl> - B ( hi , fail ) ; <nl> - } <nl> - <nl> - <nl> / / Note : The ARM version of this clobbers elements_reg , but this version does <nl> / / not . Some uses of this in A64 assume that elements_reg will be preserved . <nl> void MacroAssembler : : StoreNumberToDoubleElements ( Register value_reg , <nl> mmm a / src / a64 / macro - assembler - a64 . h <nl> ppp b / src / a64 / macro - assembler - a64 . h <nl> class MacroAssembler : public Assembler { <nl> / / flags . The object register is preserved . <nl> void TestMapBitfield ( Register object , uint64_t mask ) ; <nl> <nl> - / / Load the elements kind field of an object , and return it in the result <nl> + / / Load the elements kind field from a map , and return it in the result <nl> / / register . <nl> - void LoadElementsKind ( Register result , Register object ) ; <nl> + void LoadElementsKindFromMap ( Register result , Register map ) ; <nl> <nl> / / Compare the object in a register to a value from the root list . <nl> void CompareRoot ( const Register & obj , Heap : : RootListIndex index ) ; <nl> class MacroAssembler : public Assembler { <nl> <nl> / / Check if a map for a JSObject indicates that the object has fast elements . <nl> / / Jump to the specified label if it does not . <nl> - void CheckFastElements ( Register map , <nl> - Register scratch , <nl> - Label * fail ) ; <nl> + void CheckFastElements ( Register map , Register scratch , Label * fail ) ; <nl> <nl> / / Check if a map for a JSObject indicates that the object can have both smi <nl> / / and HeapObject elements . Jump to the specified label if it does not . <nl> - void CheckFastObjectElements ( Register map , <nl> - Register scratch , <nl> - Label * fail ) ; <nl> - <nl> - / / Check if a map for a JSObject indicates that the object has fast smi only <nl> - / / elements . Jump to the specified label if it does not . <nl> - void CheckFastSmiElements ( Register map , Register scratch , Label * fail ) ; <nl> + void CheckFastObjectElements ( Register map , Register scratch , Label * fail ) ; <nl> <nl> / / Check to see if number can be stored as a double in FastDoubleElements . <nl> / / If it can , store it at the index specified by key_reg in the array , <nl> | A64 : ElementsKind TODOs | v8/v8 | 2e555d28875c00a5e203108ce1c615cf6a4c87ba | 2014-03-18T15:01:55Z |
mmm a / include / internal / catch_tags . hpp <nl> ppp b / include / internal / catch_tags . hpp <nl> namespace Catch { <nl> m_remainder + = c ; <nl> } <nl> <nl> + TagExtracter & operator = ( const TagExtracter & ) ; <nl> + <nl> std : : set < std : : string > & m_tags ; <nl> std : : string m_remainder ; <nl> } ; <nl> namespace Catch { <nl> m_exp . m_tagSets . push_back ( m_currentTagSet ) ; <nl> } <nl> <nl> + TagExpressionParser & operator = ( const TagExpressionParser & ) ; <nl> + <nl> bool m_isNegated ; <nl> TagSet m_currentTagSet ; <nl> TagExpression & m_exp ; <nl> | Manually applied merge from Master | catchorg/Catch2 | f0f407fc3ebdcbdad50fc1f9a45f15ab3cc54c63 | 2012-10-31T18:28:21Z |
mmm a / tensorflow / core / kernels / ops_testutil . h <nl> ppp b / tensorflow / core / kernels / ops_testutil . h <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / device_factory . h " <nl> # include " tensorflow / core / common_runtime / device_mgr . h " <nl> # include " tensorflow / core / common_runtime / process_function_library_runtime . h " <nl> - # include " tensorflow / core / platform / threadpool . h " <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / device_base . h " <nl> # include " tensorflow / core / framework / function . h " <nl> | Remove duplicate inpclude line | tensorflow/tensorflow | dc048acf2ca5f3f0dcc91439d9cc69e70d19bd95 | 2020-02-24T10:59:01Z |
mmm a / src / mongo / db / mongod_main . cpp <nl> ppp b / src / mongo / db / mongod_main . cpp <nl> void shutdownTask ( const ShutdownTaskArgs & shutdownArgs ) { <nl> # ifndef MONGO_CONFIG_USE_RAW_LATCHES <nl> LatchAnalyzer : : get ( serviceContext ) . dump ( ) ; <nl> # endif <nl> + <nl> + FlowControl : : shutdown ( serviceContext ) ; <nl> } <nl> <nl> } / / namespace <nl> mmm a / src / mongo / db / storage / flow_control . cpp <nl> ppp b / src / mongo / db / storage / flow_control . cpp <nl> void FlowControl : : set ( ServiceContext * service , std : : unique_ptr < FlowControl > flow <nl> globalFlow = std : : move ( flowControl ) ; <nl> } <nl> <nl> + void FlowControl : : shutdown ( ServiceContext * service ) { <nl> + auto & globalFlow = getFlowControl ( service ) ; <nl> + if ( globalFlow ) { <nl> + globalFlow - > _jobAnchor . stop ( ) ; <nl> + globalFlow . reset ( ) ; <nl> + } <nl> + } <nl> + <nl> / * * <nl> * Returns - 1 . 0 if there are not enough samples . <nl> * / <nl> mmm a / src / mongo / db / storage / flow_control . h <nl> ppp b / src / mongo / db / storage / flow_control . h <nl> class FlowControl : public ServerStatusSection { <nl> <nl> static void set ( ServiceContext * service , std : : unique_ptr < FlowControl > flowControl ) ; <nl> <nl> + / * * <nl> + * Shuts down the flow control job and removes it from the ServiceContext . <nl> + * / <nl> + static void shutdown ( ServiceContext * service ) ; <nl> + <nl> / * <nl> * Typical API call . <nl> * <nl> | SERVER - 50007 Extend FlowControl with shutdown support | mongodb/mongo | 13eb3cca6547bb801d1cd01f95c8e9dd564dd47c | 2020-08-10T22:58:34Z |
mmm a / tensorflow / core / common_runtime / direct_session . cc <nl> ppp b / tensorflow / core / common_runtime / direct_session . cc <nl> Status DirectSession : : RunInternal ( <nl> executor_step_count , & debugger_state ) ) ; <nl> } <nl> <nl> - run_state . rendez = new IntraProcessRendezvous ( device_mgr_ . get ( ) ) ; <nl> + run_state . rendez . reset ( new IntraProcessRendezvous ( device_mgr_ . get ( ) ) ) ; <nl> # ifndef __ANDROID__ <nl> / / Set up for collectives if ExecutorsAndKeys declares a key . <nl> if ( executors_and_keys - > collective_graph_key ! = <nl> Status DirectSession : : RunInternal ( <nl> <nl> / / Start parallel Executors . <nl> const size_t num_executors = executors_and_keys - > items . size ( ) ; <nl> - ExecutorBarrier * barrier = new ExecutorBarrier ( <nl> - num_executors , run_state . rendez , [ & run_state ] ( const Status & ret ) { <nl> - { <nl> - mutex_lock l ( run_state . mu ) ; <nl> - run_state . status . Update ( ret ) ; <nl> - } <nl> - run_state . executors_done . Notify ( ) ; <nl> - } ) ; <nl> + Notification executors_done ; <nl> + <nl> + / / TODO ( mrry ) : Switch the RunInternal ( ) synchronous use of ExecutorBarrier <nl> + / / to use a stack - allocated barrier . <nl> + ExecutorBarrier * barrier = <nl> + new ExecutorBarrier ( num_executors , run_state . rendez . get ( ) , <nl> + [ & run_state , & executors_done ] ( const Status & ret ) { <nl> + { <nl> + mutex_lock l ( run_state . mu ) ; <nl> + run_state . status . Update ( ret ) ; <nl> + } <nl> + executors_done . Notify ( ) ; <nl> + } ) ; <nl> <nl> Executor : : Args args ; <nl> args . step_id = step_id ; <nl> args . call_frame = call_frame ; <nl> - args . rendezvous = run_state . rendez ; <nl> + args . rendezvous = run_state . rendez . get ( ) ; <nl> args . collective_executor = <nl> ( run_state . collective_executor ? run_state . collective_executor - > get ( ) <nl> : nullptr ) ; <nl> Status DirectSession : : RunInternal ( <nl> if ( run_options . inter_op_thread_pool ( ) < - 1 | | <nl> run_options . inter_op_thread_pool ( ) > = <nl> static_cast < int32 > ( thread_pools_ . size ( ) ) ) { <nl> - run_state . executors_done . Notify ( ) ; <nl> delete barrier ; <nl> return errors : : InvalidArgument ( " Invalid inter_op_thread_pool : " , <nl> run_options . inter_op_thread_pool ( ) ) ; <nl> Status DirectSession : : RunInternal ( <nl> step_cancellation_manager . StartCancel ( ) ; <nl> } ) ; <nl> if ( already_cancelled ) { <nl> - / / NOTE ( mrry ) : If we don ' t explicitly notify <nl> - / / ` run_state . executors_done ` , the RunState destructor would <nl> - / / block on this notification . <nl> - run_state . executors_done . Notify ( ) ; <nl> delete barrier ; <nl> return errors : : Cancelled ( " Run call was cancelled " ) ; <nl> } <nl> Status DirectSession : : RunInternal ( <nl> item . executor - > RunAsync ( args , barrier - > Get ( ) ) ; <nl> } <nl> <nl> - WaitForNotification ( & run_state , & step_cancellation_manager , <nl> + WaitForNotification ( & executors_done , & run_state , & step_cancellation_manager , <nl> run_options . timeout_in_ms ( ) > 0 <nl> ? run_options . timeout_in_ms ( ) <nl> : operation_timeout_in_ms_ ) ; <nl> Status DirectSession : : PRunSetup ( const std : : vector < string > & input_names , <nl> / / Create the run state and save it for future PRun calls . <nl> Executor : : Args args ; <nl> args . step_id = step_id_counter_ . fetch_add ( 1 ) ; <nl> - RunState * run_state = <nl> - new RunState ( input_names , output_names , args . 
step_id , & devices_ ) ; <nl> - run_state - > rendez = new IntraProcessRendezvous ( device_mgr_ . get ( ) ) ; <nl> + PartialRunState * run_state = <nl> + new PartialRunState ( input_names , output_names , args . step_id , & devices_ ) ; <nl> + run_state - > rendez . reset ( new IntraProcessRendezvous ( device_mgr_ . get ( ) ) ) ; <nl> { <nl> mutex_lock l ( executor_lock_ ) ; <nl> if ( ! partial_runs_ <nl> . emplace ( run_state_args . handle , <nl> - std : : unique_ptr < RunState > ( run_state ) ) <nl> + std : : unique_ptr < PartialRunState > ( run_state ) ) <nl> . second ) { <nl> return errors : : Internal ( " The handle ' " , run_state_args . handle , <nl> " ' created for this partial run is not unique . " ) ; <nl> Status DirectSession : : PRunSetup ( const std : : vector < string > & input_names , <nl> / / Start parallel Executors . <nl> const size_t num_executors = executors_and_keys - > items . size ( ) ; <nl> ExecutorBarrier * barrier = new ExecutorBarrier ( <nl> - num_executors , run_state - > rendez , [ run_state ] ( const Status & ret ) { <nl> + num_executors , run_state - > rendez . get ( ) , [ run_state ] ( const Status & ret ) { <nl> if ( ! ret . ok ( ) ) { <nl> mutex_lock l ( run_state - > mu ) ; <nl> run_state - > status . Update ( ret ) ; <nl> Status DirectSession : : PRunSetup ( const std : : vector < string > & input_names , <nl> run_state - > executors_done . Notify ( ) ; <nl> } ) ; <nl> <nl> - args . rendezvous = run_state - > rendez ; <nl> + args . rendezvous = run_state - > rendez . get ( ) ; <nl> args . cancellation_manager = cancellation_manager_ ; <nl> / / Note that Collectives are not supported in partial runs <nl> / / because RunOptions is not passed in so we can ' t know whether <nl> Status DirectSession : : PRun ( const string & handle , const NamedTensorList & inputs , <nl> const string & key = parts [ 0 ] ; <nl> / / Get the executors for this partial run . <nl> ExecutorsAndKeys * executors_and_keys ; <nl> - RunState * run_state ; <nl> + PartialRunState * run_state ; <nl> { <nl> mutex_lock l ( executor_lock_ ) ; / / could use reader lock <nl> auto exc_it = executors_ . find ( key ) ; <nl> Status DirectSession : : PRun ( const string & handle , const NamedTensorList & inputs , <nl> CheckFetch ( inputs , output_names , executors_and_keys , run_state ) ) ; <nl> <nl> / / Send inputs . <nl> - Status s = SendPRunInputs ( inputs , executors_and_keys , run_state - > rendez ) ; <nl> + Status s = <nl> + SendPRunInputs ( inputs , executors_and_keys , run_state - > rendez . get ( ) ) ; <nl> <nl> / / Receive outputs . <nl> if ( s . ok ( ) ) { <nl> Status DirectSession : : PRun ( const string & handle , const NamedTensorList & inputs , <nl> done = run_state - > PendingDone ( ) ; <nl> } <nl> if ( done ) { <nl> - WaitForNotification ( run_state , cancellation_manager_ , <nl> - operation_timeout_in_ms_ ) ; <nl> + WaitForNotification ( & run_state - > executors_done , run_state , <nl> + cancellation_manager_ , operation_timeout_in_ms_ ) ; <nl> partial_runs_ . erase ( handle ) ; <nl> } <nl> } <nl> Status DirectSession : : RecvPRunOutputs ( <nl> const string & output_key = it - > second ; <nl> Tensor output_tensor ; <nl> bool is_dead ; <nl> - IntraProcessRendezvous * rendez = run_state - > rendez ; <nl> <nl> s = Rendezvous : : ParseKey ( output_key , & parsed ) ; <nl> if ( s . ok ( ) ) { <nl> / / Fetch data from the Rendezvous . 
<nl> - s = rendez - > Recv ( parsed , Rendezvous : : Args ( ) , & output_tensor , & is_dead , <nl> - operation_timeout_in_ms_ ) ; <nl> + s = run_state - > rendez - > Recv ( parsed , Rendezvous : : Args ( ) , & output_tensor , <nl> + & is_dead , operation_timeout_in_ms_ ) ; <nl> if ( is_dead & & s . ok ( ) ) { <nl> s = errors : : InvalidArgument ( " The tensor returned for " , output_name , <nl> " was not valid . " ) ; <nl> } <nl> } <nl> if ( ! s . ok ( ) ) { <nl> - rendez - > StartAbort ( s ) ; <nl> + run_state - > rendez - > StartAbort ( s ) ; <nl> outputs - > clear ( ) ; <nl> return s ; <nl> } <nl> Status DirectSession : : RecvPRunOutputs ( <nl> Status DirectSession : : CheckFetch ( const NamedTensorList & feeds , <nl> const std : : vector < string > & fetches , <nl> const ExecutorsAndKeys * executors_and_keys , <nl> - const RunState * run_state ) { <nl> + const PartialRunState * run_state ) { <nl> const Graph * graph = executors_and_keys - > graph . get ( ) ; <nl> const NameNodeMap * name_to_node = & executors_and_keys - > name_to_node ; <nl> <nl> : : tensorflow : : Status DirectSession : : Close ( ) { <nl> return : : tensorflow : : Status : : OK ( ) ; <nl> } <nl> <nl> - DirectSession : : RunState : : RunState ( <nl> - const std : : vector < string > & pending_input_names , <nl> - const std : : vector < string > & pending_output_names , int64 step_id , <nl> - const std : : vector < Device * > * devices ) <nl> + DirectSession : : RunState : : RunState ( int64 step_id , <nl> + const std : : vector < Device * > * devices ) <nl> : step_container ( step_id , [ devices , step_id ] ( const string & name ) { <nl> for ( auto d : * devices ) { <nl> if ( ! d - > resource_manager ( ) - > Cleanup ( name ) . ok ( ) ) { <nl> DirectSession : : RunState : : RunState ( <nl> ScopedAllocatorMgr * sam = d - > GetScopedAllocatorMgr ( ) ; <nl> if ( sam ) sam - > Cleanup ( step_id ) ; <nl> } <nl> - } ) { <nl> + } ) { } <nl> + <nl> + DirectSession : : PartialRunState : : PartialRunState ( <nl> + const std : : vector < string > & pending_input_names , <nl> + const std : : vector < string > & pending_output_names , int64 step_id , <nl> + const std : : vector < Device * > * devices ) <nl> + : RunState ( step_id , devices ) { <nl> / / Initially all the feeds and fetches are pending . <nl> for ( auto & name : pending_input_names ) { <nl> pending_inputs [ name ] = false ; <nl> DirectSession : : RunState : : RunState ( <nl> } <nl> } <nl> <nl> - DirectSession : : RunState : : RunState ( int64 step_id , <nl> - const std : : vector < Device * > * devices ) <nl> - : RunState ( { } , { } , step_id , devices ) { } <nl> - <nl> - DirectSession : : RunState : : ~ RunState ( ) { <nl> + DirectSession : : PartialRunState : : ~ PartialRunState ( ) { <nl> if ( rendez ! = nullptr ) { <nl> - if ( ! executors_done . HasBeenNotified ( ) ) { <nl> - rendez - > StartAbort ( errors : : Cancelled ( " PRun cancellation " ) ) ; <nl> - executors_done . WaitForNotification ( ) ; <nl> - } <nl> - rendez - > Unref ( ) ; <nl> + rendez - > StartAbort ( errors : : Cancelled ( " PRun cancellation " ) ) ; <nl> + executors_done . WaitForNotification ( ) ; <nl> } <nl> } <nl> <nl> - bool DirectSession : : RunState : : PendingDone ( ) const { <nl> + bool DirectSession : : PartialRunState : : PendingDone ( ) const { <nl> for ( const auto & it : pending_inputs ) { <nl> if ( ! it . 
second ) return false ; <nl> } <nl> bool DirectSession : : RunState : : PendingDone ( ) const { <nl> return true ; <nl> } <nl> <nl> - void DirectSession : : WaitForNotification ( RunState * run_state , <nl> + void DirectSession : : WaitForNotification ( Notification * n , RunState * run_state , <nl> CancellationManager * cm , <nl> int64 timeout_in_ms ) { <nl> - const Status status = <nl> - WaitForNotification ( & run_state - > executors_done , timeout_in_ms ) ; <nl> + const Status status = WaitForNotification ( n , timeout_in_ms ) ; <nl> if ( ! status . ok ( ) ) { <nl> { <nl> mutex_lock l ( run_state - > mu ) ; <nl> void DirectSession : : WaitForNotification ( RunState * run_state , <nl> / / We must wait for the executors to complete , because they have borrowed <nl> / / references to ` cm ` and other per - step state . After this notification , it <nl> / / is safe to clean up the step . <nl> - run_state - > executors_done . WaitForNotification ( ) ; <nl> + n - > WaitForNotification ( ) ; <nl> } <nl> } <nl> <nl> mmm a / tensorflow / core / common_runtime / direct_session . h <nl> ppp b / tensorflow / core / common_runtime / direct_session . h <nl> class DirectSession : public Session { <nl> std : : unique_ptr < ProcessFunctionLibraryRuntime > proc_flr ; <nl> } ; <nl> <nl> - / / For each live partial execution , the session maintains a RunState . <nl> - / / ' status ' is the current status of this partial execution . ' executor_done ' <nl> - / / is " notified " when all executors are done . ' pending_inputs ' are the set <nl> - / / of pending feeds and ' pending_outputs ' are the set of pending fetches . <nl> + / / For each live Run ( ) call , the session maintains a RunState . <nl> + / / ' status ' is the current status of the execution . <nl> struct RunState { <nl> mutex mu ; <nl> Status status GUARDED_BY ( mu ) ; <nl> - IntraProcessRendezvous * rendez = nullptr ; <nl> + core : : RefCountPtr < IntraProcessRendezvous > rendez = nullptr ; <nl> std : : unique_ptr < CollectiveExecutor : : Handle > collective_executor ; <nl> std : : unique_ptr < StepStatsCollector > collector ; <nl> - Notification executors_done ; <nl> - std : : unordered_map < string , bool > pending_inputs ; / / true if fed <nl> - std : : unordered_map < string , bool > pending_outputs ; / / true if fetched <nl> TensorStore tensor_store ; <nl> ScopedStepContainer step_container ; <nl> <nl> RunState ( int64 step_id , const std : : vector < Device * > * devices ) ; <nl> + } ; <nl> <nl> - RunState ( const std : : vector < string > & pending_input_names , <nl> - const std : : vector < string > & pending_output_names , int64 step_id , <nl> - const std : : vector < Device * > * devices ) ; <nl> + / / For each live partial execution , the session maintains a PartialRunState . <nl> + / / ' executor_done ' is " notified " when all executors are done . ' pending_inputs ' <nl> + / / are the set of pending feeds and ' pending_outputs ' are the set of pending <nl> + / / fetches . <nl> + struct PartialRunState : public RunState { <nl> + Notification executors_done ; <nl> + std : : unordered_map < string , bool > pending_inputs ; / / true if fed <nl> + std : : unordered_map < string , bool > pending_outputs ; / / true if fetched <nl> + <nl> + PartialRunState ( const std : : vector < string > & pending_input_names , <nl> + const std : : vector < string > & pending_output_names , <nl> + int64 step_id , const std : : vector < Device * > * devices ) ; <nl> <nl> / / Returns true if all pending inputs and outputs have been completed . 
<nl> bool PendingDone ( ) const ; <nl> <nl> - ~ RunState ( ) ; <nl> + ~ PartialRunState ( ) ; <nl> } ; <nl> <nl> struct RunStateArgs { <nl> class DirectSession : public Session { <nl> : : tensorflow : : Status CheckFetch ( <nl> const std : : vector < std : : pair < string , Tensor > > & feeds , <nl> const std : : vector < string > & fetches , <nl> - const ExecutorsAndKeys * executors_and_keys , const RunState * run_state ) ; <nl> + const ExecutorsAndKeys * executors_and_keys , <nl> + const PartialRunState * run_state ) ; <nl> <nl> / / Use the appropriate WaitForNotification function based on whether <nl> / / operation_timeout_in_ms is greater than 0 . <nl> class DirectSession : public Session { <nl> / / If the timeout expires , the ` cm - > StartCancel ( ) ` will be called . <nl> : : tensorflow : : Status WaitForNotification ( Notification * n , <nl> int64 timeout_in_ms ) ; <nl> - void WaitForNotification ( RunState * run_state , CancellationManager * cm , <nl> - int64 timeout_in_ms ) ; <nl> + void WaitForNotification ( Notification * n , RunState * run_state , <nl> + CancellationManager * cm , int64 timeout_in_ms ) ; <nl> <nl> : : tensorflow : : Status CheckNotClosed ( ) { <nl> mutex_lock l ( closed_lock_ ) ; <nl> class DirectSession : public Session { <nl> std : : unordered_map < int64 , Callable > callables_ GUARDED_BY ( callables_lock_ ) ; <nl> <nl> / / Holds mappings from handle to partial run state . <nl> - std : : unordered_map < string , std : : unique_ptr < RunState > > partial_runs_ <nl> + std : : unordered_map < string , std : : unique_ptr < PartialRunState > > partial_runs_ <nl> GUARDED_BY ( executor_lock_ ) ; <nl> <nl> / / This holds all the tensors that are currently alive in the session . <nl> | Split PRun - related fields from DirectSession : : RunState into DirectSession : : PartialRunState . | tensorflow/tensorflow | 2e31e570610ac768f61de2a43843bd1219762a4d | 2019-11-19T20:35:59Z |
mmm a / drivers / javascript / src / Makefile <nl> ppp b / drivers / javascript / src / Makefile <nl> <nl> - OVERRIDE_GOALS : = clean = js - clean <nl> + OVERRIDE_GOALS : = clean = js - clean publish = js - publish dist = js - dist <nl> <nl> TOP : = . . / . . / . . <nl> include $ ( TOP ) / Makefile <nl> mmm a / drivers / javascript / src / build . mk <nl> ppp b / drivers / javascript / src / build . mk <nl> $ ( JS_DRIVER_LIB ) : $ ( PB_JS_FILE ) $ ( PB_BIN_FILE ) $ ( DRIVER_COMPILED_COFFEE ) | $ ( JS_ <nl> - - output_mode = $ ( JS_OUTPUT_MODE ) \ <nl> ) > $ @ <nl> <nl> - . PHONY : publish <nl> - publish : $ ( JS_DRIVER_LIB ) <nl> - $ P PUBLISH - JS <nl> + . PHONY : js - dist <nl> + js - dist : $ ( JS_DRIVER_LIB ) $ ( PB_BIN_FILE ) <nl> + $ P DIST - JS $ ( JS_PKG_DIR ) <nl> + rm - rf $ ( JS_PKG_DIR ) <nl> mkdir - p $ ( JS_PKG_DIR ) <nl> cp package . json $ ( JS_PKG_DIR ) <nl> cp README . md $ ( JS_PKG_DIR ) <nl> cp $ ( JS_DRIVER_LIB ) $ ( JS_PKG_DIR ) <nl> - cd $ ( JS_PKG_DIR ) ; npm publish - - force <nl> + cp $ ( PB_BIN_FILE ) $ ( JS_PKG_DIR ) <nl> + <nl> + . PHONY : js - publish <nl> + js - publish : dist <nl> + $ P PUBLISH - JS $ ( JS_PKG_DIR ) <nl> + cd $ ( JS_PKG_DIR ) & & npm publish - - force <nl> <nl> . PHONY : test <nl> test - js : $ ( JS_DRIVER_LIB ) <nl> mmm a / drivers / javascript / src / package . json <nl> ppp b / drivers / javascript / src / package . json <nl> <nl> { " url " : " http : / / github . com / rethinkdb / rethinkdb / issues " <nl> , " email " : " bugs @ rethinkdb . com " <nl> } <nl> - , " files " : [ " rethinkdb . js " , " README . md " ] <nl> + , " files " : [ " rethinkdb . js " , " README . md " , " ql2 . desc " ] <nl> , " repository " : <nl> { " type " : " git " <nl> , " url " : " http : / / github . com / rethinkdb / rethinkdb . git " <nl> } <nl> , " engine " : " node > = 0 . 10 . 0 " <nl> - , " dependencies " : { <nl> + , " optionalDependencies " : { <nl> " node - protobuf " : " > = 1 . 0 . 0 " <nl> } <nl> } <nl> deleted file mode 100644 <nl> index e69de29bb2d . . 00000000000 <nl> | Bring the npm package up to date | rethinkdb/rethinkdb | 75e4cc511ea30770d262abd7d2888c436e4ef56c | 2013-07-02T17:13:08Z |
mmm a / Marlin / src / inc / Conditionals_post . h <nl> ppp b / Marlin / src / inc / Conditionals_post . h <nl> <nl> # if ENABLED ( AUTO_BED_LEVELING_UBL ) <nl> # define _MESH_MIN_X ( max ( X_MIN_BED + MESH_INSET , X_MIN_POS ) ) / / UBL is careful not to probe off the bed . It does not <nl> # define _MESH_MIN_Y ( max ( Y_MIN_BED + MESH_INSET , Y_MIN_POS ) ) / / need * _PROBE_OFFSET_FROM_EXTRUDER in the mesh dimensions <nl> + # define _MESH_MAX_X ( min ( X_MAX_BED - ( MESH_INSET ) , X_MAX_POS ) ) <nl> + # define _MESH_MAX_Y ( min ( Y_MAX_BED - ( MESH_INSET ) , Y_MAX_POS ) ) <nl> # else <nl> # define _MESH_MIN_X ( max ( X_MIN_BED + MESH_INSET , X_MIN_POS + X_PROBE_OFFSET_FROM_EXTRUDER ) ) <nl> # define _MESH_MIN_Y ( max ( Y_MIN_BED + MESH_INSET , Y_MIN_POS + Y_PROBE_OFFSET_FROM_EXTRUDER ) ) <nl> + # define _MESH_MAX_X ( min ( X_MAX_BED - ( MESH_INSET ) , X_MAX_POS + X_PROBE_OFFSET_FROM_EXTRUDER ) ) <nl> + # define _MESH_MAX_Y ( min ( Y_MAX_BED - ( MESH_INSET ) , Y_MAX_POS + Y_PROBE_OFFSET_FROM_EXTRUDER ) ) <nl> # endif <nl> # endif <nl> / * * <nl> | Correct mesh size calculations | MarlinFirmware/Marlin | 9d0cf02fef695cdbfdcdbff133968d8508046cd9 | 2018-01-17T15:36:12Z |
mmm a / fdbclient / NativeAPI . actor . cpp <nl> ppp b / fdbclient / NativeAPI . actor . cpp <nl> struct TrInfoChunk { <nl> <nl> ACTOR static Future < Void > transactionInfoCommitActor ( Transaction * tr , std : : vector < TrInfoChunk > * chunks ) { <nl> state const Key clientLatencyAtomicCtr = CLIENT_LATENCY_INFO_CTR_PREFIX . withPrefix ( fdbClientInfoPrefixRange . begin ) ; <nl> - loop { <nl> + state int retryCount = 0 ; <nl> + loop { <nl> try { <nl> tr - > reset ( ) ; <nl> tr - > setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ; <nl> ACTOR static Future < Void > transactionInfoCommitActor ( Transaction * tr , std : : vecto <nl> return Void ( ) ; <nl> } <nl> catch ( Error & e ) { <nl> + retryCount + + ; <nl> + if ( retryCount = = 10 ) throw ; <nl> wait ( tr - > onError ( e ) ) ; <nl> } <nl> } <nl> | Restore retry limiting on the client sampling transaction | apple/foundationdb | e7218bbb282346e052828c029c8a4ce34b0a1521 | 2019-07-02T18:16:00Z |
deleted file mode 100644 <nl> index ec5b5178605 . . 00000000000 <nl> mmm a / src / buffer / out / ut_textbuffer / sources . dep <nl> ppp / dev / null <nl> <nl> - BUILD_PASS3_CONSUMES = \ <nl> - onecore \ merged \ mbs \ bootableskus \ prepareimagingtools | PASS3 \ <nl> mmm a / src / host / ft_host / sources . dep <nl> ppp b / src / host / ft_host / sources . dep <nl> <nl> BUILD_PASS2_CONSUMES = \ <nl> onecore \ windows \ core \ console \ open \ src \ tools \ nihilist | PASS2 \ <nl> <nl> - BUILD_PASS3_CONSUMES = \ <nl> - onecore \ merged \ mbs \ bootableskus \ prepareimagingtools | PASS3 \ <nl> - <nl> mmm a / src / host / ft_integrity / sources . dep <nl> ppp b / src / host / ft_integrity / sources . dep <nl> PUBLIC_PASS1_CONSUMES = \ <nl> BUILD_PASS2_CONSUMES = \ <nl> onecore \ base \ appmodel \ test \ common \ testhelper \ samples \ nativecxapp \ appx | PASS2 \ <nl> <nl> - <nl> - BUILD_PASS3_CONSUMES = \ <nl> - onecore \ merged \ mbs \ bootableskus \ prepareimagingtools | PASS3 \ <nl> mmm a / src / host / ft_uia / sources . dep <nl> ppp b / src / host / ft_uia / sources . dep <nl> PUBLIC_PASS1_CONSUMES = \ <nl> onecore \ sdktools \ winappdriver \ selenium . support | PASS1 \ <nl> onecore \ sdktools \ winappdriver \ selenium . webdriver | PASS1 \ <nl> <nl> - BUILD_PASS3_CONSUMES = \ <nl> - onecore \ merged \ mbs \ bootableskus \ prepareimagingtools | PASS3 \ <nl> - <nl> deleted file mode 100644 <nl> index ec5b5178605 . . 00000000000 <nl> mmm a / src / host / ut_host / sources . dep <nl> ppp / dev / null <nl> <nl> - BUILD_PASS3_CONSUMES = \ <nl> - onecore \ merged \ mbs \ bootableskus \ prepareimagingtools | PASS3 \ <nl> deleted file mode 100644 <nl> index c18b8d21456 . . 00000000000 <nl> mmm a / src / interactivity / win32 / ut_interactivity_win32 / sources . dep <nl> ppp / dev / null <nl> <nl> - BUILD_PASS3_CONSUMES = \ <nl> - onecore \ merged \ mbs \ bootableskus \ prepareimagingtools | PASS3 \ <nl> - <nl> deleted file mode 100644 <nl> index ec5b5178605 . . 00000000000 <nl> mmm a / src / terminal / adapter / ut_adapter / sources . dep <nl> ppp / dev / null <nl> <nl> - BUILD_PASS3_CONSUMES = \ <nl> - onecore \ merged \ mbs \ bootableskus \ prepareimagingtools | PASS3 \ <nl> deleted file mode 100644 <nl> index ec5b5178605 . . 00000000000 <nl> mmm a / src / terminal / parser / ut_parser / sources . dep <nl> ppp / dev / null <nl> <nl> - BUILD_PASS3_CONSUMES = \ <nl> - onecore \ merged \ mbs \ bootableskus \ prepareimagingtools | PASS3 \ <nl> deleted file mode 100644 <nl> index ec5b5178605 . . 00000000000 <nl> mmm a / src / tools / integrity / exewin32 / sources . dep <nl> ppp / dev / null <nl> <nl> - BUILD_PASS3_CONSUMES = \ <nl> - onecore \ merged \ mbs \ bootableskus \ prepareimagingtools | PASS3 \ <nl> mmm a / src / tools / integrity / packageuwp / sources . dep <nl> ppp b / src / tools / integrity / packageuwp / sources . dep <nl> <nl> BUILD_PASS2_CONSUMES = \ <nl> onecore \ windows \ core \ console \ open \ src \ tools \ integrity \ exeuwp | PASS2 \ <nl> <nl> - BUILD_PASS3_CONSUMES = \ <nl> - onecore \ merged \ mbs \ bootableskus \ prepareimagingtools | PASS3 \ <nl> - <nl> deleted file mode 100644 <nl> index ec5b5178605 . . 00000000000 <nl> mmm a / src / tools / nihilist / sources . dep <nl> ppp / dev / null <nl> <nl> - BUILD_PASS3_CONSUMES = \ <nl> - onecore \ merged \ mbs \ bootableskus \ prepareimagingtools | PASS3 \ <nl> mmm a / src / tools / vtapp / sources . dep <nl> ppp b / src / tools / vtapp / sources . 
dep <nl> <nl> PUBLIC_PASS0_CONSUMES = \ <nl> onecore \ redist \ mspartners \ netfx45 \ core \ binary_release | PASS0 \ <nl> <nl> - BUILD_PASS3_CONSUMES = \ <nl> - onecore \ merged \ mbs \ bootableskus \ prepareimagingtools | PASS3 \ <nl> | Merged PR 4235821 : [ Git2Git ] Reflect some sources . dep changes from OS | microsoft/terminal | 55a90e03fc90382c3831d73b099d132348aa3239 | 2020-01-31T21:27:33Z |
mmm a / platform / server / detect . py <nl> ppp b / platform / server / detect . py <nl> def get_opts ( ) : <nl> BoolVariable ( ' use_ubsan ' , ' Use LLVM / GCC compiler undefined behavior sanitizer ( UBSAN ) ' , False ) , <nl> BoolVariable ( ' use_asan ' , ' Use LLVM / GCC compiler address sanitizer ( ASAN ) ) ' , False ) , <nl> BoolVariable ( ' use_lsan ' , ' Use LLVM / GCC compiler leak sanitizer ( LSAN ) ) ' , False ) , <nl> + BoolVariable ( ' use_tsan ' , ' Use LLVM / GCC compiler thread sanitizer ( TSAN ) ) ' , False ) , <nl> EnumVariable ( ' debug_symbols ' , ' Add debugging symbols to release builds ' , ' yes ' , ( ' yes ' , ' no ' , ' full ' ) ) , <nl> BoolVariable ( ' separate_debug_symbols ' , ' Create a separate file containing debugging symbols ' , False ) , <nl> BoolVariable ( ' execinfo ' , ' Use libexecinfo on systems where glibc is not available ' , False ) , <nl> def configure ( env ) : <nl> env . extra_suffix = " . llvm " + env . extra_suffix <nl> <nl> <nl> - if env [ ' use_ubsan ' ] or env [ ' use_asan ' ] or env [ ' use_lsan ' ] : <nl> + if env [ ' use_ubsan ' ] or env [ ' use_asan ' ] or env [ ' use_lsan ' ] or env [ ' use_tsan ' ] : <nl> env . extra_suffix + = " s " <nl> <nl> if env [ ' use_ubsan ' ] : <nl> def configure ( env ) : <nl> env . Append ( CCFLAGS = [ ' - fsanitize = leak ' ] ) <nl> env . Append ( LINKFLAGS = [ ' - fsanitize = leak ' ] ) <nl> <nl> + if env [ ' use_tsan ' ] : <nl> + env . Append ( CCFLAGS = [ ' - fsanitize = thread ' ] ) <nl> + env . Append ( LINKFLAGS = [ ' - fsanitize = thread ' ] ) <nl> + <nl> if env [ ' use_lto ' ] : <nl> env . Append ( CCFLAGS = [ ' - flto ' ] ) <nl> if not env [ ' use_llvm ' ] and env . GetOption ( " num_jobs " ) > 1 : <nl> mmm a / platform / x11 / detect . py <nl> ppp b / platform / x11 / detect . py <nl> def get_opts ( ) : <nl> BoolVariable ( ' use_ubsan ' , ' Use LLVM / GCC compiler undefined behavior sanitizer ( UBSAN ) ' , False ) , <nl> BoolVariable ( ' use_asan ' , ' Use LLVM / GCC compiler address sanitizer ( ASAN ) ) ' , False ) , <nl> BoolVariable ( ' use_lsan ' , ' Use LLVM / GCC compiler leak sanitizer ( LSAN ) ) ' , False ) , <nl> + BoolVariable ( ' use_tsan ' , ' Use LLVM / GCC compiler thread sanitizer ( TSAN ) ) ' , False ) , <nl> BoolVariable ( ' pulseaudio ' , ' Detect and use PulseAudio ' , True ) , <nl> BoolVariable ( ' udev ' , ' Use udev for gamepad connection callbacks ' , False ) , <nl> EnumVariable ( ' debug_symbols ' , ' Add debugging symbols to release builds ' , ' yes ' , ( ' yes ' , ' no ' , ' full ' ) ) , <nl> def configure ( env ) : <nl> print ( " Using LLD with GCC is not supported yet , try compiling with ' use_llvm = yes ' . " ) <nl> sys . exit ( 255 ) <nl> <nl> - if env [ ' use_ubsan ' ] or env [ ' use_asan ' ] or env [ ' use_lsan ' ] : <nl> + if env [ ' use_ubsan ' ] or env [ ' use_asan ' ] or env [ ' use_lsan ' ] or env [ ' use_tsan ' ] : <nl> env . extra_suffix + = " s " <nl> <nl> if env [ ' use_ubsan ' ] : <nl> def configure ( env ) : <nl> env . Append ( CCFLAGS = [ ' - fsanitize = leak ' ] ) <nl> env . Append ( LINKFLAGS = [ ' - fsanitize = leak ' ] ) <nl> <nl> + if env [ ' use_tsan ' ] : <nl> + env . Append ( CCFLAGS = [ ' - fsanitize = thread ' ] ) <nl> + env . Append ( LINKFLAGS = [ ' - fsanitize = thread ' ] ) <nl> + <nl> if env [ ' use_lto ' ] : <nl> if not env [ ' use_llvm ' ] and env . GetOption ( " num_jobs " ) > 1 : <nl> env . 
Append ( CCFLAGS = [ ' - flto ' ] ) <nl> | Merge pull request from qarmin / thread_sanitizer | godotengine/godot | 1f2bcb8f025e33718d06a72cbd439f1ba02aed0f | 2019-08-07T11:21:33Z |
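The row above only wires `-fsanitize=thread` into Godot's SCons options; the following is a minimal, hypothetical C++ sketch (not taken from the Godot sources) of the kind of unsynchronized access ThreadSanitizer reports once a binary is built with that flag.

    #include <thread>

    int counter = 0;  // shared variable written without any synchronization

    int main() {
        std::thread t([] { ++counter; });  // racing write from a second thread
        ++counter;                         // racing write from the main thread
        t.join();
        return 0;                          // TSAN flags the data race on `counter`
    }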
mmm a / docs / Lexicon . rst <nl> ppp b / docs / Lexicon . rst <nl> source code , tests , and commit messages . See also the ` LLVM lexicon ` _ . <nl> <nl> script mode <nl> The parsing mode that allows top - level imperative code in a source file . <nl> + <nl> + Sema <nl> + Short for ' Semantic Analysis ' , the compiler pass that performs type checking , <nl> + validation , and expression rewriting before SILGen . <nl> <nl> SIL <nl> " Swift Intermediate Language " . A high - level IR used by the Swift compiler <nl> | Merge pull request from harlanhaskins / its - just - semantics | apple/swift | 76eceb088bde7fd51806c53f19af8f3a73c3728f | 2018-04-17T16:44:18Z |
mmm a / src / wasm / jump - table - assembler . cc <nl> ppp b / src / wasm / jump - table - assembler . cc <nl> namespace v8 { <nl> namespace internal { <nl> namespace wasm { <nl> <nl> - void JumpTableAssembler : : EmitJumpTrampoline ( Address target ) { <nl> - # if V8_TARGET_ARCH_X64 <nl> - movq ( kScratchRegister , static_cast < uint64_t > ( target ) ) ; <nl> - jmp ( kScratchRegister ) ; <nl> - # elif V8_TARGET_ARCH_ARM64 <nl> - UseScratchRegisterScope temps ( this ) ; <nl> - Register scratch = temps . AcquireX ( ) ; <nl> - Mov ( scratch , static_cast < uint64_t > ( target ) ) ; <nl> - Br ( scratch ) ; <nl> - # elif V8_TARGET_ARCH_S390X <nl> - mov ( ip , Operand ( bit_cast < intptr_t , Address > ( target ) ) ) ; <nl> - b ( ip ) ; <nl> - # else <nl> - UNIMPLEMENTED ( ) ; <nl> - # endif <nl> - } <nl> - <nl> / / The implementation is compact enough to implement it inline here . If it gets <nl> / / much bigger , we might want to split it in a separate file per architecture . <nl> # if V8_TARGET_ARCH_X64 <nl> mmm a / src / wasm / jump - table - assembler . h <nl> ppp b / src / wasm / jump - table - assembler . h <nl> class JumpTableAssembler : public TurboAssembler { <nl> reinterpret_cast < void * > ( slot_addr ) , size , <nl> CodeObjectRequired : : kNo ) { } <nl> <nl> - / / Emit a trampoline to a possibly far away code target . <nl> - void EmitJumpTrampoline ( Address target ) ; <nl> - <nl> # if V8_TARGET_ARCH_X64 <nl> static constexpr int kJumpTableSlotSize = 18 ; <nl> # elif V8_TARGET_ARCH_IA32 <nl> | [ wasm ] Remove obsolete { JumpTableAssembler : : EmitJumpTrampoline } . | v8/v8 | de63a2554499a76b8d9959fe96be1f24d1ca66b5 | 2018-06-21T16:15:26Z |
mmm a / Marlin / src / module / tool_change . cpp <nl> ppp b / Marlin / src / module / tool_change . cpp <nl> void tool_change ( const uint8_t tmp_extruder , const float fr_mm_s / * = 0 . 0 * / , bool n <nl> parking_extruder_tool_change ( tmp_extruder , no_move ) ; <nl> # endif <nl> <nl> + const float xdiff = hotend_offset [ X_AXIS ] [ tmp_extruder ] - hotend_offset [ X_AXIS ] [ active_extruder ] , <nl> + ydiff = hotend_offset [ Y_AXIS ] [ tmp_extruder ] - hotend_offset [ Y_AXIS ] [ active_extruder ] , <nl> + zdiff = hotend_offset [ Z_AXIS ] [ tmp_extruder ] - hotend_offset [ Z_AXIS ] [ active_extruder ] ; <nl> + <nl> # if ENABLED ( SWITCHING_NOZZLE ) <nl> / / Always raise by at least 1 to avoid workpiece <nl> - const float zdiff = hotend_offset [ Z_AXIS ] [ active_extruder ] - hotend_offset [ Z_AXIS ] [ tmp_extruder ] ; <nl> - current_position [ Z_AXIS ] + = ( zdiff > 0 . 0 ? zdiff : 0 . 0 ) + 1 ; <nl> + current_position [ Z_AXIS ] + = ( zdiff < 0 . 0 ? - zdiff : 0 . 0 ) + 1 ; <nl> planner . buffer_line_kinematic ( current_position , planner . max_feedrate_mm_s [ Z_AXIS ] , active_extruder ) ; <nl> move_nozzle_servo ( tmp_extruder ) ; <nl> # endif <nl> <nl> - const float xdiff = hotend_offset [ X_AXIS ] [ tmp_extruder ] - hotend_offset [ X_AXIS ] [ active_extruder ] , <nl> - ydiff = hotend_offset [ Y_AXIS ] [ tmp_extruder ] - hotend_offset [ Y_AXIS ] [ active_extruder ] , <nl> - zdiff = hotend_offset [ Z_AXIS ] [ tmp_extruder ] - hotend_offset [ Z_AXIS ] [ active_extruder ] ; <nl> - <nl> # if ENABLED ( DEBUG_LEVELING_FEATURE ) <nl> if ( DEBUGGING ( LEVELING ) ) { <nl> SERIAL_ECHOPAIR ( " Offset Tool XY by { " , xdiff ) ; <nl> | Fix SWITCHING_NOZZLE compile error ( ) | MarlinFirmware/Marlin | 6dc9553aa5d8b35f54a3890e301c9a02c7aba308 | 2018-08-28T02:25:25Z |
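As a sanity check on the sign change in the row above, here is a small self-contained C++ sketch (hypothetical offsets, not Marlin code) showing that the corrected expression raises the nozzle when the incoming hotend sits lower than the active one, exactly as the patched ternary intends.

    #include <cstdio>

    int main() {
        // Hypothetical hotend Z offsets: the incoming tool sits 0.5 mm lower.
        const float z_active = 0.0f, z_new = -0.5f;
        const float zdiff = z_new - z_active;                       // -0.5, as computed in the patch
        const float raise = (zdiff < 0.0f ? -zdiff : 0.0f) + 1.0f;  // raise Z by 1.5 mm before the swap
        std::printf("raise Z by %.1f mm\n", raise);
        return 0;
    }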
mmm a / language / English / langinfo . xml <nl> ppp b / language / English / langinfo . xml <nl> <nl> < / region > <nl> <nl> < region name = " Central Europe " locale = " DE " > <nl> - < dateshort > YYYY - MM - DD < / dateshort > <nl> + < dateshort > DD - MM - YYYY < / dateshort > <nl> < datelong > DDDD , D MMMM YYYY < / datelong > <nl> < time symbolAM = " " symbolPM = " " > H : mm : ss < / time > <nl> < tempunit > C < / tempunit > <nl> | cosmetics : langinfo has an incorrect Central Europe region setting ( commit for ronie ) | xbmc/xbmc | cf3715091a42aa5f52b60c3858f9e2a67160dda9 | 2012-01-04T17:51:33Z |
mmm a / tests / js - tests / src / CocosDenshionTest / CocosDenshionTest . js <nl> ppp b / tests / js - tests / src / CocosDenshionTest / CocosDenshionTest . js <nl> <nl> <nl> var audioEngine = cc . audioEngine ; <nl> <nl> - var MUSIC_FILE = cc . sys . os = = cc . sys . OS_WP8 | | cc . sys . os = = cc . sys . OS_WINRT ? " res / Sound / background - music - aac . wav " : " res / Sound / background . mp3 " ; <nl> - var EFFECT_FILE = cc . sys . os = = cc . sys . OS_WP8 | | cc . sys . os = = cc . sys . OS_WINRT ? " res / Sound / pew - pew - lei . wav " : " res / Sound / effect2 . mp3 " ; <nl> + var MUSIC_FILE = cc . sys . os = = cc . sys . OS_WINRT ? " res / background . wav " : " res / Sound / background . mp3 " ; <nl> + var EFFECT_FILE = cc . sys . os = = cc . sys . OS_WINRT ? " res / effect1 . wav " : " res / Sound / effect2 . mp3 " ; <nl> <nl> var _DenshionTests = [ <nl> ' Music Test ' <nl> | fixed audio file path for winrt | cocos2d/cocos2d-x | aa641189ad56638f394cb258b986f2c002e359f5 | 2015-05-20T18:22:13Z |
mmm a / lib / IRGen / OptimizeARC . cpp <nl> ppp b / lib / IRGen / OptimizeARC . cpp <nl> <nl> # define DEBUG_TYPE " swift - optimize " <nl> # include " IRGen . h " <nl> # include " llvm / Instructions . h " <nl> + # include " llvm / IntrinsicInst . h " <nl> # include " llvm / Module . h " <nl> # include " llvm / Pass . h " <nl> # include " llvm / Transforms / Utils / SSAUpdater . h " <nl> # include " llvm / ADT / DenseMap . h " <nl> + # include " llvm / ADT / SetVector . h " <nl> # include " llvm / ADT / Statistic . h " <nl> # include " llvm / ADT / StringSwitch . h " <nl> # include " llvm / ADT / TinyPtrVector . h " <nl> # include " llvm / Support / InstIterator . h " <nl> + # include " llvm / Support / raw_ostream . h " <nl> using namespace llvm ; <nl> <nl> STATISTIC ( NumNoopDeleted , <nl> STATISTIC ( NumRetainReleasePairs , <nl> " Number of swift retain / release pairs eliminated " ) ; <nl> STATISTIC ( NumAllocateReleasePairs , <nl> " Number of swift allocate / release pairs eliminated " ) ; <nl> + STATISTIC ( NumStoreOnlyObjectsEliminated , <nl> + " Number of swift stored - only objects eliminated " ) ; <nl> <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> / / Utility Functions <nl> static bool performLocalReleaseMotion ( CallInst & Release , BasicBlock & BB ) { <nl> return false ; <nl> } <nl> <nl> - / / / performReleaseMotion - this moves releaes functions earlier , past <nl> - / / / instructions that are known to not access an object . If they are moved to <nl> - / / / touch a retain of the same object , destructive annihilation occurs ! <nl> - static bool performReleaseMotion ( Function & F ) { <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + / / Store - Only Object Elimination <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + <nl> + / / / performStoreOnlyObjectElimination - Scan the graph of uses of the specified <nl> + / / / object allocation . If the object does not escape and is only stored to <nl> + / / / ( this happens because GVN and other optimizations hoists forward substitutes <nl> + / / / all stores to the object to eliminate all loads from it ) , then zap the <nl> + / / / object and all accesses related to it . <nl> + static bool performStoreOnlyObjectElimination ( CallInst & Allocation , <nl> + BasicBlock : : iterator & BBI ) { <nl> + / / Do a depth first search exploring all of the uses of the object pointer , <nl> + / / following through casts , pointer adjustments etc . If we find any loads or <nl> + / / any escape sites of the object , we give up . If we succeed in walking the <nl> + / / entire graph of uses , we can remove the resultant set . <nl> + SmallSetVector < Instruction * , 16 > InvolvedInstructions ; <nl> + SmallVector < Instruction * , 16 > Worklist ; <nl> + Worklist . push_back ( & Allocation ) ; <nl> + <nl> + / / Stores - Keep track of all of the store instructions we see . <nl> + SmallVector < StoreInst * , 16 > Stores ; <nl> + <nl> + while ( ! Worklist . empty ( ) ) { <nl> + Instruction * I = Worklist . pop_back_val ( ) ; <nl> + <nl> + / / Insert the instruction into our InvolvedInstructions set . If we have <nl> + / / already seen it , then don ' t reprocess all of the uses . <nl> + if ( ! InvolvedInstructions . insert ( I ) ) continue ; <nl> + <nl> + / / Okay , this is the first time we ' ve seen this instruction , proceed . 
<nl> + switch ( classifyInstruction ( * I ) ) { <nl> + case RT_AllocObject : <nl> + / / If this is a different swift_allocObject than we started with , then <nl> + / / there is some computation feeding into a size or alignment computation <nl> + / / that we have to keep . . . unless we can delete * that * entire object as <nl> + / / well . <nl> + break ; <nl> + <nl> + / / If no memory is accessed , then something is being done with the <nl> + / / pointer : maybe it is bitcast or GEP ' d . Since there are no side effects , <nl> + / / it is perfectly fine to delete this instruction if all uses of the <nl> + / / instruction are also eliminable . <nl> + case RT_NoMemoryAccessed : <nl> + if ( I - > mayHaveSideEffects ( ) | | isa < TerminatorInst > ( I ) ) <nl> + return false ; <nl> + break ; <nl> + <nl> + / / It is perfectly fine to eliminate various retains and releases of this <nl> + / / object : we are zapping all accesses or none . <nl> + case RT_Retain : <nl> + case RT_RetainNoResult : <nl> + case RT_Release : <nl> + break ; <nl> + <nl> + / / If this is an unknown instruction , we have more interesting things to <nl> + / / consider . <nl> + case RT_Unknown : <nl> + / / Otherwise , this really is some unhandled instruction . Bail out . <nl> + return false ; <nl> + } <nl> + <nl> + / / Okay , if we got here , the instruction can be eaten so - long as all of its <nl> + / / uses can be . Scan through the uses and add them to the worklist for <nl> + / / recursive processing . <nl> + for ( auto UI = I - > use_begin ( ) , E = I - > use_end ( ) ; UI ! = E ; + + UI ) { <nl> + Instruction * User = cast < Instruction > ( * UI ) ; <nl> + <nl> + / / Handle stores as a special case here : we want to make sure that the <nl> + / / object is being stored * to * , not itself being stored ( which would be an <nl> + / / escape point ) . Since stores themselves don ' t have any uses , we can <nl> + / / short - cut the classification scheme above . <nl> + if ( StoreInst * SI = dyn_cast < StoreInst > ( User ) ) { <nl> + / / If this is a store * to * the object , we can zap it . <nl> + if ( UI . getOperandNo ( ) = = StoreInst : : getPointerOperandIndex ( ) ) { <nl> + InvolvedInstructions . insert ( SI ) ; <nl> + continue ; <nl> + } <nl> + / / Otherwise , using the object as a source ( or size ) is an escape . <nl> + return false ; <nl> + } <nl> + if ( MemIntrinsic * MI = dyn_cast < MemIntrinsic > ( User ) ) { <nl> + / / If this is a memset / memcpy / memmove * to * the object , we can zap it . <nl> + if ( UI . getOperandNo ( ) = = 0 ) { <nl> + InvolvedInstructions . insert ( MI ) ; <nl> + continue ; <nl> + } <nl> + / / Otherwise , using the object as a source ( or size ) is an escape . <nl> + return false ; <nl> + } <nl> + <nl> + / / Otherwise , normal instructions just go on the worklist for processing . <nl> + Worklist . push_back ( User ) ; <nl> + } <nl> + } <nl> + <nl> + / / Ok , we succeeded ! This means we can zap all of the instructions that use <nl> + / / the object . One thing we have to be careful of is to make sure that we <nl> + / / don ' t invalidate " BBI " ( the iterator the outer walk of the optimization <nl> + / / pass is using , and indicates the next instruction to process ) . This would <nl> + / / happen if we delete the instruction it is pointing to . Advance the <nl> + / / iterator if that would happen . <nl> + while ( InvolvedInstructions . count ( BBI ) ) <nl> + + + BBI ; <nl> + <nl> + / / Zap all of the instructions . <nl> + for ( auto I : InvolvedInstructions ) { <nl> + if ( ! 
I - > use_empty ( ) ) <nl> + I - > replaceAllUsesWith ( UndefValue : : get ( I - > getType ( ) ) ) ; <nl> + I - > eraseFromParent ( ) ; <nl> + } <nl> + <nl> + + + NumStoreOnlyObjectsEliminated ; <nl> + return true ; <nl> + } <nl> + <nl> + / / / performGeneralOptimizations - This does a forward scan over basic blocks , <nl> + / / / looking for interesting local optimizations that can be done . <nl> + static bool performGeneralOptimizations ( Function & F ) { <nl> bool Changed = false ; <nl> <nl> / / TODO : This is a really trivial local algorithm . It could be much better . <nl> for ( BasicBlock & BB : F ) { <nl> - for ( auto BBI = BB . begin ( ) , E = BB . end ( ) ; BBI ! = E ; ) { <nl> + for ( BasicBlock : : iterator BBI = BB . begin ( ) , E = BB . end ( ) ; BBI ! = E ; ) { <nl> / / Preincrement the iterator to avoid invalidation and out trouble . <nl> Instruction & I = * BBI + + ; <nl> - <nl> - / / Ignore instructions that are not releases . Try to optimize ones that <nl> - / / are . <nl> - if ( classifyInstruction ( I ) = = RT_Release ) <nl> + <nl> + / / Do various optimizations based on the instruction we find . <nl> + switch ( classifyInstruction ( I ) ) { <nl> + default : break ; <nl> + case RT_Release : <nl> Changed | = performLocalReleaseMotion ( cast < CallInst > ( I ) , BB ) ; <nl> + break ; <nl> + case RT_AllocObject : <nl> + Changed | = performStoreOnlyObjectElimination ( cast < CallInst > ( I ) , BBI ) ; <nl> + break ; <nl> + } <nl> } <nl> } <nl> return Changed ; <nl> bool SwiftARCOpt : : runOnFunction ( Function & F ) { <nl> / / optimizer . <nl> Changed | = canonicalizeArgumentReturnFunctions ( F ) ; <nl> <nl> - / / Next , perform release ( ) motion , eliminating retain / release pairs when it <nl> - / / turns out that a pair is not protecting anything that accesses the guarded <nl> - / / heap object . <nl> - Changed | = performReleaseMotion ( F ) ; <nl> + / / Next , do a pass with a couple of optimizations : <nl> + / / 1 ) release ( ) motion , eliminating retain / release pairs when it turns out <nl> + / / that a pair is not protecting anything that accesses the guarded heap <nl> + / / object . <nl> + / / 2 ) deletion of stored - only objects - objects that are allocated and <nl> + / / potentially retained and released , but are only stored to and don ' t <nl> + / / escape . <nl> + Changed | = performGeneralOptimizations ( F ) ; <nl> <nl> / / Finally , rewrite remaining heap object uses to make use of the implicit <nl> / / copy that swift_retain and similar functions perform . <nl> | implement a new optimization to completely eliminate objects when they | apple/swift | 13b89db2e2192b4f5fbafca7eb6e9b17244c5919 | 2012-05-29T05:56:31Z |
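To make the intent of the pass in the row above concrete, here is a small illustrative C++ analogue (not Swift source or LLVM IR from the patch): an allocation whose only uses are stores into it plus its paired release, which is the shape of use graph the new store-only object elimination deletes.

    struct Node { int a; int b; };

    void store_only_object() {
        Node* n = new Node();  // analogous to a swift_allocObject call
        n->a = 1;              // store *to* the object: eliminable
        n->b = 2;              // store *to* the object: eliminable
        delete n;              // analogous to the paired release
        // Nothing is ever loaded from *n and the pointer never escapes,
        // so the allocation and every use of it are dead together.
    }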
mmm a / framework / cybertron / croutine / croutine . cpp <nl> ppp b / framework / cybertron / croutine / croutine . cpp <nl> namespace cybertron { <nl> namespace croutine { <nl> <nl> using apollo : : cybertron : : event : : PerfEventCache ; <nl> + using apollo : : cybertron : : event : : SchedPerf ; <nl> <nl> thread_local CRoutine * CRoutine : : current_routine_ ; <nl> thread_local std : : shared_ptr < RoutineContext > CRoutine : : main_context_ ; <nl> RoutineState CRoutine : : Resume ( ) { <nl> current_routine_ = this ; <nl> / / update statistics info <nl> auto t_start = std : : chrono : : high_resolution_clock : : now ( ) ; <nl> - PerfEventCache : : Instance ( ) - > AddSchedEvent ( 1 , id_ , processor_id_ , 0 , 0 , - 1 ) ; <nl> + PerfEventCache : : Instance ( ) - > AddSchedEvent ( SchedPerf : : SWAP_IN , id_ , <nl> + processor_id_ , 0 , 0 , - 1 , - 1 ) ; <nl> SwapContext ( GetMainContext ( ) , this - > GetContext ( ) ) ; <nl> - if ( IsRunning ( ) ) { <nl> - state_ = RoutineState : : READY ; <nl> - } <nl> auto t_end = std : : chrono : : high_resolution_clock : : now ( ) ; <nl> auto start_nanos = std : : chrono : : duration_cast < std : : chrono : : nanoseconds > ( <nl> t_start . time_since_epoch ( ) ) <nl> . count ( ) ; <nl> - PerfEventCache : : Instance ( ) - > AddSchedEvent ( 2 , id_ , processor_id_ , 0 , <nl> - start_nanos , - 1 ) ; <nl> + PerfEventCache : : Instance ( ) - > AddSchedEvent ( <nl> + SchedPerf : : SWAP_OUT , id_ , processor_id_ , 0 , start_nanos , - 1 , int ( state_ ) ) ; <nl> + if ( IsRunning ( ) ) { <nl> + state_ = RoutineState : : READY ; <nl> + } <nl> + <nl> auto diff = <nl> std : : chrono : : duration < double , std : : milli > ( t_end - t_start ) . count ( ) ; <nl> statistic_info_ . exec_time + = diff ; <nl> mmm a / framework / cybertron / croutine / routine_factory . h <nl> ppp b / framework / cybertron / croutine / routine_factory . 
h <nl> namespace croutine { <nl> <nl> using common : : GlobalData ; <nl> using apollo : : cybertron : : event : : PerfEventCache ; <nl> + using apollo : : cybertron : : event : : SchedPerf ; <nl> <nl> class RoutineFactory { <nl> public : <nl> RoutineFactory CreateRoutineFactory ( <nl> for ( ; ; ) { <nl> if ( dv - > TryFetch ( msg ) ) { <nl> PerfEventCache : : Instance ( ) - > AddSchedEvent ( <nl> - 3 , CRoutine : : GetCurrentRoutine ( ) - > Id ( ) , <nl> - CRoutine : : GetCurrentRoutine ( ) - > ProcessorId ( ) , 0 , 0 , 1 ) ; <nl> + SchedPerf : : TRY_FETCH_OUT , CRoutine : : GetCurrentRoutine ( ) - > Id ( ) , <nl> + CRoutine : : GetCurrentRoutine ( ) - > ProcessorId ( ) , 0 , 0 , 1 , - 1 ) ; <nl> f ( msg ) ; <nl> } else { <nl> PerfEventCache : : Instance ( ) - > AddSchedEvent ( <nl> - 3 , CRoutine : : GetCurrentRoutine ( ) - > Id ( ) , <nl> - CRoutine : : GetCurrentRoutine ( ) - > ProcessorId ( ) , 0 , 0 , 0 ) ; <nl> + SchedPerf : : TRY_FETCH_OUT , CRoutine : : GetCurrentRoutine ( ) - > Id ( ) , <nl> + CRoutine : : GetCurrentRoutine ( ) - > ProcessorId ( ) , 0 , 0 , 0 , - 1 ) ; <nl> CRoutine : : GetCurrentRoutine ( ) - > SetState ( RoutineState : : WAITING_INPUT ) ; <nl> } <nl> CRoutine : : Yield ( ) ; <nl> RoutineFactory CreateRoutineFactory ( <nl> for ( ; ; ) { <nl> if ( dv - > TryFetch ( msg0 , msg1 ) ) { <nl> PerfEventCache : : Instance ( ) - > AddSchedEvent ( <nl> - 3 , CRoutine : : GetCurrentRoutine ( ) - > Id ( ) , <nl> - CRoutine : : GetCurrentRoutine ( ) - > ProcessorId ( ) , 0 , 0 , 1 ) ; <nl> + SchedPerf : : TRY_FETCH_OUT , CRoutine : : GetCurrentRoutine ( ) - > Id ( ) , <nl> + CRoutine : : GetCurrentRoutine ( ) - > ProcessorId ( ) , 0 , 0 , 1 , - 1 ) ; <nl> f ( msg0 , msg1 ) ; <nl> } else { <nl> PerfEventCache : : Instance ( ) - > AddSchedEvent ( <nl> - 3 , CRoutine : : GetCurrentRoutine ( ) - > Id ( ) , <nl> - CRoutine : : GetCurrentRoutine ( ) - > ProcessorId ( ) , 0 , 0 , 0 ) ; <nl> + SchedPerf : : TRY_FETCH_OUT , CRoutine : : GetCurrentRoutine ( ) - > Id ( ) , <nl> + CRoutine : : GetCurrentRoutine ( ) - > ProcessorId ( ) , 0 , 0 , 0 , - 1 ) ; <nl> CRoutine : : GetCurrentRoutine ( ) - > SetState ( RoutineState : : WAITING_INPUT ) ; <nl> } <nl> CRoutine : : Yield ( ) ; <nl> RoutineFactory CreateRoutineFactory ( <nl> for ( ; ; ) { <nl> if ( dv - > TryFetch ( msg0 , msg1 , msg2 ) ) { <nl> PerfEventCache : : Instance ( ) - > AddSchedEvent ( <nl> - 3 , CRoutine : : GetCurrentRoutine ( ) - > Id ( ) , <nl> - CRoutine : : GetCurrentRoutine ( ) - > ProcessorId ( ) , 0 , 0 , 1 ) ; <nl> + SchedPerf : : TRY_FETCH_OUT , CRoutine : : GetCurrentRoutine ( ) - > Id ( ) , <nl> + CRoutine : : GetCurrentRoutine ( ) - > ProcessorId ( ) , 0 , 0 , 1 , - 1 ) ; <nl> f ( msg0 , msg1 , msg2 ) ; <nl> } else { <nl> PerfEventCache : : Instance ( ) - > AddSchedEvent ( <nl> - 3 , CRoutine : : GetCurrentRoutine ( ) - > Id ( ) , <nl> - CRoutine : : GetCurrentRoutine ( ) - > ProcessorId ( ) , 0 , 0 , 0 ) ; <nl> + SchedPerf : : TRY_FETCH_OUT , CRoutine : : GetCurrentRoutine ( ) - > Id ( ) , <nl> + CRoutine : : GetCurrentRoutine ( ) - > ProcessorId ( ) , 0 , 0 , 0 , - 1 ) ; <nl> CRoutine : : GetCurrentRoutine ( ) - > SetState ( RoutineState : : WAITING_INPUT ) ; <nl> } <nl> CRoutine : : Yield ( ) ; <nl> RoutineFactory CreateRoutineFactory ( <nl> for ( ; ; ) { <nl> if ( dv - > TryFetch ( msg0 , msg1 , msg2 , msg3 ) ) { <nl> PerfEventCache : : Instance ( ) - > AddSchedEvent ( <nl> - 3 , CRoutine : : GetCurrentRoutine ( ) - > Id ( ) , <nl> - CRoutine : : GetCurrentRoutine ( ) - > ProcessorId ( ) , 0 , 0 , 1 ) ; 
<nl> + SchedPerf : : TRY_FETCH_OUT , CRoutine : : GetCurrentRoutine ( ) - > Id ( ) , <nl> + CRoutine : : GetCurrentRoutine ( ) - > ProcessorId ( ) , 0 , 0 , 1 , - 1 ) ; <nl> f ( msg0 , msg1 , msg2 , msg3 ) ; <nl> } else { <nl> PerfEventCache : : Instance ( ) - > AddSchedEvent ( <nl> - 3 , CRoutine : : GetCurrentRoutine ( ) - > Id ( ) , <nl> - CRoutine : : GetCurrentRoutine ( ) - > ProcessorId ( ) , 0 , 0 , 0 ) ; <nl> + SchedPerf : : TRY_FETCH_OUT , CRoutine : : GetCurrentRoutine ( ) - > Id ( ) , <nl> + CRoutine : : GetCurrentRoutine ( ) - > ProcessorId ( ) , 0 , 0 , 0 , - 1 ) ; <nl> CRoutine : : GetCurrentRoutine ( ) - > SetState ( RoutineState : : WAITING_INPUT ) ; <nl> } <nl> CRoutine : : Yield ( ) ; <nl> mmm a / framework / cybertron / event / perf_event . cpp <nl> ppp b / framework / cybertron / event / perf_event . cpp <nl> void SchedPerfEvent : : SetParams ( int count , . . . ) { <nl> t_start = va_arg ( ap , uint64_t ) ; <nl> t_end = va_arg ( ap , uint64_t ) ; <nl> try_fetch_result = va_arg ( ap , int ) ; <nl> + croutine_state = va_arg ( ap , int ) ; <nl> va_end ( ap ) ; <nl> } <nl> <nl> mmm a / framework / cybertron / event / perf_event . h <nl> ppp b / framework / cybertron / event / perf_event . h <nl> namespace event { <nl> using apollo : : cybertron : : base : : BoundedQueue ; <nl> using apollo : : cybertron : : common : : GlobalData ; <nl> <nl> + enum class EventType { SCHED_EVENT = 0 , TRANS_EVENT = 1 } ; <nl> + <nl> + enum class SchedPerf { <nl> + SWAP_IN = 1 , <nl> + SWAP_OUT = 2 , <nl> + TRY_FETCH_OUT = 3 , <nl> + NOTIFY_IN = 4 , <nl> + NEXT_ROUTINE = 5 <nl> + } ; <nl> + <nl> + enum class TransPerf { TRANS_FROM = 1 , TRANS_TO = 2 , WRITE_NOTIFY = 3 } ; <nl> + <nl> / / event_id <nl> / / 1 swap_in <nl> / / 2 swap_out <nl> class PerfEventBase { <nl> / / 5 next_routine <nl> class SchedPerfEvent : public PerfEventBase { <nl> public : <nl> - SchedPerfEvent ( ) { event_type = 0 ; } <nl> + SchedPerfEvent ( ) { event_type = int ( EventType : : SCHED_EVENT ) ; } <nl> void SetParams ( int count , . . . ) override ; <nl> std : : string SerializeToString ( ) override { <nl> std : : stringstream ss ; <nl> class SchedPerfEvent : public PerfEventBase { <nl> ss < < t_sleep < < " \ t " ; <nl> ss < < t_start < < " \ t " ; <nl> ss < < t_end < < " \ t " ; <nl> - ss < < try_fetch_result ; <nl> + ss < < try_fetch_result < < " \ t " ; <nl> + ss < < croutine_state ; <nl> return ss . str ( ) ; <nl> } <nl> <nl> class SchedPerfEvent : public PerfEventBase { <nl> int proc_id = - 1 ; <nl> uint64_t t_sleep = 0 ; <nl> int try_fetch_result = - 1 ; <nl> + int croutine_state = - 1 ; <nl> } ; <nl> <nl> / / event_id = 1 transport <nl> class SchedPerfEvent : public PerfEventBase { <nl> / / 2 write_data_cache & notify listener <nl> class TransportPerfEvent : public PerfEventBase { <nl> public : <nl> - TransportPerfEvent ( ) { event_type = 1 ; } <nl> + TransportPerfEvent ( ) { event_type = int ( EventType : : TRANS_EVENT ) ; } <nl> void SetParams ( int count , . . . ) override ; <nl> std : : string SerializeToString ( ) override { <nl> std : : stringstream ss ; <nl> mmm a / framework / cybertron / event / perf_event_cache . cpp <nl> ppp b / framework / cybertron / event / perf_event_cache . 
cpp <nl> void PerfEventCache : : AddEvent ( const std : : shared_ptr < PerfEventBase > & event ) { <nl> } <nl> } <nl> <nl> - void PerfEventCache : : AddSchedEvent ( int event_id , uint64_t cr_id , int proc_id , <nl> - uint64_t t_sleep , uint64_t t_start , <nl> - int try_fetch_result ) { <nl> + void PerfEventCache : : AddSchedEvent ( SchedPerf event_id , uint64_t cr_id , <nl> + int proc_id , uint64_t t_sleep , <nl> + uint64_t t_start , int try_fetch_result , <nl> + int croutine_state ) { <nl> if ( ! enable_ ) { <nl> return ; <nl> } <nl> void PerfEventCache : : AddSchedEvent ( int event_id , uint64_t cr_id , int proc_id , <nl> } <nl> <nl> std : : shared_ptr < PerfEventBase > event = std : : make_shared < SchedPerfEvent > ( ) ; <nl> - event - > SetParams ( 7 , event_id , cr_id , proc_id , t_sleep , t_start , <nl> - Time : : Now ( ) . ToNanosecond ( ) , try_fetch_result ) ; <nl> + event - > SetParams ( 8 , event_id , cr_id , proc_id , t_sleep , t_start , <nl> + Time : : Now ( ) . ToNanosecond ( ) , try_fetch_result , <nl> + croutine_state ) ; <nl> if ( ! event_queue_ . Enqueue ( event ) ) { <nl> / / AWARN < < " msg dropped . . . " < < event_id ; <nl> } <nl> } <nl> <nl> - void PerfEventCache : : AddTransportEvent ( int event_id , uint64_t channel_id , <nl> + void PerfEventCache : : AddTransportEvent ( TransPerf event_id , uint64_t channel_id , <nl> uint64_t msg_seq ) { <nl> if ( ! enable_ ) { <nl> return ; <nl> mmm a / framework / cybertron / event / perf_event_cache . h <nl> ppp b / framework / cybertron / event / perf_event_cache . h <nl> using apollo : : cybertron : : proto : : PerfConf ; <nl> class PerfEventCache { <nl> public : <nl> ~ PerfEventCache ( ) ; <nl> - void AddSchedEvent ( int event_id , uint64_t cr_id , int proc_id , <nl> - uint64_t t_sleep , uint64_t t_start , int try_fetch_result ) ; <nl> - void AddTransportEvent ( int event_id , uint64_t channel_id , uint64_t msg_seq ) ; <nl> + void AddSchedEvent ( SchedPerf event_id , uint64_t cr_id , int proc_id , <nl> + uint64_t t_sleep , uint64_t t_start , int try_fetch_result , <nl> + int croutine_state ) ; <nl> + void AddTransportEvent ( TransPerf event_id , uint64_t channel_id , <nl> + uint64_t msg_seq ) ; <nl> void AddEvent ( const std : : shared_ptr < PerfEventBase > & event ) ; <nl> <nl> private : <nl> mmm a / framework / cybertron / node / reader_base . h <nl> ppp b / framework / cybertron / node / reader_base . h <nl> namespace cybertron { <nl> <nl> using apollo : : cybertron : : common : : GlobalData ; <nl> using apollo : : cybertron : : event : : PerfEventCache ; <nl> + using apollo : : cybertron : : event : : TransPerf ; <nl> <nl> class ReaderBase { <nl> public : <nl> auto ReaderManager < MessageT > : : GetReader ( const proto : : RoleAttributes & role_attr ) <nl> ( void ) msg_info ; <nl> ( void ) reader_attr ; <nl> PerfEventCache : : Instance ( ) - > AddTransportEvent ( <nl> - 2 , reader_attr . channel_id ( ) , msg_info . seq_num ( ) ) ; <nl> + TransPerf : : TRANS_TO , reader_attr . channel_id ( ) , <nl> + msg_info . seq_num ( ) ) ; <nl> data : : DataDispatcher < MessageT > : : Instance ( ) - > Dispatch ( <nl> reader_attr . channel_id ( ) , msg ) ; <nl> PerfEventCache : : Instance ( ) - > AddTransportEvent ( <nl> - 3 , reader_attr . channel_id ( ) , msg_info . seq_num ( ) ) ; <nl> + TransPerf : : WRITE_NOTIFY , reader_attr . channel_id ( ) , <nl> + msg_info . seq_num ( ) ) ; <nl> + <nl> } ) ; <nl> } <nl> return lower_reach_map_ [ channel_name ] ; <nl> mmm a / framework / cybertron / scheduler / policy / fcfs_context . 
cpp <nl> ppp b / framework / cybertron / scheduler / policy / fcfs_context . cpp <nl> namespace scheduler { <nl> <nl> using apollo : : cybertron : : common : : GlobalData ; <nl> using apollo : : cybertron : : event : : PerfEventCache ; <nl> + using apollo : : cybertron : : event : : SchedPerf ; <nl> using croutine : : RoutineState ; <nl> <nl> std : : shared_ptr < CRoutine > FCFSContext : : NextRoutine ( ) { <nl> std : : shared_ptr < CRoutine > FCFSContext : : NextRoutine ( ) { <nl> routine - > SetVFrequency ( routine - > ProcessedNum ( ) / routine - > Frequency ( ) ) ; <nl> routine - > SetState ( RoutineState : : RUNNING ) ; <nl> PerfEventCache : : Instance ( ) - > AddSchedEvent ( <nl> - 5 , routine - > Id ( ) , routine - > ProcessorId ( ) , 0 , start_perf_time , - 1 ) ; <nl> + SchedPerf : : NEXT_ROUTINE , routine - > Id ( ) , routine - > ProcessorId ( ) , 0 , <nl> + start_perf_time , - 1 , - 1 ) ; <nl> } <nl> return routine ; <nl> } <nl> mmm a / framework / cybertron / scheduler / policy / processor_context . cpp <nl> ppp b / framework / cybertron / scheduler / policy / processor_context . cpp <nl> namespace cybertron { <nl> namespace scheduler { <nl> <nl> using apollo : : cybertron : : event : : PerfEventCache ; <nl> + using apollo : : cybertron : : event : : SchedPerf ; <nl> <nl> bool ProcessorContext : : Pop ( uint64_t croutine_id , <nl> std : : future < std : : shared_ptr < CRoutine > > & fut ) { <nl> void ProcessorContext : : NotifyProcessor ( uint64_t routine_id ) { <nl> return ; <nl> } <nl> <nl> - PerfEventCache : : Instance ( ) - > AddSchedEvent ( 4 , routine_id , proc_index_ , 0 , 0 , <nl> - - 1 ) ; <nl> + PerfEventCache : : Instance ( ) - > AddSchedEvent ( SchedPerf : : NOTIFY_IN , routine_id , <nl> + proc_index_ , 0 , 0 , - 1 , - 1 ) ; <nl> if ( ! cr_map_ [ routine_id ] - > IsRunning ( ) ) { <nl> cr_map_ [ routine_id ] - > SetState ( RoutineState : : READY ) ; <nl> } <nl> mmm a / framework / cybertron / transport / upper_reach / upper_reach . h <nl> ppp b / framework / cybertron / transport / upper_reach / upper_reach . h <nl> namespace apollo { <nl> namespace cybertron { <nl> namespace transport { <nl> <nl> + using apollo : : cybertron : : event : : PerfEventCache ; <nl> + using apollo : : cybertron : : event : : TransPerf ; <nl> + <nl> template < typename MessageT > <nl> class UpperReach : public Endpoint { <nl> public : <nl> UpperReach < MessageT > : : ~ UpperReach ( ) { } <nl> template < typename MessageT > <nl> bool UpperReach < MessageT > : : Transmit ( const MessagePtr & msg ) { <nl> msg_info_ . set_seq_num ( NextSeqNum ( ) ) ; <nl> - apollo : : cybertron : : event : : PerfEventCache : : Instance ( ) - > AddTransportEvent ( <nl> - 1 , attr_ . channel_id ( ) , msg_info_ . seq_num ( ) ) ; <nl> + PerfEventCache : : Instance ( ) - > AddTransportEvent ( <nl> + TransPerf : : TRANS_FROM , attr_ . channel_id ( ) , msg_info_ . seq_num ( ) ) ; <nl> return Transmit ( msg , msg_info_ ) ; <nl> } <nl> <nl> | framework : fix perf script issues | ApolloAuto/apollo | bdcf3c0048a9a610b7a81e5dbffc65b09fcc42c4 | 2018-09-17T20:45:03Z |
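A minimal sketch of a call site using the scoped enums introduced in the row above; the wrapper function is hypothetical and the include path is assumed for illustration, but the argument pattern mirrors the `AddSchedEvent` call in croutine.cpp.

    #include <cstdint>

    #include "cybertron/event/perf_event_cache.h"  // include path assumed for illustration

    using apollo::cybertron::event::PerfEventCache;
    using apollo::cybertron::event::SchedPerf;

    // Hypothetical helper: record a SWAP_IN event for a coroutine on a processor.
    void RecordSwapIn(uint64_t cr_id, int proc_id) {
      // The widened signature now also carries try_fetch_result and croutine_state (-1 = unused).
      PerfEventCache::Instance()->AddSchedEvent(SchedPerf::SWAP_IN, cr_id, proc_id,
                                                /*t_sleep=*/0, /*t_start=*/0,
                                                /*try_fetch_result=*/-1,
                                                /*croutine_state=*/-1);
    }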
mmm a / tensorflow / core / tpu / tpu_library_init_fns . inc <nl> ppp b / tensorflow / core / tpu / tpu_library_init_fns . inc <nl> tensorflow : : Status SetExecutorStructFn ( void * library_handle ) { <nl> TFTPU_SET_FN ( executor_fn , TpuTopology_NumCores ) ; <nl> TFTPU_SET_FN ( executor_fn , TpuTopology_Cores ) ; <nl> TFTPU_SET_FN ( executor_fn , TpuTopology_IdForHost ) ; <nl> + TFTPU_SET_FN ( executor_fn , TpuTopology_Version ) ; <nl> + <nl> TFTPU_SET_FN ( executor_fn , TpuCoreLocation_ChipCoordinates ) ; <nl> TFTPU_SET_FN ( executor_fn , TpuCoreLocation_HostCoordinates ) ; <nl> TFTPU_SET_FN ( executor_fn , TpuCoreLocation_Index ) ; <nl> mmm a / tensorflow / stream_executor / tpu / c_api_decl . h <nl> ppp b / tensorflow / stream_executor / tpu / c_api_decl . h <nl> enum TpuCoreTypeEnum { <nl> kEmbeddingV2 , <nl> } ; <nl> <nl> + enum TpuVersionEnum { <nl> + kUnknownTpuVersion , <nl> + kTpuV2 , <nl> + kTpuV3 , <nl> + } ; <nl> + <nl> typedef struct SE_Status SE_Status ; <nl> <nl> typedef struct SE_Platform SE_Platform ; <nl> mmm a / tensorflow / stream_executor / tpu / tpu_executor_c_api . h <nl> ppp b / tensorflow / stream_executor / tpu / tpu_executor_c_api . h <nl> void TpuTopology_Cores ( SE_TpuTopology * tpu_topology , <nl> TpuCoreTypeEnum tpu_core_type , <nl> SE_TpuTopology_Core * * cores ) ; <nl> int TpuTopology_IdForHost ( SE_TpuTopology * tpu_topology , int x , int y , int z ) ; <nl> + TpuVersionEnum TpuTopology_Version ( SE_TpuTopology * tpu_topology ) ; <nl> void TpuCoreLocation_ChipCoordinates ( SE_TpuTopology_Core * tpu_core_location , <nl> int * x , int * y , int * z ) ; <nl> void TpuCoreLocation_HostCoordinates ( SE_TpuTopology_Core * tpu_core_location , <nl> struct TfTpu_ExecutorApiFn { <nl> TFTPU_ADD_FN_IN_STRUCT ( TpuTopology_NumCores ) ; <nl> TFTPU_ADD_FN_IN_STRUCT ( TpuTopology_Cores ) ; <nl> TFTPU_ADD_FN_IN_STRUCT ( TpuTopology_IdForHost ) ; <nl> + TFTPU_ADD_FN_IN_STRUCT ( TpuTopology_Version ) ; <nl> <nl> TFTPU_ADD_FN_IN_STRUCT ( TpuCoreLocation_ChipCoordinates ) ; <nl> TFTPU_ADD_FN_IN_STRUCT ( TpuCoreLocation_HostCoordinates ) ; <nl> mmm a / tensorflow / stream_executor / tpu / tpu_topology . cc <nl> ppp b / tensorflow / stream_executor / tpu / tpu_topology . cc <nl> int TpuTopologyExternal : : IdForHost ( TpuDimensionsExternal host ) const { <nl> host . y , host . z ) ; <nl> } <nl> <nl> + TpuVersionEnum TpuTopologyExternal : : version ( ) const { <nl> + return tpu : : ExecutorApiFn ( ) - > TpuTopology_VersionFn ( topology_ ) ; <nl> + } <nl> + <nl> + std : : string TpuVersionEnumToString ( TpuVersionEnum version ) { <nl> + switch ( version ) { <nl> + case kUnknownTpuVersion : <nl> + return " Unknown TPU version " ; <nl> + case kTpuV2 : <nl> + return " TPU v2 " ; <nl> + case kTpuV3 : <nl> + return " TPU v3 " ; <nl> + } <nl> + } <nl> + <nl> } / / namespace tpu <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / stream_executor / tpu / tpu_topology . h <nl> ppp b / tensorflow / stream_executor / tpu / tpu_topology . h <nl> class TpuTopologyExternal { <nl> int index ) const ; <nl> std : : vector < TpuCoreLocationExternal > cores ( TpuCoreTypeEnum core_type ) const ; <nl> int IdForHost ( TpuDimensionsExternal host ) const ; <nl> + TpuVersionEnum version ( ) const ; <nl> <nl> private : <nl> SE_TpuTopology * topology_ ; <nl> } ; <nl> <nl> + std : : string TpuVersionEnumToString ( TpuVersionEnum version ) ; <nl> + <nl> } / / namespace tpu <nl> } / / namespace tensorflow <nl> <nl> | Add TpuTopologyExternal : : version ( ) and TpuVersionEnumToString ( ) . 
| tensorflow/tensorflow | 6a2f00362eed16cfcc792d8fc716c19fa9108ea4 | 2020-08-11T16:58:22Z |
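A short, hypothetical call site (not part of the commit) showing how the two additions in the row above compose; the class, method, helper and header are the ones introduced or touched in that diff.

    #include <string>

    #include "tensorflow/stream_executor/tpu/tpu_topology.h"

    // Returns a human-readable TPU version string such as "TPU v3".
    std::string DescribeTpuVersion(const tensorflow::tpu::TpuTopologyExternal& topology) {
      // version() forwards to the C API entry point TpuTopology_Version.
      return tensorflow::tpu::TpuVersionEnumToString(topology.version());
    }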
mmm a / xbmc / cores / dvdplayer / DVDDemuxers / DVDDemuxFFmpeg . cpp <nl> ppp b / xbmc / cores / dvdplayer / DVDDemuxers / DVDDemuxFFmpeg . cpp <nl> int CDVDDemuxFFmpeg : : GetNrOfStreams ( ) <nl> return m_stream_index . size ( ) ; <nl> } <nl> <nl> - static double SelectAspect ( AVStream * st , bool * forced ) <nl> + double CDVDDemuxFFmpeg : : SelectAspect ( AVStream * st , bool & forced ) <nl> { <nl> - * forced = false ; <nl> + / / trust matroshka container <nl> + if ( m_bMatroska & & st - > sample_aspect_ratio . num ! = 0 ) <nl> + { <nl> + forced = true ; <nl> + return av_q2d ( st - > sample_aspect_ratio ) ; <nl> + } <nl> + <nl> + forced = false ; <nl> / * if stream aspect is 1 : 1 or 0 : 0 use codec aspect * / <nl> - if ( ( st - > sample_aspect_ratio . den = = 1 | | st - > sample_aspect_ratio . den = = 0 ) <nl> - & & ( st - > sample_aspect_ratio . num = = 1 | | st - > sample_aspect_ratio . num = = 0 ) <nl> - & & st - > codec - > sample_aspect_ratio . num ! = 0 ) <nl> + if ( ( st - > sample_aspect_ratio . den = = 1 | | st - > sample_aspect_ratio . den = = 0 ) & & <nl> + ( st - > sample_aspect_ratio . num = = 1 | | st - > sample_aspect_ratio . num = = 0 ) & & <nl> + st - > codec - > sample_aspect_ratio . num ! = 0 ) <nl> + { <nl> return av_q2d ( st - > codec - > sample_aspect_ratio ) ; <nl> + } <nl> <nl> - * forced = true ; <nl> + forced = true ; <nl> if ( st - > sample_aspect_ratio . num ! = 0 ) <nl> return av_q2d ( st - > sample_aspect_ratio ) ; <nl> <nl> CDemuxStream * CDVDDemuxFFmpeg : : AddStream ( int iId ) <nl> <nl> st - > iWidth = pStream - > codec - > width ; <nl> st - > iHeight = pStream - > codec - > height ; <nl> - st - > fAspect = SelectAspect ( pStream , & st - > bForcedAspect ) * pStream - > codec - > width / pStream - > codec - > height ; <nl> + st - > fAspect = SelectAspect ( pStream , st - > bForcedAspect ) * pStream - > codec - > width / pStream - > codec - > height ; <nl> st - > iOrientation = 0 ; <nl> st - > iBitsPerPixel = pStream - > codec - > bits_per_coded_sample ; <nl> <nl> mmm a / xbmc / cores / dvdplayer / DVDDemuxers / DVDDemuxFFmpeg . h <nl> ppp b / xbmc / cores / dvdplayer / DVDDemuxers / DVDDemuxFFmpeg . h <nl> class CDVDDemuxFFmpeg : public CDVDDemux <nl> std : : string ConvertCodecToInternalStereoMode ( const std : : string & mode , const StereoModeConversionMap * conversionMap ) ; <nl> <nl> void GetL16Parameters ( int & channels , int & samplerate ) ; <nl> + double SelectAspect ( AVStream * st , bool & forced ) ; <nl> <nl> CCriticalSection m_critSection ; <nl> std : : map < int , CDemuxStream * > m_streams ; <nl> | Merge pull request from FernetMenta / SAR | xbmc/xbmc | f9a77bc72a0ef56c7173cc848aa5e6631f4d8c6c | 2015-10-03T08:04:12Z |
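For context on the value computed in the row above, a tiny worked example (hypothetical numbers, not from the XBMC sources) of how the returned sample aspect ratio feeds the stream's display aspect at the AddStream call site.

    #include <cstdio>

    int main() {
        // Anamorphic PAL: 720x576 storage with a 64:45 sample aspect ratio.
        const double sar = 64.0 / 45.0;
        const int width = 720, height = 576;
        const double fAspect = sar * width / height;  // ~1.778, i.e. a 16:9 display aspect
        std::printf("display aspect = %.3f\n", fAspect);
        return 0;
    }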
new file mode 100644 <nl> index 000000000000 . . 9faf02752269 <nl> mmm / dev / null <nl> ppp b / jstests / core / json_schema / array_keywords . js <nl> <nl> + / * * <nl> + * Tests JSON Schema keywords related to arrays : <nl> + * - minItems <nl> + * - maxItems <nl> + * / <nl> + ( function ( ) { <nl> + " use strict " ; <nl> + <nl> + const coll = db . getCollection ( " json_schema_arrays " ) ; <nl> + coll . drop ( ) ; <nl> + <nl> + assert . writeOK ( coll . insert ( { _id : 0 , a : 1 } ) ) ; <nl> + assert . writeOK ( coll . insert ( { _id : 1 , a : [ ] } ) ) ; <nl> + assert . writeOK ( coll . insert ( { _id : 2 , a : [ 2 , " str " ] } ) ) ; <nl> + assert . writeOK ( coll . insert ( { _id : 3 , a : [ 3 , 3 , " foo " ] } ) ) ; <nl> + assert . writeOK ( coll . insert ( { _id : 4 } ) ) ; <nl> + <nl> + function assertFindResultsSortedEq ( query , expected ) { <nl> + assert . eq ( coll . find ( query , { _id : 1 } ) . sort ( { _id : 1 } ) . toArray ( ) , <nl> + expected , <nl> + " JSON Schema keyword did not match the expected documents " ) ; <nl> + } <nl> + <nl> + / / Test that the JSON Schema fails to parse if " minItems " is not a valid number . <nl> + assert . throws ( ( ) = > coll . find ( { $ jsonSchema : { minItems : " blah " } } ) . itcount ( ) ) ; <nl> + assert . throws ( ( ) = > coll . find ( { $ jsonSchema : { minItems : - 1 } } ) . itcount ( ) ) ; <nl> + assert . throws ( ( ) = > coll . find ( { $ jsonSchema : { minItems : 12 . 5 } } ) . itcount ( ) ) ; <nl> + <nl> + / / Test that " minItems " matches non - arrays , or arrays with at least the given number of items . <nl> + assertFindResultsSortedEq ( { $ jsonSchema : { minItems : 10 } } , <nl> + [ { _id : 0 } , { _id : 1 } , { _id : 2 } , { _id : 3 } , { _id : 4 } ] ) ; <nl> + assertFindResultsSortedEq ( { $ jsonSchema : { properties : { a : { minItems : 10 } } } } , <nl> + [ { _id : 0 } , { _id : 4 } ] ) ; <nl> + assertFindResultsSortedEq ( { $ jsonSchema : { properties : { a : { minItems : 2 } } } } , <nl> + [ { _id : 0 } , { _id : 2 } , { _id : 3 } , { _id : 4 } ] ) ; <nl> + <nl> + / / Test that the JSON Schema fails to parse if " maxItems " is not a valid number . <nl> + assert . throws ( ( ) = > coll . find ( { $ jsonSchema : { maxItems : " blah " } } ) . itcount ( ) ) ; <nl> + assert . throws ( ( ) = > coll . find ( { $ jsonSchema : { maxItems : - 1 } } ) . itcount ( ) ) ; <nl> + assert . throws ( ( ) = > coll . find ( { $ jsonSchema : { maxItems : 12 . 5 } } ) . itcount ( ) ) ; <nl> + <nl> + / / Test that " maxItems " matches non - arrays , or arrays with at most the given number of items . <nl> + assertFindResultsSortedEq ( { $ jsonSchema : { maxItems : 0 } } , <nl> + [ { _id : 0 } , { _id : 1 } , { _id : 2 } , { _id : 3 } , { _id : 4 } ] ) ; <nl> + assertFindResultsSortedEq ( { $ jsonSchema : { properties : { a : { maxItems : 0 } } } } , <nl> + [ { _id : 0 } , { _id : 1 } , { _id : 4 } ] ) ; <nl> + assertFindResultsSortedEq ( { $ jsonSchema : { properties : { a : { maxItems : 2 } } } } , <nl> + [ { _id : 0 } , { _id : 1 } , { _id : 2 } , { _id : 4 } ] ) ; <nl> + } ( ) ) ; <nl> mmm a / src / mongo / db / matcher / schema / json_schema_parser . cpp <nl> ppp b / src / mongo / db / matcher / schema / json_schema_parser . cpp <nl> <nl> # include " mongo / db / matcher / expression_always_boolean . h " <nl> # include " mongo / db / matcher / expression_parser . h " <nl> # include " mongo / db / matcher / matcher_type_alias . 
h " <nl> + # include " mongo / db / matcher / schema / expression_internal_schema_all_elem_match_from_index . h " <nl> # include " mongo / db / matcher / schema / expression_internal_schema_fmod . h " <nl> + # include " mongo / db / matcher / schema / expression_internal_schema_max_items . h " <nl> # include " mongo / db / matcher / schema / expression_internal_schema_max_length . h " <nl> + # include " mongo / db / matcher / schema / expression_internal_schema_min_items . h " <nl> # include " mongo / db / matcher / schema / expression_internal_schema_min_length . h " <nl> # include " mongo / db / matcher / schema / expression_internal_schema_object_match . h " <nl> + # include " mongo / db / matcher / schema / expression_internal_schema_unique_items . h " <nl> # include " mongo / db / matcher / schema / expression_internal_schema_xor . h " <nl> # include " mongo / stdx / memory . h " <nl> # include " mongo / util / string_map . h " <nl> namespace mongo { <nl> <nl> namespace { <nl> / / JSON Schema keyword constants . <nl> + constexpr StringData kSchemaAdditionalItemsKeyword = " additionalItems " _sd ; <nl> constexpr StringData kSchemaAllOfKeyword = " allOf " _sd ; <nl> constexpr StringData kSchemaAnyOfKeyword = " anyOf " _sd ; <nl> constexpr StringData kSchemaExclusiveMaximumKeyword = " exclusiveMaximum " _sd ; <nl> constexpr StringData kSchemaExclusiveMinimumKeyword = " exclusiveMinimum " _sd ; <nl> + constexpr StringData kSchemaItemsKeyword = " items " _sd ; <nl> constexpr StringData kSchemaMaximumKeyword = " maximum " _sd ; <nl> + constexpr StringData kSchemaMaxItemsKeyword = " maxItems " _sd ; <nl> constexpr StringData kSchemaMaxLengthKeyword = " maxLength " _sd ; <nl> constexpr StringData kSchemaMinimumKeyword = " minimum " _sd ; <nl> + constexpr StringData kSchemaMinItemsKeyword = " minItems " _sd ; <nl> constexpr StringData kSchemaMinLengthKeyword = " minLength " _sd ; <nl> constexpr StringData kSchemaMultipleOfKeyword = " multipleOf " _sd ; <nl> constexpr StringData kSchemaNotKeyword = " not " _sd ; <nl> constexpr StringData kSchemaOneOfKeyword = " oneOf " _sd ; <nl> constexpr StringData kSchemaPatternKeyword = " pattern " _sd ; <nl> constexpr StringData kSchemaPropertiesKeyword = " properties " _sd ; <nl> constexpr StringData kSchemaTypeKeyword = " type " _sd ; <nl> + constexpr StringData kSchemaUniqueItemsKeyword = " uniqueItems " _sd ; <nl> <nl> / * * <nl> - * Parses ' schema ' to the semantically equivalent match expression . If the schema has an <nl> - * associated path , e . g . if we are parsing the nested schema for property " myProp " in <nl> + * Parses ' schema ' to the semantically equivalent match expression . If the schema has an associated <nl> + * path , e . g . if we are parsing the nested schema for property " myProp " in <nl> * <nl> - * { properties : { myProp : < nested - schema > } } <nl> + * { properties : { myProp : < nested - schema > } } <nl> * <nl> - * then this is passed in ' path ' . In this example , the value of ' path ' is " myProp " . If there is <nl> - * no path , e . g . for top - level schemas , then ' path ' is empty . <nl> + * then this is passed in ' path ' . In this example , the value of ' path ' is " myProp " . If there is no <nl> + * path , e . g . for top - level schemas , then ' path ' is empty . 
<nl> * / <nl> StatusWithMatchExpression _parse ( StringData path , BSONObj schema ) ; <nl> <nl> StatusWithMatchExpression parseMinimum ( StringData path , <nl> return makeRestriction ( restrictionType , std : : move ( expr ) , typeExpr ) ; <nl> } <nl> <nl> + / * * <nl> + * Parses length - related keywords that expect a nonnegative long as an argument . <nl> + * / <nl> template < class T > <nl> - StatusWithMatchExpression parseStrLength ( StringData path , <nl> - BSONElement strLength , <nl> - InternalSchemaTypeExpression * typeExpr , <nl> - StringData keyword ) { <nl> - if ( ! strLength . isNumber ( ) ) { <nl> - return { <nl> - Status ( ErrorCodes : : TypeMismatch , <nl> - str : : stream ( ) < < " $ jsonSchema keyword ' " < < keyword < < " ' must be a number " ) } ; <nl> - } <nl> - <nl> - auto strLengthWithStatus = <nl> - MatchExpressionParser : : parseIntegerElementToNonNegativeLong ( strLength ) ; <nl> - <nl> - if ( ! strLengthWithStatus . isOK ( ) ) { <nl> - return strLengthWithStatus . getStatus ( ) ; <nl> + StatusWithMatchExpression parseLength ( StringData path , <nl> + BSONElement length , <nl> + InternalSchemaTypeExpression * typeExpr , <nl> + BSONType restrictionType ) { <nl> + auto parsedLength = MatchExpressionParser : : parseIntegerElementToNonNegativeLong ( length ) ; <nl> + if ( ! parsedLength . isOK ( ) ) { <nl> + return parsedLength . getStatus ( ) ; <nl> } <nl> <nl> if ( path . empty ( ) ) { <nl> StatusWithMatchExpression parseStrLength ( StringData path , <nl> } <nl> <nl> auto expr = stdx : : make_unique < T > ( ) ; <nl> - auto status = expr - > init ( path , strLengthWithStatus . getValue ( ) ) ; <nl> + auto status = expr - > init ( path , parsedLength . getValue ( ) ) ; <nl> if ( ! status . isOK ( ) ) { <nl> return status ; <nl> } <nl> - return makeRestriction ( BSONType : : String , std : : move ( expr ) , typeExpr ) ; <nl> + return makeRestriction ( restrictionType , std : : move ( expr ) , typeExpr ) ; <nl> } <nl> <nl> StatusWithMatchExpression parsePattern ( StringData path , <nl> StatusWithMatchExpression parseLogicalKeyword ( StringData path , BSONElement logic <nl> return { std : : move ( listOfExpr ) } ; <nl> } <nl> <nl> + StatusWithMatchExpression parseProperties ( StringData path , <nl> + BSONElement propertiesElt , <nl> + InternalSchemaTypeExpression * typeExpr ) { <nl> + if ( propertiesElt . type ( ) ! = BSONType : : Object ) { <nl> + return { Status ( ErrorCodes : : TypeMismatch , <nl> + str : : stream ( ) < < " $ jsonSchema keyword ' " < < kSchemaPropertiesKeyword <nl> + < < " ' must be an object " ) } ; <nl> + } <nl> + auto propertiesObj = propertiesElt . embeddedObject ( ) ; <nl> + <nl> + auto andExpr = stdx : : make_unique < AndMatchExpression > ( ) ; <nl> + for ( auto & & property : propertiesObj ) { <nl> + if ( property . type ( ) ! = BSONType : : Object ) { <nl> + return { ErrorCodes : : TypeMismatch , <nl> + str : : stream ( ) < < " Nested schema for $ jsonSchema property ' " <nl> + < < property . fieldNameStringData ( ) <nl> + < < " ' must be an object " } ; <nl> + } <nl> + <nl> + auto nestedSchemaMatch = _parse ( property . fieldNameStringData ( ) , property . embeddedObject ( ) ) ; <nl> + if ( ! nestedSchemaMatch . isOK ( ) ) { <nl> + return nestedSchemaMatch . getStatus ( ) ; <nl> + } <nl> + <nl> + / / Each property either must not exist or must match the nested schema . Therefore , we <nl> + / / generate the match expression ( OR ( NOT ( EXISTS ) ) < nestedSchemaMatch > ) . 
<nl> + auto existsExpr = stdx : : make_unique < ExistsMatchExpression > ( ) ; <nl> + invariantOK ( existsExpr - > init ( property . fieldNameStringData ( ) ) ) ; <nl> + <nl> + auto notExpr = stdx : : make_unique < NotMatchExpression > ( ) ; <nl> + invariantOK ( notExpr - > init ( existsExpr . release ( ) ) ) ; <nl> + <nl> + auto orExpr = stdx : : make_unique < OrMatchExpression > ( ) ; <nl> + orExpr - > add ( notExpr . release ( ) ) ; <nl> + orExpr - > add ( nestedSchemaMatch . getValue ( ) . release ( ) ) ; <nl> + <nl> + andExpr - > add ( orExpr . release ( ) ) ; <nl> + } <nl> + <nl> + / / If this is a top - level schema , then we have no path and there is no need for an <nl> + / / explicit object match node . <nl> + if ( path . empty ( ) ) { <nl> + return { std : : move ( andExpr ) } ; <nl> + } <nl> + <nl> + auto objectMatch = stdx : : make_unique < InternalSchemaObjectMatchExpression > ( ) ; <nl> + auto objectMatchStatus = objectMatch - > init ( std : : move ( andExpr ) , path ) ; <nl> + if ( ! objectMatchStatus . isOK ( ) ) { <nl> + return objectMatchStatus ; <nl> + } <nl> + <nl> + return makeRestriction ( BSONType : : Object , std : : move ( objectMatch ) , typeExpr ) ; <nl> + } <nl> + <nl> / * * <nl> * Parses the logical keywords in ' keywordMap ' to their equivalent match expressions <nl> * and , on success , adds the results to ' andExpr ' . <nl> StatusWithMatchExpression parseLogicalKeyword ( StringData path , BSONElement logic <nl> * - not <nl> * - enum <nl> * / <nl> - Status parseLogicalKeywords ( StringMap < BSONElement > & keywordMap , <nl> - StringData path , <nl> - AndMatchExpression * andExpr ) { <nl> - if ( auto allOfElt = keywordMap [ kSchemaAllOfKeyword ] ) { <nl> + Status translateLogicalKeywords ( StringMap < BSONElement > * keywordMap , <nl> + StringData path , <nl> + AndMatchExpression * andExpr ) { <nl> + if ( auto allOfElt = keywordMap - > get ( kSchemaAllOfKeyword ) ) { <nl> auto allOfExpr = parseLogicalKeyword < AndMatchExpression > ( path , allOfElt ) ; <nl> if ( ! allOfExpr . isOK ( ) ) { <nl> return allOfExpr . getStatus ( ) ; <nl> Status parseLogicalKeywords ( StringMap < BSONElement > & keywordMap , <nl> andExpr - > add ( allOfExpr . getValue ( ) . release ( ) ) ; <nl> } <nl> <nl> - if ( auto anyOfElt = keywordMap [ kSchemaAnyOfKeyword ] ) { <nl> + if ( auto anyOfElt = keywordMap - > get ( kSchemaAnyOfKeyword ) ) { <nl> auto anyOfExpr = parseLogicalKeyword < OrMatchExpression > ( path , anyOfElt ) ; <nl> if ( ! anyOfExpr . isOK ( ) ) { <nl> return anyOfExpr . getStatus ( ) ; <nl> Status parseLogicalKeywords ( StringMap < BSONElement > & keywordMap , <nl> andExpr - > add ( anyOfExpr . getValue ( ) . release ( ) ) ; <nl> } <nl> <nl> - if ( auto oneOfElt = keywordMap [ kSchemaOneOfKeyword ] ) { <nl> + if ( auto oneOfElt = keywordMap - > get ( kSchemaOneOfKeyword ) ) { <nl> auto oneOfExpr = parseLogicalKeyword < InternalSchemaXorMatchExpression > ( path , oneOfElt ) ; <nl> if ( ! oneOfExpr . isOK ( ) ) { <nl> return oneOfExpr . getStatus ( ) ; <nl> Status parseLogicalKeywords ( StringMap < BSONElement > & keywordMap , <nl> andExpr - > add ( oneOfExpr . getValue ( ) . release ( ) ) ; <nl> } <nl> <nl> - if ( auto notElt = keywordMap [ kSchemaNotKeyword ] ) { <nl> + if ( auto notElt = keywordMap - > get ( kSchemaNotKeyword ) ) { <nl> if ( notElt . type ( ) ! 
= BSONType : : Object ) { <nl> return { ErrorCodes : : TypeMismatch , <nl> str : : stream ( ) < < " $ jsonSchema keyword ' " < < kSchemaNotKeyword <nl> Status parseLogicalKeywords ( StringMap < BSONElement > & keywordMap , <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - StatusWithMatchExpression parseProperties ( StringData path , <nl> - BSONElement propertiesElt , <nl> - InternalSchemaTypeExpression * typeExpr ) { <nl> - if ( propertiesElt . type ( ) ! = BSONType : : Object ) { <nl> - return { Status ( ErrorCodes : : TypeMismatch , <nl> - str : : stream ( ) < < " $ jsonSchema keyword ' " < < kSchemaPropertiesKeyword <nl> - < < " ' must be an object " ) } ; <nl> + / * * <nl> + * Parses JSON Schema array keywords in ' keywordMap ' and adds them to ' andExpr ' . Returns a non - OK <nl> + * status if an error occurs during parsing . <nl> + * <nl> + * This function parses the following keywords : <nl> + * - minItems <nl> + * - maxItems <nl> + * - uniqueItems <nl> + * - items <nl> + * - additionalItems <nl> + * / <nl> + Status translateArrayKeywords ( StringMap < BSONElement > * keywordMap , <nl> + StringData path , <nl> + InternalSchemaTypeExpression * typeExpr , <nl> + AndMatchExpression * andExpr ) { <nl> + if ( auto minItemsElt = keywordMap - > get ( kSchemaMinItemsKeyword ) ) { <nl> + auto minItemsExpr = parseLength < InternalSchemaMinItemsMatchExpression > ( <nl> + path , minItemsElt , typeExpr , BSONType : : Array ) ; <nl> + if ( ! minItemsExpr . isOK ( ) ) { <nl> + return minItemsExpr . getStatus ( ) ; <nl> + } <nl> + andExpr - > add ( minItemsExpr . getValue ( ) . release ( ) ) ; <nl> } <nl> - auto propertiesObj = propertiesElt . embeddedObject ( ) ; <nl> <nl> - auto andExpr = stdx : : make_unique < AndMatchExpression > ( ) ; <nl> - for ( auto & & property : propertiesObj ) { <nl> - if ( property . type ( ) ! = BSONType : : Object ) { <nl> - return { ErrorCodes : : TypeMismatch , <nl> - str : : stream ( ) < < " Nested schema for $ jsonSchema property ' " <nl> - < < property . fieldNameStringData ( ) <nl> - < < " ' must be an object " } ; <nl> + if ( auto maxItemsElt = keywordMap - > get ( kSchemaMaxItemsKeyword ) ) { <nl> + auto maxItemsExpr = parseLength < InternalSchemaMaxItemsMatchExpression > ( <nl> + path , maxItemsElt , typeExpr , BSONType : : Array ) ; <nl> + if ( ! maxItemsExpr . isOK ( ) ) { <nl> + return maxItemsExpr . getStatus ( ) ; <nl> } <nl> + andExpr - > add ( maxItemsExpr . getValue ( ) . release ( ) ) ; <nl> + } <nl> <nl> - auto nestedSchemaMatch = _parse ( property . fieldNameStringData ( ) , property . embeddedObject ( ) ) ; <nl> - if ( ! nestedSchemaMatch . isOK ( ) ) { <nl> - return nestedSchemaMatch . getStatus ( ) ; <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Parses JSON Schema keywords related to objects in ' keywordMap ' and adds them to ' andExpr ' . <nl> + * Returns a non - OK status if an error occurs during parsing . <nl> + * <nl> + * This function parses the following keywords : <nl> + * - properties <nl> + * / <nl> + Status translateObjectKeywords ( StringMap < BSONElement > * keywordMap , <nl> + StringData path , <nl> + InternalSchemaTypeExpression * typeExpr , <nl> + AndMatchExpression * andExpr ) { <nl> + if ( auto propertiesElt = keywordMap - > get ( kSchemaPropertiesKeyword ) ) { <nl> + auto propertiesExpr = parseProperties ( path , propertiesElt , typeExpr ) ; <nl> + if ( ! propertiesExpr . isOK ( ) ) { <nl> + return propertiesExpr . getStatus ( ) ; <nl> } <nl> + andExpr - > add ( propertiesExpr . getValue ( ) . 
release ( ) ) ; <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> <nl> - / / Each property either must not exist or must match the nested schema . Therefore , we <nl> - / / generate the match expression ( OR ( NOT ( EXISTS ) ) < nestedSchemaMatch > ) . <nl> - auto existsExpr = stdx : : make_unique < ExistsMatchExpression > ( ) ; <nl> - invariantOK ( existsExpr - > init ( property . fieldNameStringData ( ) ) ) ; <nl> + / * * <nl> + * Parses JSON Schema scalar keywords in ' keywordMap ' and adds them to ' andExpr ' . Returns a non - OK <nl> + * status if an error occurs during parsing . <nl> + * <nl> + * This function parses the following keywords : <nl> + * - minimum <nl> + * - exclusiveMinimum <nl> + * - maximum <nl> + * - exclusiveMaximum <nl> + * - minLength <nl> + * - maxLength <nl> + * - pattern <nl> + * / <nl> + Status translateScalarKeywords ( StringMap < BSONElement > * keywordMap , <nl> + StringData path , <nl> + InternalSchemaTypeExpression * typeExpr , <nl> + AndMatchExpression * andExpr ) { <nl> + / / String keywords . <nl> + if ( auto patternElt = keywordMap - > get ( kSchemaPatternKeyword ) ) { <nl> + auto patternExpr = parsePattern ( path , patternElt , typeExpr ) ; <nl> + if ( ! patternExpr . isOK ( ) ) { <nl> + return patternExpr . getStatus ( ) ; <nl> + } <nl> + andExpr - > add ( patternExpr . getValue ( ) . release ( ) ) ; <nl> + } <nl> <nl> - auto notExpr = stdx : : make_unique < NotMatchExpression > ( ) ; <nl> - invariantOK ( notExpr - > init ( existsExpr . release ( ) ) ) ; <nl> + if ( auto maxLengthElt = keywordMap - > get ( kSchemaMaxLengthKeyword ) ) { <nl> + auto maxLengthExpr = parseLength < InternalSchemaMaxLengthMatchExpression > ( <nl> + path , maxLengthElt , typeExpr , BSONType : : String ) ; <nl> + if ( ! maxLengthExpr . isOK ( ) ) { <nl> + return maxLengthExpr . getStatus ( ) ; <nl> + } <nl> + andExpr - > add ( maxLengthExpr . getValue ( ) . release ( ) ) ; <nl> + } <nl> <nl> - auto orExpr = stdx : : make_unique < OrMatchExpression > ( ) ; <nl> - orExpr - > add ( notExpr . release ( ) ) ; <nl> - orExpr - > add ( nestedSchemaMatch . getValue ( ) . release ( ) ) ; <nl> + if ( auto minLengthElt = keywordMap - > get ( kSchemaMinLengthKeyword ) ) { <nl> + auto minLengthExpr = parseLength < InternalSchemaMinLengthMatchExpression > ( <nl> + path , minLengthElt , typeExpr , BSONType : : String ) ; <nl> + if ( ! minLengthExpr . isOK ( ) ) { <nl> + return minLengthExpr . getStatus ( ) ; <nl> + } <nl> + andExpr - > add ( minLengthExpr . getValue ( ) . release ( ) ) ; <nl> + } <nl> <nl> - andExpr - > add ( orExpr . release ( ) ) ; <nl> + / / Numeric keywords . <nl> + if ( auto multipleOfElt = keywordMap - > get ( kSchemaMultipleOfKeyword ) ) { <nl> + auto multipleOfExpr = parseMultipleOf ( path , multipleOfElt , typeExpr ) ; <nl> + if ( ! multipleOfExpr . isOK ( ) ) { <nl> + return multipleOfExpr . getStatus ( ) ; <nl> + } <nl> + andExpr - > add ( multipleOfExpr . getValue ( ) . release ( ) ) ; <nl> } <nl> <nl> - / / If this is a top - level schema , then we have no path and there is no need for an <nl> - / / explicit object match node . <nl> - if ( path . empty ( ) ) { <nl> - return { std : : move ( andExpr ) } ; <nl> + if ( auto maximumElt = keywordMap - > get ( kSchemaMaximumKeyword ) ) { <nl> + bool isExclusiveMaximum = false ; <nl> + if ( auto exclusiveMaximumElt = keywordMap - > get ( kSchemaExclusiveMaximumKeyword ) ) { <nl> + if ( ! exclusiveMaximumElt . 
isBoolean ( ) ) { <nl> + return { Status ( ErrorCodes : : TypeMismatch , <nl> + str : : stream ( ) < < " $ jsonSchema keyword ' " <nl> + < < kSchemaExclusiveMaximumKeyword <nl> + < < " ' must be a boolean " ) } ; <nl> + } else { <nl> + isExclusiveMaximum = exclusiveMaximumElt . boolean ( ) ; <nl> + } <nl> + } <nl> + auto maxExpr = parseMaximum ( path , maximumElt , typeExpr , isExclusiveMaximum ) ; <nl> + if ( ! maxExpr . isOK ( ) ) { <nl> + return maxExpr . getStatus ( ) ; <nl> + } <nl> + andExpr - > add ( maxExpr . getValue ( ) . release ( ) ) ; <nl> + } else if ( keywordMap - > get ( kSchemaExclusiveMaximumKeyword ) ) { <nl> + / / If " exclusiveMaximum " is present , " maximum " must also be present . <nl> + return { ErrorCodes : : FailedToParse , <nl> + str : : stream ( ) < < " $ jsonSchema keyword ' " < < kSchemaMaximumKeyword <nl> + < < " ' must be a present if " <nl> + < < kSchemaExclusiveMaximumKeyword <nl> + < < " is present " } ; <nl> } <nl> <nl> - auto objectMatch = stdx : : make_unique < InternalSchemaObjectMatchExpression > ( ) ; <nl> - auto objectMatchStatus = objectMatch - > init ( std : : move ( andExpr ) , path ) ; <nl> - if ( ! objectMatchStatus . isOK ( ) ) { <nl> - return objectMatchStatus ; <nl> + if ( auto minimumElt = keywordMap - > get ( kSchemaMinimumKeyword ) ) { <nl> + bool isExclusiveMinimum = false ; <nl> + if ( auto exclusiveMinimumElt = keywordMap - > get ( kSchemaExclusiveMinimumKeyword ) ) { <nl> + if ( ! exclusiveMinimumElt . isBoolean ( ) ) { <nl> + return { ErrorCodes : : TypeMismatch , <nl> + str : : stream ( ) < < " $ jsonSchema keyword ' " < < kSchemaExclusiveMinimumKeyword <nl> + < < " ' must be a boolean " } ; <nl> + } else { <nl> + isExclusiveMinimum = exclusiveMinimumElt . boolean ( ) ; <nl> + } <nl> + } <nl> + auto minExpr = parseMinimum ( path , minimumElt , typeExpr , isExclusiveMinimum ) ; <nl> + if ( ! minExpr . isOK ( ) ) { <nl> + return minExpr . getStatus ( ) ; <nl> + } <nl> + andExpr - > add ( minExpr . getValue ( ) . release ( ) ) ; <nl> + } else if ( keywordMap - > get ( kSchemaExclusiveMinimumKeyword ) ) { <nl> + / / If " exclusiveMinimum " is present , " minimum " must also be present . 
<nl> + return { ErrorCodes : : FailedToParse , <nl> + str : : stream ( ) < < " $ jsonSchema keyword ' " < < kSchemaMinimumKeyword <nl> + < < " ' must be a present if " <nl> + < < kSchemaExclusiveMinimumKeyword <nl> + < < " is present " } ; <nl> } <nl> <nl> - return makeRestriction ( BSONType : : Object , std : : move ( objectMatch ) , typeExpr ) ; <nl> + return Status : : OK ( ) ; <nl> } <nl> <nl> StatusWithMatchExpression _parse ( StringData path , BSONObj schema ) { <nl> StatusWithMatchExpression _parse ( StringData path , BSONObj schema ) { <nl> { kSchemaAnyOfKeyword , { } } , <nl> { kSchemaExclusiveMaximumKeyword , { } } , <nl> { kSchemaExclusiveMinimumKeyword , { } } , <nl> - { kSchemaMaximumKeyword , { } } , <nl> + { kSchemaMaxItemsKeyword , { } } , <nl> { kSchemaMaxLengthKeyword , { } } , <nl> - { kSchemaMinimumKeyword , { } } , <nl> + { kSchemaMaximumKeyword , { } } , <nl> + { kSchemaMinItemsKeyword , { } } , <nl> { kSchemaMinLengthKeyword , { } } , <nl> + { kSchemaMinimumKeyword , { } } , <nl> { kSchemaMultipleOfKeyword , { } } , <nl> { kSchemaNotKeyword , { } } , <nl> { kSchemaOneOfKeyword , { } } , <nl> StatusWithMatchExpression _parse ( StringData path , BSONObj schema ) { <nl> <nl> auto andExpr = stdx : : make_unique < AndMatchExpression > ( ) ; <nl> <nl> - if ( auto propertiesElt = keywordMap [ kSchemaPropertiesKeyword ] ) { <nl> - auto propertiesExpr = parseProperties ( path , propertiesElt , typeExpr . getValue ( ) . get ( ) ) ; <nl> - if ( ! propertiesExpr . isOK ( ) ) { <nl> - return propertiesExpr ; <nl> - } <nl> - andExpr - > add ( propertiesExpr . getValue ( ) . release ( ) ) ; <nl> - } <nl> - <nl> - if ( auto maximumElt = keywordMap [ kSchemaMaximumKeyword ] ) { <nl> - bool isExclusiveMaximum = false ; <nl> - if ( auto exclusiveMaximumElt = keywordMap [ kSchemaExclusiveMaximumKeyword ] ) { <nl> - if ( ! exclusiveMaximumElt . isBoolean ( ) ) { <nl> - return { Status ( ErrorCodes : : TypeMismatch , <nl> - str : : stream ( ) < < " $ jsonSchema keyword ' " <nl> - < < kSchemaExclusiveMaximumKeyword <nl> - < < " ' must be a boolean " ) } ; <nl> - } else { <nl> - isExclusiveMaximum = exclusiveMaximumElt . boolean ( ) ; <nl> - } <nl> - } <nl> - auto maxExpr = <nl> - parseMaximum ( path , maximumElt , typeExpr . getValue ( ) . get ( ) , isExclusiveMaximum ) ; <nl> - if ( ! maxExpr . isOK ( ) ) { <nl> - return maxExpr ; <nl> - } <nl> - andExpr - > add ( maxExpr . getValue ( ) . release ( ) ) ; <nl> - } else if ( keywordMap [ kSchemaExclusiveMaximumKeyword ] ) { <nl> - / / If " exclusiveMaximum " is present , " maximum " must also be present . <nl> - return { Status ( ErrorCodes : : FailedToParse , <nl> - str : : stream ( ) < < " $ jsonSchema keyword ' " < < kSchemaMaximumKeyword <nl> - < < " ' must be a present if " <nl> - < < kSchemaExclusiveMaximumKeyword <nl> - < < " is present " ) } ; <nl> - } <nl> - <nl> - if ( auto minimumElt = keywordMap [ kSchemaMinimumKeyword ] ) { <nl> - bool isExclusiveMinimum = false ; <nl> - if ( auto exclusiveMinimumElt = keywordMap [ kSchemaExclusiveMinimumKeyword ] ) { <nl> - if ( ! exclusiveMinimumElt . isBoolean ( ) ) { <nl> - return { Status ( ErrorCodes : : TypeMismatch , <nl> - str : : stream ( ) < < " $ jsonSchema keyword ' " <nl> - < < kSchemaExclusiveMinimumKeyword <nl> - < < " ' must be a boolean " ) } ; <nl> - } else { <nl> - isExclusiveMinimum = exclusiveMinimumElt . boolean ( ) ; <nl> - } <nl> - } <nl> - auto minExpr = <nl> - parseMinimum ( path , minimumElt , typeExpr . getValue ( ) . get ( ) , isExclusiveMinimum ) ; <nl> - if ( ! 
minExpr . isOK ( ) ) { <nl> - return minExpr ; <nl> - } <nl> - andExpr - > add ( minExpr . getValue ( ) . release ( ) ) ; <nl> - } else if ( keywordMap [ kSchemaExclusiveMinimumKeyword ] ) { <nl> - / / If " exclusiveMinimum " is present , " minimum " must also be present . <nl> - return { Status ( ErrorCodes : : FailedToParse , <nl> - str : : stream ( ) < < " $ jsonSchema keyword ' " < < kSchemaMinimumKeyword <nl> - < < " ' must be a present if " <nl> - < < kSchemaExclusiveMinimumKeyword <nl> - < < " is present " ) } ; <nl> + auto translationStatus = <nl> + translateScalarKeywords ( & keywordMap , path , typeExpr . getValue ( ) . get ( ) , andExpr . get ( ) ) ; <nl> + if ( ! translationStatus . isOK ( ) ) { <nl> + return translationStatus ; <nl> } <nl> <nl> - if ( auto maxLengthElt = keywordMap [ kSchemaMaxLengthKeyword ] ) { <nl> - auto maxLengthExpr = parseStrLength < InternalSchemaMaxLengthMatchExpression > ( <nl> - path , maxLengthElt , typeExpr . getValue ( ) . get ( ) , kSchemaMaxLengthKeyword ) ; <nl> - if ( ! maxLengthExpr . isOK ( ) ) { <nl> - return maxLengthExpr ; <nl> - } <nl> - andExpr - > add ( maxLengthExpr . getValue ( ) . release ( ) ) ; <nl> + translationStatus = <nl> + translateArrayKeywords ( & keywordMap , path , typeExpr . getValue ( ) . get ( ) , andExpr . get ( ) ) ; <nl> + if ( ! translationStatus . isOK ( ) ) { <nl> + return translationStatus ; <nl> } <nl> <nl> - if ( auto minLengthElt = keywordMap [ kSchemaMinLengthKeyword ] ) { <nl> - auto minLengthExpr = parseStrLength < InternalSchemaMinLengthMatchExpression > ( <nl> - path , minLengthElt , typeExpr . getValue ( ) . get ( ) , kSchemaMinLengthKeyword ) ; <nl> - if ( ! minLengthExpr . isOK ( ) ) { <nl> - return minLengthExpr ; <nl> - } <nl> - andExpr - > add ( minLengthExpr . getValue ( ) . release ( ) ) ; <nl> + translationStatus = <nl> + translateObjectKeywords ( & keywordMap , path , typeExpr . getValue ( ) . get ( ) , andExpr . get ( ) ) ; <nl> + if ( ! translationStatus . isOK ( ) ) { <nl> + return translationStatus ; <nl> } <nl> <nl> - if ( auto patternElt = keywordMap [ kSchemaPatternKeyword ] ) { <nl> - auto patternExpr = parsePattern ( path , patternElt , typeExpr . getValue ( ) . get ( ) ) ; <nl> - if ( ! patternExpr . isOK ( ) ) { <nl> - return patternExpr ; <nl> - } <nl> - andExpr - > add ( patternExpr . getValue ( ) . release ( ) ) ; <nl> - } <nl> - if ( auto multipleOfElt = keywordMap [ kSchemaMultipleOfKeyword ] ) { <nl> - auto multipleOfExpr = parseMultipleOf ( path , multipleOfElt , typeExpr . getValue ( ) . get ( ) ) ; <nl> - if ( ! multipleOfExpr . isOK ( ) ) { <nl> - return multipleOfExpr ; <nl> - } <nl> - andExpr - > add ( multipleOfExpr . getValue ( ) . release ( ) ) ; <nl> - } <nl> - <nl> - auto parseStatus = parseLogicalKeywords ( keywordMap , path , andExpr . get ( ) ) ; <nl> - if ( ! parseStatus . isOK ( ) ) { <nl> - return parseStatus ; <nl> + translationStatus = translateLogicalKeywords ( & keywordMap , path , andExpr . get ( ) ) ; <nl> + if ( ! translationStatus . isOK ( ) ) { <nl> + return translationStatus ; <nl> } <nl> <nl> if ( path . empty ( ) & & typeExpr . getValue ( ) & & <nl> StatusWithMatchExpression _parse ( StringData path , BSONObj schema ) { <nl> } <nl> return { std : : move ( andExpr ) } ; <nl> } <nl> - <nl> } / / namespace <nl> <nl> StatusWithMatchExpression JSONSchemaParser : : parse ( BSONObj schema ) { <nl> mmm a / src / mongo / db / matcher / schema / json_schema_parser_test . cpp <nl> ppp b / src / mongo / db / matcher / schema / json_schema_parser_test . 
cpp <nl> TEST ( JSONSchemaParserTest , FailsToParseIfMaximumIsNotANumber ) { <nl> TEST ( JSONSchemaParserTest , FailsToParseIfMaxLengthIsNotANumber ) { <nl> BSONObj schema = fromjson ( " { maxLength : ' foo ' } " ) ; <nl> auto result = JSONSchemaParser : : parse ( schema ) ; <nl> - ASSERT_EQ ( result . getStatus ( ) , ErrorCodes : : TypeMismatch ) ; <nl> + ASSERT_EQ ( result . getStatus ( ) , ErrorCodes : : FailedToParse ) ; <nl> } <nl> <nl> TEST ( JSONSchemaParserTest , FailsToParseIfMaxLengthIsLessThanZero ) { <nl> TEST ( JSONSchemaParserTest , FailsToParseIfExclusiveMinimumIsNotABoolean ) { <nl> TEST ( JSONSchemaParserTest , FailsToParseIfMinLengthIsNotANumber ) { <nl> BSONObj schema = fromjson ( " { minLength : ' foo ' } " ) ; <nl> auto result = JSONSchemaParser : : parse ( schema ) ; <nl> - ASSERT_EQ ( result . getStatus ( ) , ErrorCodes : : TypeMismatch ) ; <nl> + ASSERT_EQ ( result . getStatus ( ) , ErrorCodes : : FailedToParse ) ; <nl> } <nl> <nl> TEST ( JSONSchemaParserTest , FailsToParseIfMinLengthIsLessThanZero ) { <nl> TEST ( JSONSchemaParserTest , TopLevelNotTranslatesCorrectly ) { <nl> } ] } ) " ) ) ; <nl> } <nl> <nl> + TEST ( JSONSchemaParserTest , FailsToParseIfMinItemsIsNotANumber ) { <nl> + auto schema = BSON ( " minItems " < < BSON_ARRAY ( 1 ) ) ; <nl> + ASSERT_EQ ( JSONSchemaParser : : parse ( schema ) . getStatus ( ) , ErrorCodes : : FailedToParse ) ; <nl> + } <nl> + <nl> + TEST ( JSONSchemaParserTest , FailsToParseIfMinItemsIsNotANonNegativeInteger ) { <nl> + auto schema = BSON ( " minItems " < < - 1 ) ; <nl> + ASSERT_EQ ( JSONSchemaParser : : parse ( schema ) . getStatus ( ) , ErrorCodes : : FailedToParse ) ; <nl> + <nl> + schema = BSON ( " minItems " < < 3 . 14 ) ; <nl> + ASSERT_EQ ( JSONSchemaParser : : parse ( schema ) . getStatus ( ) , ErrorCodes : : FailedToParse ) ; <nl> + } <nl> + <nl> + TEST ( JSONSchemaParserTest , MinItemsTranslatesCorrectlyWithNoType ) { <nl> + auto schema = BSON ( " minItems " < < 1 ) ; <nl> + auto result = JSONSchemaParser : : parse ( schema ) ; <nl> + ASSERT_OK ( result . getStatus ( ) ) ; <nl> + ASSERT_SERIALIZES_TO ( result . getValue ( ) , fromjson ( " { $ and : [ { $ alwaysTrue : 1 } ] } " ) ) ; <nl> + <nl> + schema = fromjson ( " { properties : { a : { minItems : 1 } } } " ) ; <nl> + result = JSONSchemaParser : : parse ( schema ) ; <nl> + ASSERT_OK ( result . getStatus ( ) ) ; <nl> + <nl> + ASSERT_SERIALIZES_TO ( result . getValue ( ) , fromjson ( R " ( <nl> + { $ and : [ { <nl> + $ and : [ { <nl> + $ or : [ <nl> + { $ nor : [ { a : { $ exists : true } } ] } , <nl> + { <nl> + $ and : [ { <nl> + $ or : [ <nl> + { $ nor : [ { a : { $ _internalSchemaType : 4 } } ] } , <nl> + { a : { $ _internalSchemaMinItems : 1 } } <nl> + ] <nl> + } ] <nl> + } <nl> + ] <nl> + } ] <nl> + } ] } ) " ) ) ; <nl> + } <nl> + <nl> + TEST ( JSONSchemaParserTest , MinItemsTranslatesCorrectlyWithArrayType ) { <nl> + auto schema = fromjson ( " { properties : { a : { minItems : 1 , type : ' array ' } } } " ) ; <nl> + auto result = JSONSchemaParser : : parse ( schema ) ; <nl> + ASSERT_OK ( result . getStatus ( ) ) ; <nl> + ASSERT_SERIALIZES_TO ( result . 
getValue ( ) , fromjson ( R " ( <nl> + { $ and : [ { <nl> + $ and : [ { <nl> + $ or : [ <nl> + { $ nor : [ { a : { $ exists : true } } ] } , <nl> + { $ and : [ { a : { $ _internalSchemaMinItems : 1 } } , { a : { $ _internalSchemaType : 4 } } ] } <nl> + ] <nl> + } ] <nl> + } ] } ) " ) ) ; <nl> + } <nl> + <nl> + TEST ( JSONSchemaParserTest , MinItemsTranslatesCorrectlyWithNonArrayType ) { <nl> + auto schema = fromjson ( " { properties : { a : { minItems : 1 , type : ' number ' } } } " ) ; <nl> + auto result = JSONSchemaParser : : parse ( schema ) ; <nl> + ASSERT_OK ( result . getStatus ( ) ) ; <nl> + ASSERT_SERIALIZES_TO ( result . getValue ( ) , fromjson ( R " ( <nl> + { $ and : [ { <nl> + $ and : [ { <nl> + $ or : [ <nl> + { $ nor : [ { a : { $ exists : true } } ] } , <nl> + { $ and : [ { $ alwaysTrue : 1 } , { a : { $ _internalSchemaType : " number " } } ] } <nl> + ] <nl> + } ] <nl> + } ] } ) " ) ) ; <nl> + } <nl> + <nl> + TEST ( JSONSchemaParserTest , FailsToParseIfMaxItemsIsNotANumber ) { <nl> + auto schema = BSON ( " maxItems " < < BSON_ARRAY ( 1 ) ) ; <nl> + ASSERT_EQ ( JSONSchemaParser : : parse ( schema ) . getStatus ( ) , ErrorCodes : : FailedToParse ) ; <nl> + } <nl> + <nl> + TEST ( JSONSchemaParserTest , FailsToParseIfMaxItemsIsNotANonNegativeInteger ) { <nl> + auto schema = BSON ( " maxItems " < < - 1 ) ; <nl> + ASSERT_EQ ( JSONSchemaParser : : parse ( schema ) . getStatus ( ) , ErrorCodes : : FailedToParse ) ; <nl> + <nl> + schema = BSON ( " maxItems " < < 1 . 60217 ) ; <nl> + ASSERT_EQ ( JSONSchemaParser : : parse ( schema ) . getStatus ( ) , ErrorCodes : : FailedToParse ) ; <nl> + } <nl> + <nl> + TEST ( JSONSchemaParserTest , MaxItemsTranslatesCorrectlyWithNoType ) { <nl> + auto schema = BSON ( " maxItems " < < 1 ) ; <nl> + auto result = JSONSchemaParser : : parse ( schema ) ; <nl> + ASSERT_OK ( result . getStatus ( ) ) ; <nl> + ASSERT_SERIALIZES_TO ( result . getValue ( ) , fromjson ( " { $ and : [ { $ alwaysTrue : 1 } ] } " ) ) ; <nl> + <nl> + schema = fromjson ( " { properties : { a : { maxItems : 1 } } } " ) ; <nl> + result = JSONSchemaParser : : parse ( schema ) ; <nl> + ASSERT_OK ( result . getStatus ( ) ) ; <nl> + <nl> + ASSERT_SERIALIZES_TO ( result . getValue ( ) , fromjson ( R " ( <nl> + { $ and : [ { <nl> + $ and : [ { <nl> + $ or : [ <nl> + { $ nor : [ { a : { $ exists : true } } ] } , <nl> + { <nl> + $ and : [ { <nl> + $ or : [ <nl> + { $ nor : [ { a : { $ _internalSchemaType : 4 } } ] } , <nl> + { a : { $ _internalSchemaMaxItems : 1 } } <nl> + ] <nl> + } ] <nl> + } <nl> + ] <nl> + } ] <nl> + } ] } ) " ) ) ; <nl> + } <nl> + <nl> + TEST ( JSONSchemaParserTest , MaxItemsTranslatesCorrectlyWithArrayType ) { <nl> + auto schema = fromjson ( " { properties : { a : { maxItems : 1 , type : ' array ' } } } " ) ; <nl> + auto result = JSONSchemaParser : : parse ( schema ) ; <nl> + ASSERT_OK ( result . getStatus ( ) ) ; <nl> + ASSERT_SERIALIZES_TO ( result . getValue ( ) , fromjson ( R " ( <nl> + { $ and : [ { <nl> + $ and : [ { <nl> + $ or : [ <nl> + { $ nor : [ { a : { $ exists : true } } ] } , <nl> + { $ and : [ { a : { $ _internalSchemaMaxItems : 1 } } , { a : { $ _internalSchemaType : 4 } } ] } <nl> + ] <nl> + } ] <nl> + } ] } ) " ) ) ; <nl> + } <nl> + <nl> + TEST ( JSONSchemaParserTest , MaxItemsTranslatesCorrectlyWithNonArrayType ) { <nl> + auto schema = fromjson ( " { properties : { a : { maxItems : 1 , type : ' string ' } } } " ) ; <nl> + auto result = JSONSchemaParser : : parse ( schema ) ; <nl> + ASSERT_OK ( result . 
getStatus ( ) ) ; <nl> + ASSERT_SERIALIZES_TO ( result . getValue ( ) , fromjson ( R " ( <nl> + { $ and : [ { <nl> + $ and : [ { <nl> + $ or : [ <nl> + { $ nor : [ { a : { $ exists : true } } ] } , <nl> + { $ and : [ { $ alwaysTrue : 1 } , { a : { $ _internalSchemaType : 2 } } ] } <nl> + ] <nl> + } ] <nl> + } ] } ) " ) ) ; <nl> + } <nl> } / / namespace <nl> } / / namespace mongo <nl> | SERVER - 30178 extend JSON Schema parser to handle " minItems " and " maxItems " | mongodb/mongo | 921ba9bba8a8c555ed25dd8452eae57a1662e735 | 2017-08-21T19:22:35Z |
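The diff above reorganizes the $jsonSchema parser into per-category translate*Keywords helpers: each one looks up its keywords in a pre-populated map, validates the argument, and appends the generated match expression to a shared AND node, returning a non-OK status on the first error. A minimal sketch of that shape follows; Status, KeywordMap and the string-valued constraints are illustrative stand-ins, not MongoDB's real StatusWithMatchExpression/AndMatchExpression types.

```cpp
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Illustrative stand-ins for the parser's Status / match-expression types.
struct Status {
    bool ok = true;
    std::string reason;
    static Status OK() { return {}; }
    static Status error(std::string r) { return {false, std::move(r)}; }
};

using KeywordMap = std::map<std::string, double>;  // keyword -> numeric argument
using AndExpr = std::vector<std::string>;          // collected constraint descriptions

// Array keywords: minItems / maxItems must be non-negative integers.
Status translateArrayKeywords(const KeywordMap& keywords, AndExpr* andExpr) {
    for (const std::string kw : {"minItems", "maxItems"}) {
        auto it = keywords.find(kw);
        if (it == keywords.end()) continue;
        const double v = it->second;
        if (v < 0 || v != static_cast<long long>(v))
            return Status::error(kw + " must be a non-negative integer");
        andExpr->push_back(kw + " = " + std::to_string(static_cast<long long>(v)));
    }
    return Status::OK();
}

// Scalar keywords: 'exclusiveMaximum' is only legal when 'maximum' is present,
// mirroring the dependency check in the diff above.
Status translateScalarKeywords(const KeywordMap& keywords, AndExpr* andExpr) {
    const bool hasMaximum = keywords.count("maximum") > 0;
    if (keywords.count("exclusiveMaximum") && !hasMaximum)
        return Status::error("'maximum' must be present if 'exclusiveMaximum' is present");
    if (hasMaximum)
        andExpr->push_back("value <= " + std::to_string(keywords.at("maximum")));
    return Status::OK();
}

int main() {
    const KeywordMap schema{{"minItems", 1}, {"maximum", 10}};
    AndExpr andExpr;
    Status s = translateArrayKeywords(schema, &andExpr);
    if (s.ok) s = translateScalarKeywords(schema, &andExpr);
    if (!s.ok) {
        std::cerr << "schema error: " << s.reason << "\n";
        return 1;
    }
    for (const auto& c : andExpr) std::cout << c << "\n";
    return 0;
}
```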
mmm a / include / catch_session . hpp <nl> ppp b / include / catch_session . hpp <nl> namespace Catch { <nl> < < std : : left < < std : : setw ( 16 ) < < " version : " < < libraryVersion ( ) < < std : : endl ; <nl> } <nl> <nl> - <nl> int applyCommandLine ( int argc , char const * const * const argv , OnUnusedOptions : : DoWhat unusedOptionBehaviour = OnUnusedOptions : : Fail ) { <nl> try { <nl> m_cli . setThrowOnUnrecognisedTokens ( unusedOptionBehaviour = = OnUnusedOptions : : Fail ) ; <nl> namespace Catch { <nl> # endif <nl> <nl> int run ( ) { <nl> + if ( ( m_configData . waitForKeypress & WaitForKeypress : : BeforeStart ) ! = 0 ) { <nl> + Catch : : cout ( ) < < " . . . waiting for enter / return before starting " < < std : : endl ; <nl> + std : : getchar ( ) ; <nl> + } <nl> + int exitCode = runInternal ( ) ; <nl> + if ( ( m_configData . waitForKeypress & WaitForKeypress : : BeforeExit ) ! = 0 ) { <nl> + Catch : : cout ( ) < < " . . . waiting for enter / return before exiting , with code : " < < exitCode < < std : : endl ; <nl> + std : : getchar ( ) ; <nl> + } <nl> + return exitCode ; <nl> + } <nl> + <nl> + Clara : : CommandLine < ConfigData > const & cli ( ) const { <nl> + return m_cli ; <nl> + } <nl> + std : : vector < Clara : : Parser : : Token > const & unusedTokens ( ) const { <nl> + return m_unusedTokens ; <nl> + } <nl> + ConfigData & configData ( ) { <nl> + return m_configData ; <nl> + } <nl> + Config & config ( ) { <nl> + if ( ! m_config ) <nl> + m_config = new Config ( m_configData ) ; <nl> + return * m_config ; <nl> + } <nl> + private : <nl> + <nl> + int runInternal ( ) { <nl> if ( m_configData . showHelp | | m_configData . libIdentify ) <nl> return 0 ; <nl> <nl> namespace Catch { <nl> } <nl> } <nl> <nl> - Clara : : CommandLine < ConfigData > const & cli ( ) const { <nl> - return m_cli ; <nl> - } <nl> - std : : vector < Clara : : Parser : : Token > const & unusedTokens ( ) const { <nl> - return m_unusedTokens ; <nl> - } <nl> - ConfigData & configData ( ) { <nl> - return m_configData ; <nl> - } <nl> - Config & config ( ) { <nl> - if ( ! m_config ) <nl> - m_config = new Config ( m_configData ) ; <nl> - return * m_config ; <nl> - } <nl> - private : <nl> Clara : : CommandLine < ConfigData > m_cli ; <nl> std : : vector < Clara : : Parser : : Token > m_unusedTokens ; <nl> ConfigData m_configData ; <nl> mmm a / include / internal / catch_commandline . hpp <nl> ppp b / include / internal / catch_commandline . hpp <nl> namespace Catch { <nl> else <nl> throw std : : runtime_error ( " colour mode must be one of : auto , yes or no " ) ; <nl> } <nl> + inline void setWaitForKeypress ( ConfigData & config , std : : string const & keypress ) { <nl> + auto keypressLc = toLower ( keypress ) ; <nl> + if ( keypressLc = = " start " ) <nl> + config . waitForKeypress = WaitForKeypress : : BeforeStart ; <nl> + else if ( keypressLc = = " exit " ) <nl> + config . waitForKeypress = WaitForKeypress : : BeforeExit ; <nl> + else if ( keypressLc = = " both " ) <nl> + config . waitForKeypress = WaitForKeypress : : BeforeStartAndExit ; <nl> + else <nl> + throw std : : runtime_error ( " keypress argument must be one of : start , exit or both . ' " + keypress + " ' not recognised " ) ; <nl> + } ; <nl> + <nl> + <nl> inline void forceColour ( ConfigData & config ) { <nl> config . useColour = UseColour : : Yes ; <nl> } <nl> namespace Catch { <nl> . describe ( " report name and version according to libidentify standard " ) <nl> . 
bind ( & ConfigData : : libIdentify ) ; <nl> <nl> + cli [ " - - wait - for - keypress " ] <nl> + . describe ( " waits for a keypress before exiting " ) <nl> + . bind ( & setWaitForKeypress , " start | exit | both " ) ; <nl> + <nl> return cli ; <nl> } <nl> <nl> mmm a / include / internal / catch_config . hpp <nl> ppp b / include / internal / catch_config . hpp <nl> namespace Catch { <nl> warnings ( WarnAbout : : Nothing ) , <nl> showDurations ( ShowDurations : : DefaultForReporter ) , <nl> runOrder ( RunTests : : InDeclarationOrder ) , <nl> - useColour ( UseColour : : Auto ) <nl> + useColour ( UseColour : : Auto ) , <nl> + waitForKeypress ( WaitForKeypress : : Never ) <nl> { } <nl> <nl> bool listTests ; <nl> namespace Catch { <nl> ShowDurations : : OrNot showDurations ; <nl> RunTests : : InWhatOrder runOrder ; <nl> UseColour : : YesOrNo useColour ; <nl> + WaitForKeypress : : When waitForKeypress ; <nl> <nl> std : : string outputFilename ; <nl> std : : string name ; <nl> mmm a / include / internal / catch_interfaces_config . h <nl> ppp b / include / internal / catch_interfaces_config . h <nl> namespace Catch { <nl> Yes , <nl> No <nl> } ; } ; <nl> + struct WaitForKeypress { enum When { <nl> + Never , <nl> + BeforeStart = 1 , <nl> + BeforeExit = 2 , <nl> + BeforeStartAndExit = BeforeStart | BeforeExit <nl> + } ; } ; <nl> <nl> class TestSpec ; <nl> <nl> | Implemented wait - for - keypress option | catchorg/Catch2 | 70e4af9d444dd9ef074ce74bc2d0ea418db56ac4 | 2017-08-15T13:12:11Z |
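The wait-for-keypress change above combines a small bitmask enum (BeforeStart | BeforeExit), a string-to-enum option parser that rejects unknown values, and a wrapper around the real run that calls getchar() before and/or after it. A compilable sketch of the same pattern, with hypothetical names (WaitFor, runTests) that are not part of Catch itself:

```cpp
#include <cctype>
#include <cstdio>
#include <iostream>
#include <stdexcept>
#include <string>

// Hypothetical free-standing version of the flag; Catch's real type is
// WaitForKeypress::When inside its config headers.
enum class WaitFor : int { Never = 0, BeforeStart = 1, BeforeExit = 2, Both = 3 };

WaitFor parseWaitFor(std::string v) {
    for (auto& c : v) c = static_cast<char>(std::tolower(static_cast<unsigned char>(c)));
    if (v == "start") return WaitFor::BeforeStart;
    if (v == "exit") return WaitFor::BeforeExit;
    if (v == "both") return WaitFor::Both;
    throw std::runtime_error("keypress argument must be one of: start, exit or both");
}

// Placeholder for the real test run.
int runTests() {
    std::cout << "running tests...\n";
    return 0;
}

int runWithKeypress(WaitFor when) {
    const int w = static_cast<int>(when);
    if (w & static_cast<int>(WaitFor::BeforeStart)) {
        std::cout << "... waiting for enter/return before starting" << std::endl;
        std::getchar();
    }
    const int exitCode = runTests();
    if (w & static_cast<int>(WaitFor::BeforeExit)) {
        std::cout << "... waiting for enter/return before exiting, with code: " << exitCode << std::endl;
        std::getchar();
    }
    return exitCode;
}

int main() {
    return runWithKeypress(parseWaitFor("exit"));
}
```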
mmm a / src / heap . cc <nl> ppp b / src / heap . cc <nl> bool Heap : : PerformGarbageCollection ( GarbageCollector collector , <nl> <nl> UpdateSurvivalRateTrend ( start_new_space_size ) ; <nl> <nl> - size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize ( ) ; <nl> + size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects ( ) ; <nl> <nl> if ( high_survival_rate_during_scavenges & & <nl> IsStableOrIncreasingSurvivalTrend ( ) ) { <nl> void Heap : : RecordStats ( HeapStats * stats , bool take_snapshot ) { <nl> } <nl> <nl> <nl> - intptr_t Heap : : PromotedSpaceSize ( ) { <nl> - return old_pointer_space_ - > Size ( ) <nl> - + old_data_space_ - > Size ( ) <nl> - + code_space_ - > Size ( ) <nl> - + map_space_ - > Size ( ) <nl> - + cell_space_ - > Size ( ) <nl> - + lo_space_ - > Size ( ) ; <nl> - } <nl> - <nl> - <nl> intptr_t Heap : : PromotedSpaceSizeOfObjects ( ) { <nl> return old_pointer_space_ - > SizeOfObjects ( ) <nl> + old_data_space_ - > SizeOfObjects ( ) <nl> mmm a / src / heap . h <nl> ppp b / src / heap . h <nl> class Heap { <nl> PretenureFlag pretenure ) ; <nl> <nl> inline intptr_t PromotedTotalSize ( ) { <nl> - return PromotedSpaceSize ( ) + PromotedExternalMemorySize ( ) ; <nl> + return PromotedSpaceSizeOfObjects ( ) + PromotedExternalMemorySize ( ) ; <nl> } <nl> <nl> / / True if we have reached the allocation limit in the old generation that <nl> class Heap { <nl> static const intptr_t kMinimumAllocationLimit = <nl> 8 * ( Page : : kPageSize > MB ? Page : : kPageSize : MB ) ; <nl> <nl> - / / When we sweep lazily we initially guess that there is no garbage on the <nl> - / / heap and set the limits for the next GC accordingly . As we sweep we find <nl> - / / out that some of the pages contained garbage and we have to adjust <nl> - / / downwards the size of the heap . This means the limits that control the <nl> - / / timing of the next GC also need to be adjusted downwards . <nl> - void LowerOldGenLimits ( intptr_t adjustment ) { <nl> - size_of_old_gen_at_last_old_space_gc_ - = adjustment ; <nl> - old_gen_promotion_limit_ = <nl> - OldGenPromotionLimit ( size_of_old_gen_at_last_old_space_gc_ ) ; <nl> - old_gen_allocation_limit_ = <nl> - OldGenAllocationLimit ( size_of_old_gen_at_last_old_space_gc_ ) ; <nl> - } <nl> - <nl> intptr_t OldGenPromotionLimit ( intptr_t old_gen_size ) { <nl> const int divisor = FLAG_stress_compaction ? 10 : 3 ; <nl> intptr_t limit = <nl> class Heap { <nl> intptr_t adjusted_allocation_limit = <nl> old_gen_allocation_limit_ - new_space_ . Capacity ( ) / 5 ; <nl> <nl> - if ( PromotedSpaceSize ( ) > = adjusted_allocation_limit ) return true ; <nl> + if ( PromotedSpaceSizeOfObjects ( ) > = adjusted_allocation_limit ) return true ; <nl> <nl> return false ; <nl> } <nl> class Heap { <nl> GCTracer * tracer ( ) { return tracer_ ; } <nl> <nl> / / Returns the size of objects residing in non new spaces . <nl> - intptr_t PromotedSpaceSize ( ) ; <nl> intptr_t PromotedSpaceSizeOfObjects ( ) ; <nl> <nl> double total_regexp_code_generated ( ) { return total_regexp_code_generated_ ; } <nl> mmm a / src / incremental - marking - inl . h <nl> ppp b / src / incremental - marking - inl . h <nl> void IncrementalMarking : : BlackToGreyAndUnshift ( HeapObject * obj , <nl> int64_t old_bytes_rescanned = bytes_rescanned_ ; <nl> bytes_rescanned_ = old_bytes_rescanned + obj_size ; <nl> if ( ( bytes_rescanned_ > > 20 ) ! 
= ( old_bytes_rescanned > > 20 ) ) { <nl> - if ( bytes_rescanned_ > 2 * heap_ - > PromotedSpaceSize ( ) ) { <nl> + if ( bytes_rescanned_ > 2 * heap_ - > PromotedSpaceSizeOfObjects ( ) ) { <nl> / / If we have queued twice the heap size for rescanning then we are <nl> / / going around in circles , scanning the same objects again and again <nl> / / as the program mutates the heap faster than we can incrementally <nl> mmm a / src / incremental - marking . cc <nl> ppp b / src / incremental - marking . cc <nl> void IncrementalMarking : : ResetStepCounters ( ) { <nl> <nl> <nl> int64_t IncrementalMarking : : SpaceLeftInOldSpace ( ) { <nl> - return heap_ - > MaxOldGenerationSize ( ) - heap_ - > PromotedSpaceSize ( ) ; <nl> + return heap_ - > MaxOldGenerationSize ( ) - heap_ - > PromotedSpaceSizeOfObjects ( ) ; <nl> } <nl> <nl> } } / / namespace v8 : : internal <nl> mmm a / src / mark - compact . cc <nl> ppp b / src / mark - compact . cc <nl> void MarkCompactCollector : : SweepSpace ( PagedSpace * space , SweeperType sweeper ) { <nl> bool lazy_sweeping_active = false ; <nl> bool unused_page_present = false ; <nl> <nl> - intptr_t old_space_size = heap ( ) - > PromotedSpaceSize ( ) ; <nl> + intptr_t old_space_size = heap ( ) - > PromotedSpaceSizeOfObjects ( ) ; <nl> intptr_t space_left = <nl> Min ( heap ( ) - > OldGenPromotionLimit ( old_space_size ) , <nl> heap ( ) - > OldGenAllocationLimit ( old_space_size ) ) - old_space_size ; <nl> mmm a / src / spaces . cc <nl> ppp b / src / spaces . cc <nl> bool PagedSpace : : AdvanceSweeper ( intptr_t bytes_to_sweep ) { <nl> first_unswept_page_ = p ; <nl> } <nl> <nl> - heap ( ) - > LowerOldGenLimits ( freed_bytes ) ; <nl> - <nl> heap ( ) - > FreeQueuedChunks ( ) ; <nl> <nl> return IsSweepingComplete ( ) ; <nl> | Use correct size of promoted space for setting promotion and allocation limits . | v8/v8 | ae0a7ec93a9145652af74d26041fd96d2ca9a2c7 | 2012-05-04T09:36:46Z |
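The heap change above bases GC trigger limits on the size of live objects (SizeOfObjects) rather than committed page size, which removes the need to lower the limits afterwards as lazy sweeping frees pages. The toy model below shows the distinction and a simple limit rule; Space, Heap and the growth rule are simplified stand-ins, not V8's real heap classes.

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Toy model of paged spaces: each space tracks both the committed size of its pages
// and the size of the live objects inside them.
struct Space {
    int64_t committedBytes;
    int64_t liveObjectBytes;
};

struct Heap {
    std::vector<Space> oldSpaces;

    // Committed memory, analogous to the removed PromotedSpaceSize().
    int64_t promotedSpaceSize() const {
        int64_t total = 0;
        for (const auto& s : oldSpaces) total += s.committedBytes;
        return total;
    }

    // Live objects only, analogous to PromotedSpaceSizeOfObjects(). Basing the
    // next-GC limit on this avoids lowering the limit later, when lazy sweeping
    // discovers that committed pages were mostly garbage.
    int64_t promotedSpaceSizeOfObjects() const {
        int64_t total = 0;
        for (const auto& s : oldSpaces) total += s.liveObjectBytes;
        return total;
    }

    // Simple growth rule: let the old generation triple before the next GC, with a
    // fixed floor (the factor of 3 loosely mirrors the divisor used in the diff).
    int64_t oldGenAllocationLimit() const {
        const int64_t kMinimumLimit = 8 * (1 << 20);  // 8 MB
        const int64_t base = promotedSpaceSizeOfObjects();
        return std::max(base * 3, base + kMinimumLimit);
    }
};

int main() {
    Heap heap;
    heap.oldSpaces = {{16 << 20, 6 << 20}, {8 << 20, 2 << 20}};
    std::cout << "committed:    " << heap.promotedSpaceSize() << " bytes\n";
    std::cout << "live objects: " << heap.promotedSpaceSizeOfObjects() << " bytes\n";
    std::cout << "next GC when live data reaches " << heap.oldGenAllocationLimit() << " bytes\n";
    return 0;
}
```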
mmm a / include / coroutine . h <nl> ppp b / include / coroutine . h <nl> void coroutine_set_onYield ( coro_php_yield_t func ) ; <nl> void coroutine_set_onResume ( coro_php_resume_t func ) ; <nl> void coroutine_set_onClose ( coro_php_close_t func ) ; <nl> <nl> - # define php_yield ( ) php_coro_yield ( return_value ) ; <nl> - void php_coro_yield ( void * return_value ) ; <nl> - void php_coro_resume ( void * data ) ; <nl> + void internal_coro_yield ( void * return_value ) ; <nl> + void internal_coro_resume ( void * data ) ; <nl> <nl> # ifdef __cplusplus <nl> } / * end extern " C " * / <nl> mmm a / swoole_coroutine . cc <nl> ppp b / swoole_coroutine . cc <nl> int coro_init ( TSRMLS_D ) <nl> <nl> COROG . active = 1 ; <nl> / / set functions <nl> - coroutine_set_onYield ( php_coro_yield ) ; <nl> - coroutine_set_onResume ( php_coro_resume ) ; <nl> + coroutine_set_onYield ( internal_coro_yield ) ; <nl> + coroutine_set_onResume ( internal_coro_resume ) ; <nl> coroutine_set_onClose ( sw_coro_close ) ; <nl> return 0 ; <nl> } <nl> <nl> - <nl> - void php_coro_resume ( void * arg ) <nl> + static void resume_php_stack ( coro_task * task ) <nl> { <nl> - coro_task * task = ( coro_task * ) arg ; <nl> COROG . call_stack [ COROG . call_stack_size + + ] = task ; <nl> COROG . current_coro = task ; <nl> swTraceLog ( SW_TRACE_COROUTINE , " sw_coro_resume coro id % d " , COROG . current_coro - > cid ) ; <nl> void php_coro_resume ( void * arg ) <nl> EG ( vm_stack ) = task - > yield_stack ; <nl> EG ( vm_stack_top ) = task - > yield_vm_stack_top ; <nl> EG ( vm_stack_end ) = task - > yield_vm_stack_end ; <nl> + } <nl> + <nl> + static void save_php_stack ( coro_task * task ) <nl> + { <nl> + COROG . call_stack_size - - ; <nl> + swTraceLog ( SW_TRACE_COROUTINE , " coro_yield coro id % d " , task - > cid ) ; <nl> + task - > state = SW_CORO_YIELD ; <nl> + task - > is_yield = 1 ; <nl> + / / save vm stack <nl> + task - > yield_execute_data = EG ( current_execute_data ) ; <nl> + task - > yield_stack = EG ( vm_stack ) ; <nl> + task - > yield_vm_stack_top = EG ( vm_stack_top ) ; <nl> + task - > yield_vm_stack_end = EG ( vm_stack_end ) ; <nl> + / / restore vm stack <nl> + EG ( vm_stack ) = task - > origin_stack ; <nl> + EG ( vm_stack_top ) = task - > origin_vm_stack_top ; <nl> + EG ( vm_stack_end ) = task - > origin_vm_stack_end ; <nl> + } <nl> + void internal_coro_resume ( void * arg ) <nl> + { <nl> + coro_task * task = ( coro_task * ) arg ; <nl> + resume_php_stack ( task ) ; <nl> / / main OG <nl> if ( OG ( handlers ) . elements ) <nl> { <nl> void php_coro_resume ( void * arg ) <nl> swTraceLog ( SW_TRACE_COROUTINE , " cid = % d " , task - > cid ) ; <nl> } <nl> <nl> - void php_coro_yield ( void * arg ) <nl> + void internal_coro_yield ( void * arg ) <nl> { <nl> coro_task * task = ( coro_task * ) arg ; <nl> - COROG . 
call_stack_size - - ; <nl> - swTraceLog ( SW_TRACE_COROUTINE , " coro_yield coro id % d " , task - > cid ) ; <nl> - task - > state = SW_CORO_YIELD ; <nl> - task - > is_yield = 1 ; <nl> - / / save vm stack <nl> - task - > yield_execute_data = EG ( current_execute_data ) ; <nl> - task - > yield_stack = EG ( vm_stack ) ; <nl> - task - > yield_vm_stack_top = EG ( vm_stack_top ) ; <nl> - task - > yield_vm_stack_end = EG ( vm_stack_end ) ; <nl> - / / restore vm stack <nl> - EG ( vm_stack ) = task - > origin_stack ; <nl> - EG ( vm_stack_top ) = task - > origin_vm_stack_top ; <nl> - EG ( vm_stack_end ) = task - > origin_vm_stack_end ; <nl> - <nl> + save_php_stack ( task ) ; <nl> / / save output control global <nl> if ( OG ( active ) ) <nl> { <nl> void sw_coro_save ( zval * return_value , php_context * sw_current_context ) <nl> int sw_coro_resume ( php_context * sw_current_context , zval * retval , zval * coro_retval ) <nl> { <nl> coro_task * task = SWCC ( current_task ) ; <nl> - COROG . call_stack [ COROG . call_stack_size + + ] = task ; <nl> - COROG . current_coro = task ; <nl> - swTraceLog ( SW_TRACE_COROUTINE , " sw_coro_resume coro id % d " , COROG . current_coro - > cid ) ; <nl> - task - > state = SW_CORO_RUNNING ; <nl> - EG ( current_execute_data ) = task - > yield_execute_data ; <nl> - EG ( vm_stack ) = task - > yield_stack ; <nl> - EG ( vm_stack_top ) = task - > yield_vm_stack_top ; <nl> - EG ( vm_stack_end ) = task - > yield_vm_stack_end ; <nl> - <nl> + resume_php_stack ( task ) ; <nl> if ( EG ( current_execute_data ) - > prev_execute_data - > opline - > result_type ! = IS_UNUSED & & retval ) <nl> { <nl> ZVAL_COPY ( SWCC ( current_coro_return_value_ptr ) , retval ) ; <nl> void sw_coro_yield ( ) <nl> { <nl> swoole_php_fatal_error ( E_ERROR , " must be called in the coroutine . " ) ; <nl> } <nl> - <nl> coro_task * task = ( coro_task * ) sw_get_current_task ( ) ; <nl> - COROG . call_stack_size - - ; <nl> - swTraceLog ( SW_TRACE_COROUTINE , " coro_yield coro id % d " , task - > cid ) ; <nl> - task - > state = SW_CORO_YIELD ; <nl> - task - > is_yield = 1 ; <nl> - <nl> - / / save vm stack <nl> - task - > yield_execute_data = EG ( current_execute_data ) ; <nl> - task - > yield_stack = EG ( vm_stack ) ; <nl> - task - > yield_vm_stack_top = EG ( vm_stack_top ) ; <nl> - task - > yield_vm_stack_end = EG ( vm_stack_end ) ; <nl> - / / restore vm stack <nl> - EG ( vm_stack ) = task - > origin_stack ; <nl> - EG ( vm_stack_top ) = task - > origin_vm_stack_top ; <nl> - EG ( vm_stack_end ) = task - > origin_vm_stack_end ; <nl> + save_php_stack ( task ) ; <nl> coroutine_yield_naked ( task - > co ) ; <nl> } <nl> <nl> | optimize code | swoole/swoole-src | c7314ec7e6eb205b98251a19ea885eed56a6f1f5 | 2018-08-23T10:00:35Z |
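The refactor above removes duplicated field-by-field copying by funnelling both yield paths through save_php_stack() and both resume paths through resume_php_stack(). A small generic sketch of the same save/restore extraction, with made-up VmState/Task/Runtime types standing in for the PHP executor globals:

```cpp
#include <iostream>
#include <vector>

// Toy coroutine bookkeeping (not PHP/Swoole internals): both yield paths share one
// save_stack() helper and both resume paths share one resume_stack() helper, instead
// of repeating the copying in four places.
struct VmState {
    const char* stackTop = nullptr;
    const char* stackEnd = nullptr;
};

struct Task {
    int cid = 0;
    VmState yielded;  // state captured when the task yields
    VmState origin;   // state of the scheduler / main stack
};

struct Runtime {
    VmState current;               // stand-in for the EG(...) globals
    std::vector<Task*> callStack;  // stand-in for COROG.call_stack
};

void save_stack(Runtime& rt, Task& task) {
    rt.callStack.pop_back();
    task.yielded = rt.current;  // remember where the coroutine stopped
    rt.current = task.origin;   // switch back to the scheduler's stack
}

void resume_stack(Runtime& rt, Task& task) {
    rt.callStack.push_back(&task);
    task.origin = rt.current;   // remember the scheduler's stack
    rt.current = task.yielded;  // restore the coroutine's stack
}

int main() {
    Runtime rt;
    rt.current = {"main-top", "main-end"};
    Task t{1, {"coro-top", "coro-end"}, {}};

    resume_stack(rt, t);
    std::cout << "running coroutine " << t.cid << " on " << rt.current.stackTop << "\n";
    save_stack(rt, t);
    std::cout << "back on " << rt.current.stackTop << "\n";
    return 0;
}
```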
mmm a / lib / FrontendTool / FrontendTool . cpp <nl> ppp b / lib / FrontendTool / FrontendTool . cpp <nl> static bool performCompileStepsPostSILGen ( <nl> return serializeSIB ( SM . get ( ) , PSPs , Instance . getASTContext ( ) , MSF ) ; <nl> <nl> { <nl> - const bool haveModulePath = PSPs . haveModuleOrModuleDocOutputPaths ( ) ; <nl> - if ( haveModulePath & & ! SM - > isSerialized ( ) ) <nl> - SM - > serialize ( ) ; <nl> - <nl> - if ( haveModulePath ) { <nl> + if ( PSPs . haveModuleOrModuleDocOutputPaths ( ) ) { <nl> if ( Action = = FrontendOptions : : ActionType : : MergeModules | | <nl> Action = = FrontendOptions : : ActionType : : EmitModuleOnly ) { <nl> / / What if MSF is a module ? <nl> mmm a / lib / SILOptimizer / PassManager / PassPipeline . cpp <nl> ppp b / lib / SILOptimizer / PassManager / PassPipeline . cpp <nl> SILPassPipelinePlan SILPassPipelinePlan : : getOnonePassPipeline ( ) { <nl> / / Has only an effect if the - gsil option is specified . <nl> P . addSILDebugInfoGenerator ( ) ; <nl> <nl> + / / Finally serialize the SIL if we are asked to . <nl> + P . addSerializeSILPass ( ) ; <nl> + <nl> return P ; <nl> } <nl> <nl> mmm a / lib / SILOptimizer / UtilityPasses / SerializeSILPass . cpp <nl> ppp b / lib / SILOptimizer / UtilityPasses / SerializeSILPass . cpp <nl> class SerializeSILPass : public SILModuleTransform { <nl> / / to avoid linker errors , the object file of the current module should <nl> / / contain all the symbols which were alive at the time of serialization . <nl> LLVM_DEBUG ( llvm : : dbgs ( ) < < " Serializing SILModule in SerializeSILPass \ n " ) ; <nl> - getModule ( ) - > serialize ( ) ; <nl> + M . serialize ( ) ; <nl> + <nl> + / / If we are not optimizing , do not strip the [ serialized ] flag . We * could * <nl> + / / do this since after serializing [ serialized ] is irrelevent . But this <nl> + / / would incur an unnecessary compile time cost since if we are not <nl> + / / optimizing we are not going to perform any sort of DFE . <nl> + if ( ! getOptions ( ) . shouldOptimize ( ) ) <nl> + return ; <nl> removeSerializedFlagFromAllFunctions ( M ) ; <nl> } <nl> } ; <nl> mmm a / test / sil - func - extractor / basic . swift <nl> ppp b / test / sil - func - extractor / basic . swift <nl> <nl> / / Passing demangled name <nl> <nl> - / / RUN : % target - swift - frontend % s - g - module - name basic - emit - sib - o - | % target - sil - func - extractor - module - name basic - func = " basic . foo " | % FileCheck % s - check - prefix = EXTRACT - FOO <nl> - / / RUN : % target - swift - frontend % s - g - module - name basic - emit - sib - o - | % target - sil - func - extractor - module - name basic - func = " basic . X . test " | % FileCheck % s - check - prefix = EXTRACT - TEST <nl> - / / RUN : % target - swift - frontend % s - g - module - name basic - emit - sib - o - | % target - sil - func - extractor - module - name basic - func = " basic . Vehicle . init " | % FileCheck % s - check - prefix = EXTRACT - INIT <nl> - / / RUN : % target - swift - frontend % s - g - module - name basic - emit - sib - o - | % target - sil - func - extractor - module - name basic - func = " basic . Vehicle . now " | % FileCheck % s - check - prefix = EXTRACT - NOW <nl> + / / RUN : % empty - directory ( % t ) <nl> + <nl> + / / RUN : % target - swift - frontend % s - g - module - name basic - emit - sib - o % t / demangle - ext - foo . sib ; % target - sil - func - extractor - module - name basic - func = " basic . foo " % t / demangle - ext - foo . 
sib | % FileCheck % s - check - prefix = EXTRACT - FOO <nl> + / / RUN : % target - swift - frontend % s - g - module - name basic - emit - sib - o % t / demangle - ext - test . sib ; % target - sil - func - extractor - module - name basic - func = " basic . X . test " % t / demangle - ext - test . sib | % FileCheck % s - check - prefix = EXTRACT - TEST <nl> + / / RUN : % target - swift - frontend % s - g - module - name basic - emit - sib - o % t / demangle - ext - init . sib ; % target - sil - func - extractor - module - name basic - func = " basic . Vehicle . init " % t / demangle - ext - init . sib | % FileCheck % s - check - prefix = EXTRACT - INIT <nl> + / / RUN : % target - swift - frontend % s - g - module - name basic - emit - sib - o % t / demangle - ext - now . sib ; % target - sil - func - extractor - module - name basic - func = " basic . Vehicle . now " % t / demangle - ext - now . sib | % FileCheck % s - check - prefix = EXTRACT - NOW <nl> <nl> / / Passing mangled name <nl> <nl> - / / RUN : % target - swift - frontend % s - g - module - name basic - emit - sib - o - | % target - sil - func - extractor - module - name basic - func = ' $ s5basic3fooSiyF ' | % FileCheck % s - check - prefix = EXTRACT - FOO <nl> - / / RUN : % target - swift - frontend % s - g - module - name basic - emit - sib - o - | % target - sil - func - extractor - module - name basic - func = ' $ s5basic1XV4testyyF ' | % FileCheck % s - check - prefix = EXTRACT - TEST <nl> - / / RUN : % target - swift - frontend % s - g - module - name basic - emit - sib - o - | % target - sil - func - extractor - module - name basic - func = ' $ s5basic7VehicleC1nACSi_tcfc ' | % FileCheck % s - check - prefix = EXTRACT - INIT <nl> - / / RUN : % target - swift - frontend % s - g - module - name basic - emit - sib - o - | % target - sil - func - extractor - module - name basic - func = ' $ s5basic7VehicleC3nowSiyF ' | % FileCheck % s - check - prefix = EXTRACT - NOW <nl> + / / RUN : % target - swift - frontend % s - g - module - name basic - emit - sib - o % t / mangle - ext - foo . sib ; % target - sil - func - extractor - module - name basic - func = ' $ s5basic3fooSiyF ' % t / mangle - ext - foo . sib | % FileCheck % s - check - prefix = EXTRACT - FOO <nl> + / / RUN : % target - swift - frontend % s - g - module - name basic - emit - sib - o % t / mangle - ext - test . sib ; % target - sil - func - extractor - module - name basic - func = ' $ s5basic1XV4testyyF ' % t / mangle - ext - test . sib | % FileCheck % s - check - prefix = EXTRACT - TEST <nl> + / / RUN : % target - swift - frontend % s - g - module - name basic - emit - sib - o % t / mangle - ext - init . sib ; % target - sil - func - extractor - module - name basic - func = ' $ s5basic7VehicleC1nACSi_tcfc ' % t / mangle - ext - init . sib | % FileCheck % s - check - prefix = EXTRACT - INIT <nl> + / / RUN : % target - swift - frontend % s - g - module - name basic - emit - sib - o % t / mangle - ext - now . sib ; % target - sil - func - extractor - module - name basic - func = ' $ s5basic7VehicleC3nowSiyF ' % t / mangle - ext - now . sib | % FileCheck % s - check - prefix = EXTRACT - NOW <nl> <nl> <nl> / / EXTRACT - FOO - NOT : sil hidden @ $ s5basic1XV4testyyF : $ @ convention ( method ) ( X ) - > ( ) { <nl> | Merge pull request from gottesmm / pr - 8d2416681ea41244ae3c2bd3d3d3bb35fcc1eade | apple/swift | cd8f565adcc832bba6d57c8cd37dece7838ebd8a | 2019-01-14T20:46:16Z |
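The change above moves SIL serialization into the pass pipeline itself (a SerializeSILPass appended to the -Onone pipeline) and skips stripping the [serialized] flag when not optimizing, since that walk only pays off for later dead-function elimination. The sketch below mimics that structure with a toy pass pipeline; Module, Options and makeSerializePass are invented for illustration and are not the Swift compiler's API.

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Toy pipeline: a "module" whose functions carry a serialized flag, and a serialize
// pass that only strips the flag when optimizations are enabled, matching the
// early-return added in the diff.
struct Function {
    std::string name;
    bool serialized = false;
};

struct Module {
    std::vector<Function> functions;
    bool alreadySerialized = false;
};

struct Options {
    bool optimize = false;
};

using Pass = std::function<void(Module&, const Options&)>;

Pass makeSerializePass() {
    return [](Module& m, const Options& opts) {
        if (m.alreadySerialized) return;
        std::cout << "serializing module (" << m.functions.size() << " functions)\n";
        m.alreadySerialized = true;
        // Stripping the flag only matters for later dead-function elimination,
        // so skip the walk entirely when not optimizing to save compile time.
        if (!opts.optimize) return;
        for (auto& f : m.functions) f.serialized = false;
    };
}

int main() {
    Module m{{{"foo", true}, {"bar", true}}, false};
    Options onone{false};

    std::vector<Pass> ononePipeline;
    ononePipeline.push_back(makeSerializePass());  // serialization is just the last pass

    for (auto& pass : ononePipeline) pass(m, onone);
    for (auto& f : m.functions)
        std::cout << f.name << " serialized flag: " << std::boolalpha << f.serialized << "\n";
    return 0;
}
```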
mmm a / src / mgpcg_smoke . cpp <nl> ppp b / src / mgpcg_smoke . cpp <nl> class MGPCGSmoke { <nl> } <nl> for ( int i = 0 ; i < mg_lv - 1 ; i + + ) { <nl> / / pre - smoothing <nl> - for ( int j = 0 ; j < smoothing_iters ; j + + ) { <nl> - smooth ( i , U , B ) ; <nl> + for ( int j = 0 ; j < smoothing_iters * 1000000 ; j + + ) { <nl> + TC_TIME ( smooth ( i , U , B ) ) ; <nl> } <nl> residual ( i , U , B , R ) ; <nl> restrict ( i , R , B ) ; <nl> | Smoothing baseline 394ms | taichi-dev/taichi | 52553e2611d7d8a729a42e84929e24f12c836d04 | 2018-08-01T01:02:50Z |
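The tweak above cranks up the smoothing iteration count and wraps each call in TC_TIME(...) to record a per-sweep baseline. Below is a self-contained sketch of that kind of timing wrapper using std::chrono; TIME_IT and the placeholder smooth() kernel are illustrative, not taichi's actual macro or solver.

```cpp
#include <chrono>
#include <iostream>
#include <vector>

// Minimal stand-in for a TC_TIME-style macro: run an expression once and print how
// long that single invocation took, which is how a per-step baseline is measured.
#define TIME_IT(expr)                                                              \
    do {                                                                           \
        const auto _t0 = std::chrono::steady_clock::now();                         \
        expr;                                                                      \
        const auto _t1 = std::chrono::steady_clock::now();                         \
        std::cout << #expr << " took "                                             \
                  << std::chrono::duration<double, std::milli>(_t1 - _t0).count()  \
                  << " ms\n";                                                      \
    } while (0)

// Placeholder workload standing in for one smoothing sweep of a multigrid solver.
void smooth(std::vector<double>& u, const std::vector<double>& b) {
    for (std::size_t i = 1; i + 1 < u.size(); ++i)
        u[i] = 0.5 * (u[i - 1] + u[i + 1] + b[i]);
}

int main() {
    std::vector<double> u(1 << 20, 0.0), b(1 << 20, 1.0);
    for (int iter = 0; iter < 3; ++iter)
        TIME_IT(smooth(u, b));
    return 0;
}
```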
mmm a / xbmc / Application . cpp <nl> ppp b / xbmc / Application . cpp <nl> void CApplication : : OnPlayBackStarted ( ) <nl> getApplicationMessenger ( ) . HttpApi ( " broadcastlevel ; OnPlayBackStarted ; 1 " ) ; <nl> # endif <nl> <nl> - CVariant param ; <nl> - param [ " speed " ] = 1 ; <nl> - CAnnouncementManager : : Announce ( Player , " xbmc " , " OnPlay " , m_itemCurrentFile , param ) ; <nl> - <nl> CGUIMessage msg ( GUI_MSG_PLAYBACK_STARTED , 0 , 0 ) ; <nl> g_windowManager . SendThreadMessage ( msg ) ; <nl> } <nl> bool CApplication : : OnMessage ( CGUIMessage & message ) <nl> CLastFmManager : : GetInstance ( ) - > OnSongChange ( * m_itemCurrentFile ) ; <nl> g_partyModeManager . OnSongChange ( true ) ; <nl> <nl> + CVariant param ; <nl> + param [ " speed " ] = 1 ; <nl> + CAnnouncementManager : : Announce ( Player , " xbmc " , " OnPlay " , m_itemCurrentFile , param ) ; <nl> + <nl> DimLCDOnPlayback ( true ) ; <nl> <nl> if ( IsPlayingAudio ( ) ) <nl> | jsonrpc : fix " id " value in Player . OnPlay notification ( fixes ) | xbmc/xbmc | 2070b0c27a97fe6b9210cf0021fc5bf9d591f0ce | 2011-07-01T08:21:35Z |
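The fix above moves the OnPlay announcement out of the playback-started callback and into the message handler that runs after m_itemCurrentFile has been updated, so subscribers see the new item's id. The toy example below shows only that ordering principle (update state, then notify); Announcer and Player are invented types, not XBMC's classes.

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Invented types for illustration; the point is the ordering inside
// onPlaybackStarted(): make the player's state consistent first, then announce.
struct Announcer {
    std::vector<std::function<void(const std::string&, int)>> listeners;
    void announce(const std::string& event, int itemId) {
        for (auto& l : listeners) l(event, itemId);
    }
};

struct Player {
    Announcer& announcer;
    int currentItemId;

    void onPlaybackStarted(int newItemId) {
        currentItemId = newItemId;                    // state is consistent first...
        announcer.announce("OnPlay", currentItemId);  // ...then listeners are told
    }
};

int main() {
    Announcer a;
    a.listeners.push_back([](const std::string& ev, int id) {
        std::cout << ev << " for item " << id << "\n";
    });
    Player p{a, -1};
    p.onPlaybackStarted(42);
    return 0;
}
```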
mmm a / admin / static / coffee / otherviews . coffee <nl> ppp b / admin / static / coffee / otherviews . coffee <nl> module ' ResolveIssuesView ' , - > <nl> ' NAME_CONFLICT_ISSUE ' : Handlebars . compile $ ( ' # resolve_issues - name_conflict - template ' ) . html ( ) <nl> ' PERSISTENCE_ISSUE ' : Handlebars . compile $ ( ' # resolve_issues - persistence - template ' ) . html ( ) <nl> ' VCLOCK_CONFLICT ' : Handlebars . compile $ ( ' # resolve_issues - vclock_conflict - template ' ) . html ( ) <nl> - ' PINNINGS_SHARDS_MISMATCH ' : Handlebars . compile $ ( ' # resolve_issues - pinnings_shards_mismatch - template ' ) . html ( ) <nl> <nl> unknown_issue_template : Handlebars . compile $ ( ' # resolve_issues - unknown - template ' ) . html ( ) <nl> <nl> module ' ResolveIssuesView ' , - > <nl> when ' VCLOCK_CONFLICT ' <nl> json = <nl> datetime : iso_date_from_unix_time @ model . get ( ' time ' ) <nl> - when ' PINNINGS_SHARDS_MISMATCH ' <nl> - namespace = namespaces . get @ model . get ( ' offending_namespace ' ) <nl> - json = <nl> - namespace_name : namespace . get ( ' name ' ) <nl> - namespace_uuid : namespace . get ( ' id ' ) <nl> - datetime : iso_date_from_unix_time @ model . get ( ' time ' ) <nl> else <nl> _template = @ unknown_issue_template <nl> json = <nl> mmm a / admin / templates / cluster . html <nl> ppp b / admin / templates / cluster . html <nl> < h3 class = " title " > Metadata problem ( vclock ) . < / h3 > <nl> < hr / > <nl> < / div > <nl> < / script > <nl> - < script id = " resolve_issues - pinnings_shards_mismatch - template " type = " text / x - handlebars - template " > <nl> - < div class = " issue { { # if critical } } critical { { / if } } " > <nl> - < div class = " header " > <nl> - < h3 class = " title " > The current machine assignment conflicts with the sharding scheme for the namespace < a href = " # namespaces / { { namespace_uuid } } " > { { namespace_name } } < / a > . < / h3 > <nl> - { { # if datetime } } <nl> - < p class = " datetime " > < abbr class = " timeago " title = " { { datetime } } " > on { { prettify_date datetime } } < / abbr > < / p > <nl> - { { / if } } <nl> - < / div > <nl> - < hr / > <nl> - < / div > <nl> - < / script > <nl> < script id = " resolve_issues - unknown - template " type = " text / x - handlebars - template " > <nl> < div class = " issue { { # if critical } } critical { { / if } } " > <nl> < div class = " header " > <nl> | Removing pinnings issue from the ui because it ' s no longer relevant as discussed in | rethinkdb/rethinkdb | 0515818e28144ff1d3b780a0513328e0b68c95a0 | 2012-04-04T03:02:32Z |
mmm a / stdlib / runtime / Stubs . cpp <nl> ppp b / stdlib / runtime / Stubs . cpp <nl> <nl> # include < cstdio > <nl> # include < cstdlib > <nl> # include < cstring > <nl> + # include < xlocale . h > <nl> # include " llvm / ADT / StringExtras . h " <nl> # include " Debug . h " <nl> <nl> extern " C " uint64_t swift_doubleToString ( char * Buffer , size_t BufferLength , <nl> if ( BufferLength < 32 ) <nl> swift : : crash ( " swift_doubleToString : insufficient buffer size " ) ; <nl> <nl> - int i = snprintf ( Buffer , BufferLength , " % 0 . 15g " , Value ) ; <nl> + static locale_t locale = newlocale ( LC_NUMERIC_MASK , " loc1 " , ( locale_t ) 0 ) ; <nl> + int i = snprintf_l ( Buffer , BufferLength , locale , " % 0 . 15g " , Value ) ; <nl> if ( i < 0 ) <nl> swift : : crash ( <nl> " swift_doubleToString : unexpected return value from sprintf " ) ; <nl> new file mode 100644 <nl> index 000000000000 . . 9790a3a64e73 <nl> mmm / dev / null <nl> ppp b / test / Interpreter / locale . swift <nl> <nl> + / / RUN : % target - run - simple - swift - sdk % sdk % s | FileCheck % s <nl> + / / REQUIRES : sdk <nl> + <nl> + import Darwin <nl> + <nl> + / / Ensure that printing of Double ' s is locale - insensitive . <nl> + / / CHECK : x = 123 . 4 <nl> + / / CHECK : y = 42 . 0 <nl> + setlocale ( LC_ALL , " ru_RU . UTF - 8 " ) <nl> + let x = 123 . 4 <nl> + let y = 42 . 0 <nl> + println ( " x = \ ( x ) " ) <nl> + println ( " y = \ ( y ) " ) <nl> | Make printing of double ' s locale - insensitive . | apple/swift | def1f53b7dc892c8cefbbc144a29c2662938e58c | 2014-05-16T17:43:54Z |
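The runtime change above formats doubles through snprintf_l with a fixed locale so the output always uses '.' as the decimal separator, regardless of the process locale. The sketch below demonstrates the same idea portably with an iostream imbued with the classic "C" locale; it illustrates the principle only and is not the Swift runtime's actual code path.

```cpp
#include <clocale>
#include <iostream>
#include <locale>
#include <sstream>
#include <string>

// Format a double against a fixed locale so the result uses '.' even when the
// process locale would print ','.
std::string doubleToString(double value) {
    std::ostringstream out;
    out.imbue(std::locale::classic());  // decimal point is always '.'
    out.precision(15);
    out << value;
    return out.str();
}

int main() {
    // Pretend the user runs under a comma-decimal locale; if that locale is not
    // installed, setlocale simply fails and the demo still works.
    std::setlocale(LC_ALL, "de_DE.UTF-8");
    std::cout << "x = " << doubleToString(123.4) << "\n";  // always "123.4"
    std::cout << "y = " << doubleToString(42.0) << "\n";   // always "42"
    return 0;
}
```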
mmm a / src / inc / til . h <nl> ppp b / src / inc / til . h <nl> <nl> <nl> # pragma once <nl> <nl> + # include " til / at . h " <nl> # include " til / some . h " <nl> <nl> namespace til / / Terminal Implementation Library . Also : " Today I Learned " <nl> { <nl> - / / The at function declares that you ' ve already sufficiently checked that your array access <nl> - / / is in range before retrieving an item inside it at an offset . <nl> - / / This is to save double / triple / quadruple testing in circumstances where you are already <nl> - / / pivoting on the length of a set and now want to pull elements out of it by offset <nl> - / / without checking again . <nl> - / / gsl : : at will do the check again . As will . at ( ) . And using [ ] will have a warning in audit . <nl> - template < class T > <nl> - constexpr auto at ( T & cont , const size_t i ) - > decltype ( cont [ cont . size ( ) ] ) <nl> - { <nl> - # pragma warning ( suppress : 26482 ) / / Suppress bounds . 2 check for indexing with constant expressions <nl> - # pragma warning ( suppress : 26446 ) / / Suppress bounds . 4 check for subscript operator . <nl> - return cont [ i ] ; <nl> - } <nl> } <nl> <nl> / / These sit outside the namespace because they sit outside for WIL too . <nl> new file mode 100644 <nl> index 00000000000 . . 56001dfe879 <nl> mmm / dev / null <nl> ppp b / src / inc / til / at . h <nl> <nl> + / / Copyright ( c ) Microsoft Corporation . <nl> + / / Licensed under the MIT license . <nl> + <nl> + # pragma once <nl> + <nl> + namespace til <nl> + { <nl> + / / The at function declares that you ' ve already sufficiently checked that your array access <nl> + / / is in range before retrieving an item inside it at an offset . <nl> + / / This is to save double / triple / quadruple testing in circumstances where you are already <nl> + / / pivoting on the length of a set and now want to pull elements out of it by offset <nl> + / / without checking again . <nl> + / / gsl : : at will do the check again . As will . at ( ) . And using [ ] will have a warning in audit . <nl> + template < class T > <nl> + constexpr auto at ( T & cont , const size_t i ) - > decltype ( cont [ cont . size ( ) ] ) <nl> + { <nl> + # pragma warning ( suppress : 26482 ) / / Suppress bounds . 2 check for indexing with constant expressions <nl> + # pragma warning ( suppress : 26446 ) / / Suppress bounds . 4 check for subscript operator . <nl> + return cont [ i ] ; <nl> + } <nl> + } <nl> mmm a / src / inc / til / some . h <nl> ppp b / src / inc / til / some . h <nl> namespace til / / Terminal Implementation Library . Also : " Today I Learned " <nl> _outOfRange ( ) ; <nl> } <nl> <nl> - _array [ _used ] = val ; <nl> + til : : at ( _array , _used ) = val ; <nl> <nl> + + _used ; <nl> } <nl> namespace til / / Terminal Implementation Library . Also : " Today I Learned " <nl> <nl> - - _used ; <nl> <nl> - _array [ _used ] = 0 ; <nl> + til : : at ( _array , _used ) = 0 ; <nl> } <nl> <nl> [ [ noreturn ] ] void _invalidArg ( ) const <nl> mmm a / src / types / inc / viewport . hpp <nl> ppp b / src / types / inc / viewport . 
hpp <nl> Author ( s ) : <nl> <nl> namespace Microsoft : : Console : : Types <nl> { <nl> - struct SomeViewports ; <nl> + class Viewport ; <nl> + <nl> + using SomeViewports = til : : some < Viewport , 4 > ; <nl> <nl> class Viewport final <nl> { <nl> public : <nl> ~ Viewport ( ) { } <nl> + constexpr Viewport ( ) noexcept : <nl> + _sr ( { 0 , 0 , - 1 , - 1 } ) { } ; <nl> Viewport ( const Viewport & other ) noexcept ; <nl> Viewport ( Viewport & & ) = default ; <nl> Viewport & operator = ( const Viewport & ) & = default ; <nl> namespace Microsoft : : Console : : Types <nl> friend class ViewportTests ; <nl> # endif <nl> } ; <nl> - <nl> - struct SomeViewports final <nl> - { <nl> - unsigned char used { 0 } ; <nl> - std : : array < Viewport , 4 > viewports { Viewport : : Empty ( ) , Viewport : : Empty ( ) , Viewport : : Empty ( ) , Viewport : : Empty ( ) } ; <nl> - <nl> - / / These two methods are to make this vaguely look like a std : : vector . <nl> - <nl> - / / Size is the number of viewports that are valid inside this structure <nl> - size_t size ( ) const noexcept { return used ; } <nl> - <nl> - / / At retrieves a viewport at a particular index . If you retrieve beyond the valid size ( ) , <nl> - / / it will throw std : : out_of_range <nl> - const Viewport & at ( size_t index ) const <nl> - { <nl> - if ( index > = used ) <nl> - { <nl> - throw std : : out_of_range ( " Access attempted beyond valid size . " ) ; <nl> - } <nl> - return viewports . at ( index ) ; <nl> - } <nl> - } ; <nl> } <nl> <nl> inline COORD operator - ( const COORD & a , const COORD & b ) noexcept <nl> mmm a / src / types / viewport . cpp <nl> ppp b / src / types / viewport . cpp <nl> Viewport : : Viewport ( const Viewport & other ) noexcept : <nl> <nl> Viewport Viewport : : Empty ( ) noexcept <nl> { <nl> - return Viewport ( { 0 , 0 , - 1 , - 1 } ) ; <nl> + return Viewport ( ) ; <nl> } <nl> <nl> Viewport Viewport : : FromInclusive ( const SMALL_RECT sr ) noexcept <nl> Viewport Viewport : : ToOrigin ( ) const noexcept <nl> / / that was covered by ` main ` before the regional area of ` removeMe ` was taken out . <nl> / / - You must check that each viewport . IsValid ( ) before using it . <nl> [ [ nodiscard ] ] SomeViewports Viewport : : Subtract ( const Viewport & original , const Viewport & removeMe ) noexcept <nl> + try <nl> { <nl> SomeViewports result ; <nl> <nl> Viewport Viewport : : ToOrigin ( ) const noexcept <nl> if ( ! intersection . IsValid ( ) ) <nl> { <nl> / / Just put the original rectangle into the results and return early . <nl> - result . viewports . at ( result . used + + ) = original ; <nl> + result . push_back ( original ) ; <nl> } <nl> / / If the original rectangle matches the intersection , there is nothing to return . <nl> else if ( original ! = intersection ) <nl> Viewport Viewport : : ToOrigin ( ) const noexcept <nl> <nl> if ( top . IsValid ( ) ) <nl> { <nl> - result . viewports . at ( result . used + + ) = top ; <nl> + result . push_back ( top ) ; <nl> } <nl> <nl> if ( bottom . IsValid ( ) ) <nl> { <nl> - result . viewports . at ( result . used + + ) = bottom ; <nl> + result . push_back ( bottom ) ; <nl> } <nl> <nl> if ( left . IsValid ( ) ) <nl> { <nl> - result . viewports . at ( result . used + + ) = left ; <nl> + result . push_back ( left ) ; <nl> } <nl> <nl> if ( right . IsValid ( ) ) <nl> { <nl> - result . viewports . at ( result . used + + ) = right ; <nl> + result . 
push_back ( right ) ; <nl> } <nl> } <nl> <nl> return result ; <nl> } <nl> + CATCH_FAIL_FAST ( ) <nl> <nl> / / Method Description : <nl> / / - Returns true if the rectangle described by this Viewport has internal space <nl> | Use til : : some < T , N > to replace the SomeViewports class ( ) | microsoft/terminal | 69f307041701c2ec140d542a6ba2498770e01484 | 2020-01-20T20:53:24Z |
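The commit above replaces the hand-rolled SomeViewports struct with a reusable fixed-capacity container, til::some<T, N>, giving push_back/size/at semantics over a stack-allocated array. A much-simplified sketch of such a container is below; it omits til's checked-access helpers and exception policy and is not the real implementation.

```cpp
#include <array>
#include <cstddef>
#include <iostream>
#include <stdexcept>

// Minimal fixed-capacity container in the spirit of til::some<T, N>: a stack-allocated
// array plus a count, with vector-like push_back/size/at.
template <class T, std::size_t N>
class some {
    std::array<T, N> _storage{};
    std::size_t _used = 0;

public:
    void push_back(const T& value) {
        if (_used >= N) throw std::length_error("some: capacity exceeded");
        _storage[_used++] = value;
    }
    std::size_t size() const noexcept { return _used; }
    const T& at(std::size_t i) const {
        if (i >= _used) throw std::out_of_range("some: index past valid size");
        return _storage[i];
    }
};

struct Rect { int left, top, right, bottom; };

int main() {
    // A rectangle subtraction yields at most four remainder rectangles, so a fixed
    // capacity of 4 avoids any heap allocation.
    some<Rect, 4> remainders;
    remainders.push_back({0, 0, 10, 2});
    remainders.push_back({0, 8, 10, 10});
    std::cout << "pieces: " << remainders.size() << "\n";
    std::cout << "first top: " << remainders.at(0).top << "\n";
    return 0;
}
```

Keeping the capacity in the type means the at-most-four-rectangles invariant of viewport subtraction is enforced by the container itself rather than re-asserted at every call site.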
mmm a / system / include / libc / math . h <nl> ppp b / system / include / libc / math . h <nl> extern int __signbitd ( double x ) ; <nl> # define isinf ( y ) ( fpclassify ( y ) = = FP_INFINITE ) <nl> # endif <nl> <nl> + # ifndef isinff <nl> + # define isinff isinf <nl> + # endif <nl> + <nl> # ifndef isnan <nl> # define isnan ( y ) ( fpclassify ( y ) = = FP_NAN ) <nl> # endif <nl> <nl> + # ifndef isnanf <nl> + # define isnanf isnan <nl> + # endif <nl> + <nl> # define isnormal ( y ) ( fpclassify ( y ) = = FP_NORMAL ) <nl> # define signbit ( __x ) \ <nl> ( ( sizeof ( __x ) = = sizeof ( float ) ) ? __signbitf ( __x ) : \ <nl> | Merge pull request from yukoba / isinff_isnanf | emscripten-core/emscripten | 0fb0ee563b287d022877a30e265bcc96687d628f | 2013-07-30T00:56:43Z |
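The header change above supplies isinff/isnanf on platforms whose math.h lacks them by aliasing the names to the type-generic isinf/isnan, guarded with #ifndef so existing definitions win. A small compilable illustration of that guard-and-alias pattern (the fallbacks here map to std::isinf/std::isnan, which is an illustrative choice rather than the libc's exact expansion):

```cpp
#include <cmath>
#include <iostream>
#include <limits>

// If the platform's headers do not already provide the single-precision names,
// fall back to the type-generic checks -- and only then, so real definitions win.
#ifndef isinff
#define isinff(x) (std::isinf(x))
#endif

#ifndef isnanf
#define isnanf(x) (std::isnan(x))
#endif

int main() {
    const float inf = std::numeric_limits<float>::infinity();
    const float qnan = std::numeric_limits<float>::quiet_NaN();
    std::cout << std::boolalpha
              << "isinff(inf)  = " << static_cast<bool>(isinff(inf)) << "\n"
              << "isnanf(qnan) = " << static_cast<bool>(isnanf(qnan)) << "\n";
    return 0;
}
```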
mmm a / Common / BestGpu . cpp <nl> ppp b / Common / BestGpu . cpp <nl> PfnDliHook __pfnDliNotifyHook2 = ( PfnDliHook ) DelayLoadNofify ; <nl> / / This is the failure hook , dliNotify = { dliFailLoadLib | dliFailGetProc } <nl> ExternC <nl> PfnDliHook __pfnDliFailureHook2 = ( PfnDliHook ) DelayLoadNofify ; <nl> - } } } <nl> # endif / / _WIN32 <nl> + } } } <nl> <nl> # endif / / CPUONLY <nl> | Fix linux builds . | microsoft/CNTK | 5c351daee347b533e59a46948eef95e3b77f2850 | 2015-07-13T21:51:03Z |
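The one-line move above fixes non-Windows builds: the namespace-closing braces had ended up inside the #ifdef _WIN32 region, so they disappeared on Linux. The sketch below shows the general rule being applied, that braces opened unconditionally must also close unconditionally; the demo namespace and backend() function are made up for illustration.

```cpp
#include <iostream>

// Platform-specific declarations live inside the #if block, but the namespace's
// closing braces stay outside it so every configuration still sees them.
namespace demo { namespace platform {

#ifdef _WIN32
inline const char* backend() { return "win32 (delay-load hooks would go here)"; }
#else
inline const char* backend() { return "posix"; }
#endif  // _WIN32

} }  // namespace demo::platform -- closed on every platform

int main() {
    std::cout << "backend: " << demo::platform::backend() << "\n";
    return 0;
}
```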
mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> message ( STATUS " Assertions : $ { LLVM_ENABLE_ASSERTIONS } " ) <nl> message ( STATUS " LTO : $ { SWIFT_TOOLS_ENABLE_LTO } " ) <nl> message ( STATUS " " ) <nl> <nl> - message ( STATUS " Building Swift standard library and SDK overlays for SDKs : $ { SWIFT_SDKS } " ) <nl> - message ( STATUS " Build type : $ { SWIFT_STDLIB_BUILD_TYPE } " ) <nl> - message ( STATUS " Assertions : $ { SWIFT_STDLIB_ASSERTIONS } " ) <nl> - message ( STATUS " " ) <nl> + if ( SWIFT_BULID_STDLIB OR SWIFT_BUILD_SDK_OVERLAY ) <nl> <nl> - message ( STATUS " Building Swift runtime with : " ) <nl> - message ( STATUS " Leak Detection Checker Entrypoints : $ { SWIFT_RUNTIME_ENABLE_LEAK_CHECKER } " ) <nl> - message ( STATUS " " ) <nl> + message ( STATUS " Building Swift standard library and overlays for SDKs : $ { SWIFT_SDKS } " ) <nl> + message ( STATUS " Build type : $ { SWIFT_STDLIB_BUILD_TYPE } " ) <nl> + message ( STATUS " Assertions : $ { SWIFT_STDLIB_ASSERTIONS } " ) <nl> + message ( STATUS " " ) <nl> + <nl> + message ( STATUS " Building Swift runtime with : " ) <nl> + message ( STATUS " Leak Detection Checker Entrypoints : $ { SWIFT_RUNTIME_ENABLE_LEAK_CHECKER } " ) <nl> + message ( STATUS " " ) <nl> + <nl> + else ( ) <nl> + <nl> + message ( STATUS " Not building Swift standard library , SDK overlays , and runtime " ) <nl> + message ( STATUS " " ) <nl> + <nl> + endif ( ) <nl> <nl> # <nl> # Find required dependencies . <nl> | Merge pull request from mnvr / SR - 5817 | apple/swift | 9f92034f3bd99a1940e2a2ff3c4dc9fcb3871a4a | 2017-10-02T19:30:49Z |
mmm a / modules / planning / common / speed_profile_generator . cc <nl> ppp b / modules / planning / common / speed_profile_generator . cc <nl> using common : : math : : Vec2d ; <nl> <nl> std : : vector < SpeedPoint > SpeedProfileGenerator : : GenerateInitSpeedProfile ( <nl> const TrajectoryPoint & planning_init_point , <nl> - const ReferenceLineInfo * reference_line_info ) const { <nl> + const ReferenceLineInfo * reference_line_info ) { <nl> std : : vector < SpeedPoint > speed_profile ; <nl> const auto * last_frame = FrameHistory : : Instance ( ) - > Latest ( ) ; <nl> if ( ! last_frame ) { <nl> std : : vector < SpeedPoint > SpeedProfileGenerator : : GenerateInitSpeedProfile ( <nl> / / a dummy simple hot start <nl> / / TODO ( All ) : refine the hotstart speed profile <nl> std : : vector < SpeedPoint > SpeedProfileGenerator : : GenerateSpeedHotStart ( <nl> - const TrajectoryPoint & planning_init_point ) const { <nl> + const TrajectoryPoint & planning_init_point ) { <nl> std : : vector < SpeedPoint > hot_start_speed_profile ; <nl> double s = 0 . 0 ; <nl> double t = 0 . 0 ; <nl> SpeedData SpeedProfileGenerator : : GenerateFallbackSpeedProfile ( ) { <nl> return GenerateStopProfile ( init_v , init_a ) ; <nl> } <nl> <nl> - SpeedData SpeedProfileGenerator : : GenerateStopProfile ( <nl> - const double init_speed , const double init_acc ) const { <nl> + SpeedData SpeedProfileGenerator : : GenerateStopProfile ( const double init_speed , <nl> + const double init_acc ) { <nl> AERROR < < " Using fallback stopping profile : Slowing down the car ! " ; <nl> SpeedData speed_data ; <nl> <nl> SpeedData SpeedProfileGenerator : : GenerateStopProfile ( <nl> } <nl> <nl> SpeedData SpeedProfileGenerator : : GenerateStopProfileFromPolynomial ( <nl> - const double init_speed , const double init_acc ) const { <nl> + const double init_speed , const double init_acc ) { <nl> AERROR < < " Slowing down the car with polynomial . " ; <nl> constexpr double kMaxT = 4 . 0 ; <nl> for ( double t = 2 . 0 ; t < = kMaxT ; t + = 0 . 5 ) { <nl> SpeedData SpeedProfileGenerator : : GenerateStopProfileFromPolynomial ( <nl> } <nl> <nl> bool SpeedProfileGenerator : : IsValidProfile ( <nl> - const QuinticPolynomialCurve1d & curve ) const { <nl> + const QuinticPolynomialCurve1d & curve ) { <nl> for ( double evaluate_t = 0 . 1 ; evaluate_t < = curve . ParamLength ( ) ; <nl> evaluate_t + = 0 . 2 ) { <nl> const double v = curve . Evaluate ( 1 , evaluate_t ) ; <nl> mmm a / modules / planning / common / speed_profile_generator . h <nl> ppp b / modules / planning / common / speed_profile_generator . 
h <nl> namespace planning { <nl> <nl> class SpeedProfileGenerator { <nl> public : <nl> - SpeedProfileGenerator ( ) = default ; <nl> - ~ SpeedProfileGenerator ( ) = default ; <nl> + SpeedProfileGenerator ( ) = delete ; <nl> + ~ SpeedProfileGenerator ( ) = delete ; <nl> <nl> - std : : vector < common : : SpeedPoint > GenerateInitSpeedProfile ( <nl> + static std : : vector < common : : SpeedPoint > GenerateInitSpeedProfile ( <nl> const common : : TrajectoryPoint & planning_init_point , <nl> - const ReferenceLineInfo * reference_line_info ) const ; <nl> + const ReferenceLineInfo * reference_line_info ) ; <nl> <nl> - std : : vector < common : : SpeedPoint > GenerateSpeedHotStart ( <nl> - const common : : TrajectoryPoint & planning_init_point ) const ; <nl> + static std : : vector < common : : SpeedPoint > GenerateSpeedHotStart ( <nl> + const common : : TrajectoryPoint & planning_init_point ) ; <nl> <nl> - SpeedData GenerateFallbackSpeedProfile ( ) ; <nl> + static SpeedData GenerateFallbackSpeedProfile ( ) ; <nl> <nl> static SpeedData GenerateFixedDistanceCreepProfile ( const double distance , <nl> const double max_speed ) ; <nl> class SpeedProfileGenerator { <nl> const double max_speed ) ; <nl> <nl> private : <nl> - SpeedData GenerateStopProfile ( const double init_speed , <nl> - const double init_acc ) const ; <nl> + static SpeedData GenerateStopProfile ( const double init_speed , <nl> + const double init_acc ) ; <nl> <nl> - SpeedData GenerateStopProfileFromPolynomial ( const double init_speed , <nl> - const double init_acc ) const ; <nl> + static SpeedData GenerateStopProfileFromPolynomial ( const double init_speed , <nl> + const double init_acc ) ; <nl> <nl> - bool IsValidProfile ( const QuinticPolynomialCurve1d & curve ) const ; <nl> + static bool IsValidProfile ( const QuinticPolynomialCurve1d & curve ) ; <nl> } ; <nl> <nl> } / / namespace planning <nl> mmm a / modules / planning / common / speed_profile_generator_test . cc <nl> ppp b / modules / planning / common / speed_profile_generator_test . cc <nl> <nl> namespace apollo { <nl> namespace planning { <nl> <nl> - class SpeedProfileGeneratorTest : public : : testing : : Test { <nl> - public : <nl> - virtual void SetUp ( ) { } <nl> - <nl> - protected : <nl> - SpeedProfileGenerator spg_ ; <nl> - } ; <nl> - <nl> - TEST_F ( SpeedProfileGeneratorTest , GenerateFallbackSpeedProfile ) { <nl> - auto speed_data = spg_ . GenerateFallbackSpeedProfile ( ) ; <nl> + TEST ( SpeedProfileGeneratorTest , GenerateFallbackSpeedProfile ) { <nl> + auto speed_data = SpeedProfileGenerator : : GenerateFallbackSpeedProfile ( ) ; <nl> EXPECT_FALSE ( speed_data . Empty ( ) ) ; <nl> <nl> common : : TrajectoryPoint adc_planning_point ; <nl> TEST_F ( SpeedProfileGeneratorTest , GenerateFallbackSpeedProfile ) { <nl> const std : : vector < const Obstacle * > obstacles ; <nl> <nl> EgoInfo : : Instance ( ) - > Update ( adc_planning_point , vs , obstacles ) ; <nl> - auto speed_data2 = spg_ . GenerateFallbackSpeedProfile ( ) ; <nl> + auto speed_data2 = SpeedProfileGenerator : : GenerateFallbackSpeedProfile ( ) ; <nl> EXPECT_FALSE ( speed_data2 . Empty ( ) ) ; <nl> } <nl> <nl> mmm a / modules / planning / scenarios / lane_follow / lane_follow_stage . cc <nl> ppp b / modules / planning / scenarios / lane_follow / lane_follow_stage . cc <nl> Status LaneFollowStage : : PlanOnReferenceLine ( <nl> } <nl> ADEBUG < < " planning start point : " < < planning_start_point . 
DebugString ( ) ; <nl> auto * heuristic_speed_data = reference_line_info - > mutable_speed_data ( ) ; <nl> - auto speed_profile = speed_profile_generator_ . GenerateInitSpeedProfile ( <nl> + auto speed_profile = SpeedProfileGenerator : : GenerateInitSpeedProfile ( <nl> planning_start_point , reference_line_info ) ; <nl> if ( speed_profile . empty ( ) ) { <nl> speed_profile = <nl> - speed_profile_generator_ . GenerateSpeedHotStart ( planning_start_point ) ; <nl> + SpeedProfileGenerator : : GenerateSpeedHotStart ( planning_start_point ) ; <nl> ADEBUG < < " Using dummy hot start for speed vector " ; <nl> } <nl> heuristic_speed_data - > set_speed_vector ( speed_profile ) ; <nl> Status LaneFollowStage : : PlanOnReferenceLine ( <nl> ADEBUG < < " Speed fallback . " ; <nl> <nl> * reference_line_info - > mutable_speed_data ( ) = <nl> - speed_profile_generator_ . GenerateFallbackSpeedProfile ( ) ; <nl> + SpeedProfileGenerator : : GenerateFallbackSpeedProfile ( ) ; <nl> reference_line_info - > AddCost ( kSpeedOptimizationFallbackCost ) ; <nl> reference_line_info - > set_trajectory_type ( ADCTrajectory : : SPEED_FALLBACK ) ; <nl> } <nl> mmm a / modules / planning / scenarios / lane_follow / lane_follow_stage . h <nl> ppp b / modules / planning / scenarios / lane_follow / lane_follow_stage . h <nl> class LaneFollowStage : public Stage { <nl> private : <nl> ScenarioConfig config_ ; <nl> std : : unique_ptr < Stage > stage_ ; <nl> - SpeedProfileGenerator speed_profile_generator_ ; <nl> } ; <nl> <nl> } / / namespace scenario <nl> | Planning : directly generate speed profiles without creating instances . | ApolloAuto/apollo | d2d199f2696a281fc719ac6acdccf5c78021706d | 2018-12-13T23:17:57Z |
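The row above (ApolloAuto/apollo) converts `SpeedProfileGenerator` from an object callers had to construct and keep (the `speed_profile_generator_` member in `LaneFollowStage`) into a set of static functions: the constructor and destructor become `= delete`, every method drops its now-meaningless `const` and gains `static`, call sites switch to `SpeedProfileGenerator::...` directly, and the test fixture that existed only to hold an instance is replaced by plain `TEST` cases. A minimal sketch of that refactor, with a deliberately simplified placeholder body and written in TypeScript rather than the project's C++:

```ts
// Illustrative only — the project code is C++; the class name and body are placeholders.
class SpeedProfileSketch {
  // Plays the role of `SpeedProfileGenerator() = delete`: no instances can be created.
  private constructor() {}

  // Stateless helper: it never touched `this`, so it can become static unchanged.
  static generateStopProfile(initSpeed: number, initAcc: number): Array<[number, number]> {
    const points: Array<[number, number]> = []; // [t, v] pairs standing in for SpeedData
    let v = initSpeed;
    let t = 0;
    while (v > 0 && t < 10) {
      points.push([t, v]);
      v += initAcc * 0.1; // decelerate in 0.1 s steps
      t += 0.1;
    }
    points.push([t, 0]);
    return points;
  }
}

// Call sites drop their `speed_profile_generator_` member and call the class directly.
const stop = SpeedProfileSketch.generateStopProfile(10.0, -4.0);
console.log(`profile length: ${stop.length}, final point: ${stop[stop.length - 1]}`);
```

The refactor is safe precisely because none of the methods read or wrote per-instance state; the dropped `const` qualifiers only existed because the functions used to be instance members.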
mmm a / html5 / default / api / methods . js <nl> ppp b / html5 / default / api / methods . js <nl> <nl> / * * <nl> * @ fileOverview The api for invoking with " $ " prefix <nl> * / <nl> - import * as _ from ' . . / util ' <nl> + import { extend , typof } from ' . . / util ' <nl> <nl> / * * <nl> * = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> import * as _ from ' . . / util ' <nl> * @ return { Vm } <nl> * / <nl> export function $ ( id ) { <nl> - _ . warn ( ' Vm # $ is deprecated , please use Vm # $ vm instead ' ) <nl> + console . warn ( ' [ JS Framework ] Vm # $ is deprecated , please use Vm # $ vm instead ' ) <nl> const info = this . _ids [ id ] <nl> if ( info ) { <nl> return info . vm <nl> export function $ renderThen ( fn ) { <nl> * @ param { number } offset <nl> * / <nl> export function $ scrollTo ( id , offset ) { <nl> - _ . warn ( ' Vm # $ scrollTo is deprecated , ' + <nl> + console . warn ( ' [ JS Framework ] Vm # $ scrollTo is deprecated , ' + <nl> ' please use " require ( \ ' @ weex - module / dom \ ' ) ' + <nl> ' . scrollTo ( el , options ) " instead ' ) <nl> const el = this . $ el ( id ) <nl> export function $ transition ( id , options , callback ) { <nl> * @ property { number } env . deviceHeight <nl> * / <nl> export function $ getConfig ( callback ) { <nl> - const config = _ . extend ( { <nl> + const config = extend ( { <nl> env : global . WXEnvironment | | { } <nl> } , this . _app . options ) <nl> - if ( _ . typof ( callback ) = = = ' function ' ) { <nl> - _ . warn ( ' the callback of Vm # $ getConfig ( callback ) is deprecated , ' + <nl> + if ( typof ( callback ) = = = ' function ' ) { <nl> + console . warn ( ' [ JS Framework ] the callback of Vm # $ getConfig ( callback ) is deprecated , ' + <nl> ' this api now can directly RETURN config info . ' ) <nl> callback ( config ) <nl> } <nl> export function $ getConfig ( callback ) { <nl> * @ param { Function } callback <nl> * / <nl> export function $ sendHttp ( params , callback ) { <nl> - _ . warn ( ' Vm # $ sendHttp is deprecated , ' + <nl> + console . warn ( ' [ JS Framework ] Vm # $ sendHttp is deprecated , ' + <nl> ' please use " require ( \ ' @ weex - module / stream \ ' ) ' + <nl> ' . sendHttp ( params , callback ) " instead ' ) <nl> const stream = this . _app . requireModule ( ' stream ' ) <nl> export function $ sendHttp ( params , callback ) { <nl> * @ param { string } url <nl> * / <nl> export function $ openURL ( url ) { <nl> - _ . warn ( ' Vm # $ openURL is deprecated , ' + <nl> + console . warn ( ' [ JS Framework ] Vm # $ openURL is deprecated , ' + <nl> ' please use " require ( \ ' @ weex - module / event \ ' ) ' + <nl> ' . openURL ( url ) " instead ' ) <nl> const event = this . _app . requireModule ( ' event ' ) <nl> export function $ openURL ( url ) { <nl> * @ param { string } title <nl> * / <nl> export function $ setTitle ( title ) { <nl> - _ . warn ( ' Vm # $ setTitle is deprecated , ' + <nl> + console . warn ( ' [ JS Framework ] Vm # $ setTitle is deprecated , ' + <nl> ' please use " require ( \ ' @ weex - module / pageInfo \ ' ) ' + <nl> ' . setTitle ( title ) " instead ' ) <nl> const pageInfo = this . _app . requireModule ( ' pageInfo ' ) <nl> export function $ setTitle ( title ) { <nl> * @ param { . . . * } the rest arguments <nl> * / <nl> export function $ call ( moduleName , methodName , . . . args ) { <nl> - _ . warn ( ' Vm # $ call is deprecated , ' + <nl> + console . 
warn ( ' [ JS Framework ] Vm # $ call is deprecated , ' + <nl> ' please use " require ( \ ' @ weex - module / moduleName \ ' ) " instead ' ) <nl> const module = this . _app . requireModule ( moduleName ) <nl> if ( module & & module [ methodName ] ) { <nl> mmm a / html5 / default / app / bundle . js <nl> ppp b / html5 / default / app / bundle . js <nl> <nl> * / <nl> <nl> import semver from ' semver ' <nl> - import * as _ from ' . . / util ' <nl> + import { typof , isPlainObject } from ' . . / util ' <nl> import Vm from ' . . / vm ' <nl> import * as downgrade from ' . / downgrade ' <nl> <nl> export function clearCommonModules ( ) { <nl> / / Notice : DO NOT use function define ( ) { } , <nl> / / it will cause error after builded by webpack <nl> export const define = function ( name , deps , factory ) { <nl> - _ . debug ( ' define a component ' , name ) <nl> + console . debug ( ` [ JS Framework ] define a component $ { name } ` ) <nl> <nl> - if ( _ . typof ( deps ) = = = ' function ' ) { <nl> + if ( typof ( deps ) = = = ' function ' ) { <nl> factory = deps <nl> deps = [ ] <nl> } <nl> export const define = function ( name , deps , factory ) { <nl> } <nl> <nl> export function bootstrap ( name , config , data ) { <nl> - _ . debug ( ` bootstrap for $ { name } ` ) <nl> + console . debug ( ` [ JS Framework ] bootstrap for $ { name } ` ) <nl> <nl> let cleanName <nl> <nl> export function bootstrap ( name , config , data ) { <nl> return new Error ( ` Wrong component name : $ { name } ` ) <nl> } <nl> <nl> - config = _ . isPlainObject ( config ) ? config : { } <nl> + config = isPlainObject ( config ) ? config : { } <nl> <nl> if ( typeof config . transformerVersion = = = ' string ' & & <nl> - typeof global . needTransformerVersion = = = ' string ' & & <nl> + typeof global . transformerVersion = = = ' string ' & & <nl> ! semver . satisfies ( config . transformerVersion , <nl> - global . needTransformerVersion ) ) { <nl> + global . transformerVersion ) ) { <nl> return new Error ( ` JS Bundle version : $ { config . transformerVersion } ` + <nl> - ` not compatible with $ { global . needTransformerVersion } ` ) <nl> + ` not compatible with $ { global . transformerVersion } ` ) <nl> } <nl> <nl> const _checkDowngrade = downgrade . check ( config . downgrade ) <nl> export function bootstrap ( name , config , data ) { <nl> * @ deprecated <nl> * / <nl> export function register ( type , options ) { <nl> - _ . warn ( ' Register is deprecated , please install lastest transformer . ' ) <nl> + console . warn ( ' [ JS Framework ] Register is deprecated , please install lastest transformer . ' ) <nl> this . registerComponent ( type , options ) <nl> } <nl> <nl> export function register ( type , options ) { <nl> * @ deprecated <nl> * / <nl> export function render ( type , data ) { <nl> - _ . warn ( ' Render is deprecated , please install lastest transformer . ' ) <nl> + console . warn ( ' [ JS Framework ] Render is deprecated , please install lastest transformer . ' ) <nl> return this . bootstrap ( type , { } , data ) <nl> } <nl> <nl> export function render ( type , data ) { <nl> * @ deprecated <nl> * / <nl> export function require ( type ) { <nl> - _ . warn ( ' Require is deprecated , please install lastest transformer . ' ) <nl> + console . warn ( ' [ JS Framework ] Require is deprecated , please install lastest transformer . ' ) <nl> return ( data ) = > { <nl> return this . bootstrap ( type , { } , data ) <nl> } <nl> mmm a / html5 / default / app / ctrl . js <nl> ppp b / html5 / default / app / ctrl . 
js <nl> <nl> * corresponded with the API of instance manager ( framework . js ) <nl> * / <nl> <nl> - import * as _ from ' . . / util ' <nl> + import { extend , bind } from ' . . / util ' <nl> <nl> export function updateActions ( ) { <nl> this . differ . flush ( ) <nl> export function updateActions ( ) { <nl> } <nl> <nl> export function init ( code , data ) { <nl> - _ . debug ( ' Intialize an instance with : \ n ' , code , data ) <nl> + console . debug ( ' [ JS Framework ] Intialize an instance with : \ n ' , data ) <nl> <nl> let result <nl> / / @ see : lib / app / bundle . js <nl> - const define = _ . bind ( this . define , this ) <nl> + const define = bind ( this . define , this ) <nl> const bootstrap = ( name , config , _data ) = > { <nl> result = this . bootstrap ( name , config , _data | | data ) <nl> this . updateActions ( ) <nl> this . doc . listener . createFinish ( ) <nl> - _ . debug ( ` After intialized an instance ( $ { this . id } ) ` ) <nl> + console . debug ( ` [ JS Framework ] After intialized an instance ( $ { this . id } ) ` ) <nl> } <nl> <nl> / / backward ( register / render ) <nl> - const register = _ . bind ( this . register , this ) <nl> + const register = bind ( this . register , this ) <nl> const render = ( name , _data ) = > { <nl> result = this . bootstrap ( name , { } , _data ) <nl> } <nl> export function init ( code , data ) { <nl> } <nl> <nl> export function destroy ( ) { <nl> - _ . debug ( ` Destory an instance ( $ { this . id } ) ` ) <nl> + console . debug ( ` [ JS Framework ] Destory an instance ( $ { this . id } ) ` ) <nl> <nl> this . id = ' ' <nl> this . options = null <nl> export function getRootElement ( ) { <nl> } <nl> <nl> export function fireEvent ( ref , type , e , domChanges ) { <nl> - _ . debug ( ` Fire a " $ { type } " event on an element ( $ { ref } ) in instance ( $ { this . id } ) ` ) <nl> + console . debug ( ` [ JS Framework ] Fire a " $ { type } " event on an element ( $ { ref } ) in instance ( $ { this . id } ) ` ) <nl> if ( Array . isArray ( ref ) ) { <nl> ref . some ( ( ref ) = > { <nl> return this . fireEvent ( ref , type , e ) ! = = false <nl> export function fireEvent ( ref , type , e , domChanges ) { <nl> } <nl> <nl> export function callback ( callbackId , data , ifKeepAlive ) { <nl> - _ . debug ( ` Invoke a callback ( $ { callbackId } ) with ` , data , <nl> + console . debug ( ` [ JS Framework ] Invoke a callback ( $ { callbackId } ) with ` , data , <nl> ` in instance ( $ { this . id } ) ` ) <nl> <nl> const callback = this . callbacks [ callbackId ] <nl> export function callback ( callbackId , data , ifKeepAlive ) { <nl> } <nl> <nl> export function refreshData ( data ) { <nl> - _ . debug ( ` Refresh with ` , data , <nl> + console . debug ( ` [ JS Framework ] Refresh with ` , data , <nl> ` in instance [ $ { this . id } ] ` ) <nl> <nl> const vm = this . vm <nl> export function refreshData ( data ) { <nl> vm . refreshData ( data ) <nl> } <nl> else { <nl> - _ . extend ( vm , data ) <nl> + extend ( vm , data ) <nl> } <nl> this . updateActions ( ) <nl> this . doc . listener . refreshFinish ( ) <nl> mmm a / html5 / default / app / register . js <nl> ppp b / html5 / default / app / register . 
js <nl> export function requireComponent ( name ) { <nl> / * * <nl> * @ context a instance of AppInstance <nl> * / <nl> - export function registerComponent ( name , exports ) { <nl> + export function registerComponent ( name , def ) { <nl> const { customComponentMap } = this <nl> <nl> if ( customComponentMap [ name ] ) { <nl> - throw new Error ( ` define a component ( $ { name } ) that already exists ` ) <nl> + console . error ( ` [ JS Framework ] define a component ( $ { name } ) that already exists ` ) <nl> + return <nl> } <nl> <nl> - customComponentMap [ name ] = exports <nl> + customComponentMap [ name ] = def <nl> } <nl> mmm a / html5 / default / config . js <nl> ppp b / html5 / default / config . js <nl> export default { <nl> type : ' cell ' , <nl> append : ' tree ' <nl> } <nl> - } , <nl> - customComponentMap : { } , <nl> - debug : false <nl> + } <nl> } <nl> mmm a / html5 / default / core / observer . js <nl> ppp b / html5 / default / core / observer . js <nl> import { arrayMethods } from ' . / array ' <nl> import { <nl> def , <nl> remove , <nl> - isArray , <nl> isObject , <nl> isPlainObject , <nl> hasProto , <nl> export function Observer ( value ) { <nl> this . value = value <nl> this . dep = new Dep ( ) <nl> def ( value , ' __ob__ ' , this ) <nl> - if ( isArray ( value ) ) { <nl> + if ( Array . isArray ( value ) ) { <nl> const augment = hasProto <nl> ? protoAugment <nl> : copyAugment <nl> export function observe ( value , vm ) { <nl> if ( hasOwn ( value , ' __ob__ ' ) & & value . __ob__ instanceof Observer ) { <nl> ob = value . __ob__ <nl> } else if ( <nl> - ( isArray ( value ) | | isPlainObject ( value ) ) & & <nl> + ( Array . isArray ( value ) | | isPlainObject ( value ) ) & & <nl> Object . isExtensible ( value ) & & <nl> ! value . _isVue <nl> ) { <nl> export function defineReactive ( obj , key , val ) { <nl> if ( childOb ) { <nl> childOb . dep . depend ( ) <nl> } <nl> - if ( isArray ( value ) ) { <nl> + if ( Array . isArray ( value ) ) { <nl> for ( let e , i = 0 , l = value . length ; i < l ; i + + ) { <nl> e = value [ i ] <nl> e & & e . __ob__ & & e . __ob__ . dep . depend ( ) <nl> export function defineReactive ( obj , key , val ) { <nl> * / <nl> <nl> export function set ( obj , key , val ) { <nl> - if ( isArray ( obj ) ) { <nl> + if ( Array . isArray ( obj ) ) { <nl> return obj . splice ( key , 1 , val ) <nl> } <nl> if ( hasOwn ( obj , key ) ) { <nl> mmm a / html5 / default / core / state . js <nl> ppp b / html5 / default / core / state . js <nl> import { <nl> unproxy <nl> } from ' . / observer ' <nl> import { <nl> - isArray , <nl> isPlainObject , <nl> bind <nl> } from ' . . / util ' <nl> mmm a / html5 / default / core / watcher . js <nl> ppp b / html5 / default / core / watcher . js <nl> import { <nl> warn , <nl> remove , <nl> extend , <nl> - isArray , <nl> isObject , <nl> parsePath , <nl> _Set as Set <nl> function traverse ( val , seen ) { <nl> seen = seenObjects <nl> seen . clear ( ) <nl> } <nl> - isA = isArray ( val ) <nl> + isA = Array . isArray ( val ) <nl> isO = isObject ( val ) <nl> if ( isA | | isO ) { <nl> if ( val . __ob__ ) { <nl> mmm a / html5 / default / index . js <nl> ppp b / html5 / default / index . js <nl> export function createInstance ( instanceId , code , options , data ) { <nl> let instance = instanceMap [ instanceId ] <nl> options = options | | { } <nl> <nl> - config . debug = options . debug <nl> - <nl> let result <nl> if ( ! instance ) { <nl> instance = new AppInstance ( instanceId , options ) <nl> export function registerMethods ( apis ) { <nl> Vm . 
registerMethods ( apis ) <nl> } <nl> } <nl> + global . registerMethods = registerMethods <nl> <nl> / * * <nl> * get a whole element tree of an instance <nl> const jsHandlers = { <nl> * @ param { string } instanceId <nl> * @ param { array } tasks list with ` method ` and ` args ` <nl> * / <nl> - export function callJS ( instanceId , tasks ) { <nl> + export function receiveTasks ( instanceId , tasks ) { <nl> const instance = instanceMap [ instanceId ] <nl> if ( instance & & Array . isArray ( tasks ) ) { <nl> const results = [ ] <nl> mmm a / html5 / default / util / index . js <nl> ppp b / html5 / default / util / index . js <nl> <nl> - / * global MutationObserver * / <nl> - <nl> - / / / lang . js <nl> + export { <nl> + extend , <nl> + def , <nl> + remove , <nl> + hasOwn , <nl> + bind , <nl> + toArray , <nl> + isObject , <nl> + isPlainObject <nl> + } from ' . . / . . / shared ' <nl> <nl> / * * <nl> * Check if a string starts with $ or _ <nl> export function isReserved ( str ) { <nl> return c = = = 0x24 | | c = = = 0x5F <nl> } <nl> <nl> - / * * <nl> - * Define a property . <nl> - * <nl> - * @ param { Object } obj <nl> - * @ param { String } key <nl> - * @ param { * } val <nl> - * @ param { Boolean } [ enumerable ] <nl> - * / <nl> - <nl> - export function def ( obj , key , val , enumerable ) { <nl> - Object . defineProperty ( obj , key , { <nl> - value : val , <nl> - enumerable : ! ! enumerable , <nl> - writable : true , <nl> - configurable : true <nl> - } ) <nl> - } <nl> - <nl> - / / / env . js <nl> - <nl> / / can we use __proto__ ? <nl> export const hasProto = ' __proto__ ' in { } <nl> <nl> - / / Browser environment sniffing <nl> - export const inBrowser = <nl> - typeof window ! = = ' undefined ' & & <nl> - Object . prototype . toString . call ( window ) ! = = ' [ object Object ] ' <nl> - <nl> - / / detect devtools <nl> - export const devtools = inBrowser & & window . __VUE_DEVTOOLS_GLOBAL_HOOK__ <nl> - <nl> - / / UA sniffing for working around browser - specific quirks <nl> - const UA = inBrowser & & window . navigator . userAgent . toLowerCase ( ) <nl> - const isIos = UA & & / ( iphone | ipad | ipod | ios ) / i . test ( UA ) <nl> - const isWechat = UA & & UA . indexOf ( ' micromessenger ' ) > 0 <nl> - <nl> - / * * <nl> - * Defer a task to execute it asynchronously . Ideally this <nl> - * should be executed as a microtask , so we leverage <nl> - * MutationObserver if it ' s available , and fallback to <nl> - * setTimeout ( 0 ) . <nl> - * <nl> - * @ param { Function } cb <nl> - * @ param { Object } ctx <nl> - * / <nl> - <nl> - export const nextTick = ( function ( ) { <nl> - let callbacks = [ ] <nl> - let pending = false <nl> - let timerFunc <nl> - function nextTickHandler ( ) { <nl> - pending = false <nl> - const copies = callbacks . slice ( 0 ) <nl> - callbacks = [ ] <nl> - for ( let i = 0 ; i < copies . length ; i + + ) { <nl> - copies [ i ] ( ) <nl> - } <nl> - } <nl> - <nl> - / * istanbul ignore if * / <nl> - if ( typeof MutationObserver ! = = ' undefined ' & & ! ( isWechat & & isIos ) ) { <nl> - let counter = 1 <nl> - const observer = new MutationObserver ( nextTickHandler ) <nl> - const textNode = document . createTextNode ( counter ) <nl> - observer . observe ( textNode , { <nl> - characterData : true <nl> - } ) <nl> - timerFunc = function ( ) { <nl> - counter = ( counter + 1 ) % 2 <nl> - textNode . 
data = counter <nl> - } <nl> - } <nl> - else { <nl> - / / webpack attempts to inject a shim for setImmediate <nl> - / / if it is used as a global , so we have to work around that to <nl> - / / avoid bundling unnecessary code . <nl> - const context = inBrowser <nl> - ? window <nl> - : typeof global ! = = ' undefined ' ? global : { } <nl> - timerFunc = context . setImmediate | | setTimeout <nl> - } <nl> - return function ( cb , ctx ) { <nl> - const func = ctx <nl> - ? function ( ) { cb . call ( ctx ) } <nl> - : cb <nl> - callbacks . push ( func ) <nl> - if ( pending ) return <nl> - pending = true <nl> - timerFunc ( nextTickHandler , 0 ) <nl> - } <nl> - } ) ( ) <nl> - <nl> let _Set <nl> / * istanbul ignore if * / <nl> if ( typeof Set ! = = ' undefined ' & & Set . toString ( ) . match ( / native code / ) ) { <nl> else { <nl> <nl> export { _Set } <nl> <nl> - / / / shared <nl> - <nl> - / * * <nl> - * Remove an item from an array <nl> - * <nl> - * @ param { Array } arr <nl> - * @ param { * } item <nl> - * / <nl> - <nl> - export function remove ( arr , item ) { <nl> - if ( arr . length ) { <nl> - const index = arr . indexOf ( item ) <nl> - if ( index > - 1 ) { <nl> - return arr . splice ( index , 1 ) <nl> - } <nl> - } <nl> - } <nl> - <nl> - / * * <nl> - * Check whether the object has the property . <nl> - * <nl> - * @ param { Object } obj <nl> - * @ param { String } key <nl> - * @ return { Boolean } <nl> - * / <nl> - const hasOwnProperty = Object . prototype . hasOwnProperty <nl> - export function hasOwn ( obj , key ) { <nl> - return hasOwnProperty . call ( obj , key ) <nl> - } <nl> - <nl> / * * <nl> * Create a cached version of a pure function . <nl> * <nl> export const hyphenate = cached ( str = > { <nl> . toLowerCase ( ) <nl> } ) <nl> <nl> - / * * <nl> - * Simple bind , faster than native <nl> - * <nl> - * @ param { Function } fn <nl> - * @ param { Object } ctx <nl> - * @ return { Function } <nl> - * / <nl> - <nl> - export function bind ( fn , ctx ) { <nl> - return function ( a ) { <nl> - const l = arguments . length <nl> - return l <nl> - ? l > 1 <nl> - ? fn . apply ( ctx , arguments ) <nl> - : fn . call ( ctx , a ) <nl> - : fn . call ( ctx ) <nl> - } <nl> - } <nl> - <nl> - / * * <nl> - * Convert an Array - like object to a real Array . <nl> - * <nl> - * @ param { Array - like } list <nl> - * @ param { Number } [ start ] - start index <nl> - * @ return { Array } <nl> - * / <nl> - <nl> - export function toArray ( list , start ) { <nl> - start = start | | 0 <nl> - let i = list . length - start <nl> - const ret = new Array ( i ) <nl> - while ( i - - ) { <nl> - ret [ i ] = list [ i + start ] <nl> - } <nl> - return ret <nl> - } <nl> - <nl> - / * * <nl> - * Mix properties into target object . <nl> - * <nl> - * @ param { Object } to <nl> - * @ param { Object } from <nl> - * / <nl> - <nl> - export function extend ( target , . . . src ) { <nl> - if ( typeof Object . assign = = = ' function ' ) { <nl> - Object . assign ( target , . . . src ) <nl> - } <nl> - else { <nl> - const first = src . shift ( ) <nl> - for ( const key in first ) { <nl> - target [ key ] = first [ key ] <nl> - } <nl> - if ( src . length ) { <nl> - extend ( target , . . . src ) <nl> - } <nl> - } <nl> - return target <nl> - } <nl> - <nl> - / * * <nl> - * Quick object check - this is primarily used to tell <nl> - * Objects from primitive values when we know the value <nl> - * is a JSON - compliant type . 
<nl> - * <nl> - * @ param { * } obj <nl> - * @ return { Boolean } <nl> - * / <nl> - <nl> - export function isObject ( obj ) { <nl> - return obj ! = = null & & typeof obj = = = ' object ' <nl> - } <nl> - <nl> - / * * <nl> - * Strict object type check . Only returns true <nl> - * for plain JavaScript objects . <nl> - * <nl> - * @ param { * } obj <nl> - * @ return { Boolean } <nl> - * / <nl> - <nl> - const toString = Object . prototype . toString <nl> - const OBJECT_STRING = ' [ object Object ] ' <nl> - export function isPlainObject ( obj ) { <nl> - return toString . call ( obj ) = = = OBJECT_STRING <nl> - } <nl> - <nl> - / * * <nl> - * Array type check . <nl> - * <nl> - * @ param { * } obj <nl> - * @ return { Boolean } <nl> - * / <nl> - <nl> - export const isArray = Array . isArray <nl> - <nl> - / / / other <nl> - <nl> - export function stringify ( x ) { <nl> - return typeof x = = = ' undefined ' | | x = = = null | | typeof ( x ) = = = ' function ' <nl> - ? ' ' <nl> - : typeof x = = = ' object ' <nl> - ? x instanceof RegExp <nl> - ? x . toString ( ) <nl> - : x instanceof Date <nl> - ? JSON . parse ( JSON . stringify ( x ) ) <nl> - : JSON . stringify ( x ) <nl> - : x . toString ( ) <nl> - } <nl> - <nl> export function typof ( v ) { <nl> const s = Object . prototype . toString . call ( v ) <nl> return s . substring ( 8 , s . length - 1 ) . toLowerCase ( ) <nl> } <nl> - <nl> - export function normalize ( v ) { <nl> - const type = typof ( v ) <nl> - <nl> - switch ( type ) { <nl> - case ' undefined ' : <nl> - case ' null ' : <nl> - return ' ' <nl> - case ' regexp ' : <nl> - return v . toString ( ) <nl> - case ' date ' : <nl> - return v . toISOString ( ) <nl> - case ' number ' : <nl> - case ' string ' : <nl> - case ' boolean ' : <nl> - case ' array ' : <nl> - case ' object ' : <nl> - case ' function ' : <nl> - return v <nl> - } <nl> - } <nl> - <nl> - const enableLog = typeof console ! = = ' undefined ' & & global . IS_PRODUCT ! = = true <nl> - <nl> - / * * <nl> - * @ param { String } msg <nl> - * / <nl> - export function error ( . . . args ) { <nl> - enableLog & & console . error & & console . error ( ' [ JS Framework ] ' , . . . args ) <nl> - } <nl> - <nl> - / * * <nl> - * @ param { String } msg <nl> - * / <nl> - export function warn ( . . . args ) { <nl> - enableLog & & console . warn & & console . warn ( ' [ JS Framework ] ' , . . . args ) <nl> - } <nl> - <nl> - / * * <nl> - * @ param { String } msg <nl> - * / <nl> - export function info ( . . . args ) { <nl> - enableLog & & console . info & & console . info ( ' [ JS Framework ] ' , . . . args ) <nl> - } <nl> - <nl> - / * * <nl> - * @ param { String } msg <nl> - * / <nl> - export function debug ( . . . args ) { <nl> - enableLog & & console . debug & & console . debug ( ' [ JS Framework ] ' , . . . args ) <nl> - } <nl> - <nl> - / * * <nl> - * @ param { String } msg <nl> - * / <nl> - export function log ( . . . args ) { <nl> - enableLog & & console . log & & console . log ( ' [ JS Framework ] ' , . . . args ) <nl> - } <nl> mmm a / html5 / default / vm / compiler . js <nl> ppp b / html5 / default / vm / compiler . js <nl> <nl> * events . js : $ on <nl> * / <nl> <nl> - import * as _ from ' . . / util ' <nl> + import { <nl> + extend , <nl> + bind <nl> + } from ' . . / util ' <nl> <nl> / * * <nl> * build ( externalDirs ) <nl> export function _build ( ) { <nl> this . _compile ( template , this . _parentEl ) <nl> } <nl> <nl> - _ . debug ( ` " ready " lifecycle in Vm ( $ { this . _type } ) ` ) <nl> + console . 
debug ( ` [ JS Framework ] " ready " lifecycle in Vm ( $ { this . _type } ) ` ) <nl> this . $ emit ( ' hook : ready ' ) <nl> this . _ready = true <nl> } <nl> export function _compile ( target , dest , meta ) { <nl> } <nl> meta = meta | | { } <nl> if ( context . _targetIsContent ( target ) ) { <nl> - _ . debug ( ' compile " content " block by ' , target ) <nl> + console . debug ( ' [ JS Framework ] compile " content " block by ' , target ) <nl> context . _content = context . _createBlock ( dest ) <nl> return <nl> } <nl> <nl> if ( context . _targetNeedCheckRepeat ( target , meta ) ) { <nl> - _ . debug ( ' compile " repeat " logic by ' , target ) <nl> + console . debug ( ' [ JS Framework ] compile " repeat " logic by ' , target ) <nl> context . _compileRepeat ( target , dest ) <nl> return <nl> } <nl> if ( context . _targetNeedCheckShown ( target , meta ) ) { <nl> - _ . debug ( ' compile " if " logic by ' , target ) <nl> + console . debug ( ' [ JS Framework ] compile " if " logic by ' , target ) <nl> context . _compileShown ( target , dest , meta ) <nl> return <nl> } <nl> export function _compile ( target , dest , meta ) { <nl> const type = typeGetter <nl> const component = context . _targetIsComposed ( target , type ) <nl> if ( component ) { <nl> - _ . debug ( ' compile composed component by ' , target ) <nl> + console . debug ( ' [ JS Framework ] compile composed component by ' , target ) <nl> context . _compileCustomComponent ( component , target , dest , type , meta ) <nl> return <nl> } <nl> - _ . debug ( ' compile native component by ' , target ) <nl> + console . debug ( ' [ JS Framework ] compile native component by ' , target ) <nl> context . _compileNativeComponent ( target , dest , type ) <nl> } <nl> <nl> export function _compileShown ( target , dest , meta ) { <nl> * / <nl> export function _compileType ( target , dest , typeGetter , meta ) { <nl> const type = typeGetter . call ( this ) <nl> - const newMeta = Object . assign ( { type } , meta ) <nl> + const newMeta = extend ( { type } , meta ) <nl> const fragBlock = this . _createBlock ( dest ) <nl> <nl> if ( dest . element & & dest . children ) { <nl> export function _compileType ( target , dest , typeGetter , meta ) { <nl> } <nl> <nl> this . _watch ( typeGetter , ( value ) = > { <nl> - const newMeta = Object . assign ( { type : value } , meta ) <nl> + const newMeta = extend ( { type : value } , meta ) <nl> this . _removeBlock ( fragBlock , true ) <nl> this . _compile ( target , fragBlock , newMeta ) <nl> } ) <nl> export function _compileNativeComponent ( template , dest , type ) { <nl> let element <nl> if ( dest . ref = = = ' _documentElement ' ) { <nl> / / if its parent is documentElement then it ' s a body <nl> - _ . debug ( ' compile to create body for ' , type ) <nl> + console . debug ( ` [ JS Framework ] compile to create body for $ { type } ` ) <nl> element = this . _createBody ( type ) <nl> } <nl> else { <nl> - _ . debug ( ' compile to create element for ' , type ) <nl> + console . debug ( ` [ JS Framework ] compile to create element for $ { type } ` ) <nl> element = this . _createElement ( type ) <nl> } <nl> <nl> export function _compileNativeComponent ( template , dest , type ) { <nl> for ( const type in target . events ) { <nl> const handler = vm [ target . events [ type ] ] <nl> if ( handler ) { <nl> - element . addEvent ( type , _ . bind ( handler , vm ) ) <nl> + element . 
addEvent ( type , bind ( handler , vm ) ) <nl> } <nl> } <nl> } <nl> export function _compileNativeComponent ( template , dest , type ) { <nl> const treeMode = template . append = = = ' tree ' <nl> const app = this . _app | | { } <nl> if ( app . lastSignal ! = = - 1 & & ! treeMode ) { <nl> - _ . debug ( ' compile to append single node for ' , element ) <nl> + console . debug ( ' [ JS Framework ] compile to append single node for ' , element ) <nl> app . lastSignal = this . _attachTarget ( element , dest ) <nl> } <nl> if ( app . lastSignal ! = = - 1 ) { <nl> this . _compileChildren ( template , element ) <nl> } <nl> if ( app . lastSignal ! = = - 1 & & treeMode ) { <nl> - _ . debug ( ' compile to append whole tree for ' , element ) <nl> + console . debug ( ' [ JS Framework ] compile to append whole tree for ' , element ) <nl> app . lastSignal = this . _attachTarget ( element , dest ) <nl> } <nl> } <nl> export function _bindRepeat ( target , fragBlock , info ) { <nl> if ( ! mergedData . hasOwnProperty ( ' INDEX ' ) ) { <nl> Object . defineProperty ( mergedData , ' INDEX ' , { <nl> value : ( ) = > { <nl> - _ . warn ( ' " INDEX " in repeat is deprecated , ' + <nl> - ' please use " $ index " instead ' ) <nl> + console . warn ( ' [ JS Framework ] " INDEX " in repeat is deprecated , ' + <nl> + ' please use " $ index " instead ' ) <nl> } <nl> } ) <nl> } <nl> export function _bindRepeat ( target , fragBlock , info ) { <nl> <nl> const list = this . _watchBlock ( fragBlock , getter , ' repeat ' , <nl> ( data ) = > { <nl> - _ . debug ( ' the " repeat " item has changed ' , data ) <nl> + console . debug ( ' [ JS Framework ] the " repeat " item has changed ' , data ) <nl> if ( ! fragBlock ) { <nl> return <nl> } <nl> export function _bindRepeat ( target , fragBlock , info ) { <nl> export function _bindShown ( target , fragBlock , meta ) { <nl> const display = this . _watchBlock ( fragBlock , target . shown , ' shown ' , <nl> ( display ) = > { <nl> - _ . debug ( ' the " if " item was changed ' , display ) <nl> + console . debug ( ' [ JS Framework ] the " if " item was changed ' , display ) <nl> <nl> if ( ! fragBlock | | ! ! fragBlock . display = = = ! ! display ) { <nl> return <nl> mmm a / html5 / default / vm / directive . js <nl> ppp b / html5 / default / vm / directive . js <nl> <nl> * Directive Parser <nl> * / <nl> <nl> - import * as _ from ' . . / util ' <nl> + import { bind , typof } from ' . . / util ' <nl> <nl> import Watcher from ' . . / core / watcher ' <nl> import config from ' . . / config ' <nl> export function _applyNaitveComponentOptions ( template ) { <nl> if ( template [ key ] = = null ) { <nl> template [ key ] = options [ key ] <nl> } <nl> - else if ( _ . typof ( template [ key ] ) = = = ' object ' & & <nl> - _ . typof ( options [ key ] ) = = = ' object ' ) { <nl> + else if ( typof ( template [ key ] ) = = = ' object ' & & <nl> + typof ( options [ key ] ) = = = ' object ' ) { <nl> for ( const subkey in options [ key ] ) { <nl> if ( template [ key ] [ subkey ] = = null ) { <nl> template [ key ] [ subkey ] = options [ key ] [ subkey ] <nl> export function _setStyle ( el , style ) { <nl> * add an event type and handler to an element and generate a dom update <nl> * / <nl> export function _setEvent ( el , type , handler ) { <nl> - el . addEvent ( type , _ . bind ( handler , this ) ) <nl> + el . addEvent ( type , bind ( handler , this ) ) <nl> } <nl> <nl> / * * <nl> export function _bindEvents ( el , events ) { <nl> handler = this [ handler ] <nl> / * istanbul ignore if * / <nl> if ( ! 
handler ) { <nl> - _ . error ( ` The method " $ { handler } " is not defined . ` ) <nl> + console . debug ( ` [ JS Framework ] The method " $ { handler } " is not defined . ` ) <nl> } <nl> } <nl> this . _setEvent ( el , key , handler ) <nl> mmm a / html5 / default / vm / index . js <nl> ppp b / html5 / default / vm / index . js <nl> <nl> * ViewModel Constructor & definition <nl> * / <nl> <nl> - import * as _ from ' . . / util ' <nl> + import { extend } from ' . . / util ' <nl> import * as state from ' . . / core / state ' <nl> import * as compiler from ' . / compiler ' <nl> import * as directive from ' . / directive ' <nl> import { registerModules , registerMethods } from ' . . / app / register ' <nl> function callOldReadyEntry ( vm , component ) { <nl> if ( component . methods & & <nl> component . methods . ready ) { <nl> - _ . warn ( ' " exports . methods . ready " is deprecated , ' + <nl> + console . warn ( ' [ JS Framework ] " exports . methods . ready " is deprecated , ' + <nl> ' please use " exports . created " instead ' ) <nl> component . methods . ready . call ( vm ) <nl> } <nl> export default function Vm ( <nl> / / bind events and lifecycles <nl> this . _initEvents ( externalEvents ) <nl> <nl> - _ . debug ( ` " init " lifecycle in Vm ( $ { this . _type } ) ` ) <nl> + console . debug ( ` [ JS Framework ] " init " lifecycle in Vm ( $ { this . _type } ) ` ) <nl> this . $ emit ( ' hook : init ' ) <nl> this . _inited = true <nl> / / proxy data and methods <nl> / / observe data and add this to vms <nl> this . _data = typeof data = = = ' function ' ? data ( ) : data <nl> if ( mergedData ) { <nl> - _ . extend ( this . _data , mergedData ) <nl> + extend ( this . _data , mergedData ) <nl> } <nl> this . _initState ( ) <nl> <nl> - _ . debug ( ` " created " lifecycle in Vm ( $ { this . _type } ) ` ) <nl> + console . debug ( ` [ JS Framework ] " created " lifecycle in Vm ( $ { this . _type } ) ` ) <nl> this . $ emit ( ' hook : created ' ) <nl> this . _created = true <nl> / / backward old ready entry <nl> export default function Vm ( <nl> this . _build ( ) <nl> } <nl> <nl> - _ . extend ( Vm . prototype , state , compiler , directive , domHelper , events ) <nl> - _ . extend ( Vm , { <nl> + extend ( Vm . prototype , state , compiler , directive , domHelper , events ) <nl> + extend ( Vm , { <nl> registerModules , <nl> registerMethods <nl> } ) <nl> mmm a / html5 / native / index . js <nl> ppp b / html5 / native / index . js <nl> <nl> import ' . . / shared ' <nl> import runtime from ' . . / runtime ' <nl> import { subversion } from ' . . / . . / package . json ' <nl> + import * as methods from ' . . / default / api / methods ' <nl> <nl> const { native , transformer } = subversion <nl> <nl> + / / register instance management APIs <nl> for ( const methodName in runtime ) { <nl> - global [ methodName ] = function ( . . . args ) { <nl> + global [ methodName ] = ( . . . args ) = > { <nl> const ret = runtime [ methodName ] ( . . . args ) <nl> if ( ret instanceof Error ) { <nl> console . error ( ret . toString ( ) ) <nl> for ( const methodName in runtime ) { <nl> } <nl> } <nl> <nl> - Object . assign ( global , { <nl> - frameworkVersion : native , <nl> - needTransformerVersion : transformer <nl> - } ) <nl> + / / register framework meta info <nl> + global . frameworkVersion = native <nl> + global . transformerVersion = transformer <nl> <nl> - / * * <nl> - * register methods <nl> - * / <nl> - const methods = require ( ' . . 
/ default / api / methods ' ) <nl> - const { registerMethods } = global <nl> - registerMethods ( methods ) <nl> + / / register special methods for Weex framework <nl> + global . registerMethods ( methods ) <nl> mmm a / html5 / runtime / index . js <nl> ppp b / html5 / runtime / index . js <nl> export function createInstance ( id , code , config , data ) { <nl> instanceMap [ id ] = info <nl> config = config | | { } <nl> config . bundleVersion = info . version <nl> + console . debug ( ` [ JS Framework ] create an $ { info . framework } @ $ { config . bundleVersion } instance from $ { config . bundleVersion } ` ) <nl> return frameworks [ info . framework ] . createInstance ( id , code , config , data ) <nl> } <nl> return new Error ( ` invalid instance id " $ { id } " ` ) <nl> function genInstance ( methodName ) { <nl> } <nl> } <nl> <nl> - [ ' destroyInstance ' , ' refreshInstance ' , ' callJS ' , ' getRoot ' ] . forEach ( genInstance ) <nl> + [ ' destroyInstance ' , ' refreshInstance ' , ' receiveTasks ' , ' getRoot ' ] . forEach ( genInstance ) <nl> <nl> - methods . receiveTasks = methods . callJS <nl> + function adaptInstance ( methodName , nativeMethodName ) { <nl> + methods [ nativeMethodName ] = function ( . . . args ) { <nl> + const id = args [ 0 ] <nl> + const info = instanceMap [ id ] <nl> + if ( info & & frameworks [ info . framework ] ) { <nl> + return frameworks [ info . framework ] [ methodName ] ( . . . args ) <nl> + } <nl> + return new Error ( ` invalid instance id " $ { id } " ` ) <nl> + } <nl> + } <nl> + <nl> + adaptInstance ( ' receiveTasks ' , ' callJS ' ) <nl> <nl> export default methods <nl> mmm a / html5 / shared / console . js <nl> ppp b / html5 / shared / console . js <nl> function generateLevelMap ( ) { <nl> } ) <nl> } <nl> <nl> - function normalize ( v ) { <nl> - const type = Object . prototype . toString . call ( v ) <nl> - if ( type . toLowerCase ( ) = = = ' [ object object ] ' ) { <nl> - v = JSON . stringify ( v ) <nl> - } <nl> - else { <nl> - v = String ( v ) <nl> - } <nl> - return v <nl> - } <nl> - <nl> function checkLevel ( type ) { <nl> const logLevel = ( global . WXEnvironment & & global . WXEnvironment . logLevel ) | | ' log ' <nl> return levelMap [ logLevel ] & & levelMap [ logLevel ] [ type ] <nl> } <nl> <nl> function format ( args ) { <nl> - return args . map ( v = > normalize ( v ) ) <nl> + return args . map ( ( v ) = > { <nl> + const type = Object . prototype . toString . call ( v ) <nl> + if ( type . toLowerCase ( ) = = = ' [ object object ] ' ) { <nl> + v = JSON . stringify ( v ) <nl> + } <nl> + else { <nl> + v = String ( v ) <nl> + } <nl> + return v <nl> + } ) <nl> } <nl> mmm a / html5 / shared / index . js <nl> ppp b / html5 / shared / index . js <nl> <nl> - import ' . / objectAssign ' <nl> import ' . / setTimeout ' <nl> import ' . / promise ' <nl> import ' . / console ' <nl> + <nl> + / * * <nl> + * Mix properties into target object . <nl> + * <nl> + * @ param { Object } to <nl> + * @ param { Object } from <nl> + * / <nl> + <nl> + export function extend ( target , . . . src ) { <nl> + if ( typeof Object . assign = = = ' function ' ) { <nl> + Object . assign ( target , . . . src ) <nl> + } <nl> + else { <nl> + const first = src . shift ( ) <nl> + for ( const key in first ) { <nl> + target [ key ] = first [ key ] <nl> + } <nl> + if ( src . length ) { <nl> + extend ( target , . . . src ) <nl> + } <nl> + } <nl> + return target <nl> + } <nl> + <nl> + / * * <nl> + * Define a property . 
<nl> + * <nl> + * @ param { Object } obj <nl> + * @ param { String } key <nl> + * @ param { * } val <nl> + * @ param { Boolean } [ enumerable ] <nl> + * / <nl> + <nl> + export function def ( obj , key , val , enumerable ) { <nl> + Object . defineProperty ( obj , key , { <nl> + value : val , <nl> + enumerable : ! ! enumerable , <nl> + writable : true , <nl> + configurable : true <nl> + } ) <nl> + } <nl> + <nl> + / * * <nl> + * Remove an item from an array <nl> + * <nl> + * @ param { Array } arr <nl> + * @ param { * } item <nl> + * / <nl> + <nl> + export function remove ( arr , item ) { <nl> + if ( arr . length ) { <nl> + const index = arr . indexOf ( item ) <nl> + if ( index > - 1 ) { <nl> + return arr . splice ( index , 1 ) <nl> + } <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * Check whether the object has the property . <nl> + * <nl> + * @ param { Object } obj <nl> + * @ param { String } key <nl> + * @ return { Boolean } <nl> + * / <nl> + const hasOwnProperty = Object . prototype . hasOwnProperty <nl> + export function hasOwn ( obj , key ) { <nl> + return hasOwnProperty . call ( obj , key ) <nl> + } <nl> + <nl> + / * * <nl> + * Simple bind , faster than native <nl> + * <nl> + * @ param { Function } fn <nl> + * @ param { Object } ctx <nl> + * @ return { Function } <nl> + * / <nl> + <nl> + export function bind ( fn , ctx ) { <nl> + return function ( a ) { <nl> + const l = arguments . length <nl> + return l <nl> + ? l > 1 <nl> + ? fn . apply ( ctx , arguments ) <nl> + : fn . call ( ctx , a ) <nl> + : fn . call ( ctx ) <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * Convert an Array - like object to a real Array . <nl> + * <nl> + * @ param { Array - like } list <nl> + * @ param { Number } [ start ] - start index <nl> + * @ return { Array } <nl> + * / <nl> + <nl> + export function toArray ( list , start ) { <nl> + start = start | | 0 <nl> + let i = list . length - start <nl> + const ret = new Array ( i ) <nl> + while ( i - - ) { <nl> + ret [ i ] = list [ i + start ] <nl> + } <nl> + return ret <nl> + } <nl> + <nl> + / * * <nl> + * Quick object check - this is primarily used to tell <nl> + * Objects from primitive values when we know the value <nl> + * is a JSON - compliant type . <nl> + * <nl> + * @ param { * } obj <nl> + * @ return { Boolean } <nl> + * / <nl> + <nl> + export function isObject ( obj ) { <nl> + return obj ! = = null & & typeof obj = = = ' object ' <nl> + } <nl> + <nl> + / * * <nl> + * Strict object type check . Only returns true <nl> + * for plain JavaScript objects . <nl> + * <nl> + * @ param { * } obj <nl> + * @ return { Boolean } <nl> + * / <nl> + <nl> + const toString = Object . prototype . toString <nl> + const OBJECT_STRING = ' [ object Object ] ' <nl> + export function isPlainObject ( obj ) { <nl> + return toString . call ( obj ) = = = OBJECT_STRING <nl> + } <nl> + <nl> mmm a / html5 / shared / setTimeout . js <nl> ppp b / html5 / shared / setTimeout . js <nl> <nl> const { <nl> - setTimeout , setTimeoutNative <nl> + setTimeout , <nl> + setTimeoutNative <nl> } = global <nl> <nl> - const MSG = ' Use " global . setTimeout " is unexpected , ' + <nl> - ' please use require ( " @ weex - module " ) . setTimeout instead . ' <nl> - <nl> / / fix no setTimeout on Android V8 <nl> / * istanbul ignore if * / <nl> if ( typeof setTimeout = = = ' undefined ' & & <nl> typeof setTimeoutNative = = = ' function ' ) { <nl> const timeoutMap = { } <nl> let timeoutId = 0 <nl> + <nl> global . setTimeout = ( cb , time ) = > { <nl> - console . 
warn ( MSG ) <nl> timeoutMap [ + + timeoutId ] = cb <nl> setTimeoutNative ( timeoutId . toString ( ) , time ) <nl> } <nl> + <nl> global . setTimeoutCallback = ( id ) = > { <nl> if ( typeof timeoutMap [ id ] = = = ' function ' ) { <nl> timeoutMap [ id ] ( ) <nl> mmm a / html5 / test / unit / default / app / bundle . js <nl> ppp b / html5 / test / unit / default / app / bundle . js <nl> describe ( ' parsing a bundle file ' , ( ) = > { <nl> const ready = sinon . spy ( ) <nl> <nl> before ( ( ) = > { <nl> - global . needTransformerVersion = ' > = 0 . 1 < 1 . 0 ' <nl> + global . transformerVersion = ' > = 0 . 1 < 1 . 0 ' <nl> app . define ( ' @ weex - component / main ' , ( require , exports , module ) = > { <nl> module . exports = { <nl> template : componentTemplate , <nl> describe ( ' parsing a bundle file ' , ( ) = > { <nl> } ) <nl> <nl> after ( ( ) = > { <nl> - global . needTransformerVersion = undefined <nl> + global . transformerVersion = undefined <nl> } ) <nl> <nl> it ( ' not a weex component ' , ( ) = > { <nl> mmm a / html5 / test / unit / default / app / ctrl . js <nl> ppp b / html5 / test / unit / default / app / ctrl . js <nl> describe ( ' the api of app ' , ( ) = > { <nl> <nl> describe ( ' init ' , ( ) = > { <nl> before ( ( ) = > { <nl> - global . needTransformerVersion = ' 0 . 1 . 3 ' <nl> + global . transformerVersion = ' 0 . 1 . 3 ' <nl> } ) <nl> <nl> after ( ( ) = > { <nl> - global . needTransformerVersion = undefined <nl> + global . transformerVersion = undefined <nl> } ) <nl> <nl> it ( ' a simple bundle ' , ( ) = > { <nl> mmm a / html5 / test / unit / default / app / register . js <nl> ppp b / html5 / test / unit / default / app / register . js <nl> <nl> import chai from ' chai ' <nl> + import sinon from ' sinon ' <nl> + import sinonChai from ' sinon - chai ' <nl> const { expect } = chai <nl> + chai . use ( sinonChai ) <nl> <nl> import { <nl> registerComponent , <nl> describe ( ' register ' , ( ) = > { <nl> <nl> describe ( ' component ' , ( ) = > { <nl> it ( ' with exports ' , ( ) = > { <nl> - const exports = { <nl> + const def = { <nl> a : ' b ' <nl> } <nl> <nl> - ctx . registerComponent ( ' componentA ' , exports ) <nl> - expect ( ctx . requireComponent ( ' componentA ' ) ) . to . deep . equal ( exports ) <nl> + ctx . registerComponent ( ' componentA ' , def ) <nl> + expect ( ctx . requireComponent ( ' componentA ' ) ) . to . deep . equal ( def ) <nl> } ) <nl> <nl> it ( ' with a existing name ' , ( ) = > { <nl> - const exports = { <nl> + const def = { <nl> a : ' b ' <nl> } <nl> - <nl> - let err <nl> - try { <nl> - ctx . registerComponent ( ' componentA ' , exports ) <nl> - } <nl> - catch ( e ) { <nl> - err = e <nl> - } <nl> - <nl> - expect ( err ) . to . be . a ( ' Error ' ) <nl> + sinon . stub ( console , ' error ' ) <nl> + ctx . registerComponent ( ' componentA ' , def ) <nl> + expect ( console . error ) . callCount ( 1 ) <nl> + console . error . restore ( ) <nl> } ) <nl> } ) <nl> <nl> mmm a / html5 / test / unit / default / test . js <nl> ppp b / html5 / test / unit / default / test . js <nl> import { subversion } from ' . . / . . / . . / . . / package . json ' <nl> <nl> Object . assign ( global , framework , { <nl> frameworkVersion : subversion . native , <nl> - needTransformerVersion : subversion . transformer <nl> + transformerVersion : subversion . transformer <nl> } ) <nl> <nl> / * * <nl> mmm a / html5 / test / unit / default / util / index . js <nl> ppp b / html5 / test / unit / default / util / index . 
js <nl> <nl> import chai from ' chai ' <nl> - import sinon from ' sinon ' <nl> - import sinonChai from ' sinon - chai ' <nl> const { expect } = chai <nl> - chai . use ( sinonChai ) <nl> <nl> import * as util from ' . . / . . / . . / . . / default / util ' <nl> <nl> describe ( ' Util ' , ( ) = > { <nl> } ) <nl> } ) <nl> <nl> - describe ( ' isArray ' , ( ) = > { <nl> - it ( ' should be a array ' , ( ) = > { <nl> - expect ( util . isArray ( { } ) ) . eql ( false ) <nl> - expect ( util . isArray ( [ ] ) ) . eql ( true ) <nl> - expect ( util . isArray ( ' a ' ) ) . eql ( false ) <nl> - expect ( util . isArray ( 1 ) ) . eql ( false ) <nl> - expect ( util . isArray ( true ) ) . eql ( false ) <nl> - expect ( util . isArray ( null ) ) . eql ( false ) <nl> - expect ( util . isArray ( undefined ) ) . eql ( false ) <nl> - expect ( util . isArray ( function ( ) { } ) ) . eql ( false ) <nl> - expect ( util . isArray ( / \ w * / ) ) . eql ( false ) <nl> - expect ( util . isArray ( new Date ( ) ) ) . eql ( false ) <nl> - } ) <nl> - } ) <nl> - <nl> - describe ( ' stringify ' , ( ) = > { <nl> - it ( ' should be a string ' , ( ) = > { <nl> - const date = new Date ( ) <nl> - <nl> - expect ( util . stringify ( { } ) ) . eql ( ' { } ' ) <nl> - expect ( util . stringify ( [ ] ) ) . eql ( ' [ ] ' ) <nl> - expect ( util . stringify ( ' a ' ) ) . eql ( ' a ' ) <nl> - expect ( util . stringify ( 1 ) ) . eql ( ' 1 ' ) <nl> - expect ( util . stringify ( true ) ) . eql ( ' true ' ) <nl> - expect ( util . stringify ( null ) ) . eql ( ' ' ) <nl> - expect ( util . stringify ( undefined ) ) . eql ( ' ' ) <nl> - expect ( util . stringify ( function ( ) { } ) ) . eql ( ' ' ) <nl> - expect ( util . stringify ( / \ w * / ) ) . eql ( ' / \ \ w * / ' ) <nl> - expect ( util . stringify ( date ) ) . eql ( date . toISOString ( ) ) <nl> - } ) <nl> - } ) <nl> - <nl> describe ( ' typof ' , ( ) = > { <nl> it ( ' should be real type ' , ( ) = > { <nl> expect ( util . typof ( { } ) ) . eql ( ' object ' ) <nl> describe ( ' Util ' , ( ) = > { <nl> } ) <nl> } ) <nl> <nl> - describe ( ' normalize ' , ( ) = > { <nl> - it ( ' should be normalize ' , ( ) = > { <nl> - const func = function ( ) { } <nl> - const date = new Date ( ) <nl> - <nl> - expect ( util . normalize ( { } ) ) . eql ( { } ) <nl> - expect ( util . normalize ( [ ] ) ) . eql ( [ ] ) <nl> - expect ( util . normalize ( ' a ' ) ) . eql ( ' a ' ) <nl> - expect ( util . normalize ( 1 ) ) . eql ( 1 ) <nl> - expect ( util . normalize ( true ) ) . eql ( true ) <nl> - expect ( util . normalize ( null ) ) . eql ( ' ' ) <nl> - expect ( util . normalize ( undefined ) ) . eql ( ' ' ) <nl> - expect ( util . normalize ( func ) ) . eql ( func ) <nl> - expect ( util . normalize ( / \ w * / ) ) . eql ( ' / \ \ w * / ' ) <nl> - expect ( util . normalize ( date ) ) . eql ( date . toISOString ( ) ) <nl> - } ) <nl> - } ) <nl> - <nl> describe ( ' def ' , ( ) = > { <nl> it ( ' should be define a non - enumerable property ' , ( ) = > { <nl> const obj = { } <nl> describe ( ' Util ' , ( ) = > { <nl> expect ( obj ) . eql ( { a : 1 , c : 1 } ) <nl> } ) <nl> } ) <nl> - <nl> - describe ( ' error ' , ( ) = > { <nl> - it ( ' should be log a error prefix message ' , ( ) = > { <nl> - const spy = sinon . stub ( console , ' error ' ) <nl> - util . error ( ' error message ' ) <nl> - expect ( spy . called ) . eql ( true ) <nl> - expect ( spy . firstCall . args ) . eql ( [ ' [ JS Framework ] ' , ' error message ' ] ) <nl> - console . error . 
restore ( ) <nl> - } ) <nl> - } ) <nl> - <nl> - describe ( ' warn ' , ( ) = > { <nl> - it ( ' should be log a warn prefix message ' , ( ) = > { <nl> - const spy = sinon . stub ( console , ' warn ' ) <nl> - util . warn ( ' warn message ' ) <nl> - expect ( spy . called ) . eql ( true ) <nl> - expect ( spy . firstCall . args ) . eql ( [ ' [ JS Framework ] ' , ' warn message ' ] ) <nl> - console . warn . restore ( ) <nl> - } ) <nl> - } ) <nl> - <nl> - describe ( ' info ' , ( ) = > { <nl> - it ( ' should be log a info prefix message ' , ( ) = > { <nl> - const spy = sinon . stub ( console , ' info ' ) <nl> - util . info ( ' info message ' ) <nl> - expect ( spy . called ) . eql ( true ) <nl> - expect ( spy . firstCall . args ) . eql ( [ ' [ JS Framework ] ' , ' info message ' ] ) <nl> - console . info . restore ( ) <nl> - } ) <nl> - } ) <nl> - <nl> - describe ( ' debug ' , ( ) = > { <nl> - it ( ' should be log a debug prefix message ' , ( ) = > { <nl> - console . debug = sinon . spy ( ) <nl> - util . debug ( ' debug message ' ) <nl> - expect ( console . debug . called ) . eql ( true ) <nl> - expect ( console . debug . firstCall . args ) . eql ( [ ' [ JS Framework ] ' , ' debug message ' ] ) <nl> - console . debug = null <nl> - } ) <nl> - } ) <nl> - <nl> - describe ( ' log ' , ( ) = > { <nl> - it ( ' should be log a verbose prefix message ' , ( ) = > { <nl> - const spy = sinon . stub ( console , ' log ' ) <nl> - util . log ( ' verbose message ' ) <nl> - expect ( spy . called ) . eql ( true ) <nl> - expect ( spy . firstCall . args ) . eql ( [ ' [ JS Framework ] ' , ' verbose message ' ] ) <nl> - console . log . restore ( ) <nl> - } ) <nl> - } ) <nl> - <nl> - describe ( ' stringify sth . ' , function ( ) { <nl> - it ( ' convert string to string ' , function ( ) { <nl> - expect ( util . stringify ( ' abcd ' ) ) . eql ( ' abcd ' ) <nl> - expect ( util . stringify ( ' ab \ ncd ' ) ) . eql ( ' ab \ ncd ' ) <nl> - expect ( util . stringify ( ' ab \ \ ncd ' ) ) . eql ( ' ab \ \ ncd ' ) <nl> - } ) <nl> - it ( ' convert number to string ' , function ( ) { <nl> - expect ( util . stringify ( 123 ) ) . eql ( ' 123 ' ) <nl> - expect ( util . stringify ( - 123 ) ) . eql ( ' - 123 ' ) <nl> - expect ( util . stringify ( 123 . 456 ) ) . eql ( ' 123 . 456 ' ) <nl> - } ) <nl> - it ( ' convert boolean to string ' , function ( ) { <nl> - expect ( util . stringify ( true ) ) . eql ( ' true ' ) <nl> - expect ( util . stringify ( ' true ' ) ) . eql ( ' true ' ) <nl> - expect ( util . stringify ( false ) ) . eql ( ' false ' ) <nl> - expect ( util . stringify ( ' false ' ) ) . eql ( ' false ' ) <nl> - } ) <nl> - it ( ' convert undefined to string ' , function ( ) { <nl> - expect ( util . stringify ( ) ) . eql ( ' ' ) <nl> - expect ( util . stringify ( undefined ) ) . eql ( ' ' ) <nl> - } ) <nl> - it ( ' convert null to string ' , function ( ) { <nl> - expect ( util . stringify ( null ) ) . eql ( ' ' ) <nl> - } ) <nl> - it ( ' convert object to string ' , function ( ) { <nl> - expect ( util . stringify ( { } ) ) . eql ( ' { } ' ) <nl> - expect ( util . stringify ( { a : 1 } ) ) . eql ( ' { " a " : 1 } ' ) <nl> - expect ( util . stringify ( { a : [ 1 , 2 ] } ) ) . eql ( ' { " a " : [ 1 , 2 ] } ' ) <nl> - } ) <nl> - it ( ' convert array to string ' , function ( ) { <nl> - expect ( util . stringify ( [ ' a ' , 1 , { b : 2 } ] ) ) . eql ( ' [ " a " , 1 , { " b " : 2 } ] ' ) <nl> - } ) <nl> - it ( ' convert regexp to string ' , function ( ) { <nl> - expect ( util . stringify ( / abcd / ) ) . 
eql ( ' / abcd / ' ) <nl> - expect ( util . stringify ( / ^ abcd $ / ) ) . eql ( ' / ^ abcd $ / ' ) <nl> - expect ( util . stringify ( / abcd / i ) ) . eql ( ' / abcd / i ' ) <nl> - } ) <nl> - it ( ' convert date to string ' , function ( ) { <nl> - let d <nl> - d = new Date ( 2015 , 1 , 1 ) <nl> - expect ( util . stringify ( d ) ) . eql ( d . toJSON ( ) ) <nl> - d = new Date ( ) <nl> - expect ( util . stringify ( d ) ) . eql ( d . toJSON ( ) ) <nl> - } ) <nl> - } ) <nl> } ) <nl> deleted file mode 100644 <nl> index 4cd37d1679 . . 0000000000 <nl> mmm a / html5 / test / unit / default / vm / util . js <nl> ppp / dev / null <nl> <nl> - import chai from ' chai ' <nl> - const { expect } = chai <nl> - <nl> - const _ = require ( ' . . / . . / . . / . . / default / util ' ) <nl> - <nl> - describe ( ' stringify sth . ' , function ( ) { <nl> - it ( ' convert string to string ' , function ( ) { <nl> - expect ( _ . stringify ( ' abcd ' ) ) . eql ( ' abcd ' ) <nl> - expect ( _ . stringify ( ' ab \ ncd ' ) ) . eql ( ' ab \ ncd ' ) <nl> - expect ( _ . stringify ( ' ab \ \ ncd ' ) ) . eql ( ' ab \ \ ncd ' ) <nl> - } ) <nl> - it ( ' convert number to string ' , function ( ) { <nl> - expect ( _ . stringify ( 123 ) ) . eql ( ' 123 ' ) <nl> - expect ( _ . stringify ( - 123 ) ) . eql ( ' - 123 ' ) <nl> - expect ( _ . stringify ( 123 . 456 ) ) . eql ( ' 123 . 456 ' ) <nl> - } ) <nl> - it ( ' convert boolean to string ' , function ( ) { <nl> - expect ( _ . stringify ( true ) ) . eql ( ' true ' ) <nl> - expect ( _ . stringify ( ' true ' ) ) . eql ( ' true ' ) <nl> - expect ( _ . stringify ( false ) ) . eql ( ' false ' ) <nl> - expect ( _ . stringify ( ' false ' ) ) . eql ( ' false ' ) <nl> - } ) <nl> - it ( ' convert undefined to string ' , function ( ) { <nl> - expect ( _ . stringify ( ) ) . eql ( ' ' ) <nl> - expect ( _ . stringify ( undefined ) ) . eql ( ' ' ) <nl> - } ) <nl> - it ( ' convert null to string ' , function ( ) { <nl> - expect ( _ . stringify ( null ) ) . eql ( ' ' ) <nl> - } ) <nl> - it ( ' convert object to string ' , function ( ) { <nl> - expect ( _ . stringify ( { } ) ) . eql ( ' { } ' ) <nl> - expect ( _ . stringify ( { a : 1 } ) ) . eql ( ' { " a " : 1 } ' ) <nl> - expect ( _ . stringify ( { a : [ 1 , 2 ] } ) ) . eql ( ' { " a " : [ 1 , 2 ] } ' ) <nl> - } ) <nl> - it ( ' convert array to string ' , function ( ) { <nl> - expect ( _ . stringify ( [ ' a ' , 1 , { b : 2 } ] ) ) . eql ( ' [ " a " , 1 , { " b " : 2 } ] ' ) <nl> - } ) <nl> - it ( ' convert regexp to string ' , function ( ) { <nl> - expect ( _ . stringify ( / abcd / ) ) . eql ( ' / abcd / ' ) <nl> - expect ( _ . stringify ( / ^ abcd $ / ) ) . eql ( ' / ^ abcd $ / ' ) <nl> - expect ( _ . stringify ( / abcd / i ) ) . eql ( ' / abcd / i ' ) <nl> - } ) <nl> - it ( ' convert date to string ' , function ( ) { <nl> - let d <nl> - d = new Date ( 2015 , 1 , 1 ) <nl> - expect ( _ . stringify ( d ) ) . eql ( d . toJSON ( ) ) <nl> - d = new Date ( ) <nl> - expect ( _ . stringify ( d ) ) . eql ( d . toJSON ( ) ) <nl> - } ) <nl> - it ( ' convert function to string ' , function ( ) { <nl> - / / / / jscs : disable <nl> - / / expect ( _ . stringify ( function ( ) { } ) ) . eql ( ' function ( ) { } ' ) <nl> - / / expect ( _ . stringify ( function ( ) { } ) ) . eql ( ' function ( ) { } ' ) <nl> - / / expect ( _ . stringify ( function foo ( a ) { return a + 1 } ) ) . <nl> - / / eql ( ' function foo ( a ) { return a + 1 } ' ) <nl> - / / expect ( _ . stringify ( function foo ( a ) { return a + 1 } ) ) . 
<nl> - / / eql ( ' function foo ( a ) { return a + 1 } ' ) <nl> - / / expect ( _ . stringify ( function foo ( a ) { return a + 1 ; } ) ) . <nl> - / / eql ( ' function foo ( a ) { return a + 1 ; } ' ) <nl> - / / / / jscs : enable <nl> - } ) <nl> - } ) <nl> mmm a / html5 / vdom / index . js <nl> ppp b / html5 / vdom / index . js <nl> <nl> * / <nl> <nl> import Listener from ' . / listener ' <nl> + import { extend } from ' . . / shared ' <nl> <nl> const DEFAULT_TAG_NAME = ' div ' <nl> <nl> Element . prototype . fireEvent = function ( type , e ) { <nl> } <nl> <nl> Element . prototype . toStyle = function ( ) { <nl> - return Object . assign ( { } , this . classStyle , this . style ) <nl> + return extend ( { } , this . classStyle , this . style ) <nl> } <nl> <nl> Element . prototype . toJSON = function ( ) { <nl> | * [ jsfm ] improved code details | apache/incubator-weex | dc73782b5d715a435f7eb5c310e306d424d8aedf | 2016-07-13T03:04:28Z |
mmm a / hphp / compiler / analysis / block_scope . cpp <nl> ppp b / hphp / compiler / analysis / block_scope . cpp <nl> AnalysisResultRawPtr BlockScope : : getContainingProgram ( ) { <nl> return AnalysisResultRawPtr ( ( AnalysisResult * ) bs ) ; <nl> } <nl> <nl> - ClassScopeRawPtr BlockScope : : getContainingClass ( ) { <nl> + FunctionScopeRawPtr BlockScope : : getContainingNonClosureFunction ( ) { <nl> BlockScope * bs = this ; <nl> - if ( bs - > is ( BlockScope : : FunctionScope ) ) { <nl> + / / walk out through all the closures <nl> + while ( bs & & bs - > is ( BlockScope : : FunctionScope ) ) { <nl> + HPHP : : FunctionScope * fs = static_cast < HPHP : : FunctionScope * > ( bs ) ; <nl> + if ( ! fs - > isClosure ( ) & & ! fs - > isGeneratorFromClosure ( ) ) { <nl> + return FunctionScopeRawPtr ( fs ) ; <nl> + } <nl> + bs = bs - > m_outerScope . get ( ) ; <nl> + } <nl> + return FunctionScopeRawPtr ( ) ; <nl> + } <nl> + <nl> + ClassScopeRawPtr BlockScope : : getContainingClass ( ) { <nl> + BlockScope * bs = getContainingNonClosureFunction ( ) . get ( ) ; <nl> + if ( ! bs ) { <nl> + bs = this ; <nl> + } <nl> + if ( bs & & bs - > is ( BlockScope : : FunctionScope ) ) { <nl> bs = bs - > m_outerScope . get ( ) ; <nl> } <nl> - if ( bs & & ! bs - > is ( BlockScope : : ClassScope ) ) { <nl> - bs = 0 ; <nl> + if ( ! bs | | ! bs - > is ( BlockScope : : ClassScope ) ) { <nl> + return ClassScopeRawPtr ( ) ; <nl> } <nl> return ClassScopeRawPtr ( ( HPHP : : ClassScope * ) bs ) ; <nl> } <nl> mmm a / hphp / compiler / analysis / block_scope . h <nl> ppp b / hphp / compiler / analysis / block_scope . h <nl> class BlockScope : private boost : : noncopyable , <nl> VariableTablePtr getVariables ( ) { return m_variables ; } <nl> ConstantTablePtr getConstants ( ) { return m_constants ; } <nl> ClassScopeRawPtr getContainingClass ( ) ; <nl> + FunctionScopeRawPtr getContainingNonClosureFunction ( ) ; <nl> FunctionScopeRawPtr getContainingFunction ( ) const { <nl> return FunctionScopeRawPtr ( is ( FunctionScope ) ? <nl> ( HPHP : : FunctionScope * ) this : 0 ) ; <nl> mmm a / hphp / compiler / analysis / class_scope . cpp <nl> ppp b / hphp / compiler / analysis / class_scope . cpp <nl> <nl> # include < compiler / analysis / variable_table . h > <nl> # include < compiler / construct . h > <nl> # include < compiler / expression / class_constant_expression . h > <nl> + # include < compiler / expression / closure_expression . h > <nl> # include < compiler / expression / constant_expression . h > <nl> # include < compiler / expression / scalar_expression . h > <nl> # include < compiler / expression / unary_op_expression . h > <nl> <nl> # include < compiler / option . h > <nl> # include < compiler / parser / parser . h > <nl> # include < compiler / statement / interface_statement . h > <nl> + # include < compiler / statement / function_statement . h > <nl> # include < compiler / statement / method_statement . h > <nl> # include < compiler / statement / statement_list . h > <nl> # include < runtime / base / builtin_functions . 
h > <nl> ClassScope : : importTraitMethod ( const TraitMethod & traitMethod , <nl> cloneMeth - > getModifiers ( ) ) ) ; <nl> cloneMeth - > resetScope ( cloneFuncScope , true ) ; <nl> cloneFuncScope - > setOuterScope ( shared_from_this ( ) ) ; <nl> + informClosuresAboutScopeClone ( cloneMeth , cloneFuncScope , ar ) ; <nl> <nl> cloneMeth - > addTraitMethodToScope ( ar , <nl> dynamic_pointer_cast < ClassScope > ( shared_from_this ( ) ) ) ; <nl> ClassScope : : importTraitMethod ( const TraitMethod & traitMethod , <nl> return cloneMeth ; <nl> } <nl> <nl> + void ClassScope : : informClosuresAboutScopeClone ( <nl> + ConstructPtr root , <nl> + FunctionScopePtr outerScope , <nl> + AnalysisResultPtr ar ) { <nl> + <nl> + if ( ! root ) { <nl> + return ; <nl> + } <nl> + <nl> + for ( int i = 0 ; i < root - > getKidCount ( ) ; i + + ) { <nl> + ConstructPtr cons = root - > getNthKid ( i ) ; <nl> + ClosureExpressionPtr closure = <nl> + dynamic_pointer_cast < ClosureExpression > ( cons ) ; <nl> + <nl> + if ( ! closure ) { <nl> + informClosuresAboutScopeClone ( cons , outerScope , ar ) ; <nl> + continue ; <nl> + } <nl> + <nl> + FunctionStatementPtr func = closure - > getClosureFunction ( ) ; <nl> + HPHP : : FunctionScopePtr funcScope = func - > getFunctionScope ( ) ; <nl> + assert ( funcScope - > isClosure ( ) ) ; <nl> + funcScope - > addClonedTraitOuterScope ( outerScope ) ; <nl> + / / Don ' t need to recurse <nl> + } <nl> + } <nl> + <nl> + <nl> void ClassScope : : addImportTraitMethod ( const TraitMethod & traitMethod , <nl> const string & methName ) { <nl> m_importMethToTraitMap [ methName ] . push_back ( traitMethod ) ; <nl> mmm a / hphp / compiler / analysis / class_scope . h <nl> ppp b / hphp / compiler / analysis / class_scope . h <nl> class ClassScope : public BlockScope , public FunctionContainer , <nl> <nl> void addImportTraitMethod ( const TraitMethod & traitMethod , <nl> const std : : string & methName ) ; <nl> + void informClosuresAboutScopeClone ( ConstructPtr root , <nl> + FunctionScopePtr outerScope , <nl> + AnalysisResultPtr ar ) ; <nl> <nl> void setImportTraitMethodModifiers ( const std : : string & methName , <nl> ClassScopePtr traitCls , <nl> mmm a / hphp / compiler / analysis / emitter . cpp <nl> ppp b / hphp / compiler / analysis / emitter . cpp <nl> static void checkJmpTargetEvalStack ( const SymbolicStack & source , <nl> if ( source . size ( ) ! = dest . size ( ) ) { <nl> Logger : : Warning ( " Emitter detected a point in the bytecode where the " <nl> " depth of the stack is not the same for all possible " <nl> - " control flow paths " ) ; <nl> + " control flow paths . source size : % d . dest size : % d " , <nl> + source . size ( ) , <nl> + dest . size ( ) ) ; <nl> assert ( false ) ; <nl> return ; <nl> } <nl> bool EmitterVisitor : : visitImpl ( ConstructPtr node ) { <nl> not_reached ( ) ; <nl> <nl> case Statement : : KindOfFunctionStatement : { <nl> - assert ( ! node - > getClassScope ( ) ) ; / / Handled directly by emitClass ( ) . <nl> MethodStatementPtr m ( static_pointer_cast < MethodStatement > ( node ) ) ; <nl> / / Only called for fn defs not on the top level <nl> StringData * nName = StringData : : GetStaticString ( m - > getOriginalName ( ) ) ; <nl> - if ( m - > getFunctionScope ( ) - > isGenerator ( ) ) { <nl> + FunctionScopePtr funcScope = m - > getFunctionScope ( ) ; <nl> + if ( funcScope - > isGenerator ( ) ) { <nl> if ( m - > getFileScope ( ) ! 
= m_file ) { <nl> / / the generator ' s definition is in another file typically <nl> / / because it was defined in a trait that got inlined into <nl> bool EmitterVisitor : : visitImpl ( ConstructPtr node ) { <nl> return false ; <nl> } <nl> <nl> - postponeMeth ( m , nullptr , true ) ; <nl> + FuncEmitter * fe = nullptr ; <nl> + if ( node - > getClassScope ( ) ) { <nl> + PreClassEmitter * currentPCE = m_curFunc - > pce ( ) ; <nl> + fe = m_ue . newMethodEmitter ( nName , currentPCE ) ; <nl> + fe - > setIsGenerator ( funcScope - > isGenerator ( ) ) ; <nl> + fe - > setIsGeneratorFromClosure ( funcScope - > isGeneratorFromClosure ( ) ) ; <nl> + fe - > setHasGeneratorAsBody ( m - > getGeneratorFunc ( ) ) ; <nl> + bool added UNUSED = currentPCE - > addMethod ( fe ) ; <nl> + assert ( added ) ; <nl> + } <nl> + <nl> + postponeMeth ( m , fe , true ) ; <nl> } else { <nl> + assert ( ! node - > getClassScope ( ) ) ; / / Handled directly by emitClass ( ) . <nl> FuncEmitter * fe = m_ue . newFuncEmitter ( nName , false ) ; <nl> e . DefFunc ( fe - > id ( ) ) ; <nl> postponeMeth ( m , fe , false ) ; <nl> bool EmitterVisitor : : visitImpl ( ConstructPtr node ) { <nl> scalarExp ( static_pointer_cast < ScalarExpression > ( node ) ) ; <nl> / / Inside traits , __class__ cannot be resolved yet , <nl> / / so emit call to get_class . <nl> - if ( scalarExp - > getType ( ) = = T_CLASS_C & & m_curFunc & & <nl> - m_curFunc - > pce ( ) & & <nl> - ( m_curFunc - > pce ( ) - > attrs ( ) & VM : : AttrTrait ) ) { <nl> + if ( scalarExp - > getType ( ) = = T_CLASS_C & & <nl> + ex - > getFunctionScope ( ) - > getContainingClass ( ) & & <nl> + ex - > getFunctionScope ( ) - > getContainingClass ( ) - > isTrait ( ) ) { <nl> + <nl> static const StringData * fname = <nl> StringData : : GetStaticString ( " get_class " ) ; <nl> Offset fpiStart = m_ue . bcPos ( ) ; <nl> bool EmitterVisitor : : visitImpl ( ConstructPtr node ) { <nl> not_implemented ( ) ; <nl> } <nl> case Expression : : KindOfClosureExpression : { <nl> - / / Closures are implemented by anonymous classes that extend Closure . <nl> - / / There is one anonymous class per closure body . <nl> + / / Closures are implemented by anonymous classes that extend Closure , <nl> + / / and an anonymous function inside the closing class . <nl> + / / There is one anonymous class and one anonymous function per closure <nl> + / / body . <nl> ClosureExpressionPtr ce ( static_pointer_cast < ClosureExpression > ( node ) ) ; <nl> <nl> / / Build a convenient list of use - variables . Each one corresponds to : <nl> bool EmitterVisitor : : visitImpl ( ConstructPtr node ) { <nl> } <nl> } <nl> <nl> + MethodStatementPtr body ( <nl> + static_pointer_cast < MethodStatement > ( ce - > getClosureFunction ( ) ) ) ; <nl> + <nl> + / / The anonymous class . <nl> StringData * className = newClosureName ( ) ; <nl> const static StringData * parentName = <nl> - StringData : : GetStaticString ( " closure " ) ; <nl> + StringData : : GetStaticString ( " Closure " ) ; <nl> const Location * sLoc = ce - > getLocation ( ) . get ( ) ; <nl> PreClassEmitter * pce = m_ue . newPreClassEmitter ( <nl> className , PreClass : : AlwaysHoistable ) ; <nl> pce - > init ( sLoc - > line0 , sLoc - > line1 , m_ue . bcPos ( ) , <nl> AttrUnique | AttrPersistent , parentName , nullptr ) ; <nl> <nl> + / / The anonymous function . <nl> + / / This is the body of the closure , preceded by <nl> + / / code that pulls the object ' s instance variables into locals . 
<nl> + StringData * functionName = StringData : : GetStaticString ( <nl> + ( ( char ) ParserBase : : CharClosure ) + string ( " methodFor " ) + <nl> + string ( className - > data ( ) ) <nl> + ) ; <nl> + FuncEmitter * fe ; <nl> + PreClassEmitter * currentPCE = m_curFunc - > pce ( ) ; <nl> + ClosureKind closureKind ; <nl> + if ( currentPCE ) { <nl> + closureKind = ClosureKind : : Class ; <nl> + fe = m_ue . newMethodEmitter ( functionName , currentPCE ) ; <nl> + bool added UNUSED = currentPCE - > addMethod ( fe ) ; <nl> + assert ( added ) ; <nl> + } else { <nl> + closureKind = ClosureKind : : Function ; <nl> + fe = m_ue . newFuncEmitter ( functionName , true ) ; <nl> + e . DefFunc ( fe - > id ( ) ) ; <nl> + } <nl> + <nl> + fe - > setHasGeneratorAsBody ( body - > getGeneratorFunc ( ) ) ; <nl> + fe - > setIsClosureBody ( true ) ; <nl> + postponeMeth ( <nl> + body , fe , false , <nl> + new ClosureUseVarVec ( useVars ) , closureKind ) ; <nl> + <nl> / / We ' re still at the closure definition site . Emit code to instantiate <nl> / / the new anonymous class , with the use variables as arguments . <nl> ExpressionListPtr valuesList ( ce - > getClosureValues ( ) ) ; <nl> Offset fpiStart = m_ue . bcPos ( ) ; <nl> - e . FPushCtorD ( useCount , className ) ; <nl> + int stackCount = useCount + 3 ; <nl> + e . FPushCtorD ( stackCount , className ) ; <nl> { <nl> FPIRegionRecorder fpi ( this , m_ue , m_evalStack , fpiStart ) ; <nl> for ( int i = 0 ; i < useCount ; + + i ) { <nl> emitFuncCallArg ( e , ( * valuesList ) [ i ] , i ) ; <nl> } <nl> + / / The last three params are the class name , <nl> + / / the function name of the body , and the $ this variable or null <nl> + if ( currentPCE ) { <nl> + if ( currentPCE - > attrs ( ) & AttrTrait ) { <nl> + Offset fpiStart = m_ue . bcPos ( ) ; <nl> + e . FPushFuncD ( 0 , StringData : : GetStaticString ( " get_class " ) ) ; <nl> + { <nl> + FPIRegionRecorder fpi ( this , m_ue , m_evalStack , fpiStart ) ; <nl> + } <nl> + e . FCall ( 0 ) ; <nl> + e . UnboxR ( ) ; <nl> + } else { <nl> + e . String ( currentPCE - > name ( ) ) ; <nl> + } <nl> + } else { <nl> + e . Null ( ) ; <nl> + } <nl> + e . FPassCE ( useCount + 0 ) ; <nl> + e . String ( fe - > name ( ) ) ; <nl> + e . FPassCE ( useCount + 1 ) ; <nl> + switch ( closureKind ) { <nl> + case ClosureKind : : Class : { <nl> + static const StringData * thisStr = <nl> + StringData : : GetStaticString ( " this " ) ; <nl> + if ( m_curFunc - > hasVar ( thisStr ) ) { <nl> + Id thisId = m_curFunc - > lookupVarId ( thisStr ) ; <nl> + emitVirtualLocal ( thisId ) ; <nl> + } else { <nl> + e . BareThis ( 0 ) ; <nl> + } <nl> + } break ; <nl> + <nl> + case ClosureKind : : Function : { <nl> + e . Null ( ) ; <nl> + } break ; <nl> + <nl> + } <nl> + emitFPass ( e , useCount + 2 , PassByRefKind : : ErrorOnCell ) ; <nl> } <nl> - e . FCall ( useCount ) ; <nl> + e . FCall ( stackCount ) ; <nl> emitPop ( e ) ; <nl> / / From here on out , we ' re just building metadata for the closure . <nl> <nl> / / Instance variables . <nl> - TypedValue uninit ; <nl> - tvWriteUninit ( & uninit ) ; <nl> + TypedValue tvNull ; <nl> + tvWriteNull ( & tvNull ) ; <nl> for ( int i = 0 ; i < useCount ; + + i ) { <nl> - pce - > addProperty ( useVars [ i ] . first , AttrPrivate , nullptr , & uninit ) ; <nl> + pce - > addProperty ( useVars [ i ] . first , AttrPrivate , nullptr , & tvNull ) ; <nl> } <nl> <nl> / / The constructor . This is entirely generated ; all it does is stash its <nl> bool EmitterVisitor : : visitImpl ( ConstructPtr node ) { <nl> m_postponedClosureCtors . 
push_back ( <nl> PostponedClosureCtor ( useVars , ce , ctor ) ) ; <nl> <nl> - / / The __invoke method . This is the body of the closure , preceded by <nl> - / / code that pulls the object ' s instance variables into locals . <nl> + / / The __invoke method . This is the body of the closure . It takes the <nl> + / / stashed variables and calls the method on the class . <nl> static const StringData * invokeName = <nl> StringData : : GetStaticString ( " __invoke " ) ; <nl> FuncEmitter * invoke = m_ue . newMethodEmitter ( invokeName , pce ) ; <nl> - invoke - > setIsClosureBody ( true ) ; <nl> - pce - > addMethod ( invoke ) ; <nl> - MethodStatementPtr body ( <nl> - static_pointer_cast < MethodStatement > ( ce - > getClosureFunction ( ) ) ) ; <nl> invoke - > setHasGeneratorAsBody ( body - > getGeneratorFunc ( ) ) ; <nl> - postponeMeth ( body , invoke , false , new ClosureUseVarVec ( useVars ) ) ; <nl> + pce - > addMethod ( invoke ) ; <nl> + <nl> + / / Basically clone body but set m_stmt to nothing <nl> + MethodStatementPtr invokeBody ( new MethodStatement ( <nl> + body - > getFunctionScope ( ) , <nl> + body - > getLocation ( ) , <nl> + body - > getModifiers ( ) , <nl> + body - > isRef ( ) , <nl> + invokeName - > toCPPString ( ) , <nl> + body - > getParams ( ) , <nl> + StatementListPtr ( ) , <nl> + 0 , <nl> + body - > getDocComment ( ) , <nl> + ExpressionListPtr ( ) , <nl> + true ) ) ; <nl> + postponeMeth ( <nl> + invokeBody , invoke , false , <nl> + new ClosureUseVarVec ( useVars ) , closureKind ) ; <nl> <nl> return true ; <nl> } <nl> void EmitterVisitor : : emitCGet ( Emitter & e ) { <nl> } <nl> } <nl> <nl> + void EmitterVisitor : : emitCGetForName ( Emitter & e , char * name ) { <nl> + e . CheckThis ( ) ; <nl> + m_evalStack . push ( StackSym : : H ) ; <nl> + m_evalStack . push ( StackSym : : P | StackSym : : T ) ; <nl> + m_evalStack . setString ( StringData : : GetStaticString ( name ) ) ; <nl> + emitCGet ( e ) ; <nl> + } <nl> + <nl> + void EmitterVisitor : : emitClosureUseVars ( <nl> + Emitter & e , int fpiStart , int paramCount , ClosureUseVarVec * useVars ) { <nl> + FPIRegionRecorder fpi ( this , m_ue , m_evalStack , fpiStart ) ; <nl> + <nl> + for ( unsigned i = 0 ; i < paramCount ; + + i ) { <nl> + emitVirtualLocal ( i ) ; <nl> + emitFPass ( e , i , PassByRefKind : : ErrorOnCell ) ; <nl> + } <nl> + <nl> + for ( unsigned i = 0 ; i < useVars - > size ( ) ; + + i ) { <nl> + StringData * name = ( * useVars ) [ i ] . first ; <nl> + if ( i ) { <nl> + m_metaInfo . add ( m_ue . bcPos ( ) , Unit : : MetaInfo : : GuardedThis , <nl> + false , 0 , 0 ) ; <nl> + } <nl> + e . CheckThis ( ) ; <nl> + m_evalStack . push ( StackSym : : H ) ; <nl> + m_evalStack . push ( StackSym : : P | StackSym : : T ) ; <nl> + m_evalStack . setString ( name ) ; <nl> + emitFPass ( e , i + paramCount , PassByRefKind : : ErrorOnCell ) ; <nl> + } <nl> + } <nl> + <nl> void EmitterVisitor : : emitVGet ( Emitter & e ) { <nl> if ( checkIfStackEmpty ( " VGet * " ) ) return ; <nl> LocationGuard loc ( e , m_tempLoc ) ; <nl> void EmitterVisitor : : emitNameString ( Emitter & e , ExpressionPtr n , <nl> <nl> void EmitterVisitor : : postponeMeth ( MethodStatementPtr m , FuncEmitter * fe , <nl> bool top , <nl> - ClosureUseVarVec * useVars / * = NULL * / ) { <nl> - m_postponedMeths . push_back ( PostponedMeth ( m , fe , top , useVars ) ) ; <nl> + ClosureUseVarVec * useVars / * = NULL * / , <nl> + ClosureKind closureKind / * = NULL * / ) { <nl> + m_postponedMeths . 
push_back ( PostponedMeth ( m , fe , top , useVars , closureKind ) ) ; <nl> } <nl> <nl> - void EmitterVisitor : : postponeCtor ( InterfaceStatementPtr is , FuncEmitter * fe ) { <nl> - m_postponedCtors . push_back ( PostponedCtor ( is , fe ) ) ; <nl> + void EmitterVisitor : : postponeCtor ( ConstructPtr c , FuncEmitter * fe ) { <nl> + m_postponedCtors . push_back ( PostponedCtor ( c , fe ) ) ; <nl> } <nl> <nl> void EmitterVisitor : : postponePinit ( InterfaceStatementPtr is , FuncEmitter * fe , <nl> void EmitterVisitor : : emitPostponedMeths ( ) { <nl> fe - > appendParam ( parName , pi ) ; <nl> } <nl> <nl> + if ( fe - > isClosureBody ( ) ) { <nl> + / / Because PHP is insane you can have a use variable with the same name <nl> + / / as a param name . <nl> + / / In that case , params win ( which is different than zend but much easier ) <nl> + for ( auto it = p . m_closureUseVars - > begin ( ) ; it ! = p . m_closureUseVars - > end ( ) ; ) { <nl> + StringData * name = it - > first ; <nl> + if ( fe - > hasVar ( name ) & & fe - > lookupVarId ( name ) < fe - > numParams ( ) ) { <nl> + it = p . m_closureUseVars - > erase ( it ) ; <nl> + } else { <nl> + it + + ; <nl> + } <nl> + } <nl> + <nl> + / / The params to the closure function are : <nl> + / / ( $ arg1 , $ arg2 , . . . , $ use1 , $ use2 , . . . ) <nl> + for ( auto it = p . m_closureUseVars - > begin ( ) ; it ! = p . m_closureUseVars - > end ( ) ; + + it ) { <nl> + FuncEmitter : : ParamInfo pi ; <nl> + pi . setRef ( it - > second ) ; <nl> + fe - > appendParam ( it - > first , pi ) ; <nl> + } <nl> + } <nl> + <nl> m_curFunc = fe ; <nl> <nl> / / Assign ids to all of the local variables eagerly . This gives us the <nl> void EmitterVisitor : : emitPostponedMeths ( ) { <nl> attrs = attrs | AttrUnique | AttrPersistent ; <nl> } <nl> <nl> - / / For closures , the MethodStatement didn ' t have real attributes ; enforce <nl> - / / that the __invoke method is public here <nl> - if ( fe - > isClosureBody ( ) ) { <nl> + / / For closures , the MethodStatement didn ' t have real attributes <nl> + / / make both the body and the __invoke method public , then hide the __invoke in backtraces <nl> + if ( p . m_closureUseVars ) { <nl> assert ( ! ( attrs & ( AttrProtected | AttrPrivate ) ) ) ; <nl> attrs = attrs | AttrPublic ; <nl> + if ( ! fe - > isClosureBody ( ) ) { <nl> + attrs = attrs | AttrNoInjection ; <nl> + } <nl> } <nl> <nl> Label topOfBody ( e ) ; <nl> void EmitterVisitor : : emitPostponedMeths ( ) { <nl> assert ( tc . typeName ( ) - > data ( ) ! = ( const char * ) 0xdeadba5eba11f00d ) ; <nl> e . VerifyParamType ( i ) ; <nl> } <nl> - if ( fe - > isClosureBody ( ) ) { <nl> - assert ( p . m_closureUseVars ! = nullptr ) ; <nl> + <nl> + / / The __invoke on the Closure class <nl> + / / We could do this in PHP but it would invole array serialization <nl> + if ( ! fe - > isClosureBody ( ) & & p . m_closureUseVars ! = nullptr ) { <nl> / / Emit code to unpack the instance variables ( which store the <nl> - / / use - variables ) into locals . Some of the use - variables may have the <nl> + / / use - variables ) and then call the function . <nl> + / / Some of the use - variables may have the <nl> / / same name , in which case the last one wins . <nl> - unsigned n = p . m_closureUseVars - > size ( ) ; <nl> - for ( unsigned i = 0 ; i < n ; + + i ) { <nl> - StringData * name = ( * p . m_closureUseVars ) [ i ] . first ; <nl> - bool byRef = ( * p . m_closureUseVars ) [ i ] . 
second ; <nl> - emitVirtualLocal ( fe - > lookupVarId ( name ) ) ; <nl> - if ( i ) { <nl> - m_metaInfo . add ( m_ue . bcPos ( ) , Unit : : MetaInfo : : GuardedThis , <nl> - false , 0 , 0 ) ; <nl> - } <nl> - e . CheckThis ( ) ; <nl> - m_evalStack . push ( StackSym : : H ) ; <nl> - m_evalStack . push ( StackSym : : T ) ; <nl> - m_evalStack . setString ( name ) ; <nl> - markProp ( e ) ; <nl> - if ( byRef ) { <nl> - emitVGet ( e ) ; <nl> - emitBind ( e ) ; <nl> - } else { <nl> - emitCGet ( e ) ; <nl> - emitSet ( e ) ; <nl> - } <nl> - emitPop ( e ) ; <nl> + <nl> + unsigned useCount = p . m_closureUseVars - > size ( ) ; <nl> + unsigned paramCount = fe - > params ( ) . size ( ) ; <nl> + <nl> + Offset fpiStart = 0 ; <nl> + <nl> + switch ( p . m_closureKind ) { <nl> + case ClosureKind : : Function : { <nl> + / / Basically ( without the locals ) : <nl> + / / $ func = $ this - > functionName ; <nl> + / / $ func ( $ arg1 , $ arg2 , . . . , $ use1 , $ use2 , . . . ) ; <nl> + <nl> + emitCGetForName ( e , " functionName " ) ; <nl> + fpiStart = m_ue . bcPos ( ) ; <nl> + e . FPushFunc ( paramCount + useCount ) ; <nl> + emitClosureUseVars ( e , fpiStart , paramCount , p . m_closureUseVars ) ; <nl> + e . FCall ( paramCount + useCount ) ; <nl> + } break ; <nl> + <nl> + case ClosureKind : : Class : { <nl> + / / Basically ( without the locals ) : <nl> + / / $ obj = $ this - > this ; <nl> + / / $ func = $ this - > functionName ; <nl> + / / $ obj - > $ func ( $ arg1 , $ arg2 , . . . , $ use1 , $ use2 , . . . ) ; <nl> + <nl> + Label noThis ; <nl> + Label end ; <nl> + emitCGetForName ( e , " this " ) ; <nl> + e . JmpZ ( noThis ) ; <nl> + <nl> + / / this is fine <nl> + emitCGetForName ( e , " this " ) ; <nl> + emitCGetForName ( e , " functionName " ) ; <nl> + fpiStart = m_ue . bcPos ( ) ; <nl> + e . FPushObjMethod ( paramCount + useCount ) ; <nl> + emitClosureUseVars ( e , fpiStart , paramCount , p . m_closureUseVars ) ; <nl> + e . FCall ( paramCount + useCount ) ; <nl> + e . Jmp ( end ) ; <nl> + <nl> + / / this is null <nl> + noThis . set ( e ) ; <nl> + emitCGetForName ( e , " functionName " ) ; <nl> + emitCGetForName ( e , " className " ) ; <nl> + emitAGet ( e ) ; <nl> + fpiStart = m_ue . bcPos ( ) ; <nl> + e . FPushClsMethod ( paramCount + useCount ) ; <nl> + emitClosureUseVars ( e , fpiStart , paramCount , p . m_closureUseVars ) ; <nl> + e . FCall ( paramCount + useCount ) ; <nl> + <nl> + end . set ( e ) ; <nl> + } break ; <nl> + <nl> } <nl> + <nl> + e . UnboxR ( ) ; <nl> + e . RetC ( ) ; <nl> } <nl> <nl> if ( funcScope - > isAbstract ( ) ) { <nl> void EmitterVisitor : : emitPostponedMeths ( ) { <nl> e . ContExit ( ) ; <nl> } else { <nl> e . Null ( ) ; <nl> - if ( ( p . m_meth - > getStmts ( ) & & p . m_meth - > getStmts ( ) - > isGuarded ( ) ) | | <nl> - ( fe - > isClosureBody ( ) & & p . m_closureUseVars - > size ( ) ) ) { <nl> + if ( p . m_meth - > getStmts ( ) & & p . m_meth - > getStmts ( ) - > isGuarded ( ) ) { <nl> m_metaInfo . add ( m_ue . bcPos ( ) , Unit : : MetaInfo : : GuardedThis , <nl> false , 0 , 0 ) ; <nl> } <nl> void EmitterVisitor : : emitPostponedCtors ( ) { <nl> <nl> Attr attrs = AttrPublic ; <nl> StringData * methDoc = empty_string . get ( ) ; <nl> - const Location * sLoc = p . m_is - > getLocation ( ) . get ( ) ; <nl> + const Location * sLoc = p . m_c - > getLocation ( ) . get ( ) ; <nl> p . m_fe - > init ( sLoc - > line0 , sLoc - > line1 , m_ue . bcPos ( ) , attrs , false , methDoc ) ; <nl> - Emitter e ( p . m_is , m_ue , * this ) ; <nl> + Emitter e ( p . 
m_c , m_ue , * this ) ; <nl> FuncFinisher ff ( this , e , p . m_fe ) ; <nl> e . Null ( ) ; <nl> e . RetC ( ) ; <nl> void EmitterVisitor : : emitPostponedClosureCtors ( ) { <nl> const Location * sLoc = ctor . m_expr - > getLocation ( ) . get ( ) ; <nl> fe - > init ( sLoc - > line0 , sLoc - > line1 , m_ue . bcPos ( ) , AttrPublic , false , nullptr ) ; <nl> <nl> - unsigned n = useVars . size ( ) ; <nl> + unsigned useCount = useVars . size ( ) ; <nl> + unsigned stackCount = useCount + 3 ; <nl> Emitter e ( ctor . m_expr , m_ue , * this ) ; <nl> FuncFinisher ff ( this , e , fe ) ; <nl> - if ( n > 0 ) { <nl> - for ( unsigned i = 0 ; i < n ; + + i ) { <nl> - / / To ensure that we get a new local for every use var , we call <nl> - / / appendParam with an artificial uniquified name . Because there ' s no <nl> - / / user code here , the fact that the variable has a made - up name in the <nl> - / / metadata doesn ' t matter . <nl> - std : : ostringstream num ; <nl> - num < < i ; <nl> - FuncEmitter : : ParamInfo pi ; <nl> - pi . setRef ( useVars [ i ] . second ) ; <nl> - fe - > appendParam ( StringData : : GetStaticString ( num . str ( ) ) , pi ) ; <nl> - if ( i ) { <nl> - m_metaInfo . add ( m_ue . bcPos ( ) , Unit : : MetaInfo : : GuardedThis , <nl> - false , 0 , 0 ) ; <nl> - } <nl> - e . CheckThis ( ) ; <nl> - m_evalStack . push ( StackSym : : H ) ; <nl> - m_evalStack . push ( StackSym : : T ) ; <nl> - m_evalStack . setString ( useVars [ i ] . first ) ; <nl> - markProp ( e ) ; <nl> - emitVirtualLocal ( i ) ; <nl> - if ( useVars [ i ] . second ) { <nl> - emitVGet ( e ) ; <nl> - emitBind ( e ) ; <nl> - } else { <nl> - emitCGet ( e ) ; <nl> - emitSet ( e ) ; <nl> - } <nl> - emitPop ( e ) ; <nl> + <nl> + for ( unsigned i = 0 ; i < stackCount ; + + i ) { <nl> + / / To ensure that we get a new local for every use var , we call <nl> + / / appendParam with an artificial uniquified name . Because there ' s no <nl> + / / user code here , the fact that the variable has a made - up name in the <nl> + / / metadata doesn ' t matter . <nl> + <nl> + StringData * propertyName ; <nl> + bool byRef ; <nl> + if ( i < useCount ) { <nl> + propertyName = useVars [ i ] . first ; <nl> + byRef = useVars [ i ] . second ; <nl> + } else if ( i = = useCount + 0 ) { <nl> + propertyName = StringData : : GetStaticString ( " className " ) ; <nl> + byRef = false ; <nl> + } else if ( i = = useCount + 1 ) { <nl> + propertyName = StringData : : GetStaticString ( " functionName " ) ; <nl> + byRef = false ; <nl> + } else if ( i = = useCount + 2 ) { <nl> + propertyName = StringData : : GetStaticString ( " this " ) ; <nl> + byRef = false ; <nl> + } <nl> + <nl> + std : : ostringstream num ; <nl> + num < < i ; <nl> + FuncEmitter : : ParamInfo pi ; <nl> + pi . setRef ( byRef ) ; <nl> + fe - > appendParam ( StringData : : GetStaticString ( num . str ( ) ) , pi ) ; <nl> + if ( i ) { <nl> + m_metaInfo . add ( m_ue . bcPos ( ) , Unit : : MetaInfo : : GuardedThis , <nl> + false , 0 , 0 ) ; <nl> + } <nl> + e . CheckThis ( ) ; <nl> + m_evalStack . push ( StackSym : : H ) ; <nl> + m_evalStack . push ( StackSym : : T ) ; <nl> + m_evalStack . setString ( propertyName ) ; <nl> + markProp ( e ) ; <nl> + emitVirtualLocal ( i ) ; <nl> + if ( byRef ) { <nl> + emitVGet ( e ) ; <nl> + emitBind ( e ) ; <nl> + } else { <nl> + emitCGet ( e ) ; <nl> + emitSet ( e ) ; <nl> } <nl> + emitPop ( e ) ; <nl> } <nl> + <nl> e . Null ( ) ; <nl> - if ( n > 0 ) { <nl> - m_metaInfo . add ( m_ue . 
bcPos ( ) , Unit : : MetaInfo : : GuardedThis , <nl> - false , 0 , 0 ) ; <nl> - } <nl> + m_metaInfo . add ( m_ue . bcPos ( ) , Unit : : MetaInfo : : GuardedThis , false , 0 , 0 ) ; <nl> e . RetC ( ) ; <nl> <nl> m_postponedClosureCtors . pop_front ( ) ; <nl> mmm a / hphp / compiler / analysis / emitter . h <nl> ppp b / hphp / compiler / analysis / emitter . h <nl> class EmitterVisitor { <nl> typedef std : : pair < StringData * , bool > ClosureUseVar ; / / ( name , byRef ) <nl> typedef std : : vector < ClosureUseVar > ClosureUseVarVec ; <nl> typedef std : : vector < std : : pair < Id , IterKind > > PendingIterVec ; <nl> + <nl> + enum class ClosureKind { <nl> + Function , <nl> + Class , <nl> + } ; <nl> + <nl> class PostponedMeth { <nl> public : <nl> PostponedMeth ( MethodStatementPtr m , FuncEmitter * fe , bool top , <nl> - ClosureUseVarVec * useVars ) <nl> - : m_meth ( m ) , m_fe ( fe ) , m_top ( top ) , m_closureUseVars ( useVars ) { } <nl> + ClosureUseVarVec * useVars , ClosureKind closureKind ) <nl> + : m_meth ( m ) , m_fe ( fe ) , m_top ( top ) , m_closureUseVars ( useVars ) , <nl> + m_closureKind ( closureKind ) { } <nl> MethodStatementPtr m_meth ; <nl> FuncEmitter * m_fe ; <nl> bool m_top ; <nl> ClosureUseVarVec * m_closureUseVars ; <nl> + ClosureKind m_closureKind ; <nl> } ; <nl> class PostponedCtor { <nl> public : <nl> - PostponedCtor ( InterfaceStatementPtr is , FuncEmitter * fe ) <nl> - : m_is ( is ) , m_fe ( fe ) { } <nl> - InterfaceStatementPtr m_is ; <nl> + PostponedCtor ( ConstructPtr s , FuncEmitter * fe ) <nl> + : m_c ( s ) , m_fe ( fe ) { } <nl> + ConstructPtr m_c ; <nl> FuncEmitter * m_fe ; <nl> } ; <nl> typedef std : : pair < StringData * , ExpressionPtr > NonScalarPair ; <nl> class EmitterVisitor { <nl> void emitCGetL2 ( Emitter & e ) ; <nl> void emitCGetL3 ( Emitter & e ) ; <nl> void emitCGet ( Emitter & e ) ; <nl> + void emitCGetForName ( Emitter & e , char * name ) ; <nl> + void emitClosureUseVars ( Emitter & e , int fpiStart , int paramCount , ClosureUseVarVec * useVars ) ; <nl> void emitVGet ( Emitter & e ) ; <nl> void emitIsset ( Emitter & e ) ; <nl> void emitIsNull ( Emitter & e ) ; <nl> class EmitterVisitor { <nl> void emitAssignment ( Emitter & e , ExpressionPtr c , int op , bool bind ) ; <nl> void emitListAssignment ( Emitter & e , ListAssignmentPtr lst ) ; <nl> void postponeMeth ( MethodStatementPtr m , FuncEmitter * fe , bool top , <nl> - ClosureUseVarVec * useVars = nullptr ) ; <nl> - void postponeCtor ( InterfaceStatementPtr m , FuncEmitter * fe ) ; <nl> + ClosureUseVarVec * useVars = nullptr , <nl> + ClosureKind closureKind = ClosureKind : : Function ) ; <nl> + void postponeCtor ( ConstructPtr m , FuncEmitter * fe ) ; <nl> void postponePinit ( InterfaceStatementPtr m , FuncEmitter * fe , NonScalarVec * v ) ; <nl> void postponeSinit ( InterfaceStatementPtr m , FuncEmitter * fe , NonScalarVec * v ) ; <nl> void postponeCinit ( InterfaceStatementPtr m , FuncEmitter * fe , NonScalarVec * v ) ; <nl> mmm a / hphp / compiler / analysis / function_scope . cpp <nl> ppp b / hphp / compiler / analysis / function_scope . 
cpp <nl> bool FunctionScope : : containsReference ( ) const { <nl> return m_attribute & FileScope : : ContainsReference ; <nl> } <nl> <nl> + void FunctionScope : : setContainsThis ( bool f / * = true * / ) { <nl> + m_containsThis = f ; <nl> + <nl> + BlockScopePtr bs ( this - > getOuterScope ( ) ) ; <nl> + while ( bs & & bs - > is ( BlockScope : : FunctionScope ) ) { <nl> + FunctionScopePtr fs = static_pointer_cast < FunctionScope > ( bs ) ; <nl> + if ( ! fs - > isClosure ( ) ) { <nl> + break ; <nl> + } <nl> + fs - > setContainsThis ( f ) ; <nl> + bs = bs - > getOuterScope ( ) ; <nl> + } <nl> + <nl> + for ( auto it = m_clonedTraitOuterScope . begin ( ) ; it ! = m_clonedTraitOuterScope . end ( ) ; it + + ) { <nl> + ( * it ) - > setContainsThis ( f ) ; <nl> + } <nl> + } <nl> + <nl> + void FunctionScope : : setContainsBareThis ( bool f , bool ref / * = false * / ) { <nl> + if ( f ) { <nl> + m_containsBareThis | = ref ? 2 : 1 ; <nl> + } else { <nl> + m_containsBareThis = 0 ; <nl> + } <nl> + <nl> + BlockScopePtr bs ( this - > getOuterScope ( ) ) ; <nl> + while ( bs & & bs - > is ( BlockScope : : FunctionScope ) ) { <nl> + FunctionScopePtr fs = static_pointer_cast < FunctionScope > ( bs ) ; <nl> + if ( ! fs - > isClosure ( ) ) { <nl> + break ; <nl> + } <nl> + fs - > setContainsBareThis ( f , ref ) ; <nl> + bs = bs - > getOuterScope ( ) ; <nl> + } <nl> + <nl> + for ( auto it = m_clonedTraitOuterScope . begin ( ) ; it ! = m_clonedTraitOuterScope . end ( ) ; it + + ) { <nl> + ( * it ) - > setContainsBareThis ( f , ref ) ; <nl> + } <nl> + } <nl> + <nl> bool FunctionScope : : hasImpl ( ) const { <nl> if ( ! isUserFunction ( ) ) { <nl> return ! isAbstract ( ) ; <nl> mmm a / hphp / compiler / analysis / function_scope . h <nl> ppp b / hphp / compiler / analysis / function_scope . h <nl> class FunctionScope : public BlockScope , <nl> m_volatile = false ; <nl> } <nl> <nl> + / * * <nl> + * Tell this function about another outer scope that contains it . <nl> + * / <nl> + void addClonedTraitOuterScope ( FunctionScopePtr scope ) { <nl> + m_clonedTraitOuterScope . push_back ( scope ) ; <nl> + } <nl> + <nl> / * * <nl> * Get / set original name of the function , without case being lowered . <nl> * / <nl> class FunctionScope : public BlockScope , <nl> * Whether this function contains a usage of $ this <nl> * / <nl> bool containsThis ( ) const { return m_containsThis ; } <nl> - void setContainsThis ( bool f = true ) { m_containsThis = f ; } <nl> + void setContainsThis ( bool f = true ) ; <nl> bool containsBareThis ( ) const { return m_containsBareThis ; } <nl> bool containsRefThis ( ) const { return m_containsBareThis & 2 ; } <nl> - void setContainsBareThis ( bool f , bool ref = false ) { <nl> - if ( f ) { <nl> - m_containsBareThis | = ref ? 2 : 1 ; <nl> - } else { <nl> - m_containsBareThis = 0 ; <nl> - } <nl> - } <nl> + void setContainsBareThis ( bool f , bool ref = false ) ; <nl> / * * <nl> * How many parameters a caller should provide . <nl> * / <nl> class FunctionScope : public BlockScope , <nl> ExpressionListPtr m_closureValues ; <nl> ReadWriteMutex m_inlineMutex ; <nl> unsigned m_nextID ; / / used when cloning generators for traits <nl> + std : : list < FunctionScopeRawPtr > m_clonedTraitOuterScope ; <nl> } ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / hphp / compiler / expression / closure_expression . cpp <nl> ppp b / hphp / compiler / expression / closure_expression . 
cpp <nl> ClosureExpression : : ClosureExpression <nl> ParameterExpressionPtr param ( <nl> dynamic_pointer_cast < ParameterExpression > ( ( * vars ) [ i ] ) ) ; <nl> assert ( param ) ; <nl> + if ( param - > getName ( ) = = " this " ) { <nl> + / / " this " is automatically included . <nl> + / / Once we get rid of all the callsites , make this an error <nl> + continue ; <nl> + } <nl> if ( seenBefore . find ( param - > getName ( ) . c_str ( ) ) = = seenBefore . end ( ) ) { <nl> seenBefore . insert ( param - > getName ( ) . c_str ( ) ) ; <nl> m_vars - > insertElement ( param ) ; <nl> mmm a / hphp / compiler / expression / scalar_expression . cpp <nl> ppp b / hphp / compiler / expression / scalar_expression . cpp <nl> void ScalarExpression : : analyzeProgram ( AnalysisResultPtr ar ) { <nl> case T_METHOD_C : { <nl> if ( ! m_translated . empty ( ) ) break ; <nl> <nl> - BlockScopeRawPtr b = getScope ( ) ; <nl> - while ( b & & b - > is ( BlockScope : : FunctionScope ) ) { <nl> - b = b - > getOuterScope ( ) ; <nl> - } <nl> - m_translated . clear ( ) ; <nl> - if ( b & & b - > is ( BlockScope : : ClassScope ) ) { <nl> - ClassScopePtr clsScope = dynamic_pointer_cast < ClassScope > ( b ) ; <nl> + ClassScopePtr clsScope = getClassScope ( ) ; <nl> + if ( clsScope ) { <nl> + m_translated . clear ( ) ; <nl> if ( ! clsScope - > isTrait ( ) ) { <nl> m_translated = clsScope - > getOriginalName ( ) ; <nl> } <nl> } <nl> if ( m_type = = T_METHOD_C ) { <nl> if ( FunctionScopePtr func = getFunctionScope ( ) ) { <nl> - if ( b & & b - > is ( BlockScope : : ClassScope ) ) { <nl> + if ( clsScope ) { <nl> m_translated + = " : : " ; <nl> } <nl> m_translated + = func - > getOriginalName ( ) ; <nl> mmm a / hphp / compiler / statement / method_statement . cpp <nl> ppp b / hphp / compiler / statement / method_statement . cpp <nl> void MethodStatement : : analyzeProgram ( AnalysisResultPtr ar ) { <nl> cont - > setHidden ( ) ; <nl> getOrigGeneratorFunc ( ) - > getFunctionScope ( ) - > addUse ( <nl> funcScope , BlockScope : : UseKindClosure ) ; <nl> + getOrigGeneratorFunc ( ) - > getFunctionScope ( ) - > setContainsBareThis ( <nl> + funcScope - > containsBareThis ( ) , funcScope - > containsRefThis ( ) ) ; <nl> + getOrigGeneratorFunc ( ) - > getFunctionScope ( ) - > setContainsThis ( <nl> + funcScope - > containsThis ( ) ) ; <nl> } <nl> if ( funcScope - > isSepExtension ( ) | | <nl> Option : : IsDynamicFunction ( m_method , m_name ) | | Option : : AllDynamic ) { <nl> mmm a / hphp / doc / bytecode . specification <nl> ppp b / hphp / doc / bytecode . specification <nl> FPushObjMethodD < num params > < litstr id > [ C ] - > [ ] <nl> fatal error . Next , this instruction checks if object x has an accessible <nl> method named y . If it does , this instruction pushes a new entry on the FPI <nl> stack , initializing it with the number of parameters being passed ( given by <nl> - y ) and a reference to the FPI structure for the method named y from object x . <nl> + % 1 ) and a reference to the FPI structure for the method named y from object x . <nl> <nl> If object x does not have an accessible method named y , this instruction <nl> checks if object x has a __call method . If a __call method is found , this <nl> mmm a / hphp / runtime / ext / ext_function . cpp <nl> ppp b / hphp / runtime / ext / ext_function . 
cpp <nl> Variant f_func_get_arg ( int arg_num ) { <nl> if ( ar = = NULL | | arg_num < 0 | | arg_num > = ar - > numArgs ( ) ) { <nl> return false ; <nl> } <nl> + if ( ar - > m_func - > isClosureBody ( ) ) { <nl> + ar = g_vmContext - > getPrevVMState ( ar ) ; <nl> + } <nl> <nl> const int numParams = ar - > m_func - > numParams ( ) ; <nl> <nl> Array hhvm_get_frame_args ( const ActRec * ar ) { <nl> if ( ar = = NULL ) { <nl> return Array ( ) ; <nl> } <nl> + if ( ar - > m_func - > isClosureBody ( ) ) { <nl> + ar = g_vmContext - > getPrevVMState ( ar ) ; <nl> + } <nl> + <nl> int numParams = ar - > m_func - > numParams ( ) ; <nl> int numArgs = ar - > numArgs ( ) ; <nl> HphpArray * retval = NEW ( HphpArray ) ( numArgs ) ; <nl> int64 f_func_num_args ( ) { <nl> if ( ar = = NULL ) { <nl> return - 1 ; <nl> } <nl> + if ( ar - > m_func - > isClosureBody ( ) ) { <nl> + return g_vmContext - > getPrevVMState ( ar ) - > numArgs ( ) ; <nl> + } <nl> return ar - > numArgs ( ) ; <nl> } else { <nl> / / we shouldn ' t be here , since code generation will inline this function <nl> mmm a / hphp / runtime / ext / ext_reflection . cpp <nl> ppp b / hphp / runtime / ext / ext_reflection . cpp <nl> static void set_function_info ( Array & ret , const VM : : Func * func ) { <nl> } <nl> <nl> / / closure info <nl> - ret . set ( s_is_closure , func - > isClosureBody ( ) ) ; <nl> + ret . set ( <nl> + s_is_closure , <nl> + / / TODO ( ptarjan ) should this be a boolean on the func ? <nl> + func - > name ( ) - > isame ( StringData : : GetStaticString ( " __invoke " ) ) & & <nl> + func - > isMethod ( ) & & func - > cls ( ) - > parent ( ) & & <nl> + func - > cls ( ) - > parent ( ) - > name ( ) - > isame ( StringData : : GetStaticString ( " Closure " ) ) <nl> + ) ; <nl> / / Interestingly this isn ' t the same as calling isGenerator ( ) because calling <nl> / / isGenerator ( ) on the outside function for a generator returns false . <nl> ret . set ( s_is_generator , func - > hasGeneratorAsBody ( ) ) ; <nl> mmm a / hphp / runtime / vm / bytecode . cpp <nl> ppp b / hphp / runtime / vm / bytecode . cpp <nl> VMExecutionContext : : createContinuation ( ActRec * fp , <nl> / / we enter the generator body . <nl> ActRec * ar = cont - > actRec ( ) ; <nl> ar - > m_func = genFunc ; <nl> - if ( isMethod ) { <nl> - if ( obj . get ( ) ) { <nl> - ObjectData * objData = obj . get ( ) ; <nl> - ar - > setThis ( objData ) ; <nl> - objData - > incRefCount ( ) ; <nl> - } else { <nl> + if ( obj . get ( ) ) { <nl> + ObjectData * objData = obj . get ( ) ; <nl> + ar - > setThis ( objData ) ; <nl> + objData - > incRefCount ( ) ; <nl> + } else { <nl> + if ( isMethod ) { <nl> ar - > setClass ( frameStaticClass ( fp ) ) ; <nl> + } else { <nl> + ar - > setThis ( nullptr ) ; <nl> } <nl> - } else { <nl> - ar - > setThis ( nullptr ) ; <nl> } <nl> ar - > initNumArgs ( 1 ) ; <nl> ar - > setVarEnv ( nullptr ) ; <nl> inline void OPTBLD_INLINE VMExecutionContext : : iopCreateCont ( PC & pc ) { <nl> const Func * genFunc = origFunc - > getGeneratorBody ( genName ) ; <nl> assert ( genFunc ! = nullptr ) ; <nl> <nl> - bool isMethod = origFunc - > isNonClosureMethod ( ) ; <nl> + bool isMethod = origFunc - > isMethod ( ) ; <nl> c_Continuation * cont = isMethod ? <nl> createContinuation < true > ( m_fp , getArgs , origFunc , genFunc ) : <nl> createContinuation < false > ( m_fp , getArgs , origFunc , genFunc ) ; <nl> mmm a / hphp / runtime / vm / class . cpp <nl> ppp b / hphp / runtime / vm / class . 
cpp <nl> void Class : : setMethods ( ) { <nl> / / locals and a separate translation , not a different context <nl> / / class . <nl> f = f - > clone ( ) ; <nl> - if ( f - > attrs ( ) & AttrClone ) { <nl> + if ( f - > attrs ( ) & AttrClone | | f - > isClosureBody ( ) ) { <nl> f - > setCls ( this ) ; <nl> } <nl> f - > setNewFuncId ( ) ; <nl> mmm a / hphp / runtime / vm / func . cpp <nl> ppp b / hphp / runtime / vm / func . cpp <nl> void Func : : setCached ( ) { <nl> } <nl> <nl> const Func * Func : : getGeneratorBody ( const StringData * name ) const { <nl> - if ( isNonClosureMethod ( ) ) { <nl> + if ( isMethod ( ) ) { <nl> return cls ( ) - > lookupMethod ( name ) ; <nl> } else { <nl> return Unit : : lookupFunc ( name ) ; <nl> void FuncEmitter : : allocVarId ( const StringData * name ) { <nl> } <nl> <nl> Id FuncEmitter : : lookupVarId ( const StringData * name ) const { <nl> - assert ( name ! = nullptr ) ; <nl> - assert ( m_localNames . find ( name ) ! = m_localNames . end ( ) ) ; <nl> + assert ( this - > hasVar ( name ) ) ; <nl> return m_localNames . find ( name ) - > second ; <nl> } <nl> <nl> + bool FuncEmitter : : hasVar ( const StringData * name ) const { <nl> + assert ( name ! = nullptr ) ; <nl> + return m_localNames . find ( name ) ! = m_localNames . end ( ) ; <nl> + } <nl> + <nl> Id FuncEmitter : : allocIterator ( ) { <nl> assert ( m_numIterators > = m_nextFreeIterator ) ; <nl> Id id = m_nextFreeIterator + + ; <nl> mmm a / hphp / runtime / vm / func . h <nl> ppp b / hphp / runtime / vm / func . h <nl> class FuncEmitter { <nl> } <nl> void allocVarId ( const StringData * name ) ; <nl> Id lookupVarId ( const StringData * name ) const ; <nl> + bool hasVar ( const StringData * name ) const ; <nl> Id numParams ( ) const { return m_params . size ( ) ; } <nl> <nl> Id allocIterator ( ) ; <nl> mmm a / hphp / runtime / vm / runtime . cpp <nl> ppp b / hphp / runtime / vm / runtime . cpp <nl> HphpArray * get_static_locals ( const ActRec * ar ) { <nl> if ( ar - > m_func - > isClosureBody ( ) ) { <nl> static const StringData * s___static_locals = <nl> StringData : : GetStaticString ( " __static_locals " ) ; <nl> + / / walk back to the __invoke method on the Closure <nl> + ar = g_vmContext - > getPrevVMState ( ar ) ; <nl> assert ( ar - > hasThis ( ) ) ; <nl> ObjectData * closureObj = ar - > getThis ( ) ; <nl> assert ( closureObj ) ; <nl> mmm a / hphp / system / classes_hhvm / closure . php <nl> ppp b / hphp / system / classes_hhvm / closure . php <nl> <nl> < ? php <nl> <nl> - / / Used as the base class for all closures <nl> + / * * <nl> + * Used as the base class for all closures <nl> + * / <nl> class Closure { <nl> + / / The bound $ this for the closure . Could be null . 
<nl> + protected $ this ; <nl> + / / The context class for calling $ functionName when $ this is null <nl> + protected $ className ; <nl> + / / The function to call on $ this ( or $ className ) <nl> + protected $ functionName ; <nl> + <nl> + / / For storing the static locals from the closure body <nl> protected $ __static_locals ; <nl> - / / Adding a dummy __sleep ( ) to return an illegal value to make the code <nl> - / / go through error handling path <nl> + <nl> + / / All the variables from the use statement will be private variables on the <nl> + / / subclasses so they don ' t have to be packaged in an array and then back out <nl> + / / on every call <nl> + <nl> + / * * <nl> + * Adding a dummy __sleep ( ) to return an illegal value to make the code <nl> + * go through error handling path <nl> + * / <nl> public function __sleep ( ) { <nl> return false ; <nl> } <nl> - } <nl> <nl> + / * * <nl> + * This is handled by each subclass basically inlining getUseVars ( ) <nl> + * and skipping all the overhead of call_user_func_array <nl> + public function __invoke ( ) { <nl> + $ context = $ this - > this ? : $ this - > className ; <nl> + call_user_func_array ( <nl> + array ( $ context , $ this - > functionName ) , <nl> + func_get_args ( ) + $ this - > getUseVars ( ) <nl> + ) ; <nl> + } <nl> + * / <nl> + } <nl> mmm a / hphp / test / test_code_run . cpp <nl> ppp b / hphp / test / test_code_run . cpp <nl> bool TestCodeRun : : TestClosure ( ) { <nl> " } \ n " <nl> " f ( ) ; \ n " ) ; <nl> <nl> - MVCR ( " < ? php \ n " <nl> + MVCRO ( " < ? php \ n " <nl> " class Foo { \ n " <nl> " function bar ( ) { \ n " <nl> " $ abc = 123 ; \ n " <nl> bool TestCodeRun : : TestClosure ( ) { <nl> " } \ n " <nl> " } \ n " <nl> " $ a = Foo : : bar ( ) ; \ n " <nl> - " $ a ( 456 ) ; \ n " ) ; <nl> + " $ a ( 456 ) ; \ n " , <nl> + " int ( 456 ) \ n " ) ; <nl> <nl> - MVCR ( " < ? php \ n " <nl> + MVCRO ( " < ? php \ n " <nl> " class Foo { \ n " <nl> " function bar ( ) { \ n " <nl> " $ abc = 123 ; \ n " <nl> bool TestCodeRun : : TestClosure ( ) { <nl> " } \ n " <nl> " } \ n " <nl> " $ a = Foo : : bar ( ) ; \ n " <nl> - " $ a ( 456 ) ; \ n " ) ; <nl> + " $ a ( 456 ) ; \ n " , <nl> + " int ( 456 ) \ n " ) ; <nl> <nl> - MVCR ( " < ? php \ n " <nl> + MVCRO ( " < ? php \ n " <nl> " class Foo { \ n " <nl> " function bar ( ) { \ n " <nl> " $ abc = 123 ; \ n " <nl> bool TestCodeRun : : TestClosure ( ) { <nl> " } \ n " <nl> " } \ n " <nl> " $ a = Foo : : bar ( ) ; \ n " <nl> - " $ a ( 456 ) ; \ n " ) ; <nl> + " $ a ( 456 ) ; \ n " , <nl> + " int ( 456 ) \ n " ) ; <nl> <nl> MVCR ( " < ? php \ n " <nl> " class Foo { \ n " <nl> bool TestCodeRun : : TestTraits ( ) { <nl> " $ a = Foo : : bar ( ) ; \ n " <nl> " $ a ( 456 ) ; \ n " <nl> , <nl> - " int ( 123 ) \ n " <nl> + " int ( 456 ) \ n " <nl> ) ; <nl> <nl> MVCRO ( " < ? php \ n " <nl> bool TestCodeRun : : TestTraits ( ) { <nl> " $ a = Foo : : bar ( ) ; \ n " <nl> " $ a ( 456 ) ; \ n " <nl> , <nl> - " int ( 123 ) \ n " <nl> + " int ( 456 ) \ n " <nl> ) ; <nl> <nl> MVCRO ( " < ? php \ n " <nl> bool TestCodeRun : : TestTraits ( ) { <nl> " $ a = Foo : : bar ( ) ; \ n " <nl> " $ a ( 456 ) ; \ n " <nl> , <nl> - " int ( 123 ) \ n " ) ; <nl> + " int ( 456 ) \ n " ) ; <nl> <nl> MVCRO ( " < ? 
php \ n " <nl> " trait Too { \ n " <nl> bool TestCodeRun : : TestTraits ( ) { <nl> " class Foo { use Too ; } \ n " <nl> " Foo : : bar ( ) ; \ n " <nl> , <nl> - " string ( 0 ) \ " \ " \ n " <nl> + " string ( 3 ) \ " Foo \ " \ n " <nl> " string ( 9 ) \ " { closure } \ " \ n " <nl> ) ; <nl> <nl> mmm a / hphp / test / vm / closure . php . exp <nl> ppp b / hphp / test / vm / closure . php . exp <nl> Args : 888 500 <nl> # 0 funk ( ) called at hphp / test / vm / closure . php : 17 ] <nl> # 1 { closure } ( ) called at hphp / test / vm / closure . php : 28 ] <nl> # 2 main ( ) called at hphp / test / vm / closure . php : 33 ] <nl> - object ( Closure $ ) # 1 ( 3 ) { <nl> + object ( Closure $ ) # 1 ( 6 ) { <nl> [ " use_by_val " : " Closure $ " : private ] = > <nl> int ( 123 ) <nl> [ " use_by_ref " : " Closure $ " : private ] = > <nl> & int ( 4000 ) <nl> + [ " this " : protected ] = > <nl> + NULL <nl> + [ " className " : protected ] = > <nl> + NULL <nl> + [ " functionName " : protected ] = > <nl> + string ( 58 ) " 0methodForClosure $ " <nl> [ " __static_locals " : protected ] = > <nl> NULL <nl> } <nl> new file mode 100644 <nl> index 00000000000 . . d8d96246666 <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / closure_class . php <nl> <nl> + < ? php <nl> + <nl> + trait A { <nl> + public function b ( ) { <nl> + return function ( ) { <nl> + return array ( <nl> + __CLASS__ , <nl> + get_class ( $ this ) <nl> + ) ; <nl> + } ; <nl> + } <nl> + } <nl> + <nl> + class C { <nl> + use A ; <nl> + public function d ( ) { <nl> + return function ( ) { <nl> + return array ( <nl> + __CLASS__ , <nl> + get_class ( $ this ) <nl> + ) ; <nl> + } ; <nl> + } <nl> + } <nl> + <nl> + $ c = new C ; <nl> + $ b = $ c - > b ( ) ; <nl> + var_dump ( $ b ( ) ) ; <nl> + $ d = $ c - > d ( ) ; <nl> + var_dump ( $ d ( ) ) ; <nl> new file mode 100644 <nl> index 00000000000 . . a1259013d50 <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / closure_class . php . exp <nl> <nl> + array ( 2 ) { <nl> + [ 0 ] = > <nl> + string ( 1 ) " C " <nl> + [ 1 ] = > <nl> + string ( 1 ) " C " <nl> + } <nl> + array ( 2 ) { <nl> + [ 0 ] = > <nl> + string ( 1 ) " C " <nl> + [ 1 ] = > <nl> + string ( 1 ) " C " <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . bf91d57edfd <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / closure_get_class . php <nl> <nl> + < ? php <nl> + # test_code_run . cpp : 33285 <nl> + <nl> + trait Too { <nl> + function bar ( ) { <nl> + $ a = function ( ) { <nl> + var_dump ( __CLASS__ ) ; <nl> + } ; <nl> + $ a ( ) ; <nl> + $ a = function ( ) { <nl> + var_dump ( get_class ( ) ) ; <nl> + } ; <nl> + $ a ( ) ; <nl> + if ( isset ( $ this ) ) { <nl> + $ a = function ( ) { <nl> + var_dump ( get_class ( $ this ) ) ; <nl> + } ; <nl> + $ a ( ) ; <nl> + } <nl> + } <nl> + } <nl> + class Foo { use Too ; } <nl> + <nl> + $ f = new Foo ; <nl> + echo " Between \ n " ; <nl> + $ f - > bar ( ) ; <nl> + echo " Between \ n " ; <nl> + $ f : : bar ( ) ; <nl> + echo " Between \ n " ; <nl> + Foo : : bar ( ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 7d807642a7f <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / closure_get_class . php . exp <nl> <nl> + Between <nl> + string ( 3 ) " Foo " <nl> + string ( 3 ) " Foo " <nl> + string ( 3 ) " Foo " <nl> + Between <nl> + string ( 3 ) " Foo " <nl> + string ( 3 ) " Foo " <nl> + Between <nl> + string ( 3 ) " Foo " <nl> + string ( 3 ) " Foo " <nl> new file mode 100644 <nl> index 00000000000 . . abda56004ce <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / closure_in_generator . 
php <nl> <nl> + < ? php <nl> + <nl> + class A { <nl> + public function b ( ) { <nl> + $ cl = function ( ) { <nl> + return $ this - > c ( ) ; <nl> + } ; <nl> + yield $ cl ( ) ; <nl> + } <nl> + private function c ( ) { <nl> + return ' A ' ; <nl> + } <nl> + } <nl> + <nl> + $ a = new A ; <nl> + foreach ( $ a - > b ( ) as $ c ) { <nl> + print " $ c \ n " ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . f70f10e4db1 <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / closure_in_generator . php . exp <nl> @ @ - 0 , 0 + 1 @ @ <nl> + A <nl> new file mode 100644 <nl> index 00000000000 . . fa51d336334 <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / closure_in_generator2 . php <nl> <nl> + < ? php <nl> + <nl> + class A { <nl> + public function b ( ) { <nl> + $ cl = function ( ) { <nl> + yield $ this - > c ( ) ; <nl> + } ; <nl> + yield $ cl ( ) ; <nl> + } <nl> + private function c ( ) { <nl> + return ' A ' ; <nl> + } <nl> + } <nl> + <nl> + $ a = new A ; <nl> + foreach ( $ a - > b ( ) as $ c ) { <nl> + foreach ( $ c as $ d ) { <nl> + print " $ d \ n " ; <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . f70f10e4db1 <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / closure_in_generator2 . php . exp <nl> @ @ - 0 , 0 + 1 @ @ <nl> + A <nl> new file mode 100644 <nl> index 00000000000 . . d0fdaec688f <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / closure_private . php <nl> <nl> + < ? php <nl> + <nl> + class A { <nl> + <nl> + public function testPublic ( ) { <nl> + $ a = function ( ) { <nl> + return $ this - > justReturn ( " foo " ) ; <nl> + } ; <nl> + return $ a ( ) ; <nl> + } <nl> + <nl> + public function testUse ( ) { <nl> + $ a = " foo " ; <nl> + $ b = function ( ) use ( $ a ) { <nl> + return $ this - > justReturn ( $ a ) ; <nl> + } ; <nl> + return $ b ( ) ; <nl> + } <nl> + <nl> + public function testParam ( ) { <nl> + $ a = " foo " ; <nl> + $ b = function ( $ foo ) { <nl> + return $ this - > justReturn ( $ foo ) ; <nl> + } ; <nl> + return $ b ( $ a ) ; <nl> + } <nl> + <nl> + public function testParamAndClosure ( ) { <nl> + $ a = " foo " ; <nl> + $ b = " bar " ; <nl> + $ c = function ( $ foo ) use ( $ b ) { <nl> + return $ this - > justReturn ( $ foo , $ b ) ; <nl> + } ; <nl> + return $ c ( $ a ) ; <nl> + } <nl> + <nl> + public function testByRef ( ) { <nl> + $ a = " foo " ; <nl> + $ b = " bar " ; <nl> + $ c = function ( & $ foo ) use ( & $ b ) { <nl> + $ this - > double ( $ foo , $ b ) ; <nl> + } ; <nl> + $ c ( $ a ) ; <nl> + return $ a . $ b ; <nl> + } <nl> + <nl> + public function testNotByRef ( ) { <nl> + $ a = " foo " ; <nl> + $ b = " bar " ; <nl> + $ c = function ( $ foo ) use ( $ b ) { <nl> + $ this - > double ( $ foo , $ b ) ; <nl> + } ; <nl> + $ c ( $ a ) ; <nl> + return $ a . $ b ; <nl> + } <nl> + <nl> + private function justReturn ( ) { <nl> + return func_get_args ( ) ; <nl> + } <nl> + <nl> + private function double ( & $ a , & $ b ) { <nl> + $ a = $ a . $ a ; <nl> + $ b = $ b . $ b ; <nl> + } <nl> + <nl> + } <nl> + $ a = new A ; <nl> + var_dump ( $ a - > testPublic ( ) ) ; <nl> + var_dump ( $ a - > testUse ( ) ) ; <nl> + var_dump ( $ a - > testParam ( ) ) ; <nl> + var_dump ( $ a - > testParamAndClosure ( ) ) ; <nl> + var_dump ( $ a - > testByRef ( ) ) ; <nl> + var_dump ( $ a - > testNotByRef ( ) ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 913843648ec <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / closure_private . php . 
exp <nl> <nl> + array ( 1 ) { <nl> + [ 0 ] = > <nl> + string ( 3 ) " foo " <nl> + } <nl> + array ( 1 ) { <nl> + [ 0 ] = > <nl> + string ( 3 ) " foo " <nl> + } <nl> + array ( 1 ) { <nl> + [ 0 ] = > <nl> + string ( 3 ) " foo " <nl> + } <nl> + array ( 2 ) { <nl> + [ 0 ] = > <nl> + string ( 3 ) " foo " <nl> + [ 1 ] = > <nl> + string ( 3 ) " bar " <nl> + } <nl> + string ( 12 ) " foofoobarbar " <nl> + string ( 6 ) " foobar " <nl> new file mode 100644 <nl> index 00000000000 . . df7392c9a8f <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / closure_recursive . php <nl> <nl> + < ? php <nl> + class A { <nl> + public function b ( ) { <nl> + return function ( ) { <nl> + return function ( ) { <nl> + return $ this - > c ( ) ; <nl> + } ; <nl> + } ; <nl> + } <nl> + private function c ( ) { <nl> + return 91 ; <nl> + } <nl> + } <nl> + $ a = new A ; <nl> + $ b = $ a - > b ( ) ; <nl> + $ first = $ b ( ) ; <nl> + $ second = $ first ( ) ; <nl> + var_dump ( $ second ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 757d2d22877 <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / closure_recursive . php . exp <nl> @ @ - 0 , 0 + 1 @ @ <nl> + int ( 91 ) <nl> new file mode 100644 <nl> index 00000000000 . . 030bf8564e1 <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / closure_recursive_bad . php <nl> <nl> + < ? php <nl> + class A { <nl> + public function b ( ) { <nl> + function c ( ) { <nl> + return function ( ) { <nl> + return $ this - > d ( ) ; <nl> + } ; <nl> + } ; <nl> + return c ( ) ; <nl> + } <nl> + private function d ( ) { <nl> + return 91 ; <nl> + } <nl> + } <nl> + $ a = new A ; <nl> + $ b = $ a - > b ( ) ; <nl> + var_dump ( $ b ( ) ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 08cbb6349dd <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / closure_recursive_bad . php . exp <nl> <nl> + HipHop Notice : Undefined variable : this in hphp / test / vm / closure_recursive_bad . php on line 6 <nl> + HipHop Fatal error : Uncaught exception ' BadMethodCallException ' with message ' Call to a member function d ( ) on a non - object ' in hphp / test / vm / closure_recursive_bad . php : 6 \ nStack trace : \ n # 0 hphp / test / vm / closure_recursive_bad . php ( 17 ) : { closure } ( ) \ n # 1 { main } <nl> new file mode 120000 <nl> index 00000000000 . . 7b75548caa7 <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / closure_recursive_bad . php . filter <nl> @ @ - 0 , 0 + 1 @ @ <nl> + filepath . filter <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . f89c0defc9d <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / zend_closure_005 . php <nl> <nl> + < ? php <nl> + <nl> + class A { <nl> + private $ x ; <nl> + <nl> + function __construct ( $ x ) { <nl> + $ this - > x = $ x ; <nl> + } <nl> + <nl> + function __destruct ( ) { <nl> + echo " Destroyed \ n " ; <nl> + } <nl> + <nl> + function getIncer ( $ val ) { <nl> + return function ( ) use ( $ val ) { <nl> + $ this - > x + = $ val ; <nl> + } ; <nl> + } <nl> + <nl> + function getPrinter ( ) { <nl> + return function ( ) { <nl> + echo $ this - > x . " \ n " ; <nl> + } ; <nl> + } <nl> + <nl> + function printX ( ) { <nl> + echo $ this - > x . 
" \ n " ; <nl> + } <nl> + } <nl> + <nl> + $ a = new A ( 3 ) ; <nl> + $ incer = $ a - > getIncer ( 2 ) ; <nl> + $ printer = $ a - > getPrinter ( ) ; <nl> + <nl> + $ a - > printX ( ) ; <nl> + $ printer ( ) ; <nl> + $ incer ( ) ; <nl> + $ a - > printX ( ) ; <nl> + $ printer ( ) ; <nl> + <nl> + unset ( $ a ) ; <nl> + <nl> + $ incer ( ) ; <nl> + $ printer ( ) ; <nl> + <nl> + unset ( $ incer ) ; <nl> + $ printer ( ) ; <nl> + <nl> + unset ( $ printer ) ; <nl> new file mode 100644 <nl> index 00000000000 . . f6ce55622e6 <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / zend_closure_005 . php . exp <nl> <nl> + 3 <nl> + 3 <nl> + 5 <nl> + 5 <nl> + 7 <nl> + 7 <nl> + Destroyed <nl> new file mode 100644 <nl> index 00000000000 . . d357d2f93f3 <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / zend_closure_007 . php <nl> <nl> + < ? php <nl> + <nl> + class A { <nl> + private $ x = 0 ; <nl> + <nl> + function getClosureGetter ( ) { <nl> + return function ( ) { <nl> + return function ( ) { <nl> + $ this - > x + + ; <nl> + } ; <nl> + } ; <nl> + } <nl> + <nl> + function printX ( ) { <nl> + echo $ this - > x . " \ n " ; <nl> + } <nl> + } <nl> + <nl> + $ a = new A ; <nl> + $ a - > printX ( ) ; <nl> + $ getClosure = $ a - > getClosureGetter ( ) ; <nl> + $ a - > printX ( ) ; <nl> + $ closure = $ getClosure ( ) ; <nl> + $ a - > printX ( ) ; <nl> + $ closure ( ) ; <nl> + $ a - > printX ( ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 530a8a796cb <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / zend_closure_007 . php . exp <nl> <nl> + 0 <nl> + 0 <nl> + 0 <nl> + 1 <nl> new file mode 100644 <nl> index 00000000000 . . 9ef951d5c81 <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / zend_closure_020 . php <nl> <nl> + < ? php <nl> + <nl> + class foo { <nl> + private $ test = 3 ; <nl> + <nl> + public function x ( ) { <nl> + $ a = & $ this ; <nl> + $ this - > a = function ( ) use ( & $ a ) { return $ a ; } ; <nl> + var_dump ( $ this - > a - > __invoke ( ) ) ; <nl> + var_dump ( is_a ( $ this - > a , ' closure ' ) ) ; <nl> + var_dump ( is_callable ( $ this - > a ) ) ; <nl> + <nl> + return $ this - > a ; <nl> + } <nl> + } <nl> + <nl> + $ foo = new foo ; <nl> + $ y = $ foo - > x ( ) ; <nl> + var_dump ( $ y ( ) - > test ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 6eeb369690a <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / zend_closure_020 . php . exp <nl> <nl> + object ( foo ) # 1 ( 2 ) { <nl> + [ " test " : " foo " : private ] = > <nl> + int ( 3 ) <nl> + [ " a " ] = > <nl> + object ( Closure $ ) # 2 ( 5 ) { <nl> + [ " a " : " Closure $ " : private ] = > <nl> + * RECURSION * <nl> + [ " this " : protected ] = > <nl> + * RECURSION * <nl> + [ " className " : protected ] = > <nl> + string ( 3 ) " foo " <nl> + [ " functionName " : protected ] = > <nl> + string ( 58 ) " 0methodForClosure $ " <nl> + [ " __static_locals " : protected ] = > <nl> + NULL <nl> + } <nl> + } <nl> + bool ( true ) <nl> + bool ( true ) <nl> + HipHop Fatal error : Cannot access private property foo : : $ test in hphp / test / vm / zend_closure_020 . php on line 19 <nl> new file mode 100755 <nl> index 00000000000 . . d57e0fe9f42 <nl> mmm / dev / null <nl> ppp b / hphp / test / vm / zend_closure_020 . php . filter <nl> <nl> + # ! / bin / bash <nl> + <nl> + . / test / vm / filepath . filter | . / test / vm / closure . filter <nl> | Put the body of a closure on the class intead of in the __invoke of the closure | facebook/hhvm | 8c6d77deefc2925a3e32aa65b517367f784aca39 | 2013-03-06T06:07:56Z |
mmm a / xbmc / music / windows / GUIWindowMusicNav . cpp <nl> ppp b / xbmc / music / windows / GUIWindowMusicNav . cpp <nl> bool CGUIWindowMusicNav : : ManageInfoProvider ( const CFileItemPtr item ) <nl> result = m_musicdatabase . SetScraperAll ( " musicdb : / / albums / " , nullptr ) ; <nl> } <nl> } <nl> + default : <nl> + break ; <nl> } <nl> if ( ! result ) <nl> return false ; <nl> | [ cleanup ] silence non handled switch values warning | xbmc/xbmc | bd299b14cc18420e30fb2781ffd6847828ce5dab | 2017-08-19T19:07:59Z |
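The xbmc/xbmc change above silences compiler warnings about unhandled enumerators by giving the switch an explicit default arm. A minimal standalone sketch of the same pattern; the enum and function names are hypothetical, not xbmc code:

// Hypothetical enum/handler: listing some enumerators and adding
// "default: break;" keeps -Wswitch-style warnings quiet for the values
// that are intentionally ignored.
enum class ProviderAction { Edit, Reset, Cancel };

void HandleAction(ProviderAction action) {
  switch (action) {
    case ProviderAction::Edit:
      // ... edit the info provider ...
      break;
    case ProviderAction::Reset:
      // ... reset to defaults ...
      break;
    default:
      break;  // remaining values need no handling
  }
}

int main() {
  HandleAction(ProviderAction::Cancel);
  return 0;
}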
mmm a / docs / LangRefNew . rst <nl> ppp b / docs / LangRefNew . rst <nl> Similarly , the setter function , whose body is part of the ` ` var - set ` ` clause <nl> <nl> If the ` ` var - set ` ` or ` ` willset ` ` clause contains a ` ` set - name ` ` clause , the <nl> identifier of that clause is used as the name of the parameter to the setter or <nl> - the observing accessor . Otherwise , the parameter name is ` ` value ` ` . <nl> - Same applies to ` ` didset ` ` clause , but the default parameter name is <nl> - ` ` oldValue ` ` . <nl> + the observing accessor . Otherwise , the parameter name is ` ` newValue ` ` . Same <nl> + applies to ` ` didset ` ` clause , but the default parameter name is ` ` oldValue ` ` . <nl> <nl> FIXME : Should the type of a pattern which isn ' t fully typed affect the <nl> type - checking of the expression ( i . e . should we compute a structured dependent <nl> | Update LangRef and simplify code completion test for value - > newValue change | apple/swift | b584eaac7dfd1d60291a5a086745951a31551f58 | 2014-03-12T07:14:32Z |
mmm a / src / widgets / editor / editor . cpp <nl> ppp b / src / widgets / editor / editor . cpp <nl> void Editor : : flashCurrentLayer ( ) <nl> } <nl> } <nl> <nl> - drawSprite ( 0 , 0 , m_sprite - > getWidth ( ) - 1 , m_sprite - > getHeight ( ) - 1 ) ; <nl> + drawSpriteSafe ( 0 , 0 , m_sprite - > getWidth ( ) - 1 , m_sprite - > getHeight ( ) - 1 ) ; <nl> gui_flip_screen ( ) ; <nl> <nl> - vsync ( ) ; <nl> - rest ( 100 ) ; <nl> - <nl> image_clear ( flash_image , flash_image - > mask_color ) ; <nl> - drawSprite ( 0 , 0 , m_sprite - > getWidth ( ) - 1 , m_sprite - > getHeight ( ) - 1 ) ; <nl> + drawSpriteSafe ( 0 , 0 , m_sprite - > getWidth ( ) - 1 , m_sprite - > getHeight ( ) - 1 ) ; <nl> } <nl> } <nl> <nl> | Fix problems in the Editor : : flashCurrentLayer ( ) drawing outside the safe editor area . | aseprite/aseprite | fe03919b1de008f13c6a5fcd702664efeb12cca8 | 2012-02-12T01:32:32Z |
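The aseprite change above routes the layer-flash redraw through drawSpriteSafe (and drops the vsync/rest delay) so the flash cannot paint outside the editor's drawable area. A hypothetical sketch of what such a "safe" wrapper typically does, clamping the requested region to the widget bounds before drawing; the types and helpers below are assumptions, not aseprite's real API:

#include <algorithm>

// All names below are assumptions, not aseprite's real API.
struct Rect { int x1, y1, x2, y2; };

static Rect clip(const Rect& r, const Rect& bounds) {
  return Rect{ std::max(r.x1, bounds.x1), std::max(r.y1, bounds.y1),
               std::min(r.x2, bounds.x2), std::min(r.y2, bounds.y2) };
}

void drawSpriteSafe(const Rect& spriteRegion, const Rect& editorBounds) {
  Rect safe = clip(spriteRegion, editorBounds);
  if (safe.x1 > safe.x2 || safe.y1 > safe.y2)
    return;             // nothing of the request lies inside the editor
  // drawSprite(safe);  // hypothetical: paint only the clipped region
}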
mmm a / src / propsheet / PropSheetHandler . cpp <nl> ppp b / src / propsheet / PropSheetHandler . cpp <nl> class ConsolePropertySheetHandler WrlFinal : public RuntimeClass < RuntimeClassFla <nl> { <nl> g_fHostedInFileProperties = TRUE ; <nl> gpStateInfo = & g_csi ; <nl> + <nl> + / / Initialize the fIsV2Console with whatever the current v2 seeting is <nl> + / / in the registry . Usually this is set by conhost , but in this path , <nl> + / / we ' re being launched straight from explorer . See GH # 2319 , GH # 2651 <nl> + gpStateInfo - > fIsV2Console = GetConsoleBoolValue ( CONSOLE_REGISTRY_FORCEV2 , TRUE ) ; <nl> + <nl> InitRegistryValues ( gpStateInfo ) ; <nl> gpStateInfo - > Defaults = TRUE ; <nl> GetRegistryValues ( gpStateInfo ) ; <nl> mmm a / src / propsheet / console . cpp <nl> ppp b / src / propsheet / console . cpp <nl> void SaveConsoleSettingsIfNeeded ( const HWND hwnd ) <nl> if ( gpStateInfo - > LinkTitle ! = NULL ) <nl> { <nl> SetGlobalRegistryValues ( ) ; <nl> - if ( ! NT_SUCCESS ( ShortcutSerialization : : s_SetLinkValues ( gpStateInfo , g_fEastAsianSystem , g_fForceV2 ) ) ) <nl> + if ( ! NT_SUCCESS ( ShortcutSerialization : : s_SetLinkValues ( gpStateInfo , <nl> + g_fEastAsianSystem , <nl> + g_fForceV2 , <nl> + gpStateInfo - > fIsV2Console ) ) ) <nl> { <nl> WCHAR szMessage [ MAX_PATH + 100 ] ; <nl> WCHAR awchBuffer [ MAX_PATH ] = { 0 } ; <nl> mmm a / src / propsheet / console . h <nl> ppp b / src / propsheet / console . h <nl> const unsigned int TERMINAL_PAGE_INDEX = 4 ; <nl> / / number of property sheet pages <nl> static const int V1_NUMBER_OF_PAGES = 4 ; <nl> static const int NUMBER_OF_PAGES = 5 ; <nl> + <nl> + BOOL GetConsoleBoolValue ( __in PCWSTR pszValueName , __in BOOL fDefault ) ; <nl> mmm a / src / propsheet / globals . cpp <nl> ppp b / src / propsheet / globals . cpp <nl> LONG gcxScreen ; <nl> LONG gcyScreen ; <nl> <nl> BOOL g_fForceV2 ; <nl> + / / If we didn ' t launch as a v2 console window , we don ' t want to persist v2 <nl> + / / settings when we close , as they ' ll get zero ' d . Use this to track the initial <nl> + / / launch state . <nl> BOOL g_fEditKeys ; <nl> BYTE g_bPreviewOpacity = 0x00 ; / / sentinel value for initial test on dialog entry . Once initialized , won ' t be less than TRANSPARENCY_RANGE_MIN <nl> <nl> mmm a / src / propsheet / registry . cpp <nl> ppp b / src / propsheet / registry . cpp <nl> VOID SetRegistryValues ( <nl> <nl> SetGlobalRegistryValues ( ) ; <nl> <nl> - / / Save cursor type and color <nl> - dwValue = pStateInfo - > CursorType ; <nl> - LOG_IF_FAILED ( RegistrySerialization : : s_UpdateValue ( hConsoleKey , <nl> - hTitleKey , <nl> - CONSOLE_REGISTRY_CURSORTYPE , <nl> - REG_DWORD , <nl> - ( BYTE * ) & dwValue , <nl> - sizeof ( dwValue ) ) ) ; <nl> + / / Only save the " Terminal " settings if we launched as a v2 propsheet . The <nl> + / / v1 console doesn ' t know anything about these settings , and their value <nl> + / / will be incorrectly zero ' d if we save in this state . <nl> + / / See microsoft / terminal # 2319 for more details . 
<nl> + if ( gpStateInfo - > fIsV2Console ) <nl> + { <nl> + / / Save cursor type and color <nl> + dwValue = pStateInfo - > CursorType ; <nl> + LOG_IF_FAILED ( RegistrySerialization : : s_UpdateValue ( hConsoleKey , <nl> + hTitleKey , <nl> + CONSOLE_REGISTRY_CURSORTYPE , <nl> + REG_DWORD , <nl> + ( BYTE * ) & dwValue , <nl> + sizeof ( dwValue ) ) ) ; <nl> <nl> - dwValue = pStateInfo - > CursorColor ; <nl> - LOG_IF_FAILED ( RegistrySerialization : : s_UpdateValue ( hConsoleKey , <nl> - hTitleKey , <nl> - CONSOLE_REGISTRY_CURSORCOLOR , <nl> - REG_DWORD , <nl> - ( BYTE * ) & dwValue , <nl> - sizeof ( dwValue ) ) ) ; <nl> + dwValue = pStateInfo - > CursorColor ; <nl> + LOG_IF_FAILED ( RegistrySerialization : : s_UpdateValue ( hConsoleKey , <nl> + hTitleKey , <nl> + CONSOLE_REGISTRY_CURSORCOLOR , <nl> + REG_DWORD , <nl> + ( BYTE * ) & dwValue , <nl> + sizeof ( dwValue ) ) ) ; <nl> <nl> - dwValue = pStateInfo - > InterceptCopyPaste ; <nl> - LOG_IF_FAILED ( RegistrySerialization : : s_UpdateValue ( hConsoleKey , <nl> - hTitleKey , <nl> - CONSOLE_REGISTRY_INTERCEPTCOPYPASTE , <nl> - REG_DWORD , <nl> - ( BYTE * ) & dwValue , <nl> - sizeof ( dwValue ) ) ) ; <nl> + dwValue = pStateInfo - > InterceptCopyPaste ; <nl> + LOG_IF_FAILED ( RegistrySerialization : : s_UpdateValue ( hConsoleKey , <nl> + hTitleKey , <nl> + CONSOLE_REGISTRY_INTERCEPTCOPYPASTE , <nl> + REG_DWORD , <nl> + ( BYTE * ) & dwValue , <nl> + sizeof ( dwValue ) ) ) ; <nl> <nl> - dwValue = pStateInfo - > TerminalScrolling ; <nl> - LOG_IF_FAILED ( RegistrySerialization : : s_UpdateValue ( hConsoleKey , <nl> - hTitleKey , <nl> - CONSOLE_REGISTRY_TERMINALSCROLLING , <nl> - REG_DWORD , <nl> - ( BYTE * ) & dwValue , <nl> - sizeof ( dwValue ) ) ) ; <nl> - dwValue = pStateInfo - > DefaultForeground ; <nl> - LOG_IF_FAILED ( RegistrySerialization : : s_UpdateValue ( hConsoleKey , <nl> - hTitleKey , <nl> - CONSOLE_REGISTRY_DEFAULTFOREGROUND , <nl> - REG_DWORD , <nl> - ( BYTE * ) & dwValue , <nl> - sizeof ( dwValue ) ) ) ; <nl> - dwValue = pStateInfo - > DefaultBackground ; <nl> - LOG_IF_FAILED ( RegistrySerialization : : s_UpdateValue ( hConsoleKey , <nl> - hTitleKey , <nl> - CONSOLE_REGISTRY_DEFAULTBACKGROUND , <nl> - REG_DWORD , <nl> - ( BYTE * ) & dwValue , <nl> - sizeof ( dwValue ) ) ) ; <nl> + dwValue = pStateInfo - > TerminalScrolling ; <nl> + LOG_IF_FAILED ( RegistrySerialization : : s_UpdateValue ( hConsoleKey , <nl> + hTitleKey , <nl> + CONSOLE_REGISTRY_TERMINALSCROLLING , <nl> + REG_DWORD , <nl> + ( BYTE * ) & dwValue , <nl> + sizeof ( dwValue ) ) ) ; <nl> + dwValue = pStateInfo - > DefaultForeground ; <nl> + LOG_IF_FAILED ( RegistrySerialization : : s_UpdateValue ( hConsoleKey , <nl> + hTitleKey , <nl> + CONSOLE_REGISTRY_DEFAULTFOREGROUND , <nl> + REG_DWORD , <nl> + ( BYTE * ) & dwValue , <nl> + sizeof ( dwValue ) ) ) ; <nl> + dwValue = pStateInfo - > DefaultBackground ; <nl> + LOG_IF_FAILED ( RegistrySerialization : : s_UpdateValue ( hConsoleKey , <nl> + hTitleKey , <nl> + CONSOLE_REGISTRY_DEFAULTBACKGROUND , <nl> + REG_DWORD , <nl> + ( BYTE * ) & dwValue , <nl> + sizeof ( dwValue ) ) ) ; <nl> + } <nl> <nl> / / <nl> / / Close the registry keys <nl> mmm a / src / propslib / ShortcutSerialization . cpp <nl> ppp b / src / propslib / ShortcutSerialization . cpp <nl> void ShortcutSerialization : : s_GetLinkTitle ( _In_ PCWSTR pwszShortcutFilename , <nl> return ( SUCCEEDED ( hr ) ) ? STATUS_SUCCESS : STATUS_UNSUCCESSFUL ; <nl> } <nl> <nl> - / * * <nl> - Writes the console properties out to the link it was opened from . 
<nl> - Arguments : <nl> - pStateInfo - pointer to structure containing information <nl> - Return Value : <nl> - A status code if something failed or S_OK <nl> - * / <nl> + / / Function Description : <nl> + / / - Writes the console properties out to the link it was opened from . <nl> + / / Arguments : <nl> + / / - pStateInfo : pointer to structure containing information <nl> + / / - writeTerminalSettings : If true , persist the " Terminal " properties only <nl> + / / present in the v2 console . This should be false if called from a v11 <nl> + / / console . See GH # 2319 <nl> + / / Return Value : <nl> + / / - A status code if something failed or S_OK <nl> [ [ nodiscard ] ] NTSTATUS ShortcutSerialization : : s_SetLinkValues ( _In_ PCONSOLE_STATE_INFO pStateInfo , <nl> const BOOL fEastAsianSystem , <nl> - const BOOL fForceV2 ) <nl> + const BOOL fForceV2 , <nl> + const bool writeTerminalSettings ) <nl> { <nl> IShellLinkW * psl ; <nl> IPersistFile * ppf ; <nl> Return Value : <nl> s_SetLinkPropertyBoolValue ( pps , PKEY_Console_CtrlKeyShortcutsDisabled , pStateInfo - > fCtrlKeyShortcutsDisabled ) ; <nl> s_SetLinkPropertyBoolValue ( pps , PKEY_Console_LineSelection , pStateInfo - > fLineSelection ) ; <nl> s_SetLinkPropertyByteValue ( pps , PKEY_Console_WindowTransparency , pStateInfo - > bWindowTransparency ) ; <nl> - s_SetLinkPropertyDwordValue ( pps , PKEY_Console_CursorType , pStateInfo - > CursorType ) ; <nl> - s_SetLinkPropertyDwordValue ( pps , PKEY_Console_CursorColor , pStateInfo - > CursorColor ) ; <nl> s_SetLinkPropertyBoolValue ( pps , PKEY_Console_InterceptCopyPaste , pStateInfo - > InterceptCopyPaste ) ; <nl> - s_SetLinkPropertyDwordValue ( pps , PKEY_Console_DefaultForeground , pStateInfo - > DefaultForeground ) ; <nl> - s_SetLinkPropertyDwordValue ( pps , PKEY_Console_DefaultBackground , pStateInfo - > DefaultBackground ) ; <nl> - s_SetLinkPropertyBoolValue ( pps , PKEY_Console_TerminalScrolling , pStateInfo - > TerminalScrolling ) ; <nl> + <nl> + / / Only save the " Terminal " settings if we launched as a v2 <nl> + / / propsheet . The v1 console doesn ' t know anything about <nl> + / / these settings , and their value will be incorrectly <nl> + / / zero ' d if we save in this state . <nl> + / / See microsoft / terminal # 2319 for more details . <nl> + if ( writeTerminalSettings ) <nl> + { <nl> + s_SetLinkPropertyDwordValue ( pps , PKEY_Console_CursorType , pStateInfo - > CursorType ) ; <nl> + s_SetLinkPropertyDwordValue ( pps , PKEY_Console_CursorColor , pStateInfo - > CursorColor ) ; <nl> + s_SetLinkPropertyDwordValue ( pps , PKEY_Console_DefaultForeground , pStateInfo - > DefaultForeground ) ; <nl> + s_SetLinkPropertyDwordValue ( pps , PKEY_Console_DefaultBackground , pStateInfo - > DefaultBackground ) ; <nl> + s_SetLinkPropertyBoolValue ( pps , PKEY_Console_TerminalScrolling , pStateInfo - > TerminalScrolling ) ; <nl> + } <nl> hr = pps - > Commit ( ) ; <nl> pps - > Release ( ) ; <nl> } <nl> mmm a / src / propslib / ShortcutSerialization . hpp <nl> ppp b / src / propslib / ShortcutSerialization . 
hpp <nl> Revision History : <nl> class ShortcutSerialization <nl> { <nl> public : <nl> - [ [ nodiscard ] ] static NTSTATUS s_SetLinkValues ( _In_ PCONSOLE_STATE_INFO pStateInfo , const BOOL fEastAsianSystem , const BOOL fForceV2 ) ; <nl> + [ [ nodiscard ] ] static NTSTATUS s_SetLinkValues ( _In_ PCONSOLE_STATE_INFO pStateInfo , const BOOL fEastAsianSystem , const BOOL fForceV2 , const bool writeTerminalSettings ) ; <nl> [ [ nodiscard ] ] static NTSTATUS s_GetLinkConsoleProperties ( _Inout_ PCONSOLE_STATE_INFO pStateInfo ) ; <nl> [ [ nodiscard ] ] static NTSTATUS s_GetLinkValues ( _Inout_ PCONSOLE_STATE_INFO pStateInfo , <nl> _Out_ BOOL * const pfReadConsoleProperties , <nl> mmm a / tools / bx . ps1 <nl> ppp b / tools / bx . ps1 <nl> if ( $ projects . length - eq 0 ) <nl> } <nl> $ projectPath = $ projects . FullName <nl> <nl> - $ msBuildCondition = " ' % ( ProjectReference . Identity ) ' = = ' $ projectPath . metaproj ' " <nl> <nl> # Parse the solution ' s metaproj file . <nl> [ xml ] $ Metaproj = Get - Content " $ env : OPENCON \ OpenConsole . sln . metaproj " <nl> <nl> $ targets = $ Metaproj . Project . Target <nl> <nl> - # Filter to project targets that match out metaproj file . <nl> + # Most projects are in OpenConsole . sln . metaproj as " < project > . * proj . metaproj " . <nl> + # We ' ll filter to search for these first and foremost . <nl> + $ msBuildCondition = " ' % ( ProjectReference . Identity ) ' = = ' $ projectPath . metaproj ' " <nl> + <nl> + # Filter to project targets that match our metaproj file . <nl> # For Conhost \ Server , this will match : <nl> # [ Conhost \ Server , Conhost \ Server : Clean , Conhost \ Server : Rebuild , Conhost \ Server : Publish ] <nl> $ matchingTargets = $ targets | Where - Object { $ _ . MSBuild . Condition - eq $ msBuildCondition } <nl> <nl> + # If we didn ' t find a target , it ' s possible that the project didn ' t have a <nl> + # . metaproj in OpenConsole . sln . metaproj . Try filtering again , but leave off the <nl> + # . metaproj extension . <nl> + if ( $ matchingTargets . length - eq 0 ) <nl> + { <nl> + $ conditionNoMeta = " ' % ( ProjectReference . Identity ) ' = = ' $ projectPath ' " <nl> + $ matchingTargets = $ targets | Where - Object { $ _ . MSBuild . Condition - eq $ conditionNoMeta } <nl> + } <nl> + <nl> # Further filter to the targets that dont have a suffix ( like " : Clean " ) <nl> $ matchingTargets = $ matchingTargets | Where - Object { $ hasProperty = $ _ . MsBuild . PSobject . Properties . name - match " Targets " ; return - Not $ hasProperty } <nl> <nl> mmm a / tools / opencon . cmd <nl> ppp b / tools / opencon . cmd <nl> set copy_dir = OpenConsole \ % _r % <nl> ( xcopy / Y % _last_build % \ Nihilist . exe % TEMP % \ % copy_dir % \ Nihilist . exe * ) > nul <nl> ( xcopy / Y % _last_build % \ console . dll % TEMP % \ % copy_dir % \ console . dll * ) > nul <nl> <nl> + echo Launching ` % TEMP % \ % copy_dir % \ OpenConsole . exe % * ` . . . <nl> start % TEMP % \ % copy_dir % \ OpenConsole . exe % * <nl> | Prevent the v1 propsheet from zeroing colors , causing black text on black background . ( ) | microsoft/terminal | b97db6303000f2a3c670862c0771069dfcca9972 | 2019-10-02T23:04:59Z |
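The microsoft/terminal change above gates the v2-only "Terminal" settings (cursor type/color, default colors, terminal scrolling) behind the fIsV2Console flag in both the registry and shortcut-serialization paths, so a v1 propsheet session can no longer persist zeroed values over them (GH#2319). A minimal sketch of that gating idea, using hypothetical types and helpers rather than the real conhost/propsheet API:

// Hypothetical types and helpers, not the real conhost/propsheet API.
struct ConsoleStateInfo {
  bool fIsV2Console;  // was this sheet opened for a v2 console?
  unsigned long CursorType;
  unsigned long CursorColor;
  unsigned long DefaultForeground;
  unsigned long DefaultBackground;
};

void SaveSettings(const ConsoleStateInfo& state) {
  // ... settings both the v1 and v2 console understand are written here ...

  if (state.fIsV2Console) {
    // Only a v2 session has meaningful values for these; a v1 session would
    // persist zeros and produce black-on-black text (GH#2319).
    // WriteRegistryDword(L"CursorType",        state.CursorType);
    // WriteRegistryDword(L"CursorColor",       state.CursorColor);
    // WriteRegistryDword(L"DefaultForeground", state.DefaultForeground);
    // WriteRegistryDword(L"DefaultBackground", state.DefaultBackground);
  }
}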
mmm a / modules / planning / reference_line / spiral_reference_line_smoother . cc <nl> ppp b / modules / planning / reference_line / spiral_reference_line_smoother . cc <nl> std : : vector < common : : PathPoint > SpiralReferenceLineSmoother : : Interpolate ( <nl> size_t num_of_points = <nl> static_cast < size_t > ( std : : ceil ( delta_s / resolution ) + 1 ) ; <nl> for ( size_t i = 1 ; i < = num_of_points ; + + i ) { <nl> - const double inter_s = delta_s / static_cast < double > ( num_of_points * i ) ; <nl> + const double inter_s = delta_s / static_cast < double > ( num_of_points ) <nl> + * static_cast < double > ( i ) ; <nl> const double dx = spiral_curve . ComputeCartesianDeviationX < 10 > ( inter_s ) ; <nl> const double dy = spiral_curve . ComputeCartesianDeviationY < 10 > ( inter_s ) ; <nl> <nl> | planning : revert changes in spiral reference line smoother | ApolloAuto/apollo | 0c0aeafb48e492e87cc38955a1f3b387f11316e7 | 2018-12-13T23:18:29Z |
mmm a / src / arm / builtins - arm . cc <nl> ppp b / src / arm / builtins - arm . cc <nl> static void CallRuntimePassFunction ( <nl> FrameScope scope ( masm , StackFrame : : INTERNAL ) ; <nl> / / Push a copy of the function onto the stack . <nl> __ push ( r1 ) ; <nl> - / / Push call kind information and function as parameter to the runtime call . <nl> - __ Push ( r5 , r1 ) ; <nl> + / / Push function as parameter to the runtime call . <nl> + __ Push ( r1 ) ; <nl> <nl> __ CallRuntime ( function_id , 1 ) ; <nl> - / / Restore call kind information . <nl> - __ pop ( r5 ) ; <nl> / / Restore receiver . <nl> __ pop ( r1 ) ; <nl> } <nl> static void Generate_JSConstructStubHelper ( MacroAssembler * masm , <nl> masm - > isolate ( ) - > builtins ( ) - > HandleApiCallConstruct ( ) ; <nl> ParameterCount expected ( 0 ) ; <nl> __ InvokeCode ( code , expected , expected , <nl> - RelocInfo : : CODE_TARGET , CALL_FUNCTION , CALL_AS_METHOD ) ; <nl> + RelocInfo : : CODE_TARGET , CALL_FUNCTION ) ; <nl> } else { <nl> ParameterCount actual ( r0 ) ; <nl> - __ InvokeFunction ( r1 , actual , CALL_FUNCTION , <nl> - NullCallWrapper ( ) , CALL_AS_METHOD ) ; <nl> + __ InvokeFunction ( r1 , actual , CALL_FUNCTION , NullCallWrapper ( ) ) ; <nl> } <nl> <nl> / / Store offset of return address for deoptimizer . <nl> static void Generate_JSEntryTrampolineHelper ( MacroAssembler * masm , <nl> __ CallStub ( & stub ) ; <nl> } else { <nl> ParameterCount actual ( r0 ) ; <nl> - __ InvokeFunction ( r1 , actual , CALL_FUNCTION , <nl> - NullCallWrapper ( ) , CALL_AS_METHOD ) ; <nl> + __ InvokeFunction ( r1 , actual , CALL_FUNCTION , NullCallWrapper ( ) ) ; <nl> } <nl> / / Exit the JS frame and remove the parameters ( except function ) , and <nl> / / return . <nl> static void CallCompileOptimized ( MacroAssembler * masm , bool concurrent ) { <nl> FrameScope scope ( masm , StackFrame : : INTERNAL ) ; <nl> / / Push a copy of the function onto the stack . <nl> __ push ( r1 ) ; <nl> - / / Push call kind information and function as parameter to the runtime call . <nl> - __ Push ( r5 , r1 ) ; <nl> + / / Push function as parameter to the runtime call . <nl> + __ Push ( r1 ) ; <nl> / / Whether to compile in a background thread . <nl> __ Push ( masm - > isolate ( ) - > factory ( ) - > ToBoolean ( concurrent ) ) ; <nl> <nl> __ CallRuntime ( Runtime : : kCompileOptimized , 2 ) ; <nl> - / / Restore call kind information . <nl> - __ pop ( r5 ) ; <nl> / / Restore receiver . <nl> __ pop ( r1 ) ; <nl> } <nl> void Builtins : : Generate_FunctionCall ( MacroAssembler * masm ) { <nl> __ b ( eq , & function ) ; <nl> / / Expected number of arguments is 0 for CALL_NON_FUNCTION . <nl> __ mov ( r2 , Operand : : Zero ( ) ) ; <nl> - __ SetCallKind ( r5 , CALL_AS_METHOD ) ; <nl> __ cmp ( r4 , Operand ( 1 ) ) ; <nl> __ b ( ne , & non_proxy ) ; <nl> <nl> void Builtins : : Generate_FunctionCall ( MacroAssembler * masm ) { <nl> FieldMemOperand ( r3 , SharedFunctionInfo : : kFormalParameterCountOffset ) ) ; <nl> __ SmiUntag ( r2 ) ; <nl> __ ldr ( r3 , FieldMemOperand ( r1 , JSFunction : : kCodeEntryOffset ) ) ; <nl> - __ SetCallKind ( r5 , CALL_AS_FUNCTION ) ; <nl> __ cmp ( r2 , r0 ) ; / / Check formal and actual parameter counts . 
<nl> __ Jump ( masm - > isolate ( ) - > builtins ( ) - > ArgumentsAdaptorTrampoline ( ) , <nl> RelocInfo : : CODE_TARGET , <nl> ne ) ; <nl> <nl> ParameterCount expected ( 0 ) ; <nl> - __ InvokeCode ( r3 , expected , expected , JUMP_FUNCTION , <nl> - NullCallWrapper ( ) , CALL_AS_FUNCTION ) ; <nl> + __ InvokeCode ( r3 , expected , expected , JUMP_FUNCTION , NullCallWrapper ( ) ) ; <nl> } <nl> <nl> <nl> void Builtins : : Generate_FunctionApply ( MacroAssembler * masm ) { <nl> __ ldr ( r1 , MemOperand ( fp , kFunctionOffset ) ) ; <nl> __ CompareObjectType ( r1 , r2 , r2 , JS_FUNCTION_TYPE ) ; <nl> __ b ( ne , & call_proxy ) ; <nl> - __ InvokeFunction ( r1 , actual , CALL_FUNCTION , <nl> - NullCallWrapper ( ) , CALL_AS_FUNCTION ) ; <nl> + __ InvokeFunction ( r1 , actual , CALL_FUNCTION , NullCallWrapper ( ) ) ; <nl> <nl> frame_scope . GenerateLeaveFrame ( ) ; <nl> __ add ( sp , sp , Operand ( 3 * kPointerSize ) ) ; <nl> void Builtins : : Generate_FunctionApply ( MacroAssembler * masm ) { <nl> __ push ( r1 ) ; / / add function proxy as last argument <nl> __ add ( r0 , r0 , Operand ( 1 ) ) ; <nl> __ mov ( r2 , Operand : : Zero ( ) ) ; <nl> - __ SetCallKind ( r5 , CALL_AS_FUNCTION ) ; <nl> __ GetBuiltinEntry ( r3 , Builtins : : CALL_FUNCTION_PROXY ) ; <nl> __ Call ( masm - > isolate ( ) - > builtins ( ) - > ArgumentsAdaptorTrampoline ( ) , <nl> RelocInfo : : CODE_TARGET ) ; <nl> void Builtins : : Generate_ArgumentsAdaptorTrampoline ( MacroAssembler * masm ) { <nl> / / - - r1 : function ( passed through to callee ) <nl> / / - - r2 : expected number of arguments <nl> / / - - r3 : code entry to call <nl> - / / - - r5 : call kind information <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> Label invoke , dont_adapt_arguments ; <nl> mmm a / src / arm / code - stubs - arm . cc <nl> ppp b / src / arm / code - stubs - arm . cc <nl> void CallFunctionStub : : Generate ( MacroAssembler * masm ) { <nl> / / r1 : pushed function <nl> ParameterCount actual ( argc_ ) ; <nl> <nl> - __ InvokeFunction ( r1 , <nl> - actual , <nl> - JUMP_FUNCTION , <nl> - NullCallWrapper ( ) , <nl> - CALL_AS_FUNCTION ) ; <nl> + __ InvokeFunction ( r1 , actual , JUMP_FUNCTION , NullCallWrapper ( ) ) ; <nl> <nl> / / Slow - case : Non - function called . <nl> __ bind ( & slow ) ; <nl> void CallFunctionStub : : Generate ( MacroAssembler * masm ) { <nl> __ mov ( r0 , Operand ( argc_ + 1 , RelocInfo : : NONE32 ) ) ; <nl> __ mov ( r2 , Operand : : Zero ( ) ) ; <nl> __ GetBuiltinEntry ( r3 , Builtins : : CALL_FUNCTION_PROXY ) ; <nl> - __ SetCallKind ( r5 , CALL_AS_FUNCTION ) ; <nl> { <nl> Handle < Code > adaptor = <nl> masm - > isolate ( ) - > builtins ( ) - > ArgumentsAdaptorTrampoline ( ) ; <nl> void CallFunctionStub : : Generate ( MacroAssembler * masm ) { <nl> __ mov ( r0 , Operand ( argc_ ) ) ; / / Set up the number of arguments . <nl> __ mov ( r2 , Operand : : Zero ( ) ) ; <nl> __ GetBuiltinEntry ( r3 , Builtins : : CALL_NON_FUNCTION ) ; <nl> - __ SetCallKind ( r5 , CALL_AS_FUNCTION ) ; <nl> __ Jump ( masm - > isolate ( ) - > builtins ( ) - > ArgumentsAdaptorTrampoline ( ) , <nl> RelocInfo : : CODE_TARGET ) ; <nl> } <nl> void CallConstructStub : : Generate ( MacroAssembler * masm ) { <nl> __ bind ( & do_call ) ; <nl> / / Set expected number of arguments to zero ( not changing r0 ) . 
<nl> __ mov ( r2 , Operand : : Zero ( ) ) ; <nl> - __ SetCallKind ( r5 , CALL_AS_METHOD ) ; <nl> __ Jump ( masm - > isolate ( ) - > builtins ( ) - > ArgumentsAdaptorTrampoline ( ) , <nl> RelocInfo : : CODE_TARGET ) ; <nl> } <nl> void StubFailureTailCallTrampolineStub : : Generate ( MacroAssembler * masm ) { <nl> __ sub ( r0 , r0 , Operand ( 1 ) ) ; <nl> masm - > LeaveFrame ( StackFrame : : STUB_FAILURE_TRAMPOLINE ) ; <nl> ParameterCount argument_count ( r0 ) ; <nl> - __ InvokeFunction ( <nl> - r1 , argument_count , JUMP_FUNCTION , NullCallWrapper ( ) , CALL_AS_METHOD ) ; <nl> + __ InvokeFunction ( r1 , argument_count , JUMP_FUNCTION , NullCallWrapper ( ) ) ; <nl> } <nl> <nl> <nl> mmm a / src / arm / full - codegen - arm . cc <nl> ppp b / src / arm / full - codegen - arm . cc <nl> void FullCodeGenerator : : Generate ( ) { <nl> / / object ) . <nl> if ( info - > is_classic_mode ( ) & & ! info - > is_native ( ) ) { <nl> Label ok ; <nl> - __ cmp ( r5 , Operand : : Zero ( ) ) ; <nl> - __ b ( eq , & ok ) ; <nl> - <nl> int receiver_offset = info - > scope ( ) - > num_parameters ( ) * kPointerSize ; <nl> __ ldr ( r2 , MemOperand ( sp , receiver_offset ) ) ; <nl> __ CompareRoot ( r2 , Heap : : kUndefinedValueRootIndex ) ; <nl> void FullCodeGenerator : : EmitCallFunction ( CallRuntime * expr ) { <nl> / / InvokeFunction requires the function in r1 . Move it in there . <nl> __ mov ( r1 , result_register ( ) ) ; <nl> ParameterCount count ( arg_count ) ; <nl> - __ InvokeFunction ( r1 , count , CALL_FUNCTION , <nl> - NullCallWrapper ( ) , CALL_AS_FUNCTION ) ; <nl> + __ InvokeFunction ( r1 , count , CALL_FUNCTION , NullCallWrapper ( ) ) ; <nl> __ ldr ( cp , MemOperand ( fp , StandardFrameConstants : : kContextOffset ) ) ; <nl> __ jmp ( & done ) ; <nl> <nl> mmm a / src / arm / ic - arm . cc <nl> ppp b / src / arm / ic - arm . cc <nl> static void GenerateFunctionTailCall ( MacroAssembler * masm , <nl> <nl> / / Invoke the function . <nl> ParameterCount actual ( argc ) ; <nl> - __ InvokeFunction ( r1 , actual , JUMP_FUNCTION , <nl> - NullCallWrapper ( ) , CALL_AS_METHOD ) ; <nl> + __ InvokeFunction ( r1 , actual , JUMP_FUNCTION , NullCallWrapper ( ) ) ; <nl> } <nl> <nl> <nl> void CallICBase : : GenerateMiss ( MacroAssembler * masm , <nl> } <nl> <nl> / / Invoke the function . <nl> - CallKind call_kind = CallICBase : : Contextual : : decode ( extra_state ) <nl> - ? CALL_AS_FUNCTION <nl> - : CALL_AS_METHOD ; <nl> ParameterCount actual ( argc ) ; <nl> - __ InvokeFunction ( r1 , <nl> - actual , <nl> - JUMP_FUNCTION , <nl> - NullCallWrapper ( ) , <nl> - call_kind ) ; <nl> + __ InvokeFunction ( r1 , actual , JUMP_FUNCTION , NullCallWrapper ( ) ) ; <nl> } <nl> <nl> <nl> mmm a / src / arm / lithium - codegen - arm . cc <nl> ppp b / src / arm / lithium - codegen - arm . cc <nl> bool LCodeGen : : GeneratePrologue ( ) { <nl> info_ - > is_classic_mode ( ) & & <nl> ! info_ - > is_native ( ) ) { <nl> Label ok ; <nl> - __ cmp ( r5 , Operand : : Zero ( ) ) ; <nl> - __ b ( eq , & ok ) ; <nl> - <nl> int receiver_offset = info_ - > scope ( ) - > num_parameters ( ) * kPointerSize ; <nl> __ ldr ( r2 , MemOperand ( sp , receiver_offset ) ) ; <nl> __ CompareRoot ( r2 , Heap : : kUndefinedValueRootIndex ) ; <nl> void LCodeGen : : DoApplyArguments ( LApplyArguments * instr ) { <nl> / / The number of arguments is stored in receiver which is r0 , as expected <nl> / / by InvokeFunction . 
<nl> ParameterCount actual ( receiver ) ; <nl> - __ InvokeFunction ( function , actual , CALL_FUNCTION , <nl> - safepoint_generator , CALL_AS_FUNCTION ) ; <nl> + __ InvokeFunction ( function , actual , CALL_FUNCTION , safepoint_generator ) ; <nl> } <nl> <nl> <nl> void LCodeGen : : CallKnownFunction ( Handle < JSFunction > function , <nl> int formal_parameter_count , <nl> int arity , <nl> LInstruction * instr , <nl> - CallKind call_kind , <nl> R1State r1_state ) { <nl> bool dont_adapt_arguments = <nl> formal_parameter_count = = SharedFunctionInfo : : kDontAdaptArgumentsSentinel ; <nl> void LCodeGen : : CallKnownFunction ( Handle < JSFunction > function , <nl> } <nl> <nl> / / Invoke function . <nl> - __ SetCallKind ( r5 , call_kind ) ; <nl> __ ldr ( ip , FieldMemOperand ( r1 , JSFunction : : kCodeEntryOffset ) ) ; <nl> __ Call ( ip ) ; <nl> <nl> void LCodeGen : : CallKnownFunction ( Handle < JSFunction > function , <nl> SafepointGenerator generator ( this , pointers , Safepoint : : kLazyDeopt ) ; <nl> ParameterCount count ( arity ) ; <nl> ParameterCount expected ( formal_parameter_count ) ; <nl> - __ InvokeFunction ( <nl> - function , expected , count , CALL_FUNCTION , generator , call_kind ) ; <nl> + __ InvokeFunction ( function , expected , count , CALL_FUNCTION , generator ) ; <nl> } <nl> } <nl> <nl> void LCodeGen : : DoCallConstantFunction ( LCallConstantFunction * instr ) { <nl> instr - > hydrogen ( ) - > formal_parameter_count ( ) , <nl> instr - > arity ( ) , <nl> instr , <nl> - CALL_AS_FUNCTION , <nl> R1_UNINITIALIZED ) ; <nl> } <nl> <nl> void LCodeGen : : DoInvokeFunction ( LInvokeFunction * instr ) { <nl> LPointerMap * pointers = instr - > pointer_map ( ) ; <nl> SafepointGenerator generator ( this , pointers , Safepoint : : kLazyDeopt ) ; <nl> ParameterCount count ( instr - > arity ( ) ) ; <nl> - __ InvokeFunction ( r1 , count , CALL_FUNCTION , generator , CALL_AS_FUNCTION ) ; <nl> + __ InvokeFunction ( r1 , count , CALL_FUNCTION , generator ) ; <nl> } else { <nl> CallKnownFunction ( known_function , <nl> instr - > hydrogen ( ) - > formal_parameter_count ( ) , <nl> instr - > arity ( ) , <nl> instr , <nl> - CALL_AS_FUNCTION , <nl> R1_CONTAINS_TARGET ) ; <nl> } <nl> } <nl> void LCodeGen : : DoCallKnownGlobal ( LCallKnownGlobal * instr ) { <nl> instr - > hydrogen ( ) - > formal_parameter_count ( ) , <nl> instr - > arity ( ) , <nl> instr , <nl> - CALL_AS_FUNCTION , <nl> R1_UNINITIALIZED ) ; <nl> } <nl> <nl> mmm a / src / arm / lithium - codegen - arm . h <nl> ppp b / src / arm / lithium - codegen - arm . h <nl> class LCodeGen : public LCodeGenBase { <nl> int formal_parameter_count , <nl> int arity , <nl> LInstruction * instr , <nl> - CallKind call_kind , <nl> R1State r1_state ) ; <nl> <nl> void RecordSafepointWithLazyDeopt ( LInstruction * instr , <nl> mmm a / src / arm / macro - assembler - arm . cc <nl> ppp b / src / arm / macro - assembler - arm . cc <nl> void MacroAssembler : : MovFromFloatParameter ( DwVfpRegister dst ) { <nl> } <nl> <nl> <nl> - void MacroAssembler : : SetCallKind ( Register dst , CallKind call_kind ) { <nl> - / / This macro takes the dst register to make the code more readable <nl> - / / at the call sites . However , the dst register has to be r5 to <nl> - / / follow the calling convention which requires the call type to be <nl> - / / in r5 . <nl> - ASSERT ( dst . 
is ( r5 ) ) ; <nl> - if ( call_kind = = CALL_AS_FUNCTION ) { <nl> - mov ( dst , Operand ( Smi : : FromInt ( 1 ) ) ) ; <nl> - } else { <nl> - mov ( dst , Operand ( Smi : : FromInt ( 0 ) ) ) ; <nl> - } <nl> - } <nl> - <nl> - <nl> void MacroAssembler : : InvokePrologue ( const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> Handle < Code > code_constant , <nl> void MacroAssembler : : InvokePrologue ( const ParameterCount & expected , <nl> Label * done , <nl> bool * definitely_mismatches , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) { <nl> + const CallWrapper & call_wrapper ) { <nl> bool definitely_matches = false ; <nl> * definitely_mismatches = false ; <nl> Label regular_invoke ; <nl> void MacroAssembler : : InvokePrologue ( const ParameterCount & expected , <nl> isolate ( ) - > builtins ( ) - > ArgumentsAdaptorTrampoline ( ) ; <nl> if ( flag = = CALL_FUNCTION ) { <nl> call_wrapper . BeforeCall ( CallSize ( adaptor ) ) ; <nl> - SetCallKind ( r5 , call_kind ) ; <nl> Call ( adaptor ) ; <nl> call_wrapper . AfterCall ( ) ; <nl> if ( ! * definitely_mismatches ) { <nl> b ( done ) ; <nl> } <nl> } else { <nl> - SetCallKind ( r5 , call_kind ) ; <nl> Jump ( adaptor , RelocInfo : : CODE_TARGET ) ; <nl> } <nl> bind ( & regular_invoke ) ; <nl> void MacroAssembler : : InvokeCode ( Register code , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) { <nl> + const CallWrapper & call_wrapper ) { <nl> / / You can ' t call a function without a valid frame . <nl> ASSERT ( flag = = JUMP_FUNCTION | | has_frame ( ) ) ; <nl> <nl> void MacroAssembler : : InvokeCode ( Register code , <nl> bool definitely_mismatches = false ; <nl> InvokePrologue ( expected , actual , Handle < Code > : : null ( ) , code , <nl> & done , & definitely_mismatches , flag , <nl> - call_wrapper , call_kind ) ; <nl> + call_wrapper ) ; <nl> if ( ! definitely_mismatches ) { <nl> if ( flag = = CALL_FUNCTION ) { <nl> call_wrapper . BeforeCall ( CallSize ( code ) ) ; <nl> - SetCallKind ( r5 , call_kind ) ; <nl> Call ( code ) ; <nl> call_wrapper . AfterCall ( ) ; <nl> } else { <nl> ASSERT ( flag = = JUMP_FUNCTION ) ; <nl> - SetCallKind ( r5 , call_kind ) ; <nl> Jump ( code ) ; <nl> } <nl> <nl> void MacroAssembler : : InvokeCode ( Handle < Code > code , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> RelocInfo : : Mode rmode , <nl> - InvokeFlag flag , <nl> - CallKind call_kind ) { <nl> + InvokeFlag flag ) { <nl> / / You can ' t call a function without a valid frame . <nl> ASSERT ( flag = = JUMP_FUNCTION | | has_frame ( ) ) ; <nl> <nl> void MacroAssembler : : InvokeCode ( Handle < Code > code , <nl> bool definitely_mismatches = false ; <nl> InvokePrologue ( expected , actual , code , no_reg , <nl> & done , & definitely_mismatches , flag , <nl> - NullCallWrapper ( ) , call_kind ) ; <nl> + NullCallWrapper ( ) ) ; <nl> if ( ! 
definitely_mismatches ) { <nl> if ( flag = = CALL_FUNCTION ) { <nl> - SetCallKind ( r5 , call_kind ) ; <nl> Call ( code , rmode ) ; <nl> } else { <nl> - SetCallKind ( r5 , call_kind ) ; <nl> Jump ( code , rmode ) ; <nl> } <nl> <nl> void MacroAssembler : : InvokeCode ( Handle < Code > code , <nl> void MacroAssembler : : InvokeFunction ( Register fun , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) { <nl> + const CallWrapper & call_wrapper ) { <nl> / / You can ' t call a function without a valid frame . <nl> ASSERT ( flag = = JUMP_FUNCTION | | has_frame ( ) ) ; <nl> <nl> void MacroAssembler : : InvokeFunction ( Register fun , <nl> FieldMemOperand ( r1 , JSFunction : : kCodeEntryOffset ) ) ; <nl> <nl> ParameterCount expected ( expected_reg ) ; <nl> - InvokeCode ( code_reg , expected , actual , flag , call_wrapper , call_kind ) ; <nl> + InvokeCode ( code_reg , expected , actual , flag , call_wrapper ) ; <nl> } <nl> <nl> <nl> void MacroAssembler : : InvokeFunction ( Register function , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) { <nl> + const CallWrapper & call_wrapper ) { <nl> / / You can ' t call a function without a valid frame . <nl> ASSERT ( flag = = JUMP_FUNCTION | | has_frame ( ) ) ; <nl> <nl> void MacroAssembler : : InvokeFunction ( Register function , <nl> / / allow recompilation to take effect without changing any of the <nl> / / call sites . <nl> ldr ( r3 , FieldMemOperand ( r1 , JSFunction : : kCodeEntryOffset ) ) ; <nl> - InvokeCode ( r3 , expected , actual , flag , call_wrapper , call_kind ) ; <nl> + InvokeCode ( r3 , expected , actual , flag , call_wrapper ) ; <nl> } <nl> <nl> <nl> void MacroAssembler : : InvokeFunction ( Handle < JSFunction > function , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) { <nl> + const CallWrapper & call_wrapper ) { <nl> Move ( r1 , function ) ; <nl> - InvokeFunction ( r1 , expected , actual , flag , call_wrapper , call_kind ) ; <nl> + InvokeFunction ( r1 , expected , actual , flag , call_wrapper ) ; <nl> } <nl> <nl> <nl> void MacroAssembler : : InvokeBuiltin ( Builtins : : JavaScript id , <nl> GetBuiltinEntry ( r2 , id ) ; <nl> if ( flag = = CALL_FUNCTION ) { <nl> call_wrapper . BeforeCall ( CallSize ( r2 ) ) ; <nl> - SetCallKind ( r5 , CALL_AS_METHOD ) ; <nl> Call ( r2 ) ; <nl> call_wrapper . AfterCall ( ) ; <nl> } else { <nl> ASSERT ( flag = = JUMP_FUNCTION ) ; <nl> - SetCallKind ( r5 , CALL_AS_METHOD ) ; <nl> Jump ( r2 ) ; <nl> } <nl> } <nl> mmm a / src / arm / macro - assembler - arm . h <nl> ppp b / src / arm / macro - assembler - arm . h <nl> class MacroAssembler : public Assembler { <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> / / JavaScript invokes <nl> <nl> - / / Set up call kind marking in ecx . The method takes ecx as an <nl> - / / explicit first parameter to make the code more readable at the <nl> - / / call sites . <nl> - void SetCallKind ( Register dst , CallKind kind ) ; <nl> - <nl> / / Invoke the JavaScript function code by either calling or jumping . 
<nl> void InvokeCode ( Register code , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) ; <nl> + const CallWrapper & call_wrapper ) ; <nl> <nl> void InvokeCode ( Handle < Code > code , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> RelocInfo : : Mode rmode , <nl> - InvokeFlag flag , <nl> - CallKind call_kind ) ; <nl> + InvokeFlag flag ) ; <nl> <nl> / / Invoke the JavaScript function in the given register . Changes the <nl> / / current context to the context in the function before invoking . <nl> void InvokeFunction ( Register function , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) ; <nl> + const CallWrapper & call_wrapper ) ; <nl> <nl> void InvokeFunction ( Register function , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) ; <nl> + const CallWrapper & call_wrapper ) ; <nl> <nl> void InvokeFunction ( Handle < JSFunction > function , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) ; <nl> + const CallWrapper & call_wrapper ) ; <nl> <nl> void IsObjectJSObjectType ( Register heap_object , <nl> Register map , <nl> class MacroAssembler : public Assembler { <nl> Label * done , <nl> bool * definitely_mismatches , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) ; <nl> + const CallWrapper & call_wrapper ) ; <nl> <nl> void InitializeNewString ( Register string , <nl> Register length , <nl> mmm a / src / arm / stub - cache - arm . cc <nl> ppp b / src / arm / stub - cache - arm . cc <nl> void CallStubCompiler : : GenerateJumpFunction ( Handle < Object > object , <nl> PatchImplicitReceiver ( object ) ; <nl> <nl> / / Invoke the function . <nl> - __ InvokeFunction ( r1 , arguments ( ) , JUMP_FUNCTION , <nl> - NullCallWrapper ( ) , call_kind ( ) ) ; <nl> + __ InvokeFunction ( r1 , arguments ( ) , JUMP_FUNCTION , NullCallWrapper ( ) ) ; <nl> } <nl> <nl> <nl> void StoreStubCompiler : : GenerateStoreViaSetter ( <nl> ParameterCount actual ( 1 ) ; <nl> ParameterCount expected ( setter ) ; <nl> __ InvokeFunction ( setter , expected , actual , <nl> - CALL_FUNCTION , NullCallWrapper ( ) , CALL_AS_METHOD ) ; <nl> + CALL_FUNCTION , NullCallWrapper ( ) ) ; <nl> } else { <nl> / / If we generate a global code snippet for deoptimization only , remember <nl> / / the place to continue after deoptimization . <nl> void LoadStubCompiler : : GenerateLoadViaGetter ( MacroAssembler * masm , <nl> ParameterCount actual ( 0 ) ; <nl> ParameterCount expected ( getter ) ; <nl> __ InvokeFunction ( getter , expected , actual , <nl> - CALL_FUNCTION , NullCallWrapper ( ) , CALL_AS_METHOD ) ; <nl> + CALL_FUNCTION , NullCallWrapper ( ) ) ; <nl> } else { <nl> / / If we generate a global code snippet for deoptimization only , remember <nl> / / the place to continue after deoptimization . <nl> mmm a / src / ast . cc <nl> ppp b / src / ast . 
cc <nl> Handle < JSObject > Call : : GetPrototypeForPrimitiveCheck ( <nl> <nl> <nl> void Call : : RecordTypeFeedback ( TypeFeedbackOracle * oracle , <nl> - CallKind call_kind ) { <nl> + ContextualMode contextual_mode ) { <nl> is_monomorphic_ = oracle - > CallIsMonomorphic ( CallFeedbackId ( ) ) ; <nl> Property * property = expression ( ) - > AsProperty ( ) ; <nl> if ( property = = NULL ) { <nl> void Call : : RecordTypeFeedback ( TypeFeedbackOracle * oracle , <nl> receiver_types_ . Clear ( ) ; <nl> if ( check_type_ = = RECEIVER_MAP_CHECK ) { <nl> oracle - > CallReceiverTypes ( CallFeedbackId ( ) , <nl> - name , arguments ( ) - > length ( ) , call_kind , & receiver_types_ ) ; <nl> + name , arguments ( ) - > length ( ) , contextual_mode , & receiver_types_ ) ; <nl> is_monomorphic_ = is_monomorphic_ & & receiver_types_ . length ( ) > 0 ; <nl> } else { <nl> holder_ = GetPrototypeForPrimitiveCheck ( check_type_ , oracle - > isolate ( ) ) ; <nl> mmm a / src / ast . h <nl> ppp b / src / ast . h <nl> class Call V8_FINAL : public Expression { <nl> <nl> / / Type feedback information . <nl> TypeFeedbackId CallFeedbackId ( ) const { return reuse ( id ( ) ) ; } <nl> - void RecordTypeFeedback ( TypeFeedbackOracle * oracle , CallKind call_kind ) ; <nl> + void RecordTypeFeedback ( TypeFeedbackOracle * oracle , <nl> + ContextualMode contextual_mode ) ; <nl> virtual SmallMapList * GetReceiverTypes ( ) V8_OVERRIDE { <nl> return & receiver_types_ ; <nl> } <nl> mmm a / src / hydrogen . cc <nl> ppp b / src / hydrogen . cc <nl> int HOptimizedGraphBuilder : : InliningAstSize ( Handle < JSFunction > target ) { <nl> } <nl> <nl> <nl> - bool HOptimizedGraphBuilder : : TryInline ( CallKind call_kind , <nl> - Handle < JSFunction > target , <nl> + bool HOptimizedGraphBuilder : : TryInline ( Handle < JSFunction > target , <nl> int arguments_count , <nl> HValue * implicit_return_value , <nl> BailoutId ast_id , <nl> bool HOptimizedGraphBuilder : : TryInline ( CallKind call_kind , <nl> <nl> <nl> bool HOptimizedGraphBuilder : : TryInlineCall ( Call * expr , bool drop_extra ) { <nl> - / / The function call we are inlining is a method call if the call <nl> - / / is a property call . <nl> - CallKind call_kind = ( expr - > expression ( ) - > AsProperty ( ) = = NULL ) <nl> - ? 
CALL_AS_FUNCTION <nl> - : CALL_AS_METHOD ; <nl> - <nl> - return TryInline ( call_kind , <nl> - expr - > target ( ) , <nl> + return TryInline ( expr - > target ( ) , <nl> expr - > arguments ( ) - > length ( ) , <nl> NULL , <nl> expr - > id ( ) , <nl> bool HOptimizedGraphBuilder : : TryInlineCall ( Call * expr , bool drop_extra ) { <nl> <nl> bool HOptimizedGraphBuilder : : TryInlineConstruct ( CallNew * expr , <nl> HValue * implicit_return_value ) { <nl> - return TryInline ( CALL_AS_FUNCTION , <nl> - expr - > target ( ) , <nl> + return TryInline ( expr - > target ( ) , <nl> expr - > arguments ( ) - > length ( ) , <nl> implicit_return_value , <nl> expr - > id ( ) , <nl> bool HOptimizedGraphBuilder : : TryInlineConstruct ( CallNew * expr , <nl> bool HOptimizedGraphBuilder : : TryInlineGetter ( Handle < JSFunction > getter , <nl> BailoutId ast_id , <nl> BailoutId return_id ) { <nl> - return TryInline ( CALL_AS_METHOD , <nl> - getter , <nl> + return TryInline ( getter , <nl> 0 , <nl> NULL , <nl> ast_id , <nl> bool HOptimizedGraphBuilder : : TryInlineSetter ( Handle < JSFunction > setter , <nl> BailoutId id , <nl> BailoutId assignment_id , <nl> HValue * implicit_return_value ) { <nl> - return TryInline ( CALL_AS_METHOD , <nl> - setter , <nl> + return TryInline ( setter , <nl> 1 , <nl> implicit_return_value , <nl> id , assignment_id , <nl> bool HOptimizedGraphBuilder : : TryInlineSetter ( Handle < JSFunction > setter , <nl> bool HOptimizedGraphBuilder : : TryInlineApply ( Handle < JSFunction > function , <nl> Call * expr , <nl> int arguments_count ) { <nl> - return TryInline ( CALL_AS_METHOD , <nl> - function , <nl> + return TryInline ( function , <nl> arguments_count , <nl> NULL , <nl> expr - > id ( ) , <nl> mmm a / src / hydrogen . h <nl> ppp b / src / hydrogen . h <nl> class HOptimizedGraphBuilder : public HGraphBuilder , public AstVisitor { <nl> Handle < JSFunction > target ) ; <nl> <nl> int InliningAstSize ( Handle < JSFunction > target ) ; <nl> - bool TryInline ( CallKind call_kind , <nl> - Handle < JSFunction > target , <nl> + bool TryInline ( Handle < JSFunction > target , <nl> int arguments_count , <nl> HValue * implicit_return_value , <nl> BailoutId ast_id , <nl> mmm a / src / ia32 / builtins - ia32 . cc <nl> ppp b / src / ia32 / builtins - ia32 . cc <nl> static void CallRuntimePassFunction ( <nl> FrameScope scope ( masm , StackFrame : : INTERNAL ) ; <nl> / / Push a copy of the function . <nl> __ push ( edi ) ; <nl> - / / Push call kind information . <nl> - __ push ( ecx ) ; <nl> / / Function is also the parameter to the runtime call . <nl> __ push ( edi ) ; <nl> <nl> __ CallRuntime ( function_id , 1 ) ; <nl> - / / Restore call kind information . <nl> - __ pop ( ecx ) ; <nl> / / Restore receiver . <nl> __ pop ( edi ) ; <nl> } <nl> static void Generate_JSConstructStubHelper ( MacroAssembler * masm , <nl> masm - > isolate ( ) - > builtins ( ) - > HandleApiCallConstruct ( ) ; <nl> ParameterCount expected ( 0 ) ; <nl> __ InvokeCode ( code , expected , expected , RelocInfo : : CODE_TARGET , <nl> - CALL_FUNCTION , NullCallWrapper ( ) , CALL_AS_METHOD ) ; <nl> + CALL_FUNCTION , NullCallWrapper ( ) ) ; <nl> } else { <nl> ParameterCount actual ( eax ) ; <nl> __ InvokeFunction ( edi , actual , CALL_FUNCTION , <nl> - NullCallWrapper ( ) , CALL_AS_METHOD ) ; <nl> + NullCallWrapper ( ) ) ; <nl> } <nl> <nl> / / Store offset of return address for deoptimizer . 
<nl> static void Generate_JSEntryTrampolineHelper ( MacroAssembler * masm , <nl> } else { <nl> ParameterCount actual ( eax ) ; <nl> __ InvokeFunction ( edi , actual , CALL_FUNCTION , <nl> - NullCallWrapper ( ) , CALL_AS_METHOD ) ; <nl> + NullCallWrapper ( ) ) ; <nl> } <nl> <nl> / / Exit the internal frame . Notice that this also removes the empty . <nl> static void CallCompileOptimized ( MacroAssembler * masm , bool concurrent ) { <nl> FrameScope scope ( masm , StackFrame : : INTERNAL ) ; <nl> / / Push a copy of the function . <nl> __ push ( edi ) ; <nl> - / / Push call kind information . <nl> - __ push ( ecx ) ; <nl> / / Function is also the parameter to the runtime call . <nl> __ push ( edi ) ; <nl> / / Whether to compile in a background thread . <nl> __ Push ( masm - > isolate ( ) - > factory ( ) - > ToBoolean ( concurrent ) ) ; <nl> <nl> __ CallRuntime ( Runtime : : kCompileOptimized , 2 ) ; <nl> - / / Restore call kind information . <nl> - __ pop ( ecx ) ; <nl> / / Restore receiver . <nl> __ pop ( edi ) ; <nl> } <nl> void Builtins : : Generate_FunctionCall ( MacroAssembler * masm ) { <nl> __ push ( edi ) ; / / re - add proxy object as additional argument <nl> __ push ( edx ) ; <nl> __ inc ( eax ) ; <nl> - __ SetCallKind ( ecx , CALL_AS_FUNCTION ) ; <nl> __ GetBuiltinEntry ( edx , Builtins : : CALL_FUNCTION_PROXY ) ; <nl> __ jmp ( masm - > isolate ( ) - > builtins ( ) - > ArgumentsAdaptorTrampoline ( ) , <nl> RelocInfo : : CODE_TARGET ) ; <nl> <nl> __ bind ( & non_proxy ) ; <nl> - __ SetCallKind ( ecx , CALL_AS_METHOD ) ; <nl> __ GetBuiltinEntry ( edx , Builtins : : CALL_NON_FUNCTION ) ; <nl> __ jmp ( masm - > isolate ( ) - > builtins ( ) - > ArgumentsAdaptorTrampoline ( ) , <nl> RelocInfo : : CODE_TARGET ) ; <nl> void Builtins : : Generate_FunctionCall ( MacroAssembler * masm ) { <nl> FieldOperand ( edx , SharedFunctionInfo : : kFormalParameterCountOffset ) ) ; <nl> __ mov ( edx , FieldOperand ( edi , JSFunction : : kCodeEntryOffset ) ) ; <nl> __ SmiUntag ( ebx ) ; <nl> - __ SetCallKind ( ecx , CALL_AS_FUNCTION ) ; <nl> __ cmp ( eax , ebx ) ; <nl> __ j ( not_equal , <nl> masm - > isolate ( ) - > builtins ( ) - > ArgumentsAdaptorTrampoline ( ) ) ; <nl> <nl> ParameterCount expected ( 0 ) ; <nl> - __ InvokeCode ( edx , expected , expected , JUMP_FUNCTION , NullCallWrapper ( ) , <nl> - CALL_AS_FUNCTION ) ; <nl> + __ InvokeCode ( edx , expected , expected , JUMP_FUNCTION , NullCallWrapper ( ) ) ; <nl> } <nl> <nl> <nl> void Builtins : : Generate_FunctionApply ( MacroAssembler * masm ) { <nl> __ mov ( edi , Operand ( ebp , kFunctionOffset ) ) ; <nl> __ CmpObjectType ( edi , JS_FUNCTION_TYPE , ecx ) ; <nl> __ j ( not_equal , & call_proxy ) ; <nl> - __ InvokeFunction ( edi , actual , CALL_FUNCTION , <nl> - NullCallWrapper ( ) , CALL_AS_FUNCTION ) ; <nl> + __ InvokeFunction ( edi , actual , CALL_FUNCTION , NullCallWrapper ( ) ) ; <nl> <nl> frame_scope . 
GenerateLeaveFrame ( ) ; <nl> __ ret ( 3 * kPointerSize ) ; / / remove this , receiver , and arguments <nl> void Builtins : : Generate_FunctionApply ( MacroAssembler * masm ) { <nl> __ push ( edi ) ; / / add function proxy as last argument <nl> __ inc ( eax ) ; <nl> __ Set ( ebx , Immediate ( 0 ) ) ; <nl> - __ SetCallKind ( ecx , CALL_AS_FUNCTION ) ; <nl> __ GetBuiltinEntry ( edx , Builtins : : CALL_FUNCTION_PROXY ) ; <nl> __ call ( masm - > isolate ( ) - > builtins ( ) - > ArgumentsAdaptorTrampoline ( ) , <nl> RelocInfo : : CODE_TARGET ) ; <nl> void Builtins : : Generate_ArgumentsAdaptorTrampoline ( MacroAssembler * masm ) { <nl> / / mmmmmmmmm - - S t a t e mmmmmmmmmmmm - <nl> / / - - eax : actual number of arguments <nl> / / - - ebx : expected number of arguments <nl> - / / - - ecx : call kind information <nl> / / - - edx : code entry to call <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> mmm a / src / ia32 / code - stubs - ia32 . cc <nl> ppp b / src / ia32 / code - stubs - ia32 . cc <nl> void CallFunctionStub : : Generate ( MacroAssembler * masm ) { <nl> / / Fast - case : Just invoke the function . <nl> ParameterCount actual ( argc_ ) ; <nl> <nl> - __ InvokeFunction ( edi , <nl> - actual , <nl> - JUMP_FUNCTION , <nl> - NullCallWrapper ( ) , <nl> - CALL_AS_FUNCTION ) ; <nl> + __ InvokeFunction ( edi , actual , JUMP_FUNCTION , NullCallWrapper ( ) ) ; <nl> <nl> / / Slow - case : Non - function called . <nl> __ bind ( & slow ) ; <nl> void CallFunctionStub : : Generate ( MacroAssembler * masm ) { <nl> __ push ( ecx ) ; <nl> __ Set ( eax , Immediate ( argc_ + 1 ) ) ; <nl> __ Set ( ebx , Immediate ( 0 ) ) ; <nl> - __ SetCallKind ( ecx , CALL_AS_FUNCTION ) ; <nl> __ GetBuiltinEntry ( edx , Builtins : : CALL_FUNCTION_PROXY ) ; <nl> { <nl> Handle < Code > adaptor = isolate - > builtins ( ) - > ArgumentsAdaptorTrampoline ( ) ; <nl> void CallFunctionStub : : Generate ( MacroAssembler * masm ) { <nl> __ mov ( Operand ( esp , ( argc_ + 1 ) * kPointerSize ) , edi ) ; <nl> __ Set ( eax , Immediate ( argc_ ) ) ; <nl> __ Set ( ebx , Immediate ( 0 ) ) ; <nl> - __ SetCallKind ( ecx , CALL_AS_FUNCTION ) ; <nl> __ GetBuiltinEntry ( edx , Builtins : : CALL_NON_FUNCTION ) ; <nl> Handle < Code > adaptor = isolate - > builtins ( ) - > ArgumentsAdaptorTrampoline ( ) ; <nl> __ jmp ( adaptor , RelocInfo : : CODE_TARGET ) ; <nl> void CallConstructStub : : Generate ( MacroAssembler * masm ) { <nl> __ Set ( ebx , Immediate ( 0 ) ) ; <nl> Handle < Code > arguments_adaptor = <nl> masm - > isolate ( ) - > builtins ( ) - > ArgumentsAdaptorTrampoline ( ) ; <nl> - __ SetCallKind ( ecx , CALL_AS_METHOD ) ; <nl> __ jmp ( arguments_adaptor , RelocInfo : : CODE_TARGET ) ; <nl> } <nl> <nl> void StubFailureTailCallTrampolineStub : : Generate ( MacroAssembler * masm ) { <nl> __ sub ( eax , Immediate ( 1 ) ) ; <nl> masm - > LeaveFrame ( StackFrame : : STUB_FAILURE_TRAMPOLINE ) ; <nl> ParameterCount argument_count ( eax ) ; <nl> - __ InvokeFunction ( <nl> - edi , argument_count , JUMP_FUNCTION , NullCallWrapper ( ) , CALL_AS_METHOD ) ; <nl> + __ InvokeFunction ( edi , argument_count , JUMP_FUNCTION , NullCallWrapper ( ) ) ; <nl> } <nl> <nl> <nl> mmm a / src / ia32 / full - codegen - ia32 . cc <nl> ppp b / src / ia32 / full - codegen - ia32 . cc <nl> class JumpPatchSite BASE_EMBEDDED { <nl> / / formal parameter count expected by the function . <nl> / / <nl> / / The live registers are : <nl> - / / o ecx : CallKind <nl> / / o edi : the JS function object being called ( i . e . 
ourselves ) <nl> / / o esi : our context <nl> / / o ebp : our caller ' s frame pointer <nl> void FullCodeGenerator : : Generate ( ) { <nl> / / object ) . <nl> if ( info - > is_classic_mode ( ) & & ! info - > is_native ( ) ) { <nl> Label ok ; <nl> - __ test ( ecx , ecx ) ; <nl> - __ j ( zero , & ok , Label : : kNear ) ; <nl> - <nl> / / + 1 for return address . <nl> int receiver_offset = ( info - > scope ( ) - > num_parameters ( ) + 1 ) * kPointerSize ; <nl> __ mov ( ecx , Operand ( esp , receiver_offset ) ) ; <nl> void FullCodeGenerator : : EmitCallFunction ( CallRuntime * expr ) { <nl> / / InvokeFunction requires the function in edi . Move it in there . <nl> __ mov ( edi , result_register ( ) ) ; <nl> ParameterCount count ( arg_count ) ; <nl> - __ InvokeFunction ( edi , count , CALL_FUNCTION , <nl> - NullCallWrapper ( ) , CALL_AS_FUNCTION ) ; <nl> + __ InvokeFunction ( edi , count , CALL_FUNCTION , NullCallWrapper ( ) ) ; <nl> __ mov ( esi , Operand ( ebp , StandardFrameConstants : : kContextOffset ) ) ; <nl> __ jmp ( & done ) ; <nl> <nl> mmm a / src / ia32 / ic - ia32 . cc <nl> ppp b / src / ia32 / ic - ia32 . cc <nl> static void GenerateFunctionTailCall ( MacroAssembler * masm , <nl> <nl> / / Invoke the function . <nl> ParameterCount actual ( argc ) ; <nl> - __ InvokeFunction ( edi , actual , JUMP_FUNCTION , <nl> - NullCallWrapper ( ) , CALL_AS_METHOD ) ; <nl> + __ InvokeFunction ( edi , actual , JUMP_FUNCTION , NullCallWrapper ( ) ) ; <nl> } <nl> <nl> <nl> void CallICBase : : GenerateMiss ( MacroAssembler * masm , <nl> } <nl> <nl> / / Invoke the function . <nl> - CallKind call_kind = CallICBase : : Contextual : : decode ( extra_state ) <nl> - ? CALL_AS_FUNCTION <nl> - : CALL_AS_METHOD ; <nl> ParameterCount actual ( argc ) ; <nl> - __ InvokeFunction ( edi , <nl> - actual , <nl> - JUMP_FUNCTION , <nl> - NullCallWrapper ( ) , <nl> - call_kind ) ; <nl> + __ InvokeFunction ( edi , actual , JUMP_FUNCTION , NullCallWrapper ( ) ) ; <nl> } <nl> <nl> <nl> mmm a / src / ia32 / lithium - codegen - ia32 . cc <nl> ppp b / src / ia32 / lithium - codegen - ia32 . cc <nl> bool LCodeGen : : GeneratePrologue ( ) { <nl> info_ - > is_classic_mode ( ) & & <nl> ! info_ - > is_native ( ) ) { <nl> Label ok ; <nl> - __ test ( ecx , ecx ) ; <nl> - __ j ( zero , & ok , Label : : kNear ) ; <nl> - <nl> / / + 1 for return address . <nl> int receiver_offset = ( scope ( ) - > num_parameters ( ) + 1 ) * kPointerSize ; <nl> __ mov ( ecx , Operand ( esp , receiver_offset ) ) ; <nl> void LCodeGen : : DoApplyArguments ( LApplyArguments * instr ) { <nl> SafepointGenerator safepoint_generator ( <nl> this , pointers , Safepoint : : kLazyDeopt ) ; <nl> ParameterCount actual ( eax ) ; <nl> - __ InvokeFunction ( function , actual , CALL_FUNCTION , <nl> - safepoint_generator , CALL_AS_FUNCTION ) ; <nl> + __ InvokeFunction ( function , actual , CALL_FUNCTION , safepoint_generator ) ; <nl> } <nl> <nl> <nl> void LCodeGen : : CallKnownFunction ( Handle < JSFunction > function , <nl> int formal_parameter_count , <nl> int arity , <nl> LInstruction * instr , <nl> - CallKind call_kind , <nl> EDIState edi_state ) { <nl> bool dont_adapt_arguments = <nl> formal_parameter_count = = SharedFunctionInfo : : kDontAdaptArgumentsSentinel ; <nl> void LCodeGen : : CallKnownFunction ( Handle < JSFunction > function , <nl> } <nl> <nl> / / Invoke function directly . <nl> - __ SetCallKind ( ecx , call_kind ) ; <nl> if ( function . 
is_identical_to ( info ( ) - > closure ( ) ) ) { <nl> __ CallSelf ( ) ; <nl> } else { <nl> void LCodeGen : : CallKnownFunction ( Handle < JSFunction > function , <nl> this , pointers , Safepoint : : kLazyDeopt ) ; <nl> ParameterCount count ( arity ) ; <nl> ParameterCount expected ( formal_parameter_count ) ; <nl> - __ InvokeFunction ( <nl> - function , expected , count , CALL_FUNCTION , generator , call_kind ) ; <nl> + __ InvokeFunction ( function , expected , count , CALL_FUNCTION , generator ) ; <nl> } <nl> } <nl> <nl> void LCodeGen : : DoCallConstantFunction ( LCallConstantFunction * instr ) { <nl> instr - > hydrogen ( ) - > formal_parameter_count ( ) , <nl> instr - > arity ( ) , <nl> instr , <nl> - CALL_AS_FUNCTION , <nl> EDI_UNINITIALIZED ) ; <nl> } <nl> <nl> void LCodeGen : : DoInvokeFunction ( LInvokeFunction * instr ) { <nl> SafepointGenerator generator ( <nl> this , pointers , Safepoint : : kLazyDeopt ) ; <nl> ParameterCount count ( instr - > arity ( ) ) ; <nl> - __ InvokeFunction ( edi , count , CALL_FUNCTION , generator , CALL_AS_FUNCTION ) ; <nl> + __ InvokeFunction ( edi , count , CALL_FUNCTION , generator ) ; <nl> } else { <nl> CallKnownFunction ( known_function , <nl> instr - > hydrogen ( ) - > formal_parameter_count ( ) , <nl> instr - > arity ( ) , <nl> instr , <nl> - CALL_AS_FUNCTION , <nl> EDI_CONTAINS_TARGET ) ; <nl> } <nl> } <nl> void LCodeGen : : DoCallKnownGlobal ( LCallKnownGlobal * instr ) { <nl> instr - > hydrogen ( ) - > formal_parameter_count ( ) , <nl> instr - > arity ( ) , <nl> instr , <nl> - CALL_AS_FUNCTION , <nl> EDI_UNINITIALIZED ) ; <nl> } <nl> <nl> mmm a / src / ia32 / lithium - codegen - ia32 . h <nl> ppp b / src / ia32 / lithium - codegen - ia32 . h <nl> class LCodeGen : public LCodeGenBase { <nl> int formal_parameter_count , <nl> int arity , <nl> LInstruction * instr , <nl> - CallKind call_kind , <nl> EDIState edi_state ) ; <nl> <nl> void RecordSafepointWithLazyDeopt ( LInstruction * instr , <nl> mmm a / src / ia32 / macro - assembler - ia32 . cc <nl> ppp b / src / ia32 / macro - assembler - ia32 . cc <nl> void MacroAssembler : : JumpToExternalReference ( const ExternalReference & ext ) { <nl> } <nl> <nl> <nl> - void MacroAssembler : : SetCallKind ( Register dst , CallKind call_kind ) { <nl> - / / This macro takes the dst register to make the code more readable <nl> - / / at the call sites . However , the dst register has to be ecx to <nl> - / / follow the calling convention which requires the call type to be <nl> - / / in ecx . <nl> - ASSERT ( dst . is ( ecx ) ) ; <nl> - if ( call_kind = = CALL_AS_FUNCTION ) { <nl> - / / Set to some non - zero smi by updating the least significant <nl> - / / byte . <nl> - mov_b ( dst , 1 < < kSmiTagSize ) ; <nl> - } else { <nl> - / / Set to smi zero by clearing the register . <nl> - xor_ ( dst , dst ) ; <nl> - } <nl> - } <nl> - <nl> - <nl> void MacroAssembler : : InvokePrologue ( const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> Handle < Code > code_constant , <nl> void MacroAssembler : : InvokePrologue ( const ParameterCount & expected , <nl> bool * definitely_mismatches , <nl> InvokeFlag flag , <nl> Label : : Distance done_near , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) { <nl> + const CallWrapper & call_wrapper ) { <nl> bool definitely_matches = false ; <nl> * definitely_mismatches = false ; <nl> Label invoke ; <nl> void MacroAssembler : : InvokePrologue ( const ParameterCount & expected , <nl> <nl> if ( flag = = CALL_FUNCTION ) { <nl> call_wrapper . 
BeforeCall ( CallSize ( adaptor , RelocInfo : : CODE_TARGET ) ) ; <nl> - SetCallKind ( ecx , call_kind ) ; <nl> call ( adaptor , RelocInfo : : CODE_TARGET ) ; <nl> call_wrapper . AfterCall ( ) ; <nl> if ( ! * definitely_mismatches ) { <nl> jmp ( done , done_near ) ; <nl> } <nl> } else { <nl> - SetCallKind ( ecx , call_kind ) ; <nl> jmp ( adaptor , RelocInfo : : CODE_TARGET ) ; <nl> } <nl> bind ( & invoke ) ; <nl> void MacroAssembler : : InvokeCode ( const Operand & code , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) { <nl> + const CallWrapper & call_wrapper ) { <nl> / / You can ' t call a function without a valid frame . <nl> ASSERT ( flag = = JUMP_FUNCTION | | has_frame ( ) ) ; <nl> <nl> void MacroAssembler : : InvokeCode ( const Operand & code , <nl> bool definitely_mismatches = false ; <nl> InvokePrologue ( expected , actual , Handle < Code > : : null ( ) , code , <nl> & done , & definitely_mismatches , flag , Label : : kNear , <nl> - call_wrapper , call_kind ) ; <nl> + call_wrapper ) ; <nl> if ( ! definitely_mismatches ) { <nl> if ( flag = = CALL_FUNCTION ) { <nl> call_wrapper . BeforeCall ( CallSize ( code ) ) ; <nl> - SetCallKind ( ecx , call_kind ) ; <nl> call ( code ) ; <nl> call_wrapper . AfterCall ( ) ; <nl> } else { <nl> ASSERT ( flag = = JUMP_FUNCTION ) ; <nl> - SetCallKind ( ecx , call_kind ) ; <nl> jmp ( code ) ; <nl> } <nl> bind ( & done ) ; <nl> void MacroAssembler : : InvokeCode ( Handle < Code > code , <nl> const ParameterCount & actual , <nl> RelocInfo : : Mode rmode , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) { <nl> + const CallWrapper & call_wrapper ) { <nl> / / You can ' t call a function without a valid frame . <nl> ASSERT ( flag = = JUMP_FUNCTION | | has_frame ( ) ) ; <nl> <nl> void MacroAssembler : : InvokeCode ( Handle < Code > code , <nl> Operand dummy ( eax , 0 ) ; <nl> bool definitely_mismatches = false ; <nl> InvokePrologue ( expected , actual , code , dummy , & done , & definitely_mismatches , <nl> - flag , Label : : kNear , call_wrapper , call_kind ) ; <nl> + flag , Label : : kNear , call_wrapper ) ; <nl> if ( ! definitely_mismatches ) { <nl> if ( flag = = CALL_FUNCTION ) { <nl> call_wrapper . BeforeCall ( CallSize ( code , rmode ) ) ; <nl> - SetCallKind ( ecx , call_kind ) ; <nl> call ( code , rmode ) ; <nl> call_wrapper . AfterCall ( ) ; <nl> } else { <nl> ASSERT ( flag = = JUMP_FUNCTION ) ; <nl> - SetCallKind ( ecx , call_kind ) ; <nl> jmp ( code , rmode ) ; <nl> } <nl> bind ( & done ) ; <nl> void MacroAssembler : : InvokeCode ( Handle < Code > code , <nl> void MacroAssembler : : InvokeFunction ( Register fun , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) { <nl> + const CallWrapper & call_wrapper ) { <nl> / / You can ' t call a function without a valid frame . 
<nl> ASSERT ( flag = = JUMP_FUNCTION | | has_frame ( ) ) ; <nl> <nl> void MacroAssembler : : InvokeFunction ( Register fun , <nl> <nl> ParameterCount expected ( ebx ) ; <nl> InvokeCode ( FieldOperand ( edi , JSFunction : : kCodeEntryOffset ) , <nl> - expected , actual , flag , call_wrapper , call_kind ) ; <nl> + expected , actual , flag , call_wrapper ) ; <nl> } <nl> <nl> <nl> void MacroAssembler : : InvokeFunction ( Register fun , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) { <nl> + const CallWrapper & call_wrapper ) { <nl> / / You can ' t call a function without a valid frame . <nl> ASSERT ( flag = = JUMP_FUNCTION | | has_frame ( ) ) ; <nl> <nl> void MacroAssembler : : InvokeFunction ( Register fun , <nl> mov ( esi , FieldOperand ( edi , JSFunction : : kContextOffset ) ) ; <nl> <nl> InvokeCode ( FieldOperand ( edi , JSFunction : : kCodeEntryOffset ) , <nl> - expected , actual , flag , call_wrapper , call_kind ) ; <nl> + expected , actual , flag , call_wrapper ) ; <nl> } <nl> <nl> <nl> void MacroAssembler : : InvokeFunction ( Handle < JSFunction > function , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) { <nl> + const CallWrapper & call_wrapper ) { <nl> LoadHeapObject ( edi , function ) ; <nl> - InvokeFunction ( edi , expected , actual , flag , call_wrapper , call_kind ) ; <nl> + InvokeFunction ( edi , expected , actual , flag , call_wrapper ) ; <nl> } <nl> <nl> <nl> void MacroAssembler : : InvokeBuiltin ( Builtins : : JavaScript id , <nl> ParameterCount expected ( 0 ) ; <nl> GetBuiltinFunction ( edi , id ) ; <nl> InvokeCode ( FieldOperand ( edi , JSFunction : : kCodeEntryOffset ) , <nl> - expected , expected , flag , call_wrapper , CALL_AS_METHOD ) ; <nl> + expected , expected , flag , call_wrapper ) ; <nl> } <nl> <nl> <nl> mmm a / src / ia32 / macro - assembler - ia32 . h <nl> ppp b / src / ia32 / macro - assembler - ia32 . h <nl> class MacroAssembler : public Assembler { <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> / / JavaScript invokes <nl> <nl> - / / Set up call kind marking in ecx . The method takes ecx as an <nl> - / / explicit first parameter to make the code more readable at the <nl> - / / call sites . <nl> - void SetCallKind ( Register dst , CallKind kind ) ; <nl> - <nl> / / Invoke the JavaScript function code by either calling or jumping . 
<nl> void InvokeCode ( Register code , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) { <nl> - InvokeCode ( Operand ( code ) , expected , actual , flag , call_wrapper , call_kind ) ; <nl> + const CallWrapper & call_wrapper ) { <nl> + InvokeCode ( Operand ( code ) , expected , actual , flag , call_wrapper ) ; <nl> } <nl> <nl> void InvokeCode ( const Operand & code , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) ; <nl> + const CallWrapper & call_wrapper ) ; <nl> <nl> void InvokeCode ( Handle < Code > code , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> RelocInfo : : Mode rmode , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) ; <nl> + const CallWrapper & call_wrapper ) ; <nl> <nl> / / Invoke the JavaScript function in the given register . Changes the <nl> / / current context to the context in the function before invoking . <nl> void InvokeFunction ( Register function , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) ; <nl> + const CallWrapper & call_wrapper ) ; <nl> <nl> void InvokeFunction ( Register function , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) ; <nl> + const CallWrapper & call_wrapper ) ; <nl> <nl> void InvokeFunction ( Handle < JSFunction > function , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) ; <nl> + const CallWrapper & call_wrapper ) ; <nl> <nl> / / Invoke specified builtin JavaScript function . Adds an entry to <nl> / / the unresolved list if the name does not resolve . <nl> class MacroAssembler : public Assembler { <nl> bool * definitely_mismatches , <nl> InvokeFlag flag , <nl> Label : : Distance done_distance , <nl> - const CallWrapper & call_wrapper = NullCallWrapper ( ) , <nl> - CallKind call_kind = CALL_AS_METHOD ) ; <nl> + const CallWrapper & call_wrapper = NullCallWrapper ( ) ) ; <nl> <nl> void EnterExitFramePrologue ( ) ; <nl> void EnterExitFrameEpilogue ( int argc , bool save_doubles ) ; <nl> mmm a / src / ia32 / stub - cache - ia32 . cc <nl> ppp b / src / ia32 / stub - cache - ia32 . cc <nl> void CallStubCompiler : : GenerateJumpFunction ( Handle < Object > object , <nl> PatchImplicitReceiver ( object ) ; <nl> <nl> / / Invoke the function . <nl> - __ InvokeFunction ( edi , arguments ( ) , JUMP_FUNCTION , <nl> - NullCallWrapper ( ) , call_kind ( ) ) ; <nl> + __ InvokeFunction ( edi , arguments ( ) , JUMP_FUNCTION , NullCallWrapper ( ) ) ; <nl> } <nl> <nl> <nl> void StoreStubCompiler : : GenerateStoreViaSetter ( <nl> ParameterCount actual ( 1 ) ; <nl> ParameterCount expected ( setter ) ; <nl> __ InvokeFunction ( setter , expected , actual , <nl> - CALL_FUNCTION , NullCallWrapper ( ) , CALL_AS_METHOD ) ; <nl> + CALL_FUNCTION , NullCallWrapper ( ) ) ; <nl> } else { <nl> / / If we generate a global code snippet for deoptimization only , remember <nl> / / the place to continue after deoptimization . 
<nl> void LoadStubCompiler : : GenerateLoadViaGetter ( MacroAssembler * masm , <nl> ParameterCount actual ( 0 ) ; <nl> ParameterCount expected ( getter ) ; <nl> __ InvokeFunction ( getter , expected , actual , <nl> - CALL_FUNCTION , NullCallWrapper ( ) , CALL_AS_METHOD ) ; <nl> + CALL_FUNCTION , NullCallWrapper ( ) ) ; <nl> } else { <nl> / / If we generate a global code snippet for deoptimization only , remember <nl> / / the place to continue after deoptimization . <nl> mmm a / src / stub - cache . cc <nl> ppp b / src / stub - cache . cc <nl> void StubCompiler : : LookupPostInterceptor ( Handle < JSObject > holder , <nl> # define __ ACCESS_MASM ( masm ( ) ) <nl> <nl> <nl> - CallKind CallStubCompiler : : call_kind ( ) { <nl> - return CallICBase : : Contextual : : decode ( extra_state ( ) ) <nl> - ? CALL_AS_FUNCTION <nl> - : CALL_AS_METHOD ; <nl> - } <nl> - <nl> - <nl> void CallStubCompiler : : HandlerFrontendFooter ( Label * miss ) { <nl> __ bind ( miss ) ; <nl> GenerateMissBranch ( ) ; <nl> void CallStubCompiler : : GenerateJumpFunctionIgnoreReceiver ( <nl> Handle < JSFunction > function ) { <nl> ParameterCount expected ( function ) ; <nl> __ InvokeFunction ( function , expected , arguments ( ) , <nl> - JUMP_FUNCTION , NullCallWrapper ( ) , call_kind ( ) ) ; <nl> + JUMP_FUNCTION , NullCallWrapper ( ) ) ; <nl> } <nl> <nl> <nl> void CallStubCompiler : : GenerateJumpFunction ( Handle < Object > object , <nl> PatchImplicitReceiver ( object ) ; <nl> ParameterCount expected ( function ) ; <nl> __ InvokeFunction ( actual_closure , expected , arguments ( ) , <nl> - JUMP_FUNCTION , NullCallWrapper ( ) , call_kind ( ) ) ; <nl> + JUMP_FUNCTION , NullCallWrapper ( ) ) ; <nl> } <nl> <nl> <nl> mmm a / src / stub - cache . h <nl> ppp b / src / stub - cache . h <nl> class CallStubCompiler : public StubCompiler { <nl> Handle < JSFunction > function , <nl> Handle < String > name ) ; <nl> <nl> - CallKind call_kind ( ) ; <nl> - <nl> Handle < Code > GetCode ( Code : : StubType type , Handle < Name > name ) ; <nl> Handle < Code > GetCode ( Handle < JSFunction > function ) ; <nl> <nl> mmm a / src / type - info . cc <nl> ppp b / src / type - info . cc <nl> KeyedAccessStoreMode TypeFeedbackOracle : : GetStoreMode ( <nl> void TypeFeedbackOracle : : CallReceiverTypes ( TypeFeedbackId id , <nl> Handle < String > name , <nl> int arity , <nl> - CallKind call_kind , <nl> + ContextualMode contextual_mode , <nl> SmallMapList * types ) { <nl> / / Note : Currently we do not take string extra ic data into account <nl> / / here . <nl> - ContextualMode contextual_mode = call_kind = = CALL_AS_FUNCTION <nl> - ? CONTEXTUAL <nl> - : NOT_CONTEXTUAL ; <nl> ExtraICState extra_ic_state = <nl> CallIC : : Contextual : : encode ( contextual_mode ) ; <nl> - <nl> Code : : Flags flags = Code : : ComputeMonomorphicFlags ( <nl> Code : : CALL_IC , extra_ic_state , OWN_MAP , Code : : NORMAL , arity ) ; <nl> CollectReceiverTypes ( id , name , flags , types ) ; <nl> mmm a / src / type - info . h <nl> ppp b / src / type - info . h <nl> class TypeFeedbackOracle : public ZoneObject { <nl> void CallReceiverTypes ( TypeFeedbackId id , <nl> Handle < String > name , <nl> int arity , <nl> - CallKind call_kind , <nl> + ContextualMode contextual_mode , <nl> SmallMapList * types ) ; <nl> void PropertyReceiverTypes ( TypeFeedbackId id , <nl> Handle < String > name , <nl> mmm a / src / typing . cc <nl> ppp b / src / typing . 
cc <nl> void AstTyper : : VisitProperty ( Property * expr ) { <nl> <nl> <nl> void AstTyper : : VisitCall ( Call * expr ) { <nl> - / / Collect type feedback . <nl> Expression * callee = expr - > expression ( ) ; <nl> Property * prop = callee - > AsProperty ( ) ; <nl> - if ( prop ! = NULL ) { <nl> - expr - > RecordTypeFeedback ( oracle ( ) , CALL_AS_METHOD ) ; <nl> - } else { <nl> - expr - > RecordTypeFeedback ( oracle ( ) , CALL_AS_FUNCTION ) ; <nl> - } <nl> + ContextualMode contextual_mode = prop = = NULL ? CONTEXTUAL : NOT_CONTEXTUAL ; <nl> + / / Collect type feedback . <nl> + expr - > RecordTypeFeedback ( oracle ( ) , contextual_mode ) ; <nl> <nl> RECURSE ( Visit ( expr - > expression ( ) ) ) ; <nl> ZoneList < Expression * > * args = expr - > arguments ( ) ; <nl> mmm a / src / v8globals . h <nl> ppp b / src / v8globals . h <nl> enum SmiCheckType { <nl> } ; <nl> <nl> <nl> - / / Used to specify whether a receiver is implicitly or explicitly <nl> - / / provided to a call . <nl> - enum CallKind { <nl> - CALL_AS_METHOD , <nl> - CALL_AS_FUNCTION <nl> - } ; <nl> - <nl> - <nl> enum ScopeType { <nl> EVAL_SCOPE , / / The top - level scope for an eval source . <nl> FUNCTION_SCOPE , / / The top - level scope for a function . <nl> mmm a / src / x64 / builtins - x64 . cc <nl> ppp b / src / x64 / builtins - x64 . cc <nl> static void CallRuntimePassFunction ( <nl> FrameScope scope ( masm , StackFrame : : INTERNAL ) ; <nl> / / Push a copy of the function onto the stack . <nl> __ push ( rdi ) ; <nl> - / / Push call kind information . <nl> - __ push ( rcx ) ; <nl> / / Function is also the parameter to the runtime call . <nl> __ push ( rdi ) ; <nl> <nl> __ CallRuntime ( function_id , 1 ) ; <nl> - / / Restore call kind information . <nl> - __ pop ( rcx ) ; <nl> / / Restore receiver . <nl> __ pop ( rdi ) ; <nl> } <nl> static void Generate_JSConstructStubHelper ( MacroAssembler * masm , <nl> masm - > isolate ( ) - > builtins ( ) - > HandleApiCallConstruct ( ) ; <nl> ParameterCount expected ( 0 ) ; <nl> __ InvokeCode ( code , expected , expected , RelocInfo : : CODE_TARGET , <nl> - CALL_FUNCTION , NullCallWrapper ( ) , CALL_AS_METHOD ) ; <nl> + CALL_FUNCTION , NullCallWrapper ( ) ) ; <nl> } else { <nl> ParameterCount actual ( rax ) ; <nl> - __ InvokeFunction ( rdi , actual , CALL_FUNCTION , <nl> - NullCallWrapper ( ) , CALL_AS_METHOD ) ; <nl> + __ InvokeFunction ( rdi , actual , CALL_FUNCTION , NullCallWrapper ( ) ) ; <nl> } <nl> <nl> / / Store offset of return address for deoptimizer . <nl> static void Generate_JSEntryTrampolineHelper ( MacroAssembler * masm , <nl> } else { <nl> ParameterCount actual ( rax ) ; <nl> / / Function must be in rdi . <nl> - __ InvokeFunction ( rdi , actual , CALL_FUNCTION , <nl> - NullCallWrapper ( ) , CALL_AS_METHOD ) ; <nl> + __ InvokeFunction ( rdi , actual , CALL_FUNCTION , NullCallWrapper ( ) ) ; <nl> } <nl> / / Exit the internal frame . Notice that this also removes the empty <nl> / / context and the function left on the stack by the code <nl> static void CallCompileOptimized ( MacroAssembler * masm , <nl> FrameScope scope ( masm , StackFrame : : INTERNAL ) ; <nl> / / Push a copy of the function onto the stack . <nl> __ push ( rdi ) ; <nl> - / / Push call kind information . <nl> - __ push ( rcx ) ; <nl> / / Function is also the parameter to the runtime call . <nl> __ push ( rdi ) ; <nl> / / Whether to compile in a background thread . 
<nl> __ Push ( masm - > isolate ( ) - > factory ( ) - > ToBoolean ( concurrent ) ) ; <nl> <nl> __ CallRuntime ( Runtime : : kCompileOptimized , 2 ) ; <nl> - / / Restore call kind information . <nl> - __ pop ( rcx ) ; <nl> / / Restore receiver . <nl> __ pop ( rdi ) ; <nl> } <nl> void Builtins : : Generate_FunctionCall ( MacroAssembler * masm ) { <nl> __ testq ( rdx , rdx ) ; <nl> __ j ( zero , & function ) ; <nl> __ Set ( rbx , 0 ) ; <nl> - __ SetCallKind ( rcx , CALL_AS_METHOD ) ; <nl> __ cmpq ( rdx , Immediate ( 1 ) ) ; <nl> __ j ( not_equal , & non_proxy ) ; <nl> <nl> void Builtins : : Generate_FunctionCall ( MacroAssembler * masm ) { <nl> FieldOperand ( rdx , <nl> SharedFunctionInfo : : kFormalParameterCountOffset ) ) ; <nl> __ movq ( rdx , FieldOperand ( rdi , JSFunction : : kCodeEntryOffset ) ) ; <nl> - __ SetCallKind ( rcx , CALL_AS_FUNCTION ) ; <nl> __ cmpq ( rax , rbx ) ; <nl> __ j ( not_equal , <nl> masm - > isolate ( ) - > builtins ( ) - > ArgumentsAdaptorTrampoline ( ) , <nl> RelocInfo : : CODE_TARGET ) ; <nl> <nl> ParameterCount expected ( 0 ) ; <nl> - __ InvokeCode ( rdx , expected , expected , JUMP_FUNCTION , <nl> - NullCallWrapper ( ) , CALL_AS_FUNCTION ) ; <nl> + __ InvokeCode ( rdx , expected , expected , JUMP_FUNCTION , NullCallWrapper ( ) ) ; <nl> } <nl> <nl> <nl> void Builtins : : Generate_FunctionApply ( MacroAssembler * masm ) { <nl> __ movq ( rdi , Operand ( rbp , kFunctionOffset ) ) ; <nl> __ CmpObjectType ( rdi , JS_FUNCTION_TYPE , rcx ) ; <nl> __ j ( not_equal , & call_proxy ) ; <nl> - __ InvokeFunction ( rdi , actual , CALL_FUNCTION , <nl> - NullCallWrapper ( ) , CALL_AS_FUNCTION ) ; <nl> + __ InvokeFunction ( rdi , actual , CALL_FUNCTION , NullCallWrapper ( ) ) ; <nl> <nl> frame_scope . GenerateLeaveFrame ( ) ; <nl> __ ret ( 3 * kPointerSize ) ; / / remove this , receiver , and arguments <nl> void Builtins : : Generate_FunctionApply ( MacroAssembler * masm ) { <nl> __ push ( rdi ) ; / / add function proxy as last argument <nl> __ incq ( rax ) ; <nl> __ Set ( rbx , 0 ) ; <nl> - __ SetCallKind ( rcx , CALL_AS_FUNCTION ) ; <nl> __ GetBuiltinEntry ( rdx , Builtins : : CALL_FUNCTION_PROXY ) ; <nl> __ call ( masm - > isolate ( ) - > builtins ( ) - > ArgumentsAdaptorTrampoline ( ) , <nl> RelocInfo : : CODE_TARGET ) ; <nl> void Builtins : : Generate_ArgumentsAdaptorTrampoline ( MacroAssembler * masm ) { <nl> / / mmmmmmmmm - - S t a t e mmmmmmmmmmmm - <nl> / / - - rax : actual number of arguments <nl> / / - - rbx : expected number of arguments <nl> - / / - - rcx : call kind information <nl> / / - - rdx : code entry to call <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> mmm a / src / x64 / code - stubs - x64 . cc <nl> ppp b / src / x64 / code - stubs - x64 . cc <nl> void CallFunctionStub : : Generate ( MacroAssembler * masm ) { <nl> / / Fast - case : Just invoke the function . <nl> ParameterCount actual ( argc_ ) ; <nl> <nl> - __ InvokeFunction ( rdi , <nl> - actual , <nl> - JUMP_FUNCTION , <nl> - NullCallWrapper ( ) , <nl> - CALL_AS_FUNCTION ) ; <nl> + __ InvokeFunction ( rdi , actual , JUMP_FUNCTION , NullCallWrapper ( ) ) ; <nl> <nl> / / Slow - case : Non - function called . 
<nl> __ bind ( & slow ) ; <nl> void CallFunctionStub : : Generate ( MacroAssembler * masm ) { <nl> __ PushReturnAddressFrom ( rcx ) ; <nl> __ Set ( rax , argc_ + 1 ) ; <nl> __ Set ( rbx , 0 ) ; <nl> - __ SetCallKind ( rcx , CALL_AS_FUNCTION ) ; <nl> __ GetBuiltinEntry ( rdx , Builtins : : CALL_FUNCTION_PROXY ) ; <nl> { <nl> Handle < Code > adaptor = <nl> void CallFunctionStub : : Generate ( MacroAssembler * masm ) { <nl> __ movq ( args . GetReceiverOperand ( ) , rdi ) ; <nl> __ Set ( rax , argc_ ) ; <nl> __ Set ( rbx , 0 ) ; <nl> - __ SetCallKind ( rcx , CALL_AS_FUNCTION ) ; <nl> __ GetBuiltinEntry ( rdx , Builtins : : CALL_NON_FUNCTION ) ; <nl> Handle < Code > adaptor = <nl> isolate - > builtins ( ) - > ArgumentsAdaptorTrampoline ( ) ; <nl> void CallConstructStub : : Generate ( MacroAssembler * masm ) { <nl> __ bind ( & do_call ) ; <nl> / / Set expected number of arguments to zero ( not changing rax ) . <nl> __ Set ( rbx , 0 ) ; <nl> - __ SetCallKind ( rcx , CALL_AS_METHOD ) ; <nl> __ Jump ( masm - > isolate ( ) - > builtins ( ) - > ArgumentsAdaptorTrampoline ( ) , <nl> RelocInfo : : CODE_TARGET ) ; <nl> } <nl> void StubFailureTailCallTrampolineStub : : Generate ( MacroAssembler * masm ) { <nl> __ subl ( rax , Immediate ( 1 ) ) ; <nl> masm - > LeaveFrame ( StackFrame : : STUB_FAILURE_TRAMPOLINE ) ; <nl> ParameterCount argument_count ( rax ) ; <nl> - __ InvokeFunction ( <nl> - rdi , argument_count , JUMP_FUNCTION , NullCallWrapper ( ) , CALL_AS_METHOD ) ; <nl> + __ InvokeFunction ( rdi , argument_count , JUMP_FUNCTION , NullCallWrapper ( ) ) ; <nl> } <nl> <nl> <nl> mmm a / src / x64 / full - codegen - x64 . cc <nl> ppp b / src / x64 / full - codegen - x64 . cc <nl> void FullCodeGenerator : : Generate ( ) { <nl> / / object ) . <nl> if ( info - > is_classic_mode ( ) & & ! info - > is_native ( ) ) { <nl> Label ok ; <nl> - __ testq ( rcx , rcx ) ; <nl> - __ j ( zero , & ok , Label : : kNear ) ; <nl> - <nl> / / + 1 for return address . <nl> StackArgumentsAccessor args ( rsp , info - > scope ( ) - > num_parameters ( ) ) ; <nl> __ movq ( rcx , args . GetReceiverOperand ( ) ) ; <nl> void FullCodeGenerator : : EmitCallFunction ( CallRuntime * expr ) { <nl> / / InvokeFunction requires the function in rdi . Move it in there . <nl> __ movq ( rdi , result_register ( ) ) ; <nl> ParameterCount count ( arg_count ) ; <nl> - __ InvokeFunction ( rdi , count , CALL_FUNCTION , <nl> - NullCallWrapper ( ) , CALL_AS_FUNCTION ) ; <nl> + __ InvokeFunction ( rdi , count , CALL_FUNCTION , NullCallWrapper ( ) ) ; <nl> __ movq ( rsi , Operand ( rbp , StandardFrameConstants : : kContextOffset ) ) ; <nl> __ jmp ( & done ) ; <nl> <nl> mmm a / src / x64 / ic - x64 . cc <nl> ppp b / src / x64 / ic - x64 . cc <nl> static void GenerateFunctionTailCall ( MacroAssembler * masm , <nl> <nl> / / Invoke the function . <nl> ParameterCount actual ( argc ) ; <nl> - __ InvokeFunction ( rdi , actual , JUMP_FUNCTION , <nl> - NullCallWrapper ( ) , CALL_AS_METHOD ) ; <nl> + __ InvokeFunction ( rdi , actual , JUMP_FUNCTION , NullCallWrapper ( ) ) ; <nl> } <nl> <nl> <nl> void CallICBase : : GenerateMiss ( MacroAssembler * masm , <nl> } <nl> <nl> / / Invoke the function . <nl> - CallKind call_kind = CallICBase : : Contextual : : decode ( extra_state ) <nl> - ? 
CALL_AS_FUNCTION <nl> - : CALL_AS_METHOD ; <nl> ParameterCount actual ( argc ) ; <nl> - __ InvokeFunction ( rdi , <nl> - actual , <nl> - JUMP_FUNCTION , <nl> - NullCallWrapper ( ) , <nl> - call_kind ) ; <nl> + __ InvokeFunction ( rdi , actual , JUMP_FUNCTION , NullCallWrapper ( ) ) ; <nl> } <nl> <nl> <nl> mmm a / src / x64 / lithium - codegen - x64 . cc <nl> ppp b / src / x64 / lithium - codegen - x64 . cc <nl> bool LCodeGen : : GeneratePrologue ( ) { <nl> info_ - > is_classic_mode ( ) & & <nl> ! info_ - > is_native ( ) ) { <nl> Label ok ; <nl> - __ testq ( rcx , rcx ) ; <nl> - __ j ( zero , & ok , Label : : kNear ) ; <nl> - <nl> StackArgumentsAccessor args ( rsp , scope ( ) - > num_parameters ( ) ) ; <nl> __ movq ( rcx , args . GetReceiverOperand ( ) ) ; <nl> <nl> void LCodeGen : : DoApplyArguments ( LApplyArguments * instr ) { <nl> SafepointGenerator safepoint_generator ( <nl> this , pointers , Safepoint : : kLazyDeopt ) ; <nl> ParameterCount actual ( rax ) ; <nl> - __ InvokeFunction ( function , actual , CALL_FUNCTION , <nl> - safepoint_generator , CALL_AS_FUNCTION ) ; <nl> + __ InvokeFunction ( function , actual , CALL_FUNCTION , safepoint_generator ) ; <nl> } <nl> <nl> <nl> void LCodeGen : : CallKnownFunction ( Handle < JSFunction > function , <nl> int formal_parameter_count , <nl> int arity , <nl> LInstruction * instr , <nl> - CallKind call_kind , <nl> RDIState rdi_state ) { <nl> bool dont_adapt_arguments = <nl> formal_parameter_count = = SharedFunctionInfo : : kDontAdaptArgumentsSentinel ; <nl> void LCodeGen : : CallKnownFunction ( Handle < JSFunction > function , <nl> } <nl> <nl> / / Invoke function . <nl> - __ SetCallKind ( rcx , call_kind ) ; <nl> if ( function . is_identical_to ( info ( ) - > closure ( ) ) ) { <nl> __ CallSelf ( ) ; <nl> } else { <nl> void LCodeGen : : CallKnownFunction ( Handle < JSFunction > function , <nl> this , pointers , Safepoint : : kLazyDeopt ) ; <nl> ParameterCount count ( arity ) ; <nl> ParameterCount expected ( formal_parameter_count ) ; <nl> - __ InvokeFunction ( <nl> - function , expected , count , CALL_FUNCTION , generator , call_kind ) ; <nl> + __ InvokeFunction ( function , expected , count , CALL_FUNCTION , generator ) ; <nl> } <nl> } <nl> <nl> void LCodeGen : : DoCallConstantFunction ( LCallConstantFunction * instr ) { <nl> instr - > hydrogen ( ) - > formal_parameter_count ( ) , <nl> instr - > arity ( ) , <nl> instr , <nl> - CALL_AS_FUNCTION , <nl> RDI_UNINITIALIZED ) ; <nl> } <nl> <nl> void LCodeGen : : DoInvokeFunction ( LInvokeFunction * instr ) { <nl> LPointerMap * pointers = instr - > pointer_map ( ) ; <nl> SafepointGenerator generator ( this , pointers , Safepoint : : kLazyDeopt ) ; <nl> ParameterCount count ( instr - > arity ( ) ) ; <nl> - __ InvokeFunction ( rdi , count , CALL_FUNCTION , generator , CALL_AS_FUNCTION ) ; <nl> + __ InvokeFunction ( rdi , count , CALL_FUNCTION , generator ) ; <nl> } else { <nl> CallKnownFunction ( known_function , <nl> instr - > hydrogen ( ) - > formal_parameter_count ( ) , <nl> instr - > arity ( ) , <nl> instr , <nl> - CALL_AS_FUNCTION , <nl> RDI_CONTAINS_TARGET ) ; <nl> } <nl> } <nl> void LCodeGen : : DoCallKnownGlobal ( LCallKnownGlobal * instr ) { <nl> instr - > hydrogen ( ) - > formal_parameter_count ( ) , <nl> instr - > arity ( ) , <nl> instr , <nl> - CALL_AS_FUNCTION , <nl> RDI_UNINITIALIZED ) ; <nl> } <nl> <nl> mmm a / src / x64 / lithium - codegen - x64 . h <nl> ppp b / src / x64 / lithium - codegen - x64 . 
h <nl> class LCodeGen : public LCodeGenBase { <nl> int formal_parameter_count , <nl> int arity , <nl> LInstruction * instr , <nl> - CallKind call_kind , <nl> RDIState rdi_state ) ; <nl> <nl> void RecordSafepointWithLazyDeopt ( LInstruction * instr , <nl> mmm a / src / x64 / macro - assembler - x64 . cc <nl> ppp b / src / x64 / macro - assembler - x64 . cc <nl> void MacroAssembler : : InvokeBuiltin ( Builtins : : JavaScript id , <nl> / / parameter count to avoid emitting code to do the check . <nl> ParameterCount expected ( 0 ) ; <nl> GetBuiltinEntry ( rdx , id ) ; <nl> - InvokeCode ( rdx , expected , expected , flag , call_wrapper , CALL_AS_METHOD ) ; <nl> + InvokeCode ( rdx , expected , expected , flag , call_wrapper ) ; <nl> } <nl> <nl> <nl> void MacroAssembler : : DebugBreak ( ) { <nl> # endif / / ENABLE_DEBUGGER_SUPPORT <nl> <nl> <nl> - void MacroAssembler : : SetCallKind ( Register dst , CallKind call_kind ) { <nl> - / / This macro takes the dst register to make the code more readable <nl> - / / at the call sites . However , the dst register has to be rcx to <nl> - / / follow the calling convention which requires the call type to be <nl> - / / in rcx . <nl> - ASSERT ( dst . is ( rcx ) ) ; <nl> - if ( call_kind = = CALL_AS_FUNCTION ) { <nl> - LoadSmiConstant ( dst , Smi : : FromInt ( 1 ) ) ; <nl> - } else { <nl> - LoadSmiConstant ( dst , Smi : : FromInt ( 0 ) ) ; <nl> - } <nl> - } <nl> - <nl> - <nl> void MacroAssembler : : InvokeCode ( Register code , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) { <nl> + const CallWrapper & call_wrapper ) { <nl> / / You can ' t call a function without a valid frame . <nl> ASSERT ( flag = = JUMP_FUNCTION | | has_frame ( ) ) ; <nl> <nl> void MacroAssembler : : InvokeCode ( Register code , <nl> & definitely_mismatches , <nl> flag , <nl> Label : : kNear , <nl> - call_wrapper , <nl> - call_kind ) ; <nl> + call_wrapper ) ; <nl> if ( ! definitely_mismatches ) { <nl> if ( flag = = CALL_FUNCTION ) { <nl> call_wrapper . BeforeCall ( CallSize ( code ) ) ; <nl> - SetCallKind ( rcx , call_kind ) ; <nl> call ( code ) ; <nl> call_wrapper . AfterCall ( ) ; <nl> } else { <nl> ASSERT ( flag = = JUMP_FUNCTION ) ; <nl> - SetCallKind ( rcx , call_kind ) ; <nl> jmp ( code ) ; <nl> } <nl> bind ( & done ) ; <nl> void MacroAssembler : : InvokeCode ( Handle < Code > code , <nl> const ParameterCount & actual , <nl> RelocInfo : : Mode rmode , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) { <nl> + const CallWrapper & call_wrapper ) { <nl> / / You can ' t call a function without a valid frame . <nl> ASSERT ( flag = = JUMP_FUNCTION | | has_frame ( ) ) ; <nl> <nl> void MacroAssembler : : InvokeCode ( Handle < Code > code , <nl> & definitely_mismatches , <nl> flag , <nl> Label : : kNear , <nl> - call_wrapper , <nl> - call_kind ) ; <nl> + call_wrapper ) ; <nl> if ( ! definitely_mismatches ) { <nl> if ( flag = = CALL_FUNCTION ) { <nl> call_wrapper . BeforeCall ( CallSize ( code ) ) ; <nl> - SetCallKind ( rcx , call_kind ) ; <nl> Call ( code , rmode ) ; <nl> call_wrapper . 
AfterCall ( ) ; <nl> } else { <nl> ASSERT ( flag = = JUMP_FUNCTION ) ; <nl> - SetCallKind ( rcx , call_kind ) ; <nl> Jump ( code , rmode ) ; <nl> } <nl> bind ( & done ) ; <nl> void MacroAssembler : : InvokeCode ( Handle < Code > code , <nl> void MacroAssembler : : InvokeFunction ( Register function , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) { <nl> + const CallWrapper & call_wrapper ) { <nl> / / You can ' t call a function without a valid frame . <nl> ASSERT ( flag = = JUMP_FUNCTION | | has_frame ( ) ) ; <nl> <nl> void MacroAssembler : : InvokeFunction ( Register function , <nl> movq ( rdx , FieldOperand ( rdi , JSFunction : : kCodeEntryOffset ) ) ; <nl> <nl> ParameterCount expected ( rbx ) ; <nl> - InvokeCode ( rdx , expected , actual , flag , call_wrapper , call_kind ) ; <nl> + InvokeCode ( rdx , expected , actual , flag , call_wrapper ) ; <nl> } <nl> <nl> <nl> void MacroAssembler : : InvokeFunction ( Register function , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) { <nl> + const CallWrapper & call_wrapper ) { <nl> / / You can ' t call a function without a valid frame . <nl> ASSERT ( flag = = JUMP_FUNCTION | | has_frame ( ) ) ; <nl> <nl> void MacroAssembler : : InvokeFunction ( Register function , <nl> / / the executable code . <nl> movq ( rdx , FieldOperand ( rdi , JSFunction : : kCodeEntryOffset ) ) ; <nl> <nl> - InvokeCode ( rdx , expected , actual , flag , call_wrapper , call_kind ) ; <nl> + InvokeCode ( rdx , expected , actual , flag , call_wrapper ) ; <nl> } <nl> <nl> <nl> void MacroAssembler : : InvokeFunction ( Handle < JSFunction > function , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) { <nl> + const CallWrapper & call_wrapper ) { <nl> Move ( rdi , function ) ; <nl> - InvokeFunction ( rdi , expected , actual , flag , call_wrapper , call_kind ) ; <nl> + InvokeFunction ( rdi , expected , actual , flag , call_wrapper ) ; <nl> } <nl> <nl> <nl> void MacroAssembler : : InvokePrologue ( const ParameterCount & expected , <nl> bool * definitely_mismatches , <nl> InvokeFlag flag , <nl> Label : : Distance near_jump , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) { <nl> + const CallWrapper & call_wrapper ) { <nl> bool definitely_matches = false ; <nl> * definitely_mismatches = false ; <nl> Label invoke ; <nl> void MacroAssembler : : InvokePrologue ( const ParameterCount & expected , <nl> <nl> if ( flag = = CALL_FUNCTION ) { <nl> call_wrapper . BeforeCall ( CallSize ( adaptor ) ) ; <nl> - SetCallKind ( rcx , call_kind ) ; <nl> Call ( adaptor , RelocInfo : : CODE_TARGET ) ; <nl> call_wrapper . AfterCall ( ) ; <nl> if ( ! * definitely_mismatches ) { <nl> jmp ( done , near_jump ) ; <nl> } <nl> } else { <nl> - SetCallKind ( rcx , call_kind ) ; <nl> Jump ( adaptor , RelocInfo : : CODE_TARGET ) ; <nl> } <nl> bind ( & invoke ) ; <nl> mmm a / src / x64 / macro - assembler - x64 . h <nl> ppp b / src / x64 / macro - assembler - x64 . h <nl> class MacroAssembler : public Assembler { <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> / / JavaScript invokes <nl> <nl> - / / Set up call kind marking in rcx . 
The method takes rcx as an <nl> - / / explicit first parameter to make the code more readable at the <nl> - / / call sites . <nl> - void SetCallKind ( Register dst , CallKind kind ) ; <nl> - <nl> / / Invoke the JavaScript function code by either calling or jumping . <nl> void InvokeCode ( Register code , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) ; <nl> + const CallWrapper & call_wrapper ) ; <nl> <nl> void InvokeCode ( Handle < Code > code , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> RelocInfo : : Mode rmode , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) ; <nl> + const CallWrapper & call_wrapper ) ; <nl> <nl> / / Invoke the JavaScript function in the given register . Changes the <nl> / / current context to the context in the function before invoking . <nl> void InvokeFunction ( Register function , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) ; <nl> + const CallWrapper & call_wrapper ) ; <nl> <nl> void InvokeFunction ( Register function , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) ; <nl> + const CallWrapper & call_wrapper ) ; <nl> <nl> void InvokeFunction ( Handle < JSFunction > function , <nl> const ParameterCount & expected , <nl> const ParameterCount & actual , <nl> InvokeFlag flag , <nl> - const CallWrapper & call_wrapper , <nl> - CallKind call_kind ) ; <nl> + const CallWrapper & call_wrapper ) ; <nl> <nl> / / Invoke specified builtin JavaScript function . Adds an entry to <nl> / / the unresolved list if the name does not resolve . <nl> class MacroAssembler : public Assembler { <nl> bool * definitely_mismatches , <nl> InvokeFlag flag , <nl> Label : : Distance near_jump = Label : : kFar , <nl> - const CallWrapper & call_wrapper = NullCallWrapper ( ) , <nl> - CallKind call_kind = CALL_AS_METHOD ) ; <nl> + const CallWrapper & call_wrapper = NullCallWrapper ( ) ) ; <nl> <nl> void EnterExitFramePrologue ( bool save_rax ) ; <nl> <nl> mmm a / src / x64 / stub - cache - x64 . cc <nl> ppp b / src / x64 / stub - cache - x64 . cc <nl> void CallStubCompiler : : GenerateJumpFunction ( Handle < Object > object , <nl> PatchImplicitReceiver ( object ) ; <nl> <nl> / / Invoke the function . <nl> - __ InvokeFunction ( rdi , arguments ( ) , JUMP_FUNCTION , <nl> - NullCallWrapper ( ) , call_kind ( ) ) ; <nl> + __ InvokeFunction ( rdi , arguments ( ) , JUMP_FUNCTION , NullCallWrapper ( ) ) ; <nl> } <nl> <nl> <nl> void StoreStubCompiler : : GenerateStoreViaSetter ( <nl> ParameterCount actual ( 1 ) ; <nl> ParameterCount expected ( setter ) ; <nl> __ InvokeFunction ( setter , expected , actual , <nl> - CALL_FUNCTION , NullCallWrapper ( ) , CALL_AS_METHOD ) ; <nl> + CALL_FUNCTION , NullCallWrapper ( ) ) ; <nl> } else { <nl> / / If we generate a global code snippet for deoptimization only , remember <nl> / / the place to continue after deoptimization . 
<nl> void LoadStubCompiler : : GenerateLoadViaGetter ( MacroAssembler * masm , <nl> ParameterCount actual ( 0 ) ; <nl> ParameterCount expected ( getter ) ; <nl> __ InvokeFunction ( getter , expected , actual , <nl> - CALL_FUNCTION , NullCallWrapper ( ) , CALL_AS_METHOD ) ; <nl> + CALL_FUNCTION , NullCallWrapper ( ) ) ; <nl> } else { <nl> / / If we generate a global code snippet for deoptimization only , remember <nl> / / the place to continue after deoptimization . <nl> | Remove CALL_AS_FUNCTION and CALL_AS_METHOD . | v8/v8 | 1257ba358cf309e10e53ac40ac61817225f9923b | 2014-01-14T14:36:24Z |
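The V8 commit above is wide but mechanical: SetCallKind() disappears from both the ia32 and x64 macro assemblers, the classic-mode prologues stop testing ecx/rcx, the CallKind enum is deleted from v8globals.h, and every Invoke* helper loses its trailing CallKind argument, so each call site in the builtins, code stubs, full-codegen, ICs, Lithium codegen and stub cache shrinks by one parameter. Below is a hypothetical, self-contained C++ model of that signature change — Register, the CallWrapper hierarchy and the real assembler are stubbed out; only the parameter lists mirror the diff, so this is a sketch of the interface delta, not V8 code.

```cpp
#include <iostream>

enum InvokeFlag { CALL_FUNCTION, JUMP_FUNCTION };

struct ParameterCount {
  explicit ParameterCount(int n) : count(n) {}
  int count;
};

struct NullCallWrapper {};  // stand-in for v8's CallWrapper hierarchy

class MacroAssembler {
 public:
  // Before the commit this overload also took a trailing CallKind, and the
  // assembler spilled it into ecx/rcx via SetCallKind() before dispatching:
  //
  //   void InvokeFunction(Register fun, const ParameterCount& actual,
  //                       InvokeFlag flag, const CallWrapper& call_wrapper,
  //                       CallKind call_kind);
  //
  // After the commit the call kind is gone entirely, so no register is
  // reserved for it and every call site passes one argument less.
  void InvokeFunction(const char* fun, const ParameterCount& actual,
                      InvokeFlag flag, NullCallWrapper /*call_wrapper*/) {
    std::cout << "invoke " << fun << " with " << actual.count
              << " args, flag=" << flag << "\n";
  }
};

int main() {
  MacroAssembler masm;
  ParameterCount actual(2);
  // Old call site shape: masm.InvokeFunction("edi", actual, CALL_FUNCTION,
  //                                          NullCallWrapper(), CALL_AS_FUNCTION);
  masm.InvokeFunction("edi", actual, CALL_FUNCTION, NullCallWrapper());
  return 0;
}
```

Because the distinction between "called as a method" and "called as a function" is no longer threaded through a register, the receiver-patching check in the full-codegen and Lithium prologues also loses its ecx/rcx test, which is the other recurring hunk in this diff.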
mmm a / src / csharp / Grpc . IntegrationTesting / Grpc . IntegrationTesting . csproj <nl> ppp b / src / csharp / Grpc . IntegrationTesting / Grpc . IntegrationTesting . csproj <nl> <nl> < SpecificVersion > False < / SpecificVersion > <nl> < HintPath > . . \ packages \ BouncyCastle . 1 . 7 . 0 \ lib \ Net40 - Client \ BouncyCastle . Crypto . dll < / HintPath > <nl> < / Reference > <nl> + < Reference Include = " CommandLine " > <nl> + < HintPath > . . \ packages \ CommandLineParser . 1 . 9 . 71 \ lib \ net45 \ CommandLine . dll < / HintPath > <nl> + < / Reference > <nl> < Reference Include = " Google . Apis . Auth , Version = 1 . 9 . 3 . 19379 , Culture = neutral , PublicKeyToken = 4b01fa6e34db77ab , processorArchitecture = MSIL " > <nl> < SpecificVersion > False < / SpecificVersion > <nl> < HintPath > . . \ packages \ Google . Apis . Auth . 1 . 9 . 3 \ lib \ net40 \ Google . Apis . Auth . dll < / HintPath > <nl> mmm a / src / csharp / Grpc . IntegrationTesting / InteropClient . cs <nl> ppp b / src / csharp / Grpc . IntegrationTesting / InteropClient . cs <nl> <nl> using System . Threading ; <nl> using System . Threading . Tasks ; <nl> <nl> + using CommandLine ; <nl> using Google . Apis . Auth . OAuth2 ; <nl> using Google . Protobuf ; <nl> using Grpc . Auth ; <nl> <nl> using Grpc . Core . Utils ; <nl> using Grpc . Testing ; <nl> using NUnit . Framework ; <nl> + using CommandLine . Text ; <nl> + using System . IO ; <nl> <nl> namespace Grpc . IntegrationTesting <nl> { <nl> public class InteropClient <nl> { <nl> - private const string ServiceAccountUser = " 155450119199 - 3psnrh1sdr3d8cpj1v46naggf81mhdnk @ developer . gserviceaccount . com " ; <nl> - private const string ComputeEngineUser = " 155450119199 - r5aaqa2vqoa9g5mv2m6s3m1l293rlmel @ developer . gserviceaccount . com " ; <nl> - private const string AuthScope = " https : / / www . googleapis . com / auth / xapi . zoo " ; <nl> - private const string AuthScopeResponse = " xapi . zoo " ; <nl> - <nl> private class ClientOptions <nl> { <nl> - public bool help ; <nl> - public string serverHost = " 127 . 0 . 0 . 1 " ; <nl> - public string serverHostOverride = TestCredentials . DefaultHostOverride ; <nl> - public int ? serverPort ; <nl> - public string testCase = " large_unary " ; <nl> - public bool useTls ; <nl> - public bool useTestCa ; <nl> + [ Option ( " server_host " , DefaultValue = " 127 . 0 . 0 . 1 " ) ] <nl> + public string ServerHost { get ; set ; } <nl> + <nl> + [ Option ( " server_host_override " , DefaultValue = TestCredentials . 
DefaultHostOverride ) ] <nl> + public string ServerHostOverride { get ; set ; } <nl> + <nl> + [ Option ( " server_port " , Required = true ) ] <nl> + public int ServerPort { get ; set ; } <nl> + <nl> + [ Option ( " test_case " , DefaultValue = " large_unary " ) ] <nl> + public string TestCase { get ; set ; } <nl> + <nl> + [ Option ( " use_tls " ) ] <nl> + public bool UseTls { get ; set ; } <nl> + <nl> + [ Option ( " use_test_ca " ) ] <nl> + public bool UseTestCa { get ; set ; } <nl> + <nl> + [ Option ( " default_service_account " , Required = false ) ] <nl> + public string DefaultServiceAccount { get ; set ; } <nl> + <nl> + [ Option ( " oauth_scope " , Required = false ) ] <nl> + public string OAuthScope { get ; set ; } <nl> + <nl> + [ Option ( " service_account_key_file " , Required = false ) ] <nl> + public string ServiceAccountKeyFile { get ; set ; } <nl> + <nl> + [ HelpOption ] <nl> + public string GetUsage ( ) <nl> + { <nl> + var help = new HelpText <nl> + { <nl> + Heading = " gRPC C # interop testing client " , <nl> + AddDashesToOption = true <nl> + } ; <nl> + help . AddPreOptionsLine ( " Usage : " ) ; <nl> + help . AddOptions ( this ) ; <nl> + return help ; <nl> + } <nl> } <nl> <nl> ClientOptions options ; <nl> private InteropClient ( ClientOptions options ) <nl> <nl> public static void Run ( string [ ] args ) <nl> { <nl> - Console . WriteLine ( " gRPC C # interop testing client " ) ; <nl> - ClientOptions options = ParseArguments ( args ) ; <nl> - <nl> - if ( options . serverHost = = null | | ! options . serverPort . HasValue | | options . testCase = = null ) <nl> - { <nl> - Console . WriteLine ( " Missing required argument . " ) ; <nl> - Console . WriteLine ( ) ; <nl> - options . help = true ; <nl> - } <nl> - <nl> - if ( options . help ) <nl> + var options = new ClientOptions ( ) ; <nl> + if ( ! Parser . Default . ParseArguments ( args , options ) ) <nl> { <nl> - Console . WriteLine ( " Usage : " ) ; <nl> - Console . WriteLine ( " - - server_host = HOSTNAME " ) ; <nl> - Console . WriteLine ( " - - server_host_override = HOSTNAME " ) ; <nl> - Console . WriteLine ( " - - server_port = PORT " ) ; <nl> - Console . WriteLine ( " - - test_case = TESTCASE " ) ; <nl> - Console . WriteLine ( " - - use_tls = BOOLEAN " ) ; <nl> - Console . WriteLine ( " - - use_test_ca = BOOLEAN " ) ; <nl> - Console . WriteLine ( ) ; <nl> Environment . Exit ( 1 ) ; <nl> } <nl> <nl> public static void Run ( string [ ] args ) <nl> <nl> private async Task Run ( ) <nl> { <nl> - Credentials credentials = null ; <nl> - if ( options . useTls ) <nl> - { <nl> - credentials = TestCredentials . CreateTestClientCredentials ( options . useTestCa ) ; <nl> - } <nl> - <nl> + var credentials = options . UseTls ? TestCredentials . CreateTestClientCredentials ( options . UseTestCa ) : Credentials . Insecure ; <nl> + <nl> List < ChannelOption > channelOptions = null ; <nl> - if ( ! string . IsNullOrEmpty ( options . serverHostOverride ) ) <nl> + if ( ! string . IsNullOrEmpty ( options . ServerHostOverride ) ) <nl> { <nl> channelOptions = new List < ChannelOption > <nl> { <nl> - new ChannelOption ( ChannelOptions . SslTargetNameOverride , options . serverHostOverride ) <nl> + new ChannelOption ( ChannelOptions . SslTargetNameOverride , options . ServerHostOverride ) <nl> } ; <nl> } <nl> - <nl> - var channel = new Channel ( options . serverHost , options . serverPort . Value , credentials , channelOptions ) ; <nl> + Console . WriteLine ( options . ServerHost ) ; <nl> + Console . WriteLine ( options . 
ServerPort ) ; <nl> + var channel = new Channel ( options . ServerHost , options . ServerPort , credentials , channelOptions ) ; <nl> TestService . TestServiceClient client = new TestService . TestServiceClient ( channel ) ; <nl> - await RunTestCaseAsync ( options . testCase , client ) ; <nl> + await RunTestCaseAsync ( client , options ) ; <nl> channel . ShutdownAsync ( ) . Wait ( ) ; <nl> } <nl> <nl> - private async Task RunTestCaseAsync ( string testCase , TestService . TestServiceClient client ) <nl> + private async Task RunTestCaseAsync ( TestService . TestServiceClient client , ClientOptions options ) <nl> { <nl> - switch ( testCase ) <nl> + switch ( options . TestCase ) <nl> { <nl> case " empty_unary " : <nl> RunEmptyUnary ( client ) ; <nl> private async Task RunTestCaseAsync ( string testCase , TestService . TestServiceClie <nl> case " empty_stream " : <nl> await RunEmptyStreamAsync ( client ) ; <nl> break ; <nl> - case " service_account_creds " : <nl> - await RunServiceAccountCredsAsync ( client ) ; <nl> - break ; <nl> case " compute_engine_creds " : <nl> - await RunComputeEngineCredsAsync ( client ) ; <nl> + await RunComputeEngineCredsAsync ( client , options . DefaultServiceAccount , options . OAuthScope ) ; <nl> break ; <nl> case " jwt_token_creds " : <nl> - await RunJwtTokenCredsAsync ( client ) ; <nl> + await RunJwtTokenCredsAsync ( client , options . DefaultServiceAccount ) ; <nl> break ; <nl> case " oauth2_auth_token " : <nl> - await RunOAuth2AuthTokenAsync ( client ) ; <nl> + await RunOAuth2AuthTokenAsync ( client , options . DefaultServiceAccount , options . OAuthScope ) ; <nl> break ; <nl> case " per_rpc_creds " : <nl> - await RunPerRpcCredsAsync ( client ) ; <nl> + await RunPerRpcCredsAsync ( client , options . DefaultServiceAccount ) ; <nl> break ; <nl> case " cancel_after_begin " : <nl> await RunCancelAfterBeginAsync ( client ) ; <nl> private async Task RunTestCaseAsync ( string testCase , TestService . TestServiceClie <nl> RunBenchmarkEmptyUnary ( client ) ; <nl> break ; <nl> default : <nl> - throw new ArgumentException ( " Unknown test case " + testCase ) ; <nl> + throw new ArgumentException ( " Unknown test case " + options . TestCase ) ; <nl> } <nl> } <nl> <nl> public static async Task RunEmptyStreamAsync ( TestService . ITestServiceClient clie <nl> Console . WriteLine ( " Passed ! " ) ; <nl> } <nl> <nl> - public static async Task RunServiceAccountCredsAsync ( TestService . TestServiceClient client ) <nl> - { <nl> - Console . WriteLine ( " running service_account_creds " ) ; <nl> - var credential = await GoogleCredential . GetApplicationDefaultAsync ( ) ; <nl> - credential = credential . CreateScoped ( new [ ] { AuthScope } ) ; <nl> - client . HeaderInterceptor = AuthInterceptors . FromCredential ( credential ) ; <nl> - <nl> - var request = new SimpleRequest <nl> - { <nl> - ResponseType = PayloadType . COMPRESSABLE , <nl> - ResponseSize = 314159 , <nl> - Payload = CreateZerosPayload ( 271828 ) , <nl> - FillUsername = true , <nl> - FillOauthScope = true <nl> - } ; <nl> - <nl> - var response = client . UnaryCall ( request ) ; <nl> - <nl> - Assert . AreEqual ( PayloadType . COMPRESSABLE , response . Payload . Type ) ; <nl> - Assert . AreEqual ( 314159 , response . Payload . Body . Length ) ; <nl> - Assert . AreEqual ( AuthScopeResponse , response . OauthScope ) ; <nl> - Assert . AreEqual ( ServiceAccountUser , response . Username ) ; <nl> - Console . WriteLine ( " Passed ! " ) ; <nl> - } <nl> - <nl> - public static async Task RunComputeEngineCredsAsync ( TestService . 
TestServiceClient client ) <nl> + public static async Task RunComputeEngineCredsAsync ( TestService . TestServiceClient client , string defaultServiceAccount , string oauthScope ) <nl> { <nl> Console . WriteLine ( " running compute_engine_creds " ) ; <nl> var credential = await GoogleCredential . GetApplicationDefaultAsync ( ) ; <nl> public static async Task RunComputeEngineCredsAsync ( TestService . TestServiceClien <nl> <nl> Assert . AreEqual ( PayloadType . COMPRESSABLE , response . Payload . Type ) ; <nl> Assert . AreEqual ( 314159 , response . Payload . Body . Length ) ; <nl> - Assert . AreEqual ( AuthScopeResponse , response . OauthScope ) ; <nl> - Assert . AreEqual ( ComputeEngineUser , response . Username ) ; <nl> + Assert . False ( string . IsNullOrEmpty ( response . OauthScope ) ) ; <nl> + Assert . True ( oauthScope . Contains ( response . OauthScope ) ) ; <nl> + Assert . AreEqual ( defaultServiceAccount , response . Username ) ; <nl> Console . WriteLine ( " Passed ! " ) ; <nl> } <nl> <nl> - public static async Task RunJwtTokenCredsAsync ( TestService . TestServiceClient client ) <nl> + public static async Task RunJwtTokenCredsAsync ( TestService . TestServiceClient client , string defaultServiceAccount ) <nl> { <nl> Console . WriteLine ( " running jwt_token_creds " ) ; <nl> var credential = await GoogleCredential . GetApplicationDefaultAsync ( ) ; <nl> - / / check this a credential with scope support , but don ' t add the scope . <nl> Assert . IsTrue ( credential . IsCreateScopedRequired ) ; <nl> client . HeaderInterceptor = AuthInterceptors . FromCredential ( credential ) ; <nl> <nl> public static async Task RunJwtTokenCredsAsync ( TestService . TestServiceClient cli <nl> ResponseSize = 314159 , <nl> Payload = CreateZerosPayload ( 271828 ) , <nl> FillUsername = true , <nl> - FillOauthScope = true <nl> } ; <nl> <nl> var response = client . UnaryCall ( request ) ; <nl> <nl> Assert . AreEqual ( PayloadType . COMPRESSABLE , response . Payload . Type ) ; <nl> Assert . AreEqual ( 314159 , response . Payload . Body . Length ) ; <nl> - Assert . AreEqual ( ServiceAccountUser , response . Username ) ; <nl> + Assert . AreEqual ( defaultServiceAccount , response . Username ) ; <nl> Console . WriteLine ( " Passed ! " ) ; <nl> } <nl> <nl> - public static async Task RunOAuth2AuthTokenAsync ( TestService . TestServiceClient client ) <nl> + public static async Task RunOAuth2AuthTokenAsync ( TestService . TestServiceClient client , string defaultServiceAccount , string oauthScope ) <nl> { <nl> Console . WriteLine ( " running oauth2_auth_token " ) ; <nl> - ITokenAccess credential = ( await GoogleCredential . GetApplicationDefaultAsync ( ) ) . CreateScoped ( new [ ] { AuthScope } ) ; <nl> + ITokenAccess credential = ( await GoogleCredential . GetApplicationDefaultAsync ( ) ) . CreateScoped ( new [ ] { oauthScope } ) ; <nl> string oauth2Token = await credential . GetAccessTokenForRequestAsync ( ) ; <nl> <nl> client . HeaderInterceptor = AuthInterceptors . FromAccessToken ( oauth2Token ) ; <nl> public static async Task RunOAuth2AuthTokenAsync ( TestService . TestServiceClient c <nl> <nl> var response = client . UnaryCall ( request ) ; <nl> <nl> - Assert . AreEqual ( AuthScopeResponse , response . OauthScope ) ; <nl> - Assert . AreEqual ( ServiceAccountUser , response . Username ) ; <nl> + Assert . False ( string . IsNullOrEmpty ( response . OauthScope ) ) ; <nl> + Assert . True ( oauthScope . Contains ( response . OauthScope ) ) ; <nl> + Assert . AreEqual ( defaultServiceAccount , response . 
Username ) ; <nl> Console . WriteLine ( " Passed ! " ) ; <nl> } <nl> <nl> - public static async Task RunPerRpcCredsAsync ( TestService . TestServiceClient client ) <nl> + public static async Task RunPerRpcCredsAsync ( TestService . TestServiceClient client , string defaultServiceAccount ) <nl> { <nl> Console . WriteLine ( " running per_rpc_creds " ) ; <nl> <nl> - ITokenAccess credential = ( await GoogleCredential . GetApplicationDefaultAsync ( ) ) . CreateScoped ( new [ ] { AuthScope } ) ; <nl> - string oauth2Token = await credential . GetAccessTokenForRequestAsync ( ) ; <nl> - var headerInterceptor = AuthInterceptors . FromAccessToken ( oauth2Token ) ; <nl> + ITokenAccess credential = await GoogleCredential . GetApplicationDefaultAsync ( ) ; <nl> + string accessToken = await credential . GetAccessTokenForRequestAsync ( ) ; <nl> + var headerInterceptor = AuthInterceptors . FromAccessToken ( accessToken ) ; <nl> <nl> var request = new SimpleRequest <nl> { <nl> FillUsername = true , <nl> - FillOauthScope = true <nl> } ; <nl> <nl> var headers = new Metadata ( ) ; <nl> headerInterceptor ( null , " " , headers ) ; <nl> var response = client . UnaryCall ( request , headers : headers ) ; <nl> <nl> - Assert . AreEqual ( AuthScopeResponse , response . OauthScope ) ; <nl> - Assert . AreEqual ( ServiceAccountUser , response . Username ) ; <nl> + Assert . AreEqual ( defaultServiceAccount , response . Username ) ; <nl> Console . WriteLine ( " Passed ! " ) ; <nl> } <nl> <nl> private static Payload CreateZerosPayload ( int size ) <nl> { <nl> return new Payload { Body = ByteString . CopyFrom ( new byte [ size ] ) } ; <nl> } <nl> - <nl> - private static ClientOptions ParseArguments ( string [ ] args ) <nl> - { <nl> - var options = new ClientOptions ( ) ; <nl> - foreach ( string arg in args ) <nl> - { <nl> - ParseArgument ( arg , options ) ; <nl> - if ( options . help ) <nl> - { <nl> - break ; <nl> - } <nl> - } <nl> - return options ; <nl> - } <nl> - <nl> - private static void ParseArgument ( string arg , ClientOptions options ) <nl> - { <nl> - Match match ; <nl> - match = Regex . Match ( arg , " - - server_host = ( . * ) " ) ; <nl> - if ( match . Success ) <nl> - { <nl> - options . serverHost = match . Groups [ 1 ] . Value . Trim ( ) ; <nl> - return ; <nl> - } <nl> - <nl> - match = Regex . Match ( arg , " - - server_host_override = ( . * ) " ) ; <nl> - if ( match . Success ) <nl> - { <nl> - options . serverHostOverride = match . Groups [ 1 ] . Value . Trim ( ) ; <nl> - return ; <nl> - } <nl> - <nl> - match = Regex . Match ( arg , " - - server_port = ( . * ) " ) ; <nl> - if ( match . Success ) <nl> - { <nl> - options . serverPort = int . Parse ( match . Groups [ 1 ] . Value . Trim ( ) ) ; <nl> - return ; <nl> - } <nl> - <nl> - match = Regex . Match ( arg , " - - test_case = ( . * ) " ) ; <nl> - if ( match . Success ) <nl> - { <nl> - options . testCase = match . Groups [ 1 ] . Value . Trim ( ) ; <nl> - return ; <nl> - } <nl> - <nl> - match = Regex . Match ( arg , " - - use_tls = ( . * ) " ) ; <nl> - if ( match . Success ) <nl> - { <nl> - options . useTls = bool . Parse ( match . Groups [ 1 ] . Value . Trim ( ) ) ; <nl> - return ; <nl> - } <nl> - <nl> - match = Regex . Match ( arg , " - - use_test_ca = ( . * ) " ) ; <nl> - if ( match . Success ) <nl> - { <nl> - options . useTestCa = bool . Parse ( match . Groups [ 1 ] . Value . Trim ( ) ) ; <nl> - return ; <nl> - } <nl> - <nl> - Console . WriteLine ( string . Format ( " Unrecognized argument \ " { 0 } \ " " , arg ) ) ; <nl> - options . 
help = true ; <nl> - } <nl> } <nl> } <nl> mmm a / src / csharp / Grpc . IntegrationTesting / InteropServer . cs <nl> ppp b / src / csharp / Grpc . IntegrationTesting / InteropServer . cs <nl> <nl> using System . IO ; <nl> using System . Text . RegularExpressions ; <nl> using System . Threading . Tasks ; <nl> + <nl> + using CommandLine ; <nl> + using CommandLine . Text ; <nl> using Grpc . Core ; <nl> using Grpc . Core . Utils ; <nl> using Grpc . Testing ; <nl> public class InteropServer <nl> { <nl> private class ServerOptions <nl> { <nl> - public bool help ; <nl> - public int ? port = 8070 ; <nl> - public bool useTls ; <nl> + [ Option ( " port " , DefaultValue = 8070 ) ] <nl> + public int Port { get ; set ; } <nl> + <nl> + [ Option ( " use_tls " ) ] <nl> + public bool UseTls { get ; set ; } <nl> + <nl> + [ HelpOption ] <nl> + public string GetUsage ( ) <nl> + { <nl> + var help = new HelpText <nl> + { <nl> + Heading = " gRPC C # interop testing server " , <nl> + AddDashesToOption = true <nl> + } ; <nl> + help . AddPreOptionsLine ( " Usage : " ) ; <nl> + help . AddOptions ( this ) ; <nl> + return help ; <nl> + } <nl> } <nl> <nl> ServerOptions options ; <nl> private InteropServer ( ServerOptions options ) <nl> <nl> public static void Run ( string [ ] args ) <nl> { <nl> - Console . WriteLine ( " gRPC C # interop testing server " ) ; <nl> - ServerOptions options = ParseArguments ( args ) ; <nl> - <nl> - if ( ! options . port . HasValue ) <nl> - { <nl> - Console . WriteLine ( " Missing required argument . " ) ; <nl> - Console . WriteLine ( ) ; <nl> - options . help = true ; <nl> - } <nl> - <nl> - if ( options . help ) <nl> + var options = new ServerOptions ( ) ; <nl> + if ( ! Parser . Default . ParseArguments ( args , options ) ) <nl> { <nl> - Console . WriteLine ( " Usage : " ) ; <nl> - Console . WriteLine ( " - - port = PORT " ) ; <nl> - Console . WriteLine ( " - - use_tls = BOOLEAN " ) ; <nl> - Console . WriteLine ( ) ; <nl> Environment . Exit ( 1 ) ; <nl> } <nl> <nl> private void Run ( ) <nl> } ; <nl> <nl> string host = " 0 . 0 . 0 . 0 " ; <nl> - int port = options . port . Value ; <nl> - if ( options . useTls ) <nl> + int port = options . Port ; <nl> + if ( options . UseTls ) <nl> { <nl> server . Ports . Add ( host , port , TestCredentials . CreateTestServerCredentials ( ) ) ; <nl> } <nl> else <nl> { <nl> - server . Ports . Add ( host , options . port . Value , ServerCredentials . Insecure ) ; <nl> + server . Ports . Add ( host , options . Port , ServerCredentials . Insecure ) ; <nl> } <nl> Console . WriteLine ( " Running server on " + string . Format ( " { 0 } : { 1 } " , host , port ) ) ; <nl> server . Start ( ) ; <nl> <nl> server . ShutdownTask . Wait ( ) ; <nl> } <nl> - <nl> - private static ServerOptions ParseArguments ( string [ ] args ) <nl> - { <nl> - var options = new ServerOptions ( ) ; <nl> - foreach ( string arg in args ) <nl> - { <nl> - ParseArgument ( arg , options ) ; <nl> - if ( options . help ) <nl> - { <nl> - break ; <nl> - } <nl> - } <nl> - return options ; <nl> - } <nl> - <nl> - private static void ParseArgument ( string arg , ServerOptions options ) <nl> - { <nl> - Match match ; <nl> - match = Regex . Match ( arg , " - - port = ( . * ) " ) ; <nl> - if ( match . Success ) <nl> - { <nl> - options . port = int . Parse ( match . Groups [ 1 ] . Value . Trim ( ) ) ; <nl> - return ; <nl> - } <nl> - <nl> - match = Regex . Match ( arg , " - - use_tls = ( . * ) " ) ; <nl> - if ( match . Success ) <nl> - { <nl> - options . useTls = bool . Parse ( match . Groups [ 1 ] . Value . 
Trim ( ) ) ; <nl> - return ; <nl> - } <nl> - <nl> - Console . WriteLine ( string . Format ( " Unrecognized argument \ " { 0 } \ " " , arg ) ) ; <nl> - options . help = true ; <nl> - } <nl> } <nl> } <nl> mmm a / src / csharp / Grpc . IntegrationTesting / packages . config <nl> ppp b / src / csharp / Grpc . IntegrationTesting / packages . config <nl> <nl> < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> < packages > <nl> < package id = " BouncyCastle " version = " 1 . 7 . 0 " targetFramework = " net45 " / > <nl> + < package id = " CommandLineParser " version = " 1 . 9 . 71 " targetFramework = " net45 " / > <nl> < package id = " Google . Apis . Auth " version = " 1 . 9 . 3 " targetFramework = " net45 " / > <nl> < package id = " Google . Apis . Core " version = " 1 . 9 . 3 " targetFramework = " net45 " / > <nl> < package id = " Google . Protobuf " version = " 3 . 0 . 0 - alpha4 " targetFramework = " net45 " / > <nl> | update the interop tests based on spec | grpc/grpc | b26972fab46cb8ef26945372b1f26942cd41972d | 2015-09-04T00:47:14Z |
mmm a / modules / planning / tasks / speed_decider / speed_decider . cc <nl> ppp b / modules / planning / tasks / speed_decider / speed_decider . cc <nl> Status SpeedDecider : : MakeObjectDecision ( <nl> if ( boundary . boundary_type ( ) = = StBoundary : : BoundaryType : : KEEP_CLEAR ) { <nl> ObjectDecisionType stop_decision ; <nl> if ( CreateStopDecision ( * path_obstacle , & stop_decision , <nl> - FLAGS_stop_distance_traffic_light ) ) { <nl> + - FLAGS_stop_distance_traffic_light ) ) { <nl> path_obstacle - > AddLongitudinalDecision ( " dp_st_graph / keep_clear " , <nl> stop_decision ) ; <nl> } <nl> Status SpeedDecider : : MakeObjectDecision ( <nl> if ( IsFollowTooClose ( * path_obstacle ) ) { <nl> ObjectDecisionType stop_decision ; <nl> if ( CreateStopDecision ( * path_obstacle , & stop_decision , <nl> - FLAGS_min_stop_distance_obstacle ) ) { <nl> + - FLAGS_min_stop_distance_obstacle ) ) { <nl> path_obstacle - > AddLongitudinalDecision ( " dp_st_graph / too_close " , <nl> stop_decision ) ; <nl> } <nl> Status SpeedDecider : : MakeObjectDecision ( <nl> case CROSS : { <nl> ObjectDecisionType stop_decision ; <nl> if ( CreateStopDecision ( * path_obstacle , & stop_decision , <nl> - FLAGS_min_stop_distance_obstacle ) ) { <nl> + - FLAGS_min_stop_distance_obstacle ) ) { <nl> path_obstacle - > AddLongitudinalDecision ( " dp_st_graph " , stop_decision ) ; <nl> } <nl> } break ; <nl> | Planning : fixed a bug for stop distance . | ApolloAuto/apollo | 2f218747b62827e17171c1a7b87198861df57cc1 | 2017-12-28T19:19:23Z |
mmm a / include / v8 . h <nl> ppp b / include / v8 . h <nl> template < class T > class NonCopyablePersistentTraits ; <nl> template < class T > class PersistentBase ; <nl> template < class T , <nl> class M = NonCopyablePersistentTraits < T > > class Persistent ; <nl> - template < class T > <nl> - class Global ; <nl> + template < class T > class UniquePersistent ; <nl> template < class K , class V , class T > class PersistentValueMap ; <nl> template < class K , class V , class T > <nl> class PersistentValueMapBase ; <nl> template < class T > class PersistentBase { <nl> template < class F > friend class Handle ; <nl> template < class F > friend class Local ; <nl> template < class F1 , class F2 > friend class Persistent ; <nl> - template < class F > <nl> - friend class Global ; <nl> + template < class F > friend class UniquePersistent ; <nl> template < class F > friend class PersistentBase ; <nl> template < class F > friend class ReturnValue ; <nl> template < class F1 , class F2 , class F3 > <nl> template < class T , class M > class Persistent : public PersistentBase < T > { <nl> * <nl> * Note : Persistent class hierarchy is subject to future changes . <nl> * / <nl> - template < class T > <nl> - class Global : public PersistentBase < T > { <nl> + template < class T > <nl> + class UniquePersistent : public PersistentBase < T > { <nl> public : <nl> / * * <nl> - * A Global with no storage cell . <nl> + * A UniquePersistent with no storage cell . <nl> * / <nl> - V8_INLINE Global ( ) : PersistentBase < T > ( nullptr ) { } <nl> + V8_INLINE UniquePersistent ( ) : PersistentBase < T > ( nullptr ) { } <nl> / * * <nl> - * Construct a Global from a Handle . <nl> + * Construct a UniquePersistent from a Handle . <nl> * When the Handle is non - empty , a new storage cell is created <nl> * pointing to the same object , and no flags are set . <nl> * / <nl> template < class S > <nl> - V8_INLINE Global ( Isolate * isolate , Handle < S > that ) <nl> + V8_INLINE UniquePersistent ( Isolate * isolate , Handle < S > that ) <nl> : PersistentBase < T > ( PersistentBase < T > : : New ( isolate , * that ) ) { <nl> TYPE_CHECK ( T , S ) ; <nl> } <nl> / * * <nl> - * Construct a Global from a PersistentBase . <nl> + * Construct a UniquePersistent from a PersistentBase . <nl> * When the Persistent is non - empty , a new storage cell is created <nl> * pointing to the same object , and no flags are set . <nl> * / <nl> template < class S > <nl> - V8_INLINE Global ( Isolate * isolate , const PersistentBase < S > & that ) <nl> - : PersistentBase < T > ( PersistentBase < T > : : New ( isolate , that . val_ ) ) { <nl> + V8_INLINE UniquePersistent ( Isolate * isolate , const PersistentBase < S > & that ) <nl> + : PersistentBase < T > ( PersistentBase < T > : : New ( isolate , that . val_ ) ) { <nl> TYPE_CHECK ( T , S ) ; <nl> } <nl> / * * <nl> * Move constructor . <nl> * / <nl> - V8_INLINE Global ( Global & & other ) : PersistentBase < T > ( other . val_ ) { <nl> + V8_INLINE UniquePersistent ( UniquePersistent & & other ) <nl> + : PersistentBase < T > ( other . val_ ) { <nl> other . val_ = nullptr ; <nl> } <nl> - V8_INLINE ~ Global ( ) { this - > Reset ( ) ; } <nl> + V8_INLINE ~ UniquePersistent ( ) { this - > Reset ( ) ; } <nl> / * * <nl> * Move via assignment . <nl> * / <nl> template < class S > <nl> - V8_INLINE Global & operator = ( Global < S > & & rhs ) { <nl> + V8_INLINE UniquePersistent & operator = ( UniquePersistent < S > & & rhs ) { <nl> TYPE_CHECK ( T , S ) ; <nl> if ( this ! 
= & rhs ) { <nl> this - > Reset ( ) ; <nl> class Global : public PersistentBase < T > { <nl> / * * <nl> * Pass allows returning uniques from functions , etc . <nl> * / <nl> - Global Pass ( ) { return static_cast < Global & & > ( * this ) ; } <nl> + UniquePersistent Pass ( ) { return static_cast < UniquePersistent & & > ( * this ) ; } <nl> <nl> private : <nl> - Global ( Global & ) = delete ; <nl> - void operator = ( Global & ) = delete ; <nl> + UniquePersistent ( UniquePersistent & ) = delete ; <nl> + void operator = ( UniquePersistent & ) = delete ; <nl> } ; <nl> <nl> <nl> - / / UniquePersistent is an alias for Global for historical reason . <nl> - template < class T > <nl> - using UniquePersistent = Global < T > ; <nl> - <nl> - <nl> / * * <nl> * A stack - allocated class that governs a number of local handles . <nl> * After a handle scope has been created , all local handles will be <nl> mmm a / test / cctest / test - api . cc <nl> ppp b / test / cctest / test - api . cc <nl> THREADED_TEST ( InternalFieldsAlignedPointers ) { <nl> void * huge = reinterpret_cast < void * > ( ~ static_cast < uintptr_t > ( 1 ) ) ; <nl> CheckAlignedPointerInInternalField ( obj , huge ) ; <nl> <nl> - v8 : : Global < v8 : : Object > persistent ( isolate , obj ) ; <nl> + v8 : : UniquePersistent < v8 : : Object > persistent ( isolate , obj ) ; <nl> CHECK_EQ ( 1 , Object : : InternalFieldCount ( persistent ) ) ; <nl> CHECK_EQ ( huge , Object : : GetAlignedPointerFromInternalField ( persistent , 0 ) ) ; <nl> } <nl> THREADED_TEST ( ResettingGlobalHandleToEmpty ) { <nl> <nl> <nl> template < class T > <nl> - static v8 : : Global < T > PassUnique ( v8 : : Global < T > unique ) { <nl> + static v8 : : UniquePersistent < T > PassUnique ( v8 : : UniquePersistent < T > unique ) { <nl> return unique . Pass ( ) ; <nl> } <nl> <nl> <nl> template < class T > <nl> - static v8 : : Global < T > ReturnUnique ( v8 : : Isolate * isolate , <nl> - const v8 : : Persistent < T > & global ) { <nl> - v8 : : Global < String > unique ( isolate , global ) ; <nl> + static v8 : : UniquePersistent < T > ReturnUnique ( v8 : : Isolate * isolate , <nl> + const v8 : : Persistent < T > & global ) { <nl> + v8 : : UniquePersistent < String > unique ( isolate , global ) ; <nl> return unique . Pass ( ) ; <nl> } <nl> <nl> <nl> - THREADED_TEST ( Global ) { <nl> + THREADED_TEST ( UniquePersistent ) { <nl> v8 : : Isolate * isolate = CcTest : : isolate ( ) ; <nl> v8 : : Persistent < String > global ; <nl> { <nl> THREADED_TEST ( Global ) { <nl> reinterpret_cast < v8 : : internal : : Isolate * > ( isolate ) - > global_handles ( ) ; <nl> int initial_handle_count = global_handles - > global_handles_count ( ) ; <nl> { <nl> - v8 : : Global < String > unique ( isolate , global ) ; <nl> + v8 : : UniquePersistent < String > unique ( isolate , global ) ; <nl> CHECK_EQ ( initial_handle_count + 1 , global_handles - > global_handles_count ( ) ) ; <nl> / / Test assignment via Pass <nl> { <nl> - v8 : : Global < String > copy = unique . Pass ( ) ; <nl> + v8 : : UniquePersistent < String > copy = unique . Pass ( ) ; <nl> CHECK ( unique . IsEmpty ( ) ) ; <nl> CHECK ( copy = = global ) ; <nl> CHECK_EQ ( initial_handle_count + 1 , <nl> THREADED_TEST ( Global ) { <nl> } <nl> / / Test ctor via Pass <nl> { <nl> - v8 : : Global < String > copy ( unique . Pass ( ) ) ; <nl> + v8 : : UniquePersistent < String > copy ( unique . Pass ( ) ) ; <nl> CHECK ( unique . 
IsEmpty ( ) ) ; <nl> CHECK ( copy = = global ) ; <nl> CHECK_EQ ( initial_handle_count + 1 , <nl> THREADED_TEST ( Global ) { <nl> } <nl> / / Test pass through function call <nl> { <nl> - v8 : : Global < String > copy = PassUnique ( unique . Pass ( ) ) ; <nl> + v8 : : UniquePersistent < String > copy = PassUnique ( unique . Pass ( ) ) ; <nl> CHECK ( unique . IsEmpty ( ) ) ; <nl> CHECK ( copy = = global ) ; <nl> CHECK_EQ ( initial_handle_count + 1 , <nl> THREADED_TEST ( Global ) { <nl> } <nl> / / Test pass from function call <nl> { <nl> - v8 : : Global < String > unique = ReturnUnique ( isolate , global ) ; <nl> + v8 : : UniquePersistent < String > unique = ReturnUnique ( isolate , global ) ; <nl> CHECK ( unique = = global ) ; <nl> CHECK_EQ ( initial_handle_count + 1 , global_handles - > global_handles_count ( ) ) ; <nl> } <nl> class WeakStdMapTraits : public v8 : : StdMapTraits < K , V > { <nl> return data . GetParameter ( ) - > key ; <nl> } <nl> static void DisposeCallbackData ( WeakCallbackDataType * data ) { delete data ; } <nl> - static void Dispose ( v8 : : Isolate * isolate , v8 : : Global < V > value , K key ) { } <nl> + static void Dispose ( v8 : : Isolate * isolate , v8 : : UniquePersistent < V > value , <nl> + K key ) { } <nl> } ; <nl> <nl> <nl> static void TestPersistentValueMap ( ) { <nl> typename Map : : PersistentValueReference ref = map . GetReference ( 7 ) ; <nl> CHECK ( expected - > Equals ( ref . NewLocal ( isolate ) ) ) ; <nl> } <nl> - v8 : : Global < v8 : : Object > removed = map . Remove ( 7 ) ; <nl> + v8 : : UniquePersistent < v8 : : Object > removed = map . Remove ( 7 ) ; <nl> CHECK_EQ ( 0 , static_cast < int > ( map . Size ( ) ) ) ; <nl> CHECK ( expected = = removed ) ; <nl> removed = map . Remove ( 7 ) ; <nl> static void TestPersistentValueMap ( ) { <nl> { <nl> typename Map : : PersistentValueReference ref ; <nl> Local < v8 : : Object > expected2 = v8 : : Object : : New ( isolate ) ; <nl> - removed = map . Set ( 8 , v8 : : Global < v8 : : Object > ( isolate , expected2 ) , & ref ) ; <nl> + removed = map . Set ( 8 , v8 : : UniquePersistent < v8 : : Object > ( isolate , expected2 ) , <nl> + & ref ) ; <nl> CHECK_EQ ( 1 , static_cast < int > ( map . Size ( ) ) ) ; <nl> CHECK ( expected = = removed ) ; <nl> CHECK ( expected2 - > Equals ( ref . NewLocal ( isolate ) ) ) ; <nl> TEST ( PersistentValueVector ) { <nl> <nl> Local < v8 : : Object > obj1 = v8 : : Object : : New ( isolate ) ; <nl> Local < v8 : : Object > obj2 = v8 : : Object : : New ( isolate ) ; <nl> - v8 : : Global < v8 : : Object > obj3 ( isolate , v8 : : Object : : New ( isolate ) ) ; <nl> + v8 : : UniquePersistent < v8 : : Object > obj3 ( isolate , v8 : : Object : : New ( isolate ) ) ; <nl> <nl> CHECK ( vector . IsEmpty ( ) ) ; <nl> CHECK_EQ ( 0 , static_cast < int > ( vector . Size ( ) ) ) ; <nl> | Revert of rename UniquePersistent to Global ( patchset id : 20001 of https : / / codereview . chromium . org / 980173003 / ) | v8/v8 | 8465f397927bb10ea32c958cf4f82a8e15c02e24 | 2015-03-05T12:10:32Z |
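A minimal usage sketch for the UniquePersistent API that this revert restores. It is not part of the patch: the helper name Retain and the caller lines are hypothetical, and only the pieces visible in the diff are relied on (construction from an Isolate* plus a handle, move-out via Pass(), Reset() to release the storage cell).

    #include <v8.h>

    // Hypothetical helper: retain a string handle beyond the current HandleScope.
    v8::UniquePersistent<v8::String> Retain(v8::Isolate* isolate,
                                            v8::Local<v8::String> str) {
      // Creates a new storage cell pointing at the same object.
      v8::UniquePersistent<v8::String> handle(isolate, str);
      // Pass() hands ownership to the caller by move, as exercised by the
      // PassUnique/ReturnUnique tests in the diff above.
      return handle.Pass();
    }

    // Caller side (assumes an Isolate* and a Local<String> already in scope):
    //   v8::UniquePersistent<v8::String> owned = Retain(isolate, some_local);
    //   owned.Reset();  // releases the storage cell when no longer needed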
mmm a / Source / Common / Include / Sequences . h <nl> ppp b / Source / Common / Include / Sequences . h <nl> static inline std : : pair < size_t , size_t > ColumnRangeWithMBLayoutFor ( size_t numCol <nl> { <nl> / / MBLayout of data and of FrameRange must be identical pointers , <nl> / / or in case of broadcasting , respective parent pointers . <nl> - / / MBLayouts that are identical in content but not object identity ( pointer ) are not admissible . <nl> - / / For those cases , use a ReconcileDynamicAxis node . <nl> + / / MBLayouts that are identical in content but not object identity ( pointer ) are admissible . <nl> + / / We rely on a runtime check . If this is inefficient , use a ReconcileDynamicAxis node . <nl> + / / ( Note : Earlier versions of CNTK did not accept same - content MBLayouts . ) <nl> if ( fr . m_pMBLayout ! = pMBLayout ) <nl> { <nl> / / if broadcast allowed then it is allowed to broadcast from an outer - loop value <nl> static inline std : : pair < size_t , size_t > ColumnRangeWithMBLayoutFor ( size_t numCol <nl> if ( fr . m_broadcastAllowed & & ! pMBLayout & & numCols = = 1 ) <nl> return std : : pair < size_t , size_t > ( 0 , numCols ) ; <nl> if ( fr . m_pMBLayout & & pMBLayout & & * fr . m_pMBLayout = = * pMBLayout ) <nl> - ; / / LogicError ( " DataFor : FrameRange ' s dynamic axis is inconsistent with matrix . They are compatible though - - are you missing a ReconcileDynamicAxis operation ? " ) ; <nl> + ; / / layouts are compatible - - you may proceed <nl> else <nl> LogicError ( " DataFor : FrameRange ' s dynamic axis is inconsistent with matrix . " ) ; <nl> } <nl> static inline std : : pair < DimensionVector , DimensionVector > TensorSliceWithMBLayou <nl> <nl> / / MBLayout of data and of FrameRange must be identical pointers , <nl> / / or in case of broadcasting , respective parent pointers . <nl> - / / MBLayouts that are identical in content but not object identity ( pointer ) are not admissible . <nl> - / / For those cases , use a ReconcileDynamicAxis node . <nl> + / / MBLayouts that are identical in content but not object identity ( pointer ) are admissible . <nl> + / / We rely on a runtime check . If this is inefficient , use a ReconcileDynamicAxis node . <nl> + / / ( Note : Earlier versions of CNTK did not accept same - content MBLayouts . ) <nl> if ( isTimeIteration & & fr . m_pMBLayout ! = pMBLayout ) <nl> { <nl> / / if broadcast allowed then it is allowed to broadcast from an outer - loop value <nl> static inline std : : pair < DimensionVector , DimensionVector > TensorSliceWithMBLayou <nl> if ( fr . m_pMBLayout / * get data for a loop * / & & ! pMBLayout / * ' data ' is not samples * / & & fr . m_broadcastAllowed / * we ' re OK with that * / ) <nl> ; / / the time dimension is broadcasting - - leave it as is <nl> else if ( fr . m_pMBLayout & & pMBLayout & & * fr . m_pMBLayout = = * pMBLayout ) <nl> - ; / / LogicError ( " DataFor : FrameRange ' s dynamic axis is inconsistent with matrix . They are compatible though - - are you missing a ReconcileDynamicAxis operation ? % s vs . % s " , <nl> - / / static_cast < string > ( * ( fr . m_pMBLayout ) ) . c_str ( ) , static_cast < string > ( * ( pMBLayout ) ) . c_str ( ) ) ; <nl> + ; / / layouts are compatible - - you may proceed <nl> else <nl> LogicError ( " DataFor : FrameRange ' s dynamic axis is inconsistent with matrix : % s vs . % s " , <nl> static_cast < string > ( * ( fr . m_pMBLayout ) ) . c_str ( ) , static_cast < string > ( * ( pMBLayout ) ) . 
c_str ( ) ) ; <nl> mmm a / Source / ComputationNetworkLib / ComputationNode . cpp <nl> ppp b / Source / ComputationNetworkLib / ComputationNode . cpp <nl> void ComputationNodeBase : : ValidateMBLayout ( const ComputationNodeBasePtr which , c <nl> # else <nl> / / We will let this slip with a reminder , assuming that this will be caught at runtime . <nl> / / By allowing this , users will not need ReconcileDynamicAxis ( ) for reductions over a sequence like BS . Sequences . Last ( ) . <nl> - fprintf ( stderr , " WARNING : % ls : Dynamic axes mismatch between % ls and % ls . If they are incompatible , this will fail later . If this is by design , use ReconcileDynamicAxis ( ) . \ n " , <nl> + fprintf ( stderr , " WARNING : % ls : Dynamic axes mismatch between % ls and % ls . If they are incompatible , this will fail later . \ n " , <nl> NodeDescription ( ) . c_str ( ) , which - > NodeDescription ( ) . c_str ( ) , vsWhich - > NodeDescription ( ) . c_str ( ) ) ; <nl> # endif <nl> } <nl> | ( addressed CR feedback ) | microsoft/CNTK | 08730dafb71aaca23afc0efa16623ff5b749287b | 2016-08-12T01:41:14Z |
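The comment changes in the Sequences.h hunks above amount to a three-way compatibility rule for dynamic-axis layouts: same object, same content (now admitted via a runtime check), otherwise a LogicError. The sketch below is a generic restatement of that rule, ignoring the broadcast special case; it assumes only a layout type with operator== and does not reproduce CNTK's real MBLayout/MBLayoutPtr or FrameRange types.

    #include <memory>

    // Illustrative only: Layout stands in for CNTK's MBLayout.
    template <typename Layout>
    bool LayoutsCompatible(const std::shared_ptr<Layout>& a,
                           const std::shared_ptr<Layout>& b) {
      if (a == b)
        return true;   // identical objects: always accepted
      if (a && b && *a == *b)
        return true;   // identical content: newly admissible, verified at runtime
      return false;    // otherwise DataFor()/TensorSliceWithMBLayoutFor() raise a LogicError
    }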
mmm a / lib / SIL / SILWitnessTable . cpp <nl> ppp b / lib / SIL / SILWitnessTable . cpp <nl> bool SILWitnessTable : : conformanceIsSerialized ( ProtocolConformance * conformance , <nl> return true ; <nl> <nl> auto * nominal = conformance - > getType ( ) - > getAnyNominal ( ) ; <nl> - / / Only serialize if the witness table is sufficiently static , and resilience <nl> - / / is explicitly enabled for this compilation or if we serialize all eligible <nl> - / / witness tables . <nl> - auto moduleIsResilient = strategy = = ResilienceStrategy : : Resilient ; <nl> + / / Only serialize witness tables for fixed layout types . <nl> + / / <nl> + / / FIXME : This is not the right long term solution . We need an explicit <nl> + / / mechanism for declaring conformances as ' fragile ' . <nl> auto protocolIsPublic = <nl> conformance - > getProtocol ( ) - > getEffectiveAccess ( ) > = AccessLevel : : Public ; <nl> auto typeIsPublic = nominal - > getEffectiveAccess ( ) > = AccessLevel : : Public ; <nl> | Merge pull request from slavapestov / fix - a - warning | apple/swift | 1f0fcb8d092931708c07dae944f2964b0ef5947e | 2017-10-22T02:11:28Z |
mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> set ( SOURCES <nl> utilities / merge_operators / string_append / stringappend . cc <nl> utilities / merge_operators / string_append / stringappend2 . cc <nl> utilities / merge_operators / put . cc <nl> + utilities / merge_operators / max . cc <nl> utilities / merge_operators / uint64add . cc <nl> utilities / options / options_util . cc <nl> utilities / redis / redis_lists . cc <nl> mmm a / Makefile <nl> ppp b / Makefile <nl> TESTS = \ <nl> memory_test \ <nl> merge_test \ <nl> merger_test \ <nl> + util_merge_operators_test \ <nl> options_file_test \ <nl> redis_test \ <nl> reduce_levels_test \ <nl> merge_test : db / merge_test . o $ ( LIBOBJECTS ) $ ( TESTHARNESS ) <nl> merger_test : table / merger_test . o $ ( LIBOBJECTS ) $ ( TESTHARNESS ) <nl> $ ( AM_LINK ) <nl> <nl> + util_merge_operators_test : utilities / util_merge_operators_test . o $ ( LIBOBJECTS ) $ ( TESTHARNESS ) <nl> + $ ( AM_LINK ) <nl> + <nl> options_file_test : db / options_file_test . o $ ( LIBOBJECTS ) $ ( TESTHARNESS ) <nl> $ ( AM_LINK ) <nl> <nl> mmm a / src . mk <nl> ppp b / src . mk <nl> LIB_SOURCES = \ <nl> utilities / leveldb_options / leveldb_options . cc \ <nl> utilities / memory / memory_util . cc \ <nl> utilities / merge_operators / put . cc \ <nl> + utilities / merge_operators / max . cc \ <nl> utilities / merge_operators / string_append / stringappend2 . cc \ <nl> utilities / merge_operators / string_append / stringappend . cc \ <nl> utilities / merge_operators / uint64add . cc \ <nl> mmm a / utilities / merge_operators . h <nl> ppp b / utilities / merge_operators . h <nl> class MergeOperators { <nl> static std : : shared_ptr < MergeOperator > CreateUInt64AddOperator ( ) ; <nl> static std : : shared_ptr < MergeOperator > CreateStringAppendOperator ( ) ; <nl> static std : : shared_ptr < MergeOperator > CreateStringAppendTESTOperator ( ) ; <nl> + static std : : shared_ptr < MergeOperator > CreateMaxOperator ( ) ; <nl> <nl> / / Will return a different merge operator depending on the string . <nl> / / TODO : Hook the " name " up to the actual Name ( ) of the MergeOperators ? <nl> class MergeOperators { <nl> return CreateStringAppendOperator ( ) ; <nl> } else if ( name = = " stringappendtest " ) { <nl> return CreateStringAppendTESTOperator ( ) ; <nl> + } else if ( name = = " max " ) { <nl> + return CreateMaxOperator ( ) ; <nl> } else { <nl> / / Empty or unknown , just return nullptr <nl> return nullptr ; <nl> new file mode 100644 <nl> index 0000000000 . . ee05a7cc20 <nl> mmm / dev / null <nl> ppp b / utilities / merge_operators / max . cc <nl> <nl> + / / Copyright ( c ) 2011 - present , Facebook , Inc . All rights reserved . <nl> + / / This source code is licensed under the BSD - style license found in the <nl> + / / LICENSE file in the root directory of this source tree . An additional grant <nl> + / / of patent rights can be found in the PATENTS file in the same directory . <nl> + <nl> + # include < memory > <nl> + <nl> + # include " rocksdb / merge_operator . h " <nl> + # include " rocksdb / slice . h " <nl> + # include " utilities / merge_operators . 
h " <nl> + <nl> + using rocksdb : : Slice ; <nl> + using rocksdb : : Logger ; <nl> + using rocksdb : : MergeOperator ; <nl> + <nl> + namespace { / / anonymous namespace <nl> + <nl> + / / Merge operator that picks the maximum operand , Comparison is based on <nl> + / / Slice : : compare <nl> + class MaxOperator : public MergeOperator { <nl> + public : <nl> + virtual bool FullMerge ( const Slice & key , const Slice * existing_value , <nl> + const std : : deque < std : : string > & operand_list , <nl> + std : : string * new_value , <nl> + Logger * logger ) const override { <nl> + Slice max ; <nl> + if ( existing_value ) { <nl> + max = Slice ( existing_value - > data ( ) , existing_value - > size ( ) ) ; <nl> + } <nl> + <nl> + for ( const auto & op : operand_list ) { <nl> + if ( max . compare ( op ) < 0 ) { <nl> + max = Slice ( op . data ( ) , op . size ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + new_value - > assign ( max . data ( ) , max . size ( ) ) ; <nl> + return true ; <nl> + } <nl> + <nl> + virtual bool PartialMerge ( const Slice & key , const Slice & left_operand , <nl> + const Slice & right_operand , std : : string * new_value , <nl> + Logger * logger ) const override { <nl> + if ( left_operand . compare ( right_operand ) > = 0 ) { <nl> + new_value - > assign ( left_operand . data ( ) , left_operand . size ( ) ) ; <nl> + } else { <nl> + new_value - > assign ( right_operand . data ( ) , right_operand . size ( ) ) ; <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + virtual bool PartialMergeMulti ( const Slice & key , <nl> + const std : : deque < Slice > & operand_list , <nl> + std : : string * new_value , <nl> + Logger * logger ) const override { <nl> + Slice max ; <nl> + for ( const auto & operand : operand_list ) { <nl> + if ( max . compare ( operand ) < 0 ) { <nl> + max = operand ; <nl> + } <nl> + } <nl> + <nl> + new_value - > assign ( max . data ( ) , max . size ( ) ) ; <nl> + return true ; <nl> + } <nl> + <nl> + virtual const char * Name ( ) const override { return " MaxOperator " ; } <nl> + } ; <nl> + <nl> + } / / end of anonymous namespace <nl> + <nl> + namespace rocksdb { <nl> + <nl> + std : : shared_ptr < MergeOperator > MergeOperators : : CreateMaxOperator ( ) { <nl> + return std : : make_shared < MaxOperator > ( ) ; <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 0000000000 . . c80ce6ffb7 <nl> mmm / dev / null <nl> ppp b / utilities / util_merge_operators_test . cc <nl> <nl> + / / Copyright ( c ) 2011 - present , Facebook , Inc . All rights reserved . <nl> + / / This source code is licensed under the BSD - style license found in the <nl> + / / LICENSE file in the root directory of this source tree . An additional grant <nl> + / / of patent rights can be found in the PATENTS file in the same directory . <nl> + <nl> + # include " util / testharness . h " <nl> + # include " util / testutil . h " <nl> + # include " utilities / merge_operators . 
h " <nl> + <nl> + namespace rocksdb { <nl> + <nl> + class UtilMergeOperatorTest : public testing : : Test { <nl> + public : <nl> + UtilMergeOperatorTest ( ) { } <nl> + <nl> + std : : string FullMerge ( std : : string existing_value , <nl> + std : : deque < std : : string > operands , <nl> + std : : string key = " " ) { <nl> + Slice existing_value_slice ( existing_value ) ; <nl> + std : : string result ; <nl> + <nl> + merge_operator_ - > FullMerge ( key , & existing_value_slice , operands , & result , <nl> + nullptr ) ; <nl> + return result ; <nl> + } <nl> + <nl> + std : : string FullMerge ( std : : deque < std : : string > operands , <nl> + std : : string key = " " ) { <nl> + std : : string result ; <nl> + <nl> + merge_operator_ - > FullMerge ( key , nullptr , operands , & result , nullptr ) ; <nl> + return result ; <nl> + } <nl> + <nl> + std : : string PartialMerge ( std : : string left , std : : string right , <nl> + std : : string key = " " ) { <nl> + std : : string result ; <nl> + <nl> + merge_operator_ - > PartialMerge ( key , left , right , & result , nullptr ) ; <nl> + return result ; <nl> + } <nl> + <nl> + std : : string PartialMergeMulti ( std : : deque < std : : string > operands , <nl> + std : : string key = " " ) { <nl> + std : : string result ; <nl> + std : : deque < Slice > operands_slice ( operands . begin ( ) , operands . end ( ) ) ; <nl> + <nl> + merge_operator_ - > PartialMergeMulti ( key , operands_slice , & result , nullptr ) ; <nl> + return result ; <nl> + } <nl> + <nl> + protected : <nl> + std : : shared_ptr < MergeOperator > merge_operator_ ; <nl> + } ; <nl> + <nl> + TEST_F ( UtilMergeOperatorTest , MaxMergeOperator ) { <nl> + merge_operator_ = MergeOperators : : CreateMaxOperator ( ) ; <nl> + <nl> + EXPECT_EQ ( " B " , FullMerge ( " B " , { " A " } ) ) ; <nl> + EXPECT_EQ ( " B " , FullMerge ( " A " , { " B " } ) ) ; <nl> + EXPECT_EQ ( " " , FullMerge ( { " " , " " , " " } ) ) ; <nl> + EXPECT_EQ ( " A " , FullMerge ( { " A " } ) ) ; <nl> + EXPECT_EQ ( " ABC " , FullMerge ( { " ABC " } ) ) ; <nl> + EXPECT_EQ ( " Z " , FullMerge ( { " ABC " , " Z " , " C " , " AXX " } ) ) ; <nl> + EXPECT_EQ ( " ZZZ " , FullMerge ( { " ABC " , " CC " , " Z " , " ZZZ " } ) ) ; <nl> + EXPECT_EQ ( " a " , FullMerge ( " a " , { " ABC " , " CC " , " Z " , " ZZZ " } ) ) ; <nl> + <nl> + EXPECT_EQ ( " z " , PartialMergeMulti ( { " a " , " z " , " efqfqwgwew " , " aaz " , " hhhhh " } ) ) ; <nl> + <nl> + EXPECT_EQ ( " b " , PartialMerge ( " a " , " b " ) ) ; <nl> + EXPECT_EQ ( " z " , PartialMerge ( " z " , " azzz " ) ) ; <nl> + EXPECT_EQ ( " a " , PartialMerge ( " a " , " " ) ) ; <nl> + } <nl> + <nl> + } / / namespace rocksdb <nl> + <nl> + int main ( int argc , char * * argv ) { <nl> + : : testing : : InitGoogleTest ( & argc , argv ) ; <nl> + return RUN_ALL_TESTS ( ) ; <nl> + } <nl> | Add MaxOperator to utilities / merge_operators / | facebook/rocksdb | 1f2dca0eaa21ec24103290a82d21af12050b4977 | 2016-05-19T22:51:29Z |
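A sketch of how the new operator might be wired into a database, using the public Options/DB API together with the in-tree utilities/merge_operators.h header that this commit extends. The database path and key are made up for illustration, and error handling is reduced to asserts.

    #include <cassert>
    #include <string>

    #include "rocksdb/db.h"
    #include "rocksdb/options.h"
    #include "utilities/merge_operators.h"  // declares MergeOperators::CreateMaxOperator()

    int main() {
      rocksdb::Options options;
      options.create_if_missing = true;
      options.merge_operator = rocksdb::MergeOperators::CreateMaxOperator();

      rocksdb::DB* db = nullptr;
      rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/max_merge_demo", &db);
      assert(s.ok());

      // Merge instead of Put: the operator keeps the largest operand under
      // Slice::compare, i.e. a byte-wise (lexicographic) maximum.
      db->Merge(rocksdb::WriteOptions(), "score", "B");
      db->Merge(rocksdb::WriteOptions(), "score", "Z");
      db->Merge(rocksdb::WriteOptions(), "score", "A");

      std::string value;
      s = db->Get(rocksdb::ReadOptions(), "score", &value);
      assert(s.ok() && value == "Z");

      delete db;
      return 0;
    }

Because the comparison is byte-wise, numeric values only behave as a numeric maximum if they are encoded with a fixed width (for example zero-padded decimal or big-endian binary).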
new file mode 100644 <nl> index 00000000000 . . 808aa5c5b82 <nl> mmm / dev / null <nl> ppp b / tests / other / malloc_implicit / test . cpp <nl> <nl> + # include < stdlib . h > <nl> + # include < stdio . h > <nl> + # include < assert . h > <nl> + int main ( ) { <nl> + const char * home = getenv ( " HOME " ) ; <nl> + for ( unsigned int i = 0 ; i < 5 ; + + i ) { <nl> + const char * curr = getenv ( " HOME " ) ; <nl> + assert ( curr = = home ) ; <nl> + } <nl> + printf ( " ok \ n " ) ; <nl> + } <nl> + <nl> new file mode 100644 <nl> index 00000000000 . . 4f25bafb19d <nl> mmm / dev / null <nl> ppp b / tests / other / malloc_implicit / test . out <nl> <nl> + ok <nl> + <nl> mmm a / tests / test_other . py <nl> ppp b / tests / test_other . py <nl> def __exit__ ( self , type , value , traceback ) : <nl> self . clean_emcc_files_in_temp_dir ( ) <nl> <nl> class other ( RunnerCore ) : <nl> + # Utility to run a simple test in this suite . This receives a directory which <nl> + # should contain a test . cpp and test . out files , compiles the cpp , and runs it <nl> + # to verify the output , with optional compile and run arguments . <nl> + # TODO : use in more places <nl> + def do_other_test ( self , dirname , emcc_args = [ ] , run_args = [ ] ) : <nl> + shutil . copyfile ( path_from_root ( ' tests ' , dirname , ' test . cpp ' ) , ' test . cpp ' ) <nl> + run_process ( [ PYTHON , EMCC , ' test . cpp ' ] + emcc_args ) <nl> + expected = open ( path_from_root ( ' tests ' , dirname , ' test . out ' ) ) . read ( ) <nl> + seen = run_js ( ' a . out . js ' , args = run_args ) + ' \ n ' <nl> + self . assertContained ( expected , seen ) <nl> + <nl> def test_emcc_v ( self ) : <nl> for compiler in [ EMCC , EMXX ] : <nl> # - v , without input files <nl> def test_dashM_respect_dashO ( self ) : <nl> assert len ( without_dash_o ) ! = 0 <nl> <nl> def test_malloc_implicit ( self ) : <nl> - open ( ' src . cpp ' , ' w ' ) . write ( r ' ' ' <nl> - # include < stdlib . h > <nl> - # include < stdio . h > <nl> - # include < assert . h > <nl> - int main ( ) { <nl> - const char * home = getenv ( " HOME " ) ; <nl> - for ( unsigned int i = 0 ; i < 5 ; + + i ) { <nl> - const char * curr = getenv ( " HOME " ) ; <nl> - assert ( curr = = home ) ; <nl> - } <nl> - printf ( " ok \ n " ) ; <nl> - } <nl> - ' ' ' ) <nl> - Popen ( [ PYTHON , EMCC , ' src . cpp ' ] ) . communicate ( ) <nl> - self . assertContained ( ' ok ' , run_js ( ' a . out . js ' ) ) <nl> + self . do_other_test ( os . path . join ( ' other ' , ' malloc_implicit ' ) ) <nl> <nl> def test_switch64phi ( self ) : <nl> # issue 2539 , fastcomp segfault on phi - i64 interaction <nl> | add do_other_test ( ) , a simple way to run a test in the ' other ' suite ( ) | emscripten-core/emscripten | 9ff0a9ae7c83f26a37786f573cfb2fac5be479a1 | 2018-04-23T18:15:47Z |
mmm a / Cocos2dSimpleGame / HelloWorldScene . cpp <nl> ppp b / Cocos2dSimpleGame / HelloWorldScene . cpp <nl> void HelloWorld : : spriteMoveFinished ( CCNode * sender ) <nl> void HelloWorld : : addTarget ( ) <nl> { <nl> <nl> - CCSprite * target = CCSprite : : spriteWithFile ( ResInfo [ 4 ] . ResName . c_str ( ) ) ; <nl> + CCSprite * target = CCSprite : : spriteWithFile ( ResInfo [ 4 ] . ResName ) ; <nl> <nl> / / Determine where to spawn the target along the Y axis <nl> CGSize winSize = CCDirector : : getSharedDirector ( ) - > getWinSize ( ) ; <nl> bool HelloWorld : : init ( ) <nl> / / cocos2d - uphone : add a menu item with " X " image , which is clicked to quit the program <nl> / / add a " close " icon , it ' s an autorelease object <nl> CCMenuItemImage * pCloseItem = CCMenuItemImage : : itemFromNormalImage ( <nl> - ResInfo [ 0 ] . ResName . c_str ( ) , <nl> - ResInfo [ 1 ] . ResName . c_str ( ) , <nl> + ResInfo [ 0 ] . ResName , <nl> + ResInfo [ 1 ] . ResName , <nl> this , <nl> menu_selector ( HelloWorld : : menuCloseCallback ) ) ; <nl> pCloseItem - > setPosition ( ccp ( CCDirector : : getSharedDirector ( ) - > getWinSize ( ) . width - 30 , 30 ) ) ; <nl> bool HelloWorld : : init ( ) <nl> / / and as close to the left side edge as we can get <nl> / / Remember that position is based on the anchor point , and by default the anchor <nl> / / point is the middle of the object . <nl> - CCSprite * player = CCSprite : : spriteWithFile ( ResInfo [ 2 ] . ResName . c_str ( ) ) ; <nl> + CCSprite * player = CCSprite : : spriteWithFile ( ResInfo [ 2 ] . ResName ) ; <nl> player - > setPosition ( ccp ( player - > getContentSize ( ) . width / 2 , winSize . height / 2 ) ) ; <nl> this - > addChild ( player ) ; <nl> <nl> void HelloWorld : : ccTouchesEnded ( NSSet * touches , UIEvent * event ) <nl> <nl> / / Set up initial location of projectile <nl> CGSize winSize = CCDirector : : getSharedDirector ( ) - > getWinSize ( ) ; <nl> - CCSprite * projectile = CCSprite : : spriteWithFile ( ResInfo [ 3 ] . ResName . c_str ( ) ) ; <nl> + CCSprite * projectile = CCSprite : : spriteWithFile ( ResInfo [ 3 ] . ResName ) ; <nl> projectile - > setPosition ( ccp ( 20 , winSize . height / 2 ) ) ; <nl> <nl> / / Determinie offset of location to projectile <nl> mmm a / cocos2dx / platform / uphone / CCXFileUtils_uphone . h <nl> ppp b / cocos2dx / platform / uphone / CCXFileUtils_uphone . h <nl> namespace cocos2d { <nl> <nl> typedef struct <nl> { <nl> - std : : string ResName ; <nl> + const char * ResName ; <nl> int nResID ; <nl> } T_ResourceInfo ; <nl> <nl> | issue | cocos2d/cocos2d-x | 1f8442cc05dccb0e144e5dfe6d86015a7202765a | 2010-10-21T02:24:46Z |
mmm a / src / builtins / base . tq <nl> ppp b / src / builtins / base . tq <nl> <nl> # include ' src / objects / js - regexp - string - iterator . h ' <nl> # include ' src / objects / module . h ' <nl> # include ' src / objects / stack - frame - info . h ' <nl> - # include ' src / objects / template - objects . h ' <nl> # include ' src / builtins / builtins - regexp - gen . h ' <nl> <nl> type void ; <nl> extern class JSSloppyArgumentsObject extends JSArgumentsObjectWithLength { <nl> callee : Object ; <nl> } <nl> <nl> - @ hasSameInstanceTypeAsParent <nl> - @ noVerifier <nl> - extern class JSStrictArgumentsObject extends JSArgumentsObjectWithLength { <nl> - } <nl> - <nl> extern class JSArrayIterator extends JSObject { <nl> iterated_object : JSReceiver ; <nl> next_index : Number ; <nl> transient type FastJSArrayWithNoCustomIteration extends FastJSArray ; <nl> <nl> type NoSharedNameSentinel extends Smi ; <nl> <nl> - type ModuleInfo extends FixedArray ; <nl> - type ObjectHashTable extends FixedArray ; <nl> - <nl> - extern class Module extends Struct { <nl> - code : SharedFunctionInfo | JSFunction | <nl> - JSGeneratorObject | ModuleInfo ; <nl> - exports : ObjectHashTable ; <nl> - regular_exports : FixedArray ; <nl> - regular_imports : FixedArray ; <nl> - hash : Smi ; <nl> - status : Smi ; <nl> - module_namespace : JSModuleNamespace | Undefined ; <nl> - requested_modules : FixedArray ; <nl> - dfs_index : Smi ; <nl> - dfs_ancestor_index : Smi ; <nl> - exception : Object ; <nl> - script : Script ; <nl> - @ noVerifier import_meta : Hole | JSObject ; <nl> - } <nl> - <nl> - extern class JSModuleNamespace extends JSObject { <nl> - module : Module ; <nl> - to_string_tag_field : Object ; <nl> - } <nl> - <nl> - @ hasSameInstanceTypeAsParent <nl> - @ noVerifier <nl> - extern class TemplateList extends FixedArray { <nl> - } <nl> - <nl> + type JSModuleNamespace extends JSObject ; <nl> type WeakArrayList extends HeapObject ; <nl> <nl> @ abstract <nl> mmm a / src / objects - debug . cc <nl> ppp b / src / objects - debug . cc <nl> void BigInt : : BigIntVerify ( Isolate * isolate ) { <nl> CHECK_IMPLIES ( is_zero ( ) , ! sign ( ) ) ; / / There is no - 0n . 
<nl> } <nl> <nl> - USE_TORQUE_VERIFIER ( JSModuleNamespace ) <nl> + void JSModuleNamespace : : JSModuleNamespaceVerify ( Isolate * isolate ) { <nl> + CHECK ( IsJSModuleNamespace ( ) ) ; <nl> + VerifyPointer ( isolate , module ( ) ) ; <nl> + } <nl> <nl> void ModuleInfoEntry : : ModuleInfoEntryVerify ( Isolate * isolate ) { <nl> TorqueGeneratedClassVerifiers : : ModuleInfoEntryVerify ( * this , isolate ) ; <nl> void ModuleInfoEntry : : ModuleInfoEntryVerify ( Isolate * isolate ) { <nl> } <nl> <nl> void Module : : ModuleVerify ( Isolate * isolate ) { <nl> - TorqueGeneratedClassVerifiers : : ModuleVerify ( * this , isolate ) ; <nl> + CHECK ( IsModule ( ) ) ; <nl> + <nl> + VerifyPointer ( isolate , code ( ) ) ; <nl> + VerifyPointer ( isolate , exports ( ) ) ; <nl> + VerifyPointer ( isolate , module_namespace ( ) ) ; <nl> + VerifyPointer ( isolate , requested_modules ( ) ) ; <nl> + VerifyPointer ( isolate , script ( ) ) ; <nl> + VerifyPointer ( isolate , import_meta ( ) ) ; <nl> + VerifyPointer ( isolate , exception ( ) ) ; <nl> + VerifySmiField ( kHashOffset ) ; <nl> + VerifySmiField ( kStatusOffset ) ; <nl> <nl> CHECK ( ( status ( ) > = kEvaluating & & code ( ) - > IsModuleInfo ( ) ) | | <nl> ( status ( ) = = kInstantiated & & code ( ) - > IsJSGeneratorObject ( ) ) | | <nl> void UncompiledDataWithoutPreparseData : : UncompiledDataWithoutPreparseDataVerify ( <nl> USE_TORQUE_VERIFIER ( InterpreterData ) <nl> <nl> # ifdef V8_INTL_SUPPORT <nl> - <nl> - USE_TORQUE_VERIFIER ( JSV8BreakIterator ) <nl> - <nl> - USE_TORQUE_VERIFIER ( JSCollator ) <nl> + void JSV8BreakIterator : : JSV8BreakIteratorVerify ( Isolate * isolate ) { <nl> + JSObjectVerify ( isolate ) ; <nl> + VerifyObjectField ( isolate , kLocaleOffset ) ; <nl> + VerifyObjectField ( isolate , kTypeOffset ) ; <nl> + VerifyObjectField ( isolate , kBreakIteratorOffset ) ; <nl> + VerifyObjectField ( isolate , kUnicodeStringOffset ) ; <nl> + VerifyObjectField ( isolate , kBoundAdoptTextOffset ) ; <nl> + VerifyObjectField ( isolate , kBoundFirstOffset ) ; <nl> + VerifyObjectField ( isolate , kBoundNextOffset ) ; <nl> + VerifyObjectField ( isolate , kBoundCurrentOffset ) ; <nl> + VerifyObjectField ( isolate , kBoundBreakTypeOffset ) ; <nl> + } <nl> + <nl> + void JSCollator : : JSCollatorVerify ( Isolate * isolate ) { <nl> + CHECK ( IsJSCollator ( ) ) ; <nl> + JSObjectVerify ( isolate ) ; <nl> + VerifyObjectField ( isolate , kICUCollatorOffset ) ; <nl> + VerifyObjectField ( isolate , kBoundCompareOffset ) ; <nl> + } <nl> <nl> void JSDateTimeFormat : : JSDateTimeFormatVerify ( Isolate * isolate ) { <nl> TorqueGeneratedClassVerifiers : : JSDateTimeFormatVerify ( * this , isolate ) ; <nl> mmm a / src / objects / intl - objects . tq <nl> ppp b / src / objects / intl - objects . tq <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . <nl> <nl> - # include ' src / objects / js - break - iterator . h ' <nl> - # include ' src / objects / js - collator . h ' <nl> # include ' src / objects / js - number - format . h ' <nl> # include ' src / objects / js - objects . h ' <nl> # include ' src / objects / js - plural - rules . 
h ' <nl> extern class JSSegmentIterator extends JSObject { <nl> unicode_string : Foreign ; / / Managed < icu : : UnicodeString > <nl> flags : Smi ; <nl> } <nl> - <nl> - extern class JSV8BreakIterator extends JSObject { <nl> - locale : String ; <nl> - break_iterator : Foreign ; / / Managed < icu : : BreakIterator > ; <nl> - unicode_string : Foreign ; / / Managed < icu : : UnicodeString > ; <nl> - bound_adopt_text : Undefined | JSFunction ; <nl> - bound_first : Undefined | JSFunction ; <nl> - bound_next : Undefined | JSFunction ; <nl> - bound_current : Undefined | JSFunction ; <nl> - bound_break_type : Undefined | JSFunction ; <nl> - break_iterator_type : Smi ; <nl> - } <nl> - <nl> - extern class JSCollator extends JSObject { <nl> - icu_collator : Foreign ; / / Managed < icu : : Collator > <nl> - bound_compare : Undefined | JSFunction ; <nl> - } <nl> mmm a / src / objects / js - break - iterator - inl . h <nl> ppp b / src / objects / js - break - iterator - inl . h <nl> OBJECT_CONSTRUCTORS_IMPL ( JSV8BreakIterator , JSObject ) <nl> <nl> inline void JSV8BreakIterator : : set_type ( Type type ) { <nl> DCHECK_GT ( JSV8BreakIterator : : Type : : COUNT , type ) ; <nl> - WRITE_FIELD ( * this , kBreakIteratorTypeOffset , <nl> - Smi : : FromInt ( static_cast < int > ( type ) ) ) ; <nl> + WRITE_FIELD ( * this , kTypeOffset , Smi : : FromInt ( static_cast < int > ( type ) ) ) ; <nl> } <nl> <nl> inline JSV8BreakIterator : : Type JSV8BreakIterator : : type ( ) const { <nl> - Object value = READ_FIELD ( * this , kBreakIteratorTypeOffset ) ; <nl> + Object value = READ_FIELD ( * this , kTypeOffset ) ; <nl> return static_cast < JSV8BreakIterator : : Type > ( Smi : : ToInt ( value ) ) ; <nl> } <nl> <nl> mmm a / src / objects / js - break - iterator . h <nl> ppp b / src / objects / js - break - iterator . h <nl> <nl> # include " src / objects . h " <nl> # include " src / objects / intl - objects . h " <nl> # include " src / objects / managed . h " <nl> - # include " torque - generated / field - offsets - tq . h " <nl> <nl> / / Has to be the last include ( doesn ' t have include guards ) : <nl> # include " src / objects / object - macros . h " <nl> class JSV8BreakIterator : public JSObject { <nl> DECL_ACCESSORS ( bound_break_type , Object ) <nl> <nl> / / Layout description . <nl> - DEFINE_FIELD_OFFSET_CONSTANTS ( JSObject : : kHeaderSize , <nl> - TORQUE_GENERATED_JSV8BREAK_ITERATOR_FIELDS ) <nl> + # define BREAK_ITERATOR_FIELDS ( V ) \ <nl> + / * Pointer fields . * / \ <nl> + V ( kLocaleOffset , kTaggedSize ) \ <nl> + V ( kTypeOffset , kTaggedSize ) \ <nl> + V ( kBreakIteratorOffset , kTaggedSize ) \ <nl> + V ( kUnicodeStringOffset , kTaggedSize ) \ <nl> + V ( kBoundAdoptTextOffset , kTaggedSize ) \ <nl> + V ( kBoundFirstOffset , kTaggedSize ) \ <nl> + V ( kBoundNextOffset , kTaggedSize ) \ <nl> + V ( kBoundCurrentOffset , kTaggedSize ) \ <nl> + V ( kBoundBreakTypeOffset , kTaggedSize ) \ <nl> + / * Total Size * / \ <nl> + V ( kSize , 0 ) <nl> + <nl> + DEFINE_FIELD_OFFSET_CONSTANTS ( JSObject : : kHeaderSize , BREAK_ITERATOR_FIELDS ) <nl> + # undef BREAK_ITERATOR_FIELDS <nl> <nl> OBJECT_CONSTRUCTORS ( JSV8BreakIterator , JSObject ) ; <nl> } ; <nl> mmm a / src / objects / js - collator - inl . h <nl> ppp b / src / objects / js - collator - inl . 
h <nl> namespace internal { <nl> <nl> OBJECT_CONSTRUCTORS_IMPL ( JSCollator , JSObject ) <nl> <nl> - ACCESSORS ( JSCollator , icu_collator , Managed < icu : : Collator > , kIcuCollatorOffset ) <nl> + ACCESSORS ( JSCollator , icu_collator , Managed < icu : : Collator > , kICUCollatorOffset ) <nl> ACCESSORS ( JSCollator , bound_compare , Object , kBoundCompareOffset ) <nl> <nl> CAST_ACCESSOR ( JSCollator ) <nl> mmm a / src / objects / js - collator . h <nl> ppp b / src / objects / js - collator . h <nl> class JSCollator : public JSObject { <nl> DECL_VERIFIER ( JSCollator ) <nl> <nl> / / Layout description . <nl> - DEFINE_FIELD_OFFSET_CONSTANTS ( JSObject : : kHeaderSize , <nl> - TORQUE_GENERATED_JSCOLLATOR_FIELDS ) <nl> + # define JS_COLLATOR_FIELDS ( V ) \ <nl> + V ( kICUCollatorOffset , kTaggedSize ) \ <nl> + V ( kBoundCompareOffset , kTaggedSize ) \ <nl> + / * Total size . * / \ <nl> + V ( kSize , 0 ) <nl> + <nl> + DEFINE_FIELD_OFFSET_CONSTANTS ( JSObject : : kHeaderSize , JS_COLLATOR_FIELDS ) <nl> + # undef JS_COLLATOR_FIELDS <nl> <nl> DECL_ACCESSORS ( icu_collator , Managed < icu : : Collator > ) <nl> DECL_ACCESSORS ( bound_compare , Object ) <nl> mmm a / src / objects / module . h <nl> ppp b / src / objects / module . h <nl> <nl> # include " src / objects / fixed - array . h " <nl> # include " src / objects / js - objects . h " <nl> # include " src / objects / struct . h " <nl> - # include " torque - generated / field - offsets - tq . h " <nl> <nl> / / Has to be the last include ( doesn ' t have include guards ) : <nl> # include " src / objects / object - macros . h " <nl> class Module : public Struct { <nl> Handle < Module > module ) ; <nl> <nl> / / Layout description . <nl> - DEFINE_FIELD_OFFSET_CONSTANTS ( Struct : : kHeaderSize , <nl> - TORQUE_GENERATED_MODULE_FIELDS ) <nl> + # define MODULE_FIELDS ( V ) \ <nl> + V ( kCodeOffset , kTaggedSize ) \ <nl> + V ( kExportsOffset , kTaggedSize ) \ <nl> + V ( kRegularExportsOffset , kTaggedSize ) \ <nl> + V ( kRegularImportsOffset , kTaggedSize ) \ <nl> + V ( kHashOffset , kTaggedSize ) \ <nl> + V ( kModuleNamespaceOffset , kTaggedSize ) \ <nl> + V ( kRequestedModulesOffset , kTaggedSize ) \ <nl> + V ( kStatusOffset , kTaggedSize ) \ <nl> + V ( kDfsIndexOffset , kTaggedSize ) \ <nl> + V ( kDfsAncestorIndexOffset , kTaggedSize ) \ <nl> + V ( kExceptionOffset , kTaggedSize ) \ <nl> + V ( kScriptOffset , kTaggedSize ) \ <nl> + V ( kImportMetaOffset , kTaggedSize ) \ <nl> + / * Total size . * / \ <nl> + V ( kSize , 0 ) <nl> + <nl> + DEFINE_FIELD_OFFSET_CONSTANTS ( Struct : : kHeaderSize , MODULE_FIELDS ) <nl> + # undef MODULE_FIELDS <nl> <nl> private : <nl> friend class Factory ; <nl> class JSModuleNamespace : public JSObject { <nl> LookupIterator * it ) ; <nl> <nl> / / In - object fields . <nl> - static constexpr int kToStringTagFieldIndex = 0 ; <nl> - static constexpr int kInObjectFieldCount = 1 ; <nl> + enum { <nl> + kToStringTagFieldIndex , <nl> + kInObjectFieldCount , <nl> + } ; <nl> <nl> - / / Layout description . <nl> - DEFINE_FIELD_OFFSET_CONSTANTS ( JSObject : : kHeaderSize , <nl> - TORQUE_GENERATED_JSMODULE_NAMESPACE_FIELDS ) <nl> + / / Layout description . <nl> + # define JS_MODULE_NAMESPACE_FIELDS ( V ) \ <nl> + V ( kModuleOffset , kTaggedSize ) \ <nl> + / * Header size . * / \ <nl> + V ( kHeaderSize , 0 ) \ <nl> + V ( kInObjectFieldsOffset , kTaggedSize * kInObjectFieldCount ) \ <nl> + / * Total size . 
* / \ <nl> + V ( kSize , 0 ) <nl> <nl> - static const int kHeaderSize = kToStringTagFieldOffset ; <nl> + DEFINE_FIELD_OFFSET_CONSTANTS ( JSObject : : kHeaderSize , <nl> + JS_MODULE_NAMESPACE_FIELDS ) <nl> + # undef JS_MODULE_NAMESPACE_FIELDS <nl> <nl> OBJECT_CONSTRUCTORS ( JSModuleNamespace , JSObject ) ; <nl> } ; <nl> class ModuleInfo : public FixedArray { <nl> kRegularExportExportNamesOffset , <nl> kRegularExportLength <nl> } ; <nl> - <nl> OBJECT_CONSTRUCTORS ( ModuleInfo , FixedArray ) ; <nl> } ; <nl> <nl> mmm a / tools / torque / format - torque . py <nl> ppp b / tools / torque / format - torque . py <nl> def postprocess ( output ) : <nl> <nl> while True : <nl> old = output <nl> - output = re . sub ( r ' ( \ w + ) \ s { 0 , 1 } \ | ( \ n { 0 , 1 } \ s * ) / \ * \ * / ( \ s * \ w + ) ' , <nl> - r ' \ 1 | \ 2 \ 3 ' , output ) <nl> + output = re . sub ( r ' ( \ w + ) \ s { 0 , 1 } \ | \ s { 0 , 1 } / \ * \ * / ( \ s * \ w + ) ' , <nl> + r ' \ 1 | \ 2 ' , output ) <nl> if old = = output : <nl> break ; <nl> <nl> | Revert " [ torque ] Convert few class layout to torque and updated torque code formatter . " | v8/v8 | 2c355e54d8615c2a0991c3031878e5d4c4f37c20 | 2019-05-18T21:29:56Z |
mmm a / tensorflow / tensorboard / components / vz - projector / scatterPlotWebGL . ts <nl> ppp b / tensorflow / tensorboard / components / vz - projector / scatterPlotWebGL . ts <nl> const TAR_2D = { <nl> * / <nl> export class ScatterPlotWebGL implements ScatterPlot { <nl> private dataSet : DataSet ; <nl> + private spriteImage : HTMLImageElement ; <nl> private containerNode : HTMLElement ; <nl> - <nl> private visualizers : ScatterPlotWebGLVisualizer [ ] = [ ] ; <nl> <nl> private highlightedPoints : number [ ] = [ ] ; <nl> export class ScatterPlotWebGL implements ScatterPlot { <nl> this . renderer . render ( this . scene , this . perspCamera ) ; <nl> this . addInteractionListeners ( ) ; <nl> <nl> - this . addVisualizer ( <nl> - new ScatterPlotWebGLVisualizerAxes ( this . xScale , this . yScale ) ) ; <nl> + this . addAxesToScene ( ) ; <nl> } <nl> <nl> private addInteractionListeners ( ) { <nl> export class ScatterPlotWebGL implements ScatterPlot { <nl> } <nl> } <nl> <nl> - / * * Removes all geometry from the scene . * / <nl> private removeAll ( ) { <nl> this . visualizers . forEach ( v = > { <nl> v . removeAllFromScene ( this . scene ) ; <nl> export class ScatterPlotWebGL implements ScatterPlot { <nl> } ) ; <nl> } <nl> <nl> + private addAxesToScene ( ) { <nl> + this . addVisualizer ( <nl> + new ScatterPlotWebGLVisualizerAxes ( this . xScale , this . yScale ) ) ; <nl> + } <nl> + <nl> + private sceneIs3D ( ) : boolean { <nl> + return this . zAccessor ! = null ; <nl> + } <nl> + <nl> / / PUBLIC API <nl> <nl> / * * Adds a visualizer to the set , will start dispatching events to it * / <nl> addVisualizer ( visualizer : ScatterPlotWebGLVisualizer ) { <nl> this . visualizers . push ( visualizer ) ; <nl> + if ( this . dataSet ) { <nl> + visualizer . onDataSet ( this . dataSet , this . spriteImage ) ; <nl> + } <nl> + if ( this . labelAccessor ) { <nl> + visualizer . onSetLabelAccessor ( this . labelAccessor ) ; <nl> + } <nl> + if ( this . scene ) { <nl> + visualizer . onRecreateScene ( <nl> + this . scene , this . sceneIs3D ( ) , this . backgroundColor ) ; <nl> + } <nl> + } <nl> + <nl> + / * * Removes all visualizers attached to this scatter plot . * / <nl> + removeAllVisualizers ( ) { <nl> + this . removeAll ( ) ; <nl> + this . visualizers = [ ] ; <nl> + this . addAxesToScene ( ) ; <nl> } <nl> <nl> recreateScene ( ) { <nl> this . removeAll ( ) ; <nl> this . cancelAnimation ( ) ; <nl> - let sceneIs3D = this . zAccessor ! = null ; <nl> - if ( sceneIs3D ) { <nl> + if ( this . sceneIs3D ( ) ) { <nl> this . makeCamera3D ( ) ; <nl> } else { <nl> this . makeCamera2D ( ) ; <nl> } <nl> this . visualizers . forEach ( v = > { <nl> - v . onRecreateScene ( this . scene , sceneIs3D , this . backgroundColor ) ; <nl> + v . onRecreateScene ( this . scene , this . sceneIs3D ( ) , this . backgroundColor ) ; <nl> } ) ; <nl> this . resize ( false ) ; <nl> this . render ( ) ; <nl> export class ScatterPlotWebGL implements ScatterPlot { <nl> setDataSet ( dataSet : DataSet , spriteImage : HTMLImageElement ) { <nl> this . removeAll ( ) ; <nl> this . dataSet = dataSet ; <nl> + this . spriteImage = spriteImage ; <nl> + this . nearestPoint = null ; <nl> this . labeledPoints = [ ] ; <nl> this . highlightedPoints = [ ] ; <nl> this . visualizers . forEach ( v = > { <nl> mmm a / tensorflow / tensorboard / components / vz - projector / scatterPlotWebGLVisualizer3DLabels . ts <nl> ppp b / tensorflow / tensorboard / components / vz - projector / scatterPlotWebGLVisualizer3DLabels . 
ts <nl> const RGB_ELEMENTS_PER_ENTRY = 3 ; <nl> const XYZ_ELEMENTS_PER_ENTRY = 3 ; <nl> const UV_ELEMENTS_PER_ENTRY = 2 ; <nl> const VERTICES_PER_GLYPH = 2 * 3 ; / / 2 triangles , 3 verts per triangle <nl> + const POINT_COLOR = 0xFFFFFF ; <nl> <nl> / * * <nl> * Each label is made up of triangles ( two per letter . ) Each vertex , then , is <nl> const FRAGMENT_SHADER = ` <nl> gl_FragColor = vec4 ( vColor , 1 . 0 ) ; <nl> } else { <nl> vec4 fromTexture = texture2D ( texture , vUv ) ; <nl> - vec4 color = vec4 ( vColor , 1 . 0 ) ; <nl> - gl_FragColor = color + fromTexture ; <nl> + gl_FragColor = vec4 ( vColor , 1 . 0 ) * fromTexture ; <nl> } <nl> } ` ; <nl> <nl> export class ScatterPlotWebGLVisualizer3DLabels implements <nl> private scene : THREE . Scene ; <nl> private labelAccessor : ( index : number ) = > string ; <nl> private geometry : THREE . BufferGeometry ; <nl> + private pickingColors : Float32Array ; <nl> + private renderColors : Float32Array ; <nl> private material : THREE . ShaderMaterial ; <nl> private uniforms : Object ; <nl> private labelsMesh : THREE . Mesh ; <nl> private positions : THREE . BufferAttribute ; <nl> + private defaultPointColor = POINT_COLOR ; <nl> private totalVertexCount : number ; <nl> private labelVertexMap : number [ ] [ ] ; <nl> private glyphTexture : GlyphTexture ; <nl> export class ScatterPlotWebGLVisualizer3DLabels implements <nl> let numTotalLetters = 0 ; <nl> this . labelVertexMap = [ ] ; <nl> for ( let i = 0 ; i < this . dataSet . points . length ; i + + ) { <nl> - let label = this . labelAccessor ( i ) ; <nl> + let label : string = this . labelAccessor ( i ) . toString ( ) ; <nl> let vertsArray : number [ ] = [ ] ; <nl> for ( let j = 0 ; j < label . length ; j + + ) { <nl> for ( let k = 0 ; k < VERTICES_PER_GLYPH ; k + + ) { <nl> export class ScatterPlotWebGLVisualizer3DLabels implements <nl> this . totalVertexCount = numTotalLetters * VERTICES_PER_GLYPH ; <nl> } <nl> <nl> + private createColorBuffers ( ) { <nl> + let numPoints = this . dataSet . points . length ; <nl> + this . pickingColors = <nl> + new Float32Array ( this . totalVertexCount * RGB_ELEMENTS_PER_ENTRY ) ; <nl> + this . renderColors = <nl> + new Float32Array ( this . totalVertexCount * RGB_ELEMENTS_PER_ENTRY ) ; <nl> + for ( let i = 0 ; i < numPoints ; i + + ) { <nl> + let color = new THREE . Color ( i ) ; <nl> + this . labelVertexMap [ i ] . forEach ( ( j ) = > { <nl> + this . pickingColors [ RGB_ELEMENTS_PER_ENTRY * j ] = color . r ; <nl> + this . pickingColors [ RGB_ELEMENTS_PER_ENTRY * j + 1 ] = color . g ; <nl> + this . pickingColors [ RGB_ELEMENTS_PER_ENTRY * j + 2 ] = color . b ; <nl> + this . renderColors [ RGB_ELEMENTS_PER_ENTRY * j ] = 1 . 0 ; <nl> + this . renderColors [ RGB_ELEMENTS_PER_ENTRY * j + 1 ] = 1 . 0 ; <nl> + this . renderColors [ RGB_ELEMENTS_PER_ENTRY * j + 2 ] = 1 . 0 ; <nl> + } ) ; <nl> + } <nl> + } <nl> + <nl> private createLabelGeometry ( ) { <nl> + this . processLabelVerts ( ) ; <nl> + this . createColorBuffers ( ) ; <nl> + <nl> + let positionArray = <nl> + new Float32Array ( this . totalVertexCount * XYZ_ELEMENTS_PER_ENTRY ) ; <nl> + this . positions = <nl> + new THREE . BufferAttribute ( positionArray , XYZ_ELEMENTS_PER_ENTRY ) ; <nl> + <nl> let posArray = <nl> new Float32Array ( this . totalVertexCount * XYZ_ELEMENTS_PER_ENTRY ) ; <nl> let uvArray = <nl> export class ScatterPlotWebGLVisualizer3DLabels implements <nl> <nl> let lettersSoFar = 0 ; <nl> for ( let i = 0 ; i < this . dataSet . points . length ; i + + ) { <nl> - let label = this . 
labelAccessor ( i ) ; <nl> + let label : string = this . labelAccessor ( i ) . toString ( ) ; <nl> let leftOffset = 0 ; <nl> / / Determine length of word in pixels . <nl> for ( let j = 0 ; j < label . length ; j + + ) { <nl> export class ScatterPlotWebGLVisualizer3DLabels implements <nl> } <nl> } <nl> <nl> + for ( let i = 0 ; i < this . dataSet . points . length ; i + + ) { <nl> + let pp = this . dataSet . points [ i ] . projectedPoint ; <nl> + this . labelVertexMap [ i ] . forEach ( ( j ) = > { <nl> + this . positions . setXYZ ( j , pp [ 0 ] , pp [ 1 ] , pp [ 2 ] ) ; <nl> + } ) ; <nl> + } ; <nl> + <nl> this . labelsMesh = new THREE . Mesh ( this . geometry , this . material ) ; <nl> } <nl> <nl> export class ScatterPlotWebGLVisualizer3DLabels implements <nl> } <nl> } <nl> <nl> + private colorSprites ( colorAccessor : ( index : number ) = > string ) { <nl> + if ( this . geometry = = null | | this . dataSet = = null ) { <nl> + return ; <nl> + } <nl> + let colors = this . geometry . getAttribute ( ' color ' ) as THREE . BufferAttribute ; <nl> + colors . array = this . renderColors ; <nl> + let getColor : ( index : number ) = > string = <nl> + colorAccessor ? colorAccessor : ( ) = > ( this . defaultPointColor as any ) ; <nl> + for ( let i = 0 ; i < this . dataSet . points . length ; i + + ) { <nl> + let color = new THREE . Color ( getColor ( i ) ) ; <nl> + this . labelVertexMap [ i ] . forEach ( ( j ) = > { <nl> + colors . setXYZ ( j , color . r , color . g , color . b ) ; <nl> + } ) ; <nl> + } <nl> + colors . needsUpdate = true ; <nl> + } <nl> + <nl> + private highlightSprites ( <nl> + highlightedPoints : number [ ] , highlightStroke : ( index : number ) = > string ) { <nl> + if ( this . geometry = = null | | this . dataSet = = null ) { <nl> + return ; <nl> + } <nl> + if ( highlightedPoints & & highlightStroke ) { <nl> + let colors = this . geometry . getAttribute ( ' color ' ) as THREE . BufferAttribute ; <nl> + for ( let i = 0 ; i < highlightedPoints . length ; i + + ) { <nl> + let assocPoint = highlightedPoints [ i ] ; <nl> + let color = new THREE . Color ( highlightStroke ( i ) ) ; <nl> + this . labelVertexMap [ assocPoint ] . forEach ( ( j ) = > { <nl> + colors . setXYZ ( j , color . r , color . g , color . b ) ; <nl> + } ) ; <nl> + } <nl> + colors . needsUpdate = true ; <nl> + } <nl> + } <nl> + <nl> onRecreateScene ( <nl> scene : THREE . Scene , sceneIs3D : boolean , backgroundColor : number ) { <nl> this . scene = scene ; <nl> export class ScatterPlotWebGLVisualizer3DLabels implements <nl> this . dataSet = dataSet ; <nl> } <nl> <nl> - onPickingRender ( camera : THREE . Camera , cameraTarget : THREE . Vector3 ) { } <nl> + onPickingRender ( camera : THREE . Camera , cameraTarget : THREE . Vector3 ) { <nl> + this . material . uniforms . texture . value = this . glyphTexture . texture ; <nl> + this . material . uniforms . picking . value = true ; <nl> + this . material . uniforms . camPos . value = camera . position ; <nl> + <nl> + let colors = this . geometry . getAttribute ( ' color ' ) as THREE . BufferAttribute ; <nl> + colors . array = this . pickingColors ; <nl> + colors . needsUpdate = true ; <nl> + } <nl> + <nl> + onRender ( rc : RenderContext ) { <nl> + this . colorSprites ( rc . colorAccessor ) ; <nl> + this . highlightSprites ( rc . highlightedPoints , rc . highlightStroke ) ; <nl> <nl> - onRender ( renderContext : RenderContext ) { <nl> this . material . uniforms . texture . value = this . glyphTexture . texture ; <nl> this . material . uniforms . picking . 
value = false ; <nl> - this . material . uniforms . camPos . value = renderContext . camera . position ; <nl> + this . material . uniforms . camPos . value = rc . camera . position ; <nl> + <nl> + let colors = this . geometry . getAttribute ( ' color ' ) as THREE . BufferAttribute ; <nl> + colors . array = this . renderColors ; <nl> + colors . needsUpdate = true ; <nl> } <nl> <nl> onUpdate ( ) { <nl> - this . processLabelVerts ( ) ; <nl> - let positionArray = <nl> - new Float32Array ( this . totalVertexCount * XYZ_ELEMENTS_PER_ENTRY ) ; <nl> - this . positions = <nl> - new THREE . BufferAttribute ( positionArray , XYZ_ELEMENTS_PER_ENTRY ) ; <nl> - <nl> this . createLabels ( ) ; <nl> if ( this . labelsMesh & & this . scene ) { <nl> this . scene . add ( this . labelsMesh ) ; <nl> } <nl> - for ( let i = 0 ; i < this . dataSet . points . length ; i + + ) { <nl> - let pp = this . dataSet . points [ i ] . projectedPoint ; <nl> - this . labelVertexMap [ i ] . forEach ( ( j ) = > { <nl> - this . positions . setXYZ ( j , pp [ 0 ] , pp [ 1 ] , pp [ 2 ] ) ; <nl> - } ) ; <nl> - } ; <nl> } <nl> <nl> - onResize ( newWidth : number , newHeight : number ) { } <nl> onSelectionChanged ( selection : number [ ] ) { } <nl> + <nl> + onResize ( newWidth : number , newHeight : number ) { } <nl> } <nl> mmm a / tensorflow / tensorboard / components / vz - projector / scatterPlotWebGLVisualizerSprites . ts <nl> ppp b / tensorflow / tensorboard / components / vz - projector / scatterPlotWebGLVisualizerSprites . ts <nl> const FRAGMENT_SHADER = ` <nl> } ` ; <nl> <nl> / * * <nl> - * This uses GL point sprites to render <nl> - * the scatter plot dataset , and a 2D HTML canvas to render labels . <nl> + * Uses GL point sprites to render the dataset . <nl> * / <nl> export class ScatterPlotWebGLVisualizerSprites implements <nl> ScatterPlotWebGLVisualizer { <nl> export class ScatterPlotWebGLVisualizerSprites implements <nl> this . sceneIs3D = sceneIs3D ; <nl> this . fog = new THREE . Fog ( backgroundColor ) ; <nl> scene . fog = this . fog ; <nl> - this . addSprites ( scene ) ; <nl> - this . colorSprites ( null ) ; <nl> - this . highlightSprites ( null , null ) ; <nl> + if ( this . dataSet ) { <nl> + this . addSprites ( scene ) ; <nl> + this . colorSprites ( null ) ; <nl> + this . highlightSprites ( null , null ) ; <nl> + } <nl> } <nl> <nl> onUpdate ( ) { <nl> mmm a / tensorflow / tensorboard / components / vz - projector / scatterPlotWebGLVisualizerTraces . ts <nl> ppp b / tensorflow / tensorboard / components / vz - projector / scatterPlotWebGLVisualizerTraces . ts <nl> export class ScatterPlotWebGLVisualizerTraces implements <nl> <nl> onDataSet ( dataSet : DataSet , spriteImage : HTMLImageElement ) { <nl> this . dataSet = dataSet ; <nl> - / / Set up the position buffer arrays for each trace . <nl> - for ( let i = 0 ; i < this . dataSet . traces . length ; i + + ) { <nl> - let dataTrace = this . dataSet . traces [ i ] ; <nl> - let traces = new Float32Array ( <nl> - 2 * ( dataTrace . pointIndices . length - 1 ) * XYZ_NUM_BYTES ) ; <nl> - this . tracePositionBuffer [ i ] = <nl> - new THREE . BufferAttribute ( traces , XYZ_NUM_BYTES ) ; <nl> + if ( dataSet ) { <nl> + / / Set up the position buffer arrays for each trace . <nl> + for ( let i = 0 ; i < this . dataSet . traces . length ; i + + ) { <nl> + let dataTrace = this . dataSet . traces [ i ] ; <nl> + let traces = new Float32Array ( <nl> + 2 * ( dataTrace . pointIndices . length - 1 ) * XYZ_NUM_BYTES ) ; <nl> + this . tracePositionBuffer [ i ] = <nl> + new THREE . 
BufferAttribute ( traces , XYZ_NUM_BYTES ) ; <nl> + } <nl> } <nl> } <nl> <nl> export class ScatterPlotWebGLVisualizerTraces implements <nl> } <nl> <nl> onUpdate ( ) { <nl> + if ( ! this . dataSet ) { <nl> + return ; <nl> + } <nl> for ( let i = 0 ; i < this . dataSet . traces . length ; i + + ) { <nl> let dataTrace = this . dataSet . traces [ i ] ; <nl> <nl> mmm a / tensorflow / tensorboard / components / vz - projector / util . ts <nl> ppp b / tensorflow / tensorboard / components / vz - projector / util . ts <nl> export function shuffle < T > ( array : T [ ] ) : T [ ] { <nl> / * * Retrieves a projected point from the data set as a THREE . js vector * / <nl> export function getProjectedPointFromIndex ( <nl> dataSet : DataSet , i : number ) : THREE . Vector3 { <nl> - return new THREE . Vector3 ( <nl> - dataSet . points [ i ] . projectedPoint [ 0 ] , dataSet . points [ i ] . projectedPoint [ 1 ] , <nl> - dataSet . points [ i ] . projectedPoint [ 2 ] ) ; <nl> + let pp = dataSet . points [ i ] . projectedPoint ; <nl> + let v = new THREE . Vector3 ( pp [ 0 ] , pp [ 1 ] , pp [ 2 ] ) ; <nl> + return v ; <nl> } <nl> <nl> / * * Projects a 3d point into screen space * / <nl> mmm a / tensorflow / tensorboard / components / vz - projector / vz - projector . html <nl> ppp b / tensorflow / tensorboard / components / vz - projector / vz - projector . html <nl> < h3 > Vertical < / h3 > <nl> < i class = " material - icons " > brightness_2 < / i > <nl> Night Mode <nl> < / button > <nl> + < button class = " menu - button labels3DMode " title = " 3D Labels " > <nl> + < i class = " material - icons " > flip_to_front < / i > <nl> + 3D Labels <nl> + < / button > <nl> < div class = " ink - fabs " > <nl> < div class = " ink - fab reset - zoom " title = " Zoom to fit all " > <nl> < i class = " material - icons resetZoom " > home < / i > <nl> < h3 > Vertical < / h3 > <nl> < / div > <nl> < / template > <nl> < / dom - module > <nl> + <nl> mmm a / tensorflow / tensorboard / components / vz - projector / vz - projector . ts <nl> ppp b / tensorflow / tensorboard / components / vz - projector / vz - projector . ts <nl> import { DataProvider , getDataProvider } from ' . / data - loader ' ; <nl> import * as knn from ' . / knn ' ; <nl> import { Mode , ScatterPlot } from ' . / scatterPlot ' ; <nl> import { ScatterPlotWebGL } from ' . / scatterPlotWebGL ' ; <nl> + import { ScatterPlotWebGLVisualizer3DLabels } from ' . / scatterPlotWebGLVisualizer3DLabels ' ; <nl> import { ScatterPlotWebGLVisualizerCanvasLabels } from ' . / scatterPlotWebGLVisualizerCanvasLabels ' ; <nl> import { ScatterPlotWebGLVisualizerSprites } from ' . / scatterPlotWebGLVisualizerSprites ' ; <nl> import { ScatterPlotWebGLVisualizerTraces } from ' . / scatterPlotWebGLVisualizerTraces ' ; <nl> import { ColorOption , DataPanel } from ' . / vz - projector - data - panel ' ; <nl> / / tslint : disable - next - line : no - unused - variable <nl> import { PolymerElement , PolymerHTMLElement } from ' . / vz - projector - util ' ; <nl> <nl> + <nl> / * * T - SNE perplexity . Roughly how many neighbors each point influences . * / <nl> let perplexity : number = 30 ; <nl> / * * T - SNE learning rate . * / <nl> export class Projector extends ProjectorPolymer { <nl> / / The working subset of the data source ' s original data set . 
<nl> private currentDataSet : DataSet ; <nl> private scatterPlot : ScatterPlot ; <nl> + private labels3D : boolean = false ; <nl> private dim : number ; <nl> private selectedDistance : ( a : number [ ] , b : number [ ] ) = > number ; <nl> private highlightedPoints : { index : number , color : string } [ ] ; <nl> export class Projector extends ProjectorPolymer { <nl> } ) ; <nl> <nl> let selectModeButton = this . dom . select ( ' . selectMode ' ) ; <nl> - <nl> selectModeButton . on ( ' click ' , ( ) = > { <nl> let mode = this . scatterPlot . getMode ( ) ; <nl> this . scatterPlot . setMode ( mode = = = Mode . SELECT ? Mode . HOVER : Mode . SELECT ) ; <nl> export class Projector extends ProjectorPolymer { <nl> this . scatterPlot . setDayNightMode ( modeIsNight ) ; <nl> } ) ; <nl> <nl> + let labels3DModeButton = this . dom . select ( ' . labels3DMode ' ) ; <nl> + labels3DModeButton . on ( ' click ' , ( ) = > { <nl> + this . labels3D = ! this . labels3D ; <nl> + this . createVisualizers ( ) ; <nl> + this . scatterPlot . recreateScene ( ) ; <nl> + this . scatterPlot . update ( ) ; <nl> + this . updateMenuButtons ( ) ; <nl> + } ) ; <nl> + <nl> / / Resize <nl> window . addEventListener ( ' resize ' , ( ) = > { <nl> this . scatterPlot . resize ( ) ; <nl> export class Projector extends ProjectorPolymer { <nl> <nl> / / Canvas <nl> { <nl> - let container = this . dom . select ( ' # scatter ' ) ; <nl> - let scatterPlotWebGL = new ScatterPlotWebGL ( <nl> - container , i = > ' ' + this . points [ i ] . metadata [ ' label ' ] ) ; <nl> - <nl> - scatterPlotWebGL . addVisualizer ( <nl> - new ScatterPlotWebGLVisualizerSprites ( scatterPlotWebGL ) ) ; <nl> - <nl> - scatterPlotWebGL . addVisualizer ( <nl> - new ScatterPlotWebGLVisualizerTraces ( scatterPlotWebGL ) ) ; <nl> - <nl> - scatterPlotWebGL . addVisualizer ( <nl> - new ScatterPlotWebGLVisualizerCanvasLabels ( container ) ) ; <nl> - <nl> - this . scatterPlot = scatterPlotWebGL ; <nl> + this . scatterPlot = new ScatterPlotWebGL ( <nl> + this . getScatterContainer ( ) , <nl> + i = > ' ' + this . points [ i ] . metadata [ ' label ' ] ) ; <nl> + this . createVisualizers ( ) ; <nl> } <nl> <nl> this . scatterPlot . onHover ( hoveredIndex = > { <nl> export class Projector extends ProjectorPolymer { <nl> } ) ; <nl> } <nl> <nl> + private getScatterContainer ( ) : d3 . Selection < any > { <nl> + return this . dom . select ( ' # scatter ' ) ; <nl> + } <nl> + <nl> + private createVisualizers ( ) { <nl> + let scatterPlotWebGL = this . scatterPlot as ScatterPlotWebGL ; <nl> + scatterPlotWebGL . removeAllVisualizers ( ) ; <nl> + <nl> + if ( this . labels3D ) { <nl> + scatterPlotWebGL . addVisualizer ( <nl> + new ScatterPlotWebGLVisualizer3DLabels ( scatterPlotWebGL ) ) ; <nl> + } else { <nl> + scatterPlotWebGL . addVisualizer ( <nl> + new ScatterPlotWebGLVisualizerSprites ( scatterPlotWebGL ) ) ; <nl> + <nl> + scatterPlotWebGL . addVisualizer ( <nl> + new ScatterPlotWebGLVisualizerTraces ( scatterPlotWebGL ) ) ; <nl> + <nl> + scatterPlotWebGL . addVisualizer ( new ScatterPlotWebGLVisualizerCanvasLabels ( <nl> + this . getScatterContainer ( ) ) ) ; <nl> + } <nl> + } <nl> + <nl> private updateSelection ( points : number [ ] ) { <nl> / / If no points are selected , unselect everything . <nl> if ( ! points . length ) { <nl> export class Projector extends ProjectorPolymer { <nl> ( searchBox . select ( ' input ' ) . node ( ) as HTMLInputElement ) . focus ( ) ; <nl> this . dom . select ( ' . selectMode ' ) <nl> . classed ( ' selected ' , this . scatterPlot . 
getMode ( ) = = = Mode . SELECT ) ; <nl> + this . dom . select ( ' . labels3DMode ' ) . classed ( ' selected ' , this . labels3D ) ; <nl> } <nl> <nl> / * * <nl> | Add support for toggling 3D labels on and off . 3D label selection + kNN highlighting now works more or less like sprites do . | tensorflow/tensorflow | 803c605e6939115db2079bc5c5b210f9b3bce4e7 | 2016-09-26T18:31:26Z |
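The commit above gives each data point two color buffers: render colors for display and picking colors built from `new THREE.Color(i)`, so the picking render pass can recover a point's index from the framebuffer color. Purely as an illustration of that 24-bit id-to-RGB round trip (not code from the project; the function names are made up), a minimal Python sketch:

```python
def id_to_rgb(index):
    # Pack a 24-bit integer id into three 8-bit channels scaled to [0.0, 1.0],
    # the same split a 0xRRGGBB hex color uses.
    return (((index >> 16) & 0xFF) / 255.0,
            ((index >> 8) & 0xFF) / 255.0,
            (index & 0xFF) / 255.0)

def rgb_to_id(r, g, b):
    # Invert the packing; round() guards against float quantization on readback.
    return (round(r * 255) << 16) | (round(g * 255) << 8) | round(b * 255)

assert all(rgb_to_id(*id_to_rgb(i)) == i for i in (0, 1, 255, 65535, 16777215))
```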
mmm a / tensorflow / python / tools / graph_metrics . py <nl> ppp b / tensorflow / python / tools / graph_metrics . py <nl> def calculate_graph_metrics ( graph_def , statistic_types , input_layer , <nl> input_shape = None <nl> if input_shape_override : <nl> input_shape = input_shape_override <nl> + if input_shape is None : <nl> + raise ValueError ( " " " No input shape was provided on the command line , " " " <nl> + " " " and the input op itself had no default shape , so " " " <nl> + " " " shape inference couldn ' t be performed . This is " " " <nl> + " " " required for metrics calculations . " " " ) <nl> input_shape [ 0 ] = batch_size <nl> input_tensor . set_shape ( input_shape ) <nl> for node in graph_def . node : <nl> | Report a better error when we have no input shape for metrics calculations . | tensorflow/tensorflow | 4220fd6738500d5dda1217f84f56a4071c02d3ee | 2016-03-03T23:50:22Z |
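The change above fails fast with a descriptive error when neither the command line nor the input op provides a shape, instead of crashing later inside the metrics calculation. A minimal standalone sketch of the same guard, assuming a hypothetical helper name and signature (only the error text and the override/batch-size handling follow the diff):

```python
def resolve_input_shape(input_shape, input_shape_override, batch_size):
    # Prefer an explicit command-line override; otherwise require that the
    # input op carried a default shape, since the metrics calculation cannot
    # run without one.
    if input_shape_override:
        input_shape = list(input_shape_override)
    if input_shape is None:
        raise ValueError(
            "No input shape was provided on the command line, and the input "
            "op itself had no default shape, so shape inference couldn't be "
            "performed. This is required for metrics calculations.")
    input_shape[0] = batch_size
    return input_shape
```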
mmm a / include / swift / Frontend / Frontend . h <nl> ppp b / include / swift / Frontend / Frontend . h <nl> class CompilerInstance { <nl> return PrimarySourceFiles ; <nl> } <nl> <nl> + / / / Gets the Primary Source File if one exists , otherwise the main <nl> + / / / module . If multiple Primary Source Files exist , fails with an <nl> + / / / assertion . <nl> + ModuleOrSourceFile getPrimarySourceFileOrMainModule ( ) { <nl> + if ( PrimarySourceFiles . empty ( ) ) <nl> + return getMainModule ( ) ; <nl> + else <nl> + return getPrimarySourceFile ( ) ; <nl> + } <nl> + <nl> / / / Gets the SourceFile which is the primary input for this CompilerInstance . <nl> / / / \ returns the primary SourceFile , or nullptr if there is no primary input ; <nl> / / / if there are _multiple_ primary inputs , fails with an assertion . <nl> mmm a / lib / FrontendTool / FrontendTool . cpp <nl> ppp b / lib / FrontendTool / FrontendTool . cpp <nl> static void countStatsPostSema ( UnifiedStatsReporter & Stats , <nl> C . NumReferencedMemberNames = R - > getUsedMembers ( ) . size ( ) ; <nl> } <nl> <nl> - if ( auto * SF = Instance . getPrimarySourceFile ( ) ) { <nl> - countStatsOfSourceFile ( Stats , Instance , SF ) ; <nl> + if ( ! Instance . getPrimarySourceFiles ( ) . empty ( ) ) { <nl> + for ( auto SF : Instance . getPrimarySourceFiles ( ) ) <nl> + countStatsOfSourceFile ( Stats , Instance , SF ) ; <nl> } else if ( auto * M = Instance . getMainModule ( ) ) { <nl> / / No primary source file , but a main module ; this is WMO - mode <nl> for ( auto * F : M - > getFiles ( ) ) { <nl> static bool performCompile ( CompilerInstance & Instance , <nl> return Context . hadError ( ) ; <nl> } <nl> <nl> - SourceFile * PrimarySourceFile = Instance . getPrimarySourceFile ( ) ; <nl> - <nl> / / We ' ve been told to dump the AST ( either after parsing or type - checking , <nl> / / which is already differentiated in CompilerInstance : : performSema ( ) ) , <nl> / / so dump or print the main source file and return . <nl> static bool performCompile ( CompilerInstance & Instance , <nl> Action = = FrontendOptions : : ActionType : : DumpScopeMaps | | <nl> Action = = FrontendOptions : : ActionType : : DumpTypeRefinementContexts | | <nl> Action = = FrontendOptions : : ActionType : : DumpInterfaceHash ) { <nl> - SourceFile * SF = PrimarySourceFile ; <nl> + SourceFile * SF = Instance . getPrimarySourceFile ( ) ; <nl> if ( ! SF ) { <nl> SourceFileKind Kind = Invocation . getSourceFileKind ( ) ; <nl> SF = & Instance . getMainModule ( ) - > getMainSourceFile ( Kind ) ; <nl> static bool performCompile ( CompilerInstance & Instance , <nl> ( void ) emitMakeDependencies ( Context . Diags , * Instance . getDependencyTracker ( ) , <nl> opts ) ; <nl> <nl> - if ( shouldTrackReferences ) <nl> - emitReferenceDependencies ( Context . Diags , Instance . getPrimarySourceFile ( ) , <nl> - * Instance . getDependencyTracker ( ) , opts ) ; <nl> + if ( shouldTrackReferences ) { <nl> + for ( auto * SF : Instance . getPrimarySourceFiles ( ) ) { <nl> + emitReferenceDependencies ( Context . Diags , SF , <nl> + * Instance . getDependencyTracker ( ) , opts ) ; <nl> + } <nl> + } <nl> <nl> if ( ! opts . LoadedModuleTracePath . empty ( ) ) <nl> ( void ) emitLoadedModuleTrace ( Context , * Instance . getDependencyTracker ( ) , <nl> static bool performCompile ( CompilerInstance & Instance , <nl> if ( Context . hadError ( ) ) { <nl> if ( shouldIndex ) { <nl> / / Emit the index store data even if there were compiler errors . 
<nl> - if ( emitIndexData ( PrimarySourceFile , Invocation , Instance ) ) <nl> + if ( emitIndexData ( Instance . getPrimarySourceFile ( ) , <nl> + Invocation , Instance ) ) <nl> return true ; <nl> } <nl> return true ; <nl> static bool performCompile ( CompilerInstance & Instance , <nl> return printAsObjC ( opts . ObjCHeaderOutputPath , Instance . getMainModule ( ) , <nl> opts . ImplicitObjCHeaderPath , moduleIsPublic ) ; <nl> if ( shouldIndex ) { <nl> - if ( emitIndexData ( PrimarySourceFile , Invocation , Instance ) ) <nl> + if ( emitIndexData ( Instance . getPrimarySourceFile ( ) , <nl> + Invocation , Instance ) ) <nl> return true ; <nl> } <nl> return Context . hadError ( ) ; <nl> static bool performCompile ( CompilerInstance & Instance , <nl> return SASTF & & SASTF - > isSIB ( ) ; <nl> } ; <nl> if ( opts . Inputs . hasPrimaryInputs ( ) ) { <nl> - FileUnit * PrimaryFile = PrimarySourceFile ; <nl> + FileUnit * PrimaryFile = Instance . getPrimarySourceFile ( ) ; <nl> if ( ! PrimaryFile ) { <nl> for ( FileUnit * fileUnit : Instance . getMainModule ( ) - > getFiles ( ) ) { <nl> if ( auto SASTF = dyn_cast < SerializedASTFile > ( fileUnit ) ) { <nl> static bool performCompile ( CompilerInstance & Instance , <nl> if ( Invocation . getSILOptions ( ) . LinkMode = = SILOptions : : LinkAll ) <nl> performSILLinking ( SM . get ( ) , true ) ; <nl> <nl> - auto DC = PrimarySourceFile ? ModuleOrSourceFile ( PrimarySourceFile ) : <nl> - Instance . getMainModule ( ) ; <nl> + auto DC = Instance . getPrimarySourceFileOrMainModule ( ) ; <nl> if ( ! opts . ModuleOutputPath . empty ( ) ) { <nl> SerializationOptions serializationOpts ; <nl> serializationOpts . OutputPath = opts . ModuleOutputPath . c_str ( ) ; <nl> static bool performCompile ( CompilerInstance & Instance , <nl> <nl> auto SerializeSILModuleAction = [ & ] ( ) { <nl> if ( ! opts . ModuleOutputPath . empty ( ) | | ! opts . ModuleDocOutputPath . empty ( ) ) { <nl> - auto DC = PrimarySourceFile ? ModuleOrSourceFile ( PrimarySourceFile ) <nl> - : Instance . getMainModule ( ) ; <nl> + auto DC = Instance . getPrimarySourceFileOrMainModule ( ) ; <nl> if ( ! opts . ModuleOutputPath . empty ( ) ) { <nl> SerializationOptions serializationOpts ; <nl> serializationOpts . OutputPath = opts . ModuleOutputPath . c_str ( ) ; <nl> static bool performCompile ( CompilerInstance & Instance , <nl> <nl> / / Get the main source file ' s private discriminator and attach it to <nl> / / the compile unit ' s flags . <nl> - if ( PrimarySourceFile ) { <nl> - Identifier PD = PrimarySourceFile - > getPrivateDiscriminator ( ) ; <nl> + if ( IRGenOpts . DebugInfoKind ! = IRGenDebugInfoKind : : None & & <nl> + Instance . getPrimarySourceFile ( ) ) { <nl> + Identifier PD = Instance . getPrimarySourceFile ( ) - > getPrivateDiscriminator ( ) ; <nl> if ( ! PD . empty ( ) ) <nl> IRGenOpts . DWARFDebugFlags + = ( " - private - discriminator " + PD . str ( ) ) . str ( ) ; <nl> } <nl> static bool performCompile ( CompilerInstance & Instance , <nl> } <nl> <nl> if ( Action = = FrontendOptions : : ActionType : : EmitSIB ) { <nl> - auto DC = PrimarySourceFile ? ModuleOrSourceFile ( PrimarySourceFile ) : <nl> - Instance . getMainModule ( ) ; <nl> + auto DC = Instance . getPrimarySourceFileOrMainModule ( ) ; <nl> if ( ! opts . ModuleOutputPath . empty ( ) ) { <nl> SerializationOptions serializationOpts ; <nl> serializationOpts . OutputPath = opts . ModuleOutputPath . 
c_str ( ) ; <nl> static bool performCompile ( CompilerInstance & Instance , <nl> if ( Action = = FrontendOptions : : ActionType : : MergeModules | | <nl> Action = = FrontendOptions : : ActionType : : EmitModuleOnly ) { <nl> if ( shouldIndex ) { <nl> - if ( emitIndexData ( PrimarySourceFile , Invocation , Instance ) ) <nl> + if ( emitIndexData ( Instance . getPrimarySourceFile ( ) , <nl> + Invocation , Instance ) ) <nl> return true ; <nl> } <nl> return Context . hadError ( ) ; <nl> static bool performCompile ( CompilerInstance & Instance , <nl> / / TODO : remove once the frontend understands what action it should perform <nl> IRGenOpts . OutputKind = getOutputKind ( Action ) ; <nl> if ( Action = = FrontendOptions : : ActionType : : Immediate ) { <nl> - assert ( ! PrimarySourceFile & & " - i doesn ' t work in - primary - file mode " ) ; <nl> + assert ( Instance . getPrimarySourceFiles ( ) . empty ( ) & & <nl> + " - i doesn ' t work in - primary - file mode " ) ; <nl> IRGenOpts . UseJIT = true ; <nl> IRGenOpts . DebugInfoKind = IRGenDebugInfoKind : : Normal ; <nl> const ProcessCmdLine & CmdLine = ProcessCmdLine ( opts . ImmediateArgv . begin ( ) , <nl> static bool performCompile ( CompilerInstance & Instance , <nl> auto & LLVMContext = getGlobalLLVMContext ( ) ; <nl> std : : unique_ptr < llvm : : Module > IRModule ; <nl> llvm : : GlobalVariable * HashGlobal ; <nl> - if ( PrimarySourceFile ) { <nl> - IRModule = performIRGeneration ( IRGenOpts , * PrimarySourceFile , std : : move ( SM ) , <nl> + if ( ! Instance . getPrimarySourceFiles ( ) . empty ( ) ) { <nl> + IRModule = performIRGeneration ( IRGenOpts , <nl> + * Instance . getPrimarySourceFile ( ) , <nl> + std : : move ( SM ) , <nl> opts . getSingleOutputFilename ( ) , LLVMContext , <nl> 0 , & HashGlobal ) ; <nl> } else { <nl> static bool performCompile ( CompilerInstance & Instance , <nl> / / Walk the AST for indexing after IR generation . Walking it before seems <nl> / / to cause miscompilation issues . <nl> if ( shouldIndex ) { <nl> - if ( emitIndexData ( PrimarySourceFile , Invocation , Instance ) ) <nl> + if ( emitIndexData ( Instance . getPrimarySourceFile ( ) , Invocation , Instance ) ) <nl> return true ; <nl> } <nl> <nl> static bool performCompile ( CompilerInstance & Instance , <nl> const auto & SILOpts = Invocation . getSILOptions ( ) ; <nl> const auto hasMultipleIGMs = SILOpts . hasMultipleIGMs ( ) ; <nl> bool error ; <nl> - if ( PrimarySourceFile ) <nl> - error = validateTBD ( PrimarySourceFile , * IRModule , hasMultipleIGMs , <nl> + if ( ! Instance . getPrimarySourceFiles ( ) . empty ( ) ) <nl> + error = validateTBD ( Instance . getPrimarySourceFile ( ) , <nl> + * IRModule , hasMultipleIGMs , <nl> allSymbols ) ; <nl> else <nl> error = validateTBD ( Instance . getMainModule ( ) , * IRModule , hasMultipleIGMs , <nl> | [ BatchMode ] Sink a bunch of FrontendTool uses of getPrimarySourceFile ( ) | apple/swift | 2494b148f1dacbbe77fb780022fa7cbfacf8b7a8 | 2018-01-17T19:57:13Z |
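The new getPrimarySourceFileOrMainModule() accessor centralizes the "single primary file if one exists, otherwise the main module" fallback that the frontend previously repeated at each call site. A rough Python model of that behavior is sketched below; the class and attribute names are hypothetical stand-ins for the C++ types, not the compiler's API.

```python
class CompilerInstanceModel:
    # Hypothetical stand-in for the CompilerInstance type used in the diff above.
    def __init__(self, main_module, primary_source_files):
        self.main_module = main_module
        self.primary_source_files = list(primary_source_files)

    def primary_source_file_or_main_module(self):
        # Whole-module builds have no primary inputs, so fall back to the main
        # module; with primaries, insist there is exactly one, because "the"
        # primary file is ambiguous when several are present.
        if not self.primary_source_files:
            return self.main_module
        assert len(self.primary_source_files) == 1, "multiple primary inputs"
        return self.primary_source_files[0]
```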
new file mode 100755 <nl> index 0000000000000 . . 3d2cff76cab1a <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / ci_build / linux / cpu / run_cc_core . sh <nl> <nl> + # ! / usr / bin / env bash <nl> + # Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + set - e <nl> + set - x <nl> + <nl> + N_JOBS = $ ( grep - c ^ processor / proc / cpuinfo ) <nl> + <nl> + echo " " <nl> + echo " Bazel will use $ { N_JOBS } concurrent job ( s ) . " <nl> + echo " " <nl> + <nl> + # Run configure . <nl> + export TF_NEED_GCP = 0 <nl> + export TF_NEED_HDFS = 0 <nl> + export TF_NEED_CUDA = 0 <nl> + # Only running cc tests , python version does not matter . <nl> + export PYTHON_BIN_PATH = ` which python ` <nl> + yes " " | . / configure <nl> + <nl> + # Run bazel test command . Double test timeouts to avoid flakes . <nl> + bazel test - - test_tag_filters = - gpu , - benchmark - test - - test_lang_filters = cc - k \ <nl> + - - jobs = $ { N_JOBS } - - test_timeout 300 , 450 , 1200 , 3600 - - build_tests_only - - \ <nl> + / / tensorflow / . . . - / / tensorflow / compiler / . . . - / / tensorflow / contrib / . . . <nl> | A script for running only cc tests in TF . | tensorflow/tensorflow | bc90ff98b7128095f987d1986575c87c38ca833c | 2017-02-14T01:07:38Z |
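The script above sizes --jobs from the processor count in /proc/cpuinfo and then runs only the C++ test selection. As an illustration only, the sketch below rebuilds the same command line in Python, with os.cpu_count() standing in for the grep; the flags are copied from the script and nothing further is implied about the CI setup.

```python
import os

def cc_core_test_command(jobs=None):
    # os.cpu_count() plays the role of `grep -c ^processor /proc/cpuinfo`.
    jobs = jobs or os.cpu_count() or 1
    return [
        "bazel", "test",
        "--test_tag_filters=-gpu,-benchmark-test",
        "--test_lang_filters=cc", "-k",
        "--jobs=%d" % jobs,
        "--test_timeout", "300,450,1200,3600",
        "--build_tests_only", "--",
        "//tensorflow/...",
        "-//tensorflow/compiler/...",
        "-//tensorflow/contrib/...",
    ]

print(" ".join(cc_core_test_command()))
```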
mmm a / tensorflow / tensorflow . bzl <nl> ppp b / tensorflow / tensorflow . bzl <nl> <nl> # - * - Python - * - <nl> <nl> - <nl> # Return the options to use for a C + + library or binary build . <nl> # Uses the " : optmode " config_setting to pick the options . <nl> load ( <nl> load ( <nl> " tf_cuda_tests_tags " , <nl> " tf_sycl_tests_tags " , <nl> " tf_additional_xla_deps_py " , <nl> - " if_static " , ) <nl> + " if_static " , <nl> + ) <nl> load ( <nl> " @ local_config_cuda / / cuda : build_defs . bzl " , <nl> " if_cuda " , <nl> - " cuda_default_copts " , ) <nl> - <nl> + " cuda_default_copts " , <nl> + ) <nl> load ( <nl> " / / third_party / mkl : build_defs . bzl " , <nl> - " if_mkl " , ) <nl> - <nl> + " if_mkl " , <nl> + ) <nl> def register_extension_info ( * * kwargs ) : <nl> pass <nl> <nl> - <nl> # Given a source file , generate a test name . <nl> # i . e . " common_runtime / direct_session_test . cc " becomes <nl> # " common_runtime_direct_session_test " <nl> def src_to_test_name ( src ) : <nl> return src . replace ( " / " , " _ " ) . split ( " . " ) [ 0 ] <nl> <nl> - <nl> def full_path ( relative_paths ) : <nl> return [ PACKAGE_NAME + " / " + relative for relative in relative_paths ] <nl> <nl> - <nl> # List of proto files for android builds <nl> def tf_android_core_proto_sources ( core_proto_sources_relative ) : <nl> return [ <nl> " / / tensorflow / core : " + p for p in core_proto_sources_relative <nl> ] <nl> <nl> - <nl> # Returns the list of pb . h and proto . h headers that are generated for <nl> # tf_android_core_proto_sources ( ) . <nl> def tf_android_core_proto_headers ( core_proto_sources_relative ) : <nl> def tf_android_core_proto_headers ( core_proto_sources_relative ) : <nl> for p in core_proto_sources_relative <nl> ] ) <nl> <nl> - <nl> # Sanitize a dependency so that it works correctly from code that includes <nl> # TensorFlow as a submodule . 
<nl> def clean_dep ( dep ) : <nl> return str ( Label ( dep ) ) <nl> <nl> - <nl> def if_android_x86 ( a ) : <nl> return select ( { <nl> clean_dep ( " / / tensorflow : android_x86 " ) : a , <nl> def if_android_x86 ( a ) : <nl> " / / conditions : default " : [ ] , <nl> } ) <nl> <nl> - <nl> def if_android_arm ( a ) : <nl> return select ( { <nl> clean_dep ( " / / tensorflow : android_arm " ) : a , <nl> " / / conditions : default " : [ ] , <nl> } ) <nl> <nl> - <nl> def if_android_arm64 ( a ) : <nl> return select ( { <nl> clean_dep ( " / / tensorflow : android_arm64 " ) : a , <nl> " / / conditions : default " : [ ] , <nl> } ) <nl> <nl> - <nl> def if_android_mips ( a ) : <nl> return select ( { <nl> clean_dep ( " / / tensorflow : android_mips " ) : a , <nl> " / / conditions : default " : [ ] , <nl> } ) <nl> <nl> - <nl> def if_not_android ( a ) : <nl> return select ( { <nl> clean_dep ( " / / tensorflow : android " ) : [ ] , <nl> " / / conditions : default " : a , <nl> } ) <nl> <nl> - <nl> def if_not_android_mips_and_mips64 ( a ) : <nl> return select ( { <nl> clean_dep ( " / / tensorflow : android_mips " ) : [ ] , <nl> def if_not_android_mips_and_mips64 ( a ) : <nl> " / / conditions : default " : a , <nl> } ) <nl> <nl> - <nl> def if_android ( a ) : <nl> return select ( { <nl> clean_dep ( " / / tensorflow : android " ) : a , <nl> " / / conditions : default " : [ ] , <nl> } ) <nl> <nl> - <nl> def if_ios ( a ) : <nl> return select ( { <nl> clean_dep ( " / / tensorflow : ios " ) : a , <nl> " / / conditions : default " : [ ] , <nl> } ) <nl> <nl> - <nl> def if_mobile ( a ) : <nl> return select ( { <nl> clean_dep ( " / / tensorflow : android " ) : a , <nl> def if_mobile ( a ) : <nl> " / / conditions : default " : [ ] , <nl> } ) <nl> <nl> - <nl> def if_not_mobile ( a ) : <nl> return select ( { <nl> clean_dep ( " / / tensorflow : android " ) : [ ] , <nl> def if_not_mobile ( a ) : <nl> " / / conditions : default " : a , <nl> } ) <nl> <nl> - <nl> def if_not_windows ( a ) : <nl> return select ( { <nl> clean_dep ( " / / tensorflow : windows " ) : [ ] , <nl> def if_not_windows ( a ) : <nl> " / / conditions : default " : a , <nl> } ) <nl> <nl> - <nl> def if_linux_x86_64 ( a ) : <nl> return select ( { <nl> clean_dep ( " / / tensorflow : linux_x86_64 " ) : a , <nl> WIN_COPTS = [ <nl> " / DTENSORFLOW_USE_EIGEN_THREADPOOL " , <nl> " / DEIGEN_AVOID_STL_ARRAY " , <nl> " / Iexternal / gemmlowp " , <nl> - " / wd4018 " , # - Wno - sign - compare <nl> - " / U_HAS_EXCEPTIONS " , " / D_HAS_EXCEPTIONS = 1 " , " / EHsc " , # - fno - exceptions <nl> + " / wd4018 " , # - Wno - sign - compare <nl> + " / U_HAS_EXCEPTIONS " , <nl> + " / D_HAS_EXCEPTIONS = 1 " , <nl> + " / EHsc " , # - fno - exceptions <nl> " / DNOGDI " , <nl> ] <nl> <nl> def tf_copts ( android_optimization_level_override = " - O2 " ) : <nl> " / / conditions : default " : [ " - pthread " ] <nl> } ) ) <nl> <nl> - <nl> def tf_opts_nortti_if_android ( ) : <nl> return if_android ( [ <nl> " - fno - rtti " , <nl> def tf_opts_nortti_if_android ( ) : <nl> " - DGOOGLE_PROTOBUF_NO_STATIC_INITIALIZER " , <nl> ] ) <nl> <nl> - <nl> # LINT . ThenChange ( / / tensorflow / contrib / android / cmake / CMakeLists . txt ) <nl> <nl> - <nl> # Given a list of " op_lib_names " ( a list of files in the ops directory <nl> # without their . cc extensions ) , generate a library for that file . 
<nl> def tf_gen_op_libs ( op_lib_names , deps = None ) : <nl> def tf_gen_op_libs ( op_lib_names , deps = None ) : <nl> alwayslink = 1 , <nl> linkstatic = 1 , ) <nl> <nl> - <nl> def _make_search_paths ( prefix , levels_to_root ) : <nl> return " , " . join ( <nl> [ " - rpath , % s / % s " % ( prefix , " / " . join ( [ " . . " ] * search_level ) ) <nl> for search_level in range ( levels_to_root + 1 ) ] ) <nl> <nl> - <nl> def _rpath_linkopts ( name ) : <nl> # Search parent directories up to the TensorFlow root directory for shared <nl> # object dependencies , even if this op shared object is deeply nested <nl> def _rpath_linkopts ( name ) : <nl> ] , <nl> } ) <nl> <nl> - <nl> # Bazel - generated shared objects which must be linked into TensorFlow binaries <nl> # to define symbols from / / tensorflow / core : framework and / / tensorflow / core : lib . <nl> def tf_binary_additional_srcs ( ) : <nl> def tf_binary_additional_srcs ( ) : <nl> clean_dep ( " / / tensorflow : libtensorflow_framework . so " ) , <nl> ] ) <nl> <nl> - <nl> def tf_cc_shared_object ( <nl> name , <nl> srcs = [ ] , <nl> def tf_cc_shared_object ( <nl> * * kwargs ) <nl> <nl> register_extension_info ( <nl> - extension_name = " tf_cc_shared_object " , <nl> - label_regex_for_dep = " { extension_name } " ) <nl> - <nl> + extension_name = " tf_cc_shared_object " , <nl> + label_regex_for_dep = " { extension_name } " , <nl> + ) <nl> <nl> # Links in the framework shared object <nl> # ( / / third_party / tensorflow : libtensorflow_framework . so ) when not building <nl> def tf_cc_binary ( name , <nl> * * kwargs ) <nl> <nl> register_extension_info ( <nl> - extension_name = " tf_cc_binary " , <nl> - label_regex_for_dep = " { extension_name } . * " ) <nl> - <nl> + extension_name = " tf_cc_binary " , <nl> + label_regex_for_dep = " { extension_name } . * " , <nl> + ) <nl> <nl> def tf_gen_op_wrapper_cc ( name , <nl> out_ops_file , <nl> def tf_gen_op_wrapper_cc ( name , <nl> " $ ( location : " + out_ops_file + " . cc ) " + override_arg + " " + <nl> str ( include_internal_ops ) + " " + api_def_args_str ) ) <nl> <nl> - <nl> # Given a list of " op_lib_names " ( a list of files in the ops directory <nl> # without their . cc extensions ) , generate individual C + + . cc and . h <nl> # files for each of the ops files mentioned , and then generate a <nl> def tf_gen_op_wrappers_cc ( name , <nl> alwayslink = 1 , <nl> visibility = [ clean_dep ( " / / tensorflow : internal " ) ] ) <nl> <nl> - <nl> # Generates a Python library target wrapping the ops registered in " deps " . <nl> # <nl> # Args : <nl> def tf_gen_op_wrapper_py ( name , <nl> clean_dep ( " / / tensorflow / python : framework_for_generated_wrappers_v2 " ) , <nl> ] , ) <nl> <nl> - <nl> # Define a bazel macro that creates cc_test for tensorflow . <nl> # <nl> # Links in the framework shared object <nl> def tf_cc_test ( name , <nl> * * kwargs ) <nl> <nl> register_extension_info ( <nl> - extension_name = " tf_cc_test " , <nl> - label_regex_for_dep = " { extension_name } . * " ) <nl> - <nl> + extension_name = " tf_cc_test " , <nl> + label_regex_for_dep = " { extension_name } . * " , <nl> + ) <nl> <nl> # Part of the testing workflow requires a distinguishable name for the build <nl> # rules that involve a GPU , even if otherwise identical to the base rule . 
<nl> def tf_cc_test_gpu ( name , <nl> args = args ) <nl> <nl> register_extension_info ( <nl> - extension_name = " tf_cc_test_gpu " , <nl> - label_regex_for_dep = " { extension_name } " ) <nl> - <nl> + extension_name = " tf_cc_test_gpu " , <nl> + label_regex_for_dep = " { extension_name } " , <nl> + ) <nl> <nl> def tf_cuda_cc_test ( name , <nl> srcs = [ ] , <nl> def tf_cuda_cc_test ( name , <nl> args = args ) <nl> <nl> register_extension_info ( <nl> - extension_name = " tf_cuda_cc_test " , <nl> - label_regex_for_dep = " { extension_name } " ) <nl> - <nl> + extension_name = " tf_cuda_cc_test " , <nl> + label_regex_for_dep = " { extension_name } " , <nl> + ) <nl> <nl> def tf_cuda_only_cc_test ( name , <nl> srcs = [ ] , <nl> def tf_cuda_only_cc_test ( name , <nl> tags = tags + tf_cuda_tests_tags ( ) ) <nl> <nl> register_extension_info ( <nl> - extension_name = " tf_cuda_only_cc_test " , <nl> - label_regex_for_dep = " { extension_name } _gpu " ) <nl> - <nl> + extension_name = " tf_cuda_only_cc_test " , <nl> + label_regex_for_dep = " { extension_name } _gpu " , <nl> + ) <nl> <nl> # Create a cc_test for each of the tensorflow tests listed in " tests " <nl> def tf_cc_tests ( srcs , <nl> def tf_cc_tests ( srcs , <nl> linkopts = linkopts , <nl> nocopts = nocopts ) <nl> <nl> - <nl> def tf_cc_test_mkl ( srcs , <nl> deps , <nl> name = " " , <nl> def tf_cc_test_mkl ( srcs , <nl> args = None ) : <nl> if_mkl ( tf_cc_tests ( srcs , deps , name , linkstatic = linkstatic , tags = tags , size = size , args = args , nocopts = " - fno - exceptions " ) ) <nl> <nl> - <nl> def tf_cc_tests_gpu ( srcs , <nl> deps , <nl> name = " " , <nl> def tf_cc_tests_gpu ( srcs , <nl> args = None ) : <nl> tf_cc_tests ( srcs , deps , linkstatic , tags = tags , size = size , args = args ) <nl> <nl> - <nl> def tf_cuda_cc_tests ( srcs , <nl> deps , <nl> name = " " , <nl> def tf_java_test ( name , <nl> * * kwargs ) <nl> <nl> register_extension_info ( <nl> - extension_name = " tf_java_test " , <nl> - label_regex_for_dep = " { extension_name } " ) <nl> - <nl> + extension_name = " tf_java_test " , <nl> + label_regex_for_dep = " { extension_name } " , <nl> + ) <nl> <nl> def _cuda_copts ( ) : <nl> " " " Gets the appropriate set of copts for ( maybe ) CUDA compilation . <nl> def _cuda_copts ( ) : <nl> ] ) , <nl> } ) <nl> <nl> - <nl> # Build defs for TensorFlow kernels <nl> <nl> - <nl> # When this target is built using - - config = cuda , a cc_library is built <nl> # that passes - DGOOGLE_CUDA = 1 and ' - x cuda ' , linking in additional <nl> # libraries needed by GPU kernels . <nl> def tf_gpu_kernel_library ( srcs , <nl> * * kwargs ) <nl> <nl> register_extension_info ( <nl> - extension_name = " tf_gpu_kernel_library " , <nl> - label_regex_for_dep = " { extension_name } " ) <nl> - <nl> + extension_name = " tf_gpu_kernel_library " , <nl> + label_regex_for_dep = " { extension_name } " , <nl> + ) <nl> <nl> def tf_cuda_library ( deps = None , cuda_deps = None , copts = None , * * kwargs ) : <nl> " " " Generate a cc_library with a conditional set of CUDA dependencies . 
<nl> def tf_cuda_library ( deps = None , cuda_deps = None , copts = None , * * kwargs ) : <nl> * * kwargs ) <nl> <nl> register_extension_info ( <nl> - extension_name = " tf_cuda_library " , <nl> - label_regex_for_dep = " { extension_name } " ) <nl> - <nl> - <nl> + extension_name = " tf_cuda_library " , <nl> + label_regex_for_dep = " { extension_name } " , <nl> + ) <nl> <nl> def tf_kernel_library ( name , <nl> prefix = None , <nl> def tf_kernel_library ( name , <nl> * * kwargs ) <nl> <nl> register_extension_info ( <nl> - extension_name = " tf_kernel_library " , <nl> - label_regex_for_dep = " { extension_name } ( _gpu ) ? " ) <nl> - <nl> + extension_name = " tf_kernel_library " , <nl> + label_regex_for_dep = " { extension_name } ( _gpu ) ? " , <nl> + ) <nl> <nl> def tf_mkl_kernel_library ( name , <nl> prefix = None , <nl> def tf_mkl_kernel_library ( name , <nl> ) ) <nl> <nl> register_extension_info ( <nl> - extension_name = " tf_mkl_kernel_library " , <nl> - label_regex_for_dep = " { extension_name } " ) <nl> - <nl> + extension_name = " tf_mkl_kernel_library " , <nl> + label_regex_for_dep = " { extension_name } " , <nl> + ) <nl> <nl> # Bazel rules for building swig files . <nl> def _py_wrap_cc_impl ( ctx ) : <nl> def _py_wrap_cc_impl ( ctx ) : <nl> progress_message = " SWIGing " + src . path ) <nl> return struct ( files = depset ( outputs ) ) <nl> <nl> - <nl> _py_wrap_cc = rule ( <nl> - attrs = { <nl> - " srcs " : <nl> - attr . label_list ( <nl> - mandatory = True , <nl> - allow_files = True , ) , <nl> - " swig_includes " : <nl> - attr . label_list ( <nl> - cfg = " data " , <nl> - allow_files = True , ) , <nl> - " deps " : <nl> - attr . label_list ( <nl> - allow_files = True , <nl> - providers = [ " cc " ] , ) , <nl> - " toolchain_deps " : <nl> - attr . label_list ( <nl> - allow_files = True , ) , <nl> - " module_name " : <nl> - attr . string ( mandatory = True ) , <nl> - " py_module_name " : <nl> - attr . string ( mandatory = True ) , <nl> - " _swig " : <nl> - attr . label ( <nl> - default = Label ( " @ swig / / : swig " ) , <nl> - executable = True , <nl> - cfg = " host " , ) , <nl> - " _swiglib " : <nl> - attr . label ( <nl> - default = Label ( " @ swig / / : templates " ) , <nl> - allow_files = True , ) , <nl> + attrs = { <nl> + " srcs " : attr . label_list ( <nl> + mandatory = True , <nl> + allow_files = True , <nl> + ) , <nl> + " swig_includes " : attr . label_list ( <nl> + cfg = " data " , <nl> + allow_files = True , <nl> + ) , <nl> + " deps " : attr . label_list ( <nl> + allow_files = True , <nl> + providers = [ " cc " ] , <nl> + ) , <nl> + " toolchain_deps " : attr . label_list ( <nl> + allow_files = True , <nl> + ) , <nl> + " module_name " : attr . string ( mandatory = True ) , <nl> + " py_module_name " : attr . string ( mandatory = True ) , <nl> + " _swig " : attr . label ( <nl> + default = Label ( " @ swig / / : swig " ) , <nl> + executable = True , <nl> + cfg = " host " , <nl> + ) , <nl> + " _swiglib " : attr . label ( <nl> + default = Label ( " @ swig / / : templates " ) , <nl> + allow_files = True , <nl> + ) , <nl> } , <nl> - outputs = { <nl> + outputs = { <nl> " cc_out " : " % { module_name } . cc " , <nl> " py_out " : " % { py_module_name } . py " , <nl> } , <nl> - implementation = _py_wrap_cc_impl , ) <nl> - <nl> + implementation = _py_wrap_cc_impl , <nl> + ) <nl> <nl> def _get_repository_roots ( ctx , files ) : <nl> " " " Returns abnormal root directories under which files reside . 
<nl> def _get_repository_roots ( ctx , files ) : <nl> result [ root ] - = 1 <nl> return [ k for v , k in sorted ( [ ( v , k ) for k , v in result . items ( ) ] ) ] <nl> <nl> - <nl> # Bazel rule for collecting the header files that a target depends on . <nl> def _transitive_hdrs_impl ( ctx ) : <nl> outputs = depset ( ) <nl> def _transitive_hdrs_impl ( ctx ) : <nl> outputs + = dep . cc . transitive_headers <nl> return struct ( files = outputs ) <nl> <nl> - <nl> _transitive_hdrs = rule ( <nl> - attrs = { <nl> + attrs = { <nl> " deps " : attr . label_list ( <nl> - allow_files = True , <nl> - providers = [ " cc " ] , ) , <nl> + allow_files = True , <nl> + providers = [ " cc " ] , <nl> + ) , <nl> } , <nl> - implementation = _transitive_hdrs_impl , ) <nl> - <nl> + implementation = _transitive_hdrs_impl , <nl> + ) <nl> <nl> def transitive_hdrs ( name , deps = [ ] , * * kwargs ) : <nl> _transitive_hdrs ( name = name + " _gather " , deps = deps ) <nl> native . filegroup ( name = name , srcs = [ " : " + name + " _gather " ] ) <nl> <nl> - <nl> # Create a header only library that includes all the headers exported by <nl> # the libraries in deps . <nl> def cc_header_only_library ( name , deps = [ ] , includes = [ ] , * * kwargs ) : <nl> def cc_header_only_library ( name , deps = [ ] , includes = [ ] , * * kwargs ) : <nl> includes = includes , <nl> * * kwargs ) <nl> <nl> - <nl> def tf_custom_op_library_additional_deps ( ) : <nl> return [ <nl> " @ protobuf_archive / / : protobuf_headers " , <nl> def tf_custom_op_library_additional_deps ( ) : <nl> clean_dep ( " / / tensorflow / core : framework_headers_lib " ) , <nl> ] <nl> <nl> - <nl> # Traverse the dependency graph along the " deps " attribute of the <nl> # target and return a struct with one field called ' tf_collected_deps ' . <nl> # tf_collected_deps will be the union of the deps of the current target <nl> def _collect_deps_aspect_impl ( target , ctx ) : <nl> alldeps = alldeps | dep . tf_collected_deps <nl> return struct ( tf_collected_deps = alldeps ) <nl> <nl> - <nl> collect_deps_aspect = aspect ( <nl> - implementation = _collect_deps_aspect_impl , attr_aspects = [ " deps " ] ) <nl> - <nl> + attr_aspects = [ " deps " ] , <nl> + implementation = _collect_deps_aspect_impl , <nl> + ) <nl> <nl> def _dep_label ( dep ) : <nl> label = dep . label <nl> return label . package + " : " + label . name <nl> <nl> - <nl> # This rule checks that the transitive dependencies of targets listed <nl> # in the ' deps ' attribute don ' t depend on the targets listed in <nl> # the ' disallowed_deps ' attribute . <nl> def _check_deps_impl ( ctx ) : <nl> disallowed_dep ) ) <nl> return struct ( ) <nl> <nl> - <nl> check_deps = rule ( <nl> _check_deps_impl , <nl> - attrs = { <nl> - " deps " : <nl> - attr . label_list ( <nl> - aspects = [ collect_deps_aspect ] , mandatory = True , <nl> - allow_files = True ) , <nl> - " disallowed_deps " : <nl> - attr . label_list ( mandatory = True , allow_files = True ) <nl> - } , ) <nl> - <nl> + attrs = { <nl> + " deps " : attr . label_list ( <nl> + aspects = [ collect_deps_aspect ] , <nl> + mandatory = True , <nl> + allow_files = True , <nl> + ) , <nl> + " disallowed_deps " : attr . label_list ( <nl> + mandatory = True , <nl> + allow_files = True , <nl> + ) , <nl> + } , <nl> + ) <nl> <nl> # Helper to build a dynamic library ( . so ) from the sources containing <nl> # implementations of custom ops and kernels . 
<nl> def tf_custom_op_library ( name , srcs = [ ] , gpu_srcs = [ ] , deps = [ ] , linkopts = [ ] ) : <nl> } ) , ) <nl> <nl> register_extension_info ( <nl> - extension_name = " tf_custom_op_library " , <nl> - label_regex_for_dep = " { extension_name } " ) <nl> - <nl> + extension_name = " tf_custom_op_library " , <nl> + label_regex_for_dep = " { extension_name } " , <nl> + ) <nl> <nl> def tf_custom_op_py_library ( name , <nl> srcs = [ ] , <nl> def tf_custom_op_py_library ( name , <nl> deps = deps , ) <nl> <nl> register_extension_info ( <nl> - extension_name = " tf_custom_op_py_library " , <nl> - label_regex_for_dep = " { extension_name } " ) <nl> - <nl> + extension_name = " tf_custom_op_py_library " , <nl> + label_regex_for_dep = " { extension_name } " , <nl> + ) <nl> <nl> def tf_extension_linkopts ( ) : <nl> return [ ] # No extension link opts <nl> <nl> - <nl> def tf_extension_copts ( ) : <nl> return [ ] # No extension c opts <nl> <nl> - <nl> def tf_py_wrap_cc ( name , <nl> srcs , <nl> swig_includes = [ ] , <nl> def tf_py_wrap_cc ( name , <nl> " / / conditions : default " : [ " : " + cc_library_name ] , <nl> } ) ) <nl> <nl> - <nl> def py_test ( deps = [ ] , * * kwargs ) : <nl> native . py_test ( <nl> deps = select ( { <nl> def py_test ( deps = [ ] , * * kwargs ) : <nl> * * kwargs ) <nl> <nl> register_extension_info ( <nl> - extension_name = " py_test " , <nl> - label_regex_for_dep = " { extension_name } " ) <nl> - <nl> + extension_name = " py_test " , <nl> + label_regex_for_dep = " { extension_name } " , <nl> + ) <nl> <nl> def tf_py_test ( name , <nl> srcs , <nl> def tf_py_test ( name , <nl> srcs_version = " PY2AND3 " ) <nl> <nl> register_extension_info ( <nl> - extension_name = " tf_py_test " , <nl> - label_regex_map = { " additional_deps " : " deps : { extension_name } " } ) <nl> - <nl> + extension_name = " tf_py_test " , <nl> + label_regex_map = { " additional_deps " : " deps : { extension_name } " } , <nl> + ) <nl> <nl> def cuda_py_test ( name , <nl> srcs , <nl> def cuda_py_test ( name , <nl> xla_enabled = xla_enabled ) <nl> <nl> register_extension_info ( <nl> - extension_name = " cuda_py_test " , <nl> - label_regex_map = { " additional_deps " : " additional_deps : { extension_name } " } ) <nl> - <nl> + extension_name = " cuda_py_test " , <nl> + label_regex_map = { " additional_deps " : " additional_deps : { extension_name } " } , <nl> + ) <nl> <nl> def sycl_py_test ( name , <nl> srcs , <nl> def sycl_py_test ( name , <nl> xla_enabled = xla_enabled ) <nl> <nl> register_extension_info ( <nl> - extension_name = " sycl_py_test " , <nl> - label_regex_map = { " additional_deps " : " additional_deps : { extension_name } " } ) <nl> - <nl> + extension_name = " sycl_py_test " , <nl> + label_regex_map = { " additional_deps " : " additional_deps : { extension_name } " } , <nl> + ) <nl> <nl> def py_tests ( name , <nl> srcs , <nl> def py_tests ( name , <nl> additional_deps = additional_deps , <nl> xla_enabled = xla_enabled ) <nl> <nl> - <nl> def cuda_py_tests ( name , <nl> srcs , <nl> size = " medium " , <nl> def cuda_py_tests ( name , <nl> prefix = prefix , <nl> xla_enabled = xla_enabled ) <nl> <nl> - <nl> # Creates a genrule named < name > for running tools / proto_text ' s generator to <nl> # make the proto_text functions , for the protos passed in < srcs > . 
<nl> # <nl> def tf_generate_proto_text_sources ( name , srcs_relative_dir , srcs ) : <nl> ] , ) <nl> return struct ( hdrs = out_hdrs , srcs = out_srcs ) <nl> <nl> - <nl> def tf_genrule_cmd_append_to_srcs ( to_append ) : <nl> return ( " cat $ ( SRCS ) > $ ( @ ) & & " + " echo > > $ ( @ ) & & " + " echo " + to_append + <nl> " > > $ ( @ ) " ) <nl> <nl> - <nl> def tf_version_info_genrule ( ) : <nl> native . genrule ( <nl> name = " version_info_gen " , <nl> def tf_version_info_genrule ( ) : <nl> local = 1 , <nl> tools = [ clean_dep ( " / / tensorflow / tools / git : gen_git_source . py " ) ] , ) <nl> <nl> - <nl> def tf_py_build_info_genrule ( ) : <nl> native . genrule ( <nl> name = " py_build_info_gen " , <nl> def tf_py_build_info_genrule ( ) : <nl> local = 1 , <nl> tools = [ clean_dep ( " / / tensorflow / tools / build_info : gen_build_info . py " ) ] , ) <nl> <nl> - <nl> def cc_library_with_android_deps ( deps , <nl> android_deps = [ ] , <nl> common_deps = [ ] , <nl> def cc_library_with_android_deps ( deps , <nl> native . cc_library ( deps = deps , * * kwargs ) <nl> <nl> register_extension_info ( <nl> - extension_name = " cc_library_with_android_deps " , <nl> - label_regex_for_dep = " { extension_name } " ) <nl> + extension_name = " cc_library_with_android_deps " , <nl> + label_regex_for_dep = " { extension_name } " , <nl> + ) <nl> | Small reformatting of tensorflow . bzl | tensorflow/tensorflow | 7248c3ec2c87648fec732e17f3e749d12d113abe | 2017-12-02T01:13:56Z |
similarity index 100 % <nl> rename from test / 1_stdlib / NewArray . swift . gyb <nl> rename to validation - test / stdlib / NewArray . swift . gyb <nl> | Move NewArray . swift . gyb to validation - test | apple/swift | 3f448b389658e31782268d573b6334064c64398e | 2016-03-20T01:07:50Z |
mmm a / gloo / transport / ibverbs / CMakeLists . txt <nl> ppp b / gloo / transport / ibverbs / CMakeLists . txt <nl> list ( APPEND GLOO_TRANSPORT_SRCS <nl> " $ { CMAKE_CURRENT_SOURCE_DIR } / address . cc " <nl> " $ { CMAKE_CURRENT_SOURCE_DIR } / buffer . cc " <nl> " $ { CMAKE_CURRENT_SOURCE_DIR } / device . cc " <nl> + " $ { CMAKE_CURRENT_SOURCE_DIR } / memory_region . cc " <nl> " $ { CMAKE_CURRENT_SOURCE_DIR } / pair . cc " <nl> ) <nl> <nl> mmm a / gloo / transport / ibverbs / buffer . cc <nl> ppp b / gloo / transport / ibverbs / buffer . cc <nl> void Buffer : : send ( size_t offset , size_t length ) { <nl> <nl> struct ibv_send_wr wr ; <nl> memset ( & wr , 0 , sizeof ( wr ) ) ; <nl> - wr . wr_id = ( uint64_t ) ( ( Handler * ) this ) ; <nl> + wr . wr_id = slot_ ; <nl> wr . sg_list = & list ; <nl> wr . num_sge = 1 ; <nl> wr . opcode = IBV_WR_RDMA_WRITE_WITH_IMM ; <nl> void Buffer : : handleCompletion ( struct ibv_wc * wc ) { <nl> } else if ( wc - > opcode = = IBV_WC_RDMA_WRITE ) { <nl> if ( debug_ ) { <nl> std : : cout < < " [ " < < getpid ( ) < < " ] " ; <nl> - std : : cout < < " sent " < < wc - > byte_len < < " bytes " ; <nl> + std : : cout < < " send complete " ; <nl> std : : cout < < std : : endl ; <nl> } <nl> std : : unique_lock < std : : mutex > lock ( m_ ) ; <nl> mmm a / gloo / transport / ibverbs / buffer . h <nl> ppp b / gloo / transport / ibverbs / buffer . h <nl> namespace gloo { <nl> namespace transport { <nl> namespace ibverbs { <nl> <nl> - class Buffer : public : : gloo : : transport : : Buffer , public Handler { <nl> + class Buffer : public : : gloo : : transport : : Buffer { <nl> public : <nl> virtual ~ Buffer ( ) ; <nl> <nl> class Buffer : public : : gloo : : transport : : Buffer , public Handler { <nl> virtual void waitRecv ( ) override ; <nl> virtual void waitSend ( ) override ; <nl> <nl> - virtual void handleCompletion ( struct ibv_wc * wc ) override ; <nl> + void handleCompletion ( struct ibv_wc * wc ) ; <nl> <nl> protected : <nl> / / May only be constructed from helper function in pair . cc <nl> mmm a / gloo / transport / ibverbs / device . cc <nl> ppp b / gloo / transport / ibverbs / device . cc <nl> Device : : Device ( const struct attr & attr , ibv_context * context ) <nl> comp_channel_ = ibv_create_comp_channel ( context_ ) ; <nl> GLOO_ENFORCE ( comp_channel_ ) ; <nl> <nl> - / / Completion queue <nl> - cq_ = ibv_create_cq ( context_ , 64 , nullptr , comp_channel_ , 0 ) ; <nl> - GLOO_ENFORCE ( cq_ ) ; <nl> - <nl> - / / Arm notification mechanism for completion queue <nl> - / / The second argument is named solicited_only and is <nl> - / / set to 0 because we want notifications for everything . <nl> - rv = ibv_req_notify_cq ( cq_ , 0 ) ; <nl> - GLOO_ENFORCE_NE ( rv , - 1 ) ; <nl> - <nl> / / Start thread to poll completion queue and dispatch <nl> / / completions for completed work requests . <nl> done_ = false ; <nl> Device : : ~ Device ( ) { <nl> done_ = true ; <nl> loop_ - > join ( ) ; <nl> <nl> - rv = ibv_destroy_cq ( cq_ ) ; <nl> - GLOO_ENFORCE_NE ( rv , - 1 ) ; <nl> - <nl> rv = ibv_destroy_comp_channel ( comp_channel_ ) ; <nl> GLOO_ENFORCE_NE ( rv , - 1 ) ; <nl> <nl> void Device : : loop ( ) { <nl> pfd . events = POLLIN ; <nl> pfd . revents = 0 ; <nl> <nl> - / / Keep array for completed work requests on stack <nl> - std : : array < struct ibv_wc , capacity_ > wc ; <nl> - <nl> while ( ! 
done_ ) { <nl> do { <nl> rv = poll ( & pfd , 1 , 10 ) ; <nl> void Device : : loop ( ) { <nl> break ; <nl> } <nl> <nl> - ibv_cq * cq ; <nl> - void * cq_context ; <nl> - rv = ibv_get_cq_event ( comp_channel_ , & cq , & cq_context ) ; <nl> + struct ibv_cq * cq ; <nl> + void * cqContext ; <nl> + rv = ibv_get_cq_event ( comp_channel_ , & cq , & cqContext ) ; <nl> GLOO_ENFORCE_NE ( rv , - 1 ) ; <nl> <nl> - / / Only handle CQEs from our own CQ <nl> - GLOO_ENFORCE_EQ ( cq , cq_ ) ; <nl> - ibv_ack_cq_events ( cq_ , 1 ) ; <nl> - <nl> - / / Arm notification mechanism for completion queue <nl> - / / The second argument is named solicited_only and is <nl> - / / set to 0 because we want notifications for everything . <nl> - rv = ibv_req_notify_cq ( cq_ , 0 ) ; <nl> - GLOO_ENFORCE_NE ( rv , - 1 ) ; <nl> - <nl> - / / Invoke handler for every work completion . <nl> - auto nwc = ibv_poll_cq ( cq_ , capacity_ , wc . data ( ) ) ; <nl> - GLOO_ENFORCE_GE ( nwc , 0 ) ; <nl> - for ( int i = 0 ; i < nwc ; i + + ) { <nl> - if ( wc [ i ] . status ! = IBV_WC_SUCCESS ) { <nl> - continue ; <nl> - } <nl> - <nl> - auto h = reinterpret_cast < Handler * > ( wc [ i ] . wr_id ) ; <nl> - h - > handleCompletion ( & wc [ i ] ) ; <nl> - } <nl> + / / Completion queue context is a Pair * . <nl> + / / Delegate handling of this event to the pair itself . <nl> + Pair * pair = static_cast < Pair * > ( cqContext ) ; <nl> + pair - > handleCompletions ( ) ; <nl> } <nl> } <nl> } / / namespace ibverbs <nl> mmm a / gloo / transport / ibverbs / device . h <nl> ppp b / gloo / transport / ibverbs / device . h <nl> std : : shared_ptr < : : gloo : : transport : : Device > CreateDevice ( <nl> class Pair ; <nl> class Buffer ; <nl> <nl> - / / Pure virtual base class for Pair / Buffer . <nl> - / / Used to dispatch completion handling from device loop . <nl> - class Handler { <nl> - public : <nl> - virtual ~ Handler ( ) { } <nl> - virtual void handleCompletion ( struct ibv_wc * wc ) = 0 ; <nl> - } ; <nl> - <nl> class Device : public : : gloo : : transport : : Device , <nl> public std : : enable_shared_from_this < Device > { <nl> static const int capacity_ = 64 ; <nl> class Device : public : : gloo : : transport : : Device , <nl> ibv_context * context_ ; <nl> ibv_pd * pd_ ; <nl> ibv_comp_channel * comp_channel_ ; <nl> - ibv_cq * cq_ ; <nl> <nl> void loop ( ) ; <nl> <nl> new file mode 100644 <nl> index 000000000000 . . d490d23883c5 <nl> mmm / dev / null <nl> ppp b / gloo / transport / ibverbs / memory_region . cc <nl> <nl> + / * * <nl> + * Copyright ( c ) 2017 - present , Facebook , Inc . <nl> + * All rights reserved . <nl> + * <nl> + * This source code is licensed under the BSD - style license found in the <nl> + * LICENSE file in the root directory of this source tree . An additional grant <nl> + * of patent rights can be found in the PATENTS file in the same directory . <nl> + * / <nl> + <nl> + # include " gloo / transport / ibverbs / memory_region . h " <nl> + <nl> + # include < string . h > <nl> + <nl> + # include " gloo / common / logging . h " <nl> + <nl> + namespace gloo { <nl> + namespace transport { <nl> + namespace ibverbs { <nl> + <nl> + MemoryRegion : : MemoryRegion ( struct ibv_pd * pd ) { <nl> + memset ( & src_ , 0 , sizeof ( src_ ) ) ; <nl> + <nl> + / / Map this region so it can be used as source for a send , or as a <nl> + / / target for a receive . 
<nl> + mr_ = ibv_reg_mr ( pd , & src_ , sizeof ( src_ ) , IBV_ACCESS_LOCAL_WRITE ) ; <nl> + } <nl> + <nl> + MemoryRegion : : MemoryRegion ( struct ibv_pd * pd , struct ibv_mr * src ) <nl> + : MemoryRegion ( pd ) { <nl> + memcpy ( & src_ , src , sizeof ( src_ ) ) ; <nl> + } <nl> + <nl> + MemoryRegion : : ~ MemoryRegion ( ) { <nl> + int rv ; <nl> + <nl> + rv = ibv_dereg_mr ( mr_ ) ; <nl> + GLOO_ENFORCE_NE ( rv , - 1 ) ; <nl> + } <nl> + <nl> + } / / namespace ibverbs <nl> + } / / namespace transport <nl> + } / / namespace gloo <nl> new file mode 100644 <nl> index 000000000000 . . df9f79ed9902 <nl> mmm / dev / null <nl> ppp b / gloo / transport / ibverbs / memory_region . h <nl> <nl> + / * * <nl> + * Copyright ( c ) 2017 - present , Facebook , Inc . <nl> + * All rights reserved . <nl> + * <nl> + * This source code is licensed under the BSD - style license found in the <nl> + * LICENSE file in the root directory of this source tree . An additional grant <nl> + * of patent rights can be found in the PATENTS file in the same directory . <nl> + * / <nl> + <nl> + # pragma once <nl> + <nl> + # include " gloo / transport / ibverbs / device . h " <nl> + <nl> + namespace gloo { <nl> + namespace transport { <nl> + namespace ibverbs { <nl> + <nl> + / / MemoryRegion is used to send local ibv_mr to remote side of pair . <nl> + / / Every pair has one instance per slot to receive ibv_mr ' s . <nl> + / / For every receive buffer created on this pair , another instance <nl> + / / is created to the ibv_mr of that buffer can be sent to its peer . <nl> + class MemoryRegion { <nl> + public : <nl> + explicit MemoryRegion ( struct ibv_pd * ) ; <nl> + explicit MemoryRegion ( struct ibv_pd * , struct ibv_mr * ) ; <nl> + MemoryRegion ( const MemoryRegion & that ) = delete ; <nl> + MemoryRegion & operator = ( const MemoryRegion & that ) = delete ; <nl> + ~ MemoryRegion ( ) ; <nl> + <nl> + / / Construct and return scatter / gather element for this memory region . <nl> + struct ibv_sge sge ( ) const { <nl> + struct ibv_sge list ; <nl> + list . addr = ( uint64_t ) & src_ ; <nl> + list . length = sizeof ( src_ ) ; <nl> + list . lkey = mr_ - > lkey ; <nl> + return list ; <nl> + } <nl> + <nl> + struct ibv_mr mr ( ) const { <nl> + return src_ ; <nl> + } <nl> + <nl> + protected : <nl> + / / The ibv_mr that is read from or written to . <nl> + struct ibv_mr src_ ; <nl> + <nl> + / / The ibv_mr to hold the registration of src_ . <nl> + struct ibv_mr * mr_ ; <nl> + } ; <nl> + <nl> + } / / namespace ibverbs <nl> + } / / namespace transport <nl> + } / / namespace gloo <nl> mmm a / gloo / transport / ibverbs / pair . cc <nl> ppp b / gloo / transport / ibverbs / pair . cc <nl> <nl> # include < stdlib . h > <nl> # include < string . h > <nl> <nl> + # include " gloo / common / common . h " <nl> # include " gloo / common / logging . h " <nl> <nl> namespace gloo { <nl> namespace transport { <nl> namespace ibverbs { <nl> <nl> Pair : : Pair ( const std : : shared_ptr < Device > & dev ) <nl> - : dev_ ( dev ) , peer_memory_regions_ready_ ( 0 ) { <nl> + : dev_ ( dev ) , <nl> + peerMemoryRegionsReady_ ( 0 ) { <nl> int rv ; <nl> <nl> - memset ( peer_memory_regions_ . data ( ) , 0 , sizeof ( peer_memory_regions_ ) ) ; <nl> - memset ( completion_handlers_ . data ( ) , 0 , sizeof ( completion_handlers_ ) ) ; <nl> + memset ( peerMemoryRegions_ . data ( ) , 0 , sizeof ( peerMemoryRegions_ ) ) ; <nl> + memset ( sendCompletionHandlers_ . data ( ) , 0 , sizeof ( sendCompletionHandlers_ ) ) ; <nl> + memset ( recvCompletionHandlers_ . 
data ( ) , 0 , sizeof ( recvCompletionHandlers_ ) ) ; <nl> + <nl> + / / Create completion queue <nl> + { <nl> + / / Have to register this completion queue with the device ' s <nl> + / / completion channel to support asynchronous completion handling . <nl> + / / Pairs use asynchronous completion handling by default so <nl> + / / we call ibv_req_notify_cq ( 3 ) to request the first notification . <nl> + cq_ = ibv_create_cq ( <nl> + dev_ - > context_ , <nl> + kCompletionQueueCapacity , <nl> + this , <nl> + dev_ - > comp_channel_ , <nl> + 0 ) ; <nl> + GLOO_ENFORCE ( cq_ ) ; <nl> + <nl> + / / Arm notification mechanism for completion queue . <nl> + rv = ibv_req_notify_cq ( cq_ , kNotifyOnAnyCompletion ) ; <nl> + GLOO_ENFORCE_NE ( rv , - 1 ) ; <nl> + } <nl> <nl> / / Create queue pair <nl> { <nl> struct ibv_qp_init_attr attr ; <nl> memset ( & attr , 0 , sizeof ( struct ibv_qp_init_attr ) ) ; <nl> - attr . send_cq = dev_ - > cq_ ; <nl> - attr . recv_cq = dev_ - > cq_ ; <nl> - attr . cap . max_send_wr = Pair : : MAX_BUFFERS ; <nl> - attr . cap . max_recv_wr = Pair : : MAX_BUFFERS ; <nl> + attr . send_cq = cq_ ; <nl> + attr . recv_cq = cq_ ; <nl> + attr . cap . max_send_wr = Pair : : kMaxBuffers ; <nl> + attr . cap . max_recv_wr = Pair : : kMaxBuffers ; <nl> attr . cap . max_send_sge = 1 ; <nl> attr . cap . max_recv_sge = 1 ; <nl> attr . qp_type = IBV_QPT_RC ; <nl> Pair : : Pair ( const std : : shared_ptr < Device > & dev ) <nl> / / The remote side of this pair will call the ' recv ' function to <nl> / / register a receive buffer . The memory region will be registered <nl> / / and the identifier sent to this side of the pair . <nl> - for ( int i = 0 ; i < MAX_BUFFERS ; + + i ) { <nl> + for ( int i = 0 ; i < kMaxBuffers ; + + i ) { <nl> receiveMemoryRegion ( ) ; <nl> } <nl> } <nl> Pair : : ~ Pair ( ) { <nl> <nl> rv = ibv_destroy_qp ( qp_ ) ; <nl> GLOO_ENFORCE_NE ( rv , - 1 ) ; <nl> + <nl> + rv = ibv_destroy_cq ( cq_ ) ; <nl> + GLOO_ENFORCE_NE ( rv , - 1 ) ; <nl> } <nl> <nl> const Address & Pair : : address ( ) const { <nl> void Pair : : setSync ( bool / * sync * / , bool / * busyPoll * / ) { <nl> } <nl> <nl> void Pair : : receiveMemoryRegion ( ) { <nl> - struct ibv_mr * mr = new struct ibv_mr ; <nl> - struct ibv_mr * init ; <nl> - int rv ; <nl> - <nl> - / / Keep list of ibv_mr ' s that the other side of this pair can write <nl> - / / into . They are written in a FIFO order so the handler can always <nl> - / / pop off the first entry upon receiving a write . <nl> - tmp_memory_regions_ . push_back ( mr ) ; <nl> - <nl> - / / Map the memory region struct itself so the other side of this <nl> - / / pair can write into it . <nl> - init = ibv_reg_mr ( dev_ - > pd_ , mr , sizeof ( * mr ) , IBV_ACCESS_LOCAL_WRITE ) ; <nl> - mapped_recv_regions_ . push_back ( init ) ; <nl> - <nl> - struct ibv_sge list ; <nl> - list . addr = ( uint64_t ) mr ; <nl> - list . length = sizeof ( * mr ) ; <nl> - list . lkey = init - > lkey ; <nl> - <nl> + auto mr = make_unique < MemoryRegion > ( dev_ - > pd_ ) ; <nl> + struct ibv_sge list = mr - > sge ( ) ; <nl> struct ibv_recv_wr wr ; <nl> memset ( & wr , 0 , sizeof ( wr ) ) ; <nl> - wr . wr_id = ( uint64_t ) ( ( Handler * ) this ) ; <nl> wr . sg_list = & list ; <nl> wr . num_sge = 1 ; <nl> <nl> / / The work request is serialized and sent to the driver so it <nl> - / / doesn ' t need to live beyond the ibv_post_recv call ( and be kept <nl> - / / on the stack ) . <nl> + / / doesn ' t need to be valid after the ibv_post_recv call . 
<nl> struct ibv_recv_wr * bad_wr ; <nl> - rv = ibv_post_recv ( qp_ , & wr , & bad_wr ) ; <nl> + int rv = ibv_post_recv ( qp_ , & wr , & bad_wr ) ; <nl> GLOO_ENFORCE_NE ( rv , - 1 ) ; <nl> - } <nl> - <nl> - void Pair : : sendMemoryRegion ( Handler * h , struct ibv_mr * mr , int slot ) { <nl> - struct ibv_mr * init ; <nl> - int rv ; <nl> - <nl> - / / Register completion handler for this memory region . <nl> - completion_handlers_ [ slot ] = h ; <nl> - <nl> - / / First post receive work request to avoid racing with <nl> - / / a send to this region from the other side of this pair . <nl> - postReceive ( ) ; <nl> <nl> - / / Map the memory region struct itself so it can be sent to <nl> - / / the other side of this pair . <nl> - init = <nl> - ibv_reg_mr ( dev_ - > pd_ , mr , sizeof ( struct ibv_mr ) , IBV_ACCESS_LOCAL_WRITE ) ; <nl> - mapped_send_regions_ . push_back ( init ) ; <nl> - <nl> - struct ibv_sge list ; <nl> - list . addr = ( uint64_t ) mr ; <nl> - list . length = sizeof ( struct ibv_mr ) ; <nl> - list . lkey = init - > lkey ; <nl> + / / Keep memory region around so that the other side of this pair can <nl> + / / write into it . They are written in a FIFO order so the handler <nl> + / / can always pop off the first entry upon handling the completion . <nl> + mappedRecvRegions_ . push_back ( std : : move ( mr ) ) ; <nl> + } <nl> <nl> + void Pair : : sendMemoryRegion ( struct ibv_mr * src , int slot ) { <nl> + auto mr = make_unique < MemoryRegion > ( dev_ - > pd_ , src ) ; <nl> + struct ibv_sge list = mr - > sge ( ) ; <nl> struct ibv_send_wr wr ; <nl> memset ( & wr , 0 , sizeof ( wr ) ) ; <nl> - wr . wr_id = ( uint64_t ) ( ( Handler * ) this ) ; <nl> wr . sg_list = & list ; <nl> wr . num_sge = 1 ; <nl> wr . opcode = IBV_WR_SEND_WITH_IMM ; <nl> wr . send_flags = IBV_SEND_SIGNALED ; <nl> wr . imm_data = slot ; <nl> <nl> + / / First post receive work request to avoid racing with <nl> + / / a send to this region from the other side of this pair . <nl> + postReceive ( ) ; <nl> + <nl> / / The work request is serialized and sent to the driver so it <nl> / / doesn ' t need to be valid after the ibv_post_send call . <nl> struct ibv_send_wr * bad_wr ; <nl> - rv = ibv_post_send ( qp_ , & wr , & bad_wr ) ; <nl> + int rv = ibv_post_send ( qp_ , & wr , & bad_wr ) ; <nl> GLOO_ENFORCE_NE ( rv , - 1 ) ; <nl> + <nl> + / / Keep memory region around until this send operation completes . <nl> + / / They are posted in a FIFO order so the handler can always pop off <nl> + / / the first entry upon handling the completion . <nl> + mappedSendRegions_ . push_back ( std : : move ( mr ) ) ; <nl> } <nl> <nl> const struct ibv_mr * Pair : : getMemoryRegion ( int slot ) { <nl> - if ( peer_memory_regions_ready_ . load ( ) & ( 1 < < slot ) ) { <nl> - return peer_memory_regions_ [ slot ] ; <nl> + if ( peerMemoryRegionsReady_ . load ( ) & ( 1 < < slot ) ) { <nl> + return & peerMemoryRegions_ [ slot ] ; <nl> } <nl> <nl> std : : unique_lock < std : : mutex > lock ( m_ ) ; <nl> - while ( peer_memory_regions_ [ slot ] = = nullptr ) { <nl> + while ( peerMemoryRegions_ [ slot ] . addr = = nullptr ) { <nl> cv_ . wait ( lock ) ; <nl> } <nl> - peer_memory_regions_ready_ & = ( 1 < < slot ) ; <nl> - return peer_memory_regions_ [ slot ] ; <nl> + peerMemoryRegionsReady_ & = ( 1 < < slot ) ; <nl> + return & peerMemoryRegions_ [ slot ] ; <nl> } <nl> <nl> void Pair : : postReceive ( ) { <nl> void Pair : : postReceive ( ) { <nl> <nl> struct ibv_recv_wr wr ; <nl> memset ( & wr , 0 , sizeof ( wr ) ) ; <nl> - wr . 
wr_id = ( uint64_t ) ( ( Handler * ) this ) ; <nl> struct ibv_recv_wr * bad_wr ; <nl> rv = ibv_post_recv ( qp_ , & wr , & bad_wr ) ; <nl> GLOO_ENFORCE_NE ( rv , - 1 ) ; <nl> void Pair : : postReceive ( ) { <nl> std : : unique_ptr < : : gloo : : transport : : Buffer > <nl> Pair : : createSendBuffer ( int slot , void * ptr , size_t size ) { <nl> auto buffer = new Buffer ( this , slot , ptr , size ) ; <nl> + sendCompletionHandlers_ [ slot ] = buffer ; <nl> return std : : unique_ptr < : : gloo : : transport : : Buffer > ( buffer ) ; <nl> } <nl> <nl> std : : unique_ptr < : : gloo : : transport : : Buffer > <nl> Pair : : createRecvBuffer ( int slot , void * ptr , size_t size ) { <nl> auto buffer = new Buffer ( this , slot , ptr , size ) ; <nl> - sendMemoryRegion ( buffer , buffer - > mr_ , buffer - > slot_ ) ; <nl> + recvCompletionHandlers_ [ slot ] = buffer ; <nl> + sendMemoryRegion ( buffer - > mr_ , buffer - > slot_ ) ; <nl> return std : : unique_ptr < : : gloo : : transport : : Buffer > ( buffer ) ; <nl> } <nl> <nl> + / / handleCompletions is called by the device thread when it <nl> + / / received an event for this pair ' s completion queue on its <nl> + / / completion channel . <nl> + void Pair : : handleCompletions ( ) { <nl> + std : : array < struct ibv_wc , kCompletionQueueCapacity > wc ; <nl> + int rv ; <nl> + <nl> + ibv_ack_cq_events ( cq_ , 1 ) ; <nl> + <nl> + / / Arm notification mechanism for completion queue . <nl> + rv = ibv_req_notify_cq ( cq_ , kNotifyOnAnyCompletion ) ; <nl> + GLOO_ENFORCE_NE ( rv , - 1 ) ; <nl> + <nl> + / / Invoke handler for every work completion . <nl> + auto nwc = ibv_poll_cq ( cq_ , wc . size ( ) , wc . data ( ) ) ; <nl> + <nl> + GLOO_ENFORCE_GE ( nwc , 0 ) ; <nl> + for ( int i = 0 ; i < nwc ; i + + ) { <nl> + if ( wc [ i ] . status ! = IBV_WC_SUCCESS ) { <nl> + continue ; <nl> + } <nl> + <nl> + handleCompletion ( & wc [ i ] ) ; <nl> + } <nl> + } <nl> + <nl> void Pair : : handleCompletion ( struct ibv_wc * wc ) { <nl> - if ( wc - > opcode & IBV_WC_RECV ) { <nl> - int slot = wc - > imm_data & MASK_BUFFER_SLOT ; <nl> - if ( wc - > opcode = = IBV_WC_RECV_RDMA_WITH_IMM ) { <nl> - / / Regular RDMA write . <nl> - / / Post new receive work request to backfill for this <nl> - / / completed work request . <nl> - postReceive ( ) ; <nl> - / / Forward to appropriate buffer completion handler . <nl> - completion_handlers_ [ slot ] - > handleCompletion ( wc ) ; <nl> - } else { <nl> - / / Memory region write . <nl> - / / The first receive work request has taken a write <nl> - / / containing the ibv_mr of the other side of the peer . <nl> - struct ibv_mr * mr ; <nl> - int rv ; <nl> - <nl> - / / The buffer trying to write to this slot might be waiting for <nl> - / / the other side of this pair to send its memory region . <nl> - / / Lock access , and notify anybody waiting afterwards . <nl> - { <nl> - std : : lock_guard < std : : mutex > lock ( m_ ) ; <nl> - <nl> - / / Move ibv_mr from memory region ' inbox ' to final slot . <nl> - mr = tmp_memory_regions_ . front ( ) ; <nl> - tmp_memory_regions_ . pop_front ( ) ; <nl> - peer_memory_regions_ [ slot ] = mr ; <nl> - <nl> - / / Deregister mapping for this ibv_mr . <nl> - mr = mapped_recv_regions_ . front ( ) ; <nl> - mapped_recv_regions_ . pop_front ( ) ; <nl> - rv = ibv_dereg_mr ( mr ) ; <nl> - GLOO_ENFORCE_NE ( rv , - 1 ) ; <nl> - } <nl> - <nl> - cv_ . notify_all ( ) ; <nl> + if ( wc - > opcode = = IBV_WC_RECV_RDMA_WITH_IMM ) { <nl> + / / Incoming RDMA write completed . 
Post new receive work request to <nl> + / / backfill for this completed work request . <nl> + postReceive ( ) ; <nl> + <nl> + / / Slot is encoded in immediate data on receive work completion . <nl> + / / It is set in the Buffer : : send function . <nl> + / / Bits outside of kBufferSlotMask are currently unused . <nl> + int slot = wc - > imm_data & kBufferSlotMask ; <nl> + GLOO_ENFORCE_EQ ( wc - > imm_data & ~ kBufferSlotMask , 0 ) ; <nl> + GLOO_ENFORCE ( recvCompletionHandlers_ [ slot ] ! = nullptr ) ; <nl> + recvCompletionHandlers_ [ slot ] - > handleCompletion ( wc ) ; <nl> + } else if ( wc - > opcode = = IBV_WC_RDMA_WRITE ) { <nl> + / / Outbound RDMA write completed . <nl> + / / Slot is encoded in wr_id fields on send work request . Unlike <nl> + / / the receive work completions , the immediate data field on send <nl> + / / work requests are not pass to the respective work completion . <nl> + int slot = wc - > wr_id & kBufferSlotMask ; <nl> + GLOO_ENFORCE_EQ ( wc - > wr_id & ~ kBufferSlotMask , 0 ) ; <nl> + GLOO_ENFORCE ( sendCompletionHandlers_ [ slot ] ! = nullptr ) ; <nl> + sendCompletionHandlers_ [ slot ] - > handleCompletion ( wc ) ; <nl> + } else if ( wc - > opcode = = IBV_WC_RECV ) { <nl> + / / Memory region recv completed . <nl> + / / <nl> + / / Only used by the remote side of the pair to pass ibv_mr ' s . <nl> + / / They are written to in FIFO order , so we can pick up <nl> + / / and use the first MemoryRegion instance in the list of <nl> + / / mapped receive regions . <nl> + / / <nl> + / / The buffer trying to write to this slot might be waiting for <nl> + / / the other side of this pair to send its memory region . <nl> + / / Lock access , and notify anybody waiting afterwards . <nl> + / / <nl> + { <nl> + std : : lock_guard < std : : mutex > lock ( m_ ) ; <nl> + GLOO_ENFORCE_GT ( mappedRecvRegions_ . size ( ) , 0 ) ; <nl> + auto mr = std : : move ( mappedRecvRegions_ . front ( ) ) ; <nl> + mappedRecvRegions_ . pop_front ( ) ; <nl> + <nl> + / / Slot is encoded in immediate data on receive work completion . <nl> + / / It is set in the Pair : : sendMemoryRegion function . <nl> + / / Bits outside of kBufferSlotMask are currently unused . <nl> + int slot = wc - > imm_data & kBufferSlotMask ; <nl> + GLOO_ENFORCE_EQ ( wc - > imm_data & ~ kBufferSlotMask , 0 ) ; <nl> + <nl> + / / Move ibv_mr from memory region ' inbox ' to final slot . <nl> + peerMemoryRegions_ [ slot ] = mr - > mr ( ) ; <nl> } <nl> + <nl> + / / Notify any buffer waiting for the details of its remote peer . <nl> + cv_ . notify_all ( ) ; <nl> } else if ( wc - > opcode = = IBV_WC_SEND ) { <nl> - / / Nop <nl> + / / Memory region send completed . <nl> + mappedSendRegions_ . pop_front ( ) ; <nl> } else { <nl> GLOO_ENFORCE ( false , " Unexpected completion with opcode : " , wc - > opcode ) ; <nl> } <nl> mmm a / gloo / transport / ibverbs / pair . h <nl> ppp b / gloo / transport / ibverbs / pair . h <nl> <nl> <nl> # include " gloo / transport / ibverbs / address . h " <nl> # include " gloo / transport / ibverbs / device . h " <nl> + # include " gloo / transport / ibverbs / memory_region . h " <nl> # include " gloo / transport / pair . 
h " <nl> <nl> namespace gloo { <nl> namespace ibverbs { <nl> / / Forward declaration <nl> class Buffer ; <nl> <nl> - class Pair : public : : gloo : : transport : : Pair , public Handler { <nl> - static const int MASK_BUFFER_SLOT = 0x7 ; <nl> - static const int MAX_BUFFERS = MASK_BUFFER_SLOT + 1 ; <nl> + class Pair : public : : gloo : : transport : : Pair { <nl> + static constexpr int kBufferSlotMask = 0x7 ; <nl> + static constexpr int kMaxBuffers = kBufferSlotMask + 1 ; <nl> + <nl> + / / Use 3x the maximum number of buffers as the capacity <nl> + / / for entries in this pair ' s completion queue . <nl> + / / <nl> + / / There are a maximum of : <nl> + / / - MAX_BUFFERS posted receive work requests to receive memory <nl> + / / regions from the other side of the pair . <nl> + / / - MAX_BUFFERS posted send work requests to send memory <nl> + / / regions to the other side of the pair . <nl> + / / - MAX_BUFFERS posted receive work requests for RDMA writes <nl> + / / from the other side of the pair . These requests are posted <nl> + / / at the same time a buffer ' s local memory region is sent to <nl> + / / the other side of the pair . <nl> + static constexpr auto kCompletionQueueCapacity = 3 * kMaxBuffers ; <nl> + <nl> + / / The ibv_req_notify ( 3 ) function takes an argument called <nl> + / / ' solicited_only ' which makes it only trigger a notification for <nl> + / / work requests that are flagged as solicited . Every completion <nl> + / / should trigger a notification , so always pass 0 . <nl> + static constexpr auto kNotifyOnAnyCompletion = 0 ; <nl> <nl> public : <nl> explicit Pair ( const std : : shared_ptr < Device > & dev ) ; <nl> class Pair : public : : gloo : : transport : : Pair , public Handler { <nl> virtual std : : unique_ptr < : : gloo : : transport : : Buffer > <nl> createRecvBuffer ( int slot , void * ptr , size_t size ) override ; <nl> <nl> - virtual void handleCompletion ( struct ibv_wc * wc ) override ; <nl> + void handleCompletions ( ) ; <nl> + <nl> + void handleCompletion ( struct ibv_wc * wc ) ; <nl> <nl> protected : <nl> std : : shared_ptr < Device > dev_ ; <nl> class Pair : public : : gloo : : transport : : Pair , public Handler { <nl> Address self_ ; <nl> Address peer_ ; <nl> <nl> - struct ibv_cq * write_cq_ ; <nl> + struct ibv_cq * cq_ ; <nl> struct ibv_qp * qp_ ; <nl> <nl> std : : mutex m_ ; <nl> std : : condition_variable cv_ ; <nl> <nl> - / / For the remote peer to write their ibv_mr ' s into . <nl> - / / After writing , this pair ' s handler pops off the <nl> - / / first element and places it in peer_memory_regions_ <nl> - / / according to the slot specified in the immediate <nl> - / / part of the write . <nl> - std : : list < struct ibv_mr * > tmp_memory_regions_ ; <nl> - <nl> / / For us to copy the remote peer ' s ibv_mr into . <nl> / / Use an array instead of container so that the Buffer <nl> / / class can use it without holding a lock . <nl> - std : : array < struct ibv_mr * , MAX_BUFFERS > peer_memory_regions_ ; <nl> - std : : atomic < uint64_t > peer_memory_regions_ready_ ; <nl> - <nl> - / / Keep track of ibv_mr ' s for peer_memory_regions , <nl> - / / so they can be dereg ' d when done . <nl> - std : : list < struct ibv_mr * > mapped_recv_regions_ ; <nl> - std : : list < struct ibv_mr * > mapped_send_regions_ ; <nl> - <nl> - / / When we receive an RDMA write we need to dispatch <nl> - / / the completion to the right handler ( buffer ) . 
<nl> - std : : array < Handler * , MAX_BUFFERS > completion_handlers_ ; <nl> + std : : array < struct ibv_mr , kMaxBuffers > peerMemoryRegions_ ; <nl> + std : : atomic < uint64_t > peerMemoryRegionsReady_ ; <nl> + <nl> + / / These lists store memory regions that the remote side of the pair <nl> + / / can send to and that the local side of the pair can send from . <nl> + / / <nl> + / / After receiving a memory region from the remote side of the pair , <nl> + / / the memory region is popped off the front of this list and is <nl> + / / destructed , after storing the remote details in <nl> + / / peerMemoryRegions_ . <nl> + / / <nl> + / / When registering a receive buffer , the local ibv_mr is sent <nl> + / / to the remote side of the pair , and the corresponding MemoryRegion <nl> + / / instance is kept around in the mappedSendRegions_ list until <nl> + / / the send operation complete . <nl> + / / <nl> + std : : list < std : : unique_ptr < MemoryRegion > > mappedSendRegions_ ; <nl> + std : : list < std : : unique_ptr < MemoryRegion > > mappedRecvRegions_ ; <nl> + <nl> + / / Completions on behalf of buffers need to be forwarded to those buffers . <nl> + std : : array < Buffer * , kMaxBuffers > sendCompletionHandlers_ ; <nl> + std : : array < Buffer * , kMaxBuffers > recvCompletionHandlers_ ; <nl> <nl> void receiveMemoryRegion ( ) ; <nl> - void sendMemoryRegion ( Handler * h , struct ibv_mr * mr , int slot ) ; <nl> + void sendMemoryRegion ( struct ibv_mr * mr , int slot ) ; <nl> const struct ibv_mr * getMemoryRegion ( int slot ) ; <nl> <nl> void postReceive ( ) ; <nl> | Refactor ibverbs transport to prepare for sync mode | pytorch/pytorch | fc7f0269808581499571c5db8af87311c943cd4e | 2017-03-02T18:16:38Z |
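In the refactor above, each Pair now owns its completion queue: ibv_create_cq is called with the Pair pointer as the cq_context, and the Device loop only blocks on the shared completion channel and forwards the event to that pair. The sketch below shows this delegation pattern with plain libibverbs calls (ibv_get_cq_event, ibv_ack_cq_events, ibv_req_notify_cq, ibv_poll_cq are the real API); the Pair struct, kCapacity, and pollChannelOnce are simplified stand-ins for the classes in the commit, not gloo's actual code:

// Minimal sketch of the completion-channel delegation pattern from the commit
// above. Pair, kCapacity and pollChannelOnce are simplified stand-ins; only
// the libibverbs calls are the real API.
#include <infiniband/verbs.h>

#include <array>

struct Pair {
  ibv_cq* cq_ = nullptr;
  static constexpr int kCapacity = 24;  // stand-in for kCompletionQueueCapacity

  void handleCompletions() {
    // Every ibv_get_cq_event must eventually be acknowledged.
    ibv_ack_cq_events(cq_, 1);
    // Re-arm notifications; 0 means "notify on any completion", not only
    // solicited ones.
    ibv_req_notify_cq(cq_, 0);
    // Drain the queue and dispatch each successful work completion.
    std::array<ibv_wc, kCapacity> wc;
    const int nwc = ibv_poll_cq(cq_, static_cast<int>(wc.size()), wc.data());
    for (int i = 0; i < nwc; i++) {
      if (wc[i].status != IBV_WC_SUCCESS) {
        continue;
      }
      // ...dispatch on wc[i].opcode, as Pair::handleCompletion does above.
    }
  }
};

// Device-thread side: block until some pair's CQ fires, then delegate to the
// owning Pair, recovered from the cq_context set at ibv_create_cq time.
void pollChannelOnce(ibv_comp_channel* channel) {
  ibv_cq* cq = nullptr;
  void* cqContext = nullptr;
  if (ibv_get_cq_event(channel, &cq, &cqContext) == 0) {
    static_cast<Pair*>(cqContext)->handleCompletions();
  }
}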
new file mode 100644 <nl> index 00000000000 . . db42a6abc37 <nl> mmm / dev / null <nl> ppp b / hphp / hack / src / hhbc / hhas_typedef . rs <nl> <nl> + / / Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + / / <nl> + / / This source code is licensed under the MIT license found in the <nl> + / / LICENSE file in the " hack " directory of this source tree . <nl> + <nl> + use hhas_attribute_rust as hhas_attribute ; <nl> + use hhas_type ; <nl> + use hhbc_id_rust as hhas_id ; <nl> + use runtime ; <nl> + <nl> + use hhas_attribute : : HhasAttribute ; <nl> + use runtime : : TypedValue ; <nl> + <nl> + pub struct Typedef < ' a > { <nl> + pub name : hhas_id : : class : : Type < ' a > , <nl> + pub attributes : Vec < HhasAttribute > , <nl> + pub type_info : hhas_type : : Info , <nl> + pub type_structure : TypedValue , <nl> + } <nl> | Port hhas_typedef with generic ID | facebook/hhvm | 5679ed0e615bc48502561c9b9d6c11f880fae01d | 2019-10-21T22:52:06Z |
mmm a / test / cpp / util / test_credentials_provider . cc <nl> ppp b / test / cpp / util / test_credentials_provider . cc <nl> class DefaultCredentialsProvider : public CredentialsProvider { <nl> gpr_once g_once_init_provider = GPR_ONCE_INIT ; <nl> CredentialsProvider * g_provider = nullptr ; <nl> <nl> - void CreateDefaultProvider ( ) { <nl> - g_provider = new DefaultCredentialsProvider ; <nl> - } <nl> + void CreateDefaultProvider ( ) { g_provider = new DefaultCredentialsProvider ; } <nl> <nl> CredentialsProvider * GetProvider ( ) { <nl> gpr_once_init ( & g_once_init_provider , & CreateDefaultProvider ) ; <nl> | Fix clang format issue | grpc/grpc | 9e5a05af8a322991ce4f909ce84a58889b1f151b | 2016-03-01T17:40:12Z |
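The function reformatted above lazily creates the default credentials provider exactly once via gpr_once_init. For reference, the same one-time-initialization idiom written with the C++ standard library could look like the hedged sketch below; Provider and GetProvider here are hypothetical stand-ins, not grpc's API:

// Standard-library analog of the gpr_once idiom above: create a process-wide
// provider exactly once, even if several threads call GetProvider() at the
// same time. Provider is a stand-in for DefaultCredentialsProvider.
#include <mutex>

struct Provider {};

static std::once_flag g_once;
static Provider* g_provider = nullptr;

Provider* GetProvider() {
  std::call_once(g_once, [] { g_provider = new Provider; });
  return g_provider;
}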
mmm a / tensorflow / core / grappler / op_types . cc <nl> ppp b / tensorflow / core / grappler / op_types . cc <nl> bool IsFreeOfSideEffect ( const NodeDef & node ) { <nl> bool ModifiesInputsInPlace ( const NodeDef & node ) { <nl> / / Some nodes do in - place updates on regular tensor inputs . <nl> string op_name = node . op ( ) ; <nl> + <nl> + / / Ops that modify resource variables effectively modify one of their inputs . <nl> + if ( op_name = = " AssignVariableOp " | | op_name = = " AssignAddVariableOp " | | <nl> + op_name = = " AssignSubVariableOp " | | op_name = = " ResourceScatterUpdate " | | <nl> + op_name = = " ResourceScatterAdd " | | op_name = = " ResourceScatterSub " | | <nl> + op_name = = " ResourceScatterMul " | | op_name = = " ResourceScatterDiv " | | <nl> + op_name = = " ResourceScatterMin " | | op_name = = " ResourceScatterMax " ) { <nl> + return false ; <nl> + } <nl> + <nl> std : : transform ( op_name . begin ( ) , op_name . end ( ) , op_name . begin ( ) , : : tolower ) ; <nl> if ( str_util : : StrContains ( op_name , " inplace " ) ) { <nl> return true ; <nl> | Add support for resource variables | tensorflow/tensorflow | da92e74fcb28f31c2a4163c58e6e585f561b1c33 | 2018-04-02T22:51:48Z |
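The Grappler change above is a membership test spelled as chained string comparisons, evaluated before the lowercased "inplace" substring heuristic. A minimal sketch of the same check as a set lookup follows; it uses only the standard library, IsResourceVariableUpdateOp is a made-up helper name, and the op list is copied from the commit:

// Sketch of the membership test behind the early return above.
#include <string>
#include <unordered_set>

bool IsResourceVariableUpdateOp(const std::string& op) {
  static const std::unordered_set<std::string> kResourceUpdateOps = {
      "AssignVariableOp",      "AssignAddVariableOp", "AssignSubVariableOp",
      "ResourceScatterUpdate", "ResourceScatterAdd",  "ResourceScatterSub",
      "ResourceScatterMul",    "ResourceScatterDiv",  "ResourceScatterMin",
      "ResourceScatterMax"};
  return kResourceUpdateOps.count(op) != 0;
}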
mmm a / src / zone / accounting - allocator . cc <nl> ppp b / src / zone / accounting - allocator . cc <nl> void AccountingAllocator : : ReturnSegment ( Segment * segment , <nl> segment - > ZapHeader ( ) ; <nl> if ( COMPRESS_ZONES_BOOL & & supports_compression ) { <nl> CHECK ( FreePages ( bounded_page_allocator_ . get ( ) , segment , segment_size ) ) ; <nl> - <nl> } else { <nl> free ( segment ) ; <nl> } <nl> mmm a / src / zone / zone . h <nl> ppp b / src / zone / zone . h <nl> class V8_EXPORT_PRIVATE Zone final { <nl> / / associated with the T type . <nl> template < typename T , typename . . . Args > <nl> T * New ( Args & & . . . args ) { <nl> - size_t size = RoundUp ( sizeof ( T ) , kAlignmentInBytes ) ; <nl> - void * memory = Allocate < T > ( size ) ; <nl> + void * memory = Allocate < T > ( sizeof ( T ) ) ; <nl> return new ( memory ) T ( std : : forward < Args > ( args ) . . . ) ; <nl> } <nl> <nl> | [ zone ] Remove redundant size roundup in Zone | v8/v8 | f542fdefd5fc9e6f58cc6452dc22b076253a703f | 2020-09-29T10:10:11Z |
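The zone change above drops the RoundUp at the Zone::New call site, presumably because Allocate<T> already rounds the requested size to kAlignmentInBytes; New<T> then only placement-constructs the object in the returned memory. Below is a minimal arena sketch of that split of responsibilities, assuming a simplified Arena type rather than V8's Zone:

// Simplified arena illustrating the Zone::New split above: Allocate() owns the
// size rounding, New<T>() only placement-constructs with perfect forwarding.
#include <cstddef>
#include <cstdint>
#include <new>
#include <utility>

class Arena {
 public:
  static constexpr std::size_t kAlignment = 8;

  template <typename T, typename... Args>
  T* New(Args&&... args) {
    void* memory = Allocate(sizeof(T));  // no RoundUp at the call site
    return memory ? new (memory) T(std::forward<Args>(args)...) : nullptr;
  }

 private:
  void* Allocate(std::size_t size) {
    size = (size + kAlignment - 1) & ~(kAlignment - 1);  // rounding lives here
    if (pos_ + size > sizeof(buffer_)) {
      return nullptr;  // a real zone would grow a new segment instead
    }
    void* p = buffer_ + pos_;
    pos_ += size;
    return p;
  }

  alignas(kAlignment) std::uint8_t buffer_[4096];
  std::size_t pos_ = 0;
};

With this split, a caller writes arena.New<T>(ctor_args...) and gets back an aligned, constructed object without repeating the alignment logic at every call site.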
mmm a / dbms / include / DB / AggregateFunctions / AggregateFunctionUniq . h <nl> ppp b / dbms / include / DB / AggregateFunctions / AggregateFunctionUniq . h <nl> <nl> # include < DB / Interpreters / AggregationCommon . h > <nl> # include < DB / Common / HashTable / HashSet . h > <nl> # include < DB / Common / HyperLogLogWithSmallSetOptimization . h > <nl> + # include < DB / Common / CombinedCardinalityEstimator . h > <nl> <nl> # include < DB / Columns / ColumnString . h > <nl> <nl> struct AggregateFunctionUniqExactData < String > <nl> } ; <nl> <nl> <nl> + template < typename T > <nl> + struct AggregateFunctionUniqCombinedData <nl> + { <nl> + using Key = T ; <nl> + using Set = CombinedCardinalityEstimator < Key , HashSet < Key , DefaultHash < Key > , HashTableGrower < 4 > > , 16 , 16 , 19 > ; <nl> + Set set ; <nl> + <nl> + static String getName ( ) { return " uniqCombined " ; } <nl> + } ; <nl> + <nl> + template < > <nl> + struct AggregateFunctionUniqCombinedData < String > <nl> + { <nl> + using Key = UInt64 ; <nl> + using Set = CombinedCardinalityEstimator < Key , HashSet < Key , DefaultHash < Key > , HashTableGrower < 4 > > , 16 , 16 , 19 > ; <nl> + Set set ; <nl> + <nl> + static String getName ( ) { return " uniqCombined " ; } <nl> + } ; <nl> + <nl> namespace detail <nl> { <nl> / * * Структура для делегации работы по добавлению одного элемента в агрегатные функции uniq . <nl> namespace detail <nl> data . set . insert ( key ) ; <nl> } <nl> } ; <nl> + <nl> + template < typename T > <nl> + struct OneAdder < T , AggregateFunctionUniqCombinedData < T > > <nl> + { <nl> + static void addOne ( AggregateFunctionUniqCombinedData < T > & data , const IColumn & column , size_t row_num ) <nl> + { <nl> + if ( data . set . isMedium ( ) ) <nl> + data . set . insert ( static_cast < const ColumnVector < T > & > ( column ) . getData ( ) [ row_num ] ) ; <nl> + else <nl> + data . set . insert ( AggregateFunctionUniqTraits < T > : : hash ( static_cast < const ColumnVector < T > & > ( column ) . getData ( ) [ row_num ] ) ) ; <nl> + } <nl> + } ; <nl> + <nl> + template < > <nl> + struct OneAdder < String , AggregateFunctionUniqCombinedData < String > > <nl> + { <nl> + static void addOne ( AggregateFunctionUniqCombinedData < String > & data , const IColumn & column , size_t row_num ) <nl> + { <nl> + StringRef value = column . getDataAt ( row_num ) ; <nl> + data . set . insert ( CityHash64 ( value . data , value . size ) ) ; <nl> + } <nl> + } ; <nl> } <nl> <nl> <nl> new file mode 100644 <nl> index 00000000000 . . 3feca851b09 <nl> mmm / dev / null <nl> ppp b / dbms / include / DB / Common / CombinedCardinalityEstimator . h <nl> <nl> + # pragma once <nl> + <nl> + # include < DB / Common / HashTable / SmallTable . h > <nl> + # include < DB / Common / HashTable / HashSet . h > <nl> + # include < DB / Common / HyperLogLogWithSmallSetOptimization . 
h > <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + template < typename Key , typename HashType , UInt8 small_set_size , UInt8 medium_set_power , UInt8 K > <nl> + class CombinedCardinalityEstimator <nl> + { <nl> + public : <nl> + using Self = CombinedCardinalityEstimator < Key , HashType , small_set_size , medium_set_power , K > ; <nl> + <nl> + private : <nl> + using Small = SmallSet < Key , small_set_size > ; <nl> + using Medium = HashType ; <nl> + using Large = HyperLogLogWithSmallSetOptimization < Key , small_set_size , K > ; <nl> + enum class ContainerType { SMALL , MEDIUM , LARGE } ; <nl> + <nl> + public : <nl> + ~ CombinedCardinalityEstimator ( ) <nl> + { <nl> + if ( container_type = = ContainerType : : MEDIUM ) <nl> + { <nl> + delete medium ; <nl> + <nl> + if ( current_memory_tracker ) <nl> + current_memory_tracker - > free ( sizeof ( medium ) ) ; <nl> + } <nl> + else if ( container_type = = ContainerType : : LARGE ) <nl> + { <nl> + delete large ; <nl> + <nl> + if ( current_memory_tracker ) <nl> + current_memory_tracker - > free ( sizeof ( large ) ) ; <nl> + } <nl> + } <nl> + <nl> + void insert ( Key value ) <nl> + { <nl> + if ( container_type = = ContainerType : : SMALL ) <nl> + { <nl> + if ( small . find ( value ) = = small . end ( ) ) <nl> + { <nl> + if ( ! small . full ( ) ) <nl> + small . insert ( value ) ; <nl> + else <nl> + { <nl> + toMedium ( ) ; <nl> + medium - > insert ( value ) ; <nl> + } <nl> + } <nl> + } <nl> + else if ( container_type = = ContainerType : : MEDIUM ) <nl> + { <nl> + if ( medium - > size ( ) < medium_set_size ) <nl> + medium - > insert ( value ) ; <nl> + else <nl> + { <nl> + toLarge ( ) ; <nl> + large - > insert ( value ) ; <nl> + } <nl> + } <nl> + else if ( container_type = = ContainerType : : LARGE ) <nl> + large - > insert ( value ) ; <nl> + } <nl> + <nl> + UInt32 size ( ) const <nl> + { <nl> + if ( container_type = = ContainerType : : SMALL ) <nl> + return small . size ( ) ; <nl> + else if ( container_type = = ContainerType : : MEDIUM ) <nl> + return medium - > size ( ) ; <nl> + else if ( container_type = = ContainerType : : LARGE ) <nl> + return large - > size ( ) ; <nl> + <nl> + return 0 ; <nl> + } <nl> + <nl> + void merge ( const Self & rhs ) <nl> + { <nl> + ContainerType res = max ( container_type , rhs . container_type ) ; <nl> + <nl> + if ( container_type ! = res ) <nl> + { <nl> + if ( res = = ContainerType : : MEDIUM ) <nl> + toMedium ( ) ; <nl> + else if ( res = = ContainerType : : LARGE ) <nl> + toLarge ( ) ; <nl> + } <nl> + <nl> + if ( container_type = = ContainerType : : SMALL ) <nl> + { <nl> + for ( const auto & x : rhs . small ) <nl> + insert ( x ) ; <nl> + } <nl> + else if ( container_type = = ContainerType : : MEDIUM ) <nl> + { <nl> + if ( rhs . container_type = = ContainerType : : SMALL ) <nl> + { <nl> + for ( const auto & x : rhs . small ) <nl> + insert ( x ) ; <nl> + } <nl> + else if ( rhs . container_type = = ContainerType : : MEDIUM ) <nl> + { <nl> + for ( const auto & x : * rhs . medium ) <nl> + insert ( x ) ; <nl> + } <nl> + } <nl> + else if ( container_type = = ContainerType : : LARGE ) <nl> + { <nl> + if ( rhs . container_type = = ContainerType : : SMALL ) <nl> + { <nl> + for ( const auto & x : rhs . small ) <nl> + insert ( x ) ; <nl> + } <nl> + else if ( rhs . container_type = = ContainerType : : MEDIUM ) <nl> + { <nl> + for ( const auto & x : * rhs . medium ) <nl> + insert ( x ) ; <nl> + } <nl> + else if ( rhs . container_type = = ContainerType : : LARGE ) <nl> + large - > merge ( * rhs . 
large ) ; <nl> + } <nl> + } <nl> + <nl> + void read ( DB : : ReadBuffer & in ) <nl> + { <nl> + UInt8 v ; <nl> + readBinary ( v , in ) ; <nl> + ContainerType t = static_cast < ContainerType > ( v ) ; <nl> + <nl> + if ( t = = ContainerType : : SMALL ) <nl> + small . read ( in ) ; <nl> + else if ( t = = ContainerType : : MEDIUM ) <nl> + { <nl> + toMedium ( ) ; <nl> + medium - > read ( in ) ; <nl> + } <nl> + else if ( t = = ContainerType : : LARGE ) <nl> + { <nl> + toLarge ( ) ; <nl> + large - > read ( in ) ; <nl> + } <nl> + } <nl> + <nl> + void readAndMerge ( DB : : ReadBuffer & in ) <nl> + { <nl> + Self other ; <nl> + other . read ( in ) ; <nl> + merge ( other ) ; <nl> + } <nl> + <nl> + void write ( DB : : WriteBuffer & out ) const <nl> + { <nl> + UInt8 v = static_cast < unsigned int > ( container_type ) ; <nl> + writeBinary ( v , out ) ; <nl> + <nl> + if ( container_type = = ContainerType : : SMALL ) <nl> + small . write ( out ) ; <nl> + else if ( container_type = = ContainerType : : MEDIUM ) <nl> + medium - > write ( out ) ; <nl> + else if ( container_type = = ContainerType : : LARGE ) <nl> + large - > write ( out ) ; <nl> + } <nl> + <nl> + bool isMedium ( ) const <nl> + { <nl> + return container_type = = ContainerType : : MEDIUM ; <nl> + } <nl> + <nl> + private : <nl> + void toMedium ( ) <nl> + { <nl> + if ( current_memory_tracker ) <nl> + current_memory_tracker - > alloc ( sizeof ( medium ) ) ; <nl> + <nl> + Medium * tmp_medium = new Medium ; <nl> + <nl> + for ( const auto & x : small ) <nl> + tmp_medium - > insert ( x ) ; <nl> + <nl> + medium = tmp_medium ; <nl> + <nl> + container_type = ContainerType : : MEDIUM ; <nl> + } <nl> + <nl> + void toLarge ( ) <nl> + { <nl> + if ( current_memory_tracker ) <nl> + current_memory_tracker - > alloc ( sizeof ( large ) ) ; <nl> + <nl> + Large * tmp_large = new Large ; <nl> + <nl> + for ( const auto & x : * medium ) <nl> + tmp_large - > insert ( x ) ; <nl> + <nl> + large = tmp_large ; <nl> + <nl> + delete medium ; <nl> + medium = nullptr ; <nl> + <nl> + if ( current_memory_tracker ) <nl> + current_memory_tracker - > free ( sizeof ( medium ) ) ; <nl> + <nl> + container_type = ContainerType : : LARGE ; <nl> + } <nl> + <nl> + ContainerType max ( const ContainerType & lhs , const ContainerType & rhs ) <nl> + { <nl> + unsigned int res = std : : max ( static_cast < unsigned int > ( lhs ) , static_cast < unsigned int > ( rhs ) ) ; <nl> + return static_cast < ContainerType > ( res ) ; <nl> + } <nl> + <nl> + private : <nl> + ContainerType container_type = ContainerType : : SMALL ; <nl> + const UInt32 medium_set_size = 1UL < < medium_set_power ; <nl> + Small small ; <nl> + Medium * medium = nullptr ; <nl> + Large * large = nullptr ; <nl> + } ; <nl> + <nl> + } <nl> mmm a / dbms / include / DB / Common / HashTable / HashTable . h <nl> ppp b / dbms / include / DB / Common / HashTable / HashTable . h <nl> class HashTable : <nl> { <nl> Cell x ; <nl> x . read ( rb ) ; <nl> - insert ( x ) ; <nl> + insert ( Cell : : getKey ( x . getValue ( ) ) ) ; <nl> } <nl> } <nl> <nl> class HashTable : <nl> Cell x ; <nl> DB : : assertString ( " , " , rb ) ; <nl> x . readText ( rb ) ; <nl> - insert ( x ) ; <nl> + insert ( Cell : : getKey ( x . getValue ( ) ) ) ; <nl> } <nl> } <nl> <nl> mmm a / dbms / src / AggregateFunctions / AggregateFunctionFactory . cpp <nl> ppp b / dbms / src / AggregateFunctions / AggregateFunctionFactory . 
cpp <nl> AggregateFunctionPtr AggregateFunctionFactory : : get ( const String & name , const Da <nl> else <nl> throw Exception ( " Illegal type " + argument_types [ 0 ] - > getName ( ) + " of argument for aggregate function " + name , ErrorCodes : : ILLEGAL_TYPE_OF_ARGUMENT ) ; <nl> } <nl> + else if ( name = = " uniqCombined " ) <nl> + { <nl> + if ( argument_types . size ( ) ! = 1 ) <nl> + throw Exception ( " Incorrect number of arguments for aggregate function " + name , ErrorCodes : : NUMBER_OF_ARGUMENTS_DOESNT_MATCH ) ; <nl> + <nl> + const IDataType & argument_type = * argument_types [ 0 ] ; <nl> + <nl> + AggregateFunctionPtr res = createWithNumericType < AggregateFunctionUniq , AggregateFunctionUniqCombinedData > ( * argument_types [ 0 ] ) ; <nl> + <nl> + if ( res ) <nl> + return res ; <nl> + else if ( typeid_cast < const DataTypeDate * > ( & argument_type ) ) <nl> + return new AggregateFunctionUniq < DataTypeDate : : FieldType , AggregateFunctionUniqCombinedData < DataTypeDate : : FieldType > > ; <nl> + else if ( typeid_cast < const DataTypeDateTime * > ( & argument_type ) ) <nl> + return new AggregateFunctionUniq < DataTypeDateTime : : FieldType , AggregateFunctionUniqCombinedData < DataTypeDateTime : : FieldType > > ; <nl> + else if ( typeid_cast < const DataTypeString * > ( & argument_type ) | | typeid_cast < const DataTypeFixedString * > ( & argument_type ) ) <nl> + return new AggregateFunctionUniq < String , AggregateFunctionUniqCombinedData < String > > ; <nl> + else <nl> + throw Exception ( " Illegal type " + argument_types [ 0 ] - > getName ( ) + " of argument for aggregate function " + name , ErrorCodes : : ILLEGAL_TYPE_OF_ARGUMENT ) ; <nl> + } <nl> else if ( name = = " uniqUpTo " ) <nl> { <nl> if ( argument_types . size ( ) ! = 1 ) <nl> const AggregateFunctionFactory : : FunctionNames & AggregateFunctionFactory : : getFun <nl> " uniq " , <nl> " uniqHLL12 " , <nl> " uniqExact " , <nl> + " uniqCombined " , <nl> " uniqUpTo " , <nl> " groupArray " , <nl> " groupUniqArray " , <nl> | dbms : Server : Feature development . [ # METR - 17276 ] | ClickHouse/ClickHouse | ef92ac4f3d7f1f9444c95369ba51f82f3aedd4b0 | 2015-07-20T14:22:08Z |
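The new uniqCombined state above is tiered: a 16-element SmallSet, then a heap-allocated HashSet capped at 2^16 entries (medium_set_power = 16), then a HyperLogLog-based estimator (the K = 19 parameter), with insert and merge promoting to the larger representation as needed. The sketch below illustrates only that promotion logic: standard containers stand in for SmallSet/HashSet, and the last tier stays an exact set instead of a real HyperLogLog, so the sketching math is deliberately out of scope:

// Sketch of the small -> medium -> large promotion behind uniqCombined.
#include <array>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <unordered_set>

class TieredDistinctCounter {
  static constexpr std::size_t kSmallCapacity = 16;         // cf. small_set_size
  static constexpr std::size_t kMediumCapacity = 1u << 16;  // cf. 1 << medium_set_power

  enum class Tier { Small, Medium, Large };

 public:
  void insert(std::uint64_t value) {
    if (tier_ == Tier::Small) {
      for (std::size_t i = 0; i < small_size_; ++i) {
        if (small_[i] == value) return;  // already counted
      }
      if (small_size_ < kSmallCapacity) {
        small_[small_size_++] = value;
        return;
      }
      toMedium();  // overflow: promote, then fall through and insert below
    }
    if (tier_ == Tier::Medium) {
      medium_->insert(value);
      if (medium_->size() > kMediumCapacity) toLarge();
      return;
    }
    large_->insert(value);  // in ClickHouse this is the HyperLogLog stage
  }

  std::size_t size() const {
    switch (tier_) {
      case Tier::Small:  return small_size_;
      case Tier::Medium: return medium_->size();
      default:           return large_->size();
    }
  }

 private:
  void toMedium() {
    medium_ = std::make_unique<std::unordered_set<std::uint64_t>>(
        small_.begin(), small_.begin() + small_size_);
    tier_ = Tier::Medium;
  }

  void toLarge() {
    // A real implementation would re-insert the medium set into an HLL here.
    large_ = std::move(medium_);
    tier_ = Tier::Large;
  }

  Tier tier_ = Tier::Small;
  std::array<std::uint64_t, kSmallCapacity> small_{};
  std::size_t small_size_ = 0;
  std::unique_ptr<std::unordered_set<std::uint64_t>> medium_;
  std::unique_ptr<std::unordered_set<std::uint64_t>> large_;
};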
mmm a / examples / directx11_example / imgui_impl_dx11 . cpp <nl> ppp b / examples / directx11_example / imgui_impl_dx11 . cpp <nl> static HWND g_hWnd = 0 ; <nl> static ID3D11Device * g_pd3dDevice = NULL ; <nl> static ID3D11DeviceContext * g_pd3dDeviceContext = NULL ; <nl> static ID3D11Buffer * g_pVB = NULL ; <nl> + static ID3D11Buffer * g_pIB = NULL ; <nl> static ID3D10Blob * g_pVertexShaderBlob = NULL ; <nl> static ID3D11VertexShader * g_pVertexShader = NULL ; <nl> static ID3D11InputLayout * g_pInputLayout = NULL ; <nl> static ID3D11PixelShader * g_pPixelShader = NULL ; <nl> static ID3D11SamplerState * g_pFontSampler = NULL ; <nl> static ID3D11ShaderResourceView * g_pFontTextureView = NULL ; <nl> static ID3D11BlendState * g_blendState = NULL ; <nl> - static int VERTEX_BUFFER_SIZE = 30000 ; / / TODO : Make vertex buffer smaller and grow dynamically as needed . <nl> + static int VERTEX_BUFFER_SIZE = 30000 ; / / TODO : Make buffers smaller and grow dynamically as needed . <nl> + static int INDEX_BUFFER_SIZE = 30000 ; / / TODO : Make buffers smaller and grow dynamically as needed . <nl> <nl> struct CUSTOMVERTEX <nl> { <nl> struct VERTEX_CONSTANT_BUFFER <nl> static void ImGui_ImplDX11_RenderDrawLists ( ImDrawList * * const cmd_lists , int cmd_lists_count ) <nl> { <nl> / / Copy and convert all vertices into a single contiguous buffer <nl> - D3D11_MAPPED_SUBRESOURCE mappedResource ; <nl> - if ( g_pd3dDeviceContext - > Map ( g_pVB , 0 , D3D11_MAP_WRITE_DISCARD , 0 , & mappedResource ) ! = S_OK ) <nl> + D3D11_MAPPED_SUBRESOURCE vtx_resource , idx_resource ; <nl> + if ( g_pd3dDeviceContext - > Map ( g_pVB , 0 , D3D11_MAP_WRITE_DISCARD , 0 , & vtx_resource ) ! = S_OK ) <nl> return ; <nl> - CUSTOMVERTEX * vtx_dst = ( CUSTOMVERTEX * ) mappedResource . pData ; <nl> + if ( g_pd3dDeviceContext - > Map ( g_pIB , 0 , D3D11_MAP_WRITE_DISCARD , 0 , & idx_resource ) ! = S_OK ) <nl> + return ; <nl> + CUSTOMVERTEX * vtx_dst = ( CUSTOMVERTEX * ) vtx_resource . pData ; <nl> + ImDrawIdx * idx_dst = ( ImDrawIdx * ) idx_resource . pData ; <nl> for ( int n = 0 ; n < cmd_lists_count ; n + + ) <nl> { <nl> const ImDrawList * cmd_list = cmd_lists [ n ] ; <nl> static void ImGui_ImplDX11_RenderDrawLists ( ImDrawList * * const cmd_lists , int cmd <nl> vtx_dst + + ; <nl> vtx_src + + ; <nl> } <nl> + memcpy ( idx_dst , & cmd_list - > idx_buffer [ 0 ] , cmd_list - > idx_buffer . size ( ) * sizeof ( ImDrawIdx ) ) ; <nl> + idx_dst + = cmd_list - > idx_buffer . 
size ( ) ; <nl> } <nl> g_pd3dDeviceContext - > Unmap ( g_pVB , 0 ) ; <nl> + g_pd3dDeviceContext - > Unmap ( g_pIB , 0 ) ; <nl> <nl> / / Setup orthographic projection matrix into our constant buffer <nl> { <nl> static void ImGui_ImplDX11_RenderDrawLists ( ImDrawList * * const cmd_lists , int cmd <nl> unsigned int offset = 0 ; <nl> g_pd3dDeviceContext - > IASetInputLayout ( g_pInputLayout ) ; <nl> g_pd3dDeviceContext - > IASetVertexBuffers ( 0 , 1 , & g_pVB , & stride , & offset ) ; <nl> + g_pd3dDeviceContext - > IASetIndexBuffer ( g_pIB , DXGI_FORMAT_R16_UINT , 0 ) ; <nl> g_pd3dDeviceContext - > IASetPrimitiveTopology ( D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST ) ; <nl> g_pd3dDeviceContext - > VSSetShader ( g_pVertexShader , NULL , 0 ) ; <nl> g_pd3dDeviceContext - > VSSetConstantBuffers ( 0 , 1 , & g_pVertexConstantBuffer ) ; <nl> static void ImGui_ImplDX11_RenderDrawLists ( ImDrawList * * const cmd_lists , int cmd <nl> <nl> / / Render command lists <nl> int vtx_offset = 0 ; <nl> + int idx_offset = 0 ; <nl> for ( int n = 0 ; n < cmd_lists_count ; n + + ) <nl> { <nl> const ImDrawList * cmd_list = cmd_lists [ n ] ; <nl> static void ImGui_ImplDX11_RenderDrawLists ( ImDrawList * * const cmd_lists , int cmd <nl> const D3D11_RECT r = { ( LONG ) pcmd - > clip_rect . x , ( LONG ) pcmd - > clip_rect . y , ( LONG ) pcmd - > clip_rect . z , ( LONG ) pcmd - > clip_rect . w } ; <nl> g_pd3dDeviceContext - > PSSetShaderResources ( 0 , 1 , ( ID3D11ShaderResourceView * * ) & pcmd - > texture_id ) ; <nl> g_pd3dDeviceContext - > RSSetScissorRects ( 1 , & r ) ; <nl> - g_pd3dDeviceContext - > Draw ( pcmd - > vtx_count , vtx_offset ) ; <nl> + g_pd3dDeviceContext - > DrawIndexed ( pcmd - > idx_count , idx_offset , vtx_offset ) ; <nl> } <nl> - vtx_offset + = pcmd - > vtx_count ; <nl> + idx_offset + = pcmd - > idx_count ; <nl> } <nl> + vtx_offset + = cmd_list - > vtx_buffer . size ( ) ; <nl> } <nl> <nl> / / Restore modified state <nl> bool ImGui_ImplDX11_CreateDeviceObjects ( ) <nl> bufferDesc . ByteWidth = VERTEX_BUFFER_SIZE * sizeof ( CUSTOMVERTEX ) ; <nl> bufferDesc . BindFlags = D3D11_BIND_VERTEX_BUFFER ; <nl> bufferDesc . CPUAccessFlags = D3D11_CPU_ACCESS_WRITE ; <nl> - bufferDesc . MiscFlags = 0 ; <nl> if ( g_pd3dDevice - > CreateBuffer ( & bufferDesc , NULL , & g_pVB ) < 0 ) <nl> return false ; <nl> } <nl> <nl> + / / Create the index buffer <nl> + { <nl> + D3D11_BUFFER_DESC bufferDesc ; <nl> + memset ( & bufferDesc , 0 , sizeof ( D3D11_BUFFER_DESC ) ) ; <nl> + bufferDesc . Usage = D3D11_USAGE_DYNAMIC ; <nl> + bufferDesc . ByteWidth = INDEX_BUFFER_SIZE * sizeof ( ImDrawIdx ) ; <nl> + bufferDesc . BindFlags = D3D11_BIND_INDEX_BUFFER ; <nl> + bufferDesc . CPUAccessFlags = D3D11_CPU_ACCESS_WRITE ; <nl> + if ( g_pd3dDevice - > CreateBuffer ( & bufferDesc , NULL , & g_pIB ) < 0 ) <nl> + return false ; <nl> + } <nl> + <nl> ImGui_ImplDX11_CreateFontsTexture ( ) ; <nl> <nl> return true ; <nl> void ImGui_ImplDX11_InvalidateDeviceObjects ( ) <nl> <nl> if ( g_pFontSampler ) { g_pFontSampler - > Release ( ) ; g_pFontSampler = NULL ; } <nl> if ( g_pFontTextureView ) { g_pFontTextureView - > Release ( ) ; ImGui : : GetIO ( ) . Fonts - > TexID = 0 ; } <nl> + if ( g_pIB ) { g_pIB - > Release ( ) ; g_pIB = NULL ; } <nl> if ( g_pVB ) { g_pVB - > Release ( ) ; g_pVB = NULL ; } <nl> <nl> if ( g_blendState ) { g_blendState - > Release ( ) ; g_blendState = NULL ; } <nl> mmm a / examples / directx9_example / imgui_impl_dx9 . cpp <nl> ppp b / examples / directx9_example / imgui_impl_dx9 . 
cpp <nl> static INT64 g_Time = 0 ; <nl> static INT64 g_TicksPerSecond = 0 ; <nl> static LPDIRECT3DDEVICE9 g_pd3dDevice = NULL ; <nl> static LPDIRECT3DVERTEXBUFFER9 g_pVB = NULL ; <nl> - static int VERTEX_BUFFER_SIZE = 30000 ; / / TODO : Make vertex buffer smaller and grow dynamically as needed . <nl> + static LPDIRECT3DINDEXBUFFER9 g_pIB = NULL ; <nl> + static int VERTEX_BUFFER_SIZE = 30000 ; / / TODO : Make buffers smaller and grow dynamically as needed . <nl> + static int INDEX_BUFFER_SIZE = 30000 ; / / TODO : Make buffers smaller and grow dynamically as needed . <nl> <nl> struct CUSTOMVERTEX <nl> { <nl> struct CUSTOMVERTEX <nl> static void ImGui_ImplDX9_RenderDrawLists ( ImDrawList * * const cmd_lists , int cmd_lists_count ) <nl> { <nl> size_t total_vtx_count = 0 ; <nl> + size_t total_idx_count = 0 ; <nl> for ( int n = 0 ; n < cmd_lists_count ; n + + ) <nl> + { <nl> total_vtx_count + = cmd_lists [ n ] - > vtx_buffer . size ( ) ; <nl> + total_idx_count + = cmd_lists [ n ] - > idx_buffer . size ( ) ; <nl> + } <nl> if ( total_vtx_count = = 0 ) <nl> return ; <nl> <nl> / / Copy and convert all vertices into a single contiguous buffer <nl> CUSTOMVERTEX * vtx_dst ; <nl> + ImDrawIdx * idx_dst ; <nl> if ( g_pVB - > Lock ( 0 , ( UINT ) total_vtx_count , ( void * * ) & vtx_dst , D3DLOCK_DISCARD ) < 0 ) <nl> return ; <nl> + if ( g_pIB - > Lock ( 0 , ( UINT ) total_idx_count , ( void * * ) & idx_dst , D3DLOCK_DISCARD ) < 0 ) <nl> + return ; <nl> for ( int n = 0 ; n < cmd_lists_count ; n + + ) <nl> { <nl> const ImDrawList * cmd_list = cmd_lists [ n ] ; <nl> static void ImGui_ImplDX9_RenderDrawLists ( ImDrawList * * const cmd_lists , int cmd_ <nl> vtx_dst + + ; <nl> vtx_src + + ; <nl> } <nl> + memcpy ( idx_dst , & cmd_list - > idx_buffer [ 0 ] , cmd_list - > idx_buffer . size ( ) * sizeof ( ImDrawIdx ) ) ; <nl> + idx_dst + = cmd_list - > idx_buffer . size ( ) ; <nl> } <nl> g_pVB - > Unlock ( ) ; <nl> + g_pIB - > Unlock ( ) ; <nl> g_pd3dDevice - > SetStreamSource ( 0 , g_pVB , 0 , sizeof ( CUSTOMVERTEX ) ) ; <nl> + g_pd3dDevice - > SetIndices ( g_pIB ) ; <nl> g_pd3dDevice - > SetFVF ( D3DFVF_CUSTOMVERTEX ) ; <nl> <nl> / / Setup render state : fixed - pipeline , alpha - blending , no face culling , no depth testing <nl> static void ImGui_ImplDX9_RenderDrawLists ( ImDrawList * * const cmd_lists , int cmd_ <nl> <nl> / / Render command lists <nl> int vtx_offset = 0 ; <nl> + int idx_offset = 0 ; <nl> for ( int n = 0 ; n < cmd_lists_count ; n + + ) <nl> { <nl> const ImDrawList * cmd_list = cmd_lists [ n ] ; <nl> static void ImGui_ImplDX9_RenderDrawLists ( ImDrawList * * const cmd_lists , int cmd_ <nl> const RECT r = { ( LONG ) pcmd - > clip_rect . x , ( LONG ) pcmd - > clip_rect . y , ( LONG ) pcmd - > clip_rect . z , ( LONG ) pcmd - > clip_rect . w } ; <nl> g_pd3dDevice - > SetTexture ( 0 , ( LPDIRECT3DTEXTURE9 ) pcmd - > texture_id ) ; <nl> g_pd3dDevice - > SetScissorRect ( & r ) ; <nl> - g_pd3dDevice - > DrawPrimitive ( D3DPT_TRIANGLELIST , vtx_offset , pcmd - > vtx_count / 3 ) ; <nl> + g_pd3dDevice - > DrawIndexedPrimitive ( D3DPT_TRIANGLELIST , vtx_offset , 0 , cmd_list - > vtx_buffer . size ( ) , idx_offset , pcmd - > idx_count / 3 ) ; <nl> } <nl> - vtx_offset + = pcmd - > vtx_count ; <nl> + idx_offset + = pcmd - > idx_count ; <nl> } <nl> + vtx_offset + = cmd_list - > vtx_buffer . 
size ( ) ; <nl> } <nl> } <nl> <nl> bool ImGui_ImplDX9_CreateDeviceObjects ( ) <nl> if ( g_pd3dDevice - > CreateVertexBuffer ( VERTEX_BUFFER_SIZE * sizeof ( CUSTOMVERTEX ) , D3DUSAGE_DYNAMIC | D3DUSAGE_WRITEONLY , D3DFVF_CUSTOMVERTEX , D3DPOOL_DEFAULT , & g_pVB , NULL ) < 0 ) <nl> return false ; <nl> <nl> + if ( g_pd3dDevice - > CreateIndexBuffer ( INDEX_BUFFER_SIZE * sizeof ( ImDrawIdx ) , D3DUSAGE_DYNAMIC | D3DUSAGE_WRITEONLY , D3DFMT_INDEX16 , D3DPOOL_DEFAULT , & g_pIB , NULL ) < 0 ) <nl> + return false ; <nl> + <nl> ImGui_ImplDX9_CreateFontsTexture ( ) ; <nl> return true ; <nl> } <nl> void ImGui_ImplDX9_InvalidateDeviceObjects ( ) <nl> g_pVB - > Release ( ) ; <nl> g_pVB = NULL ; <nl> } <nl> + if ( g_pIB ) <nl> + { <nl> + g_pIB - > Release ( ) ; <nl> + g_pIB = NULL ; <nl> + } <nl> if ( LPDIRECT3DTEXTURE9 tex = ( LPDIRECT3DTEXTURE9 ) ImGui : : GetIO ( ) . Fonts - > TexID ) <nl> { <nl> tex - > Release ( ) ; <nl> mmm a / examples / opengl3_example / imgui_impl_glfw_gl3 . cpp <nl> ppp b / examples / opengl3_example / imgui_impl_glfw_gl3 . cpp <nl> static GLuint g_FontTexture = 0 ; <nl> static int g_ShaderHandle = 0 , g_VertHandle = 0 , g_FragHandle = 0 ; <nl> static int g_AttribLocationTex = 0 , g_AttribLocationProjMtx = 0 ; <nl> static int g_AttribLocationPosition = 0 , g_AttribLocationUV = 0 , g_AttribLocationColor = 0 ; <nl> - static size_t g_VboMaxSize = 20000 ; <nl> + static size_t g_VboSize = 0 ; <nl> static unsigned int g_VboHandle = 0 , g_VaoHandle = 0 ; <nl> <nl> / / This is the main rendering function that you have to implement and provide to ImGui ( via setting up ' RenderDrawListsFn ' in the ImGuiIO structure ) <nl> static void ImGui_ImplGlfwGL3_RenderDrawLists ( ImDrawList * * const cmd_lists , int <nl> for ( int n = 0 ; n < cmd_lists_count ; n + + ) <nl> total_vtx_count + = cmd_lists [ n ] - > vtx_buffer . size ( ) ; <nl> glBindBuffer ( GL_ARRAY_BUFFER , g_VboHandle ) ; <nl> - size_t neededBufferSize = total_vtx_count * sizeof ( ImDrawVert ) ; <nl> - if ( neededBufferSize > g_VboMaxSize ) <nl> + size_t needed_vtx_size = total_vtx_count * sizeof ( ImDrawVert ) ; <nl> + if ( g_VboSize < needed_vtx_size ) <nl> { <nl> - g_VboMaxSize = neededBufferSize + 5000 ; / / Grow buffer <nl> - glBufferData ( GL_ARRAY_BUFFER , g_VboMaxSize , NULL , GL_STREAM_DRAW ) ; <nl> + g_VboSize = needed_vtx_size + 8192 ; / / Grow buffer <nl> + glBufferData ( GL_ARRAY_BUFFER , g_VboSize , NULL , GL_STREAM_DRAW ) ; <nl> } <nl> <nl> / / Copy and convert all vertices into a single contiguous buffer <nl> - unsigned char * buffer_data = ( unsigned char * ) glMapBuffer ( GL_ARRAY_BUFFER , GL_WRITE_ONLY ) ; <nl> - if ( ! buffer_data ) <nl> + unsigned char * vtx_data = ( unsigned char * ) glMapBuffer ( GL_ARRAY_BUFFER , GL_WRITE_ONLY ) ; <nl> + if ( ! vtx_data ) <nl> return ; <nl> for ( int n = 0 ; n < cmd_lists_count ; n + + ) <nl> { <nl> const ImDrawList * cmd_list = cmd_lists [ n ] ; <nl> - memcpy ( buffer_data , & cmd_list - > vtx_buffer [ 0 ] , cmd_list - > vtx_buffer . size ( ) * sizeof ( ImDrawVert ) ) ; <nl> - buffer_data + = cmd_list - > vtx_buffer . size ( ) * sizeof ( ImDrawVert ) ; <nl> + memcpy ( vtx_data , & cmd_list - > vtx_buffer [ 0 ] , cmd_list - > vtx_buffer . size ( ) * sizeof ( ImDrawVert ) ) ; <nl> + vtx_data + = cmd_list - > vtx_buffer . 
size ( ) * sizeof ( ImDrawVert ) ; <nl> } <nl> glUnmapBuffer ( GL_ARRAY_BUFFER ) ; <nl> glBindBuffer ( GL_ARRAY_BUFFER , 0 ) ; <nl> glBindVertexArray ( g_VaoHandle ) ; <nl> <nl> - int cmd_offset = 0 ; <nl> + int vtx_offset = 0 ; <nl> for ( int n = 0 ; n < cmd_lists_count ; n + + ) <nl> { <nl> const ImDrawList * cmd_list = cmd_lists [ n ] ; <nl> - int vtx_offset = cmd_offset ; <nl> + const ImDrawIdx * idx_buffer = ( const unsigned short * ) & cmd_list - > idx_buffer . front ( ) ; <nl> + <nl> const ImDrawCmd * pcmd_end = cmd_list - > commands . end ( ) ; <nl> for ( const ImDrawCmd * pcmd = cmd_list - > commands . begin ( ) ; pcmd ! = pcmd_end ; pcmd + + ) <nl> { <nl> static void ImGui_ImplGlfwGL3_RenderDrawLists ( ImDrawList * * const cmd_lists , int <nl> { <nl> glBindTexture ( GL_TEXTURE_2D , ( GLuint ) ( intptr_t ) pcmd - > texture_id ) ; <nl> glScissor ( ( int ) pcmd - > clip_rect . x , ( int ) ( height - pcmd - > clip_rect . w ) , ( int ) ( pcmd - > clip_rect . z - pcmd - > clip_rect . x ) , ( int ) ( pcmd - > clip_rect . w - pcmd - > clip_rect . y ) ) ; <nl> - glDrawArrays ( GL_TRIANGLES , vtx_offset , pcmd - > vtx_count ) ; <nl> + glDrawElementsBaseVertex ( GL_TRIANGLES , pcmd - > idx_count , GL_UNSIGNED_SHORT , idx_buffer , vtx_offset ) ; <nl> } <nl> - vtx_offset + = pcmd - > vtx_count ; <nl> + idx_buffer + = pcmd - > idx_count ; <nl> } <nl> - cmd_offset = vtx_offset ; <nl> + vtx_offset + = cmd_list - > vtx_buffer . size ( ) ; <nl> } <nl> <nl> / / Restore modified state <nl> bool ImGui_ImplGlfwGL3_CreateDeviceObjects ( ) <nl> g_AttribLocationColor = glGetAttribLocation ( g_ShaderHandle , " Color " ) ; <nl> <nl> glGenBuffers ( 1 , & g_VboHandle ) ; <nl> - glBindBuffer ( GL_ARRAY_BUFFER , g_VboHandle ) ; <nl> - glBufferData ( GL_ARRAY_BUFFER , g_VboMaxSize , NULL , GL_DYNAMIC_DRAW ) ; <nl> <nl> glGenVertexArrays ( 1 , & g_VaoHandle ) ; <nl> glBindVertexArray ( g_VaoHandle ) ; <nl> mmm a / examples / opengl_example / imgui_impl_glfw . cpp <nl> ppp b / examples / opengl_example / imgui_impl_glfw . cpp <nl> static void ImGui_ImplGlfw_RenderDrawLists ( ImDrawList * * const cmd_lists , int cmd <nl> { <nl> const ImDrawList * cmd_list = cmd_lists [ n ] ; <nl> const unsigned char * vtx_buffer = ( const unsigned char * ) & cmd_list - > vtx_buffer . front ( ) ; <nl> + const ImDrawIdx * idx_buffer = ( const unsigned short * ) & cmd_list - > idx_buffer . front ( ) ; <nl> glVertexPointer ( 2 , GL_FLOAT , sizeof ( ImDrawVert ) , ( void * ) ( vtx_buffer + OFFSETOF ( ImDrawVert , pos ) ) ) ; <nl> glTexCoordPointer ( 2 , GL_FLOAT , sizeof ( ImDrawVert ) , ( void * ) ( vtx_buffer + OFFSETOF ( ImDrawVert , uv ) ) ) ; <nl> glColorPointer ( 4 , GL_UNSIGNED_BYTE , sizeof ( ImDrawVert ) , ( void * ) ( vtx_buffer + OFFSETOF ( ImDrawVert , col ) ) ) ; <nl> <nl> - int vtx_offset = 0 ; <nl> for ( size_t cmd_i = 0 ; cmd_i < cmd_list - > commands . size ( ) ; cmd_i + + ) <nl> { <nl> const ImDrawCmd * pcmd = & cmd_list - > commands [ cmd_i ] ; <nl> static void ImGui_ImplGlfw_RenderDrawLists ( ImDrawList * * const cmd_lists , int cmd <nl> { <nl> glBindTexture ( GL_TEXTURE_2D , ( GLuint ) ( intptr_t ) pcmd - > texture_id ) ; <nl> glScissor ( ( int ) pcmd - > clip_rect . x , ( int ) ( height - pcmd - > clip_rect . w ) , ( int ) ( pcmd - > clip_rect . z - pcmd - > clip_rect . x ) , ( int ) ( pcmd - > clip_rect . w - pcmd - > clip_rect . 
y ) ) ; <nl> - glDrawArrays ( GL_TRIANGLES , vtx_offset , pcmd - > vtx_count ) ; <nl> + glDrawElements ( GL_TRIANGLES , pcmd - > idx_count , GL_UNSIGNED_SHORT , idx_buffer ) ; <nl> } <nl> - vtx_offset + = pcmd - > vtx_count ; <nl> + idx_buffer + = pcmd - > idx_count ; <nl> } <nl> } <nl> # undef OFFSETOF <nl> mmm a / imgui . cpp <nl> ppp b / imgui . cpp <nl> static inline void AddDrawListToRenderList ( ImVector < ImDrawList * > & out_render_lis <nl> { <nl> if ( ! draw_list - > commands . empty ( ) & & ! draw_list - > vtx_buffer . empty ( ) ) <nl> { <nl> - if ( draw_list - > commands . back ( ) . vtx_count = = 0 ) <nl> + if ( draw_list - > commands . back ( ) . idx_count = = 0 ) <nl> draw_list - > commands . pop_back ( ) ; <nl> out_render_list . push_back ( draw_list ) ; <nl> GImGui - > IO . MetricsRenderVertices + = ( int ) draw_list - > vtx_buffer . size ( ) ; <nl> + GImGui - > IO . MetricsRenderIndices + = ( int ) draw_list - > idx_buffer . size ( ) ; <nl> } <nl> } <nl> <nl> void ImGui : : Render ( ) <nl> } <nl> <nl> / / Gather windows to render <nl> - g . IO . MetricsRenderVertices = 0 ; <nl> + g . IO . MetricsRenderVertices = g . IO . MetricsRenderIndices = 0 ; <nl> for ( size_t i = 0 ; i < IM_ARRAYSIZE ( g . RenderDrawLists ) ; i + + ) <nl> g . RenderDrawLists [ i ] . resize ( 0 ) ; <nl> for ( size_t i = 0 ; i ! = g . Windows . size ( ) ; i + + ) <nl> void ImDrawList : : Clear ( ) <nl> commands . resize ( 0 ) ; <nl> vtx_buffer . resize ( 0 ) ; <nl> vtx_write = NULL ; <nl> + vtx_current_idx = 0 ; <nl> + idx_buffer . resize ( 0 ) ; <nl> + idx_write = NULL ; <nl> clip_rect_stack . resize ( 0 ) ; <nl> texture_id_stack . resize ( 0 ) ; <nl> } <nl> void ImDrawList : : ClearFreeMemory ( ) <nl> commands . clear ( ) ; <nl> vtx_buffer . clear ( ) ; <nl> vtx_write = NULL ; <nl> + vtx_current_idx = 0 ; <nl> + idx_buffer . clear ( ) ; <nl> + idx_write = NULL ; <nl> clip_rect_stack . clear ( ) ; <nl> texture_id_stack . clear ( ) ; <nl> } <nl> void ImDrawList : : ClearFreeMemory ( ) <nl> void ImDrawList : : AddDrawCmd ( ) <nl> { <nl> ImDrawCmd draw_cmd ; <nl> - draw_cmd . vtx_count = 0 ; <nl> + draw_cmd . idx_count = 0 ; <nl> draw_cmd . clip_rect = clip_rect_stack . empty ( ) ? GNullClipRect : clip_rect_stack . back ( ) ; <nl> draw_cmd . texture_id = texture_id_stack . empty ( ) ? NULL : texture_id_stack . back ( ) ; <nl> draw_cmd . user_callback = NULL ; <nl> void ImDrawList : : AddDrawCmd ( ) <nl> void ImDrawList : : AddCallback ( ImDrawCallback callback , void * callback_data ) <nl> { <nl> ImDrawCmd * current_cmd = commands . empty ( ) ? NULL : & commands . back ( ) ; <nl> - if ( ! current_cmd | | current_cmd - > vtx_count ! = 0 | | current_cmd - > user_callback ! = NULL ) <nl> + if ( ! current_cmd | | current_cmd - > idx_count ! = 0 | | current_cmd - > user_callback ! = NULL ) <nl> { <nl> AddDrawCmd ( ) ; <nl> current_cmd = & commands . back ( ) ; <nl> void ImDrawList : : AddCallback ( ImDrawCallback callback , void * callback_data ) <nl> void ImDrawList : : UpdateClipRect ( ) <nl> { <nl> ImDrawCmd * current_cmd = commands . empty ( ) ? NULL : & commands . back ( ) ; <nl> - if ( ! current_cmd | | ( current_cmd - > vtx_count ! = 0 ) | | current_cmd - > user_callback ! = NULL ) <nl> + if ( ! current_cmd | | ( current_cmd - > idx_count ! = 0 ) | | current_cmd - > user_callback ! = NULL ) <nl> { <nl> AddDrawCmd ( ) ; <nl> } <nl> void ImDrawList : : UpdateTextureID ( ) <nl> { <nl> ImDrawCmd * current_cmd = commands . empty ( ) ? NULL : & commands . 
back ( ) ; <nl> const ImTextureID texture_id = texture_id_stack . empty ( ) ? NULL : texture_id_stack . back ( ) ; <nl> - if ( ! current_cmd | | ( current_cmd - > vtx_count ! = 0 & & current_cmd - > texture_id ! = texture_id ) | | current_cmd - > user_callback ! = NULL ) <nl> + if ( ! current_cmd | | ( current_cmd - > idx_count ! = 0 & & current_cmd - > texture_id ! = texture_id ) | | current_cmd - > user_callback ! = NULL ) <nl> { <nl> AddDrawCmd ( ) ; <nl> } <nl> void ImDrawList : : PopTextureID ( ) <nl> UpdateTextureID ( ) ; <nl> } <nl> <nl> - void ImDrawList : : PrimReserve ( unsigned int vtx_count ) <nl> + void ImDrawList : : PrimReserve ( unsigned int idx_count , unsigned int vtx_count ) <nl> { <nl> ImDrawCmd & draw_cmd = commands . back ( ) ; <nl> - draw_cmd . vtx_count + = vtx_count ; <nl> - <nl> + draw_cmd . idx_count + = idx_count ; <nl> + <nl> size_t vtx_buffer_size = vtx_buffer . size ( ) ; <nl> vtx_buffer . resize ( vtx_buffer_size + vtx_count ) ; <nl> vtx_write = & vtx_buffer [ vtx_buffer_size ] ; <nl> + <nl> + size_t idx_buffer_size = idx_buffer . size ( ) ; <nl> + idx_buffer . resize ( idx_buffer_size + idx_count ) ; <nl> + idx_write = & idx_buffer [ idx_buffer_size ] ; <nl> } <nl> <nl> void ImDrawList : : PrimTriangle ( const ImVec2 & a , const ImVec2 & b , const ImVec2 & c , ImU32 col ) <nl> { <nl> const ImVec2 uv = GImGui - > FontTexUvWhitePixel ; <nl> - vtx_write [ 0 ] . pos = a ; vtx_write [ 0 ] . uv = uv ; vtx_write [ 0 ] . col = col ; <nl> - vtx_write [ 1 ] . pos = b ; vtx_write [ 1 ] . uv = uv ; vtx_write [ 1 ] . col = col ; <nl> - vtx_write [ 2 ] . pos = c ; vtx_write [ 2 ] . uv = uv ; vtx_write [ 2 ] . col = col ; <nl> + idx_write [ 0 ] = vtx_current_idx ; idx_write [ 1 ] = vtx_current_idx + 1 ; idx_write [ 2 ] = vtx_current_idx + 2 ; <nl> + vtx_write [ 0 ] . pos = a ; vtx_write [ 0 ] . uv = uv ; vtx_write [ 0 ] . col = col ; <nl> + vtx_write [ 1 ] . pos = b ; vtx_write [ 1 ] . uv = uv ; vtx_write [ 1 ] . col = col ; <nl> + vtx_write [ 2 ] . pos = c ; vtx_write [ 2 ] . uv = uv ; vtx_write [ 2 ] . col = col ; <nl> vtx_write + = 3 ; <nl> + vtx_current_idx + = 3 ; <nl> + idx_write + = 3 ; <nl> } <nl> <nl> void ImDrawList : : PrimRect ( const ImVec2 & a , const ImVec2 & c , ImU32 col ) <nl> void ImDrawList : : PrimRect ( const ImVec2 & a , const ImVec2 & c , ImU32 col ) <nl> const ImVec2 uv = GImGui - > FontTexUvWhitePixel ; <nl> const ImVec2 b ( c . x , a . y ) ; <nl> const ImVec2 d ( a . x , c . y ) ; <nl> - vtx_write [ 0 ] . pos = a ; vtx_write [ 0 ] . uv = uv ; vtx_write [ 0 ] . col = col ; <nl> - vtx_write [ 1 ] . pos = b ; vtx_write [ 1 ] . uv = uv ; vtx_write [ 1 ] . col = col ; <nl> - vtx_write [ 2 ] . pos = c ; vtx_write [ 2 ] . uv = uv ; vtx_write [ 2 ] . col = col ; <nl> - vtx_write [ 3 ] . pos = a ; vtx_write [ 3 ] . uv = uv ; vtx_write [ 3 ] . col = col ; <nl> - vtx_write [ 4 ] . pos = c ; vtx_write [ 4 ] . uv = uv ; vtx_write [ 4 ] . col = col ; <nl> - vtx_write [ 5 ] . pos = d ; vtx_write [ 5 ] . uv = uv ; vtx_write [ 5 ] . col = col ; <nl> - vtx_write + = 6 ; <nl> + idx_write [ 0 ] = vtx_current_idx ; idx_write [ 1 ] = vtx_current_idx + 1 ; idx_write [ 2 ] = vtx_current_idx + 2 ; <nl> + idx_write [ 3 ] = vtx_current_idx ; idx_write [ 4 ] = vtx_current_idx + 2 ; idx_write [ 5 ] = vtx_current_idx + 3 ; <nl> + vtx_write [ 0 ] . pos = a ; vtx_write [ 0 ] . uv = uv ; vtx_write [ 0 ] . col = col ; <nl> + vtx_write [ 1 ] . pos = b ; vtx_write [ 1 ] . uv = uv ; vtx_write [ 1 ] . col = col ; <nl> + vtx_write [ 2 ] . pos = c ; vtx_write [ 2 ] . 
uv = uv ; vtx_write [ 2 ] . col = col ; <nl> + vtx_write [ 3 ] . pos = d ; vtx_write [ 3 ] . uv = uv ; vtx_write [ 3 ] . col = col ; <nl> + vtx_write + = 4 ; <nl> + vtx_current_idx + = 4 ; <nl> + idx_write + = 6 ; <nl> } <nl> <nl> void ImDrawList : : PrimRectUV ( const ImVec2 & a , const ImVec2 & c , const ImVec2 & uv_a , const ImVec2 & uv_c , ImU32 col ) <nl> void ImDrawList : : PrimRectUV ( const ImVec2 & a , const ImVec2 & c , const ImVec2 & uv_a <nl> const ImVec2 d ( a . x , c . y ) ; <nl> const ImVec2 uv_b ( uv_c . x , uv_a . y ) ; <nl> const ImVec2 uv_d ( uv_a . x , uv_c . y ) ; <nl> - vtx_write [ 0 ] . pos = a ; vtx_write [ 0 ] . uv = uv_a ; vtx_write [ 0 ] . col = col ; <nl> - vtx_write [ 1 ] . pos = b ; vtx_write [ 1 ] . uv = uv_b ; vtx_write [ 1 ] . col = col ; <nl> - vtx_write [ 2 ] . pos = c ; vtx_write [ 2 ] . uv = uv_c ; vtx_write [ 2 ] . col = col ; <nl> - vtx_write [ 3 ] . pos = a ; vtx_write [ 3 ] . uv = uv_a ; vtx_write [ 3 ] . col = col ; <nl> - vtx_write [ 4 ] . pos = c ; vtx_write [ 4 ] . uv = uv_c ; vtx_write [ 4 ] . col = col ; <nl> - vtx_write [ 5 ] . pos = d ; vtx_write [ 5 ] . uv = uv_d ; vtx_write [ 5 ] . col = col ; <nl> - vtx_write + = 6 ; <nl> + idx_write [ 0 ] = vtx_current_idx ; idx_write [ 1 ] = vtx_current_idx + 1 ; idx_write [ 2 ] = vtx_current_idx + 2 ; <nl> + idx_write [ 3 ] = vtx_current_idx ; idx_write [ 4 ] = vtx_current_idx + 2 ; idx_write [ 5 ] = vtx_current_idx + 3 ; <nl> + vtx_write [ 0 ] . pos = a ; vtx_write [ 0 ] . uv = uv_a ; vtx_write [ 0 ] . col = col ; <nl> + vtx_write [ 1 ] . pos = b ; vtx_write [ 1 ] . uv = uv_b ; vtx_write [ 1 ] . col = col ; <nl> + vtx_write [ 2 ] . pos = c ; vtx_write [ 2 ] . uv = uv_c ; vtx_write [ 2 ] . col = col ; <nl> + vtx_write [ 3 ] . pos = d ; vtx_write [ 3 ] . uv = uv_d ; vtx_write [ 3 ] . col = col ; <nl> + vtx_write + = 4 ; <nl> + vtx_current_idx + = 4 ; <nl> + idx_write + = 6 ; <nl> } <nl> <nl> void ImDrawList : : PrimQuad ( const ImVec2 & a , const ImVec2 & b , const ImVec2 & c , const ImVec2 & d , ImU32 col ) <nl> { <nl> const ImVec2 uv = GImGui - > FontTexUvWhitePixel ; <nl> - vtx_write [ 0 ] . pos = a ; vtx_write [ 0 ] . uv = uv ; vtx_write [ 0 ] . col = col ; <nl> - vtx_write [ 1 ] . pos = b ; vtx_write [ 1 ] . uv = uv ; vtx_write [ 1 ] . col = col ; <nl> - vtx_write [ 2 ] . pos = c ; vtx_write [ 2 ] . uv = uv ; vtx_write [ 2 ] . col = col ; <nl> - vtx_write [ 3 ] . pos = a ; vtx_write [ 3 ] . uv = uv ; vtx_write [ 3 ] . col = col ; <nl> - vtx_write [ 4 ] . pos = c ; vtx_write [ 4 ] . uv = uv ; vtx_write [ 4 ] . col = col ; <nl> - vtx_write [ 5 ] . pos = d ; vtx_write [ 5 ] . uv = uv ; vtx_write [ 5 ] . col = col ; <nl> - vtx_write + = 6 ; <nl> + idx_write [ 0 ] = vtx_current_idx ; idx_write [ 1 ] = vtx_current_idx + 1 ; idx_write [ 2 ] = vtx_current_idx + 2 ; <nl> + idx_write [ 3 ] = vtx_current_idx ; idx_write [ 4 ] = vtx_current_idx + 2 ; idx_write [ 5 ] = vtx_current_idx + 3 ; <nl> + vtx_write [ 0 ] . pos = a ; vtx_write [ 0 ] . uv = uv ; vtx_write [ 0 ] . col = col ; <nl> + vtx_write [ 1 ] . pos = b ; vtx_write [ 1 ] . uv = uv ; vtx_write [ 1 ] . col = col ; <nl> + vtx_write [ 2 ] . pos = c ; vtx_write [ 2 ] . uv = uv ; vtx_write [ 2 ] . col = col ; <nl> + vtx_write [ 3 ] . pos = d ; vtx_write [ 3 ] . uv = uv ; vtx_write [ 3 ] . col = col ; <nl> + vtx_write + = 4 ; <nl> + vtx_current_idx + = 4 ; <nl> + idx_write + = 6 ; <nl> } <nl> <nl> / / FIXME - OPT : In many instances the caller could provide a normal . 
<nl> void ImDrawList : : AddLine ( const ImVec2 & a , const ImVec2 & b , ImU32 col , float thic <nl> if ( ( col > > 24 ) = = 0 ) <nl> return ; <nl> <nl> - PrimReserve ( 6 ) ; <nl> + PrimReserve ( 6 , 4 ) ; <nl> PrimLine ( a , b , col , thickness ) ; <nl> } <nl> <nl> void ImDrawList : : AddArcFast ( const ImVec2 & center , float radius , ImU32 col , int a <nl> } <nl> circle_vtx_builds = true ; <nl> } <nl> - <nl> - const ImVec2 uv = GImGui - > FontTexUvWhitePixel ; <nl> + <nl> if ( filled ) <nl> { <nl> - PrimReserve ( ( unsigned int ) ( a_max - a_min ) * 3 ) ; <nl> - for ( int a0 = a_min ; a0 < a_max ; a0 + + ) <nl> + PrimReserve ( ( unsigned int ) ( a_max - a_min ) * 3 , ( unsigned int ) ( a_max - a_min + 1 ) + 1 ) ; <nl> + ImDrawIdx idx = vtx_current_idx ; <nl> + for ( int a = 0 ; a < a_max - a_min ; a + + ) <nl> { <nl> - int a1 = ( a0 + 1 = = SAMPLES ) ? 0 : a0 + 1 ; <nl> - PrimVtx ( center + circle_vtx [ a0 ] * radius , uv , col ) ; <nl> - PrimVtx ( center + circle_vtx [ a1 ] * radius , uv , col ) ; <nl> - PrimVtx ( center + third_point_offset , uv , col ) ; <nl> + PrimIdx ( idx + 1 + a ) ; <nl> + PrimIdx ( idx + 1 + a + 1 ) ; <nl> + PrimIdx ( idx ) ; <nl> } <nl> + const ImVec2 uv = GImGui - > FontTexUvWhitePixel ; <nl> + PrimVtx ( center + third_point_offset , uv , col ) ; <nl> + for ( int a = a_min ; a < a_max + 1 ; a + + ) <nl> + PrimVtx ( center + circle_vtx [ ( a > = SAMPLES ) ? a - SAMPLES : a ] * radius , uv , col ) ; <nl> } <nl> else <nl> { <nl> - PrimReserve ( ( unsigned int ) ( a_max - a_min ) * 6 ) ; <nl> + / / FIXME - OPT : Wasting vertices . <nl> + PrimReserve ( ( unsigned int ) ( a_max - a_min ) * 6 , ( unsigned int ) ( a_max - a_min ) * 4 ) ; <nl> for ( int a0 = a_min ; a0 < a_max ; a0 + + ) <nl> - { <nl> + { <nl> int a1 = ( a0 + 1 = = SAMPLES ) ? 0 : a0 + 1 ; <nl> PrimLine ( center + circle_vtx [ a0 ] * radius , center + circle_vtx [ a1 ] * radius , col ) ; <nl> - } <nl> + } <nl> } <nl> } <nl> <nl> void ImDrawList : : AddRect ( const ImVec2 & a , const ImVec2 & b , ImU32 col , float roun <nl> <nl> if ( r = = 0 . 0f | | rounding_corners = = 0 ) <nl> { <nl> - PrimReserve ( 4 * 6 ) ; <nl> + PrimReserve ( 6 * 4 , 4 * 4 ) ; <nl> PrimLine ( ImVec2 ( a . x , a . y ) , ImVec2 ( b . x , a . y ) , col ) ; <nl> PrimLine ( ImVec2 ( b . x , a . y ) , ImVec2 ( b . x , b . y ) , col ) ; <nl> PrimLine ( ImVec2 ( b . x , b . y ) , ImVec2 ( a . x , b . y ) , col ) ; <nl> void ImDrawList : : AddRect ( const ImVec2 & a , const ImVec2 & b , ImU32 col , float roun <nl> } <nl> else <nl> { <nl> - PrimReserve ( 4 * 6 ) ; <nl> + PrimReserve ( 6 * 4 , 4 * 4 ) ; <nl> PrimLine ( ImVec2 ( a . x + ( ( rounding_corners & 1 ) ? r : 0 ) , a . y ) , ImVec2 ( b . x - ( ( rounding_corners & 2 ) ? r : 0 ) , a . y ) , col ) ; <nl> PrimLine ( ImVec2 ( b . x , a . y + ( ( rounding_corners & 2 ) ? r : 0 ) ) , ImVec2 ( b . x , b . y - ( ( rounding_corners & 4 ) ? r : 0 ) ) , col ) ; <nl> PrimLine ( ImVec2 ( b . x - ( ( rounding_corners & 4 ) ? r : 0 ) , b . y ) , ImVec2 ( a . x + ( ( rounding_corners & 8 ) ? r : 0 ) , b . y ) , col ) ; <nl> void ImDrawList : : AddRectFilled ( const ImVec2 & a , const ImVec2 & b , ImU32 col , floa <nl> r = ImMin ( r , fabsf ( b . x - a . x ) * ( ( ( rounding_corners & ( 1 | 2 ) ) = = ( 1 | 2 ) ) | | ( ( rounding_corners & ( 4 | 8 ) ) = = ( 4 | 8 ) ) ? 0 . 5f : 1 . 0f ) ) ; <nl> r = ImMin ( r , fabsf ( b . y - a . y ) * ( ( ( rounding_corners & ( 1 | 8 ) ) = = ( 1 | 8 ) ) | | ( ( rounding_corners & ( 2 | 4 ) ) = = ( 2 | 4 ) ) ? 0 . 5f : 1 . 
0f ) ) ; <nl> <nl> - const ImVec2 uv = GImGui - > FontTexUvWhitePixel ; <nl> if ( r = = 0 . 0f | | rounding_corners = = 0 ) <nl> { <nl> / / Use triangle so we can merge more draw calls together ( at the cost of extra vertices ) <nl> - PrimReserve ( 6 ) ; <nl> + PrimReserve ( 6 , 4 ) ; <nl> PrimRect ( a , b , col ) ; <nl> } <nl> else <nl> { <nl> - PrimReserve ( 6 + 6 * 2 ) ; <nl> + PrimReserve ( 6 * 3 , 4 * 3 ) ; <nl> PrimRect ( ImVec2 ( a . x + r , a . y ) , ImVec2 ( b . x - r , b . y ) , col ) ; <nl> <nl> float top_y = ( rounding_corners & 1 ) ? a . y + r : a . y ; <nl> void ImDrawList : : AddTriangleFilled ( const ImVec2 & a , const ImVec2 & b , const ImVec <nl> if ( ( col > > 24 ) = = 0 ) <nl> return ; <nl> <nl> - PrimReserve ( 3 ) ; <nl> + PrimReserve ( 3 , 3 ) ; <nl> PrimTriangle ( a , b , c , col ) ; <nl> } <nl> <nl> void ImDrawList : : AddCircle ( const ImVec2 & centre , float radius , ImU32 col , int nu <nl> if ( ( col > > 24 ) = = 0 ) <nl> return ; <nl> <nl> - PrimReserve ( ( unsigned int ) num_segments * 6 ) ; <nl> + PrimReserve ( num_segments * 6 , num_segments * 4 ) ; <nl> + <nl> const float a_step = 2 * PI / ( float ) num_segments ; <nl> float a0 = 0 . 0f ; <nl> + ImVec2 p0 = centre + ImVec2 ( cosf ( a0 ) , sinf ( a0 ) ) * radius ; <nl> for ( int i = 0 ; i < num_segments ; i + + ) <nl> { <nl> const float a1 = ( i + 1 ) = = num_segments ? 0 . 0f : a0 + a_step ; <nl> - PrimLine ( centre + ImVec2 ( cosf ( a0 ) , sinf ( a0 ) ) * radius , centre + ImVec2 ( cosf ( a1 ) , sinf ( a1 ) ) * radius , col ) ; <nl> + const ImVec2 p1 = centre + ImVec2 ( cosf ( a1 ) , sinf ( a1 ) ) * radius ; <nl> + PrimLine ( p0 , p1 , col ) ; <nl> a0 = a1 ; <nl> + p0 = p1 ; <nl> } <nl> } <nl> <nl> void ImDrawList : : AddCircleFilled ( const ImVec2 & centre , float radius , ImU32 col , <nl> return ; <nl> <nl> const ImVec2 uv = GImGui - > FontTexUvWhitePixel ; <nl> - PrimReserve ( ( unsigned int ) num_segments * 3 ) ; <nl> + const ImDrawIdx idx = vtx_current_idx ; <nl> + PrimReserve ( ( unsigned int ) ( num_segments * 3 ) , ( unsigned int ) ( 1 + num_segments ) ) ; <nl> + <nl> const float a_step = 2 * PI / ( float ) num_segments ; <nl> float a0 = 0 . 0f ; <nl> - for ( int i = 0 ; i < num_segments ; i + + ) <nl> + PrimVtx ( centre , uv , col ) ; <nl> + for ( int i = 0 ; i < num_segments ; i + + , a0 + = a_step ) <nl> { <nl> - const float a1 = ( i + 1 ) = = num_segments ? 0 . 0f : a0 + a_step ; <nl> - PrimVtx ( centre + ImVec2 ( cosf ( a0 ) , sinf ( a0 ) ) * radius , uv , col ) ; <nl> - PrimVtx ( centre + ImVec2 ( cosf ( a1 ) , sinf ( a1 ) ) * radius , uv , col ) ; <nl> - PrimVtx ( centre , uv , col ) ; <nl> - a0 = a1 ; <nl> + PrimVtx ( centre + ImVec2 ( cosf ( a0 ) , sinf ( a0 ) ) * radius , uv , col ) ; <nl> + PrimIdx ( idx ) ; <nl> + PrimIdx ( idx + 1 + i ) ; <nl> + PrimIdx ( idx + 1 + ( ( i + 1 = = num_segments ) ? 0 : i + 1 ) ) ; <nl> } <nl> } <nl> <nl> void ImDrawList : : AddText ( const ImFont * font , float font_size , const ImVec2 & pos , <nl> <nl> / / reserve vertices for worse case <nl> const unsigned int char_count = ( unsigned int ) ( text_end - text_begin ) ; <nl> - const unsigned int vtx_count_max = char_count * 6 ; <nl> + const unsigned int vtx_count_max = char_count * 4 ; <nl> + const unsigned int idx_count_max = char_count * 6 ; <nl> const size_t vtx_begin = vtx_buffer . size ( ) ; <nl> - PrimReserve ( vtx_count_max ) ; <nl> + const size_t idx_begin = idx_buffer . 
size ( ) ; <nl> + PrimReserve ( idx_count_max , vtx_count_max ) ; <nl> <nl> font - > RenderText ( font_size , pos , col , clip_rect_stack . back ( ) , text_begin , text_end , this , wrap_width , cpu_clip_max ) ; <nl> <nl> / / give back unused vertices <nl> + / / FIXME - OPT <nl> vtx_buffer . resize ( ( size_t ) ( vtx_write - & vtx_buffer . front ( ) ) ) ; <nl> - const size_t vtx_count = vtx_buffer . size ( ) - vtx_begin ; <nl> - commands . back ( ) . vtx_count - = ( unsigned int ) ( vtx_count_max - vtx_count ) ; <nl> - vtx_write - = ( vtx_count_max - vtx_count ) ; <nl> + idx_buffer . resize ( ( size_t ) ( idx_write - & idx_buffer . front ( ) ) ) ; <nl> + unsigned int vtx_unused = vtx_count_max - ( unsigned int ) ( vtx_buffer . size ( ) - vtx_begin ) ; <nl> + unsigned int idx_unused = idx_count_max - ( unsigned int ) ( idx_buffer . size ( ) - idx_begin ) ; <nl> + commands . back ( ) . idx_count - = idx_unused ; <nl> + vtx_write - = vtx_unused ; <nl> + idx_write - = idx_unused ; <nl> + vtx_current_idx = ( ImDrawIdx ) vtx_buffer . size ( ) ; <nl> } <nl> <nl> void ImDrawList : : AddImage ( ImTextureID user_texture_id , const ImVec2 & a , const ImVec2 & b , const ImVec2 & uv0 , const ImVec2 & uv1 , ImU32 col ) <nl> void ImDrawList : : AddImage ( ImTextureID user_texture_id , const ImVec2 & a , const Im <nl> if ( push_texture_id ) <nl> PushTextureID ( user_texture_id ) ; <nl> <nl> - PrimReserve ( 6 ) ; <nl> + PrimReserve ( 6 , 4 ) ; <nl> PrimRectUV ( a , b , uv0 , uv1 , col ) ; <nl> <nl> if ( push_texture_id ) <nl> void ImFont : : RenderText ( float size , ImVec2 pos , ImU32 col , const ImVec4 & clip_re <nl> float x = pos . x ; <nl> float y = pos . y ; <nl> <nl> - ImDrawVert * out_vertices = draw_list - > vtx_write ; <nl> + ImDrawVert * vtx_write = draw_list - > vtx_write ; <nl> + ImDrawIdx vtx_current_idx = draw_list - > vtx_current_idx ; <nl> + ImDrawIdx * idx_write = draw_list - > idx_write ; <nl> <nl> const char * s = text_begin ; <nl> while ( s < text_end ) <nl> void ImFont : : RenderText ( float size , ImVec2 pos , ImU32 col , const ImVec4 & clip_re <nl> } <nl> <nl> / / NB : we are not calling PrimRectUV ( ) here because non - inlined causes too much overhead in a debug build . <nl> - out_vertices [ 0 ] . pos = ImVec2 ( x1 , y1 ) ; <nl> - out_vertices [ 0 ] . uv = ImVec2 ( u1 , v1 ) ; <nl> - out_vertices [ 0 ] . col = col ; <nl> - <nl> - out_vertices [ 1 ] . pos = ImVec2 ( x2 , y1 ) ; <nl> - out_vertices [ 1 ] . uv = ImVec2 ( u2 , v1 ) ; <nl> - out_vertices [ 1 ] . col = col ; <nl> - <nl> - out_vertices [ 2 ] . pos = ImVec2 ( x2 , y2 ) ; <nl> - out_vertices [ 2 ] . uv = ImVec2 ( u2 , v2 ) ; <nl> - out_vertices [ 2 ] . col = col ; <nl> - <nl> - out_vertices [ 3 ] = out_vertices [ 0 ] ; <nl> - out_vertices [ 4 ] = out_vertices [ 2 ] ; <nl> - <nl> - out_vertices [ 5 ] . pos = ImVec2 ( x1 , y2 ) ; <nl> - out_vertices [ 5 ] . uv = ImVec2 ( u1 , v2 ) ; <nl> - out_vertices [ 5 ] . col = col ; <nl> - <nl> - out_vertices + = 6 ; <nl> + / / inlined : <nl> + { <nl> + idx_write [ 0 ] = vtx_current_idx ; idx_write [ 1 ] = vtx_current_idx + 1 ; idx_write [ 2 ] = vtx_current_idx + 2 ; <nl> + idx_write [ 3 ] = vtx_current_idx ; idx_write [ 4 ] = vtx_current_idx + 2 ; idx_write [ 5 ] = vtx_current_idx + 3 ; <nl> + vtx_write [ 0 ] . pos . x = x1 ; vtx_write [ 0 ] . pos . y = y1 ; vtx_write [ 0 ] . col = col ; vtx_write [ 0 ] . uv . x = u1 ; vtx_write [ 0 ] . uv . y = v1 ; <nl> + vtx_write [ 1 ] . pos . x = x2 ; vtx_write [ 1 ] . pos . y = y1 ; vtx_write [ 1 ] . col = col ; vtx_write [ 1 ] . uv . 
x = u2 ; vtx_write [ 1 ] . uv . y = v1 ; <nl> + vtx_write [ 2 ] . pos . x = x2 ; vtx_write [ 2 ] . pos . y = y2 ; vtx_write [ 2 ] . col = col ; vtx_write [ 2 ] . uv . x = u2 ; vtx_write [ 2 ] . uv . y = v2 ; <nl> + vtx_write [ 3 ] . pos . x = x1 ; vtx_write [ 3 ] . pos . y = y2 ; vtx_write [ 3 ] . col = col ; vtx_write [ 3 ] . uv . x = u1 ; vtx_write [ 3 ] . uv . y = v2 ; <nl> + vtx_write + = 4 ; <nl> + vtx_current_idx + = 4 ; <nl> + idx_write + = 6 ; <nl> + } <nl> } <nl> } <nl> } <nl> void ImFont : : RenderText ( float size , ImVec2 pos , ImU32 col , const ImVec4 & clip_re <nl> x + = char_width ; <nl> } <nl> <nl> - draw_list - > vtx_write = out_vertices ; <nl> + draw_list - > vtx_write = vtx_write ; <nl> + draw_list - > vtx_current_idx = vtx_current_idx ; <nl> + draw_list - > idx_write = idx_write ; <nl> } <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> void ImGui : : ShowMetricsWindow ( bool * opened ) <nl> { <nl> ImGui : : Text ( " ImGui % s " , ImGui : : GetVersion ( ) ) ; <nl> ImGui : : Text ( " Application average % . 3f ms / frame ( % . 1f FPS ) " , 1000 . 0f / ImGui : : GetIO ( ) . Framerate , ImGui : : GetIO ( ) . Framerate ) ; <nl> - ImGui : : Text ( " % d vertices " , ImGui : : GetIO ( ) . MetricsRenderVertices ) ; <nl> + ImGui : : Text ( " % d vertices , % d triangles " , ImGui : : GetIO ( ) . MetricsRenderVertices , ImGui : : GetIO ( ) . MetricsRenderIndices / 3 ) ; <nl> ImGui : : Separator ( ) ; <nl> <nl> struct Funcs <nl> { <nl> static void NodeDrawList ( ImDrawList * draw_list , const char * label ) <nl> { <nl> - bool opened = ImGui : : TreeNode ( draw_list , " % s : % d vtx , % d cmds " , label , draw_list - > vtx_buffer . size ( ) , draw_list - > commands . size ( ) ) ; <nl> + bool opened = ImGui : : TreeNode ( draw_list , " % s : % d vtx , % d indices , % d cmds " , label , draw_list - > vtx_buffer . size ( ) , draw_list - > idx_buffer . size ( ) , draw_list - > commands . size ( ) ) ; <nl> if ( draw_list = = ImGui : : GetWindowDrawList ( ) ) <nl> { <nl> ImGui : : SameLine ( ) ; <nl> void ImGui : : ShowMetricsWindow ( bool * opened ) <nl> if ( pcmd - > user_callback ) <nl> ImGui : : BulletText ( " Callback % p , user_data % p " , pcmd - > user_callback , pcmd - > user_callback_data ) ; <nl> else <nl> - ImGui : : BulletText ( " Draw % d vtx , tex = % p " , pcmd - > vtx_count , pcmd - > texture_id ) ; <nl> + ImGui : : BulletText ( " Draw % d indexed vtx , tex = % p " , pcmd - > idx_count , pcmd - > texture_id ) ; <nl> ImGui : : TreePop ( ) ; <nl> } <nl> <nl> mmm a / imgui . h <nl> ppp b / imgui . h <nl> struct ImGuiIO <nl> bool WantCaptureKeyboard ; / / Widget is active ( = ImGui will use your keyboard input ) <nl> float Framerate ; / / Framerate estimation , in frame per second . Rolling average estimation based on IO . DeltaTime over 120 frames <nl> int MetricsRenderVertices ; / / Vertices processed during last call to Render ( ) <nl> + int MetricsRenderIndices ; / / <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> / / [ Internal ] ImGui will maintain those fields for you <nl> typedef void ( * ImDrawCallback ) ( const ImDrawList * parent_list , const ImDrawCmd * c <nl> / / Typically , 1 command = 1 gpu draw call ( unless command is a callback ) <nl> struct ImDrawCmd <nl> { <nl> - unsigned int vtx_count ; / / Number of vertices ( multiple of 3 ) to be drawn as triangles . The vertices are stored in the callee ImDrawList ' s vtx_buffer [ ] array . 
<nl> + unsigned int idx_count ; / / Number of indices ( multiple of 3 ) to be rendered as triangles . Vertices are stored in the callee ImDrawList ' s vtx_buffer [ ] array , indices in idx_buffer [ ] . <nl> ImVec4 clip_rect ; / / Clipping rectangle ( x1 , y1 , x2 , y2 ) <nl> ImTextureID texture_id ; / / User - provided texture ID . Set by user in ImfontAtlas : : SetTexID ( ) for fonts or passed to Image * ( ) functions . Ignore if never using images or multiple fonts atlas . <nl> ImDrawCallback user_callback ; / / If ! = NULL , call the function instead of rendering the vertices . vtx_count will be 0 . clip_rect and texture_id will be set normally . <nl> void * user_callback_data ; / / The draw callback code can access this . <nl> } ; <nl> <nl> + / / Vertex index <nl> + typedef unsigned short ImDrawIdx ; <nl> + <nl> / / Vertex layout <nl> # ifndef IMGUI_OVERRIDE_DRAWVERT_STRUCT_LAYOUT <nl> struct ImDrawVert <nl> struct ImDrawList <nl> / / This is what you have to render <nl> ImVector < ImDrawCmd > commands ; / / Commands . Typically 1 command = 1 gpu draw call . <nl> ImVector < ImDrawVert > vtx_buffer ; / / Vertex buffer . Each command consume ImDrawCmd : : vtx_count of those <nl> + ImVector < ImDrawIdx > idx_buffer ; / / Index buffer . Each command consume ImDrawCmd : : idx_count of those <nl> <nl> / / [ Internal to ImGui ] <nl> ImVector < ImVec4 > clip_rect_stack ; / / [ Internal ] <nl> ImVector < ImTextureID > texture_id_stack ; / / [ Internal ] <nl> ImDrawVert * vtx_write ; / / [ Internal ] point within vtx_buffer after each add command ( to avoid using the ImVector < > operators too much ) <nl> + ImDrawIdx vtx_current_idx ; / / [ Internal ] = = vtx_buffer . size ( ) <nl> + ImDrawIdx * idx_write ; / / [ Internal ] point within idx_buffer after each add command ( to avoid using the ImVector < > operators too much ) <nl> <nl> ImDrawList ( ) { Clear ( ) ; } <nl> IMGUI_API void Clear ( ) ; <nl> struct ImDrawList <nl> IMGUI_API void AddImage ( ImTextureID user_texture_id , const ImVec2 & a , const ImVec2 & b , const ImVec2 & uv0 , const ImVec2 & uv1 , ImU32 col = 0xFFFFFFFF ) ; <nl> <nl> / / Advanced <nl> - IMGUI_API void AddCallback ( ImDrawCallback callback , void * callback_data ) ; / / Your rendering function must check for ' user_callback ' in ImDrawCmd and call the function instead of rendering triangles . <nl> - IMGUI_API void AddDrawCmd ( ) ; / / This is useful if you need to forcefully create a new draw call ( to allow for dependent rendering / blending ) . Otherwise primitives are merged into the same draw - call as much as possible <nl> + IMGUI_API void AddCallback ( ImDrawCallback callback , void * callback_data ) ; / / Your rendering function must check for ' user_callback ' in ImDrawCmd and call the function instead of rendering triangles . <nl> + IMGUI_API void AddDrawCmd ( ) ; / / This is useful if you need to forcefully create a new draw call ( to allow for dependent rendering / blending ) . 
Otherwise primitives are merged into the same draw - call as much as possible <nl> <nl> / / Internal helpers <nl> - IMGUI_API void PrimReserve ( unsigned int vtx_count ) ; <nl> + IMGUI_API void PrimReserve ( unsigned int idx_count , unsigned int vtx_count ) ; <nl> IMGUI_API void PrimTriangle ( const ImVec2 & a , const ImVec2 & b , const ImVec2 & c , ImU32 col ) ; <nl> IMGUI_API void PrimRect ( const ImVec2 & a , const ImVec2 & b , ImU32 col ) ; <nl> IMGUI_API void PrimRectUV ( const ImVec2 & a , const ImVec2 & b , const ImVec2 & uv_a , const ImVec2 & uv_b , ImU32 col ) ; <nl> struct ImDrawList <nl> IMGUI_API void PrimLine ( const ImVec2 & a , const ImVec2 & b , ImU32 col , float thickness = 1 . 0f ) ; <nl> IMGUI_API void UpdateClipRect ( ) ; <nl> IMGUI_API void UpdateTextureID ( ) ; <nl> - IMGUI_API void PrimVtx ( const ImVec2 & pos , const ImVec2 & uv , ImU32 col ) { vtx_write - > pos = pos ; vtx_write - > uv = uv ; vtx_write - > col = col ; vtx_write + + ; } <nl> + IMGUI_API void PrimVtx ( const ImVec2 & pos , const ImVec2 & uv , ImU32 col ) { vtx_write - > pos = pos ; vtx_write - > uv = uv ; vtx_write - > col = col ; vtx_write + + ; vtx_current_idx + + ; } <nl> + IMGUI_API void PrimIdx ( unsigned int idx ) { * idx_write + + = ( ImDrawIdx ) idx ; } <nl> } ; <nl> <nl> / / Load and rasterize multiple TTF fonts into a same texture . <nl> | Indexed rendering . Not in main branch because breaks rendering code too much . Will merge in trunk along with more major graphics changes lat | ocornut/imgui | 1746b04065b87c7db716502267233295c55e8c0b | 2015-04-09T20:05:35Z |
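The ocornut/imgui change above switches the draw lists from duplicated per-triangle vertices to a shared index buffer: a quad becomes 4 vertices plus 6 indices, and draw commands count indices (idx_count) instead of vertices. A minimal standalone sketch of that idea follows; the struct and function names here are illustrative placeholders, not ImGui's actual ImDrawList API.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Illustrative types only; not ImGui's real ImDrawVert / ImDrawIdx.
struct Vec2 { float x, y; };
struct Vertex { Vec2 pos; Vec2 uv; std::uint32_t col; };
using Index = std::uint16_t;  // 16-bit indices, like the patch's ImDrawIdx

struct MeshBuilder {
    std::vector<Vertex> vtx;
    std::vector<Index>  idx;

    // Emit a rectangle as 4 vertices + 6 indices (two triangles sharing a diagonal),
    // instead of 6 independent vertices as in non-indexed rendering.
    void AddRect(Vec2 a, Vec2 c, std::uint32_t col) {
        const Index base = static_cast<Index>(vtx.size());
        const Vec2 b{c.x, a.y}, d{a.x, c.y};
        const Vec2 uv{0.0f, 0.0f};  // a real renderer would point this at a white pixel
        vtx.push_back({a, uv, col});
        vtx.push_back({b, uv, col});
        vtx.push_back({c, uv, col});
        vtx.push_back({d, uv, col});
        const Index quad[6] = {base, static_cast<Index>(base + 1), static_cast<Index>(base + 2),
                               base, static_cast<Index>(base + 2), static_cast<Index>(base + 3)};
        idx.insert(idx.end(), quad, quad + 6);
    }
};

int main() {
    MeshBuilder mb;
    mb.AddRect({0, 0}, {10, 10}, 0xFFFFFFFFu);
    mb.AddRect({20, 0}, {30, 10}, 0xFF00FF00u);
    // A backend would now issue one indexed draw call (conceptually glDrawElements).
    std::printf("%zu vertices, %zu indices (%zu triangles)\n",
                mb.vtx.size(), mb.idx.size(), mb.idx.size() / 3);
    return 0;
}
```

The win is the same one the patch is after: shared corners are stored once, so vertex-buffer size and bandwidth drop while the triangle count stays identical.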
mmm a / src / arch / runtime / message_hub . cc <nl> ppp b / src / arch / runtime / message_hub . cc <nl> <nl> - / / Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + / / Copyright 2010 - 2013 RethinkDB , all rights reserved . <nl> # include " arch / runtime / message_hub . hpp " <nl> <nl> # include < math . h > <nl> <nl> linux_message_hub_t : : linux_message_hub_t ( linux_event_queue_t * queue , linux_thread_pool_t * thread_pool , int current_thread ) <nl> : queue_ ( queue ) , thread_pool_ ( thread_pool ) , current_thread_ ( current_thread ) { <nl> <nl> - / / We have to do this through dynamically , otherwise we might <nl> - / / allocate far too many file descriptors since this is what the <nl> - / / constructor of the system_event_t object ( which is a member of <nl> - / / notify_t ) does . <nl> - notify_ = new notify_t [ thread_pool_ - > n_threads ] ; <nl> - <nl> - for ( int i = 0 ; i < thread_pool_ - > n_threads ; i + + ) { <nl> - / / Create notify fd for other cores that send work to us <nl> - notify_ [ i ] . parent = this ; <nl> - <nl> - queue_ - > watch_resource ( notify_ [ i ] . event . get_notify_fd ( ) , poll_event_in , & notify_ [ i ] ) ; <nl> - } <nl> + queue_ - > watch_resource ( event_ . get_notify_fd ( ) , poll_event_in , this ) ; <nl> } <nl> <nl> linux_message_hub_t : : ~ linux_message_hub_t ( ) { <nl> linux_message_hub_t : : ~ linux_message_hub_t ( ) { <nl> } <nl> <nl> guarantee ( incoming_messages_ . empty ( ) ) ; <nl> - <nl> - delete [ ] notify_ ; <nl> } <nl> <nl> void linux_message_hub_t : : do_store_message ( unsigned int nthread , linux_thread_message_t * msg ) { <nl> void linux_message_hub_t : : store_message_sometime ( unsigned int nthread , linux_thr <nl> <nl> <nl> void linux_message_hub_t : : insert_external_message ( linux_thread_message_t * msg ) { <nl> + bool do_wake_up ; <nl> { <nl> spinlock_acq_t acq ( & incoming_messages_lock_ ) ; <nl> + do_wake_up = incoming_messages_ . empty ( ) ; <nl> incoming_messages_ . push_back ( msg ) ; <nl> } <nl> <nl> / / Wakey wakey eggs and bakey <nl> - notify_ [ current_thread_ ] . event . wakey_wakey ( ) ; <nl> + if ( do_wake_up ) { <nl> + event_ . wakey_wakey ( ) ; <nl> + } <nl> } <nl> <nl> - void linux_message_hub_t : : notify_t : : on_event ( int events ) { <nl> - <nl> + void linux_message_hub_t : : on_event ( int events ) { <nl> if ( events ! = poll_event_in ) { <nl> logERR ( " Unexpected event mask : % d " , events ) ; <nl> } <nl> <nl> - / / Read from the event so level - triggered mechanism such as poll <nl> - / / don ' t pester us and use 100 % cpu <nl> - event . consume_wakey_wakeys ( ) ; <nl> + / / You must read wakey - wakeys so that the pipe - based implementation doesn ' t fill <nl> + / / up and so that poll - based event triggering doesn ' t infinite - loop . <nl> + event_ . consume_wakey_wakeys ( ) ; <nl> <nl> msg_list_t msg_list ; <nl> <nl> / / Pull the messages <nl> { <nl> - spinlock_acq_t acq ( & parent - > incoming_messages_lock_ ) ; <nl> - msg_list . append_and_clear ( & parent - > incoming_messages_ ) ; <nl> + spinlock_acq_t acq ( & incoming_messages_lock_ ) ; <nl> + msg_list . 
append_and_clear ( & incoming_messages_ ) ; <nl> } <nl> <nl> # ifndef NDEBUG <nl> void linux_message_hub_t : : notify_t : : on_event ( int events ) { <nl> # ifndef NDEBUG <nl> if ( m - > reloop_count_ > 0 ) { <nl> - - m - > reloop_count_ ; <nl> - parent - > do_store_message ( parent - > current_thread_ , m ) ; <nl> + do_store_message ( current_thread_ , m ) ; <nl> continue ; <nl> } <nl> # endif <nl> void linux_message_hub_t : : push_messages ( ) { <nl> { <nl> spinlock_acq_t acq ( & thread_pool_ - > threads [ i ] - > message_hub . incoming_messages_lock_ ) ; <nl> <nl> - / / We only need to do a wake up if the global <nl> + / / We only need to do a wake up if we ' re the first people to do a <nl> + / / wake up . <nl> do_wake_up = thread_pool_ - > threads [ i ] - > message_hub . incoming_messages_ . empty ( ) ; <nl> <nl> thread_pool_ - > threads [ i ] - > message_hub . incoming_messages_ . append_and_clear ( & queue - > msg_local_list ) ; <nl> void linux_message_hub_t : : push_messages ( ) { <nl> <nl> / / Wakey wakey , perhaps eggs and bakey <nl> if ( do_wake_up ) { <nl> - thread_pool_ - > threads [ i ] - > message_hub . notify_ [ current_thread_ ] . event . wakey_wakey ( ) ; <nl> + thread_pool_ - > threads [ i ] - > message_hub . event_ . wakey_wakey ( ) ; <nl> } <nl> } <nl> } <nl> mmm a / src / arch / runtime / message_hub . hpp <nl> ppp b / src / arch / runtime / message_hub . hpp <nl> <nl> - / / Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + / / Copyright 2010 - 2013 RethinkDB , all rights reserved . <nl> # ifndef ARCH_RUNTIME_MESSAGE_HUB_HPP_ <nl> # define ARCH_RUNTIME_MESSAGE_HUB_HPP_ <nl> <nl> <nl> <nl> class linux_thread_pool_t ; <nl> <nl> - / / TODO : perhaps we can issue cache prefetching commands to the CPU to <nl> - / / speed up the process of sending messages across cores . <nl> - <nl> / * There is one message hub per thread , NOT one message hub for the entire program . <nl> <nl> Each message hub stores messages that are going from that message hub ' s home thread to <nl> other threads . It keeps a separate queue for messages destined for each other thread . * / <nl> <nl> - class linux_message_hub_t { <nl> + class linux_message_hub_t : private linux_event_callback_t { <nl> public : <nl> typedef intrusive_list_t < linux_thread_message_t > msg_list_t ; <nl> <nl> class linux_message_hub_t { <nl> msg_list_t incoming_messages_ ; <nl> spinlock_t incoming_messages_lock_ ; <nl> <nl> - / * We keep one notify_t for each other message hub that we interact with . When it has <nl> - messages for us , it signals the appropriate notify_t from our set of notify_ts . We get <nl> - the notification and call push_messages on the sender in reply . * / <nl> - struct notify_t : public linux_event_callback_t <nl> - { <nl> - public : <nl> - / * message_hubs [ i ] - > notify [ j ] . on_event ( ) is called when thread # j has messages for <nl> - thread # i . * / <nl> - void on_event ( int events ) ; <nl> - <nl> - public : <nl> - system_event_t event ; / / the eventfd to notify <nl> - <nl> - / * hub - > notify [ i ] . parent = hub * / <nl> - linux_message_hub_t * parent ; <nl> - } ; <nl> - notify_t * notify_ ; <nl> + void on_event ( int events ) ; <nl> + <nl> + / / The eventfd ( or pipe - based alternative ) notified after the first incoming <nl> + / / message is put onto incoming_messages_ . <nl> + system_event_t event_ ; <nl> <nl> / * The thread that we queue messages originating from . ( Recall that there is one <nl> message_hub_t per thread . 
) * / <nl> | Made the message hub use only one eventfd per thread ( or maybe two per thread , sigh ) . | rethinkdb/rethinkdb | b7f61d60f8f5337dff6c2bbc646184c87d3b803f | 2013-08-02T06:59:52Z |
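The rethinkdb/rethinkdb change above collapses the per-sender notify objects into a single event per message hub and only signals it when the incoming queue goes from empty to non-empty. A portable sketch of that wake-only-on-first-message pattern is below, using a mutex and condition variable in place of the eventfd; the class and method names are hypothetical, not RethinkDB's.

```cpp
#include <condition_variable>
#include <cstdio>
#include <deque>
#include <mutex>
#include <string>
#include <thread>

// Hypothetical stand-in for the hub's incoming queue plus its single wake-up event.
class MessageHub {
public:
    void push(std::string msg) {
        bool do_wake_up;
        {
            std::lock_guard<std::mutex> lock(mutex_);
            do_wake_up = queue_.empty();       // only the first message needs a wake-up
            queue_.push_back(std::move(msg));
        }
        if (do_wake_up) cv_.notify_one();      // analogous to event_.wakey_wakey()
    }

    // Drain everything that accumulated since the last wake-up.
    std::deque<std::string> wait_and_drain() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return !queue_.empty(); });
        std::deque<std::string> out;
        out.swap(queue_);                      // analogous to append_and_clear()
        return out;
    }

private:
    std::mutex mutex_;
    std::condition_variable cv_;
    std::deque<std::string> queue_;
};

int main() {
    MessageHub hub;
    std::thread producer([&] {
        for (int i = 0; i < 5; ++i) hub.push("msg " + std::to_string(i));
    });
    producer.join();
    for (const auto& m : hub.wait_and_drain()) std::printf("%s\n", m.c_str());
    return 0;
}
```

Signalling only on the empty-to-non-empty transition keeps the number of wake-ups (and eventfd writes) proportional to batches of messages rather than to individual messages.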
mmm a / src / video_core / shader / decode / memory . cpp <nl> ppp b / src / video_core / shader / decode / memory . cpp <nl> u32 ShaderIR : : DecodeMemory ( NodeBlock & bb , u32 pc ) { <nl> } <nl> case OpCode : : Id : : RED : { <nl> UNIMPLEMENTED_IF_MSG ( instr . red . type ! = GlobalAtomicType : : U32 ) ; <nl> - UNIMPLEMENTED_IF_MSG ( instr . red . operation ! = AtomicOp : : Add ) ; <nl> const auto [ real_address , base_address , descriptor ] = <nl> TrackGlobalMemory ( bb , instr , true , true ) ; <nl> if ( ! real_address | | ! base_address ) { <nl> u32 ShaderIR : : DecodeMemory ( NodeBlock & bb , u32 pc ) { <nl> } <nl> Node gmem = MakeNode < GmemNode > ( real_address , base_address , descriptor ) ; <nl> Node value = GetRegister ( instr . gpr0 ) ; <nl> - bb . push_back ( Operation ( OperationCode : : ReduceIAdd , move ( gmem ) , move ( value ) ) ) ; <nl> + bb . push_back ( Operation ( GetAtomOperation ( instr . red . operation ) , move ( gmem ) , move ( value ) ) ) ; <nl> break ; <nl> } <nl> case OpCode : : Id : : ATOM : { <nl> | Merge pull request from ReinUsesLisp / red - op | yuzu-emu/yuzu | 623d9c47a2d1a5b32ad67d3e1d86baf9d10851ef | 2020-05-26T16:50:41Z |
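The yuzu-emu/yuzu change above stops hardcoding the RED reduction as an integer add and instead maps the instruction's operation field through a shared helper. A self-contained sketch of that enum-to-enum mapping is below; the enum values are made up for illustration and are not yuzu's real IR.

```cpp
#include <cstdio>
#include <stdexcept>

// Illustrative enums only; the real AtomicOp / OperationCode sets are larger.
enum class AtomicOp { Add, Min, Max, And, Or, Xor };
enum class OperationCode { ReduceIAdd, ReduceIMin, ReduceIMax, ReduceIAnd, ReduceIOr, ReduceIXor };

// One switch shared by every opcode that carries an atomic-operation field,
// instead of assuming Add at each decode site.
OperationCode GetAtomOperation(AtomicOp op) {
    switch (op) {
    case AtomicOp::Add: return OperationCode::ReduceIAdd;
    case AtomicOp::Min: return OperationCode::ReduceIMin;
    case AtomicOp::Max: return OperationCode::ReduceIMax;
    case AtomicOp::And: return OperationCode::ReduceIAnd;
    case AtomicOp::Or:  return OperationCode::ReduceIOr;
    case AtomicOp::Xor: return OperationCode::ReduceIXor;
    }
    throw std::logic_error("unhandled atomic operation");
}

int main() {
    std::printf("Min maps to operation code %d\n",
                static_cast<int>(GetAtomOperation(AtomicOp::Min)));
    return 0;
}
```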
mmm a / cocos / cocos2d . h <nl> ppp b / cocos / cocos2d . h <nl> THE SOFTWARE . <nl> <nl> # if ( CC_TARGET_PLATFORM = = CC_PLATFORM_WINRT ) <nl> # include " platform / winrt / CCApplication . h " <nl> - # include " platform / winrt / CCGLViewImpl . h " <nl> + # include " platform / winrt / CCGLViewImpl - winrt . h " <nl> # include " platform / winrt / CCGL . h " <nl> # include " platform / winrt / CCStdC . h " <nl> - # include " platform / winrt / CCPrecompiledShaders . h " <nl> # endif / / CC_TARGET_PLATFORM = = CC_PLATFORM_WINRT <nl> <nl> # if ( CC_TARGET_PLATFORM = = CC_PLATFORM_WP8 ) <nl> | updated winrt included | cocos2d/cocos2d-x | d3aa4c04756d70b51f61c01ca75c201cdfe27f03 | 2014-10-14T21:22:41Z |
mmm a / folly / io / Compression . cpp <nl> ppp b / folly / io / Compression . cpp <nl> std : : unique_ptr < IOBuf > NoCompressionCodec : : doUncompress ( <nl> return data - > clone ( ) ; <nl> } <nl> <nl> + # if ( FOLLY_HAVE_LIBLZ4 | | FOLLY_HAVE_LIBLZMA ) <nl> + <nl> namespace { <nl> <nl> void encodeVarintToIOBuf ( uint64_t val , folly : : IOBuf * out ) { <nl> inline uint64_t decodeVarintFromCursor ( folly : : io : : Cursor & cursor ) { <nl> <nl> } / / namespace <nl> <nl> + # endif / / FOLLY_HAVE_LIBLZ4 | | FOLLY_HAVE_LIBLZMA <nl> + <nl> # if FOLLY_HAVE_LIBLZ4 <nl> <nl> / * * <nl> | add preprocessor to hide unused functions | facebook/folly | 832c3155276738e038fbaef30d140c56d8e6c919 | 2015-03-03T03:28:27Z |
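The facebook/folly change above wraps helpers that are only referenced by the LZ4/LZMA codecs in a combined feature guard, so builds without either library do not emit unused-function warnings (or errors under -Werror). A generic sketch of the pattern follows, with invented macro and function names rather than folly's.

```cpp
#include <cstdint>
#include <cstdio>
#include <string>

// Invented feature macros standing in for FOLLY_HAVE_LIBLZ4 / FOLLY_HAVE_LIBLZMA.
#define DEMO_HAVE_CODEC_A 1
#define DEMO_HAVE_CODEC_B 0

#if (DEMO_HAVE_CODEC_A || DEMO_HAVE_CODEC_B)
namespace {
// Helper shared by both codecs; compiled only when at least one codec is enabled,
// so it can never end up defined-but-unused.
std::string encodeLength(std::uint64_t n) {
    return "len=" + std::to_string(n);
}
}  // namespace
#endif  // DEMO_HAVE_CODEC_A || DEMO_HAVE_CODEC_B

#if DEMO_HAVE_CODEC_A
void compressWithA(std::uint64_t payloadSize) {
    std::printf("codec A header: %s\n", encodeLength(payloadSize).c_str());
}
#endif

int main() {
#if DEMO_HAVE_CODEC_A
    compressWithA(42);
#else
    std::printf("no codec enabled\n");
#endif
    return 0;
}
```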
mmm a / README . md <nl> ppp b / README . md <nl> step 2 : start srs < br / > <nl> < / pre > <nl> step 3 ( optional ) : start nginx for HLS < br / > <nl> < pre > <nl> - sudo . / objs / nginx / _release / sbin / nginx <nl> + sudo . / objs / nginx / sbin / nginx <nl> < / pre > <nl> step 4 : publish live stream < br / > <nl> < pre > <nl> new file mode 100644 <nl> index 000000000 . . 09b1ef6e2 <nl> Binary files / dev / null and b / trunk / 3rdparty / nginx - 1 . 5 . 0 . zip differ <nl> mmm a / trunk / conf / srs . conf <nl> ppp b / trunk / conf / srs . conf <nl> vhost __defaultVhost__ { <nl> enabled on ; <nl> gop_cache on ; <nl> hls on ; <nl> - hls_path . / hls ; <nl> + hls_path . / objs / nginx / html ; <nl> } <nl> # the vhost disabled . <nl> vhost removed . vhost . com { <nl> vhost no - hls . vhost . com { <nl> # default : on <nl> hls on ; <nl> # the hls output path . <nl> - # default : . / hls <nl> - hls_path / data / nginx / html / hls ; <nl> + # the app dir is auto created under the hls_path . <nl> + # for example , for rtmp stream : <nl> + # rtmp : / / 127 . 0 . 0 . 1 / live / livestream <nl> + # http : / / 127 . 0 . 0 . 1 / live / livestream . m3u8 <nl> + # where hls_path is / hls , srs will create the following files : <nl> + # / hls / live the app dir for all streams . <nl> + # / hls / live / livestream . m3u8 the HLS m3u8 file . <nl> + # / hls / live / livestream - 1 . ts the HLS media / ts file . <nl> + # in a word , the hls_path is for vhost . <nl> + # default : . / objs / nginx / html <nl> + hls_path / data / nginx / html ; <nl> } <nl> # the vhost with hls disabled . <nl> vhost no - hls . vhost . com { <nl> mmm a / trunk / configure <nl> ppp b / trunk / configure <nl> ret = $ ? ; if [ [ $ ret - ne 0 ] ] ; then echo " build http - parser - 2 . 1 failed , ret = $ ret " ; <nl> if [ [ ! - f $ { GLOBAL_DIR_OBJS } / http - parser - 2 . 1 / http_parser . h ] ] ; then echo " build http - parser - 2 . 1 failed " ; exit - 1 ; fi <nl> if [ [ ! - f $ { GLOBAL_DIR_OBJS } / http - parser - 2 . 1 / libhttp_parser . a ] ] ; then echo " build http - parser - 2 . 1 failed " ; exit - 1 ; fi <nl> <nl> + # nginx for HLS , nginx - 1 . 5 . 0 <nl> + if [ [ - f $ { GLOBAL_DIR_OBJS } / nginx - 1 . 5 . 0 / _release / sbin / nginx ] ] ; then <nl> + echo " nginx - 1 . 5 . 0 is ok . " ; <nl> + else <nl> + echo " build nginx - 1 . 5 . 0 " ; <nl> + ( <nl> + rm - rf $ { GLOBAL_DIR_OBJS } / nginx - 1 . 5 . 0 & & cd $ { GLOBAL_DIR_OBJS } & & <nl> + unzip - q . . / 3rdparty / nginx - 1 . 5 . 0 . zip & & cd nginx - 1 . 5 . 0 & & <nl> + . / configure - - prefix = ` pwd ` / _release & & make & & make install & & <nl> + ln - sf ` pwd ` / _release . . / nginx <nl> + ) <nl> + fi <nl> + # check status <nl> + ret = $ ? ; if [ [ $ ret - ne 0 ] ] ; then echo " build nginx - 1 . 5 . 0 failed , ret = $ ret " ; exit $ ret ; fi <nl> + if [ ! - f $ { GLOBAL_DIR_OBJS } / nginx - 1 . 5 . 0 / _release / sbin / nginx ] ; then echo " build nginx - 1 . 5 . 0 failed . " ; exit - 1 ; fi <nl> + <nl> + # use current user to config nginx , <nl> + # srs will write ts / m3u8 file use current user , <nl> + # nginx default use nobody , so cannot read the ts / m3u8 created by srs . <nl> + cp $ { GLOBAL_DIR_OBJS } / nginx / conf / nginx . conf $ { GLOBAL_DIR_OBJS } / nginx / conf / nginx . conf . bk <nl> + sed - i " s / ^ . user nobody ; / user ` whoami ` ; / g " $ { GLOBAL_DIR_OBJS } / nginx / conf / nginx . 
conf <nl> + <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> # generate Makefile . <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> echo ' configure ok ! ' <nl> echo " you can : " <nl> echo " \ " make \ " to build the srs ( simple rtmp server ) . " <nl> echo " \ " make help \ " to get the usage of make " <nl> + echo " \ " sudo . / objs / nginx / sbin / nginx \ " to start the nginx http server for hls " <nl> + echo " \ " . / objs / simple_rtmp_server - c conf / srs . conf \ " to start the srs live server " <nl> mmm a / trunk / src / core / srs_core . hpp <nl> ppp b / trunk / src / core / srs_core . hpp <nl> CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE . <nl> ( void ) 0 <nl> <nl> / / current release version <nl> - # define RTMP_SIG_SRS_VERSION " 0 . 4 . 0 " <nl> + # define RTMP_SIG_SRS_VERSION " 0 . 5 . 0 " <nl> / / server info . <nl> # define RTMP_SIG_SRS_KEY " srs " <nl> # define RTMP_SIG_SRS_ROLE " origin server " <nl> mmm a / trunk / src / core / srs_core_config . hpp <nl> ppp b / trunk / src / core / srs_core_config . hpp <nl> CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE . <nl> / / default vhost for rtmp <nl> # define RTMP_VHOST_DEFAULT " __defaultVhost__ " <nl> <nl> - # define SRS_CONF_DEFAULT_HLS_PATH " . / hls " <nl> + # define SRS_CONF_DEFAULT_HLS_PATH " . / objs / nginx / html " <nl> <nl> class SrsFileBuffer <nl> { <nl> | add nginx and used as http server | ossrs/srs | 57ea56970d86879e7354e9668748e1b3f26b1d19 | 2013-11-26T08:45:50Z |
mmm a / tests / bash - bats / eosio_build . sh <nl> ppp b / tests / bash - bats / eosio_build . sh <nl> TEST_LABEL = " [ eosio_build ] " <nl> if [ [ $ NAME = ~ " Amazon Linux " ] ] | | [ [ $ NAME = = " CentOS Linux " ] ] ; then <nl> # which package isn ' t installed <nl> uninstall - package which WETRUN & > / dev / null <nl> - run bash - c " printf \ " y \ nn \ nn \ n \ " | . / scripts / eosio_build . sh " <nl> + run bash - c " printf \ " y \ ny \ nn \ nn \ n \ " | . / scripts / eosio_build . sh " <nl> [ [ ! - z $ ( echo " $ { output } " | grep " EOSIO compiler checks require the ' which ' " ) ] ] | | exit <nl> - [ [ ! - z $ ( echo " $ { output } " | grep " Please install the ' which ' " ) ] ] | | exit <nl> fi <nl> <nl> if [ [ $ ARCH = = " Linux " ] ] ; then <nl> if [ [ $ NAME = = " CentOS Linux " ] ] ; then # Centos has the SCL prompt before checking for the compiler <nl> # No c + + ! <nl> - run bash - c " printf \ " y \ ny \ nn \ n \ " | . / $ { SCRIPT_LOCATION } " <nl> + run bash - c " printf \ " y \ ny \ ny \ nn \ n \ " | . / $ { SCRIPT_LOCATION } " <nl> else <nl> # No c + + ! <nl> - run bash - c " printf \ " y \ nn \ nn \ n \ " | . / $ { SCRIPT_LOCATION } " <nl> + run bash - c " printf \ " y \ ny \ ny \ nn \ nn \ n \ " | . / $ { SCRIPT_LOCATION } " <nl> fi <nl> [ [ ! - z $ ( echo " $ { output } " | grep " Unable to find compiler " ) ] ] | | exit <nl> fi <nl> <nl> - # - P with - y <nl> - cd . . # Also test that we can run the script from a directory other than the root <nl> - run bash - c " . / eos / $ SCRIPT_LOCATION - y - P " <nl> + cd . / scripts # Also test that we can run the script from a directory other than the root <nl> + run bash - c " . / eosio_build . sh - y - P " <nl> [ [ ! - z $ ( echo " $ { output } " | grep " PIN_COMPILER : true " ) ] ] | | exit <nl> [ [ " $ { output } " = ~ - DCMAKE_TOOLCHAIN_FILE = \ ' . * / scripts / . . / build / pinned_toolchain . cmake \ ' ] ] | | exit <nl> [ [ " $ { output } " = ~ " Clang 8 successfully installed " ] ] | | exit <nl> - cd eos <nl> # - P with prompts <nl> + cd . . <nl> run bash - c " printf \ " y \ nn \ nn \ nn \ n \ " | . / $ SCRIPT_LOCATION - P " <nl> [ [ " $ { output } " = ~ . * User . aborted . * ] ] | | exit <nl> # lack of - m <nl> [ [ ! - z $ ( echo " $ { output } " | grep " ENABLE_MONGO : false " ) ] ] | | exit <nl> [ [ ! - z $ ( echo " $ { output } " | grep " INSTALL_MONGO : false " ) ] ] | | exit <nl> # lack of - i <nl> - # [ [ ! - z $ ( echo " $ { output } " | grep " INSTALL_LOCATION : $ { HOME } " ) ] ] | | exit <nl> [ [ ! - z $ ( echo " $ { output } " | grep " EOSIO_INSTALL_DIR : $ { HOME } / eosio / $ { EOSIO_VERSION } " ) ] ] | | exit <nl> # # - o <nl> run bash - c " printf \ " y \ ny \ nn \ nn \ n \ " | . / $ SCRIPT_LOCATION - o Debug - P " <nl> mmm a / tests / bash - bats / eosio_build_amazonlinux . sh <nl> ppp b / tests / bash - bats / eosio_build_amazonlinux . sh <nl> export TEST_LABEL = " [ eosio_build_amazonlinux ] " <nl> run bash - c " printf \ " y \ n % . 0s \ " { 1 . . 100 } | . / $ SCRIPT_LOCATION - P - i / NEWPATH " <nl> [ [ ! - z $ ( echo " $ { output } " | grep " Executing : make - j $ { JOBS } " ) ] ] | | exit <nl> # # # Make sure deps are loaded properly <nl> - [ [ ! - z $ ( echo " $ { output } " | grep " Executing : cd $ { SRC_DIR } " ) ] ] | | exit <nl> + [ [ ! - z $ ( echo " $ { output } " | grep " Executing : cd / NEWPATH / src " ) ] ] | | exit <nl> [ [ ! - z $ ( echo " $ { output } " | grep " Starting EOSIO Dependency Install " ) ] ] | | exit <nl> [ [ ! 
- z $ ( echo " $ { output } " | grep " Executing : eval / usr / bin / yum - y update " ) ] ] | | exit <nl> if [ [ $ NAME = = " Amazon Linux " ] ] ; then <nl> export TEST_LABEL = " [ eosio_build_amazonlinux ] " <nl> [ [ - z $ ( echo " $ { output } " | grep " MongoDB C + + driver successfully installed " ) ] ] | | exit # Mongo is off <nl> # Ensure PIN_COMPILER = false uses proper flags for the various installs <nl> install - package gcc - c + + WETRUN <nl> + install - package clang WETRUN <nl> run bash - c " . / $ SCRIPT_LOCATION - y " <nl> [ [ ! - z $ ( echo " $ { output } " | grep " - G ' Unix Makefiles ' " ) ] ] | | exit # CMAKE <nl> [ [ ! - z $ ( echo " $ { output } " | grep " - - with - iostreams - - with - date_time " ) ] ] | | exit # BOOST <nl> uninstall - package gcc - c + + WETRUN <nl> + uninstall - package clang WETRUN <nl> } <nl> \ No newline at end of file <nl> mmm a / tests / bash - bats / eosio_build_centos . sh <nl> ppp b / tests / bash - bats / eosio_build_centos . sh <nl> export TEST_LABEL = " [ eosio_build_centos ] " <nl> # Ensure SCL and devtoolset - 8 for c + + binary installation <nl> run bash - c " printf \ " y \ n % . 0s \ " { 1 . . 100 } | . / $ { SCRIPT_LOCATION } - i / NEWPATH " <nl> [ [ ! - z $ ( echo " $ { output } " | grep " centos - release - scl - 2 - 3 . el7 . centos . noarch found " ) ] ] | | exit <nl> - [ [ ! - z $ ( echo " $ { output } " | grep " devtoolset - 8 - 8 . 0 - 2 . el7 . 0 . 1 . x86_64 found " ) ] ] | | exit <nl> + [ [ ! - z $ ( echo " $ { output } " | grep " devtoolset - 8 - 8 . 0 - 2 . el7 . x86_64 found " ) ] ] | | exit <nl> [ [ ! - z $ ( echo " $ { output } " | grep " Executing : source / opt / rh / devtoolset - 8 / enable " ) ] ] | | exit <nl> [ [ ! - z $ ( echo " $ { output } " | grep " Executing : make - j $ { JOBS } " ) ] ] | | exit <nl> [ [ ! - z $ ( echo " $ { output } " | grep " Starting EOSIO Dependency Install " ) ] ] | | exit <nl> export TEST_LABEL = " [ eosio_build_centos ] " <nl> [ [ ! - z $ ( echo " $ { output } " | grep " EOSIO has been successfully built " ) ] ] | | exit <nl> uninstall - package devtoolset - 8 * WETRUN & > / dev / null <nl> uninstall - package centos - release - scl WETRUN & > / dev / null <nl> - <nl> } <nl> \ No newline at end of file <nl> mmm a / tests / bash - bats / eosio_build_darwin . sh <nl> ppp b / tests / bash - bats / eosio_build_darwin . sh <nl> export TEST_LABEL = " [ eosio_build_darwin ] " <nl> # # # Make sure deps are loaded properly <nl> [ [ ! - z $ ( echo " $ { output } " | grep " Starting EOSIO Dependency Install " ) ] ] | | exit <nl> [ [ ! - z $ ( echo " $ { output } " | grep " Executing : / usr / bin / xcode - select - - install " ) ] ] | | exit <nl> - # [ [ ! - z $ ( echo " $ { output } " | grep " automake . * NOT . * found " ) ] ] | | exit <nl> [ [ - z $ ( echo " $ { output } " | grep " - NOT found . " ) ] ] | | exit <nl> rm - f $ CMAKE <nl> [ [ ! - z $ ( echo " $ { output } " | grep " [ Updating HomeBrew ] " ) ] ] | | exit <nl> mmm a / tests / bash - bats / eosio_build_ubuntu . sh <nl> ppp b / tests / bash - bats / eosio_build_ubuntu . sh <nl> export TEST_LABEL = " [ eosio_build_ubuntu ] " <nl> set_system_vars # Obtain current machine ' s resources and set the necessary variables ( like JOBS , etc ) <nl> <nl> # Testing clang already existing ( no pinning of clang8 ) <nl> - [ [ " $ ( echo $ { VERSION_ID } ) " = = " 16 . 
04 " ] ] & & install - package clang WETRUN & > / dev / null | | install - package build - essential WETRUN <nl> + install - package clang WETRUN & > / dev / null <nl> run bash - c " printf \ " y \ n % . 0s \ " { 1 . . 100 } | . / $ SCRIPT_LOCATION - i / NEWPATH " <nl> <nl> [ [ ! - z $ ( echo " $ { output } " | grep " Executing : make - j $ { JOBS } " ) ] ] | | exit <nl> mmm a / tests / bash - bats / modules / clang . sh <nl> ppp b / tests / bash - bats / modules / clang . sh <nl> load . . / helpers / functions <nl> @ test " $ { TEST_LABEL } > Testing CLANG " { <nl> <nl> if [ [ $ NAME = = " Darwin " ] ] ; then <nl> - run bash - c " printf \ " y \ n % . 0s \ " { 1 . . 100 } | . / $ SCRIPT_LOCATION " <nl> + run bash - c " printf \ " y \ n % . 0s \ " { 1 . . 100 } | . / $ SCRIPT_LOCATION - i / NEWPATH " <nl> # # CLANG already exists ( c + + / default ) <nl> [ [ ! - z $ ( echo " $ { output } " | grep " PIN_COMPILER : true " ) ] ] | | exit <nl> [ [ ! - z $ ( echo " $ { output } " | grep " DCMAKE_CXX_COMPILER = ' c + + ' " ) ] ] | | exit <nl> [ [ ! - z $ ( echo " $ { output } " | grep " DCMAKE_C_COMPILER = ' cc ' " ) ] ] | | exit <nl> elif [ [ $ NAME = = " Ubuntu " ] ] ; then <nl> install - package build - essential WETRUN 1 > / dev / null # ubuntu 18 build - essential will be high enough , 16 won ' t and has a version < 7 <nl> - run bash - c " printf \ " y \ n % . 0s \ " { 1 . . 100 } | . / $ SCRIPT_LOCATION " <nl> + run bash - c " printf \ " y \ n % . 0s \ " { 1 . . 100 } | . / $ SCRIPT_LOCATION - i / NEWPATH " <nl> # # CLANG already exists ( c + + / default ) ( Ubuntu doesn ' t have clang > 7 , so we need to make sure it installs Clang 8 ) <nl> [ [ ! - z $ ( echo " $ { output } " | grep " PIN_COMPILER : false " ) ] ] | | exit <nl> - if [ [ $ VERSION_ID = = " 16 . 04 " ] ] ; then <nl> - [ [ ! - z $ ( echo " $ { output } " | grep " Unable to find C + + 17 support " ) ] ] | | exit <nl> - [ [ ! - z $ ( echo " $ { output } " | grep " Clang 8 successfully installed " ) ] ] | | exit <nl> - [ [ ! - z $ ( echo " $ { output } " | grep " $ CLANG_ROOT " ) ] ] | | exit <nl> - fi <nl> + # if [ [ $ VERSION_ID = = " 16 . 04 " ] ] ; then <nl> + # [ [ ! - z $ ( echo " $ { output } " | grep " Unable to find compiler " ) ] ] | | exit <nl> + # [ [ ! - z $ ( echo " $ { output } " | grep " Clang 8 successfully installed " ) ] ] | | exit <nl> + # [ [ ! - z $ ( echo " $ { output } " | grep " $ CLANG_ROOT " ) ] ] | | exit <nl> + # fi <nl> # # CLANG <nl> uninstall - package build - essential WETRUN 1 > / dev / null <nl> run bash - c " . / $ SCRIPT_LOCATION - y - P " <nl> mmm a / tests / bash - bats / modules / mongodb . sh <nl> ppp b / tests / bash - bats / modules / mongodb . sh <nl> load . . / helpers / functions <nl> @ test " $ { TEST_LABEL } > MongoDB " { <nl> # Existing MongoDB <nl> if [ [ $ NAME = = " CentOS Linux " ] ] | | [ [ $ NAME = = " Amazon Linux " ] ] ; then <nl> - run bash - c " printf \ " y \ yn \ nn \ ny \ ny \ ny \ n \ " | . / $ SCRIPT_LOCATION - m - P " # which prompt requires first y <nl> + run bash - c " printf \ " y \ ny \ nn \ ny \ ny \ ny \ n \ " | . / $ SCRIPT_LOCATION - m - P " # which prompt requires first y <nl> else <nl> run bash - c " printf \ " y \ nn \ nn \ ny \ ny \ n \ " | . / $ SCRIPT_LOCATION - m - P " <nl> fi <nl> [ [ ! 
- z $ ( echo " $ { output } " | grep " Existing MongoDB will be used " ) ] ] | | exit <nl> [ [ - z $ ( echo " $ { output } " | grep " Ensuring MongoDB installation " ) ] ] | | exit <nl> # Installing ours <nl> - run bash - c " printf \ " y \ ny \ ny \ ny \ ny \ n \ " | . / $ SCRIPT_LOCATION - m - P " <nl> + run bash - c " printf \ " y \ ny \ ny \ ny \ ny \ ny \ n \ " | . / $ SCRIPT_LOCATION - m - P " <nl> [ [ - z $ ( echo " $ { output } " | grep " Existing MongoDB will be used " ) ] ] | | exit <nl> [ [ ! - z $ ( echo " $ { output } " | grep " Ensuring MongoDB installation " ) ] ] | | exit <nl> } <nl> mmm a / tests / bash - bats / modules / root - user . sh <nl> ppp b / tests / bash - bats / modules / root - user . sh <nl> <nl> load . . / helpers / functions <nl> <nl> @ test " $ { TEST_LABEL } > Testing root user run " { <nl> - run bash - c " printf \ " y \ n % . 0s \ " { 1 . . 100 } | . / $ SCRIPT_LOCATION - P " <nl> + run bash - c " printf \ " y \ n % . 0s \ " { 1 . . 100 } | . / $ SCRIPT_LOCATION - P - i / NEWPATH " <nl> [ [ ! - z $ ( echo " $ { output } " | grep " User : $ ( whoami ) " ) ] ] | | exit <nl> if [ [ $ ARCH = = " Linux " ] ] ; then <nl> [ [ - z $ ( echo " $ { output } " | grep " $ SUDO_LOCATION - E " ) ] ] | | exit <nl> fi <nl> export CURRENT_USER = test <nl> - run bash - c " printf \ " n \ n \ " | . / $ SCRIPT_LOCATION - P " <nl> + run bash - c " printf \ " y \ nn \ n \ " | . / $ SCRIPT_LOCATION - P " <nl> [ [ ! - z $ ( echo " $ { output } " | grep " User : test " ) ] ] | | exit <nl> if [ [ $ ARCH = = " Linux " ] ] ; then <nl> [ [ ! - z $ ( echo " $ { output } " | grep " Please install the ' sudo ' command before proceeding " ) ] ] | | exit <nl> fi <nl> install - package sudo WETRUN <nl> export SUDO_LOCATION = $ ( command - v sudo ) <nl> - run bash - c " printf \ " y \ n % . 0s \ " { 1 . . 100 } | . / $ SCRIPT_LOCATION - P " <nl> + run bash - c " printf \ " y \ n % . 0s \ " { 1 . . 100 } | . / $ SCRIPT_LOCATION - P - i / NEWPATH " <nl> [ [ ! - z $ ( echo " $ { output } " | grep " User : test " ) ] ] | | exit <nl> if [ [ $ ARCH = = " Linux " ] ] ; then <nl> [ [ ! - z $ ( echo " $ { output } " | grep " $ SUDO_LOCATION - E . * install - y . * " ) ] ] | | exit <nl> | Backport of BATS test fixes from develop . | EOSIO/eos | a17ed93c6f95c8fbb175ea0494fd41767cf8d455 | 2019-06-24T13:30:23Z |
mmm a / dbms / include / DB / Interpreters / ProcessList . h <nl> ppp b / dbms / include / DB / Interpreters / ProcessList . h <nl> class ProcessList <nl> parent . cont . erase ( it ) ; <nl> - - parent . cur_size ; <nl> parent . have_space . signal ( ) ; <nl> - UserToQueries : : iterator quieries = parent . userToQueries . find ( it - > user ) ; <nl> - QueryToElement : : iterator element = quieries - > second . find ( it - > query_id ) ; <nl> - quieries - > second . erase ( element ) ; <nl> + UserToQueries : : iterator queries = parent . userToQueries . find ( it - > user ) ; <nl> + if ( queries ! = parent . userToQueries . end ( ) ) <nl> + { <nl> + QueryToElement : : iterator element = queries - > second . find ( it - > query_id ) ; <nl> + if ( element ! = queries - > second . end ( ) ) <nl> + queries - > second . erase ( element ) ; <nl> + } <nl> } <nl> <nl> Element & get ( ) { return * it ; } <nl> class ProcessList <nl> if ( max_size & & cur_size > = max_size & & ( ! max_wait_milliseconds | | ! have_space . tryWait ( mutex , max_wait_milliseconds ) ) ) <nl> throw Exception ( " Too much simultaneous queries . Maximum : " + toString ( max_size ) , ErrorCodes : : TOO_MUCH_SIMULTANEOUS_QUERIES ) ; <nl> <nl> - UserToQueries : : iterator quieries = userToQueries . find ( user_ ) ; <nl> + UserToQueries : : iterator queries = userToQueries . find ( user_ ) ; <nl> <nl> - if ( quieries ! = userToQueries . end ( ) ) <nl> + if ( queries ! = userToQueries . end ( ) ) <nl> { <nl> - QueryToElement : : iterator element = quieries - > second . find ( query_id_ ) ; <nl> - if ( element ! = quieries - > second . end ( ) ) <nl> + QueryToElement : : iterator element = queries - > second . find ( query_id_ ) ; <nl> + if ( element ! = queries - > second . end ( ) ) <nl> { <nl> if ( ! replace_running_query ) <nl> throw Exception ( " Query with id = " + query_id_ + " is already running . " , <nl> ErrorCodes : : QUERY_ID_ALREADY_RUNNING ) ; <nl> element - > second - > is_cancelled = true ; <nl> - quieries - > second . erase ( element ) ; <nl> + queries - > second . erase ( element ) ; <nl> } <nl> } <nl> <nl> | dbms : renamed variable , more accurate erasing from hashmap [ METR - 8818 ] | ClickHouse/ClickHouse | c74a21d4fc022709555919ce4cd5fe2acf4a87d8 | 2014-02-12T17:44:48Z |
mmm a / include / swift / AST / Diagnostics . def <nl> ppp b / include / swift / AST / Diagnostics . def <nl> NOTE ( note_while_inlining , sil_analysis , none , <nl> ERROR ( integer_literal_overflow , sil_gen , none , <nl> " integer literal overflows when stored into % 0 " , <nl> ( Type ) ) <nl> + WARNING ( integer_literal_overflow_warn , sil_gen , none , <nl> + " integer literal overflows when stored into % 0 " , <nl> + ( Type ) ) <nl> ERROR ( arithmetic_operation_overflow , sil_analysis , none , <nl> " arithmetic operation ' % 0 % 1 % 2 ' ( on type % 3 ) results in an overflow " , <nl> ( StringRef , StringRef , StringRef , Type ) ) <nl> mmm a / lib / SILPasses / ConstantPropagation . cpp <nl> ppp b / lib / SILPasses / ConstantPropagation . cpp <nl> static SILInstruction * constantFoldBuiltin ( ApplyInst * AI , <nl> <nl> / / Check for overflow . <nl> if ( SrcVal ! = T ) { <nl> + / / FIXME : This will prevent hard error in cases the error is comming <nl> + / / from ObjC interoperability code . Currently , we treat NSUInteger as <nl> + / / Int . <nl> + if ( Loc . getSourceLoc ( ) . isInvalid ( ) ) { <nl> + diagnose ( M . getASTContext ( ) , Loc . getSourceLoc ( ) , <nl> + diag : : integer_literal_overflow_warn , <nl> + CE ? CE - > getType ( ) : DestTy ) ; <nl> + return nullptr ; <nl> + } <nl> diagnose ( M . getASTContext ( ) , Loc . getSourceLoc ( ) , <nl> diag : : integer_literal_overflow , <nl> CE ? CE - > getType ( ) : DestTy ) ; <nl> | Demote the integer literal overflow error into warning in some cases . | apple/swift | 65a4766c2975131d965c999bb3a7b2b1f0cebc1b | 2013-10-16T00:56:26Z |
new file mode 100644 <nl> index 00000000000 . . ec8f5735957 <nl> mmm / dev / null <nl> ppp b / CONTRIBUTING . md <nl> <nl> + # Contributing to ClickHouse <nl> + <nl> + # # Technical info <nl> + Developer guide for writing code for ClickHouse is published on official website alongside the usage and operations documentation : <nl> + https : / / clickhouse . yandex / docs / en / development / index . html <nl> + <nl> + # # Legal info <nl> + <nl> + In order for us ( YANDEX LLC ) to accept patches and other contributions from you , you will have to adopt our Yandex Contributor License Agreement ( the " * * CLA * * " ) . The current version of the CLA you may find here : <nl> + 1 ) https : / / yandex . ru / legal / cla / ? lang = en ( in English ) and <nl> + 2 ) https : / / yandex . ru / legal / cla / ? lang = ru ( in Russian ) . <nl> + <nl> + By adopting the CLA , you state the following : <nl> + <nl> + * You obviously wish and are willingly licensing your contributions to us for our open source projects under the terms of the CLA , <nl> + * You has read the terms and conditions of the CLA and agree with them in full , <nl> + * You are legally able to provide and license your contributions as stated , <nl> + * We may use your contributions for our open source projects and for any other our project too , <nl> + * We rely on your assurances concerning the rights of third parties in relation to your contributes . <nl> + <nl> + If you agree with these principles , please read and adopt our CLA . By providing us your contributions , you hereby declare that you has already read and adopt our CLA , and we may freely merge your contributions with our corresponding open source project and use it in further in accordance with terms and conditions of the CLA . <nl> + <nl> + If you have already adopted terms and conditions of the CLA , you are able to provide your contributes . When you submit your pull request , please add the following information into it : <nl> + <nl> + ` ` ` <nl> + I hereby agree to the terms of the CLA available at : [ link ] . <nl> + ` ` ` <nl> + <nl> + Replace the bracketed text as follows : <nl> + * [ link ] is the link at the current version of the CLA ( you may add here a link https : / / yandex . ru / legal / cla / ? lang = en ( in English ) or a link https : / / yandex . ru / legal / cla / ? lang = ru ( in Russian ) . <nl> + <nl> + It is enough to provide us such notification at once . <nl> | Initial CONTRIBUTING . md | ClickHouse/ClickHouse | 2a2d5b64481dcac5bf25f0e241e53911f0c72fd8 | 2017-06-19T18:42:54Z |
mmm a / ports / v - hacd / CONTROL <nl> ppp b / ports / v - hacd / CONTROL <nl> <nl> Source : v - hacd <nl> Version : 3 . 2 . 0 <nl> + Port - Version : 1 <nl> Homepage : https : / / github . com / kmammou / v - hacd <nl> Description : The V - HACD library decomposes a 3D surface into a set of " near " convex parts . <nl> Supports : ! arm <nl> mmm a / ports / v - hacd / fix - cmake . patch <nl> ppp b / ports / v - hacd / fix - cmake . patch <nl> index 46fc1b1 . . 2260fdc 100644 <nl> mmm a / src / VHACD_Lib / CMakeLists . txt <nl> ppp b / src / VHACD_Lib / CMakeLists . txt <nl> - <nl> + <nl> + cmake_minimum_required ( VERSION 3 . 10 . 0 ) <nl> + <nl> project ( VHACD_LIB CXX C ) <nl> <nl> - <nl> target_include_directories ( vhacd PUBLIC <nl> - + $ < BUILD_INTERFACE : $ { CMAKE_CURRENT_SOURCE_DIR } / public / > ) <nl> + + $ < BUILD_INTERFACE : $ { CMAKE_CURRENT_SOURCE_DIR } / public / > <nl> + + $ < INSTALL_INTERFACE : include > ) <nl> + <nl> + target_include_directories ( vhacd PRIVATE <nl> $ < BUILD_INTERFACE : $ { CMAKE_CURRENT_SOURCE_DIR } / inc / > <nl> - $ < INSTALL_INTERFACE : include / > # < prefix > / include / mylib <nl> + - $ < INSTALL_INTERFACE : include / > # < prefix > / include / mylib <nl> ) <nl> - install ( FILES $ { PROJECT_INL_FILES } DESTINATION include ) <nl> + <nl> + <nl> + message ( " [ VHACD ] \ t - > CMAKE_INSTALL_PREFIX " $ { CMAKE_INSTALL_PREFIX } ) <nl> + install ( TARGETS vhacd EXPORT vhacd - targets DESTINATION lib ) <nl> + - install ( FILES $ { PROJECT_INC_FILES } DESTINATION include ) <nl> + - install ( FILES $ { PROJECT_INL_FILES } DESTINATION include ) <nl> + + install ( FILES $ { CMAKE_CURRENT_SOURCE_DIR } / public / VHACD . h DESTINATION $ { CMAKE_INSTALL_PREFIX } / include / ) <nl> + + <nl> + <nl> + <nl> set ( VHACD_LIB_VERSION 3 . 2 . 0 ) <nl> include ( CMakePackageConfigHelpers ) <nl> write_basic_package_version_file ( <nl> DESTINATION <nl> $ { ConfigPackageLocation } <nl> COMPONENT <nl> + Devel <nl> + ) <nl> + <nl> + <nl> diff - - git a / src / VHACD_Lib / cmake / vhacd - config . cmake b / src / VHACD_Lib / cmake / vhacd - config . cmake <nl> index 8fc5c58 . . 7677f58 100644 <nl> mmm a / src / VHACD_Lib / cmake / vhacd - config . cmake <nl> | [ v - hacd ] Fix cmake patch for include files ( ) | microsoft/vcpkg | d1929059861f183ec8c50dc114442a08aa7b2e8d | 2020-10-09T05:15:17Z |
mmm a / tensorflow / core / profiler / BUILD <nl> ppp b / tensorflow / core / profiler / BUILD <nl> tf_proto_library ( <nl> cc_api_version = 2 , <nl> ) <nl> <nl> + tf_proto_library ( <nl> + name = " profiler_service_monitor_result_proto " , <nl> + srcs = [ " profiler_service_monitor_result . proto " ] , <nl> + cc_api_version = 2 , <nl> + ) <nl> + <nl> tf_proto_library ( <nl> name = " profiler_service_proto " , <nl> srcs = [ " profiler_service . proto " ] , <nl> has_services = 1 , <nl> cc_api_version = 2 , <nl> cc_grpc_version = 1 , <nl> - protodeps = [ " : op_profile_proto " ] + tf_additional_all_protos ( ) , <nl> + protodeps = [ <nl> + " : op_profile_proto " , <nl> + " : profiler_service_monitor_result_proto " , <nl> + ] + tf_additional_all_protos ( ) , <nl> ) <nl> <nl> tf_proto_library ( <nl> tf_proto_library ( <nl> " profiler_service . proto " , <nl> " profiler_analysis . proto " , <nl> " op_profile . proto " , <nl> + " profiler_service_monitor_result . proto " , <nl> ] , <nl> ) , <nl> cc_api_version = 2 , <nl> mmm a / tensorflow / core / profiler / profiler_service . proto <nl> ppp b / tensorflow / core / profiler / profiler_service . proto <nl> package tensorflow ; <nl> import " tensorflow / core / framework / graph . proto " ; <nl> import " tensorflow / core / profiler / op_profile . proto " ; <nl> import " tensorflow / core / protobuf / config . proto " ; <nl> + import " tensorflow / core / profiler / profiler_service_monitor_result . proto " ; <nl> <nl> / / The ProfilerService service retrieves performance information about <nl> / / the programs running on connected devices over a period of time . <nl> message MonitorRequest { <nl> message MonitorResponse { <nl> / / Properly formatted string data that can be directly returned back to user . <nl> string data = 1 ; <nl> - / / The following are the raw components used to construct field data . <nl> - / / Percentage of time when device is idle . <nl> - double device_idle_time_percent = 2 ; <nl> - / / TPU matrix unit utilization percentage . <nl> - double matrix_unit_utilization_percent = 3 ; <nl> - / / Average step time in millisecond . <nl> - double step_time_ms_avg = 4 ; <nl> - / / Minimum step time in millisecond . <nl> - double step_time_ms_min = 5 ; <nl> - / / Maximum step time in millisecond . <nl> - double step_time_ms_max = 6 ; <nl> - / / Average infeed percentage . <nl> - double infeed_percent_avg = 7 ; <nl> - / / Minimum infeed percentage . <nl> - double infeed_percent_min = 8 ; <nl> - / / Maximum infeed percentage . <nl> - double infeed_percent_max = 9 ; <nl> - <nl> - / / next - field : 10 <nl> + <nl> + / / A collection of monitoring results for each field show in data . <nl> + ProfilerServiceMonitorResult monitor_result = 2 ; <nl> + <nl> + / / next - field : 3 <nl> } <nl> new file mode 100644 <nl> index 0000000000000 . . 48ec2113e2c69 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / profiler / profiler_service_monitor_result . proto <nl> <nl> + syntax = " proto3 " ; <nl> + <nl> + package tensorflow ; <nl> + <nl> + message ProfilerServiceMonitorResult { <nl> + / / Represents the different types of responses from the profiling service . <nl> + enum ResponseType { <nl> + / / No result is returned from the profiling service . <nl> + EMPTY_RESULT = 0 ; <nl> + / / Only device utilization is available . <nl> + UTIL_ONLY = 1 ; <nl> + / / Both device utilization and device idle time are available . 
<nl> + UTIL_IDLE = 2 ; <nl> + / / Device utilization , device idle time , step time , and infeed percentage <nl> + / / are all available . <nl> + UTIL_IDLE_STEP = 3 ; <nl> + } <nl> + <nl> + / / Type of profiling responses . <nl> + ResponseType response_type = 1 ; <nl> + / / Percentage of time when device is idle . <nl> + double device_idle_time_percent = 2 ; <nl> + / / TPU matrix unit utilization percentage . <nl> + double matrix_unit_utilization_percent = 3 ; <nl> + / / Average step time in millisecond . <nl> + double step_time_ms_avg = 4 ; <nl> + / / Minimum step time in millisecond . <nl> + double step_time_ms_min = 5 ; <nl> + / / Maximum step time in millisecond . <nl> + double step_time_ms_max = 6 ; <nl> + / / Average infeed percentage . <nl> + double infeed_percent_avg = 7 ; <nl> + / / Minimum infeed percentage . <nl> + double infeed_percent_min = 8 ; <nl> + / / Maximum infeed percentage . <nl> + double infeed_percent_max = 9 ; <nl> + <nl> + / / next - field : 10 <nl> + } <nl> | Add support to TPU automatic profiler . | tensorflow/tensorflow | 7b32b175fe7e87766dce28749c16236111bea6a9 | 2019-10-16T19:22:46Z |
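Since this commit moves the raw monitoring fields out of `MonitorResponse` into the standalone `ProfilerServiceMonitorResult` message referenced via `monitor_result`, the sketch below shows how server-side code could populate the new message through the standard protobuf-generated C++ accessors. It is an illustration, not code from the TensorFlow tree; the header paths simply follow the `.proto` file names.

```cpp
// Filling the new monitor-result message via the protobuf-generated C++ API.
#include <string>

#include "tensorflow/core/profiler/profiler_service.pb.h"
#include "tensorflow/core/profiler/profiler_service_monitor_result.pb.h"

tensorflow::MonitorResponse MakeResponse(double idle_pct, double mxu_pct,
                                         double step_avg_ms) {
  tensorflow::MonitorResponse response;
  tensorflow::ProfilerServiceMonitorResult* result =
      response.mutable_monitor_result();

  // All metric groups are available here, so report UTIL_IDLE_STEP.
  result->set_response_type(
      tensorflow::ProfilerServiceMonitorResult::UTIL_IDLE_STEP);
  result->set_device_idle_time_percent(idle_pct);
  result->set_matrix_unit_utilization_percent(mxu_pct);
  result->set_step_time_ms_avg(step_avg_ms);

  // The human-readable summary still goes into the existing `data` field.
  response.set_data("device idle: " + std::to_string(idle_pct) + "%");
  return response;
}
```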
mmm a / Makefile . am <nl> ppp b / Makefile . am <nl> SUBDIRS + = src / ccmain src / api . tessdata doc unittest <nl> <nl> EXTRA_DIST = README . md LICENSE <nl> EXTRA_DIST + = aclocal . m4 config configure . ac autogen . sh <nl> - EXTRA_DIST + = tesseract . pc . in $ ( TRAINING_SUBDIR ) java doc langtests unlvtests <nl> + EXTRA_DIST + = tesseract . pc . in $ ( TRAINING_SUBDIR ) java doc <nl> EXTRA_DIST + = CMakeLists . txt tesseract . pc . cmake cmake VERSION src / vs2010 cppan . yml <nl> <nl> DIST_SUBDIRS = $ ( SUBDIRS ) $ ( TRAINING_SUBDIR ) <nl> mmm a / configure . ac <nl> ppp b / configure . ac <nl> fi <nl> <nl> # Output files <nl> AC_CONFIG_FILES ( [ Makefile tesseract . pc ] ) <nl> - AC_CONFIG_FILES ( [ langtests / Makefile ] ) <nl> AC_CONFIG_FILES ( [ src / api / Makefile ] ) <nl> AC_CONFIG_FILES ( [ src / api / tess_version . h ] ) <nl> AC_CONFIG_FILES ( [ src / arch / Makefile ] ) <nl> AC_CONFIG_FILES ( [ src / wordrec / Makefile ] ) <nl> AC_CONFIG_FILES ( [ tessdata / Makefile ] ) <nl> AC_CONFIG_FILES ( [ tessdata / configs / Makefile ] ) <nl> AC_CONFIG_FILES ( [ tessdata / tessconfigs / Makefile ] ) <nl> - AC_CONFIG_FILES ( [ unlvtests / Makefile ] ) <nl> AC_CONFIG_FILES ( [ unittest / Makefile ] ) <nl> AC_CONFIG_FILES ( [ java / Makefile ] ) <nl> AC_CONFIG_FILES ( [ java / com / Makefile ] ) <nl> deleted file mode 100644 <nl> index d9f9b7fa1 . . 000000000 <nl> mmm a / langtests / . gitignore <nl> ppp / dev / null <nl> <nl> - # <nl> - results / * <nl> deleted file mode 100644 <nl> index 2103eeef8 . . 000000000 <nl> mmm a / langtests / Makefile . am <nl> ppp / dev / null <nl> <nl> - <nl> - EXTRA_DIST = README . md <nl> - EXTRA_DIST + = frk_setup . sh <nl> - EXTRA_DIST + = frk_test . sh <nl> - EXTRA_DIST + = counttestset . sh <nl> - EXTRA_DIST + = runlangtests . sh <nl> - EXTRA_DIST + = runtestset . sh <nl> - EXTRA_DIST + = reports / * <nl> deleted file mode 100644 <nl> index 2730fd898 . . 000000000 <nl> mmm a / langtests / README . md <nl> ppp / dev / null <nl> <nl> - # Language tests . <nl> - The scripts in this directory make it possible to test Accuracy of Tesseract for different languages . <nl> - # # Setup <nl> - # # # Step 1 : If not already installed , download the modified ISRI toolkit , <nl> - make and install the tools in / usr / local / bin . <nl> - ` ` ` <nl> - git clone https : / / github . com / Shreeshrii / ocr - evaluation - tools . git <nl> - cd ~ / ocr - evaluation - tools <nl> - sudo make install <nl> - ` ` ` <nl> - # # # Step 2 : If not alrady built , Build tesseract . <nl> - Use binaries from the tesseract / src / api and tesseract / src / training directory . <nl> - # # # Step 3 <nl> - Download images and corresponding ground truth text for the language to be tested . <nl> - Each testset should have only one kind of images ( eg . tif , png , jpg etc ) . <nl> - The ground truth text files should have the same base filename with txt extension . <nl> - As needed , modify the filenames and create the ` pages ` file for each testset . <nl> - Instructions for testing Fraktur and Sanskrit languages are given below as an example . <nl> - # # Testing for Fraktur - frk and script / Fraktur <nl> - # # # Download the images and groundtruth , modify to required format . <nl> - ` ` ` <nl> - bash - x frk_setup . sh <nl> - ` ` ` <nl> - # # # Run tests for Fraktur - frk and script / Fraktur <nl> - ` ` ` <nl> - bash - x frk_test . 
sh <nl> - ` ` ` <nl> - # # Testing for Sanskrit - san and script / Devanagari <nl> - # # # Download the images and groundtruth , modify to required format . <nl> - ` ` ` <nl> - bash - x deva_setup . sh <nl> - ` ` ` <nl> - # # # Run tests <nl> - ` ` ` <nl> - bash - x deva_test . sh <nl> - ` ` ` <nl> - <nl> - # # # Notes from Nick White regarding wordacc <nl> - <nl> - If you just want to remove all lines which have 100 % recognition , <nl> - you can add a ' awk ' command like this : <nl> - <nl> - ocrevalutf8 wordacc ground . txt ocr . txt | awk ' $ 3 ! = 100 { print $ 0 } ' <nl> - results . txt <nl> - <nl> - or if you ' ve already got a results file you want to change , you can do this : <nl> - <nl> - awk ' $ 3 ! = 100 { print $ 0 } ' results . txt newresults . txt <nl> - <nl> - If you only want the last sections where things are broken down by <nl> - word , you can add a sed commend , like this : <nl> - <nl> - ocrevalutf8 wordacc ground . txt ocr . txt | sed ' / ^ Count Missed % Right $ / , $ <nl> - ! d ' | awk ' $ 3 ! = 100 { print $ 0 } ' results . txt <nl> deleted file mode 100755 <nl> index 9c3c825de . . 000000000 <nl> mmm a / langtests / counttestset . sh <nl> ppp / dev / null <nl> <nl> - # ! / bin / bash <nl> - # File : counttestset . sh <nl> - # Description : Script to count the errors on a single UNLV set . <nl> - # Author : Ray Smith <nl> - # Created : Wed Jun 13 11 : 58 : 01 PDT 2007 <nl> - # <nl> - # ( C ) Copyright 2007 , Google Inc . <nl> - # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - # you may not use this file except in compliance with the License . <nl> - # You may obtain a copy of the License at <nl> - # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - # Unless required by applicable law or agreed to in writing , software <nl> - # distributed under the License is distributed on an " AS IS " BASIS , <nl> - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - # See the License for the specific language governing permissions and <nl> - # limitations under the License . <nl> - <nl> - if [ $ # - ne 2 ] <nl> - then <nl> - echo " Usage : $ 0 pagesfile langcode " <nl> - exit 1 <nl> - fi <nl> - <nl> - pages = $ 1 <nl> - langcode = $ 2 <nl> - <nl> - imdir = $ { pages % / pages } <nl> - setname = $ { imdir # # * / } <nl> - resdir = langtests / results / $ setname <nl> - mkdir - p langtests / reports <nl> - echo " Counting on set $ setname in directory $ imdir to $ resdir " <nl> - accfiles = " " <nl> - wafiles = " " <nl> - while read page dir <nl> - do <nl> - if [ " $ dir " ] <nl> - then <nl> - srcdir = " $ imdir / $ dir " <nl> - else <nl> - srcdir = " $ imdir " <nl> - fi <nl> - echo " $ srcdir / $ page " <nl> - # Count character errors . <nl> - ocrevalutf8 accuracy " $ srcdir / $ page . txt " " $ resdir / $ page . txt " > " $ resdir / $ page . acc " <nl> - accfiles = " $ accfiles $ resdir / $ page . acc " <nl> - # Count word errors . <nl> - ocrevalutf8 wordacc - S " $ resdir / $ langcode . stopwords " " $ srcdir / $ page . txt " " $ resdir / $ page . txt " > " $ resdir / $ page . wa " <nl> - wafiles = " $ wafiles $ resdir / $ page . wa " <nl> - done < " $ pages " <nl> - <nl> - accsum $ accfiles > " langtests / results / $ setname . characc " <nl> - wordaccsum $ wafiles > " langtests / results / $ setname . wordacc " <nl> deleted file mode 100644 <nl> index 15d5d8beb . . 000000000 <nl> mmm a / langtests / deva_setup . sh <nl> ppp / dev / null <nl> <nl> - # ! 
/ bin / bash <nl> - # <nl> - mkdir - p ~ / lang - files <nl> - rm - rf ~ / lang - files / san - * <nl> - for testset in vedic fontsamples oldstyle shreelipi alphabetsamples <nl> - do <nl> - cd ~ / lang - files <nl> - mkdir - p . / san - $ testset <nl> - cp ~ / lang - deva - downloads / imagessan / $ testset / * . * . / san - $ testset / <nl> - cd . / san - $ testset / <nl> - rename s / - gt . txt / . txt / * . txt <nl> - ls - 1 * . png > pages <nl> - sed - i - e ' s / . png / / g ' pages <nl> - done <nl> - <nl> - mkdir - p ~ / lang - stopwords <nl> - cd ~ / lang - stopwords <nl> - cp ~ / lang - deva - downloads / imagessan / stopwords . txt . / san . stopwords . txt <nl> deleted file mode 100644 <nl> index 9add94b49 . . 000000000 <nl> mmm a / langtests / deva_test . sh <nl> ppp / dev / null <nl> <nl> - # ! / bin / bash <nl> - # run langtests / runlangtests . sh with the root data dir , testname , tessdata - dir , language code and image extension <nl> - <nl> - cd ~ / tesseract <nl> - <nl> - langtests / runlangtests . sh ~ / lang - files 4_fast_Devanagari . . / tessdata_fast / script Devanagari png <nl> - langtests / runlangtests . sh ~ / lang - files 4_best_int_Devanagari . . / tessdata / script Devanagari png <nl> - langtests / runlangtests . sh ~ / lang - files 4_best_Devanagari . . / tessdata_best / script Devanagari png <nl> - langtests / runlangtests . sh ~ / lang - files 4_fast_san . . / tessdata_fast san png <nl> - langtests / runlangtests . sh ~ / lang - files 4_best_int_san . . / tessdata san png <nl> - langtests / runlangtests . sh ~ / lang - files 4_best_san . . / tessdata_best san png <nl> - <nl> - langtests / runlangtests . sh ~ / lang - files 4_plus40k_san . . / tesstutorial - deva san png <nl> - <nl> - # / home / ubuntu / tesstutorial - deva / san . traineddata at n iterations <nl> - <nl> - # # # It takes a while to run . <nl> - <nl> deleted file mode 100644 <nl> index e86b6109f . . 000000000 <nl> mmm a / langtests / frk_setup . sh <nl> ppp / dev / null <nl> <nl> - # ! / bin / bash <nl> - # <nl> - mkdir - p ~ / lang - downloads <nl> - cd ~ / lang - downloads <nl> - wget - O frk - jbarth - ubhd . zip http : / / digi . ub . uni - heidelberg . de / diglitData / v / abbyy11r8 - vs - tesseract4 . zip <nl> - wget - O frk - stweil - gt . zip https : / / digi . bib . uni - mannheim . de / ~ stweil / fraktur - gt . zip <nl> - <nl> - mkdir - p ~ / lang - files <nl> - cd ~ / lang - files <nl> - unzip ~ / lang - downloads / frk - jbarth - ubhd . zip - d frk <nl> - unzip ~ / lang - downloads / frk - stweil - gt . zip - d frk <nl> - mkdir - p . / frk - ligatures <nl> - cp . / frk / abbyy - vs - tesseract / * . tif . / frk - ligatures / <nl> - cp . / frk / gt / * . txt . / frk - ligatures / <nl> - <nl> - cd . / frk - ligatures / <nl> - ls - 1 * . tif > pages <nl> - sed - i - e ' s / . tif / / g ' pages <nl> - <nl> - mkdir - p ~ / lang - stopwords <nl> - cd ~ / lang - stopwords <nl> - wget - O frk . stopwords . txt https : / / raw . githubusercontent . com / stopwords - iso / stopwords - de / master / stopwords - de . txt <nl> - <nl> - echo " Edit ~ / lang - files / stopwords / frk . stopwords . txt as wordacc uses a space delimited stopwords file , not line delimited . " <nl> deleted file mode 100644 <nl> index 0ab32821b . . 000000000 <nl> mmm a / langtests / frk_test . sh <nl> ppp / dev / null <nl> <nl> - # ! / bin / bash <nl> - # <nl> - # run langtests / runlangtests . 
sh with the root ISRI data dir , testname , tessdata - dir , language code : <nl> - <nl> - cd ~ / tesseract <nl> - langtests / runlangtests . sh ~ / lang - files 4_fast_Fraktur . . / tessdata_fast / script Fraktur tif <nl> - <nl> - langtests / runlangtests . sh ~ / lang - files 4_fast_frk . . / tessdata_fast frk tif <nl> - langtests / runlangtests . sh ~ / lang - files 4_best_int_frk . . / tessdata frk tif <nl> - langtests / runlangtests . sh ~ / lang - files 4_best_frk . . / tessdata_best frk tif <nl> - <nl> - # # # It takes a while to run . <nl> - <nl> deleted file mode 100644 <nl> index ad988e6ec . . 000000000 <nl> mmm a / langtests / reports / 4_best_Devanagari . summary <nl> ppp / dev / null <nl> <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_best_Devanagari san - alphabetsamples 2013 56 . 17 % 1323 12 . 27 % 1323 12 . 27 606 . 28s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_best_Devanagari san - fontsamples 388 94 . 82 % 87 86 . 38 % 87 86 . 38 570 . 17s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_best_Devanagari san - oldstyle 2796 59 . 93 % 523 39 . 61 % 523 39 . 61 447 . 73s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_best_Devanagari san - shreelipi 830 94 . 01 % 311 81 . 40 % 311 81 . 40 1137 . 51s <nl> deleted file mode 100644 <nl> index 0b963f682 . . 000000000 <nl> mmm a / langtests / reports / 4_best_frk . summary <nl> ppp / dev / null <nl> <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_best_frk frk - ligatures 178 94 . 73 % 100 81 . 31 % 74 75 . 17 94 . 29s <nl> deleted file mode 100644 <nl> index fe31dc7c2 . . 000000000 <nl> mmm a / langtests / reports / 4_best_int_Devanagari . summary <nl> ppp / dev / null <nl> <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_best_int_Devanagari san - alphabetsamples 2010 56 . 24 % 1321 12 . 40 % 1321 12 . 40 556 . 26s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_best_int_Devanagari san - fontsamples 396 94 . 72 % 89 86 . 07 % 89 86 . 07 524 . 07s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_best_int_Devanagari san - oldstyle 2812 59 . 70 % 523 39 . 61 % 523 39 . 61 416 . 57s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_best_int_Devanagari san - shreelipi 829 94 . 01 % 314 81 . 22 % 314 81 . 22 1087 . 02s <nl> deleted file mode 100644 <nl> index 5d2bc7767 . . 000000000 <nl> mmm a / langtests / reports / 4_best_int_frk . summary <nl> ppp / dev / null <nl> <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_best_int_frk frk - ligatures 244 92 . 78 % 109 79 . 63 % 80 73 . 15 367 . 73s <nl> deleted file mode 100644 <nl> index 3e140f1c4 . . 000000000 <nl> mmm a / langtests / reports / 4_best_int_san . summary <nl> ppp / dev / null <nl> <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_best_int_san san - alphabetsamples 2342 49 . 01 % 1353 10 . 28 % 1353 10 . 28 281 . 
60s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_best_int_san san - fontsamples 474 93 . 68 % 126 80 . 28 % 126 80 . 28 281 . 05s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_best_int_san san - oldstyle 3121 55 . 27 % 602 30 . 48 % 602 30 . 48 206 . 20s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_best_int_san san - shreelipi 1163 91 . 60 % 417 75 . 06 % 417 75 . 06 606 . 80s <nl> deleted file mode 100644 <nl> index 948f62a4f . . 000000000 <nl> mmm a / langtests / reports / 4_best_san . summary <nl> ppp / dev / null <nl> <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_best_san san - alphabetsamples 2335 49 . 16 % 1348 10 . 61 % 1348 10 . 61 300 . 24s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_best_san san - fontsamples 473 93 . 69 % 126 80 . 28 % 126 80 . 28 267 . 05s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_best_san san - oldstyle 3121 55 . 27 % 598 30 . 95 % 598 30 . 95 205 . 28s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_best_san san - shreelipi 1168 91 . 56 % 414 75 . 24 % 414 75 . 24 610 . 52s <nl> deleted file mode 100644 <nl> index 356e68828 . . 000000000 <nl> mmm a / langtests / reports / 4_fast_Devanagari . summary <nl> ppp / dev / null <nl> <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_fast_Devanagari san - alphabetsamples 2017 56 . 09 % 1317 12 . 67 % 1317 12 . 67 400 . 38s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_fast_Devanagari san - fontsamples 433 94 . 22 % 108 83 . 10 % 108 83 . 10 287 . 48s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_fast_Devanagari san - oldstyle 2883 58 . 68 % 543 37 . 30 % 543 37 . 30 289 . 85s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_fast_Devanagari san - shreelipi 750 94 . 58 % 279 83 . 31 % 279 83 . 31 813 . 19s <nl> deleted file mode 100644 <nl> index b8f8e81b7 . . 000000000 <nl> mmm a / langtests / reports / 4_fast_Fraktur . summary <nl> ppp / dev / null <nl> <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_fast_Fraktur frk - ligatures 265 92 . 16 % 116 78 . 32 % 82 72 . 48 91 . 29s <nl> deleted file mode 100644 <nl> index 42ce1bcd3 . . 000000000 <nl> mmm a / langtests / reports / 4_fast_frk . summary <nl> ppp / dev / null <nl> <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_fast_frk frk - ligatures 244 92 . 78 % 109 79 . 63 % 80 73 . 15 89 . 98s <nl> deleted file mode 100644 <nl> index e37ff3caf . . 000000000 <nl> mmm a / langtests / reports / 4_fast_san . summary <nl> ppp / dev / null <nl> <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_fast_san san - alphabetsamples 2342 49 . 01 % 1353 10 . 28 % 1353 10 . 28 276 . 73s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_fast_san san - fontsamples 474 93 . 68 % 126 80 . 28 % 126 80 . 28 278 . 
34s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_fast_san san - oldstyle 3121 55 . 27 % 602 30 . 48 % 602 30 . 48 222 . 35s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_fast_san san - shreelipi 1163 91 . 60 % 417 75 . 06 % 417 75 . 06 626 . 40s <nl> deleted file mode 100644 <nl> index e8251c99f . . 000000000 <nl> mmm a / langtests / reports / 4_plus10k_san . summary <nl> ppp / dev / null <nl> <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_plus10k_san san - alphabetsamples 1725 62 . 44 % 1112 26 . 26 % 1112 26 . 26 160 . 48s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_plus10k_san san - fontsamples 349 95 . 34 % 73 88 . 58 % 73 88 . 58 138 . 09s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_plus10k_san san - oldstyle 2818 59 . 62 % 548 36 . 72 % 548 36 . 72 120 . 83s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_plus10k_san san - shreelipi 746 94 . 61 % 279 83 . 31 % 279 83 . 31 292 . 70s <nl> deleted file mode 100644 <nl> index 640f81493 . . 000000000 <nl> mmm a / langtests / reports / 4_plus20k_san . summary <nl> ppp / dev / null <nl> <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_plus20k_san san - alphabetsamples 1441 68 . 63 % 841 44 . 23 % 841 44 . 23 156 . 57s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_plus20k_san san - fontsamples 356 95 . 25 % 75 88 . 26 % 75 88 . 26 135 . 13s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_plus20k_san san - oldstyle 2862 58 . 99 % 555 35 . 91 % 555 35 . 91 118 . 21s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_plus20k_san san - shreelipi 726 94 . 76 % 267 84 . 03 % 267 84 . 03 295 . 68s <nl> deleted file mode 100644 <nl> index febc57572 . . 000000000 <nl> mmm a / langtests / reports / 4_plus30k_san . summary <nl> ppp / dev / null <nl> <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_plus30k_san san - alphabetsamples 1656 63 . 95 % 937 37 . 86 % 937 37 . 86 615 . 62s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_plus30k_san san - fontsamples 429 94 . 28 % 89 86 . 07 % 89 86 . 07 617 . 42s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_plus30k_san san - oldstyle 2885 58 . 66 % 561 35 . 22 % 561 35 . 22 432 . 58s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_plus30k_san san - shreelipi 447 96 . 77 % 123 92 . 64 % 123 92 . 64 1081 . 29s <nl> deleted file mode 100644 <nl> index 1ead5c1f8 . . 000000000 <nl> mmm a / langtests / reports / 4_plus40k_san . summary <nl> ppp / dev / null <nl> <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_plus40k_san san - alphabetsamples 1380 69 . 95 % 775 48 . 61 % 775 48 . 61 1198 . 16s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_plus40k_san san - fontsamples 401 94 . 65 % 79 87 . 
64 % 79 87 . 64 1275 . 08s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_plus40k_san san - oldstyle 2860 59 . 01 % 534 38 . 34 % 534 38 . 34 977 . 65s <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWErrors Accuracy TimeTaken <nl> - 4_plus40k_san san - shreelipi 441 96 . 81 % 113 93 . 24 % 113 93 . 24 2301 . 53s <nl> deleted file mode 100755 <nl> index 300c68eee . . 000000000 <nl> mmm a / langtests / runlangtests . sh <nl> ppp / dev / null <nl> <nl> - # ! / bin / bash <nl> - # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> - # File : runlangtests . sh <nl> - # Description : Script to run a set of accuracy test sets for any language . <nl> - # based on runalltests . sh by Ray Smith <nl> - # Author : Shree Devi Kumar <nl> - # Created : June 09 , 2018 <nl> - # <nl> - # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - # you may not use this file except in compliance with the License . <nl> - # You may obtain a copy of the License at <nl> - # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - # Unless required by applicable law or agreed to in writing , software <nl> - # distributed under the License is distributed on an " AS IS " BASIS , <nl> - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - # See the License for the specific language governing permissions and <nl> - # limitations under the License . <nl> - # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> - if [ $ # - ne 5 ] <nl> - then <nl> - echo " Usage : $ 0 unlv - data - dir version - id tessdata - dir langcode imgext " <nl> - exit 1 <nl> - fi <nl> - <nl> - tessdata = $ 3 <nl> - lang = $ 4 <nl> - imgext = $ 5 <nl> - <nl> - # timesum computes the total cpu time <nl> - timesum ( ) { <nl> - awk ' BEGIN { <nl> - total = 0 . 0 ; <nl> - } <nl> - { <nl> - total + = $ 2 ; <nl> - } <nl> - END { <nl> - printf ( " % . 2f \ n " , total ) ; <nl> - } ' " $ 1 " <nl> - } <nl> - <nl> - imdir = " $ 1 " <nl> - vid = " $ 2 " <nl> - bindir = $ { 0 % / * } <nl> - if [ " $ bindir " = " $ 0 " ] <nl> - then <nl> - bindir = " . / " <nl> - fi <nl> - rdir = langtests / reports <nl> - if [ " $ lang " = " frk " ] | | [ " $ lang " = " Fraktur " ] <nl> - then <nl> - testsets = " frk - ligatures " <nl> - fi <nl> - if [ " $ lang " = " san " ] | | [ " $ lang " = " Devanagari " ] <nl> - then <nl> - testsets = " san - fontsamples san - oldstyle san - shreelipi san - alphabetsamples " <nl> - # # # testsets = " san - fontsamples " <nl> - fi <nl> - <nl> - totalerrs = 0 <nl> - totalwerrs = 0 <nl> - totalnswerrs = 0 <nl> - for set in $ testsets <nl> - do <nl> - resdir = langtests / results / $ set <nl> - mkdir - p " $ resdir " <nl> - cp ~ / lang - stopwords / frk . stopwords . txt " $ resdir / $ lang . stopwords " <nl> - if [ - r " $ imdir / $ set / pages " ] <nl> - then <nl> - # Run tesseract on all the pages . <nl> - $ bindir / runtestset . sh " $ imdir / $ set / pages " " $ tessdata " " $ lang " " $ imgext " <nl> - # Count the errors on all the pages . <nl> - $ bindir / counttestset . sh " $ imdir / $ set / pages " $ lang <nl> - # Get the new character word and nonstop word errors and accuracy . <nl> - cherrs = $ ( head - 4 " langtests / results / $ set . 
characc " | tail - 1 | cut - c1 - 9 | <nl> - tr - d ' [ : blank : ] ' ) <nl> - chacc = $ ( head - 5 " langtests / results / $ set . characc " | tail - 1 | cut - c1 - 9 | <nl> - tr - d ' [ : blank : ] ' ) <nl> - wderrs = $ ( head - 4 " langtests / results / $ set . wordacc " | tail - 1 | cut - c1 - 9 | <nl> - tr - d ' [ : blank : ] ' ) <nl> - wdacc = $ ( head - 5 " langtests / results / $ set . wordacc " | tail - 1 | cut - c1 - 9 | <nl> - tr - d ' [ : blank : ] ' ) <nl> - nswderrs = $ ( grep Total " langtests / results / $ set . wordacc " | head - 2 | tail - 1 | <nl> - cut - c10 - 17 | tr - d ' [ : blank : ] ' ) <nl> - nswdacc = $ ( grep Total " langtests / results / $ set . wordacc " | head - 2 | tail - 1 | <nl> - cut - c19 - 26 | tr - d ' [ : blank : ] ' ) <nl> - <nl> - sumfile = $ rdir / $ vid . $ set . sum <nl> - if [ - r " langtests / results / $ set . times " ] <nl> - then <nl> - total_time = $ ( timesum " langtests / results / $ set . times " ) <nl> - else <nl> - total_time = ' 0 . 0 ' <nl> - fi <nl> - echo " RELEASE TestSet CharErrors Accuracy WordErrors Accuracy \ <nl> - NonStopWErrors Accuracy TimeTaken " > " $ sumfile " <nl> - echo " $ vid $ set $ cherrs $ chacc $ wderrs $ wdacc \ <nl> - $ nswderrs $ nswdacc $ { total_time } s " > > " $ sumfile " <nl> - fi <nl> - done <nl> - <nl> - cat " $ rdir / $ vid " . * . sum > " $ rdir / $ vid " . summary <nl> - <nl> - mv " $ rdir / $ vid " . * . sum langtests / results / <nl> - cat " $ rdir / $ vid " . summary <nl> deleted file mode 100755 <nl> index 3771e79d9 . . 000000000 <nl> mmm a / langtests / runtestset . sh <nl> ppp / dev / null <nl> <nl> - # ! / bin / bash <nl> - # File : runtestset . sh <nl> - # Description : Script to run tesseract on a single UNLV set . <nl> - # Author : Ray Smith <nl> - # Created : Wed Jun 13 10 : 13 : 01 PDT 2007 <nl> - # <nl> - # ( C ) Copyright 2007 , Google Inc . <nl> - # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - # you may not use this file except in compliance with the License . <nl> - # You may obtain a copy of the License at <nl> - # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - # Unless required by applicable law or agreed to in writing , software <nl> - # distributed under the License is distributed on an " AS IS " BASIS , <nl> - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - # See the License for the specific language governing permissions and <nl> - # limitations under the License . <nl> - <nl> - if [ $ # - ne 4 ] <nl> - then <nl> - echo " Usage : $ 0 pagesfile tessdata - dir langcode imgext " <nl> - exit 1 <nl> - fi <nl> - <nl> - tess = " time - f % U - o times . txt . / src / api / tesseract " <nl> - <nl> - tessdata = $ 2 <nl> - langcode = $ 3 <nl> - imgext = $ 4 <nl> - pages = $ 1 <nl> - imdir = $ { pages % / pages } <nl> - setname = $ { imdir # # * / } <nl> - <nl> - config = " " <nl> - resdir = langtests / results / $ setname <nl> - <nl> - echo - e " Testing on set $ setname in directory $ imdir to $ resdir \ n " <nl> - mkdir - p " $ resdir " <nl> - rm - f " langtests / results / $ setname . times " <nl> - while read page dir <nl> - do <nl> - # A pages file may be a list of files with subdirs or maybe just <nl> - # a plain list of files so accommodate both . <nl> - if [ " $ dir " ] <nl> - then <nl> - srcdir = " $ imdir / $ dir " <nl> - else <nl> - srcdir = " $ imdir " <nl> - fi <nl> - echo " $ srcdir / $ page " <nl> - $ tess " $ srcdir / $ page . 
$ imgext " " $ resdir / $ page " - - tessdata - dir $ tessdata - - oem 1 - l $ langcode - - psm 6 $ config 2 > & 1 | grep - v " OCR Engine " | grep - v " Page 1 " <nl> - if [ - r times . txt ] <nl> - then <nl> - read t < times . txt <nl> - echo " $ page $ t " > > " langtests / results / $ setname . times " <nl> - echo - e " \ 033M $ page $ t " <nl> - if [ " $ t " = " Command terminated by signal 2 " ] <nl> - then <nl> - exit 0 <nl> - fi <nl> - fi <nl> - done < " $ pages " <nl> deleted file mode 100644 <nl> index 68471bf27 . . 000000000 <nl> mmm a / unlvtests / Makefile . am <nl> ppp / dev / null <nl> <nl> - <nl> - EXTRA_DIST = README . md <nl> - EXTRA_DIST + = counttestset . sh <nl> - EXTRA_DIST + = runalltests . sh <nl> - EXTRA_DIST + = runalltests_spa . sh <nl> - EXTRA_DIST + = runtestset . sh <nl> - EXTRA_DIST + = reports / 1995 . bus . 3B . sum <nl> - EXTRA_DIST + = reports / 1995 . doe3 . 3B . sum <nl> - EXTRA_DIST + = reports / 1995 . mag . 3B . sum <nl> - EXTRA_DIST + = reports / 1995 . news . 3B . sum <nl> - EXTRA_DIST + = reports / 2 . 03 . summary <nl> - EXTRA_DIST + = reports / 2 . 04 . summary <nl> deleted file mode 100644 <nl> index 32687f1ab . . 000000000 <nl> mmm a / unlvtests / README . md <nl> ppp / dev / null <nl> <nl> - # # How to run UNLV tests . <nl> - <nl> - The scripts in this directory make it possible to duplicate the tests <nl> - published in the Fourth Annual Test of OCR Accuracy . <nl> - See http : / / www . expervision . com / wp - content / uploads / 2012 / 12 / 1995 . The_Fourth_Annual_Test_of_OCR_Accuracy . pdf <nl> - but first you have to get the tools and data used by UNLV : <nl> - <nl> - # # # Step 1 : to download the images go to <nl> - https : / / sourceforge . net / projects / isri - ocr - evaluation - tools - alt / files / <nl> - and get doe3 . 3B . tar . gz , bus . 3B . tar . gz , mag . 3B . tar . gz and news . 3B . tar . gz <nl> - spn . 3B . tar . gz is incorrect in this repo , so get it from code . google <nl> - <nl> - ` ` ` <nl> - mkdir - p ~ / isri - downloads <nl> - cd ~ / isri - downloads <nl> - curl - L https : / / sourceforge . net / projects / isri - ocr - evaluation - tools - alt / files / bus . 3B . tar . gz > bus . 3B . tar . gz <nl> - curl - L https : / / sourceforge . net / projects / isri - ocr - evaluation - tools - alt / files / doe3 . 3B . tar . gz > doe3 . 3B . tar . gz <nl> - curl - L https : / / sourceforge . net / projects / isri - ocr - evaluation - tools - alt / files / mag . 3B . tar . gz > mag . 3B . tar . gz <nl> - curl - L https : / / sourceforge . net / projects / isri - ocr - evaluation - tools - alt / files / news . 3B . tar . gz > news . 3B . tar . gz <nl> - curl - L https : / / storage . googleapis . com / google - code - archive - downloads / v2 / code . google . com / isri - ocr - evaluation - tools / spn . 3B . tar . gz > spn . 3B . tar . gz <nl> - ` ` ` <nl> - <nl> - # # # Step 2 : extract the files . <nl> - It doesn ' t really matter where <nl> - in your filesystem you put them , but they must go under a common <nl> - root so you have directories doe3 . 3B , bus . 3B , mag . 3B and news . 3B . in , for example , <nl> - ~ / ISRI - OCRtk . <nl> - <nl> - ` ` ` <nl> - mkdir - p ~ / ISRI - OCRtk <nl> - cd ~ / ISRI - OCRtk <nl> - tar xzvf ~ / isri - downloads / bus . 3B . tar . gz <nl> - tar xzvf ~ / isri - downloads / doe3 . 3B . tar . gz <nl> - tar xzvf ~ / isri - downloads / mag . 3B . tar . gz <nl> - tar xzvf ~ / isri - downloads / news . 3B . tar . gz <nl> - tar xzvf ~ / isri - downloads / spn . 3B . tar . 
gz <nl> - mkdir - p stopwords <nl> - cd stopwords <nl> - wget - O spa . stopwords . txt https : / / raw . githubusercontent . com / stopwords - iso / stopwords - es / master / stopwords - es . txt <nl> - ` ` ` <nl> - Edit ~ / ISRI - OCRtk / stopwords / spa . stopwords . txt <nl> - wordacc uses a space delimited stopwords file , not line delimited . <nl> - s / \ n / / g <nl> - <nl> - Edit ~ / ISRI - OCRtk / spn . 3B / pages <nl> - Delete the line containing the following imagename as it [ crashes tesseract ] ( https : / / github . com / tesseract - ocr / tesseract / issues / 1647 # issuecomment - 395954717 ) . <nl> - <nl> - 7733_005 . 3B 3 <nl> - <nl> - # # # Step 3 : Download the modified ISRI toolkit , make and install the tools : <nl> - These will be installed in / usr / local / bin . <nl> - <nl> - ` ` ` <nl> - git clone https : / / github . com / Shreeshrii / ocr - evaluation - tools . git <nl> - cd ~ / ocr - evaluation - tools <nl> - sudo make install <nl> - ` ` ` <nl> - <nl> - # # # Step 4 : cd back to your main tesseract - ocr dir and Build tesseract . <nl> - <nl> - # # # Step 5 : run unlvtests / runalltests . sh with the root ISRI data dir , testname , tessdata - dir : <nl> - <nl> - ` ` ` <nl> - unlvtests / runalltests . sh ~ / ISRI - OCRtk 4_fast_eng . . / tessdata_fast <nl> - ` ` ` <nl> - and go to the gym , have lunch etc . It takes a while to run . <nl> - <nl> - # # # Step 6 : There should be a RELEASE . summary file <nl> - * unlvtests / reports / 4 - beta_fast . summary * that contains the final summarized accuracy <nl> - report and comparison with the 1995 results . <nl> - <nl> - # # # Step 7 : run the test for Spanish . <nl> - <nl> - ` ` ` <nl> - unlvtests / runalltests_spa . sh ~ / ISRI - OCRtk 4_fast_spa . . / tessdata_fast <nl> - ` ` ` <nl> - <nl> - # # # # Notes from Nick White regarding wordacc <nl> - <nl> - If you just want to remove all lines which have 100 % recognition , <nl> - you can add a ' awk ' command like this : <nl> - <nl> - ocrevalutf8 wordacc ground . txt ocr . txt | awk ' $ 3 ! = 100 { print $ 0 } ' <nl> - results . txt <nl> - <nl> - or if you ' ve already got a results file you want to change , you can do this : <nl> - <nl> - awk ' $ 3 ! = 100 { print $ 0 } ' results . txt newresults . txt <nl> - <nl> - If you only want the last sections where things are broken down by <nl> - word , you can add a sed command , like this : <nl> - <nl> - ocrevalutf8 wordacc ground . txt ocr . txt | sed ' / ^ Count Missed % Right $ / , $ <nl> - ! d ' | awk ' $ 3 ! = 100 { print $ 0 } ' results . txt <nl> deleted file mode 100755 <nl> index b37882856 . . 000000000 <nl> mmm a / unlvtests / counttestset . sh <nl> ppp / dev / null <nl> <nl> - # ! / bin / bash <nl> - # File : counttestset . sh <nl> - # Description : Script to count the errors on a single UNLV set . <nl> - # Author : Ray Smith <nl> - # Created : Wed Jun 13 11 : 58 : 01 PDT 2007 <nl> - # <nl> - # ( C ) Copyright 2007 , Google Inc . <nl> - # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - # you may not use this file except in compliance with the License . <nl> - # You may obtain a copy of the License at <nl> - # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - # Unless required by applicable law or agreed to in writing , software <nl> - # distributed under the License is distributed on an " AS IS " BASIS , <nl> - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . 
<nl> - # See the License for the specific language governing permissions and <nl> - # limitations under the License . <nl> - <nl> - if [ $ # - ne 2 ] <nl> - then <nl> - echo " Usage : $ 0 pagesfile langcode " <nl> - exit 1 <nl> - fi <nl> - if [ ! - d src / api ] <nl> - then <nl> - echo " Run $ 0 from the tesseract - ocr root directory ! " <nl> - exit 1 <nl> - fi <nl> - <nl> - pages = $ 1 <nl> - langcode = $ 2 <nl> - <nl> - imdir = $ { pages % / pages } <nl> - setname = $ { imdir # # * / } <nl> - resdir = unlvtests / results / $ setname <nl> - mkdir - p unlvtests / reports <nl> - echo " Counting on set $ setname in directory $ imdir to $ resdir " <nl> - accfiles = " " <nl> - wafiles = " " <nl> - while read page dir <nl> - do <nl> - if [ " $ dir " ] <nl> - then <nl> - srcdir = " $ imdir / $ dir " <nl> - else <nl> - srcdir = " $ imdir " <nl> - fi <nl> - # echo " $ srcdir / $ page . tif " <nl> - # Convert groundtruth and recognized text to UTF - 8 to correctly treat accented letters . <nl> - iconv - f ISO8859 - 1 - t UTF - 8 " $ srcdir / $ page . txt " > " $ srcdir / $ page . text " <nl> - iconv - f ISO8859 - 1 - t UTF - 8 " $ resdir / $ page . unlv " > " $ resdir / $ page . text " <nl> - # Count character errors . <nl> - ocrevalutf8 accuracy " $ srcdir / $ page . text " " $ resdir / $ page . text " > " $ resdir / $ page . acc " <nl> - accfiles = " $ accfiles $ resdir / $ page . acc " <nl> - # Count word errors . <nl> - # langcode should be either eng or spa <nl> - if [ " $ langcode " = " eng " ] <nl> - then <nl> - ocrevalutf8 wordacc " $ srcdir / $ page . text " " $ resdir / $ page . text " > " $ resdir / $ page . wa " <nl> - else <nl> - cp ~ / ISRI - OCRtk / stopwords / spa . stopwords . txt " $ resdir / spa . stopwords " <nl> - ocrevalutf8 wordacc - S " $ resdir / spa . stopwords " " $ srcdir / $ page . text " " $ resdir / $ page . text " > " $ resdir / $ page . wa " <nl> - fi <nl> - wafiles = " $ wafiles $ resdir / $ page . wa " <nl> - done < " $ pages " <nl> - <nl> - accsum $ accfiles > " unlvtests / results / $ setname . characc " <nl> - wordaccsum $ wafiles > " unlvtests / results / $ setname . wordacc " <nl> - <nl> deleted file mode 100755 <nl> index 34ad6d699 . . 000000000 <nl> mmm a / unlvtests / reorgdata . sh <nl> ppp / dev / null <nl> <nl> - # ! / bin / bash <nl> - # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - # you may not use this file except in compliance with the License . <nl> - # You may obtain a copy of the License at <nl> - # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - # Unless required by applicable law or agreed to in writing , software <nl> - # distributed under the License is distributed on an " AS IS " BASIS , <nl> - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - # See the License for the specific language governing permissions and <nl> - # limitations under the License . <nl> - <nl> - if [ $ # - ne 1 ] <nl> - then <nl> - echo " Usage : $ 0 scantype " <nl> - echo " UNLV data comes in several scan types : " <nl> - echo " 3B = 300 dpi binary " <nl> - echo " 3A = adaptive thresholded 300 dpi " <nl> - echo " 3G = 300 dpi grey " <nl> - echo " 4B = 400dpi binary " <nl> - echo " 2B = 200dpi binary " <nl> - echo " For now we only use 3B " <nl> - exit 1 <nl> - fi <nl> - ext = $ 1 <nl> - <nl> - # There are several test sets without meaningful names , so rename <nl> - # them with something a bit more meaningful . 
<nl> - # Each s is oldname / newname <nl> - for s in 3 / doe3 B / bus M / mag N / news L / legal R / rep S / spn Z / zset <nl> - do <nl> - old = $ { s % / * } <nl> - # if this set was downloaded then process it . <nl> - if [ - r " $ old / PAGES " ] <nl> - then <nl> - new = $ { s # * / } . $ ext <nl> - mkdir - p " $ new " <nl> - echo " Set $ old - > $ new " <nl> - # The pages file had - instead of _ so fix it and add the extension . <nl> - for page in $ ( cat $ old / PAGES ) <nl> - do <nl> - echo " $ { page % - * } _ $ { page # * - } . $ ext " <nl> - done > " $ new / pages " <nl> - for f in $ ( cat " $ new / pages " ) <nl> - do <nl> - # Put a tif extension on the tif files . <nl> - cp " $ old / $ { old } _B / $ f " " $ new / $ f . tif " <nl> - # Put a uzn extension on the zone files . <nl> - cp " $ old / $ { old } _B / $ { f } Z " " $ new / $ f . uzn " <nl> - # Cat all the truth files together and put into a single txt file . <nl> - cat " $ old / $ { old } _GT / $ { f % . $ ext } " . Z * > " $ new / $ f . txt " <nl> - done <nl> - fi <nl> - done <nl> deleted file mode 100644 <nl> index 00eb97a86 . . 000000000 <nl> mmm a / unlvtests / reports / 1995 . bus . 3B . sum <nl> ppp / dev / null <nl> @ @ - 1 + 0 , 0 @ @ <nl> - 1995 bus . 3B 5959 98 . 14 % 0 . 00 % 1631 96 . 83 % 0 . 00 % 1293 95 . 73 % 0 . 00 % <nl> deleted file mode 100644 <nl> index 7eb753aee . . 000000000 <nl> mmm a / unlvtests / reports / 1995 . doe3 . 3B . sum <nl> ppp / dev / null <nl> @ @ - 1 + 0 , 0 @ @ <nl> - 1995 doe3 . 3B 36349 97 . 52 % 0 . 00 % 7826 96 . 34 % 0 . 00 % 7042 94 . 87 % 0 . 00 % <nl> deleted file mode 100644 <nl> index e718c5433 . . 000000000 <nl> mmm a / unlvtests / reports / 1995 . mag . 3B . sum <nl> ppp / dev / null <nl> @ @ - 1 + 0 , 0 @ @ <nl> - 1995 mag . 3B 15043 97 . 74 % 0 . 00 % 4566 96 . 01 % 0 . 00 % 3379 94 . 99 % 0 . 00 % <nl> deleted file mode 100644 <nl> index bd0b7c68d . . 000000000 <nl> mmm a / unlvtests / reports / 1995 . news . 3B . sum <nl> ppp / dev / null <nl> @ @ - 1 + 0 , 0 @ @ <nl> - 1995 news . 3B 6432 98 . 69 % 0 . 00 % 1946 97 . 68 % 0 . 00 % 1502 96 . 94 % 0 . 00 % <nl> deleted file mode 100644 <nl> index 70f9cef84 . . 000000000 <nl> mmm a / unlvtests / reports / 2 . 03 . summary <nl> ppp / dev / null <nl> <nl> - 1995 bus . 3B 5959 98 . 14 % 0 . 00 % 1631 96 . 83 % 0 . 00 % 1293 95 . 73 % 0 . 00 % <nl> - 1995 doe3 . 3B 36349 97 . 52 % 0 . 00 % 7826 96 . 34 % 0 . 00 % 7042 94 . 87 % 0 . 00 % <nl> - 1995 mag . 3B 15043 97 . 74 % 0 . 00 % 4566 96 . 01 % 0 . 00 % 3379 94 . 99 % 0 . 00 % <nl> - 1995 news . 3B 6432 98 . 69 % 0 . 00 % 1946 97 . 68 % 0 . 00 % 1502 96 . 94 % 0 . 00 % <nl> - 2 . 03 bus . 3B 6422 97 . 99 % 7 . 77 % 1750 96 . 60 % 7 . 30 % 1361 95 . 51 5 . 26 % <nl> - 2 . 03 doe3 . 3B 29520 97 . 98 % - 18 . 79 % 7966 96 . 27 % 1 . 79 % 6764 95 . 07 - 3 . 95 % <nl> - 2 . 03 mag . 3B 14568 97 . 81 % - 3 . 16 % 4288 96 . 25 % - 6 . 09 % 3054 95 . 47 - 9 . 62 % <nl> - 2 . 03 news . 3B 7655 98 . 44 % 19 . 01 % 1730 97 . 94 % - 11 . 10 % 1208 97 . 54 - 19 . 57 % <nl> - 2 . 03 Total 58165 - - 8 . 81 % 15734 - - 1 . 47 % 12387 - - 6 . 27 % <nl> deleted file mode 100644 <nl> index ed6a10a5a . . 000000000 <nl> mmm a / unlvtests / reports / 2 . 04 . summary <nl> ppp / dev / null <nl> <nl> - 1995 bus . 3B 5959 98 . 14 % 0 . 00 % 1631 96 . 83 % 0 . 00 % 1293 95 . 73 % 0 . 00 % <nl> - 1995 doe3 . 3B 36349 97 . 52 % 0 . 00 % 7826 96 . 34 % 0 . 00 % 7042 94 . 87 % 0 . 00 % <nl> - 1995 mag . 3B 15043 97 . 74 % 0 . 00 % 4566 96 . 01 % 0 . 00 % 3379 94 . 99 % 0 . 00 % <nl> - 1995 news . 
3B 6432 98 . 69 % 0 . 00 % 1946 97 . 68 % 0 . 00 % 1502 96 . 94 % 0 . 00 % <nl> - 2 . 04 bus . 3B 6422 97 . 99 % 7 . 77 % 1750 96 . 60 % 7 . 30 % 1361 95 . 51 5 . 26 % <nl> - 2 . 04 doe3 . 3B 29514 97 . 98 % - 18 . 80 % 7963 96 . 27 % 1 . 75 % 6762 95 . 07 - 3 . 98 % <nl> - 2 . 04 mag . 3B 14568 97 . 81 % - 3 . 16 % 4289 96 . 25 % - 6 . 07 % 3053 95 . 47 - 9 . 65 % <nl> - 2 . 04 news . 3B 7655 98 . 44 % 19 . 01 % 1730 97 . 94 % - 11 . 10 % 1208 97 . 54 - 19 . 57 % <nl> - 2 . 04 Total 58159 - - 8 . 82 % 15732 - - 1 . 48 % 12384 - - 6 . 30 % <nl> deleted file mode 100644 <nl> index cbb92073a . . 000000000 <nl> mmm a / unlvtests / reports / 4_best_int_spa . summary <nl> ppp / dev / null <nl> <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWordErrors Accuracy TimeTaken <nl> - 4_best_int_spa spn . 3B 2846 99 . 18 % 937 98 . 39 % 739 97 . 54 6478 . 02s <nl> deleted file mode 100644 <nl> index 69a7b75d8 . . 000000000 <nl> mmm a / unlvtests / reports / 4_best_spa . summary <nl> ppp / dev / null <nl> <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWordErrors Accuracy TimeTaken <nl> - 4_best_spa spn . 3B 2823 99 . 19 % 924 98 . 41 % 729 97 . 57 7233 . 76s <nl> deleted file mode 100644 <nl> index e03253473 . . 000000000 <nl> mmm a / unlvtests / reports / 4_fast_eng . summary <nl> ppp / dev / null <nl> <nl> - 1995 bus . 3B 5959 98 . 14 % 0 . 00 % 1631 96 . 83 % 0 . 00 % 1293 95 . 73 % 0 . 00 % <nl> - 1995 doe3 . 3B 36349 97 . 52 % 0 . 00 % 7826 96 . 34 % 0 . 00 % 7042 94 . 87 % 0 . 00 % <nl> - 1995 mag . 3B 15043 97 . 74 % 0 . 00 % 4566 96 . 01 % 0 . 00 % 3379 94 . 99 % 0 . 00 % <nl> - 1995 news . 3B 6432 98 . 69 % 0 . 00 % 1946 97 . 68 % 0 . 00 % 1502 96 . 94 % 0 . 00 % <nl> - 4_fast_eng bus . 3B 6124 98 . 11 % 2 . 77 % 1138 97 . 88 % - 30 . 23 % 963 97 . 05 - 25 . 52 % 3935 . 26s <nl> - 4_fast_eng doe3 . 3B 30029 97 . 96 % - 17 . 39 % 13781 94 . 45 % 76 . 09 % 13178 92 . 38 87 . 13 % 18847 . 36s <nl> - 4_fast_eng mag . 3B 10934 98 . 37 % - 27 . 32 % 3343 97 . 15 % - 26 . 78 % 2813 96 . 06 - 16 . 75 % 6867 . 14s <nl> - 4_fast_eng news . 3B 5734 98 . 84 % - 10 . 85 % 1322 98 . 45 % - 32 . 07 % 1040 97 . 94 - 30 . 76 % 5527 . 38s <nl> - 4_fast_eng Total 52821 - - 17 . 19 % 19584 - 22 . 64 % 17994 - 36 . 15 % <nl> deleted file mode 100644 <nl> index 6d25fe333 . . 000000000 <nl> mmm a / unlvtests / reports / 4_fast_spa . summary <nl> ppp / dev / null <nl> <nl> - RELEASE TestSet CharErrors Accuracy WordErrors Accuracy NonStopWordErrors Accuracy TimeTaken <nl> - 4_fast_spa spn . 3B 2841 99 . 18 % 879 98 . 49 % 742 97 . 53 3838 . 82s <nl> deleted file mode 100755 <nl> index 628a457ed . . 000000000 <nl> mmm a / unlvtests / runalltests . sh <nl> ppp / dev / null <nl> <nl> - # ! / bin / bash <nl> - # File : runalltests . sh <nl> - # Description : Script to run a set of UNLV test sets for English . <nl> - # Author : Ray Smith <nl> - # Created : Thu Jun 14 08 : 21 : 01 PDT 2007 <nl> - # <nl> - # ( C ) Copyright 2007 , Google Inc . <nl> - # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - # you may not use this file except in compliance with the License . <nl> - # You may obtain a copy of the License at <nl> - # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - # Unless required by applicable law or agreed to in writing , software <nl> - # distributed under the License is distributed on an " AS IS " BASIS , <nl> - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . 
<nl> - # See the License for the specific language governing permissions and <nl> - # limitations under the License . <nl> - <nl> - if [ $ # - ne 3 ] <nl> - then <nl> - echo " Usage : $ 0 unlv - data - dir version - id tessdata - dir " <nl> - exit 1 <nl> - fi <nl> - if [ ! - d src / api ] <nl> - then <nl> - echo " Run $ 0 from the tesseract - ocr root directory ! " <nl> - exit 1 <nl> - fi <nl> - if [ ! - r src / api / tesseract ] & & [ ! - r tesseract . exe ] <nl> - then <nl> - echo " Please build tesseract before running $ 0 " <nl> - exit 1 <nl> - fi <nl> - tessdata = $ 3 <nl> - <nl> - # deltapc new old calculates the % change from old to new <nl> - deltapc ( ) { <nl> - awk ' BEGIN { <nl> - printf ( " % . 2f " , 100 . 0 * ( ' " $ 1 " ' - ' " $ 2 " ' ) / ' " $ 2 " ' ) ; <nl> - } ' <nl> - } <nl> - <nl> - # timesum computes the total cpu time <nl> - timesum ( ) { <nl> - awk ' BEGIN { <nl> - total = 0 . 0 ; <nl> - } <nl> - { <nl> - total + = $ 2 ; <nl> - } <nl> - END { <nl> - printf ( " % . 2f \ n " , total ) ; <nl> - } ' " $ 1 " <nl> - } <nl> - <nl> - imdir = " $ 1 " <nl> - vid = " $ 2 " <nl> - bindir = $ { 0 % / * } <nl> - if [ " $ bindir " = " $ 0 " ] <nl> - then <nl> - bindir = " . / " <nl> - fi <nl> - rdir = unlvtests / reports <nl> - <nl> - testsets = " bus . 3B doe3 . 3B mag . 3B news . 3B " <nl> - # testsets = " bus . 3B " <nl> - <nl> - totalerrs = 0 <nl> - totalwerrs = 0 <nl> - totalnswerrs = 0 <nl> - totalolderrs = 0 <nl> - totaloldwerrs = 0 <nl> - totaloldnswerrs = 0 <nl> - for set in $ testsets <nl> - do <nl> - if [ - r " $ imdir / $ set / pages " ] <nl> - then <nl> - # Run tesseract on all the pages . <nl> - $ bindir / runtestset . sh " $ imdir / $ set / pages " " $ tessdata " " eng " <nl> - # Count the errors on all the pages . <nl> - $ bindir / counttestset . sh " $ imdir / $ set / pages " " eng " <nl> - # Get the old character word and nonstop word errors . <nl> - olderrs = $ ( cut - f3 " unlvtests / reports / 1995 . $ set . sum " ) <nl> - oldwerrs = $ ( cut - f6 " unlvtests / reports / 1995 . $ set . sum " ) <nl> - oldnswerrs = $ ( cut - f9 " unlvtests / reports / 1995 . $ set . sum " ) <nl> - # Get the new character word and nonstop word errors and accuracy . <nl> - cherrs = $ ( head - 4 " unlvtests / results / $ set . characc " | tail - 1 | cut - c1 - 9 | <nl> - tr - d ' [ : blank : ] ' ) <nl> - chacc = $ ( head - 5 " unlvtests / results / $ set . characc " | tail - 1 | cut - c1 - 9 | <nl> - tr - d ' [ : blank : ] ' ) <nl> - wderrs = $ ( head - 4 " unlvtests / results / $ set . wordacc " | tail - 1 | cut - c1 - 9 | <nl> - tr - d ' [ : blank : ] ' ) <nl> - wdacc = $ ( head - 5 " unlvtests / results / $ set . wordacc " | tail - 1 | cut - c1 - 9 | <nl> - tr - d ' [ : blank : ] ' ) <nl> - nswderrs = $ ( grep Total " unlvtests / results / $ set . wordacc " | head - 2 | tail - 1 | <nl> - cut - c10 - 17 | tr - d ' [ : blank : ] ' ) <nl> - nswdacc = $ ( grep Total " unlvtests / results / $ set . wordacc " | head - 2 | tail - 1 | <nl> - cut - c19 - 26 | tr - d ' [ : blank : ] ' ) <nl> - # Compute the percent change . <nl> - chdelta = $ ( deltapc " $ cherrs " " $ olderrs " ) <nl> - wdelta = $ ( deltapc " $ wderrs " " $ oldwerrs " ) <nl> - nswdelta = $ ( deltapc " $ nswderrs " " $ oldnswerrs " ) <nl> - sumfile = $ rdir / $ vid . $ set . sum <nl> - if [ - r " unlvtests / results / $ set . times " ] <nl> - then <nl> - total_time = $ ( timesum " unlvtests / results / $ set . times " ) <nl> - if [ - r " unlvtests / results / prev / $ set . 
times " ] <nl> - then <nl> - paste " unlvtests / results / prev / $ set . times " " unlvtests / results / $ set . times " | <nl> - awk ' { printf ( " % s % . 2f \ n " , $ 1 , $ 4 - $ 2 ) ; } ' | sort - k2n > " unlvtests / results / $ set . timedelta " <nl> - fi <nl> - else <nl> - total_time = ' 0 . 0 ' <nl> - fi <nl> - echo " $ vid $ set $ cherrs $ chacc $ chdelta % $ wderrs $ wdacc \ <nl> - $ wdelta % $ nswderrs $ nswdacc $ nswdelta % $ { total_time } s " > " $ sumfile " <nl> - # Sum totals over all the testsets . <nl> - let totalerrs = totalerrs + cherrs <nl> - let totalwerrs = totalwerrs + wderrs <nl> - let totalnswerrs = totalnswerrs + nswderrs <nl> - let totalolderrs = totalolderrs + olderrs <nl> - let totaloldwerrs = totaloldwerrs + oldwerrs <nl> - let totaloldnswerrs = totaloldnswerrs + oldnswerrs <nl> - fi <nl> - done <nl> - # Compute grand total percent change . <nl> - chdelta = $ ( deltapc $ totalerrs $ totalolderrs ) <nl> - wdelta = $ ( deltapc $ totalwerrs $ totaloldwerrs ) <nl> - nswdelta = $ ( deltapc $ totalnswerrs $ totaloldnswerrs ) <nl> - tfile = $ rdir / $ vid . total . sum <nl> - echo " $ vid Total $ totalerrs - $ chdelta % $ totalwerrs \ <nl> - - $ wdelta % $ totalnswerrs - $ nswdelta % " > " $ tfile " <nl> - cat $ rdir / 1995 . * . sum " $ rdir / $ vid " . * . sum > " $ rdir / $ vid " . summary <nl> - <nl> - mv " $ rdir / $ vid " . * . sum unlvtests / results / <nl> - cat " $ rdir / $ vid " . summary <nl> deleted file mode 100755 <nl> index a6e218bbc . . 000000000 <nl> mmm a / unlvtests / runalltests_spa . sh <nl> ppp / dev / null <nl> <nl> - # ! / bin / bash <nl> - # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> - # File : runalltests_spa . sh <nl> - # Description : Script to run a set of UNLV test sets for Spanish . <nl> - # based on runalltests . sh by Ray Smith <nl> - # Author : Shree Devi Kumar <nl> - # Created : June 09 , 2018 <nl> - # <nl> - # ( C ) Copyright 2007 , Google Inc . <nl> - # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - # you may not use this file except in compliance with the License . <nl> - # You may obtain a copy of the License at <nl> - # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - # Unless required by applicable law or agreed to in writing , software <nl> - # distributed under the License is distributed on an " AS IS " BASIS , <nl> - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - # See the License for the specific language governing permissions and <nl> - # limitations under the License . <nl> - # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> - if [ $ # - ne 3 ] <nl> - then <nl> - echo " Usage : $ 0 unlv - data - dir version - id tessdata - dir " <nl> - exit 1 <nl> - fi <nl> - if [ ! - d src / api ] <nl> - then <nl> - echo " Run $ 0 from the tesseract - ocr root directory ! " <nl> - exit 1 <nl> - fi <nl> - if [ ! - r src / api / tesseract ] & & [ ! - r tesseract . exe ] <nl> - then <nl> - echo " Please build tesseract before running $ 0 " <nl> - exit 1 <nl> - fi <nl> - tessdata = $ 3 <nl> - lang = $ 4 <nl> - <nl> - # timesum computes the total cpu time <nl> - timesum ( ) { <nl> - awk ' BEGIN { <nl> - total = 0 . 0 ; <nl> - } <nl> - { <nl> - total + = $ 2 ; <nl> - } <nl> - END { <nl> - printf ( " % . 
2f \ n " , total ) ; <nl> - } ' " $ 1 " <nl> - } <nl> - <nl> - imdir = " $ 1 " <nl> - vid = " $ 2 " <nl> - bindir = $ { 0 % / * } <nl> - if [ " $ bindir " = " $ 0 " ] <nl> - then <nl> - bindir = " . / " <nl> - fi <nl> - rdir = unlvtests / reports <nl> - <nl> - testsets = " spn . 3B " <nl> - <nl> - totalerrs = 0 <nl> - totalwerrs = 0 <nl> - totalnswerrs = 0 <nl> - for set in $ testsets <nl> - do <nl> - if [ - r " $ imdir / $ set / pages " ] <nl> - then <nl> - # Run tesseract on all the pages . <nl> - $ bindir / runtestset . sh " $ imdir / $ set / pages " " $ tessdata " " spa " <nl> - # Count the errors on all the pages . <nl> - $ bindir / counttestset . sh " $ imdir / $ set / pages " " spa " <nl> - # Get the new character word and nonstop word errors and accuracy . <nl> - cherrs = $ ( head - 4 " unlvtests / results / $ set . characc " | tail - 1 | cut - c1 - 9 | <nl> - tr - d ' [ : blank : ] ' ) <nl> - chacc = $ ( head - 5 " unlvtests / results / $ set . characc " | tail - 1 | cut - c1 - 9 | <nl> - tr - d ' [ : blank : ] ' ) <nl> - wderrs = $ ( head - 4 " unlvtests / results / $ set . wordacc " | tail - 1 | cut - c1 - 9 | <nl> - tr - d ' [ : blank : ] ' ) <nl> - wdacc = $ ( head - 5 " unlvtests / results / $ set . wordacc " | tail - 1 | cut - c1 - 9 | <nl> - tr - d ' [ : blank : ] ' ) <nl> - nswderrs = $ ( grep Total " unlvtests / results / $ set . wordacc " | head - 2 | tail - 1 | <nl> - cut - c10 - 17 | tr - d ' [ : blank : ] ' ) <nl> - nswdacc = $ ( grep Total " unlvtests / results / $ set . wordacc " | head - 2 | tail - 1 | <nl> - cut - c19 - 26 | tr - d ' [ : blank : ] ' ) <nl> - <nl> - sumfile = $ rdir / $ vid . $ set . sum <nl> - if [ - r " unlvtests / results / $ set . times " ] <nl> - then <nl> - total_time = $ ( timesum " unlvtests / results / $ set . times " ) <nl> - if [ - r " unlvtests / results / prev / $ set . times " ] <nl> - then <nl> - paste " unlvtests / results / prev / $ set . times " " unlvtests / results / $ set . times " | <nl> - awk ' { printf ( " % s % . 2f \ n " , $ 1 , $ 4 - $ 2 ) ; } ' | sort - k2n > " unlvtests / results / $ set . timedelta " <nl> - fi <nl> - else <nl> - total_time = ' 0 . 0 ' <nl> - fi <nl> - echo " RELEASE TestSet CharErrors Accuracy WordErrors Accuracy \ <nl> - NonStopWordErrors Accuracy TimeTaken " > " $ sumfile " <nl> - echo " $ vid $ set $ cherrs $ chacc $ wderrs $ wdacc \ <nl> - $ nswderrs $ nswdacc $ { total_time } s " > > " $ sumfile " <nl> - fi <nl> - done <nl> - <nl> - cat " $ rdir / $ vid " . * . sum > " $ rdir / $ vid " . summary <nl> - <nl> - mv " $ rdir / $ vid " . * . sum unlvtests / results / <nl> - cat " $ rdir / $ vid " . summary <nl> deleted file mode 100755 <nl> index 783f0bfb7 . . 000000000 <nl> mmm a / unlvtests / runtestset . sh <nl> ppp / dev / null <nl> <nl> - # ! / bin / bash <nl> - # File : runtestset . sh <nl> - # Description : Script to run tesseract on a single UNLV set . <nl> - # Author : Ray Smith <nl> - # Created : Wed Jun 13 10 : 13 : 01 PDT 2007 <nl> - # <nl> - # ( C ) Copyright 2007 , Google Inc . <nl> - # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - # you may not use this file except in compliance with the License . <nl> - # You may obtain a copy of the License at <nl> - # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - # Unless required by applicable law or agreed to in writing , software <nl> - # distributed under the License is distributed on an " AS IS " BASIS , <nl> - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . 
<nl> - # See the License for the specific language governing permissions and <nl> - # limitations under the License . <nl> - <nl> - if [ $ # - ne 3 ] & & [ $ # - ne 4 ] <nl> - then <nl> - echo " Usage : $ 0 pagesfile tessdata - dir lang [ - zoning ] " <nl> - exit 1 <nl> - fi <nl> - if [ ! - d src / api ] <nl> - then <nl> - echo " Run $ 0 from the tesseract - ocr root directory ! " <nl> - exit 1 <nl> - fi <nl> - if [ ! - r src / api / tesseract ] <nl> - then <nl> - if [ ! - r tesseract . exe ] <nl> - then <nl> - echo " Please build tesseract before running $ 0 " <nl> - exit 1 <nl> - else <nl> - tess = " . / tesseract . exe " <nl> - fi <nl> - else <nl> - tess = " time - f % U - o times . txt src / api / tesseract " <nl> - # tess = " time - f % U - o times . txt tesseract " <nl> - fi <nl> - <nl> - tessdata = $ 2 <nl> - lang = $ 3 <nl> - pages = $ 1 <nl> - imdir = $ { pages % / pages } <nl> - setname = $ { imdir # # * / } <nl> - if [ $ # - eq 4 ] & & [ " $ 4 " = " - zoning " ] <nl> - then <nl> - config = unlv . auto <nl> - resdir = unlvtests / results / zoning . $ setname <nl> - else <nl> - config = unlv <nl> - resdir = unlvtests / results / $ setname <nl> - fi <nl> - echo - e " Testing on set $ setname in directory $ imdir to $ resdir \ n " <nl> - mkdir - p " $ resdir " <nl> - rm - f " unlvtests / results / $ setname . times " <nl> - while read page dir <nl> - do <nl> - # A pages file may be a list of files with subdirs or maybe just <nl> - # a plain list of files so accommodate both . <nl> - if [ " $ dir " ] <nl> - then <nl> - srcdir = " $ imdir / $ dir " <nl> - else <nl> - srcdir = " $ imdir " <nl> - fi <nl> - # echo " $ srcdir / $ page . tif " <nl> - $ tess " $ srcdir / $ page . tif " " $ resdir / $ page " - - tessdata - dir $ tessdata - - oem 1 - l $ lang - - psm 6 $ config 2 > & 1 | grep - v " OCR Engine " | grep - v " Page 1 " <nl> - if [ - r times . txt ] <nl> - then <nl> - read t < times . txt <nl> - echo " $ page $ t " > > " unlvtests / results / $ setname . times " <nl> - echo - e " \ 033M $ page $ t " <nl> - if [ " $ t " = " Command terminated by signal 2 " ] <nl> - then <nl> - exit 0 <nl> - fi <nl> - fi <nl> - done < " $ pages " <nl> | move langtests and unlvtests from tesseract - ocr repository to test repository | tesseract-ocr/tesseract | cdfb7680108e388b6532c8ca956bdeb09d33697f | 2018-11-08T21:31:32Z |
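The deleted unlvtests scripts above compute their report numbers with two tiny awk helpers: `deltapc`, the percent change from the 1995 baseline (100.0 * (new - old) / old), and `timesum`, the sum of the per-page CPU times in a `<page> <seconds>` file. As a minimal sketch of the same arithmetic outside awk, here is an illustrative C++ version; the function names and the sample values are assumptions for illustration only, not part of the tesseract scripts.

```cpp
#include <cstdio>
#include <numeric>
#include <vector>

// Percent change from old to new, mirroring the deltapc awk helper:
// 100.0 * (new - old) / old.
static double delta_pc(double new_errs, double old_errs) {
  return 100.0 * (new_errs - old_errs) / old_errs;
}

// Total CPU time, mirroring timesum: the sum of the seconds column of an
// already-parsed "<page> <seconds>" times file.
static double time_sum(const std::vector<double>& page_times) {
  return std::accumulate(page_times.begin(), page_times.end(), 0.0);
}

int main() {
  // Hypothetical values, purely for illustration.
  std::printf("%.2f\n", delta_pc(380.0, 420.0));     // -9.52  (% change in errors)
  std::printf("%.2f\n", time_sum({1.2, 0.8, 2.5}));  // 4.50   (total seconds)
}
```

A negative `deltapc` result means the new build made fewer errors than the baseline, which is how the generated `.sum` reports are read.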
mmm a / cmake / modules / AddSwift . cmake <nl> ppp b / cmake / modules / AddSwift . cmake <nl> function ( _add_variant_c_compile_flags ) <nl> swift_android_libcxx_include_paths ( CFLAGS_CXX_INCLUDES ) <nl> swift_android_include_for_arch ( " $ { CFLAGS_ARCH } " " $ { CFLAGS_ARCH } _INCLUDE " ) <nl> foreach ( path IN LISTS CFLAGS_CXX_INCLUDES $ { CFLAGS_ARCH } _INCLUDE ) <nl> - list ( APPEND result " \ " $ { CMAKE_INCLUDE_FLAG_C } $ { path } \ " " ) <nl> + list ( APPEND result - isystem ; $ { path } ) <nl> endforeach ( ) <nl> list ( APPEND result " - D__ANDROID_API__ = $ { SWIFT_ANDROID_API_LEVEL } " ) <nl> elseif ( CFLAGS_SDK STREQUAL WINDOWS ) <nl> | Merge remote - tracking branch ' origin / master ' into master - next | apple/swift | 55806395fbde12af4e64647c4e90bc549465575c | 2019-07-10T22:50:50Z |
mmm a / jstests / aggregation / testshard1 . js <nl> ppp b / jstests / aggregation / testshard1 . js <nl> for ( i = 0 ; i < 6 ; + + i ) { <nl> ' agg sharded test simple match failed ' ) ; <nl> } <nl> <nl> + function testSkipLimit ( ops , expectedCount ) { <nl> + if ( expectedCount > 10 ) { <nl> + / / make shard - > mongos intermediate results less than 16MB <nl> + ops . unshift ( { $ project : { _id : 1 } } ) <nl> + } <nl> + <nl> + ops . push ( { $ group : { _id : 1 , count : { $ sum : 1 } } } ) ; <nl> + <nl> + var out = db . runCommand ( { aggregate : " ts1 " , pipeline : ops } ) ; <nl> + assert . commandWorked ( out ) ; <nl> + assert . eq ( out . result [ 0 ] . count , expectedCount ) ; <nl> + } <nl> + <nl> + testSkipLimit ( [ ] , nItems ) ; / / control <nl> + testSkipLimit ( [ { $ skip : 10 } ] , nItems - 10 ) ; <nl> + testSkipLimit ( [ { $ limit : 10 } ] , 10 ) ; <nl> + testSkipLimit ( [ { $ skip : 5 } , { $ limit : 10 } ] , 10 ) ; <nl> + testSkipLimit ( [ { $ limit : 10 } , { $ skip : 5 } ] , 10 - 5 ) ; <nl> + testSkipLimit ( [ { $ skip : 5 } , { $ skip : 3 } , { $ limit : 10 } ] , 10 ) ; <nl> + testSkipLimit ( [ { $ skip : 5 } , { $ limit : 10 } , { $ skip : 3 } ] , 10 - 3 ) ; <nl> + testSkipLimit ( [ { $ limit : 10 } , { $ skip : 5 } , { $ skip : 3 } ] , 10 - 3 - 5 ) ; <nl> + <nl> / / shut everything down <nl> shardedAggTest . stop ( ) ; <nl> mmm a / src / mongo / db / pipeline / document_source . h <nl> ppp b / src / mongo / db / pipeline / document_source . h <nl> namespace mongo { <nl> <nl> <nl> class DocumentSourceLimit : <nl> - public DocumentSource { <nl> + public SplittableDocumentSource { <nl> public : <nl> / / virtuals from DocumentSource <nl> virtual ~ DocumentSourceLimit ( ) ; <nl> namespace mongo { <nl> static intrusive_ptr < DocumentSourceLimit > create ( <nl> const intrusive_ptr < ExpressionContext > & pExpCtx ) ; <nl> <nl> + / / Virtuals for SplittableDocumentSource <nl> + / / Need to run on rounter . Running on shard as well is an optimization . <nl> + virtual intrusive_ptr < DocumentSource > getShardSource ( ) { return this ; } <nl> + virtual intrusive_ptr < DocumentSource > getRouterSource ( ) { return this ; } <nl> + <nl> + long long getLimit ( ) const { return limit ; } <nl> + void setLimit ( long long newLimit ) { limit = newLimit ; } <nl> + <nl> / * * <nl> Create a limiting DocumentSource from BSON . <nl> <nl> namespace mongo { <nl> } ; <nl> <nl> class DocumentSourceSkip : <nl> - public DocumentSource { <nl> + public SplittableDocumentSource { <nl> public : <nl> / / virtuals from DocumentSource <nl> virtual ~ DocumentSourceSkip ( ) ; <nl> namespace mongo { <nl> static intrusive_ptr < DocumentSourceSkip > create ( <nl> const intrusive_ptr < ExpressionContext > & pExpCtx ) ; <nl> <nl> + / / Virtuals for SplittableDocumentSource <nl> + / / Need to run on rounter . Can ' t run on shards . <nl> + virtual intrusive_ptr < DocumentSource > getShardSource ( ) { return NULL ; } <nl> + virtual intrusive_ptr < DocumentSource > getRouterSource ( ) { return this ; } <nl> + <nl> + long long getSkip ( ) const { return skip ; } <nl> + void setSkip ( long long newSkip ) { skip = newSkip ; } <nl> + <nl> / * * <nl> Create a skipping DocumentSource from BSON . <nl> <nl> mmm a / src / mongo / db / pipeline / document_source_limit . cpp <nl> ppp b / src / mongo / db / pipeline / document_source_limit . 
cpp <nl> <nl> namespace mongo { <nl> const char DocumentSourceLimit : : limitName [ ] = " $ limit " ; <nl> <nl> - DocumentSourceLimit : : DocumentSourceLimit ( <nl> - const intrusive_ptr < ExpressionContext > & pExpCtx ) : <nl> - DocumentSource ( pExpCtx ) , <nl> + DocumentSourceLimit : : DocumentSourceLimit ( const intrusive_ptr < ExpressionContext > & pExpCtx ) : <nl> + SplittableDocumentSource ( pExpCtx ) , <nl> limit ( 0 ) , <nl> count ( 0 ) { <nl> } <nl> mmm a / src / mongo / db / pipeline / document_source_skip . cpp <nl> ppp b / src / mongo / db / pipeline / document_source_skip . cpp <nl> namespace mongo { <nl> <nl> const char DocumentSourceSkip : : skipName [ ] = " $ skip " ; <nl> <nl> - DocumentSourceSkip : : DocumentSourceSkip ( <nl> - const intrusive_ptr < ExpressionContext > & pExpCtx ) : <nl> - DocumentSource ( pExpCtx ) , <nl> + DocumentSourceSkip : : DocumentSourceSkip ( const intrusive_ptr < ExpressionContext > & pExpCtx ) : <nl> + SplittableDocumentSource ( pExpCtx ) , <nl> skip ( 0 ) , <nl> count ( 0 ) { <nl> } <nl> mmm a / src / mongo / db / pipeline / pipeline . cpp <nl> ppp b / src / mongo / db / pipeline / pipeline . cpp <nl> namespace mongo { <nl> } <nl> } <nl> <nl> + / * Move limits in front of skips . This is more optimal for sharding <nl> + * since currently , we can only split the pipeline at a single source <nl> + * and it is better to limit the results coming from each shard <nl> + * / <nl> + for ( int i = pSourceVector - > size ( ) - 1 ; i > = 1 / * not looking at 0 * / ; i - - ) { <nl> + DocumentSourceLimit * limit = <nl> + dynamic_cast < DocumentSourceLimit * > ( ( * pSourceVector ) [ i ] . get ( ) ) ; <nl> + DocumentSourceSkip * skip = <nl> + dynamic_cast < DocumentSourceSkip * > ( ( * pSourceVector ) [ i - 1 ] . get ( ) ) ; <nl> + if ( limit & & skip ) { <nl> + / / Increase limit by skip since the skipped docs now pass through the $ limit <nl> + limit - > setLimit ( limit - > getLimit ( ) + skip - > getSkip ( ) ) ; <nl> + swap ( ( * pSourceVector ) [ i ] , ( * pSourceVector ) [ i - 1 ] ) ; <nl> + <nl> + / / Start at back again . This is needed to handle cases with more than 1 $ limit <nl> + / / ( S means skip , L means limit ) <nl> + / / <nl> + / / These two would work without second pass ( assuming back to front ordering ) <nl> + / / SL - > LS <nl> + / / SSL - > LSS <nl> + / / <nl> + / / The following cases need a second pass to handle the second limit <nl> + / / SLL - > LLS <nl> + / / SSLL - > LLSS <nl> + / / SLSL - > LLSS <nl> + i = pSourceVector - > size ( ) ; / / decremented before next pass <nl> + } <nl> + } <nl> + <nl> / * <nl> Coalesce adjacent filters where possible . Two adjacent filters <nl> are equivalent to one filter whose predicate is the conjunction of <nl> | SERVER - 7408 Correctly handle $ skip and $ limit in sharded agg | mongodb/mongo | d8384a53036b471099b8d293ac21b7bd4809df33 | 2012-11-05T18:03:26Z |
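The pipeline.cpp hunk in this commit moves each `$limit` in front of an adjacent `$skip` and grows the limit by the skip amount, because a limit may run on both the shards and the router while a skip can only run on the router (see the `getShardSource` / `getRouterSource` overrides above). The sketch below is a minimal standalone C++ illustration of that swap over a simplified stage list; the `Stage` struct and helper name are assumptions for illustration, not mongod types.

```cpp
#include <cstdio>
#include <utility>
#include <vector>

// Simplified stand-ins for DocumentSourceSkip / DocumentSourceLimit.
struct Stage {
  enum Kind { kSkip, kLimit } kind;
  long long n;  // skip count or limit size
};

// Move limits in front of skips, increasing each limit by the skip it
// passes (the skipped documents now flow through the $limit first).
// After each swap, rescan from the back, as the diff's comment block
// explains (SL -> LS works in one pass; SLL, SLSL need the restart).
void moveLimitsBeforeSkips(std::vector<Stage>& stages) {
  for (int i = static_cast<int>(stages.size()) - 1; i >= 1; --i) {
    if (stages[i].kind == Stage::kLimit && stages[i - 1].kind == Stage::kSkip) {
      stages[i].n += stages[i - 1].n;       // limit += skip
      std::swap(stages[i], stages[i - 1]);  // ...then swap the two stages
      i = static_cast<int>(stages.size());  // restart from the back
    }
  }
}

int main() {
  // {$skip: 5}, {$limit: 10}  becomes  {$limit: 15}, {$skip: 5}
  std::vector<Stage> p = {{Stage::kSkip, 5}, {Stage::kLimit, 10}};
  moveLimitsBeforeSkips(p);
  for (const Stage& s : p)
    std::printf("%s %lld\n", s.kind == Stage::kLimit ? "$limit" : "$skip", s.n);
}
```

With the limit hoisted and widened this way, each shard returns at most `skip + limit` documents and the router applies the skip afterwards, which is exactly what the added testshard1.js cases (e.g. `[{$skip: 5}, {$limit: 10}]` yielding 10 results) exercise.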
mmm a / BUILD <nl> ppp b / BUILD <nl> grpc_cc_library ( <nl> name = " grpc_codegen " , <nl> language = " c " , <nl> public_hdrs = [ <nl> - " include / grpc / impl / codegen / byte_buffer . h " , <nl> " include / grpc / impl / codegen / byte_buffer_reader . h " , <nl> " include / grpc / impl / codegen / compression_types . h " , <nl> " include / grpc / impl / codegen / connectivity_state . h " , <nl> grpc_cc_library ( <nl> public_hdrs = [ <nl> " include / grpc + + / impl / codegen / async_stream . h " , <nl> " include / grpc + + / impl / codegen / async_unary_call . h " , <nl> - " include / grpc + + / impl / codegen / byte_buffer . h " , <nl> " include / grpc + + / impl / codegen / call . h " , <nl> " include / grpc + + / impl / codegen / call_hook . h " , <nl> " include / grpc + + / impl / codegen / channel_interface . h " , <nl> mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> target_link_libraries ( grpc <nl> ) <nl> <nl> foreach ( _hdr <nl> - include / grpc / impl / codegen / byte_buffer . h <nl> include / grpc / impl / codegen / byte_buffer_reader . h <nl> include / grpc / impl / codegen / compression_types . h <nl> include / grpc / impl / codegen / connectivity_state . h <nl> target_link_libraries ( grpc_cronet <nl> ) <nl> <nl> foreach ( _hdr <nl> - include / grpc / impl / codegen / byte_buffer . h <nl> include / grpc / impl / codegen / byte_buffer_reader . h <nl> include / grpc / impl / codegen / compression_types . h <nl> include / grpc / impl / codegen / connectivity_state . h <nl> target_link_libraries ( grpc_test_util <nl> ) <nl> <nl> foreach ( _hdr <nl> - include / grpc / impl / codegen / byte_buffer . h <nl> include / grpc / impl / codegen / byte_buffer_reader . h <nl> include / grpc / impl / codegen / compression_types . h <nl> include / grpc / impl / codegen / connectivity_state . h <nl> target_link_libraries ( grpc_test_util_unsecure <nl> ) <nl> <nl> foreach ( _hdr <nl> - include / grpc / impl / codegen / byte_buffer . h <nl> include / grpc / impl / codegen / byte_buffer_reader . h <nl> include / grpc / impl / codegen / compression_types . h <nl> include / grpc / impl / codegen / connectivity_state . h <nl> target_link_libraries ( grpc_unsecure <nl> ) <nl> <nl> foreach ( _hdr <nl> - include / grpc / impl / codegen / byte_buffer . h <nl> include / grpc / impl / codegen / byte_buffer_reader . h <nl> include / grpc / impl / codegen / compression_types . h <nl> include / grpc / impl / codegen / connectivity_state . h <nl> foreach ( _hdr <nl> include / grpc / slice_buffer . h <nl> include / grpc / status . h <nl> include / grpc / support / workaround_list . h <nl> - include / grpc / impl / codegen / byte_buffer . h <nl> include / grpc / impl / codegen / byte_buffer_reader . h <nl> include / grpc / impl / codegen / compression_types . h <nl> include / grpc / impl / codegen / connectivity_state . h <nl> foreach ( _hdr <nl> include / grpc / impl / codegen / status . h <nl> include / grpc + + / impl / codegen / async_stream . h <nl> include / grpc + + / impl / codegen / async_unary_call . h <nl> - include / grpc + + / impl / codegen / byte_buffer . h <nl> include / grpc + + / impl / codegen / call . h <nl> include / grpc + + / impl / codegen / call_hook . h <nl> include / grpc + + / impl / codegen / channel_interface . h <nl> foreach ( _hdr <nl> include / grpc / slice_buffer . h <nl> include / grpc / status . h <nl> include / grpc / support / workaround_list . h <nl> - include / grpc / impl / codegen / byte_buffer . 
h <nl> include / grpc / impl / codegen / byte_buffer_reader . h <nl> include / grpc / impl / codegen / compression_types . h <nl> include / grpc / impl / codegen / connectivity_state . h <nl> foreach ( _hdr <nl> include / grpc / impl / codegen / status . h <nl> include / grpc + + / impl / codegen / async_stream . h <nl> include / grpc + + / impl / codegen / async_unary_call . h <nl> - include / grpc + + / impl / codegen / byte_buffer . h <nl> include / grpc + + / impl / codegen / call . h <nl> include / grpc + + / impl / codegen / call_hook . h <nl> include / grpc + + / impl / codegen / channel_interface . h <nl> target_link_libraries ( grpc + + _test_util <nl> foreach ( _hdr <nl> include / grpc + + / impl / codegen / async_stream . h <nl> include / grpc + + / impl / codegen / async_unary_call . h <nl> - include / grpc + + / impl / codegen / byte_buffer . h <nl> include / grpc + + / impl / codegen / call . h <nl> include / grpc + + / impl / codegen / call_hook . h <nl> include / grpc + + / impl / codegen / channel_interface . h <nl> foreach ( _hdr <nl> include / grpc + + / impl / codegen / stub_options . h <nl> include / grpc + + / impl / codegen / sync_stream . h <nl> include / grpc + + / impl / codegen / time . h <nl> - include / grpc / impl / codegen / byte_buffer . h <nl> include / grpc / impl / codegen / byte_buffer_reader . h <nl> include / grpc / impl / codegen / compression_types . h <nl> include / grpc / impl / codegen / connectivity_state . h <nl> target_link_libraries ( grpc + + _test_util_unsecure <nl> foreach ( _hdr <nl> include / grpc + + / impl / codegen / async_stream . h <nl> include / grpc + + / impl / codegen / async_unary_call . h <nl> - include / grpc + + / impl / codegen / byte_buffer . h <nl> include / grpc + + / impl / codegen / call . h <nl> include / grpc + + / impl / codegen / call_hook . h <nl> include / grpc + + / impl / codegen / channel_interface . h <nl> foreach ( _hdr <nl> include / grpc + + / impl / codegen / stub_options . h <nl> include / grpc + + / impl / codegen / sync_stream . h <nl> include / grpc + + / impl / codegen / time . h <nl> - include / grpc / impl / codegen / byte_buffer . h <nl> include / grpc / impl / codegen / byte_buffer_reader . h <nl> include / grpc / impl / codegen / compression_types . h <nl> include / grpc / impl / codegen / connectivity_state . h <nl> foreach ( _hdr <nl> include / grpc / slice_buffer . h <nl> include / grpc / status . h <nl> include / grpc / support / workaround_list . h <nl> - include / grpc / impl / codegen / byte_buffer . h <nl> include / grpc / impl / codegen / byte_buffer_reader . h <nl> include / grpc / impl / codegen / compression_types . h <nl> include / grpc / impl / codegen / connectivity_state . h <nl> foreach ( _hdr <nl> include / grpc / impl / codegen / status . h <nl> include / grpc + + / impl / codegen / async_stream . h <nl> include / grpc + + / impl / codegen / async_unary_call . h <nl> - include / grpc + + / impl / codegen / byte_buffer . h <nl> include / grpc + + / impl / codegen / call . h <nl> include / grpc + + / impl / codegen / call_hook . h <nl> include / grpc + + / impl / codegen / channel_interface . h <nl> mmm a / Makefile <nl> ppp b / Makefile <nl> LIBGRPC_SRC = \ <nl> src / core / plugin_registry / grpc_plugin_registry . c \ <nl> <nl> PUBLIC_HEADERS_C + = \ <nl> - include / grpc / impl / codegen / byte_buffer . h \ <nl> include / grpc / impl / codegen / byte_buffer_reader . h \ <nl> include / grpc / impl / codegen / compression_types . 
h \ <nl> include / grpc / impl / codegen / connectivity_state . h \ <nl> LIBGRPC_CRONET_SRC = \ <nl> src / core / plugin_registry / grpc_cronet_plugin_registry . c \ <nl> <nl> PUBLIC_HEADERS_C + = \ <nl> - include / grpc / impl / codegen / byte_buffer . h \ <nl> include / grpc / impl / codegen / byte_buffer_reader . h \ <nl> include / grpc / impl / codegen / compression_types . h \ <nl> include / grpc / impl / codegen / connectivity_state . h \ <nl> LIBGRPC_TEST_UTIL_SRC = \ <nl> src / core / ext / filters / http / server / http_server_filter . c \ <nl> <nl> PUBLIC_HEADERS_C + = \ <nl> - include / grpc / impl / codegen / byte_buffer . h \ <nl> include / grpc / impl / codegen / byte_buffer_reader . h \ <nl> include / grpc / impl / codegen / compression_types . h \ <nl> include / grpc / impl / codegen / connectivity_state . h \ <nl> LIBGRPC_TEST_UTIL_UNSECURE_SRC = \ <nl> src / core / ext / filters / http / server / http_server_filter . c \ <nl> <nl> PUBLIC_HEADERS_C + = \ <nl> - include / grpc / impl / codegen / byte_buffer . h \ <nl> include / grpc / impl / codegen / byte_buffer_reader . h \ <nl> include / grpc / impl / codegen / compression_types . h \ <nl> include / grpc / impl / codegen / connectivity_state . h \ <nl> LIBGRPC_UNSECURE_SRC = \ <nl> src / core / plugin_registry / grpc_unsecure_plugin_registry . c \ <nl> <nl> PUBLIC_HEADERS_C + = \ <nl> - include / grpc / impl / codegen / byte_buffer . h \ <nl> include / grpc / impl / codegen / byte_buffer_reader . h \ <nl> include / grpc / impl / codegen / compression_types . h \ <nl> include / grpc / impl / codegen / connectivity_state . h \ <nl> PUBLIC_HEADERS_CXX + = \ <nl> include / grpc / slice_buffer . h \ <nl> include / grpc / status . h \ <nl> include / grpc / support / workaround_list . h \ <nl> - include / grpc / impl / codegen / byte_buffer . h \ <nl> include / grpc / impl / codegen / byte_buffer_reader . h \ <nl> include / grpc / impl / codegen / compression_types . h \ <nl> include / grpc / impl / codegen / connectivity_state . h \ <nl> PUBLIC_HEADERS_CXX + = \ <nl> include / grpc / impl / codegen / status . h \ <nl> include / grpc + + / impl / codegen / async_stream . h \ <nl> include / grpc + + / impl / codegen / async_unary_call . h \ <nl> - include / grpc + + / impl / codegen / byte_buffer . h \ <nl> include / grpc + + / impl / codegen / call . h \ <nl> include / grpc + + / impl / codegen / call_hook . h \ <nl> include / grpc + + / impl / codegen / channel_interface . h \ <nl> PUBLIC_HEADERS_CXX + = \ <nl> include / grpc / slice_buffer . h \ <nl> include / grpc / status . h \ <nl> include / grpc / support / workaround_list . h \ <nl> - include / grpc / impl / codegen / byte_buffer . h \ <nl> include / grpc / impl / codegen / byte_buffer_reader . h \ <nl> include / grpc / impl / codegen / compression_types . h \ <nl> include / grpc / impl / codegen / connectivity_state . h \ <nl> PUBLIC_HEADERS_CXX + = \ <nl> include / grpc / impl / codegen / status . h \ <nl> include / grpc + + / impl / codegen / async_stream . h \ <nl> include / grpc + + / impl / codegen / async_unary_call . h \ <nl> - include / grpc + + / impl / codegen / byte_buffer . h \ <nl> include / grpc + + / impl / codegen / call . h \ <nl> include / grpc + + / impl / codegen / call_hook . h \ <nl> include / grpc + + / impl / codegen / channel_interface . h \ <nl> LIBGRPC + + _TEST_UTIL_SRC = \ <nl> PUBLIC_HEADERS_CXX + = \ <nl> include / grpc + + / impl / codegen / async_stream . h \ <nl> include / grpc + + / impl / codegen / async_unary_call . 
h \ <nl> - include / grpc + + / impl / codegen / byte_buffer . h \ <nl> include / grpc + + / impl / codegen / call . h \ <nl> include / grpc + + / impl / codegen / call_hook . h \ <nl> include / grpc + + / impl / codegen / channel_interface . h \ <nl> PUBLIC_HEADERS_CXX + = \ <nl> include / grpc + + / impl / codegen / stub_options . h \ <nl> include / grpc + + / impl / codegen / sync_stream . h \ <nl> include / grpc + + / impl / codegen / time . h \ <nl> - include / grpc / impl / codegen / byte_buffer . h \ <nl> include / grpc / impl / codegen / byte_buffer_reader . h \ <nl> include / grpc / impl / codegen / compression_types . h \ <nl> include / grpc / impl / codegen / connectivity_state . h \ <nl> LIBGRPC + + _TEST_UTIL_UNSECURE_SRC = \ <nl> PUBLIC_HEADERS_CXX + = \ <nl> include / grpc + + / impl / codegen / async_stream . h \ <nl> include / grpc + + / impl / codegen / async_unary_call . h \ <nl> - include / grpc + + / impl / codegen / byte_buffer . h \ <nl> include / grpc + + / impl / codegen / call . h \ <nl> include / grpc + + / impl / codegen / call_hook . h \ <nl> include / grpc + + / impl / codegen / channel_interface . h \ <nl> PUBLIC_HEADERS_CXX + = \ <nl> include / grpc + + / impl / codegen / stub_options . h \ <nl> include / grpc + + / impl / codegen / sync_stream . h \ <nl> include / grpc + + / impl / codegen / time . h \ <nl> - include / grpc / impl / codegen / byte_buffer . h \ <nl> include / grpc / impl / codegen / byte_buffer_reader . h \ <nl> include / grpc / impl / codegen / compression_types . h \ <nl> include / grpc / impl / codegen / connectivity_state . h \ <nl> PUBLIC_HEADERS_CXX + = \ <nl> include / grpc / slice_buffer . h \ <nl> include / grpc / status . h \ <nl> include / grpc / support / workaround_list . h \ <nl> - include / grpc / impl / codegen / byte_buffer . h \ <nl> include / grpc / impl / codegen / byte_buffer_reader . h \ <nl> include / grpc / impl / codegen / compression_types . h \ <nl> include / grpc / impl / codegen / connectivity_state . h \ <nl> PUBLIC_HEADERS_CXX + = \ <nl> include / grpc / impl / codegen / status . h \ <nl> include / grpc + + / impl / codegen / async_stream . h \ <nl> include / grpc + + / impl / codegen / async_unary_call . h \ <nl> - include / grpc + + / impl / codegen / byte_buffer . h \ <nl> include / grpc + + / impl / codegen / call . h \ <nl> include / grpc + + / impl / codegen / call_hook . h \ <nl> include / grpc + + / impl / codegen / channel_interface . h \ <nl> mmm a / build . yaml <nl> ppp b / build . yaml <nl> filegroups : <nl> - grpc_deadline_filter <nl> - name : grpc_codegen <nl> public_headers : <nl> - - include / grpc / impl / codegen / byte_buffer . h <nl> - include / grpc / impl / codegen / byte_buffer_reader . h <nl> - include / grpc / impl / codegen / compression_types . h <nl> - include / grpc / impl / codegen / connectivity_state . h <nl> filegroups : <nl> public_headers : <nl> - include / grpc + + / impl / codegen / async_stream . h <nl> - include / grpc + + / impl / codegen / async_unary_call . h <nl> - - include / grpc + + / impl / codegen / byte_buffer . h <nl> - include / grpc + + / impl / codegen / call . h <nl> - include / grpc + + / impl / codegen / call_hook . h <nl> - include / grpc + + / impl / codegen / channel_interface . h <nl> mmm a / gRPC - Core . podspec <nl> ppp b / gRPC - Core . podspec <nl> Pod : : Spec . new do | s | <nl> ' include / grpc / impl / codegen / sync_generic . h ' , <nl> ' include / grpc / impl / codegen / sync_posix . 
h ' , <nl> ' include / grpc / impl / codegen / sync_windows . h ' , <nl> - ' include / grpc / impl / codegen / byte_buffer . h ' , <nl> ' include / grpc / impl / codegen / byte_buffer_reader . h ' , <nl> ' include / grpc / impl / codegen / compression_types . h ' , <nl> ' include / grpc / impl / codegen / connectivity_state . h ' , <nl> mmm a / grpc . def <nl> ppp b / grpc . def <nl> <nl> EXPORTS <nl> + grpc_raw_byte_buffer_create <nl> + grpc_raw_compressed_byte_buffer_create <nl> + grpc_byte_buffer_copy <nl> + grpc_byte_buffer_length <nl> + grpc_byte_buffer_destroy <nl> + grpc_byte_buffer_reader_init <nl> + grpc_byte_buffer_reader_destroy <nl> + grpc_byte_buffer_reader_next <nl> + grpc_byte_buffer_reader_readall <nl> + grpc_raw_byte_buffer_from_reader <nl> census_initialize <nl> census_shutdown <nl> census_supported <nl> EXPORTS <nl> grpc_server_add_secure_http2_port <nl> grpc_call_set_credentials <nl> grpc_server_credentials_set_auth_metadata_processor <nl> - grpc_raw_byte_buffer_create <nl> - grpc_raw_compressed_byte_buffer_create <nl> - grpc_byte_buffer_copy <nl> - grpc_byte_buffer_length <nl> - grpc_byte_buffer_destroy <nl> - grpc_byte_buffer_reader_init <nl> - grpc_byte_buffer_reader_destroy <nl> - grpc_byte_buffer_reader_next <nl> - grpc_byte_buffer_reader_readall <nl> - grpc_raw_byte_buffer_from_reader <nl> grpc_slice_ref <nl> grpc_slice_unref <nl> grpc_slice_copy <nl> mmm a / grpc . gemspec <nl> ppp b / grpc . gemspec <nl> Gem : : Specification . new do | s | <nl> s . files + = % w ( src / core / lib / support / tmpfile_posix . c ) <nl> s . files + = % w ( src / core / lib / support / tmpfile_windows . c ) <nl> s . files + = % w ( src / core / lib / support / wrap_memcpy . c ) <nl> - s . files + = % w ( include / grpc / impl / codegen / byte_buffer . h ) <nl> s . files + = % w ( include / grpc / impl / codegen / byte_buffer_reader . h ) <nl> s . files + = % w ( include / grpc / impl / codegen / compression_types . h ) <nl> s . files + = % w ( include / grpc / impl / codegen / connectivity_state . h ) <nl> deleted file mode 100644 <nl> index 87d390c688a . . 00000000000 <nl> mmm a / include / grpc + + / impl / codegen / byte_buffer . h <nl> ppp / dev / null <nl> <nl> - / * <nl> - * <nl> - * Copyright 2017 gRPC authors . <nl> - * <nl> - * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - * you may not use this file except in compliance with the License . <nl> - * You may obtain a copy of the License at <nl> - * <nl> - * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - * <nl> - * Unless required by applicable law or agreed to in writing , software <nl> - * distributed under the License is distributed on an " AS IS " BASIS , <nl> - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - * See the License for the specific language governing permissions and <nl> - * limitations under the License . <nl> - * <nl> - * / <nl> - <nl> - # ifndef GRPCXX_IMPL_CODEGEN_BYTE_BUFFER_H <nl> - # define GRPCXX_IMPL_CODEGEN_BYTE_BUFFER_H <nl> - <nl> - # include < grpc / impl / codegen / byte_buffer . h > <nl> - <nl> - # include < grpc + + / impl / codegen / config . h > <nl> - # include < grpc + + / impl / codegen / core_codegen_interface . h > <nl> - # include < grpc + + / impl / codegen / serialization_traits . h > <nl> - # include < grpc + + / impl / codegen / slice . h > <nl> - # include < grpc + + / impl / codegen / status . 
h > <nl> - <nl> - # include < vector > <nl> - <nl> - namespace grpc { <nl> - <nl> - template < class R > <nl> - class CallOpRecvMessage ; <nl> - class MethodHandler ; <nl> - namespace internal { <nl> - template < class M , class T > <nl> - class MessageDeserializer ; <nl> - } <nl> - <nl> - / / / A sequence of bytes . <nl> - class ByteBuffer final { <nl> - public : <nl> - / / / Constuct an empty buffer . <nl> - ByteBuffer ( ) : buffer_ ( nullptr ) { } <nl> - <nl> - / / / Construct buffer from \ a slices , of which there are \ a nslices . <nl> - ByteBuffer ( const Slice * slices , size_t nslices ) ; <nl> - <nl> - / / / Constuct a byte buffer by referencing elements of existing buffer <nl> - / / / \ a buf . Wrapper of core function grpc_byte_buffer_copy <nl> - ByteBuffer ( const ByteBuffer & buf ) ; <nl> - <nl> - ~ ByteBuffer ( ) { <nl> - if ( buffer_ ) { <nl> - g_core_codegen_interface - > grpc_byte_buffer_destroy ( buffer_ ) ; <nl> - } <nl> - } <nl> - <nl> - ByteBuffer & operator = ( const ByteBuffer & ) ; <nl> - <nl> - / / / Dump ( read ) the buffer contents into \ a slices . <nl> - Status Dump ( std : : vector < Slice > * slices ) const ; <nl> - <nl> - / / / Remove all data . <nl> - void Clear ( ) { <nl> - if ( buffer_ ) { <nl> - g_core_codegen_interface - > grpc_byte_buffer_destroy ( buffer_ ) ; <nl> - buffer_ = nullptr ; <nl> - } <nl> - } <nl> - <nl> - / / / Make a duplicate copy of the internals of this byte <nl> - / / / buffer so that we have our own owned version of it . <nl> - / / / bbuf . Duplicate ( ) ; is equivalent to bbuf = bbuf ; but is actually readable <nl> - void Duplicate ( ) { <nl> - buffer_ = g_core_codegen_interface - > grpc_byte_buffer_copy ( buffer_ ) ; <nl> - } <nl> - <nl> - / / / Forget underlying byte buffer without destroying <nl> - / / / Use this only for un - owned byte buffers <nl> - void Release ( ) { buffer_ = nullptr ; } <nl> - <nl> - / / / Buffer size in bytes . <nl> - size_t Length ( ) const ; <nl> - <nl> - / / / Swap the state of * this and * other . <nl> - void Swap ( ByteBuffer * other ) ; <nl> - <nl> - / / / Is this ByteBuffer valid ? <nl> - bool Valid ( ) const { return ( buffer_ ! = nullptr ) ; } <nl> - <nl> - private : <nl> - friend class SerializationTraits < ByteBuffer , void > ; <nl> - friend class CallOpSendMessage ; <nl> - template < class R > <nl> - friend class CallOpRecvMessage ; <nl> - friend class CallOpGenericRecvMessage ; <nl> - friend class MethodHandler ; <nl> - template < class M , class T > <nl> - friend class internal : : MessageDeserializer ; <nl> - <nl> - / / takes ownership <nl> - void set_buffer ( grpc_byte_buffer * buf ) { <nl> - if ( buffer_ ) { <nl> - Clear ( ) ; <nl> - } <nl> - buffer_ = buf ; <nl> - } <nl> - <nl> - grpc_byte_buffer * c_buffer ( ) { return buffer_ ; } <nl> - grpc_byte_buffer * * c_buffer_ptr ( ) { return & buffer_ ; } <nl> - <nl> - / / DEPRECATED : Implicit conversion to transparently <nl> - / / support deprecated SerializationTraits API <nl> - / / No need to inline since deprecated <nl> - operator grpc_byte_buffer * ( ) ; <nl> - operator const grpc_byte_buffer * ( ) const ; <nl> - <nl> - grpc_byte_buffer * buffer_ ; <nl> - } ; <nl> - <nl> - template < > <nl> - class SerializationTraits < ByteBuffer , void > { <nl> - public : <nl> - static Status Deserialize ( const ByteBuffer & byte_buffer , ByteBuffer * dest ) { <nl> - dest - > set_buffer ( byte_buffer . 
buffer_ ) ; <nl> - return Status : : OK ; <nl> - } <nl> - static Status Serialize ( const ByteBuffer & source , ByteBuffer * buffer , <nl> - bool * own_buffer ) { <nl> - * buffer = source ; <nl> - * own_buffer = true ; <nl> - return Status : : OK ; <nl> - } <nl> - } ; <nl> - <nl> - } / / namespace grpc <nl> - <nl> - # endif / / GRPCXX_IMPL_CODEGEN_BYTE_BUFFER_H <nl> mmm a / include / grpc + + / impl / codegen / call . h <nl> ppp b / include / grpc + + / impl / codegen / call . h <nl> <nl> # include < map > <nl> # include < memory > <nl> <nl> - # include < grpc + + / impl / codegen / byte_buffer . h > <nl> # include < grpc + + / impl / codegen / call_hook . h > <nl> # include < grpc + + / impl / codegen / client_context . h > <nl> # include < grpc + + / impl / codegen / completion_queue_tag . h > <nl> <nl> # include < grpc / impl / codegen / compression_types . h > <nl> # include < grpc / impl / codegen / grpc_types . h > <nl> <nl> + struct grpc_byte_buffer ; <nl> + <nl> namespace grpc { <nl> <nl> class ByteBuffer ; <nl> class CallOpSendInitialMetadata { <nl> <nl> class CallOpSendMessage { <nl> public : <nl> - CallOpSendMessage ( ) : send_buf_ ( ) { } <nl> + CallOpSendMessage ( ) : send_buf_ ( nullptr ) { } <nl> <nl> / / / Send \ a message using \ a options for the write . The \ a options are cleared <nl> / / / after use . <nl> class CallOpSendMessage { <nl> <nl> protected : <nl> void AddOp ( grpc_op * ops , size_t * nops ) { <nl> - if ( ! send_buf_ . Valid ( ) ) return ; <nl> + if ( send_buf_ = = nullptr ) return ; <nl> grpc_op * op = & ops [ ( * nops ) + + ] ; <nl> op - > op = GRPC_OP_SEND_MESSAGE ; <nl> op - > flags = write_options_ . flags ( ) ; <nl> op - > reserved = NULL ; <nl> - op - > data . send_message . send_message = send_buf_ . c_buffer ( ) ; <nl> + op - > data . send_message . send_message = send_buf_ ; <nl> / / Flags are per - message : clear them after use . <nl> write_options_ . Clear ( ) ; <nl> } <nl> - void FinishOp ( bool * status ) { send_buf_ . 
Clear ( ) ; } <nl> + void FinishOp ( bool * status ) { <nl> + g_core_codegen_interface - > grpc_byte_buffer_destroy ( send_buf_ ) ; <nl> + send_buf_ = nullptr ; <nl> + } <nl> <nl> private : <nl> - template < class M , class T = void > <nl> - class MessageSerializer ; <nl> - <nl> - ByteBuffer send_buf_ ; <nl> + grpc_byte_buffer * send_buf_ ; <nl> WriteOptions write_options_ ; <nl> } ; <nl> <nl> - namespace internal { <nl> - template < class T > <nl> - T Example ( ) ; <nl> - } / / namespace internal <nl> - <nl> - template < class M > <nl> - class CallOpSendMessage : : MessageSerializer < <nl> - M , typename std : : enable_if < std : : is_same < <nl> - : : grpc : : Status , decltype ( SerializationTraits < M > : : Serialize ( <nl> - internal : : Example < const M & > ( ) , <nl> - internal : : Example < grpc_byte_buffer * * > ( ) , <nl> - internal : : Example < bool * > ( ) ) ) > : : value > : : type > { <nl> - public : <nl> - static Status SendMessageInternal ( const M & message , ByteBuffer * bbuf , <nl> - bool * own_buf ) { <nl> - return SerializationTraits < M > : : Serialize ( message , bbuf - > c_buffer_ptr ( ) , <nl> - own_buf ) ; <nl> - } <nl> - } ; <nl> - <nl> - template < class M > <nl> - class CallOpSendMessage : : MessageSerializer < <nl> - M , typename std : : enable_if < std : : is_same < <nl> - : : grpc : : Status , decltype ( SerializationTraits < M > : : Serialize ( <nl> - internal : : Example < const M & > ( ) , <nl> - internal : : Example < : : grpc : : ByteBuffer * > ( ) , <nl> - internal : : Example < bool * > ( ) ) ) > : : value > : : type > { <nl> - public : <nl> - static Status SendMessageInternal ( const M & message , ByteBuffer * bbuf , <nl> - bool * own_buf ) { <nl> - return SerializationTraits < M > : : Serialize ( message , bbuf , own_buf ) ; <nl> - } <nl> - } ; <nl> - <nl> template < class M > <nl> Status CallOpSendMessage : : SendMessage ( const M & message , WriteOptions options ) { <nl> write_options_ = options ; <nl> bool own_buf ; <nl> Status result = <nl> - MessageSerializer < M > : : SendMessageInternal ( message , & send_buf_ , & own_buf ) ; <nl> + SerializationTraits < M > : : Serialize ( message , & send_buf_ , & own_buf ) ; <nl> if ( ! own_buf ) { <nl> - send_buf_ . 
Duplicate ( ) ; <nl> + send_buf_ = g_core_codegen_interface - > grpc_byte_buffer_copy ( send_buf_ ) ; <nl> } <nl> return result ; <nl> } <nl> Status CallOpSendMessage : : SendMessage ( const M & message ) { <nl> return SendMessage ( message , WriteOptions ( ) ) ; <nl> } <nl> <nl> - namespace internal { <nl> - template < class M , class T = void > <nl> - class MessageDeserializer ; <nl> - <nl> - template < class M > <nl> - class MessageDeserializer < <nl> - M , typename std : : enable_if < std : : is_same < <nl> - : : grpc : : Status , decltype ( SerializationTraits < M > : : Deserialize ( <nl> - internal : : Example < const : : grpc : : ByteBuffer & > ( ) , <nl> - internal : : Example < M * > ( ) ) ) > : : value > : : type > { <nl> - public : <nl> - static Status Deserialize ( const ByteBuffer & bbuf , M * message ) { <nl> - return SerializationTraits < M > : : Deserialize ( bbuf , message ) ; <nl> - } <nl> - } ; <nl> - <nl> - template < class M > <nl> - class MessageDeserializer < <nl> - M , typename std : : enable_if < std : : is_same < <nl> - : : grpc : : Status , decltype ( SerializationTraits < M > : : Deserialize ( <nl> - internal : : Example < grpc_byte_buffer * > ( ) , <nl> - internal : : Example < M * > ( ) ) ) > : : value > : : type > { <nl> - public : <nl> - static Status Deserialize ( const ByteBuffer & bbuf , M * message ) { <nl> - return SerializationTraits < M > : : Deserialize ( <nl> - const_cast < ByteBuffer & > ( bbuf ) . c_buffer ( ) , message ) ; <nl> - } <nl> - } ; <nl> - } / / namespace internal <nl> - <nl> template < class R > <nl> class CallOpRecvMessage { <nl> public : <nl> class CallOpRecvMessage { <nl> op - > op = GRPC_OP_RECV_MESSAGE ; <nl> op - > flags = 0 ; <nl> op - > reserved = NULL ; <nl> - op - > data . recv_message . recv_message = recv_buf_ . c_buffer_ptr ( ) ; <nl> + op - > data . recv_message . recv_message = & recv_buf_ ; <nl> } <nl> <nl> void FinishOp ( bool * status ) { <nl> if ( message_ = = nullptr ) return ; <nl> - if ( recv_buf_ . Valid ( ) ) { <nl> + if ( recv_buf_ ) { <nl> if ( * status ) { <nl> got_message = * status = <nl> - internal : : MessageDeserializer < R > : : Deserialize ( recv_buf_ , message_ ) <nl> - . ok ( ) ; <nl> - recv_buf_ . Release ( ) ; <nl> + SerializationTraits < R > : : Deserialize ( recv_buf_ , message_ ) . ok ( ) ; <nl> } else { <nl> got_message = false ; <nl> - recv_buf_ . 
Clear ( ) ; <nl> + g_core_codegen_interface - > grpc_byte_buffer_destroy ( recv_buf_ ) ; <nl> } <nl> } else { <nl> got_message = false ; <nl> class CallOpRecvMessage { <nl> <nl> private : <nl> R * message_ ; <nl> - ByteBuffer recv_buf_ ; <nl> + grpc_byte_buffer * recv_buf_ ; <nl> bool allow_not_getting_message_ ; <nl> } ; <nl> <nl> namespace CallOpGenericRecvMessageHelper { <nl> class DeserializeFunc { <nl> public : <nl> - virtual Status Deserialize ( const ByteBuffer & buf ) = 0 ; <nl> + virtual Status Deserialize ( grpc_byte_buffer * buf ) = 0 ; <nl> virtual ~ DeserializeFunc ( ) { } <nl> } ; <nl> <nl> template < class R > <nl> class DeserializeFuncType final : public DeserializeFunc { <nl> public : <nl> DeserializeFuncType ( R * message ) : message_ ( message ) { } <nl> - Status Deserialize ( const ByteBuffer & buf ) override { <nl> - return grpc : : internal : : MessageDeserializer < R > : : Deserialize ( buf , message_ ) ; <nl> + Status Deserialize ( grpc_byte_buffer * buf ) override { <nl> + return SerializationTraits < R > : : Deserialize ( buf , message_ ) ; <nl> } <nl> <nl> ~ DeserializeFuncType ( ) override { } <nl> class CallOpGenericRecvMessage { <nl> op - > op = GRPC_OP_RECV_MESSAGE ; <nl> op - > flags = 0 ; <nl> op - > reserved = NULL ; <nl> - op - > data . recv_message . recv_message = recv_buf_ . c_buffer_ptr ( ) ; <nl> + op - > data . recv_message . recv_message = & recv_buf_ ; <nl> } <nl> <nl> void FinishOp ( bool * status ) { <nl> if ( ! deserialize_ ) return ; <nl> - if ( recv_buf_ . Valid ( ) ) { <nl> + if ( recv_buf_ ) { <nl> if ( * status ) { <nl> got_message = true ; <nl> * status = deserialize_ - > Deserialize ( recv_buf_ ) . ok ( ) ; <nl> - recv_buf_ . Release ( ) ; <nl> } else { <nl> got_message = false ; <nl> - recv_buf_ . Clear ( ) ; <nl> + g_core_codegen_interface - > grpc_byte_buffer_destroy ( recv_buf_ ) ; <nl> } <nl> } else { <nl> got_message = false ; <nl> class CallOpGenericRecvMessage { <nl> <nl> private : <nl> std : : unique_ptr < CallOpGenericRecvMessageHelper : : DeserializeFunc > deserialize_ ; <nl> - ByteBuffer recv_buf_ ; <nl> + grpc_byte_buffer * recv_buf_ ; <nl> bool allow_not_getting_message_ ; <nl> } ; <nl> <nl> mmm a / include / grpc + + / impl / codegen / method_handler_impl . h <nl> ppp b / include / grpc + + / impl / codegen / method_handler_impl . h <nl> <nl> # ifndef GRPCXX_IMPL_CODEGEN_METHOD_HANDLER_IMPL_H <nl> # define GRPCXX_IMPL_CODEGEN_METHOD_HANDLER_IMPL_H <nl> <nl> - # include < grpc + + / impl / codegen / byte_buffer . h > <nl> # include < grpc + + / impl / codegen / core_codegen_interface . h > <nl> # include < grpc + + / impl / codegen / rpc_service_method . h > <nl> # include < grpc + + / impl / codegen / sync_stream . h > <nl> class RpcMethodHandler : public MethodHandler { <nl> <nl> void RunHandler ( const HandlerParameter & param ) final { <nl> RequestType req ; <nl> - Status status = internal : : MessageDeserializer < RequestType > : : Deserialize ( <nl> - param . request , & req ) ; <nl> + Status status = <nl> + SerializationTraits < RequestType > : : Deserialize ( param . request , & req ) ; <nl> ResponseType rsp ; <nl> if ( status . ok ( ) ) { <nl> status = func_ ( service_ , param . server_context , & req , & rsp ) ; <nl> class ServerStreamingHandler : public MethodHandler { <nl> <nl> void RunHandler ( const HandlerParameter & param ) final { <nl> RequestType req ; <nl> - Status status = internal : : MessageDeserializer < RequestType > : : Deserialize ( <nl> - param . 
request , & req ) ; <nl> + Status status = <nl> + SerializationTraits < RequestType > : : Deserialize ( param . request , & req ) ; <nl> <nl> if ( status . ok ( ) ) { <nl> ServerWriter < ResponseType > writer ( param . call , param . server_context ) ; <nl> mmm a / include / grpc + + / impl / codegen / rpc_service_method . h <nl> ppp b / include / grpc + + / impl / codegen / rpc_service_method . h <nl> <nl> # include < memory > <nl> # include < vector > <nl> <nl> - # include < grpc + + / impl / codegen / byte_buffer . h > <nl> # include < grpc + + / impl / codegen / config . h > <nl> # include < grpc + + / impl / codegen / rpc_method . h > <nl> # include < grpc + + / impl / codegen / status . h > <nl> <nl> + extern " C " { <nl> + struct grpc_byte_buffer ; <nl> + } <nl> + <nl> namespace grpc { <nl> class ServerContext ; <nl> class StreamContextInterface ; <nl> class MethodHandler { <nl> virtual ~ MethodHandler ( ) { } <nl> struct HandlerParameter { <nl> HandlerParameter ( Call * c , ServerContext * context , grpc_byte_buffer * req ) <nl> - : call ( c ) , server_context ( context ) { <nl> - request . set_buffer ( req ) ; <nl> - } <nl> - ~ HandlerParameter ( ) { request . Release ( ) ; } <nl> + : call ( c ) , server_context ( context ) , request ( req ) { } <nl> Call * call ; <nl> ServerContext * server_context ; <nl> - / / Handler required to destroy these contents <nl> - ByteBuffer request ; <nl> + / / Handler required to grpc_byte_buffer_destroy this <nl> + grpc_byte_buffer * request ; <nl> } ; <nl> virtual void RunHandler ( const HandlerParameter & param ) = 0 ; <nl> } ; <nl> mmm a / include / grpc + + / impl / codegen / serialization_traits . h <nl> ppp b / include / grpc + + / impl / codegen / serialization_traits . h <nl> namespace grpc { <nl> / / / Used for hooking different message serialization API ' s into GRPC . <nl> / / / Each SerializationTraits implementation must provide the following <nl> / / / functions : <nl> - / / / 1 . static Status Serialize ( const Message & msg , <nl> - / / / ByteBuffer * buffer , <nl> - / / / bool * own_buffer ) ; <nl> - / / / AND / OR <nl> - / / / static Status Serialize ( const Message & msg , <nl> - / / / grpc_byte_buffer * * buffer , <nl> - / / / bool * own_buffer ) ; <nl> - / / / The former is preferred ; the latter is deprecated <nl> + / / / static Status Serialize ( const Message & msg , <nl> + / / / grpc_byte_buffer * * buffer , <nl> + / / / bool * own_buffer ) ; <nl> + / / / static Status Deserialize ( grpc_byte_buffer * buffer , <nl> + / / / Message * msg , <nl> + / / / int max_receive_message_size ) ; <nl> / / / <nl> - / / / 2 . static Status Deserialize ( const ByteBuffer & buffer , <nl> - / / / Message * msg ) ; <nl> - / / / AND / OR <nl> - / / / static Status Deserialize ( grpc_byte_buffer * buffer , <nl> - / / / Message * msg ) ; <nl> - / / / The former is preferred ; the latter is deprecated <nl> - / / / <nl> - / / / Serialize is required to convert message to a ByteBuffer , and <nl> - / / / return that byte buffer through * buffer . * own_buffer should <nl> + / / / Serialize is required to convert message to a grpc_byte_buffer , and <nl> + / / / to store a pointer to that byte buffer at * buffer . * own_buffer should <nl> / / / be set to true if the caller owns said byte buffer , or false if <nl> / / / ownership is retained elsewhere . <nl> / / / <nl> mmm a / include / grpc + + / impl / codegen / slice . h <nl> ppp b / include / grpc + + / impl / codegen / slice . 
h <nl> <nl> # ifndef GRPCXX_IMPL_CODEGEN_SLICE_H <nl> # define GRPCXX_IMPL_CODEGEN_SLICE_H <nl> <nl> - # include < grpc + + / impl / codegen / config . h > <nl> # include < grpc + + / impl / codegen / core_codegen_interface . h > <nl> # include < grpc + + / impl / codegen / string_ref . h > <nl> <nl> - # include < grpc / impl / codegen / slice . h > <nl> - <nl> namespace grpc { <nl> <nl> - / / / A wrapper around \ a grpc_slice . <nl> - / / / <nl> - / / / A slice represents a contiguous reference counted array of bytes . <nl> - / / / It is cheap to take references to a slice , and it is cheap to create a <nl> - / / / slice pointing to a subset of another slice . <nl> - class Slice final { <nl> - public : <nl> - / / / Construct an empty slice . <nl> - Slice ( ) ; <nl> - / / / Destructor - drops one reference . <nl> - ~ Slice ( ) ; <nl> - <nl> - enum AddRef { ADD_REF } ; <nl> - / / / Construct a slice from \ a slice , adding a reference . <nl> - Slice ( grpc_slice slice , AddRef ) ; <nl> - <nl> - enum StealRef { STEAL_REF } ; <nl> - / / / Construct a slice from \ a slice , stealing a reference . <nl> - Slice ( grpc_slice slice , StealRef ) ; <nl> - <nl> - / / / Allocate a slice of specified size <nl> - Slice ( size_t len ) ; <nl> - <nl> - / / / Construct a slice from a copied buffer <nl> - Slice ( const void * buf , size_t len ) ; <nl> - <nl> - / / / Construct a slice from a copied string <nl> - Slice ( const grpc : : string & str ) ; <nl> - <nl> - enum StaticSlice { STATIC_SLICE } ; <nl> - <nl> - / / / Construct a slice from a static buffer <nl> - Slice ( const void * buf , size_t len , StaticSlice ) ; <nl> - <nl> - / / / Copy constructor , adds a reference . <nl> - Slice ( const Slice & other ) ; <nl> - <nl> - / / / Assignment , reference count is unchanged . <nl> - Slice & operator = ( Slice other ) { <nl> - std : : swap ( slice_ , other . slice_ ) ; <nl> - return * this ; <nl> - } <nl> - <nl> - / / / Create a slice pointing at some data . Calls malloc to allocate a refcount <nl> - / / / for the object , and arranges that destroy will be called with the <nl> - / / / user data pointer passed in at destruction . Can be the same as buf or <nl> - / / / different ( e . g . , if data is part of a larger structure that must be <nl> - / / / destroyed when the data is no longer needed ) <nl> - Slice ( void * buf , size_t len , void ( * destroy ) ( void * ) , void * user_data ) ; <nl> - <nl> - / / / Specialization of above for common case where buf = = user_data <nl> - Slice ( void * buf , size_t len , void ( * destroy ) ( void * ) ) <nl> - : Slice ( buf , len , destroy , buf ) { } <nl> - <nl> - / / / Similar to the above but has a destroy that also takes slice length <nl> - Slice ( void * buf , size_t len , void ( * destroy ) ( void * , size_t ) ) ; <nl> - <nl> - / / / Byte size . <nl> - size_t size ( ) const { return GRPC_SLICE_LENGTH ( slice_ ) ; } <nl> - <nl> - / / / Raw pointer to the beginning ( first element ) of the slice . <nl> - const uint8_t * begin ( ) const { return GRPC_SLICE_START_PTR ( slice_ ) ; } <nl> - <nl> - / / / Raw pointer to the end ( one byte \ em past the last element ) of the slice . <nl> - const uint8_t * end ( ) const { return GRPC_SLICE_END_PTR ( slice_ ) ; } <nl> - <nl> - / / / Raw C slice . Caller needs to call grpc_slice_unref when done . 
<nl> - grpc_slice c_slice ( ) const ; <nl> - <nl> - private : <nl> - friend class ByteBuffer ; <nl> - <nl> - grpc_slice slice_ ; <nl> - } ; <nl> - <nl> inline grpc : : string_ref StringRefFromSlice ( const grpc_slice * slice ) { <nl> return grpc : : string_ref ( <nl> reinterpret_cast < const char * > ( GRPC_SLICE_START_PTR ( * slice ) ) , <nl> mmm a / include / grpc + + / support / byte_buffer . h <nl> ppp b / include / grpc + + / support / byte_buffer . h <nl> <nl> # ifndef GRPCXX_SUPPORT_BYTE_BUFFER_H <nl> # define GRPCXX_SUPPORT_BYTE_BUFFER_H <nl> <nl> - # include < grpc + + / impl / codegen / byte_buffer . h > <nl> # include < grpc + + / impl / serialization_traits . h > <nl> # include < grpc + + / support / config . h > <nl> # include < grpc + + / support / slice . h > <nl> <nl> # include < grpc / grpc . h > <nl> # include < grpc / support / log . h > <nl> <nl> + # include < vector > <nl> + <nl> + namespace grpc { <nl> + <nl> + / / / A sequence of bytes . <nl> + class ByteBuffer final { <nl> + public : <nl> + / / / Constuct an empty buffer . <nl> + ByteBuffer ( ) : buffer_ ( nullptr ) { } <nl> + <nl> + / / / Construct buffer from \ a slices , of which there are \ a nslices . <nl> + ByteBuffer ( const Slice * slices , size_t nslices ) ; <nl> + <nl> + / / / Constuct a byte buffer by referencing elements of existing buffer <nl> + / / / \ a buf . Wrapper of core function grpc_byte_buffer_copy <nl> + ByteBuffer ( const ByteBuffer & buf ) ; <nl> + <nl> + ~ ByteBuffer ( ) ; <nl> + <nl> + ByteBuffer & operator = ( const ByteBuffer & ) ; <nl> + <nl> + / / / Dump ( read ) the buffer contents into \ a slices . <nl> + Status Dump ( std : : vector < Slice > * slices ) const ; <nl> + <nl> + / / / Remove all data . <nl> + void Clear ( ) ; <nl> + <nl> + / / / Buffer size in bytes . <nl> + size_t Length ( ) const ; <nl> + <nl> + / / / Swap the state of * this and * other . <nl> + void Swap ( ByteBuffer * other ) ; <nl> + <nl> + private : <nl> + friend class SerializationTraits < ByteBuffer , void > ; <nl> + <nl> + / / takes ownership <nl> + void set_buffer ( grpc_byte_buffer * buf ) { <nl> + if ( buffer_ ) { <nl> + Clear ( ) ; <nl> + } <nl> + buffer_ = buf ; <nl> + } <nl> + <nl> + / / For \ a SerializationTraits ' s usage . <nl> + grpc_byte_buffer * buffer ( ) const { return buffer_ ; } <nl> + <nl> + grpc_byte_buffer * buffer_ ; <nl> + } ; <nl> + <nl> + template < > <nl> + class SerializationTraits < ByteBuffer , void > { <nl> + public : <nl> + static Status Deserialize ( grpc_byte_buffer * byte_buffer , ByteBuffer * dest ) { <nl> + dest - > set_buffer ( byte_buffer ) ; <nl> + return Status : : OK ; <nl> + } <nl> + static Status Serialize ( const ByteBuffer & source , grpc_byte_buffer * * buffer , <nl> + bool * own_buffer ) { <nl> + * buffer = grpc_byte_buffer_copy ( source . buffer ( ) ) ; <nl> + * own_buffer = true ; <nl> + return Status : : OK ; <nl> + } <nl> + } ; <nl> + <nl> + } / / namespace grpc <nl> + <nl> # endif / / GRPCXX_SUPPORT_BYTE_BUFFER_H <nl> mmm a / include / grpc + + / support / slice . h <nl> ppp b / include / grpc + + / support / slice . h <nl> <nl> # ifndef GRPCXX_SUPPORT_SLICE_H <nl> # define GRPCXX_SUPPORT_SLICE_H <nl> <nl> - # include < grpc + + / impl / codegen / slice . h > <nl> # include < grpc + + / support / config . h > <nl> # include < grpc / slice . h > <nl> <nl> + namespace grpc { <nl> + <nl> + / / / A wrapper around \ a grpc_slice . <nl> + / / / <nl> + / / / A slice represents a contiguous reference counted array of bytes . 
<nl> + / / / It is cheap to take references to a slice , and it is cheap to create a <nl> + / / / slice pointing to a subset of another slice . <nl> + class Slice final { <nl> + public : <nl> + / / / Construct an empty slice . <nl> + Slice ( ) ; <nl> + / / / Destructor - drops one reference . <nl> + ~ Slice ( ) ; <nl> + <nl> + enum AddRef { ADD_REF } ; <nl> + / / / Construct a slice from \ a slice , adding a reference . <nl> + Slice ( grpc_slice slice , AddRef ) ; <nl> + <nl> + enum StealRef { STEAL_REF } ; <nl> + / / / Construct a slice from \ a slice , stealing a reference . <nl> + Slice ( grpc_slice slice , StealRef ) ; <nl> + <nl> + / / / Allocate a slice of specified size <nl> + Slice ( size_t len ) ; <nl> + <nl> + / / / Construct a slice from a copied buffer <nl> + Slice ( const void * buf , size_t len ) ; <nl> + <nl> + / / / Construct a slice from a copied string <nl> + Slice ( const grpc : : string & str ) ; <nl> + <nl> + enum StaticSlice { STATIC_SLICE } ; <nl> + <nl> + / / / Construct a slice from a static buffer <nl> + Slice ( const void * buf , size_t len , StaticSlice ) ; <nl> + <nl> + / / / Copy constructor , adds a reference . <nl> + Slice ( const Slice & other ) ; <nl> + <nl> + / / / Assignment , reference count is unchanged . <nl> + Slice & operator = ( Slice other ) { <nl> + std : : swap ( slice_ , other . slice_ ) ; <nl> + return * this ; <nl> + } <nl> + <nl> + / / / Create a slice pointing at some data . Calls malloc to allocate a refcount <nl> + / / / for the object , and arranges that destroy will be called with the <nl> + / / / user data pointer passed in at destruction . Can be the same as buf or <nl> + / / / different ( e . g . , if data is part of a larger structure that must be <nl> + / / / destroyed when the data is no longer needed ) <nl> + Slice ( void * buf , size_t len , void ( * destroy ) ( void * ) , void * user_data ) ; <nl> + <nl> + / / / Specialization of above for common case where buf = = user_data <nl> + Slice ( void * buf , size_t len , void ( * destroy ) ( void * ) ) <nl> + : Slice ( buf , len , destroy , buf ) { } <nl> + <nl> + / / / Similar to the above but has a destroy that also takes slice length <nl> + Slice ( void * buf , size_t len , void ( * destroy ) ( void * , size_t ) ) ; <nl> + <nl> + / / / Byte size . <nl> + size_t size ( ) const { return GRPC_SLICE_LENGTH ( slice_ ) ; } <nl> + <nl> + / / / Raw pointer to the beginning ( first element ) of the slice . <nl> + const uint8_t * begin ( ) const { return GRPC_SLICE_START_PTR ( slice_ ) ; } <nl> + <nl> + / / / Raw pointer to the end ( one byte \ em past the last element ) of the slice . <nl> + const uint8_t * end ( ) const { return GRPC_SLICE_END_PTR ( slice_ ) ; } <nl> + <nl> + / / / Raw C slice . Caller needs to call grpc_slice_unref when done . <nl> + grpc_slice c_slice ( ) const { return grpc_slice_ref ( slice_ ) ; } <nl> + <nl> + private : <nl> + friend class ByteBuffer ; <nl> + <nl> + grpc_slice slice_ ; <nl> + } ; <nl> + <nl> + } / / namespace grpc <nl> + <nl> # endif / / GRPCXX_SUPPORT_SLICE_H <nl> mmm a / include / grpc / byte_buffer . h <nl> ppp b / include / grpc / byte_buffer . h <nl> <nl> # ifndef GRPC_BYTE_BUFFER_H <nl> # define GRPC_BYTE_BUFFER_H <nl> <nl> - # include < grpc / impl / codegen / byte_buffer . h > <nl> + # include < grpc / impl / codegen / grpc_types . h > <nl> # include < grpc / slice_buffer . 
h > <nl> <nl> + # ifdef __cplusplus <nl> + extern " C " { <nl> + # endif <nl> + <nl> + / * * Returns a RAW byte buffer instance over the given slices ( up to \ a nslices ) . <nl> + * <nl> + * Increases the reference count for all \ a slices processed . The user is <nl> + * responsible for invoking grpc_byte_buffer_destroy on the returned instance . * / <nl> + GRPCAPI grpc_byte_buffer * grpc_raw_byte_buffer_create ( grpc_slice * slices , <nl> + size_t nslices ) ; <nl> + <nl> + / * * Returns a * compressed * RAW byte buffer instance over the given slices ( up to <nl> + * \ a nslices ) . The \ a compression argument defines the compression algorithm <nl> + * used to generate the data in \ a slices . <nl> + * <nl> + * Increases the reference count for all \ a slices processed . The user is <nl> + * responsible for invoking grpc_byte_buffer_destroy on the returned instance . * / <nl> + GRPCAPI grpc_byte_buffer * grpc_raw_compressed_byte_buffer_create ( <nl> + grpc_slice * slices , size_t nslices , grpc_compression_algorithm compression ) ; <nl> + <nl> + / * * Copies input byte buffer \ a bb . <nl> + * <nl> + * Increases the reference count of all the source slices . The user is <nl> + * responsible for calling grpc_byte_buffer_destroy over the returned copy . * / <nl> + GRPCAPI grpc_byte_buffer * grpc_byte_buffer_copy ( grpc_byte_buffer * bb ) ; <nl> + <nl> + / * * Returns the size of the given byte buffer , in bytes . * / <nl> + GRPCAPI size_t grpc_byte_buffer_length ( grpc_byte_buffer * bb ) ; <nl> + <nl> + / * * Destroys \ a byte_buffer deallocating all its memory . * / <nl> + GRPCAPI void grpc_byte_buffer_destroy ( grpc_byte_buffer * byte_buffer ) ; <nl> + <nl> + / * * Reader for byte buffers . Iterates over slices in the byte buffer * / <nl> + struct grpc_byte_buffer_reader ; <nl> + typedef struct grpc_byte_buffer_reader grpc_byte_buffer_reader ; <nl> + <nl> + / * * Initialize \ a reader to read over \ a buffer . <nl> + * Returns 1 upon success , 0 otherwise . * / <nl> + GRPCAPI int grpc_byte_buffer_reader_init ( grpc_byte_buffer_reader * reader , <nl> + grpc_byte_buffer * buffer ) ; <nl> + <nl> + / * * Cleanup and destroy \ a reader * / <nl> + GRPCAPI void grpc_byte_buffer_reader_destroy ( grpc_byte_buffer_reader * reader ) ; <nl> + <nl> + / * * Updates \ a slice with the next piece of data from from \ a reader and returns <nl> + * 1 . Returns 0 at the end of the stream . Caller is responsible for calling <nl> + * grpc_slice_unref on the result . * / <nl> + GRPCAPI int grpc_byte_buffer_reader_next ( grpc_byte_buffer_reader * reader , <nl> + grpc_slice * slice ) ; <nl> + <nl> + / * * Merge all data from \ a reader into single slice * / <nl> + GRPCAPI grpc_slice <nl> + grpc_byte_buffer_reader_readall ( grpc_byte_buffer_reader * reader ) ; <nl> + <nl> + / * * Returns a RAW byte buffer instance from the output of \ a reader . * / <nl> + GRPCAPI grpc_byte_buffer * grpc_raw_byte_buffer_from_reader ( <nl> + grpc_byte_buffer_reader * reader ) ; <nl> + <nl> + # ifdef __cplusplus <nl> + } <nl> + # endif <nl> + <nl> # endif / * GRPC_BYTE_BUFFER_H * / <nl> deleted file mode 100644 <nl> index fc333057134 . . 00000000000 <nl> mmm a / include / grpc / impl / codegen / byte_buffer . h <nl> ppp / dev / null <nl> <nl> - / * <nl> - * <nl> - * Copyright 2015 gRPC authors . <nl> - * <nl> - * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - * you may not use this file except in compliance with the License . 
<nl> - * You may obtain a copy of the License at <nl> - * <nl> - * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - * <nl> - * Unless required by applicable law or agreed to in writing , software <nl> - * distributed under the License is distributed on an " AS IS " BASIS , <nl> - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - * See the License for the specific language governing permissions and <nl> - * limitations under the License . <nl> - * <nl> - * / <nl> - <nl> - # ifndef GRPC_IMPL_CODEGEN_BYTE_BUFFER_H <nl> - # define GRPC_IMPL_CODEGEN_BYTE_BUFFER_H <nl> - <nl> - # include < grpc / impl / codegen / grpc_types . h > <nl> - <nl> - # ifdef __cplusplus <nl> - extern " C " { <nl> - # endif <nl> - <nl> - / * * Returns a RAW byte buffer instance over the given slices ( up to \ a nslices ) . <nl> - * <nl> - * Increases the reference count for all \ a slices processed . The user is <nl> - * responsible for invoking grpc_byte_buffer_destroy on the returned instance . * / <nl> - GRPCAPI grpc_byte_buffer * grpc_raw_byte_buffer_create ( grpc_slice * slices , <nl> - size_t nslices ) ; <nl> - <nl> - / * * Returns a * compressed * RAW byte buffer instance over the given slices ( up to <nl> - * \ a nslices ) . The \ a compression argument defines the compression algorithm <nl> - * used to generate the data in \ a slices . <nl> - * <nl> - * Increases the reference count for all \ a slices processed . The user is <nl> - * responsible for invoking grpc_byte_buffer_destroy on the returned instance . * / <nl> - GRPCAPI grpc_byte_buffer * grpc_raw_compressed_byte_buffer_create ( <nl> - grpc_slice * slices , size_t nslices , grpc_compression_algorithm compression ) ; <nl> - <nl> - / * * Copies input byte buffer \ a bb . <nl> - * <nl> - * Increases the reference count of all the source slices . The user is <nl> - * responsible for calling grpc_byte_buffer_destroy over the returned copy . * / <nl> - GRPCAPI grpc_byte_buffer * grpc_byte_buffer_copy ( grpc_byte_buffer * bb ) ; <nl> - <nl> - / * * Returns the size of the given byte buffer , in bytes . * / <nl> - GRPCAPI size_t grpc_byte_buffer_length ( grpc_byte_buffer * bb ) ; <nl> - <nl> - / * * Destroys \ a byte_buffer deallocating all its memory . * / <nl> - GRPCAPI void grpc_byte_buffer_destroy ( grpc_byte_buffer * byte_buffer ) ; <nl> - <nl> - / * * Reader for byte buffers . Iterates over slices in the byte buffer * / <nl> - struct grpc_byte_buffer_reader ; <nl> - typedef struct grpc_byte_buffer_reader grpc_byte_buffer_reader ; <nl> - <nl> - / * * Initialize \ a reader to read over \ a buffer . <nl> - * Returns 1 upon success , 0 otherwise . * / <nl> - GRPCAPI int grpc_byte_buffer_reader_init ( grpc_byte_buffer_reader * reader , <nl> - grpc_byte_buffer * buffer ) ; <nl> - <nl> - / * * Cleanup and destroy \ a reader * / <nl> - GRPCAPI void grpc_byte_buffer_reader_destroy ( grpc_byte_buffer_reader * reader ) ; <nl> - <nl> - / * * Updates \ a slice with the next piece of data from from \ a reader and returns <nl> - * 1 . Returns 0 at the end of the stream . Caller is responsible for calling <nl> - * grpc_slice_unref on the result . 
* / <nl> - GRPCAPI int grpc_byte_buffer_reader_next ( grpc_byte_buffer_reader * reader , <nl> - grpc_slice * slice ) ; <nl> - <nl> - / * * Merge all data from \ a reader into single slice * / <nl> - GRPCAPI grpc_slice <nl> - grpc_byte_buffer_reader_readall ( grpc_byte_buffer_reader * reader ) ; <nl> - <nl> - / * * Returns a RAW byte buffer instance from the output of \ a reader . * / <nl> - GRPCAPI grpc_byte_buffer * grpc_raw_byte_buffer_from_reader ( <nl> - grpc_byte_buffer_reader * reader ) ; <nl> - <nl> - # ifdef __cplusplus <nl> - } <nl> - # endif <nl> - <nl> - # endif / * GRPC_IMPL_CODEGEN_BYTE_BUFFER_H * / <nl> mmm a / package . xml <nl> ppp b / package . xml <nl> <nl> < file baseinstalldir = " / " name = " src / core / lib / support / tmpfile_posix . c " role = " src " / > <nl> < file baseinstalldir = " / " name = " src / core / lib / support / tmpfile_windows . c " role = " src " / > <nl> < file baseinstalldir = " / " name = " src / core / lib / support / wrap_memcpy . c " role = " src " / > <nl> - < file baseinstalldir = " / " name = " include / grpc / impl / codegen / byte_buffer . h " role = " src " / > <nl> < file baseinstalldir = " / " name = " include / grpc / impl / codegen / byte_buffer_reader . h " role = " src " / > <nl> < file baseinstalldir = " / " name = " include / grpc / impl / codegen / compression_types . h " role = " src " / > <nl> < file baseinstalldir = " / " name = " include / grpc / impl / codegen / connectivity_state . h " role = " src " / > <nl> mmm a / src / core / lib / iomgr / resource_quota . c <nl> ppp b / src / core / lib / iomgr / resource_quota . c <nl> <nl> # include < stdint . h > <nl> # include < string . h > <nl> <nl> - # include < grpc / slice_buffer . h > <nl> # include < grpc / support / alloc . h > <nl> # include < grpc / support / log . h > <nl> # include < grpc / support / string_util . h > <nl> mmm a / src / cpp / server / health / default_health_check_service . cc <nl> ppp b / src / cpp / server / health / default_health_check_service . cc <nl> <nl> # include < mutex > <nl> <nl> # include < grpc + + / impl / codegen / method_handler_impl . h > <nl> - # include < grpc / slice . h > <nl> # include < grpc / support / alloc . h > <nl> # include < grpc / support / log . h > <nl> <nl> mmm a / src / cpp / util / byte_buffer_cc . cc <nl> ppp b / src / cpp / util / byte_buffer_cc . cc <nl> <nl> * <nl> * / <nl> <nl> - # include < grpc + + / impl / grpc_library . h > <nl> # include < grpc + + / support / byte_buffer . h > <nl> - # include < grpc / byte_buffer . h > <nl> # include < grpc / byte_buffer_reader . h > <nl> <nl> namespace grpc { <nl> <nl> - static internal : : GrpcLibraryInitializer g_gli_initializer ; <nl> - <nl> ByteBuffer : : ByteBuffer ( const Slice * slices , size_t nslices ) { <nl> / / The following assertions check that the representation of a grpc : : Slice is <nl> / / identical to that of a grpc_slice : it has a grpc_slice field , and nothing <nl> ByteBuffer : : ByteBuffer ( const Slice * slices , size_t nslices ) { <nl> " Slice must have same representation as grpc_slice " ) ; <nl> static_assert ( sizeof ( Slice ) = = sizeof ( grpc_slice ) , <nl> " Slice must have same representation as grpc_slice " ) ; <nl> - g_gli_initializer . 
summon ( ) ; / / Make sure that initializer linked in <nl> / / The const_cast is legal if grpc_raw_byte_buffer_create ( ) does no more <nl> / / than its advertised side effect of increasing the reference count of the <nl> / / slices it processes , and such an increase does not affect the semantics <nl> ByteBuffer : : ByteBuffer ( const Slice * slices , size_t nslices ) { <nl> reinterpret_cast < grpc_slice * > ( const_cast < Slice * > ( slices ) ) , nslices ) ; <nl> } <nl> <nl> + ByteBuffer : : ~ ByteBuffer ( ) { <nl> + if ( buffer_ ) { <nl> + grpc_byte_buffer_destroy ( buffer_ ) ; <nl> + } <nl> + } <nl> + <nl> + void ByteBuffer : : Clear ( ) { <nl> + if ( buffer_ ) { <nl> + grpc_byte_buffer_destroy ( buffer_ ) ; <nl> + buffer_ = nullptr ; <nl> + } <nl> + } <nl> + <nl> Status ByteBuffer : : Dump ( std : : vector < Slice > * slices ) const { <nl> slices - > clear ( ) ; <nl> if ( ! buffer_ ) { <nl> ByteBuffer : : ByteBuffer ( const ByteBuffer & buf ) <nl> : buffer_ ( grpc_byte_buffer_copy ( buf . buffer_ ) ) { } <nl> <nl> ByteBuffer & ByteBuffer : : operator = ( const ByteBuffer & buf ) { <nl> - if ( this ! = & buf ) { <nl> - Clear ( ) ; / / first remove existing data <nl> - } <nl> + Clear ( ) ; / / first remove existing data <nl> if ( buf . buffer_ ) { <nl> buffer_ = grpc_byte_buffer_copy ( buf . buffer_ ) ; / / then copy <nl> } <nl> void ByteBuffer : : Swap ( ByteBuffer * other ) { <nl> buffer_ = tmp ; <nl> } <nl> <nl> - ByteBuffer : : operator grpc_byte_buffer * ( ) { <nl> - / / The following assertions check that the representation of a ByteBuffer is <nl> - / / identical to grpc_byte_buffer * : it has a grpc_byte_buffer * field , <nl> - / / and nothing else . <nl> - static_assert ( std : : is_same < decltype ( buffer_ ) , grpc_byte_buffer * > : : value , <nl> - " ByteBuffer must have same representation as " <nl> - " grpc_byte_buffer * " ) ; <nl> - static_assert ( sizeof ( ByteBuffer ) = = sizeof ( grpc_byte_buffer * ) , <nl> - " ByteBuffer must have same representation as " <nl> - " grpc_byte_buffer * " ) ; <nl> - return buffer_ ; <nl> - } <nl> - <nl> - ByteBuffer : : operator const grpc_byte_buffer * ( ) const { return buffer_ ; } <nl> - <nl> } / / namespace grpc <nl> mmm a / src / cpp / util / slice_cc . cc <nl> ppp b / src / cpp / util / slice_cc . cc <nl> Slice : : Slice ( void * buf , size_t len , void ( * destroy ) ( void * ) , void * user_data ) <nl> Slice : : Slice ( void * buf , size_t len , void ( * destroy ) ( void * , size_t ) ) <nl> : slice_ ( grpc_slice_new_with_len ( buf , len , destroy ) ) { } <nl> <nl> - grpc_slice Slice : : c_slice ( ) const { return grpc_slice_ref ( slice_ ) ; } <nl> - <nl> } / / namespace grpc <nl> mmm a / src / ruby / ext / grpc / rb_grpc_imports . generated . c <nl> ppp b / src / ruby / ext / grpc / rb_grpc_imports . generated . c <nl> <nl> <nl> # include " rb_grpc_imports . generated . 
h " <nl> <nl> + grpc_raw_byte_buffer_create_type grpc_raw_byte_buffer_create_import ; <nl> + grpc_raw_compressed_byte_buffer_create_type grpc_raw_compressed_byte_buffer_create_import ; <nl> + grpc_byte_buffer_copy_type grpc_byte_buffer_copy_import ; <nl> + grpc_byte_buffer_length_type grpc_byte_buffer_length_import ; <nl> + grpc_byte_buffer_destroy_type grpc_byte_buffer_destroy_import ; <nl> + grpc_byte_buffer_reader_init_type grpc_byte_buffer_reader_init_import ; <nl> + grpc_byte_buffer_reader_destroy_type grpc_byte_buffer_reader_destroy_import ; <nl> + grpc_byte_buffer_reader_next_type grpc_byte_buffer_reader_next_import ; <nl> + grpc_byte_buffer_reader_readall_type grpc_byte_buffer_reader_readall_import ; <nl> + grpc_raw_byte_buffer_from_reader_type grpc_raw_byte_buffer_from_reader_import ; <nl> census_initialize_type census_initialize_import ; <nl> census_shutdown_type census_shutdown_import ; <nl> census_supported_type census_supported_import ; <nl> grpc_ssl_server_credentials_create_ex_type grpc_ssl_server_credentials_create_ex <nl> grpc_server_add_secure_http2_port_type grpc_server_add_secure_http2_port_import ; <nl> grpc_call_set_credentials_type grpc_call_set_credentials_import ; <nl> grpc_server_credentials_set_auth_metadata_processor_type grpc_server_credentials_set_auth_metadata_processor_import ; <nl> - grpc_raw_byte_buffer_create_type grpc_raw_byte_buffer_create_import ; <nl> - grpc_raw_compressed_byte_buffer_create_type grpc_raw_compressed_byte_buffer_create_import ; <nl> - grpc_byte_buffer_copy_type grpc_byte_buffer_copy_import ; <nl> - grpc_byte_buffer_length_type grpc_byte_buffer_length_import ; <nl> - grpc_byte_buffer_destroy_type grpc_byte_buffer_destroy_import ; <nl> - grpc_byte_buffer_reader_init_type grpc_byte_buffer_reader_init_import ; <nl> - grpc_byte_buffer_reader_destroy_type grpc_byte_buffer_reader_destroy_import ; <nl> - grpc_byte_buffer_reader_next_type grpc_byte_buffer_reader_next_import ; <nl> - grpc_byte_buffer_reader_readall_type grpc_byte_buffer_reader_readall_import ; <nl> - grpc_raw_byte_buffer_from_reader_type grpc_raw_byte_buffer_from_reader_import ; <nl> grpc_slice_ref_type grpc_slice_ref_import ; <nl> grpc_slice_unref_type grpc_slice_unref_import ; <nl> grpc_slice_copy_type grpc_slice_copy_import ; <nl> gpr_sleep_until_type gpr_sleep_until_import ; <nl> gpr_timespec_to_micros_type gpr_timespec_to_micros_import ; <nl> <nl> void grpc_rb_load_imports ( HMODULE library ) { <nl> + grpc_raw_byte_buffer_create_import = ( grpc_raw_byte_buffer_create_type ) GetProcAddress ( library , " grpc_raw_byte_buffer_create " ) ; <nl> + grpc_raw_compressed_byte_buffer_create_import = ( grpc_raw_compressed_byte_buffer_create_type ) GetProcAddress ( library , " grpc_raw_compressed_byte_buffer_create " ) ; <nl> + grpc_byte_buffer_copy_import = ( grpc_byte_buffer_copy_type ) GetProcAddress ( library , " grpc_byte_buffer_copy " ) ; <nl> + grpc_byte_buffer_length_import = ( grpc_byte_buffer_length_type ) GetProcAddress ( library , " grpc_byte_buffer_length " ) ; <nl> + grpc_byte_buffer_destroy_import = ( grpc_byte_buffer_destroy_type ) GetProcAddress ( library , " grpc_byte_buffer_destroy " ) ; <nl> + grpc_byte_buffer_reader_init_import = ( grpc_byte_buffer_reader_init_type ) GetProcAddress ( library , " grpc_byte_buffer_reader_init " ) ; <nl> + grpc_byte_buffer_reader_destroy_import = ( grpc_byte_buffer_reader_destroy_type ) GetProcAddress ( library , " grpc_byte_buffer_reader_destroy " ) ; <nl> + grpc_byte_buffer_reader_next_import = ( grpc_byte_buffer_reader_next_type ) 
GetProcAddress ( library , " grpc_byte_buffer_reader_next " ) ; <nl> + grpc_byte_buffer_reader_readall_import = ( grpc_byte_buffer_reader_readall_type ) GetProcAddress ( library , " grpc_byte_buffer_reader_readall " ) ; <nl> + grpc_raw_byte_buffer_from_reader_import = ( grpc_raw_byte_buffer_from_reader_type ) GetProcAddress ( library , " grpc_raw_byte_buffer_from_reader " ) ; <nl> census_initialize_import = ( census_initialize_type ) GetProcAddress ( library , " census_initialize " ) ; <nl> census_shutdown_import = ( census_shutdown_type ) GetProcAddress ( library , " census_shutdown " ) ; <nl> census_supported_import = ( census_supported_type ) GetProcAddress ( library , " census_supported " ) ; <nl> void grpc_rb_load_imports ( HMODULE library ) { <nl> grpc_server_add_secure_http2_port_import = ( grpc_server_add_secure_http2_port_type ) GetProcAddress ( library , " grpc_server_add_secure_http2_port " ) ; <nl> grpc_call_set_credentials_import = ( grpc_call_set_credentials_type ) GetProcAddress ( library , " grpc_call_set_credentials " ) ; <nl> grpc_server_credentials_set_auth_metadata_processor_import = ( grpc_server_credentials_set_auth_metadata_processor_type ) GetProcAddress ( library , " grpc_server_credentials_set_auth_metadata_processor " ) ; <nl> - grpc_raw_byte_buffer_create_import = ( grpc_raw_byte_buffer_create_type ) GetProcAddress ( library , " grpc_raw_byte_buffer_create " ) ; <nl> - grpc_raw_compressed_byte_buffer_create_import = ( grpc_raw_compressed_byte_buffer_create_type ) GetProcAddress ( library , " grpc_raw_compressed_byte_buffer_create " ) ; <nl> - grpc_byte_buffer_copy_import = ( grpc_byte_buffer_copy_type ) GetProcAddress ( library , " grpc_byte_buffer_copy " ) ; <nl> - grpc_byte_buffer_length_import = ( grpc_byte_buffer_length_type ) GetProcAddress ( library , " grpc_byte_buffer_length " ) ; <nl> - grpc_byte_buffer_destroy_import = ( grpc_byte_buffer_destroy_type ) GetProcAddress ( library , " grpc_byte_buffer_destroy " ) ; <nl> - grpc_byte_buffer_reader_init_import = ( grpc_byte_buffer_reader_init_type ) GetProcAddress ( library , " grpc_byte_buffer_reader_init " ) ; <nl> - grpc_byte_buffer_reader_destroy_import = ( grpc_byte_buffer_reader_destroy_type ) GetProcAddress ( library , " grpc_byte_buffer_reader_destroy " ) ; <nl> - grpc_byte_buffer_reader_next_import = ( grpc_byte_buffer_reader_next_type ) GetProcAddress ( library , " grpc_byte_buffer_reader_next " ) ; <nl> - grpc_byte_buffer_reader_readall_import = ( grpc_byte_buffer_reader_readall_type ) GetProcAddress ( library , " grpc_byte_buffer_reader_readall " ) ; <nl> - grpc_raw_byte_buffer_from_reader_import = ( grpc_raw_byte_buffer_from_reader_type ) GetProcAddress ( library , " grpc_raw_byte_buffer_from_reader " ) ; <nl> grpc_slice_ref_import = ( grpc_slice_ref_type ) GetProcAddress ( library , " grpc_slice_ref " ) ; <nl> grpc_slice_unref_import = ( grpc_slice_unref_type ) GetProcAddress ( library , " grpc_slice_unref " ) ; <nl> grpc_slice_copy_import = ( grpc_slice_copy_type ) GetProcAddress ( library , " grpc_slice_copy " ) ; <nl> mmm a / src / ruby / ext / grpc / rb_grpc_imports . generated . h <nl> ppp b / src / ruby / ext / grpc / rb_grpc_imports . generated . h <nl> <nl> <nl> # include < windows . h > <nl> <nl> + # include < grpc / byte_buffer . h > <nl> # include < grpc / census . h > <nl> # include < grpc / compression . h > <nl> # include < grpc / grpc . h > <nl> # include < grpc / grpc_posix . h > <nl> # include < grpc / grpc_security . h > <nl> - # include < grpc / impl / codegen / byte_buffer . 
h > <nl> # include < grpc / slice . h > <nl> # include < grpc / slice_buffer . h > <nl> # include < grpc / support / alloc . h > <nl> <nl> # include < grpc / support / thd . h > <nl> # include < grpc / support / time . h > <nl> <nl> + typedef grpc_byte_buffer * ( * grpc_raw_byte_buffer_create_type ) ( grpc_slice * slices , size_t nslices ) ; <nl> + extern grpc_raw_byte_buffer_create_type grpc_raw_byte_buffer_create_import ; <nl> + # define grpc_raw_byte_buffer_create grpc_raw_byte_buffer_create_import <nl> + typedef grpc_byte_buffer * ( * grpc_raw_compressed_byte_buffer_create_type ) ( grpc_slice * slices , size_t nslices , grpc_compression_algorithm compression ) ; <nl> + extern grpc_raw_compressed_byte_buffer_create_type grpc_raw_compressed_byte_buffer_create_import ; <nl> + # define grpc_raw_compressed_byte_buffer_create grpc_raw_compressed_byte_buffer_create_import <nl> + typedef grpc_byte_buffer * ( * grpc_byte_buffer_copy_type ) ( grpc_byte_buffer * bb ) ; <nl> + extern grpc_byte_buffer_copy_type grpc_byte_buffer_copy_import ; <nl> + # define grpc_byte_buffer_copy grpc_byte_buffer_copy_import <nl> + typedef size_t ( * grpc_byte_buffer_length_type ) ( grpc_byte_buffer * bb ) ; <nl> + extern grpc_byte_buffer_length_type grpc_byte_buffer_length_import ; <nl> + # define grpc_byte_buffer_length grpc_byte_buffer_length_import <nl> + typedef void ( * grpc_byte_buffer_destroy_type ) ( grpc_byte_buffer * byte_buffer ) ; <nl> + extern grpc_byte_buffer_destroy_type grpc_byte_buffer_destroy_import ; <nl> + # define grpc_byte_buffer_destroy grpc_byte_buffer_destroy_import <nl> + typedef int ( * grpc_byte_buffer_reader_init_type ) ( grpc_byte_buffer_reader * reader , grpc_byte_buffer * buffer ) ; <nl> + extern grpc_byte_buffer_reader_init_type grpc_byte_buffer_reader_init_import ; <nl> + # define grpc_byte_buffer_reader_init grpc_byte_buffer_reader_init_import <nl> + typedef void ( * grpc_byte_buffer_reader_destroy_type ) ( grpc_byte_buffer_reader * reader ) ; <nl> + extern grpc_byte_buffer_reader_destroy_type grpc_byte_buffer_reader_destroy_import ; <nl> + # define grpc_byte_buffer_reader_destroy grpc_byte_buffer_reader_destroy_import <nl> + typedef int ( * grpc_byte_buffer_reader_next_type ) ( grpc_byte_buffer_reader * reader , grpc_slice * slice ) ; <nl> + extern grpc_byte_buffer_reader_next_type grpc_byte_buffer_reader_next_import ; <nl> + # define grpc_byte_buffer_reader_next grpc_byte_buffer_reader_next_import <nl> + typedef grpc_slice ( * grpc_byte_buffer_reader_readall_type ) ( grpc_byte_buffer_reader * reader ) ; <nl> + extern grpc_byte_buffer_reader_readall_type grpc_byte_buffer_reader_readall_import ; <nl> + # define grpc_byte_buffer_reader_readall grpc_byte_buffer_reader_readall_import <nl> + typedef grpc_byte_buffer * ( * grpc_raw_byte_buffer_from_reader_type ) ( grpc_byte_buffer_reader * reader ) ; <nl> + extern grpc_raw_byte_buffer_from_reader_type grpc_raw_byte_buffer_from_reader_import ; <nl> + # define grpc_raw_byte_buffer_from_reader grpc_raw_byte_buffer_from_reader_import <nl> typedef int ( * census_initialize_type ) ( int features ) ; <nl> extern census_initialize_type census_initialize_import ; <nl> # define census_initialize census_initialize_import <nl> extern grpc_call_set_credentials_type grpc_call_set_credentials_import ; <nl> typedef void ( * grpc_server_credentials_set_auth_metadata_processor_type ) ( grpc_server_credentials * creds , grpc_auth_metadata_processor processor ) ; <nl> extern grpc_server_credentials_set_auth_metadata_processor_type 
grpc_server_credentials_set_auth_metadata_processor_import ; <nl> # define grpc_server_credentials_set_auth_metadata_processor grpc_server_credentials_set_auth_metadata_processor_import <nl> - typedef grpc_byte_buffer * ( * grpc_raw_byte_buffer_create_type ) ( grpc_slice * slices , size_t nslices ) ; <nl> - extern grpc_raw_byte_buffer_create_type grpc_raw_byte_buffer_create_import ; <nl> - # define grpc_raw_byte_buffer_create grpc_raw_byte_buffer_create_import <nl> - typedef grpc_byte_buffer * ( * grpc_raw_compressed_byte_buffer_create_type ) ( grpc_slice * slices , size_t nslices , grpc_compression_algorithm compression ) ; <nl> - extern grpc_raw_compressed_byte_buffer_create_type grpc_raw_compressed_byte_buffer_create_import ; <nl> - # define grpc_raw_compressed_byte_buffer_create grpc_raw_compressed_byte_buffer_create_import <nl> - typedef grpc_byte_buffer * ( * grpc_byte_buffer_copy_type ) ( grpc_byte_buffer * bb ) ; <nl> - extern grpc_byte_buffer_copy_type grpc_byte_buffer_copy_import ; <nl> - # define grpc_byte_buffer_copy grpc_byte_buffer_copy_import <nl> - typedef size_t ( * grpc_byte_buffer_length_type ) ( grpc_byte_buffer * bb ) ; <nl> - extern grpc_byte_buffer_length_type grpc_byte_buffer_length_import ; <nl> - # define grpc_byte_buffer_length grpc_byte_buffer_length_import <nl> - typedef void ( * grpc_byte_buffer_destroy_type ) ( grpc_byte_buffer * byte_buffer ) ; <nl> - extern grpc_byte_buffer_destroy_type grpc_byte_buffer_destroy_import ; <nl> - # define grpc_byte_buffer_destroy grpc_byte_buffer_destroy_import <nl> - typedef int ( * grpc_byte_buffer_reader_init_type ) ( grpc_byte_buffer_reader * reader , grpc_byte_buffer * buffer ) ; <nl> - extern grpc_byte_buffer_reader_init_type grpc_byte_buffer_reader_init_import ; <nl> - # define grpc_byte_buffer_reader_init grpc_byte_buffer_reader_init_import <nl> - typedef void ( * grpc_byte_buffer_reader_destroy_type ) ( grpc_byte_buffer_reader * reader ) ; <nl> - extern grpc_byte_buffer_reader_destroy_type grpc_byte_buffer_reader_destroy_import ; <nl> - # define grpc_byte_buffer_reader_destroy grpc_byte_buffer_reader_destroy_import <nl> - typedef int ( * grpc_byte_buffer_reader_next_type ) ( grpc_byte_buffer_reader * reader , grpc_slice * slice ) ; <nl> - extern grpc_byte_buffer_reader_next_type grpc_byte_buffer_reader_next_import ; <nl> - # define grpc_byte_buffer_reader_next grpc_byte_buffer_reader_next_import <nl> - typedef grpc_slice ( * grpc_byte_buffer_reader_readall_type ) ( grpc_byte_buffer_reader * reader ) ; <nl> - extern grpc_byte_buffer_reader_readall_type grpc_byte_buffer_reader_readall_import ; <nl> - # define grpc_byte_buffer_reader_readall grpc_byte_buffer_reader_readall_import <nl> - typedef grpc_byte_buffer * ( * grpc_raw_byte_buffer_from_reader_type ) ( grpc_byte_buffer_reader * reader ) ; <nl> - extern grpc_raw_byte_buffer_from_reader_type grpc_raw_byte_buffer_from_reader_import ; <nl> - # define grpc_raw_byte_buffer_from_reader grpc_raw_byte_buffer_from_reader_import <nl> typedef grpc_slice ( * grpc_slice_ref_type ) ( grpc_slice s ) ; <nl> extern grpc_slice_ref_type grpc_slice_ref_import ; <nl> # define grpc_slice_ref grpc_slice_ref_import <nl> mmm a / test / core / surface / public_headers_must_be_c89 . c <nl> ppp b / test / core / surface / public_headers_must_be_c89 . c <nl> <nl> # include < grpc / grpc_security . h > <nl> # include < grpc / grpc_security_constants . h > <nl> # include < grpc / impl / codegen / atm . h > <nl> - # include < grpc / impl / codegen / byte_buffer . 
h > <nl> # include < grpc / impl / codegen / byte_buffer_reader . h > <nl> # include < grpc / impl / codegen / compression_types . h > <nl> # include < grpc / impl / codegen / connectivity_state . h > <nl> mmm a / test / cpp / util / byte_buffer_test . cc <nl> ppp b / test / cpp / util / byte_buffer_test . cc <nl> TEST_F ( ByteBufferTest , SerializationMakesCopy ) { <nl> std : : vector < Slice > slices ; <nl> slices . push_back ( Slice ( hello , Slice : : STEAL_REF ) ) ; <nl> slices . push_back ( Slice ( world , Slice : : STEAL_REF ) ) ; <nl> - ByteBuffer send_buffer ; <nl> + grpc_byte_buffer * send_buffer = nullptr ; <nl> bool owned = false ; <nl> ByteBuffer buffer ( & slices [ 0 ] , 2 ) ; <nl> slices . clear ( ) ; <nl> TEST_F ( ByteBufferTest , SerializationMakesCopy ) { <nl> buffer , & send_buffer , & owned ) ; <nl> EXPECT_TRUE ( status . ok ( ) ) ; <nl> EXPECT_TRUE ( owned ) ; <nl> - EXPECT_TRUE ( send_buffer . Valid ( ) ) ; <nl> + EXPECT_TRUE ( send_buffer ! = nullptr ) ; <nl> + grpc_byte_buffer_destroy ( send_buffer ) ; <nl> } <nl> <nl> } / / namespace <nl> mmm a / tools / doxygen / Doxyfile . c + + <nl> ppp b / tools / doxygen / Doxyfile . c + + <nl> include / grpc + + / impl / channel_argument_option . h \ <nl> include / grpc + + / impl / client_unary_call . h \ <nl> include / grpc + + / impl / codegen / async_stream . h \ <nl> include / grpc + + / impl / codegen / async_unary_call . h \ <nl> - include / grpc + + / impl / codegen / byte_buffer . h \ <nl> include / grpc + + / impl / codegen / call . h \ <nl> include / grpc + + / impl / codegen / call_hook . h \ <nl> include / grpc + + / impl / codegen / channel_interface . h \ <nl> include / grpc / impl / codegen / atm . h \ <nl> include / grpc / impl / codegen / atm_gcc_atomic . h \ <nl> include / grpc / impl / codegen / atm_gcc_sync . h \ <nl> include / grpc / impl / codegen / atm_windows . h \ <nl> - include / grpc / impl / codegen / byte_buffer . h \ <nl> include / grpc / impl / codegen / byte_buffer_reader . h \ <nl> include / grpc / impl / codegen / compression_types . h \ <nl> include / grpc / impl / codegen / connectivity_state . h \ <nl> mmm a / tools / doxygen / Doxyfile . c + + . internal <nl> ppp b / tools / doxygen / Doxyfile . c + + . internal <nl> include / grpc + + / impl / channel_argument_option . h \ <nl> include / grpc + + / impl / client_unary_call . h \ <nl> include / grpc + + / impl / codegen / async_stream . h \ <nl> include / grpc + + / impl / codegen / async_unary_call . h \ <nl> - include / grpc + + / impl / codegen / byte_buffer . h \ <nl> include / grpc + + / impl / codegen / call . h \ <nl> include / grpc + + / impl / codegen / call_hook . h \ <nl> include / grpc + + / impl / codegen / channel_interface . h \ <nl> include / grpc / impl / codegen / atm . h \ <nl> include / grpc / impl / codegen / atm_gcc_atomic . h \ <nl> include / grpc / impl / codegen / atm_gcc_sync . h \ <nl> include / grpc / impl / codegen / atm_windows . h \ <nl> - include / grpc / impl / codegen / byte_buffer . h \ <nl> include / grpc / impl / codegen / byte_buffer_reader . h \ <nl> include / grpc / impl / codegen / compression_types . h \ <nl> include / grpc / impl / codegen / connectivity_state . h \ <nl> mmm a / tools / doxygen / Doxyfile . core <nl> ppp b / tools / doxygen / Doxyfile . core <nl> include / grpc / impl / codegen / atm_gcc_sync . h \ <nl> include / grpc / impl / codegen / atm_gcc_sync . h \ <nl> include / grpc / impl / codegen / atm_windows . h \ <nl> include / grpc / impl / codegen / atm_windows . 
h \ <nl> - include / grpc / impl / codegen / byte_buffer . h \ <nl> include / grpc / impl / codegen / byte_buffer_reader . h \ <nl> include / grpc / impl / codegen / compression_types . h \ <nl> include / grpc / impl / codegen / connectivity_state . h \ <nl> mmm a / tools / doxygen / Doxyfile . core . internal <nl> ppp b / tools / doxygen / Doxyfile . core . internal <nl> include / grpc / impl / codegen / atm_gcc_sync . h \ <nl> include / grpc / impl / codegen / atm_gcc_sync . h \ <nl> include / grpc / impl / codegen / atm_windows . h \ <nl> include / grpc / impl / codegen / atm_windows . h \ <nl> - include / grpc / impl / codegen / byte_buffer . h \ <nl> include / grpc / impl / codegen / byte_buffer_reader . h \ <nl> include / grpc / impl / codegen / compression_types . h \ <nl> include / grpc / impl / codegen / connectivity_state . h \ <nl> mmm a / tools / run_tests / generated / sources_and_headers . json <nl> ppp b / tools / run_tests / generated / sources_and_headers . json <nl> <nl> " gpr_codegen " <nl> ] , <nl> " headers " : [ <nl> - " include / grpc / impl / codegen / byte_buffer . h " , <nl> " include / grpc / impl / codegen / byte_buffer_reader . h " , <nl> " include / grpc / impl / codegen / compression_types . h " , <nl> " include / grpc / impl / codegen / connectivity_state . h " , <nl> <nl> " language " : " c " , <nl> " name " : " grpc_codegen " , <nl> " src " : [ <nl> - " include / grpc / impl / codegen / byte_buffer . h " , <nl> " include / grpc / impl / codegen / byte_buffer_reader . h " , <nl> " include / grpc / impl / codegen / compression_types . h " , <nl> " include / grpc / impl / codegen / connectivity_state . h " , <nl> <nl> " headers " : [ <nl> " include / grpc + + / impl / codegen / async_stream . h " , <nl> " include / grpc + + / impl / codegen / async_unary_call . h " , <nl> - " include / grpc + + / impl / codegen / byte_buffer . h " , <nl> " include / grpc + + / impl / codegen / call . h " , <nl> " include / grpc + + / impl / codegen / call_hook . h " , <nl> " include / grpc + + / impl / codegen / channel_interface . h " , <nl> <nl> " src " : [ <nl> " include / grpc + + / impl / codegen / async_stream . h " , <nl> " include / grpc + + / impl / codegen / async_unary_call . h " , <nl> - " include / grpc + + / impl / codegen / byte_buffer . h " , <nl> " include / grpc + + / impl / codegen / call . h " , <nl> " include / grpc + + / impl / codegen / call_hook . h " , <nl> " include / grpc + + / impl / codegen / channel_interface . h " , <nl> | Revert " Allow SerializationTraits to use grpc : : ByteBuffer rather than only grpc_byte_buffer " | grpc/grpc | 5432dd887430984713f9fc3ad595c85e995d6451 | 2017-09-14T15:55:42Z |
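This grpc/grpc row is a revert: `ByteBuffer` and `Slice` move back from the codegen headers into the `grpc++/support` headers, `grpc/impl/codegen/byte_buffer.h` is deleted, and the raw C byte-buffer API returns to `grpc/byte_buffer.h`. The retained `SerializationTraits<ByteBuffer, void>` specialization simply copies the underlying `grpc_byte_buffer`, so raw payloads pass through the RPC layer without a protobuf encode/decode step. The toy Python sketch below only illustrates that pass-through idea; it is not gRPC's Python API, and every name in it (`RawBytes`, `serialize`, `deserialize`) is made up for illustration.

```python
# Toy sketch of the pass-through serialization behind
# SerializationTraits<ByteBuffer>: a raw, already-encoded payload is copied
# as-is, while ordinary messages go through a real serializer.
# Illustrative only -- not gRPC's actual Python API.

class RawBytes:
    """Stands in for grpc::ByteBuffer: an opaque, pre-encoded payload."""
    def __init__(self, data: bytes):
        self.data = data

def serialize(msg):
    if isinstance(msg, RawBytes):
        # ByteBuffer-like payload: copy the buffer, no encoding step.
        return bytes(msg.data)
    # Anything else is assumed to know how to encode itself
    # (e.g. a protobuf message exposing SerializeToString()).
    return msg.SerializeToString()

def deserialize(wire_bytes, want_raw=False):
    if want_raw:
        # The ByteBuffer specialization just takes ownership of the buffer.
        return RawBytes(wire_bytes)
    raise NotImplementedError("decode with the message parser here")

payload = RawBytes(b"\x08\x01")            # pre-encoded bytes from elsewhere
assert serialize(payload) == b"\x08\x01"   # copied through unchanged
assert deserialize(b"\x08\x01", want_raw=True).data == b"\x08\x01"
```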
mmm a / include / xgboost / data . h <nl> ppp b / include / xgboost / data . h <nl> class SparsePage { <nl> * \ brief Push row block into the page . <nl> * \ param batch the row batch . <nl> * / <nl> - inline void Push ( const dmlc : : RowBlock < uint32_t > & batch ) { <nl> - auto & data_vec = data . HostVector ( ) ; <nl> - auto & offset_vec = offset . HostVector ( ) ; <nl> - data_vec . reserve ( data . Size ( ) + batch . offset [ batch . size ] - batch . offset [ 0 ] ) ; <nl> - offset_vec . reserve ( offset . Size ( ) + batch . size ) ; <nl> - CHECK ( batch . index ! = nullptr ) ; <nl> - for ( size_t i = 0 ; i < batch . size ; + + i ) { <nl> - offset_vec . push_back ( offset_vec . back ( ) + batch . offset [ i + 1 ] - batch . offset [ i ] ) ; <nl> - } <nl> - for ( size_t i = batch . offset [ 0 ] ; i < batch . offset [ batch . size ] ; + + i ) { <nl> - uint32_t index = batch . index [ i ] ; <nl> - bst_float fvalue = batch . value = = nullptr ? 1 . 0f : batch . value [ i ] ; <nl> - data_vec . emplace_back ( index , fvalue ) ; <nl> - } <nl> - CHECK_EQ ( offset_vec . back ( ) , data . Size ( ) ) ; <nl> - } <nl> + void Push ( const dmlc : : RowBlock < uint32_t > & batch ) ; <nl> / * ! <nl> * \ brief Push a sparse page <nl> * \ param batch the row page <nl> * / <nl> - inline void Push ( const SparsePage & batch ) { <nl> - auto & data_vec = data . HostVector ( ) ; <nl> - auto & offset_vec = offset . HostVector ( ) ; <nl> - const auto & batch_offset_vec = batch . offset . HostVector ( ) ; <nl> - const auto & batch_data_vec = batch . data . HostVector ( ) ; <nl> - size_t top = offset_vec . back ( ) ; <nl> - data_vec . resize ( top + batch . data . Size ( ) ) ; <nl> - std : : memcpy ( dmlc : : BeginPtr ( data_vec ) + top , <nl> - dmlc : : BeginPtr ( batch_data_vec ) , <nl> - sizeof ( Entry ) * batch . data . Size ( ) ) ; <nl> - size_t begin = offset . Size ( ) ; <nl> - offset_vec . resize ( begin + batch . Size ( ) ) ; <nl> - for ( size_t i = 0 ; i < batch . Size ( ) ; + + i ) { <nl> - offset_vec [ i + begin ] = top + batch_offset_vec [ i + 1 ] ; <nl> - } <nl> - } <nl> + void Push ( const SparsePage & batch ) ; <nl> + / * ! <nl> + * \ brief Push a SparsePage stored in CSC format <nl> + * \ param batch The row batch to be pushed <nl> + * / <nl> + void PushCSC ( const SparsePage & batch ) ; <nl> / * ! <nl> * \ brief Push one instance into page <nl> * \ param inst an instance row <nl> mmm a / src / common / group_data . h <nl> ppp b / src / common / group_data . h <nl> namespace common { <nl> * \ tparam ValueType type of entries in the sparse matrix <nl> * \ tparam SizeType type of the index range holder <nl> * / <nl> - template < typename ValueType , typename SizeType = size_t > <nl> + template < typename ValueType , typename SizeType = std : : size_t > <nl> struct ParallelGroupBuilder { <nl> public : <nl> / / parallel group builder of data <nl> struct ParallelGroupBuilder { <nl> * \ param nkeys number of keys in the matrix , can be smaller than expected <nl> * \ param nthread number of thread that will be used in construction <nl> * / <nl> - inline void InitBudget ( size_t nkeys , int nthread ) { <nl> + inline void InitBudget ( std : : size_t nkeys , int nthread ) { <nl> thread_rptr_ . resize ( nthread ) ; <nl> - for ( size_t i = 0 ; i < thread_rptr_ . size ( ) ; + + i ) { <nl> + for ( std : : size_t i = 0 ; i < thread_rptr_ . size ( ) ; + + i ) { <nl> thread_rptr_ [ i ] . resize ( nkeys ) ; <nl> std : : fill ( thread_rptr_ [ i ] . begin ( ) , thread_rptr_ [ i ] . 
end ( ) , 0 ) ; <nl> } <nl> struct ParallelGroupBuilder { <nl> * \ param threadid the id of thread that calls this function <nl> * \ param nelem number of element budget add to this row <nl> * / <nl> - inline void AddBudget ( size_t key , int threadid , SizeType nelem = 1 ) { <nl> + inline void AddBudget ( std : : size_t key , int threadid , SizeType nelem = 1 ) { <nl> std : : vector < SizeType > & trptr = thread_rptr_ [ threadid ] ; <nl> if ( trptr . size ( ) < key + 1 ) { <nl> trptr . resize ( key + 1 , 0 ) ; <nl> struct ParallelGroupBuilder { <nl> / * ! \ brief step 3 : initialize the necessary storage * / <nl> inline void InitStorage ( ) { <nl> / / set rptr to correct size <nl> - for ( size_t tid = 0 ; tid < thread_rptr_ . size ( ) ; + + tid ) { <nl> + for ( std : : size_t tid = 0 ; tid < thread_rptr_ . size ( ) ; + + tid ) { <nl> if ( rptr_ . size ( ) < = thread_rptr_ [ tid ] . size ( ) ) { <nl> - rptr_ . resize ( thread_rptr_ [ tid ] . size ( ) + 1 ) ; <nl> + rptr_ . resize ( thread_rptr_ [ tid ] . size ( ) + 1 ) ; / / key + 1 <nl> } <nl> } <nl> / / initialize rptr to be beginning of each segment <nl> - size_t start = 0 ; <nl> - for ( size_t i = 0 ; i + 1 < rptr_ . size ( ) ; + + i ) { <nl> - for ( size_t tid = 0 ; tid < thread_rptr_ . size ( ) ; + + tid ) { <nl> + std : : size_t start = 0 ; <nl> + for ( std : : size_t i = 0 ; i + 1 < rptr_ . size ( ) ; + + i ) { <nl> + for ( std : : size_t tid = 0 ; tid < thread_rptr_ . size ( ) ; + + tid ) { <nl> std : : vector < SizeType > & trptr = thread_rptr_ [ tid ] ; <nl> - if ( i < trptr . size ( ) ) { <nl> - size_t ncnt = trptr [ i ] ; <nl> + if ( i < trptr . size ( ) ) { / / i ^ th row is assigned for this thread <nl> + std : : size_t ncnt = trptr [ i ] ; / / how many entries in this row <nl> trptr [ i ] = start ; <nl> start + = ncnt ; <nl> } <nl> } <nl> - rptr_ [ i + 1 ] = start ; <nl> + rptr_ [ i + 1 ] = start ; / / pointer accumulated from all thread <nl> } <nl> data_ . resize ( start ) ; <nl> } <nl> struct ParallelGroupBuilder { <nl> * \ param value The value to be pushed to the group . <nl> * \ param threadid the id of thread that calls this function <nl> * / <nl> - inline void Push ( size_t key , ValueType value , int threadid ) { <nl> + void Push ( std : : size_t key , ValueType value , int threadid ) { <nl> SizeType & rp = thread_rptr_ [ threadid ] [ key ] ; <nl> data_ [ rp + + ] = value ; <nl> } <nl> mmm a / src / common / timer . h <nl> ppp b / src / common / timer . h <nl> struct Monitor { <nl> <nl> LOG ( CONSOLE ) < < " = = = = = = = = Monitor : " < < label < < " = = = = = = = = " ; <nl> for ( auto & kv : statistics_map ) { <nl> + if ( kv . second . count = = 0 ) { <nl> + LOG ( WARNING ) < < <nl> + " Timer for " < < kv . first < < " did not get stopped properly . " ; <nl> + continue ; <nl> + } <nl> LOG ( CONSOLE ) < < kv . first < < " : " < < kv . second . timer . ElapsedSeconds ( ) <nl> < < " s , " < < kv . second . count < < " calls @ " <nl> < < std : : chrono : : duration_cast < std : : chrono : : microseconds > ( <nl> mmm a / src / data / data . cc <nl> ppp b / src / data / data . cc <nl> data : : SparsePageFormat : : DecideFormat ( const std : : string & cache_prefix ) { <nl> } <nl> } <nl> <nl> + void SparsePage : : Push ( const SparsePage & batch ) { <nl> + auto & data_vec = data . HostVector ( ) ; <nl> + auto & offset_vec = offset . HostVector ( ) ; <nl> + const auto & batch_offset_vec = batch . offset . HostVector ( ) ; <nl> + const auto & batch_data_vec = batch . data . HostVector ( ) ; <nl> + size_t top = offset_vec . 
back ( ) ; <nl> + data_vec . resize ( top + batch . data . Size ( ) ) ; <nl> + std : : memcpy ( dmlc : : BeginPtr ( data_vec ) + top , <nl> + dmlc : : BeginPtr ( batch_data_vec ) , <nl> + sizeof ( Entry ) * batch . data . Size ( ) ) ; <nl> + size_t begin = offset . Size ( ) ; <nl> + offset_vec . resize ( begin + batch . Size ( ) ) ; <nl> + for ( size_t i = 0 ; i < batch . Size ( ) ; + + i ) { <nl> + offset_vec [ i + begin ] = top + batch_offset_vec [ i + 1 ] ; <nl> + } <nl> + } <nl> + <nl> + void SparsePage : : Push ( const dmlc : : RowBlock < uint32_t > & batch ) { <nl> + auto & data_vec = data . HostVector ( ) ; <nl> + auto & offset_vec = offset . HostVector ( ) ; <nl> + data_vec . reserve ( data . Size ( ) + batch . offset [ batch . size ] - batch . offset [ 0 ] ) ; <nl> + offset_vec . reserve ( offset . Size ( ) + batch . size ) ; <nl> + CHECK ( batch . index ! = nullptr ) ; <nl> + for ( size_t i = 0 ; i < batch . size ; + + i ) { <nl> + offset_vec . push_back ( offset_vec . back ( ) + batch . offset [ i + 1 ] - batch . offset [ i ] ) ; <nl> + } <nl> + for ( size_t i = batch . offset [ 0 ] ; i < batch . offset [ batch . size ] ; + + i ) { <nl> + uint32_t index = batch . index [ i ] ; <nl> + bst_float fvalue = batch . value = = nullptr ? 1 . 0f : batch . value [ i ] ; <nl> + data_vec . emplace_back ( index , fvalue ) ; <nl> + } <nl> + CHECK_EQ ( offset_vec . back ( ) , data . Size ( ) ) ; <nl> + } <nl> + <nl> + void SparsePage : : PushCSC ( const SparsePage & batch ) { <nl> + std : : vector < xgboost : : Entry > & self_data = data . HostVector ( ) ; <nl> + std : : vector < size_t > & self_offset = offset . HostVector ( ) ; <nl> + <nl> + auto const & other_data = batch . data . ConstHostVector ( ) ; <nl> + auto const & other_offset = batch . offset . ConstHostVector ( ) ; <nl> + <nl> + if ( other_data . empty ( ) ) { <nl> + return ; <nl> + } <nl> + if ( ! self_data . empty ( ) ) { <nl> + CHECK_EQ ( self_offset . size ( ) , other_offset . size ( ) ) <nl> + < < " self_data . size ( ) : " < < this - > data . Size ( ) < < " , " <nl> + < < " other_data . size ( ) : " < < other_data . size ( ) < < std : : flush ; <nl> + } else { <nl> + self_data = other_data ; <nl> + self_offset = other_offset ; <nl> + return ; <nl> + } <nl> + <nl> + std : : vector < size_t > offset ( other_offset . size ( ) ) ; <nl> + offset [ 0 ] = 0 ; <nl> + <nl> + std : : vector < xgboost : : Entry > data ( self_data . size ( ) + batch . data . Size ( ) ) ; <nl> + <nl> + / / n_cols in original csr data matrix , here in csc is n_rows <nl> + size_t const n_features = other_offset . size ( ) - 1 ; <nl> + size_t beg = 0 ; <nl> + size_t ptr = 1 ; <nl> + for ( size_t i = 0 ; i < n_features ; + + i ) { <nl> + size_t const self_beg = self_offset . at ( i ) ; <nl> + size_t const self_length = self_offset . at ( i + 1 ) - self_beg ; <nl> + CHECK_LT ( beg , data . size ( ) ) ; <nl> + std : : memcpy ( dmlc : : BeginPtr ( data ) + beg , <nl> + dmlc : : BeginPtr ( self_data ) + self_beg , <nl> + sizeof ( Entry ) * self_length ) ; <nl> + beg + = self_length ; <nl> + <nl> + size_t const other_beg = other_offset . at ( i ) ; <nl> + size_t const other_length = other_offset . at ( i + 1 ) - other_beg ; <nl> + CHECK_LT ( beg , data . size ( ) ) ; <nl> + std : : memcpy ( dmlc : : BeginPtr ( data ) + beg , <nl> + dmlc : : BeginPtr ( other_data ) + other_beg , <nl> + sizeof ( Entry ) * other_length ) ; <nl> + beg + = other_length ; <nl> + <nl> + CHECK_LT ( ptr , offset . size ( ) ) ; <nl> + offset . 
at ( ptr ) = beg ; <nl> + ptr + + ; <nl> + } <nl> + <nl> + self_data = std : : move ( data ) ; <nl> + self_offset = std : : move ( offset ) ; <nl> + } <nl> + <nl> namespace data { <nl> / / List of files that will be force linked in static links . <nl> DMLC_REGISTRY_LINK_TAG ( sparse_page_raw_format ) ; <nl> mmm a / src / data / sparse_page_source . cc <nl> ppp b / src / data / sparse_page_source . cc <nl> bool SparsePageSource : : CacheExist ( const std : : string & cache_info , <nl> } <nl> <nl> void SparsePageSource : : CreateRowPage ( dmlc : : Parser < uint32_t > * src , <nl> - const std : : string & cache_info ) { <nl> + const std : : string & cache_info ) { <nl> const std : : string page_type = " . row . page " ; <nl> std : : vector < std : : string > cache_shards = GetCacheShards ( cache_info ) ; <nl> CHECK_NE ( cache_shards . size ( ) , 0U ) ; <nl> void SparsePageSource : : CreateRowPage ( dmlc : : Parser < uint32_t > * src , <nl> CHECK ( info . qids_ . empty ( ) | | info . qids_ . size ( ) = = info . num_row_ ) ; <nl> info . SaveBinary ( fo . get ( ) ) ; <nl> } <nl> - LOG ( CONSOLE ) < < " SparsePageSource : Finished writing to " < < name_info ; <nl> + LOG ( CONSOLE ) < < " SparsePageSource : : CreateRowPage Finished writing to " <nl> + < < name_info ; <nl> } <nl> <nl> void SparsePageSource : : CreatePageFromDMatrix ( DMatrix * src , <nl> void SparsePageSource : : CreatePageFromDMatrix ( DMatrix * src , <nl> } else if ( page_type = = " . col . page " ) { <nl> page - > Push ( batch . GetTranspose ( src - > Info ( ) . num_col_ ) ) ; <nl> } else if ( page_type = = " . sorted . col . page " ) { <nl> - auto tmp = batch . GetTranspose ( src - > Info ( ) . num_col_ ) ; <nl> - tmp . SortRows ( ) ; <nl> - page - > Push ( tmp ) ; <nl> + SparsePage tmp = batch . GetTranspose ( src - > Info ( ) . num_col_ ) ; <nl> + page - > PushCSC ( tmp ) ; <nl> + page - > SortRows ( ) ; <nl> } else { <nl> LOG ( FATAL ) < < " Unknown page type : " < < page_type ; <nl> } <nl> mmm a / src / tree / updater_basemaker - inl . h <nl> ppp b / src / tree / updater_basemaker - inl . h <nl> class BaseMaker : public TreeUpdater { <nl> for ( bst_uint fid = 0 ; fid < batch . Size ( ) ; + + fid ) { <nl> auto c = batch [ fid ] ; <nl> if ( c . size ( ) ! = 0 ) { <nl> + CHECK_LT ( fid * 2 , fminmax_ . size ( ) ) ; <nl> fminmax_ [ fid * 2 + 0 ] = <nl> std : : max ( - c [ 0 ] . fvalue , fminmax_ [ fid * 2 + 0 ] ) ; <nl> fminmax_ [ fid * 2 + 1 ] = <nl> new file mode 100644 <nl> index 0000000000 . . 7baea3a39e <nl> mmm / dev / null <nl> ppp b / tests / cpp / data / test_data . cc <nl> <nl> + # include < gtest / gtest . h > <nl> + # include < vector > <nl> + <nl> + # include " xgboost / data . h " <nl> + <nl> + namespace xgboost { <nl> + TEST ( SparsePage , PushCSC ) { <nl> + std : : vector < size_t > offset { 0 } ; <nl> + std : : vector < Entry > data ; <nl> + SparsePage page ; <nl> + page . offset . HostVector ( ) = offset ; <nl> + page . data . HostVector ( ) = data ; <nl> + <nl> + offset = { 0 , 1 , 4 } ; <nl> + for ( size_t i = 0 ; i < offset . back ( ) ; + + i ) { <nl> + data . push_back ( Entry ( i , 0 . 1f ) ) ; <nl> + } <nl> + <nl> + SparsePage other ; <nl> + other . offset . HostVector ( ) = offset ; <nl> + other . data . HostVector ( ) = data ; <nl> + <nl> + page . PushCSC ( other ) ; <nl> + <nl> + ASSERT_EQ ( page . offset . HostVector ( ) . size ( ) , offset . size ( ) ) ; <nl> + ASSERT_EQ ( page . data . HostVector ( ) . size ( ) , data . size ( ) ) ; <nl> + for ( size_t i = 0 ; i < offset . 
size ( ) ; + + i ) { <nl> + ASSERT_EQ ( page . offset . HostVector ( ) [ i ] , offset [ i ] ) ; <nl> + } <nl> + for ( size_t i = 0 ; i < data . size ( ) ; + + i ) { <nl> + ASSERT_EQ ( page . data . HostVector ( ) [ i ] . index , data [ i ] . index ) ; <nl> + } <nl> + <nl> + page . PushCSC ( other ) ; <nl> + ASSERT_EQ ( page . offset . HostVector ( ) . size ( ) , offset . size ( ) ) ; <nl> + ASSERT_EQ ( page . data . Size ( ) , data . size ( ) * 2 ) ; <nl> + <nl> + for ( size_t i = 0 ; i < offset . size ( ) ; + + i ) { <nl> + ASSERT_EQ ( page . offset . HostVector ( ) [ i ] , offset [ i ] * 2 ) ; <nl> + } <nl> + <nl> + auto inst = page [ 0 ] ; <nl> + ASSERT_EQ ( inst . size ( ) , 2 ) ; <nl> + for ( auto entry : inst ) { <nl> + ASSERT_EQ ( entry . index , 0 ) ; <nl> + } <nl> + <nl> + inst = page [ 1 ] ; <nl> + ASSERT_EQ ( inst . size ( ) , 6 ) ; <nl> + std : : vector < size_t > indices_sol { 1 , 2 , 3 } ; <nl> + for ( size_t i = 0 ; i < inst . size ( ) ; + + i ) { <nl> + ASSERT_EQ ( inst [ i ] . index , indices_sol [ i % 3 ] ) ; <nl> + } <nl> + } <nl> + } <nl> mmm a / tests / cpp / test_learner . cc <nl> ppp b / tests / cpp / test_learner . cc <nl> <nl> # include < vector > <nl> # include " helpers . h " <nl> # include " xgboost / learner . h " <nl> + # include " dmlc / filesystem . h " <nl> <nl> namespace xgboost { <nl> <nl> TEST ( Learner , CheckGroup ) { <nl> delete pp_mat ; <nl> } <nl> <nl> + TEST ( Learner , SLOW_CheckMultiBatch ) { <nl> + using Arg = std : : pair < std : : string , std : : string > ; <nl> + / / Create sufficiently large data to make two row pages <nl> + dmlc : : TemporaryDirectory tempdir ; <nl> + const std : : string tmp_file = tempdir . path + " / big . libsvm " ; <nl> + CreateBigTestData ( tmp_file , 5000000 ) ; <nl> + std : : shared_ptr < DMatrix > dmat ( xgboost : : DMatrix : : Load ( tmp_file + " # " + tmp_file + " . cache " , true , false ) ) ; <nl> + EXPECT_TRUE ( FileExists ( tmp_file + " . cache . row . page " ) ) ; <nl> + EXPECT_FALSE ( dmat - > SingleColBlock ( ) ) ; <nl> + size_t num_row = dmat - > Info ( ) . num_row_ ; <nl> + std : : vector < bst_float > labels ( num_row ) ; <nl> + for ( size_t i = 0 ; i < num_row ; + + i ) { <nl> + labels [ i ] = i % 2 ; <nl> + } <nl> + dmat - > Info ( ) . SetInfo ( " label " , labels . data ( ) , DataType : : kFloat32 , num_row ) ; <nl> + std : : vector < std : : shared_ptr < DMatrix > > mat { dmat } ; <nl> + auto learner = std : : unique_ptr < Learner > ( Learner : : Create ( mat ) ) ; <nl> + learner - > Configure ( { Arg { " objective " , " binary : logistic " } } ) ; <nl> + learner - > InitModel ( ) ; <nl> + learner - > UpdateOneIter ( 0 , dmat . get ( ) ) ; <nl> + } <nl> + <nl> } / / namespace xgboost <nl> | Add PushCSC for SparsePage . ( ) | dmlc/xgboost | 7ea567567933a4aa11348495241da272246bb4ca | 2019-03-01T17:58:08Z |
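The new `SparsePage::PushCSC` in this dmlc/xgboost row appends a second page stored in CSC layout column by column: for each feature it copies the existing column's entries, then the incoming column's entries, and accumulates the combined column offsets; the accompanying unit test pushes a page with offsets {0, 1, 4} twice and expects every offset to double while each column's entries interleave. The Python sketch below mirrors only that merge logic; it is not the C++ implementation, and plain lists stand in for the `HostVector` buffers.

```python
# Minimal sketch of the column-wise CSC merge performed by PushCSC.
# `data` holds (index, value) entries, `offset` is the CSC column pointer.
# Names and container types are illustrative only.

def push_csc(self_data, self_offset, other_data, other_offset):
    if not other_data:
        return self_data, self_offset
    if not self_data:
        # First page: just adopt the incoming buffers.
        return list(other_data), list(other_offset)
    assert len(self_offset) == len(other_offset), "column counts must match"

    merged_data, merged_offset = [], [0]
    n_features = len(other_offset) - 1
    for i in range(n_features):
        # Existing entries of column i, then the incoming ones.
        merged_data.extend(self_data[self_offset[i]:self_offset[i + 1]])
        merged_data.extend(other_data[other_offset[i]:other_offset[i + 1]])
        merged_offset.append(len(merged_data))
    return merged_data, merged_offset

# Mirrors the SparsePage.PushCSC unit test: offsets {0, 1, 4} pushed twice.
entries = [(0, 0.1), (1, 0.1), (2, 0.1), (3, 0.1)]
data, offset = push_csc([], [0], entries, [0, 1, 4])
data, offset = push_csc(data, offset, entries, [0, 1, 4])
assert offset == [0, 2, 8]
assert [e[0] for e in data] == [0, 0, 1, 2, 3, 1, 2, 3]
```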
mmm a / android / android - jni / Makefile <nl> ppp b / android / android - jni / Makefile <nl> <nl> # The path to the NDK , requires crystax version r - 4 for now , due to support <nl> - # for the standard library <nl> + # for the standard library <nl> <nl> - # load environment from local make file <nl> + # load environment from local make file <nl> LOCAL_ENV_MK = local . env . mk <nl> ifneq " $ ( wildcard $ ( LOCAL_ENV_MK ) ) " " " <nl> include $ ( LOCAL_ENV_MK ) <nl> else <nl> $ ( shell cp sample . $ ( LOCAL_ENV_MK ) $ ( LOCAL_ENV_MK ) ) <nl> $ ( info ERROR local environement not setup ! try : ) <nl> $ ( info gedit $ ( LOCAL_ENV_MK ) ) <nl> - $ ( info Please setup the $ ( LOCAL_ENV_MK ) - the default was just created ' ) <nl> - include $ ( LOCAL_ENV_MK ) <nl> + $ ( error Please setup the $ ( LOCAL_ENV_MK ) - the default was just created ' ) <nl> endif <nl> <nl> ANDROID_NDK_BASE = $ ( ANDROID_NDK_ROOT ) <nl> mmm a / android / android - jni / jni / Calibration . cpp <nl> ppp b / android / android - jni / jni / Calibration . cpp <nl> bool runCalibration ( vector < vector < Point2f > > imagePoints , <nl> if ( flags & CV_CALIB_FIX_ASPECT_RATIO ) <nl> cameraMatrix . at < double > ( 0 , 0 ) = aspectRatio ; <nl> <nl> - distCoeffs = Mat : : zeros ( 5 , 1 , CV_64F ) ; <nl> + distCoeffs = Mat : : zeros ( 4 , 1 , CV_64F ) ; <nl> <nl> vector < vector < Point3f > > objectPoints ( 1 ) ; <nl> calcChessboardCorners ( boardSize , squareSize , objectPoints [ 0 ] ) ; <nl> new file mode 100755 <nl> index 00000000000 . . 0a1b6bd3088 <nl> mmm / dev / null <nl> ppp b / android / android - jni / project_create . sh <nl> <nl> + # ! / bin / sh <nl> + # this generates an ant based cli build of the android - jni project <nl> + android update project - - name android - opencv \ <nl> + - - path . <nl> | working on cleaning up the android port - trying to fix the distortion coefficients bug | opencv/opencv | 525da9ef019d53a496854681b821885f02e1b3de | 2010-10-12T02:23:08Z |
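Besides turning the missing `local.env.mk` warning into a hard `$(error ...)` and adding an `android update project` helper script, this opencv/opencv row shrinks the calibration's distortion vector from five coefficients to four, so only k1, k2, p1, p2 are estimated. Four-element distortion vectors are a standard OpenCV convention; the short Python sketch below only illustrates that convention (the camera matrix, coefficient values, and image content are made up for illustration).

```python
# Illustrative only: OpenCV accepts a 4-element distortion vector
# (k1, k2, p1, p2), matching the Mat::zeros(4, 1, CV_64F) used above.
import cv2
import numpy as np

camera_matrix = np.array([[500.0, 0.0, 320.0],
                          [0.0, 500.0, 240.0],
                          [0.0, 0.0, 1.0]])
# k1, k2, p1, p2 -- no k3 term; values here are arbitrary examples.
dist_coeffs = np.array([-0.25, 0.07, 0.001, -0.002])

# Synthetic test image so the snippet runs without any input file.
image = np.full((480, 640, 3), 255, np.uint8)
cv2.circle(image, (320, 240), 100, (0, 0, 0), 2)

undistorted = cv2.undistort(image, camera_matrix, dist_coeffs)
print(undistorted.shape)
```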
mmm a / caffe2 / python / cnn . py <nl> ppp b / caffe2 / python / cnn . py <nl> def FC_Sparse ( self , * args , * * kwargs ) : <nl> def Dropout ( self , * args , * * kwargs ) : <nl> return model_helpers . Dropout ( self , * args , * * kwargs ) <nl> <nl> - def LRN ( self , blob_in , blob_out , * * kwargs ) : <nl> - " " " LRN " " " <nl> - return self . net . LRN ( <nl> - blob_in , <nl> - [ blob_out , " _ " + blob_out + " _scale " ] , <nl> - order = self . order , <nl> - * * kwargs <nl> - ) [ 0 ] <nl> + def LRN ( self , * args , * * kwargs ) : <nl> + return model_helpers . LRN ( self , * args , * * kwargs ) <nl> + <nl> + def Softmax ( self , * args , * * kwargs ) : <nl> + return model_helpers . Softmax ( self , * args , use_cudnn = self . use_cudnn , <nl> + * * kwargs ) <nl> + <nl> + def SpatialBN ( self , * args , * * kwargs ) : <nl> + return model_helpers . SpatialBN ( self , * args , order = self . order , * * kwargs ) <nl> + <nl> + def InstanceNorm ( self , * args , * * kwargs ) : <nl> + return model_helpers . InstanceNorm ( self , * args , order = self . order , <nl> + * * kwargs ) <nl> + <nl> <nl> def MaxPool ( self , * args , * * kwargs ) : <nl> return model_helpers . MaxPool ( self , * args , use_cudnn = self . use_cudnn , <nl> def Transpose ( self , blob_in , blob_out , * * kwargs ) : <nl> kwargs [ ' engine ' ] = ' CUDNN ' <nl> return self . net . Transpose ( blob_in , blob_out , * * kwargs ) <nl> <nl> - def Softmax ( self , blob_in , blob_out = None , * * kwargs ) : <nl> - " " " Softmax . " " " <nl> - if self . use_cudnn : <nl> - kwargs [ ' engine ' ] = ' CUDNN ' <nl> - if blob_out is not None : <nl> - return self . net . Softmax ( blob_in , blob_out , * * kwargs ) <nl> - else : <nl> - return self . net . Softmax ( blob_in , * * kwargs ) <nl> - <nl> def Sum ( self , blob_in , blob_out , * * kwargs ) : <nl> " " " Sum " " " <nl> return self . net . Sum ( blob_in , blob_out , * * kwargs ) <nl> <nl> - def InstanceNorm ( self , blob_in , blob_out , dim_in , * * kwargs ) : <nl> - blob_out = blob_out or self . net . NextName ( ) <nl> - # Input : input , scale , bias <nl> - # Output : output , saved_mean , saved_inv_std <nl> - # scale : initialize with ones <nl> - # bias : initialize with zeros <nl> - <nl> - def init_blob ( value , suffix ) : <nl> - return self . param_init_net . ConstantFill ( <nl> - [ ] , blob_out + " _ " + suffix , shape = [ dim_in ] , value = value ) <nl> - scale , bias = init_blob ( 1 . 0 , " s " ) , init_blob ( 0 . 0 , " b " ) <nl> - <nl> - self . params . extend ( [ scale , bias ] ) <nl> - self . weights . append ( scale ) <nl> - self . biases . append ( bias ) <nl> - blob_outs = [ blob_out , blob_out + " _sm " , blob_out + " _siv " ] <nl> - if ' is_test ' in kwargs and kwargs [ ' is_test ' ] : <nl> - blob_outputs = self . net . InstanceNorm ( <nl> - [ blob_in , scale , bias ] , [ blob_out ] , <nl> - order = self . order , * * kwargs ) <nl> - return blob_outputs <nl> - else : <nl> - blob_outputs = self . net . InstanceNorm ( <nl> - [ blob_in , scale , bias ] , blob_outs , <nl> - order = self . order , * * kwargs ) <nl> - # Return the output <nl> - return blob_outputs [ 0 ] <nl> - <nl> - def SpatialBN ( self , blob_in , blob_out , dim_in , * * kwargs ) : <nl> - blob_out = blob_out or self . net . 
NextName ( ) <nl> - # Input : input , scale , bias , est_mean , est_inv_var <nl> - # Output : output , running_mean , running_inv_var , saved_mean , <nl> - # saved_inv_var <nl> - # scale : initialize with ones <nl> - # bias : initialize with zeros <nl> - # est mean : zero <nl> - # est var : ones <nl> - <nl> - def init_blob ( value , suffix ) : <nl> - return self . param_init_net . ConstantFill ( <nl> - [ ] , blob_out + " _ " + suffix , shape = [ dim_in ] , value = value ) <nl> - <nl> - if self . init_params : <nl> - scale , bias = init_blob ( 1 . 0 , " s " ) , init_blob ( 0 . 0 , " b " ) <nl> - running_mean = init_blob ( 0 . 0 , " rm " ) <nl> - running_inv_var = init_blob ( 1 . 0 , " riv " ) <nl> - else : <nl> - scale = core . ScopedBlobReference ( <nl> - blob_out + ' _s ' , self . param_init_net ) <nl> - bias = core . ScopedBlobReference ( <nl> - blob_out + ' _b ' , self . param_init_net ) <nl> - running_mean = core . ScopedBlobReference ( <nl> - blob_out + ' _rm ' , self . param_init_net ) <nl> - running_inv_var = core . ScopedBlobReference ( <nl> - blob_out + ' _riv ' , self . param_init_net ) <nl> - <nl> - self . params . extend ( [ scale , bias ] ) <nl> - self . computed_params . extend ( [ running_mean , running_inv_var ] ) <nl> - self . weights . append ( scale ) <nl> - self . biases . append ( bias ) <nl> - blob_outs = [ blob_out , running_mean , running_inv_var , <nl> - blob_out + " _sm " , blob_out + " _siv " ] <nl> - if ' is_test ' in kwargs and kwargs [ ' is_test ' ] : <nl> - blob_outputs = self . net . SpatialBN ( <nl> - [ blob_in , scale , bias , blob_outs [ 1 ] , blob_outs [ 2 ] ] , [ blob_out ] , <nl> - order = self . order , * * kwargs ) <nl> - return blob_outputs <nl> - else : <nl> - blob_outputs = self . net . SpatialBN ( <nl> - [ blob_in , scale , bias , blob_outs [ 1 ] , blob_outs [ 2 ] ] , blob_outs , <nl> - order = self . order , * * kwargs ) <nl> - # Return the output <nl> - return blob_outputs [ 0 ] <nl> - <nl> def Iter ( self , blob_out , * * kwargs ) : <nl> if ' device_option ' in kwargs : <nl> del kwargs [ ' device_option ' ] <nl> new file mode 100644 <nl> index 000000000000 . . 7596b43cb619 <nl> mmm / dev / null <nl> ppp b / caffe2 / python / helpers / normalization . py <nl> <nl> + # # @ package normalization <nl> + # Module caffe2 . python . helpers . normalization <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + from __future__ import unicode_literals <nl> + <nl> + from caffe2 . python import core <nl> + <nl> + <nl> + def LRN ( model , blob_in , blob_out , * * kwargs ) : <nl> + " " " LRN " " " <nl> + return model . net . LRN ( <nl> + blob_in , <nl> + [ blob_out , " _ " + blob_out + " _scale " ] , <nl> + order = model . order , <nl> + * * kwargs <nl> + ) [ 0 ] <nl> + <nl> + <nl> + def Softmax ( model , blob_in , blob_out = None , use_cudnn = False , * * kwargs ) : <nl> + " " " Softmax . " " " <nl> + if use_cudnn : <nl> + kwargs [ ' engine ' ] = ' CUDNN ' <nl> + if blob_out is not None : <nl> + return model . net . Softmax ( blob_in , blob_out , * * kwargs ) <nl> + else : <nl> + return model . net . Softmax ( blob_in , * * kwargs ) <nl> + <nl> + <nl> + def InstanceNorm ( model , blob_in , blob_out , dim_in , order = " NCHW " , * * kwargs ) : <nl> + blob_out = blob_out or model . net . 
NextName ( ) <nl> + # Input : input , scale , bias <nl> + # Output : output , saved_mean , saved_inv_std <nl> + # scale : initialize with ones <nl> + # bias : initialize with zeros <nl> + <nl> + def init_blob ( value , suffix ) : <nl> + return model . param_init_net . ConstantFill ( <nl> + [ ] , blob_out + " _ " + suffix , shape = [ dim_in ] , value = value ) <nl> + scale , bias = init_blob ( 1 . 0 , " s " ) , init_blob ( 0 . 0 , " b " ) <nl> + <nl> + model . params . extend ( [ scale , bias ] ) <nl> + model . weights . append ( scale ) <nl> + model . biases . append ( bias ) <nl> + blob_outs = [ blob_out , blob_out + " _sm " , blob_out + " _siv " ] <nl> + if ' is_test ' in kwargs and kwargs [ ' is_test ' ] : <nl> + blob_outputs = model . net . InstanceNorm ( <nl> + [ blob_in , scale , bias ] , [ blob_out ] , <nl> + order = order , * * kwargs ) <nl> + return blob_outputs <nl> + else : <nl> + blob_outputs = model . net . InstanceNorm ( <nl> + [ blob_in , scale , bias ] , blob_outs , <nl> + order = order , * * kwargs ) <nl> + # Return the output <nl> + return blob_outputs [ 0 ] <nl> + <nl> + <nl> + def SpatialBN ( model , blob_in , blob_out , dim_in , order = " NCHW " , * * kwargs ) : <nl> + blob_out = blob_out or model . net . NextName ( ) <nl> + # Input : input , scale , bias , est_mean , est_inv_var <nl> + # Output : output , running_mean , running_inv_var , saved_mean , <nl> + # saved_inv_var <nl> + # scale : initialize with ones <nl> + # bias : initialize with zeros <nl> + # est mean : zero <nl> + # est var : ones <nl> + <nl> + def init_blob ( value , suffix ) : <nl> + return model . param_init_net . ConstantFill ( <nl> + [ ] , blob_out + " _ " + suffix , shape = [ dim_in ] , value = value ) <nl> + <nl> + if model . init_params : <nl> + scale , bias = init_blob ( 1 . 0 , " s " ) , init_blob ( 0 . 0 , " b " ) <nl> + running_mean = init_blob ( 0 . 0 , " rm " ) <nl> + running_inv_var = init_blob ( 1 . 0 , " riv " ) <nl> + else : <nl> + scale = core . ScopedBlobReference ( <nl> + blob_out + ' _s ' , model . param_init_net ) <nl> + bias = core . ScopedBlobReference ( <nl> + blob_out + ' _b ' , model . param_init_net ) <nl> + running_mean = core . ScopedBlobReference ( <nl> + blob_out + ' _rm ' , model . param_init_net ) <nl> + running_inv_var = core . ScopedBlobReference ( <nl> + blob_out + ' _riv ' , model . param_init_net ) <nl> + <nl> + model . params . extend ( [ scale , bias ] ) <nl> + model . computed_params . extend ( [ running_mean , running_inv_var ] ) <nl> + model . weights . append ( scale ) <nl> + model . biases . append ( bias ) <nl> + blob_outs = [ blob_out , running_mean , running_inv_var , <nl> + blob_out + " _sm " , blob_out + " _siv " ] <nl> + if ' is_test ' in kwargs and kwargs [ ' is_test ' ] : <nl> + blob_outputs = model . net . SpatialBN ( <nl> + [ blob_in , scale , bias , blob_outs [ 1 ] , blob_outs [ 2 ] ] , [ blob_out ] , <nl> + order = order , * * kwargs ) <nl> + return blob_outputs <nl> + else : <nl> + blob_outputs = model . net . SpatialBN ( <nl> + [ blob_in , scale , bias , blob_outs [ 1 ] , blob_outs [ 2 ] ] , blob_outs , <nl> + order = order , * * kwargs ) <nl> + # Return the output <nl> + return blob_outputs [ 0 ] <nl> mmm a / caffe2 / python / model_helpers . py <nl> ppp b / caffe2 / python / model_helpers . py <nl> <nl> from caffe2 . python . helpers . dropout import * <nl> from caffe2 . python . helpers . fc import * <nl> from caffe2 . python . helpers . pooling import * <nl> + from caffe2 . python . helpers . 
normalization import * <nl> | normalization helpers | pytorch/pytorch | 3623c241c481c27dff4f566b60ef23924ac7a24d | 2017-04-17T22:03:04Z |
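The record above moves LRN, Softmax, SpatialBN and InstanceNorm out of CNNModelHelper's body and into a new caffe2.python.helpers.normalization module, leaving thin methods that forward to module-level functions which take the model as their first argument (with use_cudnn / order passed through explicitly). As a rough, language-neutral illustration of that delegation pattern only — hypothetical names, not Caffe2's actual classes — a minimal C++ sketch:

    #include <iostream>
    #include <string>
    #include <utility>

    // Stand-in for the shared helpers module: free functions that take the
    // "model" state first, mirroring model_helpers.LRN(self, ...) above.
    namespace helpers {
    std::string LRN(const std::string& order, const std::string& blob_in) {
      return "LRN(" + blob_in + ", order=" + order + ")";
    }
    }  // namespace helpers

    class ModelHelper {
     public:
      explicit ModelHelper(std::string order) : order_(std::move(order)) {}
      // Thin wrapper: the public surface stays put, the logic lives in the
      // shared helper so other model-helper classes can reuse it.
      std::string LRN(const std::string& blob_in) const {
        return helpers::LRN(order_, blob_in);
      }
     private:
      std::string order_;
    };

    int main() {
      ModelHelper m("NCHW");
      std::cout << m.LRN("data") << "\n";  // LRN(data, order=NCHW)
    }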
new file mode 100644 <nl> index 000000000000 . . cdfb22053b6b <nl> mmm / dev / null <nl> ppp b / test / Frontend / rdar13723332 . swift <nl> <nl> + / / RUN : % swift - emit - sil 2 > & 1 | FileCheck % s <nl> + / / CHECK : swift : no inputs specified <nl> | Don ' t crash if no input filename was specified . | apple/swift | 7f2c15271945c4bc6e9a8326444eb31186694717 | 2013-04-30T00:01:37Z |
mmm a / tensorflow / python / platform / default / _gfile . py <nl> ppp b / tensorflow / python / platform / default / _gfile . py <nl> def Stat ( path ) : # pylint : disable = invalid - name <nl> filestat = collections . namedtuple ( ' FileStat ' , [ ' mtime ' ] ) <nl> filestat . mtime = statinfo . st_mtime <nl> return filestat <nl> + <nl> + <nl> + def Copy ( oldpath , newpath , overwrite = False ) : <nl> + " " " Copy a file . <nl> + <nl> + Args : <nl> + oldpath : string ; a pathname of a file . <nl> + newpath : string ; a pathname to which the file will be copied . <nl> + overwrite : boolean ; if false , it is an error for newpath to be <nl> + occupied by an existing file . <nl> + <nl> + Raises : <nl> + OSError : If " newpath " is occupied by an existing file and overwrite = False , <nl> + or any error thrown by shutil . copy . <nl> + " " " <nl> + if not overwrite and Exists ( newpath ) : <nl> + raise OSError ( errno . EEXIST , os . strerror ( errno . EEXIST ) , newpath ) <nl> + shutil . copy ( oldpath , newpath ) <nl> mmm a / tensorflow / python / platform / default / gfile_test . py <nl> ppp b / tensorflow / python / platform / default / gfile_test . py <nl> def testRename ( self ) : <nl> gfile . Rename ( self . tmp + " dir2 " , self . tmp + " newdir " ) <nl> self . assertTrue ( gfile . Exists ( self . tmp + " newdir " ) ) <nl> <nl> + def testCopy ( self ) : <nl> + gfile . MkDir ( self . tmp + " dir1 " ) <nl> + gfile . MkDir ( self . tmp + " dir2 " ) <nl> + with gfile . GFile ( self . tmp + " dir1 / file1 " , " w " ) : <nl> + pass # Create file <nl> + with gfile . GFile ( self . tmp + " dir2 / file2 " , " w " ) : <nl> + pass # Create file <nl> + <nl> + # Dest file already exists , overwrite = False ( default ) . <nl> + self . assertRaises ( <nl> + OSError , lambda : gfile . Copy ( self . tmp + " dir1 / file1 " , <nl> + self . tmp + " dir2 / file2 " ) ) <nl> + # Overwrite succeeds <nl> + gfile . Copy ( self . tmp + " dir1 / file1 " , self . tmp + " dir2 / file2 " , <nl> + overwrite = True ) <nl> + self . assertTrue ( gfile . Exists ( self . tmp + " dir2 / file2 " ) ) <nl> + <nl> + # Normal copy . <nl> + gfile . Rename ( self . tmp + " dir1 / file1 " , self . tmp + " dir2 / file1 " ) <nl> + self . assertTrue ( gfile . Exists ( self . tmp + " dir2 / file1 " ) ) <nl> + <nl> + # Normal copy to non - existent dir <nl> + self . assertRaises ( OSError , <nl> + lambda : gfile . Rename ( self . tmp + " dir1 / file1 " , <nl> + self . tmp + " newdir / file1 " ) ) <nl> + <nl> + <nl> if __name__ = = " __main__ " : <nl> googletest . main ( ) <nl> | TensorFlow : add gfile . Copy implementation and test . Fixes . | tensorflow/tensorflow | 0ee27e38afb2682dc7a320397c5d29f43ea7f354 | 2016-02-16T00:58:56Z |
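The gfile.Copy helper added above refuses to overwrite an existing destination unless overwrite=True (raising OSError with EEXIST) and otherwise delegates to shutil.copy. The same check-then-copy semantics can be sketched in C++17 with std::filesystem — an analogy for orientation only, not TensorFlow code:

    #include <filesystem>
    #include <stdexcept>

    namespace fs = std::filesystem;

    // Copy oldpath to newpath; refuse to clobber an existing file unless
    // overwrite is set, mirroring the Python helper above.
    void Copy(const fs::path& oldpath, const fs::path& newpath, bool overwrite = false) {
      if (!overwrite && fs::exists(newpath))
        throw std::runtime_error("destination already exists: " + newpath.string());
      fs::copy_file(oldpath, newpath, fs::copy_options::overwrite_existing);
    }

    int main() {
      // Assumes a.txt exists; the second call is needed once b.txt is present.
      Copy("a.txt", "b.txt");
      Copy("a.txt", "b.txt", /*overwrite=*/true);
    }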
mmm a / scalarvssimd / Makefile <nl> ppp b / scalarvssimd / Makefile <nl> bench : benchmarks / bench . cpp rapidjson / license . txt $ ( HEADERS ) <nl> $ ( CXX ) - std = c + + 11 - O3 - o $ @ benchmarks / bench . cpp - Irapidjson / include - Iinclude - march = native - lm - Wall - Wextra <nl> <nl> rapidjson / license . txt : <nl> - git submodule foreach git pull origin master <nl> - <nl> + git submodule update - - init - - recursive <nl> <nl> clean : <nl> rm - f bench <nl> | Fixed dependency . | simdjson/simdjson | bc85248978c0d9e84f97333e298f7fb77492e375 | 2018-04-27T00:28:02Z |
mmm a / src / btree / concurrent_traversal . cc <nl> ppp b / src / btree / concurrent_traversal . cc <nl> class concurrent_traversal_adapter_t : public depth_first_traversal_callback_t { <nl> sink_waiters_ ( 0 ) , <nl> cb_ ( cb ) { } <nl> <nl> - void handle_pair_coro ( dft_value_t * fragile_keyvalue , <nl> + void handle_pair_coro ( scoped_key_value_t * fragile_keyvalue , <nl> adjustable_semaphore_acq_t * fragile_acq , <nl> fifo_enforcer_write_token_t token , <nl> auto_drainer_t : : lock_t ) { <nl> / / This is called by coro_t : : spawn_now_dangerously . We need to get these <nl> / / values before the caller ' s stack frame is destroyed . <nl> - dft_value_t keyvalue = std : : move ( * fragile_keyvalue ) ; <nl> + scoped_key_value_t keyvalue = std : : move ( * fragile_keyvalue ) ; <nl> <nl> adjustable_semaphore_acq_t semaphore_acq ( std : : move ( * fragile_acq ) ) ; <nl> <nl> class concurrent_traversal_adapter_t : public depth_first_traversal_callback_t { <nl> } <nl> } <nl> <nl> - virtual bool handle_pair ( dft_value_t & & keyvalue ) { <nl> + virtual bool handle_pair ( scoped_key_value_t & & keyvalue ) { <nl> / / First thing first : Get in line with the token enforcer . <nl> <nl> fifo_enforcer_write_token_t token = source_ . enter_write ( ) ; <nl> mmm a / src / btree / concurrent_traversal . hpp <nl> ppp b / src / btree / concurrent_traversal . hpp <nl> class concurrent_traversal_callback_t { <nl> / / can enters at a time . ( This should happen after loading the value from disk <nl> / / ( which should be done concurrently ) and before using ql : : env_t to evaluate <nl> / / transforms and terminals , or whatever non - reentrant behavior you have in mind . ) <nl> - virtual bool handle_pair ( dft_value_t & & keyvalue , <nl> + virtual bool handle_pair ( scoped_key_value_t & & keyvalue , <nl> concurrent_traversal_waiter_t waiter ) <nl> THROWS_ONLY ( interrupted_exc_t ) = 0 ; <nl> <nl> mmm a / src / btree / depth_first_traversal . cc <nl> ppp b / src / btree / depth_first_traversal . cc <nl> bool btree_depth_first_traversal ( btree_slice_t * slice , transaction_t * transactio <nl> btree_key_cmp ( key , range . right . key . btree_key ( ) ) > = 0 ) { <nl> break ; <nl> } <nl> - if ( ! cb - > handle_pair ( dft_value_t ( key , ( * it ) . second , <nl> - movable_t < counted_buf_lock_t > ( block ) ) ) ) { <nl> + if ( ! cb - > handle_pair ( scoped_key_value_t ( key , ( * it ) . second , <nl> + movable_t < counted_buf_lock_t > ( block ) ) ) ) { <nl> return false ; <nl> } <nl> } <nl> bool btree_depth_first_traversal ( btree_slice_t * slice , transaction_t * transactio <nl> break ; <nl> } <nl> <nl> - if ( ! cb - > handle_pair ( dft_value_t ( key , ( * it ) . second , <nl> - movable_t < counted_buf_lock_t > ( block ) ) ) ) { <nl> + if ( ! cb - > handle_pair ( scoped_key_value_t ( key , ( * it ) . second , <nl> + movable_t < counted_buf_lock_t > ( block ) ) ) ) { <nl> return false ; <nl> } <nl> } <nl> mmm a / src / btree / depth_first_traversal . hpp <nl> ppp b / src / btree / depth_first_traversal . hpp <nl> class counted_buf_lock_t : public buf_lock_t , <nl> counted_buf_lock_t ( Args & & . . . args ) : buf_lock_t ( std : : forward < Args > ( args ) . . . 
) { } <nl> } ; <nl> <nl> - struct dft_value_t { <nl> - dft_value_t ( const btree_key_t * key , <nl> - const void * value , <nl> - movable_t < counted_buf_lock_t > & & buf ) <nl> + class scoped_key_value_t { <nl> + public : <nl> + scoped_key_value_t ( const btree_key_t * key , <nl> + const void * value , <nl> + movable_t < counted_buf_lock_t > & & buf ) <nl> : key_ ( key ) , value_ ( value ) , buf_ ( std : : move ( buf ) ) { <nl> guarantee ( buf_ . has ( ) ) ; <nl> } <nl> <nl> - public : <nl> const btree_key_t * key ( ) const { <nl> guarantee ( buf_ . has ( ) ) ; <nl> return key_ ; <nl> struct dft_value_t { <nl> <nl> / / Releases the hold on the buf_lock_t , after which key ( ) and value ( ) may not be <nl> / / used . <nl> - void release_keepalive ( ) { buf_ . reset ( ) ; } <nl> + void reset ( ) { buf_ . reset ( ) ; } <nl> <nl> private : <nl> const btree_key_t * key_ ; <nl> class depth_first_traversal_callback_t { <nl> public : <nl> / * Return value of ` true ` indicates to keep going ; ` false ` indicates to stop <nl> traversing the tree . * / <nl> - virtual bool handle_pair ( dft_value_t & & value ) = 0 ; <nl> + virtual bool handle_pair ( scoped_key_value_t & & keyvalue ) = 0 ; <nl> protected : <nl> virtual ~ depth_first_traversal_callback_t ( ) { } <nl> } ; <nl> mmm a / src / memcached / memcached_btree / rget . cc <nl> ppp b / src / memcached / memcached_btree / rget . cc <nl> class rget_depth_first_traversal_callback_t : public depth_first_traversal_callb <nl> public : <nl> rget_depth_first_traversal_callback_t ( transaction_t * txn , int max , exptime_t et ) : <nl> transaction ( txn ) , maximum ( max ) , effective_time ( et ) , cumulative_size ( 0 ) { } <nl> - bool handle_pair ( dft_value_t & & keyvalue ) { <nl> - const btree_key_t * key = keyvalue . key ( ) ; <nl> - const void * value = keyvalue . value ( ) ; <nl> - <nl> - const memcached_value_t * mc_value = reinterpret_cast < const memcached_value_t * > ( value ) ; <nl> + bool handle_pair ( scoped_key_value_t & & keyvalue ) { <nl> + const memcached_value_t * mc_value <nl> + = static_cast < const memcached_value_t * > ( keyvalue . value ( ) ) ; <nl> if ( mc_value - > expired ( effective_time ) ) { <nl> return true ; <nl> } <nl> counted_t < data_buffer_t > data ( value_to_data_buffer ( mc_value , transaction ) ) ; <nl> - result . pairs . push_back ( key_with_data_buffer_t ( store_key_t ( key ) , mc_value - > mcflags ( ) , data ) ) ; <nl> + result . pairs . push_back ( key_with_data_buffer_t ( store_key_t ( keyvalue . key ( ) ) , <nl> + mc_value - > mcflags ( ) , <nl> + data ) ) ; <nl> cumulative_size + = estimate_rget_result_pair_size ( result . pairs . back ( ) ) ; <nl> return static_cast < int64_t > ( result . pairs . size ( ) ) < maximum & & cumulative_size < rget_max_chunk_size ; <nl> } <nl> mmm a / src / rdb_protocol / btree . cc <nl> ppp b / src / rdb_protocol / btree . cc <nl> class rdb_rget_depth_first_traversal_callback_t : public concurrent_traversal_ca <nl> <nl> / / RSI : Ask whether interrupted_exc_t is the only exception that could possibly <nl> / / be thrown here . <nl> - bool handle_pair ( dft_value_t & & keyvalue , <nl> + bool handle_pair ( scoped_key_value_t & & keyvalue , <nl> concurrent_traversal_waiter_t waiter ) THROWS_ONLY ( interrupted_exc_t ) { <nl> store_key_t store_key ( keyvalue . key ( ) ) ; <nl> if ( bad_init ) { <nl> class rdb_rget_depth_first_traversal_callback_t : public concurrent_traversal_ca <nl> <nl> const rdb_value_t * rdb_value = reinterpret_cast < const rdb_value_t * > ( keyvalue . 
value ( ) ) ; <nl> boost : : shared_ptr < scoped_cJSON_t > first_value = get_data ( rdb_value , transaction ) ; <nl> - keyvalue . release_keepalive ( ) ; <nl> + <nl> + keyvalue . reset ( ) ; <nl> <nl> waiter . wait_interruptible ( ) ; <nl> <nl> | Renamed dft_value_t to scoped_key_value_t . | rethinkdb/rethinkdb | 1b836b1bf82d07a4ac1f3f4c3644fc29cd9ed27a | 2013-08-29T06:29:55Z |
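Besides the rename, the diff above turns the old struct into a class and replaces release_keepalive() with reset(), which drops the hold on the underlying counted_buf_lock_t; after that, key() and value() must no longer be used because they point into the locked block. A stripped-down sketch of that shape — std::unique_ptr standing in for the movable counted buffer lock, and copied strings instead of pointers into the block, so the asserts only model the contract:

    #include <cassert>
    #include <memory>
    #include <string>
    #include <utility>

    struct buf_lock {};  // stand-in for counted_buf_lock_t

    class scoped_key_value {
     public:
      scoped_key_value(std::string key, std::string value, std::unique_ptr<buf_lock> buf)
          : key_(std::move(key)), value_(std::move(value)), buf_(std::move(buf)) {
        assert(buf_ != nullptr);
      }
      const std::string& key() const { assert(buf_); return key_; }
      const std::string& value() const { assert(buf_); return value_; }
      // Releases the hold on the lock; key()/value() must not be used afterwards.
      void reset() { buf_.reset(); }
     private:
      std::string key_, value_;
      std::unique_ptr<buf_lock> buf_;
    };

    int main() {
      scoped_key_value kv("key0", "value0", std::make_unique<buf_lock>());
      // ... read kv.key() / kv.value() while the lock is held ...
      kv.reset();  // the rdb_rget callback does this before waiting on the waiter
    }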
mmm a / core / math / rect2 . h <nl> ppp b / core / math / rect2 . h <nl> struct Rect2 { <nl> inline bool encloses ( const Rect2 & p_rect ) const { <nl> <nl> return ( p_rect . position . x > = position . x ) & & ( p_rect . position . y > = position . y ) & & <nl> - ( ( p_rect . position . x + p_rect . size . x ) < ( position . x + size . x ) ) & & <nl> - ( ( p_rect . position . y + p_rect . size . y ) < ( position . y + size . y ) ) ; <nl> + ( ( p_rect . position . x + p_rect . size . x ) < = ( position . x + size . x ) ) & & <nl> + ( ( p_rect . position . y + p_rect . size . y ) < = ( position . y + size . y ) ) ; <nl> } <nl> <nl> _FORCE_INLINE_ bool has_no_area ( ) const { <nl> | Merge pull request from AlexHolly / fix - rect2 - encloses | godotengine/godot | 106e21fd5f85a861a692db229ee69a6ec31fe562 | 2019-10-08T14:21:45Z |
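The one-character-per-axis fix above switches Rect2::encloses from < to <= on the far edges, so a rectangle now encloses another one that ends exactly on its right/bottom edge — including an identical rectangle. A self-contained before/after sketch (plain structs, not the engine's Rect2):

    #include <iostream>

    struct Rect { double x, y, w, h; };

    // Old behaviour: strict far-edge comparison, so r did not enclose itself.
    bool encloses_strict(const Rect& a, const Rect& b) {
      return b.x >= a.x && b.y >= a.y &&
             (b.x + b.w) <  (a.x + a.w) &&
             (b.y + b.h) <  (a.y + a.h);
    }

    // Fixed behaviour: touching the far edge still counts as enclosed.
    bool encloses_inclusive(const Rect& a, const Rect& b) {
      return b.x >= a.x && b.y >= a.y &&
             (b.x + b.w) <= (a.x + a.w) &&
             (b.y + b.h) <= (a.y + a.h);
    }

    int main() {
      Rect r{0, 0, 10, 10};
      std::cout << encloses_strict(r, r) << " "
                << encloses_inclusive(r, r) << "\n";  // prints: 0 1
    }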
mmm a / RELEASES . md <nl> ppp b / RELEASES . md <nl> <nl> - Version 0 . 7 . 7 ( 2020 - 07 - 17 ) <nl> + Version 0 . 7 . 7 ( 2020 - 07 - 20 ) <nl> = = = = = = = = = = = = = = = = = = = = = = = = <nl> * White panda is no longer supported , upgrade to comma two or black panda <nl> * Improved vehicle model estimation using high precision localizer <nl> mmm a / release / files_common <nl> ppp b / release / files_common <nl> <nl> . gitignore <nl> LICENSE <nl> + launch . sh <nl> launch_chffrplus . sh <nl> launch_openpilot . sh <nl> <nl> | fix release date | commaai/openpilot | eebb941b5a30055b0a909782bde412153a3e8cb0 | 2020-07-16T21:44:58Z |
mmm a / CMake / HPHPFindLibs . cmake <nl> ppp b / CMake / HPHPFindLibs . cmake <nl> macro ( hphp_link target ) <nl> target_link_libraries ( $ { target } timelib ) <nl> target_link_libraries ( $ { target } folly ) <nl> target_link_libraries ( $ { target } wangle ) <nl> + target_link_libraries ( $ { target } brotli_enc ) <nl> + target_link_libraries ( $ { target } brotli_dec ) <nl> <nl> if ( ENABLE_MCROUTER ) <nl> target_link_libraries ( $ { target } mcrouter ) <nl> mmm a / CMake / HPHPSetup . cmake <nl> ppp b / CMake / HPHPSetup . cmake <nl> include_directories ( " $ { TP_DIR } / folly " ) <nl> include_directories ( " $ { TP_DIR } / folly / src " ) <nl> include_directories ( " $ { TP_DIR } / thrift / src " ) <nl> include_directories ( " $ { TP_DIR } / wangle / src " ) <nl> + include_directories ( " $ { TP_DIR } / brotli / src " ) <nl> include_directories ( $ { TP_DIR } ) <nl> <nl> include_directories ( $ { HPHP_HOME } / hphp ) <nl> mmm a / third - party <nl> ppp b / third - party <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit 57ddc7942877befb97ecc3df2581a4de119049aa <nl> + Subproject commit eee7c38b2ae8e33cc217b56724e889e8ebc4bf59 <nl> | Add brotli to hhvm - third - party | facebook/hhvm | da4135459f6acc6867ce9ff2f4ef304df17c63ab | 2016-03-15T21:30:55Z |
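The hhvm record above only wires the new brotli_enc / brotli_dec libraries and the ${TP_DIR}/brotli/src include path into the third-party build; no usage appears in the diff. For orientation, a round trip through brotli's one-shot C API — this uses the current <brotli/encode.h> / <brotli/decode.h> interface, which may differ from the headers vendored by hhvm-third-party at the time:

    #include <brotli/decode.h>
    #include <brotli/encode.h>
    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
      const std::string input = "hello hello hello hello";

      std::vector<uint8_t> enc(BrotliEncoderMaxCompressedSize(input.size()));
      size_t enc_size = enc.size();
      if (!BrotliEncoderCompress(BROTLI_DEFAULT_QUALITY, BROTLI_DEFAULT_WINDOW,
                                 BROTLI_MODE_GENERIC, input.size(),
                                 reinterpret_cast<const uint8_t*>(input.data()),
                                 &enc_size, enc.data()))
        return 1;

      std::vector<uint8_t> dec(input.size());
      size_t dec_size = dec.size();
      if (BrotliDecoderDecompress(enc_size, enc.data(), &dec_size, dec.data()) !=
          BROTLI_DECODER_RESULT_SUCCESS)
        return 1;

      std::cout << enc_size << " compressed bytes, round-trip ok: "
                << (std::string(dec.begin(), dec.begin() + dec_size) == input)
                << "\n";
    }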
mmm a / setup . py <nl> ppp b / setup . py <nl> def _embed_libiomp ( self ) : <nl> return <nl> lib_dir = os . path . join ( self . build_lib , ' torch ' , ' lib ' ) <nl> libtorch_cpu_path = os . path . join ( lib_dir , ' libtorch_cpu . dylib ' ) <nl> + if not os . path . exists ( libtorch_cpu_path ) : <nl> + return <nl> # Parse libtorch_cpu load commands <nl> otool_cmds = subprocess . check_output ( [ ' otool ' , ' - l ' , libtorch_cpu_path ] ) . decode ( ' utf - 8 ' ) . split ( ' \ n ' ) <nl> rpaths , libs = [ ] , [ ] <nl> | Skip iomp5 emebedding if torch_cpu could not be found ( ) | pytorch/pytorch | 6f6025183fa953f947eb6882979c88aa15c13977 | 2020-11-04T22:22:53Z |
mmm a / src / mongo / shell / linenoise . cpp <nl> ppp b / src / mongo / shell / linenoise . cpp <nl> int InputBuffer : : incrementalHistorySearch ( PromptBase & pi , int startChar ) { <nl> pb . promptCursorRowOffset = dp . promptCursorRowOffset ; <nl> pb . promptScreenColumns = pi . promptScreenColumns ; <nl> pb . promptPreviousLen = dp . promptChars ; <nl> - if ( useSearchedLine ) { <nl> + if ( useSearchedLine & & activeHistoryLine ) { <nl> historyRecallMostRecent = true ; <nl> copyString32 ( buf32 , activeHistoryLine , buflen + 1 ) ; <nl> len = historyLineLength ; <nl> | SERVER - 6061 don ' t use zeroed activeHistoryLine | mongodb/mongo | 2a9e8882d54b58ee13fba7c38753e5430e15af6a | 2012-06-12T13:11:54Z |
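The SERVER-6061 fix above adds a null check so copyString32 only runs when the incremental history search actually found a line (useSearchedLine && activeHistoryLine), instead of copying from a zeroed activeHistoryLine. A minimal standalone illustration of the same guard — plain char buffers rather than linenoise's UChar32 arrays:

    #include <cstddef>
    #include <cstring>
    #include <iostream>

    // Copy the searched line into buf only when the search produced one.
    // The return value plays the role of the historyRecallMostRecent flag.
    bool apply_searched_line(char* buf, std::size_t buflen,
                             bool useSearchedLine, const char* activeHistoryLine) {
      if (useSearchedLine && activeHistoryLine) {  // the added null check
        std::strncpy(buf, activeHistoryLine, buflen);
        buf[buflen - 1] = '\0';
        return true;
      }
      return false;
    }

    int main() {
      char buf[32] = "original";
      std::cout << apply_searched_line(buf, sizeof buf, true, nullptr)
                << " " << buf << "\n";  // prints: 0 original
    }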
mmm a / Marlin / Configuration_adv . h <nl> ppp b / Marlin / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / Marlin . cpp <nl> ppp b / Marlin / src / Marlin . cpp <nl> <nl> # include " sd / cardreader . h " <nl> # include " module / configuration_store . h " <nl> # include " module / printcounter . h " / / PrintCounter or Stopwatch <nl> + # include " feature / closedloop . h " <nl> + <nl> # ifdef ARDUINO <nl> # include < pins_arduino . h > <nl> # endif <nl> void setup ( ) { <nl> # if ENABLED ( USE_WATCHDOG ) / / Reinit watchdog after HAL_get_reset_source call <nl> watchdog_init ( ) ; <nl> # endif <nl> + <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + init_closedloop ( ) ; <nl> + # endif <nl> } <nl> <nl> / * * <nl> mmm a / Marlin / src / config / default / Configuration_adv . h <nl> ppp b / Marlin / src / config / default / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / AlephObjects / TAZ4 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / AlephObjects / TAZ4 / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Anet / A2 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Anet / A2 / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Anet / A2plus / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Anet / A2plus / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . 
<nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Anet / A6 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Anet / A6 / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Anet / A8 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Anet / A8 / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Azteeg / X5GT / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Azteeg / X5GT / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / BIBO / TouchX / cyclops / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / BIBO / TouchX / cyclops / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / BIBO / TouchX / default / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / BIBO / TouchX / default / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . 
<nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / BQ / Hephestos / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / BQ / Hephestos / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / BQ / Hephestos_2 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / BQ / Hephestos_2 / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / BQ / WITBOX / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / BQ / WITBOX / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Cartesio / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Cartesio / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Creality / CR - 10 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Creality / CR - 10 / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . 
<nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Creality / CR - 10S / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Creality / CR - 10S / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Creality / CR - 10mini / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Creality / CR - 10mini / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Creality / CR - 8 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Creality / CR - 8 / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Creality / Ender - 2 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Creality / Ender - 2 / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Creality / Ender - 3 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Creality / Ender - 3 / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . 
<nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Creality / Ender - 4 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Creality / Ender - 4 / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Einstart - S / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Einstart - S / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Felix / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Felix / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / FolgerTech / i3 - 2020 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / FolgerTech / i3 - 2020 / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Formbot / T - Rex_2 + / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Formbot / T - Rex_2 + / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . 
<nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Formbot / T_Rex_3 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Formbot / T_Rex_3 / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> diff - - git a / Marlin / src / config / examples / Geeetech / Prusa i3 Pro C / Configuration_adv . h b / Marlin / src / config / examples / Geeetech / Prusa i3 Pro C / Configuration_adv . h <nl> mmm a / Marlin / src / config / examples / Geeetech / Prusa i3 Pro C / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Geeetech / Prusa i3 Pro C / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> diff - - git a / Marlin / src / config / examples / Geeetech / Prusa i3 Pro W / Configuration_adv . h b / Marlin / src / config / examples / Geeetech / Prusa i3 Pro W / Configuration_adv . h <nl> mmm a / Marlin / src / config / examples / Geeetech / Prusa i3 Pro W / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Geeetech / Prusa i3 Pro W / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Infitary / i3 - M508 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Infitary / i3 - M508 / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / JGAurora / A5 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / JGAurora / A5 / Configuration_adv . 
h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / MakerParts / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / MakerParts / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Malyan / M150 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Malyan / M150 / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Malyan / M200 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Malyan / M200 / Configuration_adv . h <nl> <nl> <nl> # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Micromake / C1 / enhanced / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Micromake / C1 / enhanced / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Mks / Sbase / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Mks / Sbase / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . 
<nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / RigidBot / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / RigidBot / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / SCARA / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / SCARA / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Sanguinololu / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Sanguinololu / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / TheBorg / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / TheBorg / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / TinyBoy2 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / TinyBoy2 / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . 
<nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Tronxy / X3A / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Tronxy / X3A / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / UltiMachine / Archim2 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / UltiMachine / Archim2 / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Velleman / K8200 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Velleman / K8200 / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / Velleman / K8400 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Velleman / K8400 / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> diff - - git a / Marlin / src / config / examples / Wanhao / Duplicator 6 / Configuration_adv . h b / Marlin / src / config / examples / Wanhao / Duplicator 6 / Configuration_adv . h <nl> mmm a / Marlin / src / config / examples / Wanhao / Duplicator 6 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / Wanhao / Duplicator 6 / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . 
<nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / delta / FLSUN / auto_calibrate / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / delta / FLSUN / auto_calibrate / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / delta / FLSUN / kossel / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / delta / FLSUN / kossel / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / delta / FLSUN / kossel_mini / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / delta / FLSUN / kossel_mini / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / delta / generic / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / delta / generic / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / delta / kossel_mini / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / delta / kossel_mini / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . 
<nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / delta / kossel_pro / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / delta / kossel_pro / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / delta / kossel_xl / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / delta / kossel_xl / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / gCreate / gMax1 . 5 + / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / gCreate / gMax1 . 5 + / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / makibox / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / makibox / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / tvrrug / Round2 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / tvrrug / Round2 / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . 
<nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> mmm a / Marlin / src / config / examples / wt150 / Configuration_adv . h <nl> ppp b / Marlin / src / config / examples / wt150 / Configuration_adv . h <nl> <nl> <nl> / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> <nl> + / / Employ an external closed loop controller . Override pins here if needed . <nl> + / / # define EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + / / # define CLOSED_LOOP_ENABLE_PIN - 1 <nl> + / / # define CLOSED_LOOP_MOVE_COMPLETE_PIN - 1 <nl> + # endif <nl> + <nl> / * * <nl> * Dual Steppers / Dual Endstops <nl> * <nl> new file mode 100644 <nl> index 00000000000 . . 2fe60fcf9c8 <nl> mmm / dev / null <nl> ppp b / Marlin / src / feature / closedloop . cpp <nl> <nl> + / * * <nl> + * Marlin 3D Printer Firmware <nl> + * Copyright ( C ) 2016 MarlinFirmware [ https : / / github . com / MarlinFirmware / Marlin ] <nl> + * <nl> + * Based on Sprinter and grbl . <nl> + * Copyright ( C ) 2011 Camiel Gubbels / Erik van der Zalm <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the GNU General Public License as published by <nl> + * the Free Software Foundation , either version 3 of the License , or <nl> + * ( at your option ) any later version . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU General Public License <nl> + * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * / <nl> + # include " . . / inc / MarlinConfig . h " <nl> + <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + <nl> + # if ! PIN_EXISTS ( CLOSED_LOOP_ENABLE ) | | ! PIN_EXISTS ( CLOSED_LOOP_MOVE_COMPLETE ) <nl> + # error " CLOSED_LOOP_ENABLE_PIN and CLOSED_LOOP_MOVE_COMPLETE_PIN are required for EXTERNAL_CLOSED_LOOP_CONTROLLER . " <nl> + # endif <nl> + <nl> + # include " closedloop . h " <nl> + <nl> + void init_closedloop ( ) { <nl> + OUT_WRITE ( CLOSED_LOOP_ENABLE_PIN , LOW ) ; <nl> + SET_INPUT_PULLUP ( CLOSED_LOOP_MOVE_COMPLETE_PIN ) ; <nl> + } <nl> + <nl> + void set_closedloop ( const byte val ) { <nl> + OUT_WRITE ( CLOSED_LOOP_ENABLE_PIN , val ) ; <nl> + } <nl> + <nl> + # endif / / EXTERNAL_CLOSED_LOOP_CONTROLLER <nl> new file mode 100644 <nl> index 00000000000 . . 84cec36fe6c <nl> mmm / dev / null <nl> ppp b / Marlin / src / feature / closedloop . h <nl> <nl> + / * * <nl> + * Marlin 3D Printer Firmware <nl> + * Copyright ( C ) 2016 MarlinFirmware [ https : / / github . com / MarlinFirmware / Marlin ] <nl> + * <nl> + * Based on Sprinter and grbl . <nl> + * Copyright ( C ) 2011 Camiel Gubbels / Erik van der Zalm <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the GNU General Public License as published by <nl> + * the Free Software Foundation , either version 3 of the License , or <nl> + * ( at your option ) any later version . 
<nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU General Public License <nl> + * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * / <nl> + # pragma once <nl> + <nl> + void init_closedloop ( ) ; <nl> + void set_closedloop ( const byte val ) ; <nl> new file mode 100644 <nl> index 00000000000 . . a4b75e71bda <nl> mmm / dev / null <nl> ppp b / Marlin / src / gcode / calibrate / M12 . cpp <nl> <nl> + / * * <nl> + * Marlin 3D Printer Firmware <nl> + * Copyright ( C ) 2016 MarlinFirmware [ https : / / github . com / MarlinFirmware / Marlin ] <nl> + * <nl> + * Based on Sprinter and grbl . <nl> + * Copyright ( C ) 2011 Camiel Gubbels / Erik van der Zalm <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the GNU General Public License as published by <nl> + * the Free Software Foundation , either version 3 of the License , or <nl> + * ( at your option ) any later version . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU General Public License <nl> + * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * / <nl> + # include " . . / . . / inc / MarlinConfigPre . h " <nl> + <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + <nl> + # include " . . / gcode . h " <nl> + # include " . . / . . / module / planner . h " <nl> + # include " . . / . . / feature / closedloop . h " <nl> + <nl> + void GcodeSuite : : M12 ( ) { <nl> + planner . synchronize ( ) ; <nl> + if ( parser . seenval ( ' S ' ) ) <nl> + set_closedloop ( parser . value_int ( ) ) ; / / Force a CLC set <nl> + } <nl> + <nl> + # endif <nl> mmm a / Marlin / src / gcode / gcode . cpp <nl> ppp b / Marlin / src / gcode / gcode . cpp <nl> void GcodeSuite : : process_parsed_command ( <nl> case 5 : M5 ( ) ; break ; / / M5 - turn spindle / laser off <nl> # endif <nl> <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + case 12 : M12 ( ) ; break ; / / M12 : Synchronize and optionally force a CLC set <nl> + # endif <nl> + <nl> case 17 : M17 ( ) ; break ; / / M17 : Enable all stepper motors <nl> <nl> # if ENABLED ( SDSUPPORT ) <nl> mmm a / Marlin / src / gcode / gcode . h <nl> ppp b / Marlin / src / gcode / gcode . h <nl> <nl> * M3 - Turn laser / spindle on , set spindle / laser speed / power , set rotation to clockwise <nl> * M4 - Turn laser / spindle on , set spindle / laser speed / power , set rotation to counter - clockwise <nl> * M5 - Turn laser / spindle off <nl> + * M12 - Set up closed loop control system . More features coming soon . ( Requires EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> * M17 - Enable / Power all stepper motors <nl> * M18 - Disable all stepper motors ; same as M84 <nl> * M20 - List SD card . 
( Requires SDSUPPORT ) <nl> class GcodeSuite { <nl> static void M5 ( ) ; <nl> # endif <nl> <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + static void M12 ( ) ; <nl> + # endif <nl> + <nl> static void M17 ( ) ; <nl> <nl> static void M18_M84 ( ) ; <nl> mmm a / Marlin / src / module / planner . cpp <nl> ppp b / Marlin / src / module / planner . cpp <nl> float Planner : : get_axis_position_mm ( const AxisEnum axis ) { <nl> / * * <nl> * Block until all buffered steps are executed / cleaned <nl> * / <nl> - void Planner : : synchronize ( ) { while ( has_blocks_queued ( ) | | cleaning_buffer_counter ) idle ( ) ; } <nl> + void Planner : : synchronize ( ) { <nl> + while ( <nl> + has_blocks_queued ( ) | | cleaning_buffer_counter <nl> + # if ENABLED ( EXTERNAL_CLOSED_LOOP_CONTROLLER ) <nl> + | | ! READ ( CLOSED_LOOP_MOVE_COMPLETE_PIN ) <nl> + # endif <nl> + ) idle ( ) ; <nl> + } <nl> <nl> / * * <nl> * Planner : : _buffer_steps <nl> | Merge pull request from SJ - Innovation / bf2_ClosedLoopController_Support | MarlinFirmware/Marlin | 5d5cd1332dd79c217425d3cd2f197de7880b5675 | 2018-09-10T08:18:29Z |
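The Marlin row above adds EXTERNAL_CLOSED_LOOP_CONTROLLER support: the example Configuration_adv.h files gain the CLOSED_LOOP_ENABLE_PIN / CLOSED_LOOP_MOVE_COMPLETE_PIN overrides, Planner::synchronize() additionally blocks until the move-complete pin reads high, and the new M12 handler synchronizes and then forces the enable pin through set_closedloop() when an S value is given. A host-side sketch of exercising M12 follows; the serial port name, baud rate, and use of pyserial are assumptions, not part of the commit.

```python
# Hypothetical host-side use of the new M12 command; port and baud are guesses.
import serial  # pyserial

with serial.Serial("/dev/ttyUSB0", 250000, timeout=5) as printer:
    printer.write(b"G1 X10 F3000\n")   # queue a move
    # M12 S1: Planner::synchronize() now also waits for
    # CLOSED_LOOP_MOVE_COMPLETE_PIN before set_closedloop(1) drives
    # CLOSED_LOOP_ENABLE_PIN high.
    printer.write(b"M12 S1\n")
    print(printer.readline().decode(errors="replace"))
```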
mmm a / tensorflow / python / tpu / tensor_tracer . py <nl> ppp b / tensorflow / python / tpu / tensor_tracer . py <nl> def set_parameters ( tensor_tracer_params = None ) : <nl> - full_tensor_summary : Writes the full tensors as binary event files . <nl> The outputs can be read using : trace = <nl> tensor_tracer . read_tensor_tracer_event_file ( event_file_path ) <nl> - - trace - back - if - nan : This mode will write the full tensor content only <nl> - when the tensor has a NaN or Inf in it . It is possible to also print <nl> - the inputs coming to this op using ' trace_stack_size ' parameter . <nl> - E . g . , if trace_stack_size = 2 , then the tensor with NaN / Inf , its <nl> - inputs , and its inputs ' inputs will also be printed . <nl> + <nl> - report_file : Path to the metadata file that is written during graph <nl> construction . If not set , metadata will be printed to stdout during <nl> graph construction . <nl> def set_parameters ( tensor_tracer_params = None ) : <nl> ' - - included_optypes = some_op_type - - excluded_optypes = * . ' will trace <nl> only the ops with type ' some_op_type ' <nl> Advanced Flags : <nl> - - compact_trace : If not set , statistics per tensor is written as soon as <nl> - they are executed . If set , then statistics for all traced tensors will <nl> - be stored in a cache and will be written only once per step . This flag <nl> - is ignored for full - tensor and part - tensor trace modes . If the <nl> - trace_dir is a remote directory , compact_trace will be forced . <nl> - trace_scalar : Scalar values are not traced by default . If this flag is <nl> set , scalar values will also be traced . <nl> - - included_cores : Accepts a list string . Tracing will only be dumped for <nl> - these cores . E . g , setting it to ' [ 0 , 2 , 4 , 6 ] ' will result in a trace <nl> - only for those cores . <nl> - op_range : In the form of ' % d : % d ' that limits the tracing to the ops <nl> within this limit . - - op_range = ' 5 : 10 ' will trace only the ops that have <nl> topological order between 5 - 10 . <nl> - - trace_before_included_ops : If set to a number - k , it will also trace <nl> - distance - k inputs of each traced tensor . E . g . , k = 1 , then in addition <nl> - to each traced_tensor , their input tensors will also be traced . <nl> - - trace_after_included_ops : Same as trace_before_included_ops , where it <nl> - will also trace distance - k outputs of each traced tensor . <nl> - submode : ' brief ' or ' detailed ' . If the trace mode is not compact , <nl> brief mode will print only the id of each traced tensor to save some <nl> space . ' detailed ' mode prints the full tensor name . <nl> - - trace_stack_size : Used only for trace_mode = trace - back - if - nan mode . It <nl> - determines how many ops to print back from a nan op . E . g , op4 - > op3 <nl> - - > op2 - > op1 - > op0 , if op0 has a NaN and trace_stack_size is 1 , the <nl> - result of op1 will also be printed . trace_stack_size is 2 , the result <nl> - of op1 and op2 will be printed . <nl> - use_fingerprint_subdirectory : The trace directory will be chosen as <nl> using the fingerprint of the trace metadata under the provided <nl> trace_dir . <nl> def unsafe_scalar_trace ( op ) : <nl> <nl> def _is_interesting_op ( self , op ) : <nl> " " " Returns True if the given op is not an interesting one to be traced . " " " <nl> - # If flag is set to include less interesting ops , then include everything . <nl> - if self . _parameters . 
include_less_interesting_ops : <nl> - return True <nl> return op_priority ( op . type ) < = self . _parameters . trace_level <nl> <nl> @ staticmethod <nl> def _is_user_included_op ( self , op ) : <nl> - The op is at most _trace_ops_before_included hops before an included op <nl> - The op is at most _trace_ops_after_included hops after an included op <nl> " " " <nl> + for opname_re in self . _parameters . included_opname_re_list : <nl> + if opname_re . match ( op . name ) : <nl> + return True <nl> <nl> - def _is_op_or_any_neighbor_included ( op , check_before = 0 , check_after = 0 ) : <nl> - " " " Helper function to check if op is included or not . " " " <nl> - for opname_re in self . _parameters . included_opname_re_list : <nl> - if opname_re . match ( op . name ) : <nl> - return True <nl> - <nl> - for optype_re in self . _parameters . included_optype_re_list : <nl> - if optype_re . match ( op . type ) : <nl> - return True <nl> - <nl> - if check_after > 0 : <nl> - for out_tensor in op . outputs : <nl> - for consumer in out_tensor . consumers ( ) : <nl> - if _is_op_or_any_neighbor_included ( consumer , check_after - 1 , 0 ) : <nl> - return True <nl> - if check_before > 0 : <nl> - for input_tensor in op . inputs : <nl> - if _is_op_or_any_neighbor_included ( input_tensor . op , <nl> - 0 , <nl> - check_before - 1 ) : <nl> - return True <nl> - return False <nl> - # check_after and check_before are swapped below , as below operation <nl> - # checks the distance from an arbitrary op to included ops . <nl> - return _is_op_or_any_neighbor_included ( <nl> - op , self . _parameters . trace_ops_after_included , <nl> - self . _parameters . trace_ops_before_included ) <nl> + for optype_re in self . _parameters . included_optype_re_list : <nl> + if optype_re . match ( op . type ) : <nl> + return True <nl> + return False <nl> <nl> def _is_user_excluded_op ( self , op ) : <nl> for opname_re in self . _parameters . excluded_opname_re_list : <nl> def _use_temp_cache ( self ) : <nl> <nl> def _use_tensor_values_cache ( self ) : <nl> " " " Returns True if immediate tensors should be first saved to a cache . " " " <nl> - if self . _parameters . trace_mode = = tensor_tracer_flags . TRACE_MODE_SUMMARY : <nl> - # For summary tace mode only compact format is supported . <nl> - return True <nl> - <nl> - if self . _parameters . trace_mode not in set ( [ <nl> - tensor_tracer_flags . TRACE_MODE_NAN_INF , <nl> - tensor_tracer_flags . TRACE_MODE_NORM , <nl> - tensor_tracer_flags . TRACE_MODE_MAX_ABS , <nl> - tensor_tracer_flags . TRACE_MODE_SUMMARY <nl> - ] ) : <nl> - return False <nl> - if ( self . _parameters . trace_dir and <nl> - _trace_files_need_precreated ( self . _parameters . trace_dir ) ) : <nl> - return True <nl> return self . _parameters . use_compact_trace <nl> <nl> def _use_tensor_buffer ( self ) : <nl> def _show_max_abs ( tensor ) : <nl> output_tensor = array_ops . reshape ( output_tensor , [ 1 ] ) <nl> return output_tensor <nl> <nl> - def _detect_inf_nan_producer ( tensor ) : <nl> - " " " Checks if the tensor is the first NaN / Inf tensor in the computation path . " " " <nl> - if tensor . op . inputs : <nl> - inp_check = [ <nl> - _detect_nan_inf ( inp_tensor ) for inp_tensor in tensor . op . inputs <nl> - ] <nl> - is_any_input_inf_nan = math_ops . add_n ( inp_check ) <nl> - else : <nl> - is_any_input_inf_nan = constant_op . constant ( 0 , dtypes . 
bool ) <nl> - is_current_tensor_inf_nan = _detect_nan_inf ( tensor ) <nl> - # An op is NaN / INF producer only when all inputs are nan / inf free ( <nl> - # is_any_input_inf_nan = 0 ) , and its output has nan / inf ( <nl> - # is_current_tensor_inf_nan = 1 ) . Below will be 1 if op nan / inf is producer . <nl> - is_nan_producer = is_current_tensor_inf_nan - is_any_input_inf_nan <nl> - is_nan_producer = math_ops . reduce_any ( is_nan_producer > 0 ) <nl> - return is_nan_producer <nl> - <nl> - if ( self . _parameters . trace_mode = = <nl> - tensor_tracer_flags . TRACE_MODE_FULL_IF_NAN ) : <nl> - return { self . _parameters . trace_mode : _detect_inf_nan_producer ( tensor ) } <nl> if self . _parameters . trace_mode = = tensor_tracer_flags . TRACE_MODE_NAN_INF : <nl> return { self . _parameters . trace_mode : _detect_nan_inf ( tensor ) } <nl> if ( self . _parameters . trace_mode = = <nl> def _print_tensor ( tensor_name , num_elements , tensor , output_tensor ) : <nl> <nl> Raises : <nl> ValueError : If tensor_name is not already in <nl> - tensor_trace_order . tensorname_idx_map . <nl> + tensor_trace_order . tensorname_to_cache_idx . <nl> " " " <nl> <nl> if self . _parameters . is_brief_mode ( ) : <nl> - if tensor_name not in tensor_trace_order . tensorname_idx_map : <nl> + if tensor_name not in tensor_trace_order . tensorname_to_cache_idx : <nl> raise ValueError ( <nl> - ' Tensor name % s is not in the tensorname_idx_map ' % tensor_name ) <nl> - msg = ' % d ' % tensor_trace_order . tensorname_idx_map [ tensor_name ] <nl> + ' Tensor name % s is not in the tensorname_to_cache_idx ' % <nl> + tensor_name ) <nl> + msg = ' % d ' % tensor_trace_order . tensorname_to_cache_idx [ tensor_name ] <nl> else : <nl> msg = ' " % s " ' % tensor_name <nl> <nl> def _show_full_tensor ( tensor ) : <nl> <nl> return _print_tensor ( tensor_name , - 1 , tensor , tensor ) <nl> <nl> - def _show_full_tensors ( tensor ) : <nl> - " " " Prints the full tensor values for the tensors that are _trace_stack_size hops away from a given tensor . " " " <nl> - <nl> - def _get_distance_k_tensors ( k_before = 0 ) : <nl> - " " " Returns the tensors that are at most k_before hops away from the tensor . " " " <nl> - if k_before < 0 : <nl> - return [ ] <nl> - visited_tensors = { tensor : 0 } <nl> - visitor_queue = [ tensor ] <nl> - head = 0 <nl> - while head < len ( visitor_queue ) : <nl> - current_tensor = visitor_queue [ head ] <nl> - head + = 1 <nl> - distance = visited_tensors [ current_tensor ] <nl> - if distance = = k_before : <nl> - break <nl> - for input_tensor in current_tensor . op . inputs : <nl> - if input_tensor in visited_tensors : <nl> - continue <nl> - visitor_queue . append ( input_tensor ) <nl> - visited_tensors [ input_tensor ] = distance + 1 <nl> - return visitor_queue <nl> - <nl> - tensors_to_print = _get_distance_k_tensors ( <nl> - self . _parameters . trace_stack_size ) <nl> - print_ops = [ _print_tensor ( t . name , - 1 , t , t ) for t in tensors_to_print ] <nl> - with ops . control_dependencies ( print_ops ) : <nl> - return constant_op . constant ( True ) <nl> - <nl> - if ( self . _parameters . trace_mode = = <nl> - tensor_tracer_flags . TRACE_MODE_FULL_IF_NAN ) : <nl> - return _show_full_tensors <nl> if ( self . _parameters . trace_mode = = <nl> tensor_tracer_flags . 
TRACE_MODE_PART_TENSOR ) : <nl> return _show_part_tensor <nl> def tpu_wrap_trace_fn ( tensor , out_tensor_name ) : <nl> else : <nl> return tensor_trace_fn ( tensor ) <nl> <nl> - def conditional_trace_fn ( predicate_tensor , out_tensor , trace_fn , <nl> - out_tensor_name ) : <nl> - " " " Creates a cond op that traces the out_tensor if predicate is satisfied . " " " <nl> - return control_flow_ops . cond ( <nl> - predicate_tensor , lambda : trace_fn ( out_tensor , out_tensor_name ) , <nl> - lambda : constant_op . constant ( False ) ) . op <nl> - <nl> if len ( processed_tensors ) ! = 1 : <nl> raise RuntimeError ( ' Multiple stats are only allowed in compact ' <nl> ' mode . ' ) <nl> def conditional_trace_fn ( predicate_tensor , out_tensor , trace_fn , <nl> # mode that uses compact format ( self . _use_tensor_values_cache = true ) . <nl> # Non - compact mode currently allows single stat per tensor . <nl> processed_out_tensor = six . next ( six . itervalues ( processed_tensors ) ) <nl> - <nl> - if self . _parameters . is_conditional_trace : <nl> - trace_op = conditional_trace_fn ( processed_out_tensor , out_tensor , <nl> - tpu_wrap_trace_fn , tensor_name ) <nl> - elif self . _parameters . included_cores : <nl> - should_print = constant_op . constant ( False ) <nl> - for core in self . _parameters . included_cores : <nl> - should_print = gen_math_ops . logical_or ( <nl> - should_print , gen_math_ops . equal ( self . _replica_id , core ) ) <nl> - trace_op = conditional_trace_fn ( should_print , processed_out_tensor , <nl> - tpu_wrap_trace_fn , tensor_name ) <nl> - <nl> - else : <nl> - trace_op = tpu_wrap_trace_fn ( processed_out_tensor , tensor_name ) <nl> + trace_op = tpu_wrap_trace_fn ( processed_out_tensor , tensor_name ) <nl> <nl> if op_control_flow_context : <nl> # pylint : disable = protected - access <nl> mmm a / tensorflow / python / tpu / tensor_tracer_flags . py <nl> ppp b / tensorflow / python / tpu / tensor_tracer_flags . py <nl> <nl> from tensorflow . python . ops import math_ops <nl> from tensorflow . python . platform import tf_logging as logging <nl> <nl> - TRACE_MODE_NAN_INF = ' nan - inf ' <nl> TRACE_MODE_PART_TENSOR = ' part - tensor ' <nl> TRACE_MODE_FULL_TENSOR = ' full - tensor ' <nl> - TRACE_MODE_FULL_IF_NAN = ' trace - back - if - nan ' <nl> + TRACE_MODE_FULL_TENSOR_SUMMARY = ' full_tensor_summary ' <nl> + <nl> + TRACE_MODE_NAN_INF = ' nan - inf ' <nl> TRACE_MODE_NORM = ' norm ' <nl> TRACE_MODE_MAX_ABS = ' max - abs ' <nl> TRACE_MODE_SUMMARY = ' summary ' <nl> # summary mode to collects a finite set of signatures for each traced tensor , <nl> # ( such as norm , max , min , mean ) and dumps it using tb summaries . <nl> - TRACE_MODE_FULL_TENSOR_SUMMARY = ' full_tensor_summary ' <nl> + <nl> # Full tensor mode dumps the whole tensor values for the traced tensors without <nl> # any processing on them ; using tb summaries . <nl> <nl> <nl> _FLAG_NO_EQUAL_PAT = re . 
compile ( r ' \ s * - - ( [ ^ = ] + ) \ s * ' ) <nl> <nl> FLAGS_ENV_VAR = ' TENSOR_TRACER_FLAGS ' <nl> - FLAG_NAME_TRACE_STACK_SIZE = ' trace_stack_size ' <nl> FLAG_NAME_ENABLE = ' enable ' <nl> FLAG_NAME_TRACE_MODE = ' trace_mode ' <nl> - FLAG_NAME_USE_COMPACT_TRACE = ' compact_trace ' <nl> FLAG_NAME_TRACE_SCALAR_OPS = ' trace_scalar ' <nl> - FLAG_NAME_TRACE_BEFORE_OPS = ' trace_before_included_ops ' <nl> - FLAG_NAME_TRACE_AFTER_OPS = ' trace_after_included_ops ' <nl> FLAG_NAME_SUBMODE = ' submode ' <nl> - FLAG_NAME_INCLUDE_LESS_INTERESTING_OPS = ' include_less_interesting_ops ' <nl> FLAG_NAME_EXCLUDED_OPNAMES = ' excluded_opnames ' <nl> FLAG_NAME_EXCLUDED_OPTYPES = ' excluded_optypes ' <nl> FLAG_NAME_INCLUDED_OPNAMES = ' included_opnames ' <nl> FLAG_NAME_INCLUDED_OPTYPES = ' included_optypes ' <nl> - FLAG_NAME_INCLUDED_CORES = ' included_cores ' <nl> FLAG_NAME_TRACE_LEVEL = ' trace_level ' <nl> FLAG_NAME_TRACE_DIR = ' trace_dir ' <nl> FLAG_NAME_REPORT_FILE = ' report_file ' <nl> def __init__ ( self , env = None ) : <nl> self . included_optype_re_list = self . _flag_value_to_re_list ( <nl> FLAG_NAME_INCLUDED_OPTYPES ) <nl> <nl> - self . is_conditional_trace = self . _is_conditional_trace_mode ( ) <nl> self . trace_scalar_ops = self . is_flag_on ( FLAG_NAME_TRACE_SCALAR_OPS ) <nl> - self . use_compact_trace = self . is_flag_on ( FLAG_NAME_USE_COMPACT_TRACE ) <nl> + self . use_compact_trace = self . trace_mode in ( TRACE_MODE_NAN_INF , <nl> + TRACE_MODE_NORM , <nl> + TRACE_MODE_MAX_ABS , <nl> + TRACE_MODE_SUMMARY ) <nl> self . use_temp_cache_var = self . is_flag_on ( FLAG_NAME_TEMP_CACHE_VAR ) <nl> self . use_fingerprint_subdir = self . is_flag_on ( FLAG_NAME_FINGERPRINT_DIR ) <nl> <nl> - # _trace_ops_before_included and _trace_ops_after_included denotes to depth <nl> - # of tracing relative to the ops given in - - included_opnames or <nl> - # - - included_optypes <nl> - # For example , in the below graph <nl> - # op1 - - > op2 - - > op3 - - > op4 - - > op5 <nl> - # If - - included_opnames = op3 then only op3 will be traced . <nl> - # If also - - trace_before_included_ops = 2 ( _trace_ops_before_included ) , then <nl> - # op1 and op2 will be traced as they are at most 2 hops apart from an <nl> - # included op . Similarly , if - - trace_after_included_ops = 2 , then op4 and op5 <nl> - # will also be traced . <nl> - self . trace_ops_before_included = self . _get_flag_int_value ( <nl> - FLAG_NAME_TRACE_BEFORE_OPS , 0 ) <nl> - self . trace_ops_after_included = self . _get_flag_int_value ( <nl> - FLAG_NAME_TRACE_AFTER_OPS , 0 ) <nl> - self . trace_stack_size = self . _get_flag_int_value ( FLAG_NAME_TRACE_STACK_SIZE , <nl> - 1 ) <nl> _ , self . graph_dump_path = self . get_flag_value ( <nl> FLAG_NAME_DUMP_BEFORE_AFTER_GRAPHS ) <nl> - self . included_cores = self . _flag_value_as_int_list ( FLAG_NAME_INCLUDED_CORES ) <nl> - self . include_less_interesting_ops = self . is_flag_on ( <nl> - FLAG_NAME_INCLUDE_LESS_INTERESTING_OPS ) <nl> self . trace_level = self . _get_flag_int_value ( FLAG_NAME_TRACE_LEVEL , <nl> _TT_DEFAULT_TRACE_LEVEL ) <nl> self . summary_signatures = self . _get_summary_signatures ( ) <nl> self . collect_summary_per_core = self . is_flag_on ( FLAG_NAME_SUMMARY_PER_CORE ) <nl> <nl> - def _is_conditional_trace_mode ( self ) : <nl> - return self . trace_mode = = TRACE_MODE_FULL_IF_NAN <nl> - <nl> def _get_report_filepath ( self ) : <nl> " " " Sets the path of the output report file . 
" " " <nl> <nl> def _get_trace_mode ( self ) : <nl> trace_mode = TRACE_MODE_NORM <nl> valid_trace_modes = [ <nl> TRACE_MODE_NAN_INF , TRACE_MODE_PART_TENSOR , TRACE_MODE_FULL_TENSOR , <nl> - TRACE_MODE_NORM , TRACE_MODE_MAX_ABS , TRACE_MODE_FULL_IF_NAN , <nl> + TRACE_MODE_NORM , TRACE_MODE_MAX_ABS , <nl> TRACE_MODE_SUMMARY , TRACE_MODE_FULL_TENSOR_SUMMARY <nl> ] <nl> if trace_mode not in valid_trace_modes : <nl> def match_next_flag ( flags , pos ) : <nl> def _validate_flag_names ( self ) : <nl> " " " Validates if the TensorTrace flags passed are valid . " " " <nl> valid_flag_names = [ <nl> - FLAG_NAME_ENABLE , FLAG_NAME_TRACE_MODE , FLAG_NAME_USE_COMPACT_TRACE , <nl> - FLAG_NAME_TRACE_SCALAR_OPS , FLAG_NAME_TRACE_BEFORE_OPS , <nl> - FLAG_NAME_TRACE_AFTER_OPS , FLAG_NAME_TRACE_STACK_SIZE , <nl> + FLAG_NAME_ENABLE , FLAG_NAME_TRACE_MODE , <nl> + FLAG_NAME_TRACE_SCALAR_OPS , <nl> FLAG_NAME_SUBMODE , FLAG_NAME_EXCLUDED_OPNAMES , <nl> FLAG_NAME_EXCLUDED_OPTYPES , FLAG_NAME_INCLUDED_OPNAMES , <nl> FLAG_NAME_INCLUDED_OPTYPES , FLAG_NAME_TRACE_DIR , <nl> - FLAG_NAME_INCLUDED_CORES , FLAG_NAME_REPORT_FILE , <nl> + FLAG_NAME_REPORT_FILE , <nl> FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR , <nl> - FLAG_NAME_INCLUDE_LESS_INTERESTING_OPS , FLAG_NAME_OP_RANGE , <nl> + FLAG_NAME_OP_RANGE , <nl> FLAG_NAME_DUMP_BEFORE_AFTER_GRAPHS , FLAG_NAME_TRACE_LEVEL , <nl> FLAG_NAME_SUMMARY_SIGNATURES , FLAG_NAME_SUMMARY_PER_CORE , <nl> FLAG_NAME_TEMP_CACHE_VAR , FLAG_NAME_FINGERPRINT_DIR <nl> mmm a / tensorflow / python / tpu / tensor_tracer_report . py <nl> ppp b / tensorflow / python / tpu / tensor_tracer_report . py <nl> def create_report_proto ( self , tt_config , tt_parameters , tensor_trace_order , <nl> report . config . num_cores = tt_config . num_replicas <nl> report . config . num_hosts = tt_config . num_hosts <nl> report . config . num_cores_per_host = tt_config . num_replicas_per_host <nl> - for core in tt_parameters . included_cores : <nl> - report . config . included_cores . append ( core ) <nl> report . config . submode = tt_parameters . submode <nl> report . config . trace_mode = tt_parameters . trace_mode <nl> <nl> def _write_config_section ( self , tt_config , tt_parameters ) : <nl> tt_parameters . trace_mode ) ) <nl> self . _write_report ( ' % s % s \ n ' % ( _FIELD_NAME_SUBMODE , <nl> tt_parameters . submode ) ) <nl> - if tt_parameters . included_cores : <nl> - self . _write_report ( ' % s % s \ n ' % ( _FIELD_NAME_NUM_REPLICAS , <nl> - len ( tt_parameters . included_cores ) ) ) <nl> - else : <nl> - self . _write_report ( ' % s % s \ n ' % ( _FIELD_NAME_NUM_REPLICAS , <nl> - tt_config . num_replicas ) ) <nl> + self . _write_report ( ' % s % s \ n ' % ( _FIELD_NAME_NUM_REPLICAS , <nl> + tt_config . num_replicas ) ) <nl> self . _write_report ( ' % s % s \ n ' % ( _FIELD_NAME_NUM_REPLICAS_PER_HOST , <nl> tt_config . num_replicas_per_host ) ) <nl> self . _write_report ( ' % s % s \ n ' % ( _FIELD_NAME_NUM_HOSTS , tt_config . num_hosts ) ) <nl> | Removing some unused options from tensor tracer . | tensorflow/tensorflow | fbe4f6e8a404a1bda52c755b2601195b2107fc35 | 2020-09-24T23:13:26Z |
mmm a / src / tree / updater_gpu_hist . cu <nl> ppp b / src / tree / updater_gpu_hist . cu <nl> struct ExpandEntry { <nl> if ( split . left_sum . GetHess ( ) = = 0 | | split . right_sum . GetHess ( ) = = 0 ) { <nl> return false ; <nl> } <nl> - if ( param . max_depth > 0 & & depth = = param . max_depth ) return false ; <nl> - if ( param . max_leaves > 0 & & num_leaves = = param . max_leaves ) return false ; <nl> + if ( split . loss_chg < param . min_split_loss ) { return false ; } <nl> + if ( param . max_depth > 0 & & depth = = param . max_depth ) { return false ; } <nl> + if ( param . max_leaves > 0 & & num_leaves = = param . max_leaves ) { return false ; } <nl> return true ; <nl> } <nl> <nl> mmm a / tests / cpp / tree / test_gpu_hist . cu <nl> ppp b / tests / cpp / tree / test_gpu_hist . cu <nl> void TestBuildHist ( bool use_shared_memory_histograms ) { <nl> auto page = BuildEllpackPage ( kNRows , kNCols ) ; <nl> DeviceShard < GradientSumT > shard ( 0 , page . get ( ) , kNRows , param , kNCols , kNCols ) ; <nl> shard . InitHistogram ( ) ; <nl> - <nl> + <nl> xgboost : : SimpleLCG gen ; <nl> xgboost : : SimpleRealUniformDistribution < bst_float > dist ( 0 . 0f , 1 . 0f ) ; <nl> std : : vector < GradientPair > h_gpair ( kNRows ) ; <nl> TEST ( GpuHist , TestHistogramIndex ) { <nl> TestHistogramIndexImpl ( ) ; <nl> } <nl> <nl> + / / gamma is an alias of min_split_loss <nl> + int32_t TestMinSplitLoss ( DMatrix * dmat , float gamma , HostDeviceVector < GradientPair > * gpair ) { <nl> + Args args { <nl> + { " max_depth " , " 1 " } , <nl> + { " max_leaves " , " 0 " } , <nl> + <nl> + / / Disable all other parameters . <nl> + { " colsample_bynode " , " 1 " } , <nl> + { " colsample_bylevel " , " 1 " } , <nl> + { " colsample_bytree " , " 1 " } , <nl> + { " min_child_weight " , " 0 . 01 " } , <nl> + { " reg_alpha " , " 0 " } , <nl> + { " reg_lambda " , " 0 " } , <nl> + { " max_delta_step " , " 0 " } , <nl> + <nl> + / / test gamma <nl> + { " gamma " , std : : to_string ( gamma ) } <nl> + } ; <nl> + <nl> + tree : : GPUHistMakerSpecialised < GradientPairPrecise > hist_maker ; <nl> + GenericParameter generic_param ( CreateEmptyGenericParam ( 0 ) ) ; <nl> + hist_maker . Configure ( args , & generic_param ) ; <nl> + <nl> + RegTree tree ; <nl> + hist_maker . Update ( gpair , dmat , { & tree } ) ; <nl> + <nl> + auto n_nodes = tree . NumExtraNodes ( ) ; <nl> + return n_nodes ; <nl> + } <nl> + <nl> + TEST ( GpuHist , MinSplitLoss ) { <nl> + constexpr size_t kRows = 32 ; <nl> + constexpr size_t kCols = 16 ; <nl> + constexpr float kSparsity = 0 . 6 ; <nl> + auto dmat = CreateDMatrix ( kRows , kCols , kSparsity , 3 ) ; <nl> + <nl> + xgboost : : SimpleLCG gen ; <nl> + xgboost : : SimpleRealUniformDistribution < bst_float > dist ( 0 . 0f , 1 . 0f ) ; <nl> + std : : vector < GradientPair > h_gpair ( kRows ) ; <nl> + for ( auto & gpair : h_gpair ) { <nl> + bst_float grad = dist ( & gen ) ; <nl> + bst_float hess = dist ( & gen ) ; <nl> + gpair = GradientPair ( grad , hess ) ; <nl> + } <nl> + HostDeviceVector < GradientPair > gpair ( h_gpair ) ; <nl> + <nl> + { <nl> + int32_t n_nodes = TestMinSplitLoss ( ( * dmat ) . get ( ) , 0 . 01 , & gpair ) ; <nl> + / / This is not strictly verified , meaning the numeber ` 2 ` is whatever GPU_Hist retured <nl> + / / when writing this test , and only used for testing larger gamma ( below ) does prevent <nl> + / / building tree . <nl> + ASSERT_EQ ( n_nodes , 2 ) ; <nl> + } <nl> + { <nl> + int32_t n_nodes = TestMinSplitLoss ( ( * dmat ) . get ( ) , 100 . 
0 , & gpair ) ; <nl> + / / No new nodes with gamma = = 100 . <nl> + ASSERT_EQ ( n_nodes , static_cast < decltype ( n_nodes ) > ( 0 ) ) ; <nl> + } <nl> + delete dmat ; <nl> + } <nl> + <nl> } / / namespace tree <nl> } / / namespace xgboost <nl> | Support gamma in GPU_Hist . ( ) | dmlc/xgboost | 0b89cd1dfa1464bc812116844818a443ed7ce1c7 | 2019-09-24T02:16:08Z |
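The xgboost row above makes the GPU hist updater honor min_split_loss (alias gamma): a candidate split whose loss_chg falls below the threshold is now rejected in ExpandEntry, and the new C++ test checks that gamma=100 produces no nodes while gamma=0.01 still splits. A rough Python-API equivalent is sketched below; it assumes a CUDA build of xgboost, and the synthetic data makes the exact split behaviour illustrative rather than a reproduction of the test.

```python
# Sketch of the same gamma / min_split_loss gating through the Python API.
# Assumes xgboost built with CUDA (tree_method="gpu_hist").
import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
dtrain = xgb.DMatrix(rng.rand(256, 16), label=rng.rand(256))

for gamma in (0.01, 100.0):
    params = {"tree_method": "gpu_hist", "max_depth": 1, "gamma": gamma}
    booster = xgb.train(params, dtrain, num_boost_round=1)
    # With the new `split.loss_chg < param.min_split_loss` check, a large
    # gamma should leave the single tree as an unsplit leaf.
    print(gamma, booster.get_dump()[0])
```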
mmm a / src / compiler / access - info . cc <nl> ppp b / src / compiler / access - info . cc <nl> std : : ostream & operator < < ( std : : ostream & os , AccessMode access_mode ) { <nl> UNREACHABLE ( ) ; <nl> } <nl> <nl> - ElementAccessInfo : : ElementAccessInfo ( <nl> - ZoneVector < Handle < Map > > & & lookup_start_object_maps , <nl> - ElementsKind elements_kind , Zone * zone ) <nl> + ElementAccessInfo : : ElementAccessInfo ( ZoneVector < Handle < Map > > & & receiver_maps , <nl> + ElementsKind elements_kind , Zone * zone ) <nl> : elements_kind_ ( elements_kind ) , <nl> - lookup_start_object_maps_ ( lookup_start_object_maps ) , <nl> + receiver_maps_ ( receiver_maps ) , <nl> transition_sources_ ( zone ) { <nl> - CHECK ( ! lookup_start_object_maps . empty ( ) ) ; <nl> + CHECK ( ! receiver_maps . empty ( ) ) ; <nl> } <nl> <nl> / / static <nl> MinimorphicLoadPropertyAccessInfo MinimorphicLoadPropertyAccessInfo : : Invalid ( ) { <nl> <nl> PropertyAccessInfo : : PropertyAccessInfo ( Zone * zone ) <nl> : kind_ ( kInvalid ) , <nl> - lookup_start_object_maps_ ( zone ) , <nl> + receiver_maps_ ( zone ) , <nl> unrecorded_dependencies_ ( zone ) , <nl> field_representation_ ( Representation : : None ( ) ) , <nl> field_type_ ( Type : : None ( ) ) { } <nl> <nl> - PropertyAccessInfo : : PropertyAccessInfo ( <nl> - Zone * zone , Kind kind , MaybeHandle < JSObject > holder , <nl> - ZoneVector < Handle < Map > > & & lookup_start_object_maps ) <nl> + PropertyAccessInfo : : PropertyAccessInfo ( Zone * zone , Kind kind , <nl> + MaybeHandle < JSObject > holder , <nl> + ZoneVector < Handle < Map > > & & receiver_maps ) <nl> : kind_ ( kind ) , <nl> - lookup_start_object_maps_ ( lookup_start_object_maps ) , <nl> + receiver_maps_ ( receiver_maps ) , <nl> unrecorded_dependencies_ ( zone ) , <nl> holder_ ( holder ) , <nl> field_representation_ ( Representation : : None ( ) ) , <nl> field_type_ ( Type : : None ( ) ) { } <nl> <nl> - PropertyAccessInfo : : PropertyAccessInfo ( <nl> - Zone * zone , Kind kind , MaybeHandle < JSObject > holder , <nl> - Handle < Object > constant , ZoneVector < Handle < Map > > & & lookup_start_object_maps ) <nl> + PropertyAccessInfo : : PropertyAccessInfo ( Zone * zone , Kind kind , <nl> + MaybeHandle < JSObject > holder , <nl> + Handle < Object > constant , <nl> + ZoneVector < Handle < Map > > & & receiver_maps ) <nl> : kind_ ( kind ) , <nl> - lookup_start_object_maps_ ( lookup_start_object_maps ) , <nl> + receiver_maps_ ( receiver_maps ) , <nl> unrecorded_dependencies_ ( zone ) , <nl> constant_ ( constant ) , <nl> holder_ ( holder ) , <nl> PropertyAccessInfo : : PropertyAccessInfo ( <nl> Kind kind , MaybeHandle < JSObject > holder , MaybeHandle < Map > transition_map , <nl> FieldIndex field_index , Representation field_representation , <nl> Type field_type , Handle < Map > field_owner_map , MaybeHandle < Map > field_map , <nl> - ZoneVector < Handle < Map > > & & lookup_start_object_maps , <nl> + ZoneVector < Handle < Map > > & & receiver_maps , <nl> ZoneVector < CompilationDependency const * > & & unrecorded_dependencies ) <nl> : kind_ ( kind ) , <nl> - lookup_start_object_maps_ ( lookup_start_object_maps ) , <nl> + receiver_maps_ ( receiver_maps ) , <nl> unrecorded_dependencies_ ( std : : move ( unrecorded_dependencies ) ) , <nl> transition_map_ ( transition_map ) , <nl> holder_ ( holder ) , <nl> bool PropertyAccessInfo : : Merge ( PropertyAccessInfo const * that , <nl> } <nl> this - > field_type_ = <nl> Type : : Union ( this - > field_type_ , that - > field_type_ , zone ) ; <nl> - this 
- > lookup_start_object_maps_ . insert ( <nl> - this - > lookup_start_object_maps_ . end ( ) , <nl> - that - > lookup_start_object_maps_ . begin ( ) , <nl> - that - > lookup_start_object_maps_ . end ( ) ) ; <nl> + this - > receiver_maps_ . insert ( this - > receiver_maps_ . end ( ) , <nl> + that - > receiver_maps_ . begin ( ) , <nl> + that - > receiver_maps_ . end ( ) ) ; <nl> this - > unrecorded_dependencies_ . insert ( <nl> this - > unrecorded_dependencies_ . end ( ) , <nl> that - > unrecorded_dependencies_ . begin ( ) , <nl> bool PropertyAccessInfo : : Merge ( PropertyAccessInfo const * that , <nl> if ( this - > constant_ . address ( ) = = that - > constant_ . address ( ) ) { <nl> DCHECK ( this - > unrecorded_dependencies_ . empty ( ) ) ; <nl> DCHECK ( that - > unrecorded_dependencies_ . empty ( ) ) ; <nl> - this - > lookup_start_object_maps_ . insert ( <nl> - this - > lookup_start_object_maps_ . end ( ) , <nl> - that - > lookup_start_object_maps_ . begin ( ) , <nl> - that - > lookup_start_object_maps_ . end ( ) ) ; <nl> + this - > receiver_maps_ . insert ( this - > receiver_maps_ . end ( ) , <nl> + that - > receiver_maps_ . begin ( ) , <nl> + that - > receiver_maps_ . end ( ) ) ; <nl> return true ; <nl> } <nl> return false ; <nl> bool PropertyAccessInfo : : Merge ( PropertyAccessInfo const * that , <nl> case kStringLength : { <nl> DCHECK ( this - > unrecorded_dependencies_ . empty ( ) ) ; <nl> DCHECK ( that - > unrecorded_dependencies_ . empty ( ) ) ; <nl> - this - > lookup_start_object_maps_ . insert ( <nl> - this - > lookup_start_object_maps_ . end ( ) , <nl> - that - > lookup_start_object_maps_ . begin ( ) , <nl> - that - > lookup_start_object_maps_ . end ( ) ) ; <nl> + this - > receiver_maps_ . insert ( this - > receiver_maps_ . end ( ) , <nl> + that - > receiver_maps_ . begin ( ) , <nl> + that - > receiver_maps_ . end ( ) ) ; <nl> return true ; <nl> } <nl> case kModuleExport : <nl> mmm a / src / compiler / access - info . h <nl> ppp b / src / compiler / access - info . h <nl> std : : ostream & operator < < ( std : : ostream & , AccessMode ) ; <nl> / / This class encapsulates all information required to access a certain element . <nl> class ElementAccessInfo final { <nl> public : <nl> - ElementAccessInfo ( ZoneVector < Handle < Map > > & & lookup_start_object_maps , <nl> + ElementAccessInfo ( ZoneVector < Handle < Map > > & & receiver_maps , <nl> ElementsKind elements_kind , Zone * zone ) ; <nl> <nl> ElementsKind elements_kind ( ) const { return elements_kind_ ; } <nl> - ZoneVector < Handle < Map > > const & lookup_start_object_maps ( ) const { <nl> - return lookup_start_object_maps_ ; <nl> + ZoneVector < Handle < Map > > const & receiver_maps ( ) const { <nl> + return receiver_maps_ ; <nl> } <nl> ZoneVector < Handle < Map > > const & transition_sources ( ) const { <nl> return transition_sources_ ; <nl> } <nl> <nl> void AddTransitionSource ( Handle < Map > map ) { <nl> - CHECK_EQ ( lookup_start_object_maps_ . size ( ) , 1 ) ; <nl> + CHECK_EQ ( receiver_maps_ . size ( ) , 1 ) ; <nl> transition_sources_ . 
push_back ( map ) ; <nl> } <nl> <nl> private : <nl> ElementsKind elements_kind_ ; <nl> - ZoneVector < Handle < Map > > lookup_start_object_maps_ ; <nl> + ZoneVector < Handle < Map > > receiver_maps_ ; <nl> ZoneVector < Handle < Map > > transition_sources_ ; <nl> } ; <nl> <nl> class PropertyAccessInfo final { <nl> Type field_type ( ) const { return field_type_ ; } <nl> Representation field_representation ( ) const { return field_representation_ ; } <nl> MaybeHandle < Map > field_map ( ) const { return field_map_ ; } <nl> - ZoneVector < Handle < Map > > const & lookup_start_object_maps ( ) const { <nl> - return lookup_start_object_maps_ ; <nl> + ZoneVector < Handle < Map > > const & receiver_maps ( ) const { <nl> + return receiver_maps_ ; <nl> } <nl> <nl> private : <nl> explicit PropertyAccessInfo ( Zone * zone ) ; <nl> PropertyAccessInfo ( Zone * zone , Kind kind , MaybeHandle < JSObject > holder , <nl> - ZoneVector < Handle < Map > > & & lookup_start_object_maps ) ; <nl> + ZoneVector < Handle < Map > > & & receiver_maps ) ; <nl> PropertyAccessInfo ( Zone * zone , Kind kind , MaybeHandle < JSObject > holder , <nl> Handle < Object > constant , <nl> - ZoneVector < Handle < Map > > & & lookup_start_object_maps ) ; <nl> + ZoneVector < Handle < Map > > & & receiver_maps ) ; <nl> PropertyAccessInfo ( Kind kind , MaybeHandle < JSObject > holder , <nl> MaybeHandle < Map > transition_map , FieldIndex field_index , <nl> Representation field_representation , Type field_type , <nl> Handle < Map > field_owner_map , MaybeHandle < Map > field_map , <nl> - ZoneVector < Handle < Map > > & & lookup_start_object_maps , <nl> + ZoneVector < Handle < Map > > & & receiver_maps , <nl> ZoneVector < CompilationDependency const * > & & dependencies ) ; <nl> <nl> Kind kind_ ; <nl> - ZoneVector < Handle < Map > > lookup_start_object_maps_ ; <nl> + ZoneVector < Handle < Map > > receiver_maps_ ; <nl> ZoneVector < CompilationDependency const * > unrecorded_dependencies_ ; <nl> Handle < Object > constant_ ; <nl> MaybeHandle < Map > transition_map_ ; <nl> mmm a / src / compiler / bytecode - graph - builder . cc <nl> ppp b / src / compiler / bytecode - graph - builder . cc <nl> class BytecodeGraphBuilder { <nl> const Operator * op , Node * receiver , FeedbackSlot load_slot , <nl> FeedbackSlot call_slot ) ; <nl> JSTypeHintLowering : : LoweringResult TryBuildSimplifiedLoadNamed ( <nl> - const Operator * op , FeedbackSlot slot ) ; <nl> + const Operator * op , Node * receiver , FeedbackSlot slot ) ; <nl> JSTypeHintLowering : : LoweringResult TryBuildSimplifiedLoadKeyed ( <nl> const Operator * op , Node * receiver , Node * key , FeedbackSlot slot ) ; <nl> JSTypeHintLowering : : LoweringResult TryBuildSimplifiedStoreNamed ( <nl> void BytecodeGraphBuilder : : VisitLdaNamedProperty ( ) { <nl> const Operator * op = javascript ( ) - > LoadNamed ( name . object ( ) , feedback ) ; <nl> <nl> JSTypeHintLowering : : LoweringResult lowering = <nl> - TryBuildSimplifiedLoadNamed ( op , feedback . slot ) ; <nl> + TryBuildSimplifiedLoadNamed ( op , object , feedback . slot ) ; <nl> if ( lowering . IsExit ( ) ) return ; <nl> <nl> Node * node = nullptr ; <nl> void BytecodeGraphBuilder : : VisitLdaNamedPropertyFromSuper ( ) { <nl> Node * home_object = environment ( ) - > LookupAccumulator ( ) ; <nl> NameRef name ( broker ( ) , <nl> bytecode_iterator ( ) . GetConstantForIndexOperand ( 1 , isolate ( ) ) ) ; <nl> + const Operator * op = javascript ( ) - > LoadNamedFromSuper ( name . 
object ( ) ) ; <nl> + / / TODO ( marja , v8 : 9237 ) : Use lowering . <nl> <nl> - FeedbackSource feedback = <nl> - CreateFeedbackSource ( bytecode_iterator ( ) . GetIndexOperand ( 2 ) ) ; <nl> - const Operator * op = <nl> - javascript ( ) - > LoadNamedFromSuper ( name . object ( ) , feedback ) ; <nl> - <nl> - JSTypeHintLowering : : LoweringResult lowering = <nl> - TryBuildSimplifiedLoadNamed ( op , feedback . slot ) ; <nl> - if ( lowering . IsExit ( ) ) return ; <nl> - <nl> - Node * node = nullptr ; <nl> - if ( lowering . IsSideEffectFree ( ) ) { <nl> - node = lowering . value ( ) ; <nl> - } else { <nl> - DCHECK ( ! lowering . Changed ( ) ) ; <nl> - DCHECK ( IrOpcode : : IsFeedbackCollectingOpcode ( op - > opcode ( ) ) ) ; <nl> - node = NewNode ( op , receiver , home_object , feedback_vector_node ( ) ) ; <nl> - } <nl> + Node * node = NewNode ( op , receiver , home_object ) ; <nl> environment ( ) - > BindAccumulator ( node , Environment : : kAttachFrameState ) ; <nl> } <nl> <nl> BytecodeGraphBuilder : : TryBuildSimplifiedGetIterator ( const Operator * op , <nl> <nl> JSTypeHintLowering : : LoweringResult <nl> BytecodeGraphBuilder : : TryBuildSimplifiedLoadNamed ( const Operator * op , <nl> + Node * receiver , <nl> FeedbackSlot slot ) { <nl> if ( ! CanApplyTypeHintLowering ( op ) ) return NoChange ( ) ; <nl> Node * effect = environment ( ) - > GetEffectDependency ( ) ; <nl> Node * control = environment ( ) - > GetControlDependency ( ) ; <nl> JSTypeHintLowering : : LoweringResult early_reduction = <nl> - type_hint_lowering ( ) . ReduceLoadNamedOperation ( op , effect , control , slot ) ; <nl> + type_hint_lowering ( ) . ReduceLoadNamedOperation ( op , receiver , effect , <nl> + control , slot ) ; <nl> ApplyEarlyReduction ( early_reduction ) ; <nl> return early_reduction ; <nl> } <nl> mmm a / src / compiler / js - call - reducer . cc <nl> ppp b / src / compiler / js - call - reducer . cc <nl> Reduction JSCallReducer : : ReduceRegExpPrototypeTest ( Node * node ) { <nl> <nl> / / Add proper dependencies on the { regexp } s [ [ Prototype ] ] s . <nl> dependencies ( ) - > DependOnStablePrototypeChains ( <nl> - ai_exec . lookup_start_object_maps ( ) , kStartAtPrototype , <nl> + ai_exec . receiver_maps ( ) , kStartAtPrototype , <nl> JSObjectRef ( broker ( ) , holder ) ) ; <nl> } else { <nl> return inference . NoChange ( ) ; <nl> mmm a / src / compiler / js - generic - lowering . cc <nl> ppp b / src / compiler / js - generic - lowering . cc <nl> void JSGenericLowering : : LowerJSLoadNamed ( Node * node ) { <nl> } <nl> <nl> void JSGenericLowering : : LowerJSLoadNamedFromSuper ( Node * node ) { <nl> - / / TODO ( marja , v8 : 9237 ) : Call a builtin which collects feedback . <nl> JSLoadNamedFromSuperNode n ( node ) ; <nl> NamedAccess const & p = n . Parameters ( ) ; <nl> - node - > RemoveInput ( 2 ) ; / / Feedback vector <nl> node - > InsertInput ( zone ( ) , 2 , jsgraph ( ) - > HeapConstant ( p . name ( ) ) ) ; <nl> ReplaceWithRuntimeCall ( node , Runtime : : kLoadFromSuper ) ; <nl> } <nl> mmm a / src / compiler / js - heap - copy - reducer . cc <nl> ppp b / src / compiler / js - heap - copy - reducer . cc <nl> Reduction JSHeapCopyReducer : : Reduce ( Node * node ) { <nl> break ; <nl> } <nl> case IrOpcode : : kJSLoadNamedFromSuper : { <nl> + / / TODO ( marja , v8 : 9237 ) : Process feedback once it ' s added to the byte <nl> + / / code . <nl> NamedAccess const & p = NamedAccessOf ( node - > op ( ) ) ; <nl> NameRef name ( broker ( ) , p . name ( ) ) ; <nl> - if ( p . feedback ( ) . 
IsValid ( ) ) { <nl> - broker ( ) - > ProcessFeedbackForPropertyAccess ( p . feedback ( ) , <nl> - AccessMode : : kLoad , name ) ; <nl> - } <nl> break ; <nl> } <nl> case IrOpcode : : kJSStoreNamed : { <nl> mmm a / src / compiler / js - native - context - specialization . cc <nl> ppp b / src / compiler / js - native - context - specialization . cc <nl> Reduction JSNativeContextSpecialization : : Reduce ( Node * node ) { <nl> return ReduceJSStoreGlobal ( node ) ; <nl> case IrOpcode : : kJSLoadNamed : <nl> return ReduceJSLoadNamed ( node ) ; <nl> - case IrOpcode : : kJSLoadNamedFromSuper : <nl> - return ReduceJSLoadNamedFromSuper ( node ) ; <nl> case IrOpcode : : kJSStoreNamed : <nl> return ReduceJSStoreNamed ( node ) ; <nl> case IrOpcode : : kJSHasProperty : <nl> Reduction JSNativeContextSpecialization : : ReduceJSInstanceOf ( Node * node ) { <nl> / / takes over , but that requires the constructor to be callable . <nl> if ( ! receiver_map . is_callable ( ) ) return NoChange ( ) ; <nl> <nl> - dependencies ( ) - > DependOnStablePrototypeChains ( <nl> - access_info . lookup_start_object_maps ( ) , kStartAtPrototype ) ; <nl> + dependencies ( ) - > DependOnStablePrototypeChains ( access_info . receiver_maps ( ) , <nl> + kStartAtPrototype ) ; <nl> <nl> / / Monomorphic property access . <nl> access_builder . BuildCheckMaps ( constructor , & effect , control , <nl> - access_info . lookup_start_object_maps ( ) ) ; <nl> + access_info . receiver_maps ( ) ) ; <nl> <nl> / / Lower to OrdinaryHasInstance ( C , O ) . <nl> NodeProperties : : ReplaceValueInput ( node , constructor , 0 ) ; <nl> Reduction JSNativeContextSpecialization : : ReduceJSInstanceOf ( Node * node ) { <nl> <nl> if ( found_on_proto ) { <nl> dependencies ( ) - > DependOnStablePrototypeChains ( <nl> - access_info . lookup_start_object_maps ( ) , kStartAtPrototype , <nl> + access_info . receiver_maps ( ) , kStartAtPrototype , <nl> JSObjectRef ( broker ( ) , holder ) ) ; <nl> } <nl> <nl> Reduction JSNativeContextSpecialization : : ReduceJSInstanceOf ( Node * node ) { <nl> <nl> / / Monomorphic property access . <nl> access_builder . BuildCheckMaps ( constructor , & effect , control , <nl> - access_info . lookup_start_object_maps ( ) ) ; <nl> + access_info . receiver_maps ( ) ) ; <nl> <nl> / / Create a nested frame state inside the current method ' s most - recent frame <nl> / / state that will ensure that deopts that happen after this point will not <nl> JSNativeContextSpecialization : : InferHasInPrototypeChainResult <nl> JSNativeContextSpecialization : : InferHasInPrototypeChain ( <nl> Node * receiver , Node * effect , HeapObjectRef const & prototype ) { <nl> ZoneHandleSet < Map > receiver_maps ; <nl> - NodeProperties : : InferMapsResult result = NodeProperties : : InferMapsUnsafe ( <nl> - broker ( ) , receiver , effect , & receiver_maps ) ; <nl> - if ( result = = NodeProperties : : kNoMaps ) return kMayBeInPrototypeChain ; <nl> + NodeProperties : : InferReceiverMapsResult result = <nl> + NodeProperties : : InferReceiverMapsUnsafe ( broker ( ) , receiver , effect , <nl> + & receiver_maps ) ; <nl> + if ( result = = NodeProperties : : kNoReceiverMaps ) return kMayBeInPrototypeChain ; <nl> <nl> / / Try to determine either that all of the { receiver_maps } have the given <nl> / / { prototype } in their chain , or that none do . If we can ' t tell , return <nl> JSNativeContextSpecialization : : InferHasInPrototypeChain ( <nl> bool none = true ; <nl> for ( size_t i = 0 ; i < receiver_maps . 
size ( ) ; + + i ) { <nl> MapRef map ( broker ( ) , receiver_maps [ i ] ) ; <nl> - if ( result = = NodeProperties : : kUnreliableMaps & & ! map . is_stable ( ) ) { <nl> + if ( result = = NodeProperties : : kUnreliableReceiverMaps & & ! map . is_stable ( ) ) { <nl> return kMayBeInPrototypeChain ; <nl> } <nl> while ( true ) { <nl> JSNativeContextSpecialization : : InferHasInPrototypeChain ( <nl> if ( ! prototype . map ( ) . is_stable ( ) ) return kMayBeInPrototypeChain ; <nl> last_prototype = prototype . AsJSObject ( ) ; <nl> } <nl> - WhereToStart start = result = = NodeProperties : : kUnreliableMaps <nl> + WhereToStart start = result = = NodeProperties : : kUnreliableReceiverMaps <nl> ? kStartAtReceiver <nl> : kStartAtPrototype ; <nl> dependencies ( ) - > DependOnStablePrototypeChains ( receiver_maps , start , <nl> Reduction JSNativeContextSpecialization : : ReduceJSResolvePromise ( Node * node ) { <nl> return inference . NoChange ( ) ; <nl> } <nl> <nl> - dependencies ( ) - > DependOnStablePrototypeChains ( <nl> - access_info . lookup_start_object_maps ( ) , kStartAtPrototype ) ; <nl> + dependencies ( ) - > DependOnStablePrototypeChains ( access_info . receiver_maps ( ) , <nl> + kStartAtPrototype ) ; <nl> <nl> / / Simply fulfill the { promise } with the { resolution } . <nl> Node * value = effect = <nl> FieldAccess ForPropertyCellValue ( MachineRepresentation representation , <nl> } / / namespace <nl> <nl> Reduction JSNativeContextSpecialization : : ReduceGlobalAccess ( <nl> - Node * node , Node * lookup_start_object , Node * receiver , Node * value , <nl> - NameRef const & name , AccessMode access_mode , Node * key , Node * effect ) { <nl> + Node * node , Node * receiver , Node * value , NameRef const & name , <nl> + AccessMode access_mode , Node * key ) { <nl> base : : Optional < PropertyCellRef > cell = <nl> native_context ( ) . global_object ( ) . GetPropertyCell ( name ) ; <nl> - return cell . has_value ( ) <nl> - ? ReduceGlobalAccess ( node , lookup_start_object , receiver , value , <nl> - name , access_mode , key , * cell , effect ) <nl> - : NoChange ( ) ; <nl> + return cell . has_value ( ) ? ReduceGlobalAccess ( node , receiver , value , name , <nl> + access_mode , key , * cell ) <nl> + : NoChange ( ) ; <nl> } <nl> <nl> / / TODO ( neis ) : Try to merge this with ReduceNamedAccess by introducing a new <nl> / / PropertyAccessInfo kind for global accesses and using the existing mechanism <nl> / / for building loads / stores . <nl> - / / Note : The " receiver " parameter is only used for DCHECKS , but that ' s on <nl> - / / purpose . This way we can assert the super property access cases won ' t hit the <nl> - / / code which hasn ' t been modified to support super property access . <nl> Reduction JSNativeContextSpecialization : : ReduceGlobalAccess ( <nl> - Node * node , Node * lookup_start_object , Node * receiver , Node * value , <nl> - NameRef const & name , AccessMode access_mode , Node * key , <nl> - PropertyCellRef const & property_cell , Node * effect ) { <nl> + Node * node , Node * receiver , Node * value , NameRef const & name , <nl> + AccessMode access_mode , Node * key , PropertyCellRef const & property_cell ) { <nl> + Node * effect = NodeProperties : : GetEffectInput ( node ) ; <nl> Node * control = NodeProperties : : GetControlInput ( node ) ; <nl> - if ( effect = = nullptr ) { <nl> - effect = NodeProperties : : GetEffectInput ( node ) ; <nl> - } <nl> <nl> ObjectRef property_cell_value = property_cell . value ( ) ; <nl> if ( property_cell_value . 
IsHeapObject ( ) & & <nl> Reduction JSNativeContextSpecialization : : ReduceGlobalAccess ( <nl> <nl> / / We have additional constraints for stores . <nl> if ( access_mode = = AccessMode : : kStore ) { <nl> - DCHECK_EQ ( receiver , lookup_start_object ) ; <nl> if ( property_details . IsReadOnly ( ) ) { <nl> / / Don ' t even bother trying to lower stores to read - only data properties . <nl> return NoChange ( ) ; <nl> Reduction JSNativeContextSpecialization : : ReduceGlobalAccess ( <nl> } <nl> } <nl> } else if ( access_mode = = AccessMode : : kHas ) { <nl> - DCHECK_EQ ( receiver , lookup_start_object ) ; <nl> / / has checks cannot follow the fast - path used by loads when these <nl> / / conditions hold . <nl> if ( ( property_details . IsConfigurable ( ) | | ! property_details . IsReadOnly ( ) ) & & <nl> Reduction JSNativeContextSpecialization : : ReduceGlobalAccess ( <nl> effect = BuildCheckEqualsName ( name , key , effect , control ) ; <nl> } <nl> <nl> - / / If we have a { lookup_start_object } to validate , we do so by checking that <nl> - / / its map is the ( target ) global proxy ' s map . This guarantees that in fact <nl> - / / the lookup start object is the global proxy . <nl> - if ( lookup_start_object ! = nullptr ) { <nl> + / / If we have a { receiver } to validate , we do so by checking that its map is <nl> + / / the ( target ) global proxy ' s map . This guarantees that in fact the receiver <nl> + / / is the global proxy . <nl> + if ( receiver ! = nullptr ) { <nl> effect = graph ( ) - > NewNode ( <nl> simplified ( ) - > CheckMaps ( <nl> CheckMapsFlag : : kNone , <nl> ZoneHandleSet < Map > ( <nl> HeapObjectRef ( broker ( ) , global_proxy ( ) ) . map ( ) . object ( ) ) ) , <nl> - lookup_start_object , effect , control ) ; <nl> + receiver , effect , control ) ; <nl> } <nl> <nl> if ( access_mode = = AccessMode : : kLoad | | access_mode = = AccessMode : : kHas ) { <nl> Reduction JSNativeContextSpecialization : : ReduceGlobalAccess ( <nl> } <nl> } else { <nl> DCHECK_EQ ( AccessMode : : kStore , access_mode ) ; <nl> - DCHECK_EQ ( receiver , lookup_start_object ) ; <nl> DCHECK ( ! property_details . IsReadOnly ( ) ) ; <nl> switch ( property_details . cell_type ( ) ) { <nl> case PropertyCellType : : kUndefined : { <nl> Reduction JSNativeContextSpecialization : : ReduceJSLoadGlobal ( Node * node ) { <nl> ReplaceWithValue ( node , value , effect ) ; <nl> return Replace ( value ) ; <nl> } else if ( feedback . IsPropertyCell ( ) ) { <nl> - return ReduceGlobalAccess ( node , nullptr , nullptr , nullptr , <nl> + return ReduceGlobalAccess ( node , nullptr , nullptr , <nl> NameRef ( broker ( ) , p . name ( ) ) , AccessMode : : kLoad , <nl> nullptr , feedback . property_cell ( ) ) ; <nl> } else { <nl> Reduction JSNativeContextSpecialization : : ReduceJSStoreGlobal ( Node * node ) { <nl> ReplaceWithValue ( node , value , effect , control ) ; <nl> return Replace ( value ) ; <nl> } else if ( feedback . IsPropertyCell ( ) ) { <nl> - return ReduceGlobalAccess ( node , nullptr , nullptr , value , <nl> - NameRef ( broker ( ) , p . name ( ) ) , AccessMode : : kStore , <nl> - nullptr , feedback . property_cell ( ) ) ; <nl> + return ReduceGlobalAccess ( node , nullptr , value , NameRef ( broker ( ) , p . name ( ) ) , <nl> + AccessMode : : kStore , nullptr , <nl> + feedback . property_cell ( ) ) ; <nl> } else { <nl> DCHECK ( feedback . 
IsMegamorphic ( ) ) ; <nl> return NoChange ( ) ; <nl> Reduction JSNativeContextSpecialization : : ReduceMinimorphicPropertyAccess ( <nl> Node * node , Node * value , <nl> MinimorphicLoadPropertyAccessFeedback const & feedback , <nl> FeedbackSource const & source ) { <nl> - DCHECK ( node - > opcode ( ) = = IrOpcode : : kJSLoadNamed | | <nl> - node - > opcode ( ) = = IrOpcode : : kJSLoadProperty | | <nl> - node - > opcode ( ) = = IrOpcode : : kJSLoadNamedFromSuper ) ; <nl> - STATIC_ASSERT ( JSLoadNamedNode : : ObjectIndex ( ) = = 0 & & <nl> - JSLoadPropertyNode : : ObjectIndex ( ) = = 0 ) ; <nl> - <nl> + Node * receiver = NodeProperties : : GetValueInput ( node , 0 ) ; <nl> Node * effect = NodeProperties : : GetEffectInput ( node ) ; <nl> Node * control = NodeProperties : : GetControlInput ( node ) ; <nl> <nl> - Node * lookup_start_object ; <nl> - if ( node - > opcode ( ) = = IrOpcode : : kJSLoadNamedFromSuper ) { <nl> - DCHECK ( FLAG_super_ic ) ; <nl> - JSLoadNamedFromSuperNode n ( node ) ; <nl> - / / Lookup start object is the __proto__ of the home object . <nl> - lookup_start_object = effect = <nl> - BuildLoadPrototypeFromObject ( n . home_object ( ) , effect , control ) ; <nl> - } else { <nl> - lookup_start_object = NodeProperties : : GetValueInput ( node , 0 ) ; <nl> - } <nl> - <nl> MinimorphicLoadPropertyAccessInfo access_info = <nl> broker ( ) - > GetPropertyAccessInfo ( <nl> feedback , source , <nl> Reduction JSNativeContextSpecialization : : ReduceMinimorphicPropertyAccess ( <nl> <nl> effect = graph ( ) - > NewNode ( <nl> simplified ( ) - > DynamicCheckMaps ( flags , feedback . handler ( ) , maps , source ) , <nl> - lookup_start_object , effect , control ) ; <nl> + receiver , effect , control ) ; <nl> value = access_builder . BuildMinimorphicLoadDataField ( <nl> - feedback . name ( ) , access_info , lookup_start_object , & effect , & control ) ; <nl> + feedback . name ( ) , access_info , receiver , & effect , & control ) ; <nl> <nl> ReplaceWithValue ( node , value , effect , control ) ; <nl> return Replace ( value ) ; <nl> Reduction JSNativeContextSpecialization : : ReduceNamedAccess ( <nl> node - > opcode ( ) = = IrOpcode : : kJSStoreProperty | | <nl> node - > opcode ( ) = = IrOpcode : : kJSStoreNamedOwn | | <nl> node - > opcode ( ) = = IrOpcode : : kJSStoreDataPropertyInLiteral | | <nl> - node - > opcode ( ) = = IrOpcode : : kJSHasProperty | | <nl> - node - > opcode ( ) = = IrOpcode : : kJSLoadNamedFromSuper ) ; <nl> + node - > opcode ( ) = = IrOpcode : : kJSHasProperty ) ; <nl> STATIC_ASSERT ( JSLoadNamedNode : : ObjectIndex ( ) = = 0 & & <nl> JSStoreNamedNode : : ObjectIndex ( ) = = 0 & & <nl> JSLoadPropertyNode : : ObjectIndex ( ) = = 0 & & <nl> Reduction JSNativeContextSpecialization : : ReduceNamedAccess ( <nl> JSStoreNamedNode : : ObjectIndex ( ) = = 0 & & <nl> JSStoreDataPropertyInLiteralNode : : ObjectIndex ( ) = = 0 & & <nl> JSHasPropertyNode : : ObjectIndex ( ) = = 0 ) ; <nl> - STATIC_ASSERT ( JSLoadNamedFromSuperNode : : ReceiverIndex ( ) = = 0 ) ; <nl> - <nl> + Node * receiver = NodeProperties : : GetValueInput ( node , 0 ) ; <nl> Node * context = NodeProperties : : GetContextInput ( node ) ; <nl> Node * frame_state = NodeProperties : : GetFrameStateInput ( node ) ; <nl> Node * effect = NodeProperties : : GetEffectInput ( node ) ; <nl> Node * control = NodeProperties : : GetControlInput ( node ) ; <nl> <nl> - / / receiver = the object we pass to the accessor ( if any ) as the " this " value . 
<nl> - Node * receiver = NodeProperties : : GetValueInput ( node , 0 ) ; <nl> - / / lookup_start_object = the object where we start looking for the property . <nl> - Node * lookup_start_object ; <nl> - if ( node - > opcode ( ) = = IrOpcode : : kJSLoadNamedFromSuper ) { <nl> - DCHECK ( FLAG_super_ic ) ; <nl> - JSLoadNamedFromSuperNode n ( node ) ; <nl> - / / Lookup start object is the __proto__ of the home object . <nl> - lookup_start_object = effect = <nl> - BuildLoadPrototypeFromObject ( n . home_object ( ) , effect , control ) ; <nl> - } else { <nl> - lookup_start_object = receiver ; <nl> - } <nl> - <nl> / / Either infer maps from the graph or use the feedback . <nl> - ZoneVector < Handle < Map > > lookup_start_object_maps ( zone ( ) ) ; <nl> - if ( ! InferMaps ( lookup_start_object , effect , & lookup_start_object_maps ) ) { <nl> - lookup_start_object_maps = feedback . maps ( ) ; <nl> + ZoneVector < Handle < Map > > receiver_maps ( zone ( ) ) ; <nl> + if ( ! InferReceiverMaps ( receiver , effect , & receiver_maps ) ) { <nl> + receiver_maps = feedback . maps ( ) ; <nl> } <nl> - RemoveImpossibleMaps ( lookup_start_object , & lookup_start_object_maps ) ; <nl> + RemoveImpossibleReceiverMaps ( receiver , & receiver_maps ) ; <nl> <nl> / / Check if we have an access o . x or o . x = v where o is the target native <nl> / / contexts ' global proxy , and turn that into a direct access to the <nl> / / corresponding global object instead . <nl> - if ( lookup_start_object_maps . size ( ) = = 1 ) { <nl> - MapRef lookup_start_object_map ( broker ( ) , lookup_start_object_maps [ 0 ] ) ; <nl> - if ( lookup_start_object_map . equals ( <nl> + if ( receiver_maps . size ( ) = = 1 ) { <nl> + MapRef receiver_map ( broker ( ) , receiver_maps [ 0 ] ) ; <nl> + if ( receiver_map . equals ( <nl> broker ( ) - > target_native_context ( ) . global_proxy_object ( ) . map ( ) ) & & <nl> ! broker ( ) - > target_native_context ( ) . global_object ( ) . IsDetached ( ) ) { <nl> - return ReduceGlobalAccess ( node , lookup_start_object , receiver , value , <nl> - feedback . name ( ) , access_mode , key , effect ) ; <nl> + return ReduceGlobalAccess ( node , receiver , value , feedback . name ( ) , <nl> + access_mode , key ) ; <nl> } <nl> } <nl> <nl> ZoneVector < PropertyAccessInfo > access_infos ( zone ( ) ) ; <nl> { <nl> ZoneVector < PropertyAccessInfo > access_infos_for_feedback ( zone ( ) ) ; <nl> - for ( Handle < Map > map_handle : lookup_start_object_maps ) { <nl> + for ( Handle < Map > map_handle : receiver_maps ) { <nl> MapRef map ( broker ( ) , map_handle ) ; <nl> if ( map . is_deprecated ( ) ) continue ; <nl> PropertyAccessInfo access_info = broker ( ) - > GetPropertyAccessInfo ( <nl> Reduction JSNativeContextSpecialization : : ReduceNamedAccess ( <nl> / / Check for the monomorphic cases . <nl> if ( access_infos . size ( ) = = 1 ) { <nl> PropertyAccessInfo access_info = access_infos . front ( ) ; <nl> - if ( receiver ! = lookup_start_object ) { <nl> - / / Super property access . lookup_start_object is a JSReceiver or <nl> - / / null . It can ' t be a number , a string etc . So trying to build the <nl> - / / checks in the " else if " branch doesn ' t make sense . <nl> - access_builder . BuildCheckMaps ( lookup_start_object , & effect , control , <nl> - access_info . lookup_start_object_maps ( ) ) ; <nl> - <nl> - } else if ( ! access_builder . TryBuildStringCheck ( <nl> - broker ( ) , access_info . lookup_start_object_maps ( ) , & receiver , <nl> - & effect , control ) & & <nl> - ! access_builder . 
TryBuildNumberCheck ( <nl> - broker ( ) , access_info . lookup_start_object_maps ( ) , & receiver , <nl> - & effect , control ) ) { <nl> - / / Try to build string check or number check if possible . Otherwise build <nl> - / / a map check . <nl> - <nl> - / / TryBuildStringCheck and TryBuildNumberCheck don ' t update the receiver <nl> - / / if they fail . <nl> - DCHECK_EQ ( receiver , lookup_start_object ) ; <nl> - if ( HasNumberMaps ( broker ( ) , access_info . lookup_start_object_maps ( ) ) ) { <nl> + / / Try to build string check or number check if possible . <nl> + / / Otherwise build a map check . <nl> + if ( ! access_builder . TryBuildStringCheck ( broker ( ) , <nl> + access_info . receiver_maps ( ) , <nl> + & receiver , & effect , control ) & & <nl> + ! access_builder . TryBuildNumberCheck ( broker ( ) , <nl> + access_info . receiver_maps ( ) , <nl> + & receiver , & effect , control ) ) { <nl> + if ( HasNumberMaps ( broker ( ) , access_info . receiver_maps ( ) ) ) { <nl> / / We need to also let Smi { receiver } s through in this case , so <nl> / / we construct a diamond , guarded by the Sminess of the { receiver } <nl> / / and if { receiver } is not a Smi just emit a sequence of map checks . <nl> Reduction JSNativeContextSpecialization : : ReduceNamedAccess ( <nl> Node * efalse = effect ; <nl> { <nl> access_builder . BuildCheckMaps ( receiver , & efalse , if_false , <nl> - access_info . lookup_start_object_maps ( ) ) ; <nl> + access_info . receiver_maps ( ) ) ; <nl> } <nl> <nl> control = graph ( ) - > NewNode ( common ( ) - > Merge ( 2 ) , if_true , if_false ) ; <nl> Reduction JSNativeContextSpecialization : : ReduceNamedAccess ( <nl> graph ( ) - > NewNode ( common ( ) - > EffectPhi ( 2 ) , etrue , efalse , control ) ; <nl> } else { <nl> access_builder . BuildCheckMaps ( receiver , & effect , control , <nl> - access_info . lookup_start_object_maps ( ) ) ; <nl> + access_info . receiver_maps ( ) ) ; <nl> } <nl> - } else { <nl> - / / At least one of TryBuildStringCheck & TryBuildNumberCheck succeeded <nl> - / / and updated the receiver . Update lookup_start_object to match ( they <nl> - / / should be the same ) . <nl> - lookup_start_object = receiver ; <nl> } <nl> <nl> / / Generate the actual property access . <nl> ValueEffectControl continuation = BuildPropertyAccess ( <nl> - lookup_start_object , receiver , value , context , frame_state , effect , <nl> - control , feedback . name ( ) , if_exceptions , access_info , access_mode ) ; <nl> + receiver , value , context , frame_state , effect , control , feedback . name ( ) , <nl> + if_exceptions , access_info , access_mode ) ; <nl> value = continuation . value ( ) ; <nl> effect = continuation . effect ( ) ; <nl> control = continuation . control ( ) ; <nl> Reduction JSNativeContextSpecialization : : ReduceNamedAccess ( <nl> ZoneVector < Node * > effects ( zone ( ) ) ; <nl> ZoneVector < Node * > controls ( zone ( ) ) ; <nl> <nl> - Node * receiverissmi_control = nullptr ; <nl> - Node * receiverissmi_effect = effect ; <nl> - <nl> - if ( receiver = = lookup_start_object ) { <nl> - / / Check if { receiver } may be a number . <nl> - bool receiverissmi_possible = false ; <nl> - for ( PropertyAccessInfo const & access_info : access_infos ) { <nl> - if ( HasNumberMaps ( broker ( ) , access_info . lookup_start_object_maps ( ) ) ) { <nl> - receiverissmi_possible = true ; <nl> - break ; <nl> - } <nl> + / / Check if { receiver } may be a number . 
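The Smi special-casing here (the diamond "guarded by the Sminess of the {receiver}" and the ObjectIsSmi branch that follows) reflects a language-level fact rather than a compiler detail: a primitive number carries no map of its own, and a named load on it behaves as if the value were boxed, with the lookup starting on Number.prototype. A minimal TypeScript sketch of that observable behaviour (plain JavaScript semantics, not TurboFan nodes):

```typescript
// Runnable sketch of the language-level behaviour the Smi handling covers:
// a primitive number has no map; property loads on it act as if the value
// were boxed, and the lookup proceeds on Number.prototype.
const n = 42;

console.log(n.toFixed(1));                                           // "42.0"
console.log(Object.getPrototypeOf(Object(n)) === Number.prototype);  // true

// A property added to Number.prototype is visible on every primitive number --
// exactly the case the ObjectIsSmi diamond has to keep working.
(Number.prototype as any).double = function (this: number) { return this * 2; };
console.log((n as any).double());                                    // 84
```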
<nl> + bool receiverissmi_possible = false ; <nl> + for ( PropertyAccessInfo const & access_info : access_infos ) { <nl> + if ( HasNumberMaps ( broker ( ) , access_info . receiver_maps ( ) ) ) { <nl> + receiverissmi_possible = true ; <nl> + break ; <nl> } <nl> + } <nl> <nl> - / / Handle the case that { receiver } may be a number . <nl> - if ( receiverissmi_possible ) { <nl> - Node * check = graph ( ) - > NewNode ( simplified ( ) - > ObjectIsSmi ( ) , receiver ) ; <nl> - Node * branch = graph ( ) - > NewNode ( common ( ) - > Branch ( ) , check , control ) ; <nl> - control = graph ( ) - > NewNode ( common ( ) - > IfFalse ( ) , branch ) ; <nl> - receiverissmi_control = graph ( ) - > NewNode ( common ( ) - > IfTrue ( ) , branch ) ; <nl> - receiverissmi_effect = effect ; <nl> - } <nl> + / / Handle the case that { receiver } may be a number . <nl> + Node * receiverissmi_control = nullptr ; <nl> + Node * receiverissmi_effect = effect ; <nl> + if ( receiverissmi_possible ) { <nl> + Node * check = graph ( ) - > NewNode ( simplified ( ) - > ObjectIsSmi ( ) , receiver ) ; <nl> + Node * branch = graph ( ) - > NewNode ( common ( ) - > Branch ( ) , check , control ) ; <nl> + control = graph ( ) - > NewNode ( common ( ) - > IfFalse ( ) , branch ) ; <nl> + receiverissmi_control = graph ( ) - > NewNode ( common ( ) - > IfTrue ( ) , branch ) ; <nl> + receiverissmi_effect = effect ; <nl> } <nl> <nl> / / Generate code for the various different property access patterns . <nl> Reduction JSNativeContextSpecialization : : ReduceNamedAccess ( <nl> for ( size_t j = 0 ; j < access_infos . size ( ) ; + + j ) { <nl> PropertyAccessInfo const & access_info = access_infos [ j ] ; <nl> Node * this_value = value ; <nl> - Node * this_lookup_start_object = lookup_start_object ; <nl> Node * this_receiver = receiver ; <nl> Node * this_effect = effect ; <nl> Node * this_control = fallthrough_control ; <nl> <nl> - / / Perform map check on { lookup_start_object } . <nl> - ZoneVector < Handle < Map > > const & lookup_start_object_maps = <nl> - access_info . lookup_start_object_maps ( ) ; <nl> + / / Perform map check on { receiver } . <nl> + ZoneVector < Handle < Map > > const & receiver_maps = <nl> + access_info . receiver_maps ( ) ; <nl> { <nl> / / Whether to insert a dedicated MapGuard node into the <nl> / / effect to be able to learn from the control flow . <nl> bool insert_map_guard = true ; <nl> <nl> - / / Check maps for the { lookup_start_object } s . <nl> + / / Check maps for the { receiver } s . <nl> if ( j = = access_infos . size ( ) - 1 ) { <nl> / / Last map check on the fallthrough control path , do a <nl> / / conditional eager deoptimization exit here . <nl> - access_builder . BuildCheckMaps ( lookup_start_object , & this_effect , <nl> - this_control , lookup_start_object_maps ) ; <nl> + access_builder . BuildCheckMaps ( receiver , & this_effect , this_control , <nl> + receiver_maps ) ; <nl> fallthrough_control = nullptr ; <nl> <nl> / / Don ' t insert a MapGuard in this case , as the CheckMaps <nl> Reduction JSNativeContextSpecialization : : ReduceNamedAccess ( <nl> / / along the effect chain . <nl> insert_map_guard = false ; <nl> } else { <nl> - / / Explicitly branch on the { lookup_start_object_maps } . <nl> + / / Explicitly branch on the { receiver_maps } . <nl> ZoneHandleSet < Map > maps ; <nl> - for ( Handle < Map > map : lookup_start_object_maps ) { <nl> + for ( Handle < Map > map : receiver_maps ) { <nl> maps . 
insert ( map , graph ( ) - > zone ( ) ) ; <nl> } <nl> Node * check = this_effect = <nl> - graph ( ) - > NewNode ( simplified ( ) - > CompareMaps ( maps ) , <nl> - lookup_start_object , this_effect , this_control ) ; <nl> + graph ( ) - > NewNode ( simplified ( ) - > CompareMaps ( maps ) , receiver , <nl> + this_effect , this_control ) ; <nl> Node * branch = <nl> graph ( ) - > NewNode ( common ( ) - > Branch ( ) , check , this_control ) ; <nl> fallthrough_control = graph ( ) - > NewNode ( common ( ) - > IfFalse ( ) , branch ) ; <nl> Reduction JSNativeContextSpecialization : : ReduceNamedAccess ( <nl> } <nl> <nl> / / The Number case requires special treatment to also deal with Smis . <nl> - if ( HasNumberMaps ( broker ( ) , lookup_start_object_maps ) ) { <nl> + if ( HasNumberMaps ( broker ( ) , receiver_maps ) ) { <nl> / / Join this check with the " receiver is smi " check above . <nl> - DCHECK_EQ ( receiver , lookup_start_object ) ; <nl> DCHECK_NOT_NULL ( receiverissmi_effect ) ; <nl> DCHECK_NOT_NULL ( receiverissmi_control ) ; <nl> this_control = graph ( ) - > NewNode ( common ( ) - > Merge ( 2 ) , this_control , <nl> Reduction JSNativeContextSpecialization : : ReduceNamedAccess ( <nl> receiverissmi_effect , this_control ) ; <nl> receiverissmi_effect = receiverissmi_control = nullptr ; <nl> <nl> - / / The { lookup_start_object } can also be a Smi in this case , so <nl> + / / The { receiver } can also be a Smi in this case , so <nl> / / a MapGuard doesn ' t make sense for this at all . <nl> insert_map_guard = false ; <nl> } <nl> Reduction JSNativeContextSpecialization : : ReduceNamedAccess ( <nl> / / Introduce a MapGuard to learn from this on the effect chain . <nl> if ( insert_map_guard ) { <nl> ZoneHandleSet < Map > maps ; <nl> - for ( auto lookup_start_object_map : lookup_start_object_maps ) { <nl> - maps . insert ( lookup_start_object_map , graph ( ) - > zone ( ) ) ; <nl> + for ( auto receiver_map : receiver_maps ) { <nl> + maps . insert ( receiver_map , graph ( ) - > zone ( ) ) ; <nl> } <nl> - this_effect = <nl> - graph ( ) - > NewNode ( simplified ( ) - > MapGuard ( maps ) , <nl> - lookup_start_object , this_effect , this_control ) ; <nl> + this_effect = graph ( ) - > NewNode ( simplified ( ) - > MapGuard ( maps ) , receiver , <nl> + this_effect , this_control ) ; <nl> } <nl> <nl> - / / If all { lookup_start_object_maps } are Strings we also need to rename <nl> - / / the { lookup_start_object } here to make sure that TurboFan knows that <nl> - / / along this path the { this_lookup_start_object } is a String . This is <nl> - / / because we want strict checking of types , for example for <nl> - / / StringLength operators . <nl> - if ( HasOnlyStringMaps ( broker ( ) , lookup_start_object_maps ) ) { <nl> - DCHECK_EQ ( receiver , lookup_start_object ) ; <nl> - this_lookup_start_object = this_receiver = this_effect = <nl> - graph ( ) - > NewNode ( common ( ) - > TypeGuard ( Type : : String ( ) ) , <nl> - lookup_start_object , this_effect , this_control ) ; <nl> + / / If all { receiver_maps } are Strings we also need to rename the <nl> + / / { receiver } here to make sure that TurboFan knows that along this <nl> + / / path the { this_receiver } is a String . This is because we want <nl> + / / strict checking of types , for example for StringLength operators . 
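The String renaming described in the comment above (materialised as a TypeGuard(Type::String) node in the code that follows) plays much the same role as type narrowing in a statically checked language: once this control-flow path has proven the receiver is a String, later operators such as StringLength may rely on that fact without re-checking. A rough TypeScript analogy (TypeScript's own narrowing, not V8's type lattice):

```typescript
// Analogy only: control-flow narrowing lets later code assume "string"
// without another check, which is what the TypeGuard rename achieves for
// StringLength in the optimized graph.
function describe(x: string | number): string {
  if (typeof x === "string") {
    // Inside this branch x is narrowed to `string`, so `.length` needs no
    // further guard.
    return `string of length ${x.length}`;
  }
  // On this path x is narrowed to `number`.
  return `number ${x.toFixed(2)}`;
}

console.log(describe("hello")); // "string of length 5"
console.log(describe(3.14159)); // "number 3.14"
```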
<nl> + if ( HasOnlyStringMaps ( broker ( ) , receiver_maps ) ) { <nl> + this_receiver = this_effect = <nl> + graph ( ) - > NewNode ( common ( ) - > TypeGuard ( Type : : String ( ) ) , receiver , <nl> + this_effect , this_control ) ; <nl> } <nl> } <nl> <nl> / / Generate the actual property access . <nl> - ValueEffectControl continuation = BuildPropertyAccess ( <nl> - this_lookup_start_object , this_receiver , this_value , context , <nl> - frame_state , this_effect , this_control , feedback . name ( ) , <nl> - if_exceptions , access_info , access_mode ) ; <nl> + ValueEffectControl continuation = <nl> + BuildPropertyAccess ( this_receiver , this_value , context , frame_state , <nl> + this_effect , this_control , feedback . name ( ) , <nl> + if_exceptions , access_info , access_mode ) ; <nl> values . push_back ( continuation . value ( ) ) ; <nl> effects . push_back ( continuation . effect ( ) ) ; <nl> controls . push_back ( continuation . control ( ) ) ; <nl> Reduction JSNativeContextSpecialization : : ReduceJSLoadNamed ( Node * node ) { <nl> FeedbackSource ( p . feedback ( ) ) , AccessMode : : kLoad ) ; <nl> } <nl> <nl> - Reduction JSNativeContextSpecialization : : ReduceJSLoadNamedFromSuper ( <nl> - Node * node ) { <nl> - JSLoadNamedFromSuperNode n ( node ) ; <nl> - NamedAccess const & p = n . Parameters ( ) ; <nl> - NameRef name ( broker ( ) , p . name ( ) ) ; <nl> - <nl> - if ( ! p . feedback ( ) . IsValid ( ) ) return NoChange ( ) ; <nl> - return ReducePropertyAccess ( node , nullptr , name , jsgraph ( ) - > Dead ( ) , <nl> - FeedbackSource ( p . feedback ( ) ) , AccessMode : : kLoad ) ; <nl> - } <nl> - <nl> Reduction JSNativeContextSpecialization : : ReduceJSGetIterator ( Node * node ) { <nl> JSGetIteratorNode n ( node ) ; <nl> GetIteratorParameters const & p = n . Parameters ( ) ; <nl> base : : Optional < JSTypedArrayRef > GetTypedArrayConstant ( JSHeapBroker * broker , <nl> } <nl> } / / namespace <nl> <nl> - void JSNativeContextSpecialization : : RemoveImpossibleMaps ( <nl> - Node * object , ZoneVector < Handle < Map > > * maps ) const { <nl> - base : : Optional < MapRef > root_map = InferRootMap ( object ) ; <nl> + void JSNativeContextSpecialization : : RemoveImpossibleReceiverMaps ( <nl> + Node * receiver , ZoneVector < Handle < Map > > * receiver_maps ) const { <nl> + base : : Optional < MapRef > root_map = InferReceiverRootMap ( receiver ) ; <nl> if ( root_map . has_value ( ) ) { <nl> DCHECK ( ! root_map - > is_abandoned_prototype_map ( ) ) ; <nl> - maps - > erase ( <nl> - std : : remove_if ( maps - > begin ( ) , maps - > end ( ) , <nl> + receiver_maps - > erase ( <nl> + std : : remove_if ( receiver_maps - > begin ( ) , receiver_maps - > end ( ) , <nl> [ root_map , this ] ( Handle < Map > map ) { <nl> MapRef map_ref ( broker ( ) , map ) ; <nl> return map_ref . is_abandoned_prototype_map ( ) | | <nl> ( map_ref . FindRootMap ( ) . has_value ( ) & & <nl> ! map_ref . FindRootMap ( ) - > equals ( * root_map ) ) ; <nl> } ) , <nl> - maps - > end ( ) ) ; <nl> + receiver_maps - > end ( ) ) ; <nl> } <nl> } <nl> <nl> JSNativeContextSpecialization : : TryRefineElementAccessFeedback ( <nl> if ( ! use_inference ) return feedback ; <nl> <nl> ZoneVector < Handle < Map > > inferred_maps ( zone ( ) ) ; <nl> - if ( ! InferMaps ( receiver , effect , & inferred_maps ) ) return feedback ; <nl> + if ( ! 
InferReceiverMaps ( receiver , effect , & inferred_maps ) ) return feedback ; <nl> <nl> - RemoveImpossibleMaps ( receiver , & inferred_maps ) ; <nl> + RemoveImpossibleReceiverMaps ( receiver , & inferred_maps ) ; <nl> / / TODO ( neis ) : After Refine , the resulting feedback can still contain <nl> / / impossible maps when a target is kept only because more than one of its <nl> / / sources was inferred . Think of a way to completely rule out impossible <nl> Reduction JSNativeContextSpecialization : : ReduceElementAccess ( <nl> / / the zone allocation of this vector . <nl> ZoneVector < MapRef > prototype_maps ( zone ( ) ) ; <nl> for ( ElementAccessInfo const & access_info : access_infos ) { <nl> - for ( Handle < Map > map : access_info . lookup_start_object_maps ( ) ) { <nl> + for ( Handle < Map > map : access_info . receiver_maps ( ) ) { <nl> MapRef receiver_map ( broker ( ) , map ) ; <nl> / / If the { receiver_map } has a prototype and its elements backing <nl> / / store is either holey , or we have a potentially growing store , <nl> Reduction JSNativeContextSpecialization : : ReduceElementAccess ( <nl> ElementAccessInfo access_info = access_infos . front ( ) ; <nl> <nl> / / Perform possible elements kind transitions . <nl> - MapRef transition_target ( broker ( ) , <nl> - access_info . lookup_start_object_maps ( ) . front ( ) ) ; <nl> + MapRef transition_target ( broker ( ) , access_info . receiver_maps ( ) . front ( ) ) ; <nl> for ( auto source : access_info . transition_sources ( ) ) { <nl> - DCHECK_EQ ( access_info . lookup_start_object_maps ( ) . size ( ) , 1 ) ; <nl> + DCHECK_EQ ( access_info . receiver_maps ( ) . size ( ) , 1 ) ; <nl> MapRef transition_source ( broker ( ) , source ) ; <nl> effect = graph ( ) - > NewNode ( <nl> simplified ( ) - > TransitionElementsKind ( ElementsTransition ( <nl> Reduction JSNativeContextSpecialization : : ReduceElementAccess ( <nl> <nl> / / Perform map check on the { receiver } . <nl> access_builder . BuildCheckMaps ( receiver , & effect , control , <nl> - access_info . lookup_start_object_maps ( ) ) ; <nl> + access_info . receiver_maps ( ) ) ; <nl> <nl> / / Access the actual element . <nl> ValueEffectControl continuation = <nl> Reduction JSNativeContextSpecialization : : ReduceElementAccess ( <nl> Node * this_control = fallthrough_control ; <nl> <nl> / / Perform possible elements kind transitions . <nl> - MapRef transition_target ( broker ( ) , <nl> - access_info . lookup_start_object_maps ( ) . front ( ) ) ; <nl> + MapRef transition_target ( broker ( ) , access_info . receiver_maps ( ) . front ( ) ) ; <nl> for ( auto source : access_info . transition_sources ( ) ) { <nl> MapRef transition_source ( broker ( ) , source ) ; <nl> - DCHECK_EQ ( access_info . lookup_start_object_maps ( ) . size ( ) , 1 ) ; <nl> + DCHECK_EQ ( access_info . receiver_maps ( ) . size ( ) , 1 ) ; <nl> this_effect = graph ( ) - > NewNode ( <nl> simplified ( ) - > TransitionElementsKind ( ElementsTransition ( <nl> IsSimpleMapChangeTransition ( transition_source . elements_kind ( ) , <nl> Reduction JSNativeContextSpecialization : : ReduceElementAccess ( <nl> <nl> / / Perform map check ( s ) on { receiver } . <nl> ZoneVector < Handle < Map > > const & receiver_maps = <nl> - access_info . lookup_start_object_maps ( ) ; <nl> + access_info . receiver_maps ( ) ; <nl> if ( j = = access_infos . size ( ) - 1 ) { <nl> / / Last map check on the fallthrough control path , do a <nl> / / conditional eager deoptimization exit here . 
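The polymorphic lowering here compares the receiver's map against each feedback group in turn, branching on all but the last group and turning the final comparison into a deoptimizing check on the fallthrough control path. A conceptual TypeScript sketch of that dispatch shape -- every name in it (Shape, Handler, buildPolymorphicLoad) is invented for illustration and is not part of V8:

```typescript
// Conceptual sketch of the polymorphic dispatch structure (illustrative names
// only). Each feedback group gets a compare-and-branch; the final group is a
// real check that "deoptimizes" (here: throws) when it fails.
type Shape = string;
type Handler = (receiver: Record<string, unknown>) => unknown;

function buildPolymorphicLoad(groups: Array<{ shapes: Shape[]; handler: Handler }>) {
  return (receiver: Record<string, unknown>, shape: Shape): unknown => {
    for (let i = 0; i < groups.length; i++) {
      const last = i === groups.length - 1;
      if (groups[i].shapes.indexOf(shape) !== -1) return groups[i].handler(receiver);
      if (last) {
        // Fallthrough control path: the last map check deoptimizes instead of
        // branching, so unexpected shapes leave the optimized code entirely.
        throw new Error(`deopt: unexpected shape ${shape}`);
      }
    }
  };
}

const load = buildPolymorphicLoad([
  { shapes: ["A"], handler: (r) => r.x },
  { shapes: ["B", "C"], handler: (r) => r.y },
]);

console.log(load({ x: 1 }, "A")); // 1
console.log(load({ y: 2 }, "C")); // 2
// load({}, "D") would throw, standing in for an eager deoptimization exit.
```

The design point being illustrated is that only the last comparison needs to be able to deoptimize; the earlier groups simply fall through to the next check.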
<nl> Reduction JSNativeContextSpecialization : : ReducePropertyAccess ( <nl> node - > opcode ( ) = = IrOpcode : : kJSHasProperty | | <nl> node - > opcode ( ) = = IrOpcode : : kJSLoadNamed | | <nl> node - > opcode ( ) = = IrOpcode : : kJSStoreNamed | | <nl> - node - > opcode ( ) = = IrOpcode : : kJSStoreNamedOwn | | <nl> - node - > opcode ( ) = = IrOpcode : : kJSLoadNamedFromSuper ) ; <nl> + node - > opcode ( ) = = IrOpcode : : kJSStoreNamedOwn ) ; <nl> DCHECK_GE ( node - > op ( ) - > ControlOutputCount ( ) , 1 ) ; <nl> <nl> ProcessedFeedback const & feedback = <nl> Reduction JSNativeContextSpecialization : : ReducePropertyAccess ( <nl> case ProcessedFeedback : : kElementAccess : <nl> DCHECK_EQ ( feedback . AsElementAccess ( ) . keyed_mode ( ) . access_mode ( ) , <nl> access_mode ) ; <nl> - DCHECK_NE ( node - > opcode ( ) , IrOpcode : : kJSLoadNamedFromSuper ) ; <nl> return ReduceElementAccess ( node , key , value , feedback . AsElementAccess ( ) ) ; <nl> default : <nl> UNREACHABLE ( ) ; <nl> Node * JSNativeContextSpecialization : : InlineApiCall ( <nl> <nl> JSNativeContextSpecialization : : ValueEffectControl <nl> JSNativeContextSpecialization : : BuildPropertyLoad ( <nl> - Node * lookup_start_object , Node * receiver , Node * context , Node * frame_state , <nl> - Node * effect , Node * control , NameRef const & name , <nl> - ZoneVector < Node * > * if_exceptions , PropertyAccessInfo const & access_info ) { <nl> + Node * receiver , Node * context , Node * frame_state , Node * effect , <nl> + Node * control , NameRef const & name , ZoneVector < Node * > * if_exceptions , <nl> + PropertyAccessInfo const & access_info ) { <nl> / / Determine actual holder and perform prototype chain checks . <nl> Handle < JSObject > holder ; <nl> if ( access_info . holder ( ) . ToHandle ( & holder ) ) { <nl> dependencies ( ) - > DependOnStablePrototypeChains ( <nl> - access_info . lookup_start_object_maps ( ) , kStartAtPrototype , <nl> + access_info . receiver_maps ( ) , kStartAtPrototype , <nl> JSObjectRef ( broker ( ) , holder ) ) ; <nl> } <nl> <nl> JSNativeContextSpecialization : : BuildPropertyLoad ( <nl> graph ( ) - > NewNode ( simplified ( ) - > LoadField ( AccessBuilder : : ForCellValue ( ) ) , <nl> cell , effect , control ) ; <nl> } else if ( access_info . IsStringLength ( ) ) { <nl> - DCHECK_EQ ( receiver , lookup_start_object ) ; <nl> value = graph ( ) - > NewNode ( simplified ( ) - > StringLength ( ) , receiver ) ; <nl> } else { <nl> DCHECK ( access_info . IsDataField ( ) | | access_info . IsDataConstant ( ) ) ; <nl> PropertyAccessBuilder access_builder ( jsgraph ( ) , broker ( ) , dependencies ( ) ) ; <nl> - value = access_builder . BuildLoadDataField ( <nl> - name , access_info , lookup_start_object , & effect , & control ) ; <nl> + value = access_builder . BuildLoadDataField ( name , access_info , receiver , <nl> + & effect , & control ) ; <nl> } <nl> <nl> return ValueEffectControl ( value , effect , control ) ; <nl> JSNativeContextSpecialization : : BuildPropertyTest ( <nl> Handle < JSObject > holder ; <nl> if ( access_info . holder ( ) . ToHandle ( & holder ) ) { <nl> dependencies ( ) - > DependOnStablePrototypeChains ( <nl> - access_info . lookup_start_object_maps ( ) , kStartAtPrototype , <nl> + access_info . 
receiver_maps ( ) , kStartAtPrototype , <nl> JSObjectRef ( broker ( ) , holder ) ) ; <nl> } <nl> <nl> JSNativeContextSpecialization : : BuildPropertyTest ( <nl> <nl> JSNativeContextSpecialization : : ValueEffectControl <nl> JSNativeContextSpecialization : : BuildPropertyAccess ( <nl> - Node * lookup_start_object , Node * receiver , Node * value , Node * context , <nl> - Node * frame_state , Node * effect , Node * control , NameRef const & name , <nl> - ZoneVector < Node * > * if_exceptions , PropertyAccessInfo const & access_info , <nl> - AccessMode access_mode ) { <nl> + Node * receiver , Node * value , Node * context , Node * frame_state , Node * effect , <nl> + Node * control , NameRef const & name , ZoneVector < Node * > * if_exceptions , <nl> + PropertyAccessInfo const & access_info , AccessMode access_mode ) { <nl> switch ( access_mode ) { <nl> case AccessMode : : kLoad : <nl> - return BuildPropertyLoad ( lookup_start_object , receiver , context , <nl> - frame_state , effect , control , name , <nl> - if_exceptions , access_info ) ; <nl> + return BuildPropertyLoad ( receiver , context , frame_state , effect , control , <nl> + name , if_exceptions , access_info ) ; <nl> case AccessMode : : kStore : <nl> case AccessMode : : kStoreInLiteral : <nl> - DCHECK_EQ ( receiver , lookup_start_object ) ; <nl> return BuildPropertyStore ( receiver , value , context , frame_state , effect , <nl> control , name , if_exceptions , access_info , <nl> access_mode ) ; <nl> case AccessMode : : kHas : <nl> - DCHECK_EQ ( receiver , lookup_start_object ) ; <nl> return BuildPropertyTest ( effect , control , access_info ) ; <nl> } <nl> UNREACHABLE ( ) ; <nl> JSNativeContextSpecialization : : BuildPropertyStore ( <nl> if ( access_info . holder ( ) . ToHandle ( & holder ) ) { <nl> DCHECK_NE ( AccessMode : : kStoreInLiteral , access_mode ) ; <nl> dependencies ( ) - > DependOnStablePrototypeChains ( <nl> - access_info . lookup_start_object_maps ( ) , kStartAtPrototype , <nl> + access_info . receiver_maps ( ) , kStartAtPrototype , <nl> JSObjectRef ( broker ( ) , holder ) ) ; <nl> } <nl> <nl> JSNativeContextSpecialization : : BuildElementAccess ( <nl> / / TODO ( bmeurer ) : We currently specialize based on elements kind . We should <nl> / / also be able to properly support strings and other JSObjects here . <nl> ElementsKind elements_kind = access_info . elements_kind ( ) ; <nl> - ZoneVector < Handle < Map > > const & receiver_maps = <nl> - access_info . lookup_start_object_maps ( ) ; <nl> + ZoneVector < Handle < Map > > const & receiver_maps = access_info . receiver_maps ( ) ; <nl> <nl> if ( IsTypedArrayElementsKind ( elements_kind ) ) { <nl> Node * buffer_or_receiver = receiver ; <nl> bool JSNativeContextSpecialization : : CanTreatHoleAsUndefined ( <nl> return dependencies ( ) - > DependOnNoElementsProtector ( ) ; <nl> } <nl> <nl> - bool JSNativeContextSpecialization : : InferMaps ( <nl> - Node * object , Node * effect , ZoneVector < Handle < Map > > * maps ) const { <nl> - ZoneHandleSet < Map > map_set ; <nl> - NodeProperties : : InferMapsResult result = <nl> - NodeProperties : : InferMapsUnsafe ( broker ( ) , object , effect , & map_set ) ; <nl> - if ( result = = NodeProperties : : kReliableMaps ) { <nl> - for ( size_t i = 0 ; i < map_set . 
size ( ) ; + + i ) { <nl> - maps - > push_back ( map_set [ i ] ) ; <nl> + bool JSNativeContextSpecialization : : InferReceiverMaps ( <nl> + Node * receiver , Node * effect , <nl> + ZoneVector < Handle < Map > > * receiver_maps ) const { <nl> + ZoneHandleSet < Map > maps ; <nl> + NodeProperties : : InferReceiverMapsResult result = <nl> + NodeProperties : : InferReceiverMapsUnsafe ( broker ( ) , receiver , effect , <nl> + & maps ) ; <nl> + if ( result = = NodeProperties : : kReliableReceiverMaps ) { <nl> + for ( size_t i = 0 ; i < maps . size ( ) ; + + i ) { <nl> + receiver_maps - > push_back ( maps [ i ] ) ; <nl> } <nl> return true ; <nl> - } else if ( result = = NodeProperties : : kUnreliableMaps ) { <nl> - / / For untrusted maps , we can still use the information <nl> + } else if ( result = = NodeProperties : : kUnreliableReceiverMaps ) { <nl> + / / For untrusted receiver maps , we can still use the information <nl> / / if the maps are stable . <nl> - for ( size_t i = 0 ; i < map_set . size ( ) ; + + i ) { <nl> - MapRef map ( broker ( ) , map_set [ i ] ) ; <nl> + for ( size_t i = 0 ; i < maps . size ( ) ; + + i ) { <nl> + MapRef map ( broker ( ) , maps [ i ] ) ; <nl> if ( ! map . is_stable ( ) ) return false ; <nl> } <nl> - for ( size_t i = 0 ; i < map_set . size ( ) ; + + i ) { <nl> - maps - > push_back ( map_set [ i ] ) ; <nl> + for ( size_t i = 0 ; i < maps . size ( ) ; + + i ) { <nl> + receiver_maps - > push_back ( maps [ i ] ) ; <nl> } <nl> return true ; <nl> } <nl> return false ; <nl> } <nl> <nl> - base : : Optional < MapRef > JSNativeContextSpecialization : : InferRootMap ( <nl> - Node * object ) const { <nl> - HeapObjectMatcher m ( object ) ; <nl> + base : : Optional < MapRef > JSNativeContextSpecialization : : InferReceiverRootMap ( <nl> + Node * receiver ) const { <nl> + HeapObjectMatcher m ( receiver ) ; <nl> if ( m . HasResolvedValue ( ) ) { <nl> MapRef map = m . Ref ( broker ( ) ) . map ( ) ; <nl> return map . FindRootMap ( ) ; <nl> } else if ( m . IsJSCreate ( ) ) { <nl> base : : Optional < MapRef > initial_map = <nl> - NodeProperties : : GetJSCreateMap ( broker ( ) , object ) ; <nl> + NodeProperties : : GetJSCreateMap ( broker ( ) , receiver ) ; <nl> if ( initial_map . has_value ( ) ) { <nl> if ( ! initial_map - > FindRootMap ( ) . has_value ( ) ) { <nl> return base : : nullopt ; <nl> base : : Optional < MapRef > JSNativeContextSpecialization : : InferRootMap ( <nl> return base : : nullopt ; <nl> } <nl> <nl> - Node * JSNativeContextSpecialization : : BuildLoadPrototypeFromObject ( <nl> - Node * object , Node * effect , Node * control ) { <nl> - Node * map = effect = <nl> - graph ( ) - > NewNode ( simplified ( ) - > LoadField ( AccessBuilder : : ForMap ( ) ) , object , <nl> - effect , control ) ; <nl> - return graph ( ) - > NewNode ( <nl> - simplified ( ) - > LoadField ( AccessBuilder : : ForMapPrototype ( ) ) , map , effect , <nl> - control ) ; <nl> - } <nl> - <nl> Graph * JSNativeContextSpecialization : : graph ( ) const { <nl> return jsgraph ( ) - > graph ( ) ; <nl> } <nl> mmm a / src / compiler / js - native - context - specialization . h <nl> ppp b / src / compiler / js - native - context - specialization . 
h <nl> class V8_EXPORT_PRIVATE JSNativeContextSpecialization final <nl> Reduction ReduceJSLoadGlobal ( Node * node ) ; <nl> Reduction ReduceJSStoreGlobal ( Node * node ) ; <nl> Reduction ReduceJSLoadNamed ( Node * node ) ; <nl> - Reduction ReduceJSLoadNamedFromSuper ( Node * node ) ; <nl> Reduction ReduceJSGetIterator ( Node * node ) ; <nl> Reduction ReduceJSStoreNamed ( Node * node ) ; <nl> Reduction ReduceJSHasProperty ( Node * node ) ; <nl> class V8_EXPORT_PRIVATE JSNativeContextSpecialization final <nl> Reduction ReduceJSToObject ( Node * node ) ; <nl> <nl> Reduction ReduceElementAccess ( Node * node , Node * index , Node * value , <nl> - ElementAccessFeedback const & feedback ) ; <nl> + ElementAccessFeedback const & processed ) ; <nl> / / In the case of non - keyed ( named ) accesses , pass the name as { static_name } <nl> / / and use { nullptr } for { key } ( load / store modes are irrelevant ) . <nl> Reduction ReducePropertyAccess ( Node * node , Node * key , <nl> class V8_EXPORT_PRIVATE JSNativeContextSpecialization final <nl> Node * value , FeedbackSource const & source , <nl> AccessMode access_mode ) ; <nl> Reduction ReduceNamedAccess ( Node * node , Node * value , <nl> - NamedAccessFeedback const & feedback , <nl> + NamedAccessFeedback const & processed , <nl> AccessMode access_mode , Node * key = nullptr ) ; <nl> Reduction ReduceMinimorphicPropertyAccess ( <nl> Node * node , Node * value , <nl> MinimorphicLoadPropertyAccessFeedback const & feedback , <nl> FeedbackSource const & source ) ; <nl> - Reduction ReduceGlobalAccess ( Node * node , Node * lookup_start_object , <nl> - Node * receiver , Node * value , NameRef const & name , <nl> - AccessMode access_mode , Node * key = nullptr , <nl> - Node * effect = nullptr ) ; <nl> - Reduction ReduceGlobalAccess ( Node * node , Node * lookup_start_object , <nl> - Node * receiver , Node * value , NameRef const & name , <nl> - AccessMode access_mode , Node * key , <nl> - PropertyCellRef const & property_cell , <nl> - Node * effect = nullptr ) ; <nl> + Reduction ReduceGlobalAccess ( Node * node , Node * receiver , Node * value , <nl> + NameRef const & name , AccessMode access_mode , <nl> + Node * key = nullptr ) ; <nl> + Reduction ReduceGlobalAccess ( Node * node , Node * receiver , Node * value , <nl> + NameRef const & name , AccessMode access_mode , <nl> + Node * key , PropertyCellRef const & property_cell ) ; <nl> Reduction ReduceElementLoadFromHeapConstant ( Node * node , Node * key , <nl> AccessMode access_mode , <nl> KeyedAccessLoadMode load_mode ) ; <nl> class V8_EXPORT_PRIVATE JSNativeContextSpecialization final <nl> } ; <nl> <nl> / / Construct the appropriate subgraph for property access . 
<nl> - ValueEffectControl BuildPropertyAccess ( <nl> - Node * lookup_start_object , Node * receiver , Node * value , Node * context , <nl> - Node * frame_state , Node * effect , Node * control , NameRef const & name , <nl> - ZoneVector < Node * > * if_exceptions , PropertyAccessInfo const & access_info , <nl> - AccessMode access_mode ) ; <nl> - ValueEffectControl BuildPropertyLoad ( Node * lookup_start_object , <nl> - Node * receiver , Node * context , <nl> + ValueEffectControl BuildPropertyAccess ( Node * receiver , Node * value , <nl> + Node * context , Node * frame_state , <nl> + Node * effect , Node * control , <nl> + NameRef const & name , <nl> + ZoneVector < Node * > * if_exceptions , <nl> + PropertyAccessInfo const & access_info , <nl> + AccessMode access_mode ) ; <nl> + ValueEffectControl BuildPropertyLoad ( Node * receiver , Node * context , <nl> Node * frame_state , Node * effect , <nl> Node * control , NameRef const & name , <nl> ZoneVector < Node * > * if_exceptions , <nl> class V8_EXPORT_PRIVATE JSNativeContextSpecialization final <nl> / / code dependencies and might use the array protector cell . <nl> bool CanTreatHoleAsUndefined ( ZoneVector < Handle < Map > > const & receiver_maps ) ; <nl> <nl> - void RemoveImpossibleMaps ( Node * object , ZoneVector < Handle < Map > > * maps ) const ; <nl> + void RemoveImpossibleReceiverMaps ( <nl> + Node * receiver , ZoneVector < Handle < Map > > * receiver_maps ) const ; <nl> <nl> ElementAccessFeedback const & TryRefineElementAccessFeedback ( <nl> ElementAccessFeedback const & feedback , Node * receiver , <nl> Node * effect ) const ; <nl> <nl> - / / Try to infer maps for the given { object } at the current { effect } . <nl> - bool InferMaps ( Node * object , Node * effect , <nl> - ZoneVector < Handle < Map > > * maps ) const ; <nl> + / / Try to infer maps for the given { receiver } at the current { effect } . <nl> + bool InferReceiverMaps ( Node * receiver , Node * effect , <nl> + ZoneVector < Handle < Map > > * receiver_maps ) const ; <nl> <nl> - / / Try to infer a root map for the { object } independent of the current program <nl> - / / location . <nl> - base : : Optional < MapRef > InferRootMap ( Node * object ) const ; <nl> + / / Try to infer a root map for the { receiver } independent of the current <nl> + / / program location . <nl> + base : : Optional < MapRef > InferReceiverRootMap ( Node * receiver ) const ; <nl> <nl> / / Checks if we know at compile time that the { receiver } either definitely <nl> / / has the { prototype } in it ' s prototype chain , or the { receiver } definitely <nl> class V8_EXPORT_PRIVATE JSNativeContextSpecialization final <nl> InferHasInPrototypeChainResult InferHasInPrototypeChain ( <nl> Node * receiver , Node * effect , HeapObjectRef const & prototype ) ; <nl> <nl> - Node * BuildLoadPrototypeFromObject ( Node * object , Node * effect , Node * control ) ; <nl> - <nl> Graph * graph ( ) const ; <nl> JSGraph * jsgraph ( ) const { return jsgraph_ ; } <nl> <nl> mmm a / src / compiler / js - operator . cc <nl> ppp b / src / compiler / js - operator . 
cc <nl> const Operator * JSOperatorBuilder : : LoadNamed ( Handle < Name > name , <nl> access ) ; / / parameter <nl> } <nl> <nl> - const Operator * JSOperatorBuilder : : LoadNamedFromSuper ( <nl> - Handle < Name > name , const FeedbackSource & feedback ) { <nl> + const Operator * JSOperatorBuilder : : LoadNamedFromSuper ( Handle < Name > name ) { <nl> static constexpr int kReceiver = 1 ; <nl> static constexpr int kHomeObject = 1 ; <nl> - static constexpr int kFeedbackVector = 1 ; <nl> - static constexpr int kArity = kReceiver + kHomeObject + kFeedbackVector ; <nl> - NamedAccess access ( LanguageMode : : kSloppy , name , feedback ) ; <nl> + static constexpr int kArity = kReceiver + kHomeObject ; <nl> + / / TODO ( marja , v8 : 9237 ) : Use real feedback . <nl> + NamedAccess access ( LanguageMode : : kSloppy , name , FeedbackSource ( ) ) ; <nl> return zone ( ) - > New < Operator1 < NamedAccess > > ( / / - - <nl> IrOpcode : : kJSLoadNamedFromSuper , Operator : : kNoProperties , / / opcode <nl> " JSLoadNamedFromSuper " , / / name <nl> mmm a / src / compiler / js - operator . h <nl> ppp b / src / compiler / js - operator . h <nl> class V8_EXPORT_PRIVATE JSOperatorBuilder final <nl> <nl> const Operator * LoadProperty ( FeedbackSource const & feedback ) ; <nl> const Operator * LoadNamed ( Handle < Name > name , FeedbackSource const & feedback ) ; <nl> - const Operator * LoadNamedFromSuper ( Handle < Name > name , <nl> - FeedbackSource const & feedback ) ; <nl> + const Operator * LoadNamedFromSuper ( Handle < Name > name ) ; <nl> <nl> const Operator * StoreProperty ( LanguageMode language_mode , <nl> FeedbackSource const & feedback ) ; <nl> class JSLoadNamedFromSuperNode final : public JSNodeWrapperBase { <nl> <nl> const NamedAccess & Parameters ( ) const { return NamedAccessOf ( node ( ) - > op ( ) ) ; } <nl> <nl> - / / TODO ( marja , v8 : 9237 ) : A more intuitive order would be ( home_object , <nl> - / / receiver , feedback_vector ) . The order can be changed once we no longer <nl> - / / delegate to Runtime_LoadFromSuper . <nl> - # define INPUTS ( V ) \ <nl> - V ( Receiver , receiver , 0 , Object ) \ <nl> - V ( HomeObject , home_object , 1 , Object ) \ <nl> - V ( FeedbackVector , feedback_vector , 2 , HeapObject ) <nl> + # define INPUTS ( V ) \ <nl> + V ( Receiver , receiver , 0 , Object ) \ <nl> + V ( Object , home_object , 1 , Object ) <nl> INPUTS ( DEFINE_INPUT_ACCESSORS ) <nl> # undef INPUTS <nl> } ; <nl> mmm a / src / compiler / js - type - hint - lowering . cc <nl> ppp b / src / compiler / js - type - hint - lowering . cc <nl> JSTypeHintLowering : : ReduceGetIteratorOperation ( const Operator * op , <nl> } <nl> <nl> JSTypeHintLowering : : LoweringResult JSTypeHintLowering : : ReduceLoadNamedOperation ( <nl> - const Operator * op , Node * effect , Node * control , FeedbackSlot slot ) const { <nl> - DCHECK ( op - > opcode ( ) = = IrOpcode : : kJSLoadNamed | | <nl> - op - > opcode ( ) = = IrOpcode : : kJSLoadNamedFromSuper ) ; <nl> + const Operator * op , Node * receiver , Node * effect , Node * control , <nl> + FeedbackSlot slot ) const { <nl> + DCHECK_EQ ( IrOpcode : : kJSLoadNamed , op - > opcode ( ) ) ; <nl> if ( Node * node = TryBuildSoftDeopt ( <nl> slot , effect , control , <nl> DeoptimizeReason : : kInsufficientTypeFeedbackForGenericNamedAccess ) ) { <nl> mmm a / src / compiler / js - type - hint - lowering . h <nl> ppp b / src / compiler / js - type - hint - lowering . 
h <nl> class JSTypeHintLowering { <nl> FeedbackSlot call_slot ) const ; <nl> <nl> / / Potential reduction of property access operations . <nl> - LoweringResult ReduceLoadNamedOperation ( const Operator * op , Node * effect , <nl> - Node * control , <nl> + LoweringResult ReduceLoadNamedOperation ( const Operator * op , Node * obj , <nl> + Node * effect , Node * control , <nl> FeedbackSlot slot ) const ; <nl> LoweringResult ReduceLoadKeyedOperation ( const Operator * op , Node * obj , <nl> Node * key , Node * effect , <nl> mmm a / src / compiler / map - inference . cc <nl> ppp b / src / compiler / map - inference . cc <nl> MapInference : : MapInference ( JSHeapBroker * broker , Node * object , Node * effect ) <nl> : broker_ ( broker ) , object_ ( object ) { <nl> ZoneHandleSet < Map > maps ; <nl> auto result = <nl> - NodeProperties : : InferMapsUnsafe ( broker_ , object_ , effect , & maps ) ; <nl> + NodeProperties : : InferReceiverMapsUnsafe ( broker_ , object_ , effect , & maps ) ; <nl> maps_ . insert ( maps_ . end ( ) , maps . begin ( ) , maps . end ( ) ) ; <nl> - maps_state_ = ( result = = NodeProperties : : kUnreliableMaps ) <nl> + maps_state_ = ( result = = NodeProperties : : kUnreliableReceiverMaps ) <nl> ? kUnreliableDontNeedGuard <nl> : kReliableOrGuarded ; <nl> - DCHECK_EQ ( maps_ . empty ( ) , result = = NodeProperties : : kNoMaps ) ; <nl> + DCHECK_EQ ( maps_ . empty ( ) , result = = NodeProperties : : kNoReceiverMaps ) ; <nl> } <nl> <nl> MapInference : : ~ MapInference ( ) { CHECK ( Safe ( ) ) ; } <nl> mmm a / src / compiler / node - properties . cc <nl> ppp b / src / compiler / node - properties . cc <nl> base : : Optional < MapRef > NodeProperties : : GetJSCreateMap ( JSHeapBroker * broker , <nl> } <nl> <nl> / / static <nl> - NodeProperties : : InferMapsResult NodeProperties : : InferMapsUnsafe ( <nl> + NodeProperties : : InferReceiverMapsResult NodeProperties : : InferReceiverMapsUnsafe ( <nl> JSHeapBroker * broker , Node * receiver , Node * effect , <nl> ZoneHandleSet < Map > * maps_return ) { <nl> HeapObjectMatcher m ( receiver ) ; <nl> NodeProperties : : InferMapsResult NodeProperties : : InferMapsUnsafe ( <nl> / / The { receiver_map } is only reliable when we install a stability <nl> / / code dependency . <nl> * maps_return = ZoneHandleSet < Map > ( receiver . map ( ) . object ( ) ) ; <nl> - return kUnreliableMaps ; <nl> + return kUnreliableReceiverMaps ; <nl> } <nl> } <nl> } <nl> - InferMapsResult result = kReliableMaps ; <nl> + InferReceiverMapsResult result = kReliableReceiverMaps ; <nl> while ( true ) { <nl> switch ( effect - > opcode ( ) ) { <nl> case IrOpcode : : kMapGuard : { <nl> NodeProperties : : InferMapsResult NodeProperties : : InferMapsUnsafe ( <nl> return result ; <nl> } <nl> / / We reached the allocation of the { receiver } . <nl> - return kNoMaps ; <nl> + return kNoReceiverMaps ; <nl> } <nl> - result = kUnreliableMaps ; / / JSCreate can have side - effect . <nl> + result = kUnreliableReceiverMaps ; / / JSCreate can have side - effect . <nl> break ; <nl> } <nl> case IrOpcode : : kJSCreatePromise : { <nl> NodeProperties : : InferMapsResult NodeProperties : : InferMapsUnsafe ( <nl> } <nl> / / Without alias analysis we cannot tell whether this <nl> / / StoreField [ map ] affects { receiver } or not . <nl> - result = kUnreliableMaps ; <nl> + result = kUnreliableReceiverMaps ; <nl> } <nl> break ; <nl> } <nl> NodeProperties : : InferMapsResult NodeProperties : : InferMapsUnsafe ( <nl> if ( control - > opcode ( ) ! 
= IrOpcode : : kLoop ) { <nl> DCHECK ( control - > opcode ( ) = = IrOpcode : : kDead | | <nl> control - > opcode ( ) = = IrOpcode : : kMerge ) ; <nl> - return kNoMaps ; <nl> + return kNoReceiverMaps ; <nl> } <nl> <nl> / / Continue search for receiver map outside the loop . Since operations <nl> / / inside the loop may change the map , the result is unreliable . <nl> effect = GetEffectInput ( effect , 0 ) ; <nl> - result = kUnreliableMaps ; <nl> + result = kUnreliableReceiverMaps ; <nl> continue ; <nl> } <nl> default : { <nl> DCHECK_EQ ( 1 , effect - > op ( ) - > EffectOutputCount ( ) ) ; <nl> if ( effect - > op ( ) - > EffectInputCount ( ) ! = 1 ) { <nl> / / Didn ' t find any appropriate CheckMaps node . <nl> - return kNoMaps ; <nl> + return kNoReceiverMaps ; <nl> } <nl> if ( ! effect - > op ( ) - > HasProperty ( Operator : : kNoWrite ) ) { <nl> / / Without alias / escape analysis we cannot tell whether this <nl> / / { effect } affects { receiver } or not . <nl> - result = kUnreliableMaps ; <nl> + result = kUnreliableReceiverMaps ; <nl> } <nl> break ; <nl> } <nl> NodeProperties : : InferMapsResult NodeProperties : : InferMapsUnsafe ( <nl> <nl> / / Stop walking the effect chain once we hit the definition of <nl> / / the { receiver } along the { effect } s . <nl> - if ( IsSame ( receiver , effect ) ) return kNoMaps ; <nl> + if ( IsSame ( receiver , effect ) ) return kNoReceiverMaps ; <nl> <nl> / / Continue with the next { effect } . <nl> DCHECK_EQ ( 1 , effect - > op ( ) - > EffectInputCount ( ) ) ; <nl> mmm a / src / compiler / node - properties . h <nl> ppp b / src / compiler / node - properties . h <nl> class V8_EXPORT_PRIVATE NodeProperties final { <nl> / / Walks up the { effect } chain to find a witness that provides map <nl> / / information about the { receiver } . Can look through potentially <nl> / / side effecting nodes . <nl> - enum InferMapsResult { <nl> - kNoMaps , / / No maps inferred . <nl> - kReliableMaps , / / Maps can be trusted . <nl> - kUnreliableMaps / / Maps might have changed ( side - effect ) . <nl> + enum InferReceiverMapsResult { <nl> + kNoReceiverMaps , / / No receiver maps inferred . <nl> + kReliableReceiverMaps , / / Receiver maps can be trusted . <nl> + kUnreliableReceiverMaps / / Receiver maps might have changed ( side - effect ) . <nl> } ; <nl> - / / DO NOT USE InferMapsUnsafe IN NEW CODE . Use MapInference instead . <nl> - static InferMapsResult InferMapsUnsafe ( JSHeapBroker * broker , Node * object , <nl> - Node * effect , <nl> - ZoneHandleSet < Map > * maps ) ; <nl> + / / DO NOT USE InferReceiverMapsUnsafe IN NEW CODE . Use MapInference instead . <nl> + static InferReceiverMapsResult InferReceiverMapsUnsafe ( <nl> + JSHeapBroker * broker , Node * receiver , Node * effect , <nl> + ZoneHandleSet < Map > * maps_return ) ; <nl> <nl> / / Return the initial map of the new - target if the allocation can be inlined . <nl> static base : : Optional < MapRef > GetJSCreateMap ( JSHeapBroker * broker , <nl> mmm a / src / compiler / opcodes . h <nl> ppp b / src / compiler / opcodes . h <nl> class V8_EXPORT_PRIVATE IrOpcode { <nl> case kJSInstanceOf : <nl> case kJSLoadGlobal : <nl> case kJSLoadNamed : <nl> - case kJSLoadNamedFromSuper : <nl> case kJSLoadProperty : <nl> case kJSStoreDataPropertyInLiteral : <nl> case kJSStoreGlobal : <nl> mmm a / src / compiler / property - access - builder . cc <nl> ppp b / src / compiler / property - access - builder . 
cc <nl> bool PropertyAccessBuilder : : TryBuildNumberCheck ( <nl> } <nl> <nl> void PropertyAccessBuilder : : BuildCheckMaps ( <nl> - Node * object , Node * * effect , Node * control , <nl> - ZoneVector < Handle < Map > > const & maps ) { <nl> - HeapObjectMatcher m ( object ) ; <nl> + Node * receiver , Node * * effect , Node * control , <nl> + ZoneVector < Handle < Map > > const & receiver_maps ) { <nl> + HeapObjectMatcher m ( receiver ) ; <nl> if ( m . HasResolvedValue ( ) ) { <nl> - MapRef object_map = m . Ref ( broker ( ) ) . map ( ) ; <nl> - if ( object_map . is_stable ( ) ) { <nl> - for ( Handle < Map > map : maps ) { <nl> - if ( MapRef ( broker ( ) , map ) . equals ( object_map ) ) { <nl> - dependencies ( ) - > DependOnStableMap ( object_map ) ; <nl> + MapRef receiver_map = m . Ref ( broker ( ) ) . map ( ) ; <nl> + if ( receiver_map . is_stable ( ) ) { <nl> + for ( Handle < Map > map : receiver_maps ) { <nl> + if ( MapRef ( broker ( ) , map ) . equals ( receiver_map ) ) { <nl> + dependencies ( ) - > DependOnStableMap ( receiver_map ) ; <nl> return ; <nl> } <nl> } <nl> } <nl> } <nl> - ZoneHandleSet < Map > map_set ; <nl> + ZoneHandleSet < Map > maps ; <nl> CheckMapsFlags flags = CheckMapsFlag : : kNone ; <nl> - for ( Handle < Map > map : maps ) { <nl> - MapRef object_map ( broker ( ) , map ) ; <nl> - map_set . insert ( object_map . object ( ) , graph ( ) - > zone ( ) ) ; <nl> - if ( object_map . is_migration_target ( ) ) { <nl> + for ( Handle < Map > map : receiver_maps ) { <nl> + MapRef receiver_map ( broker ( ) , map ) ; <nl> + maps . insert ( receiver_map . object ( ) , graph ( ) - > zone ( ) ) ; <nl> + if ( receiver_map . is_migration_target ( ) ) { <nl> flags | = CheckMapsFlag : : kTryMigrateInstance ; <nl> } <nl> } <nl> - * effect = graph ( ) - > NewNode ( simplified ( ) - > CheckMaps ( flags , map_set ) , object , <nl> + * effect = graph ( ) - > NewNode ( simplified ( ) - > CheckMaps ( flags , maps ) , receiver , <nl> * effect , control ) ; <nl> } <nl> <nl> Node * PropertyAccessBuilder : : BuildCheckValue ( Node * receiver , Effect * effect , <nl> } <nl> <nl> Node * PropertyAccessBuilder : : ResolveHolder ( <nl> - PropertyAccessInfo const & access_info , Node * lookup_start_object ) { <nl> + PropertyAccessInfo const & access_info , Node * receiver ) { <nl> Handle < JSObject > holder ; <nl> if ( access_info . holder ( ) . ToHandle ( & holder ) ) { <nl> return jsgraph ( ) - > Constant ( ObjectRef ( broker ( ) , holder ) ) ; <nl> } <nl> - return lookup_start_object ; <nl> + return receiver ; <nl> } <nl> <nl> MachineRepresentation PropertyAccessBuilder : : ConvertRepresentation ( <nl> MachineRepresentation PropertyAccessBuilder : : ConvertRepresentation ( <nl> <nl> Node * PropertyAccessBuilder : : TryBuildLoadConstantDataField ( <nl> NameRef const & name , PropertyAccessInfo const & access_info , <nl> - Node * lookup_start_object ) { <nl> + Node * receiver ) { <nl> if ( ! access_info . IsDataConstant ( ) ) return nullptr ; <nl> <nl> / / First , determine if we have a constant holder to load from . <nl> Handle < JSObject > holder ; <nl> / / If { access_info } has a holder , just use it . <nl> if ( ! access_info . holder ( ) . ToHandle ( & holder ) ) { <nl> - / / Otherwise , try to match the { lookup_start_object } as a constant . <nl> - HeapObjectMatcher m ( lookup_start_object ) ; <nl> + / / Otherwise , try to match the { receiver } as a constant . <nl> + HeapObjectMatcher m ( receiver ) ; <nl> if ( ! m . HasResolvedValue ( ) | | ! m . Ref ( broker ( ) ) . 
IsJSObject ( ) ) return nullptr ; <nl> <nl> - / / Let us make sure the actual map of the constant lookup_start_object is <nl> - / / among the maps in { access_info } . <nl> - MapRef lookup_start_object_map = m . Ref ( broker ( ) ) . map ( ) ; <nl> - if ( std : : find_if ( <nl> - access_info . lookup_start_object_maps ( ) . begin ( ) , <nl> - access_info . lookup_start_object_maps ( ) . end ( ) , [ & ] ( Handle < Map > map ) { <nl> - return MapRef ( broker ( ) , map ) . equals ( lookup_start_object_map ) ; <nl> - } ) = = access_info . lookup_start_object_maps ( ) . end ( ) ) { <nl> - / / The map of the lookup_start_object is not in the feedback , let us bail <nl> - / / out . <nl> + / / Let us make sure the actual map of the constant receiver is among <nl> + / / the maps in { access_info } . <nl> + MapRef receiver_map = m . Ref ( broker ( ) ) . map ( ) ; <nl> + if ( std : : find_if ( access_info . receiver_maps ( ) . begin ( ) , <nl> + access_info . receiver_maps ( ) . end ( ) , [ & ] ( Handle < Map > map ) { <nl> + return MapRef ( broker ( ) , map ) . equals ( receiver_map ) ; <nl> + } ) = = access_info . receiver_maps ( ) . end ( ) ) { <nl> + / / The map of the receiver is not in the feedback , let us bail out . <nl> return nullptr ; <nl> } <nl> holder = m . Ref ( broker ( ) ) . AsJSObject ( ) . object ( ) ; <nl> Node * PropertyAccessBuilder : : BuildLoadDataField ( NameRef const & name , <nl> <nl> Node * PropertyAccessBuilder : : BuildMinimorphicLoadDataField ( <nl> NameRef const & name , MinimorphicLoadPropertyAccessInfo const & access_info , <nl> - Node * lookup_start_object , Node * * effect , Node * * control ) { <nl> + Node * receiver , Node * * effect , Node * * control ) { <nl> DCHECK_NULL ( dependencies ( ) ) ; <nl> MachineRepresentation const field_representation = <nl> ConvertRepresentation ( access_info . field_representation ( ) ) ; <nl> Node * PropertyAccessBuilder : : BuildMinimorphicLoadDataField ( <nl> kFullWriteBarrier , <nl> LoadSensitivity : : kCritical , <nl> ConstFieldInfo : : None ( ) } ; <nl> - return BuildLoadDataField ( name , lookup_start_object , field_access , <nl> + return BuildLoadDataField ( name , receiver , field_access , <nl> access_info . is_inobject ( ) , effect , control ) ; <nl> } <nl> <nl> Node * PropertyAccessBuilder : : BuildLoadDataField ( <nl> - NameRef const & name , PropertyAccessInfo const & access_info , <nl> - Node * lookup_start_object , Node * * effect , Node * * control ) { <nl> + NameRef const & name , PropertyAccessInfo const & access_info , Node * receiver , <nl> + Node * * effect , Node * * control ) { <nl> DCHECK ( access_info . IsDataField ( ) | | access_info . IsDataConstant ( ) ) ; <nl> - if ( Node * value = TryBuildLoadConstantDataField ( name , access_info , <nl> - lookup_start_object ) ) { <nl> + if ( Node * value = <nl> + TryBuildLoadConstantDataField ( name , access_info , receiver ) ) { <nl> return value ; <nl> } <nl> <nl> MachineRepresentation const field_representation = <nl> ConvertRepresentation ( access_info . field_representation ( ) ) ; <nl> - Node * storage = ResolveHolder ( access_info , lookup_start_object ) ; <nl> + Node * storage = ResolveHolder ( access_info , receiver ) ; <nl> <nl> FieldAccess field_access = { <nl> kTaggedBase , <nl> mmm a / src / compiler / property - access - builder . h <nl> ppp b / src / compiler / property - access - builder . h <nl> class PropertyAccessBuilder { <nl> <nl> / / TODO ( jgruber ) : Remove the untyped version once all uses are <nl> / / updated . 
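BuildCheckMaps above has a compile-time fast path: when the receiver is a heap constant whose map is stable and already among the expected maps, it records a stability dependency instead of emitting a runtime check. A hedged TypeScript sketch of that decision (ObjectMap, Dependencies and the callback parameters are illustrative stand-ins, not the V8 API):

```typescript
// Illustrative sketch, not V8 code: skip the runtime map check when the
// receiver's map is known at compile time, is stable, and is among the
// expected maps; otherwise fall back to an explicit check.
interface ObjectMap { id: string; stable: boolean }
interface Dependencies { dependOnStableMap(m: ObjectMap): void }

function buildCheckMaps(
  constantReceiverMap: ObjectMap | null,        // known only for heap constants
  expectedMaps: ObjectMap[],
  deps: Dependencies,
  emitRuntimeCheck: (maps: ObjectMap[]) => void,
): void {
  if (constantReceiverMap !== null && constantReceiverMap.stable) {
    const id = constantReceiverMap.id;
    if (expectedMaps.some((m) => m.id === id)) {
      // Compile-time proof is enough; guard it with a code dependency.
      deps.dependOnStableMap(constantReceiverMap);
      return;
    }
  }
  emitRuntimeCheck(expectedMaps);
}

// Tiny usage example:
const recorded: string[] = [];
buildCheckMaps(
  { id: "m1", stable: true },
  [{ id: "m1", stable: true }, { id: "m2", stable: false }],
  { dependOnStableMap: (m) => { recorded.push(m.id); } },
  () => { recorded.push("runtime CheckMaps"); },
);
console.log(recorded); // ["m1"] -- no runtime check was emitted
```

The trade-off mirrors the pattern in the patch: the check disappears from the hot path, at the cost of having to discard the optimized code if that stable map later changes.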
<nl> - void BuildCheckMaps ( Node * object , Node * * effect , Node * control , <nl> - ZoneVector < Handle < Map > > const & maps ) ; <nl> - void BuildCheckMaps ( Node * object , Effect * effect , Control control , <nl> - ZoneVector < Handle < Map > > const & maps ) { <nl> + void BuildCheckMaps ( Node * receiver , Node * * effect , Node * control , <nl> + ZoneVector < Handle < Map > > const & receiver_maps ) ; <nl> + void BuildCheckMaps ( Node * receiver , Effect * effect , Control control , <nl> + ZoneVector < Handle < Map > > const & receiver_maps ) { <nl> Node * e = * effect ; <nl> Node * c = control ; <nl> - BuildCheckMaps ( object , & e , c , maps ) ; <nl> + BuildCheckMaps ( receiver , & e , c , receiver_maps ) ; <nl> * effect = e ; <nl> } <nl> Node * BuildCheckValue ( Node * receiver , Effect * effect , Control control , <nl> class PropertyAccessBuilder { <nl> / / properties ( without heap - object or map checks ) . <nl> Node * BuildLoadDataField ( NameRef const & name , <nl> PropertyAccessInfo const & access_info , <nl> - Node * lookup_start_object , Node * * effect , <nl> - Node * * control ) ; <nl> + Node * receiver , Node * * effect , Node * * control ) ; <nl> <nl> / / Builds the load for data - field access for minimorphic loads that use <nl> / / dynamic map checks . These cannot depend on any information from the maps . <nl> Node * BuildMinimorphicLoadDataField ( <nl> NameRef const & name , MinimorphicLoadPropertyAccessInfo const & access_info , <nl> - Node * lookup_start_object , Node * * effect , Node * * control ) ; <nl> + Node * receiver , Node * * effect , Node * * control ) ; <nl> <nl> static MachineRepresentation ConvertRepresentation ( <nl> Representation representation ) ; <nl> class PropertyAccessBuilder { <nl> <nl> Node * TryBuildLoadConstantDataField ( NameRef const & name , <nl> PropertyAccessInfo const & access_info , <nl> - Node * lookup_start_object ) ; <nl> + Node * receiver ) ; <nl> / / Returns a node with the holder for the property access described by <nl> / / { access_info } . <nl> - Node * ResolveHolder ( PropertyAccessInfo const & access_info , <nl> - Node * lookup_start_object ) ; <nl> + Node * ResolveHolder ( PropertyAccessInfo const & access_info , Node * receiver ) ; <nl> <nl> Node * BuildLoadDataField ( NameRef const & name , Node * holder , <nl> FieldAccess & field_access , bool is_inobject , <nl> mmm a / src / compiler / serializer - for - background - compilation . cc <nl> ppp b / src / compiler / serializer - for - background - compilation . 
cc <nl> class SerializerForBackgroundCompilation { <nl> bool honor_bailout_on_uninitialized ) ; <nl> void ProcessNamedPropertyAccess ( Hints * receiver , NameRef const & name , <nl> FeedbackSlot slot , AccessMode access_mode ) ; <nl> - void ProcessNamedSuperPropertyAccess ( Hints * receiver , NameRef const & name , <nl> - FeedbackSlot slot , <nl> - AccessMode access_mode ) ; <nl> void ProcessNamedAccess ( Hints * receiver , NamedAccessFeedback const & feedback , <nl> AccessMode access_mode , Hints * result_hints ) ; <nl> - void ProcessNamedSuperAccess ( Hints * receiver , <nl> - NamedAccessFeedback const & feedback , <nl> - AccessMode access_mode , Hints * result_hints ) ; <nl> void ProcessElementAccess ( Hints const & receiver , Hints const & key , <nl> ElementAccessFeedback const & feedback , <nl> AccessMode access_mode ) ; <nl> class SerializerForBackgroundCompilation { <nl> bool honor_bailout_on_uninitialized ) ; <nl> <nl> PropertyAccessInfo ProcessMapForNamedPropertyAccess ( <nl> - Hints * receiver , base : : Optional < MapRef > receiver_map , <nl> - MapRef lookup_start_object_map , NameRef const & name , <nl> + Hints * receiver , MapRef receiver_map , NameRef const & name , <nl> AccessMode access_mode , base : : Optional < JSObjectRef > concrete_receiver , <nl> Hints * result_hints ) ; <nl> <nl> void SerializerForBackgroundCompilation : : ProcessCallOrConstruct ( <nl> arguments - > insert ( arguments - > begin ( ) , result_hints_from_new_target ) ; <nl> } <nl> <nl> - / / For JSNativeContextSpecialization : : InferRootMap <nl> + / / For JSNativeContextSpecialization : : InferReceiverRootMap <nl> Hints new_accumulator_hints = result_hints_from_new_target . Copy ( zone ( ) ) ; <nl> <nl> ProcessCallOrConstructRecursive ( callee , new_target , * arguments , <nl> void SerializerForBackgroundCompilation : : ProcessUnaryOrBinaryOperation ( <nl> <nl> PropertyAccessInfo <nl> SerializerForBackgroundCompilation : : ProcessMapForNamedPropertyAccess ( <nl> - Hints * receiver , base : : Optional < MapRef > receiver_map , <nl> - MapRef lookup_start_object_map , NameRef const & name , AccessMode access_mode , <nl> - base : : Optional < JSObjectRef > concrete_receiver , Hints * result_hints ) { <nl> - DCHECK_IMPLIES ( concrete_receiver . has_value ( ) , receiver_map . has_value ( ) ) ; <nl> - <nl> - / / For JSNativeContextSpecialization : : InferRootMap <nl> - lookup_start_object_map . SerializeRootMap ( ) ; <nl> + Hints * receiver , MapRef receiver_map , NameRef const & name , <nl> + AccessMode access_mode , base : : Optional < JSObjectRef > concrete_receiver , <nl> + Hints * result_hints ) { <nl> + / / For JSNativeContextSpecialization : : InferReceiverRootMap <nl> + receiver_map . SerializeRootMap ( ) ; <nl> <nl> / / For JSNativeContextSpecialization : : ReduceNamedAccess . <nl> JSGlobalProxyRef global_proxy = <nl> broker ( ) - > target_native_context ( ) . global_proxy_object ( ) ; <nl> JSGlobalObjectRef global_object = <nl> broker ( ) - > target_native_context ( ) . global_object ( ) ; <nl> - if ( lookup_start_object_map . equals ( global_proxy . map ( ) ) ) { <nl> + if ( receiver_map . equals ( global_proxy . map ( ) ) ) { <nl> base : : Optional < PropertyCellRef > cell = global_object . GetPropertyCell ( <nl> name , SerializationPolicy : : kSerializeIfNeeded ) ; <nl> if ( access_mode = = AccessMode : : kLoad & & cell . 
has_value ( ) ) { <nl> SerializerForBackgroundCompilation : : ProcessMapForNamedPropertyAccess ( <nl> } <nl> <nl> PropertyAccessInfo access_info = broker ( ) - > GetPropertyAccessInfo ( <nl> - lookup_start_object_map , name , access_mode , dependencies ( ) , <nl> + receiver_map , name , access_mode , dependencies ( ) , <nl> SerializationPolicy : : kSerializeIfNeeded ) ; <nl> <nl> / / For JSNativeContextSpecialization : : InlinePropertySetterCall <nl> SerializerForBackgroundCompilation : : ProcessMapForNamedPropertyAccess ( <nl> if ( access_info . constant ( ) - > IsJSFunction ( ) ) { <nl> JSFunctionRef function ( broker ( ) , access_info . constant ( ) ) ; <nl> <nl> - if ( receiver_map . has_value ( ) ) { <nl> - / / For JSCallReducer and JSInlining ( Heuristic ) . <nl> - HintsVector arguments ( <nl> - { Hints : : SingleMap ( receiver_map - > object ( ) , zone ( ) ) } , zone ( ) ) ; <nl> - / / In the case of a setter any added result hints won ' t make sense , but <nl> - / / they will be ignored anyways by Process * PropertyAccess due to the <nl> - / / access mode not being kLoad . <nl> - ProcessCalleeForCallOrConstruct ( <nl> - function . object ( ) , base : : nullopt , arguments , <nl> - SpeculationMode : : kDisallowSpeculation , <nl> - kMissingArgumentsAreUndefined , result_hints ) ; <nl> - <nl> - / / For JSCallReducer : : ReduceCallApiFunction . <nl> - Handle < SharedFunctionInfo > sfi = function . shared ( ) . object ( ) ; <nl> - if ( sfi - > IsApiFunction ( ) ) { <nl> - FunctionTemplateInfoRef fti_ref ( <nl> - broker ( ) , handle ( sfi - > get_api_func_data ( ) , broker ( ) - > isolate ( ) ) ) ; <nl> - if ( fti_ref . has_call_code ( ) ) { <nl> - fti_ref . SerializeCallCode ( ) ; <nl> - ProcessReceiverMapForApiCall ( fti_ref , receiver_map - > object ( ) ) ; <nl> - } <nl> + / / For JSCallReducer and JSInlining ( Heuristic ) . <nl> + HintsVector arguments ( { Hints : : SingleMap ( receiver_map . object ( ) , zone ( ) ) } , <nl> + zone ( ) ) ; <nl> + / / In the case of a setter any added result hints won ' t make sense , but <nl> + / / they will be ignored anyways by Process * PropertyAccess due to the <nl> + / / access mode not being kLoad . <nl> + ProcessCalleeForCallOrConstruct ( <nl> + function . object ( ) , base : : nullopt , arguments , <nl> + SpeculationMode : : kDisallowSpeculation , kMissingArgumentsAreUndefined , <nl> + result_hints ) ; <nl> + <nl> + / / For JSCallReducer : : ReduceCallApiFunction . <nl> + Handle < SharedFunctionInfo > sfi = function . shared ( ) . object ( ) ; <nl> + if ( sfi - > IsApiFunction ( ) ) { <nl> + FunctionTemplateInfoRef fti_ref ( <nl> + broker ( ) , handle ( sfi - > get_api_func_data ( ) , broker ( ) - > isolate ( ) ) ) ; <nl> + if ( fti_ref . has_call_code ( ) ) { <nl> + fti_ref . SerializeCallCode ( ) ; <nl> + ProcessReceiverMapForApiCall ( fti_ref , receiver_map . object ( ) ) ; <nl> } <nl> } <nl> } else if ( access_info . constant ( ) - > IsJSBoundFunction ( ) ) { <nl> SerializerForBackgroundCompilation : : ProcessMapForNamedPropertyAccess ( <nl> holder = JSObjectRef ( broker ( ) , prototype ) ; <nl> } else { <nl> CHECK_IMPLIES ( concrete_receiver . has_value ( ) , <nl> - concrete_receiver - > map ( ) . equals ( * receiver_map ) ) ; <nl> + concrete_receiver - > map ( ) . 
equals ( receiver_map ) ) ; <nl> holder = concrete_receiver ; <nl> } <nl> <nl> void SerializerForBackgroundCompilation : : ProcessNamedPropertyAccess ( <nl> } <nl> } <nl> <nl> - void SerializerForBackgroundCompilation : : ProcessNamedSuperPropertyAccess ( <nl> - Hints * receiver , NameRef const & name , FeedbackSlot slot , <nl> - AccessMode access_mode ) { <nl> - if ( slot . IsInvalid ( ) | | feedback_vector ( ) . is_null ( ) ) return ; <nl> - FeedbackSource source ( feedback_vector ( ) , slot ) ; <nl> - ProcessedFeedback const & feedback = <nl> - broker ( ) - > ProcessFeedbackForPropertyAccess ( source , access_mode , name ) ; <nl> - if ( BailoutOnUninitialized ( feedback ) ) return ; <nl> - <nl> - Hints new_accumulator_hints ; <nl> - switch ( feedback . kind ( ) ) { <nl> - case ProcessedFeedback : : kNamedAccess : <nl> - DCHECK ( name . equals ( feedback . AsNamedAccess ( ) . name ( ) ) ) ; <nl> - ProcessNamedSuperAccess ( receiver , feedback . AsNamedAccess ( ) , access_mode , <nl> - & new_accumulator_hints ) ; <nl> - break ; <nl> - case ProcessedFeedback : : kMinimorphicPropertyAccess : <nl> - DCHECK ( name . equals ( feedback . AsMinimorphicPropertyAccess ( ) . name ( ) ) ) ; <nl> - ProcessMinimorphicPropertyAccess ( feedback . AsMinimorphicPropertyAccess ( ) , <nl> - source ) ; <nl> - break ; <nl> - case ProcessedFeedback : : kInsufficient : <nl> - break ; <nl> - default : <nl> - UNREACHABLE ( ) ; <nl> - } <nl> - <nl> - if ( access_mode = = AccessMode : : kLoad ) { <nl> - environment ( ) - > accumulator_hints ( ) = new_accumulator_hints ; <nl> - } <nl> - } <nl> - <nl> void SerializerForBackgroundCompilation : : ProcessNamedAccess ( <nl> Hints * receiver , NamedAccessFeedback const & feedback , <nl> AccessMode access_mode , Hints * result_hints ) { <nl> void SerializerForBackgroundCompilation : : ProcessNamedAccess ( <nl> for ( Handle < Map > map : <nl> GetRelevantReceiverMaps ( broker ( ) - > isolate ( ) , receiver - > maps ( ) ) ) { <nl> MapRef map_ref ( broker ( ) , map ) ; <nl> - ProcessMapForNamedPropertyAccess ( receiver , map_ref , map_ref , <nl> - feedback . name ( ) , access_mode , <nl> - base : : nullopt , result_hints ) ; <nl> + ProcessMapForNamedPropertyAccess ( receiver , map_ref , feedback . name ( ) , <nl> + access_mode , base : : nullopt , result_hints ) ; <nl> } <nl> <nl> for ( Handle < Object > hint : receiver - > constants ( ) ) { <nl> ObjectRef object ( broker ( ) , hint ) ; <nl> if ( access_mode = = AccessMode : : kLoad & & object . IsJSObject ( ) ) { <nl> MapRef map_ref = object . AsJSObject ( ) . map ( ) ; <nl> - ProcessMapForNamedPropertyAccess ( receiver , map_ref , map_ref , <nl> - feedback . name ( ) , access_mode , <nl> - object . AsJSObject ( ) , result_hints ) ; <nl> + ProcessMapForNamedPropertyAccess ( receiver , map_ref , feedback . name ( ) , <nl> + access_mode , object . AsJSObject ( ) , <nl> + result_hints ) ; <nl> } <nl> / / For JSNativeContextSpecialization : : ReduceJSLoadNamed . <nl> if ( access_mode = = AccessMode : : kLoad & & object . 
IsJSFunction ( ) & & <nl> void SerializerForBackgroundCompilation : : ProcessNamedAccess ( <nl> } <nl> } <nl> <nl> - void SerializerForBackgroundCompilation : : ProcessNamedSuperAccess ( <nl> - Hints * receiver , NamedAccessFeedback const & feedback , <nl> - AccessMode access_mode , Hints * result_hints ) { <nl> - MapHandles receiver_maps = <nl> - GetRelevantReceiverMaps ( broker ( ) - > isolate ( ) , receiver - > maps ( ) ) ; <nl> - for ( Handle < Map > receiver_map : receiver_maps ) { <nl> - MapRef receiver_map_ref ( broker ( ) , receiver_map ) ; <nl> - for ( Handle < Map > feedback_map : feedback . maps ( ) ) { <nl> - MapRef feedback_map_ref ( broker ( ) , feedback_map ) ; <nl> - ProcessMapForNamedPropertyAccess ( <nl> - receiver , receiver_map_ref , feedback_map_ref , feedback . name ( ) , <nl> - access_mode , base : : nullopt , result_hints ) ; <nl> - } <nl> - } <nl> - if ( receiver_maps . empty ( ) ) { <nl> - for ( Handle < Map > feedback_map : feedback . maps ( ) ) { <nl> - MapRef feedback_map_ref ( broker ( ) , feedback_map ) ; <nl> - ProcessMapForNamedPropertyAccess ( <nl> - receiver , base : : nullopt , feedback_map_ref , feedback . name ( ) , <nl> - access_mode , base : : nullopt , result_hints ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> void SerializerForBackgroundCompilation : : ProcessElementAccess ( <nl> Hints const & receiver , Hints const & key , <nl> ElementAccessFeedback const & feedback , AccessMode access_mode ) { <nl> void SerializerForBackgroundCompilation : : ProcessElementAccess ( <nl> for ( Handle < Object > hint : receiver . constants ( ) ) { <nl> ObjectRef receiver_ref ( broker ( ) , hint ) ; <nl> <nl> - / / For JSNativeContextSpecialization : : InferRootMap <nl> + / / For JSNativeContextSpecialization : : InferReceiverRootMap <nl> if ( receiver_ref . IsHeapObject ( ) ) { <nl> receiver_ref . AsHeapObject ( ) . map ( ) . SerializeRootMap ( ) ; <nl> } <nl> void SerializerForBackgroundCompilation : : ProcessElementAccess ( <nl> } <nl> } <nl> <nl> - / / For JSNativeContextSpecialization : : InferRootMap <nl> + / / For JSNativeContextSpecialization : : InferReceiverRootMap <nl> for ( Handle < Map > map : receiver . maps ( ) ) { <nl> MapRef map_ref ( broker ( ) , map ) ; <nl> map_ref . SerializeRootMap ( ) ; <nl> void SerializerForBackgroundCompilation : : VisitLdaNamedProperty ( <nl> <nl> void SerializerForBackgroundCompilation : : VisitLdaNamedPropertyFromSuper ( <nl> BytecodeArrayIterator * iterator ) { <nl> - Hints * receiver = & register_hints ( iterator - > GetRegisterOperand ( 0 ) ) ; <nl> - NameRef name ( broker ( ) , <nl> - iterator - > GetConstantForIndexOperand ( 1 , broker ( ) - > isolate ( ) ) ) ; <nl> - FeedbackSlot slot = iterator - > GetSlotOperand ( 2 ) ; <nl> - ProcessNamedSuperPropertyAccess ( receiver , name , slot , AccessMode : : kLoad ) ; <nl> + NameRef ( broker ( ) , <nl> + iterator - > GetConstantForIndexOperand ( 1 , broker ( ) - > isolate ( ) ) ) ; <nl> + / / TODO ( marja , v8 : 9237 ) : Process feedback once it ' s added to the byte code . <nl> } <nl> <nl> / / TODO ( neis ) : Do feedback - independent serialization also for * NoFeedback <nl> deleted file mode 100644 <nl> index f6a99005707 . . 00000000000 <nl> mmm a / test / mjsunit / es6 / super - ic - opt - dynamic - map - checks . js <nl> ppp / dev / null <nl> <nl> - / / Copyright 2020 the V8 project authors . All rights reserved . <nl> - / / Use of this source code is governed by a BSD - style license that can be <nl> - / / found in the LICENSE file . 
<nl> - <nl> - / / Flags : - - allow - natives - syntax - - super - ic - - opt - - runtime - call - stats <nl> - / / Flags : - - no - always - opt - - no - stress - opt - - turboprop <nl> - / / Flags : - - turboprop - dynamic - map - checks <nl> - <nl> - load ( " test / mjsunit / runtime - callstats - helpers . js " ) ; <nl> - <nl> - % GetAndResetRuntimeCallStats ( ) ; <nl> - <nl> - / / This file contains tests which require - - dynamic - map - chekcs . <nl> - <nl> - ( function TestMinimorphicPropertyAccess ( ) { <nl> - class A { } <nl> - A . prototype . bar = " wrong value : A . prototype . bar " ; <nl> - <nl> - class B extends A { } ; <nl> - B . prototype . bar = " correct value " ; <nl> - <nl> - class C extends B { <nl> - foo ( should_bailout ) { <nl> - const r = super . bar ; <nl> - const did_bailout = ( <nl> - % GetOptimizationStatus ( C . prototype . foo ) & <nl> - V8OptimizationStatus . kTopmostFrameIsTurboFanned ) = = 0 ; <nl> - assertEquals ( should_bailout , did_bailout ) ; <nl> - return r ; <nl> - } <nl> - } <nl> - C . prototype . bar = " wrong value : C . prototype . bar " ; <nl> - % PrepareFunctionForOptimization ( C . prototype . foo ) ; <nl> - <nl> - let o = new C ( ) ; <nl> - o . bar = " wrong value : o . bar " ; <nl> - <nl> - / / Fill in the feedback . <nl> - let r = o . foo ( true ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - % OptimizeFunctionOnNextCall ( C . prototype . foo ) ; <nl> - <nl> - / / Test the optimized function . <nl> - r = o . foo ( false ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - } ) ( ) ; <nl> - <nl> - / / Assert that the tests so far generated real optimized code and not just a <nl> - / / bailout to Runtime_LoadFromSuper . TODO ( marja , v8 : 9237 ) : update this to track <nl> - / / the builtin we ' ll use for bailout cases . <nl> - assertEquals ( 0 , getRuntimeFunctionCallCount ( " LoadFromSuper " ) ) ; <nl> - <nl> - / / Reset runtime stats so that we don ' t get extra printout . <nl> - % GetAndResetRuntimeCallStats ( ) ; <nl> deleted file mode 100644 <nl> index 136bad8bf67 . . 00000000000 <nl> mmm a / test / mjsunit / es6 / super - ic - opt - no - turboprop . js <nl> ppp / dev / null <nl> <nl> - / / Copyright 2020 the V8 project authors . All rights reserved . <nl> - / / Use of this source code is governed by a BSD - style license that can be <nl> - / / found in the LICENSE file . <nl> - <nl> - / / Flags : - - allow - natives - syntax - - super - ic - - opt - - runtime - call - stats <nl> - / / Flags : - - no - always - opt - - no - stress - opt <nl> - <nl> - load ( " test / mjsunit / runtime - callstats - helpers . js " ) ; <nl> - <nl> - % GetAndResetRuntimeCallStats ( ) ; <nl> - <nl> - / / This file contains tests which are disabled for TurboProp . TurboProp deopts <nl> - / / differently than TurboFan , so the assertions about when a function is <nl> - / / deoptimized won ' t hold . <nl> - <nl> - ( function TestPropertyIsConstant ( ) { <nl> - / / Test for a case where the property is a constant found in the lookup start <nl> - / / object . <nl> - class A { } <nl> - A . prototype . bar = " wrong value : A . prototype . bar " ; <nl> - <nl> - class B extends A { } ; <nl> - B . prototype . bar = " correct value " ; <nl> - <nl> - class C extends B { <nl> - foo ( ) { return super . bar ; } <nl> - } <nl> - C . prototype . bar = " wrong value : C . prototype . bar " ; <nl> - % PrepareFunctionForOptimization ( C . prototype . foo ) ; <nl> - <nl> - let o = new C ( ) ; <nl> - o . bar = " wrong value : o . 
bar " ; <nl> - <nl> - / / Fill in the feedback . <nl> - r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - % OptimizeFunctionOnNextCall ( C . prototype . foo ) ; <nl> - <nl> - / / Test the optimized function . <nl> - r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - / / Assert that the function was not deoptimized . <nl> - assertOptimized ( C . prototype . foo ) ; <nl> - <nl> - / / Change the property value . <nl> - B . prototype . bar = " new value " ; <nl> - r = o . foo ( ) ; <nl> - assertEquals ( " new value " , r ) ; <nl> - <nl> - / / Assert that the function was deoptimized ( dependency to the constant <nl> - / / value ) . <nl> - assertFalse ( isOptimized ( C . prototype . foo ) ) ; <nl> - } ) ( ) ; <nl> - <nl> - / / Assert that the tests so far generated real optimized code and not just a <nl> - / / bailout to Runtime_LoadFromSuper . TODO ( marja , v8 : 9237 ) : update this to track <nl> - / / the builtin we ' ll use for bailout cases . <nl> - assertEquals ( 0 , getRuntimeFunctionCallCount ( " LoadFromSuper " ) ) ; <nl> - <nl> - / / Reset runtime stats so that we don ' t get extra printout . <nl> - % GetAndResetRuntimeCallStats ( ) ; <nl> deleted file mode 100644 <nl> index 3550589828a . . 00000000000 <nl> mmm a / test / mjsunit / es6 / super - ic - opt . js <nl> ppp / dev / null <nl> <nl> - / / Copyright 2020 the V8 project authors . All rights reserved . <nl> - / / Use of this source code is governed by a BSD - style license that can be <nl> - / / found in the LICENSE file . <nl> - <nl> - / / Flags : - - allow - natives - syntax - - super - ic - - opt - - runtime - call - stats <nl> - / / Flags : - - no - always - opt - - no - stress - opt <nl> - <nl> - load ( " test / mjsunit / runtime - callstats - helpers . js " ) ; <nl> - <nl> - % GetAndResetRuntimeCallStats ( ) ; <nl> - <nl> - ( function TestPropertyIsInTheHomeObjectsProto ( ) { <nl> - / / Test where the property is a constant found on home object ' s proto . This <nl> - / / will generate a minimorphic property load . <nl> - class A { } <nl> - A . prototype . bar = " wrong value : A . prototype . bar " ; <nl> - <nl> - class B extends A { } ; <nl> - B . prototype . bar = " correct value " ; <nl> - <nl> - class C extends B { <nl> - foo ( ) { return super . bar ; } <nl> - } <nl> - C . prototype . bar = " wrong value : D . prototype . bar " ; <nl> - % PrepareFunctionForOptimization ( C . prototype . foo ) ; <nl> - <nl> - let o = new C ( ) ; <nl> - o . bar = " wrong value : o . bar " ; <nl> - <nl> - / / Fill in the feedback . <nl> - let r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - % OptimizeFunctionOnNextCall ( C . prototype . foo ) ; <nl> - <nl> - / / Test the optimized function . <nl> - r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - / / Assert that the function was not deoptimized . <nl> - assertOptimized ( C . prototype . foo ) ; <nl> - <nl> - / / Change the property value . <nl> - B . prototype . bar = " new value " ; <nl> - r = o . foo ( ) ; <nl> - assertEquals ( " new value " , r ) ; <nl> - } ) ( ) ; <nl> - <nl> - ( function TestPropertyIsGetterInTheHomeObjectsProto ( ) { <nl> - / / Test where the property is a constant found on home object ' s proto . This <nl> - / / will generate a minimorphic property load . <nl> - class A { } <nl> - A . prototype . bar = " wrong value : A . prototype . bar " ; <nl> - <nl> - class B extends A { <nl> - get bar ( ) { return this . 
this_value ; } <nl> - } <nl> - class C extends B { <nl> - foo ( ) { return super . bar ; } <nl> - } <nl> - C . prototype . bar = " wrong value : D . prototype . bar " ; <nl> - % PrepareFunctionForOptimization ( C . prototype . foo ) ; <nl> - <nl> - let o = new C ( ) ; <nl> - o . bar = " wrong value : o . bar " ; <nl> - o . this_value = " correct value " ; <nl> - <nl> - / / Fill in the feedback . <nl> - let r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - % OptimizeFunctionOnNextCall ( C . prototype . foo ) ; <nl> - <nl> - / / Test the optimized function . <nl> - r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - / / Assert that the function was not deoptimized . <nl> - assertOptimized ( C . prototype . foo ) ; <nl> - <nl> - / / Change the property value . <nl> - o . this_value = " new value " ; <nl> - r = o . foo ( ) ; <nl> - assertEquals ( " new value " , r ) ; <nl> - } ) ( ) ; <nl> - <nl> - ( function TestPropertyIsConstantInThePrototypeChain ( ) { <nl> - / / Test where the property is a constant found on the prototype chain of the <nl> - / / lookup start object . <nl> - class A { } <nl> - A . prototype . bar = " wrong value : A . prototype . bar " ; <nl> - <nl> - class B extends A { } ; <nl> - B . prototype . bar = " correct value " ; <nl> - <nl> - class C extends B { } ; <nl> - <nl> - class D extends C { <nl> - foo ( ) { return super . bar ; } <nl> - } <nl> - D . prototype . bar = " wrong value : D . prototype . bar " ; <nl> - % PrepareFunctionForOptimization ( D . prototype . foo ) ; <nl> - <nl> - let o = new D ( ) ; <nl> - o . bar = " wrong value : o . bar " ; <nl> - <nl> - / / Fill in the feedback . <nl> - let r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - % OptimizeFunctionOnNextCall ( D . prototype . foo ) ; <nl> - <nl> - / / Test the optimized function . <nl> - r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - / / Assert that the function was not deoptimized . <nl> - assertOptimized ( D . prototype . foo ) ; <nl> - <nl> - / / Change the property value . <nl> - B . prototype . bar = " new value " ; <nl> - r = o . foo ( ) ; <nl> - assertEquals ( " new value " , r ) ; <nl> - <nl> - / / Assert that the function was deoptimized ( dependency to the constant <nl> - / / value ) . <nl> - assertFalse ( isOptimized ( D . prototype . foo ) ) ; <nl> - } ) ( ) ; <nl> - <nl> - ( function TestPropertyIsNonConstantData ( ) { <nl> - / / Test for a case where the property is a non - constant data property found <nl> - / / in the lookup start object . <nl> - class A { } <nl> - A . prototype . bar = " wrong value : A . prototype . bar " ; <nl> - <nl> - class B extends A { } ; <nl> - B . prototype . bar = " initial value " ; <nl> - <nl> - class C extends B { <nl> - foo ( ) { return super . bar ; } <nl> - } <nl> - C . prototype . bar = " wrong value : C . prototype . bar " ; <nl> - % PrepareFunctionForOptimization ( C . prototype . foo ) ; <nl> - <nl> - let o = new C ( ) ; <nl> - o . bar = " wrong value : o . bar " ; <nl> - <nl> - / / Make the property look like a non - constant . <nl> - B . prototype . bar = " correct value " ; <nl> - <nl> - / / Fill in the feedback . <nl> - let r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - % OptimizeFunctionOnNextCall ( C . prototype . foo ) ; <nl> - <nl> - / / Test the optimized function . <nl> - r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - / / Assert that the function was not deoptimized . 
<nl> - assertOptimized ( C . prototype . foo ) ; <nl> - <nl> - / / Change the property value . <nl> - B . prototype . bar = " new value " ; <nl> - r = o . foo ( ) ; <nl> - assertEquals ( " new value " , r ) ; <nl> - <nl> - / / Assert that the function was still not deoptimized ( the value was not a <nl> - / / constant to begin with ) . <nl> - assertOptimized ( C . prototype . foo ) ; <nl> - } ) ( ) ; <nl> - <nl> - ( function TestPropertyIsGetter ( ) { <nl> - class A { } <nl> - A . prototype . bar = " wrong value : A . prototype . bar " ; <nl> - <nl> - class B extends A { <nl> - get bar ( ) { <nl> - return this . test_value ; <nl> - } <nl> - } ; <nl> - <nl> - class C extends B { } <nl> - <nl> - class D extends C { <nl> - foo ( ) { <nl> - const b = super . bar ; <nl> - return b ; <nl> - } <nl> - } <nl> - % PrepareFunctionForOptimization ( D . prototype . foo ) ; <nl> - D . prototype . bar = " wrong value : D . prototype . bar " ; <nl> - <nl> - let o = new D ( ) ; <nl> - o . bar = " wrong value : o . bar " ; <nl> - o . test_value = " correct value " ; <nl> - <nl> - / / Fill in the feedback . <nl> - let r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - % OptimizeFunctionOnNextCall ( D . prototype . foo ) ; <nl> - <nl> - / / Test the optimized function . <nl> - r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - / / Assert that the function was not deoptimized . <nl> - assertOptimized ( D . prototype . foo ) ; <nl> - } ) ( ) ; <nl> - <nl> - ( function TestPropertyInsertedInTheMiddle ( ) { <nl> - / / Test for a case where the property is a constant found in the lookup start <nl> - / / object . <nl> - class A { } <nl> - A . prototype . bar = " correct value " ; <nl> - <nl> - class B extends A { } ; <nl> - <nl> - class C extends B { <nl> - foo ( ) { return super . bar ; } <nl> - } <nl> - C . prototype . bar = " wrong value : C . prototype . bar " ; <nl> - % PrepareFunctionForOptimization ( C . prototype . foo ) ; <nl> - <nl> - let o = new C ( ) ; <nl> - o . bar = " wrong value : o . bar " ; <nl> - <nl> - / / Fill in the feedback . <nl> - let r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - % OptimizeFunctionOnNextCall ( C . prototype . foo ) ; <nl> - <nl> - / / Test the optimized function . <nl> - r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - / / Assert that the function was not deoptimized . <nl> - assertOptimized ( C . prototype . foo ) ; <nl> - <nl> - / / Insert the property into the prototype chain between the lookup start <nl> - / / object and the old holder . <nl> - B . prototype . bar = " new value " ; <nl> - r = o . foo ( ) ; <nl> - assertEquals ( " new value " , r ) ; <nl> - <nl> - / / Assert that the function was deoptimized ( holder changed ) . <nl> - assertFalse ( isOptimized ( C . prototype . foo ) ) ; <nl> - } ) ( ) ; <nl> - <nl> - ( function TestUnexpectedHomeObjectPrototypeDeoptimizes ( ) { <nl> - class A { } <nl> - A . prototype . bar = " wrong value : A . prototype . bar " ; <nl> - <nl> - class B extends A { } <nl> - B . prototype . bar = " correct value " ; <nl> - <nl> - class C extends B { } <nl> - <nl> - class D extends C { <nl> - foo ( ) { return super . bar ; } <nl> - } <nl> - % PrepareFunctionForOptimization ( D . prototype . foo ) ; <nl> - D . prototype . bar = " wrong value : D . prototype . bar " ; <nl> - <nl> - const o = new D ( ) ; <nl> - <nl> - / / Fill in the feedback . <nl> - let r = o . 
foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - % OptimizeFunctionOnNextCall ( D . prototype . foo ) ; <nl> - <nl> - / / Test the optimized function . <nl> - r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - / / Assert that the function was not deoptimized . <nl> - assertOptimized ( D . prototype . foo ) ; <nl> - <nl> - / / Change the home object ' s prototype . <nl> - D . prototype . __proto__ = { " bar " : " new value " } ; <nl> - r = o . foo ( ) ; <nl> - assertEquals ( " new value " , r ) ; <nl> - <nl> - / / Assert that the function was deoptimized . <nl> - assertEquals ( false , isOptimized ( D . prototype . foo ) ) ; <nl> - } ) ( ) ; <nl> - <nl> - ( function TestUnexpectedReceiverDoesNotDeoptimize ( ) { <nl> - class A { } <nl> - A . prototype . bar = " wrong value : A . prototype . bar " ; <nl> - <nl> - class B extends A { } ; <nl> - B . prototype . bar = " correct value " ; <nl> - <nl> - class C extends B { <nl> - foo ( ) { return super . bar ; } <nl> - } <nl> - C . prototype . bar = " wrong value : C . prototype . bar " ; <nl> - % PrepareFunctionForOptimization ( C . prototype . foo ) ; <nl> - <nl> - let o = new C ( ) ; <nl> - o . bar = " wrong value : o . bar " ; <nl> - <nl> - / / Fill in the feedback . <nl> - let r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - % OptimizeFunctionOnNextCall ( C . prototype . foo ) ; <nl> - o . foo ( ) ; <nl> - assertOptimized ( C . prototype . foo ) ; <nl> - <nl> - / / Test the optimized function with an unexpected receiver . <nl> - r = C . prototype . foo . call ( { ' lol ' : 5 } ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - / / Assert that the function was not deoptimized . <nl> - assertOptimized ( C . prototype . foo ) ; <nl> - } ) ( ) ; <nl> - <nl> - ( function TestPolymorphic ( ) { <nl> - class A { } <nl> - A . prototype . bar = " wrong value : A . prototype . bar " ; <nl> - <nl> - class B extends A { } <nl> - B . prototype . bar = " correct value " ; <nl> - <nl> - class C extends B { } <nl> - <nl> - class D extends C { <nl> - foo ( ) { return super . bar ; } <nl> - } <nl> - % PrepareFunctionForOptimization ( D . prototype . foo ) ; <nl> - D . prototype . bar = " wrong value : D . prototype . bar " ; <nl> - <nl> - const o = new D ( ) ; <nl> - <nl> - / / Create objects which will act as the " home object ' s prototype " later . <nl> - const prototypes = [ { " a " : 0 } , { " b " : 0 } ] ; <nl> - for ( p of prototypes ) { <nl> - p . __proto__ = B . prototype ; <nl> - } <nl> - <nl> - / / Fill in the feedback ( polymorphic ) . <nl> - for ( p of prototypes ) { <nl> - D . prototype . __proto__ = p ; <nl> - const r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - } <nl> - <nl> - % OptimizeFunctionOnNextCall ( D . prototype . foo ) ; <nl> - <nl> - / / Test the optimized function - don ' t change the home object ' s proto any <nl> - / / more . <nl> - let r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - / / Assert that the function was not deoptimized . <nl> - assertOptimized ( D . prototype . foo ) ; <nl> - } ) ( ) ; <nl> - <nl> - ( function TestPolymorphicWithGetter ( ) { <nl> - class A { } <nl> - A . prototype . bar = " wrong value : A . prototype . bar " ; <nl> - <nl> - class B extends A { <nl> - get bar ( ) { <nl> - return this . test_value ; <nl> - } <nl> - } <nl> - <nl> - class C extends B { } <nl> - <nl> - class D extends C { <nl> - foo ( ) { return super . 
bar ; } <nl> - } <nl> - % PrepareFunctionForOptimization ( D . prototype . foo ) ; <nl> - D . prototype . bar = " wrong value : D . prototype . bar " ; <nl> - <nl> - const o = new D ( ) ; <nl> - o . test_value = " correct value " ; <nl> - <nl> - / / Create objects which will act as the " home object ' s prototype " later . <nl> - const prototypes = [ { " a " : 0 } , { " b " : 0 } ] ; <nl> - for ( p of prototypes ) { <nl> - p . __proto__ = B . prototype ; <nl> - } <nl> - <nl> - / / Fill in the feedback . <nl> - for ( p of prototypes ) { <nl> - D . prototype . __proto__ = p ; <nl> - const r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - } <nl> - <nl> - % OptimizeFunctionOnNextCall ( D . prototype . foo ) ; <nl> - <nl> - / / Test the optimized function - don ' t change the home object ' s proto any <nl> - / / more . <nl> - const r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - / / Assert that the function was not deoptimized . <nl> - assertOptimized ( D . prototype . foo ) ; <nl> - } ) ( ) ; <nl> - <nl> - ( function TestPolymorphicMixinDoesNotDeopt ( ) { <nl> - function createClasses ( ) { <nl> - class A { } <nl> - A . prototype . bar = " correct value " ; <nl> - class B extends A { <nl> - foo ( ) { return super . bar ; } <nl> - } <nl> - return B ; <nl> - } <nl> - <nl> - const b1 = createClasses ( ) ; <nl> - % PrepareFunctionForOptimization ( b1 . prototype . foo ) ; <nl> - const b2 = createClasses ( ) ; <nl> - % PrepareFunctionForOptimization ( b2 . prototype . foo ) ; <nl> - <nl> - class c1 extends b1 { } ; <nl> - class c2 extends b2 { } ; <nl> - <nl> - const objects = [ new c1 ( ) , new c2 ( ) ] ; <nl> - <nl> - / / Fill in the feedback . <nl> - for ( o of objects ) { <nl> - const r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - } <nl> - % OptimizeFunctionOnNextCall ( b1 . prototype . foo ) ; <nl> - % OptimizeFunctionOnNextCall ( b2 . prototype . foo ) ; <nl> - <nl> - / / Test the optimized function . <nl> - for ( o of objects ) { <nl> - const r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - } <nl> - assertOptimized ( b1 . prototype . foo ) ; <nl> - assertOptimized ( b2 . prototype . foo ) ; <nl> - } ) ( ) ; <nl> - <nl> - ( function TestHomeObjectProtoIsGlobalThis ( ) { <nl> - class A { } <nl> - <nl> - class B extends A { <nl> - foo ( ) { return super . bar ; } <nl> - } <nl> - B . prototype . __proto__ = globalThis ; <nl> - globalThis . bar = " correct value " ; <nl> - % PrepareFunctionForOptimization ( B . prototype . foo ) ; <nl> - <nl> - let o = new B ( ) ; <nl> - <nl> - / / Fill in the feedback . <nl> - let r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - % OptimizeFunctionOnNextCall ( B . prototype . foo ) ; <nl> - <nl> - / / Test the optimized function . <nl> - r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - / / Assert that the function was not deoptimized . <nl> - assertOptimized ( B . prototype . foo ) ; <nl> - <nl> - globalThis . bar = " new value " ; <nl> - <nl> - r = o . foo ( ) ; <nl> - assertEquals ( " new value " , r ) ; <nl> - } ) ( ) ; <nl> - <nl> - / / Assert that the tests so far generated real optimized code and not just a <nl> - / / bailout to Runtime_LoadFromSuper . TODO ( marja , v8 : 9237 ) : update this to track <nl> - / / the builtin we ' ll use for bailout cases . 
<nl> - assertEquals ( 0 , getRuntimeFunctionCallCount ( " LoadFromSuper " ) ) ; <nl> - <nl> - ( function TestMegamorphic ( ) { <nl> - class A { } <nl> - A . prototype . bar = " wrong value : A . prototype . bar " ; <nl> - <nl> - class B extends A { } <nl> - B . prototype . bar = " correct value " ; <nl> - <nl> - class C extends B { } <nl> - <nl> - class D extends C { <nl> - foo ( ) { return super . bar ; } <nl> - } <nl> - % PrepareFunctionForOptimization ( D . prototype . foo ) ; <nl> - D . prototype . bar = " wrong value : D . prototype . bar " ; <nl> - <nl> - const o = new D ( ) ; <nl> - <nl> - / / Create objects which will act as the " home object ' s prototype " later . <nl> - const prototypes = [ { " a " : 0 } , { " b " : 0 } , { " c " : 0 } , { " d " : 0 } , { " e " : 0 } , <nl> - { " f " : 0 } , { " g " : 0 } , { " e " : 0 } ] ; <nl> - for ( p of prototypes ) { <nl> - p . __proto__ = B . prototype ; <nl> - } <nl> - <nl> - / / Fill in the feedback ( megamorphic ) . <nl> - for ( p of prototypes ) { <nl> - D . prototype . __proto__ = p ; <nl> - const r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - } <nl> - <nl> - % OptimizeFunctionOnNextCall ( D . prototype . foo ) ; <nl> - <nl> - / / Test the optimized function - don ' t change the home object ' s proto any <nl> - / / more . <nl> - let r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - / / Assert that the function was not deoptimized . <nl> - assertOptimized ( D . prototype . foo ) ; <nl> - <nl> - / / The " optimized code " is just a runtime call though . TODO ( marja , v8 : 9237 ) : <nl> - / / update this to track the builtin we ' ll use for bailout cases . <nl> - assertEquals ( 1 , getRuntimeFunctionCallCount ( " LoadFromSuper " ) ) ; <nl> - } ) ( ) ; <nl> - <nl> - ( function TestMegamorphicWithGetter ( ) { <nl> - class A { } <nl> - A . prototype . bar = " wrong value : A . prototype . bar " ; <nl> - <nl> - class B extends A { <nl> - get bar ( ) { <nl> - return this . test_value ; <nl> - } <nl> - } ; <nl> - <nl> - class C extends B { } <nl> - <nl> - class D extends C { <nl> - foo ( ) { return super . bar ; } <nl> - } <nl> - % PrepareFunctionForOptimization ( D . prototype . foo ) ; <nl> - D . prototype . bar = " wrong value : D . prototype . bar " ; <nl> - <nl> - const o = new D ( ) ; <nl> - o . test_value = " correct value " ; <nl> - <nl> - / / Create objects which will act as the " home object ' s prototype " later . <nl> - const prototypes = [ { " a " : 0 } , { " b " : 0 } , { " c " : 0 } , { " d " : 0 } , { " e " : 0 } , <nl> - { " f " : 0 } , { " g " : 0 } , { " e " : 0 } ] ; <nl> - for ( p of prototypes ) { <nl> - p . __proto__ = B . prototype ; <nl> - } <nl> - <nl> - / / Fill in the feedback ( megamorphic ) . <nl> - for ( p of prototypes ) { <nl> - D . prototype . __proto__ = p ; <nl> - const r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - } <nl> - <nl> - % OptimizeFunctionOnNextCall ( D . prototype . foo ) ; <nl> - <nl> - / / Test the optimized function - don ' t change the home object ' s proto any <nl> - / / more . <nl> - const r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - / / The " optimized code " is just a runtime call though . TODO ( marja , v8 : 9237 ) : <nl> - / / update this to track the builtin we ' ll use for bailout cases . 
<nl> - assertEquals ( 1 , getRuntimeFunctionCallCount ( " LoadFromSuper " ) ) ; <nl> - } ) ( ) ; <nl> - <nl> - ( function TestHomeObjectProtoIsGlobalThisGetterProperty ( ) { <nl> - class A { } <nl> - <nl> - class B extends A { <nl> - foo ( ) { return super . bar ; } <nl> - } <nl> - B . prototype . __proto__ = globalThis ; <nl> - Object . defineProperty ( globalThis , " bar " , { get : function ( ) { return this . this_value ; } } ) ; <nl> - % PrepareFunctionForOptimization ( B . prototype . foo ) ; <nl> - <nl> - let o = new B ( ) ; <nl> - o . this_value = " correct value " ; <nl> - <nl> - / / Fill in the feedback . <nl> - let r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - % OptimizeFunctionOnNextCall ( B . prototype . foo ) ; <nl> - <nl> - / / Test the optimized function . <nl> - r = o . foo ( ) ; <nl> - assertEquals ( " correct value " , r ) ; <nl> - <nl> - / / Assert that the function was not deoptimized . <nl> - assertOptimized ( B . prototype . foo ) ; <nl> - <nl> - / / The " optimized code " is just a runtime call though . TODO ( marja , v8 : 9237 ) : <nl> - / / update this to track the builtin we ' ll use for bailout cases . <nl> - assertEquals ( 1 , getRuntimeFunctionCallCount ( " LoadFromSuper " ) ) ; <nl> - } ) ( ) ; <nl> - <nl> - ( function TestHomeObjectProtoIsFunctionAndPropertyIsPrototype ( ) { <nl> - / / There are special optimizations for accessing Function . prototype . Test <nl> - / / that super property access which ends up accessing it works . <nl> - class A { } <nl> - <nl> - class B extends A { <nl> - foo ( ) { return super . prototype ; } <nl> - } <nl> - function f ( ) { } <nl> - B . prototype . __proto__ = f ; <nl> - % PrepareFunctionForOptimization ( B . prototype . foo ) ; <nl> - <nl> - let o = new B ( ) ; <nl> - <nl> - / / Fill in the feedback . <nl> - let r = o . foo ( ) ; <nl> - assertEquals ( f . prototype , r ) ; <nl> - <nl> - % OptimizeFunctionOnNextCall ( B . prototype . foo ) ; <nl> - <nl> - / / Test the optimized function . <nl> - r = o . foo ( ) ; <nl> - assertEquals ( f . prototype , r ) ; <nl> - <nl> - / / Assert that the function was not deoptimized . <nl> - assertOptimized ( B . prototype . foo ) ; <nl> - <nl> - / / The " optimized code " is just a runtime call though . TODO ( marja , v8 : 9237 ) : <nl> - / / update this to track the builtin we ' ll use for bailout cases . <nl> - assertEquals ( 1 , getRuntimeFunctionCallCount ( " LoadFromSuper " ) ) ; <nl> - } ) ( ) ; <nl> - <nl> - / / Reset runtime stats so that we don ' t get extra printout . <nl> - % GetAndResetRuntimeCallStats ( ) ; <nl> mmm a / test / mjsunit / mjsunit . status <nl> ppp b / test / mjsunit / mjsunit . status <nl> <nl> ' compiler / number - comparison - truncations ' : [ SKIP ] , <nl> ' compiler / redundancy - elimination ' : [ SKIP ] , <nl> ' compiler / regress - 9945 - * ' : [ SKIP ] , <nl> - ' es6 / super - ic - opt - no - turboprop ' : [ SKIP ] , <nl> <nl> # Static asserts for optimizations don ' t hold due to removed optimization <nl> # phases . <nl> <nl> ' compiler / serializer - feedback - propagation - 1 ' : [ SKIP ] , <nl> ' compiler / serializer - feedback - propagation - 2 ' : [ SKIP ] , <nl> ' compiler / serializer - transition - propagation ' : [ SKIP ] , <nl> - # crbug . 
com / v8 / 11110 <nl> - ' es6 / super - ic - opt * ' : [ SKIP ] , <nl> } ] , # variant = = nci or variant = = nci_as_midtier <nl> <nl> [ ' ( ( arch = = mipsel or arch = = mips64el or arch = = mips or arch = = mips64 ) and not simd_mips ) or ( arch in [ ppc64 , s390x ] ) ' , { <nl> deleted file mode 100644 <nl> index 04205ff7bd1 . . 00000000000 <nl> mmm a / test / mjsunit / regress - 1146106 . js <nl> ppp / dev / null <nl> <nl> - / / Copyright 2020 the V8 project authors . All rights reserved . <nl> - / / Use of this source code is governed by a BSD - style license that can be <nl> - / / found in the LICENSE file . <nl> - <nl> - / / Flags : - - allow - natives - syntax - - concurrent - inlining - - no - use - ic - - super - ic <nl> - <nl> - class A { <nl> - bar ( ) { } <nl> - } <nl> - class B extends A { <nl> - foo ( ) { <nl> - return super . bar ( ) ; <nl> - } <nl> - } <nl> - % PrepareFunctionForOptimization ( B . prototype . foo ) ; <nl> - new B ( ) . foo ( ) ; <nl> - % OptimizeFunctionOnNextCall ( B . prototype . foo ) ; <nl> - new B ( ) . foo ( ) ; <nl> mmm a / test / mjsunit / runtime - callstats - helpers . js <nl> ppp b / test / mjsunit / runtime - callstats - helpers . js <nl> function getRuntimeFunctionCallCount ( function_name ) { <nl> const line = lines [ i ] ; <nl> const m = line . match ( / ( ? < name > \ S + ) \ s + \ S + \ s + \ S + \ s + ( ? < count > \ S + ) / ) ; <nl> if ( function_name = = m . groups . name ) { <nl> - return parseInt ( m . groups . count ) ; <nl> + return m . groups . count ; <nl> } <nl> } <nl> return 0 ; <nl> | Revert " [ super ] Optimize super property access in JSNativeContextSpecialization " | v8/v8 | d9a0b7b11e9e2e9b04bfb4a356f8718dee29ac05 | 2020-11-09T17:42:41Z |