Fields per record: diff (string, 41 to 2.03M chars), msg (string, 1 to 1.5k chars), repo (string, 5 to 40 chars), sha (string, 40 chars), time (string, 20 chars).
--- a/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp
+++ b/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp
@@ ... @@ namespace DB
         // TODO: constructed every iteration
         ColumnWithTypeAndName column = header.safeGetByPosition(column_i);
         column.column = recursiveRemoveLowCardinality(chunk.getColumns()[column_i]);
+        column.type = recursiveRemoveLowCardinality(column.type);
 
         const bool is_column_nullable = column.type->isNullable();
         const auto & column_nested_type
Remove LC converting to Arrow.
ClickHouse/ClickHouse
3d37a632474fbc3de11b0ff885d8a12089f81db7
2020-07-03T10:37:55Z
--- a/tests/runner.py
+++ b/tests/runner.py
@@ ... @@ def test_memops(self):
       #include <stdlib.h>
       int main() {
         int N = 1024*1024;
-        int M = 600;
+        int M = 190;
         int final = 0;
         char *buf = (char*)malloc(N);
         for (int t = 0; t < M; t++) {
@@ ... @@ def test_memops(self):
           return 1;
         }
       '''
-    self.do_benchmark(src, [], 'final: 800.')
+    self.do_benchmark(src, [], 'final: 720.')
 
   def test_fannkuch(self):
     src = open(path_from_root('tests', 'fannkuch.cpp'), 'r').read()
normalize memops benchmark
emscripten-core/emscripten
91043556252cb87e837807bfc39ee9769410ba16
2011-07-02T02:56:14Z
--- a/arangod/Aql/OptimizerRule.h
+++ b/arangod/Aql/OptimizerRule.h
@@ ... @@ struct OptimizerRule {
     removeTraversalPathVariable_pass6,
     prepareTraversalsRule_pass6,
 
-    // simplify an EnumerationCollectionNode that fetches an
-    // entire document to a projection of this document
-    reduceExtractionToProjectionRule_pass6,
-
     /// Pass 9: push down calculations beyond FILTERs and LIMITs
     moveCalculationsDownRule_pass9,
 
@@ ... @@ struct OptimizerRule {
     undistributeRemoveAfterEnumCollRule_pass10,
 
     // push collect operations to the db servers
-    collectInClusterRule_pass10
-  };
+    collectInClusterRule_pass10,
 
+    // simplify an EnumerationCollectionNode that fetches an
+    // entire document to a projection of this document
+    reduceExtractionToProjectionRule_pass6
+  };
 
   std::string name;
   RuleFunction func;
--- a/arangod/Transaction/Methods.cpp
+++ b/arangod/Transaction/Methods.cpp
@@ ... @@ std::pair<bool, bool> transaction::Methods::findIndexHandleForAndNode(
 
     // enable the following line to see index candidates considered with their
     // abilities and scores
-    // LOG_TOPIC(TRACE, Logger::FIXME) << "looking at index: " << idx.get() << ", isSorted: " << idx->isSorted() << ", isSparse: " << idx->sparse() << ", fields: " << idx->fields().size() << ", supportsFilter: " << supportsFilter << ", supportsSort: " << supportsSort << ", filterCost: " << filterCost << ", sortCost: " << sortCost << ", totalCost: " << (filterCost + sortCost) << ", isOnlyAttributeAccess: " << isOnlyAttributeAccess << ", isUnidirectional: " << sortCondition->isUnidirectional() << ", isOnlyEqualityMatch: " << node->isOnlyEqualityMatch() << ", itemsInIndex: " << itemsInIndex;
+    LOG_TOPIC(TRACE, Logger::FIXME) << "looking at index: " << idx.get() << ", isSorted: " << idx->isSorted() << ", isSparse: " << idx->sparse() << ", fields: " << idx->fields().size() << ", supportsFilter: " << supportsFilter << ", supportsSort: " << supportsSort << ", filterCost: " << filterCost << ", sortCost: " << sortCost << ", totalCost: " << (filterCost + sortCost) << ", isOnlyAttributeAccess: " << isOnlyAttributeAccess << ", isUnidirectional: " << sortCondition->isUnidirectional() << ", isOnlyEqualityMatch: " << node->isOnlyEqualityMatch() << ", itemsInIndex: " << itemsInIndex;
 
     if (!supportsFilter && !supportsSort) {
       continue;
--- a/js/server/tests/aql/aql-optimizer-rule-reduce-extraction-to-projection-rocksdb.js
+++ b/js/server/tests/aql/aql-optimizer-rule-reduce-extraction-to-projection-rocksdb.js
@@ ... @@ function optimizerRuleTestSuite () {
 
     setUp : function () {
       db._drop(cn);
-      c = db._create(cn);
+      c = db._create(cn, { numberOfShards: 4 });
 
       for (var i = 0; i < 1000; ++i) {
         c.insert({ value1: i, value2: "test" + i });
--- a/lib/Rest/Version.cpp
+++ b/lib/Rest/Version.cpp
@@ ... @@ std::string Version::getVerboseVersionString() {
       << ", using "
 #ifdef ARANGODB_HAVE_JEMALLOC
       << "jemalloc, "
+#endif
+#ifdef HAVE_ARANGODB_BUILD_REPOSITORY
+      << "build " << getBuildRepository() << ", "
 #endif
       << "VPack " << getVPackVersion() << ", "
       << "RocksDB " << getRocksDBVersion() << ", "
       << "ICU " << getICUVersion() << ", "
-      << "V8 " << getV8Version() << ", " << getOpenSSLVersion();
+      << "V8 " << getV8Version() << ", "
+      << getOpenSSLVersion();
 
   return version.str();
 }
Bug fix / forward port 3.3.5 changes ()
arangodb/arangodb
65bfeb705427b47c720948f87f2a350e9d154c4b
2018-04-03T15:31:34Z
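The OptimizerRule.h change above moves reduceExtractionToProjectionRule_pass6 to the end of the enumeration; since the rules are applied in enumeration order, the projection rewrite now runs after the cluster-distribution rules. A minimal sketch of that ordering effect (hypothetical names, not ArangoDB's API): rules fire in registration order, so moving one to the end of the list defers it until the earlier rules have rewritten the plan.

def move_calculations_down(plan):
    plan.append("calculations moved down")
    return plan

def collect_in_cluster(plan):
    plan.append("collect pushed to DB servers")
    return plan

def reduce_extraction_to_projection(plan):
    plan.append("full-document fetch reduced to projection")
    return plan

# The projection rule now sits last, mirroring the enum reordering.
RULES = [move_calculations_down, collect_in_cluster,
         reduce_extraction_to_projection]

def optimize(plan):
    for rule in RULES:
        plan = rule(plan)
    return plan

print(optimize(["initial plan"]))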
--- a/src/app/doc_api.cpp
+++ b/src/app/doc_api.cpp
@@ ... @@ void DocApi::swapCel(
 
 LayerImage* DocApi::newLayer(LayerGroup* parent, const std::string& name)
 {
-  LayerImage* layer = new LayerImage(parent->sprite());
-  layer->setName(name);
+  LayerImage* newLayer = new LayerImage(parent->sprite());
+  newLayer->setName(name);
 
-  addLayer(parent, layer, parent->lastLayer());
-  return layer;
+  addLayer(parent, newLayer, parent->lastLayer());
+  return newLayer;
 }
 
 LayerGroup* DocApi::newGroup(LayerGroup* parent, const std::string& name)
 {
-  LayerGroup* layer = new LayerGroup(parent->sprite());
-  layer->setName(name);
+  LayerGroup* newLayerGroup = new LayerGroup(parent->sprite());
+  newLayerGroup->setName(name);
 
-  addLayer(parent, layer, parent->lastLayer());
-  return layer;
+  addLayer(parent, newLayerGroup, parent->lastLayer());
+  return newLayerGroup;
 }
 
 void DocApi::addLayer(LayerGroup* parent, Layer* newLayer, Layer* afterThis)
Minor changes functions newLayer and newGroup
aseprite/aseprite
6eaebefb90cd04062f4d8d22682e166c0e1d3ffc
2018-08-30T21:10:45Z
--- a/VERSIONS
+++ b/VERSIONS
@@ ... @@
 CXX_STANDARD "17"
 STARTER_REV "0.14.12"
 SYNCER_REV "0.6.5"
-GCC_LINUX "8.3.0"
+GCC_LINUX "9.2.0"
 MSVC_WINDOWS "2017"
 MACOS_MIN "10.14"
 OPENSSL_LINUX "1.1.1"
Try to use gcc 9.2.0 ()
arangodb/arangodb
22eec2e35407d868ac36f06b9abdbee3fb3c3ef3
2019-10-18T10:03:51Z
--- a/src/ui.cpp
+++ b/src/ui.cpp
@@ ... @@ void MainFrameRepaint()
         printf("MainFrameRepaint\n");
         wxPaintEvent event;
         pframeMain->fRefresh = true;
-        pframeMain->fRefreshListCtrl = true;
         pframeMain->GetEventHandler()->AddPendingEvent(event);
     }
 }
@@ ... @@ void CSendDialog::OnButtonSend(wxCommandEvent& event)
         strError = pwalletMain->SendMoney(scriptPubKey, nValue, wtx, true);
     }
     if (strError == "")
+    {
+        pframeMain->RefreshListCtrl();
         wxMessageBox(_("Payment sent"), _("Sending..."));
+    }
     else if (strError == "ABORTED")
     {
         if (fWasLocked)
Fixed regression I introduced: wallets with lots of transactions were unusable in GUI.
bitcoin/bitcoin
36d9b2311dcb7401ea0d3deda636bc50f135c522
2011-09-01T19:44:47Z
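The fix above stops marking the heavy transaction list dirty on every repaint and instead refreshes it once, right after a successful send. A sketch of the idea (hypothetical names, not the actual wxWidgets code):

class MainFrame:
    def __init__(self, transactions):
        self.transactions = transactions
        self.rows = []

    def refresh_list_ctrl(self):   # expensive: rebuilds every row
        self.rows = ["tx %d" % t for t in self.transactions]

    def on_repaint(self):          # cheap: no per-repaint rebuild anymore
        pass

    def on_payment_sent(self, tx):  # refresh only when contents change
        self.transactions.append(tx)
        self.refresh_list_ctrl()

frame = MainFrame(list(range(3)))
frame.on_repaint()                 # no longer touches the row cache
frame.on_payment_sent(99)          # rebuilds once, after the send succeeds
print(len(frame.rows))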
--- a/utils/benchmark/Richards/richards_benchmark.c
+++ b/utils/benchmark/Richards/richards_benchmark.c
@@ ... @@
 #include <stdio.h>
 #include <stdlib.h>
 
+#ifdef NDEBUG
+#define DEBUG(...)
+#define DEBUG_DO(x)
+#else
 #define DEBUG(...) printf(__VA_ARGS__)
 #define DEBUG_DO(x) { x; }
+#endif
 
 #ifdef SMALL_PROBLEM_SIZE
 #define Count 1000 * 1000
#ifdef to allow benchmarking C version of richards.
apple/swift
67bf9f97f86989ea7f4a411f8dd787e7758d565e
2014-08-09T02:32:03Z
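The commit above compiles the DEBUG macros down to nothing when NDEBUG is defined, so benchmark timings are not dominated by printf. A rough Python analogue (illustrative only; the commit itself uses the C preprocessor): debug output funnels through one helper that becomes a no-op under `python -O`, just as DEBUG(...) vanishes under -DNDEBUG.

if __debug__:
    def debug(fmt, *args):
        print(fmt % args)
else:
    def debug(fmt, *args):
        pass  # compiled-out equivalent: calls cost almost nothing

debug("dispatching task %d", 3)  # printed only when not running with -O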
--- a/ios/sdk/WeexSDK/Sources/Module/WXPickerModule.m
+++ b/ios/sdk/WeexSDK/Sources/Module/WXPickerModule.m
@@ ... @@ @implementation WXPickerModule
 #pragma mark - private method
 - (void)resetPickerView
 {
-    if (self.picker)
-    {
+    if (self.picker) {
         [self.picker removeFromSuperview];
     }
-    if (self.pickerView)
-    {
+    if (self.pickerView) {
         [self.pickerView removeFromSuperview];
     }
 }
* [ios] fix code style
apache/incubator-weex
31b08adf525559d9ce46f25eab349584d2f621a9
2016-11-28T12:42:21Z
new file mode 100755
index 000000000000..c7579d2548e4
--- /dev/null
+++ b/test/functional/minchainwork.py
@@ ... @@
+#!/usr/bin/env python3
+# Copyright (c) 2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test logic for setting nMinimumChainWork on command line.
+
+Nodes don't consider themselves out of "initial block download" until
+their active chain has more work than nMinimumChainWork.
+
+Nodes don't download blocks from a peer unless the peer's best known block
+has more work than nMinimumChainWork.
+
+While in initial block download, nodes won't relay blocks to their peers, so
+test that this parameter functions as intended by verifying that block relay
+only succeeds past a given node once its nMinimumChainWork has been exceeded.
+"""
+
+import time
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import sync_blocks, connect_nodes, assert_equal
+
+# 2 hashes required per regtest block (with no difficulty adjustment)
+REGTEST_WORK_PER_BLOCK = 2
+
+class MinimumChainWorkTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.setup_clean_chain = True
+        self.num_nodes = 3
+        self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
+        self.node_min_work = [0, 101, 101]
+
+    def setup_network(self):
+        # This test relies on the chain setup being:
+        # node0 <- node1 <- node2
+        # Before leaving IBD, nodes prefer to download blocks from outbound
+        # peers, so ensure that we're mining on an outbound peer and testing
+        # block relay to inbound peers.
+        self.setup_nodes()
+        for i in range(self.num_nodes - 1):
+            connect_nodes(self.nodes[i + 1], i)
+
+    def run_test(self):
+        # Start building a chain on node0. node2 shouldn't be able to sync until node1's
+        # minchainwork is exceeded
+        starting_chain_work = REGTEST_WORK_PER_BLOCK  # Genesis block's work
+        self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
+
+        starting_blockcount = self.nodes[2].getblockcount()
+
+        num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
+        self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
+        hashes = self.nodes[0].generate(num_blocks_to_generate)
+
+        self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork'])
+
+        # Sleep a few seconds and verify that node2 didn't get any new blocks
+        # or headers. We sleep, rather than sync_blocks(node0, node1) because
+        # it's reasonable either way for node1 to get the blocks, or not get
+        # them (since they're below node1's minchainwork).
+        time.sleep(3)
+
+        self.log.info("Verifying node 2 has no more blocks than before")
+        self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
+        # Node2 shouldn't have any new headers yet, because node1 should not
+        # have relayed anything.
+        assert_equal(len(self.nodes[2].getchaintips()), 1)
+        assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)
+
+        assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash()
+        assert_equal(self.nodes[2].getblockcount(), starting_blockcount)
+
+        self.log.info("Generating one more block")
+        self.nodes[0].generate(1)
+
+        self.log.info("Verifying nodes are all synced")
+        self.sync_all()
+        self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
+
+if __name__ == '__main__':
+    MinimumChainWorkTest().main()
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ ... @@
     'bip65-cltv-p2p.py',
     'uptime.py',
     'resendwallettransactions.py',
+    'minchainwork.py',
 ]
 
 EXTENDED_SCRIPTS = [
[qa] Test nMinimumChainWork
bitcoin/bitcoin
eac64bb7a3b6aba747403b23b3b1d3609843f8db
2017-09-05T19:09:21Z
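The block count in the test above follows directly from its parameters: -minimumchainwork=0x65 is 101 units of work, the genesis block already contributes 2, and each generated regtest block adds 2 more. A quick check of that arithmetic:

REGTEST_WORK_PER_BLOCK = 2
node_min_work = 0x65                           # 101
starting_chain_work = REGTEST_WORK_PER_BLOCK   # genesis

blocks = int((node_min_work - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
print(blocks)                                  # 49 blocks generated
print(starting_chain_work + blocks * 2)        # 100 <= 101: node1 stays in IBD
print(starting_chain_work + (blocks + 1) * 2)  # 102 > 101: one more block lets all nodes sync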
--- a/tensorflow/python/framework/ops.py
+++ b/tensorflow/python/framework/ops.py
@@ ... @@ def get_all_collection_keys():
   return get_default_graph().get_all_collection_keys()
 
 
+name_scope_cache = {}
+
+
 # Named like a function for backwards compatibility with the
 # @tf_contextlib.contextmanager version, which was switched to a class to avoid
 # some object creation overhead.
@@ ... @@ def __enter__(self):
     if not self._name:
       scope_name = ""
     else:
-      if self._name[-1] == "/":
+      cache_key = self._name, self._old_name, self._default_name
+      if cache_key in name_scope_cache:
+        self._ctx.scope_name = name_scope_cache[cache_key]
+        return self._ctx.scope_name
+      elif self._name[-1] == "/":
         # A trailing slash breaks out of nested name scopes, indicating a
         # fully specified scope name, for compatibility with Graph.name_scope.
         scope_name = self._name
@@ ... @@ def __enter__(self):
         scope_name = (
             self._old_name + name_with_trailing_slash
             if self._old_name else name_with_trailing_slash)
+        name_scope_cache[cache_key] = scope_name
         self._ctx.scope_name = scope_name
         return scope_name
       else:
--- a/tensorflow/python/ops/math_grad.py
+++ b/tensorflow/python/ops/math_grad.py
@@ ... @@ def _MulGrad(op, grad):
   sx = array_ops.shape(x)
   sy = array_ops.shape(y)
   rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
-  # pylint: enable=protected-access
   x = math_ops.conj(x)
   y = math_ops.conj(y)
-  return (array_ops.reshape(math_ops.reduce_sum(grad * y, rx), sx),
-          array_ops.reshape(math_ops.reduce_sum(x * grad, ry), sy))
+  return (array_ops.reshape(
+      math_ops.reduce_sum(gen_math_ops._mul(grad, y), rx), sx),
+          array_ops.reshape(
+              math_ops.reduce_sum(gen_math_ops._mul(x, grad), ry), sy))
+  # pylint: enable=protected-access
 
 
 @ops.RegisterGradient("Div")
@@ ... @@ def _MatMulGrad(op, grad):
   t_b = op.get_attr("transpose_b")
   a = math_ops.conj(op.inputs[0])
   b = math_ops.conj(op.inputs[1])
+  # pylint: disable=protected-access
   if not t_a and not t_b:
-    grad_a = math_ops.matmul(grad, b, transpose_b=True)
-    grad_b = math_ops.matmul(a, grad, transpose_a=True)
+    grad_a = gen_math_ops._mat_mul(grad, b, transpose_b=True)
+    grad_b = gen_math_ops._mat_mul(a, grad, transpose_a=True)
   elif not t_a and t_b:
-    grad_a = math_ops.matmul(grad, b)
-    grad_b = math_ops.matmul(grad, a, transpose_a=True)
+    grad_a = gen_math_ops._mat_mul(grad, b)
+    grad_b = gen_math_ops._mat_mul(grad, a, transpose_a=True)
   elif t_a and not t_b:
-    grad_a = math_ops.matmul(b, grad, transpose_b=True)
-    grad_b = math_ops.matmul(a, grad)
+    grad_a = gen_math_ops._mat_mul(b, grad, transpose_b=True)
+    grad_b = gen_math_ops._mat_mul(a, grad)
   elif t_a and t_b:
-    grad_a = math_ops.matmul(b, grad, transpose_a=True, transpose_b=True)
-    grad_b = math_ops.matmul(grad, a, transpose_a=True, transpose_b=True)
+    grad_a = gen_math_ops._mat_mul(b, grad, transpose_a=True, transpose_b=True)
+    grad_b = gen_math_ops._mat_mul(grad, a, transpose_a=True, transpose_b=True)
+  # pylint: enable=protected-access
   return grad_a, grad_b
Minor eager-related performance improvements
tensorflow/tensorflow
142351b998a6471f26b5c9ba74d09b107cd96c68
2018-02-13T20:00:40Z
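The ops.py change above memoizes the derived scope name under the key (name, old_name, default_name), so repeated entries into the same eager name_scope skip the string manipulation. The caching pattern in isolation (a sketch; the real __enter__ also handles trailing slashes and name uniquification):

name_scope_cache = {}

def derive_scope_name(name, old_name, default_name=None):
    cache_key = (name, old_name, default_name)
    if cache_key in name_scope_cache:  # hit: skip the string work entirely
        return name_scope_cache[cache_key]
    name_with_trailing_slash = name + "/"
    scope_name = (old_name + name_with_trailing_slash
                  if old_name else name_with_trailing_slash)
    name_scope_cache[cache_key] = scope_name
    return scope_name

assert derive_scope_name("layer", "model/") == "model/layer/"
assert derive_scope_name("layer", "model/") == "model/layer/"  # served from cache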
--- a/tensorflow/contrib/data/__init__.py
+++ b/tensorflow/contrib/data/__init__.py
@@ ... @@
 @@choose_from_datasets
 @@dense_to_sparse_batch
 @@enumerate_dataset
+@@group_by_reducer
 @@group_by_window
 @@ignore_errors
 @@make_batched_features_dataset
@@ ... @@
 from tensorflow.contrib.data.python.ops.error_ops import ignore_errors
 from tensorflow.contrib.data.python.ops.get_single_element import get_single_element
 from tensorflow.contrib.data.python.ops.grouping import bucket_by_sequence_length
+from tensorflow.contrib.data.python.ops.grouping import group_by_reducer
 from tensorflow.contrib.data.python.ops.grouping import group_by_window
 from tensorflow.contrib.data.python.ops.interleave_ops import choose_from_datasets
 from tensorflow.contrib.data.python.ops.interleave_ops import parallel_interleave
[tf.data] Properly export `tf.contrib.data.group_by_reducer()`
tensorflow/tensorflow
6c08402e3a7d3e440d6913cb683f26d28514ad8d
2018-06-20T18:37:24Z
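A usage sketch of the newly exported symbol, assuming a TF 1.x build that still ships contrib and that Reducer is exported alongside group_by_reducer (as in the grouping module this import points at): sum the elements of each parity class.

import tensorflow as tf  # assumption: TF 1.x with tf.contrib available

# Reducer bundles per-group init/reduce/finalize functions.
reducer = tf.contrib.data.Reducer(
    init_func=lambda _: tf.constant(0, dtype=tf.int64),
    reduce_func=lambda state, value: state + value,
    finalize_func=lambda state: state)

dataset = tf.data.Dataset.range(10).apply(
    tf.contrib.data.group_by_reducer(
        key_func=lambda x: x % 2, reducer=reducer))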
--- a/tensorflow/tensorboard/TAG
+++ b/tensorflow/tensorboard/TAG
@@ -1 +1 @@
-44
+45
Autogenerated Change: Change TensorBoard TAG to 45
tensorflow/tensorflow
fd7a6e90e48afe58d0c8dbf1803133a2fea9ecf0
2017-01-27T19:30:48Z
--- a/tensorflow/lite/delegates/gpu/cl/kernels/BUILD
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/BUILD
@@ ... @@ cc_test(
     ],
 )
 
+cc_library(
+    name = "quantize_and_dequantize",
+    srcs = ["quantize_and_dequantize.cc"],
+    hdrs = ["quantize_and_dequantize.h"],
+    deps = [
+        ":flt_type",
+        ":gpu_operation",
+        ":util",
+        "//tensorflow/lite/delegates/gpu/cl:cl_context",
+        "//tensorflow/lite/delegates/gpu/cl:cl_kernel",
+        "//tensorflow/lite/delegates/gpu/cl:linear_storage",
+        "//tensorflow/lite/delegates/gpu/common:data_type",
+        "//tensorflow/lite/delegates/gpu/common:operations",
+        "//tensorflow/lite/delegates/gpu/common:status",
+        "//tensorflow/lite/delegates/gpu/common:tensor",
+        "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/types:variant",
+    ],
+)
+
+cc_test(
+    name = "quantize_and_dequantize_test",
+    srcs = ["quantize_and_dequantize_test.cc"],
+    linkstatic = True,
+    tags = tf_gpu_tests_tags() + [
+        "linux",
+        "local",
+    ],
+    deps = [
+        ":cl_test",
+        ":quantize_and_dequantize",
+        "//tensorflow/lite/delegates/gpu/cl:tensor",
+        "//tensorflow/lite/delegates/gpu/common:operations",
+        "//tensorflow/lite/delegates/gpu/common:status",
+        "//tensorflow/lite/kernels/internal:quantization_util",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
 cc_library(
     name = "relu",
     srcs = ["relu.cc"],
new file mode 100644
index 0000000000000..f7751fac6ffef
--- /dev/null
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/quantize_and_dequantize.cc
@@ ... @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/delegates/gpu/cl/kernels/quantize_and_dequantize.h"
+
+#include <string>
+
+#include "absl/strings/str_cat.h"
+#include "absl/types/variant.h"
+#include "tensorflow/lite/delegates/gpu/cl/kernels/util.h"
+#include "tensorflow/lite/delegates/gpu/common/tensor.h"
+
+namespace tflite {
+namespace gpu {
+namespace cl {
+
+QuantizeAndDequantize::QuantizeAndDequantize(
+    const OperationDef& definition, const QuantizeAndDequantizeAttributes& attr,
+    CalculationsPrecision scalar_precision)
+    : ElementwiseOperation(definition) {
+  min_ = FLT(scalar_precision, attr.min);
+  max_ = FLT(scalar_precision, attr.max);
+  scale_ = FLT(scalar_precision, attr.scale);
+}
+
+QuantizeAndDequantize::QuantizeAndDequantize(QuantizeAndDequantize&& operation)
+    : ElementwiseOperation(std::move(operation)),
+      min_(std::move(operation.min_)),
+      max_(std::move(operation.max_)),
+      scale_(std::move(operation.scale_)) {}
+
+QuantizeAndDequantize& QuantizeAndDequantize::operator=(
+    QuantizeAndDequantize&& operation) {
+  if (this != &operation) {
+    min_ = std::move(operation.min_);
+    max_ = std::move(operation.max_);
+    scale_ = std::move(operation.scale_);
+    ElementwiseOperation::operator=(std::move(operation));
+  }
+  return *this;
+}
+
+void QuantizeAndDequantize::SetLinkIndex(int index) {
+  min_.SetName(absl::StrCat("quantize_and_dequantize_min_", index));
+  max_.SetName(absl::StrCat("quantize_and_dequantize_max_", index));
+  scale_.SetName(absl::StrCat("quantize_and_dequantize_scale_", index));
+}
+
+std::string QuantizeAndDequantize::GetCoreCode(
+    const LinkingContext& context) const {
+  std::string scale_string, max_string, min_string;
+  if (!scale_.Active()) {
+    scale_string = "(FLT4)(1.0f)";
+  } else {
+    scale_string = absl::StrCat("(FLT4)(", scale_.GetName(), ")");
+  }
+  if (!max_.Active()) {
+    max_string = "(FLT4)(0.0f)";
+  } else {
+    max_string = absl::StrCat("(FLT4)(", max_.GetName(), ")");
+  }
+  if (!min_.Active()) {
+    min_string = "(FLT4)(0.0f)";
+  } else {
+    min_string = absl::StrCat("(FLT4)(", min_.GetName(), ")");
+  }
+  std::string clamped_value = absl::StrCat(
+      "min(", max_string, ", max(", min_string, ", ", context.var_name, "))");
+  std::string quantized_value = absl::StrCat(
+      "round((", clamped_value, " - ", min_string, ") / ", scale_string, ")");
+  std::string dequantized_value =
+      absl::StrCat(quantized_value, " * ", scale_string, " + ", min_string);
+
+  return absl::StrCat(context.var_name, " = ", dequantized_value, ";\n");
+}
+
+std::string QuantizeAndDequantize::GetArgsDeclaration() const {
+  return absl::StrCat(",\n", min_.GetDeclaration(), ",\n",
+                      max_.GetDeclaration(), ",\n",
+                      scale_.GetDeclaration());
+}
+
+Status QuantizeAndDequantize::BindArguments(CLKernel* kernel) {
+  RETURN_IF_ERROR(kernel->SetBytesAuto(min_));
+  RETURN_IF_ERROR(kernel->SetBytesAuto(max_));
+  RETURN_IF_ERROR(kernel->SetBytesAuto(scale_));
+  return OkStatus();
+}
+
+Status CreateQuantizeAndDequantize(const CreationContext& creation_context,
+                                   const OperationDef& definition,
+                                   const QuantizeAndDequantizeAttributes& attr,
+                                   QuantizeAndDequantize* result) {
+  const auto scalar_precision = creation_context.device->IsPowerVR()
+                                    ? CalculationsPrecision::F32
+                                    : definition.precision;
+  const bool is_fp16 = definition.precision == CalculationsPrecision::F16 ||
+                       definition.precision == CalculationsPrecision::F32_F16;
+  if (is_fp16 && attr.scale < 0.000062f) {
+    // The smallest positive normal number for Half-precision floating-point
+    // format is 2^-14 ~ 0.000062f. Therefore, if the scale is lesser than this
+    // number, we just reset it accordingly.
+    QuantizeAndDequantizeAttributes adjusted_attr = attr;
+    adjusted_attr.scale = 0.000062f;
+    *result =
+        QuantizeAndDequantize(definition, adjusted_attr, scalar_precision);
+  } else {
+    *result = QuantizeAndDequantize(definition, attr, scalar_precision);
+  }
+  result->SetLinkIndex(0);
+  return OkStatus();
+}
+
+}  // namespace cl
+}  // namespace gpu
+}  // namespace tflite
new file mode 100644
index 0000000000000..07fa8f2177397
--- /dev/null
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/quantize_and_dequantize.h
@@ ... @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_DELEGATES_GPU_CL_KERNELS_QUANTIZE_AND_DEQUANTIZE_H_
+#define TENSORFLOW_LITE_DELEGATES_GPU_CL_KERNELS_QUANTIZE_AND_DEQUANTIZE_H_
+
+#include <string>
+
+#include "tensorflow/lite/delegates/gpu/cl/cl_context.h"
+#include "tensorflow/lite/delegates/gpu/cl/cl_kernel.h"
+#include "tensorflow/lite/delegates/gpu/cl/kernels/flt_type.h"
+#include "tensorflow/lite/delegates/gpu/cl/kernels/gpu_operation.h"
+#include "tensorflow/lite/delegates/gpu/cl/linear_storage.h"
+#include "tensorflow/lite/delegates/gpu/common/data_type.h"
+#include "tensorflow/lite/delegates/gpu/common/operations.h"
+#include "tensorflow/lite/delegates/gpu/common/status.h"
+#include "tensorflow/lite/delegates/gpu/common/tensor.h"
+
+namespace tflite {
+namespace gpu {
+namespace cl {
+
+// Performs the operation: {Quantize, Dequantize} on floating-point data.
+// We need this operation to emulate the error introduced by quantization
+// on the GPU, which cannot represent int8 tensors.
+//
+// Implemented as:
+// qvalue = round((min(qmax, max(qmin, src_val)) - qmin) * (1/qscale) + 0.5)
+// dq_value = qvalue * qscale + qmin
+// Here, qmin, qmax & qscale refer to the quantization values as implemented in
+// TensorFlow Lite's 'FakeQuant' kernel. round(x + 0.5) ensures we round away
+// from zero.
+//
+// NOTE: We do not need to nudge min/max values in this op, since they would
+// already be adjusted while generating the quantized model.
+class QuantizeAndDequantize : public ElementwiseOperation {
+ public:
+  QuantizeAndDequantize() = default;
+  // Move only
+  QuantizeAndDequantize(QuantizeAndDequantize&& operation);
+  QuantizeAndDequantize& operator=(QuantizeAndDequantize&& operation);
+  QuantizeAndDequantize(const QuantizeAndDequantize&) = delete;
+  QuantizeAndDequantize& operator=(const QuantizeAndDequantize&) = delete;
+
+  void SetLinkIndex(int index) override;
+  std::string GetCoreCode(const LinkingContext& context) const override;
+  std::string GetArgsDeclaration() const override;
+  Status BindArguments(CLKernel* kernel) override;
+
+  friend Status CreateQuantizeAndDequantize(
+      const CreationContext& creation_context, const OperationDef& definition,
+      const QuantizeAndDequantizeAttributes& attr,
+      QuantizeAndDequantize* result);
+
+ private:
+  QuantizeAndDequantize(const OperationDef& definition,
+                        const QuantizeAndDequantizeAttributes& attr,
+                        CalculationsPrecision scalar_precision);
+
+  template <DataType T>
+  Status UploadParameters(const ::tflite::gpu::Tensor<Linear, T>& parameters,
+                          CLContext* context);
+
+  FLT min_;
+  FLT max_;
+  FLT scale_;
+};
+
+Status CreateQuantizeAndDequantize(const CreationContext& creation_context,
+                                   const OperationDef& definition,
+                                   const QuantizeAndDequantizeAttributes& attr,
+                                   QuantizeAndDequantize* result);
+
+template <DataType T>
+Status QuantizeAndDequantize::UploadParameters(
+    const ::tflite::gpu::Tensor<Linear, T>& parameters, CLContext* context) {
+  LinearStorageCreateInfo create_info;
+  create_info.storage_type =
+      DeduceLinearStorageType(definition_.GetPrimaryStorageType());
+  create_info.data_type = definition_.GetPrimaryDataType();
+  return OkStatus();
+}
+
+}  // namespace cl
+}  // namespace gpu
+}  // namespace tflite
+
+#endif  // TENSORFLOW_LITE_DELEGATES_GPU_CL_KERNELS_QUANTIZE_AND_DEQUANTIZE_H_
new file mode 100644
index 0000000000000..71d6d066b9b7b
--- /dev/null
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/quantize_and_dequantize_test.cc
@@ ... @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/delegates/gpu/cl/kernels/quantize_and_dequantize.h"
+
+#include <vector>
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
+#include "tensorflow/lite/delegates/gpu/common/operations.h"
+#include "tensorflow/lite/delegates/gpu/common/status.h"
+#include "tensorflow/lite/kernels/internal/quantization_util.h"
+
+using ::testing::FloatNear;
+using ::testing::Pointwise;
+
+namespace tflite {
+namespace gpu {
+namespace cl {
+namespace {
+
+TEST_F(OpenCLOperationTest, QuantAndDequant_Dim2Bits8) {
+  TensorFloat32 src_tensor;
+  src_tensor.shape = BHWC(1, 3, 2, 1);
+  src_tensor.data = {0.0f, 1.0f, 0.25f, 0.50f, 0.4444444f, 0.00001f};
+
+  // Unlike TFLite's FakeQuant kernel, we assume that the incoming values are
+  // pre-nudged, since this should be done during model conversion.
+  const int num_bits = 8;
+  const int quant_min = 0;
+  const int quant_max = (1 << num_bits) - 1;
+  QuantizeAndDequantizeAttributes attr;
+  NudgeQuantizationRange(/**original_min**/ 0.0, /**original_max**/ 1.0,
+                         quant_min, quant_max, &attr.min, &attr.max,
+                         &attr.scale);
+
+  for (auto storage : env_.GetSupportedStorages()) {
+    for (auto precision : env_.GetSupportedPrecisions()) {
+      const float eps = precision == CalculationsPrecision::F32 ? 1e-6f : 1e-2f;
+      OperationDef op_def;
+      op_def.precision = precision;
+      auto data_type = DeduceDataTypeFromPrecision(precision);
+      op_def.src_tensors.push_back({data_type, storage, Layout::HWC});
+      op_def.dst_tensors.push_back({data_type, storage, Layout::HWC});
+      TensorFloat32 dst_tensor;
+      QuantizeAndDequantize operation;
+      ASSERT_OK(CreateQuantizeAndDequantize(creation_context_, op_def, attr,
+                                            &operation));
+      ASSERT_OK(ExecuteGPUOperation(src_tensor, creation_context_, &operation,
+                                    BHWC(1, 3, 2, 1), &dst_tensor));
+      EXPECT_THAT(dst_tensor.data,
+                  Pointwise(FloatNear(eps), {0.0f, 1.0f, 0.25098f, 0.498039f,
+                                             0.443137f, 0.0f}));
+    }
+  }
+}
+
+TEST_F(OpenCLOperationTest, QuantAndDequant_Dim3Bits8_NegativeRange) {
+  TensorFloat32 src_tensor;
+  src_tensor.shape = BHWC(1, 3, 1, 2);
+  src_tensor.data = {0.0f, -0.9f, 0.25f, 0.50f, 0.4444444f, -0.00001f};
+
+  // Unlike TFLite's FakeQuant kernel, we assume that the incoming values are
+  // pre-nudged, since this should be done during model conversion.
+  const int num_bits = 8;
+  const int quant_min = 0;
+  const int quant_max = (1 << num_bits) - 1;
+  QuantizeAndDequantizeAttributes attr;
+  NudgeQuantizationRange(/**original_min**/ -0.9, /**original_max**/ 0.9,
+                         quant_min, quant_max, &attr.min, &attr.max,
+                         &attr.scale);
+
+  for (auto storage : env_.GetSupportedStorages()) {
+    for (auto precision : env_.GetSupportedPrecisions()) {
+      const float eps = precision == CalculationsPrecision::F32 ? 1e-6f : 1e-2f;
+      OperationDef op_def;
+      op_def.precision = precision;
+      auto data_type = DeduceDataTypeFromPrecision(precision);
+      op_def.src_tensors.push_back({data_type, storage, Layout::HWC});
+      op_def.dst_tensors.push_back({data_type, storage, Layout::HWC});
+      TensorFloat32 dst_tensor;
+      QuantizeAndDequantize operation;
+      ASSERT_OK(CreateQuantizeAndDequantize(creation_context_, op_def, attr,
+                                            &operation));
+      ASSERT_OK(ExecuteGPUOperation(src_tensor, creation_context_, &operation,
+                                    BHWC(1, 3, 1, 2), &dst_tensor));
+      EXPECT_THAT(dst_tensor.data,
+                  Pointwise(FloatNear(eps), {0.0f, -0.896471f, 0.247059f,
+                                             0.501176f, 0.444706f, 0.0f}));
+    }
+  }
+}
+
+TEST_F(OpenCLOperationTest, QuantAndDequant_Dim3Bits16) {
+  TensorFloat32 src_tensor;
+  src_tensor.shape = BHWC(1, 3, 1, 2);
+  src_tensor.data = {0.0f, 1.0f, 0.25f, 0.50f, 0.4444444f, 0.00001f};
+
+  // Unlike TFLite's FakeQuant kernel, we assume that the incoming values are
+  // pre-nudged, since this should be done during model conversion.
+  const int num_bits = 16;
+  const int quant_min = 0;
+  const int quant_max = (1 << num_bits) - 1;
+  QuantizeAndDequantizeAttributes attr;
+  NudgeQuantizationRange(/**original_min**/ 0.0, /**original_max**/ 1.0,
+                         quant_min, quant_max, &attr.min, &attr.max,
+                         &attr.scale);
+
+  for (auto storage : env_.GetSupportedStorages()) {
+    for (auto precision : env_.GetSupportedPrecisions()) {
+      const float eps = precision == CalculationsPrecision::F32 ? 1e-6f : 1e-3f;
+      OperationDef op_def;
+      op_def.precision = precision;
+      auto data_type = DeduceDataTypeFromPrecision(precision);
+      op_def.src_tensors.push_back({data_type, storage, Layout::HWC});
+      op_def.dst_tensors.push_back({data_type, storage, Layout::HWC});
+      TensorFloat32 dst_tensor;
+      QuantizeAndDequantize operation;
+      ASSERT_OK(CreateQuantizeAndDequantize(creation_context_, op_def, attr,
+                                            &operation));
+      ASSERT_OK(ExecuteGPUOperation(src_tensor, creation_context_, &operation,
+                                    BHWC(1, 3, 1, 2), &dst_tensor));
+      EXPECT_THAT(dst_tensor.data,
+                  Pointwise(FloatNear(eps), {0.0f, 1.0f, 0.250004f, 0.500008f,
+                                             0.44445f, 1.5259e-05f}));
+    }
+  }
+}
+
+TEST_F(OpenCLOperationTest, QuantAndDequant_Dim2Bits16_NegativeRange) {
+  TensorFloat32 src_tensor;
+  src_tensor.shape = BHWC(1, 3, 2, 1);
+  src_tensor.data = {0.0f, -0.9f, 0.25f, 0.50f, 0.4444444f, -0.00001f};
+
+  // Unlike TFLite's FakeQuant kernel, we assume that the incoming values are
+  // pre-nudged, since this should be done during model conversion.
+  const int num_bits = 16;
+  const int quant_min = 0;
+  const int quant_max = (1 << num_bits) - 1;
+  QuantizeAndDequantizeAttributes attr;
+  NudgeQuantizationRange(/**original_min**/ -0.9, /**original_max**/ 0.9,
+                         quant_min, quant_max, &attr.min, &attr.max,
+                         &attr.scale);
+
+  for (auto storage : env_.GetSupportedStorages()) {
+    for (auto precision : env_.GetSupportedPrecisions()) {
+      const float eps = precision == CalculationsPrecision::F32 ? 1e-6f : 1e-2f;
+      OperationDef op_def;
+      op_def.precision = precision;
+      auto data_type = DeduceDataTypeFromPrecision(precision);
+      op_def.src_tensors.push_back({data_type, storage, Layout::HWC});
+      op_def.dst_tensors.push_back({data_type, storage, Layout::HWC});
+      TensorFloat32 dst_tensor;
+      QuantizeAndDequantize operation;
+      ASSERT_OK(CreateQuantizeAndDequantize(creation_context_, op_def, attr,
+                                            &operation));
+      ASSERT_OK(ExecuteGPUOperation(src_tensor, creation_context_, &operation,
+                                    BHWC(1, 3, 2, 1), &dst_tensor));
+      EXPECT_THAT(dst_tensor.data,
+                  Pointwise(FloatNear(eps), {0.0f, -0.900014f, 0.249998f,
+                                             0.499995f, 0.444431f, 0.0f}));
+    }
+  }
+}
+
+}  // namespace
+}  // namespace cl
+}  // namespace gpu
+}  // namespace tflite
--- a/tensorflow/lite/delegates/gpu/cl/selectors/BUILD
+++ b/tensorflow/lite/delegates/gpu/cl/selectors/BUILD
@@ ... @@ cc_library(
         "//tensorflow/lite/delegates/gpu/cl/kernels:padding",
         "//tensorflow/lite/delegates/gpu/cl/kernels:pooling",
         "//tensorflow/lite/delegates/gpu/cl/kernels:prelu",
+        "//tensorflow/lite/delegates/gpu/cl/kernels:quantize_and_dequantize",
         "//tensorflow/lite/delegates/gpu/cl/kernels:relu",
         "//tensorflow/lite/delegates/gpu/cl/kernels:reshape",
         "//tensorflow/lite/delegates/gpu/cl/kernels:reshapex4",
--- a/tensorflow/lite/delegates/gpu/cl/selectors/operation_selector.cc
+++ b/tensorflow/lite/delegates/gpu/cl/selectors/operation_selector.cc
@@ ... @@ Status GPUOperationFromNode(const CreationContext& creation_context,
       auto attr = absl::any_cast<PReLUAttributes>(node.operation.attributes);
       return SelectPReLU(attr, creation_context, op_def, gpu_op);
     }
+    case OperationType::QUANTIZE_AND_DEQUANTIZE: {
+      auto attr = absl::any_cast<QuantizeAndDequantizeAttributes>(
+          node.operation.attributes);
+      return SelectQuantizeAndDequantize(attr, creation_context, op_def,
+                                         gpu_op);
+    }
     case OperationType::RELU: {
       auto attr = absl::any_cast<ReLUAttributes>(node.operation.attributes);
       SelectReLU(creation_context, attr, op_def, gpu_op);
--- a/tensorflow/lite/delegates/gpu/cl/selectors/simple_selectors.cc
+++ b/tensorflow/lite/delegates/gpu/cl/selectors/simple_selectors.cc
@@ ... @@ limitations under the License.
 #include "tensorflow/lite/delegates/gpu/cl/kernels/padding.h"
 #include "tensorflow/lite/delegates/gpu/cl/kernels/pooling.h"
 #include "tensorflow/lite/delegates/gpu/cl/kernels/prelu.h"
+#include "tensorflow/lite/delegates/gpu/cl/kernels/quantize_and_dequantize.h"
 #include "tensorflow/lite/delegates/gpu/cl/kernels/relu.h"
 #include "tensorflow/lite/delegates/gpu/cl/kernels/reshape.h"
 #include "tensorflow/lite/delegates/gpu/cl/kernels/reshapex4.h"
@@ ... @@ Status SelectWinograd36To4x4(
   return OkStatus();
 }
 
+Status SelectQuantizeAndDequantize(const QuantizeAndDequantizeAttributes& attr,
+                                   const CreationContext& creation_context,
+                                   const OperationDef& op_def,
+                                   std::unique_ptr<GPUOperation>* ptr) {
+  QuantizeAndDequantize operation;
+  RETURN_IF_ERROR(
+      CreateQuantizeAndDequantize(creation_context, op_def, attr, &operation));
+  *ptr = absl::make_unique<QuantizeAndDequantize>(std::move(operation));
+  return OkStatus();
+}
+
 }  // namespace cl
 }  // namespace gpu
 }  // namespace tflite
--- a/tensorflow/lite/delegates/gpu/cl/selectors/simple_selectors.h
+++ b/tensorflow/lite/delegates/gpu/cl/selectors/simple_selectors.h
@@ ... @@ Status SelectWinograd36To4x4(
     const ::tflite::gpu::Tensor<Linear, DataType::FLOAT32>& biases,
     std::unique_ptr<GPUOperation>* ptr);
 
+Status SelectQuantizeAndDequantize(const QuantizeAndDequantizeAttributes& attr,
+                                   const CreationContext& creation_context,
+                                   const OperationDef& op_def,
+                                   std::unique_ptr<GPUOperation>* ptr);
+
 }  // namespace cl
 }  // namespace gpu
 }  // namespace tflite
--- a/tensorflow/lite/delegates/gpu/common/operations.cc
+++ b/tensorflow/lite/delegates/gpu/common/operations.cc
@@ ... @@ std::string ToString(enum OperationType op) {
       return "pow";
     case OperationType::PRELU:
       return "prelu";
+    case OperationType::QUANTIZE_AND_DEQUANTIZE:
+      return "quantize_and_dequantize";
     case OperationType::RELU:
       return "relu";
     case OperationType::RESHAPE:
@@ ... @@ OperationType OperationTypeFromString(const std::string& name) {
       {"pooling_2d", OperationType::POOLING_2D},
       {"pow", OperationType::POW},
       {"prelu", OperationType::PRELU},
+      {"quantize_and_dequantize", OperationType::QUANTIZE_AND_DEQUANTIZE},
       {"relu", OperationType::RELU},
       {"resize", OperationType::RESIZE},
       {"reshape", OperationType::RESHAPE},
--- a/tensorflow/lite/delegates/gpu/common/operations.h
+++ b/tensorflow/lite/delegates/gpu/common/operations.h
@@ ... @@ enum class OperationType {
   POOLING_2D,
   POW,
   PRELU,
+  // Used to accurately run inference on quantized models.
+  QUANTIZE_AND_DEQUANTIZE,
   RELU,
   RESHAPE,
   RESIZE,
@@ ... @@ struct SpaceToDepthAttributes {
   int block_size;
 };
 
+// These help perform a combination of Quantize & Dequantize to adjust float
+// values like quantized inference would.
+struct QuantizeAndDequantizeAttributes {
+  float min = 0;
+  float max = 0;
+  float scale = 0;
+};
+
 }  // namespace gpu
 }  // namespace tflite
--- a/tensorflow/lite/delegates/gpu/gl/kernels/BUILD
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/BUILD
@@ ... @@ cc_test(
     ],
 )
 
+cc_library(
+    name = "quantize_and_dequantize",
+    srcs = ["quantize_and_dequantize.cc"],
+    hdrs = ["quantize_and_dequantize.h"],
+    deps = [
+        "//tensorflow/lite/delegates/gpu/common:convert",
+        "//tensorflow/lite/delegates/gpu/common:data_type",
+        "//tensorflow/lite/delegates/gpu/common:operations",
+        "//tensorflow/lite/delegates/gpu/common:shape",
+        "//tensorflow/lite/delegates/gpu/common:status",
+        "//tensorflow/lite/delegates/gpu/common:types",
+        "//tensorflow/lite/delegates/gpu/gl:node_shader",
+        "@com_google_absl//absl/memory",
+    ],
+)
+
+cc_test(
+    name = "quantize_and_dequantize_test",
+    srcs = ["quantize_and_dequantize_test.cc"],
+    tags = tf_gpu_tests_tags() + [
+        "notap",
+        "tflite_not_portable_ios",
+    ],
+    deps = [
+        ":quantize_and_dequantize",
+        ":test_util",
+        "//tensorflow/lite/delegates/gpu/common:operations",
+        "//tensorflow/lite/kernels/internal:quantization_util",
+        "@com_google_googletest//:gtest",
+    ],
+)
+
 cc_library(
     name = "relu",
     srcs = ["relu.cc"],
@@ ... @@ TFLITE_GPU_BINARY_RELEASE_OPERATORS = [
     "pad",
     "pooling",
     "prelu",
+    "quantize_and_dequantize",
     "relu",
     "mean",
     "reshape",
new file mode 100644
index 0000000000000..3f21124aee9e4
--- /dev/null
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/quantize_and_dequantize.cc
@@ ... @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/delegates/gpu/gl/kernels/quantize_and_dequantize.h"
+
+#include <memory>
+#include <string>
+
+#include "absl/memory/memory.h"
+#include "tensorflow/lite/delegates/gpu/common/data_type.h"
+#include "tensorflow/lite/delegates/gpu/common/shape.h"
+#include "tensorflow/lite/delegates/gpu/common/status.h"
+#include "tensorflow/lite/delegates/gpu/common/types.h"
+
+namespace tflite {
+namespace gpu {
+namespace gl {
+namespace {
+
+class QuantizeAndDequantize : public NodeShader {
+ public:
+  Status GenerateCode(const GenerationContext& ctx,
+                      GeneratedCode* generated_code) const final {
+    std::string code;
+    // Constants
+    code += "vec4 scale = vec4($quant_scale$);";
+    code += "vec4 min_bound = vec4($quant_min$);";
+    code += "vec4 max_bound = vec4($quant_max$);";
+    // Quantize
+    code += "value_0 = clamp(value_0, min_bound, max_bound);";
+    code += "value_0 = (value_0 - min_bound) / scale;";
+    code += "value_0 = floor(value_0 + vec4(0.5));";
+    // Dequantize
+    code += "value_0 = value_0 * scale + min_bound;";
+
+    auto attr = absl::any_cast<const QuantizeAndDequantizeAttributes&>(
+        ctx.node->operation.attributes);
+    *generated_code = {
+        /*parameters=*/{{"quant_min", attr.min},
+                        {"quant_max", attr.max},
+                        {"quant_scale", attr.scale}},
+        /*objects=*/{},
+        /*shared_variables=*/{},
+        /*workload=*/uint3(),
+        /*workgroup=*/uint3(),
+        /*source_code=*/code,
+        /*input=*/IOStructure::AUTO,
+        /*output=*/IOStructure::AUTO,
+    };
+    return OkStatus();
+  }
+};
+
+}  // namespace
+
+std::unique_ptr<NodeShader> NewQuantizeAndDequantizeNodeShader() {
+  return absl::make_unique<QuantizeAndDequantize>();
+}
+
+}  // namespace gl
+}  // namespace gpu
+}  // namespace tflite
new file mode 100644
index 0000000000000..1fa6ad918c400
--- /dev/null
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/quantize_and_dequantize.h
@@ ... @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_QUANTIZE_AND_DEQUANTIZE_H_
+#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_QUANTIZE_AND_DEQUANTIZE_H_
+
+#include <memory>
+
+#include "tensorflow/lite/delegates/gpu/common/operations.h"
+#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
+
+namespace tflite {
+namespace gpu {
+namespace gl {
+
+// Performs the operation: {Quantize, Dequantize} on floating-point data.
+// We need this operation to emulate the error introduced by quantization
+// on the GPU, which cannot represent int8 tensors.
+//
+// Implemented as:
+// qvalue = round((min(qmax, max(qmin, src_val)) - qmin) * (1/qscale) + 0.5)
+// dq_value = qvalue * qscale + qmin
+// Here, qmin, qmax & qscale refer to the quantization values as implemented in
+// TensorFlow Lite's 'FakeQuant' kernel. round(x + 0.5) ensures we round away
+// from zero.
+//
+// NOTE: We do not need to nudge min/max values in this op, since they would
+// already be adjusted while generating the quantized model.
+std::unique_ptr<NodeShader> NewQuantizeAndDequantizeNodeShader();
+
+}  // namespace gl
+}  // namespace gpu
+}  // namespace tflite
+
+#endif  // TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_QUANTIZE_AND_DEQUANTIZE_H_
new file mode 100644
index 0000000000000..916f916617559
--- /dev/null
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/quantize_and_dequantize_test.cc
@@ ... @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/delegates/gpu/gl/kernels/quantize_and_dequantize.h"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include "tensorflow/lite/delegates/gpu/common/operations.h"
+#include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h"
+#include "tensorflow/lite/kernels/internal/quantization_util.h"
+
+using ::testing::FloatNear;
+using ::testing::Pointwise;
+
+namespace tflite {
+namespace gpu {
+namespace gl {
+namespace {
+
+TEST(QuantizeAndDequantizeTest, Dim2Bits8) {
+  TensorRef<BHWC> input;
+  input.type = DataType::FLOAT32;
+  input.ref = 0;
+  input.shape = BHWC(1, 3, 2, 1);
+
+  // Unlike TFLite's FakeQuant kernel, we assume that the incoming values are
+  // pre-nudged, since this should be done during model conversion.
+  const int num_bits = 8;
+  const int quant_min = 0;
+  const int quant_max = (1 << num_bits) - 1;
+  QuantizeAndDequantizeAttributes attr;
+  NudgeQuantizationRange(/**original_min**/ 0.0, /**original_max**/ 1.0,
+                         quant_min, quant_max, &attr.min, &attr.max,
+                         &attr.scale);
+
+  TensorRef<BHWC> output;
+  output.type = DataType::FLOAT32;
+  output.ref = 1;
+  output.shape = BHWC(1, 3, 2, 1);
+
+  SingleOpModel model({ToString(OperationType::QUANTIZE_AND_DEQUANTIZE), attr},
+                      {input}, {output});
+  ASSERT_TRUE(
+      model.PopulateTensor(0, {0.0, 1.0, 0.25, 0.50, 0.4444444, 0.00001}));
+  ASSERT_OK(model.Invoke(*NewQuantizeAndDequantizeNodeShader()));
+  EXPECT_THAT(model.GetOutput(0),
+              Pointwise(FloatNear(1e-6),
+                        {0.0f, 1.0f, 0.25098f, 0.498039f, 0.443137f, 0.0f}));
+}
+
+TEST(QuantizeAndDequantizeTest, Dim3Bits8_NegativeRange) {
+  TensorRef<BHWC> input;
+  input.type = DataType::FLOAT32;
+  input.ref = 0;
+  input.shape = BHWC(1, 3, 1, 2);
+
+  // Unlike TFLite's FakeQuant kernel, we assume that the incoming values are
+  // pre-nudged, since this should be done during model conversion.
+  const int num_bits = 8;
+  const int quant_min = 0;
+  const int quant_max = (1 << num_bits) - 1;
+  QuantizeAndDequantizeAttributes attr;
+  NudgeQuantizationRange(/**original_min**/ -0.9, /**original_max**/ 0.9,
+                         quant_min, quant_max, &attr.min, &attr.max,
+                         &attr.scale);
+
+  TensorRef<BHWC> output;
+  output.type = DataType::FLOAT32;
+  output.ref = 1;
+  output.shape = BHWC(1, 3, 1, 2);
+
+  SingleOpModel model({ToString(OperationType::QUANTIZE_AND_DEQUANTIZE), attr},
+                      {input}, {output});
+  ASSERT_TRUE(
+      model.PopulateTensor(0, {0.0, -0.9, 0.25, 0.50, 0.4444444, -0.00001}));
+  ASSERT_OK(model.Invoke(*NewQuantizeAndDequantizeNodeShader()));
+  EXPECT_THAT(model.GetOutput(0),
+              Pointwise(FloatNear(1e-6), {0.0f, -0.896471f, 0.247059f,
+                                          0.501176f, 0.444706f, 0.0f}));
+}
+
+TEST(QuantizeAndDequantizeTest, Dim3Bits16) {
+  TensorRef<BHWC> input;
+  input.type = DataType::FLOAT32;
+  input.ref = 0;
+  input.shape = BHWC(1, 3, 1, 2);
+
+  // Unlike TFLite's FakeQuant kernel, we assume that the incoming values are
+  // pre-nudged, since this should be done during model conversion.
+  const int num_bits = 16;
+  const int quant_min = 0;
+  const int quant_max = (1 << num_bits) - 1;
+  QuantizeAndDequantizeAttributes attr;
+  NudgeQuantizationRange(/**original_min**/ 0.0, /**original_max**/ 1.0,
+                         quant_min, quant_max, &attr.min, &attr.max,
+                         &attr.scale);
+
+  TensorRef<BHWC> output;
+  output.type = DataType::FLOAT32;
+  output.ref = 1;
+  output.shape = BHWC(1, 3, 1, 2);
+
+  SingleOpModel model({ToString(OperationType::QUANTIZE_AND_DEQUANTIZE), attr},
+                      {input}, {output});
+  ASSERT_TRUE(
+      model.PopulateTensor(0, {0.0, 1.0, 0.25, 0.50, 0.4444444, 0.00001}));
+  ASSERT_OK(model.Invoke(*NewQuantizeAndDequantizeNodeShader()));
+  EXPECT_THAT(model.GetOutput(0),
+              Pointwise(FloatNear(1e-6), {0.0f, 1.0f, 0.250004f, 0.500008f,
+                                          0.44445f, 1.5259e-05f}));
+}
+
+TEST(QuantizeAndDequantizeTest, Dim2Bits16_NegativeRange) {
+  TensorRef<BHWC> input;
+  input.type = DataType::FLOAT32;
+  input.ref = 0;
+  input.shape = BHWC(1, 3, 2, 1);
+
+  // Unlike TFLite's FakeQuant kernel, we assume that the incoming values are
+  // pre-nudged, since this should be done during model conversion.
+  const int num_bits = 16;
+  const int quant_min = 0;
+  const int quant_max = (1 << num_bits) - 1;
+  QuantizeAndDequantizeAttributes attr;
+  NudgeQuantizationRange(/**original_min**/ -0.9, /**original_max**/ 0.9,
+                         quant_min, quant_max, &attr.min, &attr.max,
+                         &attr.scale);
+
+  TensorRef<BHWC> output;
+  output.type = DataType::FLOAT32;
+  output.ref = 1;
+  output.shape = BHWC(1, 3, 2, 1);
+
+  SingleOpModel model({ToString(OperationType::QUANTIZE_AND_DEQUANTIZE), attr},
+                      {input}, {output});
+  ASSERT_TRUE(
+      model.PopulateTensor(0, {0.0, -0.9, 0.25, 0.50, 0.4444444, -0.00001}));
+  ASSERT_OK(model.Invoke(*NewQuantizeAndDequantizeNodeShader()));
+  EXPECT_THAT(model.GetOutput(0),
+              Pointwise(FloatNear(1e-6), {0.0f, -0.900014f, 0.249998f,
+                                          0.499995f, 0.444431f, 0.0f}));
+}
+
+}  // namespace
+}  // namespace gl
+}  // namespace gpu
+}  // namespace tflite
--- a/tensorflow/lite/delegates/gpu/gl/kernels/registry.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/registry.cc
@@ ... @@ limitations under the License.
 #include "tensorflow/lite/delegates/gpu/gl/kernels/pad.h"
 #include "tensorflow/lite/delegates/gpu/gl/kernels/pooling.h"
 #include "tensorflow/lite/delegates/gpu/gl/kernels/prelu.h"
+#include "tensorflow/lite/delegates/gpu/gl/kernels/quantize_and_dequantize.h"
 #include "tensorflow/lite/delegates/gpu/gl/kernels/relu.h"
 #include "tensorflow/lite/delegates/gpu/gl/kernels/reshape.h"
 #include "tensorflow/lite/delegates/gpu/gl/kernels/resize.h"
@@ ... @@ class Registry : public NodeShader {
     insert_op(Type::PAD, NewPadNodeShader);
     insert_op(Type::POOLING_2D, NewPoolingNodeShader);
     insert_op(Type::PRELU, NewPReLUNodeShader);
+    insert_op(Type::QUANTIZE_AND_DEQUANTIZE,
+              NewQuantizeAndDequantizeNodeShader);
     insert_op(Type::RELU, NewReLUNodeShader);
     insert_op(Type::RESIZE, NewResizeNodeShader);
     insert_op(Type::RESHAPE, NewReshapeNodeShader);
--- a/tensorflow/lite/delegates/gpu/metal/api.cc
+++ b/tensorflow/lite/delegates/gpu/metal/api.cc
@@ ... @@ Status RegisterPrimaryOps(const GraphFloat32& graph, const Node* node,
     case OperationType::BATCH_TO_SPACE:
     case OperationType::CONST:
     case OperationType::LSTM:
+    case OperationType::QUANTIZE_AND_DEQUANTIZE:
    case OperationType::SPACE_TO_BATCH:
     case OperationType::TRANSPOSE:
     case OperationType::UNKNOWN:
Adds QuantizeAndDequantize kernel to OpenGL & OpenCL backends . This is not a TFLite op , but will be used to support inference on quantized models with future CLs .
tensorflow/tensorflow
e61ff10d8b7ae8c844954955d73ced412152781d
2020-03-16T20:46:58Z
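The header comment in the diff above pins the math down exactly, so it can be sanity-checked outside the shader. Below is a minimal NumPy sketch of the op, not the shipped GLSL shader; it assumes qmin, qmax, and qscale are the pre-nudged values that NudgeQuantizationRange produces in the tests, and it implements round-away-from-zero as floor(x + 0.5), as the comment describes.

import numpy as np

def quantize_and_dequantize(x, qmin, qmax, qscale):
    # Clamp into the representable range, quantize, then map back to float.
    clamped = np.minimum(qmax, np.maximum(qmin, x))
    qvalue = np.floor((clamped - qmin) / qscale + 0.5)  # round away from zero
    return qvalue * qscale + qmin

# 8-bit range [0, 1] as in the Dim2Bits8 test; the scale is roughly 1/255.
x = np.array([0.25, 0.4444444, 0.00001])
print(quantize_and_dequantize(x, 0.0, 1.0, 1.0 / 255.0))
# -> approximately [0.25098, 0.443137, 0.0], matching the expected outputs

Inputs that land exactly on a rounding tie (0.50 quantizes to 127.5 at 8 bits) are sensitive to float32 versus float64 arithmetic, so they are left out of this check.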
new file mode 100644 <nl> index 00000000000 . . f9af370efc3 <nl> mmm / dev / null <nl> ppp b / hphp / tools / benchy / README <nl> <nl> + Overview <nl> + benchy . py is a harness for building , running benchmarks , processing benchmark <nl> + measurements , and displaying results for HHVM . The various pieces are : <nl> + <nl> + benchy . py : The main interaction point with the tool . It coordinates all the <nl> + other phases of the process ( e . g . checking out branches , building , running <nl> + the harness , and processing the results ) . <nl> + <nl> + benchy_harness . py : The main benchmark harness . It is responsible for actually <nl> + running HHVM on the specified benchmarks . <nl> + <nl> + benchy_config . py : The central config file for all of the other benchy scripts . <nl> + <nl> + any_mean . py : Responsible for consuming the raw benchmark measurements and <nl> + aggregating them into means and confidence intervals . <nl> + <nl> + confidence_interval . py : Generic library for calculating mean confidence <nl> + intervals . <nl> + <nl> + significance . py : Parses benchmark measurement results , comparing measurements <nl> + for significant changes , and printing the results . <nl> + <nl> + table . py : Builds and prints nicely formatted tables . <nl> + <nl> + dot - benchy : This is an example of the file that should be added as ~ / . benchy . <nl> + It includes important settings that benchy needs to know to run properly . <nl> + <nl> + tab - complete . sh : This is a shell script that can be added to the user ' s shell <nl> + init file ( e . g . . bashrc ) that will allow for tab completion of branch names . <nl> + <nl> + <nl> + Settings <nl> + There are a few settings that are useful to know to effectively use benchy . py . <nl> + Each benchmark runs in its own isolated VM . The number of times benchy runs a <nl> + benchmark is determined by the inner , outer , and warmup settings . <nl> + <nl> + inner : The number of benchmark iterations run in a single invocation of the VM . <nl> + outer : The number of VM invocations for each benchmark . <nl> + warmup : Additional inner iterations that run at the beginning of a VM <nl> + invocation whose results are ignored in the final results . <nl> + <nl> + So , e . g . , if one were to run the following invocation of benchy : <nl> + <nl> + hphp / tools / benchy / benchy . py - - inner 3 - - outer 5 - - warmup 2 master <nl> + <nl> + Then each benchmark would be run ( 3 + 2 ) * 5 = 25 times , but only 3 * 5 = 15 of <nl> + those measurements would be used for final computation of the mean and <nl> + confidence interval . <nl> + <nl> + Additionally , subsets of the total set of benchmarks can be run using the <nl> + ' - - suite ' and ' - - benchmark ' options . Each of these options is followed by a <nl> + regular expression . Any suite or benchmark that matches one of these regular <nl> + expressions will be run . So , if we only wanted to run the ' Splay ' benchmark , <nl> + we would use the following command : <nl> + <nl> + hphp / tools / benchy / benchy . py - - benchmark Splay master <nl> + <nl> + Or if we only wanted to run the php - octane suite , we would use the following : <nl> + <nl> + hphp / tools / benchy / benchy . py - - suite php - octane master <nl> + <nl> + <nl> + For other settings , run : <nl> + <nl> + hphp / tools / benchy / benchy . py - h <nl> new file mode 100755 <nl> index 00000000000 . . 1a202503ea9 <nl> mmm / dev / null <nl> ppp b / hphp / tools / benchy / any_mean . py <nl> <nl> + " " " Computes averages and confidence intervals of labeled data .
" " " <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + from __future__ import unicode_literals <nl> + from confidence_interval import mean_confidence_interval , arith_mean <nl> + <nl> + import argparse <nl> + import math <nl> + import re <nl> + import sys <nl> + <nl> + MEASUREMENT_REGEX = re . compile ( r ' ( . + ) : \ s * ( . + ) ' ) <nl> + <nl> + <nl> + def parse_measurements ( in_file ) : <nl> + " " " Parses a set of labeled measurements and aggregates like - labeled <nl> + measurements . <nl> + <nl> + " " " <nl> + categories = { } <nl> + for line in in_file : <nl> + result = MEASUREMENT_REGEX . match ( line ) <nl> + if result is None : <nl> + continue <nl> + <nl> + lhs = str ( result . group ( 1 ) ) <nl> + rhs = str ( result . group ( 2 ) ) <nl> + <nl> + try : <nl> + rhs = float ( rhs ) <nl> + except ValueError : <nl> + continue <nl> + <nl> + if lhs not in categories : <nl> + categories [ lhs ] = [ ] <nl> + categories [ lhs ] . append ( rhs ) <nl> + return categories <nl> + <nl> + <nl> + def find_widest_key ( categories ) : <nl> + " " " Returns width of widest key for formatting . <nl> + <nl> + " " " <nl> + widest_key = 0 <nl> + for key in categories : <nl> + if len ( key ) > widest_key : <nl> + widest_key = len ( key ) <nl> + return widest_key <nl> + <nl> + <nl> + def arithmetic_mean ( samples ) : <nl> + " " " Computes the arithmetic mean of a set of samples . <nl> + <nl> + " " " <nl> + return float ( sum ( samples ) ) / float ( len ( samples ) ) <nl> + <nl> + <nl> + def geometric_mean ( samples ) : <nl> + " " " Computes the geometric mean of a set of samples . <nl> + <nl> + " " " <nl> + return math . exp ( arithmetic_mean ( [ math . log ( x ) for x in samples ] ) ) <nl> + <nl> + <nl> + # Select stripes across all categories and compute the geomean of these stripes <nl> + def compute_striped_geomeans ( categories ) : <nl> + " " " Pulls a sample from each category into a " stripe " and computes the <nl> + geometric mean of the stripe . <nl> + <nl> + " " " <nl> + geomeans = [ ] <nl> + i = 0 <nl> + while True : <nl> + stripe = [ ] <nl> + for _ , values in categories . iteritems ( ) : <nl> + if i > = len ( values ) : <nl> + continue <nl> + stripe . append ( values [ i ] ) <nl> + if len ( stripe ) = = 0 : <nl> + break <nl> + geomeans . append ( geometric_mean ( stripe ) ) <nl> + i + = 1 <nl> + categories [ ' Geomean ' ] = geomeans <nl> + <nl> + <nl> + def print_means_and_cis ( categories , widest_key ) : <nl> + " " " Prints the mean and confidence interval for each category . <nl> + <nl> + " " " <nl> + for key , values in categories . iteritems ( ) : <nl> + pad_width = widest_key - len ( key ) <nl> + padding = " " * pad_width <nl> + mean , interval = None , None <nl> + if len ( values ) > 1 : <nl> + mean , interval = mean_confidence_interval ( values ) <nl> + print ( " % s : % s % . 2f + - % . 2f " % ( key , padding , mean , interval ) ) <nl> + else : <nl> + mean = arith_mean ( values ) <nl> + print ( " % s : % s % . 2f " % ( key , padding , mean ) ) <nl> + sys . stderr . write ( " Warning : too few samples to calculate confidence " <nl> + " interval for \ " % s \ " \ n " % key ) <nl> + <nl> + <nl> + def main ( ) : <nl> + " " " Parses arguments and passes them to the main computation functions . <nl> + <nl> + " " " <nl> + parser = argparse . ArgumentParser ( description = " Utility script for " <nl> + " calculating statistics on labeled data . " ) <nl> + parser . 
add_argument ( ' - - geomean ' , action = ' store_const ' , const = True , <nl> + default = False , help = ' Also outputs the geometric mean ' <nl> + ' of all the other means . ' ) <nl> + parser . add_argument ( ' file ' , nargs = ' ? ' , type = str ) <nl> + args = parser . parse_args ( ) <nl> + <nl> + infile = None <nl> + if args . file is None : <nl> + infile = sys . stdin <nl> + else : <nl> + infile = open ( args . file , ' r ' ) <nl> + <nl> + categories = parse_measurements ( infile ) <nl> + if args . geomean : <nl> + compute_striped_geomeans ( categories ) <nl> + widest_key = find_widest_key ( categories ) <nl> + print_means_and_cis ( categories , widest_key ) <nl> + <nl> + <nl> + if __name__ = = " __main__ " : <nl> + main ( ) <nl> new file mode 100755 <nl> index 00000000000 . . 2ac1e1c0922 <nl> mmm / dev / null <nl> ppp b / hphp / tools / benchy / benchy . py <nl> <nl> + " " " Convenience wrapper for benchmarking . <nl> + <nl> + Ties together all of the parts of building branches , running benchmarks , and <nl> + comparing results . <nl> + <nl> + Expects to be given a number of branches to compare . The script will then <nl> + check out each branch , build the branch into its own directory , run the <nl> + benchmark harness using the built branches , process the output to get various <nl> + statistics , and print the results for easy comparison . <nl> + " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + from __future__ import unicode_literals <nl> + import argparse <nl> + import os <nl> + import shlex <nl> + import subprocess <nl> + <nl> + import benchy_config as config <nl> + <nl> + def _unique_id ( ) : <nl> + " " " Returns the next unique integer ID . <nl> + <nl> + Note : this mirrors the function of the same name in the harness . We should <nl> + probably do something more intelligent than copy and paste , but it works . <nl> + <nl> + " " " <nl> + result = _unique_id . next_id <nl> + _unique_id . next_id + = 1 <nl> + return result <nl> + _unique_id . next_id = 1 <nl> + <nl> + <nl> + def verbose ( ) : <nl> + " " " Returns the current verbosity level . <nl> + <nl> + " " " <nl> + return verbose . level <nl> + verbose . level = 0 <nl> + <nl> + <nl> + def set_verbose_level ( level ) : <nl> + " " " Sets the verbosity level for debugging . <nl> + <nl> + " " " <nl> + verbose . level = level <nl> + <nl> + <nl> + class Branch ( object ) : <nl> + " " " A branch within a repository , i . e . the basic unit of comparison . " " " <nl> + def __init__ ( self , name ) : <nl> + self . name = name <nl> + self . uid = _unique_id ( ) <nl> + <nl> + def build_dir ( self ) : <nl> + " " " Returns the build directory for this branch . <nl> + <nl> + " " " <nl> + return os . path . join ( config . BUILD_ROOT , self . name ) <nl> + <nl> + def root_dir ( self ) : <nl> + " " " Returns the root directory inside the build directory for this <nl> + branch . <nl> + <nl> + " " " <nl> + return os . path . join ( self . build_dir ( ) , config . BUILD_INTERNAL_PATH ) <nl> + <nl> + <nl> + def parse_branches ( raw_branches ) : <nl> + " " " Maps branch names and to Branch objects . <nl> + <nl> + " " " <nl> + branches = [ ] <nl> + for raw_branch in raw_branches : <nl> + branches . append ( Branch ( raw_branch ) ) <nl> + return branches <nl> + <nl> + <nl> + def run_command ( cmd , env = None , stdout = None ) : <nl> + " " " Runs a command and checks the return code for errors . <nl> + <nl> + " " " <nl> + cmd = shlex . 
split ( cmd . encode ( ' utf8 ' ) ) <nl> + if verbose ( ) > = 1 : <nl> + print ( cmd ) <nl> + subprocess . check_call ( cmd , env = env , stdout = stdout ) <nl> + <nl> + <nl> + def build_branches ( branches ) : <nl> + " " " Builds each of the branches into their own directories . <nl> + <nl> + " " " <nl> + env = os . environ . copy ( ) <nl> + for branch in branches : <nl> + run_command ( ' arc feature % s ' % branch . name ) <nl> + run_command ( ' fbmake clean ' ) <nl> + <nl> + build_dir = branch . build_dir ( ) <nl> + if os . path . isfile ( build_dir ) : <nl> + os . remove ( build_dir ) <nl> + if not os . path . exists ( build_dir ) : <nl> + os . makedirs ( build_dir ) <nl> + <nl> + env [ ' FBMAKE_BUILD_ROOT ' ] = build_dir <nl> + run_command ( ' fbmake - - build - root " % s " opt - j70 ' % build_dir , env ) <nl> + <nl> + <nl> + def run_benchmarks ( suites , benchmarks , run_perf , inner , outer , branches ) : <nl> + " " " Runs the benchmarks on the branches by invoking the harness script . <nl> + <nl> + " " " <nl> + benchy_path = config . HARNESS_PATH <nl> + <nl> + suite_str = ' ' . join ( [ " - - suite % s " % s for s in suites ] ) <nl> + benchmark_str = ' ' . join ( [ " - - benchmark % s " % b for b in benchmarks ] ) <nl> + perf_str = ' - - perf ' if run_perf else ' ' <nl> + inner_str = ' ' if inner is None else ' - - inner { 0 } ' . format ( inner ) <nl> + outer_str = ' ' if outer is None else ' - - outer { 0 } ' . format ( outer ) <nl> + branch_str = ' ' . join ( [ " % s : % s " % ( b . name , b . root_dir ( ) ) for b in branches ] ) <nl> + <nl> + command = " { harness } { suites } { benchmarks } { perf } { inner } { outer } { branch } " <nl> + run_command ( command . format ( harness = benchy_path , <nl> + suites = suite_str , <nl> + benchmarks = benchmark_str , <nl> + perf = perf_str , <nl> + inner = inner_str , <nl> + outer = outer_str , <nl> + branch = branch_str ) ) <nl> + <nl> + <nl> + def process_results ( branches , output_mode ) : <nl> + " " " Runs statistics on the results and pretty prints them . <nl> + <nl> + " " " <nl> + anymean = config . ANYMEAN_PATH <nl> + significance = config . SIGNIFICANCE_PATH <nl> + result_paths = [ ] <nl> + counter = 0 <nl> + <nl> + for branch in branches : <nl> + counter + = 1 <nl> + runlog = os . path . join ( config . WORK_DIR , " runlog . % d " % counter ) <nl> + result_path = os . path . join ( config . WORK_DIR , branch . name ) <nl> + with open ( result_path , ' w ' ) as result_file : <nl> + cmd = " { anymean } - - geomean { runlog } " <nl> + run_command ( cmd . format ( anymean = anymean , runlog = runlog ) , <nl> + stdout = result_file ) <nl> + result_paths . append ( result_path ) <nl> + <nl> + cmd = " { significance } - - { output_mode } { results } " <nl> + run_command ( cmd . format ( significance = significance , <nl> + output_mode = output_mode , <nl> + results = ' ' . join ( result_paths ) ) ) <nl> + <nl> + <nl> + def main ( ) : <nl> + " " " Parses args and passes them off to the other phases . <nl> + <nl> + " " " <nl> + parser = argparse . ArgumentParser ( description = ' Convenience wrapper for ' <nl> + ' benchmarking multiple branches . ' ) <nl> + parser . add_argument ( ' - - no - build ' , action = ' store_const ' , const = True , <nl> + help = ' Don \ ' t clean and build . ' ) <nl> + parser . add_argument ( ' - - suite ' , action = ' append ' , type = str , <nl> + help = ' Run any suite that matches the provided regex ' ) <nl> + parser . 
add_argument ( ' - - benchmark ' , action = ' append ' , type = str , <nl> + help = ' Run any benchmark that matches the provided ' <nl> + ' regex ' ) <nl> + parser . add_argument ( ' - - inner ' , action = ' store ' , type = int , <nl> + help = ' Number of iterations of the benchmark to run ' <nl> + ' for each VM instance ' ) <nl> + parser . add_argument ( ' - - outer ' , action = ' store ' , type = int , <nl> + help = ' Number of instances of the VM to run for each ' <nl> + ' benchmark ' ) <nl> + parser . add_argument ( ' branch ' , nargs = ' + ' , type = str , metavar = ' BRANCH ' , <nl> + help = ' Branch to benchmark ' ) <nl> + parser . add_argument ( ' - - remarkup ' , action = ' store_const ' , const = True , <nl> + default = False , help = ' Spit out the results as Remarkup ' ) <nl> + parser . add_argument ( ' - - perf ' , action = ' store_const ' , const = True , <nl> + default = False , help = ' Run perf for each VM invocation . ' ) <nl> + parser . add_argument ( ' - v ' , ' - - verbose ' , type = int , default = 0 , <nl> + help = ' Increase verbosity ' ) <nl> + args = parser . parse_args ( ) <nl> + <nl> + included_suites = args . suite <nl> + if included_suites is None : <nl> + included_suites = [ ] <nl> + <nl> + included_benchmarks = args . benchmark <nl> + if included_benchmarks is None : <nl> + included_benchmarks = [ ] <nl> + <nl> + set_verbose_level ( args . verbose ) <nl> + inner = args . inner <nl> + outer = args . outer <nl> + do_build = args . no_build is None <nl> + run_perf = args . perf <nl> + output_mode = ' remarkup ' if args . remarkup else ' terminal ' <nl> + <nl> + branches = parse_branches ( args . branch ) <nl> + <nl> + if do_build : <nl> + build_branches ( branches ) <nl> + run_benchmarks ( included_suites , <nl> + included_benchmarks , <nl> + run_perf , inner , outer , branches ) <nl> + process_results ( branches , output_mode ) <nl> + <nl> + <nl> + if __name__ = = " __main__ " : <nl> + main ( ) <nl> new file mode 100644 <nl> index 00000000000 . . 6733a1b79db <nl> mmm / dev / null <nl> ppp b / hphp / tools / benchy / benchy_config . py <nl> <nl> + " " " Configuration loader for benchy benchmark harness . <nl> + <nl> + " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + from __future__ import unicode_literals <nl> + import json <nl> + import os <nl> + <nl> + def _load ( ) : <nl> + " " " Initializes and returns a singleton config dictionary . <nl> + <nl> + " " " <nl> + config = _load . config <nl> + if config is not None : <nl> + return _load . config <nl> + <nl> + benchy_dir = os . path . dirname ( os . path . realpath ( __file__ ) ) <nl> + tools_dir = os . path . dirname ( benchy_dir ) <nl> + base_dir = os . path . dirname ( tools_dir ) <nl> + fbcode_dir = os . path . dirname ( base_dir ) <nl> + benchmark_dir = os . path . join ( base_dir , ' benchmarks ' , ' php - octane ' ) <nl> + _load . config = { <nl> + ' ANYMEAN_PATH ' : os . path . join ( benchy_dir , ' any_mean . py ' ) , <nl> + ' BENCHMARK_DIR ' : benchmark_dir , <nl> + ' BENCH_ENTRY_PATH ' : os . path . join ( benchmark_dir , ' harness - run . php ' ) , <nl> + ' BUILD_INTERNAL_PATH ' : os . path . join ( fbcode_dir [ 1 : ] , ' _build ' , <nl> + ' opt ' , ' hphp ' ) , <nl> + ' HARNESS_PATH ' : os . path . join ( benchy_dir , ' benchy_harness . py ' ) , <nl> + ' INCLUDE_PATH ' : os . path . join ( benchmark_dir , ' include . php ' ) , <nl> + ' SIGNIFICANCE_PATH ' : os . path . 
join ( benchy_dir , ' significance . py ' ) , <nl> + ' SUITES_PATH ' : os . path . join ( benchmark_dir , ' suites . json ' ) , <nl> + ' VERSION ' : 1 , <nl> + ' WRAPPER_PATH ' : os . path . join ( tools_dir , ' hhvm_wrapper . php ' ) , <nl> + } <nl> + <nl> + home_dir = os . path . expanduser ( ' ~ ' ) <nl> + config_path = os . path . join ( home_dir , ' . benchy ' ) <nl> + with open ( config_path , ' r ' ) as config_file : <nl> + tmp = json . load ( config_file ) <nl> + work_dir = _load . config [ ' WORK_DIR ' ] = tmp [ ' work_dir ' ] <nl> + _load . config [ ' BUILD_ROOT ' ] = tmp [ ' build_dir ' ] <nl> + _load . config [ ' RUNSCRIPT_PATH ' ] = os . path . join ( work_dir , ' runscript ' ) <nl> + _load . config [ ' RUNLOG_PATH ' ] = os . path . join ( work_dir , ' runlog ' ) <nl> + _load . config [ ' PERF_PATH ' ] = os . path . join ( work_dir , ' perf ' ) <nl> + _load . config [ ' TMP_PATH ' ] = os . path . join ( work_dir , ' tmp ' ) <nl> + return _load . config <nl> + _load . config = None <nl> + <nl> + <nl> + def _get ( key ) : <nl> + " " " Looks up the given key in the config singleton . <nl> + <nl> + " " " <nl> + config = _load ( ) <nl> + if key in config : <nl> + return config [ key ] <nl> + return None <nl> + <nl> + <nl> + ANYMEAN_PATH = _get ( ' ANYMEAN_PATH ' ) <nl> + BENCHMARK_DIR = _get ( ' BENCHMARK_DIR ' ) <nl> + BENCH_ENTRY_PATH = _get ( ' BENCH_ENTRY_PATH ' ) <nl> + BUILD_ROOT = _get ( ' BUILD_ROOT ' ) <nl> + BUILD_INTERNAL_PATH = _get ( ' BUILD_INTERNAL_PATH ' ) <nl> + HARNESS_PATH = _get ( ' HARNESS_PATH ' ) <nl> + INCLUDE_PATH = _get ( ' INCLUDE_PATH ' ) <nl> + PERF_PATH = _get ( ' PERF_PATH ' ) <nl> + RUNLOG_PATH = _get ( ' RUNLOG_PATH ' ) <nl> + RUNSCRIPT_PATH = _get ( ' RUNSCRIPT_PATH ' ) <nl> + SIGNIFICANCE_PATH = _get ( ' SIGNIFICANCE_PATH ' ) <nl> + SUITES_PATH = _get ( ' SUITES_PATH ' ) <nl> + TMP_PATH = _get ( ' TMP_PATH ' ) <nl> + VERSION = _get ( ' VERSION ' ) <nl> + WORK_DIR = _get ( ' WORK_DIR ' ) <nl> + WRAPPER_PATH = _get ( ' WRAPPER_PATH ' ) <nl> new file mode 100755 <nl> index 00000000000 . . 9e060763864 <nl> mmm / dev / null <nl> ppp b / hphp / tools / benchy / benchy_harness . py <nl> <nl> + " " " Harness for running benchmarks . <nl> + <nl> + This module handles all aspects of running a series of benchmarks with <nl> + pre - built executables and emitting the results into a central location for <nl> + further processing . <nl> + <nl> + It ' s currently very focused on HHVM and php - octane , but it could be generalized <nl> + because it uses many standard shell features and it leaves the raw benchmark <nl> + results in text files for other tools to process . <nl> + " " " <nl> + <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + from __future__ import unicode_literals <nl> + import argparse <nl> + import json <nl> + import os <nl> + import random <nl> + import re <nl> + import shutil <nl> + import subprocess <nl> + import sys <nl> + <nl> + import benchy_config as config <nl> + <nl> + class Benchmark ( object ) : <nl> + " " " A single benchmark invocation . <nl> + <nl> + " " " <nl> + def __init__ ( self , suite , name , path , owner ) : <nl> + self . name = name <nl> + self . path = path <nl> + self . suite = suite <nl> + self . owner = owner <nl> + self . children = [ ] <nl> + <nl> + def matches ( self , pattern ) : <nl> + " " " Returns true if the given pattern matches this Benchmark . <nl> + <nl> + " " " <nl> + if re . search ( pattern , self . 
name ) is not None : <nl> + return True <nl> + return False <nl> + <nl> + def __str__ ( self ) : <nl> + return ' { 0 . suite } - { 0 . name } ' . format ( self ) <nl> + <nl> + def __repr__ ( self ) : <nl> + return self . __str__ ( ) <nl> + <nl> + class Suite ( object ) : <nl> + " " " A named group of related benchmarks . <nl> + <nl> + " " " <nl> + def __init__ ( self , name , benchmarks , statistic ) : <nl> + self . name = name <nl> + self . benchmarks = [ ] <nl> + self . statistic = statistic <nl> + for raw_bench in benchmarks : <nl> + self . benchmarks . append ( Benchmark ( suite = name , <nl> + name = raw_bench [ ' name ' ] , <nl> + path = raw_bench [ ' path ' ] , <nl> + owner = raw_bench [ ' owner ' ] ) ) <nl> + <nl> + for benchmark in self . benchmarks : <nl> + if not benchmark . owner : <nl> + continue <nl> + owner = None <nl> + for other in self . benchmarks : <nl> + if benchmark . owner ! = other . name : <nl> + continue <nl> + owner = other <nl> + break <nl> + if owner is None : <nl> + msg = " Couldn ' t find owner { 0 . owner } of benchmark { 0 . name } " <nl> + raise RuntimeError ( msg . format ( benchmark ) ) <nl> + owner . children . append ( benchmark ) <nl> + <nl> + def matches ( self , pattern ) : <nl> + " " " Returns true if the given pattern matches this Suite . <nl> + <nl> + " " " <nl> + if re . search ( pattern , self . name ) is not None : <nl> + return True <nl> + return False <nl> + <nl> + def __str__ ( self ) : <nl> + return " { 0 . name } " . format ( self ) <nl> + <nl> + def __repr__ ( self ) : <nl> + return self . __str__ ( ) <nl> + <nl> + <nl> + def _unique_id ( ) : <nl> + " " " Returns a unique integer ID to uniquely identify each VM . <nl> + <nl> + " " " <nl> + result = _unique_id . next_id <nl> + _unique_id . next_id + = 1 <nl> + return result <nl> + _unique_id . next_id = 1 <nl> + <nl> + <nl> + class VirtualMachine ( object ) : <nl> + " " " A single named executable with which to run benchmarks and measure . <nl> + <nl> + " " " <nl> + def __init__ ( self , name , path ) : <nl> + self . name = name <nl> + self . path = path <nl> + self . uid = _unique_id ( ) <nl> + <nl> + def __str__ ( self ) : <nl> + return " { 0 . name } " . format ( self ) <nl> + <nl> + def __repr__ ( self ) : <nl> + return self . __str__ ( ) <nl> + <nl> + <nl> + def load_benchmark_suites ( ) : <nl> + " " " Loads the benchmark suites to run from the suites . json file . <nl> + <nl> + " " " <nl> + json_obj = None <nl> + with open ( config . SUITES_PATH , ' r ' ) as suites_file : <nl> + try : <nl> + json_obj = json . load ( suites_file ) <nl> + except Exception as exc : <nl> + sys . stderr . write ( " Failed to load suites from JSON file . \ n " ) <nl> + raise exc <nl> + <nl> + if json_obj [ ' version ' ] ! = config . VERSION : <nl> + sys . stderr . write ( " Unknown version in suites JSON file . \ n " ) <nl> + sys . exit ( - 1 ) <nl> + <nl> + suites = [ ] <nl> + raw_suites = json_obj [ ' suites ' ] <nl> + for raw_suite in raw_suites : <nl> + suites . append ( Suite ( name = raw_suite [ ' name ' ] , <nl> + benchmarks = raw_suite [ ' benchmarks ' ] , <nl> + statistic = raw_suite [ ' statistic ' ] ) ) <nl> + return suites <nl> + <nl> + <nl> + def _flatten ( list_of_lists ) : <nl> + " " " Flattens a list of lists . <nl> + <nl> + " " " <nl> + result = [ ] <nl> + for elem in list_of_lists : <nl> + result . 
extend ( elem ) <nl> + return result <nl> + <nl> + <nl> + def filter_suites_and_benchmarks ( suites , included_suites , included_benchmarks ) : <nl> + " " " Filters in specified benchmarks and suites . <nl> + <nl> + Returns a list of benchmarks whose suite and benchmark names match at <nl> + least one of the specified regexes . <nl> + <nl> + " " " <nl> + def matches ( thing , patterns ) : <nl> + " " " Returns true if the thing matches any of the provided patterns . <nl> + <nl> + " " " <nl> + for pattern in patterns : <nl> + if thing . matches ( pattern ) : <nl> + return True <nl> + return False <nl> + <nl> + filtered_suites = [ s for s in suites if matches ( s , included_suites ) ] <nl> + benchmarks = _flatten ( [ suite . benchmarks for suite in filtered_suites ] ) <nl> + return [ b for b in benchmarks if matches ( b , included_benchmarks ) ] <nl> + <nl> + <nl> + def setup_workdir ( ) : <nl> + " " " Deletes any old stale working directory and creates a fresh one . <nl> + <nl> + " " " <nl> + if os . path . isfile ( config . WORK_DIR ) : <nl> + msg = " Work directory { 0 } already exists and is a file " <nl> + raise RuntimeError ( msg . format ( config . WORK_DIR ) ) <nl> + if os . path . isdir ( config . WORK_DIR ) : <nl> + shutil . rmtree ( config . WORK_DIR ) <nl> + os . makedirs ( config . WORK_DIR ) <nl> + <nl> + <nl> + def warmup_lines_to_chop ( benchmark , warmup ) : <nl> + " " " Returns the number of lines to be excluded from final measurements . <nl> + <nl> + " " " <nl> + # The main benchmark emits its own line . <nl> + lines_to_chop = warmup <nl> + # Each of the child benchmarks emit their own line . <nl> + lines_to_chop + = warmup * len ( benchmark . children ) <nl> + # hhvm_wrapper emits an extra line to let us know its compiling bytecode . <nl> + lines_to_chop + = 1 <nl> + # Tail 1 - indexes line numbers and starts emitting from the nth line . <nl> + lines_to_chop + = 1 <nl> + return lines_to_chop <nl> + <nl> + <nl> + def single_run ( * * kwargs ) : <nl> + " " " Generates the necessary shell - fu for a single benchmark invocation . <nl> + <nl> + " " " <nl> + template = " " " <nl> + printf " \ \ 033 [ 2K \ \ r " <nl> + printf " [ { idx } / { total } ] { vm . name } : { bench . name } " <nl> + printf " < ? \ \ n " > { include } <nl> + printf " include ' util . php ' ; \ \ n " > > { include } <nl> + printf " include ' { bench . path } ' ; \ \ n " > > { include } <nl> + printf " QueueRuns ( { extra_iters } , \ \ $ { bench . name } ) ; \ \ n " > > { include } <nl> + { wrapper } - - compile - - build - root = { vm . path } { perf } - - { harness } > { tmp } <nl> + cat { tmp } | tail - n + { lines_to_chop } > > { runlog } <nl> + " " " <nl> + lines = template . format ( * * kwargs ) . split ( ' \ n ' ) [ 1 : - 1 ] <nl> + return [ s . strip ( ) for s in lines ] <nl> + <nl> + <nl> + def generate_runscript ( vms , benchmarks_to_run , run_perf , warmup , inner , outer ) : <nl> + " " " Generates the script that will run all of the benchmarks . <nl> + <nl> + " " " <nl> + final_runlist = [ ] <nl> + for virtual_machine in vms : <nl> + for benchmark in benchmarks_to_run : <nl> + # Benchmarks that are run as part of other benchmarks are excluded . <nl> + if benchmark . owner is not None : <nl> + continue <nl> + for _ in range ( outer ) : <nl> + final_runlist . append ( ( virtual_machine , benchmark ) ) <nl> + random . 
shuffle ( final_runlist ) <nl> + <nl> + lines = [ ] <nl> + for i in range ( len ( final_runlist ) ) : <nl> + virtual_machine , benchmark = final_runlist [ i ] <nl> + runlog_path = config . RUNLOG_PATH + ( ' . { 0 . uid } ' . format ( virtual_machine ) ) <nl> + perf_str = ' - - perf = { base } . { bench } . { vm . uid } ' . format ( <nl> + base = config . PERF_PATH , <nl> + bench = str ( benchmark ) , <nl> + vm = virtual_machine ) <nl> + lines . extend ( single_run ( <nl> + idx = i + 1 , <nl> + total = len ( final_runlist ) , <nl> + vm = virtual_machine , <nl> + bench = benchmark , <nl> + lines_to_chop = warmup_lines_to_chop ( benchmark , warmup ) , <nl> + extra_iters = warmup + inner - 1 , <nl> + perf = perf_str if run_perf else ' ' , <nl> + runlog = runlog_path , <nl> + include = config . INCLUDE_PATH , <nl> + wrapper = config . WRAPPER_PATH , <nl> + harness = config . BENCH_ENTRY_PATH , <nl> + tmp = config . TMP_PATH ) ) <nl> + lines . append ( " printf ' \ \ a \ \ n ' " ) <nl> + <nl> + with open ( config . RUNSCRIPT_PATH , ' w ' ) as runscript : <nl> + for line in lines : <nl> + runscript . write ( line ) <nl> + runscript . write ( ' \ n ' ) <nl> + <nl> + <nl> + def execute_runscript ( ) : <nl> + " " " Executes the generated runscript . <nl> + <nl> + " " " <nl> + subprocess . call ( [ ' sh ' , config . RUNSCRIPT_PATH ] ) <nl> + <nl> + <nl> + def parse_virtual_machines ( raw_vms ) : <nl> + " " " Parses the name and path for each specified VM . <nl> + <nl> + Provides a default name if none is provided . Doesn ' t verify that the <nl> + provided path exists . <nl> + <nl> + " " " <nl> + vms = [ ] <nl> + vm_pattern = r ' ( ? : ( . * ) : ) ? ( . * ) ' <nl> + counter = 0 <nl> + for raw_vm in raw_vms : <nl> + counter + = 1 <nl> + result = re . match ( vm_pattern , raw_vm ) <nl> + if result is None : <nl> + raise RuntimeError ( " Invalid format for VM : % s " % raw_vm ) <nl> + name = result . group ( 1 ) <nl> + path = str ( result . group ( 2 ) ) <nl> + <nl> + if name is None : <nl> + name = " VM # % d " % counter <nl> + vms . append ( VirtualMachine ( name , path ) ) <nl> + return vms <nl> + <nl> + <nl> + def main ( ) : <nl> + " " " Parses arguments and passes them on to all the subsequent phases . <nl> + <nl> + " " " <nl> + parser = argparse . ArgumentParser ( description = " Run some benchmarks . " ) <nl> + parser . add_argument ( ' - - suite ' , action = ' append ' , type = str , <nl> + help = ' Run any suite that matches the provided regex ' ) <nl> + parser . add_argument ( ' - - benchmark ' , action = ' append ' , type = str , <nl> + help = ' Run any benchmark that matches the provided ' <nl> + ' regex ' ) <nl> + parser . add_argument ( ' - - inner ' , action = ' store ' , type = int , default = 4 , <nl> + help = ' Number of iterations of the benchmark to run ' <nl> + ' for each VM instance ' ) <nl> + parser . add_argument ( ' - - outer ' , action = ' store ' , type = int , default = 4 , <nl> + help = ' Number of instances of the VM to run for each ' <nl> + ' benchmark ' ) <nl> + parser . add_argument ( ' - - dry - run ' , action = ' store_const ' , const = True , <nl> + default = False , help = ' Don \ ' t run any benchmarks . Only ' <nl> + ' generate the runscript ' ) <nl> + parser . add_argument ( ' - - perf ' , action = ' store_const ' , const = True , <nl> + default = False , help = ' Run perf for each benchmark . ' ) <nl> + parser . add_argument ( ' - - warmup ' , action = ' store ' , type = int , default = 1 , <nl> + help = ' Number of inner iterations to warmup the VM . 
' ) <nl> + parser . add_argument ( ' vm ' , nargs = ' + ' , type = str , metavar = ' VM ' , <nl> + help = ' VM to benchmark ' ) <nl> + args = parser . parse_args ( ) <nl> + <nl> + setup_workdir ( ) <nl> + <nl> + included_suites = args . suite <nl> + if included_suites is None : <nl> + included_suites = [ r ' . * ' ] <nl> + <nl> + included_benchmarks = args . benchmark <nl> + if included_benchmarks is None : <nl> + included_benchmarks = [ r ' . * ' ] <nl> + <nl> + warmup = args . warmup <nl> + inner = args . inner <nl> + outer = args . outer <nl> + dry_run = args . dry_run <nl> + run_perf = args . perf <nl> + vms = parse_virtual_machines ( args . vm ) <nl> + <nl> + suites = load_benchmark_suites ( ) <nl> + benchmarks_to_run = filter_suites_and_benchmarks ( suites , <nl> + included_suites , <nl> + included_benchmarks ) <nl> + <nl> + if len ( benchmarks_to_run ) = = 0 : <nl> + sys . stderr . write ( " No benchmarks to run , exiting . . . \ n " ) <nl> + return <nl> + <nl> + generate_runscript ( vms , benchmarks_to_run , run_perf , warmup , inner , outer ) <nl> + if dry_run : <nl> + return <nl> + execute_runscript ( ) <nl> + <nl> + <nl> + if __name__ = = " __main__ " : <nl> + main ( ) <nl> new file mode 100644 <nl> index 00000000000 . . 5f2a6bd4f29 <nl> mmm / dev / null <nl> ppp b / hphp / tools / benchy / confidence_interval . py <nl> <nl> + " " " Various utilities to compute confidence intervals . <nl> + <nl> + " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + from __future__ import unicode_literals <nl> + import math <nl> + <nl> + # Table from http : / / en . wikipedia . org / wiki / Student ' s_t - distribution <nl> + # We assume a 95 % confidence interval . <nl> + T_TABLE = [ <nl> + ( 1 , 12 . 71 ) , <nl> + ( 2 , 4 . 303 ) , <nl> + ( 3 , 3 . 182 ) , <nl> + ( 4 , 2 . 776 ) , <nl> + ( 5 , 2 . 571 ) , <nl> + ( 6 , 2 . 447 ) , <nl> + ( 7 , 2 . 365 ) , <nl> + ( 8 , 2 . 306 ) , <nl> + ( 9 , 2 . 262 ) , <nl> + ( 10 , 2 . 228 ) , <nl> + ( 11 , 2 . 201 ) , <nl> + ( 12 , 2 . 179 ) , <nl> + ( 13 , 2 . 160 ) , <nl> + ( 14 , 2 . 145 ) , <nl> + ( 15 , 2 . 131 ) , <nl> + ( 16 , 2 . 120 ) , <nl> + ( 17 , 2 . 110 ) , <nl> + ( 18 , 2 . 101 ) , <nl> + ( 19 , 2 . 093 ) , <nl> + ( 20 , 2 . 086 ) , <nl> + ( 21 , 2 . 080 ) , <nl> + ( 22 , 2 . 074 ) , <nl> + ( 23 , 2 . 069 ) , <nl> + ( 24 , 2 . 064 ) , <nl> + ( 25 , 2 . 060 ) , <nl> + ( 26 , 2 . 056 ) , <nl> + ( 27 , 2 . 052 ) , <nl> + ( 28 , 2 . 048 ) , <nl> + ( 29 , 2 . 045 ) , <nl> + ( 30 , 2 . 042 ) , <nl> + ( 40 , 2 . 021 ) , <nl> + ( 50 , 2 . 009 ) , <nl> + ( 60 , 2 . 000 ) , <nl> + ( 80 , 1 . 990 ) , <nl> + ( 100 , 1 . 984 ) , <nl> + ( 120 , 1 . 980 ) , <nl> + ( float ( ' + Inf ' ) , 1 . 960 ) , <nl> + ] <nl> + <nl> + def t_score ( degrees_of_freedom ) : <nl> + " " " Returns the 95 % bi - directional t - score for the first entry in the table <nl> + with more than the requested degrees of freedom . <nl> + <nl> + " " " <nl> + for entry in T_TABLE : <nl> + entry_df , score = entry <nl> + if entry_df > = degrees_of_freedom : <nl> + return score <nl> + raise RuntimeError ( " Should never be reached " ) <nl> + <nl> + def arith_mean ( samples ) : <nl> + " " " Computes the arithmetic mean of a set of samples . <nl> + <nl> + " " " <nl> + accum = 0 . 
0 <nl> + num_samples = float ( len ( samples ) ) <nl> + for sample in samples : <nl> + accum + = float ( sample ) / num_samples <nl> + return accum <nl> + <nl> + def sample_std_dev ( samples ) : <nl> + " " " Computes the standard deviation of a set of samples . <nl> + <nl> + " " " <nl> + avg = arith_mean ( samples ) <nl> + err = 0 . 0 <nl> + for sample in samples : <nl> + err + = ( sample - avg ) * ( sample - avg ) / ( len ( samples ) - 1 ) <nl> + return math . sqrt ( err ) <nl> + <nl> + def mean_standard_error ( samples ) : <nl> + " " " Computes the mean standard error of a set of samples . <nl> + <nl> + " " " <nl> + return sample_std_dev ( samples ) / math . sqrt ( len ( samples ) ) <nl> + <nl> + def critical_value ( samples ) : <nl> + " " " Computes the critical value for a set of samples to be used in the <nl> + confidence interval calculation . Assumes a 95 % bi - directional confidence <nl> + interval . <nl> + <nl> + " " " <nl> + degrees_of_freedom = len ( samples ) - 1 <nl> + return t_score ( degrees_of_freedom ) <nl> + <nl> + def margin_of_error ( samples ) : <nl> + " " " Computes the margin of error for a set of samples . <nl> + <nl> + " " " <nl> + return critical_value ( samples ) * mean_standard_error ( samples ) <nl> + <nl> + def mean_confidence_interval ( samples ) : <nl> + " " " Returns a tuple of ( arithmetic mean , confidence interval ) for a set of <nl> + samples . <nl> + <nl> + " " " <nl> + return ( arith_mean ( samples ) , margin_of_error ( samples ) ) <nl> new file mode 100644 <nl> index 00000000000 . . d289656b1dd <nl> mmm / dev / null <nl> ppp b / hphp / tools / benchy / dot - benchy <nl> <nl> + { <nl> + " work_dir " : " / home / mhahnenberg / workdir " , <nl> + " build_dir " : " / home / mhahnenberg / builds " <nl> + } <nl> new file mode 100755 <nl> index 00000000000 . . bb1b9b24995 <nl> mmm / dev / null <nl> ppp b / hphp / tools / benchy / significance . py <nl> <nl> + " " " Pretty print statistics comparisons . <nl> + <nl> + Parses files containing labeled lines of means and confidence intervals , <nl> + compares each of them against each other using confidence intervals to <nl> + determine which changes are significant , and pretty prints the results in a <nl> + table . <nl> + <nl> + " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + from __future__ import unicode_literals <nl> + import argparse <nl> + import os <nl> + import re <nl> + from table import Table <nl> + <nl> + LINE_REGEX = re . compile ( r ' ( . + ) : \ s * ( [ 0 - 9 . ] + ) ( ? : \ + - ( [ 0 - 9 . ] + ) ) ? ' ) <nl> + <nl> + <nl> + class ResultFile ( object ) : <nl> + " " " A handle to a named file and its parsed contents . <nl> + <nl> + " " " <nl> + def __init__ ( self , name , data ) : <nl> + self . name = name <nl> + self . data = data <nl> + <nl> + def short_name ( self ) : <nl> + " " " Returns the short name for this particular result file . <nl> + <nl> + The path is used as the name , so the short name is the basename . <nl> + <nl> + " " " <nl> + return os . path . basename ( self . name ) <nl> + <nl> + <nl> + def read_input ( filename , in_file ) : <nl> + " " " Parses labeled averages and confidence intervals from the provided file . <nl> + <nl> + " " " <nl> + data = { } <nl> + for line in in_file : <nl> + result = LINE_REGEX . match ( line ) <nl> + if result is None : <nl> + continue <nl> + <nl> + lhs = str ( result . group ( 1 ) ) <nl> + mean = str ( result . 
group ( 2 ) ) <nl> + conf_interv = result . group ( 3 ) <nl> + <nl> + try : <nl> + mean = float ( mean ) <nl> + if conf_interv is None : <nl> + conf_interv = 0 . 0 <nl> + else : <nl> + conf_interv = float ( conf_interv ) <nl> + except ValueError : <nl> + continue <nl> + <nl> + data [ lhs ] = ( mean , conf_interv ) <nl> + return ResultFile ( filename , data ) <nl> + <nl> + <nl> + def transpose_result_data ( result_files ) : <nl> + " " " Reassociate data from multiple result files into single categories . <nl> + <nl> + We reorganize the data so that we can compare results on the same <nl> + category across multiple result files . <nl> + <nl> + " " " <nl> + categories = { } <nl> + for result_file in result_files : <nl> + for key , value in result_file . data . iteritems ( ) : <nl> + if key not in categories : <nl> + categories [ key ] = [ ] <nl> + categories [ key ] . append ( ( result_file . name , value ) ) <nl> + return categories <nl> + <nl> + <nl> + def confidence_intervals_overlap ( old_score , old_ci , new_score , new_ci ) : <nl> + " " " Returns true if the confidence intervals of the old and new scores <nl> + overlap , false otherwise . <nl> + <nl> + " " " <nl> + if old_score < new_score : <nl> + old_score + = old_ci <nl> + new_score - = new_ci <nl> + return old_score > = new_score <nl> + else : <nl> + old_score - = old_ci <nl> + new_score + = new_ci <nl> + return old_score < = new_score <nl> + <nl> + <nl> + def percent_delta ( old_score , new_score ) : <nl> + " " " Calculates the percent change between two scores . <nl> + <nl> + " " " <nl> + return float ( new_score - old_score ) / float ( old_score ) <nl> + <nl> + <nl> + def print_results ( result_files , out_format ) : <nl> + " " " Builds a table with the parsed results . Used when there is only one <nl> + result file . <nl> + <nl> + " " " <nl> + categories = transpose_result_data ( result_files ) <nl> + columns = [ result . short_name ( ) for result in result_files ] <nl> + columns . insert ( 0 , " Benchmark " ) <nl> + table = Table ( columns ) <nl> + <nl> + geomean = None <nl> + for key in categories : <nl> + scores = [ run [ 1 ] for run in categories [ key ] ] <nl> + entries = [ " % . 2f + - % . 2f " % score for score in scores ] <nl> + entries . insert ( 0 , key ) <nl> + if key = = ' Geomean ' : <nl> + geomean = entries <nl> + else : <nl> + table . add_row ( entries ) <nl> + <nl> + if geomean is not None : <nl> + table . add_row ( geomean ) <nl> + <nl> + if out_format = = ' terminal ' : <nl> + table . dump_to_terminal ( ) <nl> + elif out_format = = ' remarkup ' : <nl> + table . dump_to_remarkup ( ) <nl> + else : <nl> + raise RuntimeError ( " Unknown output format : % s " % out_format ) <nl> + <nl> + <nl> + def red ( text ) : <nl> + " " " Returns a string with ANSI codes for red color and reset color wrapping <nl> + the provided text . <nl> + <nl> + " " " <nl> + return ' \ 033 [ 31m % s \ 033 [ 39m ' % text <nl> + <nl> + <nl> + def green ( text ) : <nl> + " " " Returns a string with ANSI codes for green color and reset color <nl> + wrapping the provided text . <nl> + <nl> + " " " <nl> + return ' \ 033 [ 32m % s \ 033 [ 39m ' % text <nl> + <nl> + <nl> + def bold ( out_format , text ) : <nl> + " " " Returns a string representing a bolded version of the given text <nl> + depending on the output format . 
<nl> + <nl> + " " " <nl> + if out_format = = ' remarkup ' : <nl> + return " * * % s * * " % text <nl> + elif out_format = = ' terminal ' : <nl> + return " \ 033 [ 1m % s \ 033 [ 0m " % text <nl> + else : <nl> + raise RuntimeError ( " Unknown output format : % s " % out_format ) <nl> + <nl> + <nl> + def faster ( out_format , text ) : <nl> + " " " Used to visually signify the given string represents a faster result . <nl> + <nl> + " " " <nl> + if out_format = = ' remarkup ' : <nl> + return bold ( out_format , text ) <nl> + elif out_format = = ' terminal ' : <nl> + return bold ( out_format , green ( text ) ) <nl> + else : <nl> + raise RuntimeError ( " Unknown output format : % s " % out_format ) <nl> + <nl> + <nl> + def slower ( out_format , text ) : <nl> + " " " Used to visually signify the given string represents a slower result . <nl> + <nl> + " " " <nl> + if out_format = = ' remarkup ' : <nl> + return bold ( out_format , text ) <nl> + elif out_format = = ' terminal ' : <nl> + return bold ( out_format , red ( text ) ) <nl> + else : <nl> + raise RuntimeError ( " Unknown output format : % s " % out_format ) <nl> + <nl> + <nl> + def print_comparison_results ( result_files , out_format ) : <nl> + " " " Builds a table of the various gathered results and prints it out . <nl> + <nl> + The table also includes an extra column for deltas and adds entries for <nl> + significant changes between the last two provided files . <nl> + <nl> + " " " <nl> + def entries_for_scores ( key , scores ) : <nl> + " " " Returns entries for the next row , prepending the category name and <nl> + appending any significant changes . <nl> + <nl> + " " " <nl> + old_score , old_ci = scores [ - 2 ] <nl> + new_score , new_ci = scores [ - 1 ] <nl> + entries = [ " % . 2f + - % . 2f " % score for score in scores ] <nl> + entries . insert ( 0 , key ) <nl> + if confidence_intervals_overlap ( old_score , old_ci , new_score , new_ci ) : <nl> + entries . append ( " " ) <nl> + else : <nl> + change = percent_delta ( old_score , new_score ) <nl> + if change < 0 . 0 : <nl> + change_str = " % . 4f % % slower " % ( change * 100 . 0 ) <nl> + entries . append ( slower ( out_format , change_str ) ) <nl> + else : <nl> + change_str = " + % . 4f % % faster " % ( change * 100 . 0 ) <nl> + entries . append ( faster ( out_format , change_str ) ) <nl> + return entries <nl> + <nl> + categories = transpose_result_data ( result_files ) <nl> + columns = [ result . short_name ( ) for result in result_files ] <nl> + columns . insert ( 0 , " Benchmark " ) <nl> + columns . append ( " Deltas " ) <nl> + table = Table ( columns ) <nl> + <nl> + geomean = None <nl> + for key in categories : <nl> + entries = entries_for_scores ( key , [ run [ 1 ] for run in categories [ key ] ] ) <nl> + if key = = ' Geomean ' : <nl> + geomean = entries <nl> + geomean [ 0 ] = bold ( out_format , geomean [ 0 ] ) <nl> + else : <nl> + table . add_row ( entries ) <nl> + <nl> + if geomean is not None : <nl> + table . add_row ( geomean ) <nl> + <nl> + if out_format = = ' terminal ' : <nl> + table . dump_to_terminal ( ) <nl> + elif out_format = = ' remarkup ' : <nl> + table . dump_to_remarkup ( ) <nl> + else : <nl> + raise RuntimeError ( " Unknown output format : % s " % out_format ) <nl> + <nl> + <nl> + def main ( ) : <nl> + " " " Parses arguments and passes control off to the worker functions . <nl> + <nl> + " " " <nl> + parser = argparse . ArgumentParser ( description = " Compare benchmark results " <nl> + " for significant changes . " ) <nl> + parser . 
add_argument ( ' - - remarkup ' , action = ' store_const ' , const = True , <nl> + default = False , help = ' Spit out the results as Remarkup ' ) <nl> + parser . add_argument ( ' - - terminal ' , action = ' store_const ' , const = True , <nl> + default = False , help = ' Spit out the results in format ' <nl> + ' that \ ' s nice for terminals ' ) <nl> + parser . add_argument ( ' file ' , metavar = ' FILE ' , nargs = ' + ' , type = str , <nl> + help = ' Files to parse for statistics . ' ) <nl> + args = parser . parse_args ( ) <nl> + <nl> + out_format = None <nl> + if args . terminal : <nl> + out_format = ' terminal ' <nl> + elif args . remarkup : <nl> + out_format = ' remarkup ' <nl> + else : <nl> + out_format = ' terminal ' <nl> + <nl> + result_files = [ ] <nl> + for filename in args . file : <nl> + with open ( filename , ' r ' ) as in_file : <nl> + result_files . append ( read_input ( filename , in_file ) ) <nl> + if len ( result_files ) > 1 : <nl> + print_comparison_results ( result_files , out_format ) <nl> + else : <nl> + print_results ( result_files , out_format ) <nl> + <nl> + <nl> + if __name__ = = " __main__ " : <nl> + main ( ) <nl> new file mode 100644 <nl> index 00000000000 . . a3848b0e1cd <nl> mmm / dev / null <nl> ppp b / hphp / tools / benchy / tab - complete . sh <nl> <nl> + # Copy this file to your shell init script to get tab completion : - ) <nl> + <nl> + _benchy_complete ( ) <nl> + { <nl> + CUR = $ { COMP_WORDS [ COMP_CWORD ] } <nl> + COMPREPLY = ( ) <nl> + BRANCHES = ` git branch | awk - F ' + ' ' ! / \ ( no branch \ ) / { print $ 2 } ' | grep " ^ $ CUR " ` <nl> + if [ $ ? - eq 0 ] <nl> + then <nl> + for BRANCH in $ BRANCHES <nl> + do <nl> + COMPREPLY + = ( $ BRANCH ) <nl> + done <nl> + fi <nl> + } <nl> + <nl> + complete - F _benchy_complete benchy . py <nl> new file mode 100644 <nl> index 00000000000 . . 4bb5b359fc8 <nl> mmm / dev / null <nl> ppp b / hphp / tools / benchy / table . py <nl> <nl> + " " " Encapsulated table class for pretty printing tabular data . <nl> + <nl> + " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + from __future__ import unicode_literals <nl> + import sys <nl> + import re <nl> + <nl> + ANSI_ESCAPE = re . compile ( r ' \ 033 [ ^ m ] * m ' ) <nl> + <nl> + def _print_horizontal_line ( max_widths ) : <nl> + " " " Prints a horizontal line across all columns in the table . <nl> + <nl> + " " " <nl> + for width in max_widths : <nl> + dashes = ' - ' * width <nl> + sys . stdout . write ( " % smmm " % dashes ) <nl> + sys . stdout . write ( ' - \ n ' ) <nl> + <nl> + def _print_entry_centered ( entry , width ) : <nl> + " " " Prints a table entry with equal space on each side . <nl> + <nl> + " " " <nl> + total_padding = width - _len_sans_ansi ( entry ) <nl> + # Floor division : plain / is true division under __future__ division . <nl> + front_padding = ' ' * ( total_padding / / 2 ) <nl> + back_padding = ' ' * ( total_padding - len ( front_padding ) ) <nl> + sys . stdout . write ( " % s % s % s " % ( front_padding , entry , back_padding ) ) <nl> + <nl> + def _print_entry_left ( entry , width , filler = ' ' ) : <nl> + " " " Prints a table entry left justified within its cell . <nl> + <nl> + " " " <nl> + total_padding = width - _len_sans_ansi ( entry ) <nl> + back_padding = filler * total_padding <nl> + sys . stdout . write ( " % s % s " % ( entry , back_padding ) ) <nl> + <nl> + def _len_sans_ansi ( text ) : <nl> + " " " Computes the length of a string that might contain ANSI codes . <nl> + <nl> + " " " <nl> + return len ( ANSI_ESCAPE .
sub ( ' ' , text ) ) <nl> + <nl> + class Table ( object ) : <nl> + " " " Encapsulation around a set of column headers and a series of rows of <nl> + data which gracefully handles pretty printing the data it contains in a <nl> + variety of tabular formats . <nl> + <nl> + " " " <nl> + def __init__ ( self , headers ) : <nl> + self . _headers = headers <nl> + self . _row_length = len ( headers ) <nl> + self . _rows = [ ] <nl> + <nl> + def add_row ( self , row ) : <nl> + " " " Append a new row of data . <nl> + <nl> + " " " <nl> + if len ( row ) ! = self . _row_length : <nl> + raise RuntimeError ( " Invalid row length " ) <nl> + self . _rows . append ( row ) <nl> + <nl> + def _find_max_column_widths ( self , headers , rows ) : <nl> + " " " Finds the maximum width of each column based on its contents . <nl> + <nl> + " " " <nl> + # Find the max width for each column <nl> + max_widths = [ _len_sans_ansi ( x ) for x in headers ] <nl> + for row in rows : <nl> + for i in range ( self . _row_length ) : <nl> + width = _len_sans_ansi ( str ( row [ i ] ) ) <nl> + if width > max_widths [ i ] : <nl> + max_widths [ i ] = width <nl> + return max_widths <nl> + <nl> + def dump_to_remarkup ( self ) : <nl> + " " " Pretty print the table headers and rows in remarkup format . <nl> + <nl> + " " " <nl> + headers , rows = self . _headers , self . _rows <nl> + max_widths = self . _find_max_column_widths ( headers , rows ) <nl> + <nl> + # Print the headers . <nl> + sys . stdout . write ( " | " ) <nl> + for i in range ( len ( headers ) ) : <nl> + sys . stdout . write ( ' ' ) <nl> + _print_entry_centered ( headers [ i ] , max_widths [ i ] ) <nl> + sys . stdout . write ( " | " ) <nl> + sys . stdout . write ( ' \ n ' ) <nl> + <nl> + sys . stdout . write ( " | " ) <nl> + for i in range ( len ( headers ) ) : <nl> + sys . stdout . write ( " - " ) <nl> + _print_entry_left ( ' ' , max_widths [ i ] , filler = ' - ' ) <nl> + sys . stdout . write ( " - | " ) <nl> + sys . stdout . write ( ' \ n ' ) <nl> + <nl> + # Print each row <nl> + for i in range ( len ( self . _rows ) ) : <nl> + row = self . _rows [ i ] <nl> + sys . stdout . write ( " | " ) <nl> + for i in range ( len ( row ) ) : <nl> + sys . stdout . write ( " " ) <nl> + _print_entry_left ( str ( row [ i ] ) , max_widths [ i ] ) <nl> + sys . stdout . write ( " | " ) <nl> + sys . stdout . write ( ' \ n ' ) <nl> + <nl> + def dump_to_terminal ( self ) : <nl> + " " " Pretty prints the table headers and rows in a terminal friendly <nl> + format . <nl> + <nl> + " " " <nl> + headers , rows = self . _headers , self . _rows <nl> + max_widths = self . _find_max_column_widths ( headers , rows ) <nl> + <nl> + # Print the headers . <nl> + _print_horizontal_line ( max_widths ) <nl> + <nl> + sys . stdout . write ( " | " ) <nl> + for i in range ( len ( headers ) ) : <nl> + sys . stdout . write ( " " ) <nl> + _print_entry_centered ( headers [ i ] , max_widths [ i ] ) <nl> + sys . stdout . write ( " | " ) <nl> + sys . stdout . write ( ' \ n ' ) <nl> + <nl> + _print_horizontal_line ( max_widths ) <nl> + <nl> + # Print each row . <nl> + for row in rows : <nl> + sys . stdout . write ( " | " ) <nl> + for i in range ( len ( row ) ) : <nl> + sys . stdout . write ( " " ) <nl> + _print_entry_left ( str ( row [ i ] ) , max_widths [ i ] ) <nl> + sys . stdout . write ( " | " ) <nl> + sys . stdout . write ( ' \ n ' ) <nl> + <nl> + _print_horizontal_line ( max_widths ) <nl>
Add benchy . py for benchmarking HHVM
facebook/hhvm
968fe31ec85a35252649b6d5a7b088a4dd0d3813
2014-10-16T01:00:24Z
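The table.py code above keys every layout decision on the visible width of a cell: ANSI color escapes are stripped before measuring, and the leftover padding is split for centering (note the split must be an integer character count, since true division is imported). Below is a minimal C++ sketch of the same measure-then-center idea; the SGR-only regex and all names are mine for illustration, not HHVM's.

#include <iostream>
#include <regex>
#include <string>

// Visible length of a string that may contain ANSI SGR codes (ESC [ ... m),
// mirroring the ANSI_ESCAPE regex in table.py.
static std::size_t lenSansAnsi(const std::string& text) {
    static const std::regex ansi("\033\\[[^m]*m");
    return std::regex_replace(text, ansi, "").size();
}

// Center `entry` in a cell of `width` visible columns.
static std::string centered(const std::string& entry, std::size_t width) {
    const std::size_t visible = lenSansAnsi(entry);
    const std::size_t pad = width > visible ? width - visible : 0;
    const std::size_t front = pad / 2;  // integer division: no float pitfall
    return std::string(front, ' ') + entry + std::string(pad - front, ' ');
}

int main() {
    std::string green = "\033[32mok\033[0m";       // colored, visible width 2
    std::cout << '[' << centered(green, 10) << "]\n";  // pads to 10 columns
}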
mmm a / Source / Math / CPUMatrixImpl . h <nl> ppp b / Source / Math / CPUMatrixImpl . h <nl> void CPUMatrix < ElemType > : : MaxROIPoolingBackward ( const size_t numRois , const size <nl> for ( size_t c = 0 ; c < channels ; c + + ) <nl> { <nl> ElemType gradient = 0 ; <nl> - int num_of_gradients = 0 ; <nl> / / [ W x H x C x N ] <nl> size_t index = w + h * width + c * height * width ; <nl> / / go right up to channel c of the current ROI . <nl> void CPUMatrix < ElemType > : : MaxROIPoolingBackward ( const size_t numRois , const size <nl> if ( ( size_t ) offsetArgmax [ ph * pooledWidth + pw ] = = ( w + h * width ) ) <nl> { <nl> gradient + = offsetPoolGrad [ ph * pooledWidth + pw ] ; <nl> - num_of_gradients + + ; <nl> } <nl> } <nl> } <nl> <nl> - num_of_gradients = max ( num_of_gradients , 1 ) ; <nl> - grad ( index , imgIdx ) = gradient / num_of_gradients ; <nl> + # pragma omp atomic <nl> + grad ( index , imgIdx ) + = gradient ; <nl> } <nl> } <nl> } <nl> mmm a / Source / Math / Convolution . cuh <nl> ppp b / Source / Math / Convolution . cuh <nl> __global__ void kMaxROIPoolingBackward ( const int totalIterations , <nl> int roiMax = ( n + 1 ) * numROIs ; <nl> <nl> ElemType gradient = 0 ; <nl> - int num_of_gradients = 0 ; <nl> - <nl> for ( int roiN = roiMin ; roiN < roiMax ; roiN + + ) <nl> { <nl> / / each ROI is 4 elements : ( x , y , w , h ) <nl> __global__ void kMaxROIPoolingBackward ( const int totalIterations , <nl> if ( ( int ) offsetArgmax [ ph * pooledWidth + pw ] = = ( h * width + w ) ) <nl> { <nl> gradient + = offsetPoolGrad [ ph * pooledWidth + pw ] ; <nl> - num_of_gradients + + ; <nl> } <nl> } <nl> } <nl> } <nl> <nl> - num_of_gradients = max ( num_of_gradients , 1 ) ; <nl> - atomicAdd ( & grad [ index ] , gradient / num_of_gradients ) ; <nl> + atomicAdd ( & grad [ index ] , gradient ) ; <nl> } <nl> } <nl> <nl> mmm a / bindings / python / cntk / ops / tests / kernel_test . py <nl> ppp b / bindings / python / cntk / ops / tests / kernel_test . py <nl> def test_op_average_pooling_include_pad ( input_size , pooling_window , strides , res <nl> [ 8 . , 9 . , 9 . ] , <nl> [ 8 . , 9 . , 9 . ] ] ] , <nl> [ [ [ 0 . , 0 . , 0 . ] , # ( 1 , 3 , 3 ) expected backward output ( gradient input is all 1s ) <nl> - [ 0 . , 1 . , 1 . ] , <nl> - [ 0 . , 1 . , 1 . ] ] ] ) <nl> + [ 0 . , 1 . , 2 . ] , <nl> + [ 0 . , 2 . , 4 . ] ] ] ) <nl> ] <nl> <nl> @ pytest . mark . parametrize ( " input_map , input_rois , expected_fwd , expected_bkwd " , ROIPOOLING_OPERANDS ) <nl>
Sum gradient as before to match Caffe implementation .
microsoft/CNTK
211bef0c8749967e5c4fe7db68983a4aa0f80a80
2017-06-13T17:18:27Z
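The fix above replaces the averaged gradient with a plain atomic sum, matching Caffe: every pooled output whose argmax points at an input cell contributes its whole gradient, and concurrent writers are serialized with `#pragma omp atomic` on the CPU and `atomicAdd` on the GPU. A toy C++/OpenMP sketch of that accumulation pattern (shapes and names are illustrative, not CNTK's):

#include <cstdio>
#include <vector>

int main() {
    std::vector<float> grad(4, 0.0f);      // gradient w.r.t. a 4-cell input
    const int argmax[] = {2, 2, 0, 2};     // three pooled outputs map to cell 2
    const float poolGrad[] = {1.f, 2.f, 3.f, 4.f};

    #pragma omp parallel for
    for (int i = 0; i < 4; ++i) {
        #pragma omp atomic
        grad[argmax[i]] += poolGrad[i];    // sum, not average (Caffe-style)
    }

    for (float g : grad) std::printf("%g ", g);  // prints: 3 0 7 0
    std::printf("\n");
}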
new file mode 100644 <nl> index 000000000000 . . 81f6c7d69f1d <nl> mmm / dev / null <nl> ppp b / jstests / core / list_namespaces_invalidation . js <nl> <nl> + / / SERVER - 27996 Missing invalidation for system . namespaces writes <nl> + ( function ( ) { <nl> + ' use strict ' ; <nl> + let dbInvalidName = ' system_namespaces_invalidations ' ; <nl> + let dbInvalid = db . getSiblingDB ( dbInvalidName ) ; <nl> + let num_collections = 3 ; <nl> + function testNamespaceInvalidation ( isRename ) { <nl> + dbInvalid . dropDatabase ( ) ; <nl> + <nl> + / / Create enough collections to necessitate multiple cursor batches . <nl> + for ( let i = 0 ; i < num_collections ; i + + ) { <nl> + assert . commandWorked ( dbInvalid . createCollection ( ' coll ' + i . toString ( ) ) ) ; <nl> + } <nl> + <nl> + / / Get the first two namespaces . Use find on ' system . namespaces ' on MMAPv1 , listCollections <nl> + / / otherwise . <nl> + let cmd = dbInvalid . system . indexes . count ( ) ? { find : ' system . namespaces ' } <nl> + : { listCollections : dbInvalidName } ; <nl> + Object . extend ( cmd , { batchSize : 2 } ) ; <nl> + let res = dbInvalid . runCommand ( cmd ) ; <nl> + assert . commandWorked ( res , ' could not run ' + tojson ( cmd ) ) ; <nl> + printjson ( res ) ; <nl> + <nl> + / / Ensure the cursor has data , drop or rename the collections , and exhaust the cursor . <nl> + let cursor = new DBCommandCursor ( dbInvalid . getMongo ( ) , res ) ; <nl> + let errMsg = <nl> + ' expected more data from command ' + tojson ( cmd ) + ' , with result ' + tojson ( res ) ; <nl> + assert ( cursor . hasNext ( ) , errMsg ) ; <nl> + for ( let j = 0 ; j < num_collections ; j + + ) { <nl> + if ( isRename ) { <nl> + / / Rename the collection to something that does not fit in the previously allocated <nl> + / / memory for the record . <nl> + assert . commandWorked ( dbInvalid [ ' coll ' + j . toString ( ) ] . renameCollection ( <nl> + ' coll ' + j . toString ( ) + ' lkdsahflaksjdhfsdkljhfskladhfkahfsakfla ' + <nl> + ' skfjhaslfaslfkhasklfjhsakljhdsjksahkldjslh ' ) ) ; <nl> + } else { <nl> + assert ( dbInvalid [ ' coll ' + j . toString ( ) ] . drop ( ) ) ; <nl> + } <nl> + } <nl> + assert . gt ( cursor . itcount ( ) , 0 , errMsg ) ; <nl> + } <nl> + / / Test that we invalidate namespaces for both collection drops and renames . <nl> + testNamespaceInvalidation ( false ) ; <nl> + testNamespaceInvalidation ( true ) ; <nl> + } ( ) ) ; <nl> mmm a / src / mongo / db / storage / mmap_v1 / mmap_v1_database_catalog_entry . cpp <nl> ppp b / src / mongo / db / storage / mmap_v1 / mmap_v1_database_catalog_entry . cpp <nl> Status MMAPV1DatabaseCatalogEntry : : _renameSingleNamespace ( OperationContext * txn , <nl> <nl> RecordId rid = _addNamespaceToNamespaceCollection ( txn , toNS , newSpec . isEmpty ( ) ? 0 : & newSpec ) ; <nl> <nl> + / / Invalidate old namespace record <nl> + const NamespaceString nsn ( name ( ) , " system . namespaces " ) ; <nl> + StringData dbName ( name ( ) ) ; <nl> + invariant ( txn - > lockState ( ) - > isDbLockedForMode ( dbName , MODE_X ) ) ; <nl> + Database * db = dbHolder ( ) . get ( txn , dbName ) ; <nl> + Collection * systemNamespaces = db - > getCollection ( nsn ) ; <nl> + systemNamespaces - > getCursorManager ( ) - > invalidateDocument ( <nl> + txn , oldSpecLocation , INVALIDATION_DELETION ) ; <nl> + <nl> _getNamespaceRecordStore ( ) - > deleteRecord ( txn , oldSpecLocation ) ; <nl> <nl> Entry * & entry = _collections [ toNS . 
toString ( ) ] ; <nl> void MMAPV1DatabaseCatalogEntry : : _removeNamespaceFromNamespaceCollection ( Operati <nl> RecordStoreV1Base * rs = _getNamespaceRecordStore ( ) ; <nl> invariant ( rs ) ; <nl> <nl> - rs - > deleteRecord ( txn , entry - > second - > catalogEntry - > getNamespacesRecordId ( ) ) ; <nl> + / / Invalidate old namespace record <nl> + RecordId oldSpecLocation = entry - > second - > catalogEntry - > getNamespacesRecordId ( ) ; <nl> + const NamespaceString nsn ( name ( ) , " system . namespaces " ) ; <nl> + StringData dbName ( name ( ) ) ; <nl> + invariant ( txn - > lockState ( ) - > isDbLockedForMode ( dbName , MODE_X ) ) ; <nl> + Database * db = dbHolder ( ) . get ( txn , dbName ) ; <nl> + Collection * systemNamespaces = db - > getCollection ( nsn ) ; <nl> + systemNamespaces - > getCursorManager ( ) - > invalidateDocument ( <nl> + txn , oldSpecLocation , INVALIDATION_DELETION ) ; <nl> + <nl> + rs - > deleteRecord ( txn , oldSpecLocation ) ; <nl> } <nl> <nl> CollectionOptions MMAPV1DatabaseCatalogEntry : : getCollectionOptions ( OperationContext * txn , <nl>
SERVER - 27996 Invalidate stale system . namespaces record IDs
mongodb/mongo
9e0bacc738f93308bf7ab020f8959619e1f98519
2017-02-14T23:05:21Z
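Both hunks enforce the same discipline: a record id that open cursors may still be buffering must be invalidated before the record store frees it, otherwise an exhausted-later cursor dereferences a stale location. A toy C++ sketch of that invalidate-before-delete shape (this is not MongoDB's real CursorManager API, just the ordering it guarantees):

#include <cstdint>
#include <iostream>
#include <set>

struct CursorManager {
    std::set<std::uint64_t> pinned;  // record ids buffered by open cursors
    void invalidateDocument(std::uint64_t rid) { pinned.erase(rid); }
};

struct RecordStore {
    CursorManager* cursors;
    void deleteRecord(std::uint64_t rid) {
        cursors->invalidateDocument(rid);  // the step the bug was missing
        std::cout << "freed record " << rid << "\n";
    }
};

int main() {
    CursorManager cm;
    cm.pinned.insert(42);
    RecordStore rs{&cm};
    rs.deleteRecord(42);   // cursor no longer references the freed record
    std::cout << cm.pinned.size() << " stale ids remain\n";  // 0
}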
new file mode 100755 <nl> index 00000000000 . . 2629cc776b4 <nl> mmm / dev / null <nl> ppp b / Telegram / DeployLinux . sh <nl> <nl> + AppVersionStr = 0 . 6 . 3 <nl> + AppVersion = 6003 <nl> + <nl> + if [ ! - f " . / . . / Linux / Release / deploy / $ AppVersionStr / tlinuxupd $ AppVersion " ] ; then <nl> + echo " tlinuxupd $ AppVersion not found ! " ; <nl> + exit 1 <nl> + fi <nl> + <nl> + if [ ! - f " . / . . / Linux / Release / deploy / $ AppVersionStr / tsetup . $ AppVersionStr . tar . xz " ] ; then <nl> + echo " tsetup . $ AppVersionStr . tar . xz not found ! " <nl> + exit 1 <nl> + fi <nl> + <nl> + scp . / . . / Linux / Release / deploy / $ AppVersionStr / tlinuxupd $ AppVersion tupdates : tdesktop / static / tlinux / <nl> + scp . / . . / Linux / Release / deploy / $ AppVersionStr / tsetup . $ AppVersionStr . tar . xz tupdates : tdesktop / static / tlinux / <nl> + <nl>
added linux deploy script to git
telegramdesktop/tdesktop
6377f59e23c3b9dcde036550e6ecf0702950af95
2014-10-17T13:27:05Z
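The script's guard clauses are a preflight check: refuse to upload unless every expected artifact exists, so a half-built release never reaches the server. The same pattern in C++17 for comparison (the paths are placeholders, not the real deploy layout):

#include <cstdlib>
#include <filesystem>
#include <iostream>

int main() {
    const std::filesystem::path artifacts[] = {
        "deploy/0.6.3/tlinuxupd6003",
        "deploy/0.6.3/tsetup.0.6.3.tar.xz",
    };
    for (const auto& p : artifacts) {
        if (!std::filesystem::exists(p)) {   // bail out before any upload
            std::cerr << p << " not found!\n";
            return EXIT_FAILURE;
        }
    }
    std::cout << "all artifacts present, ok to upload\n";
    return EXIT_SUCCESS;
}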
mmm a / configure . in <nl> ppp b / configure . in <nl> APP_VERSION_TAG_LC = $ ( echo $ APP_VERSION_TAG | $ { AWK } ' { print tolower ( $ 0 ) } ' ) <nl> APP_VERSION_CODE = $ { APP_VERSION_CODE - $ ( $ { AWK } ' / VERSION_CODE / { print $ 2 } ' version . txt ) } <nl> APP_ADDON_API = $ { APP_ADDON_API - $ ( $ { AWK } ' / ADDON_API / { print $ 2 } ' version . txt ) } <nl> if test " $ APP_NAME " ! = " " & & test " $ APP_VERSION_MAJOR " ! = " " & & test " $ APP_VERSION_MINOR " ! = " " \ <nl> - & & test " $ APP_VERSION_TAG " ! = " " & & test " $ APP_VERSION_CODE " ! = " " & & test " $ APP_ADDON_API " ! = " " ; then <nl> - final_message = " $ final_message \ n $ { APP_NAME } Version : \ t $ { APP_VERSION_MAJOR } . $ { APP_VERSION_MINOR } - $ { APP_VERSION_TAG } " <nl> + & & test " $ APP_VERSION_CODE " ! = " " & & test " $ APP_ADDON_API " ! = " " ; then <nl> + APP_VERSION = $ { APP_VERSION_MAJOR } . $ { APP_VERSION_MINOR } <nl> + if test " $ APP_VERSION_TAG " ! = " " ; then <nl> + APP_VERSION = $ { APP_VERSION } - $ { APP_VERSION_TAG } <nl> + fi <nl> + <nl> AC_SUBST ( APP_NAME ) <nl> AC_SUBST ( APP_NAME_LC ) <nl> + AC_SUBST ( APP_VERSION ) <nl> AC_SUBST ( APP_VERSION_MAJOR ) <nl> AC_SUBST ( APP_VERSION_MINOR ) <nl> AC_SUBST ( APP_VERSION_TAG ) <nl> AC_SUBST ( APP_VERSION_TAG_LC ) <nl> AC_SUBST ( APP_VERSION_CODE ) <nl> AC_SUBST ( APP_ADDON_API ) <nl> + <nl> + final_message = " $ final_message \ n $ { APP_NAME } Version : \ t $ { APP_VERSION } " <nl> else <nl> AC_MSG_ERROR ( could not detect application Version , make sure version . txt is complete ) <nl> fi <nl> mmm a / tools / android / packaging / xbmc / AndroidManifest . xml . in <nl> ppp b / tools / android / packaging / xbmc / AndroidManifest . xml . in <nl> <nl> < manifest xmlns : android = " http : / / schemas . android . com / apk / res / android " <nl> package = " org . xbmc . @ APP_NAME_LC @ " <nl> android : versionCode = " @ APP_VERSION_CODE @ " <nl> - android : versionName = " @ APP_VERSION_MAJOR @ . @ APP_VERSION_MINOR @ - @ APP_VERSION_TAG @ " > <nl> + android : versionName = " @ APP_VERSION @ " > <nl> <nl> < ! - - This is the platform API where NativeActivity was introduced . - - > <nl> < uses - sdk android : minSdkVersion = " 14 " / > <nl> mmm a / tools / darwin / packaging / atv2 / mkdeb - atv2 . sh . in <nl> ppp b / tools / darwin / packaging / atv2 / mkdeb - atv2 . sh . in <nl> fi <nl> PACKAGE = org . xbmc . @ APP_NAME_LC @ - atv2 <nl> <nl> VERSION = @ APP_VERSION_MAJOR @ . @ APP_VERSION_MINOR @ <nl> - REVISION = 0 ~ @ APP_VERSION_TAG_LC @ <nl> + REVISION = 0 <nl> + <nl> + if [ " @ APP_VERSION_TAG_LC @ " ! = " " ] ; then <nl> + REVISION = $ REVISION ~ @ APP_VERSION_TAG_LC @ <nl> + fi <nl> + <nl> ARCHIVE = $ { PACKAGE } _ $ { VERSION } - $ { REVISION } _iphoneos - arm . deb <nl> SIZE = " $ ( du - s - k $ { APP } | awk ' { print $ 1 } ' ) " <nl> <nl> mmm a / tools / darwin / packaging / ios / mkdeb - ios . sh . in <nl> ppp b / tools / darwin / packaging / ios / mkdeb - ios . sh . in <nl> fi <nl> PACKAGE = org . xbmc . @ APP_NAME_LC @ - ios <nl> <nl> VERSION = @ APP_VERSION_MAJOR @ . @ APP_VERSION_MINOR @ <nl> - REVISION = 0 ~ @ APP_VERSION_TAG_LC @ <nl> + REVISION = 0 <nl> + <nl> + if [ " @ APP_VERSION_TAG_LC @ " ! = " " ] ; then <nl> + REVISION = $ REVISION ~ @ APP_VERSION_TAG_LC @ <nl> + fi <nl> + <nl> ARCHIVE = $ { PACKAGE } _ $ { VERSION } - $ { REVISION } _iphoneos - arm . deb <nl> SIZE = " $ ( du - s - k $ { APP } | awk ' { print $ 1 } ' ) " <nl> <nl> mmm a / tools / darwin / packaging / osx / mkdmg - osx . sh . 
in <nl> ppp b / tools / darwin / packaging / osx / mkdmg - osx . sh . in <nl> ARCHITECTURE = ` file $ APP / Contents / MacOS / @ APP_NAME @ | awk ' { print $ NF } ' ` <nl> PACKAGE = org . xbmc . @ APP_NAME_LC @ - osx <nl> <nl> VERSION = @ APP_VERSION_MAJOR @ . @ APP_VERSION_MINOR @ <nl> - REVISION = 0 ~ @ APP_VERSION_TAG_LC @ <nl> + REVISION = 0 <nl> + <nl> + if [ " @ APP_VERSION_TAG_LC @ " ! = " " ] ; then <nl> + REVISION = $ REVISION ~ @ APP_VERSION_TAG_LC @ <nl> + fi <nl> + <nl> ARCHIVE = $ { PACKAGE } _ $ { VERSION } - $ { REVISION } _macosx - intel - $ { ARCHITECTURE } <nl> <nl> echo Creating $ PACKAGE package version $ VERSION revision $ REVISION <nl> mmm a / tools / windows / CompileInfo . bat <nl> ppp b / tools / windows / CompileInfo . bat <nl> FOR / f % % i IN ( ' % msys_bin_dir % \ awk . exe " / VERSION_MINOR / { print $ 2 } " % base_dir % \ v <nl> FOR / f % % i IN ( ' % msys_bin_dir % \ awk . exe " / VERSION_TAG / { print $ 2 } " % base_dir % \ version . txt ' ) DO SET tag = % % i <nl> FOR / f % % i IN ( ' % msys_bin_dir % \ awk . exe " / ADDON_API / { print $ 2 } " % base_dir % \ version . txt ' ) DO SET addon_api = % % i <nl> <nl> + SET app_version = % major % . % minor % <nl> + IF NOT [ % tag % ] = = [ ] ( <nl> + SET app_version = % app_version % - % tag % <nl> + ) <nl> + <nl> REM XBMC_PC . rc . in requires a comma - separated version of addon_api <nl> SET separator = , <nl> CALL SET file_version = % % addon_api : . = % separator % % % % separator % 0 <nl> CALL SET file_version = % % addon_api : . = % separator % % % % separator % 0 <nl> REM create the files with the proper version information <nl> " % msys_bin_dir % \ sed . exe " - e s / @ APP_NAME @ / % app_name % / g - e s / @ APP_VERSION_MAJOR @ / % major % / g - e s / @ APP_VERSION_MINOR @ / % minor % / g - e s / @ APP_VERSION_TAG @ / % tag % / g " % base_dir % \ xbmc \ CompileInfo . cpp . in " > " % base_dir % \ xbmc \ CompileInfo . cpp " <nl> " % msys_bin_dir % \ sed . exe " s / @ APP_ADDON_API @ / % addon_api % / g " % base_dir % \ addons \ xbmc . addon \ addon . xml . in " > " % base_dir % \ addons \ xbmc . addon \ addon . xml " <nl> - " % msys_bin_dir % \ sed . exe " - e s / @ APP_NAME @ / % app_name % / g - e s / @ COMPANY_NAME @ / % company_name % / g - e s / @ APP_VERSION_MAJOR @ / % major % / g - e s / @ APP_VERSION_MINOR @ / % minor % / g - e s / @ APP_VERSION_TAG @ / % tag % / g - e s / @ FILE_VERSION @ / % file_version % / g " % base_dir % \ xbmc \ win32 \ XBMC_PC . rc . in " > " % base_dir % \ xbmc \ win32 \ XBMC_PC . rc " <nl> + " % msys_bin_dir % \ sed . exe " - e s / @ APP_NAME @ / % app_name % / g - e s / @ COMPANY_NAME @ / % company_name % / g - e s / @ APP_VERSION_MAJOR @ / % major % / g - e s / @ APP_VERSION_MINOR @ / % minor % / g - e s / @ APP_VERSION_TAG @ / % tag % / g - e s / @ FILE_VERSION @ / % file_version % / g - e s / @ APP_VERSION @ / % app_version % / g " % base_dir % \ xbmc \ win32 \ XBMC_PC . rc . in " > " % base_dir % \ xbmc \ win32 \ XBMC_PC . rc " <nl> <nl> mmm a / xbmc / win32 / XBMC_PC . rc . in <nl> ppp b / xbmc / win32 / XBMC_PC . rc . in <nl> BEGIN <nl> BEGIN <nl> VALUE " CompanyName " , " @ COMPANY_NAME @ " <nl> VALUE " FileDescription " , " @ APP_NAME @ " <nl> - VALUE " FileVersion " , " @ APP_VERSION_MAJOR @ . @ APP_VERSION_MINOR @ - @ APP_VERSION_TAG @ " <nl> + VALUE " FileVersion " , " @ APP_VERSION @ " <nl> VALUE " InternalName " , " @ APP_NAME @ . exe " <nl> VALUE " LegalCopyright " , " Copyright ( c ) @ COMPANY_NAME @ . All rights reserved . 
" <nl> VALUE " OriginalFilename " , " @ APP_NAME @ . exe " <nl> VALUE " ProductName " , " @ APP_NAME @ for Windows " <nl> - VALUE " ProductVersion " , " @ APP_VERSION_MAJOR @ . @ APP_VERSION_MINOR @ - @ APP_VERSION_TAG @ " <nl> + VALUE " ProductVersion " , " @ APP_VERSION @ " <nl> END <nl> END <nl> BLOCK " VarFileInfo " <nl>
Made versiontag optional
xbmc/xbmc
c8a2a6bbb711ada6ef2c406971c1f9c57b129467
2014-12-22T20:03:34Z
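All of the hunks above implement one rule: the version string is MAJOR.MINOR, with "-TAG" appended only when a tag is set, whether it is assembled in configure.in, a packaging script, or CompileInfo.bat. A small C++ sketch of that rule (the function and sample values are illustrative):

#include <iostream>
#include <string>

static std::string appVersion(int major, int minor, const std::string& tag) {
    std::string v = std::to_string(major) + "." + std::to_string(minor);
    if (!tag.empty())
        v += "-" + tag;   // the tag is optional, as in the configure.in change
    return v;
}

int main() {
    std::cout << appVersion(14, 0, "ALPHA1") << "\n";  // 14.0-ALPHA1
    std::cout << appVersion(14, 0, "") << "\n";        // 14.0
}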
mmm a / core / os / file_access . cpp <nl> ppp b / core / os / file_access . cpp <nl> Vector < String > FileAccess : : get_csv_line ( const String & p_delim ) const { <nl> String l ; <nl> int qc = 0 ; <nl> do { <nl> - ERR_FAIL_COND_V ( eof_reached ( ) , Vector < String > ( ) ) ; <nl> + if ( eof_reached ( ) ) <nl> + break ; <nl> <nl> l + = get_line ( ) + " \ n " ; <nl> qc = 0 ; <nl>
Removed error message that appeared whenever the csv file changed
godotengine/godot
70c3270dfa1f5d237e57454b10fde1ea60884fd2
2018-12-16T04:09:33Z
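The change makes the quoted-field continuation loop treat EOF as the end of the record instead of a hard error: a CSV line with an unbalanced quote keeps consuming lines, but running out of input now simply ends the record. A stream-based C++ sketch of that loop (a stand-in for illustration, not Godot's FileAccess API):

#include <iostream>
#include <sstream>
#include <string>

static std::string getCsvRecord(std::istream& in) {
    std::string record, line;
    int quotes = 0;
    do {
        if (!std::getline(in, line))
            break;                     // was: hard error on EOF
        record += line + "\n";
        for (char c : line)
            if (c == '"') ++quotes;
    } while (quotes % 2 != 0);         // keep reading while a quote is open
    return record;
}

int main() {
    std::istringstream csv("a,\"multi\nline\",b");  // quote closes before EOF
    std::cout << getCsvRecord(csv);
    std::istringstream bad("a,\"never closed");     // EOF inside quotes: no error
    std::cout << getCsvRecord(bad) << "\n";
}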
mmm a / Common / Include / MPIWrapper . h <nl> ppp b / Common / Include / MPIWrapper . h <nl> namespace Microsoft { namespace MSR { namespace CNTK { <nl> # endif <nl> } <nl> <nl> + / / Workaround for the issue with MPI hanging when we have non - 0 exit codes from CNTK processes <nl> + / / OpenMPI has a confirmed race condition on killing child process vs . handling their non - zero exit statuses , resulting <nl> + / / in a deadlock , where all processes are killed but MPI is still waiting . <nl> + / / This happens when several perfectly synchronized processes ( for example on MPI barrier ) <nl> + / / simultaneously exit with non - 0 exit code . <nl> + / / As a workaround , we simply sleep 50 * rank milliseconds , effectively " de - synchronizing processes " at exit , <nl> + / / allowing MPI to sequentially handle terminations <nl> + static int s_myRank ; <nl> + static void MPIWorkaroundAtExit ( ) <nl> + { <nl> + / / Note : we can ' t use g_mpi , since MPI stack is already down at this point <nl> + Sleep ( s_myRank * 50 ) ; <nl> + } <nl> + <nl> public : <nl> MPIWrapper ( ) <nl> : m_currentComm ( MPI_COMM_WORLD ) <nl> namespace Microsoft { namespace MSR { namespace CNTK { <nl> MPI_Comm_size ( MPI_COMM_WORLD , & m_numMPINodes ) ; <nl> m_numNodesInUse = m_numMPINodes ; <nl> <nl> + / / Applying MPI workaround <nl> + s_myRank = m_myRank ; <nl> + atexit ( & MPIWrapper : : MPIWorkaroundAtExit ) ; <nl> + <nl> / / by default we use all of them <nl> RequestNodes ( " MPIWrapper " ) ; <nl> <nl> new file mode 100644 <nl> index 00000000000 . . 0762e08eba5 <nl> mmm / dev / null <nl> ppp b / Common / MPIWrapper . cpp <nl> <nl> + # include " Include / Basics . h " <nl> + # include " Include / MPIWrapper . h " <nl> + <nl> + int Microsoft : : MSR : : CNTK : : MPIWrapper : : s_myRank = - 1 ; <nl> mmm a / MachineLearning / CNTK / CNTK . vcxproj <nl> ppp b / MachineLearning / CNTK / CNTK . vcxproj <nl> <nl> < PrecompiledHeader Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Debug | x64 ' " > < / PrecompiledHeader > <nl> < / ClCompile > <nl> < ClCompile Include = " . . \ . . \ Common \ TimerUtility . cpp " / > <nl> + < ClCompile Include = " . . \ . . \ Common \ MPIWrapper . cpp " / > <nl> < ClCompile Include = " CNTK . cpp " / > <nl> < ClCompile Include = " ExperimentalNetworkBuilder . cpp " / > <nl> < ClCompile Include = " ModelEditLanguage . cpp " / > <nl> <nl> < / ItemGroup > <nl> < Import Project = " $ ( VCTargetsPath ) \ Microsoft . Cpp . targets " / > <nl> < ImportGroup Label = " ExtensionTargets " / > <nl> - < / Project > <nl> \ No newline at end of file <nl> + < / Project > <nl> mmm a / Makefile <nl> ppp b / Makefile <nl> CNTK_SRC = \ <nl> BrainScript / BrainScriptParser . cpp \ <nl> BrainScript / BrainScriptTest . cpp \ <nl> MachineLearning / CNTK / ExperimentalNetworkBuilder . cpp \ <nl> + Common / MPIWrapper . cpp \ <nl> <nl>
Applying workaround for race condition in OpenMPI on non - 0 exit code from CNTK
microsoft/CNTK
f2db77795e9ce5464f4742cb7e7915105e806016
2015-10-30T23:16:09Z
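The workaround's shape: cache the rank in a static (the MPI stack is already torn down when the hook runs), register an atexit handler, and sleep rank * 50 ms so perfectly synchronized non-zero exits reach the launcher one at a time. A portable C++ sketch of the same shape; std::this_thread stands in for the Win32 Sleep() used above, and the rank is hard-coded here for illustration:

#include <chrono>
#include <cstdlib>
#include <thread>

static int s_myRank = -1;  // cached: MPI is already finalized at exit time

static void staggeredExit() {
    // Each rank delays its termination by a different amount, so the
    // launcher handles the non-zero statuses sequentially.
    std::this_thread::sleep_for(std::chrono::milliseconds(s_myRank * 50));
}

int main() {
    s_myRank = 3;                  // would come from MPI_Comm_rank in practice
    std::atexit(&staggeredExit);
    return 1;                      // non-zero exit: hook delays it by 150 ms
}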
mmm a / lib / Index / Index . cpp <nl> ppp b / lib / Index / Index . cpp <nl> static NominalTypeDecl * getNominalParent ( ValueDecl * D ) { <nl> return Ty - > getAnyNominal ( ) ; <nl> } <nl> <nl> - / / / \ returns true if \ c D is a subclass of ' XCTestCase ' . <nl> - static bool isUnitTestCase ( const ClassDecl * D ) { <nl> - if ( ! D ) <nl> - return false ; <nl> - while ( auto * SuperD = D - > getSuperclassDecl ( ) ) { <nl> - if ( SuperD - > getNameStr ( ) = = " XCTestCase " ) <nl> - return true ; <nl> - D = SuperD ; <nl> - } <nl> - return false ; <nl> - } <nl> - <nl> - static bool isUnitTest ( ValueDecl * D ) { <nl> - if ( ! D - > hasName ( ) ) <nl> - return false ; <nl> - <nl> - / / A ' test candidate ' is : <nl> - / / 1 . An instance method . . . <nl> - auto FD = dyn_cast < FuncDecl > ( D ) ; <nl> - if ( ! FD ) <nl> - return false ; <nl> - if ( ! D - > isInstanceMember ( ) ) <nl> - return false ; <nl> - <nl> - / / 2 . . . . on a class or extension ( not a struct ) subclass of XCTestCase . . . <nl> - auto parentNTD = getNominalParent ( D ) ; <nl> - if ( ! parentNTD ) <nl> - return false ; <nl> - if ( ! isa < ClassDecl > ( parentNTD ) ) <nl> - return false ; <nl> - if ( ! isUnitTestCase ( cast < ClassDecl > ( parentNTD ) ) ) <nl> - return false ; <nl> - <nl> - / / 3 . . . . that returns void . . . <nl> - Type RetTy = FD - > getResultInterfaceType ( ) ; <nl> - if ( RetTy & & ! RetTy - > isVoid ( ) ) <nl> - return false ; <nl> - <nl> - / / 4 . . . . takes no parameters . . . <nl> - if ( FD - > getParameterLists ( ) . size ( ) ! = 2 ) <nl> - return false ; <nl> - if ( FD - > getParameterList ( 1 ) - > size ( ) ! = 0 ) <nl> - return false ; <nl> - <nl> - / / 5 . . . . is of at least ' internal ' accessibility ( unless we can use <nl> - / / Objective - C reflection ) . . . <nl> - if ( ! D - > getASTContext ( ) . LangOpts . EnableObjCInterop & & <nl> - ( D - > getFormalAccess ( ) < Accessibility : : Internal | | <nl> - parentNTD - > getFormalAccess ( ) < Accessibility : : Internal ) ) <nl> - return false ; <nl> - <nl> - / / 6 . . . . and starts with " test " . <nl> - if ( FD - > getName ( ) . str ( ) . startswith ( " test " ) ) <nl> - return true ; <nl> - <nl> - return false ; <nl> - } <nl> - <nl> bool IndexSwiftASTWalker : : initFuncDeclIndexSymbol ( FuncDecl * D , <nl> IndexSymbol & Info ) { <nl> if ( initIndexSymbol ( D , D - > getLoc ( ) , / * IsRef = * / false , Info ) ) <nl> return true ; <nl> <nl> - if ( isUnitTest ( D ) ) <nl> - Info . symInfo . Properties | = SymbolProperty : : UnitTest ; <nl> - <nl> if ( D - > getAttrs ( ) . hasAttribute < IBActionAttr > ( ) ) { <nl> / / Relate with type of the first parameter using RelationIBTypeOf . <nl> if ( D - > getParameterLists ( ) . size ( ) > = 2 ) { <nl> mmm a / lib / Index / IndexSymbol . cpp <nl> ppp b / lib / Index / IndexSymbol . cpp <nl> <nl> using namespace swift ; <nl> using namespace swift : : index ; <nl> <nl> + static NominalTypeDecl * getNominalParent ( const ValueDecl * D ) { <nl> + Type Ty = D - > getDeclContext ( ) - > getDeclaredTypeOfContext ( ) ; <nl> + if ( ! Ty ) <nl> + return nullptr ; <nl> + return Ty - > getAnyNominal ( ) ; <nl> + } <nl> + <nl> + / / / \ returns true if \ c D is a subclass of ' XCTestCase ' . <nl> + static bool isUnitTestCase ( const ClassDecl * D ) { <nl> + if ( ! 
D ) <nl> + return false ; <nl> + while ( auto * SuperD = D - > getSuperclassDecl ( ) ) { <nl> + if ( SuperD - > getNameStr ( ) = = " XCTestCase " ) <nl> + return true ; <nl> + D = SuperD ; <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> + static bool isUnitTest ( const ValueDecl * D ) { <nl> + if ( ! D - > hasName ( ) ) <nl> + return false ; <nl> + <nl> + / / A ' test candidate ' is : <nl> + / / 1 . An instance method . . . <nl> + auto FD = dyn_cast < FuncDecl > ( D ) ; <nl> + if ( ! FD ) <nl> + return false ; <nl> + if ( ! D - > isInstanceMember ( ) ) <nl> + return false ; <nl> + <nl> + / / 2 . . . . on a class or extension ( not a struct ) subclass of XCTestCase . . . <nl> + auto parentNTD = getNominalParent ( D ) ; <nl> + if ( ! parentNTD ) <nl> + return false ; <nl> + if ( ! isa < ClassDecl > ( parentNTD ) ) <nl> + return false ; <nl> + if ( ! isUnitTestCase ( cast < ClassDecl > ( parentNTD ) ) ) <nl> + return false ; <nl> + <nl> + / / 3 . . . . that returns void . . . <nl> + Type RetTy = FD - > getResultInterfaceType ( ) ; <nl> + if ( RetTy & & ! RetTy - > isVoid ( ) ) <nl> + return false ; <nl> + <nl> + / / 4 . . . . takes no parameters . . . <nl> + if ( FD - > getParameterLists ( ) . size ( ) ! = 2 ) <nl> + return false ; <nl> + if ( FD - > getParameterList ( 1 ) - > size ( ) ! = 0 ) <nl> + return false ; <nl> + <nl> + / / 5 . . . . is of at least ' internal ' accessibility ( unless we can use <nl> + / / Objective - C reflection ) . . . <nl> + if ( ! D - > getASTContext ( ) . LangOpts . EnableObjCInterop & & <nl> + ( D - > getFormalAccess ( ) < Accessibility : : Internal | | <nl> + parentNTD - > getFormalAccess ( ) < Accessibility : : Internal ) ) <nl> + return false ; <nl> + <nl> + / / 6 . . . . and starts with " test " . <nl> + if ( FD - > getName ( ) . str ( ) . startswith ( " test " ) ) <nl> + return true ; <nl> + <nl> + return false ; <nl> + } <nl> + <nl> static void setFuncSymbolInfo ( const FuncDecl * FD , SymbolInfo & sym ) { <nl> sym . Kind = SymbolKind : : Function ; <nl> <nl> if ( FD - > getAttrs ( ) . hasAttribute < IBActionAttr > ( ) ) <nl> sym . Properties | = SymbolProperty : : IBAnnotated ; <nl> <nl> + if ( isUnitTest ( FD ) ) <nl> + sym . Properties | = SymbolProperty : : UnitTest ; <nl> + <nl> if ( FD - > getDeclContext ( ) - > isTypeContext ( ) ) { <nl> if ( FD - > isStatic ( ) ) { <nl> if ( FD - > getCorrectStaticSpelling ( ) = = StaticSpellingKind : : KeywordClass ) <nl> SymbolInfo index : : getSymbolInfoForDecl ( const Decl * D ) { <nl> switch ( D - > getKind ( ) ) { <nl> case DeclKind : : Enum : info . Kind = SymbolKind : : Enum ; break ; <nl> case DeclKind : : Struct : info . Kind = SymbolKind : : Struct ; break ; <nl> - case DeclKind : : Class : info . Kind = SymbolKind : : Class ; break ; <nl> case DeclKind : : Protocol : info . Kind = SymbolKind : : Protocol ; break ; <nl> + case DeclKind : : Class : <nl> + info . Kind = SymbolKind : : Class ; <nl> + if ( isUnitTestCase ( cast < ClassDecl > ( D ) ) ) <nl> + info . Properties | = SymbolProperty : : UnitTest ; <nl> + break ; <nl> case DeclKind : : Extension : { <nl> info . Kind = SymbolKind : : Extension ; <nl> auto * ED = cast < ExtensionDecl > ( D ) ; <nl> SymbolInfo index : : getSymbolInfoForDecl ( const Decl * D ) { <nl> break ; <nl> if ( isa < StructDecl > ( NTD ) ) <nl> info . SubKind = SymbolSubKind : : SwiftExtensionOfStruct ; <nl> - else if ( isa < ClassDecl > ( NTD ) ) <nl> + else if ( auto * CD = dyn_cast < ClassDecl > ( NTD ) ) { <nl> info . 
SubKind = SymbolSubKind : : SwiftExtensionOfClass ; <nl> - else if ( isa < EnumDecl > ( NTD ) ) <nl> + if ( isUnitTestCase ( CD ) ) <nl> + info . Properties | = SymbolProperty : : UnitTest ; <nl> + } else if ( isa < EnumDecl > ( NTD ) ) <nl> info . SubKind = SymbolSubKind : : SwiftExtensionOfEnum ; <nl> else if ( isa < ProtocolDecl > ( NTD ) ) <nl> info . SubKind = SymbolSubKind : : SwiftExtensionOfProtocol ; <nl> mmm a / test / Index / index_module . swift <nl> ppp b / test / Index / index_module . swift <nl> <nl> public var someGlobal : Int = 0 <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 12 | variable / Swift | someGlobal | [ [ SOMEGLOBAL_USR : . * ] ] | Def | rel : 0 <nl> / / CHECK : [ [ @ LINE - 2 ] ] : 12 | function / acc - get / Swift | getter : someGlobal | [ [ SOMEGLOBAL_GET_USR : . * ] ] | Def , Impl , RelAcc | rel : 1 <nl> - / / CHECK - NEXT : RelAcc | someGlobal | [ [ SOMEGLOBAL_USR ] ] <nl> + / / CHECK - NEXT : RelAcc | variable / Swift | someGlobal | [ [ SOMEGLOBAL_USR ] ] <nl> / / CHECK : [ [ @ LINE - 4 ] ] : 12 | function / acc - set / Swift | setter : someGlobal | [ [ SOMEGLOBAL_SET_USR : . * ] ] | Def , Impl , RelAcc | rel : 1 <nl> - / / CHECK - NEXT : RelAcc | someGlobal | [ [ SOMEGLOBAL_USR ] ] <nl> + / / CHECK - NEXT : RelAcc | variable / Swift | someGlobal | [ [ SOMEGLOBAL_USR ] ] <nl> <nl> public func someFunc ( ) { } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 13 | function / Swift | someFunc ( ) | [ [ SOMEFUNC_USR : . * ] ] | Def | rel : 0 <nl> public func someFunc ( ) { } <nl> <nl> / / CHECK : 0 : 0 | variable / Swift | someGlobal | [ [ SOMEGLOBAL_USR ] ] | Def | rel : 0 <nl> / / CHECK : 0 : 0 | function / acc - get / Swift | getter : someGlobal | [ [ SOMEGLOBAL_GET_USR : . * ] ] | Def , Impl , RelAcc | rel : 1 <nl> - / / CHECK - NEXT : RelAcc | someGlobal | [ [ SOMEGLOBAL_USR ] ] <nl> + / / CHECK - NEXT : RelAcc | variable / Swift | someGlobal | [ [ SOMEGLOBAL_USR ] ] <nl> / / CHECK : 0 : 0 | function / acc - set / Swift | setter : someGlobal | [ [ SOMEGLOBAL_SET_USR : . * ] ] | Def , Impl , RelAcc | rel : 1 <nl> - / / CHECK - NEXT : RelAcc | someGlobal | [ [ SOMEGLOBAL_USR ] ] <nl> + / / CHECK - NEXT : RelAcc | variable / Swift | someGlobal | [ [ SOMEGLOBAL_USR ] ] <nl> <nl> / / CHECK : 0 : 0 | function / Swift | someFunc ( ) | [ [ SOMEFUNC_USR ] ] | Def | rel : 0 <nl> mmm a / test / Index / index_system_module . swift <nl> ppp b / test / Index / index_system_module . swift <nl> <nl> <nl> / / CHECK : class / Swift | SubCls | [ [ SUBCLS_USR : . * ] ] | Def | rel : 0 <nl> / / CHECK : class / Swift | BaseCls | [ [ BASECLS_USR : . * ] ] | Ref , RelBase | rel : 1 <nl> - / / CHECK - NEXT : RelBase | SubCls | [ [ SUBCLS_USR ] ] <nl> + / / CHECK - NEXT : RelBase | class / Swift | SubCls | [ [ SUBCLS_USR ] ] <nl> / / CHECK : instance - method / Swift | theMeth ( ) | [ [ SUBCLSMETH_USR : . * ] ] | Def , RelChild , RelOver | rel : 2 <nl> - / / CHECK - NEXT : RelOver | theMeth ( ) | [ [ BASECLSMETH_USR : . * ] ] <nl> - / / CHECK - NEXT : RelChild | SubCls | [ [ SUBCLS_USR ] ] <nl> + / / CHECK - NEXT : RelOver | instance - method / Swift | theMeth ( ) | [ [ BASECLSMETH_USR : . 
* ] ] <nl> + / / CHECK - NEXT : RelChild | class / Swift | SubCls | [ [ SUBCLS_USR ] ] <nl> / / CHECK : class / Swift | BaseCls | [ [ BASECLS_USR ] ] | Def | rel : 0 <nl> / / CHECK : instance - method / Swift | theMeth ( ) | [ [ BASECLSMETH_USR ] ] | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | BaseCls | [ [ BASECLS_USR ] ] <nl> + / / CHECK - NEXT : RelChild | class / Swift | BaseCls | [ [ BASECLS_USR ] ] <nl> / / CHECK : function / Swift | some_func ( ) | [ [ SOMEFUNC_USR : . * ] ] | Def | rel : 0 <nl> mmm a / test / Index / kinds . swift <nl> ppp b / test / Index / kinds . swift <nl> enum AnEnumeration { <nl> / / EnumElement <nl> case Element <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 8 | enumerator / Swift | Element | s : 14swift_ide_test13AnEnumerationO7ElementA2CmF | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | AnEnumeration | s : 14swift_ide_test13AnEnumerationO <nl> + / / CHECK - NEXT : RelChild | enum / Swift | AnEnumeration | s : 14swift_ide_test13AnEnumerationO <nl> } <nl> <nl> / / Struct <nl> struct AStruct { <nl> / / Subscript <nl> subscript ( index : Int ) - > Int { <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 3 | instance - property / subscript / Swift | subscript ( _ : ) | s : 14swift_ide_test7AStructV9subscriptS2ici | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | AStruct | s : 14swift_ide_test7AStructV <nl> + / / CHECK - NEXT : RelChild | struct / Swift | AStruct | s : 14swift_ide_test7AStructV <nl> <nl> / / Accessor + AccessorAddressor <nl> unsafeAddress { <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 5 | instance - method / acc - addr / Swift | | s : 14swift_ide_test7AStructV9subscriptS2icflu | Def , RelChild , RelAcc | rel : 1 <nl> - / / CHECK - NEXT : RelChild , RelAcc | subscript ( _ : ) | s : 14swift_ide_test7AStructV9subscriptS2ici <nl> + / / CHECK - NEXT : RelChild , RelAcc | instance - property / subscript / Swift | subscript ( _ : ) | s : 14swift_ide_test7AStructV9subscriptS2ici <nl> <nl> return UnsafePointer ( base ) <nl> } <nl> struct AStruct { <nl> / / Accessor + AccessorMutableAddressor <nl> unsafeMutableAddress { <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 5 | instance - method / acc - mutaddr / Swift | | s : 14swift_ide_test7AStructV9subscriptS2icfau | Def , RelChild , RelAcc | rel : 1 <nl> - / / CHECK - NEXT : RelChild , RelAcc | subscript ( _ : ) | s : 14swift_ide_test7AStructV9subscriptS2ici <nl> + / / CHECK - NEXT : RelChild , RelAcc | instance - property / subscript / Swift | subscript ( _ : ) | s : 14swift_ide_test7AStructV9subscriptS2ici <nl> <nl> return base <nl> } <nl> class AClass { <nl> / / InstanceMethod + Parameters <nl> func instanceMethod ( a : Int , b b : Int , _ c : Int , d _ : Int , _ : Int ) { } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 8 | instance - method / Swift | instanceMethod ( a : b : _ : d : _ : ) | s : 14swift_ide_test6AClassC14instanceMethodySi1a_Si1bS2i1dSitF | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | AClass | s : 14swift_ide_test6AClassC <nl> + / / CHECK - NEXT : RelChild | class / Swift | AClass | s : 14swift_ide_test6AClassC <nl> / / CHECK : [ [ @ LINE - 3 ] ] : 23 | param / Swift | a | s : 14swift_ide_test6AClassC14instanceMethodySi1a_Si1bS2i1dSitFAEL_Siv | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | instanceMethod ( a : b : _ : d : _ : ) | s : 14swift_ide_test6AClassC14instanceMethodySi1a_Si1bS2i1dSitF <nl> + / / CHECK - NEXT : RelChild | instance - method / Swift | instanceMethod ( a : b : _ : d : _ : ) | s : 
14swift_ide_test6AClassC14instanceMethodySi1a_Si1bS2i1dSitF <nl> / / CHECK - NOT : [ [ @ LINE - 5 ] ] : 33 | param / Swift | b | s : { { . * } } | Def , RelChild | rel : 1 <nl> / / CHECK - NOT : [ [ @ LINE - 6 ] ] : 43 | param / Swift | c | s : { { . * } } | Def , RelChild | rel : 1 <nl> / / CHECK - NOT : [ [ @ LINE - 7 ] ] : 53 | param / Swift | d | s : { { . * } } | Def , RelChild | rel : 1 <nl> class AClass { <nl> / / ClassMethod <nl> class func classMethod ( ) { } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 14 | class - method / Swift | classMethod ( ) | s : 14swift_ide_test6AClassC11classMethodyyFZ | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | AClass | s : 14swift_ide_test6AClassC <nl> + / / CHECK - NEXT : RelChild | class / Swift | AClass | s : 14swift_ide_test6AClassC <nl> <nl> / / StaticMethod <nl> static func staticMethod ( ) { } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 15 | static - method / Swift | staticMethod ( ) | s : 14swift_ide_test6AClassC12staticMethodyyFZ | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | AClass | s : 14swift_ide_test6AClassC <nl> + / / CHECK - NEXT : RelChild | class / Swift | AClass | s : 14swift_ide_test6AClassC <nl> <nl> / / InstanceProperty <nl> var instanceProperty : Int { <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 7 | instance - property / Swift | instanceProperty | s : 14swift_ide_test6AClassC16instancePropertySiv | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | AClass | s : 14swift_ide_test6AClassC <nl> + / / CHECK - NEXT : RelChild | class / Swift | AClass | s : 14swift_ide_test6AClassC <nl> <nl> / / Accessor + AccessorGetter <nl> get { <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 5 | instance - method / acc - get / Swift | getter : instanceProperty | s : 14swift_ide_test6AClassC16instancePropertySifg | Def , RelChild , RelAcc | rel : 1 <nl> - / / CHECK - NEXT : RelChild , RelAcc | instanceProperty | s : 14swift_ide_test6AClassC16instancePropertySiv <nl> + / / CHECK - NEXT : RelChild , RelAcc | instance - property / Swift | instanceProperty | s : 14swift_ide_test6AClassC16instancePropertySiv <nl> <nl> return 1 <nl> } <nl> class AClass { <nl> / / Accessor + AccessorSetter <nl> set { } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 5 | instance - method / acc - set / Swift | setter : instanceProperty | s : 14swift_ide_test6AClassC16instancePropertySifs | Def , RelChild , RelAcc | rel : 1 <nl> - / / CHECK - NEXT : RelChild , RelAcc | instanceProperty | s : 14swift_ide_test6AClassC16instancePropertySiv <nl> + / / CHECK - NEXT : RelChild , RelAcc | instance - property / Swift | instanceProperty | s : 14swift_ide_test6AClassC16instancePropertySiv <nl> } <nl> <nl> var observed = 0 { <nl> class AClass { <nl> / / Accessor + AccessorWillSet <nl> willSet { } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 5 | instance - method / acc - willset / Swift | willSet : observed | s : 14swift_ide_test6AClassC8observedSifw | Def , RelChild , RelAcc | rel : 1 <nl> - / / CHECK - NEXT : RelChild , RelAcc | observed | s : 14swift_ide_test6AClassC8observedSiv <nl> + / / CHECK - NEXT : RelChild , RelAcc | instance - property / Swift | observed | s : 14swift_ide_test6AClassC8observedSiv <nl> <nl> / / Accessor + AccessorDidSet <nl> didSet { } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 5 | instance - method / acc - didset / Swift | didSet : observed | s : 14swift_ide_test6AClassC8observedSifW | Def , RelChild , RelAcc | rel : 1 <nl> - / / CHECK - NEXT : RelChild , RelAcc | observed | s : 14swift_ide_test6AClassC8observedSiv <nl> + / / CHECK - NEXT : RelChild , RelAcc | 
instance - property / Swift | observed | s : 14swift_ide_test6AClassC8observedSiv <nl> } <nl> <nl> / / ClassProperty <nl> class let classProperty = 1 <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 13 | class - property / Swift | classProperty | s : 14swift_ide_test6AClassC13classPropertySivZ | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | AClass | s : 14swift_ide_test6AClassC <nl> + / / CHECK - NEXT : RelChild | class / Swift | AClass | s : 14swift_ide_test6AClassC <nl> <nl> / / StaticProperty <nl> static let staticProperty = 1 <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 14 | static - property / Swift | staticProperty | s : 14swift_ide_test6AClassC14staticPropertySivZ | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | AClass | s : 14swift_ide_test6AClassC <nl> + / / CHECK - NEXT : RelChild | class / Swift | AClass | s : 14swift_ide_test6AClassC <nl> <nl> / / Constructor <nl> init ( ) { } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 3 | constructor / Swift | init ( ) | s : 14swift_ide_test6AClassCACycfc | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | AClass | s : 14swift_ide_test6AClassC <nl> + / / CHECK - NEXT : RelChild | class / Swift | AClass | s : 14swift_ide_test6AClassC <nl> <nl> / / Destructor <nl> deinit { } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 3 | destructor / Swift | deinit | s : 14swift_ide_test6AClassCfd | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | AClass | s : 14swift_ide_test6AClassC <nl> + / / CHECK - NEXT : RelChild | class / Swift | AClass | s : 14swift_ide_test6AClassC <nl> } <nl> <nl> / / Protocol <nl> protocol AProtocol { <nl> / / AssociatedType <nl> associatedtype T <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 18 | type - alias / associated - type / Swift | T | s : 14swift_ide_test9AProtocolP1T | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | AProtocol | s : 14swift_ide_test9AProtocolP <nl> + / / CHECK - NEXT : RelChild | protocol / Swift | AProtocol | s : 14swift_ide_test9AProtocolP <nl> } <nl> <nl> / / Extension <nl> extension AnEnumeration { func extFn ( ) { } } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 11 | extension / ext - enum / Swift | AnEnumeration | [ [ EXT_AnEnumeration_USR : s : e : s : 14swift_ide_test13AnEnumerationO5extFnyyF ] ] | Def | rel : 0 <nl> / / CHECK : [ [ @ LINE - 2 ] ] : 11 | enum / Swift | AnEnumeration | s : 14swift_ide_test13AnEnumerationO | Ref , RelExt | rel : 1 <nl> - / / CHECK - NEXT : RelExt | AnEnumeration | [ [ EXT_AnEnumeration_USR ] ] <nl> + / / CHECK - NEXT : RelExt | extension / ext - enum / Swift | AnEnumeration | [ [ EXT_AnEnumeration_USR ] ] <nl> <nl> extension AStruct { func extFn ( ) { } } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 11 | extension / ext - struct / Swift | AStruct | [ [ EXT_AStruct_USR : s : e : s : 14swift_ide_test7AStructV5extFnyyF ] ] | Def | rel : 0 <nl> / / CHECK : [ [ @ LINE - 2 ] ] : 11 | struct / Swift | AStruct | s : 14swift_ide_test7AStructV | Ref , RelExt | rel : 1 <nl> - / / CHECK - NEXT : RelExt | AStruct | [ [ EXT_AStruct_USR ] ] <nl> + / / CHECK - NEXT : RelExt | extension / ext - struct / Swift | AStruct | [ [ EXT_AStruct_USR ] ] <nl> <nl> extension AClass { func extFn ( ) { } } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 11 | extension / ext - class / Swift | AClass | [ [ EXT_AClass_USR : s : e : s : 14swift_ide_test6AClassC5extFnyyF ] ] | Def | rel : 0 <nl> / / CHECK : [ [ @ LINE - 2 ] ] : 11 | class / Swift | AClass | s : 14swift_ide_test6AClassC | Ref , RelExt | rel : 1 <nl> - / / CHECK - NEXT : RelExt | AClass | [ [ EXT_AClass_USR ] ] <nl> + / / CHECK - 
NEXT : RelExt | extension / ext - class / Swift | AClass | [ [ EXT_AClass_USR ] ] <nl> <nl> extension AProtocol { func extFn ( ) } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 11 | extension / ext - protocol / Swift | AProtocol | [ [ EXT_AProtocol_USR : s : e : s : 14swift_ide_test9AProtocolPAAE5extFnyyF ] ] | Def | rel : 0 <nl> / / CHECK : [ [ @ LINE - 2 ] ] : 11 | protocol / Swift | AProtocol | s : 14swift_ide_test9AProtocolP | Ref , RelExt | rel : 1 <nl> - / / CHECK - NEXT : RelExt | AProtocol | [ [ EXT_AProtocol_USR ] ] <nl> + / / CHECK - NEXT : RelExt | extension / ext - protocol / Swift | AProtocol | [ [ EXT_AProtocol_USR ] ] <nl> <nl> / / TypeAlias <nl> typealias SomeAlias = AStruct <nl> typealias SomeAlias = AStruct <nl> / / GenericTypeParam <nl> struct GenericStruct < ATypeParam > { } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 22 | type - alias / generic - type - param / Swift | ATypeParam | s : 14swift_ide_test13GenericStructV10ATypeParamxmfp | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | GenericStruct | s : 14swift_ide_test13GenericStructV <nl> + / / CHECK - NEXT : RelChild | struct / Swift | GenericStruct | s : 14swift_ide_test13GenericStructV <nl> <nl> func GenericFunc < ATypeParam > ( _ : ATypeParam ) { } <nl> / / CHECK - NOT : [ [ @ LINE - 1 ] ] : 18 | type - alias / generic - type - param / Swift | ATypeParam | { { . * } } | Def , RelChild | rel : 1 <nl> func + ( a : AStruct , b : AStruct ) - > AStruct { return a } <nl> <nl> class XCTestCase { } <nl> class MyTestCase : XCTestCase { <nl> - func testMe ( ) { } <nl> - / / CHECK : [ [ @ LINE - 1 ] ] : 8 | instance - method ( test ) / Swift | testMe ( ) | <nl> + / / CHECK : [ [ @ LINE - 1 ] ] : 7 | class ( test ) / Swift | MyTestCase | <nl> + func callit ( ) { } <nl> + func testMe ( ) { <nl> + / / CHECK : [ [ @ LINE - 1 ] ] : 8 | instance - method ( test ) / Swift | testMe ( ) | [ [ MyTestCase_testMe_USR : . * ] ] | Def , RelChild <nl> + callit ( ) <nl> + / / CHECK : [ [ @ LINE - 1 ] ] : 5 | instance - method / Swift | callit ( ) | s : 14swift_ide_test10MyTestCaseC6callityyF | Ref , Call , Dyn , RelRec , RelCall , RelCont | rel : 2 <nl> + / / CHECK - NEXT : RelCall , RelCont | instance - method ( test ) / Swift | testMe ( ) | [ [ MyTestCase_testMe_USR ] ] <nl> + } <nl> func testResult ( ) - > Int ? { return nil } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 8 | instance - method / Swift | testResult ( ) | <nl> func test ( withInt : Int ) { } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 8 | instance - method / Swift | test ( withInt : ) | <nl> } <nl> class SubTestCase : MyTestCase { <nl> + / / CHECK : [ [ @ LINE - 1 ] ] : 7 | class ( test ) / Swift | SubTestCase | [ [ SubTestCase_USR : . * ] ] | Def | rel : 0 <nl> func testIt2 ( ) { } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 8 | instance - method ( test ) / Swift | testIt2 ( ) | <nl> } <nl> extension SubTestCase { <nl> + / / CHECK : [ [ @ LINE - 1 ] ] : 11 | extension / ext - class ( test ) / Swift | SubTestCase | [ [ SubTestCaseExt_USR : . 
* ] ] | Def | rel : 0 <nl> + / / CHECK : [ [ @ LINE - 2 ] ] : 11 | class ( test ) / Swift | SubTestCase | [ [ SubTestCase_USR ] ] | Ref , RelExt | rel : 1 <nl> + / / CHECK - NEXT : RelExt | extension / ext - class ( test ) / Swift | SubTestCase | [ [ SubTestCaseExt_USR ] ] <nl> func testIt3 ( ) { } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 8 | instance - method ( test ) / Swift | testIt3 ( ) | <nl> } <nl> class AttrAnnots { <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 17 | instance - property ( IB ) / Swift | iboutletString | <nl> @ IBAction func someibaction ( o : TargetForIBAction ) { } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 18 | instance - method ( IB ) / Swift | someibaction ( o : ) | { { . * } } | Def , RelChild , RelIBType | rel : 2 <nl> - / / CHECK - NEXT : RelIBType | TargetForIBAction | [ [ TargetForIBAction_USR ] ] <nl> + / / CHECK - NEXT : RelIBType | class / Swift | TargetForIBAction | [ [ TargetForIBAction_USR ] ] <nl> @ GKInspectable var gkString = " gk " <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 22 | instance - property ( GKI ) / Swift | gkString | <nl> } <nl> typealias C1Alias = C1 <nl> / / CHECK : [ [ @ LINE + 4 ] ] : 7 | class / Swift | SubC1 | [ [ SubC1_USR : . * ] ] | Def | rel : 0 <nl> / / CHECK : [ [ @ LINE + 3 ] ] : 15 | type - alias / Swift | C1Alias | [ [ C1Alias_USR ] ] | Ref | rel : 0 <nl> / / CHECK : [ [ @ LINE + 2 ] ] : 15 | class / Swift | C1 | [ [ C1_USR ] ] | Ref , Impl , RelBase | rel : 1 <nl> - / / CHECK - NEXT : RelBase | SubC1 | [ [ SubC1_USR ] ] <nl> + / / CHECK - NEXT : RelBase | class / Swift | SubC1 | [ [ SubC1_USR ] ] <nl> class SubC1 : C1Alias { } <nl> mmm a / test / Index / roles . swift <nl> ppp b / test / Index / roles . swift <nl> var z : Int { <nl> <nl> return y <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 12 | variable / Swift | y | s : 14swift_ide_test1ySiv | Ref , Read , RelCont | rel : 1 <nl> - / / CHECK - NEXT : RelCont | getter : z | s : 14swift_ide_test1zSifg <nl> + / / CHECK - NEXT : RelCont | function / acc - get / Swift | getter : z | s : 14swift_ide_test1zSifg <nl> } <nl> set { <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 3 | function / acc - set / Swift | setter : z | s : 14swift_ide_test1zSifs | Def , RelChild , RelAcc | rel : 1 <nl> <nl> y = newValue <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 5 | variable / Swift | y | s : 14swift_ide_test1ySiv | Ref , Writ , RelCont | rel : 1 <nl> - / / CHECK - NEXT : RelCont | setter : z | s : 14swift_ide_test1zSifs <nl> + / / CHECK - NEXT : RelCont | function / acc - set / Swift | setter : z | s : 14swift_ide_test1zSifs <nl> } <nl> } <nl> / / Write + Read of z <nl> z = z + 1 <nl> func aCalledFunction ( a : Int , b : inout Int ) { <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 6 | function / Swift | aCalledFunction ( a : b : ) | s : 14swift_ide_test15aCalledFunctionySi1a_Siz1btF | Def | rel : 0 <nl> / / CHECK : [ [ @ LINE - 2 ] ] : 22 | param / Swift | a | s : { { . * } } | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | aCalledFunction ( a : b : ) | s : 14swift_ide_test15aCalledFunctionySi1a_Siz1btF <nl> + / / CHECK - NEXT : RelChild | function / Swift | aCalledFunction ( a : b : ) | s : 14swift_ide_test15aCalledFunctionySi1a_Siz1btF <nl> / / CHECK : [ [ @ LINE - 4 ] ] : 30 | param / Swift | b | s : { { . 
* } } | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | aCalledFunction ( a : b : ) | s : 14swift_ide_test15aCalledFunctionySi1a_Siz1btF <nl> + / / CHECK - NEXT : RelChild | function / Swift | aCalledFunction ( a : b : ) | s : 14swift_ide_test15aCalledFunctionySi1a_Siz1btF <nl> <nl> var _ = a + b <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 11 | param / Swift | a | s : { { . * } } | Ref , Read , RelCont | rel : 1 <nl> - / / CHECK - NEXT : RelCont | aCalledFunction ( a : b : ) | s : 14swift_ide_test15aCalledFunctionySi1a_Siz1btF <nl> + / / CHECK - NEXT : RelCont | function / Swift | aCalledFunction ( a : b : ) | s : 14swift_ide_test15aCalledFunctionySi1a_Siz1btF <nl> / / CHECK : [ [ @ LINE - 3 ] ] : 15 | param / Swift | b | s : { { . * } } | Ref , Read , RelCont | rel : 1 <nl> - / / CHECK - NEXT : RelCont | aCalledFunction ( a : b : ) | s : 14swift_ide_test15aCalledFunctionySi1a_Siz1btF <nl> + / / CHECK - NEXT : RelCont | function / Swift | aCalledFunction ( a : b : ) | s : 14swift_ide_test15aCalledFunctionySi1a_Siz1btF <nl> <nl> b = a + 1 <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 3 | param / Swift | b | s : { { . * } } | Ref , Writ , RelCont | rel : 1 <nl> - / / CHECK - NEXT : RelCont | aCalledFunction ( a : b : ) | s : 14swift_ide_test15aCalledFunctionySi1a_Siz1btF <nl> + / / CHECK - NEXT : RelCont | function / Swift | aCalledFunction ( a : b : ) | s : 14swift_ide_test15aCalledFunctionySi1a_Siz1btF <nl> / / CHECK : [ [ @ LINE - 3 ] ] : 7 | param / Swift | a | s : { { . * } } | Ref , Read , RelCont | rel : 1 <nl> - / / CHECK - NEXT : RelCont | aCalledFunction ( a : b : ) | s : 14swift_ide_test15aCalledFunctionySi1a_Siz1btF <nl> + / / CHECK - NEXT : RelCont | function / Swift | aCalledFunction ( a : b : ) | s : 14swift_ide_test15aCalledFunctionySi1a_Siz1btF <nl> } <nl> <nl> aCalledFunction ( a : 1 , b : & z ) <nl> func aCaller ( ) { <nl> <nl> aCalledFunction ( a : 1 , b : & z ) <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 3 | function / Swift | aCalledFunction ( a : b : ) | s : 14swift_ide_test15aCalledFunctionySi1a_Siz1btF | Ref , Call , RelCall , RelCont | rel : 1 <nl> - / / CHECK - NEXT : RelCall , RelCont | aCaller ( ) | s : 14swift_ide_test7aCalleryyF <nl> + / / CHECK - NEXT : RelCall , RelCont | function / Swift | aCaller ( ) | s : 14swift_ide_test7aCalleryyF <nl> } <nl> <nl> let aRef = aCalledFunction <nl> let aRef = aCalledFunction <nl> struct AStruct { <nl> var x : Int <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 7 | instance - property / Swift | x | s : 14swift_ide_test7AStructV1xSiv | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | AStruct | s : 14swift_ide_test7AStructV <nl> + / / CHECK - NEXT : RelChild | struct / Swift | AStruct | s : 14swift_ide_test7AStructV <nl> <nl> mutating func aMethod ( ) { <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 17 | instance - method / Swift | aMethod ( ) | s : 14swift_ide_test7AStructV7aMethodyyF | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | AStruct | s : 14swift_ide_test7AStructV <nl> + / / CHECK - NEXT : RelChild | struct / Swift | AStruct | s : 14swift_ide_test7AStructV <nl> <nl> x + = 1 <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 5 | instance - property / Swift | x | s : 14swift_ide_test7AStructV1xSiv | Ref , Read , Writ , RelCont | rel : 1 <nl> - / / CHECK - NEXT : RelCont | aMethod ( ) | s : 14swift_ide_test7AStructV7aMethodyyF <nl> + / / CHECK - NEXT : RelCont | instance - method / Swift | aMethod ( ) | s : 14swift_ide_test7AStructV7aMethodyyF <nl> / / CHECK : [ [ @ LINE - 3 ] ] : 5 | function / acc - get / Swift | getter : x | s 
: 14swift_ide_test7AStructV1xSifg | Ref , Call , Impl , RelRec , RelCall , RelCont | rel : 2 <nl> - / / CHECK - NEXT : RelCall , RelCont | aMethod ( ) | s : 14swift_ide_test7AStructV7aMethodyyF <nl> - / / CHECK - NEXT : RelRec | AStruct | s : 14swift_ide_test7AStructV <nl> + / / CHECK - NEXT : RelCall , RelCont | instance - method / Swift | aMethod ( ) | s : 14swift_ide_test7AStructV7aMethodyyF <nl> + / / CHECK - NEXT : RelRec | struct / Swift | AStruct | s : 14swift_ide_test7AStructV <nl> / / CHECK : [ [ @ LINE - 6 ] ] : 5 | function / acc - set / Swift | setter : x | s : 14swift_ide_test7AStructV1xSifs | Ref , Call , Impl , RelRec , RelCall , RelCont | rel : 2 <nl> - / / CHECK - NEXT : RelCall , RelCont | aMethod ( ) | s : 14swift_ide_test7AStructV7aMethodyyF <nl> - / / CHECK - NEXT : RelRec | AStruct | s : 14swift_ide_test7AStructV <nl> + / / CHECK - NEXT : RelCall , RelCont | instance - method / Swift | aMethod ( ) | s : 14swift_ide_test7AStructV7aMethodyyF <nl> + / / CHECK - NEXT : RelRec | struct / Swift | AStruct | s : 14swift_ide_test7AStructV <nl> / / CHECK : [ [ @ LINE - 9 ] ] : 7 | function / infix - operator / Swift | + = ( _ : _ : ) | s : s2peoiySiz_SitF | Ref , Call , RelCall , RelCont | rel : 1 <nl> - / / CHECK - NEXT : RelCall , RelCont | aMethod ( ) | s : 14swift_ide_test7AStructV7aMethodyyF <nl> + / / CHECK - NEXT : RelCall , RelCont | instance - method / Swift | aMethod ( ) | s : 14swift_ide_test7AStructV7aMethodyyF <nl> } <nl> <nl> / / RelationChildOf , RelationAccessorOf <nl> subscript ( index : Int ) - > Int { <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 3 | instance - property / subscript / Swift | subscript ( _ : ) | s : 14swift_ide_test7AStructV9subscriptS2ici | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | AStruct | s : 14swift_ide_test7AStructV <nl> + / / CHECK - NEXT : RelChild | struct / Swift | AStruct | s : 14swift_ide_test7AStructV <nl> <nl> get { <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 5 | instance - method / acc - get / Swift | getter : subscript ( _ : ) | s : 14swift_ide_test7AStructV9subscriptS2icfg | Def , RelChild , RelAcc | rel : 1 <nl> - / / CHECK - NEXT : RelChild , RelAcc | subscript ( _ : ) | s : 14swift_ide_test7AStructV9subscriptS2ici <nl> + / / CHECK - NEXT : RelChild , RelAcc | instance - property / subscript / Swift | subscript ( _ : ) | s : 14swift_ide_test7AStructV9subscriptS2ici <nl> <nl> return x <nl> } <nl> set { <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 5 | instance - method / acc - set / Swift | setter : subscript ( _ : ) | s : 14swift_ide_test7AStructV9subscriptS2icfs | Def , RelChild , RelAcc | rel : 1 <nl> - / / CHECK - NEXT : RelChild , RelAcc | subscript ( _ : ) | s : 14swift_ide_test7AStructV9subscriptS2ici <nl> + / / CHECK - NEXT : RelChild , RelAcc | instance - property / subscript / Swift | subscript ( _ : ) | s : 14swift_ide_test7AStructV9subscriptS2ici <nl> <nl> x = newValue <nl> } <nl> protocol X { } <nl> class ImplementsX : X { } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 7 | class / Swift | ImplementsX | [ [ ImplementsX_USR : . * ] ] | Def | rel : 0 <nl> / / CHECK : [ [ @ LINE - 2 ] ] : 21 | protocol / Swift | X | [ [ X_USR ] ] | Ref , RelBase | rel : 1 <nl> - / / CHECK - NEXT : RelBase | ImplementsX | [ [ ImplementsX_USR ] ] <nl> + / / CHECK - NEXT : RelBase | class / Swift | ImplementsX | [ [ ImplementsX_USR ] ] <nl> <nl> protocol AProtocol { <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 10 | protocol / Swift | AProtocol | [ [ AProtocol_USR : . 
* ] ] | Def | rel : 0 <nl> <nl> associatedtype T : X <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 18 | type - alias / associated - type / Swift | T | s : 14swift_ide_test9AProtocolP1T | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | AProtocol | [ [ AProtocol_USR ] ] <nl> + / / CHECK - NEXT : RelChild | protocol / Swift | AProtocol | [ [ AProtocol_USR ] ] <nl> / / CHECK : [ [ @ LINE - 3 ] ] : 22 | protocol / Swift | X | [ [ X_USR ] ] | Ref | rel : 0 <nl> <nl> func foo ( ) - > Int <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 8 | instance - method / Swift | foo ( ) | s : 14swift_ide_test9AProtocolP3fooSiyF | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | AProtocol | s : 14swift_ide_test9AProtocolP <nl> + / / CHECK - NEXT : RelChild | protocol / Swift | AProtocol | s : 14swift_ide_test9AProtocolP <nl> } <nl> <nl> class ASubClass : AClass , AProtocol { <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 7 | class / Swift | ASubClass | s : 14swift_ide_test9ASubClassC | Def | rel : 0 <nl> / / CHECK : [ [ @ LINE - 2 ] ] : 19 | class / Swift | AClass | s : 14swift_ide_test6AClassC | Ref , RelBase | rel : 1 <nl> - / / CHECK - NEXT : RelBase | ASubClass | s : 14swift_ide_test9ASubClassC <nl> + / / CHECK - NEXT : RelBase | class / Swift | ASubClass | s : 14swift_ide_test9ASubClassC <nl> / / CHECK : [ [ @ LINE - 4 ] ] : 27 | protocol / Swift | AProtocol | s : 14swift_ide_test9AProtocolP | Ref , RelBase | rel : 1 <nl> - / / CHECK - NEXT : RelBase | ASubClass | s : 14swift_ide_test9ASubClassC <nl> + / / CHECK - NEXT : RelBase | class / Swift | ASubClass | s : 14swift_ide_test9ASubClassC <nl> <nl> typealias T = ImplementsX <nl> <nl> override func foo ( ) - > Int { <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 17 | instance - method / Swift | foo ( ) | s : 14swift_ide_test9ASubClassC3fooSiyF | Def , RelChild , RelOver | rel : 3 <nl> - / / CHECK - NEXT : RelOver | foo ( ) | s : 14swift_ide_test6AClassC3fooSiyF <nl> - / / CHECK - NEXT : RelOver | foo ( ) | s : 14swift_ide_test9AProtocolP3fooSiyF <nl> - / / CHECK - NEXT : RelChild | ASubClass | s : 14swift_ide_test9ASubClassC <nl> + / / CHECK - NEXT : RelOver | instance - method / Swift | foo ( ) | s : 14swift_ide_test6AClassC3fooSiyF <nl> + / / CHECK - NEXT : RelOver | instance - method / Swift | foo ( ) | s : 14swift_ide_test9AProtocolP3fooSiyF <nl> + / / CHECK - NEXT : RelChild | class / Swift | ASubClass | s : 14swift_ide_test9ASubClassC <nl> return 1 <nl> } <nl> } <nl> class ASubClass : AClass , AProtocol { <nl> extension AClass { <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 11 | extension / ext - class / Swift | AClass | [ [ EXT_ACLASS_USR : . * ] ] | Def | rel : 0 <nl> / / CHECK : [ [ @ LINE - 2 ] ] : 11 | class / Swift | AClass | s : 14swift_ide_test6AClassC | Ref , RelExt | rel : 1 <nl> - / / CHECK - NEXT : RelExt | AClass | [ [ EXT_ACLASS_USR ] ] <nl> + / / CHECK - NEXT : RelExt | extension / ext - class / Swift | AClass | [ [ EXT_ACLASS_USR ] ] <nl> <nl> func bar ( ) - > Int { return 2 } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 8 | instance - method / Swift | bar ( ) | s : 14swift_ide_test6AClassC3barSiyF | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | AClass | [ [ EXT_ACLASS_USR ] ] <nl> + / / CHECK - NEXT : RelChild | extension / ext - class / Swift | AClass | [ [ EXT_ACLASS_USR ] ] <nl> } <nl> <nl> struct OuterS { <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 8 | struct / Swift | OuterS | [ [ OUTERS_USR : . * ] ] | Def | rel : 0 <nl> struct InnerS { } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 10 | struct / Swift | InnerS | [ [ INNERS_USR : . 
* ] ] | Def , RelChild | rel : 1 <nl> - / / CHECK - NEXT : RelChild | OuterS | [ [ OUTERS_USR ] ] <nl> + / / CHECK - NEXT : RelChild | struct / Swift | OuterS | [ [ OUTERS_USR ] ] <nl> } <nl> extension OuterS . InnerS : AProtocol { <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 18 | extension / ext - struct / Swift | InnerS | [ [ EXT_INNERS_USR : . * ] ] | Def | rel : 0 <nl> / / CHECK : [ [ @ LINE - 2 ] ] : 18 | struct / Swift | InnerS | [ [ INNERS_USR ] ] | Ref , RelExt | rel : 1 <nl> - / / CHECK - NEXT : RelExt | InnerS | [ [ EXT_INNERS_USR ] ] <nl> + / / CHECK - NEXT : RelExt | extension / ext - struct / Swift | InnerS | [ [ EXT_INNERS_USR ] ] <nl> / / CHECK : [ [ @ LINE - 4 ] ] : 27 | protocol / Swift | AProtocol | [ [ AProtocol_USR ] ] | Ref , RelBase | rel : 1 <nl> - / / CHECK - NEXT : RelBase | InnerS | [ [ EXT_INNERS_USR ] ] <nl> + / / CHECK - NEXT : RelBase | extension / ext - struct / Swift | InnerS | [ [ EXT_INNERS_USR ] ] <nl> / / CHECK : [ [ @ LINE - 6 ] ] : 11 | struct / Swift | OuterS | [ [ OUTERS_USR ] ] | Ref | rel : 0 <nl> <nl> typealias T = ImplementsX <nl> let aSubInstance : AClass = ASubClass ( x : 1 ) <nl> let _ = aSubInstance . foo ( ) <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 9 | variable / Swift | aSubInstance | s : 14swift_ide_test12aSubInstanceAA6AClassCv | Ref , Read | rel : 0 <nl> / / CHECK : [ [ @ LINE - 2 ] ] : 22 | instance - method / Swift | foo ( ) | s : 14swift_ide_test6AClassC3fooSiyF | Ref , Call , Dyn , RelRec | rel : 1 <nl> - / / CHECK - NEXT : RelRec | AClass | s : 14swift_ide_test6AClassC <nl> + / / CHECK - NEXT : RelRec | class / Swift | AClass | s : 14swift_ide_test6AClassC <nl> <nl> / / RelationContainedBy <nl> let contained = 2 <nl> func containing ( ) { <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 6 | function / Swift | containing ( ) | s : 14swift_ide_test10containingyyF | Def | rel : 0 <nl> let _ = contained <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 11 | variable / Swift | contained | s : 14swift_ide_test9containedSiv | Ref , Read , RelCont | rel : 1 <nl> - / / CHECK - NEXT : RelCont | containing ( ) | s : 14swift_ide_test10containingyyF <nl> + / / CHECK - NEXT : RelCont | function / Swift | containing ( ) | s : 14swift_ide_test10containingyyF <nl> <nl> var x = contained <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 11 | variable / Swift | contained | s : 14swift_ide_test9containedSiv | Ref , Read , RelCont | rel : 1 <nl> - / / CHECK - NEXT : RelCont | containing ( ) | s : 14swift_ide_test10containingyyF <nl> + / / CHECK - NEXT : RelCont | function / Swift | containing ( ) | s : 14swift_ide_test10containingyyF <nl> <nl> struct LocalStruct { <nl> var i : AClass = AClass ( x : contained ) <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 12 | class / Swift | AClass | s : 14swift_ide_test6AClassC | Ref , RelCont | rel : 1 <nl> - / / CHECK - NEXT : RelCont | containing ( ) | s : 14swift_ide_test10containingyyF <nl> + / / CHECK - NEXT : RelCont | function / Swift | containing ( ) | s : 14swift_ide_test10containingyyF <nl> / / CHECK : [ [ @ LINE - 3 ] ] : 21 | class / Swift | AClass | s : 14swift_ide_test6AClassC | Ref , RelCont | rel : 1 <nl> - / / CHECK - NEXT : RelCont | containing ( ) | s : 14swift_ide_test10containingyyF <nl> + / / CHECK - NEXT : RelCont | function / Swift | containing ( ) | s : 14swift_ide_test10containingyyF <nl> / / CHECK : [ [ @ LINE - 5 ] ] : 31 | variable / Swift | contained | s : 14swift_ide_test9containedSiv | Ref , Read , RelCont | rel : 1 <nl> - / / CHECK - NEXT : RelCont | containing ( ) | s : 14swift_ide_test10containingyyF <nl> + / / CHECK - NEXT : RelCont 
| function / Swift | containing ( ) | s : 14swift_ide_test10containingyyF <nl> <nl> init ( i _ : AClass ) { } <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 15 | class / Swift | AClass | s : 14swift_ide_test6AClassC | Ref , RelCont | rel : 1 <nl> - / / CHECK - NEXT : RelCont | containing ( ) | s : 14swift_ide_test10containingyyF <nl> + / / CHECK - NEXT : RelCont | function / Swift | containing ( ) | s : 14swift_ide_test10containingyyF <nl> <nl> func inner ( ) - > Int { <nl> let _ : AClass = AClass ( x : contained ) <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 14 | class / Swift | AClass | s : 14swift_ide_test6AClassC | Ref , RelCont | rel : 1 <nl> - / / CHECK - NEXT : RelCont | containing ( ) | s : 14swift_ide_test10containingyyF <nl> + / / CHECK - NEXT : RelCont | function / Swift | containing ( ) | s : 14swift_ide_test10containingyyF <nl> / / CHECK : [ [ @ LINE - 3 ] ] : 23 | class / Swift | AClass | s : 14swift_ide_test6AClassC | Ref , RelCont | rel : 1 <nl> - / / CHECK - NEXT : RelCont | containing ( ) | s : 14swift_ide_test10containingyyF <nl> + / / CHECK - NEXT : RelCont | function / Swift | containing ( ) | s : 14swift_ide_test10containingyyF <nl> / / CHECK : [ [ @ LINE - 5 ] ] : 33 | variable / Swift | contained | s : 14swift_ide_test9containedSiv | Ref , Read , RelCont | rel : 1 <nl> - / / CHECK - NEXT : RelCont | containing ( ) | s : 14swift_ide_test10containingyyF <nl> + / / CHECK - NEXT : RelCont | function / Swift | containing ( ) | s : 14swift_ide_test10containingyyF <nl> <nl> aCalledFunction ( a : 1 , b : & z ) <nl> / / CHECK : [ [ @ LINE - 1 ] ] : 7 | function / Swift | aCalledFunction ( a : b : ) | s : 14swift_ide_test15aCalledFunctionySi1a_Siz1btF | Ref , Call , RelCall , RelCont | rel : 1 <nl> - / / CHECK - NEXT : RelCall , RelCont | containing ( ) | s : 14swift_ide_test10containingyyF <nl> + / / CHECK - NEXT : RelCall , RelCont | function / Swift | containing ( ) | s : 14swift_ide_test10containingyyF <nl> <nl> return contained <nl> } <nl> mmm a / test / SourceKit / Indexing / index_is_test_candidate_objc . swift . response <nl> ppp b / test / SourceKit / Indexing / index_is_test_candidate_objc . swift . response <nl> <nl> key . column : 8 , <nl> key . is_test_candidate : 1 <nl> } <nl> - ] <nl> + ] , <nl> + key . is_test_candidate : 1 <nl> } , <nl> { <nl> key . kind : source . lang . swift . decl . class , <nl> <nl> key . column : 8 , <nl> key . is_test_candidate : 1 <nl> } <nl> - ] <nl> + ] , <nl> + key . is_test_candidate : 1 <nl> } <nl> ] <nl> } <nl> mmm a / tools / swift - ide - test / swift - ide - test . cpp <nl> ppp b / tools / swift - ide - test / swift - ide - test . cpp <nl> namespace { <nl> for ( auto Relation : symbol . Relations ) { <nl> OS < < " " ; <nl> clang : : index : : printSymbolRoles ( Relation . roles , OS ) ; <nl> + OS < < " | " ; <nl> + printSymbolInfo ( Relation . symInfo ) ; <nl> OS < < " | " < < Relation . name < < " | " < < Relation . USR < < " \ n " ; <nl> } <nl> return Continue ; <nl>
Merge pull request from akyrtzi/index-unittest-property-fixes
apple/swift
5be73438420cd5a779534a042585afd48d5c1472
2017-03-07T02:14:24Z
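The swift-ide-test change above adds a symbol-kind column to each printed relation, which is why every CHECK-NEXT line in the test gains a "kind/language" field such as "function / Swift". Below is a minimal C++ sketch of the resulting record format; SymbolInfo and Relation are hypothetical stand-ins for the real indexer structures, kept only detailed enough to reproduce the output shape.

#include <iostream>
#include <string>

struct SymbolInfo {       // stand-in for the indexer's per-symbol info
  std::string kind;       // e.g. "function"
  std::string language;   // e.g. "Swift"
};

struct Relation {         // stand-in for one index-relation entry
  std::string roles;      // already rendered as text, e.g. "RelCont"
  SymbolInfo symInfo;
  std::string name;
  std::string USR;
};

static void printRelation(std::ostream &OS, const Relation &R) {
  // The patch inserts the kind/language column between the roles and the
  // related symbol's name, so each relation line is self-describing.
  OS << "  " << R.roles << " | " << R.symInfo.kind << '/' << R.symInfo.language
     << " | " << R.name << " | " << R.USR << '\n';
}

int main() {
  Relation R{"RelCont", {"function", "Swift"},
             "containing()", "s:14swift_ide_test10containingyyF"};
  printRelation(std::cout, R);
  // Prints:   RelCont | function/Swift | containing() | s:14swift_ide_test10containingyyF
}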
mmm a / include / rocksdb / utilities / transaction . h <nl> ppp b / include / rocksdb / utilities / transaction . h <nl> class Transaction { <nl> return 0 ; <nl> } <nl> <nl> - enum ExecutionStatus { <nl> + enum TransactionState { <nl> STARTED = 0 , <nl> AWAITING_PREPARE = 1 , <nl> PREPARED = 2 , <nl> class Transaction { <nl> LOCKS_STOLEN = 7 , <nl> } ; <nl> <nl> - / / Execution status of the transaction . <nl> - std : : atomic < ExecutionStatus > exec_status_ ; <nl> + TransactionState GetState ( ) { return txn_state_ ; } <nl> + void SetState ( TransactionState state ) { txn_state_ = state ; } <nl> <nl> protected : <nl> explicit Transaction ( const TransactionDB * db ) { } <nl> class Transaction { <nl> uint64_t log_number_ ; <nl> TransactionName name_ ; <nl> <nl> + / / Execution status of the transaction . <nl> + std : : atomic < TransactionState > txn_state_ ; <nl> + <nl> private : <nl> / / No copying allowed <nl> Transaction ( const Transaction & ) ; <nl> mmm a / utilities / transactions / transaction_db_impl . cc <nl> ppp b / utilities / transactions / transaction_db_impl . cc <nl> Status TransactionDBImpl : : Initialize ( <nl> } <nl> <nl> s = real_trx - > RebuildFromWriteBatch ( recovered_trx - > batch_ ) ; <nl> - real_trx - > exec_status_ = Transaction : : PREPARED ; <nl> + real_trx - > SetState ( Transaction : : PREPARED ) ; <nl> if ( ! s . ok ( ) ) { <nl> break ; <nl> } <nl> void TransactionDBImpl : : GetAllPreparedTransactions ( <nl> transv - > clear ( ) ; <nl> std : : lock_guard < std : : mutex > lock ( name_map_mutex_ ) ; <nl> for ( auto it = transactions_ . begin ( ) ; it ! = transactions_ . end ( ) ; it + + ) { <nl> - if ( it - > second - > exec_status_ = = Transaction : : PREPARED ) { <nl> + if ( it - > second - > GetState ( ) = = Transaction : : PREPARED ) { <nl> transv - > push_back ( it - > second ) ; <nl> } <nl> } <nl> void TransactionDBImpl : : RegisterTransaction ( Transaction * txn ) { <nl> assert ( txn ) ; <nl> assert ( txn - > GetName ( ) . length ( ) > 0 ) ; <nl> assert ( GetTransactionByName ( txn - > GetName ( ) ) = = nullptr ) ; <nl> - assert ( txn - > exec_status_ = = Transaction : : STARTED ) ; <nl> + assert ( txn - > GetState ( ) = = Transaction : : STARTED ) ; <nl> std : : lock_guard < std : : mutex > lock ( name_map_mutex_ ) ; <nl> transactions_ [ txn - > GetName ( ) ] = txn ; <nl> } <nl> mmm a / utilities / transactions / transaction_impl . cc <nl> ppp b / utilities / transactions / transaction_impl . cc <nl> TransactionImpl : : TransactionImpl ( TransactionDB * txn_db , <nl> void TransactionImpl : : Initialize ( const TransactionOptions & txn_options ) { <nl> txn_id_ = GenTxnID ( ) ; <nl> <nl> - exec_status_ = STARTED ; <nl> + txn_state_ = STARTED ; <nl> <nl> lock_timeout_ = txn_options . lock_timeout * 1000 ; <nl> if ( lock_timeout_ < 0 ) { <nl> TransactionImpl : : ~ TransactionImpl ( ) { <nl> if ( expiration_time_ > 0 ) { <nl> txn_db_impl_ - > RemoveExpirableTransaction ( txn_id_ ) ; <nl> } <nl> - if ( ! name_ . empty ( ) & & exec_status_ ! = COMMITED ) { <nl> + if ( ! name_ . empty ( ) & & txn_state_ ! = COMMITED ) { <nl> txn_db_impl_ - > UnregisterTransaction ( this ) ; <nl> } <nl> } <nl> void TransactionImpl : : Clear ( ) { <nl> void TransactionImpl : : Reinitialize ( TransactionDB * txn_db , <nl> const WriteOptions & write_options , <nl> const TransactionOptions & txn_options ) { <nl> - if ( ! name_ . empty ( ) & & exec_status_ ! = COMMITED ) { <nl> + if ( ! name_ . empty ( ) & & txn_state_ ! 
= COMMITED ) { <nl> txn_db_impl_ - > UnregisterTransaction ( this ) ; <nl> } <nl> TransactionBaseImpl : : Reinitialize ( txn_db - > GetRootDB ( ) , write_options ) ; <nl> Status TransactionImpl : : CommitBatch ( WriteBatch * batch ) { <nl> if ( IsExpired ( ) ) { <nl> s = Status : : Expired ( ) ; <nl> } else if ( expiration_time_ > 0 ) { <nl> - ExecutionStatus expected = STARTED ; <nl> - can_commit = std : : atomic_compare_exchange_strong ( & exec_status_ , & expected , <nl> + TransactionState expected = STARTED ; <nl> + can_commit = std : : atomic_compare_exchange_strong ( & txn_state_ , & expected , <nl> AWAITING_COMMIT ) ; <nl> - } else if ( exec_status_ = = STARTED ) { <nl> + } else if ( txn_state_ = = STARTED ) { <nl> / / lock stealing is not a concern <nl> can_commit = true ; <nl> } <nl> <nl> if ( can_commit ) { <nl> - exec_status_ . store ( AWAITING_COMMIT ) ; <nl> + txn_state_ . store ( AWAITING_COMMIT ) ; <nl> s = db_ - > Write ( write_options_ , batch ) ; <nl> if ( s . ok ( ) ) { <nl> - exec_status_ . store ( COMMITED ) ; <nl> + txn_state_ . store ( COMMITED ) ; <nl> } <nl> - } else if ( exec_status_ = = LOCKS_STOLEN ) { <nl> + } else if ( txn_state_ = = LOCKS_STOLEN ) { <nl> s = Status : : Expired ( ) ; <nl> } else { <nl> s = Status : : InvalidArgument ( " Transaction is not in state for commit . " ) ; <nl> Status TransactionImpl : : Prepare ( ) { <nl> if ( expiration_time_ > 0 ) { <nl> / / must concern ourselves with expiraton and / or lock stealing <nl> / / need to compare / exchange bc locks could be stolen under us here <nl> - ExecutionStatus expected = STARTED ; <nl> - can_prepare = std : : atomic_compare_exchange_strong ( & exec_status_ , & expected , <nl> + TransactionState expected = STARTED ; <nl> + can_prepare = std : : atomic_compare_exchange_strong ( & txn_state_ , & expected , <nl> AWAITING_PREPARE ) ; <nl> - } else if ( exec_status_ = = STARTED ) { <nl> + } else if ( txn_state_ = = STARTED ) { <nl> / / expiration and lock stealing is not possible <nl> can_prepare = true ; <nl> } <nl> <nl> if ( can_prepare ) { <nl> - exec_status_ . store ( AWAITING_PREPARE ) ; <nl> + txn_state_ . store ( AWAITING_PREPARE ) ; <nl> / / transaction can ' t expire after preparation <nl> expiration_time_ = 0 ; <nl> WriteOptions write_options = write_options_ ; <nl> Status TransactionImpl : : Prepare ( ) { <nl> if ( s . ok ( ) ) { <nl> assert ( log_number_ ! = 0 ) ; <nl> dbimpl_ - > MarkLogAsContainingPrepSection ( log_number_ ) ; <nl> - exec_status_ . store ( PREPARED ) ; <nl> + txn_state_ . store ( PREPARED ) ; <nl> } <nl> - } else if ( exec_status_ = = LOCKS_STOLEN ) { <nl> + } else if ( txn_state_ = = LOCKS_STOLEN ) { <nl> s = Status : : Expired ( ) ; <nl> - } else if ( exec_status_ = = PREPARED ) { <nl> + } else if ( txn_state_ = = PREPARED ) { <nl> s = Status : : InvalidArgument ( " Transaction has already been prepared . " ) ; <nl> - } else if ( exec_status_ = = COMMITED ) { <nl> + } else if ( txn_state_ = = COMMITED ) { <nl> s = Status : : InvalidArgument ( " Transaction has already been committed . " ) ; <nl> - } else if ( exec_status_ = = ROLLEDBACK ) { <nl> + } else if ( txn_state_ = = ROLLEDBACK ) { <nl> s = Status : : InvalidArgument ( " Transaction has already been rolledback . " ) ; <nl> } else { <nl> s = Status : : InvalidArgument ( " Transaction is not in state for commit . " ) ; <nl> Status TransactionImpl : : Commit ( ) { <nl> / / to change our state out from under us in the even that we expire and have <nl> / / our locks stolen . 
In this case the only valid state is STARTED because <nl> / / a state of PREPARED would have a cleared expiration_time_ . <nl> - ExecutionStatus expected = STARTED ; <nl> - commit_single = std : : atomic_compare_exchange_strong ( <nl> - & exec_status_ , & expected , AWAITING_COMMIT ) ; <nl> + TransactionState expected = STARTED ; <nl> + commit_single = std : : atomic_compare_exchange_strong ( & txn_state_ , & expected , <nl> + AWAITING_COMMIT ) ; <nl> TEST_SYNC_POINT ( " TransactionTest : : ExpirableTransactionDataRace : 1 " ) ; <nl> - } else if ( exec_status_ = = PREPARED ) { <nl> + } else if ( txn_state_ = = PREPARED ) { <nl> / / expiration and lock stealing is not a concern <nl> commit_prepared = true ; <nl> - } else if ( exec_status_ = = STARTED ) { <nl> + } else if ( txn_state_ = = STARTED ) { <nl> / / expiration and lock stealing is not a concern <nl> commit_single = true ; <nl> } <nl> Status TransactionImpl : : Commit ( ) { <nl> s = Status : : InvalidArgument ( <nl> " Commit - time batch contains values that will not be committed . " ) ; <nl> } else { <nl> - exec_status_ . store ( AWAITING_COMMIT ) ; <nl> + txn_state_ . store ( AWAITING_COMMIT ) ; <nl> s = db_ - > Write ( write_options_ , GetWriteBatch ( ) - > GetWriteBatch ( ) ) ; <nl> Clear ( ) ; <nl> if ( s . ok ( ) ) { <nl> - exec_status_ . store ( COMMITED ) ; <nl> + txn_state_ . store ( COMMITED ) ; <nl> } <nl> } <nl> } else if ( commit_prepared ) { <nl> - exec_status_ . store ( AWAITING_COMMIT ) ; <nl> - <nl> + txn_state_ . store ( AWAITING_COMMIT ) ; <nl> <nl> / / We take the commit - time batch and append the Commit marker . <nl> / / The Memtable will ignore the Commit marker in non - recovery mode <nl> Status TransactionImpl : : Commit ( ) { <nl> txn_db_impl_ - > UnregisterTransaction ( this ) ; <nl> <nl> Clear ( ) ; <nl> - exec_status_ . store ( COMMITED ) ; <nl> - } else if ( exec_status_ = = LOCKS_STOLEN ) { <nl> + txn_state_ . store ( COMMITED ) ; <nl> + } else if ( txn_state_ = = LOCKS_STOLEN ) { <nl> s = Status : : Expired ( ) ; <nl> - } else if ( exec_status_ = = COMMITED ) { <nl> + } else if ( txn_state_ = = COMMITED ) { <nl> s = Status : : InvalidArgument ( " Transaction has already been committed . " ) ; <nl> - } else if ( exec_status_ = = ROLLEDBACK ) { <nl> + } else if ( txn_state_ = = ROLLEDBACK ) { <nl> s = Status : : InvalidArgument ( " Transaction has already been rolledback . " ) ; <nl> } else { <nl> s = Status : : InvalidArgument ( " Transaction is not in state for commit . " ) ; <nl> Status TransactionImpl : : Commit ( ) { <nl> <nl> Status TransactionImpl : : Rollback ( ) { <nl> Status s ; <nl> - if ( exec_status_ = = PREPARED ) { <nl> + if ( txn_state_ = = PREPARED ) { <nl> WriteBatch rollback_marker ; <nl> WriteBatchInternal : : MarkRollback ( & rollback_marker , name_ ) ; <nl> - exec_status_ . store ( AWAITING_ROLLBACK ) ; <nl> + txn_state_ . store ( AWAITING_ROLLBACK ) ; <nl> s = db_impl_ - > WriteImpl ( write_options_ , & rollback_marker ) ; <nl> if ( s . ok ( ) ) { <nl> / / we do not need to keep our prepared section around <nl> assert ( log_number_ > 0 ) ; <nl> dbimpl_ - > MarkLogAsHavingPrepSectionFlushed ( log_number_ ) ; <nl> Clear ( ) ; <nl> - exec_status_ . store ( ROLLEDBACK ) ; <nl> + txn_state_ . 
store ( ROLLEDBACK ) ; <nl> } <nl> - } else if ( exec_status_ = = STARTED ) { <nl> + } else if ( txn_state_ = = STARTED ) { <nl> / / prepare couldn ' t have taken place <nl> Clear ( ) ; <nl> - } else if ( exec_status_ = = COMMITED ) { <nl> + } else if ( txn_state_ = = COMMITED ) { <nl> s = Status : : InvalidArgument ( " This transaction has already been committed . " ) ; <nl> } else { <nl> s = Status : : InvalidArgument ( <nl> Status TransactionImpl : : Rollback ( ) { <nl> } <nl> <nl> Status TransactionImpl : : RollbackToSavePoint ( ) { <nl> - if ( exec_status_ ! = STARTED ) { <nl> + if ( txn_state_ ! = STARTED ) { <nl> return Status : : InvalidArgument ( " Transaction is beyond state for rollback . " ) ; <nl> } <nl> <nl> Status TransactionImpl : : ValidateSnapshot ( ColumnFamilyHandle * column_family , <nl> <nl> bool TransactionImpl : : TryStealingLocks ( ) { <nl> assert ( IsExpired ( ) ) ; <nl> - ExecutionStatus expected = STARTED ; <nl> - return std : : atomic_compare_exchange_strong ( & exec_status_ , & expected , <nl> + TransactionState expected = STARTED ; <nl> + return std : : atomic_compare_exchange_strong ( & txn_state_ , & expected , <nl> LOCKS_STOLEN ) ; <nl> } <nl> <nl> void TransactionImpl : : UnlockGetForUpdate ( ColumnFamilyHandle * column_family , <nl> <nl> Status TransactionImpl : : SetName ( const TransactionName & name ) { <nl> Status s ; <nl> - if ( exec_status_ = = STARTED ) { <nl> + if ( txn_state_ = = STARTED ) { <nl> if ( name_ . length ( ) ) { <nl> s = Status : : InvalidArgument ( " Transaction has already been named . " ) ; <nl> } else if ( txn_db_impl_ - > GetTransactionByName ( name ) ! = nullptr ) { <nl>
Expose Transaction State Publicly
facebook/rocksdb
37737c3a6b1b5bf3ecb604a74d47ad55f7de1dde
2016-10-07T18:58:53Z
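The RocksDB commit above renames ExecutionStatus to TransactionState, moves the atomic behind GetState()/SetState() accessors, and keeps the compare-exchange guards that make expiration-time lock stealing safe. Here is a self-contained sketch of that guard pattern with assumed minimal types, not the real TransactionImpl (COMMITED is spelled as in the RocksDB enum):

#include <atomic>
#include <cassert>

enum TransactionState { STARTED, AWAITING_COMMIT, COMMITED, LOCKS_STOLEN };

struct Txn {
  std::atomic<TransactionState> txn_state_{STARTED};

  TransactionState GetState() { return txn_state_; }

  bool TryStealingLocks() {
    // Succeeds only while the transaction is still STARTED.
    TransactionState expected = STARTED;
    return txn_state_.compare_exchange_strong(expected, LOCKS_STOLEN);
  }

  bool TryCommit() {
    TransactionState expected = STARTED;
    if (!txn_state_.compare_exchange_strong(expected, AWAITING_COMMIT))
      return false;  // lost the race, e.g. to TryStealingLocks()
    // ... the write batch would be applied here ...
    txn_state_.store(COMMITED);
    return true;
  }
};

int main() {
  Txn t;
  bool committed = t.TryCommit();
  assert(committed && t.GetState() == COMMITED);
  bool stolen = t.TryStealingLocks();
  assert(!stolen);  // too late: the state is no longer STARTED
  (void)committed;
  (void)stolen;
}

The compare-exchange matters only when expiration_time_ > 0; a non-expirable transaction cannot have its locks stolen, so a plain state check suffices there, exactly as in the patched Prepare() and Commit().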
mmm a / folly / Conv . cpp <nl> ppp b / folly / Conv . cpp <nl> Expected < Tgt , ConversionCode > str_to_floating ( StringPiece * src ) noexcept { <nl> StringToDoubleConverter : : ALLOW_LEADING_SPACES , <nl> 0 . 0 , <nl> / / return this for junk input string <nl> - std : : numeric_limits < double > : : quiet_NaN ( ) , <nl> + std : : numeric_limits < Tgt > : : quiet_NaN ( ) , <nl> nullptr , <nl> nullptr ) ; <nl> <nl> Expected < Tgt , ConversionCode > str_to_floating ( StringPiece * src ) noexcept { <nl> return makeUnexpected ( ConversionCode : : EMPTY_INPUT_STRING ) ; <nl> } <nl> <nl> - int length ; <nl> - auto result = conv . StringToDouble ( <nl> - src - > data ( ) , <nl> - static_cast < int > ( src - > size ( ) ) , <nl> - & length ) ; / / processed char count <nl> + int length ; / / processed char count <nl> + auto result = std : : is_same < Tgt , float > : : value <nl> + ? conv . StringToFloat ( src - > data ( ) , static_cast < int > ( src - > size ( ) ) , & length ) <nl> + : static_cast < Tgt > ( conv . StringToDouble ( <nl> + src - > data ( ) , static_cast < int > ( src - > size ( ) ) , & length ) ) ; <nl> <nl> if ( ! std : : isnan ( result ) ) { <nl> / / If we get here with length = 0 , the input string is empty . <nl> mmm a / folly / test / ConvTest . cpp <nl> ppp b / folly / test / ConvTest . cpp <nl> TEST ( Conv , FloatToBool ) { <nl> EXPECT_EQ ( to < bool > ( - std : : numeric_limits < double > : : infinity ( ) ) , true ) ; <nl> } <nl> <nl> + TEST ( Conv , RoundTripFloatToStringToFloat ) { <nl> + const std : : array < float , 6 > kTests { { <nl> + 3 . 14159f , <nl> + 12345678 . f , <nl> + numeric_limits < float > : : lowest ( ) , <nl> + numeric_limits < float > : : max ( ) , <nl> + numeric_limits < float > : : infinity ( ) , <nl> + - numeric_limits < float > : : infinity ( ) , <nl> + } } ; <nl> + <nl> + for ( const auto & test : kTests ) { <nl> + SCOPED_TRACE ( to < string > ( test ) ) ; <nl> + EXPECT_EQ ( to < float > ( to < string > ( test ) ) , test ) ; <nl> + } <nl> + <nl> + EXPECT_TRUE ( <nl> + std : : isnan ( to < float > ( to < string > ( numeric_limits < float > : : quiet_NaN ( ) ) ) ) ) ; <nl> + } <nl> + <nl> namespace { <nl> <nl> template < typename F > <nl> TEST ( Conv , ConversionErrorStrToFloat ) { <nl> EXPECT_CONV_ERROR_STR ( float , " \ t " , EMPTY_INPUT_STRING ) ; <nl> EXPECT_CONV_ERROR_STR ( float , " junk " , STRING_TO_FLOAT_ERROR ) ; <nl> EXPECT_CONV_ERROR ( to < float > ( " 1bla " ) , NON_WHITESPACE_AFTER_END , " bla " ) ; <nl> + <nl> + EXPECT_CONV_ERROR_STR_NOVAL ( double , StringPiece ( ) , EMPTY_INPUT_STRING ) ; <nl> + EXPECT_CONV_ERROR_STR_NOVAL ( double , " " , EMPTY_INPUT_STRING ) ; <nl> + EXPECT_CONV_ERROR_STR ( double , " " , EMPTY_INPUT_STRING ) ; <nl> + EXPECT_CONV_ERROR_STR ( double , " \ t " , EMPTY_INPUT_STRING ) ; <nl> + EXPECT_CONV_ERROR_STR ( double , " junk " , STRING_TO_FLOAT_ERROR ) ; <nl> + EXPECT_CONV_ERROR ( to < double > ( " 1bla " ) , NON_WHITESPACE_AFTER_END , " bla " ) ; <nl> } <nl> <nl> TEST ( Conv , ConversionErrorStrToInt ) { <nl> TEST ( Conv , TryStringToFloat ) { <nl> char x = ' - ' ; <nl> auto rv3 = folly : : tryTo < float > ( folly : : StringPiece ( & x , 1 ) ) ; <nl> EXPECT_FALSE ( rv3 . hasValue ( ) ) ; <nl> + <nl> + / / Exact conversion at numeric limits ( 8 + decimal digits ) <nl> + auto rv4 = folly : : tryTo < float > ( " - 3 . 4028235E38 " ) ; <nl> + EXPECT_TRUE ( rv4 . hasValue ( ) ) ; <nl> + EXPECT_EQ ( rv4 . value ( ) , numeric_limits < float > : : lowest ( ) ) ; <nl> + auto rv5 = folly : : tryTo < float > ( " 3 . 
40282346E38 " ) ; <nl> + EXPECT_TRUE ( rv5 . hasValue ( ) ) ; <nl> + EXPECT_EQ ( rv5 . value ( ) , numeric_limits < float > : : max ( ) ) ; <nl> + <nl> + / / Beyond numeric limits <nl> + / / numeric_limits < float > : : lowest ( ) ~ = - 3 . 402823466E38 <nl> + const std : : array < folly : : StringPiece , 4 > kOversizedInputs { { <nl> + " - 3 . 403E38 " , <nl> + " - 3 . 4029E38 " , <nl> + " - 3 . 402824E38 " , <nl> + " - 3 . 4028236E38 " , <nl> + } } ; <nl> + for ( const auto & input : kOversizedInputs ) { <nl> + auto rv = folly : : tryTo < float > ( input ) ; <nl> + EXPECT_EQ ( rv . value ( ) , - numeric_limits < float > : : infinity ( ) ) < < input ; <nl> + } <nl> + <nl> + / / NaN <nl> + const std : : array < folly : : StringPiece , 6 > kNanInputs { { <nl> + " nan " , <nl> + " NaN " , <nl> + " NAN " , <nl> + " - nan " , <nl> + " - NaN " , <nl> + " - NAN " , <nl> + } } ; <nl> + for ( const auto & input : kNanInputs ) { <nl> + auto rv = folly : : tryTo < float > ( input ) ; <nl> + EXPECT_TRUE ( std : : isnan ( rv . value ( ) ) ) < < input ; <nl> + } <nl> } <nl> <nl> TEST ( Conv , TryStringToDouble ) { <nl>
Improve string to 32-bit float conversion near numeric limits
facebook/folly
4c2692fac934b34e055621423615734bcbbeb480
2020-12-04T19:25:58Z
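The folly fix above routes float targets through StringToFloat (and sizes the junk-input NaN to the target type), so parsing and range saturation happen at float precision rather than double. A short sketch exercising the same calls the new tests use; it assumes folly is available to build against, and the literals are taken directly from the tests:

#include <folly/Conv.h>

#include <cassert>
#include <cmath>
#include <limits>

int main() {
  // Exact at the limits: 8+ significant decimal digits round-trip to float.
  auto lo = folly::tryTo<float>("-3.4028235E38");
  assert(lo.hasValue() && lo.value() == std::numeric_limits<float>::lowest());

  // Just beyond float range: saturates to infinity instead of failing.
  auto big = folly::tryTo<float>("-3.403E38");
  assert(big.hasValue() &&
         big.value() == -std::numeric_limits<float>::infinity());

  // NaN spellings are accepted case-insensitively.
  assert(std::isnan(folly::tryTo<float>("NaN").value()));
  return 0;
}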
mmm a / bindings / csharp / Swig / cntk_cs . i <nl> ppp b / bindings / csharp / Swig / cntk_cs . i <nl> SWIG_STD_VECTOR_ENHANCED ( CNTK : : DeviceDescriptor ) <nl> % ignore_enum_class CNTK : : Internal : : PrimitiveFunction ; <nl> % ignore_class CNTK : : Internal : : CompositeFunction ; <nl> % ignore_function CNTK : : Internal : : MaxNumCPUThreadsSet ; <nl> - % ignore_enum_class CNTK : : Internal : : PrimitiveOpType ; <nl> + % ignore_enum_class CNTK : : PrimitiveOpType ; <nl> % ignore_function CNTK : : Internal : : IsWithin ; <nl> % ignore_function CNTK : : Internal : : PackedIndex ; <nl> % ignore_function CNTK : : Internal : : GatherPacked ; <nl>
correctly ignore PrimitiveOpType
microsoft/CNTK
3eb11d84a2628e0ee281ca0258adf5bfcccc529f
2017-03-28T07:46:40Z
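The one-line CNTK fix above works because SWIG ignore directives match fully qualified names: PrimitiveOpType is declared at CNTK scope, not inside CNTK::Internal, so the old directive matched nothing and the enum leaked into the generated C# bindings. A C++ sketch of the namespace layout this implies; the enumerators shown are illustrative, not the real list:

namespace CNTK {
// Declared at CNTK scope, so a binding rule must name CNTK::PrimitiveOpType.
enum class PrimitiveOpType { Negate, Sigmoid, Tanh /* ... */ };

namespace Internal {
// Internal-only helpers live here; PrimitiveOpType does not, which is why
// ignoring CNTK::Internal::PrimitiveOpType had no effect.
}
}  // namespace CNTK

int main() {
  return CNTK::PrimitiveOpType::Sigmoid == CNTK::PrimitiveOpType::Sigmoid ? 0
                                                                          : 1;
}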
mmm a / modules / gpu / doc / data_structures . rst <nl> ppp b / modules / gpu / doc / data_structures . rst <nl> gpu : : DevMem2D \ _ <nl> mmmmmmmmmmmmmmm <nl> . . ocv : class : : gpu : : DevMem2D \ _ <nl> <nl> - This lightweight class encapsulates pitched memory on a GPU and is passed to nvcc - compiled code ( CUDA kernels ) . Typically , it is used internally by OpenCV and by users who write device code . You can call its members from both host and device code . : : <nl> + Lightweight class encapsulating pitched memory on a GPU and passed to nvcc - compiled code ( CUDA kernels ) . Typically , it is used internally by OpenCV and by users who write device code . You can call its members from both host and device code . : : <nl> <nl> template < typename T > struct DevMem2D_ <nl> { <nl> gpu : : PtrStep \ _ <nl> mmmmmmmmmmmm - - <nl> . . ocv : class : : gpu : : PtrStep \ _ <nl> <nl> - This structure is similar to <nl> - : ocv : class : ` DevMem2D_ ` but contains only a pointer and row step . Width and height fields are excluded due to performance reasons . The structure is intended for internal use or for users who write device code . <nl> + Structure similar to <nl> + : ocv : class : ` DevMem2D_ ` but containing only a pointer and row step . Width and height fields are excluded due to performance reasons . The structure is intended for internal use or for users who write device code . <nl> : : <nl> <nl> template < typename T > struct PtrStep_ <nl> gpu : : PtrElemStrp \ _ <nl> mmmmmmmmmmmmmmmmmm <nl> . . ocv : class : : gpu : : PtrElemStrp \ _ <nl> <nl> - This structure is similar to <nl> - : ocv : class : ` DevMem2D_ ` but contains only pointer and row step in elements . Width and height fields are excluded due to performance reasons . This class can only be constructed if ` ` sizeof ( T ) ` ` is a multiple of 256 . The structure is intended for internal use or for users who write device code . <nl> + Structure similar to <nl> + : ocv : class : ` DevMem2D_ ` but containing only a pointer and a row step in elements . Width and height fields are excluded due to performance reasons . This class can only be constructed if ` ` sizeof ( T ) ` ` is a multiple of 256 . The structure is intended for internal use or for users who write device code . <nl> : : <nl> <nl> template < typename T > struct PtrElemStep_ : public PtrStep_ < T > <nl> gpu : : GpuMat <nl> mmmmmmmmm - - <nl> . . ocv : class : : gpu : : GpuMat <nl> <nl> - This is a base storage class for GPU memory with reference counting . Its interface matches the <nl> + Base storage class for GPU memory with reference counting . Its interface matches the <nl> : c : type : ` Mat ` interface with the following limitations : <nl> <nl> * <nl> This is a base storage class for GPU memory with reference counting . Its interfa <nl> * <nl> no expression templates technique support <nl> <nl> - Beware that the latter limitation may lead to overloaded matrix operators that cause memory allocations . The ` ` GpuMat ` ` class is convertible to : ocv : class : ` gpu : : DevMem2D_ ` and : ocv : class : ` gpu : : PtrStep_ ` so it can be passed directly to kernel . <nl> + Beware that the latter limitation may lead to overloaded matrix operators that cause memory allocations . The ` ` GpuMat ` ` class is convertible to : ocv : class : ` gpu : : DevMem2D_ ` and : ocv : class : ` gpu : : PtrStep_ ` so it can be passed directly to the kernel . 
<nl> <nl> - * * Note : * * <nl> - <nl> - In contrast with : c : type : ` Mat ` , in most cases ` ` GpuMat : : isContinuous ( ) = = false ` ` . This means that rows are aligned to size depending on the hardware . Single - row ` ` GpuMat ` ` is always a continuous matrix . : : <nl> + . . note : : In contrast with : ocv : class : ` Mat ` , in most cases ` ` GpuMat : : isContinuous ( ) = = false ` ` . This means that rows are aligned to a size depending on the hardware . Single - row ` ` GpuMat ` ` is always a continuous matrix . <nl> + : : <nl> <nl> class CV_EXPORTS GpuMat <nl> { <nl> In contrast with : c : type : ` Mat ` , in most cases ` ` GpuMat : : isContinuous ( ) = = false ` <nl> } ; <nl> <nl> <nl> - * * Note : * * <nl> + . . note : : <nl> <nl> - You are not recommended to leave static or global ` ` GpuMat ` ` variables allocated , that is to rely on its destructor . The destruction order of such variables and CUDA context is undefined . GPU memory release function returns error if the CUDA context has been destroyed before . <nl> + You are not recommended to leave static or global ` ` GpuMat ` ` variables allocated , that is , to rely on its destructor . The destruction order of such variables and CUDA context is undefined . GPU memory release function returns error if the CUDA context has been destroyed before . <nl> <nl> - See Also : <nl> - : ocv : func : ` Mat ` <nl> + . . seealso : : <nl> + : ocv : class : ` Mat ` <nl> <nl> . . index : : gpu : : CudaMem <nl> <nl> gpu : : CudaMem <nl> mmmmmmmmmmmm <nl> . . ocv : class : : gpu : : CudaMem <nl> <nl> - This class with reference counting wraps special memory type allocation functions from CUDA . Its interface is also <nl> + Class with reference counting wrapping special memory type allocation functions from CUDA . Its interface is also <nl> : ocv : func : ` Mat ` - like but with additional memory type parameters . <nl> <nl> * <nl> - ` ` ALLOC_PAGE_LOCKED ` ` : Sets a page locked memory type , used commonly for fast and asynchronous uploading / downloading data from / to GPU . <nl> + ` ` ALLOC_PAGE_LOCKED ` ` sets a page locked memory type used commonly for fast and asynchronous uploading / downloading data from / to GPU . <nl> * <nl> - ` ` ALLOC_ZEROCOPY ` ` : Specifies a zero copy memory allocation that enables mapping the host memory to GPU address space , if supported . <nl> + ` ` ALLOC_ZEROCOPY ` ` specifies a zero copy memory allocation that enables mapping the host memory to GPU address space , if supported . <nl> * <nl> - ` ` ALLOC_WRITE_COMBINED ` ` : Sets the write combined buffer that is not cached by CPU . Such buffers are used to supply GPU with data when GPU only reads it . The advantage is a better CPU cache utilization . <nl> + ` ` ALLOC_WRITE_COMBINED ` ` sets the write combined buffer that is not cached by CPU . Such buffers are used to supply GPU with data when GPU only reads it . The advantage is a better CPU cache utilization . <nl> <nl> - * * Note : * * <nl> + . . note : : <nl> <nl> - Allocation size of such memory types is usually limited . For more details , see " CUDA 2 . 2 Pinned Memory APIs " document or " CUDA C Programming Guide " . <nl> + Allocation size of such memory types is usually limited . For more details , see * CUDA 2 . 2 Pinned Memory APIs * document or * CUDA C Programming Guide * . <nl> : : <nl> <nl> class CV_EXPORTS CudaMem <nl> gpu : : CudaMem : : createGpuMatHeader <nl> <nl> . . 
ocv : function : : GpuMat gpu : : CudaMem : : createGpuMatHeader ( ) const <nl> <nl> - Maps CPU memory to GPU address space and creates the : ocv : class : ` gpu : : GpuMat ` header without reference counting for it . This can be done only if memory was allocated with the ` ` ALLOC_ZEROCOPY ` ` flag and if it is supported by the hardware ( laptops often share video and CPU memory , so address spaces can be mapped , which eliminates an extra copy ) . <nl> + Maps CPU memory to GPU address space and creates the : ocv : class : ` gpu : : GpuMat ` header without reference counting for it . This can be done only if memory was allocated with the ` ` ALLOC_ZEROCOPY ` ` flag and if it is supported by the hardware . Laptops often share video and CPU memory , so address spaces can be mapped , which eliminates an extra copy . <nl> <nl> . . index : : gpu : : CudaMem : : canMapHostMemory <nl> <nl> gpu : : Stream <nl> mmmmmmmmm - - <nl> . . ocv : class : : gpu : : Stream <nl> <nl> - This class encapsulates a queue of asynchronous calls . Some functions have overloads with the additional ` ` gpu : : Stream ` ` parameter . The overloads do initialization work ( allocate output buffers , upload constants , and so on ) , start the GPU kernel , and return before results are ready . You can check whether all operations are complete via : ocv : func : ` gpu : : Stream : : queryIfComplete ` . You can asynchronously upload / download data from / to page - locked buffers , using the : ocv : class : ` gpu : : CudaMem ` or : c : type : ` Mat ` header that points to a region of : ocv : class : ` gpu : : CudaMem ` . <nl> - <nl> - * * Note : * * <nl> + This class encapsulates a queue of asynchronous calls . Some functions have overloads with the additional ` ` gpu : : Stream ` ` parameter . The overloads do initialization work ( allocate output buffers , upload constants , and so on ) , start the GPU kernel , and return before results are ready . You can check whether all operations are complete via : ocv : func : ` gpu : : Stream : : queryIfComplete ` . You can asynchronously upload / download data from / to page - locked buffers , using the : ocv : class : ` gpu : : CudaMem ` or : ocv : class : ` Mat ` header that points to a region of : ocv : class : ` gpu : : CudaMem ` . <nl> <nl> - Currently , you may face problems if an operation is enqueued twice with different data . Some functions use the constant GPU memory , and next call may update the memory before the previous one has been finished . But calling different operations asynchronously is safe because each operation has its own constant buffer . Memory copy / upload / download / set operations to the buffers you hold are also safe . <nl> + . . note : : <nl> + <nl> + Currently , you may face problems if an operation is enqueued twice with different data . Some functions use the constant GPU memory , and next call may update the memory before the previous one has been finished . But calling different operations asynchronously is safe because each operation has its own constant buffer . Memory copy / upload / download / set operations to the buffers you hold are also safe . <nl> : : <nl> <nl> class CV_EXPORTS Stream <nl> gpu : : StreamAccessor <nl> mmmmmmmmmmmmmmmmmm - <nl> . . ocv : class : : gpu : : StreamAccessor <nl> <nl> - This class enables getting ` ` cudaStream_t ` ` from : ocv : class : ` gpu : : Stream ` and is declared in ` ` stream_accessor . hpp ` ` because it is the only public header that depends on the CUDA Runtime API . 
Including it brings a dependency to your code . <nl> + Class that enables getting ` ` cudaStream_t ` ` from : ocv : class : ` gpu : : Stream ` and is declared in ` ` stream_accessor . hpp ` ` because it is the only public header that depends on the CUDA Runtime API . Including it brings a dependency to your code . <nl> : : <nl> <nl> struct StreamAccessor <nl> gpu : : createContinuous <nl> <nl> * . . ocv : function : : GpuMat gpu : : createContinuous ( Size size , int type ) <nl> <nl> - Matrix is called continuous if its elements are stored continuously , that is without gaps in the end of each row . <nl> + Matrix is called continuous if its elements are stored continuously , that is , without gaps at the end of each row . <nl> <nl> . . index : : gpu : : ensureSizeIsEnough <nl> <nl> mmm a / modules / gpu / doc / image_processing . rst <nl> ppp b / modules / gpu / doc / image_processing . rst <nl> gpu : : meanShiftProc <nl> <nl> : param criteria : Termination criteria . See : ocv : class : ` TermCriteria ` . <nl> <nl> - See Also : <nl> + . . seealso : : <nl> : ocv : func : ` gpu : : meanShiftFiltering ` <nl> <nl> . . index : : gpu : : meanShiftSegmentation <nl> gpu : : integral <nl> <nl> : param sqsum : Squared integral image of the ` ` CV_32FC1 ` ` type . <nl> <nl> - See Also : <nl> + . . seealso : : <nl> : ocv : func : ` integral ` <nl> <nl> . . index : : gpu : : sqrIntegral <nl> gpu : : cornerHarris <nl> <nl> : param borderType : Pixel extrapolation method . Only ` ` BORDER_REFLECT101 ` ` and ` ` BORDER_REPLICATE ` ` are supported for now . <nl> <nl> - See Also : <nl> + . . seealso : : <nl> : ocv : func : ` cornerHarris ` <nl> <nl> . . index : : gpu : : cornerMinEigenVal <nl> gpu : : cornerMinEigenVal <nl> mmmmmmmmmmmmmmmmmmmmmmmm - - <nl> . . ocv : function : : void gpu : : cornerMinEigenVal ( const GpuMat & src , GpuMat & dst , int blockSize , int ksize , int borderType = BORDER_REFLECT101 ) <nl> <nl> - Computes the minimum eigen value of 2x2 derivative covariation matrix at each pixel ( the cornerness criteria ) . <nl> + Computes the minimum eigen value of a 2x2 derivative covariation matrix at each pixel ( the cornerness criteria ) . <nl> <nl> : param src : Source image . Only ` ` CV_8UC1 ` ` and ` ` CV_32FC1 ` ` images are supported for now . <nl> <nl> gpu : : cornerMinEigenVal <nl> <nl> : param borderType : Pixel extrapolation method . Only ` ` BORDER_REFLECT101 ` ` and ` ` BORDER_REPLICATE ` ` are supported for now . <nl> <nl> - See also : : ocv : func : ` cornerMinEigenVal ` <nl> + . . seealso : : : ocv : func : ` cornerMinEigenVal ` <nl> <nl> . . index : : gpu : : mulSpectrums <nl> <nl> gpu : : mulSpectrums <nl> <nl> Only full ( not packed ) ` ` CV_32FC2 ` ` complex spectrums in the interleaved format are supported for now . <nl> <nl> - See Also : <nl> + . . seealso : : <nl> : ocv : func : ` mulSpectrums ` <nl> <nl> . . index : : gpu : : mulAndScaleSpectrums <nl> gpu : : mulAndScaleSpectrums <nl> <nl> Only full ( not packed ) ` ` CV_32FC2 ` ` complex spectrums in the interleaved format are supported for now . <nl> <nl> - See Also : <nl> + . . seealso : : <nl> : ocv : func : ` mulSpectrums ` <nl> <nl> . . index : : gpu : : dft <nl> gpu : : dft <nl> <nl> : param flags : Optional flags : <nl> <nl> - * * * DFT_ROWS * * Transform each individual row of the source matrix . <nl> + * * * DFT_ROWS * * transforms each individual row of the source matrix . 
<nl> <nl> - * * * DFT_SCALE * * Scale the result : divide it by the number of elements in the transform ( obtained from ` ` dft_size ` ` ) . <nl> + * * * DFT_SCALE * * scales the result : divide it by the number of elements in the transform ( obtained from ` ` dft_size ` ` ) . <nl> <nl> - * * * DFT_INVERSE * * Invert DFT . Use for complex - complex cases ( real - complex and complex - real cases are always forward and inverse , respectively ) . <nl> + * * * DFT_INVERSE * * inverts DFT . Use for complex - complex cases ( real - complex and complex - real cases are always forward and inverse , respectively ) . <nl> <nl> - * * * DFT_REAL_OUTPUT * * Specify the output as real . The source matrix is the result of real - complex transform , so the destination matrix must be real . <nl> + * * * DFT_REAL_OUTPUT * * specifies the output as real . The source matrix is the result of real - complex transform , so the destination matrix must be real . <nl> <nl> <nl> The source matrix should be continuous , otherwise reallocation and data copying is performed . The function chooses an operation mode depending on the flags , size , and channel count of the source matrix : <nl> gpu : : dft <nl> If the source matrix is complex and the output is not specified as real , the destination matrix is complex and has the ` ` dft_size ` ` size and ` ` CV_32FC2 ` ` type . The destination matrix contains a full result of the DFT ( forward or inverse ) . <nl> <nl> * <nl> - If the source matrix is complex and the output is specified as real , the function assumes that its input is the result of the forward transform ( see next item ) . The destionation matrix has the ` ` dft_size ` ` size and ` ` CV_32FC1 ` ` type . It contains the result of the inverse DFT . <nl> + If the source matrix is complex and the output is specified as real , the function assumes that its input is the result of the forward transform ( see the next item ) . The destionation matrix has the ` ` dft_size ` ` size and ` ` CV_32FC1 ` ` type . It contains the result of the inverse DFT . <nl> <nl> * <nl> If the source matrix is real ( its type is ` ` CV_32FC1 ` ` ) , forward DFT is performed . The result of the DFT is packed into complex ( ` ` CV_32FC2 ` ` ) matrix . So , the width of the destination matrix is ` ` dft_size . width / 2 + 1 ` ` . But if the source is a single column , the height is reduced instead of the width . <nl> <nl> - See Also : <nl> + . . seealso : : <nl> : ocv : func : ` dft ` <nl> <nl> . . index : : gpu : : convolve <nl> gpu : : convolve <nl> <nl> . . index : : gpu : : ConvolveBuf <nl> <nl> - . . _gpu : : ConvolveBuf : <nl> - <nl> gpu : : ConvolveBuf <nl> mmmmmmmmmmmmmmm - <nl> . . ocv : class : : gpu : : ConvolveBuf <nl> <nl> - This class provides a memory buffer for the <nl> - : ocv : func : ` gpu : : convolve ` function . <nl> + Class providing a memory buffer for the : ocv : func : ` gpu : : convolve ` function . <nl> : : <nl> <nl> struct CV_EXPORTS ConvolveBuf <nl> gpu : : matchTemplate <nl> * ` ` CV_TM_SQDIFF ` ` <nl> * ` ` CV_TM_CCORR ` ` <nl> <nl> - See Also : <nl> + . . seealso : : <nl> : ocv : func : ` matchTemplate ` <nl> <nl> . . index : : gpu : : remap <nl> gpu : : remap <nl> <nl> : param ymap : Y values . Only ` ` CV_32FC1 ` ` type is supported . <nl> <nl> - The function transforms the source image using the specified map : <nl> + The function transforms the source image using the specified map : <nl> <nl> . . 
math : : <nl> <nl> \ texttt { dst } ( x , y ) = \ texttt { src } ( xmap ( x , y ) , ymap ( x , y ) ) <nl> <nl> - Values of pixels with non - integer coordinates are computed using bilinear the interpolation . <nl> + Values of pixels with non - integer coordinates are computed using the bilinear interpolation . <nl> <nl> - See Also : : ocv : func : ` remap ` <nl> + . . seealso : : <nl> + : ocv : func : ` remap ` <nl> <nl> . . index : : gpu : : cvtColor <nl> <nl> gpu : : cvtColor <nl> <nl> : param stream : Stream for the asynchronous version . <nl> <nl> - 3 - channel color spaces ( like ` ` HSV ` ` , ` ` XYZ ` ` , and so on ) can be stored in a 4 - channel image for better perfomance . <nl> + 3 - channel color spaces ( like ` ` HSV ` ` , ` ` XYZ ` ` , and so on ) can be stored in a 4 - channel image for better perfomance . <nl> <nl> - See Also : <nl> + . . seealso : : <nl> : ocv : func : ` cvtColor ` <nl> <nl> . . index : : gpu : : threshold <nl> gpu : : threshold <nl> <nl> : param stream : Stream for the asynchronous version . <nl> <nl> - See Also : <nl> + . . seealso : : <nl> : ocv : func : ` threshold ` <nl> <nl> . . index : : gpu : : resize <nl> gpu : : resize <nl> <nl> : param interpolation : Interpolation method . Only ` ` INTER_NEAREST ` ` and ` ` INTER_LINEAR ` ` are supported . <nl> <nl> - See Also : : ocv : func : ` resize ` <nl> + . . seealso : : : ocv : func : ` resize ` <nl> <nl> . . index : : gpu : : warpAffine <nl> <nl> gpu : : warpAffine <nl> <nl> : param flags : Combination of interpolation methods ( see : ocv : func : ` resize ` ) and the optional flag ` ` WARP_INVERSE_MAP ` ` specifying that ` ` M ` ` is an inverse transformation ( ` ` dst = > src ` ` ) . Only ` ` INTER_NEAREST ` ` , ` ` INTER_LINEAR ` ` , and ` ` INTER_CUBIC ` ` interpolation methods are supported . <nl> <nl> - See Also : <nl> + . . seealso : : <nl> : ocv : func : ` warpAffine ` <nl> <nl> . . index : : gpu : : warpPerspective <nl> gpu : : warpPerspective <nl> <nl> Applies a perspective transformation to an image . <nl> <nl> - : param src : Source image . Supports ` ` CV_8U ` ` , ` ` CV_16U ` ` , ` ` CV_32S ` ` , or ` ` CV_32F ` ` depth and 1 , 3 , or 4 channels . <nl> + : param src : Source image . ` ` CV_8U ` ` , ` ` CV_16U ` ` , ` ` CV_32S ` ` , or ` ` CV_32F ` ` depth and 1 , 3 , or 4 channels are supported . <nl> <nl> : param dst : Destination image with the same type as ` ` src ` ` . The size is ` ` dsize ` ` . <nl> <nl> gpu : : warpPerspective <nl> <nl> : param flags : Combination of interpolation methods ( see : ocv : func : ` resize ` ) and the optional flag ` ` WARP_INVERSE_MAP ` ` specifying that ` ` M ` ` is the inverse transformation ( ` ` dst = > src ` ` ) . Only ` ` INTER_NEAREST ` ` , ` ` INTER_LINEAR ` ` , and ` ` INTER_CUBIC ` ` interpolation methods are supported . <nl> <nl> - See Also : <nl> + . . seealso : : <nl> : ocv : func : ` warpPerspective ` <nl> <nl> . . index : : gpu : : rotate <nl> gpu : : rotate <nl> <nl> : param interpolation : Interpolation method . Only ` ` INTER_NEAREST ` ` , ` ` INTER_LINEAR ` ` , and ` ` INTER_CUBIC ` ` are supported . <nl> <nl> - See Also : <nl> + . . seealso : : <nl> : ocv : func : ` gpu : : warpAffine ` <nl> <nl> . . index : : gpu : : copyMakeBorder <nl> gpu : : copyMakeBorder <nl> <nl> : param value : Border value . <nl> <nl> - See Also : <nl> + . . seealso : : <nl> : ocv : func : ` copyMakeBorder ` <nl> <nl> . . index : : gpu : : rectStdDev <nl> gpu : : histRange <nl> <nl> . . 
ocv : function : : void gpu : : histRange ( const GpuMat & src , GpuMat hist [ 4 ] , const GpuMat levels [ 4 ] ) <nl> <nl> - Calculates a histogram with bins determined by the ` levels ` array . <nl> + Calculates a histogram with bins determined by the ` ` levels ` ` array . <nl> <nl> : param src : Source image . ` ` CV_8U ` ` , ` ` CV_16U ` ` , or ` ` CV_16S ` ` depth and 1 or 4 channels are supported . For a four - channel image , all channels are processed separately . <nl> <nl> mmm a / modules / gpu / doc / initalization_and_information . rst <nl> ppp b / modules / gpu / doc / initalization_and_information . rst <nl> gpu : : getDevice <nl> mmmmmmmmmmmmmmmmmm <nl> . . ocv : function : : int getDevice ( ) <nl> <nl> - Returns the current device index that was set by ` ` { gpu : : getDevice } ` ` or initialized by default . <nl> + Returns the current device index set by ` ` { gpu : : getDevice } ` ` or initialized by default . <nl> <nl> . . index : : gpu : : GpuFeature <nl> <nl> gpu : : GpuFeature <nl> mmmmmmmmmmmmmmm <nl> . . ocv : class : : gpu : : GpuFeature <nl> <nl> - This class provides GPU computing features . <nl> + Class providing GPU computing features . <nl> : : <nl> <nl> enum GpuFeature <nl> gpu : : DeviceInfo <nl> mmmmmmmmmmmmmmm <nl> . . ocv : class : : gpu : : DeviceInfo <nl> <nl> - This class provides functionality for querying the specified GPU properties . <nl> + Class providing functionality for querying the specified GPU properties . <nl> : : <nl> <nl> class CV_EXPORTS DeviceInfo <nl> gpu : : DeviceInfo : : isCompatible <nl> <nl> . . index : : gpu : : TargetArchs <nl> <nl> - . . _gpu : : TargetArchs : <nl> - <nl> gpu : : TargetArchs <nl> mmmmmmmmmmmmmmm - <nl> . . ocv : class : : gpu : : TargetArchs <nl> <nl> - This class provides a set of static methods to check what NVIDIA * card architecture the GPU module was built for . <nl> + Class providing a set of static methods to check what NVIDIA * card architecture the GPU module was built for . <nl> <nl> The following method checks whether the module was built with the support of the given feature : <nl> <nl> mmm a / modules / gpu / doc / introduction . rst <nl> ppp b / modules / gpu / doc / introduction . rst <nl> GPU Module Introduction <nl> General Information <nl> mmmmmmmmmmmmmmmmmm - <nl> <nl> - The OpenCV GPU module is a set of classes and functions to utilize GPU computational capabilities . It is implemented using NVIDIA * CUDA * Runtime API and supports only NVIDIA GPUs . The OpenCV GPU module includes utility functions , low - level vision primitives , and high - level algorithms . The utility functions and low - level primitives provide a powerful infrastructure for developing fast vision algorithms taking advantage of GPU whereas the high - level functionality includes some state - of - the - art algorithms ( such as stereo correspondence , face and people detectors , and others ) , ready to be used by the application developers . <nl> + The OpenCV GPU module is a set of classes and functions to utilize GPU computational capabilities . It is implemented using NVIDIA * CUDA * Runtime API and supports only NVIDIA GPUs . The OpenCV GPU module includes utility functions , low - level vision primitives , and high - level algorithms . 
The utility functions and low - level primitives provide a powerful infrastructure for developing fast vision algorithms taking advantage of GPU whereas the high - level functionality includes some state - of - the - art algorithms ( such as stereo correspondence , face and people detectors , and others ) ready to be used by the application developers . <nl> <nl> The GPU module is designed as a host - level API . This means that if you have pre - compiled OpenCV GPU binaries , you are not required to have the CUDA Toolkit installed or write any extra code to make use of the GPU . <nl> <nl> The GPU module depends on the CUDA Toolkit and NVIDIA Performance Primitives lib <nl> <nl> The OpenCV GPU module is designed for ease of use and does not require any knowledge of CUDA . Though , such a knowledge will certainly be useful to handle non - trivial cases or achieve the highest performance . It is helpful to understand the cost of various operations , what the GPU does , what the preferred data formats are , and so on . The GPU module is an effective instrument for quick implementation of GPU - accelerated computer vision algorithms . However , if your algorithm involves many simple operations , then , for the best possible performance , you may still need to write your own kernels to avoid extra write and read operations on the intermediate results . <nl> <nl> - To enable CUDA support , configure OpenCV using ` ` CMake ` ` with ` ` WITH_CUDA = ON ` ` . When the flag is set and if CUDA is installed , the full - featured OpenCV GPU module is built . Otherwise , the module is still built , but at runtime all functions from the module throw <nl> + To enable CUDA support , configure OpenCV using ` ` CMake ` ` with ` ` WITH_CUDA = ON ` ` . When the flag is set and if CUDA is installed , the full - featured OpenCV GPU module is built . Otherwise , the module is still built but at runtime all functions from the module throw <nl> : ocv : func : ` Exception ` with ` ` CV_GpuNotSupported ` ` error code , except for <nl> : ocv : func : ` gpu : : getCudaEnabledDeviceCount ( ) ` . The latter function returns zero GPU count in this case . Building OpenCV without CUDA support does not perform device code compilation , so it does not require the CUDA Toolkit installed . Therefore , using the <nl> : ocv : func : ` gpu : : getCudaEnabledDeviceCount ( ) ` function , you can implement a high - level algorithm that will detect GPU presence at runtime and choose an appropriate implementation ( CPU or GPU ) accordingly . <nl> You can always determine at runtime whether the OpenCV GPU - built binaries ( or PT <nl> Threading and Multi - threading <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> <nl> - The OpenCV GPU module follows the CUDA Runtime API conventions regarding the multi - threaded programming . This means that for the first API call a CUDA context is created implicitly , attached to the current CPU thread and then is used as the thread ' s " current " context . All further operations , such as a memory allocation , GPU code compilation , are associated with the context and the thread . Because any other thread is not attached to the context , memory ( and other resources ) allocated in the first thread cannot be accessed by the other thread . Instead , for this other thread CUDA creates another context associated with it . In short , by default , different threads do not share resources . <nl> - <nl> - But you can remove this limitation by using the CUDA Driver API ( version 3 . 1 or later ) . 
You can retrieve context reference for one thread , attach it to another thread , and make it " current " for that thread . As a result , the threads can share memory and other resources . It is also possible to create a context explicitly before calling any GPU code and attach it to all the threads you want to share the resources with . <nl> + The OpenCV GPU module follows the CUDA Runtime API conventions regarding the multi - threaded programming . This means that for the first API call a CUDA context is created implicitly , attached to the current CPU thread and then is used as the " current " context of the thread . All further operations , such as a memory allocation , GPU code compilation , are associated with the context and the thread . Since any other thread is not attached to the context , memory ( and other resources ) allocated in the first thread cannot be accessed by another thread . Instead , for this other thread CUDA creates another context associated with it . In short , by default , different threads do not share resources . But you can remove this limitation by using the CUDA Driver API ( version 3 . 1 or later ) . You can retrieve context reference for one thread , attach it to another thread , and make it " current " for that thread . As a result , the threads can share memory and other resources . It is also possible to create a context explicitly before calling any GPU code and attach it to all the threads you want to share the resources with . <nl> <nl> It is also possible to create the context explicitly using the CUDA Driver API , attach , and set the " current " context for all necessary threads . The CUDA Runtime API ( and OpenCV functions , respectively ) picks it up . <nl> <nl> Utilizing Multiple GPUs <nl> mmmmmmmmm <nl> <nl> - In the current version , each of the OpenCV GPU algorithms can use only a single GPU . So , to utilize multiple GPUs , you have to manually distribute the work between GPUs . Here are the two ways of utilizing multiple GPUs : <nl> + In the current version , each of the OpenCV GPU algorithms can use only a single GPU . So , to utilize multiple GPUs , you have to manually distribute the work between GPUs . Consider the following ways of utilizing multiple GPUs : <nl> <nl> * <nl> - If you use only synchronous functions , create several CPU threads ( one per each GPU ) and from within each thread create a CUDA context for the corresponding GPU using <nl> + If you use only synchronous functions , create several CPU threads ( one per each GPU ) . From within each thread , create a CUDA context for the corresponding GPU using <nl> : ocv : func : ` gpu : : setDevice ( ) ` or Driver API . Each of the threads will use the associated GPU . <nl> <nl> * <nl> mmm a / modules / gpu / doc / matrix_reductions . rst <nl> ppp b / modules / gpu / doc / matrix_reductions . rst <nl> Matrix Reductions <nl> <nl> gpu : : meanStdDev <nl> mmmmmmmmmmmmmmmmmm - <nl> - . . ocv : function : : void gpu : : meanStdDev ( const GpuMat \ & mtx , Scalar \ & mean , Scalar \ & stddev ) <nl> + . . cpp : function : : void gpu : : meanStdDev ( const GpuMat \ & mtx , Scalar \ & mean , Scalar \ & stddev ) <nl> <nl> Computes a mean value and a standard deviation of matrix elements . <nl> <nl> gpu : : meanStdDev <nl> <nl> : param stddev : Standard deviation value . <nl> <nl> - See Also : : ocv : func : ` meanStdDev ` <nl> + . . seealso : : <nl> + : ocv : func : ` meanStdDev ` <nl> <nl> . . index : : gpu : : norm <nl> <nl> gpu : : norm <nl> . . 
ocv : function : : double gpu : : norm ( const GpuMat \ & src1 , int normType , GpuMat \ & buf ) <nl> . . ocv : function : : double norm ( const GpuMat \ & src1 , const GpuMat \ & src2 , int normType = NORM_L2 ) <nl> <nl> - Returns the norm of matrix ( or difference of two matrices ) . <nl> + Returns the norm of a matrix ( or difference of two matrices ) . <nl> <nl> - : param src1 : The source matrix . Any matrices except 64F are supported . <nl> + : param src1 : Source matrix . Any matrices except 64F are supported . <nl> <nl> - : param src2 : The second source matrix ( if any ) with the same size and type as ` ` src1 ` ` . <nl> + : param src2 : Second source matrix ( if any ) with the same size and type as ` ` src1 ` ` . <nl> <nl> : param normType : Norm type . ` ` NORM_L1 ` ` , ` ` NORM_L2 ` ` , and ` ` NORM_INF ` ` are supported for now . <nl> <nl> : param buf : Optional buffer to avoid extra memory allocations . It is resized automatically . <nl> <nl> - See Also : : ocv : func : ` norm ` <nl> + . . seealso : : <nl> + : ocv : func : ` norm ` <nl> <nl> . . index : : gpu : : sum <nl> <nl> gpu : : sum <nl> <nl> : param buf : Optional buffer to avoid extra memory allocations . It is resized automatically . <nl> <nl> - See Also : : ocv : func : ` sum ` <nl> + . . seealso : : <nl> + : ocv : func : ` sum ` <nl> <nl> . . index : : gpu : : absSum <nl> <nl> gpu : : minMax <nl> <nl> The function does not work with ` ` CV_64F ` ` images on GPUs with the compute capability < 1 . 3 . <nl> <nl> - See Also : : ocv : func : ` minMaxLoc ` <nl> + . . seealso : : <nl> + : ocv : func : ` minMaxLoc ` <nl> <nl> . . index : : gpu : : minMaxLoc <nl> <nl> gpu : : minMaxLoc <nl> <nl> The function does not work with ` ` CV_64F ` ` images on GPU with the compute capability < 1 . 3 . <nl> <nl> - See Also : : ocv : func : ` minMaxLoc ` <nl> + . . seealso : : <nl> + : ocv : func : ` minMaxLoc ` <nl> <nl> . . index : : gpu : : countNonZero <nl> <nl> gpu : : countNonZero <nl> <nl> : param buf : Optional buffer to avoid extra memory allocations . It is resized automatically . <nl> <nl> - The function does not work with ` ` CV_64F ` ` images on GPUs with the compute capability < 1 . 3 . <nl> + The function does not work with ` ` CV_64F ` ` images on GPUs with the compute capability < 1 . 3 . <nl> <nl> - See Also : : ocv : func : ` countNonZero ` <nl> + . . seealso : : <nl> + : ocv : func : ` countNonZero ` <nl> mmm a / modules / gpu / doc / operations_on_matrices . rst <nl> ppp b / modules / gpu / doc / operations_on_matrices . rst <nl> gpu : : transpose <nl> <nl> Transposes a matrix . <nl> <nl> - : param src : Source matrix . 1 , 4 , 8 bytes element sizes are supported for now . <nl> + : param src : Source matrix . 1 - , 4 - , 8 - byte element sizes are supported for now . <nl> <nl> : param dst : Destination matrix . <nl> <nl> - See Also : <nl> + . . seealso : : <nl> : ocv : func : ` transpose ` <nl> <nl> . . index : : gpu : : flip <nl> gpu : : flip <nl> * ` ` < 0 ` ` Flips around both axes . <nl> <nl> <nl> - See Also : <nl> + . . seealso : : <nl> : ocv : func : ` flip ` <nl> <nl> . . index : : gpu : : LUT <nl> gpu : : LUT <nl> : param dst : Destination matrix with the same depth as ` ` lut ` ` and the same number of channels as ` ` src ` ` . <nl> <nl> <nl> - See Also : : ocv : func : ` LUT ` <nl> + . . seealso : : <nl> + : ocv : func : ` LUT ` <nl> <nl> . . index : : gpu : : merge <nl> <nl> gpu : : merge <nl> <nl> : param stream : Stream for the asynchronous version . 
<nl> <nl> - See Also : : ocv : func : ` merge ` <nl> + . . seealso : : <nl> + : ocv : func : ` merge ` <nl> <nl> . . index : : gpu : : split <nl> <nl> gpu : : split <nl> <nl> : param src : Source matrix . <nl> <nl> - : param dst : The destination array / vector of single - channel matrices . <nl> + : param dst : Destination array / vector of single - channel matrices . <nl> <nl> : param stream : Stream for the asynchronous version . <nl> <nl> - See Also : : ocv : func : ` split ` <nl> + . . seealso : : <nl> + : ocv : func : ` split ` <nl> <nl> . . index : : gpu : : magnitude <nl> <nl> gpu : : magnitude <nl> <nl> : param stream : Stream for the asynchronous version . <nl> <nl> - See Also : <nl> + . . seealso : : <nl> : ocv : func : ` magnitude ` <nl> <nl> . . index : : gpu : : magnitudeSqr <nl> gpu : : phase <nl> <nl> : param stream : Stream for the asynchronous version . <nl> <nl> - See Also : <nl> + . . seealso : : <nl> : ocv : func : ` phase ` <nl> <nl> . . index : : gpu : : cartToPolar <nl> gpu : : cartToPolar <nl> <nl> : param stream : Stream for the asynchronous version . <nl> <nl> - See Also : <nl> + . . seealso : : <nl> : ocv : func : ` cartToPolar ` <nl> <nl> . . index : : gpu : : polarToCart <nl> gpu : : polarToCart <nl> <nl> : param stream : Stream for the asynchronous version . <nl> <nl> - See Also : <nl> + . . seealso : : <nl> : ocv : func : ` polarToCart ` <nl> mmm a / modules / gpu / doc / per_element_operations . rst <nl> ppp b / modules / gpu / doc / per_element_operations . rst <nl> <nl> - Per - element Operations . <nl> + Per - element Operations <nl> = = = = = = = = = = = = = = = = = = = = = = = <nl> <nl> . . highlight : : cpp <nl> gpu : : add <nl> <nl> : param dst : Destination matrix with the same size and type as ` ` src1 ` ` . <nl> <nl> - See Also : : ocv : func : ` add ` <nl> + . . seealso : : <nl> + : ocv : func : ` add ` <nl> <nl> . . index : : gpu : : subtract <nl> <nl> gpu : : subtract <nl> <nl> : param dst : Destination matrix with the same size and type as ` ` src1 ` ` . <nl> <nl> - See Also : : ocv : func : ` subtract ` <nl> + . . seealso : : <nl> + : ocv : func : ` subtract ` <nl> <nl> <nl> <nl> gpu : : multiply <nl> <nl> : param dst : Destination matrix with the same size and type as ` ` src1 ` ` . <nl> <nl> - See Also : : ocv : func : ` multiply ` <nl> + . . seealso : : <nl> + : ocv : func : ` multiply ` <nl> <nl> <nl> . . index : : gpu : : divide <nl> gpu : : divide <nl> <nl> : param dst : Destination matrix with the same size and type as ` ` src1 ` ` . <nl> <nl> - This function , in contrast to : ocv : func : ` divide ` , uses a round - down rounding mode . <nl> + This function , in contrast to : ocv : func : ` divide ` , uses a round - down rounding mode . <nl> <nl> - See Also : : ocv : func : ` divide ` <nl> + . . seealso : : <nl> + : ocv : func : ` divide ` <nl> <nl> <nl> <nl> gpu : : exp <nl> <nl> : param dst : Destination matrix with the same size and type as ` ` src ` ` . <nl> <nl> - See Also : : ocv : func : ` exp ` <nl> + . . seealso : : <nl> + : ocv : func : ` exp ` <nl> <nl> <nl> <nl> gpu : : log <nl> <nl> : param dst : Destination matrix with the same size and type as ` ` src ` ` . <nl> <nl> - See Also : : ocv : func : ` log ` <nl> + . . seealso : : : ocv : func : ` log ` <nl> <nl> <nl> <nl> gpu : : absdiff <nl> <nl> . . ocv : function : : void gpu : : absdiff ( const GpuMat & src1 , const Scalar & src2 , GpuMat & dst ) <nl> <nl> - Computes per - element absolute difference of two matrices ( or of matrix and scalar ) . 
<nl> + Computes per - element absolute difference of two matrices ( or of a matrix and scalar ) . <nl> <nl> : param src1 : First source matrix . ` ` CV_8UC1 ` ` , ` ` CV_8UC4 ` ` , ` ` CV_32SC1 ` ` and ` ` CV_32FC1 ` ` matrices are supported for now . <nl> <nl> gpu : : absdiff <nl> <nl> : param dst : Destination matrix with the same size and type as ` ` src1 ` ` . <nl> <nl> - See Also : : ocv : func : ` absdiff ` <nl> + . . seealso : : <nl> + : ocv : func : ` absdiff ` <nl> <nl> . . index : : gpu : : compare <nl> <nl> gpu : : compare <nl> * * * CMP_LE : * * ` ` src1 ( . ) < = src2 ( . ) ` ` <nl> * * * CMP_NE : * * ` ` src1 ( . ) ! = src2 ( . ) ` ` <nl> <nl> - See Also : : ocv : func : ` compare ` <nl> + . . seealso : : <nl> + : ocv : func : ` compare ` <nl> <nl> <nl> . . index : : gpu : : bitwise_not <nl> gpu : : bitwise_xor <nl> <nl> . . ocv : function : : void gpu : : bitwise_xor ( const GpuMat & src1 , const GpuMat & src2 , GpuMat & dst , const GpuMat & mask , const Stream & stream ) <nl> <nl> - Performs a per - element bitwise " exclusive or " operation of two matrices . <nl> + Performs a per - element bitwise ` ` exclusive or ` ` operation of two matrices . <nl> <nl> : param src1 : First source matrix . <nl> <nl> gpu : : min <nl> <nl> : param stream : Stream for the asynchronous version . <nl> <nl> - See Also : : ocv : func : ` min ` <nl> + . . seealso : : <nl> + : ocv : func : ` min ` <nl> <nl> <nl> <nl> gpu : : max <nl> <nl> : param stream : Stream for the asynchronous version . <nl> <nl> - See Also : : ocv : func : ` max ` <nl> + . . seealso : : <nl> + : ocv : func : ` max ` <nl>
Purpose : 2nd review cycle .
opencv/opencv
392b9074f5caecc43cbc9ab6cbaf6129847aa991
2011-06-18T20:19:03Z
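For reference, a minimal NumPy sketch of what the three norm types documented in this record compute, for a single matrix or for the difference of two matrices. This only mirrors the CPU-side semantics of cv::norm; it is not the GPU implementation, and the string norm names stand in for the OpenCV NORM_* constants.

import numpy as np

def norm(src1, norm_type="L2", src2=None):
    # gpu::norm(src1, src2, ...) is the norm of the difference src1 - src2
    m = src1 if src2 is None else src1 - src2
    flat = np.abs(m).ravel()
    if norm_type == "L1":    # sum of absolute values
        return float(flat.sum())
    if norm_type == "L2":    # Euclidean norm
        return float(np.sqrt((flat ** 2).sum()))
    if norm_type == "INF":   # largest absolute value
        return float(flat.max())
    raise ValueError("unsupported norm type: " + norm_type)

a = np.array([[3.0, -4.0], [0.0, 0.0]])
print(norm(a, "L1"), norm(a, "L2"), norm(a, "INF"))  # 7.0 5.0 4.0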
deleted file mode 100644 <nl> index f45d42d5f786 . . 000000000000 <nl> mmm a / . travis . yml <nl> ppp / dev / null <nl> <nl> - os : linux <nl> - dist : trusty <nl> - sudo : required <nl> - language : cpp <nl> - compiler : gcc <nl> - <nl> - env : <nl> - global : <nl> - - BUILD_CCACHE_DIR = ~ / build / ccache <nl> - - BUILD_NINJA_DIR = ~ / build / ninja <nl> - matrix : <nl> - - BUILD = linux <nl> - - BUILD = linux - gcc5 <nl> - - BUILD = linux - cuda <nl> - - BUILD = linux - mkl <nl> - - BUILD = linux - android <nl> - <nl> - matrix : <nl> - include : <nl> - - env : BUILD = osx <nl> - os : osx <nl> - osx_image : xcode8 . 3 <nl> - compiler : clang <nl> - - env : BUILD = osx - ios <nl> - os : osx <nl> - osx_image : xcode8 . 3 <nl> - compiler : clang <nl> - - env : BUILD = osx - android <nl> - os : osx <nl> - osx_image : xcode8 . 3 <nl> - compiler : clang <nl> - <nl> - cache : <nl> - directories : <nl> - - $ BUILD_CCACHE_DIR <nl> - - $ BUILD_NINJA_DIR <nl> - - $ HOME / . ccache <nl> - <nl> - before_install : <nl> - - source . travis / setup . sh <nl> - <nl> - install : <nl> - - . / . travis / install . sh <nl> - - . / . travis / build . sh <nl> - <nl> - script : <nl> - - . / . travis / test . sh <nl> deleted file mode 100644 <nl> index 1bcbaeb619bf . . 000000000000 <nl> mmm a / appveyor . yml <nl> ppp / dev / null <nl> <nl> - version : ' { build } ' <nl> - clone_folder : c : \ projects \ caffe2 <nl> - environment : <nl> - matrix : <nl> - - USE_CUDA : OFF <nl> - CMAKE_BUILD_TYPE : Release <nl> - APPVEYOR_BUILD_WORKER_IMAGE : Visual Studio 2017 <nl> - <nl> - # Building CUDA with Visual Studio 2017 is yet to be supported by <nl> - # NVidia , so we canot enable it right now . <nl> - # - USE_CUDA : ON <nl> - # CMAKE_BUILD_TYPE : Release <nl> - # APPVEYOR_BUILD_WORKER_IMAGE : Visual Studio 2017 <nl> - <nl> - # Building CUDA currently causes a timeout in appveyor . In the interest <nl> - # of properly monitoring the rest , we will disable cuda contbuild for now . <nl> - # - USE_CUDA : ON <nl> - # CMAKE_BUILD_TYPE : Release <nl> - # APPVEYOR_BUILD_WORKER_IMAGE : Visual Studio 2015 <nl> - <nl> - - USE_CUDA : OFF <nl> - CMAKE_BUILD_TYPE : Release <nl> - APPVEYOR_BUILD_WORKER_IMAGE : Visual Studio 2015 <nl> - <nl> - # Debug build is not a top priority for us right now , so in the <nl> - # interest of contbuild time , we disable it . <nl> - # - USE_CUDA : OFF <nl> - # CMAKE_BUILD_TYPE : Debug <nl> - # APPVEYOR_BUILD_WORKER_IMAGE : Visual Studio 2017 <nl> - <nl> - # Currently , CUDA + Debug does not work due to an error of using <nl> - # std : : _Debug_lt in device code . Not sure where this comes from yet , <nl> - # but it is probably safe to assume that very few are going to build <nl> - # debug mode with CUDA and Windows . <nl> - # - USE_CUDA : ON <nl> - # CMAKE_BUILD_TYPE : Debug <nl> - <nl> - install : <nl> - - cmd : c : \ projects \ caffe2 \ scripts \ appveyor \ install . bat <nl> - <nl> - build_script : <nl> - - cmd : > - <nl> - cd c : \ projects \ caffe2 <nl> - <nl> - git submodule update - - init <nl> - <nl> - call scripts \ build_windows . bat <nl>
Delete defunct . travis . yml and appveyor . yml files ( )
pytorch/pytorch
7f66164a89e66632a1f212a5c5c7fd1e0d0f9aa6
2018-03-28T14:55:03Z
mmm a / src / full - codegen . cc <nl> ppp b / src / full - codegen . cc <nl> void FullCodeGenerator : : CallLoadIC ( ContextualMode contextual_mode , <nl> <nl> <nl> void FullCodeGenerator : : CallGlobalLoadIC ( Handle < String > name ) { <nl> - if ( masm ( ) - > serializer_enabled ( ) ) return CallLoadIC ( CONTEXTUAL ) ; <nl> + if ( masm ( ) - > serializer_enabled ( ) | | FLAG_vector_ics ) { <nl> + / / Vector - ICs don ' t work with LoadGlobalIC . <nl> + return CallLoadIC ( CONTEXTUAL ) ; <nl> + } <nl> Handle < Code > ic = CodeFactory : : LoadGlobalIC ( <nl> isolate ( ) , isolate ( ) - > global_object ( ) , name ) . code ( ) ; <nl> CallIC ( ic , TypeFeedbackId : : None ( ) ) ; <nl> mmm a / src / ic / ic . cc <nl> ppp b / src / ic / ic . cc <nl> Handle < Code > LoadIC : : initialize_stub ( Isolate * isolate , <nl> <nl> Handle < Code > LoadIC : : load_global ( Isolate * isolate , Handle < GlobalObject > global , <nl> Handle < String > name ) { <nl> + / / This special IC doesn ' t work with vector ics . <nl> + DCHECK ( ! FLAG_vector_ics ) ; <nl> + <nl> Handle < ScriptContextTable > script_contexts ( <nl> global - > native_context ( ) - > script_context_table ( ) ) ; <nl> <nl>
The Global Load IC doesn ' t yet play well with - - vector - ics .
v8/v8
d232dcfd6a838fe016d1634db717d7ea9f453231
2015-03-04T09:41:19Z
mmm a / Tests / EndToEndTests / CNTKv2Python / Examples / TrainResNet_CIFAR10_Distributed_test . py <nl> ppp b / Tests / EndToEndTests / CNTKv2Python / Examples / TrainResNet_CIFAR10_Distributed_test . py <nl> def test_cifar_resnet_distributed ( device_id ) : <nl> " - q " , " 32 " , <nl> " - es " , " 512 " , <nl> " - device " , " 0 " ] <nl> - mpiexec_test ( device_id , script_under_test , params , 0 . 86 , False , 2 ) <nl> + mpiexec_test ( device_id , script_under_test , params , 0 . 86 , False , 3 ) <nl> <nl> def test_cifar_resnet_distributed_1bitsgd ( device_id ) : <nl> params = [ " - e " , " 2 " , <nl> def test_cifar_resnet_distributed_1bitsgd ( device_id ) : <nl> " - q " , " 1 " , <nl> " - es " , " 512 " , <nl> " - device " , " 0 " ] <nl> - mpiexec_test ( device_id , script_under_test , params , 0 . 86 , False , 2 ) <nl> + mpiexec_test ( device_id , script_under_test , params , 0 . 86 , False , 3 ) <nl> <nl> <nl> def test_cifar_resnet_distributed_block_momentum ( device_id ) : <nl> def test_cifar_resnet_distributed_block_momentum ( device_id ) : <nl> " - b " , " 3200 " , <nl> " - es " , " 512 " , <nl> " - device " , " 0 " ] <nl> - mpiexec_test ( device_id , script_under_test , params , 0 . 89 , False , 2 ) <nl> + mpiexec_test ( device_id , script_under_test , params , 0 . 89 , False , 5 ) <nl>
Increasing tolerance
microsoft/CNTK
f4caf732b25085cf17ccb0881ed605e4d4603d03
2017-02-01T12:44:33Z
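A small sketch of the check these tests perform, assuming the final argument of mpiexec_test is an absolute tolerance on the expected error (the real helper's signature is not reproduced here). Widening the window, as this commit does, keeps the expected value fixed while allowing more run-to-run noise from distributed training:

def check_error(observed, expected, tolerance):
    # accept any observed error within +/- tolerance of the expectation
    assert abs(observed - expected) <= tolerance, (
        "error %.4f outside %.4f +/- %.4f" % (observed, expected, tolerance))

check_error(0.88, 0.86, 0.03)    # passes under the wider tolerance
# check_error(0.88, 0.86, 0.01)  # would raise AssertionError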
mmm a / xbmc / windowing / gbm / DRMUtils . cpp <nl> ppp b / xbmc / windowing / gbm / DRMUtils . cpp <nl> bool CDRMUtils : : FindPlanes ( ) <nl> if ( ! FindModifiersForPlane ( m_overlay_plane ) ) <nl> { <nl> CLog : : Log ( LOGDEBUG , " CDRMUtils : : % s - no drm modifiers present for the overlay plane " , __FUNCTION__ ) ; <nl> + m_overlay_plane - > modifiers_map . emplace ( DRM_FORMAT_ARGB8888 , std : : vector < uint64_t > { DRM_FORMAT_MOD_LINEAR } ) ; <nl> + m_overlay_plane - > modifiers_map . emplace ( DRM_FORMAT_XRGB8888 , std : : vector < uint64_t > { DRM_FORMAT_MOD_LINEAR } ) ; <nl> } <nl> <nl> return true ; <nl> mmm a / xbmc / windowing / gbm / GBMUtils . cpp <nl> ppp b / xbmc / windowing / gbm / GBMUtils . cpp <nl> bool CGBMUtils : : CreateSurface ( int width , int height , const uint64_t * modifiers , <nl> GBM_FORMAT_ARGB8888 , <nl> modifiers , <nl> modifiers_count ) ; <nl> - # else <nl> - m_surface = gbm_surface_create ( m_device , <nl> - width , <nl> - height , <nl> - GBM_FORMAT_ARGB8888 , <nl> - GBM_BO_USE_SCANOUT | GBM_BO_USE_RENDERING ) ; <nl> # endif <nl> + if ( ! m_surface ) <nl> + { <nl> + m_surface = gbm_surface_create ( m_device , <nl> + width , <nl> + height , <nl> + GBM_FORMAT_ARGB8888 , <nl> + GBM_BO_USE_SCANOUT | GBM_BO_USE_RENDERING ) ; <nl> + } <nl> <nl> if ( ! m_surface ) <nl> { <nl>
Merge pull request from lrusak / gbm - modifiers - fix
xbmc/xbmc
6baf09e182c66e916b0ebdf31c330d0bc0cf2725
2018-09-08T15:34:51Z
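The change above replaces a compile-time either/or with a runtime fallback: try the modifier-aware surface first, and only create a plain surface if that returns nothing. A self-contained Python sketch of the same control flow, with hypothetical stand-ins for the two gbm allocation calls:

def create_with_modifiers(width, height, modifiers):
    return None  # stand-in: pretend the modifier-aware allocation fails

def create_legacy(width, height):
    # stand-in for gbm_surface_create(..., GBM_BO_USE_SCANOUT | GBM_BO_USE_RENDERING)
    return {"width": width, "height": height, "format": "ARGB8888"}

def create_surface(width, height, modifiers=()):
    surface = None
    if modifiers:        # roughly the HAS_GBM_MODIFIERS branch
        surface = create_with_modifiers(width, height, modifiers)
    if surface is None:  # runtime fallback replaces the old #else path
        surface = create_legacy(width, height)
    return surface

print(create_surface(1920, 1080, modifiers=(0,)))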
mmm a / stdlib / public / core / SequenceAlgorithms . swift . gyb <nl> ppp b / stdlib / public / core / SequenceAlgorithms . swift . gyb <nl> $ { orderingExplanation } <nl> / / / <nl> / / / - SeeAlso : ` min ( isOrderedBefore : ) ` <nl> % end <nl> + @ warn_unqualified_access <nl> public func min ( <nl> % if preds : <nl> isOrderedBefore : @ noescape ( $ { GElement } , $ { GElement } ) throws - > Bool <nl> $ { orderingExplanation } <nl> / / / <nl> / / / - SeeAlso : ` max ( isOrderedBefore : ) ` <nl> % end <nl> + @ warn_unqualified_access <nl> public func max ( <nl> % if preds : <nl> isOrderedBefore : @ noescape ( $ { GElement } , $ { GElement } ) throws - > Bool <nl> mmm a / stdlib / public / core / Stride . swift . gyb <nl> ppp b / stdlib / public / core / Stride . swift . gyb <nl> extension Strideable { <nl> Builtin . unreachable ( ) <nl> } <nl> <nl> + @ warn_unqualified_access <nl> @ available ( * , unavailable , message : " Use stride ( from : through : by : ) free function instead " ) <nl> public func stride ( <nl> through end : Self , by stride : Stride <nl>
Adding @ warn_unqualified_access to Sequence . min / max and Strideable . stride
apple/swift
1cca4ff528874525b6188c724b1d4904a090b8e0
2016-07-05T23:08:05Z
mmm a / src / citra_qt / debugger / graphics_cmdlists . cpp <nl> ppp b / src / citra_qt / debugger / graphics_cmdlists . cpp <nl> <nl> / / Licensed under GPLv2 or any later version <nl> / / Refer to the license . txt file included . <nl> <nl> + # include < QApplication > <nl> + # include < QClipboard > <nl> # include < QLabel > <nl> # include < QListView > <nl> # include < QMainWindow > <nl> GPUCommandListWidget : : GPUCommandListWidget ( QWidget * parent ) : QDockWidget ( tr ( " Pi <nl> this , SLOT ( OnCommandDoubleClicked ( const QModelIndex & ) ) ) ; <nl> <nl> toggle_tracing = new QPushButton ( tr ( " Start Tracing " ) ) ; <nl> + QPushButton * copy_all = new QPushButton ( tr ( " Copy All " ) ) ; <nl> <nl> connect ( toggle_tracing , SIGNAL ( clicked ( ) ) , this , SLOT ( OnToggleTracing ( ) ) ) ; <nl> connect ( this , SIGNAL ( TracingFinished ( const Pica : : DebugUtils : : PicaTrace & ) ) , <nl> model , SLOT ( OnPicaTraceFinished ( const Pica : : DebugUtils : : PicaTrace & ) ) ) ; <nl> <nl> + connect ( copy_all , SIGNAL ( clicked ( ) ) , this , SLOT ( CopyAllToClipboard ( ) ) ) ; <nl> + <nl> command_info_widget = new QWidget ; <nl> <nl> QVBoxLayout * main_layout = new QVBoxLayout ; <nl> main_layout - > addWidget ( list_widget ) ; <nl> - main_layout - > addWidget ( toggle_tracing ) ; <nl> + { <nl> + QHBoxLayout * sub_layout = new QHBoxLayout ; <nl> + sub_layout - > addWidget ( toggle_tracing ) ; <nl> + sub_layout - > addWidget ( copy_all ) ; <nl> + main_layout - > addLayout ( sub_layout ) ; <nl> + } <nl> main_layout - > addWidget ( command_info_widget ) ; <nl> main_widget - > setLayout ( main_layout ) ; <nl> <nl> void GPUCommandListWidget : : OnToggleTracing ( ) { <nl> toggle_tracing - > setText ( tr ( " Start Tracing " ) ) ; <nl> } <nl> } <nl> + <nl> + void GPUCommandListWidget : : CopyAllToClipboard ( ) { <nl> + QClipboard * clipboard = QApplication : : clipboard ( ) ; <nl> + QString text ; <nl> + <nl> + QAbstractItemModel * model = ( QAbstractListModel * ) list_widget - > model ( ) ; <nl> + <nl> + for ( int row = 0 ; row < model - > rowCount ( { } ) ; + + row ) { <nl> + for ( int col = 0 ; col < model - > columnCount ( { } ) ; + + col ) { <nl> + QModelIndex index = model - > index ( row , col ) ; <nl> + text + = model - > data ( index ) . value < QString > ( ) ; <nl> + text + = ' \ t ' ; <nl> + } <nl> + text + = ' \ n ' ; <nl> + } <nl> + <nl> + clipboard - > setText ( text ) ; <nl> + } <nl> mmm a / src / citra_qt / debugger / graphics_cmdlists . h <nl> ppp b / src / citra_qt / debugger / graphics_cmdlists . h <nl> public slots : <nl> <nl> void SetCommandInfo ( const QModelIndex & ) ; <nl> <nl> + void CopyAllToClipboard ( ) ; <nl> + <nl> signals : <nl> void TracingFinished ( const Pica : : DebugUtils : : PicaTrace & ) ; <nl> <nl>
citra - qt : Add support for copying the command list contents to clipboard .
yuzu-emu/yuzu
7fd03750bd42419d2b66b4a92897e6bbd21e7ac2
2015-07-15T15:51:59Z
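CopyAllToClipboard walks every row and column of the list model, appending a tab after each cell and a newline after each row. A minimal Python sketch of that serialization, with a plain list of tuples standing in for the Qt model and the returned string standing in for the clipboard contents:

def model_to_tsv(rows):
    text = ""
    for row in rows:
        for cell in row:      # model->data(index) per column
            text += str(cell) + "\t"
        text += "\n"
    return text

commands = [("0x0041", "GPUREG_VIEWPORT_WIDTH", "0x3F800000"),
            ("0x0042", "GPUREG_VIEWPORT_HEIGHT", "0x3F000000")]
print(model_to_tsv(commands))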
mmm a / test / test_fx_experimental . py <nl> ppp b / test / test_fx_experimental . py <nl> <nl> import torch <nl> from torch . fx . symbolic_trace import symbolic_trace <nl> from torch . fx . experimental import GraphManipulation <nl> - from torch . fx . experimental . Partitioner import Partitioner , Device <nl> + from torch . fx . experimental . Partitioner import Partitioner , Device , PartitionerConfig <nl> from torch . testing . _internal . common_utils import run_tests <nl> from torch . testing . _internal . jit_utils import JitTestCase <nl> <nl> def forward ( self , a , b ) : <nl> ) <nl> partitioner = Partitioner ( ) <nl> devices = [ <nl> - Device ( ' dev_0 ' , 125 ) , <nl> - Device ( ' dev_1 ' , 125 ) , <nl> - Device ( ' dev_2 ' , 125 ) <nl> + Device ( ' dev_0 ' , 125 , 0 ) , <nl> + Device ( ' dev_1 ' , 125 , 1 ) , <nl> + Device ( ' dev_2 ' , 125 , 2 ) <nl> ] <nl> - ret = partitioner . partition_graph ( traced , m , devices ) <nl> + partitioner_config = PartitionerConfig ( devices ) <nl> + ret = partitioner . partition_graph ( traced , m , partitioner_config ) <nl> module_with_submodules = ret . module_with_submodules <nl> + dag = ret . dag <nl> self . assertEqual ( traced ( a , b ) , module_with_submodules ( a , b ) ) <nl> <nl> def test_size_based_partition ( self ) : <nl> def forward ( self , a , b ) : <nl> ) <nl> partitioner = Partitioner ( ) <nl> devices = [ <nl> - Device ( ' dev_0 ' , 125 ) , <nl> - Device ( ' dev_1 ' , 125 ) , <nl> - Device ( ' dev_2 ' , 125 ) <nl> + Device ( ' dev_0 ' , 125 , 0 ) , <nl> + Device ( ' dev_1 ' , 125 , 1 ) , <nl> + Device ( ' dev_2 ' , 125 , 2 ) <nl> ] <nl> - ret = partitioner . partition_graph ( traced , m , devices ) <nl> + partitioner_config = PartitionerConfig ( devices ) <nl> + ret = partitioner . partition_graph ( traced , m , partitioner_config ) <nl> module_with_submodules = ret . module_with_submodules <nl> + dag = ret . dag <nl> self . assertEqual ( traced ( a , b ) , module_with_submodules ( a , b ) ) <nl> assert len ( module_with_submodules . graph . nodes ) = = 7 <nl> <nl> def forward ( self , a , b ) : <nl> ) <nl> partitioner = Partitioner ( ) <nl> devices = [ <nl> - Device ( ' dev_0 ' , 125 ) , <nl> - Device ( ' dev_1 ' , 125 ) , <nl> - Device ( ' dev_2 ' , 125 ) <nl> + Device ( ' dev_0 ' , 125 , 0 ) , <nl> + Device ( ' dev_1 ' , 125 , 1 ) , <nl> + Device ( ' dev_2 ' , 125 , 2 ) <nl> ] <nl> - ret = partitioner . partition_graph ( traced , m , devices ) <nl> + partitioner_config = PartitionerConfig ( devices ) <nl> + ret = partitioner . partition_graph ( traced , m , partitioner_config ) <nl> module_with_submodules = ret . module_with_submodules <nl> + dag = ret . dag <nl> self . assertEqual ( traced ( a , b ) , module_with_submodules ( a , b ) ) <nl> assert len ( module_with_submodules . graph . nodes ) = = 5 <nl> <nl> + def test_sparse_nn_partition ( self ) : <nl> + class MyRecommendationModule ( torch . nn . Module ) : <nl> + def create_mlp ( self , num_of_layers : int , input_size : int , output_size : int ) : <nl> + layers = torch . nn . ModuleList ( ) <nl> + for _ in range ( num_of_layers ) : <nl> + ll = torch . nn . Linear ( input_size , output_size ) <nl> + layers . append ( ll ) <nl> + layers . append ( torch . nn . ReLU ( ) ) <nl> + return layers <nl> + <nl> + def __init__ ( self ) : <nl> + super ( MyRecommendationModule , self ) . __init__ ( ) <nl> + layers = self . create_mlp ( 4 , 4 , 4 ) <nl> + self . bottom_layers = torch . nn . Sequential ( * layers ) <nl> + layers = self . create_mlp ( 3 , 24 , 24 ) <nl> + self . 
top_layers = torch . nn . Sequential ( * layers ) <nl> + self . embedding_layers = torch . nn . ModuleList ( ) <nl> + el = torch . nn . EmbeddingBag ( 500000 , 4 , mode = ' sum ' , sparse = True ) <nl> + self . embedding_layers . append ( el ) <nl> + for i in range ( 3 ) : <nl> + el = torch . nn . EmbeddingBag ( 1000000 , 4 , mode = ' sum ' , sparse = True ) <nl> + self . embedding_layers . append ( el ) <nl> + el = torch . nn . EmbeddingBag ( 500000 , 4 , mode = ' sum ' , sparse = True ) <nl> + self . embedding_layers . append ( el ) <nl> + <nl> + def forward ( self , a , b , offset ) : <nl> + x = self . bottom_layers ( a ) <nl> + y = [ ] <nl> + c = [ ] <nl> + for i in range ( len ( self . embedding_layers ) ) : <nl> + temp = torch . randint ( 10 , ( 8 , ) ) <nl> + c . append ( temp + b ) <nl> + for i in range ( len ( self . embedding_layers ) ) : <nl> + if i % 2 = = 0 : <nl> + y . append ( self . embedding_layers [ i ] ( c [ i ] , offset ) ) <nl> + else : <nl> + y . append ( self . embedding_layers [ i ] ( torch . randint ( 10 , ( 8 , ) ) , offset ) ) <nl> + z = torch . cat ( [ x ] + y , dim = 1 ) <nl> + p = self . top_layers ( z ) <nl> + return p <nl> + <nl> + m = MyRecommendationModule ( ) <nl> + a = torch . rand ( 2 , 4 ) <nl> + b = torch . randint ( 10 , ( 8 , ) ) <nl> + offset = torch . randint ( 1 , ( 2 , ) ) <nl> + traced = symbolic_trace ( m ) <nl> + GraphManipulation . get_size_of_all_nodes ( traced , [ a , b , offset ] ) <nl> + devices = [ <nl> + Device ( ' dev_0 ' , 33000000 , 0 ) , <nl> + Device ( ' dev_1 ' , 33000000 , 1 ) , <nl> + Device ( ' dev_2 ' , 33000000 , 2 ) <nl> + ] <nl> + partitioner_config = PartitionerConfig ( devices , is_sparse_nn = True ) <nl> + partitioner = Partitioner ( ) <nl> + ret = partitioner . partition_graph ( traced , m , partitioner_config ) <nl> + module_with_submodules = ret . module_with_submodules <nl> + dag = ret . dag <nl> + self . assertEqual ( traced ( a , b , offset ) , module_with_submodules ( a , b , offset ) ) <nl> + assert len ( module_with_submodules . graph . nodes ) = = 24 <nl> + <nl> if __name__ = = ' __main__ ' : <nl> run_tests ( ) <nl> mmm a / torch / fx / experimental / Partitioner . py <nl> ppp b / torch / fx / experimental / Partitioner . py <nl> <nl> from torch . fx . graph_module import GraphModule <nl> from torch . fx . node import Node , map_arg <nl> - from typing import Dict , List , Union , Set , NamedTuple , Tuple <nl> + from typing import Dict , List , Set , NamedTuple , Tuple <nl> import torch <nl> from torch . fx . experimental . subgraph_creation_example import split_module <nl> import operator <nl> <nl> - class DAGNode ( NamedTuple ) : <nl> + class DAGNode ( ) : <nl> " " " <nl> DAGNode class maintains useful information for a partition ( submodule ) . <nl> inputs ( submodule node ) and outputs ( submodule node ) . <nl> " " " <nl> - submodule : Node <nl> - input_nodes : List [ Node ] <nl> - output_nodes : List [ Node ] <nl> + def __init__ ( <nl> + self , <nl> + submodule_node : Node , <nl> + input_nodes : List [ Node ] , <nl> + output_nodes : List [ Node ] , <nl> + logical_device_ids : List [ int ] , <nl> + size_bytes : int <nl> + ) - > None : <nl> + self . submodule_node : Node = submodule_node <nl> + self . input_nodes : List [ Node ] = input_nodes <nl> + self . output_nodes : List [ Node ] = output_nodes <nl> + self . logical_device_ids : List [ int ] = logical_device_ids <nl> + self . size_bytes = size_bytes <nl> + <nl> + def __str__ ( self ) - > str : <nl> + return str ( self . 
submodule_node ) <nl> <nl> class DAG : <nl> " " " DAG class contains all the DAG nodes " " " <nl> def __init__ ( self ) - > None : <nl> <nl> def create_node ( <nl> self , <nl> - submodule : Node , <nl> + submodule_node : Node , <nl> input_nodes : List [ Node ] , <nl> - output_nodes : List [ Node ] <nl> + output_nodes : List [ Node ] , <nl> + logical_devices : List [ int ] , <nl> + size_bytes : int <nl> ) - > None : <nl> - node = DAGNode ( submodule , input_nodes , output_nodes ) <nl> + node = DAGNode ( submodule_node , input_nodes , output_nodes , logical_devices , size_bytes ) <nl> self . nodes . append ( node ) <nl> <nl> class Partition : <nl> " " " Partition class contains all the information about an individual partition . <nl> It also provides necessary methods for manipulation the partition . <nl> " " " <nl> - def __init__ ( self , partition_id : int , fx_module : GraphModule ) - > None : <nl> - self . graph_module = fx_module <nl> + def __init__ ( self , partition_id : int ) - > None : <nl> self . nodes : Set [ Node ] = set ( ) <nl> self . partition_id = partition_id <nl> self . parents : Set [ ' Partition ' ] = set ( ) <nl> self . children : Set [ ' Partition ' ] = set ( ) <nl> self . bfs_level : int = - 1 <nl> - <nl> - def add_node ( self , node : Node ) - > None : <nl> - " " " Append a new node into the partition . " " " <nl> - self . nodes . add ( node ) <nl> - <nl> - def add_parent ( self , partition : ' Partition ' ) - > None : <nl> - self . parents . add ( partition ) <nl> - <nl> - def add_child ( self , partition : ' Partition ' ) - > None : <nl> - self . children . add ( partition ) <nl> + self . used_mem_bytes : int = 0 <nl> + self . logical_device_ids : List [ int ] = [ ] <nl> <nl> def __str__ ( self ) : <nl> return str ( self . partition_id ) <nl> <nl> + def recalculate_mem_size ( self ) : <nl> + self . used_mem_bytes = 0 <nl> + for node in self . nodes : <nl> + self . used_mem_bytes + = get_extra_size_of ( node , self . nodes ) <nl> + <nl> class PartitionResult ( NamedTuple ) : <nl> " " " NameTuple used for returning DAG and a new graph module <nl> " " " <nl> class PartitionResult ( NamedTuple ) : <nl> <nl> class Device ( NamedTuple ) : <nl> name : str <nl> - available_mem_bytes : Union [ float , int ] <nl> + available_mem_bytes : int <nl> + logical_id : int <nl> + <nl> + class PartitionerConfig ( NamedTuple ) : <nl> + devices : List [ Device ] <nl> + is_sparse_nn : bool = False <nl> + <nl> + def get_extra_size_of ( node : Node , nodes : Set [ Node ] ) - > int : <nl> + " " " Given a node and a set of nodes , <nl> + this function return the extra size that needed <nl> + if this node is included in this set . <nl> + " " " <nl> + # Find all its input nodes <nl> + input_nodes : Dict [ Node , None ] = { } <nl> + map_arg ( node . args , lambda n : input_nodes . setdefault ( n ) ) <nl> + map_arg ( node . kwargs , lambda n : input_nodes . setdefault ( n ) ) <nl> + # Calculate total size of related nodes <nl> + total_size_of_input_nodes = 0 <nl> + for n in input_nodes : <nl> + # Make sure this node hasn ' t been in this set yet <nl> + if n not in nodes : <nl> + size_bytes = getattr ( n , ' size_bytes ' , None ) <nl> + if size_bytes : <nl> + total_size_of_input_nodes + = size_bytes . output_size <nl> + else : <nl> + raise RuntimeError ( ' node has no size_bytes attr ' ) <nl> + # Don ' t forget the op node itself <nl> + size_bytes = getattr ( node , ' size_bytes ' , None ) <nl> + if size_bytes : <nl> + total_size_of_input_nodes + = size_bytes . 
total_size <nl> + else : <nl> + raise RuntimeError ( ' node has no size_bytes attr ' ) <nl> + return total_size_of_input_nodes <nl> <nl> class Partitioner : <nl> " " " A graph module may not fit into one device . <nl> Partitioner class helps cut one graph into subgraphs ( partitions ) , <nl> so that each partition could fit into a different device . <nl> The main function of this class is self . partition_graph . <nl> - For self . partition_graph , first , it checks the size of the whole graph <nl> - and see if the whole graph can fit into one device . <nl> - If it does , it goes to self . find_single_partition <nl> - If the whole graph is even larger than the combined memory size of all devices , <nl> - a RuntimeError is raised . <nl> - If the whole graph cannot fit into one devices but <nl> - could be split into multiple devices , it goes to self . size_based_partition . <nl> - After the size_based_partition , it checks if the number of partitions exceeds <nl> - the number of devices . If it does , a RuntimeError is raised . <nl> - Otherwise , a DAG structure is returned <nl> + It will partition the graph based on the scheme specified in partition_config <nl> + A DAG structure is returned <nl> along with a new graph module with partitions as submodule nodes . <nl> " " " <nl> def __init__ ( self ) - > None : <nl> - self . partitions : Set [ Partition ] = set ( ) <nl> + self . partitions : List [ Partition ] = [ ] <nl> + self . node_to_partitions : Dict [ Node , int ] = { } <nl> self . devices : List [ Device ] = [ ] <nl> - self . node_to_partitions : Dict [ Node , List [ int ] ] = { } <nl> - self . partition_to_used_mem_bytes : Dict [ Partition , int ] = { } <nl> <nl> def partition_graph ( <nl> self , <nl> fx_module : GraphModule , <nl> torch_module : torch . nn . Module , <nl> - devices : List [ Device ] <nl> + partitioner_config : PartitionerConfig <nl> ) - > PartitionResult : <nl> " " " <nl> - Given the fx module , torch module and devices , <nl> + Given the fx module , torch module and partitioner_config , <nl> find the partitions , do the partitions , <nl> and then return a DAG and a new fx module with submodule nodes ( partitions ) <nl> " " " <nl> self . graph_module = fx_module <nl> - self . devices = devices <nl> self . torch_module = torch_module <nl> + self . devices = partitioner_config . devices <nl> if len ( self . devices ) = = 0 : <nl> raise RuntimeError ( ' No devices ' ) <nl> available_mem_bytes = self . devices [ 0 ] . available_mem_bytes <nl> def partition_graph ( <nl> break <nl> total_size_of_graph + = node . size_bytes . total_size <nl> if total_size_of_graph < = available_mem_bytes : <nl> - self . find_single_partition ( ) <nl> + self . find_single_partition ( total_size_of_graph ) <nl> elif total_size_of_graph > len ( self . devices ) * available_mem_bytes : <nl> raise RuntimeError ( ' Devices have no enough memory for the module ' ) <nl> else : <nl> if not all ( device . available_mem_bytes = = available_mem_bytes for device in self . devices ) : <nl> raise RuntimeError ( ' All devices must have same memory size ! ' ) <nl> - self . size_based_partition ( available_mem_bytes ) <nl> - # Check if enought devices are provided for all partitions <nl> - if len ( self . partitions ) > len ( self . devices ) : <nl> - raise RuntimeError ( ' Lack of Devices ' ) <nl> + if partitioner_config . is_sparse_nn : <nl> + self . sparse_nn_partition ( available_mem_bytes ) <nl> + else : <nl> + self . size_based_partition ( available_mem_bytes ) <nl> module_with_submodules = self . 
do_partition ( ) <nl> # The DAG contains DAGNodes with info of each partition ' s input nodes , output nodes <nl> # and how partitions are connected . <nl> - dag = self . dump_partition_DAG ( module_with_submodules ) <nl> + dag = self . dump_dag ( module_with_submodules ) <nl> ret = PartitionResult ( dag , module_with_submodules ) <nl> return ret <nl> <nl> - def find_single_partition ( self ) - > None : <nl> + def find_single_partition ( self , total_size_of_graph ) - > None : <nl> " " " Only one partition ( one graph on one device ) . " " " <nl> partition_0 = self . create_partition ( ) <nl> for node in self . graph_module . graph . nodes : <nl> if node . op = = ' output ' : <nl> break <nl> - self . node_to_partitions [ node ] = [ partition_0 . partition_id ] <nl> - partition_0 . add_node ( node ) <nl> + self . node_to_partitions [ node ] = partition_0 . partition_id <nl> + partition_0 . nodes . add ( node ) <nl> + partition_0 . used_mem_bytes = total_size_of_graph <nl> + partition_0 . logical_device_ids = [ self . devices [ 0 ] . logical_id ] <nl> return <nl> <nl> - def size_based_partition ( self , available_mem_bytes : Union [ float , int ] ) - > None : <nl> + def size_based_partition ( self , available_mem_bytes : int ) - > None : <nl> " " " This method partitions the graph based on memory size . <nl> We assume all devices have the same memory size . <nl> The basic idea is : <nl> def size_based_partition ( self , available_mem_bytes : Union [ float , int ] ) - > None : <nl> Then through self . combine_partition_based_on_size ( ) , <nl> partitions will be combined to keep <nl> as less partitions as possible . <nl> - self . check_partition_dependecy checks if the combination of <nl> - partitions leads to a circular dependency <nl> " " " <nl> # Create the first partition <nl> partition = self . create_partition ( ) <nl> # Track the used mem for the current partition <nl> - used_mem_bytes = 0 <nl> for node in self . graph_module . graph . nodes : <nl> if node . op in { ' call_module ' , ' call_method ' , ' call_function ' } : <nl> - # Find all its input nodes <nl> - input_nodes : Dict [ Node , None ] = { } <nl> - map_arg ( node . args , lambda n : input_nodes . setdefault ( n ) ) <nl> - map_arg ( node . kwargs , lambda n : input_nodes . setdefault ( n ) ) <nl> - # Calculate total size of related nodes <nl> - total_size_of_input_nodes = 0 <nl> - for n in input_nodes : <nl> - # Make sure this node hasn ' t been in this partition yet <nl> - if n not in partition . nodes : <nl> - size_bytes = getattr ( n , ' size_bytes ' , None ) <nl> - if size_bytes : <nl> - total_size_of_input_nodes + = size_bytes . output_size <nl> - else : <nl> - raise RuntimeError ( ' node has no size_bytes attr ' ) <nl> - # Don ' t forget the op node itself <nl> - size_bytes = getattr ( node , ' size_bytes ' , None ) <nl> - if size_bytes : <nl> - total_size_of_input_nodes + = size_bytes . total_size <nl> - else : <nl> - raise RuntimeError ( ' node has no size_bytes attr ' ) <nl> + total_size_of_input_nodes = get_extra_size_of ( node , partition . nodes ) <nl> # The current node with its inputs cannot fit into the current partition <nl> - if used_mem_bytes + total_size_of_input_nodes > available_mem_bytes : <nl> - self . partition_to_used_mem_bytes [ partition ] = used_mem_bytes <nl> + if total_size_of_input_nodes + partition . used_mem_bytes > available_mem_bytes : <nl> partition = self . create_partition ( ) <nl> - used_mem_bytes = 0 <nl> + total_size_of_input_nodes = get_extra_size_of ( node , partition . 
nodes ) <nl> # The current node may be too large to fit into a whole new partition <nl> - if total_size_of_input_nodes > available_mem_bytes : <nl> + if total_size_of_input_nodes + partition . used_mem_bytes > available_mem_bytes : <nl> raise RuntimeError ( node . target + ' is too large to fit into a device ' ) <nl> # Add the current node into the current partition <nl> - partition . add_node ( node ) <nl> - # Add all input nodes if they are placeholders or constants <nl> - for n in input_nodes : <nl> - if ( n not in partition . nodes ) and ( n . op in { ' placeholder ' , ' get_attr ' } ) : <nl> - partition . add_node ( n ) <nl> - used_mem_bytes = used_mem_bytes + total_size_of_input_nodes <nl> - # Update used mem mapping for the last partition <nl> - self . partition_to_used_mem_bytes [ partition ] = used_mem_bytes <nl> + partition . nodes . add ( node ) <nl> + partition . used_mem_bytes + = total_size_of_input_nodes <nl> # Find parent partitions and child partitions for each partition . <nl> self . set_parents_and_children ( ) <nl> # Combine small partitions <nl> - self . combine_partitions_based_on_size ( available_mem_bytes ) <nl> + self . combine_partitions_based_on_size ( self . partitions [ : ] , available_mem_bytes ) <nl> # Reassign partition ids and update self . node_to_partitions . <nl> self . reorganize_partitions ( ) <nl> + # Check if devices are enough for all partitions <nl> + if len ( self . partitions ) > len ( self . devices ) : <nl> + msg = ' Need ' + str ( len ( self . partitions ) ) + ' devices , but only ' \ <nl> + + str ( len ( self . devices ) ) + ' provided ' <nl> + raise RuntimeError ( msg ) <nl> + for i , partition in enumerate ( self . partitions ) : <nl> + partition . logical_device_ids = [ self . devices [ i ] . logical_id ] <nl> return <nl> <nl> def do_partition ( self ) - > GraphModule : <nl> " " " Return a module with submodules ( partitions ) . " " " <nl> - for node in self . graph_module . graph . nodes : <nl> - if node . op = = ' output ' : <nl> - break <nl> module_with_submodules = split_module ( <nl> self . graph_module , <nl> self . torch_module , <nl> - lambda node : self . node_to_partitions [ node ] [ 0 ] <nl> + lambda node : self . node_to_partitions [ node ] <nl> ) <nl> return module_with_submodules <nl> <nl> - def dump_partition_DAG ( self , module_with_submodules : GraphModule ) - > DAG : <nl> + def dump_dag ( self , module_with_submodules : GraphModule ) - > DAG : <nl> dag = DAG ( ) <nl> for node in module_with_submodules . graph . nodes : <nl> if node . op = = ' output ' : <nl> def dump_partition_DAG ( self , module_with_submodules : GraphModule ) - > DAG : <nl> output_nodes = list ( node . users ) <nl> else : <nl> output_nodes = [ node ] <nl> - dag . create_node ( node , list ( input_nodes ) , output_nodes ) <nl> + partition_id = int ( node . name . rsplit ( ' _ ' , 1 ) [ - 1 ] ) <nl> + device_ids = self . partitions [ partition_id ] . logical_device_ids <nl> + size_bytes = self . partitions [ partition_id ] . used_mem_bytes <nl> + dag . create_node ( node , list ( input_nodes ) , output_nodes , device_ids , size_bytes ) <nl> return dag <nl> <nl> def create_partition ( self ) - > Partition : <nl> " " " Create a partition and append it to self . partitions . " " " <nl> partition_id = len ( self . partitions ) <nl> - assert isinstance ( self . graph_module , GraphModule ) <nl> - partition = Partition ( partition_id , self . graph_module ) <nl> - self . partitions . add ( partition ) <nl> + partition = Partition ( partition_id ) <nl> + self . 
partitions . append ( partition ) <nl> return partition <nl> <nl> - def combine_partitions_based_on_size ( self , available_mem_bytes ) - > None : <nl> + def combine_partitions_based_on_size ( <nl> + self , <nl> + partitions : List [ Partition ] , <nl> + available_mem_bytes : int <nl> + ) - > None : <nl> " " " Combining small partitions together to keep as less partitions as possible . <nl> Here is an example of the algorithm to do this : <nl> Assume some partitions , we first sort them based on partiiton used memory size . <nl> def combine_partitions_based_on_size ( self , available_mem_bytes ) - > None : <nl> find_combination = True <nl> while find_combination : <nl> # Sort partitions based on memory size <nl> - sorted_partitions = sorted ( self . partition_to_used_mem_bytes . items ( ) , key = lambda item : item [ 1 ] ) <nl> + sorted_partitions = sorted ( partitions , key = lambda p : p . used_mem_bytes ) <nl> # Mark bfs level <nl> self . get_bfs_level_partition ( ) <nl> - find_combination = self . find_partition_to_combine_based_on_size ( sorted_partitions , available_mem_bytes ) <nl> + find_combination , partitions = \ <nl> + self . find_partition_to_combine_based_on_size ( <nl> + sorted_partitions , <nl> + available_mem_bytes , <nl> + partitions <nl> + ) <nl> return <nl> <nl> def find_partition_to_combine_based_on_size ( <nl> self , <nl> - sorted_partitions : List [ Tuple [ Partition , int ] ] , <nl> - available_mem_bytes : int <nl> - ) - > bool : <nl> + sorted_partitions : List [ Partition ] , <nl> + available_mem_bytes : int , <nl> + partitions : List [ Partition ] <nl> + ) - > Tuple [ bool , List [ Partition ] ] : <nl> " " " step 1 in self . combine_partition_based_on_size ( ) " " " <nl> + <nl> + def calculate_mem_bytes_needed ( p1 , p2 ) : <nl> + nodes = p1 . nodes . union ( p2 . nodes ) <nl> + mem_bytes_needed = 0 <nl> + for node in nodes : <nl> + mem_bytes_needed + = get_extra_size_of ( node , nodes ) <nl> + return mem_bytes_needed <nl> + <nl> find_combination = False <nl> - smallest_partition = sorted_partitions . pop ( 0 ) [ 0 ] <nl> - left_mem = available_mem_bytes - self . partition_to_used_mem_bytes [ smallest_partition ] <nl> - for t in sorted_partitions [ : : - 1 ] : <nl> - if t [ 1 ] < = left_mem and abs ( smallest_partition . bfs_level - t [ 0 ] . bfs_level ) < = 1 : <nl> - self . combine_two_partitions ( t [ 0 ] , smallest_partition ) <nl> - find_combination = True <nl> - break <nl> - return find_combination <nl> + smallest_partition = sorted_partitions . pop ( 0 ) <nl> + for p in sorted_partitions [ : : - 1 ] : <nl> + if abs ( smallest_partition . bfs_level - p . bfs_level ) < = 1 : <nl> + # Calculate how many bytes needed if combined <nl> + mem_bytes_needed = calculate_mem_bytes_needed ( p , smallest_partition ) <nl> + if mem_bytes_needed < = available_mem_bytes : <nl> + self . combine_two_partitions ( p , smallest_partition ) <nl> + partitions . remove ( smallest_partition ) <nl> + partitions . remove ( p ) <nl> + partitions . append ( self . partitions [ - 1 ] ) <nl> + find_combination = True <nl> + break <nl> + return find_combination , partitions <nl> <nl> - def combine_two_partitions ( self , partition_0 : Partition , partition_1 : Partition ) - > None : <nl> + def combine_two_partitions ( <nl> + self , <nl> + partition_0 : Partition , <nl> + partition_1 : Partition , <nl> + ) - > None : <nl> " " " Given two partitions , combine them into a new one <nl> - and remove the previous two partitions <nl> + and remove the previous two partitions from self . 
partitions <nl> " " " <nl> partition = self . create_partition ( ) <nl> partition . nodes = partition_0 . nodes . union ( partition_1 . nodes ) <nl> partition . parents = partition_0 . parents . union ( partition_1 . parents ) <nl> partition . children = partition_0 . children . union ( partition_1 . children ) <nl> + partition . recalculate_mem_size ( ) <nl> partition . bfs_level = max ( partition_0 . bfs_level , partition_1 . bfs_level ) <nl> if partition_0 in partition . children : <nl> partition . children . remove ( partition_0 ) <nl> def combine_two_partitions ( self , partition_0 : Partition , partition_1 : Partition ) <nl> partition . children . remove ( partition_1 ) <nl> if partition_1 in partition . parents : <nl> partition . parents . remove ( partition_1 ) <nl> - self . partition_to_used_mem_bytes [ partition ] = self . partition_to_used_mem_bytes [ partition_0 ] + \ <nl> - self . partition_to_used_mem_bytes [ partition_1 ] <nl> - del self . partition_to_used_mem_bytes [ partition_0 ] <nl> - del self . partition_to_used_mem_bytes [ partition_1 ] <nl> # Replace partition_0 and partition_1 with the new partition in children and parents <nl> for p in partition . parents : <nl> if partition_0 in p . children : <nl> def set_parents_and_children ( self ) - > None : <nl> for p in self . partitions : <nl> if p ! = partition and n in p . nodes and node not in p . nodes : <nl> if p not in partition . children : <nl> - partition . add_child ( p ) <nl> + partition . children . add ( p ) <nl> if partition not in p . parents : <nl> - p . add_parent ( partition ) <nl> + p . parents . add ( partition ) <nl> return <nl> <nl> def reorganize_partitions ( self ) - > None : <nl> def reorganize_partitions ( self ) - > None : <nl> # Update self . node_to_partitions accordingly <nl> for partition in self . partitions : <nl> for node in partition . nodes : <nl> - if node not in self . node_to_partitions : <nl> - self . node_to_partitions [ node ] = [ partition . partition_id ] <nl> - else : <nl> - self . node_to_partitions [ node ] . append ( partition . partition_id ) <nl> + self . node_to_partitions [ node ] = partition . partition_id <nl> return <nl> <nl> def get_bfs_level_partition ( self ) - > None : <nl> def get_bfs_level_partition ( self ) - > None : <nl> next_level = set ( ) <nl> level + = 1 <nl> return <nl> + <nl> + def sparse_nn_partition ( self , available_mem_bytes : int ) - > None : <nl> + " " " This method partition a sparse nn module . <nl> + It first traverse all the nodes and do the partitions based on memory size . <nl> + If the current partition has no enough memory left for a new op node <nl> + ( call_module , call_method , call_function ) , a new partition is created . <nl> + Different for size_based_partition , when traversing cross the boundary between <nl> + non - embedding nodes and embedding nodes , a new partition is created regardlessly . <nl> + For example , if the current node is a non - embedding node but the next node is an <nl> + embedding node , a new partition is created for the next node . <nl> + After the partition , the partitions are combined as much as possible . <nl> + The rule is that a non - embedding partition only <nl> + combines with another non - embedding one . <nl> + So as the embedding partitions . <nl> + " " " <nl> + def reset_partition_in_sparse_nn ( partition , new_partition = True ) : <nl> + if in_embedding_region : <nl> + embedding_partitions . append ( partition ) <nl> + else : <nl> + non_embedding_partitions . 
append ( partition ) <nl> + if new_partition : <nl> + partition = self . create_partition ( ) <nl> + partition . left_mem_bytes = available_mem_bytes <nl> + return partition <nl> + return None <nl> + <nl> + def is_embedding_node ( node : Node ) - > bool : <nl> + " " " Check if a node is an embedding node " " " <nl> + if node . op = = ' call_module ' : <nl> + submodule = self . graph_module <nl> + for atom in str ( node . target ) . split ( ' . ' ) : <nl> + if not hasattr ( submodule , atom ) : <nl> + raise RuntimeError ( f ' Module { submodule } has no attribute { atom } ' ) <nl> + submodule = getattr ( submodule , atom ) <nl> + if ' Embedding ' in str ( submodule ) : <nl> + return True <nl> + return False <nl> + <nl> + # Track embedding partitons and non - embedding partitions separately <nl> + embedding_partitions : List [ Partition ] = [ ] <nl> + non_embedding_partitions : List [ Partition ] = [ ] <nl> + # A Flag to check the boundary <nl> + in_embedding_region : bool = False <nl> + partition = self . create_partition ( ) <nl> + for node in self . graph_module . graph . nodes : <nl> + if node . op in { ' call_module ' , ' call_method ' , ' call_function ' } : <nl> + # Check if crossing the boundary between embedding nodes and non embedding nodes <nl> + if is_embedding_node ( node ) ! = in_embedding_region : <nl> + # Crossing the boundary <nl> + # Check if the current partition is an empty partition <nl> + if partition . used_mem_bytes ! = 0 : <nl> + # The current partition isn ' t an empty partition . Create a new one . <nl> + partition = reset_partition_in_sparse_nn ( partition ) <nl> + in_embedding_region = not in_embedding_region <nl> + total_size_of_input_nodes = get_extra_size_of ( node , partition . nodes ) <nl> + if total_size_of_input_nodes + partition . used_mem_bytes > available_mem_bytes : <nl> + partition = reset_partition_in_sparse_nn ( partition ) <nl> + total_size_of_input_nodes = get_extra_size_of ( node , partition . nodes ) <nl> + if total_size_of_input_nodes > available_mem_bytes : <nl> + raise RuntimeError ( node . target + ' is too large to fit into a device ' ) <nl> + partition . nodes . add ( node ) <nl> + partition . used_mem_bytes + = total_size_of_input_nodes <nl> + reset_partition_in_sparse_nn ( partition , new_partition = False ) <nl> + # Set parents and children for each partition <nl> + self . set_parents_and_children ( ) <nl> + # Combining non - embedding partitions <nl> + self . combine_partitions_based_on_size ( non_embedding_partitions , available_mem_bytes ) <nl> + # Combining embedding partitions <nl> + self . combine_partitions_based_on_size ( embedding_partitions , available_mem_bytes ) <nl> + self . reorganize_partitions ( ) <nl> + total_size_of_non_embedding_partitions = 0 <nl> + for partition in non_embedding_partitions : <nl> + total_size_of_non_embedding_partitions + = partition . used_mem_bytes <nl> + # Check if devices are enough for all partitions <nl> + if len ( embedding_partitions ) > len ( self . devices ) : <nl> + msg = ' Need ' + str ( len ( embedding_partitions ) ) + ' devices , but only ' \ <nl> + + str ( len ( self . devices ) ) + ' provided ' <nl> + raise RuntimeError ( msg ) <nl> + occupied_devices = [ ] <nl> + for i , partition in enumerate ( embedding_partitions ) : <nl> + # Check if all non - embedding partitions can fit into embedding partition devices <nl> + if total_size_of_non_embedding_partitions + partition . 
used_mem_bytes > available_mem_bytes : <nl> + raise RuntimeError ( <nl> + ' partition_ ' + <nl> + str ( partition . partition_id ) + <nl> + ' ( embedding partition ) and non embedding partitions can not fit into one device ' <nl> + ) <nl> + else : <nl> + # Add logical device to the partition <nl> + partition . logical_device_ids = [ self . devices [ i ] . logical_id ] <nl> + occupied_devices . append ( self . devices [ i ] . logical_id ) <nl> + # Add logical devices to the non_embedding_partitions <nl> + for partition in non_embedding_partitions : <nl> + partition . logical_device_ids = occupied_devices <nl> + return <nl> mmm a / torch / fx / experimental / subgraph_creation_example . py <nl> ppp b / torch / fx / experimental / subgraph_creation_example . py <nl> def record_cross_partition_use ( def_node : torch . fx . node . Node , use_node : Optiona <nl> if not hasattr ( target_attr , atom ) : <nl> raise RuntimeError ( f ' Operator target { node . target } not found ! ' ) <nl> target_attr = getattr ( target_attr , atom ) <nl> - partition . targets [ node . target ] = target_attr <nl> target = target_atoms [ - 1 ] <nl> + partition . targets [ target ] = target_attr <nl> <nl> assert isinstance ( gathered_args , tuple ) <nl> assert isinstance ( gathered_kwargs , dict ) <nl>
add sparse_nn_partition ( )
pytorch/pytorch
8640905088a002dbf149b59333a190c8aed2f5e5
2020-10-27T07:11:58Z
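The size_based_partition scheme above is greedy: visit op nodes in order, charge each node (plus any of its inputs not yet in the current partition) against a fixed per-device budget, and open a new partition whenever the budget would overflow. A simplified, runnable sketch with plain integer sizes in place of the fx size_bytes metadata; the real code also recomputes a node's extra cost against the fresh partition and tracks parent/child edges, which is omitted here:

def size_based_partition(node_sizes, available_mem_bytes):
    partitions = [[]]
    used = 0
    for name, size in node_sizes:
        if size > available_mem_bytes:
            raise RuntimeError(name + " is too large to fit into a device")
        if used + size > available_mem_bytes:  # budget would overflow
            partitions.append([])              # open a new partition
            used = 0
        partitions[-1].append(name)
        used += size
    return partitions

nodes = [("linear_1", 60), ("relu_1", 10), ("linear_2", 70), ("cat", 30)]
print(size_based_partition(nodes, 100))
# [['linear_1', 'relu_1'], ['linear_2', 'cat']]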
mmm a / lib / Sema / TypeCheckEffects . cpp <nl> ppp b / lib / Sema / TypeCheckEffects . cpp <nl> class Context { <nl> / / / A non - throwing function . <nl> NonThrowingFunction , <nl> <nl> - / / / A rethrowing function . <nl> - RethrowingFunction , <nl> - <nl> / / / A default argument expression . <nl> DefaultArgument , <nl> <nl> class Context { <nl> Optional < AnyFunctionRef > Function ; <nl> bool IsNonExhaustiveCatch = false ; <nl> bool DiagnoseErrorOnTry = false ; <nl> - DeclContext * RethrowsDC = nullptr ; <nl> InterpolatedStringLiteralExpr * InterpolatedString = nullptr ; <nl> <nl> explicit Context ( Kind kind , Optional < AnyFunctionRef > function = None ) <nl> : TheKind ( kind ) , Function ( function ) { } <nl> <nl> + public : <nl> + / / / Whether this is a function that rethrows . <nl> + bool isRethrows ( ) const { <nl> + if ( getKind ( ) ! = Kind : : Handled ) <nl> + return false ; <nl> + <nl> + if ( ! Function ) <nl> + return false ; <nl> + <nl> + auto fn = Function - > getAbstractFunctionDecl ( ) ; <nl> + if ( ! fn ) <nl> + return false ; <nl> + <nl> + return fn - > getAttrs ( ) . hasAttribute < RethrowsAttr > ( ) ; <nl> + } <nl> + <nl> / / / Whether this is an autoclosure . <nl> bool isAutoClosure ( ) const { <nl> if ( ! Function ) <nl> class Context { <nl> return isa < AutoClosureExpr > ( closure ) ; <nl> } <nl> <nl> - public : <nl> static Context getHandled ( ) { <nl> return Context ( Kind : : Handled ) ; <nl> } <nl> class Context { <nl> } <nl> <nl> static Context forFunction ( AbstractFunctionDecl * D ) { <nl> - if ( D - > getAttrs ( ) . hasAttribute < RethrowsAttr > ( ) ) { <nl> - Context result ( Kind : : RethrowingFunction , AnyFunctionRef ( D ) ) ; <nl> - result . RethrowsDC = D ; <nl> - return result ; <nl> - } <nl> - <nl> / / HACK : If the decl is the synthesized getter for a ' lazy ' property , then <nl> / / treat the context as a property initializer in order to produce a better <nl> / / diagnostic ; the only code we should be diagnosing on is within the <nl> class Context { <nl> Kind getKind ( ) const { return TheKind ; } <nl> <nl> bool handlesNothing ( ) const { <nl> - return getKind ( ) ! = Kind : : Handled & & <nl> - getKind ( ) ! = Kind : : RethrowingFunction ; <nl> + return getKind ( ) ! = Kind : : Handled ; <nl> } <nl> bool handles ( ThrowingKind errorKind ) const { <nl> switch ( errorKind ) { <nl> class Context { <nl> <nl> / / A call that ' s rethrowing - only can be handled by ' rethrows ' . <nl> case ThrowingKind : : RethrowingOnly : <nl> - return ! handlesNothing ( ) ; <nl> + return getKind ( ) = = Kind : : Handled ; <nl> <nl> / / An operation that always throws can only be handled by an <nl> / / all - handling context . <nl> case ThrowingKind : : Throws : <nl> - return getKind ( ) = = Kind : : Handled ; <nl> + return getKind ( ) = = Kind : : Handled & & ! isRethrows ( ) ; <nl> } <nl> llvm_unreachable ( " bad error kind " ) ; <nl> } <nl> <nl> - DeclContext * getRethrowsDC ( ) const { return RethrowsDC ; } <nl> + DeclContext * getRethrowsDC ( ) const { <nl> + if ( ! isRethrows ( ) ) <nl> + return nullptr ; <nl> + <nl> + return Function - > getAbstractFunctionDecl ( ) ; <nl> + } <nl> + <nl> InterpolatedStringLiteralExpr * getInterpolatedString ( ) const { <nl> return InterpolatedString ; <nl> } <nl> class Context { <nl> / / Allow the diagnostic to fire on the ' try ' if we don ' t have <nl> / / anything else to say . <nl> if ( isTryCovered & & ! reason . isRethrowsCall ( ) & & <nl> - getKind ( ) = = Kind : : NonThrowingFunction & & <nl> - ! 
isAutoClosure ( ) ) { <nl> + ! isRethrows ( ) & & ! isAutoClosure ( ) ) { <nl> DiagnoseErrorOnTry = true ; <nl> return ; <nl> } <nl> class Context { <nl> const PotentialThrowReason & reason ) { <nl> switch ( getKind ( ) ) { <nl> case Kind : : Handled : <nl> - llvm_unreachable ( " throw site is handled ! " ) ; <nl> - <nl> - / / TODO : Doug suggested that we could generate one error per <nl> - / / non - throwing function with throw sites within it , possibly with <nl> - / / notes for the throw sites . <nl> + if ( isRethrows ( ) ) { <nl> + diagnoseThrowInLegalContext ( Diags , E , isTryCovered , reason , <nl> + diag : : throw_in_rethrows_function , <nl> + diag : : throwing_call_in_rethrows_function , <nl> + diag : : tryless_throwing_call_in_rethrows_function ) ; <nl> + return ; <nl> + } <nl> <nl> - case Kind : : RethrowingFunction : <nl> - diagnoseThrowInLegalContext ( Diags , E , isTryCovered , reason , <nl> - diag : : throw_in_rethrows_function , <nl> - diag : : throwing_call_in_rethrows_function , <nl> - diag : : tryless_throwing_call_in_rethrows_function ) ; <nl> - return ; <nl> + llvm_unreachable ( " throw site is handled ! " ) ; <nl> <nl> case Kind : : NonThrowingFunction : <nl> if ( IsNonExhaustiveCatch ) { <nl> class Context { <nl> void diagnoseUnhandledTry ( DiagnosticEngine & Diags , TryExpr * E ) { <nl> switch ( getKind ( ) ) { <nl> case Kind : : Handled : <nl> - case Kind : : RethrowingFunction : <nl> llvm_unreachable ( " try is handled ! " ) ; <nl> <nl> case Kind : : NonThrowingFunction : <nl> class CheckEffectsCoverage : public EffectsHandlingWalker < CheckEffectsCoverage > <nl> <nl> auto savedContext = CurContext ; <nl> if ( doThrowingKind ! = ThrowingKind : : Throws & & <nl> - CurContext . getKind ( ) = = Context : : Kind : : RethrowingFunction ) { <nl> + CurContext . isRethrows ( ) ) { <nl> / / If this catch clause is reachable at all , it ' s because a function <nl> / / parameter throws . So let ' s temporarily set our context to Handled so <nl> / / the catch body is allowed to throw . <nl>
[ Effects handling ] Eliminate Context : : Kind : : RethrowingFunction .
apple/swift
524887bb009735c3c5889b7eba16706c6d387df3
2020-08-11T04:27:56Z
mmm a / hphp / runtime / base / mixed - array - defs . h <nl> ppp b / hphp / runtime / base / mixed - array - defs . h <nl> void MixedArray : : InitSmall ( MixedArray * a , RefCount count , uint32_t size , <nl> : : " r " ( a ) : " xmm0 " <nl> ) ; <nl> # else <nl> - auto const hash = a - > hashTab ( ) ; <nl> + auto const hash = mixedHash ( mixedData ( a ) , MixedArray : : SmallScale ) ; <nl> auto const emptyVal = int64_t { MixedArray : : Empty } ; <nl> reinterpret_cast < int64_t * > ( hash ) [ 0 ] = emptyVal ; <nl> reinterpret_cast < int64_t * > ( hash ) [ 1 ] = emptyVal ; <nl>
Fix MixedArray : : InitSmall on platforms not using the inline ASM
facebook/hhvm
8594316a782da257793f864a48726ea2f41efd1e
2015-09-12T00:00:41Z
mmm a / samples / TestJavascript / proj . android / build_native . sh <nl> ppp b / samples / TestJavascript / proj . android / build_native . sh <nl> set - x <nl> <nl> " $ NDK_ROOT " / ndk - build $ PARALLEL_BUILD_FLAG - C " $ APP_ANDROID_ROOT " $ * \ <nl> " NDK_MODULE_PATH = $ { COCOS2DX_ROOT } : $ { COCOS2DX_ROOT } / cocos2dx / platform / third_party / android / prebuilt " \ <nl> - NDK_LOG = 1 V = 1 <nl> + NDK_LOG = 0 V = 0 <nl> mmm a / samples / TestJavascript / proj . android / jni / Application . mk <nl> ppp b / samples / TestJavascript / proj . android / jni / Application . mk <nl> <nl> APP_STL : = gnustl_static <nl> APP_CPPFLAGS : = - frtti - DCOCOS2D_JAVASCRIPT = 1 <nl> - APP_CPPFLAGS + = - DCOCOS2D_DEBUG = 2 <nl> + APP_CPPFLAGS + = - DCOCOS2D_DEBUG = 1 - DCC_ENABLE_CHIPMUNK_INTEGRATION = 1 <nl> mmm a / scripting / javascript / bindings / Android . mk <nl> ppp b / scripting / javascript / bindings / Android . mk <nl> LOCAL_MODULE_FILENAME : = libscriptingcore - spidermonkey <nl> <nl> LOCAL_SRC_FILES : = ScriptingCore . cpp \ <nl> cocos2d_specifics . cpp \ <nl> - CCPhysicsSprite . cpp \ <nl> js_manual_conversions . cpp \ <nl> cocosjs_manual_conversions . cpp \ <nl> js_bindings_chipmunk_manual . cpp \ <nl>
issue : Closed verbose log of TestJavascript for Android and made it compile OK .
cocos2d/cocos2d-x
1ce1b4b471dd73096fcbb39cb1129d773fe281f8
2012-11-22T02:47:57Z
mmm a / html / admin / css / graphlayout . css <nl> ppp b / html / admin / css / graphlayout . css <nl> button . graphViewer - icon - button > img { <nl> width : 290px ; <nl> } <nl> <nl> - img . gv - icon - btn { <nl> + img . gv - icon - small { <nl> + width : 16px ; <nl> + height : 16px ; <nl> + } <nl> + <nl> + img . gv - icon - small . delete { <nl> + content : url ( " . . / img / icon_delete . png " ) ; <nl> + } <nl> + <nl> + img . gv - icon - small . add { <nl> + content : url ( " . . / img / plus_icon . png " ) ; <nl> + } <nl> + <nl> + <nl> + img . gv - icon - btn { <nl> width : 36px ; <nl> height : 36px ; <nl> } <nl> mmm a / html / admin / js / graphViewer / jasmine_test / specAdapter / arangoAdapterUISpec . js <nl> ppp b / html / admin / js / graphViewer / jasmine_test / specAdapter / arangoAdapterUISpec . js <nl> <nl> <nl> it ( ' should add many new lines to priority on demand ' , function ( ) { <nl> runs ( function ( ) { <nl> - helper . simulateMouseEvent ( " click " , " control_adapter_priority_attribute_addLine " ) ; <nl> - helper . simulateMouseEvent ( " click " , " control_adapter_priority_attribute_addLine " ) ; <nl> - helper . simulateMouseEvent ( " click " , " control_adapter_priority_attribute_addLine " ) ; <nl> - helper . simulateMouseEvent ( " click " , " control_adapter_priority_attribute_addLine " ) ; <nl> - expect ( $ ( " # control_adapter_priority_attribute_1 " ) . length ) . toEqual ( 1 ) ; <nl> - expect ( $ ( " # control_adapter_priority_attribute_2 " ) . length ) . toEqual ( 1 ) ; <nl> - expect ( $ ( " # control_adapter_priority_attribute_3 " ) . length ) . toEqual ( 1 ) ; <nl> - expect ( $ ( " # control_adapter_priority_attribute_4 " ) . length ) . toEqual ( 1 ) ; <nl> - expect ( $ ( " # control_adapter_priority_attribute_5 " ) . length ) . toEqual ( 1 ) ; <nl> - expect ( $ ( " # control_adapter_priority_attribute_addLine " ) . length ) . toEqual ( 1 ) ; <nl> - $ ( " # control_adapter_priority_attribute_1 " ) . attr ( " value " , " foo " ) ; <nl> - $ ( " # control_adapter_priority_attribute_2 " ) . attr ( " value " , " bar " ) ; <nl> - $ ( " # control_adapter_priority_attribute_3 " ) . attr ( " value " , " " ) ; <nl> - $ ( " # control_adapter_priority_attribute_4 " ) . attr ( " value " , " baz " ) ; <nl> - $ ( " # control_adapter_priority_attribute_5 " ) . attr ( " value " , " foxx " ) ; <nl> + var idPrefix = " control_adapter_priority_attribute_ " ; <nl> + helper . simulateMouseEvent ( " click " , idPrefix + " addLine " ) ; <nl> + helper . simulateMouseEvent ( " click " , idPrefix + " addLine " ) ; <nl> + helper . simulateMouseEvent ( " click " , idPrefix + " addLine " ) ; <nl> + helper . simulateMouseEvent ( " click " , idPrefix + " addLine " ) ; <nl> + expect ( $ ( " # " + idPrefix + " 1 " ) . length ) . toEqual ( 1 ) ; <nl> + expect ( $ ( " # " + idPrefix + " 2 " ) . length ) . toEqual ( 1 ) ; <nl> + expect ( $ ( " # " + idPrefix + " 3 " ) . length ) . toEqual ( 1 ) ; <nl> + expect ( $ ( " # " + idPrefix + " 4 " ) . length ) . toEqual ( 1 ) ; <nl> + expect ( $ ( " # " + idPrefix + " 5 " ) . length ) . toEqual ( 1 ) ; <nl> + <nl> + expect ( $ ( " # " + idPrefix + " 1 " ) . attr ( " value " ) ) . toEqual ( " " ) ; <nl> + expect ( $ ( " # " + idPrefix + " 2 " ) . attr ( " value " ) ) . toEqual ( " " ) ; <nl> + expect ( $ ( " # " + idPrefix + " 3 " ) . attr ( " value " ) ) . toEqual ( " " ) ; <nl> + expect ( $ ( " # " + idPrefix + " 4 " ) . attr ( " value " ) ) . toEqual ( " " ) ; <nl> + expect ( $ ( " # " + idPrefix + " 5 " ) . attr ( " value " ) ) . 
toEqual ( " " ) ; <nl> + <nl> + expect ( $ ( " # " + idPrefix + " addLine " ) . length ) . toEqual ( 1 ) ; <nl> + $ ( " # " + idPrefix + " 1 " ) . attr ( " value " , " foo " ) ; <nl> + $ ( " # " + idPrefix + " 2 " ) . attr ( " value " , " bar " ) ; <nl> + $ ( " # " + idPrefix + " 3 " ) . attr ( " value " , " " ) ; <nl> + $ ( " # " + idPrefix + " 4 " ) . attr ( " value " , " baz " ) ; <nl> + $ ( " # " + idPrefix + " 5 " ) . attr ( " value " , " foxx " ) ; <nl> helper . simulateMouseEvent ( " click " , " control_adapter_priority_submit " ) ; <nl> expect ( adapter . changeTo ) . toHaveBeenCalledWith ( { <nl> prioList : [ " foo " , " bar " , " baz " , " foxx " ] <nl> mmm a / html / admin / js / graphViewer / ui / modalDialogHelper . js <nl> ppp b / html / admin / js / graphViewer / ui / modalDialogHelper . js <nl> var modalDialogHelper = modalDialogHelper | | { } ; <nl> labelTh = document . createElement ( " th " ) , <nl> contentTh = document . createElement ( " th " ) , <nl> input , <nl> + icon , <nl> addLineButton , <nl> rows , <nl> lastId , <nl> var modalDialogHelper = modalDialogHelper | | { } ; <nl> innerContentTh = document . createElement ( " th " ) , <nl> innerInput = document . createElement ( " input " ) , <nl> removeRow = document . createElement ( " button " ) , <nl> + innerIcon = document . createElement ( " img " ) , <nl> lastItem ; <nl> innerInput . type = " text " ; <nl> innerInput . id = idprefix + o . id + " _ " + lastId ; <nl> var modalDialogHelper = modalDialogHelper | | { } ; <nl> innerContentTh . className = " collectionTh " ; <nl> innerContentTh . appendChild ( innerInput ) ; <nl> removeRow . id = idprefix + o . id + " _ " + lastId + " _remove " ; <nl> + removeRow . className = " graphViewer - icon - button " ; <nl> + removeRow . appendChild ( innerIcon ) ; <nl> + innerIcon . className = " gv - icon - small delete " ; <nl> removeRow . onclick = function ( ) { <nl> table . removeChild ( innerTr ) ; <nl> rows . splice ( rows . indexOf ( innerTr ) , 1 ) ; <nl> var modalDialogHelper = modalDialogHelper | | { } ; <nl> lastId = 1 ; <nl> addLineButton = document . createElement ( " button " ) ; <nl> input = document . createElement ( " input " ) ; <nl> + icon = document . createElement ( " img " ) ; <nl> input . type = " text " ; <nl> input . id = idprefix + o . id + " _1 " ; <nl> contentTh . appendChild ( input ) ; <nl> contentTh . appendChild ( addLineButton ) ; <nl> - addLineButton . onclick = addNewLine ; <nl> + addLineButton . onclick = function ( ) { <nl> + addNewLine ( ) ; <nl> + } ; <nl> addLineButton . id = idprefix + o . id + " _addLine " ; <nl> + addLineButton . className = " graphViewer - icon - button " ; <nl> + addLineButton . appendChild ( icon ) ; <nl> + icon . className = " gv - icon - small add " ; <nl> if ( o . objects . length > 0 ) { <nl> input . value = o . objects [ 0 ] ; <nl> } <nl>
GraphViewer : The PriorityList now uses an icon for its buttons . Looks way more awesome
arangodb/arangodb
a9479002f75ffdf66c67b015625c8ba678c1f16d
2013-07-16T06:09:52Z
mmm a / tensorflow / compiler / xla / service / gpu / nvptx_compiler . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / nvptx_compiler . cc <nl> bool MaybeLoadPtxFromFile ( const HloModule * module , std : : string * ptx ) { <nl> / / If the xla_gpu_ptx_file options is set , be explicit when a file is used <nl> / / and warn when a file is not used to ease catching typo in filename . <nl> std : : string prefix = xla : : FilenameFor ( * module , * ptx ) ; <nl> - std : : string ptx_filename ; <nl> + std : : string matched_filename ; <nl> for ( const string filename : module - > config ( ) . debug_options ( ) . xla_gpu_ptx_file ( ) ) { <nl> - / / To ease comparing many PTX versions , accept different suffix then <nl> + / / To ease comparing many PTX versions , accept different suffixes than <nl> / / the original filename . <nl> if ( absl : : StartsWith ( filename , prefix ) ) { <nl> - ptx_filename = filename ; <nl> + matched_filename = filename ; <nl> VLOG ( 0 ) < < " RunBackend ( ) - Will load PTX from file : " < < filename ; <nl> break ; <nl> } <nl> } <nl> if ( module - > config ( ) . debug_options ( ) . xla_gpu_ptx_file ( ) . size ( ) > 0 & & <nl> - ptx_filename . empty ( ) ) { <nl> + matched_filename . empty ( ) ) { <nl> VLOG ( 0 ) < < " RunBackend ( ) - For module with prefix ' " < < prefix <nl> < < " ' , we did not found a PTX file to load . " ; <nl> } <nl> <nl> - if ( ! ptx_filename . empty ( ) ) { <nl> - std : : ifstream ifs ( ptx_filename , std : : ifstream : : in ) ; <nl> + if ( ! matched_filename . empty ( ) ) { <nl> + std : : ifstream ifs ( matched_filename , std : : ifstream : : in ) ; <nl> * ptx = std : : string ( std : : istreambuf_iterator < char > ( ifs ) , <nl> std : : istreambuf_iterator < char > ( ) ) ; <nl> - CHECK ( ! ptx - > empty ( ) ) < < " Empty or non existing PTX file : " < < ptx_filename ; <nl> + CHECK ( ! ptx - > empty ( ) ) < < " Empty or non existing PTX file : " < < matched_filename ; <nl> return true ; <nl> } <nl> return false ; <nl> }
Rename a variable for clarity and fix a comment typo .
tensorflow/tensorflow
1f5e538ba905ee3616f72e73b73742e6ef4a6490
2019-07-22T19:58:18Z
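
The override mechanism above boils down to "first candidate whose name starts with the module prefix wins". Below is a minimal standalone sketch of that pattern; FindPtxOverride and its parameters are hypothetical stand-ins for the debug-options plumbing, and std::string::compare replaces absl::StartsWith so the snippet has no dependencies.

#include <fstream>
#include <iterator>
#include <string>
#include <vector>

// Hypothetical stand-in for MaybeLoadPtxFromFile: take the first candidate
// whose name starts with `prefix` (any suffix is accepted so several PTX
// versions can coexist side by side), then read the whole file into `ptx`.
bool FindPtxOverride(const std::string& prefix,
                     const std::vector<std::string>& candidates,
                     std::string* ptx) {
  for (const std::string& filename : candidates) {
    if (filename.compare(0, prefix.size(), prefix) != 0) continue;
    std::ifstream ifs(filename, std::ifstream::in);
    ptx->assign(std::istreambuf_iterator<char>(ifs),
                std::istreambuf_iterator<char>());
    return !ptx->empty();  // an empty or missing file counts as no override
  }
  return false;
}
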
mmm a / hphp / runtime / base / concurrent - shared - store . h <nl> ppp b / hphp / runtime / base / concurrent - shared - store . h <nl> <nl> # ifndef incl_HPHP_CONCURRENT_SHARED_STORE_H_ <nl> # define incl_HPHP_CONCURRENT_SHARED_STORE_H_ <nl> <nl> - # define TBB_PREVIEW_CONCURRENT_PRIORITY_QUEUE 1 <nl> - <nl> # include < atomic > <nl> # include < utility > <nl> # include < vector > <nl>
Remove # define TBB_PREVIEW_CONCURRENT_PRIORITY_QUEUE
facebook/hhvm
7755d0e70591b2659c04bf5f6af683cddd8214a5
2014-09-22T21:30:17Z
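
For context, concurrent_priority_queue stopped being a preview feature in later TBB releases (around TBB 4.0), which is what makes the define above removable. A minimal usage sketch under that assumption:

#include <tbb/concurrent_priority_queue.h>

int main() {
  // No TBB_PREVIEW_CONCURRENT_PRIORITY_QUEUE define is required anymore;
  // the plain include is sufficient on current TBB releases.
  tbb::concurrent_priority_queue<int> q;
  q.push(3);
  q.push(7);
  int top = 0;
  // try_pop is the only pop operation; it returns false once the queue is empty.
  while (q.try_pop(top)) {
    // First iteration sees 7: largest element first under the default std::less.
  }
  return 0;
}
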
mmm a / src / txmempool . h <nl> ppp b / src / txmempool . h <nl> class CTxMemPool ; <nl> * ( nCountWithDescendants , nSizeWithDescendants , and nModFeesWithDescendants ) for <nl> * all ancestors of the newly added transaction . <nl> * <nl> - * If updating the descendant state is skipped , we can mark the entry as <nl> - * " dirty " , and set nSizeWithDescendants / nModFeesWithDescendants to equal nTxSize / <nl> - * nFee + feeDelta . ( This can potentially happen during a reorg , where we limit the <nl> - * amount of work we ' re willing to do to avoid consuming too much CPU . ) <nl> - * <nl> * / <nl> <nl> class CTxMemPoolEntry <nl> class CTxMemPoolEntry <nl> <nl> / / Information about descendants of this transaction that are in the <nl> / / mempool ; if we remove this transaction we must remove all of these <nl> - / / descendants as well . if nCountWithDescendants is 0 , treat this entry as <nl> - / / dirty , and nSizeWithDescendants and nModFeesWithDescendants will not be <nl> - / / correct . <nl> + / / descendants as well . <nl> uint64_t nCountWithDescendants ; / / ! < number of descendant transactions <nl> uint64_t nSizeWithDescendants ; / / ! < . . . and size <nl> CAmount nModFeesWithDescendants ; / / ! < . . . and total fees ( all including us ) <nl> class CTxMemPoolEntry <nl> size_t DynamicMemoryUsage ( ) const { return nUsageSize ; } <nl> const LockPoints & GetLockPoints ( ) const { return lockPoints ; } <nl> <nl> - / / Adjusts the descendant state , if this entry is not dirty . <nl> + / / Adjusts the descendant state . <nl> void UpdateDescendantState ( int64_t modifySize , CAmount modifyFee , int64_t modifyCount ) ; <nl> / / Adjusts the ancestor state <nl> void UpdateAncestorState ( int64_t modifySize , CAmount modifyFee , int64_t modifyCount , int modifySigOps ) ; <nl> enum class MemPoolRemovalReason { <nl> * CalculateMemPoolAncestors ( ) takes configurable limits that are designed to <nl> * prevent these calculations from being too CPU intensive . <nl> * <nl> - * Adding transactions from a disconnected block can be very time consuming , <nl> - * because we don ' t have a way to limit the number of in - mempool descendants . <nl> - * To bound CPU processing , we limit the amount of work we ' re willing to do <nl> - * to properly update the descendant information for a tx being added from <nl> - * a disconnected block . If we would exceed the limit , then we instead mark <nl> - * the entry as " dirty " , and set the feerate for sorting purposes to be equal <nl> - * the feerate of the transaction without any descendants . <nl> - * <nl> * / <nl> class CTxMemPool <nl> { <nl>
Merge : [ doc ] Removing comments about dirty entries on txmempool
bitcoin/bitcoin
b6ee855b411ee9bc39f935d0da3298a773a2ed37
2017-05-15T22:39:56Z
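
A rough sketch of the bookkeeping that remains once the "dirty" escape hatch is gone: the cached descendant aggregates always move by signed deltas and must stay exact. The struct below is a hypothetical condensation of the fields declared above (CAmount is an int64_t alias in Bitcoin Core), not the real CTxMemPoolEntry:

#include <cassert>
#include <cstdint>

struct DescendantState {
  uint64_t nCountWithDescendants = 1;  // a transaction counts itself
  uint64_t nSizeWithDescendants = 0;   // initialised to the tx's own size in real code
  int64_t nModFeesWithDescendants = 0;

  void Update(int64_t modifySize, int64_t modifyFee, int64_t modifyCount) {
    // Deltas may be negative, e.g. when a descendant is evicted.
    nSizeWithDescendants += modifySize;
    nModFeesWithDescendants += modifyFee;
    nCountWithDescendants += modifyCount;
    // Without the dirty-entry fallback, these invariants must always hold.
    assert(int64_t(nSizeWithDescendants) > 0);
    assert(nCountWithDescendants > 0);
  }
};
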
mmm a / include / swift / AST / DeclContext . h <nl> ppp b / include / swift / AST / DeclContext . h <nl> class alignas ( 8 ) DeclContext { <nl> <nl> / / / Returns the module scope context that contains this context . <nl> / / / <nl> - / / / This is either a Module or a SourceFile . <nl> + / / / This is either a \ c Module or a \ c FileUnit . <nl> DeclContext * getModuleScopeContext ( ) const ; <nl> <nl> / / / Returns the source file that contains this context , or null if this <nl>
Fix a comment
apple/swift
bba48ac151fb4a3d109d43864f25bb8675ffc634
2014-03-06T10:01:44Z
mmm a / usage / README . md <nl> ppp b / usage / README . md <nl> You can investigate the key name in Karabiner - EventViewer . <nl> <nl> You can set keyboard type in Devices tab . <nl> <nl> - < img src = " img / devices . png " width = " 400 " > <nl> + < img src = " img / keyboard_type . png " width = " 400 " > <nl> <nl> If you are using multiple keyboards which have different keyboard types , you have to set the keyboard type in Devices tab . <nl> Otherwise , Karabiner - Elements cannot treat keyboard type properly due to a limitation of macOS . <nl> deleted file mode 100644 <nl> index e92a33b8b . . 000000000 <nl> Binary files a / usage / img / devices . png and / dev / null differ <nl> new file mode 100644 <nl> index 000000000 . . 469629ebd <nl> Binary files / dev / null and b / usage / img / keyboard_type . png differ <nl> Binary files a / usage / img / keycode . png and b / usage / img / keycode . png differ <nl>
update usage : replace devices . png screenshot with keyboard_type . png
pqrs-org/Karabiner-Elements
f3a95592e31e6ce29f10c2a7bda5aa039e4e4d98
2016-10-28T17:05:26Z
mmm a / javanano / src / test / java / com / google / protobuf / nano / unittest_nano . proto <nl> ppp b / javanano / src / test / java / com / google / protobuf / nano / unittest_nano . proto <nl> message TestAllTypesNano { <nl> oneof oneof_field { <nl> uint32 oneof_uint32 = 111 ; <nl> NestedMessage oneof_nested_message = 112 ; <nl> - / / string oneof_string = 123 ; <nl> - / / bytes oneof_bytes = 124 ; <nl> + string oneof_string = 123 ; <nl> + bytes oneof_bytes = 124 ; <nl> fixed64 oneof_fixed64 = 115 ; <nl> } <nl> } <nl>
Uncomment string / bytes fields in oneof nano .
protocolbuffers/protobuf
3ab660cd4a5e1249553ad49a94ba1eaf2798d0ac
2015-02-20T00:43:08Z
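
The two restored fields bring the oneof back to full coverage. The semantics under test are last-writer-wins: setting any member of the oneof clears the others. The nano runtime generates Java, so purely as an illustration, here is the same behaviour through the standard C++ generated API; the include assumes a hypothetical --cpp_out build of this schema.

#include "unittest_nano.pb.h"  // hypothetical: schema compiled with protoc --cpp_out

int main() {
  TestAllTypesNano msg;
  msg.set_oneof_uint32(7);
  msg.set_oneof_string("string wins");  // implicitly clears oneof_uint32
  // Exactly one member of oneof_field is set at any time.
  bool only_string = msg.oneof_field_case() == TestAllTypesNano::kOneofString;
  (void)only_string;
  return 0;
}
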
mmm a / tools / clusterfuzz / v8_foozzie . py <nl> ppp b / tools / clusterfuzz / v8_foozzie . py <nl> <nl> ' - - no - lazy ' , <nl> ' - - no - lazy - inner - functions ' , <nl> ] , <nl> + ignition_no_ic = [ <nl> + ' - - turbo - filter = ~ ' , <nl> + ' - - noopt ' , <nl> + ' - - liftoff ' , <nl> + ' - - no - wasm - tier - up ' , <nl> + ' - - no - use - ic ' , <nl> + ] , <nl> ignition_turbo = [ ] , <nl> + ignition_turbo_no_ic = [ <nl> + ' - - no - use - ic ' , <nl> + ] , <nl> ignition_turbo_opt = [ <nl> ' - - always - opt ' , <nl> ' - - no - liftoff ' , <nl> mmm a / tools / clusterfuzz / v8_foozzie_test . py <nl> ppp b / tools / clusterfuzz / v8_foozzie_test . py <nl> def random ( self ) : <nl> self . assertEqual ( <nl> [ <nl> ' - - first - config = ignition ' , <nl> - ' - - second - config = ignition_turbo ' , <nl> + ' - - second - config = ignition_turbo_no_ic ' , <nl> ' - - second - d8 = d8 ' , <nl> ] , <nl> v8_fuzz_config . Config ( ' foo ' , Rng ( ) ) . choose_foozzie_flags ( ) , <nl> mmm a / tools / clusterfuzz / v8_fuzz_config . py <nl> ppp b / tools / clusterfuzz / v8_fuzz_config . py <nl> <nl> [ 10 , ' ignition ' , ' jitless ' , ' d8 ' ] , <nl> [ 10 , ' ignition ' , ' slow_path ' , ' d8 ' ] , <nl> [ 5 , ' ignition ' , ' slow_path_opt ' , ' d8 ' ] , <nl> - [ 30 , ' ignition ' , ' ignition_turbo ' , ' d8 ' ] , <nl> - [ 20 , ' ignition ' , ' ignition_turbo_opt ' , ' d8 ' ] , <nl> + [ 10 , ' ignition ' , ' ignition_turbo ' , ' d8 ' ] , <nl> + [ 10 , ' ignition_no_ic ' , ' ignition_turbo ' , ' d8 ' ] , <nl> + [ 10 , ' ignition ' , ' ignition_turbo_no_ic ' , ' d8 ' ] , <nl> + [ 10 , ' ignition ' , ' ignition_turbo_opt ' , ' d8 ' ] , <nl> + [ 10 , ' ignition_no_ic ' , ' ignition_turbo_opt ' , ' d8 ' ] , <nl> [ 5 , ' ignition_turbo_opt ' , ' ignition_turbo_opt ' , ' clang_x86 / d8 ' ] , <nl> [ 5 , ' ignition_turbo ' , ' ignition_turbo ' , ' clang_x86 / d8 ' ] , <nl> [ 5 , ' ignition ' , ' ignition ' , ' clang_x86 / d8 ' ] , <nl>
[ foozzie ] Add no - ic configurations
v8/v8
6207d75e91054f17f845e96a4c3a07e951b53daf
2019-05-22T08:28:53Z
mmm a / lib / SILOptimizer / Analysis / EscapeAnalysis . cpp <nl> ppp b / lib / SILOptimizer / Analysis / EscapeAnalysis . cpp <nl> SILValue EscapeAnalysis : : getPointerBase ( SILValue value ) { <nl> case ValueKind : : StructElementAddrInst : <nl> case ValueKind : : StructExtractInst : <nl> case ValueKind : : TupleElementAddrInst : <nl> + case ValueKind : : InitExistentialAddrInst : <nl> + case ValueKind : : OpenExistentialAddrInst : <nl> case ValueKind : : BeginAccessInst : <nl> case ValueKind : : UncheckedTakeEnumDataAddrInst : <nl> case ValueKind : : UncheckedEnumDataInst : <nl> void EscapeAnalysis : : analyzeInstruction ( SILInstruction * I , <nl> } <nl> case SILInstructionKind : : RefElementAddrInst : <nl> case SILInstructionKind : : RefTailAddrInst : <nl> - case SILInstructionKind : : ProjectBoxInst : <nl> - case SILInstructionKind : : InitExistentialAddrInst : <nl> - case SILInstructionKind : : OpenExistentialAddrInst : { <nl> + case SILInstructionKind : : ProjectBoxInst : { <nl> / / For projections into objects , get the non - address reference operand and <nl> / / return an interior content node that the reference points to . <nl> auto SVI = cast < SingleValueInstruction > ( I ) ; <nl> mmm a / test / SILOptimizer / escape_analysis . sil <nl> ppp b / test / SILOptimizer / escape_analysis . sil <nl> bb0 ( % 0 : $ Builtin . Int64 , % 1 : $ X , % 2 : $ X , % 3 : $ X ) : <nl> <nl> / / CHECK - LABEL : CG of test_existential_addr <nl> / / CHECK - NEXT : Arg [ ref ] % 0 Esc : A , Succ : <nl> - / / CHECK - NEXT : Val % 1 Esc : , Succ : ( % 2 ) <nl> - / / CHECK - NEXT : Con % 2 Esc : , Succ : ( % 2 . 1 ) <nl> - / / CHECK - NEXT : Con [ ref ] % 2 . 1 Esc : , Succ : % 0 <nl> + / / CHECK - NEXT : Val % 1 Esc : , Succ : ( % 1 . 1 ) <nl> + / / CHECK - NEXT : Con [ ref ] % 1 . 1 Esc : , Succ : % 0 <nl> / / CHECK - NEXT : End <nl> sil @ test_existential_addr : $ @ convention ( thin ) ( @ owned Pointer ) - > ( ) { <nl> bb0 ( % 0 : $ Pointer ) : <nl> mmm a / test / SILOptimizer / escape_analysis_invalidate . sil <nl> ppp b / test / SILOptimizer / escape_analysis_invalidate . sil <nl> <nl> - / / RUN : % target - sil - opt % s - temp - rvalue - opt - enable - sil - verify - all - escapes - internal - verify | % FileCheck % s <nl> + / / RUN : % target - sil - opt % s - temp - rvalue - opt - enable - sil - verify - all - escapes - internal - verify - wmo | % FileCheck % s <nl> / / <nl> / / TempRValue iteratively uses EscapeAnalysis and deletes <nl> / / instructions . Make sure that the connection graph remains valid <nl> / / < rdar : / / 57290845 > . <nl> + / / <nl> + / / This test requires - wmo so EscapeAnalysis can find all <nl> + / / implementations of SomeProtocol . foo . Otherwise the existential <nl> + / / address appears to escape . As an alternative , we could be more <nl> + / / aggressive about considering address - type argument not to escape , <nl> + / / but that would require some limiting address_to_pointer to never <nl> + / / occur on an exclusive address argument . 
<nl> <nl> import Swift <nl> <nl> bb0 ( % 0 : $ * SomeProtocol , % 1 : $ SomeClass ) : <nl> % v = tuple ( ) <nl> return % v : $ ( ) <nl> } <nl> + <nl> + sil hidden @ $ s26escape_analysis_invalidate12SomeInstanceV3fooyyF : $ @ convention ( method ) ( SomeInstance ) - > ( ) { <nl> + bb0 ( % 0 : $ SomeInstance ) : <nl> + debug_value % 0 : $ SomeInstance , let , name " self " , argno 1 / / id : % 1 <nl> + % 2 = tuple ( ) / / user : % 3 <nl> + return % 2 : $ ( ) / / id : % 3 <nl> + } <nl> + <nl> + sil private [ transparent ] [ thunk ] @ $ s26escape_analysis_invalidate12SomeInstanceVAA0A8ProtocolA2aDP3fooyyFTW : $ @ convention ( witness_method : SomeProtocol ) ( @ in_guaranteed SomeInstance ) - > ( ) { <nl> + bb0 ( % 0 : $ * SomeInstance ) : <nl> + % 1 = load % 0 : $ * SomeInstance <nl> + / / function_ref SomeInstance . foo ( ) <nl> + % 2 = function_ref @ $ s26escape_analysis_invalidate12SomeInstanceV3fooyyF : $ @ convention ( method ) ( SomeInstance ) - > ( ) <nl> + % 3 = apply % 2 ( % 1 ) : $ @ convention ( method ) ( SomeInstance ) - > ( ) <nl> + % 4 = tuple ( ) <nl> + return % 4 : $ ( ) <nl> + } <nl> + <nl> + sil_witness_table hidden SomeInstance : SomeProtocol module t { <nl> + method # SomeProtocol . foo ! 1 : < Self where Self : SomeProtocol > ( Self ) - > ( ) - > ( ) : @ $ s26escape_analysis_invalidate12SomeInstanceVAA0A8ProtocolA2aDP3fooyyFTW / / protocol witness for SomeProtocol . foo ( ) in conformance SomeInstance <nl> + } <nl> mmm a / test / SILOptimizer / escape_analysis_reduced . sil <nl> ppp b / test / SILOptimizer / escape_analysis_reduced . sil <nl> bb2 : <nl> } <nl> <nl> / / CHECK - LABEL : CG of testPendingMerge <nl> - / / CHECK - NEXT : Arg % 0 Esc : A , Succ : ( % 22 ) <nl> - / / CHECK - NEXT : Arg [ ref ] % 1 Esc : G , Succ : ( % 9 . 1 ) <nl> + / / CHECK - NEXT : Arg % 0 Esc : A , Succ : ( % 0 . 1 ) <nl> + / / CHECK - NEXT : Con % 0 . 1 Esc : A , Succ : % 1 , % 4 . 1 <nl> + / / CHECK - NEXT : Arg [ ref ] % 1 Esc : A , Succ : ( % 9 . 1 ) <nl> / / CHECK - NEXT : Val % 2 Esc : , Succ : ( % 9 ) <nl> / / CHECK - NEXT : Val % 4 Esc : , Succ : ( % 4 . 1 ) <nl> - / / CHECK - NEXT : Con % 4 . 1 Esc : A , Succ : ( % 9 . 1 ) , % 7 , % 9 <nl> - / / CHECK - NEXT : Val % 5 Esc : , Succ : ( % 7 ) <nl> - / / CHECK - NEXT : Con % 7 Esc : A , Succ : ( % 9 . 1 ) <nl> + / / CHECK - NEXT : Con % 4 . 1 Esc : A , Succ : ( % 9 . 1 ) , % 5 . 1 , % 9 <nl> + / / CHECK - NEXT : Val % 5 Esc : , Succ : ( % 5 . 1 ) <nl> + / / CHECK - NEXT : Con % 5 . 1 Esc : A , Succ : % 1 <nl> / / CHECK - NEXT : Con [ ref ] % 9 Esc : A , Succ : ( % 9 . 1 ) <nl> - / / CHECK - NEXT : Con [ int ] % 9 . 1 Esc : G , Succ : ( % 9 . 1 ) , % 1 <nl> - / / CHECK - NEXT : Con % 22 Esc : A , Succ : ( % 22 . 1 ) <nl> - / / CHECK - NEXT : Con % 22 . 1 Esc : A , Succ : % 1 , % 4 . 1 <nl> + / / CHECK - NEXT : Con [ int ] % 9 . 1 Esc : G , Succ : ( % 9 . 2 ) <nl> + / / CHECK - NEXT : Con % 9 . 2 Esc : G , Succ : ( % 9 . 3 ) <nl> + / / CHECK - NEXT : Con % 9 . 3 Esc : G , Succ : <nl> / / CHECK - LABEL : End <nl> sil private @ testPendingMerge : $ @ convention ( thin ) ( @ owned VariableNode ) - > ( @ out ASTNode , @ error Error ) { <nl> bb0 ( % 0 : $ * ASTNode , % 1 : $ VariableNode ) : <nl> new file mode 100644 <nl> index 000000000000 . . 232beaaac67d <nl> mmm / dev / null <nl> ppp b / test / SILOptimizer / escape_analysis_release_hoisting . 
sil <nl> <nl> + / / RUN : % target - sil - opt - enable - sil - verify - all - release - hoisting % s | % FileCheck % s <nl> + / / REQUIRES : CPU = x86_64 <nl> + / / REQUIRES : OS = macosx <nl> + <nl> + sil_stage canonical <nl> + <nl> + import Builtin <nl> + import Swift <nl> + import SwiftShims <nl> + <nl> + / / Test < rdar : / / 59559805 > miscompile ; use - after - free <nl> + / / Avoid hoisting strong_release of an existential <nl> + / / over a store to the value . <nl> + protocol ArrayElementProtocol { } <nl> + <nl> + struct ArrayElementStruct : ArrayElementProtocol { <nl> + @ _hasStorage @ _hasInitialValue var dummy : Int { get set } <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil @ testArrayStorageInitAfterFree : $ @ convention ( thin ) ( ) - > ( ) { <nl> + / / CHECK : [ [ ARRAY : % . * ] ] = alloc_ref [ stack ] [ tail_elems $ ArrayElementProtocol * undef : $ Builtin . Word ] $ _ContiguousArrayStorage < ArrayElementProtocol > <nl> + / / CHECK : [ [ CAST : % . * ] ] = upcast [ [ ARRAY ] ] : $ _ContiguousArrayStorage < ArrayElementProtocol > to $ __ContiguousArrayStorageBase <nl> + / / CHECK : [ [ TAIL : % . * ] ] = ref_tail_addr [ [ CAST ] ] : $ __ContiguousArrayStorageBase , $ ArrayElementProtocol <nl> + / / CHECK : [ [ ADR : % . * ] ] = init_existential_addr [ [ TAIL ] ] : $ * ArrayElementProtocol , $ ArrayElementStruct <nl> + / / CHECK : store % { { . * } } to [ [ ADR ] ] : $ * ArrayElementStruct <nl> + / / CHECK : strong_release [ [ ARRAY ] ] : $ _ContiguousArrayStorage < ArrayElementProtocol > <nl> + / / CHECK - LABEL : } / / end sil function ' testArrayStorageInitAfterFree ' <nl> + sil @ testArrayStorageInitAfterFree : $ @ convention ( thin ) ( ) - > ( ) { <nl> + bb0 : <nl> + % 0 = alloc_ref [ stack ] [ tail_elems $ ArrayElementProtocol * undef : $ Builtin . Word ] $ _ContiguousArrayStorage < ArrayElementProtocol > <nl> + % 1 = upcast % 0 : $ _ContiguousArrayStorage < ArrayElementProtocol > to $ __ContiguousArrayStorageBase <nl> + % 2 = struct $ _SwiftArrayBodyStorage ( undef : $ Int , undef : $ UInt ) <nl> + % 3 = struct $ _ArrayBody ( % 2 : $ _SwiftArrayBodyStorage ) <nl> + % 4 = ref_element_addr % 1 : $ __ContiguousArrayStorageBase , # __ContiguousArrayStorageBase . countAndCapacity <nl> + store % 3 to % 4 : $ * _ArrayBody <nl> + % 6 = ref_tail_addr % 1 : $ __ContiguousArrayStorageBase , $ ArrayElementProtocol <nl> + % 7 = value_to_bridge_object undef : $ Builtin . Int64 <nl> + % 8 = struct $ _StringObject ( undef : $ UInt64 , % 7 : $ Builtin . BridgeObject ) <nl> + % 9 = struct $ _StringGuts ( % 8 : $ _StringObject ) <nl> + % 10 = struct $ String ( % 9 : $ _StringGuts ) <nl> + % 11 = struct $ ArrayElementStruct ( undef : $ Int ) <nl> + % 12 = init_existential_addr % 6 : $ * ArrayElementProtocol , $ ArrayElementStruct <nl> + store % 11 to % 12 : $ * ArrayElementStruct <nl> + strong_release % 0 : $ _ContiguousArrayStorage < ArrayElementProtocol > <nl> + dealloc_ref [ stack ] % 0 : $ _ContiguousArrayStorage < ArrayElementProtocol > <nl> + % 16 = tuple ( ) <nl> + return % 16 : $ ( ) <nl> + } <nl>
Fix EscapeAnalysis connection graph for existential values .
apple/swift
601a51a60598deb9533ca528f3c9866a33986d86
2020-02-22T10:07:32Z
mmm a / docs / source / Compiler . md <nl> ppp b / docs / source / Compiler . md <nl> Additional options : <nl> <nl> - ` - - root - type T ` : Select or override the default root_type . <nl> <nl> + - ` - - force - defaults ` : Emit default values in binary output from JSON . <nl> + <nl> NOTE : short - form options for generators are deprecated , use the long form <nl> whenever possible . <nl> mmm a / include / flatbuffers / idl . h <nl> ppp b / include / flatbuffers / idl . h <nl> struct IDLOptions { <nl> bool protobuf_ascii_alike ; <nl> bool size_prefixed ; <nl> std : : string root_type ; <nl> + bool force_defaults ; <nl> <nl> / / Possible options for the more general generator below . <nl> enum Language { <nl> struct IDLOptions { <nl> reexport_ts_modules ( true ) , <nl> protobuf_ascii_alike ( false ) , <nl> size_prefixed ( false ) , <nl> + force_defaults ( false ) , <nl> lang ( IDLOptions : : kJava ) , <nl> mini_reflect ( IDLOptions : : kNone ) , <nl> lang_to_generate ( 0 ) { } <nl> class Parser : public ParserState { <nl> source_ ( nullptr ) , <nl> anonymous_counter ( 0 ) , <nl> recurse_protection_counter ( 0 ) { <nl> + if ( opts . force_defaults ) { <nl> + builder_ . ForceDefaults ( true ) ; <nl> + } <nl> / / Start out with the empty namespace being current . <nl> empty_namespace_ = new Namespace ( ) ; <nl> namespaces_ . push_back ( empty_namespace_ ) ; <nl> mmm a / src / flatc . cpp <nl> ppp b / src / flatc . cpp <nl> std : : string FlatCompiler : : GetUsageString ( const char * program_name ) const { <nl> " - - reflect - types Add minimal type reflection to code generation . \ n " <nl> " - - reflect - names Add minimal type / name reflection . \ n " <nl> " - - root - type T Select or override the default root_type \ n " <nl> + " - - force - defaults Emit default values in binary output from JSON \ n " <nl> " FILEs may be schemas ( must end in . fbs ) , or JSON files ( conforming to preceding \ n " <nl> " schema ) . FILEs after the - - must be binary flatbuffer format files . \ n " <nl> " Output files are named using the base file name of the input , \ n " <nl> int FlatCompiler : : Compile ( int argc , const char * * argv ) { <nl> } else if ( arg = = " - - root - type " ) { <nl> if ( + + argi > = argc ) Error ( " missing type following " + arg , true ) ; <nl> opts . root_type = argv [ argi ] ; <nl> + } else if ( arg = = " - - force - defaults " ) { <nl> + opts . force_defaults = true ; <nl> } else { <nl> for ( size_t i = 0 ; i < params_ . num_generators ; + + i ) { <nl> if ( arg = = params_ . generators [ i ] . generator_opt_long | | <nl>
Add - - force - defaults option to flatc [ C + + , parser ] ( )
google/flatbuffers
741c63052de40c72b21d704bd685dd311ea7e4a5
2018-06-27T16:12:52Z
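
What the new flag toggles is a single builder knob that already exists in the public C++ API; the Parser constructor now simply forwards it. A short sketch of the same call made directly:

#include "flatbuffers/flatbuffers.h"

int main() {
  flatbuffers::FlatBufferBuilder builder;
  // With ForceDefaults(true), scalar fields equal to their schema default are
  // written out instead of being elided, so JSON that spells out default
  // values still produces them in the binary output.
  builder.ForceDefaults(true);
  return 0;
}
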
mmm a / src / hydrogen - instructions . cc <nl> ppp b / src / hydrogen - instructions . cc <nl> void HCheckInstanceType : : GetCheckMaskAndTag ( uint8_t * mask , uint8_t * tag ) { <nl> } <nl> <nl> <nl> + void HLoadElements : : PrintDataTo ( StringStream * stream ) { <nl> + value ( ) - > PrintNameTo ( stream ) ; <nl> + stream - > Add ( " " ) ; <nl> + typecheck ( ) - > PrintNameTo ( stream ) ; <nl> + } <nl> + <nl> + <nl> void HCheckMaps : : PrintDataTo ( StringStream * stream ) { <nl> value ( ) - > PrintNameTo ( stream ) ; <nl> stream - > Add ( " [ % p " , * map_set ( ) - > first ( ) ) ; <nl> mmm a / src / hydrogen - instructions . h <nl> ppp b / src / hydrogen - instructions . h <nl> class HLoadElements : public HTemplateInstruction < 2 > { <nl> } <nl> <nl> HValue * value ( ) { return OperandAt ( 0 ) ; } <nl> + HValue * typecheck ( ) { return OperandAt ( 1 ) ; } <nl> + <nl> + virtual void PrintDataTo ( StringStream * stream ) ; <nl> <nl> virtual Representation RequiredInputRepresentation ( int index ) { <nl> return Representation : : Tagged ( ) ; <nl>
Improved printing of HLoadElements instruction .
v8/v8
bcb383e055a404e259283088ccf2611c052ab9cf
2012-10-16T11:41:07Z
mmm a / scene / gui / control . cpp <nl> ppp b / scene / gui / control . cpp <nl> void Control : : set_anchor ( Margin p_margin , float p_anchor , bool p_keep_margin , bo <nl> } <nl> <nl> update ( ) ; <nl> - _change_notify ( " anchor " ) ; <nl> + _change_notify ( " anchor_left " ) ; <nl> + _change_notify ( " anchor_right " ) ; <nl> + _change_notify ( " anchor_top " ) ; <nl> + _change_notify ( " anchor_bottom " ) ; <nl> } <nl> <nl> void Control : : _set_anchor ( Margin p_margin , float p_anchor ) { <nl>
Fix inspector update after changing anchor via buttons
godotengine/godot
8b391b9ab0f10ae6bf8d17e3eda4a509f49bca6b
2019-03-19T17:37:28Z
mmm a / dbms / CMakeLists . txt <nl> ppp b / dbms / CMakeLists . txt <nl> add_library ( dbms <nl> include / DB / Parsers / ASTSubquery . h <nl> include / DB / Parsers / ASTUseQuery . h <nl> include / DB / Parsers / ASTIdentifier . h <nl> - include / DB / Parsers / ParserJoin . h <nl> include / DB / Parsers / ParserTablePropertiesQuery . h <nl> include / DB / Parsers / ASTJoin . h <nl> include / DB / Parsers / ParserCheckQuery . h <nl> add_library ( dbms <nl> src / Parsers / ParserQueryWithOutput . cpp <nl> src / Parsers / ParserCreateQuery . cpp <nl> src / Parsers / ParserSelectQuery . cpp <nl> - src / Parsers / ParserJoin . cpp <nl> src / Parsers / ParserInsertQuery . cpp <nl> src / Parsers / ParserDropQuery . cpp <nl> src / Parsers / ParserRenameQuery . cpp <nl> deleted file mode 100644 <nl> index f0cc6434584 . . 00000000000 <nl> mmm a / dbms / include / DB / Parsers / ParserJoin . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - <nl> - # include < DB / Parsers / IParserBase . h > <nl> - <nl> - <nl> - namespace DB <nl> - { <nl> - <nl> - <nl> - class ParserJoin : public IParserBase <nl> - { <nl> - protected : <nl> - const char * getName ( ) const { return " JOIN " ; } <nl> - bool parseImpl ( Pos & pos , Pos end , ASTPtr & node , Pos & max_parsed_pos , Expected & expected ) ; <nl> - } ; <nl> - <nl> - } <nl> mmm a / dbms / src / Parsers / ASTTablesInSelectQuery . cpp <nl> ppp b / dbms / src / Parsers / ASTTablesInSelectQuery . cpp <nl> void ASTTableJoin : : formatImplBeforeTable ( const FormatSettings & settings , Format <nl> { <nl> settings . ostr < < ( settings . hilite ? hilite_keyword : " " ) ; <nl> <nl> - if ( locality = = Locality : : Global ) <nl> - settings . ostr < < " GLOBAL " ; <nl> + switch ( locality ) <nl> + { <nl> + case Locality : : Unspecified : <nl> + break ; <nl> + case Locality : : Local : <nl> + break ; <nl> + case Locality : : Global : <nl> + settings . ostr < < " GLOBAL " ; <nl> + break ; <nl> + } <nl> <nl> if ( kind ! = Kind : : Cross & & kind ! = Kind : : Comma ) <nl> - settings . ostr < < ( strictness = = Strictness : : Any ? " ANY " : " ALL " ) ; <nl> + { <nl> + switch ( strictness ) <nl> + { <nl> + case Strictness : : Unspecified : <nl> + break ; <nl> + case Strictness : : Any : <nl> + settings . ostr < < " ANY " ; <nl> + break ; <nl> + case Strictness : : All : <nl> + settings . ostr < < " ALL " ; <nl> + break ; <nl> + } <nl> + } <nl> <nl> - settings . ostr < < <nl> - ( kind = = Kind : : Inner ? " INNER " <nl> - : ( kind = = Kind : : Left ? " LEFT " <nl> - : ( kind = = Kind : : Right ? " RIGHT " <nl> - : ( kind = = Kind : : Cross ? " CROSS " <nl> - : " FULL OUTER " ) ) ) ) ; <nl> + switch ( kind ) <nl> + { <nl> + case Kind : : Inner : <nl> + settings . ostr < < " INNER JOIN " ; <nl> + break ; <nl> + case Kind : : Left : <nl> + settings . ostr < < " LEFT JOIN " ; <nl> + break ; <nl> + case Kind : : Right : <nl> + settings . ostr < < " RIGHT JOIN " ; <nl> + break ; <nl> + case Kind : : Full : <nl> + settings . ostr < < " FULL OUTER JOIN " ; <nl> + break ; <nl> + case Kind : : Cross : <nl> + settings . ostr < < " CROSS JOIN " ; <nl> + break ; <nl> + case Kind : : Comma : <nl> + settings . ostr < < " , " ; <nl> + break ; <nl> + } <nl> <nl> - settings . ostr < < " JOIN " <nl> - < < ( settings . hilite ? hilite_none : " " ) ; <nl> + settings . ostr < < ( settings . hilite ? 
hilite_none : " " ) ; <nl> } <nl> <nl> <nl> void ASTTableJoin : : formatImplAfterTable ( const FormatSettings & settings , FormatS <nl> <nl> if ( using_expression_list ) <nl> { <nl> - settings . ostr < < ( settings . hilite ? hilite_keyword : " " ) < < " USING " < < ( settings . hilite ? hilite_none : " " ) ; <nl> + settings . ostr < < ( settings . hilite ? hilite_keyword : " " ) < < " USING " < < ( settings . hilite ? hilite_none : " " ) ; <nl> settings . ostr < < " ( " ; <nl> using_expression_list - > formatImpl ( settings , state , frame ) ; <nl> settings . ostr < < " ) " ; <nl> } <nl> else if ( on_expression ) <nl> { <nl> - settings . ostr < < ( settings . hilite ? hilite_keyword : " " ) < < " ON " < < ( settings . hilite ? hilite_none : " " ) ; <nl> + settings . ostr < < ( settings . hilite ? hilite_keyword : " " ) < < " ON " < < ( settings . hilite ? hilite_none : " " ) ; <nl> on_expression - > formatImpl ( settings , state , frame ) ; <nl> } <nl> } <nl> void ASTTablesInSelectQueryElement : : formatImpl ( const FormatSettings & settings , <nl> if ( table_expression ) <nl> { <nl> if ( table_join ) <nl> + { <nl> static_cast < const ASTTableJoin & > ( * table_join ) . formatImplBeforeTable ( settings , state , frame ) ; <nl> + settings . ostr < < " " ; <nl> + } <nl> <nl> - settings . ostr < < " " ; <nl> table_expression - > formatImpl ( settings , state , frame ) ; <nl> - <nl> settings . ostr < < " " ; <nl> + <nl> if ( table_join ) <nl> static_cast < const ASTTableJoin & > ( * table_join ) . formatImplAfterTable ( settings , state , frame ) ; <nl> } <nl> deleted file mode 100644 <nl> index 2045b21f37a . . 00000000000 <nl> mmm a / dbms / src / Parsers / ParserJoin . cpp <nl> ppp / dev / null <nl> <nl> - # include < DB / Parsers / ASTJoin . h > <nl> - # include < DB / Parsers / CommonParsers . h > <nl> - # include < DB / Parsers / ExpressionElementParsers . h > <nl> - # include < DB / Parsers / ExpressionListParsers . h > <nl> - # include < DB / Parsers / ParserJoin . h > <nl> - <nl> - <nl> - namespace DB <nl> - { <nl> - <nl> - namespace ErrorCodes <nl> - { <nl> - extern const int SYNTAX_ERROR ; <nl> - } <nl> - <nl> - <nl> - bool ParserJoin : : parseImpl ( Pos & pos , Pos end , ASTPtr & node , Pos & max_parsed_pos , Expected & expected ) <nl> - { <nl> - Pos begin = pos ; <nl> - <nl> - auto join = std : : make_shared < ASTJoin > ( StringRange ( begin , pos ) ) ; <nl> - node = join ; <nl> - <nl> - ParserWhiteSpaceOrComments ws ; <nl> - ParserString s_global ( " GLOBAL " , true , true ) ; <nl> - ParserString s_any ( " ANY " , true , true ) ; <nl> - ParserString s_all ( " ALL " , true , true ) ; <nl> - ParserString s_inner ( " INNER " , true , true ) ; <nl> - ParserString s_left ( " LEFT " , true , true ) ; <nl> - ParserString s_right ( " RIGHT " , true , true ) ; <nl> - ParserString s_full ( " FULL " , true , true ) ; <nl> - ParserString s_cross ( " CROSS " , true , true ) ; <nl> - ParserString s_outer ( " OUTER " , true , true ) ; <nl> - ParserString s_join ( " JOIN " , true , true ) ; <nl> - ParserString s_using ( " USING " , true , true ) ; <nl> - ParserString s_on ( " ON " , true , true ) ; <nl> - <nl> - ParserNotEmptyExpressionList exp_list ( false ) ; <nl> - ParserLogicalOrExpression exp_elem ; <nl> - ParserWithOptionalAlias subquery ( ParserPtr ( new ParserSubquery ) , true ) ; <nl> - ParserIdentifier identifier ; <nl> - <nl> - ws . ignore ( pos , end ) ; <nl> - <nl> - if ( s_global . 
ignore ( pos , end ) ) <nl> - join - > locality = ASTJoin : : Global ; <nl> - else <nl> - join - > locality = ASTJoin : : Local ; <nl> - <nl> - ws . ignore ( pos , end ) ; <nl> - <nl> - bool has_strictness = true ; <nl> - if ( s_any . ignore ( pos , end ) ) <nl> - join - > strictness = ASTJoin : : Any ; <nl> - else if ( s_all . ignore ( pos , end ) ) <nl> - join - > strictness = ASTJoin : : All ; <nl> - else <nl> - has_strictness = false ; <nl> - <nl> - ws . ignore ( pos , end ) ; <nl> - <nl> - if ( s_inner . ignore ( pos , end ) ) <nl> - join - > kind = ASTJoin : : Inner ; <nl> - else if ( s_left . ignore ( pos , end ) ) <nl> - join - > kind = ASTJoin : : Left ; <nl> - else if ( s_right . ignore ( pos , end ) ) <nl> - join - > kind = ASTJoin : : Right ; <nl> - else if ( s_full . ignore ( pos , end ) ) <nl> - join - > kind = ASTJoin : : Full ; <nl> - else if ( s_cross . ignore ( pos , end ) ) <nl> - join - > kind = ASTJoin : : Cross ; <nl> - else <nl> - { <nl> - expected = " INNER | LEFT | RIGHT | FULL | CROSS " ; <nl> - return false ; <nl> - } <nl> - <nl> - if ( ! has_strictness & & join - > kind ! = ASTJoin : : Cross ) <nl> - throw Exception ( " You must specify ANY or ALL for JOIN , before INNER or LEFT or RIGHT or FULL . " , ErrorCodes : : SYNTAX_ERROR ) ; <nl> - <nl> - if ( has_strictness & & join - > kind = = ASTJoin : : Cross ) <nl> - throw Exception ( " You must not specify ANY or ALL for CROSS JOIN . " , ErrorCodes : : SYNTAX_ERROR ) ; <nl> - <nl> - ws . ignore ( pos , end ) ; <nl> - <nl> - / / / Для всех JOIN - ов кроме INNER и CROSS может присутствовать не обязательное слово " OUTER " . <nl> - if ( join - > kind ! = ASTJoin : : Inner & & join - > kind ! = ASTJoin : : Cross & & s_outer . ignore ( pos , end ) ) <nl> - ws . ignore ( pos , end ) ; <nl> - <nl> - if ( ! s_join . ignore ( pos , end , max_parsed_pos , expected ) ) <nl> - return false ; <nl> - <nl> - ws . ignore ( pos , end ) ; <nl> - <nl> - if ( ! identifier . parse ( pos , end , join - > table , max_parsed_pos , expected ) <nl> - & & ! subquery . parse ( pos , end , join - > table , max_parsed_pos , expected ) ) <nl> - return false ; <nl> - <nl> - ws . ignore ( pos , end ) ; <nl> - <nl> - if ( join - > kind ! = ASTJoin : : Cross ) <nl> - { <nl> - if ( s_using . ignore ( pos , end , max_parsed_pos , expected ) ) <nl> - { <nl> - ws . ignore ( pos , end ) ; <nl> - <nl> - / / / Выражение для USING можно указать как в скобках , так и без них . <nl> - bool in_parens = ParserString ( " ( " ) . ignore ( pos , end ) ; <nl> - if ( in_parens ) <nl> - ws . ignore ( pos , end ) ; <nl> - <nl> - if ( ! exp_list . parse ( pos , end , join - > using_expr_list , max_parsed_pos , expected ) ) <nl> - return false ; <nl> - <nl> - if ( in_parens ) <nl> - { <nl> - ws . ignore ( pos , end ) ; <nl> - if ( ! ParserString ( " ) " ) . ignore ( pos , end ) ) <nl> - return false ; <nl> - } <nl> - <nl> - ws . ignore ( pos , end ) ; <nl> - } <nl> - else if ( s_on . ignore ( pos , end , max_parsed_pos , expected ) ) <nl> - { <nl> - ws . ignore ( pos , end ) ; <nl> - <nl> - if ( ! exp_elem . parse ( pos , end , join - > on_expr , max_parsed_pos , expected ) ) <nl> - return false ; <nl> - <nl> - ws . ignore ( pos , end ) ; <nl> - } <nl> - else <nl> - { <nl> - expected = " USING or ON " ; <nl> - return false ; <nl> - } <nl> - } <nl> - <nl> - join - > children . push_back ( join - > table ) ; <nl> - <nl> - if ( join - > using_expr_list ) <nl> - join - > children . 
push_back ( join - > using_expr_list ) ; <nl> - <nl> - return true ; <nl> - } <nl> - <nl> - } <nl> mmm a / dbms / src / Parsers / ParserSelectQuery . cpp <nl> ppp b / dbms / src / Parsers / ParserSelectQuery . cpp <nl> <nl> # include < DB / Parsers / CommonParsers . h > <nl> # include < DB / Parsers / ExpressionElementParsers . h > <nl> # include < DB / Parsers / ExpressionListParsers . h > <nl> - # include < DB / Parsers / ParserJoin . h > <nl> # include < DB / Parsers / ParserSetQuery . h > <nl> # include < DB / Parsers / ParserSampleRatio . h > <nl> # include < DB / Parsers / ParserSelectQuery . h > <nl> bool ParserSelectQuery : : parseImpl ( Pos & pos , Pos end , ASTPtr & node , Pos & max_p <nl> ParserNotEmptyExpressionList exp_list ( false ) ; <nl> ParserNotEmptyExpressionList exp_list_for_select_clause ( true ) ; / / / Allows aliases without AS keyword . <nl> ParserExpressionWithOptionalAlias exp_elem ( false ) ; <nl> - ParserJoin join ; <nl> ParserOrderByExpressionList order_list ; <nl> <nl> ws . ignore ( pos , end ) ; <nl> mmm a / dbms / src / Parsers / ParserTablesInSelectQuery . cpp <nl> ppp b / dbms / src / Parsers / ParserTablesInSelectQuery . cpp <nl> bool ParserTableExpression : : parseImpl ( Pos & pos , Pos end , ASTPtr & node , Pos & m <nl> { <nl> static_cast < ASTFunction & > ( * res - > table_function ) . kind = ASTFunction : : TABLE_FUNCTION ; <nl> } <nl> - else if ( ParserCompoundIdentifier ( ) . parse ( pos , end , res - > database_and_table_name , max_parsed_pos , expected ) ) <nl> + else if ( ParserWithOptionalAlias ( std : : make_unique < ParserCompoundIdentifier > ( ) , true ) <nl> + . parse ( pos , end , res - > database_and_table_name , max_parsed_pos , expected ) ) <nl> { <nl> static_cast < ASTIdentifier & > ( * res - > database_and_table_name ) . kind = ASTIdentifier : : Table ; <nl> } <nl> bool ParserTablesInSelectQueryElement : : parseImpl ( Pos & pos , Pos end , ASTPtr & no <nl> { <nl> ws . ignore ( pos , end ) ; <nl> <nl> - if ( ! ParserExpressionElement ( ) . parse ( pos , end , table_join - > on_expression , max_parsed_pos , expected ) ) <nl> + / / / OR is operator with lowest priority , so start parsing from it . <nl> + if ( ! ParserLogicalOrExpression ( ) . parse ( pos , end , table_join - > on_expression , max_parsed_pos , expected ) ) <nl> return false ; <nl> <nl> ws . ignore ( pos , end ) ; <nl> bool ParserTablesInSelectQuery : : parseImpl ( Pos & pos , Pos end , ASTPtr & node , Pos <nl> while ( ParserTablesInSelectQueryElement ( false ) . parse ( pos , end , child , max_parsed_pos , expected ) ) <nl> res - > children . emplace_back ( child ) ; <nl> <nl> + node = res ; <nl> return true ; <nl> } <nl> <nl>
Rectification of JOINs [ # METR - 2944 ] .
ClickHouse/ClickHouse
9f9be1b7fc044f3c907eb7fc03a517ab2011ace3
2016-07-18T01:20:53Z
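
To make the rewritten formatter concrete: locality Global with strictness Any and kind Left now prints as "GLOBAL ANY LEFT JOIN"; kind Full prints "FULL OUTER JOIN"; kind Cross prints "CROSS JOIN" with no ANY/ALL prefix (the deleted ParserJoin rejected that combination outright); and kind Comma degenerates to a bare "," between table expressions. On the parsing side, ON conditions now go through ParserLogicalOrExpression, the lowest-priority operator parser, so a compound condition such as t1.a = t2.a OR t1.b = t2.b is accepted where the previous ParserExpressionElement stopped after a single element.
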
mmm a / src / arch / io / network . cc <nl> ppp b / src / arch / io / network . cc <nl> <nl> # else <nl> # include " windows . hpp " <nl> # include < ws2tcpip . h > <nl> - / / typedef int socklen_t ; <nl> + # include < mswsock . h > <nl> # endif <nl> # include < errno . h > <nl> # include < fcntl . h > <nl> <nl> # include " logger . hpp " <nl> # include " perfmon / perfmon . hpp " <nl> <nl> - int connect_ipv4_internal ( fd_t socket , int local_port , const in_addr & addr , int port ) { <nl> + LPFN_CONNECTEX get_ConnectEx ( SOCKET s ) { <nl> + LPFN_CONNECTEX ConnectEx = nullptr ; <nl> + if ( ! ConnectEx ) { <nl> + DWORD size = 0 ; <nl> + GUID id = WSAID_CONNECTEX ; <nl> + guarantee_winerr ( WSAIoctl ( s , SIO_GET_EXTENSION_FUNCTION_POINTER , <nl> + & id , sizeof ( id ) , & ConnectEx , sizeof ( ConnectEx ) , <nl> + & size , nullptr , nullptr ) ) ; <nl> + } <nl> + return ConnectEx ; <nl> + } <nl> + <nl> + void async_connect ( fd_t socket , sockaddr * sa , size_t sa_len , <nl> + event_watcher_t * event_watcher , signal_t * interuptor ) { <nl> + # ifdef _WIN32 <nl> + async_operation_t op ( event_watcher ) ; <nl> + BOOL res = get_ConnectEx ( socket ) ( socket , sa , sa_len , nullptr , 0 , nullptr , & op . overlapped ) ; <nl> + if ( res ) { <nl> + return ; <nl> + } <nl> + DWORD error = GetLastError ( ) ; <nl> + if ( error ! = ERROR_IO_PENDING ) { <nl> + rassert ( false , " ATN TODO : errno - - GetLastError " ) ; <nl> + throw linux_tcp_conn_t : : connect_failed_exc_t ( error ) ; <nl> + } <nl> + wait_interruptible ( & op . completed , interuptor ) ; <nl> + if ( op . error ) { <nl> + rassert ( false , " ATN TODO : errno - - GetLastError " ) ; <nl> + throw linux_tcp_conn_t : : connect_failed_exc_t ( op . error ) ; <nl> + } <nl> + # else <nl> + int res ; <nl> + do { <nl> + res = connect ( socket , sa , sa_len ) ; <nl> + } while ( res = = - 1 & & get_errno ( ) = = EINTR ) ; <nl> + <nl> + if ( res ! = 0 ) { <nl> + if ( get_errno ( ) = = EINPROGRESS ) { <nl> + linux_event_watcher_t : : watch_t watch ( event_watcher , poll_event_out ) ; <nl> + wait_interruptible ( & watch , interruptor ) ; <nl> + int error ; <nl> + socklen_t error_size = sizeof ( error ) ; <nl> + int getsockoptres = getsockopt ( sock . get ( ) , SOL_SOCKET , SO_ERROR , & error , & error_size ) ; <nl> + if ( getsockoptres ! = 0 ) { <nl> + throw linux_tcp_conn_t : : connect_failed_exc_t ( error ) ; <nl> + } <nl> + if ( error ! = 0 ) { <nl> + throw linux_tcp_conn_t : : connect_failed_exc_t ( error ) ; <nl> + } <nl> + } else { <nl> + throw linux_tcp_conn_t : : connect_failed_exc_t ( get_errno ( ) ) ; <nl> + } <nl> + } <nl> + # endif <nl> + } <nl> + <nl> + void connect_ipv4_internal ( fd_t socket , int local_port , const in_addr & addr , int port , event_watcher_t * event_watcher , signal_t * interuptor ) { <nl> struct sockaddr_in sa ; <nl> socklen_t sa_len ( sizeof ( sa ) ) ; <nl> memset ( & sa , 0 , sa_len ) ; <nl> int connect_ipv4_internal ( fd_t socket , int local_port , const in_addr & addr , int <nl> if ( local_port ! = 0 ) { <nl> sa . sin_port = htons ( local_port ) ; <nl> sa . sin_addr . s_addr = INADDR_ANY ; <nl> + / / TODO ATN : on Windows at least , bind can block . Can it block in this use case ? <nl> if ( bind ( socket , reinterpret_cast < sockaddr * > ( & sa ) , sa_len ) ! = 0 ) <nl> logWRN ( " Failed to bind to local port % d : % s " , local_port , errno_string ( get_errno ( ) ) . c_str ( ) ) ; <nl> } <nl> int connect_ipv4_internal ( fd_t socket , int local_port , const in_addr & addr , int <nl> sa . 
sin_port = htons ( port ) ; <nl> sa . sin_addr = addr ; <nl> <nl> - int res ; <nl> - do { <nl> - res = connect ( socket , reinterpret_cast < sockaddr * > ( & sa ) , sa_len ) ; <nl> - } while ( res = = - 1 & & get_errno ( ) = = EINTR ) ; <nl> - <nl> - return res ; <nl> + async_connect ( socket , reinterpret_cast < sockaddr * > ( & sa ) , sa_len , event_watcher , interuptor ) ; <nl> } <nl> <nl> - int connect_ipv6_internal ( fd_t socket , int local_port , const in6_addr & addr , int port , uint32_t scope_id ) { <nl> + void connect_ipv6_internal ( fd_t socket , int local_port , const in6_addr & addr , int port , uint32_t scope_id , event_watcher_t * event_watcher , signal_t * interuptor ) { <nl> struct sockaddr_in6 sa ; <nl> socklen_t sa_len ( sizeof ( sa ) ) ; <nl> memset ( & sa , 0 , sa_len ) ; <nl> int connect_ipv6_internal ( fd_t socket , int local_port , const in6_addr & addr , int <nl> sa . sin6_addr = addr ; <nl> sa . sin6_scope_id = scope_id ; <nl> <nl> - int res ; <nl> - do { <nl> - res = connect ( socket , reinterpret_cast < sockaddr * > ( & sa ) , sa_len ) ; <nl> - } while ( res = = - 1 & & get_errno ( ) = = EINTR ) ; <nl> - <nl> - return res ; <nl> + async_connect ( socket , reinterpret_cast < sockaddr * > ( & sa ) , sa_len , event_watcher , interuptor ) ; <nl> } <nl> <nl> / / TODO ATN : windows version of this using HANDLE and without get_errno <nl> linux_tcp_conn_t : : linux_tcp_conn_t ( const ip_address_t & peer , <nl> <nl> int res ; <nl> if ( peer . is_ipv4 ( ) ) { <nl> - res = connect_ipv4_internal ( sock . get ( ) , local_port , peer . get_ipv4_addr ( ) , port ) ; <nl> + connect_ipv4_internal ( sock . get ( ) , local_port , peer . get_ipv4_addr ( ) , port , event_watcher . get ( ) , interruptor ) ; <nl> } else { <nl> - res = connect_ipv6_internal ( sock . get ( ) , local_port , peer . get_ipv6_addr ( ) , port , <nl> - peer . get_ipv6_scope_id ( ) ) ; <nl> - } <nl> - <nl> - if ( res ! = 0 ) { <nl> - if ( get_errno ( ) = = EINPROGRESS ) { <nl> - linux_event_watcher_t : : watch_t watch ( event_watcher . get ( ) , poll_event_out ) ; <nl> - wait_interruptible ( & watch , interruptor ) ; <nl> - int error ; <nl> - socklen_t error_size = sizeof ( error ) ; <nl> - int getsockoptres = getsockopt ( sock . get ( ) , SOL_SOCKET , SO_ERROR , & error , & error_size ) ; <nl> - if ( getsockoptres ! = 0 ) { <nl> - throw linux_tcp_conn_t : : connect_failed_exc_t ( error ) ; <nl> - } <nl> - if ( error ! = 0 ) { <nl> - throw linux_tcp_conn_t : : connect_failed_exc_t ( error ) ; <nl> - } <nl> - } else { <nl> - throw linux_tcp_conn_t : : connect_failed_exc_t ( get_errno ( ) ) ; <nl> - } <nl> + connect_ipv6_internal ( sock . get ( ) , local_port , peer . get_ipv6_addr ( ) , port , <nl> + peer . get_ipv6_scope_id ( ) , event_watcher . get ( ) , interruptor ) ; <nl> } <nl> } <nl> <nl> linux_tcp_conn_t : : linux_tcp_conn_t ( fd_t s ) : <nl> drainer ( new auto_drainer_t ) { <nl> rassert ( sock . get ( ) ! = INVALID_FD ) ; <nl> <nl> + # ifndef _WIN32 / / TODO ATN <nl> int res = fcntl ( sock . get ( ) , F_SETFL , O_NONBLOCK ) ; <nl> guarantee_err ( res = = 0 , " Could not make socket non - blocking " ) ; <nl> + # endif <nl> } <nl> <nl> void linux_tcp_conn_t : : enable_keepalive ( ) { <nl> int optval = 1 ; <nl> + # ifdef _WIN32 / / TODO ATN <nl> + int res = setsockopt ( sock . get ( ) , SOL_SOCKET , SO_KEEPALIVE , reinterpret_cast < char * > ( & optval ) , sizeof ( optval ) ) ; <nl> + # else <nl> int res = setsockopt ( sock . 
get ( ) , SOL_SOCKET , SO_KEEPALIVE , & optval , sizeof ( optval ) ) ; <nl> + # endif <nl> guarantee ( res ! = - 1 , " Could not set SO_KEEPALIVE option . " ) ; <nl> } <nl> <nl> size_t linux_tcp_conn_t : : read_internal ( void * buffer , size_t size ) THROWS_ONLY ( tc <nl> assert_thread ( ) ; <nl> rassert ( ! read_closed . is_pulsed ( ) ) ; <nl> <nl> + # ifdef _WIN32 <nl> + / / TODO ATN : handle all cases <nl> + async_operation_t op ( event_watcher . get ( ) ) ; <nl> + DWORD sync_bytes ; <nl> + BOOL res = ReadFile ( sock . get ( ) , buffer , size , & sync_bytes , & op . overlapped ) ; <nl> + if ( res ) { <nl> + return sync_bytes ; <nl> + } <nl> + DWORD error = GetLastError ( ) ; <nl> + if ( error = = ERROR_IO_PENDING ) { <nl> + wait_any_t waiter ( & op . completed , & read_closed ) ; <nl> + waiter . wait_lazily_unordered ( ) ; <nl> + if ( read_closed . is_pulsed ( ) ) { <nl> + throw tcp_conn_read_closed_exc_t ( ) ; <nl> + } <nl> + error = op . error ; <nl> + } <nl> + if ( error ! = ERROR_SUCCESS ) { <nl> + logERR ( " Could not read from socket : % s " , winerr_string ( error ) . c_str ( ) ) ; <nl> + on_shutdown_read ( ) ; <nl> + throw tcp_conn_read_closed_exc_t ( ) ; <nl> + } else { <nl> + return op . nb_bytes ; <nl> + } <nl> + # else <nl> while ( true ) { <nl> ssize_t res = : : read ( sock . get ( ) , buffer , size ) ; <nl> <nl> size_t linux_tcp_conn_t : : read_internal ( void * buffer , size_t size ) THROWS_ONLY ( tc <nl> return res ; <nl> } <nl> } <nl> + # endif <nl> } <nl> <nl> size_t linux_tcp_conn_t : : read_some ( void * buf , size_t size , signal_t * closer ) THROWS_ONLY ( tcp_conn_read_closed_exc_t ) { <nl> void linux_tcp_conn_t : : pop ( size_t len , signal_t * closer ) THROWS_ONLY ( tcp_conn_re <nl> <nl> void linux_tcp_conn_t : : shutdown_read ( ) { <nl> assert_thread ( ) ; <nl> + # ifdef _WIN32 / / TODO ATN <nl> + int res = : : shutdown ( sock . get ( ) , SD_RECEIVE ) ; <nl> + if ( res ! = 0 & & GetLastError ( ) ! = WSAENOTCONN ) { <nl> + logERR ( " Could not shutdown socket for reading : % s " , winerr_string ( GetLastError ( ) ) . c_str ( ) ) ; <nl> + } <nl> + # else <nl> int res = : : shutdown ( sock . get ( ) , SHUT_RD ) ; <nl> if ( res ! = 0 & & get_errno ( ) ! = ENOTCONN ) { <nl> logERR ( " Could not shutdown socket for reading : % s " , errno_string ( get_errno ( ) ) . c_str ( ) ) ; <nl> } <nl> + # endif <nl> on_shutdown_read ( ) ; <nl> } <nl> <nl> void linux_tcp_conn_t : : perform_write ( const void * buf , size_t size ) { <nl> return ; <nl> } <nl> <nl> + # ifdef _WIN32 / / TODO ATN <nl> + / / TODO ATN <nl> + async_operation_t op ( event_watcher . get ( ) ) ; <nl> + DWORD sync_bytes ; <nl> + BOOL res = WriteFile ( sock . get ( ) , buf , size , & sync_bytes , & op . overlapped ) ; <nl> + if ( res ) { <nl> + return ; <nl> + } <nl> + DWORD error = GetLastError ( ) ; <nl> + if ( error = = ERROR_IO_PENDING ) { <nl> + wait_any_t waiter ( & op . completed , & write_closed ) ; <nl> + waiter . wait_lazily_unordered ( ) ; <nl> + if ( write_closed . is_pulsed ( ) ) { <nl> + throw tcp_conn_write_closed_exc_t ( ) ; <nl> + } <nl> + error = op . error ; <nl> + } <nl> + / * ATN TODO : <nl> + if ( res = = - 1 & & ( get_errno ( ) = = EPIPE | | get_errno ( ) = = ENOTCONN | | get_errno ( ) = = EHOSTUNREACH | | <nl> + get_errno ( ) = = ENETDOWN | | get_errno ( ) = = EHOSTDOWN | | get_errno ( ) = = ECONNRESET ) ) { <nl> + on_shutdown_write ( ) ; <nl> + * / <nl> + if ( error ! = ERROR_SUCCESS ) { <nl> + logERR ( " Could not write to socket : % s " , winerr_string ( error ) . 
c_str ( ) ) ; <nl> + on_shutdown_write ( ) ; <nl> + } else if ( op . nb_bytes = = 0 ) { <nl> + logERR ( " Didn ' t expect WriteEx to write 0 bytes . " ) ; <nl> + on_shutdown_write ( ) ; <nl> + } else { <nl> + if ( write_perfmon ) write_perfmon - > record ( op . nb_bytes ) ; <nl> + rassert ( op . nb_bytes = = size ) ; / / TODO ATN : does windows guarantee this ? <nl> + } <nl> + # else <nl> while ( size > 0 ) { <nl> ssize_t res = : : write ( sock . get ( ) , buf , size ) ; <nl> <nl> void linux_tcp_conn_t : : perform_write ( const void * buf , size_t size ) { <nl> if ( write_perfmon ) write_perfmon - > record ( res ) ; <nl> } <nl> } <nl> + # endif <nl> } <nl> <nl> void linux_tcp_conn_t : : write ( const void * buf , size_t size , signal_t * closer ) THROWS_ONLY ( tcp_conn_write_closed_exc_t ) { <nl> void linux_tcp_conn_t : : flush_buffer_eventually ( signal_t * closer ) THROWS_ONLY ( tcp <nl> void linux_tcp_conn_t : : shutdown_write ( ) { <nl> assert_thread ( ) ; <nl> <nl> + # ifdef _WIN32 / / TODO ATN <nl> + int res = : : shutdown ( sock . get ( ) , SD_SEND ) ; <nl> + if ( res ! = 0 & & GetLastError ( ) ! = WSAENOTCONN ) { <nl> + logERR ( " Could not shutdown socket for writing : % s " , winerr_string ( GetLastError ( ) ) . c_str ( ) ) ; <nl> + } <nl> + # else <nl> int res = : : shutdown ( sock . get ( ) , SHUT_WR ) ; <nl> if ( res ! = 0 & & get_errno ( ) ! = ENOTCONN ) { <nl> logERR ( " Could not shutdown socket for writing : % s " , errno_string ( get_errno ( ) ) . c_str ( ) ) ; <nl> } <nl> + # endif <nl> <nl> on_shutdown_write ( ) ; <nl> } <nl> linux_tcp_conn_t : : ~ linux_tcp_conn_t ( ) THROWS_NOTHING { <nl> <nl> void linux_tcp_conn_t : : rethread ( threadnum_t new_thread ) { <nl> if ( home_thread ( ) = = get_thread_id ( ) & & new_thread = = INVALID_THREAD ) { <nl> + rassert ( false , " ATN TODO " ) ; <nl> rassert ( ! read_in_progress ) ; <nl> rassert ( ! write_in_progress ) ; <nl> rassert ( event_watcher . has ( ) ) ; <nl> event_watcher . reset ( ) ; <nl> <nl> } else if ( home_thread ( ) = = INVALID_THREAD & & new_thread = = get_thread_id ( ) ) { <nl> + rassert ( false , " ATN TODO " ) ; <nl> rassert ( ! event_watcher . has ( ) ) ; <nl> - event_watcher . init ( new linux_event_watcher_t ( sock . get ( ) , this ) ) ; <nl> + event_watcher . init ( new event_watcher_t ( sock . get ( ) , this ) ) ; <nl> <nl> } else { <nl> crash ( " linux_tcp_conn_t can be rethread ( ) ed from no thread to the current thread or " <nl> void linux_tcp_conn_t : : on_event ( int / * events * / ) { <nl> } <nl> <nl> linux_tcp_conn_descriptor_t : : linux_tcp_conn_descriptor_t ( fd_t fd ) : fd_ ( fd ) { <nl> - rassert ( fd ! = - 1 ) ; <nl> + rassert ( fd ! = INVALID_FD ) ; <nl> } <nl> <nl> linux_tcp_conn_descriptor_t : : ~ linux_tcp_conn_descriptor_t ( ) { <nl> - rassert ( fd_ = = - 1 ) ; <nl> + rassert ( fd_ = = INVALID_FD ) ; <nl> } <nl> <nl> void linux_tcp_conn_descriptor_t : : make_overcomplicated ( scoped_ptr_t < linux_tcp_conn_t > * tcp_conn ) { <nl> bool linux_nonthrowing_tcp_listener_t : : begin_listening ( ) { <nl> int res = listen ( socks [ i ] . get ( ) , RDB_LISTEN_BACKLOG ) ; <nl> guarantee_err ( res = = 0 , " Couldn ' t listen to the socket " ) ; <nl> <nl> + # ifndef _WIN32 / / TODO ATN <nl> res = fcntl ( socks [ i ] . 
get ( ) , F_SETFL , O_NONBLOCK ) ; <nl> guarantee_err ( res = = 0 , " Could not make socket non - blocking " ) ; <nl> + # endif <nl> } <nl> <nl> / / Start the accept loop <nl> int linux_nonthrowing_tcp_listener_t : : init_sockets ( ) { <nl> return get_errno ( ) ; <nl> } <nl> <nl> - event_watchers [ i ] . init ( new linux_event_watcher_t ( socks [ i ] . get ( ) , this ) ) ; <nl> + event_watchers [ i ] . init ( new event_watcher_t ( socks [ i ] . get ( ) , this ) ) ; <nl> <nl> int sock_fd = socks [ i ] . get ( ) ; <nl> guarantee_err ( sock_fd ! = INVALID_FD , " Couldn ' t create socket " ) ; <nl> <nl> int sockoptval = 1 ; <nl> - int res = setsockopt ( sock_fd , SOL_SOCKET , SO_REUSEADDR , & sockoptval , sizeof ( sockoptval ) ) ; <nl> + # ifndef _WIN32 / / TODO ATN <nl> + int res = setsockopt ( sock_fd , SOL_SOCKET , SO_REUSEADDR , & sockoptval , sizeof ( sockoptval ) ) ; <nl> guarantee_err ( res ! = - 1 , " Could not set REUSEADDR option " ) ; <nl> <nl> / * XXX Making our socket NODELAY prevents the problem where responses to <nl> int linux_nonthrowing_tcp_listener_t : : init_sockets ( ) { <nl> * / <nl> res = setsockopt ( sock_fd , IPPROTO_TCP , TCP_NODELAY , & sockoptval , sizeof ( sockoptval ) ) ; <nl> guarantee_err ( res ! = - 1 , " Could not set TCP_NODELAY option " ) ; <nl> + # undef <nl> } <nl> return 0 ; <nl> } <nl> void linux_nonthrowing_tcp_listener_t : : bind_sockets ( ) { <nl> throw tcp_socket_exc_t ( EADDRINUSE , port ) ; <nl> } <nl> <nl> + # ifndef _WIN32 <nl> fd_t linux_nonthrowing_tcp_listener_t : : wait_for_any_socket ( const auto_drainer_t : : lock_t & lock ) { <nl> scoped_array_t < scoped_ptr_t < linux_event_watcher_t : : watch_t > > watches ( event_watchers . size ( ) ) ; <nl> wait_any_t waiter ( lock . get_drain_signal ( ) ) ; <nl> fd_t linux_nonthrowing_tcp_listener_t : : wait_for_any_socket ( const auto_drainer_t : <nl> / / This should never happen , but it shouldn ' t be much of a problem <nl> return - 1 ; <nl> } <nl> + # endif <nl> <nl> void linux_nonthrowing_tcp_listener_t : : accept_loop ( auto_drainer_t : : lock_t lock ) { <nl> exponential_backoff_t backoff ( 10 , 160 , 2 . 0 , 0 . 5 ) ; <nl>
stash : WIP Windows port of TCP networking ( async connect via ConnectEx , overlapped socket reads / writes )
rethinkdb/rethinkdb
fd3106e69ada1faedb416aebaa7d066e1d95e1fb
2015-08-11T11:48:23Z
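
The heart of the port is the classic Win32 overlapped-I/O pattern: issue the operation, and if it does not complete synchronously, ERROR_IO_PENDING means the completion will arrive later. Below is a trimmed, event-based sketch of the read path; the names are illustrative, and where this blocks on an event the real code instead parks a coroutine on op.completed so other coroutines keep running.

#include <winsock2.h>
#include <windows.h>

DWORD OverlappedRead(SOCKET sock, void* buf, DWORD size) {
  OVERLAPPED ov = {};
  ov.hEvent = CreateEvent(nullptr, TRUE, FALSE, nullptr);
  DWORD bytes = 0;
  // A SOCKET is a kernel HANDLE, so ReadFile works on it directly.
  if (!ReadFile(reinterpret_cast<HANDLE>(sock), buf, size, &bytes, &ov)) {
    if (GetLastError() != ERROR_IO_PENDING) {
      CloseHandle(ov.hEvent);
      return 0;  // hard failure: the caller would shut down the read side
    }
    // Block until the pending read finishes and fetch its byte count.
    GetOverlappedResult(reinterpret_cast<HANDLE>(sock), &ov, &bytes, TRUE);
  }
  CloseHandle(ov.hEvent);
  return bytes;
}
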
mmm a / include / v8 . h <nl> ppp b / include / v8 . h <nl> class V8EXPORT Context { <nl> * to the user of V8 to ensure ( perhaps with locking ) that this <nl> * constraint is not violated . <nl> * <nl> - * v8 : : Locker is a scoped lock object . While it ' s <nl> - * active ( i . e . between its construction and destruction ) the current thread is <nl> - * allowed to use the locked isolate . V8 guarantees that an isolate can be locked <nl> - * by at most one thread at any time . In other words , the scope of a v8 : : Locker is <nl> - * a critical section . <nl> + * More then one thread and multiple V8 isolates can be used <nl> + * without any locking if each isolate is created and accessed <nl> + * by a single thread only . For example , one thread can use <nl> + * multiple isolates or multiple threads can each create and run <nl> + * their own isolate . <nl> * <nl> - * Sample usage : <nl> - * \ code <nl> + * If you wish to start using V8 isolate in more then one thread <nl> + * you can do this by constructing a v8 : : Locker object to guard <nl> + * access to the isolate . After the code using V8 has completed <nl> + * for the current thread you can call the destructor . This can <nl> + * be combined with C + + scope - based construction as follows <nl> + * ( assumes the default isolate that is used if not specified as <nl> + * a parameter for the Locker ) : <nl> + * <nl> + * \ code <nl> * . . . <nl> * { <nl> - * v8 : : Locker locker ( isolate ) ; <nl> - * v8 : : Isolate : : Scope isolate_scope ( isolate ) ; <nl> + * v8 : : Locker locker ; <nl> * . . . <nl> - * / / Code using V8 and isolate goes here . <nl> + * / / Code using V8 goes here . <nl> * . . . <nl> * } / / Destructor called here <nl> * \ endcode <nl> class V8EXPORT Context { <nl> * <nl> * \ code <nl> * { <nl> - * isolate - > Exit ( ) ; <nl> - * v8 : : Unlocker unlocker ( isolate ) ; <nl> + * v8 : : Unlocker unlocker ; <nl> * . . . <nl> * / / Code not using V8 goes here while V8 can run in another thread . <nl> * . . . <nl> * } / / Destructor called here . <nl> - * isolate - > Enter ( ) ; <nl> * \ endcode <nl> * <nl> * The Unlocker object is intended for use in a long - running callback <nl> class V8EXPORT Context { <nl> * \ code <nl> * / / V8 not locked . <nl> * { <nl> - * v8 : : Locker locker ( isolate ) ; <nl> - * Isolate : : Scope isolate_scope ( isolate ) ; <nl> + * v8 : : Locker locker ; <nl> * / / V8 locked . <nl> * { <nl> - * v8 : : Locker another_locker ( isolate ) ; <nl> + * v8 : : Locker another_locker ; <nl> * / / V8 still locked ( 2 levels ) . <nl> * { <nl> - * isolate - > Exit ( ) ; <nl> - * v8 : : Unlocker unlocker ( isolate ) ; <nl> + * v8 : : Unlocker unlocker ; <nl> * / / V8 not locked . <nl> * } <nl> - * isolate - > Enter ( ) ; <nl> * / / V8 locked again ( 2 levels ) . <nl> * } <nl> * / / V8 still locked ( 1 level ) . <nl> * } <nl> * / / V8 Now no longer locked . <nl> * \ endcode <nl> - * <nl> - * <nl> * / <nl> class V8EXPORT Unlocker { <nl> public : <nl> - / * * <nl> - * Initialize Unlocker for a given Isolate . NULL means default isolate . <nl> - * / <nl> - explicit Unlocker ( Isolate * isolate = NULL ) ; <nl> + Unlocker ( ) ; <nl> ~ Unlocker ( ) ; <nl> - private : <nl> - internal : : Isolate * isolate_ ; <nl> } ; <nl> <nl> <nl> class V8EXPORT Locker { <nl> public : <nl> - / * * <nl> - * Initialize Locker for a given Isolate . NULL means default isolate . 
<nl> - * / <nl> - explicit Locker ( Isolate * isolate = NULL ) ; <nl> + Locker ( ) ; <nl> ~ Locker ( ) ; <nl> <nl> / * * <nl> class V8EXPORT Locker { <nl> static void StopPreemption ( ) ; <nl> <nl> / * * <nl> - * Returns whether or not the locker for a given isolate , or default isolate if NULL is given , <nl> - * is locked by the current thread . <nl> + * Returns whether or not the locker is locked by the current thread . <nl> * / <nl> - static bool IsLocked ( Isolate * isolate = NULL ) ; <nl> + static bool IsLocked ( ) ; <nl> <nl> / * * <nl> * Returns whether v8 : : Locker is being used by this V8 instance . <nl> class V8EXPORT Locker { <nl> private : <nl> bool has_lock_ ; <nl> bool top_level_ ; <nl> - internal : : Isolate * isolate_ ; <nl> <nl> static bool active_ ; <nl> <nl> mmm a / src / api . cc <nl> ppp b / src / api . cc <nl> <nl> <nl> # define LOG_API ( isolate , expr ) LOG ( isolate , ApiEntryCall ( expr ) ) <nl> <nl> + / / TODO ( isolates ) : avoid repeated TLS reads in function prologues . <nl> # ifdef ENABLE_VMSTATE_TRACKING <nl> # define ENTER_V8 ( isolate ) \ <nl> ASSERT ( ( isolate ) - > IsInitialized ( ) ) ; \ <nl> static inline bool EnsureInitializedForIsolate ( i : : Isolate * isolate , <nl> if ( isolate ! = NULL ) { <nl> if ( isolate - > IsInitialized ( ) ) return true ; <nl> } <nl> - ASSERT ( isolate = = i : : Isolate : : Current ( ) ) ; <nl> return ApiCheck ( InitializeHelper ( ) , location , " Error initializing V8 " ) ; <nl> } <nl> <nl> void HandleScopeImplementer : : FreeThreadResources ( ) { <nl> <nl> <nl> char * HandleScopeImplementer : : ArchiveThread ( char * storage ) { <nl> + Isolate * isolate = Isolate : : Current ( ) ; <nl> v8 : : ImplementationUtilities : : HandleScopeData * current = <nl> - isolate_ - > handle_scope_data ( ) ; <nl> + isolate - > handle_scope_data ( ) ; <nl> handle_scope_data_ = * current ; <nl> memcpy ( storage , this , sizeof ( * this ) ) ; <nl> <nl> int HandleScopeImplementer : : ArchiveSpacePerThread ( ) { <nl> <nl> char * HandleScopeImplementer : : RestoreThread ( char * storage ) { <nl> memcpy ( this , storage , sizeof ( * this ) ) ; <nl> - * isolate_ - > handle_scope_data ( ) = handle_scope_data_ ; <nl> + * Isolate : : Current ( ) - > handle_scope_data ( ) = handle_scope_data_ ; <nl> return storage + ArchiveSpacePerThread ( ) ; <nl> } <nl> <nl> void HandleScopeImplementer : : IterateThis ( ObjectVisitor * v ) { <nl> <nl> void HandleScopeImplementer : : Iterate ( ObjectVisitor * v ) { <nl> v8 : : ImplementationUtilities : : HandleScopeData * current = <nl> - isolate_ - > handle_scope_data ( ) ; <nl> + Isolate : : Current ( ) - > handle_scope_data ( ) ; <nl> handle_scope_data_ = * current ; <nl> IterateThis ( v ) ; <nl> } <nl> mmm a / src / api . h <nl> ppp b / src / api . h <nl> class StringTracker { <nl> ISOLATED_CLASS HandleScopeImplementer { <nl> public : <nl> <nl> - explicit HandleScopeImplementer ( Isolate * isolate ) <nl> - : isolate_ ( isolate ) , <nl> - blocks_ ( 0 ) , <nl> + HandleScopeImplementer ( ) <nl> + : blocks_ ( 0 ) , <nl> entered_contexts_ ( 0 ) , <nl> saved_contexts_ ( 0 ) , <nl> spare_ ( NULL ) , <nl> ISOLATED_CLASS HandleScopeImplementer { <nl> ASSERT ( call_depth_ = = 0 ) ; <nl> } <nl> <nl> - Isolate * isolate_ ; <nl> List < internal : : Object * * > blocks_ ; <nl> / / Used as a stack to keep track of entered contexts . <nl> List < Handle < Object > > entered_contexts_ ; <nl> mmm a / src / execution . cc <nl> ppp b / src / execution . 
cc <nl> void StackGuard : : ClearThread ( const ExecutionAccess & lock ) { <nl> <nl> void StackGuard : : InitThread ( const ExecutionAccess & lock ) { <nl> if ( thread_local_ . Initialize ( ) ) isolate_ - > heap ( ) - > SetStackLimits ( ) ; <nl> - Isolate : : PerIsolateThreadData * per_thread = <nl> - isolate_ - > FindOrAllocatePerThreadDataForThisThread ( ) ; <nl> - uintptr_t stored_limit = per_thread - > stack_limit ( ) ; <nl> + uintptr_t stored_limit = <nl> + Isolate : : CurrentPerIsolateThreadData ( ) - > stack_limit ( ) ; <nl> / / You should hold the ExecutionAccess lock when you call this . <nl> if ( stored_limit ! = 0 ) { <nl> StackGuard : : SetStackLimit ( stored_limit ) ; <nl> mmm a / src / isolate . cc <nl> ppp b / src / isolate . cc <nl> Isolate : : PerIsolateThreadData * <nl> } <nl> <nl> <nl> - Isolate : : PerIsolateThreadData * Isolate : : FindPerThreadDataForThisThread ( ) { <nl> - ThreadId thread_id = ThreadId : : Current ( ) ; <nl> - PerIsolateThreadData * per_thread = NULL ; <nl> - { <nl> - ScopedLock lock ( process_wide_mutex_ ) ; <nl> - per_thread = thread_data_table_ - > Lookup ( this , thread_id ) ; <nl> - } <nl> - return per_thread ; <nl> - } <nl> - <nl> - <nl> void Isolate : : EnsureDefaultIsolate ( ) { <nl> ScopedLock lock ( process_wide_mutex_ ) ; <nl> if ( default_isolate_ = = NULL ) { <nl> void Isolate : : EnsureDefaultIsolate ( ) { <nl> } <nl> / / Can ' t use SetIsolateThreadLocals ( default_isolate_ , NULL ) here <nl> / / because a non - null thread data may be already set . <nl> - if ( Thread : : GetThreadLocal ( isolate_key_ ) = = NULL ) { <nl> - Thread : : SetThreadLocal ( isolate_key_ , default_isolate_ ) ; <nl> - } <nl> + Thread : : SetThreadLocal ( isolate_key_ , default_isolate_ ) ; <nl> CHECK ( default_isolate_ - > PreInit ( ) ) ; <nl> } <nl> <nl> Isolate : : Isolate ( ) <nl> zone_ . isolate_ = this ; <nl> stack_guard_ . isolate_ = this ; <nl> <nl> - / / ThreadManager is initialized early to support locking an isolate <nl> - / / before it is entered . <nl> - thread_manager_ = new ThreadManager ( ) ; <nl> - thread_manager_ - > isolate_ = this ; <nl> - <nl> # if defined ( V8_TARGET_ARCH_ARM ) & & ! defined ( __arm__ ) | | \ <nl> defined ( V8_TARGET_ARCH_MIPS ) & & ! defined ( __mips__ ) <nl> simulator_initialized_ = false ; <nl> bool Isolate : : PreInit ( ) { <nl> TRACE_ISOLATE ( preinit ) ; <nl> <nl> ASSERT ( Isolate : : Current ( ) = = this ) ; <nl> + <nl> # ifdef ENABLE_DEBUGGER_SUPPORT <nl> debug_ = new Debug ( this ) ; <nl> debugger_ = new Debugger ( ) ; <nl> bool Isolate : : PreInit ( ) { <nl> <nl> string_tracker_ = new StringTracker ( ) ; <nl> string_tracker_ - > isolate_ = this ; <nl> + thread_manager_ = new ThreadManager ( ) ; <nl> + thread_manager_ - > isolate_ = this ; <nl> compilation_cache_ = new CompilationCache ( this ) ; <nl> transcendental_cache_ = new TranscendentalCache ( ) ; <nl> keyed_lookup_cache_ = new KeyedLookupCache ( ) ; <nl> bool Isolate : : PreInit ( ) { <nl> write_input_buffer_ = new StringInputBuffer ( ) ; <nl> global_handles_ = new GlobalHandles ( this ) ; <nl> bootstrapper_ = new Bootstrapper ( ) ; <nl> - handle_scope_implementer_ = new HandleScopeImplementer ( this ) ; <nl> + handle_scope_implementer_ = new HandleScopeImplementer ( ) ; <nl> stub_cache_ = new StubCache ( this ) ; <nl> ast_sentinels_ = new AstSentinels ( ) ; <nl> regexp_stack_ = new RegExpStack ( ) ; <nl> bool Isolate : : PreInit ( ) { <nl> <nl> <nl> void Isolate : : InitializeThreadLocal ( ) { <nl> - thread_local_top_ .
isolate_ = this ; <nl> thread_local_top_ . Initialize ( ) ; <nl> clear_pending_exception ( ) ; <nl> clear_pending_message ( ) ; <nl> mmm a / src / isolate . h <nl> ppp b / src / isolate . h <nl> class ThreadLocalTop BASE_EMBEDDED { <nl> ASSERT ( try_catch_handler_address_ = = NULL ) ; <nl> } <nl> <nl> - Isolate * isolate_ ; <nl> / / The context where the current execution method is created and for variable <nl> / / lookups . <nl> Context * context_ ; <nl> class Isolate { <nl> / / Safe to call multiple times . <nl> static void EnsureDefaultIsolate ( ) ; <nl> <nl> - / / Find the PerThread for this particular ( isolate , thread ) combination <nl> - / / If one does not yet exist , return null . <nl> - PerIsolateThreadData * FindPerThreadDataForThisThread ( ) ; <nl> - <nl> # ifdef ENABLE_DEBUGGER_SUPPORT <nl> / / Get the debugger from the default isolate . Preinitializes the <nl> / / default isolate if needed . <nl> class Isolate { <nl> / / If one does not yet exist , allocate a new one . <nl> PerIsolateThreadData * FindOrAllocatePerThreadDataForThisThread ( ) ; <nl> <nl> - / / PreInits and returns a default isolate . Needed when a new thread tries <nl> + / / PreInits and returns a default isolate . Needed when a new thread tries <nl> / / to create a Locker for the first time ( the lock itself is in the isolate ) . <nl> static Isolate * GetDefaultIsolateForLocking ( ) ; <nl> <nl> class Isolate { <nl> <nl> friend class ExecutionAccess ; <nl> friend class IsolateInitializer ; <nl> - friend class ThreadManager ; <nl> - friend class StackGuard ; <nl> friend class ThreadId ; <nl> friend class v8 : : Isolate ; <nl> friend class v8 : : Locker ; <nl> - friend class v8 : : Unlocker ; <nl> <nl> DISALLOW_COPY_AND_ASSIGN ( Isolate ) ; <nl> } ; <nl> mmm a / src / objects . cc <nl> ppp b / src / objects . cc <nl> int Relocatable : : ArchiveSpacePerThread ( ) { <nl> <nl> <nl> / / Archive statics that are thread local . <nl> - char * Relocatable : : ArchiveState ( Isolate * isolate , char * to ) { <nl> + char * Relocatable : : ArchiveState ( char * to ) { <nl> + Isolate * isolate = Isolate : : Current ( ) ; <nl> * reinterpret_cast < Relocatable * * > ( to ) = isolate - > relocatable_top ( ) ; <nl> isolate - > set_relocatable_top ( NULL ) ; <nl> return to + ArchiveSpacePerThread ( ) ; <nl> char * Relocatable : : ArchiveState ( Isolate * isolate , char * to ) { <nl> <nl> <nl> / / Restore statics that are thread local . <nl> - char * Relocatable : : RestoreState ( Isolate * isolate , char * from ) { <nl> + char * Relocatable : : RestoreState ( char * from ) { <nl> + Isolate * isolate = Isolate : : Current ( ) ; <nl> isolate - > set_relocatable_top ( * reinterpret_cast < Relocatable * * > ( from ) ) ; <nl> return from + ArchiveSpacePerThread ( ) ; <nl> } <nl> mmm a / src / objects . h <nl> ppp b / src / objects . h <nl> class Relocatable BASE_EMBEDDED { <nl> <nl> static void PostGarbageCollectionProcessing ( ) ; <nl> static int ArchiveSpacePerThread ( ) ; <nl> - static char * ArchiveState ( Isolate * isolate , char * to ) ; <nl> - static char * RestoreState ( Isolate * isolate , char * from ) ; <nl> + static char * ArchiveState ( char * to ) ; <nl> + static char * RestoreState ( char * from ) ; <nl> static void Iterate ( ObjectVisitor * v ) ; <nl> static void Iterate ( ObjectVisitor * v , Relocatable * top ) ; <nl> static char * Iterate ( ObjectVisitor * v , char * t ) ; <nl> mmm a / src / top . cc <nl> ppp b / src / top . 
cc <nl> void ThreadLocalTop : : Initialize ( ) { <nl> InitializeInternal ( ) ; <nl> # ifdef USE_SIMULATOR <nl> # ifdef V8_TARGET_ARCH_ARM <nl> - simulator_ = Simulator : : current ( isolate_ ) ; <nl> + simulator_ = Simulator : : current ( Isolate : : Current ( ) ) ; <nl> # elif V8_TARGET_ARCH_MIPS <nl> - simulator_ = Simulator : : current ( isolate_ ) ; <nl> + simulator_ = Simulator : : current ( Isolate : : Current ( ) ) ; <nl> # endif <nl> # endif <nl> thread_id_ = ThreadId : : Current ( ) ; <nl> mmm a / src / v8threads . cc <nl> ppp b / src / v8threads . cc <nl> bool Locker : : active_ = false ; <nl> <nl> <nl> / / Constructor for the Locker object . Once the Locker is constructed the <nl> - / / current thread will be guaranteed to have the lock for a given isolate . <nl> - Locker : : Locker ( v8 : : Isolate * isolate ) <nl> - : has_lock_ ( false ) , <nl> - top_level_ ( false ) , <nl> - isolate_ ( reinterpret_cast < i : : Isolate * > ( isolate ) ) { <nl> - if ( isolate_ = = NULL ) { <nl> - isolate_ = i : : Isolate : : GetDefaultIsolateForLocking ( ) ; <nl> - } <nl> + / / current thread will be guaranteed to have the big V8 lock . <nl> + Locker : : Locker ( ) : has_lock_ ( false ) , top_level_ ( true ) { <nl> + / / TODO ( isolates ) : When Locker has Isolate parameter and it is provided , grab <nl> + / / that one instead of using the current one . <nl> + / / We pull default isolate for Locker constructor w / o parameter . <nl> + / / A thread should not enter an isolate before acquiring a lock , <nl> + / / in cases which mandate using Lockers . <nl> + / / So getting a lock is the first thing threads do in a scenario where <nl> + / / multiple threads share an isolate . Hence , we need to access <nl> + / / ' locking isolate ' before we can actually enter into default isolate . <nl> + internal : : Isolate * isolate = internal : : Isolate : : GetDefaultIsolateForLocking ( ) ; <nl> + ASSERT ( isolate ! = NULL ) ; <nl> + <nl> / / Record that the Locker has been used at least once . <nl> active_ = true ; <nl> / / Get the big lock if necessary . <nl> - if ( ! isolate_ - > thread_manager ( ) - > IsLockedByCurrentThread ( ) ) { <nl> - isolate_ - > thread_manager ( ) - > Lock ( ) ; <nl> + if ( ! isolate - > thread_manager ( ) - > IsLockedByCurrentThread ( ) ) { <nl> + isolate - > thread_manager ( ) - > Lock ( ) ; <nl> has_lock_ = true ; <nl> <nl> + if ( isolate - > IsDefaultIsolate ( ) ) { <nl> + / / This only enters if not yet entered . <nl> + internal : : Isolate : : EnterDefaultIsolate ( ) ; <nl> + } <nl> + <nl> + ASSERT ( internal : : Thread : : HasThreadLocal ( <nl> + internal : : Isolate : : thread_id_key ( ) ) ) ; <nl> + <nl> / / Make sure that V8 is initialized . Archiving of threads interferes <nl> / / with deserialization by adding additional root pointers , so we must <nl> / / initialize here , before anyone can call ~ Locker ( ) or Unlocker ( ) . <nl> - if ( ! isolate_ - > IsInitialized ( ) ) { <nl> - isolate_ - > Enter ( ) ; <nl> + if ( ! isolate - > IsInitialized ( ) ) { <nl> V8 : : Initialize ( ) ; <nl> - isolate_ - > Exit ( ) ; <nl> } <nl> / / This may be a locker within an unlocker in which case we have to <nl> / / get the saved state for this thread and restore it .
<nl> - if ( isolate_ - > thread_manager ( ) - > RestoreThread ( ) ) { <nl> + if ( isolate - > thread_manager ( ) - > RestoreThread ( ) ) { <nl> top_level_ = false ; <nl> } else { <nl> - internal : : ExecutionAccess access ( isolate_ ) ; <nl> - isolate_ - > stack_guard ( ) - > ClearThread ( access ) ; <nl> - isolate_ - > stack_guard ( ) - > InitThread ( access ) ; <nl> - } <nl> - if ( isolate_ - > IsDefaultIsolate ( ) ) { <nl> - / / This only enters if not yet entered . <nl> - internal : : Isolate : : EnterDefaultIsolate ( ) ; <nl> + internal : : ExecutionAccess access ( isolate ) ; <nl> + isolate - > stack_guard ( ) - > ClearThread ( access ) ; <nl> + isolate - > stack_guard ( ) - > InitThread ( access ) ; <nl> } <nl> } <nl> - ASSERT ( isolate_ - > thread_manager ( ) - > IsLockedByCurrentThread ( ) ) ; <nl> + ASSERT ( isolate - > thread_manager ( ) - > IsLockedByCurrentThread ( ) ) ; <nl> } <nl> <nl> <nl> - bool Locker : : IsLocked ( v8 : : Isolate * isolate ) { <nl> - i : : Isolate * internal_isolate = reinterpret_cast < i : : Isolate * > ( isolate ) ; <nl> - if ( internal_isolate = = NULL ) { <nl> - internal_isolate = i : : Isolate : : GetDefaultIsolateForLocking ( ) ; <nl> - } <nl> - return internal_isolate - > thread_manager ( ) - > IsLockedByCurrentThread ( ) ; <nl> + bool Locker : : IsLocked ( ) { <nl> + return internal : : Isolate : : Current ( ) - > thread_manager ( ) - > <nl> + IsLockedByCurrentThread ( ) ; <nl> } <nl> <nl> <nl> Locker : : ~ Locker ( ) { <nl> - ASSERT ( isolate_ - > thread_manager ( ) - > IsLockedByCurrentThread ( ) ) ; <nl> + / / TODO ( isolate ) : this should use a field storing the isolate it <nl> + / / locked instead . <nl> + internal : : Isolate * isolate = internal : : Isolate : : Current ( ) ; <nl> + ASSERT ( isolate - > thread_manager ( ) - > IsLockedByCurrentThread ( ) ) ; <nl> if ( has_lock_ ) { <nl> - if ( isolate_ - > IsDefaultIsolate ( ) ) { <nl> - isolate_ - > Exit ( ) ; <nl> - } <nl> if ( top_level_ ) { <nl> - isolate_ - > thread_manager ( ) - > FreeThreadResources ( ) ; <nl> + isolate - > thread_manager ( ) - > FreeThreadResources ( ) ; <nl> } else { <nl> - isolate_ - > thread_manager ( ) - > ArchiveThread ( ) ; <nl> + isolate - > thread_manager ( ) - > ArchiveThread ( ) ; <nl> } <nl> - isolate_ - > thread_manager ( ) - > Unlock ( ) ; <nl> + isolate - > thread_manager ( ) - > Unlock ( ) ; <nl> } <nl> } <nl> <nl> <nl> - Unlocker : : Unlocker ( v8 : : Isolate * isolate ) <nl> - : isolate_ ( reinterpret_cast < i : : Isolate * > ( isolate ) ) { <nl> - if ( isolate_ = = NULL ) { <nl> - isolate_ = i : : Isolate : : GetDefaultIsolateForLocking ( ) ; <nl> - } <nl> - ASSERT ( isolate_ - > thread_manager ( ) - > IsLockedByCurrentThread ( ) ) ; <nl> - if ( isolate_ - > IsDefaultIsolate ( ) ) { <nl> - isolate_ - > Exit ( ) ; <nl> - } <nl> - isolate_ - > thread_manager ( ) - > ArchiveThread ( ) ; <nl> - isolate_ - > thread_manager ( ) - > Unlock ( ) ; <nl> + Unlocker : : Unlocker ( ) { <nl> + internal : : Isolate * isolate = internal : : Isolate : : Current ( ) ; <nl> + ASSERT ( isolate - > thread_manager ( ) - > IsLockedByCurrentThread ( ) ) ; <nl> + isolate - > thread_manager ( ) - > ArchiveThread ( ) ; <nl> + isolate - > thread_manager ( ) - > Unlock ( ) ; <nl> } <nl> <nl> <nl> Unlocker : : ~ Unlocker ( ) { <nl> - ASSERT ( ! 
isolate_ - > thread_manager ( ) - > IsLockedByCurrentThread ( ) ) ; <nl> - isolate_ - > thread_manager ( ) - > Lock ( ) ; <nl> - isolate_ - > thread_manager ( ) - > RestoreThread ( ) ; <nl> - if ( isolate_ - > IsDefaultIsolate ( ) ) { <nl> - isolate_ - > Enter ( ) ; <nl> - } <nl> + / / TODO ( isolates ) : check it ' s the isolate we unlocked . <nl> + internal : : Isolate * isolate = internal : : Isolate : : Current ( ) ; <nl> + ASSERT ( ! isolate - > thread_manager ( ) - > IsLockedByCurrentThread ( ) ) ; <nl> + isolate - > thread_manager ( ) - > Lock ( ) ; <nl> + isolate - > thread_manager ( ) - > RestoreThread ( ) ; <nl> } <nl> <nl> <nl> namespace internal { <nl> <nl> <nl> bool ThreadManager : : RestoreThread ( ) { <nl> - ASSERT ( IsLockedByCurrentThread ( ) ) ; <nl> / / First check whether the current thread has been ' lazily archived ' , ie <nl> / / not archived at all . If that is the case we put the state storage we <nl> / / had prepared back in the free list , since we didn ' t need it after all . <nl> if ( lazily_archived_thread_ . Equals ( ThreadId : : Current ( ) ) ) { <nl> lazily_archived_thread_ = ThreadId : : Invalid ( ) ; <nl> - Isolate : : PerIsolateThreadData * per_thread = <nl> - isolate_ - > FindPerThreadDataForThisThread ( ) ; <nl> - ASSERT ( per_thread ! = NULL ) ; <nl> - ASSERT ( per_thread - > thread_state ( ) = = lazily_archived_thread_state_ ) ; <nl> + ASSERT ( Isolate : : CurrentPerIsolateThreadData ( ) - > thread_state ( ) = = <nl> + lazily_archived_thread_state_ ) ; <nl> lazily_archived_thread_state_ - > set_id ( ThreadId : : Invalid ( ) ) ; <nl> lazily_archived_thread_state_ - > LinkInto ( ThreadState : : FREE_LIST ) ; <nl> lazily_archived_thread_state_ = NULL ; <nl> - per_thread - > set_thread_state ( NULL ) ; <nl> + Isolate : : CurrentPerIsolateThreadData ( ) - > set_thread_state ( NULL ) ; <nl> return true ; <nl> } <nl> <nl> bool ThreadManager : : RestoreThread ( ) { <nl> EagerlyArchiveThread ( ) ; <nl> } <nl> Isolate : : PerIsolateThreadData * per_thread = <nl> - isolate_ - > FindPerThreadDataForThisThread ( ) ; <nl> + Isolate : : CurrentPerIsolateThreadData ( ) ; <nl> if ( per_thread = = NULL | | per_thread - > thread_state ( ) = = NULL ) { <nl> / / This is a new thread . <nl> isolate_ - > stack_guard ( ) - > InitThread ( access ) ; <nl> bool ThreadManager : : RestoreThread ( ) { <nl> char * from = state - > data ( ) ; <nl> from = isolate_ - > handle_scope_implementer ( ) - > RestoreThread ( from ) ; <nl> from = isolate_ - > RestoreThread ( from ) ; <nl> - from = Relocatable : : RestoreState ( isolate_ , from ) ; <nl> + from = Relocatable : : RestoreState ( from ) ; <nl> # ifdef ENABLE_DEBUGGER_SUPPORT <nl> from = isolate_ - > debug ( ) - > RestoreDebug ( from ) ; <nl> # endif <nl> ThreadManager : : ~ ThreadManager ( ) { <nl> void ThreadManager : : ArchiveThread ( ) { <nl> ASSERT ( lazily_archived_thread_ . Equals ( ThreadId : : Invalid ( ) ) ) ; <nl> ASSERT ( ! IsArchived ( ) ) ; <nl> - ASSERT ( IsLockedByCurrentThread ( ) ) ; <nl> ThreadState * state = GetFreeThreadState ( ) ; <nl> state - > Unlink ( ) ; <nl> - Isolate : : PerIsolateThreadData * per_thread = <nl> - isolate_ - > FindOrAllocatePerThreadDataForThisThread ( ) ; <nl> - per_thread - > set_thread_state ( state ) ; <nl> + Isolate : : CurrentPerIsolateThreadData ( ) - > set_thread_state ( state ) ; <nl> lazily_archived_thread_ = ThreadId : : Current ( ) ; <nl> lazily_archived_thread_state_ = state ; <nl> ASSERT ( state - > id ( ) . 
Equals ( ThreadId : : Invalid ( ) ) ) ; <nl> void ThreadManager : : ArchiveThread ( ) { <nl> <nl> <nl> void ThreadManager : : EagerlyArchiveThread ( ) { <nl> - ASSERT ( IsLockedByCurrentThread ( ) ) ; <nl> ThreadState * state = lazily_archived_thread_state_ ; <nl> state - > LinkInto ( ThreadState : : IN_USE_LIST ) ; <nl> char * to = state - > data ( ) ; <nl> void ThreadManager : : EagerlyArchiveThread ( ) { <nl> / / in ThreadManager : : Iterate ( ObjectVisitor * ) . <nl> to = isolate_ - > handle_scope_implementer ( ) - > ArchiveThread ( to ) ; <nl> to = isolate_ - > ArchiveThread ( to ) ; <nl> - to = Relocatable : : ArchiveState ( isolate_ , to ) ; <nl> + to = Relocatable : : ArchiveState ( to ) ; <nl> # ifdef ENABLE_DEBUGGER_SUPPORT <nl> to = isolate_ - > debug ( ) - > ArchiveDebug ( to ) ; <nl> # endif <nl> void ThreadManager : : FreeThreadResources ( ) { <nl> <nl> <nl> bool ThreadManager : : IsArchived ( ) { <nl> - Isolate : : PerIsolateThreadData * data = <nl> - isolate_ - > FindPerThreadDataForThisThread ( ) ; <nl> + Isolate : : PerIsolateThreadData * data = Isolate : : CurrentPerIsolateThreadData ( ) ; <nl> return data ! = NULL & & data - > thread_state ( ) ! = NULL ; <nl> } <nl> <nl> + <nl> void ThreadManager : : Iterate ( ObjectVisitor * v ) { <nl> / / Expecting no threads during serialization / deserialization <nl> for ( ThreadState * state = FirstThreadStateInUse ( ) ; <nl> mmm a / test / cctest / SConscript <nl> ppp b / test / cctest / SConscript <nl> SOURCES = { <nl> ' test - list . cc ' , <nl> ' test - liveedit . cc ' , <nl> ' test - lock . cc ' , <nl> - ' test - lockers . cc ' , <nl> ' test - log - utils . cc ' , <nl> ' test - log . cc ' , <nl> ' test - mark - compact . cc ' , <nl> mmm a / test / cctest / cctest . gyp <nl> ppp b / test / cctest / cctest . gyp <nl> <nl> ' test - list . cc ' , <nl> ' test - liveedit . cc ' , <nl> ' test - lock . cc ' , <nl> - ' test - lockers . cc ' , <nl> ' test - log . cc ' , <nl> ' test - log - utils . cc ' , <nl> ' test - mark - compact . cc ' , <nl> mmm a / test / cctest / test - api . cc <nl> ppp b / test / cctest / test - api . cc <nl> <nl> # include " v8 . h " <nl> <nl> # include " api . h " <nl> - # include " isolate . h " <nl> # include " compilation - cache . h " <nl> # include " execution . h " <nl> # include " snapshot . h " <nl> TEST ( MultipleIsolatesOnIndividualThreads ) { <nl> isolate2 - > Dispose ( ) ; <nl> } <nl> <nl> - TEST ( IsolateDifferentContexts ) { <nl> - v8 : : Isolate * isolate = v8 : : Isolate : : New ( ) ; <nl> - Persistent < v8 : : Context > context ; <nl> - { <nl> - v8 : : Isolate : : Scope isolate_scope ( isolate ) ; <nl> - v8 : : HandleScope handle_scope ; <nl> - context = v8 : : Context : : New ( ) ; <nl> - v8 : : Context : : Scope context_scope ( context ) ; <nl> - Local < Value > v = CompileRun ( " 2 " ) ; <nl> - CHECK ( v - > IsNumber ( ) ) ; <nl> - CHECK_EQ ( 2 , static_cast < int > ( v - > NumberValue ( ) ) ) ; <nl> - } <nl> - { <nl> - v8 : : Isolate : : Scope isolate_scope ( isolate ) ; <nl> - v8 : : HandleScope handle_scope ; <nl> - context = v8 : : Context : : New ( ) ; <nl> - v8 : : Context : : Scope context_scope ( context ) ; <nl> - Local < Value > v = CompileRun ( " 22 " ) ; <nl> - CHECK ( v - > IsNumber ( ) ) ; <nl> - CHECK_EQ ( 22 , static_cast < int > ( v - > NumberValue ( ) ) ) ; <nl> - } <nl> - } <nl> <nl> class InitDefaultIsolateThread : public v8 : : internal : : Thread { <nl> public : <nl> deleted file mode 100644 <nl> index ba0fdb22066 . . 
00000000000 <nl> mmm a / test / cctest / test - lockers . cc <nl> ppp / dev / null <nl> <nl> - / / Copyright 2007 - 2011 the V8 project authors . All rights reserved . <nl> - / / Redistribution and use in source and binary forms , with or without <nl> - / / modification , are permitted provided that the following conditions are <nl> - / / met : <nl> - / / <nl> - / / * Redistributions of source code must retain the above copyright <nl> - / / notice , this list of conditions and the following disclaimer . <nl> - / / * Redistributions in binary form must reproduce the above <nl> - / / copyright notice , this list of conditions and the following <nl> - / / disclaimer in the documentation and / or other materials provided <nl> - / / with the distribution . <nl> - / / * Neither the name of Google Inc . nor the names of its <nl> - / / contributors may be used to endorse or promote products derived <nl> - / / from this software without specific prior written permission . <nl> - / / <nl> - / / THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> - / / " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> - / / LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> - / / A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> - / / OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> - / / SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> - / / LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> - / / DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> - / / THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> - / / ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> - / / OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> - <nl> - # include < limits . h > <nl> - <nl> - # include " v8 . h " <nl> - <nl> - # include " api . h " <nl> - # include " isolate . h " <nl> - # include " compilation - cache . h " <nl> - # include " execution . h " <nl> - # include " snapshot . h " <nl> - # include " platform . h " <nl> - # include " utils . h " <nl> - # include " cctest . h " <nl> - # include " parser . h " <nl> - # include " unicode - inl . 
h " <nl> - <nl> - using : : v8 : : AccessorInfo ; <nl> - using : : v8 : : Context ; <nl> - using : : v8 : : Extension ; <nl> - using : : v8 : : Function ; <nl> - using : : v8 : : HandleScope ; <nl> - using : : v8 : : Local ; <nl> - using : : v8 : : Object ; <nl> - using : : v8 : : ObjectTemplate ; <nl> - using : : v8 : : Persistent ; <nl> - using : : v8 : : Script ; <nl> - using : : v8 : : String ; <nl> - using : : v8 : : Value ; <nl> - using : : v8 : : V8 ; <nl> - <nl> - namespace i = : : i ; <nl> - <nl> - <nl> - <nl> - <nl> - / / Migrating an isolate <nl> - class KangarooThread : public v8 : : internal : : Thread { <nl> - public : <nl> - KangarooThread ( v8 : : Isolate * isolate , <nl> - v8 : : Handle < v8 : : Context > context , int value ) <nl> - : Thread ( NULL , " KangarooThread " ) , <nl> - isolate_ ( isolate ) , context_ ( context ) , value_ ( value ) { <nl> - } <nl> - <nl> - void Run ( ) { <nl> - { <nl> - v8 : : Locker locker ( isolate_ ) ; <nl> - v8 : : Isolate : : Scope isolate_scope ( isolate_ ) ; <nl> - CHECK_EQ ( isolate_ , v8 : : internal : : Isolate : : Current ( ) ) ; <nl> - v8 : : HandleScope scope ; <nl> - v8 : : Context : : Scope context_scope ( context_ ) ; <nl> - Local < Value > v = CompileRun ( " getValue ( ) " ) ; <nl> - CHECK ( v - > IsNumber ( ) ) ; <nl> - CHECK_EQ ( 30 , static_cast < int > ( v - > NumberValue ( ) ) ) ; <nl> - } <nl> - { <nl> - v8 : : Locker locker ( isolate_ ) ; <nl> - v8 : : Isolate : : Scope isolate_scope ( isolate_ ) ; <nl> - v8 : : Context : : Scope context_scope ( context_ ) ; <nl> - v8 : : HandleScope scope ; <nl> - Local < Value > v = CompileRun ( " getValue ( ) " ) ; <nl> - CHECK ( v - > IsNumber ( ) ) ; <nl> - CHECK_EQ ( 30 , static_cast < int > ( v - > NumberValue ( ) ) ) ; <nl> - } <nl> - isolate_ - > Dispose ( ) ; <nl> - } <nl> - <nl> - private : <nl> - v8 : : Isolate * isolate_ ; <nl> - Persistent < v8 : : Context > context_ ; <nl> - int value_ ; <nl> - } ; <nl> - <nl> - / / Migrates an isolate from one thread to another <nl> - TEST ( KangarooIsolates ) { <nl> - v8 : : Isolate * isolate = v8 : : Isolate : : New ( ) ; <nl> - Persistent < v8 : : Context > context ; <nl> - { <nl> - v8 : : Locker locker ( isolate ) ; <nl> - v8 : : Isolate : : Scope isolate_scope ( isolate ) ; <nl> - v8 : : HandleScope handle_scope ; <nl> - context = v8 : : Context : : New ( ) ; <nl> - v8 : : Context : : Scope context_scope ( context ) ; <nl> - CHECK_EQ ( isolate , v8 : : internal : : Isolate : : Current ( ) ) ; <nl> - CompileRun ( " function getValue ( ) { return 30 ; } " ) ; <nl> - } <nl> - KangarooThread thread1 ( isolate , context , 1 ) ; <nl> - thread1 . Start ( ) ; <nl> - thread1 . Join ( ) ; <nl> - } <nl> - <nl> - static void CalcFibAndCheck ( ) { <nl> - Local < Value > v = CompileRun ( " function fib ( n ) { " <nl> - " if ( n < = 2 ) return 1 ; " <nl> - " return fib ( n - 1 ) + fib ( n - 2 ) ; " <nl> - " } " <nl> - " fib ( 10 ) " ) ; <nl> - CHECK ( v - > IsNumber ( ) ) ; <nl> - CHECK_EQ ( 55 , static_cast < int > ( v - > NumberValue ( ) ) ) ; <nl> - } <nl> - <nl> - class JoinableThread { <nl> - public : <nl> - explicit JoinableThread ( const char * name ) <nl> - : name_ ( name ) , <nl> - semaphore_ ( i : : OS : : CreateSemaphore ( 0 ) ) , <nl> - thread_ ( this ) { <nl> - } <nl> - <nl> - virtual ~ JoinableThread ( ) { <nl> - delete semaphore_ ; <nl> - } <nl> - <nl> - void Start ( ) { <nl> - thread_ . 
Start ( ) ; <nl> - } <nl> - <nl> - void Join ( ) { <nl> - semaphore_ - > Wait ( ) ; <nl> - } <nl> - <nl> - virtual void Run ( ) = 0 ; <nl> - private : <nl> - class ThreadWithSemaphore : public i : : Thread { <nl> - public : <nl> - explicit ThreadWithSemaphore ( JoinableThread * joinable_thread ) <nl> - : Thread ( NULL , joinable_thread - > name_ ) , <nl> - joinable_thread_ ( joinable_thread ) { <nl> - } <nl> - <nl> - virtual void Run ( ) { <nl> - joinable_thread_ - > Run ( ) ; <nl> - joinable_thread_ - > semaphore_ - > Signal ( ) ; <nl> - } <nl> - <nl> - private : <nl> - JoinableThread * joinable_thread_ ; <nl> - } ; <nl> - <nl> - const char * name_ ; <nl> - i : : Semaphore * semaphore_ ; <nl> - ThreadWithSemaphore thread_ ; <nl> - <nl> - friend class ThreadWithSemaphore ; <nl> - <nl> - DISALLOW_COPY_AND_ASSIGN ( JoinableThread ) ; <nl> - } ; <nl> - <nl> - <nl> - class IsolateLockingThreadWithLocalContext : public JoinableThread { <nl> - public : <nl> - explicit IsolateLockingThreadWithLocalContext ( v8 : : Isolate * isolate ) <nl> - : JoinableThread ( " IsolateLockingThread " ) , <nl> - isolate_ ( isolate ) { <nl> - } <nl> - <nl> - virtual void Run ( ) { <nl> - v8 : : Locker locker ( isolate_ ) ; <nl> - v8 : : Isolate : : Scope isolate_scope ( isolate_ ) ; <nl> - v8 : : HandleScope handle_scope ; <nl> - LocalContext local_context ; <nl> - CHECK_EQ ( isolate_ , v8 : : internal : : Isolate : : Current ( ) ) ; <nl> - CalcFibAndCheck ( ) ; <nl> - } <nl> - private : <nl> - v8 : : Isolate * isolate_ ; <nl> - } ; <nl> - <nl> - static void StartJoinAndDeleteThreads ( const i : : List < JoinableThread * > & threads ) { <nl> - for ( int i = 0 ; i < threads . length ( ) ; i + + ) { <nl> - threads [ i ] - > Start ( ) ; <nl> - } <nl> - for ( int i = 0 ; i < threads . length ( ) ; i + + ) { <nl> - threads [ i ] - > Join ( ) ; <nl> - } <nl> - for ( int i = 0 ; i < threads . length ( ) ; i + + ) { <nl> - delete threads [ i ] ; <nl> - } <nl> - } <nl> - <nl> - <nl> - / / Run many threads all locking on the same isolate <nl> - TEST ( IsolateLockingStress ) { <nl> - const int kNThreads = 100 ; <nl> - i : : List < JoinableThread * > threads ( kNThreads ) ; <nl> - v8 : : Isolate * isolate = v8 : : Isolate : : New ( ) ; <nl> - for ( int i = 0 ; i < kNThreads ; i + + ) { <nl> - threads . Add ( new IsolateLockingThreadWithLocalContext ( isolate ) ) ; <nl> - } <nl> - StartJoinAndDeleteThreads ( threads ) ; <nl> - isolate - > Dispose ( ) ; <nl> - } <nl> - <nl> - class IsolateNonlockingThread : public JoinableThread { <nl> - public : <nl> - explicit IsolateNonlockingThread ( ) <nl> - : JoinableThread ( " IsolateNonlockingThread " ) { <nl> - } <nl> - <nl> - virtual void Run ( ) { <nl> - v8 : : Isolate * isolate = v8 : : Isolate : : New ( ) ; <nl> - { <nl> - v8 : : Isolate : : Scope isolate_scope ( isolate ) ; <nl> - v8 : : HandleScope handle_scope ; <nl> - v8 : : Handle < v8 : : Context > context = v8 : : Context : : New ( ) ; <nl> - v8 : : Context : : Scope context_scope ( context ) ; <nl> - CHECK_EQ ( isolate , v8 : : internal : : Isolate : : Current ( ) ) ; <nl> - CalcFibAndCheck ( ) ; <nl> - } <nl> - isolate - > Dispose ( ) ; <nl> - } <nl> - private : <nl> - } ; <nl> - <nl> - / / Run many threads each accessing its own isolate without locking <nl> - TEST ( MultithreadedParallelIsolates ) { <nl> - const int kNThreads = 50 ; <nl> - i : : List < JoinableThread * > threads ( kNThreads ) ; <nl> - for ( int i = 0 ; i < kNThreads ; i + + ) { <nl> - threads . 
Add ( new IsolateNonlockingThread ( ) ) ; <nl> - } <nl> - StartJoinAndDeleteThreads ( threads ) ; <nl> - } <nl> - <nl> - <nl> - class IsolateNestedLockingThread : public JoinableThread { <nl> - public : <nl> - explicit IsolateNestedLockingThread ( v8 : : Isolate * isolate ) <nl> - : JoinableThread ( " IsolateNestedLocking " ) , isolate_ ( isolate ) { <nl> - } <nl> - virtual void Run ( ) { <nl> - v8 : : Locker lock ( isolate_ ) ; <nl> - v8 : : Isolate : : Scope isolate_scope ( isolate_ ) ; <nl> - v8 : : HandleScope handle_scope ; <nl> - LocalContext local_context ; <nl> - { <nl> - v8 : : Locker another_lock ( isolate_ ) ; <nl> - CalcFibAndCheck ( ) ; <nl> - } <nl> - { <nl> - v8 : : Locker another_lock ( isolate_ ) ; <nl> - CalcFibAndCheck ( ) ; <nl> - } <nl> - } <nl> - private : <nl> - v8 : : Isolate * isolate_ ; <nl> - } ; <nl> - <nl> - / / Run many threads with nested locks <nl> - TEST ( IsolateNestedLocking ) { <nl> - const int kNThreads = 100 ; <nl> - v8 : : Isolate * isolate = v8 : : Isolate : : New ( ) ; <nl> - i : : List < JoinableThread * > threads ( kNThreads ) ; <nl> - for ( int i = 0 ; i < kNThreads ; i + + ) { <nl> - threads . Add ( new IsolateNestedLockingThread ( isolate ) ) ; <nl> - } <nl> - StartJoinAndDeleteThreads ( threads ) ; <nl> - } <nl> - <nl> - <nl> - class SeparateIsolatesLocksNonexclusiveThread : public JoinableThread { <nl> - public : <nl> - SeparateIsolatesLocksNonexclusiveThread ( v8 : : Isolate * isolate1 , <nl> - v8 : : Isolate * isolate2 ) <nl> - : JoinableThread ( " SeparateIsolatesLocksNonexclusiveThread " ) , <nl> - isolate1_ ( isolate1 ) , isolate2_ ( isolate2 ) { <nl> - } <nl> - <nl> - virtual void Run ( ) { <nl> - v8 : : Locker lock ( isolate1_ ) ; <nl> - v8 : : Isolate : : Scope isolate_scope ( isolate1_ ) ; <nl> - v8 : : HandleScope handle_scope ; <nl> - LocalContext local_context ; <nl> - <nl> - IsolateLockingThreadWithLocalContext threadB ( isolate2_ ) ; <nl> - threadB . Start ( ) ; <nl> - CalcFibAndCheck ( ) ; <nl> - threadB . Join ( ) ; <nl> - } <nl> - private : <nl> - v8 : : Isolate * isolate1_ ; <nl> - v8 : : Isolate * isolate2_ ; <nl> - } ; <nl> - <nl> - / / Run parallel threads that lock and access different isolates in parallel <nl> - TEST ( SeparateIsolatesLocksNonexclusive ) { <nl> - const int kNThreads = 100 ; <nl> - v8 : : Isolate * isolate1 = v8 : : Isolate : : New ( ) ; <nl> - v8 : : Isolate * isolate2 = v8 : : Isolate : : New ( ) ; <nl> - i : : List < JoinableThread * > threads ( kNThreads ) ; <nl> - for ( int i = 0 ; i < kNThreads ; i + + ) { <nl> - threads . 
Add ( new SeparateIsolatesLocksNonexclusiveThread ( isolate1 , <nl> - isolate2 ) ) ; <nl> - } <nl> - StartJoinAndDeleteThreads ( threads ) ; <nl> - isolate2 - > Dispose ( ) ; <nl> - isolate1 - > Dispose ( ) ; <nl> - } <nl> - <nl> - class LockIsolateAndCalculateFibSharedContextThread : public JoinableThread { <nl> - public : <nl> - explicit LockIsolateAndCalculateFibSharedContextThread ( <nl> - v8 : : Isolate * isolate , v8 : : Handle < v8 : : Context > context ) <nl> - : JoinableThread ( " LockIsolateAndCalculateFibThread " ) , <nl> - isolate_ ( isolate ) , <nl> - context_ ( context ) { <nl> - } <nl> - <nl> - virtual void Run ( ) { <nl> - v8 : : Locker lock ( isolate_ ) ; <nl> - v8 : : Isolate : : Scope isolate_scope ( isolate_ ) ; <nl> - HandleScope handle_scope ; <nl> - v8 : : Context : : Scope context_scope ( context_ ) ; <nl> - CalcFibAndCheck ( ) ; <nl> - } <nl> - private : <nl> - v8 : : Isolate * isolate_ ; <nl> - Persistent < v8 : : Context > context_ ; <nl> - } ; <nl> - <nl> - class LockerUnlockerThread : public JoinableThread { <nl> - public : <nl> - explicit LockerUnlockerThread ( v8 : : Isolate * isolate ) <nl> - : JoinableThread ( " LockerUnlockerThread " ) , <nl> - isolate_ ( isolate ) { <nl> - } <nl> - <nl> - virtual void Run ( ) { <nl> - v8 : : Locker lock ( isolate_ ) ; <nl> - v8 : : Isolate : : Scope isolate_scope ( isolate_ ) ; <nl> - v8 : : HandleScope handle_scope ; <nl> - v8 : : Handle < v8 : : Context > context = v8 : : Context : : New ( ) ; <nl> - { <nl> - v8 : : Context : : Scope context_scope ( context ) ; <nl> - CalcFibAndCheck ( ) ; <nl> - } <nl> - { <nl> - isolate_ - > Exit ( ) ; <nl> - v8 : : Unlocker unlocker ( isolate_ ) ; <nl> - LockIsolateAndCalculateFibSharedContextThread thread ( isolate_ , context ) ; <nl> - thread . Start ( ) ; <nl> - thread . Join ( ) ; <nl> - } <nl> - isolate_ - > Enter ( ) ; <nl> - { <nl> - v8 : : Context : : Scope context_scope ( context ) ; <nl> - CalcFibAndCheck ( ) ; <nl> - } <nl> - } <nl> - private : <nl> - v8 : : Isolate * isolate_ ; <nl> - } ; <nl> - <nl> - / / Use unlocker inside of a Locker , multiple threads . <nl> - TEST ( LockerUnlocker ) { <nl> - const int kNThreads = 100 ; <nl> - i : : List < JoinableThread * > threads ( kNThreads ) ; <nl> - v8 : : Isolate * isolate = v8 : : Isolate : : New ( ) ; <nl> - for ( int i = 0 ; i < kNThreads ; i + + ) { <nl> - threads . Add ( new LockerUnlockerThread ( isolate ) ) ; <nl> - } <nl> - StartJoinAndDeleteThreads ( threads ) ; <nl> - isolate - > Dispose ( ) ; <nl> - } <nl> - <nl> - class LockTwiceAndUnlockThread : public JoinableThread { <nl> - public : <nl> - explicit LockTwiceAndUnlockThread ( v8 : : Isolate * isolate ) <nl> - : JoinableThread ( " LockTwiceAndUnlockThread " ) , <nl> - isolate_ ( isolate ) { <nl> - } <nl> - <nl> - virtual void Run ( ) { <nl> - v8 : : Locker lock ( isolate_ ) ; <nl> - v8 : : Isolate : : Scope isolate_scope ( isolate_ ) ; <nl> - v8 : : HandleScope handle_scope ; <nl> - v8 : : Handle < v8 : : Context > context = v8 : : Context : : New ( ) ; <nl> - { <nl> - v8 : : Context : : Scope context_scope ( context ) ; <nl> - CalcFibAndCheck ( ) ; <nl> - } <nl> - { <nl> - v8 : : Locker second_lock ( isolate_ ) ; <nl> - { <nl> - isolate_ - > Exit ( ) ; <nl> - v8 : : Unlocker unlocker ( isolate_ ) ; <nl> - LockIsolateAndCalculateFibSharedContextThread thread ( isolate_ , context ) ; <nl> - thread . Start ( ) ; <nl> - thread . 
Join ( ) ; <nl> - } <nl> - } <nl> - isolate_ - > Enter ( ) ; <nl> - { <nl> - v8 : : Context : : Scope context_scope ( context ) ; <nl> - CalcFibAndCheck ( ) ; <nl> - } <nl> - } <nl> - private : <nl> - v8 : : Isolate * isolate_ ; <nl> - } ; <nl> - <nl> - / / Use Unlocker inside two Lockers . <nl> - TEST ( LockTwiceAndUnlock ) { <nl> - const int kNThreads = 100 ; <nl> - i : : List < JoinableThread * > threads ( kNThreads ) ; <nl> - v8 : : Isolate * isolate = v8 : : Isolate : : New ( ) ; <nl> - for ( int i = 0 ; i < kNThreads ; i + + ) { <nl> - threads . Add ( new LockTwiceAndUnlockThread ( isolate ) ) ; <nl> - } <nl> - StartJoinAndDeleteThreads ( threads ) ; <nl> - isolate - > Dispose ( ) ; <nl> - } <nl> - <nl> - class LockAndUnlockDifferentIsolatesThread : public JoinableThread { <nl> - public : <nl> - LockAndUnlockDifferentIsolatesThread ( v8 : : Isolate * isolate1 , <nl> - v8 : : Isolate * isolate2 ) <nl> - : JoinableThread ( " LockAndUnlockDifferentIsolatesThread " ) , <nl> - isolate1_ ( isolate1 ) , <nl> - isolate2_ ( isolate2 ) { <nl> - } <nl> - <nl> - virtual void Run ( ) { <nl> - Persistent < v8 : : Context > context1 ; <nl> - Persistent < v8 : : Context > context2 ; <nl> - v8 : : Locker lock1 ( isolate1_ ) ; <nl> - CHECK ( v8 : : Locker : : IsLocked ( isolate1_ ) ) ; <nl> - CHECK ( ! v8 : : Locker : : IsLocked ( isolate2_ ) ) ; <nl> - { <nl> - v8 : : Isolate : : Scope isolate_scope ( isolate1_ ) ; <nl> - v8 : : HandleScope handle_scope ; <nl> - context1 = v8 : : Context : : New ( ) ; <nl> - { <nl> - v8 : : Context : : Scope context_scope ( context1 ) ; <nl> - CalcFibAndCheck ( ) ; <nl> - } <nl> - } <nl> - v8 : : Locker lock2 ( isolate2_ ) ; <nl> - CHECK ( v8 : : Locker : : IsLocked ( isolate1_ ) ) ; <nl> - CHECK ( v8 : : Locker : : IsLocked ( isolate2_ ) ) ; <nl> - { <nl> - v8 : : Isolate : : Scope isolate_scope ( isolate2_ ) ; <nl> - v8 : : HandleScope handle_scope ; <nl> - context2 = v8 : : Context : : New ( ) ; <nl> - { <nl> - v8 : : Context : : Scope context_scope ( context2 ) ; <nl> - CalcFibAndCheck ( ) ; <nl> - } <nl> - } <nl> - { <nl> - v8 : : Unlocker unlock1 ( isolate1_ ) ; <nl> - CHECK ( ! v8 : : Locker : : IsLocked ( isolate1_ ) ) ; <nl> - CHECK ( v8 : : Locker : : IsLocked ( isolate2_ ) ) ; <nl> - v8 : : Isolate : : Scope isolate_scope ( isolate2_ ) ; <nl> - v8 : : HandleScope handle_scope ; <nl> - v8 : : Context : : Scope context_scope ( context2 ) ; <nl> - LockIsolateAndCalculateFibSharedContextThread thread ( isolate1_ , context1 ) ; <nl> - thread . Start ( ) ; <nl> - CalcFibAndCheck ( ) ; <nl> - thread . Join ( ) ; <nl> - } <nl> - } <nl> - private : <nl> - v8 : : Isolate * isolate1_ ; <nl> - v8 : : Isolate * isolate2_ ; <nl> - } ; <nl> - <nl> - / / Lock two isolates and unlock one of them . <nl> - TEST ( LockAndUnlockDifferentIsolates ) { <nl> - v8 : : Isolate * isolate1 = v8 : : Isolate : : New ( ) ; <nl> - v8 : : Isolate * isolate2 = v8 : : Isolate : : New ( ) ; <nl> - LockAndUnlockDifferentIsolatesThread thread ( isolate1 , isolate2 ) ; <nl> - thread . Start ( ) ; <nl> - thread . 
Join ( ) ; <nl> - isolate2 - > Dispose ( ) ; <nl> - isolate1 - > Dispose ( ) ; <nl> - } <nl> - <nl> - class LockUnlockLockThread : public JoinableThread { <nl> - public : <nl> - LockUnlockLockThread ( v8 : : Isolate * isolate , v8 : : Handle < v8 : : Context > context ) <nl> - : JoinableThread ( " LockUnlockLockThread " ) , <nl> - isolate_ ( isolate ) , <nl> - context_ ( context ) { <nl> - } <nl> - <nl> - virtual void Run ( ) { <nl> - v8 : : Locker lock1 ( isolate_ ) ; <nl> - CHECK ( v8 : : Locker : : IsLocked ( isolate_ ) ) ; <nl> - CHECK ( ! v8 : : Locker : : IsLocked ( ) ) ; <nl> - { <nl> - v8 : : Isolate : : Scope isolate_scope ( isolate_ ) ; <nl> - v8 : : HandleScope handle_scope ; <nl> - v8 : : Context : : Scope context_scope ( context_ ) ; <nl> - CalcFibAndCheck ( ) ; <nl> - } <nl> - { <nl> - v8 : : Unlocker unlock1 ( isolate_ ) ; <nl> - CHECK ( ! v8 : : Locker : : IsLocked ( isolate_ ) ) ; <nl> - CHECK ( ! v8 : : Locker : : IsLocked ( ) ) ; <nl> - { <nl> - v8 : : Locker lock2 ( isolate_ ) ; <nl> - v8 : : Isolate : : Scope isolate_scope ( isolate_ ) ; <nl> - v8 : : HandleScope handle_scope ; <nl> - CHECK ( v8 : : Locker : : IsLocked ( isolate_ ) ) ; <nl> - CHECK ( ! v8 : : Locker : : IsLocked ( ) ) ; <nl> - v8 : : Context : : Scope context_scope ( context_ ) ; <nl> - CalcFibAndCheck ( ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - private : <nl> - v8 : : Isolate * isolate_ ; <nl> - v8 : : Persistent < v8 : : Context > context_ ; <nl> - } ; <nl> - <nl> - / / Locker inside an Unlocker inside a Locker . <nl> - TEST ( LockUnlockLockMultithreaded ) { <nl> - const int kNThreads = 100 ; <nl> - v8 : : Isolate * isolate = v8 : : Isolate : : New ( ) ; <nl> - Persistent < v8 : : Context > context ; <nl> - { <nl> - v8 : : Locker locker_ ( isolate ) ; <nl> - v8 : : Isolate : : Scope isolate_scope ( isolate ) ; <nl> - v8 : : HandleScope handle_scope ; <nl> - context = v8 : : Context : : New ( ) ; <nl> - } <nl> - i : : List < JoinableThread * > threads ( kNThreads ) ; <nl> - for ( int i = 0 ; i < kNThreads ; i + + ) { <nl> - threads . Add ( new LockUnlockLockThread ( isolate , context ) ) ; <nl> - } <nl> - StartJoinAndDeleteThreads ( threads ) ; <nl> - } <nl> - <nl> - class LockUnlockLockDefaultIsolateThread : public JoinableThread { <nl> - public : <nl> - explicit LockUnlockLockDefaultIsolateThread ( v8 : : Handle < v8 : : Context > context ) <nl> - : JoinableThread ( " LockUnlockLockDefaultIsolateThread " ) , <nl> - context_ ( context ) { <nl> - } <nl> - <nl> - virtual void Run ( ) { <nl> - v8 : : Locker lock1 ; <nl> - { <nl> - v8 : : HandleScope handle_scope ; <nl> - v8 : : Context : : Scope context_scope ( context_ ) ; <nl> - CalcFibAndCheck ( ) ; <nl> - } <nl> - { <nl> - v8 : : Unlocker unlock1 ; <nl> - { <nl> - v8 : : Locker lock2 ; <nl> - v8 : : HandleScope handle_scope ; <nl> - v8 : : Context : : Scope context_scope ( context_ ) ; <nl> - CalcFibAndCheck ( ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - private : <nl> - v8 : : Persistent < v8 : : Context > context_ ; <nl> - } ; <nl> - <nl> - / / Locker inside an Unlocker inside a Locker for default isolate . <nl> - TEST ( LockUnlockLockDefaultIsolateMultithreaded ) { <nl> - const int kNThreads = 100 ; <nl> - Persistent < v8 : : Context > context ; <nl> - { <nl> - v8 : : Locker locker_ ; <nl> - v8 : : HandleScope handle_scope ; <nl> - context = v8 : : Context : : New ( ) ; <nl> - } <nl> - i : : List < JoinableThread * > threads ( kNThreads ) ; <nl> - for ( int i = 0 ; i < kNThreads ; i + + ) { <nl> - threads . 
Add ( new LockUnlockLockDefaultIsolateThread ( context ) ) ; <nl> - } <nl> - StartJoinAndDeleteThreads ( threads ) ; <nl> - } <nl> mmm a / test / cctest / test - thread - termination . cc <nl> ppp b / test / cctest / test - thread - termination . cc <nl> class LoopingThread : public v8 : : internal : : Thread { <nl> } ; <nl> <nl> <nl> - / / Test that multiple threads using default isolate can be terminated <nl> - / / from another thread when using Lockers and preemption . <nl> - TEST ( TerminateMultipleV8ThreadsDefaultIsolate ) { <nl> + / / Test that multiple threads using V8 can be terminated from another <nl> + / / thread when using Lockers and preemption . <nl> + TEST ( TerminateMultipleV8Threads ) { <nl> { <nl> v8 : : Locker locker ; <nl> v8 : : V8 : : Initialize ( ) ; <nl> v8 : : Locker : : StartPreemption ( 1 ) ; <nl> semaphore = v8 : : internal : : OS : : CreateSemaphore ( 0 ) ; <nl> } <nl> - const int kThreads = 10 ; <nl> - i : : List < LoopingThread * > threads ( kThreads ) ; <nl> - for ( int i = 0 ; i < kThreads ; i + + ) { <nl> - threads . Add ( new LoopingThread ( i : : Isolate : : Current ( ) ) ) ; <nl> - } <nl> - for ( int i = 0 ; i < kThreads ; i + + ) { <nl> - threads [ i ] - > Start ( ) ; <nl> - } <nl> - / / Wait until all threads have signaled the semaphore . <nl> - for ( int i = 0 ; i < kThreads ; i + + ) { <nl> - semaphore - > Wait ( ) ; <nl> - } <nl> + LoopingThread thread1 ( i : : Isolate : : Current ( ) ) ; <nl> + thread1 . Start ( ) ; <nl> + LoopingThread thread2 ( i : : Isolate : : Current ( ) ) ; <nl> + thread2 . Start ( ) ; <nl> + / / Wait until both threads have signaled the semaphore . <nl> + semaphore - > Wait ( ) ; <nl> + semaphore - > Wait ( ) ; <nl> { <nl> v8 : : Locker locker ; <nl> - for ( int i = 0 ; i < kThreads ; i + + ) { <nl> - v8 : : V8 : : TerminateExecution ( threads [ i ] - > GetV8ThreadId ( ) ) ; <nl> - } <nl> - } <nl> - for ( int i = 0 ; i < kThreads ; i + + ) { <nl> - threads [ i ] - > Join ( ) ; <nl> - delete threads [ i ] ; <nl> + v8 : : V8 : : TerminateExecution ( thread1 . GetV8ThreadId ( ) ) ; <nl> + v8 : : V8 : : TerminateExecution ( thread2 . GetV8ThreadId ( ) ) ; <nl> } <nl> + thread1 . Join ( ) ; <nl> + thread2 . Join ( ) ; <nl> <nl> delete semaphore ; <nl> semaphore = NULL ; <nl>
Revert " This implements per - isolate locking and unlocking , including tests "
v8/v8
6e4629a81c702f9f59a333fd21bde16c7e52e5a1
2011-05-04T22:35:42Z
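The revert restores the pre-multi-isolate locking API documented in the v8.h comment above: v8::Locker and v8::Unlocker take no Isolate argument, operate on the default isolate, and nest. A minimal sketch of that usage, assuming the 2011-era API exactly as the restored comment describes it:

// Sketch of the default-isolate Locker/Unlocker pattern from the restored
// v8.h comment. Assumes the 2011-era API (no Isolate parameter); per the
// v8threads.cc change above, the Locker constructor initializes V8 if needed.
#include <v8.h>
#include <assert.h>

void run_with_lock() {
  v8::Locker locker;                 // acquire the big V8 lock
  assert(v8::Locker::IsLocked());
  v8::HandleScope handle_scope;
  v8::Persistent<v8::Context> context = v8::Context::New();
  {
    v8::Context::Scope context_scope(context);
    // ... compile and run scripts while holding the lock ...
  }
  {
    v8::Unlocker unlocker;           // temporarily release the lock
    // ... blocking work; another thread may enter V8 here ...
  }                                  // ~Unlocker re-acquires the lock
  context.Dispose();
}                                    // ~Locker releases the lock

Nesting a second v8::Locker on the same thread is just reference counting, per the "( 2 levels )" example in the comment; only the outermost Locker actually takes and releases the lock.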
mmm a / scripts / record_bag . sh <nl> ppp b / scripts / record_bag . sh <nl> function start ( ) { <nl> / tf \ <nl> / tf_static \ <nl> / apollo / monitor \ <nl> + / apollo / monitor / system_status \ <nl> / apollo / monitor / static_info < / dev / null > " $ { LOG } " 2 > & 1 & <nl> fi <nl> } <nl> mmm a / scripts / record_bag_pnc . sh <nl> ppp b / scripts / record_bag_pnc . sh <nl> function start ( ) { <nl> / tf \ <nl> / tf_static \ <nl> / apollo / monitor \ <nl> + / apollo / monitor / system_status \ <nl> / apollo / monitor / static_info < / dev / null > " $ { LOG } " 2 > & 1 & <nl> fi <nl> } <nl>
script : record / apollo / monitor / system_status
ApolloAuto/apollo
f1ddd335db5f6ee795da44fb7222fe12656e4960
2018-01-03T23:40:03Z
mmm a / hphp / hack / src / hh_fanout / hh_fanout . ml <nl> ppp b / hphp / hack / src / hh_fanout / hh_fanout . ml <nl> let debug_subcommand = <nl> let path = Path . make path in <nl> ( fun ( ) - > Lwt_main . run ( mode_debug ~ env ~ path ~ cursor_id ) ) ) <nl> <nl> + let mode_status ~ ( env : env ) ~ ( cursor_id : string ) : unit Lwt . t = <nl> + let incremental_state = make_incremental_state ~ env in <nl> + let cursor = <nl> + incremental_state # look_up_cursor ~ client_id : None ~ cursor_id <nl> + | > Result . ok_or_failwith <nl> + in <nl> + let fanout_calculations = <nl> + cursor # get_calculate_fanout_results_since_last_typecheck <nl> + in <nl> + let % lwt ( ) = Status . go fanout_calculations in <nl> + Lwt . return_unit <nl> + <nl> + let status_subcommand = <nl> + let open Command . Param in <nl> + let open Command . Let_syntax in <nl> + Command . basic <nl> + ~ summary : <nl> + " EXPERIMENTAL : Shows details about the files that need to be re - typechecked on the next ` calculate - errors ` call " <nl> + ( let % map env = parse_env ( ) <nl> + and cursor_id = <nl> + flag <nl> + " - - cursor " <nl> + ( required string ) <nl> + ~ doc : " CURSOR The cursor that the previous request returned " <nl> + in <nl> + ( fun ( ) - > Lwt_main . run ( mode_status ~ env ~ cursor_id ) ) ) <nl> + <nl> let mode_query <nl> ~ ( env : env ) ~ ( dep_hash : Typing_deps . Dep . t ) ~ ( include_extends : bool ) : <nl> unit Lwt . t = <nl> let ( ) = <nl> ( " calculate " , calculate_subcommand ) ; <nl> ( " calculate - errors " , calculate_errors_subcommand ) ; <nl> ( " debug " , debug_subcommand ) ; <nl> + ( " status " , status_subcommand ) ; <nl> ( " query " , query_subcommand ) ; <nl> ( " query - path " , query_path_subcommand ) ; <nl> ] <nl> mmm a / hphp / hack / src / hh_fanout / incremental . ml <nl> ppp b / hphp / hack / src / hh_fanout / incremental . ml <nl> class cursor ~ client_id ~ cursor_state = <nl> val cursor_state : cursor_state = cursor_state <nl> <nl> method get_file_deltas : Naming_sqlite . file_deltas = <nl> - match cursor_state with <nl> - | Saved_state _ <nl> - | Typecheck_result _ - > <nl> - Relative_path . Map . empty <nl> - | Saved_state_delta { changed_files ; _ } - > changed_files <nl> + let rec helper cursor_state = <nl> + match cursor_state with <nl> + | Saved_state _ - > Relative_path . Map . empty <nl> + | Typecheck_result { previous ; _ } - > helper previous <nl> + | Saved_state_delta { changed_files ; _ } - > changed_files <nl> + in <nl> + helper cursor_state <nl> <nl> method get_calculate_fanout_result : Calculate_fanout . result option = <nl> match cursor_state with <nl> class cursor ~ client_id ~ cursor_state = <nl> None <nl> | Saved_state_delta { fanout_result ; _ } - > Some fanout_result <nl> <nl> + method get_calculate_fanout_results_since_last_typecheck <nl> + : Calculate_fanout . result list = <nl> + let rec helper cursor_state = <nl> + match cursor_state with <nl> + | Saved_state _ <nl> + | Typecheck_result _ - > <nl> + [ ] <nl> + | Saved_state_delta { fanout_result ; previous ; _ } - > <nl> + fanout_result : : helper previous <nl> + in <nl> + helper cursor_state <nl> + <nl> method private load_naming_table ( ctx : Provider_context . t ) : Naming_table . t <nl> = <nl> let rec get_naming_table_path ( state : cursor_state ) : <nl> class cursor ~ client_id ~ cursor_state = <nl> ( Relative_path . Set . elements files_to_typecheck ) <nl> ~ num_workers : ( List . length workers ) ) <nl> in <nl> + Hh_logger . 
log <nl> + " Got % d new dependency edges as a result of typechecking % d files " <nl> + ( HashSet . length fanout_files_deps ) <nl> + ( Relative_path . Set . cardinal files_to_typecheck ) ; <nl> let typecheck_result = { fanout_files_deps ; errors } in <nl> let cursor = <nl> new cursor <nl> mmm a / hphp / hack / src / hh_fanout / incremental . mli <nl> ppp b / hphp / hack / src / hh_fanout / incremental . mli <nl> class type cursor = <nl> Returns ` None ` if inapplicable for this type of cursor . * ) <nl> method get_calculate_fanout_result : Calculate_fanout . result option <nl> <nl> + ( * * Get the fanout that needs to be re - typechecked ( i . e . the fanout of <nl> + the files changed since the last typecheck ) . <nl> + <nl> + There is one element in the returned list per cursor - advance since the <nl> + last typecheck . The newest elements are first . * ) <nl> + method get_calculate_fanout_results_since_last_typecheck : <nl> + Calculate_fanout . result list <nl> + <nl> ( * * Get the client ID that owns this cursor . * ) <nl> method get_client_id : client_id <nl> <nl> new file mode 100644 <nl> index 00000000000 . . 31ea49e035a <nl> mmm / dev / null <nl> ppp b / hphp / hack / src / hh_fanout / status . ml <nl> <nl> + ( * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the " hack " directory of this source tree . <nl> + * <nl> + * ) <nl> + <nl> + open Core_kernel <nl> + <nl> + let go ( fanout_calculations : Calculate_fanout . result list ) : unit Lwt . t = <nl> + let ( fanout_files , explanations ) = <nl> + List . fold <nl> + fanout_calculations <nl> + ~ init : ( Relative_path . Set . empty , Relative_path . Map . empty ) <nl> + ~ f : ( fun acc { Calculate_fanout . fanout_files ; explanations ; _ } - > <nl> + let ( acc_fanout_files , acc_explanations ) = acc in <nl> + let acc_fanout_files = <nl> + Relative_path . Set . union acc_fanout_files fanout_files <nl> + in <nl> + let acc_explanations = <nl> + Relative_path . Map . union acc_explanations explanations <nl> + in <nl> + ( acc_fanout_files , acc_explanations ) ) <nl> + in <nl> + Relative_path . Map . iter explanations ~ f : ( fun path explanation - > <nl> + let open Calculate_fanout in <nl> + Tty . cprintf ( Tty . Bold Tty . Default ) " % s \ n " ( Relative_path . suffix path ) ; <nl> + <nl> + let get_symbol_num_files symbol = <nl> + match symbol . outgoing_files with <nl> + | Some outgoing_files - > <nl> + ( match Relative_path . Set . cardinal outgoing_files with <nl> + | 1 - > " ( 1 file ) " <nl> + | n - > Printf . sprintf " ( % d files ) " n ) <nl> + | None - > Printf . sprintf " ( ? files ) " <nl> + in <nl> + List . iter explanation . added_symbols ~ f : ( fun added_symbol - > <nl> + Tty . cprintf <nl> + ( Tty . Bold Tty . Green ) <nl> + " A % s " <nl> + added_symbol . symbol_edge . symbol_name ; <nl> + Printf . printf " % s \ n " ( get_symbol_num_files added_symbol ) ) ; <nl> + List . iter explanation . removed_symbols ~ f : ( fun removed_symbol - > <nl> + Tty . cprintf <nl> + ( Tty . Bold Tty . Red ) <nl> + " D % s " <nl> + removed_symbol . symbol_edge . symbol_name ; <nl> + Printf . printf " % s \ n " ( get_symbol_num_files removed_symbol ) ) ; <nl> + List . iter explanation . modified_symbols ~ f : ( fun modified_symbol - > <nl> + Tty . cprintf <nl> + ( Tty . Bold Tty . Blue ) <nl> + " M % s " <nl> + modified_symbol . symbol_edge . symbol_name ; <nl> + Printf . 
printf " % s \ n " ( get_symbol_num_files modified_symbol ) ) ; <nl> + ( ) ) ; <nl> + Printf . printf " Total files to typecheck : " ; <nl> + Tty . cprintf <nl> + ( Tty . Bold Tty . Default ) <nl> + " % d \ n " <nl> + ( Relative_path . Set . cardinal fanout_files ) ; <nl> + Lwt . return_unit <nl> new file mode 100644 <nl> index 00000000000 . . 0076ce521c2 <nl> mmm / dev / null <nl> ppp b / hphp / hack / src / hh_fanout / status . mli <nl> <nl> + ( * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the " hack " directory of this source tree . <nl> + * <nl> + * ) <nl> + <nl> + ( * * Render the details about the fanout to be checked in a nice , colorized , <nl> + user - readable format . Prints to stdout . * ) <nl> + val go : Calculate_fanout . result list - > unit Lwt . t <nl> mmm a / hphp / hack / test / hh_fanout / hh_fanout_integration_tests . py <nl> ppp b / hphp / hack / test / hh_fanout / hh_fanout_integration_tests . py <nl> <nl> generate_saved_state , <nl> run_hh_fanout , <nl> run_hh_fanout_calculate_errors , <nl> + run_hh_fanout_status , <nl> ) <nl> from libfb . py . testutil import BaseFacebookTestCase <nl> <nl> def file ( path : Path ) - > Path : <nl> ) <nl> cursor3 = result [ " cursor " ] <nl> self . assertEqual ( cursor3 , cursor2 ) <nl> + <nl> + def test_status ( self ) - > None : <nl> + work_dir : str <nl> + with tempfile . TemporaryDirectory ( ) as work_dir : <nl> + <nl> + def file ( path : Path ) - > Path : <nl> + return os . path . join ( work_dir , path ) <nl> + <nl> + # Initial status should have no changed files . <nl> + ( env , saved_state_info ) = self . set_up_work_dir ( work_dir ) <nl> + result = run_hh_fanout ( <nl> + env = env , <nl> + saved_state_info = saved_state_info , <nl> + changed_files = [ ] , <nl> + args = [ ] , <nl> + cursor = None , <nl> + ) <nl> + cursor = cast ( str , result [ " cursor " ] ) <nl> + status = run_hh_fanout_status ( env = env , cursor = cursor ) <nl> + self . assertEqual ( status , " Total files to typecheck : 0 " ) <nl> + <nl> + # Creating files should add each file as an individual element to <nl> + # the status , with no fanout other than itself . <nl> + self . write ( <nl> + file ( " foo . php " ) , <nl> + " " " < ? hh <nl> + function foo ( ) : int { <nl> + return 1 ; <nl> + } <nl> + " " " , <nl> + ) <nl> + self . write ( <nl> + file ( " depends_on_foo . php " ) , <nl> + " " " < ? hh <nl> + function depends_on_foo ( ) : int { <nl> + return foo ( ) ; <nl> + } <nl> + " " " , <nl> + ) <nl> + result = run_hh_fanout ( <nl> + env = env , <nl> + saved_state_info = saved_state_info , <nl> + changed_files = [ file ( " foo . php " ) , file ( " depends_on_foo . php " ) ] , <nl> + args = [ ] , <nl> + cursor = cursor , <nl> + ) <nl> + cursor = cast ( str , result [ " cursor " ] ) <nl> + status = run_hh_fanout_status ( env = env , cursor = cursor ) <nl> + self . assertEqual ( <nl> + status , <nl> + " " " \ <nl> + depends_on_foo . php <nl> + A \ \ depends_on_foo ( 1 file ) <nl> + foo . php <nl> + A \ \ foo ( 1 file ) <nl> + Total files to typecheck : 2 \ <nl> + " " " , <nl> + ) <nl> + <nl> + # Performing a typecheck should clear out the status , since we no <nl> + # longer need to re - typecheck any files . 
<nl> + result = run_hh_fanout_calculate_errors ( <nl> + env = env , saved_state_info = saved_state_info , cursor = cursor <nl> + ) <nl> + cursor = cast ( str , result [ " cursor " ] ) <nl> + status = run_hh_fanout_status ( env = env , cursor = cursor ) <nl> + self . assertEqual ( status , " Total files to typecheck : 0 " ) <nl> + <nl> + # Changing an upstream file should also include its downstream <nl> + # dependent in its fanout size , according to status . Note that the <nl> + # downstream file itself isn ' t included in the status printout , <nl> + # since it hasn ' t changed . <nl> + result = run_hh_fanout ( <nl> + env = env , <nl> + saved_state_info = saved_state_info , <nl> + changed_files = [ file ( " foo . php " ) ] , <nl> + args = [ ] , <nl> + cursor = cursor , <nl> + ) <nl> + cursor = cast ( str , result [ " cursor " ] ) <nl> + status = run_hh_fanout_status ( env = env , cursor = cursor ) <nl> + self . assertEqual ( <nl> + status , <nl> + " " " \ <nl> + foo . php <nl> + M \ \ foo ( 2 files ) <nl> + Total files to typecheck : 2 \ <nl> + " " " , <nl> + ) <nl> mmm a / hphp / hack / test / hh_fanout / hh_fanout_test_driver . py <nl> ppp b / hphp / hack / test / hh_fanout / hh_fanout_test_driver . py <nl> def exec ( args : List [ str ] , * , raise_on_error : bool = True ) - > str : <nl> + f " Stdout : { stdout } \ n " <nl> + f " Stderr : { stderr } \ n " <nl> ) <nl> + elif DEBUGGING : <nl> + stderr = result . stderr . decode ( ) <nl> + log ( f " Stderr : { stderr } " ) <nl> return stdout <nl> <nl> <nl> def run_hh_fanout_calculate_errors_pretty_print ( <nl> return result <nl> <nl> <nl> + def run_hh_fanout_status ( env : Env , cursor : Cursor ) - > str : <nl> + args = [ ] <nl> + args . extend ( ( " - - from " , " integration - test " ) ) <nl> + args . extend ( ( " - - root " , env . root_dir ) ) <nl> + args . extend ( ( " - - state - path " , os . path . join ( env . root_dir , " hh_fanout_state " ) ) ) <nl> + args . extend ( ( " - - cursor " , cursor ) ) <nl> + <nl> + result = exec ( [ env . hh_fanout_path , " status " , * args ] ) <nl> + result = result . replace ( env . root_dir , " " ) <nl> + result = result . strip ( ) <nl> + return result <nl> + <nl> + <nl> def run_hh_server_check ( env : Env ) - > str : <nl> result = exec ( <nl> [ env . hh_server_path , " - - check " , env . root_dir , " - - no - load " ] , <nl>
` hh_fanout status ` command
facebook/hhvm
2ad8f1114de9caf9c81797cc3bff42da3bc8892d
2020-09-03T21:14:34Z
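The fold in status.ml above unions the per-advance fanout file sets into a single set, and the cardinality of that set is what the command prints as "Total files to typecheck". A minimal C++ sketch of the same fold, with hypothetical file names:

#include <cstdio>
#include <set>
#include <string>
#include <vector>

int main() {
  // One set per cursor advance since the last typecheck, newest first.
  std::vector<std::set<std::string>> fanouts = {
      {"foo.php", "depends_on_foo.php"},  // advance 2: foo changed again
      {"foo.php"}};                       // advance 1: foo created
  std::set<std::string> all;
  for (const auto& f : fanouts) all.insert(f.begin(), f.end());
  std::printf("Total files to typecheck: %zu\n", all.size());  // prints 2
}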
new file mode 100644 <nl> index 00000000000 . . d345d2d50f2 <nl> mmm / dev / null <nl> ppp b / hphp / test / slow / ext_string / htmlentities_specialchars . php <nl> <nl> + < ? php <nl> + $ s = chr ( 0xAE ) ; / / this is an ISO - 8859 - 1 circle R <nl> + var_dump ( htmlentities ( $ s , ENT_QUOTES | ENT_IGNORE , ' ISO - 8859 - 1 ' ) ) ; <nl> + var_dump ( htmlentities ( $ s , ENT_QUOTES | ENT_IGNORE ) ) ; / / UTF - 8 <nl> + $ s = chr ( 0xFF ) ; / / this is an ISO - 8859 - 1 umlaut <nl> + var_dump ( htmlentities ( $ s , ENT_QUOTES | ENT_IGNORE , ' ISO - 8859 - 1 ' ) ) ; <nl> + var_dump ( htmlentities ( $ s , ENT_QUOTES | ENT_IGNORE ) ) ; / / UTF - 8 <nl> + $ s = chr ( 0xFF ) ; / / this is an ISO - 8859 - 1 umlaut <nl> + var_dump ( htmlentities ( $ s , ENT_QUOTES | ENT_IGNORE , ' ISO - 8859 - 1 ' ) ) ; <nl> + var_dump ( htmlentities ( $ s , ENT_QUOTES | ENT_IGNORE ) ) ; / / UTF - 8 <nl> + / / this is an ISO - 8859 - 1 para sign + 1 / 4 + " ABC " <nl> + $ s = chr ( 0xB6 ) . chr ( 0xBC ) . " AAA " ; <nl> + var_dump ( htmlentities ( $ s , ENT_QUOTES | ENT_IGNORE , ' ISO - 8859 - 1 ' ) ) ; <nl> + var_dump ( htmlentities ( $ s , ENT_QUOTES | ENT_IGNORE ) ) ; / / UTF - 8 <nl> + / / this is an ISO - 8859 - 1 currency sign , but ISO - 8859 - 15 euro sign <nl> + $ s = chr ( 0xA4 ) ; <nl> + var_dump ( htmlentities ( $ s , ENT_QUOTES | ENT_IGNORE , ' ISO - 8859 - 1 ' ) ) ; <nl> + var_dump ( htmlentities ( $ s , ENT_QUOTES | ENT_IGNORE ) ) ; / / UTF - 8 <nl> + / / This works in PHP 5 . x currently , but fatals in HHVM right now <nl> + var_dump ( htmlentities ( $ s , ENT_QUOTES | ENT_IGNORE , ' ISO - 8859 - 15 ' ) ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 77642d9bb6c <nl> mmm / dev / null <nl> ppp b / hphp / test / slow / ext_string / htmlentities_specialchars . php . expectf <nl> <nl> + string ( 5 ) " & reg ; " <nl> + string ( 0 ) " " <nl> + string ( 6 ) " & yuml ; " <nl> + string ( 0 ) " " <nl> + string ( 6 ) " & yuml ; " <nl> + string ( 0 ) " " <nl> + string ( 17 ) " & para ; & frac14 ; AAA " <nl> + string ( 3 ) " AAA " <nl> + string ( 8 ) " & curren ; " <nl> + string ( 0 ) " " <nl> + <nl> + Fatal error : ISO - 8859 - 15 is not implemented yet . % s <nl> mmm a / hphp / zend / zend - html . cpp <nl> ppp b / hphp / zend / zend - html . cpp <nl> char * string_html_encode ( const char * input , int & len , <nl> if ( LIKELY ( c < 0x80 ) ) { <nl> * q + + = c ; <nl> break ; <nl> + } else if ( htmlEnt & & ! utf8 & & ( c - 160 ) < sizeof ( ent_iso_8859_1 ) - 1 ) { <nl> + / * * <nl> + * https : / / github . com / facebook / hhvm / issues / 2186 <nl> + * If not UTF8 , and we are converting to HTML entities , use known <nl> + * entity equivalent of the character , if possible . <nl> + * Since we only support ISO - 8859 - 1 or UTF8 right now , and they use <nl> + * the same mapping array , use it . <nl> + * Start at 0xA0 = 160 <nl> + * / <nl> + * q + + = ' & ' ; <nl> + const char * s = ent_iso_8859_1 [ c - 160 ] ; <nl> + int len = strlen ( s ) ; <nl> + for ( int n = 0 ; n < len ; n + + ) { <nl> + * q + + = * s + + ; <nl> + } <nl> + * q + + = ' ; ' ; <nl> + break ; <nl> } <nl> <nl> bool should_skip = <nl>
htmlentities ( ) should convert valid ISO - 8859 - 1 characters to appropriate entities
facebook/hhvm
ecb565067beb6428dc1a67800c18d604313d051f
2014-11-14T05:00:27Z
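The zend-html.cpp hunk above maps non-UTF-8 input bytes at or above 0xA0 through the ISO-8859-1 entity table and emits them as &name;. A hedged C++ sketch of that lookup — the three-entry table here is illustrative, not the real ent_iso_8859_1 array:

#include <cstdio>
#include <string>

// Illustrative subset of the ISO-8859-1 named-entity table, indexed from 0xA0.
static const char* kEntities[] = {"nbsp", "iexcl", "cent"};

std::string encode_latin1(const std::string& in) {
  std::string out;
  for (unsigned char c : in) {
    size_t idx = static_cast<size_t>(c) - 0xA0;
    if (c >= 0xA0 && idx < sizeof(kEntities) / sizeof(kEntities[0])) {
      out += '&';
      out += kEntities[idx];  // named entity, e.g. &nbsp;
      out += ';';
    } else {
      out += static_cast<char>(c);  // below 0xA0: pass through unchanged
    }
  }
  return out;
}

int main() {
  std::printf("%s\n", encode_latin1("a\xA0" "b").c_str());  // prints a&nbsp;b
}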
mmm a / tensorflow / tools / ci_build / presubmit / ubuntu_16 / gpu_py36_full / build . sh <nl> ppp b / tensorflow / tools / ci_build / presubmit / ubuntu_16 / gpu_py36_full / build . sh <nl> function run_build ( ) { <nl> export ACTION_PATH = " / usr / local / sbin : / usr / local / bin : / usr / sbin : / usr / bin : / sbin : / bin " <nl> export PYTHON_BIN_PATH = " / usr / bin / python3 " <nl> export TF2_BEHAVIOR = 1 <nl> - tag_filters = " gpu , - no_gpu , - nogpu , - benchmark - test , - no_oss , - oss_serial " " $ ( maybe_skip_v1 ) " <nl> + tag_filters = " gpu , - no_gpu , - nogpu , - benchmark - test , - no_oss , - oss_serial , - no_gpu_presubmit " " $ ( maybe_skip_v1 ) " <nl> <nl> # Get the default test targets for bazel . <nl> source tensorflow / tools / ci_build / build_scripts / PRESUBMIT_BUILD_TARGETS . sh <nl>
Adding a tag ` no_gpu_presubmit ` to disable a test in presubmits while still running it nightly .
tensorflow/tensorflow
c2792934d1bcfd177d70554dee99b91c3ae976cb
2020-01-04T00:08:31Z
mmm a / cocos / renderer / CCRenderer . cpp <nl> ppp b / cocos / renderer / CCRenderer . cpp <nl> void Renderer : : setupVBOAndVAO ( ) <nl> void Renderer : : setupVBO ( ) <nl> { <nl> glGenBuffers ( 2 , & _buffersVBO [ 0 ] ) ; <nl> - mapBuffers ( ) ; <nl> + / / Issue # 15652 <nl> + / / Should not initialzie VBO with a large size ( VBO_SIZE = 65536 ) , <nl> + / / it may cause low FPS on some Android devices like LG G4 & Nexus 5X . <nl> + / / It ' s probably because some implementations of OpenGLES driver will <nl> + / / copy the whole memory of VBO which initialzied at the first time <nl> + / / once glBufferData / glBufferSubData is invoked . <nl> + / / For more discussion , please refer to https : / / github . com / cocos2d / cocos2d - x / issues / 15652 <nl> + / / mapBuffers ( ) ; <nl> } <nl> <nl> void Renderer : : mapBuffers ( ) <nl>
issue : fix low FPS on some Android devices when auto batch is disabled .
cocos2d/cocos2d-x
126b60945e5b99f8eb3c76f1034fc50360f21c4c
2016-06-12T03:35:30Z
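The comment added above attributes the LG G4 / Nexus 5X slowdown to drivers copying the whole up-front VBO_SIZE allocation on every later buffer update, so the fix is simply to stop pre-allocating in setupVBO. A hedged GLES2 sketch of that deferred-allocation pattern (compiles against the GLES2 header; assumes a current GL context, and names and sizes are illustrative):

#include <GLES2/gl2.h>

static GLuint s_vbo = 0;

void setup_vbo() {
  // Generate the buffer name only; do NOT glBufferData(VBO_SIZE) here,
  // since some drivers copy the entire allocation on later updates.
  glGenBuffers(1, &s_vbo);
}

void upload_vertices(const void* data, GLsizeiptr bytes) {
  glBindBuffer(GL_ARRAY_BUFFER, s_vbo);
  // Allocate exactly what this batch needs, at upload time.
  glBufferData(GL_ARRAY_BUFFER, bytes, data, GL_DYNAMIC_DRAW);
}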
mmm a / plugins / chain_plugin / chain_plugin . cpp <nl> ppp b / plugins / chain_plugin / chain_plugin . cpp <nl> read_only : : get_info_results read_only : : get_info ( const read_only : : get_info_params <nl> / / std : : bitset < 64 > ( db . get_dynamic_global_properties ( ) . recent_slots_filled ) . to_string ( ) , <nl> / / __builtin_popcountll ( db . get_dynamic_global_properties ( ) . recent_slots_filled ) / 64 . 0 , <nl> app ( ) . version_string ( ) , <nl> - symbol ( ) . name ( ) , <nl> - symbol ( ) . precision ( ) , <nl> } ; <nl> } <nl> <nl> mmm a / plugins / chain_plugin / include / eosio / chain_plugin / chain_plugin . hpp <nl> ppp b / plugins / chain_plugin / include / eosio / chain_plugin / chain_plugin . hpp <nl> class read_only { <nl> / / string recent_slots ; <nl> / / double participation_rate = 0 ; <nl> optional < string > server_version_string ; <nl> - optional < string > core_symbol ; <nl> - optional < uint64_t > core_symbol_precision ; <nl> } ; <nl> get_info_results get_info ( const get_info_params & ) const ; <nl> <nl> class chain_plugin : public plugin < chain_plugin > { <nl> FC_REFLECT ( eosio : : chain_apis : : permission , ( perm_name ) ( parent ) ( required_auth ) ) <nl> FC_REFLECT ( eosio : : chain_apis : : empty , ) <nl> FC_REFLECT ( eosio : : chain_apis : : read_only : : get_info_results , <nl> - ( server_version ) ( chain_id ) ( head_block_num ) ( last_irreversible_block_num ) ( last_irreversible_block_id ) ( head_block_id ) ( head_block_time ) ( head_block_producer ) ( virtual_block_cpu_limit ) ( virtual_block_net_limit ) ( block_cpu_limit ) ( block_net_limit ) ( server_version_string ) ( core_symbol ) ( core_symbol_precision ) ) <nl> + ( server_version ) ( chain_id ) ( head_block_num ) ( last_irreversible_block_num ) ( last_irreversible_block_id ) ( head_block_id ) ( head_block_time ) ( head_block_producer ) ( virtual_block_cpu_limit ) ( virtual_block_net_limit ) ( block_cpu_limit ) ( block_net_limit ) ( server_version_string ) ) <nl> FC_REFLECT ( eosio : : chain_apis : : read_only : : get_block_params , ( block_num_or_id ) ) <nl> FC_REFLECT ( eosio : : chain_apis : : read_only : : get_block_header_state_params , ( block_num_or_id ) ) <nl> <nl>
Revert " Simplify Wallet Tools EOSIO Blockchain Detection for End Users "
EOSIO/eos
bcd98026fa15273c4836c477603b35bef6c93b6d
2018-09-06T22:57:55Z
mmm a / atom / browser / mac / atom_application_delegate . mm <nl> ppp b / atom / browser / mac / atom_application_delegate . mm <nl> - ( BOOL ) application : ( NSApplication * ) sender <nl> continueUserActivity : ( NSUserActivity * ) userActivity <nl> restorationHandler : ( void ( ^ ) ( NSArray * restorableObjects ) ) restorationHandler { <nl> std : : string activity_type ( base : : SysNSStringToUTF8 ( userActivity . activityType ) ) ; <nl> - <nl> + <nl> std : : map < std : : string , std : : string > user_info ; <nl> - <nl> + <nl> NSArray * keys = [ userActivity . userInfo allKeys ] ; <nl> for ( NSString * key in keys ) <nl> { <nl> new file mode 100644 <nl> index 000000000000 . . ac1a9a608d70 <nl> mmm / dev / null <nl> ppp b / atom / browser / mac / mac_native_converter . h <nl> <nl> + # import < Cocoa / Cocoa . h > <nl> + <nl> + # include " base / values . h " <nl> + # include " base / strings / sys_string_conversions . h " <nl> + <nl> + @ interface MacNativeConverter : NSObject <nl> + <nl> + - ( base : : ListValue * ) arrayToV8 : ( NSArray * ) nsArray ; <nl> + - ( base : : DictionaryValue * ) dictionaryToV8 : ( NSDictionary * ) nsDictionary ; <nl> + <nl> + @ end <nl> new file mode 100644 <nl> index 000000000000 . . 632440360178 <nl> mmm / dev / null <nl> ppp b / atom / browser / mac / mac_native_converter . mm <nl> <nl> + # import " atom / browser / mac / mac_native_converter . h " <nl> + <nl> + @ implementation MacNativeConverter <nl> + <nl> + - ( base : : ListValue * ) arrayToV8 : ( NSArray * ) nsArray { <nl> + scoped_ptr < base : : ListValue > list ( new base : : ListValue ) ; <nl> + <nl> + for ( id value in nsArray ) { <nl> + if ( [ value isKindOfClass : [ NSArray class ] ] ) { <nl> + list - > Append ( [ self arrayToV8 : value ] ) ; <nl> + } else if ( [ value isKindOfClass : [ NSDictionary class ] ] ) { <nl> + list - > Append ( [ self dictionaryToV8 : value ] ) ; <nl> + } else if ( [ value isKindOfClass : [ NSString class ] ] ) { <nl> + list - > AppendString ( base : : SysNSStringToUTF8 ( value ) ) ; <nl> + } else if ( [ value isKindOfClass : [ NSNumber class ] ] ) { <nl> + list - > AppendInteger ( ( ( NSNumber * ) value ) . intValue ) ; <nl> + } <nl> + } <nl> + <nl> + return list . get ( ) ; <nl> + } <nl> + <nl> + - ( base : : DictionaryValue * ) dictionaryToV8 : ( NSDictionary * ) nsDictionary { <nl> + scoped_ptr < base : : DictionaryValue > dict ( new base : : DictionaryValue ) ; <nl> + <nl> + NSEnumerator * it = [ nsDictionary keyEnumerator ] ; <nl> + while ( NSString * key = [ it nextObject ] ) { <nl> + id value = [ nsDictionary objectForKey : key ] ; <nl> + <nl> + std : : string key_str ( base : : SysNSStringToUTF8 ( key ) ) ; <nl> + <nl> + if ( [ value isKindOfClass : [ NSArray class ] ] ) { <nl> + dict - > Set ( key_str , [ self arrayToV8 : value ] ) ; <nl> + } else if ( [ value isKindOfClass : [ NSDictionary class ] ] ) { <nl> + dict - > Set ( key_str , [ self dictionaryToV8 : value ] ) ; <nl> + } else if ( [ value isKindOfClass : [ NSString class ] ] ) { <nl> + dict - > SetString ( key_str , base : : SysNSStringToUTF8 ( value ) ) ; <nl> + } else if ( [ value isKindOfClass : [ NSNumber class ] ] ) { <nl> + dict - > SetInteger ( key_str , ( ( NSNumber * ) value ) . intValue ) ; <nl> + } <nl> + } <nl> + <nl> + return dict . get ( ) ; <nl> + } <nl> + <nl> + @ end <nl> mmm a / filenames . gypi <nl> ppp b / filenames . gypi <nl> <nl> ' atom / browser / mac / atom_application . mm ' , <nl> ' atom / browser / mac / atom_application_delegate . 
h ' , <nl> ' atom / browser / mac / atom_application_delegate . mm ' , <nl> + ' atom / browser / mac / mac_native_converter . h ' , <nl> + ' atom / browser / mac / mac_native_converter . mm ' , <nl> ' atom / browser / native_window . cc ' , <nl> ' atom / browser / native_window . h ' , <nl> ' atom / browser / native_window_views_win . cc ' , <nl>
Add some shady methods to get V8 objects or arrays from NSDictionary or NSArray .
electron/electron
2295f3a8327d6df4031dcfcd8daebb771d0e3530
2016-05-02T21:45:59Z
mmm a / utils / cmpcodesize / cmpcodesize / main . py <nl> ppp b / utils / cmpcodesize / cmpcodesize / main . py <nl> <nl> <nl> SHORTCUTS = { <nl> " O " : " bin / Benchmark_O " , <nl> - " Ounchecked " : " bin / Benchmark_Ounchecked " , <nl> + " Osize " : " bin / Benchmark_Osize " , <nl> " Onone " : " bin / Benchmark_Onone " , <nl> " dylib " : " lib / swift / macosx / x86_64 / libswiftCore . dylib " , <nl> } <nl> def main ( ) : <nl> Compares the files in the new and old build - dirs . <nl> Aliases : <nl> O = > bin / Benchmark_O <nl> - Ounchecked = > bin / Benchmark_Ounchecked <nl> + Osize = > bin / Benchmark_Osize <nl> Onone = > bin / Benchmark_Onone <nl> dylib = > lib / swift / macosx / x86_64 / libswiftCore . dylib <nl> Examples : <nl>
cmpcodesize : replace the Ounchecked with the Osize benchmark shortcut .
apple/swift
dcef91680c4f4a2b84d869f38c647065640b859c
2017-11-11T00:13:28Z
mmm a / tools / jenkins / docker_run_tests . sh <nl> ppp b / tools / jenkins / docker_run_tests . sh <nl> rvm use ruby - 2 . 1 <nl> <nl> mkdir - p reports <nl> <nl> - $ RUN_TESTS_COMMAND <nl> + exit_code = 0 <nl> + <nl> + $ RUN_TESTS_COMMAND | | exit_code = $ ? <nl> <nl> cd reports <nl> echo ' < html > < head > < / head > < body > ' > index . html <nl> echo ' < / body > < / html > ' > > index . html <nl> cd . . <nl> <nl> zip - r reports . zip reports <nl> + <nl> + exit $ exit_code <nl>
Always generate html reports .
grpc/grpc
61059d3d3852f4d84d6adb61e33bf1a4114c8202
2015-12-02T00:45:45Z
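The script change above captures the test command's exit code, still builds the HTML index and the reports zip, and only then exits with the saved code, so report generation neither masks nor is skipped by a test failure. The same remember-then-propagate pattern in C++, with a hypothetical run_tests():

#include <cstdio>

int run_tests() { return 1; }  // hypothetical failing test run

int main() {
  int exit_code = run_tests();       // remember the status...
  std::puts("generating reports");   // ...always do the reporting work...
  return exit_code;                  // ...then propagate the original status
}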
mmm a / hphp / hack / src / parser / full_fidelity_ast . ml <nl> ppp b / hphp / hack / src / parser / full_fidelity_ast . ml <nl> and pClassElt : class_elt list parser = fun node env - > <nl> * the middle of the declaration , to be associated with individual <nl> * properties , right now we don ' t handle this * ) <nl> let doc_comment_opt = extract_docblock node in <nl> + let modifiers = syntax_to_list_no_separators property_modifiers in <nl> + if Hh_core . List . exists ~ f : is_final modifiers then <nl> + raise_parsing_error env node SyntaxError . final_property ; <nl> + <nl> [ ClassVars <nl> { cv_kinds = pKinds property_modifiers env <nl> ; cv_hint = mpOptional pHint property_type env <nl> mmm a / hphp / hack / src / parser / full_fidelity_parser_errors . ml <nl> ppp b / hphp / hack / src / parser / full_fidelity_parser_errors . ml <nl> let statement_errors env node parents errors = <nl> | Some ( error_node , error_message ) - > <nl> make_error_from_node error_node error_message : : errors <nl> <nl> - let property_errors env node errors = <nl> - match syntax node with <nl> - | PropertyDeclaration p - > <nl> - let modifiers = syntax_to_list_no_separators p . property_modifiers in <nl> - let errors = if Hh_core . List . exists ~ f : is_final modifiers then <nl> - make_error_from_node node SyntaxError . final_property : : errors <nl> - else errors in <nl> - errors <nl> - | _ - > errors <nl> - <nl> let string_starts_with_int s = <nl> if String . length s = 0 then false else <nl> try let _ = int_of_string ( String . make 1 s . [ 0 ] ) in true with _ - > false <nl> let find_syntax_errors env = <nl> statement_errors env node parents errors in <nl> let errors = <nl> methodish_errors env node parents errors in <nl> - let errors = <nl> - property_errors env node errors in <nl> let errors = <nl> expression_errors env node parents errors in <nl> let trait_require_clauses , errors = <nl>
Move property errors to lowering
facebook/hhvm
2649bbb98172dc7658a00cc3aad2483204b60fff
2018-05-25T19:07:13Z
mmm a / docker / test / testflows / runner / Dockerfile <nl> ppp b / docker / test / testflows / runner / Dockerfile <nl> RUN set - x \ <nl> VOLUME / var / lib / docker <nl> EXPOSE 2375 <nl> ENTRYPOINT [ " dockerd - entrypoint . sh " ] <nl> - CMD [ " sh " , " - c " , " python3 regression . py - - local - - clickhouse - binary - path $ { CLICKHOUSE_TESTS_SERVER_BIN_PATH } - - log test . log $ { TESTFLOWS_OPTS } & & cat test . log | tfs report results - - format json > results . json " ] <nl> + CMD [ " sh " , " - c " , " python3 regression . py - - no - color - - local - - clickhouse - binary - path $ { CLICKHOUSE_TESTS_SERVER_BIN_PATH } - - log test . log $ { TESTFLOWS_OPTS } & & cat test . log | tfs report results - - format json > results . json " ] <nl> <nl>
No color
ClickHouse/ClickHouse
e89ccfd5bf99e5c98abe18b0290990d47cd350f6
2020-07-07T13:11:19Z
mmm a / xbmc / filesystem / ZipFile . cpp <nl> ppp b / xbmc / filesystem / ZipFile . cpp <nl> ssize_t CZipFile : : Read ( void * lpBuf , size_t uiBufSize ) <nl> { <nl> if ( uiBufSize + m_iFilePos > mZipItem . csize ) <nl> uiBufSize = mZipItem . csize - m_iFilePos ; <nl> - if ( uiBufSize < 0 ) <nl> - { <nl> + <nl> + if ( uiBufSize = = 0 ) <nl> return 0 ; / / we are past eof , this shouldn ' t happen but test anyway <nl> - } <nl> + <nl> ssize_t iResult = mFile . Read ( lpBuf , uiBufSize ) ; <nl> if ( iResult < 0 ) <nl> return - 1 ; <nl>
fixed : CID 1248213 Unsigned compared against 0
xbmc/xbmc
12c66a27c322694551c8ece97f4f368bfab6d330
2014-12-29T11:17:35Z
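The Coverity finding above (CID 1248213) is the classic unsigned anti-pattern: with an unsigned uiBufSize, the old test (uiBufSize < 0) can never be true, so the meaningful past-EOF guard is (uiBufSize == 0) after clamping. A small self-contained C++ illustration with hypothetical names:

#include <cstdio>

long read_clamped(unsigned long want, unsigned long remaining) {
  if (want > remaining) want = remaining;  // clamp to bytes left in the item
  // (want < 0) would be vacuously false for an unsigned type; test == 0.
  if (want == 0) return 0;                 // past EOF: nothing to read
  return static_cast<long>(want);
}

int main() {
  std::printf("%ld\n", read_clamped(10, 4));  // prints 4
  std::printf("%ld\n", read_clamped(10, 0));  // prints 0
}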
mmm a / hphp / hack / src / client / clientStart . ml <nl> ppp b / hphp / hack / src / client / clientStart . ml <nl> let start_server env = <nl> " " ) <nl> | None - > " " in <nl> let hh_server = Printf . sprintf " % s - d % s % s " <nl> - ( get_hhserver ( ) ) <nl> - ( Path . string_of_path env . root ) <nl> + ( Filename . quote ( get_hhserver ( ) ) ) <nl> + ( Filename . quote ( Path . string_of_path env . root ) ) <nl> server_options in <nl> Printf . fprintf stderr " Server launched with the following command : \ n \ t % s \ n % ! " <nl> hh_server ; <nl> mmm a / hphp / hack / src / utils / shell . ml <nl> ppp b / hphp / hack / src / utils / shell . ml <nl> <nl> * <nl> * ) <nl> <nl> - ( * * This is probably not fully safe . It ' ll do a reasonable job of escaping <nl> - silly things like spaces , quotes and so on but there are probably ways <nl> - to break it . * ) <nl> - let escape_string_for_shell str = <nl> - let escaped = Str . global_replace ( Str . regexp " ' " ) " ' \ " ' \ " ' " str in <nl> - " ' " ^ escaped ^ " ' " <nl> + let escape_string_for_shell = Filename . quote <nl> <nl> let escape_spaces str = Str . global_replace ( Str . regexp " " ) " \ \ " str <nl>
Quote the path passed to hh_server
facebook/hhvm
57f69f37d9ef0e6f17eecf4d309b335cbc52f43d
2014-11-27T01:30:31Z
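Both hunks above converge on the POSIX quoting rule that OCaml's Filename.quote implements: wrap the string in single quotes and replace each embedded single quote with a close-quote, an escaped quote, and a reopen. A minimal C++ sketch of that rule:

#include <cstdio>
#include <string>

std::string shell_quote(const std::string& s) {
  std::string out = "'";
  for (char c : s) {
    if (c == '\'') out += "'\\''";  // close quote, escaped ', reopen
    else out += c;
  }
  out += '\'';
  return out;
}

int main() {
  // Prints 'it'\''s a dir' -- safe to splice into a shell command line.
  std::printf("%s\n", shell_quote("it's a dir").c_str());
}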
mmm a / tests / test_core . py <nl> ppp b / tests / test_core . py <nl> def test ( ) : <nl> test ( ) <nl> <nl> def test_sse1 ( self ) : <nl> - self . banned_js_engines = [ NODE_JS ] # the test code hits NaN canonicalization on node . js <nl> if self . is_emterpreter ( ) : return self . skip ( ' todo ' ) <nl> - if ' SAFE_HEAP = 1 ' in self . emcc_args : return self . skip ( ' SSE with SAFE_HEAP = 1 breaks due to NaN canonicalization ! ' ) <nl> + if ' SAFE_HEAP = 1 ' in self . emcc_args and SPIDERMONKEY_ENGINE in JS_ENGINES : <nl> + self . banned_js_engines + = [ SPIDERMONKEY_ENGINE ] <nl> + print ' Skipping test_sse1 with SAFE_HEAP = 1 on SpiderMonkey , since it fails due to NaN canonicalization . ' <nl> Settings . PRECISE_F32 = 1 # SIMD currently requires Math . fround <nl> <nl> orig_args = self . emcc_args <nl> def test_sse1 ( self ) : <nl> <nl> # Tests the full SSE1 API . <nl> def test_sse1_full ( self ) : <nl> - self . banned_js_engines = [ NODE_JS ] # the test code hits NaN canonicalization on node . js <nl> if self . is_emterpreter ( ) : return self . skip ( ' todo ' ) <nl> Popen ( [ CLANG , path_from_root ( ' tests ' , ' test_sse1_full . cpp ' ) , ' - o ' , ' test_sse1_full ' , ' - D_CRT_SECURE_NO_WARNINGS = 1 ' ] + get_clang_native_args ( ) , stdout = PIPE ) . communicate ( ) <nl> native_result , err = Popen ( ' . / test_sse1_full ' , stdout = PIPE ) . communicate ( ) <nl> def test_sse1_full ( self ) : <nl> # Tests the full SSE2 API . <nl> def test_sse2_full ( self ) : <nl> if self . is_emterpreter ( ) : return self . skip ( ' todo ' ) <nl> - if SPIDERMONKEY_ENGINE not in JS_ENGINES : return self . skip ( ' test_sse2_full requires SpiderMonkey to run . ' ) <nl> args = [ ] <nl> if ' - O0 ' in self . emcc_args : args + = [ ' - D_DEBUG = 1 ' ] <nl> Popen ( [ CLANG , path_from_root ( ' tests ' , ' test_sse2_full . cpp ' ) , ' - o ' , ' test_sse2_full ' , ' - D_CRT_SECURE_NO_WARNINGS = 1 ' ] + args + get_clang_native_args ( ) , stdout = PIPE ) . communicate ( ) <nl> def test_simd2 ( self ) : <nl> def test_simd3 ( self ) : <nl> if self . is_emterpreter ( ) : return self . skip ( ' todo ' ) <nl> <nl> - self . banned_js_engines = [ NODE_JS ] # fails in simd . js polyfill <nl> - <nl> Settings . PRECISE_F32 = 1 # SIMD currently requires Math . fround <nl> <nl> test_path = path_from_root ( ' tests ' , ' core ' , ' test_simd3 ' ) <nl> def test_simd10 ( self ) : <nl> # test_simd10 is to test that loading and storing arbitrary bit patterns works in SSE1 . <nl> if self . is_emterpreter ( ) : return self . skip ( ' todo ' ) <nl> <nl> - self . banned_js_engines = [ NODE_JS ] # the test code hits NaN canonicalization on node . js <nl> - <nl> test_path = path_from_root ( ' tests ' , ' core ' , ' test_simd10 ' ) <nl> src , output = ( test_path + s for s in ( ' . in ' , ' . out ' ) ) <nl> <nl> def test_simd11 ( self ) : <nl> # test_simd11 is to test that _mm_movemask_ps works correctly when handling input floats with 0xFFFFFFFF NaN bit patterns . <nl> if self . is_emterpreter ( ) : return self . skip ( ' todo ' ) <nl> <nl> - self . banned_js_engines = [ NODE_JS ] # the test code hits NaN canonicalization on node . js <nl> - <nl> test_path = path_from_root ( ' tests ' , ' core ' , ' test_simd11 ' ) <nl> src , output = ( test_path + s for s in ( ' . in ' , ' . out ' ) ) <nl> <nl>
Enable SIMD tests in the node . js polyfill ; they work now .
emscripten-core/emscripten
50ded570a5d372b38558f9b26a1ecc4ba962777a
2015-09-15T11:43:29Z
mmm a / src / core / surface / call . c <nl> ppp b / src / core / surface / call . c <nl> static void finish_live_ioreq_op ( grpc_call * call , grpc_ioreq_op op , <nl> master - > complete_mask | = 1u < < op ; <nl> if ( status ! = GRPC_OP_OK ) { <nl> master - > status = status ; <nl> - master - > complete_mask = master - > need_mask ; <nl> } <nl> if ( master - > complete_mask = = master - > need_mask ) { <nl> for ( i = 0 ; i < GRPC_IOREQ_OP_COUNT ; i + + ) { <nl>
Always wait for all ops to finish
grpc/grpc
60de66cf7da81585038ad6afa1c857b3f6e3b70c
2015-04-24T20:14:06Z
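The one-line deletion above changes the semantics from "the first failure force-completes the whole request" to "a failure only records the status, and the master request completes once every needed op has reported". A small C++ sketch of that completion-mask pattern, with hypothetical names:

#include <cstdio>

struct Master {
  unsigned need_mask;
  unsigned complete_mask = 0;
  bool ok = true;
};

bool finish_op(Master& m, unsigned op, bool op_ok) {
  m.complete_mask |= 1u << op;
  if (!op_ok) m.ok = false;               // remember the failure, keep waiting
  return m.complete_mask == m.need_mask;  // done only when all ops landed
}

int main() {
  Master m{7u};                       // ops 0..2 needed
  finish_op(m, 0, true);
  finish_op(m, 1, false);             // error recorded, request not done yet
  bool done = finish_op(m, 2, true);
  std::printf("done=%d ok=%d\n", done, m.ok);  // prints done=1 ok=0
}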
mmm a / Marlin / Configuration . h <nl> ppp b / Marlin / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / default / Configuration . h <nl> ppp b / Marlin / src / config / default / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / AlephObjects / TAZ4 / Configuration . h <nl> ppp b / Marlin / src / config / examples / AlephObjects / TAZ4 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / AliExpress / CL - 260 / Configuration . h <nl> ppp b / Marlin / src / config / examples / AliExpress / CL - 260 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 
0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Anet / A2 / Configuration . h <nl> ppp b / Marlin / src / config / examples / Anet / A2 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Anet / A2plus / Configuration . h <nl> ppp b / Marlin / src / config / examples / Anet / A2plus / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Anet / A6 / Configuration . h <nl> ppp b / Marlin / src / config / examples / Anet / A6 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Anet / A8 / Configuration . h <nl> ppp b / Marlin / src / config / examples / Anet / A8 / Configuration . 
h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Azteeg / X5GT / Configuration . h <nl> ppp b / Marlin / src / config / examples / Azteeg / X5GT / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / BIBO / TouchX / cyclops / Configuration . h <nl> ppp b / Marlin / src / config / examples / BIBO / TouchX / cyclops / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / BIBO / TouchX / default / Configuration . h <nl> ppp b / Marlin / src / config / examples / BIBO / TouchX / default / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 
0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / BQ / Hephestos / Configuration . h <nl> ppp b / Marlin / src / config / examples / BQ / Hephestos / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / BQ / Hephestos_2 / Configuration . h <nl> ppp b / Marlin / src / config / examples / BQ / Hephestos_2 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / BQ / WITBOX / Configuration . h <nl> ppp b / Marlin / src / config / examples / BQ / WITBOX / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Cartesio / Configuration . h <nl> ppp b / Marlin / src / config / examples / Cartesio / Configuration . 
h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Creality / CR - 10 / Configuration . h <nl> ppp b / Marlin / src / config / examples / Creality / CR - 10 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Creality / CR - 10S / Configuration . h <nl> ppp b / Marlin / src / config / examples / Creality / CR - 10S / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Creality / CR - 10mini / Configuration . h <nl> ppp b / Marlin / src / config / examples / Creality / CR - 10mini / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 
0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Creality / CR - 8 / Configuration . h <nl> ppp b / Marlin / src / config / examples / Creality / CR - 8 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Creality / Ender - 2 / Configuration . h <nl> ppp b / Marlin / src / config / examples / Creality / Ender - 2 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Creality / Ender - 3 / Configuration . h <nl> ppp b / Marlin / src / config / examples / Creality / Ender - 3 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Creality / Ender - 4 / Configuration . 
h <nl> ppp b / Marlin / src / config / examples / Creality / Ender - 4 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Einstart - S / Configuration . h <nl> ppp b / Marlin / src / config / examples / Einstart - S / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Felix / Configuration . h <nl> ppp b / Marlin / src / config / examples / Felix / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Felix / DUAL / Configuration . h <nl> ppp b / Marlin / src / config / examples / Felix / DUAL / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 
0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / FolgerTech / i3 - 2020 / Configuration . h <nl> ppp b / Marlin / src / config / examples / FolgerTech / i3 - 2020 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Formbot / Raptor / Configuration . h <nl> ppp b / Marlin / src / config / examples / Formbot / Raptor / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Formbot / T_Rex_2 + / Configuration . h <nl> ppp b / Marlin / src / config / examples / Formbot / T_Rex_2 + / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Formbot / T_Rex_3 / Configuration . 
h <nl> ppp b / Marlin / src / config / examples / Formbot / T_Rex_3 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Geeetech / GT2560 / Configuration . h <nl> ppp b / Marlin / src / config / examples / Geeetech / GT2560 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Geeetech / I3_Pro_X - GT2560 / Configuration . h <nl> ppp b / Marlin / src / config / examples / Geeetech / I3_Pro_X - GT2560 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Geeetech / MeCreator2 / Configuration . h <nl> ppp b / Marlin / src / config / examples / Geeetech / MeCreator2 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 
0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> diff - - git a / Marlin / src / config / examples / Geeetech / Prusa i3 Pro B / bltouch / Configuration . h b / Marlin / src / config / examples / Geeetech / Prusa i3 Pro B / bltouch / Configuration . h <nl> mmm a / Marlin / src / config / examples / Geeetech / Prusa i3 Pro B / bltouch / Configuration . h <nl> ppp b / Marlin / src / config / examples / Geeetech / Prusa i3 Pro B / bltouch / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> diff - - git a / Marlin / src / config / examples / Geeetech / Prusa i3 Pro B / noprobe / Configuration . h b / Marlin / src / config / examples / Geeetech / Prusa i3 Pro B / noprobe / Configuration . h <nl> mmm a / Marlin / src / config / examples / Geeetech / Prusa i3 Pro B / noprobe / Configuration . h <nl> ppp b / Marlin / src / config / examples / Geeetech / Prusa i3 Pro B / noprobe / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> diff - - git a / Marlin / src / config / examples / Geeetech / Prusa i3 Pro C / Configuration . h b / Marlin / src / config / examples / Geeetech / Prusa i3 Pro C / Configuration . h <nl> mmm a / Marlin / src / config / examples / Geeetech / Prusa i3 Pro C / Configuration . h <nl> ppp b / Marlin / src / config / examples / Geeetech / Prusa i3 Pro C / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . 
<nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> diff - - git a / Marlin / src / config / examples / Geeetech / Prusa i3 Pro W / Configuration . h b / Marlin / src / config / examples / Geeetech / Prusa i3 Pro W / Configuration . h <nl> mmm a / Marlin / src / config / examples / Geeetech / Prusa i3 Pro W / Configuration . h <nl> ppp b / Marlin / src / config / examples / Geeetech / Prusa i3 Pro W / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Infitary / i3 - M508 / Configuration . h <nl> ppp b / Marlin / src / config / examples / Infitary / i3 - M508 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / JGAurora / A5 / Configuration . h <nl> ppp b / Marlin / src / config / examples / JGAurora / A5 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 
0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / MakerParts / Configuration . h <nl> ppp b / Marlin / src / config / examples / MakerParts / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Malyan / M150 / Configuration . h <nl> ppp b / Marlin / src / config / examples / Malyan / M150 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Malyan / M200 / Configuration . h <nl> ppp b / Marlin / src / config / examples / Malyan / M200 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Micromake / C1 / basic / Configuration . 
h <nl> ppp b / Marlin / src / config / examples / Micromake / C1 / basic / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Micromake / C1 / enhanced / Configuration . h <nl> ppp b / Marlin / src / config / examples / Micromake / C1 / enhanced / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Mks / Sbase / Configuration . h <nl> ppp b / Marlin / src / config / examples / Mks / Sbase / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / RepRapPro / Huxley / Configuration . h <nl> ppp b / Marlin / src / config / examples / RepRapPro / Huxley / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 
0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / RepRapWorld / Megatronics / Configuration . h <nl> ppp b / Marlin / src / config / examples / RepRapWorld / Megatronics / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / RigidBot / Configuration . h <nl> ppp b / Marlin / src / config / examples / RigidBot / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / SCARA / Configuration . h <nl> ppp b / Marlin / src / config / examples / SCARA / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / STM32F10 / Configuration . h <nl> ppp b / Marlin / src / config / examples / STM32F10 / Configuration . 
h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / STM32F4 / Configuration . h <nl> ppp b / Marlin / src / config / examples / STM32F4 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Sanguinololu / Configuration . h <nl> ppp b / Marlin / src / config / examples / Sanguinololu / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / TheBorg / Configuration . h <nl> ppp b / Marlin / src / config / examples / TheBorg / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 
0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / TinyBoy2 / Configuration . h <nl> ppp b / Marlin / src / config / examples / TinyBoy2 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Tronxy / X1 / Configuration . h <nl> ppp b / Marlin / src / config / examples / Tronxy / X1 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Tronxy / X3A / Configuration . h <nl> ppp b / Marlin / src / config / examples / Tronxy / X3A / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Tronxy / X5S / Configuration . h <nl> ppp b / Marlin / src / config / examples / Tronxy / X5S / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 
0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Tronxy / XY100 / Configuration . h <nl> ppp b / Marlin / src / config / examples / Tronxy / XY100 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / UltiMachine / Archim2 / Configuration . h <nl> ppp b / Marlin / src / config / examples / UltiMachine / Archim2 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Velleman / K8200 / Configuration . h <nl> ppp b / Marlin / src / config / examples / Velleman / K8200 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Velleman / K8400 / Configuration . 
h <nl> ppp b / Marlin / src / config / examples / Velleman / K8400 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / Velleman / K8400 / Dual - head / Configuration . h <nl> ppp b / Marlin / src / config / examples / Velleman / K8400 / Dual - head / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> diff - - git a / Marlin / src / config / examples / Wanhao / Duplicator 6 / Configuration . h b / Marlin / src / config / examples / Wanhao / Duplicator 6 / Configuration . h <nl> mmm a / Marlin / src / config / examples / Wanhao / Duplicator 6 / Configuration . h <nl> ppp b / Marlin / src / config / examples / Wanhao / Duplicator 6 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / adafruit / ST7565 / Configuration . h <nl> ppp b / Marlin / src / config / examples / adafruit / ST7565 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 
0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / delta / Anycubic / Kossel / Configuration . h <nl> ppp b / Marlin / src / config / examples / delta / Anycubic / Kossel / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / delta / FLSUN / auto_calibrate / Configuration . h <nl> ppp b / Marlin / src / config / examples / delta / FLSUN / auto_calibrate / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / delta / FLSUN / kossel / Configuration . h <nl> ppp b / Marlin / src / config / examples / delta / FLSUN / kossel / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . 
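Editor's note: the block repeated across the example configurations above only declares defaults; the behavior it drives lives in Marlin/src/module/tool_change.cpp later in this same commit. As an illustration of how the settings interact, here is a minimal C++ sketch of the toolchange flow. It is a sketch, not code quoted from the commit: do_pause_e_move and the singlenozzle_* variables are taken from later hunks of this diff, and the division by 60 is an assumption based on the "( mm / m )" comments above, since the planner works in mm/s.

// Sketch only: approximate SINGLENOZZLE toolchange flow under the settings above.
#if ENABLED(SINGLENOZZLE)
  void singlenozzle_swap_sketch() {
    // 1. Retract the outgoing filament. Speeds are configured in mm/m, so
    //    divide by 60 to get the mm/s the planner expects (an assumption).
    do_pause_e_move(-singlenozzle_swap_length, singlenozzle_retract_speed / 60.0f);
    // 2. Move the nozzle clear of the print:
    #if ENABLED(SINGLENOZZLE_SWAP_PARK)
      // park at SINGLENOZZLE_TOOLCHANGE_POSITION, e.g. {X_MIN_POS+10, Y_MIN_POS+10, 5}
    #else
      // raise Z by SINGLENOZZLE_TOOLCHANGE_ZRAISE (2.0 mm by default)
    #endif
    // 3. Select the new logical tool and restore its saved target temperature.
    // 4. Prime the incoming filament back to the nozzle tip.
    do_pause_e_move(singlenozzle_swap_length, singlenozzle_prime_speed / 60.0f);
  }
#endif

With SINGLENOZZLE_SWAP_LENGTH left at 0 (the default when the options above stay commented out), both moves are no-ops, so enabling SINGLENOZZLE alone changes nothing about extrusion on toolchange.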
<nl> mmm a / Marlin / src / config / examples / delta / FLSUN / kossel_mini / Configuration . h <nl> ppp b / Marlin / src / config / examples / delta / FLSUN / kossel_mini / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / delta / Hatchbox_Alpha / Configuration . h <nl> ppp b / Marlin / src / config / examples / delta / Hatchbox_Alpha / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / delta / generic / Configuration . h <nl> ppp b / Marlin / src / config / examples / delta / generic / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / delta / kossel_mini / Configuration . h <nl> ppp b / Marlin / src / config / examples / delta / kossel_mini / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 
0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / delta / kossel_pro / Configuration . h <nl> ppp b / Marlin / src / config / examples / delta / kossel_pro / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / delta / kossel_xl / Configuration . h <nl> ppp b / Marlin / src / config / examples / delta / kossel_xl / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / gCreate / gMax1 . 5 + / Configuration . h <nl> ppp b / Marlin / src / config / examples / gCreate / gMax1 . 5 + / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / makibox / Configuration . 
h <nl> ppp b / Marlin / src / config / examples / makibox / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / stm32f103ret6 / Configuration . h <nl> ppp b / Marlin / src / config / examples / stm32f103ret6 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / tvrrug / Round2 / Configuration . h <nl> ppp b / Marlin / src / config / examples / tvrrug / Round2 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / config / examples / wt150 / Configuration . h <nl> ppp b / Marlin / src / config / examples / wt150 / Configuration . h <nl> <nl> <nl> / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> / / # define SINGLENOZZLE <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + / / Length of filament to retract and prime on toolchange <nl> + / / # define SINGLENOZZLE_SWAP_LENGTH 12 . 
0 <nl> + / / # define SINGLENOZZLE_SWAP_RETRACT_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PRIME_SPEED 3600 / / ( mm / m ) <nl> + / / # define SINGLENOZZLE_SWAP_PARK <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # else <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> <nl> / * * <nl> * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> mmm a / Marlin / src / feature / pause . cpp <nl> ppp b / Marlin / src / feature / pause . cpp <nl> static bool ensure_safe_temperature ( const AdvancedPauseMode mode = ADVANCED_PAUSE_ <nl> return thermalManager . wait_for_hotend ( active_extruder ) ; <nl> } <nl> <nl> - static void do_pause_e_move ( const float & length , const float & fr ) { <nl> + void do_pause_e_move ( const float & length , const float & fr ) { <nl> current_position [ E_AXIS ] + = length / planner . e_factor [ active_extruder ] ; <nl> planner . buffer_line ( current_position , fr , active_extruder ) ; <nl> planner . synchronize ( ) ; <nl> mmm a / Marlin / src / feature / pause . h <nl> ppp b / Marlin / src / feature / pause . h <nl> extern uint8_t did_pause_print ; <nl> # define DXC_PASS <nl> # endif <nl> <nl> + void do_pause_e_move ( const float & length , const float & fr ) ; <nl> + <nl> bool pause_print ( const float & retract , const point_t & park_point , const float & unload_length = 0 , const bool show_lcd = false DXC_PARAMS ) ; <nl> <nl> void wait_for_filament_reload ( const int8_t max_beep_count = 0 DXC_PARAMS ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 630c826d4b8 <nl> mmm / dev / null <nl> ppp b / Marlin / src / gcode / config / M217 . cpp <nl> <nl> + / * * <nl> + * Marlin 3D Printer Firmware <nl> + * Copyright ( C ) 2016 MarlinFirmware [ https : / / github . com / MarlinFirmware / Marlin ] <nl> + * <nl> + * Based on Sprinter and grbl . <nl> + * Copyright ( C ) 2011 Camiel Gubbels / Erik van der Zalm <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the GNU General Public License as published by <nl> + * the Free Software Foundation , either version 3 of the License , or <nl> + * ( at your option ) any later version . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU General Public License <nl> + * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * / <nl> + <nl> + # include " . . / . . / inc / MarlinConfigPre . h " <nl> + <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + <nl> + # include " . . / gcode . h " <nl> + # include " . . / . . / module / tool_change . h " <nl> + <nl> + # if NUM_SERIAL > 1 <nl> + # include " . . / . . / gcode / queue . h " <nl> + # endif <nl> + <nl> + void M217_report ( const bool eeprom = false ) { <nl> + # if NUM_SERIAL > 1 <nl> + const int16_t port = command_queue_port [ cmd_queue_index_r ] ; <nl> + # endif <nl> + serialprintPGM_P ( port , eeprom ? 
PSTR ( " M217 " ) : PSTR ( " Singlenozzle : " ) ) ; <nl> + SERIAL_ECHOPAIR_P ( port , " S " , singlenozzle_swap_length ) ; <nl> + SERIAL_ECHOPAIR_P ( port , " P " , singlenozzle_prime_speed ) ; <nl> + SERIAL_ECHOLNPAIR_P ( port , " R " , singlenozzle_retract_speed ) ; <nl> + } <nl> + <nl> + / * * <nl> + * M217 - Set SINGLENOZZLE toolchange parameters <nl> + * <nl> + * S [ mm ] Swap length <nl> + * P [ mm / s ] Prime speed <nl> + * R [ mm / s ] Retract speed <nl> + * / <nl> + void GcodeSuite : : M217 ( ) { <nl> + <nl> + bool report = true ; <nl> + <nl> + if ( parser . seenval ( ' S ' ) ) { report = false ; const float v = parser . value_float ( ) ; singlenozzle_swap_length = constrain ( v , 0 , 500 ) ; } <nl> + if ( parser . seenval ( ' P ' ) ) { report = false ; const int16_t v = parser . value_int ( ) ; singlenozzle_prime_speed = constrain ( v , 10 , 5400 ) ; } <nl> + if ( parser . seenval ( ' R ' ) ) { report = false ; const int16_t v = parser . value_int ( ) ; singlenozzle_retract_speed = constrain ( v , 10 , 5400 ) ; } <nl> + <nl> + if ( report ) M217_report ( ) ; <nl> + <nl> + } <nl> + <nl> + # endif / / SINGLENOZZLE <nl> mmm a / Marlin / src / gcode / gcode . cpp <nl> ppp b / Marlin / src / gcode / gcode . cpp <nl> void GcodeSuite : : process_parsed_command ( <nl> <nl> case 211 : M211 ( ) ; break ; / / M211 : Enable , Disable , and / or Report software endstops <nl> <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + case 217 : M217 ( ) ; break ; / / M217 : Set filament swap parameters <nl> + # endif <nl> + <nl> # if HOTENDS > 1 <nl> case 218 : M218 ( ) ; break ; / / M218 : Set a tool offset <nl> # endif <nl> mmm a / Marlin / src / gcode / gcode . h <nl> ppp b / Marlin / src / gcode / gcode . h <nl> <nl> * M209 - Turn Automatic Retract Detection on / off : S < 0 | 1 > ( For slicers that don ' t support G10 / 11 ) . ( Requires FWRETRACT_AUTORETRACT ) <nl> Every normal extrude - only move will be classified as retract depending on the direction . <nl> * M211 - Enable , Disable , and / or Report software endstops : S < 0 | 1 > ( Requires MIN_SOFTWARE_ENDSTOPS or MAX_SOFTWARE_ENDSTOPS ) <nl> + * M217 - Set filament swap parameters : " M217 S < length > P < feedrate > R < feedrate > " . ( Requires SINGLENOZZLE ) <nl> * M218 - Set / get a tool offset : " M218 T < index > X < offset > Y < offset > " . ( Requires 2 or more extruders ) <nl> * M220 - Set Feedrate Percentage : " M220 S < percent > " ( i . e . , " FR " on the LCD ) <nl> * M221 - Set Flow Percentage : " M221 S < percent > " <nl> class GcodeSuite { <nl> <nl> static void M211 ( ) ; <nl> <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + static void M217 ( ) ; <nl> + # endif <nl> + <nl> # if HOTENDS > 1 <nl> static void M218 ( ) ; <nl> # endif <nl> mmm a / Marlin / src / gcode / temperature / M104_M109 . cpp <nl> ppp b / Marlin / src / gcode / temperature / M104_M109 . cpp <nl> <nl> # include " . . / . . / feature / leds / leds . h " <nl> # endif <nl> <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + # include " . . / . . / module / tool_change . h " <nl> + # endif <nl> + <nl> / * * <nl> * M104 : Set hot end temperature <nl> * / <nl> void GcodeSuite : : M104 ( ) { <nl> <nl> const uint8_t e = target_extruder ; <nl> <nl> - # if ENABLED ( SINGLENOZZLE ) <nl> - if ( e ! = active_extruder ) return ; <nl> - # endif <nl> - <nl> if ( parser . seenval ( ' S ' ) ) { <nl> const int16_t temp = parser . value_celsius ( ) ; <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + singlenozzle_temp [ e ] = temp ; <nl> + if ( e ! 
= active_extruder ) return ; <nl> + # endif <nl> thermalManager . setTargetHotend ( temp , e ) ; <nl> <nl> # if ENABLED ( DUAL_X_CARRIAGE ) <nl> void GcodeSuite : : M109 ( ) { <nl> if ( get_target_extruder_from_command ( ) ) return ; <nl> if ( DEBUGGING ( DRYRUN ) ) return ; <nl> <nl> - # if ENABLED ( SINGLENOZZLE ) <nl> - if ( target_extruder ! = active_extruder ) return ; <nl> - # endif <nl> - <nl> const bool no_wait_for_cooling = parser . seenval ( ' S ' ) , <nl> set_temp = no_wait_for_cooling | | parser . seenval ( ' R ' ) ; <nl> if ( set_temp ) { <nl> const int16_t temp = parser . value_celsius ( ) ; <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + singlenozzle_temp [ target_extruder ] = temp ; <nl> + if ( target_extruder ! = active_extruder ) return ; <nl> + # endif <nl> thermalManager . setTargetHotend ( temp , target_extruder ) ; <nl> <nl> # if ENABLED ( DUAL_X_CARRIAGE ) <nl> void GcodeSuite : : M109 ( ) { <nl> # endif <nl> <nl> # if ENABLED ( ULTRA_LCD ) <nl> - const bool heating = thermalManager . isHeatingHotend ( target_extruder ) ; <nl> - if ( heating | | ! no_wait_for_cooling ) <nl> - # if HOTENDS > 1 <nl> - lcd_status_printf_P ( 0 , heating ? PSTR ( " E % i " MSG_HEATING ) : PSTR ( " E % i " MSG_COOLING ) , target_extruder + 1 ) ; <nl> - # else <nl> - lcd_setstatusPGM ( heating ? PSTR ( " E " MSG_HEATING ) : PSTR ( " E " MSG_COOLING ) ) ; <nl> - # endif <nl> + if ( thermalManager . isHeatingHotend ( target_extruder ) | | ! no_wait_for_cooling ) <nl> + thermalManager . set_heating_message ( target_extruder ) ; <nl> # endif <nl> } <nl> <nl> mmm a / Marlin / src / gcode / temperature / M106_M107 . cpp <nl> ppp b / Marlin / src / gcode / temperature / M106_M107 . cpp <nl> <nl> # include " . . / gcode . h " <nl> # include " . . / . . / Marlin . h " / / for fan_speed — should move those to Planner <nl> <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + # include " . . / . . / module / motion . h " <nl> + # include " . . / . . / module / tool_change . h " <nl> + # endif <nl> + <nl> / * * <nl> * M106 : Set Fan Speed <nl> * <nl> <nl> * / <nl> void GcodeSuite : : M106 ( ) { <nl> const uint8_t p = parser . byteval ( ' P ' ) ; <nl> + const uint16_t s = parser . ushortval ( ' S ' , 255 ) ; <nl> + <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + if ( p ! = active_extruder ) { <nl> + if ( p < EXTRUDERS ) singlenozzle_fan_speed [ p ] = MIN ( s , 255U ) ; <nl> + return ; <nl> + } <nl> + # endif <nl> + <nl> if ( p < FAN_COUNT ) { <nl> # if ENABLED ( EXTRA_FAN_SPEED ) <nl> const int16_t t = parser . intval ( ' T ' ) ; <nl> void GcodeSuite : : M106 ( ) { <nl> fan_speed [ p ] = new_fan_speed [ p ] ; <nl> break ; <nl> default : <nl> - new_fan_speed [ p ] = MIN ( t , 255 ) ; <nl> + new_fan_speed [ p ] = MIN ( t , 255U ) ; <nl> break ; <nl> } <nl> return ; <nl> - <nl> } <nl> # endif / / EXTRA_FAN_SPEED <nl> - const uint16_t s = parser . ushortval ( ' S ' , 255 ) ; <nl> fan_speed [ p ] = MIN ( s , 255U ) ; <nl> } <nl> } <nl> void GcodeSuite : : M106 ( ) { <nl> * / <nl> void GcodeSuite : : M107 ( ) { <nl> const uint16_t p = parser . ushortval ( ' P ' ) ; <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + if ( p ! = active_extruder ) { <nl> + if ( p < EXTRUDERS ) singlenozzle_fan_speed [ p ] = 0 ; <nl> + return ; <nl> + } <nl> + # endif <nl> + <nl> if ( p < FAN_COUNT ) fan_speed [ p ] = 0 ; <nl> } <nl> <nl> mmm a / Marlin / src / inc / Conditionals_post . h <nl> ppp b / Marlin / src / inc / Conditionals_post . 
h <nl> <nl> / / Add commands that need sub - codes to this list <nl> # define USE_GCODE_SUBCODES ENABLED ( G38_PROBE_TARGET ) | | ENABLED ( CNC_COORDINATE_SYSTEMS ) | | ENABLED ( POWER_LOSS_RECOVERY ) <nl> <nl> + / / Single Nozzle swap on toolchange defaults <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + # ifndef SINGLENOZZLE_SWAP_LENGTH <nl> + # define SINGLENOZZLE_SWAP_LENGTH 0 <nl> + # endif <nl> + # ifndef SINGLENOZZLE_SWAP_RETRACT_SPEED <nl> + # define SINGLENOZZLE_SWAP_RETRACT_SPEED 60 . 0 <nl> + # endif <nl> + # ifndef SINGLENOZZLE_SWAP_PRIME_SPEED <nl> + # define SINGLENOZZLE_SWAP_PRIME_SPEED 60 . 0 <nl> + # endif <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # ifndef SINGLENOZZLE_TOOLCHANGE_POSITION <nl> + # define SINGLENOZZLE_TOOLCHANGE_POSITION { ( X_MIN_POS + 10 ) , ( Y_MIN_POS + 10 ) , 5 } <nl> + # endif <nl> + # else <nl> + # ifndef SINGLENOZZLE_TOOLCHANGE_ZRAISE <nl> + # define SINGLENOZZLE_TOOLCHANGE_ZRAISE 2 . 0 <nl> + # endif <nl> + # endif <nl> + # endif <nl> + <nl> / / Parking Extruder <nl> # if ENABLED ( PARKING_EXTRUDER ) <nl> # ifndef PARKING_EXTRUDER_GRAB_DISTANCE <nl> mmm a / Marlin / src / inc / SanityCheck . h <nl> ppp b / Marlin / src / inc / SanityCheck . h <nl> static_assert ( X_MAX_LENGTH > = X_BED_SIZE & & Y_MAX_LENGTH > = Y_BED_SIZE , <nl> # error " EXTRUDERS must be 1 with HEATERS_PARALLEL . " <nl> # endif <nl> <nl> + # if ENABLED ( SINGLENOZZLE ) & & ! defined ( SINGLENOZZLE_SWAP_LENGTH ) <nl> + # define SINGLENOZZLE_SWAP_LENGTH 0 <nl> + # endif <nl> + <nl> # elif ENABLED ( MK2_MULTIPLEXER ) <nl> # error " MK2_MULTIPLEXER requires 2 or more EXTRUDERS . " <nl> # elif ENABLED ( SINGLENOZZLE ) <nl> mmm a / Marlin / src / lcd / language / language_en . h <nl> ppp b / Marlin / src / lcd / language / language_en . h <nl> <nl> # ifndef MSG_AUTORETRACT <nl> # define MSG_AUTORETRACT _UxGT ( " AutoRetr . " ) <nl> # endif <nl> + # ifndef MSG_FILAMENT_SWAP_LENGTH <nl> + # define MSG_FILAMENT_SWAP_LENGTH _UxGT ( " Retract Distance " ) <nl> + # endif <nl> + # ifndef MSG_SINGLENOZZLE_TOOL_CHANGE <nl> + # define MSG_SINGLENOZZLE_TOOL_CHANGE _UxGT ( " Tool Change " ) <nl> + # endif <nl> + # ifndef MSG_SINGLENOZZLE_PRIME_SPD <nl> + # define MSG_SINGLENOZZLE_PRIME_SPD _UxGT ( " Prime Speed " ) <nl> + # endif <nl> + # ifndef MSG_SINGLENOZZLE_RETRACT_SPD <nl> + # define MSG_SINGLENOZZLE_RETRACT_SPD _UxGT ( " Retract Speed " ) <nl> + # endif <nl> # ifndef MSG_FILAMENTCHANGE <nl> # define MSG_FILAMENTCHANGE _UxGT ( " Change filament " ) <nl> # endif <nl> mmm a / Marlin / src / lcd / ultralcd . cpp <nl> ppp b / Marlin / src / lcd / ultralcd . 
cpp <nl> millis_t next_lcd_update_ms ; <nl> } \ <nl> typedef void _name # # _void <nl> DEFINE_LCD_IMPLEMENTATION_DRAWMENU_SETTING_EDIT_TYPE ( int16_t , int3 , itostr3 ) ; <nl> + DEFINE_LCD_IMPLEMENTATION_DRAWMENU_SETTING_EDIT_TYPE ( int16_t , int4 , itostr4sign ) ; <nl> DEFINE_LCD_IMPLEMENTATION_DRAWMENU_SETTING_EDIT_TYPE ( uint8_t , int8 , i8tostr3 ) ; <nl> DEFINE_LCD_IMPLEMENTATION_DRAWMENU_SETTING_EDIT_TYPE ( float , float3 , ftostr3 ) ; <nl> DEFINE_LCD_IMPLEMENTATION_DRAWMENU_SETTING_EDIT_TYPE ( float , float52 , ftostr52 ) ; <nl> millis_t next_lcd_update_ms ; <nl> typedef void _name # # _void <nl> <nl> DECLARE_MENU_EDIT_TYPE ( int16_t , int3 ) ; <nl> + DECLARE_MENU_EDIT_TYPE ( int16_t , int4 ) ; <nl> DECLARE_MENU_EDIT_TYPE ( uint8_t , int8 ) ; <nl> DECLARE_MENU_EDIT_TYPE ( float , float3 ) ; <nl> DECLARE_MENU_EDIT_TYPE ( float , float52 ) ; <nl> void lcd_quick_feedback ( const bool clear_buttons ) { <nl> <nl> # endif / / POWER_LOSS_RECOVERY <nl> <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + void singlenozzle_swap_menu ( ) { <nl> + START_MENU ( ) ; <nl> + MENU_BACK ( MSG_MAIN ) ; <nl> + MENU_ITEM_EDIT ( float3 , MSG_FILAMENT_SWAP_LENGTH , & singlenozzle_swap_length , 0 , 200 ) ; <nl> + MENU_MULTIPLIER_ITEM_EDIT ( int4 , MSG_SINGLENOZZLE_RETRACT_SPD , & singlenozzle_retract_speed , 10 , 5400 ) ; <nl> + MENU_MULTIPLIER_ITEM_EDIT ( int4 , MSG_SINGLENOZZLE_PRIME_SPD , & singlenozzle_prime_speed , 10 , 5400 ) ; <nl> + END_MENU ( ) ; <nl> + } <nl> + # endif <nl> + <nl> # if ENABLED ( MENU_ITEM_CASE_LIGHT ) <nl> <nl> # include " . . / feature / caselight . h " <nl> <nl> void case_light_menu ( ) { <nl> START_MENU ( ) ; <nl> - / / <nl> - / / ^ Main <nl> - / / <nl> MENU_BACK ( MSG_MAIN ) ; <nl> MENU_ITEM_EDIT_CALLBACK ( int8 , MSG_CASE_LIGHT_BRIGHTNESS , & case_light_brightness , 0 , 255 , update_case_light , true ) ; <nl> MENU_ITEM_EDIT_CALLBACK ( bool , MSG_CASE_LIGHT , ( bool * ) & case_light_on , update_case_light ) ; <nl> void lcd_quick_feedback ( const bool clear_buttons ) { <nl> * / <nl> static void bltouch_menu ( ) { <nl> START_MENU ( ) ; <nl> - / / <nl> - / / ^ Main <nl> - / / <nl> MENU_BACK ( MSG_MAIN ) ; <nl> MENU_ITEM ( gcode , MSG_BLTOUCH_RESET , PSTR ( " M280 P " STRINGIFY ( Z_PROBE_SERVO_NR ) " S " STRINGIFY ( BLTOUCH_RESET ) ) ) ; <nl> MENU_ITEM ( gcode , MSG_BLTOUCH_SELFTEST , PSTR ( " M280 P " STRINGIFY ( Z_PROBE_SERVO_NR ) " S " STRINGIFY ( BLTOUCH_SELFTEST ) ) ) ; <nl> void lcd_quick_feedback ( const bool clear_buttons ) { <nl> void lcd_debug_menu ( ) { <nl> START_MENU ( ) ; <nl> <nl> - MENU_BACK ( MSG_MAIN ) ; / / ^ Main <nl> + MENU_BACK ( MSG_MAIN ) ; <nl> <nl> # if ENABLED ( LCD_PROGRESS_BAR_TEST ) <nl> MENU_ITEM ( submenu , MSG_PROGRESS_BAR_TEST , _progress_bar_test ) ; <nl> void lcd_quick_feedback ( const bool clear_buttons ) { <nl> * / <nl> void lcd_tune_menu ( ) { <nl> START_MENU ( ) ; <nl> - <nl> - / / <nl> - / / ^ Main <nl> - / / <nl> MENU_BACK ( MSG_MAIN ) ; <nl> <nl> / / <nl> void lcd_quick_feedback ( const bool clear_buttons ) { <nl> # endif <nl> } <nl> <nl> + / / <nl> + / / Set single nozzle filament retract and prime length <nl> + / / <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + MENU_ITEM ( submenu , MSG_SINGLENOZZLE_TOOL_CHANGE , singlenozzle_swap_menu ) ; <nl> + # endif <nl> + <nl> / / <nl> / / Set Case light on / off / brightness <nl> / / <nl> void lcd_quick_feedback ( const bool clear_buttons ) { <nl> typedef void _name # # _void <nl> <nl> DEFINE_MENU_EDIT_TYPE ( int16_t , int3 , itostr3 , 1 ) ; <nl> + DEFINE_MENU_EDIT_TYPE ( int16_t , int4 , itostr4sign , 1 ) 
; <nl> DEFINE_MENU_EDIT_TYPE ( uint8_t , int8 , i8tostr3 , 1 ) ; <nl> DEFINE_MENU_EDIT_TYPE ( float , float3 , ftostr3 , 1 ) ; <nl> DEFINE_MENU_EDIT_TYPE ( float , float52 , ftostr52 , 100 ) ; <nl> mmm a / Marlin / src / module / configuration_store . cpp <nl> ppp b / Marlin / src / module / configuration_store . cpp <nl> typedef struct { int16_t X , Y , Z ; } tmc <nl> # include " . . / feature / pause . h " <nl> # endif <nl> <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + # include " tool_change . h " <nl> + void M217_report ( const bool eeprom ) ; <nl> + # endif <nl> + <nl> # if ENABLED ( PID_EXTRUSION_SCALING ) <nl> # define LPQ_LEN thermalManager . lpq_len <nl> # endif <nl> typedef struct SettingsDataStruct { <nl> float filament_change_unload_length [ EXTRUDERS ] , / / M603 T U <nl> filament_change_load_length [ EXTRUDERS ] ; / / M603 T L <nl> <nl> + / / <nl> + / / SINGLENOZZLE toolchange values <nl> + / / <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + float singlenozzle_swap_length ; / / M217 S <nl> + int16_t singlenozzle_prime_speed , / / M217 P <nl> + singlenozzle_retract_speed ; / / M217 R <nl> + # endif <nl> + <nl> } SettingsData ; <nl> <nl> # pragma pack ( pop ) <nl> void MarlinSettings : : postprocess ( ) { <nl> for ( uint8_t q = EXTRUDERS * 2 ; q - - ; ) EEPROM_WRITE ( dummy ) ; <nl> # endif <nl> <nl> + / / <nl> + / / SINGLENOZZLE <nl> + / / <nl> + <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + _FIELD_TEST ( singlenozzle_swap_length ) ; <nl> + EEPROM_WRITE ( singlenozzle_swap_length ) ; <nl> + EEPROM_WRITE ( singlenozzle_prime_speed ) ; <nl> + EEPROM_WRITE ( singlenozzle_retract_speed ) ; <nl> + # endif <nl> + <nl> / / <nl> / / Validate CRC and Data Size <nl> / / <nl> void MarlinSettings : : postprocess ( ) { <nl> for ( uint8_t q = EXTRUDERS * 2 ; q - - ; ) EEPROM_READ ( dummy ) ; <nl> # endif <nl> <nl> + / / <nl> + / / SINGLENOZZLE toolchange values <nl> + / / <nl> + <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + _FIELD_TEST ( singlenozzle_swap_length ) ; <nl> + EEPROM_READ ( singlenozzle_swap_length ) ; <nl> + EEPROM_READ ( singlenozzle_prime_speed ) ; <nl> + EEPROM_READ ( singlenozzle_retract_speed ) ; <nl> + # endif <nl> + <nl> eeprom_error = size_error ( eeprom_index - ( EEPROM_OFFSET ) ) ; <nl> if ( eeprom_error ) { <nl> # if ENABLED ( EEPROM_CHITCHAT ) <nl> void MarlinSettings : : reset ( PORTARG_SOLO ) { <nl> # endif <nl> # endif <nl> <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + singlenozzle_swap_length = SINGLENOZZLE_SWAP_LENGTH ; <nl> + singlenozzle_prime_speed = SINGLENOZZLE_SWAP_PRIME_SPEED ; <nl> + singlenozzle_retract_speed = SINGLENOZZLE_SWAP_RETRACT_SPEED ; <nl> + # endif <nl> + <nl> / / <nl> / / Global Leveling <nl> / / <nl> void MarlinSettings : : reset ( PORTARG_SOLO ) { <nl> # endif / / EXTRUDERS > 2 <nl> # endif / / EXTRUDERS = = 1 <nl> # endif / / ADVANCED_PAUSE_FEATURE <nl> + <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + CONFIG_ECHO_START ; <nl> + if ( ! forReplay ) { <nl> + SERIAL_ECHOLNPGM_P ( port , " SINGLENOZZLE : " ) ; <nl> + CONFIG_ECHO_START ; <nl> + } <nl> + M217_report ( true ) ; <nl> + # endif <nl> } <nl> <nl> # endif / / ! DISABLE_M503 <nl> mmm a / Marlin / src / module / temperature . cpp <nl> ppp b / Marlin / src / module / temperature . cpp <nl> void Temperature : : isr ( ) { <nl> <nl> # endif / / AUTO_REPORT_TEMPERATURES <nl> <nl> + # if ENABLED ( ULTRA_LCD ) <nl> + void Temperature : : set_heating_message ( const uint8_t e ) { <nl> + const bool heating = isHeatingHotend ( e ) ; <nl> + # if HOTENDS > 1 <nl> + lcd_status_printf_P ( 0 , heating ? 
PSTR ( " E % i " MSG_HEATING ) : PSTR ( " E % i " MSG_COOLING ) , int ( e + 1 ) ) ; <nl> + # else <nl> + lcd_setstatusPGM ( heating ? PSTR ( " E " MSG_HEATING ) : PSTR ( " E " MSG_COOLING ) ) ; <nl> + # endif <nl> + } <nl> + # endif <nl> + <nl> # if HAS_TEMP_HOTEND <nl> <nl> # ifndef MIN_COOLING_SLOPE_DEG <nl> mmm a / Marlin / src / module / temperature . h <nl> ppp b / Marlin / src / module / temperature . h <nl> class Temperature { <nl> # endif <nl> # endif <nl> <nl> + # if ENABLED ( ULTRA_LCD ) <nl> + static void set_heating_message ( const uint8_t e ) ; <nl> + # endif <nl> + <nl> private : <nl> <nl> # if ENABLED ( FAST_PWM_FAN ) <nl> class Temperature { <nl> # endif <nl> <nl> # endif / / THERMAL_PROTECTION <nl> - <nl> } ; <nl> <nl> extern Temperature thermalManager ; <nl> mmm a / Marlin / src / module / tool_change . cpp <nl> ppp b / Marlin / src / module / tool_change . cpp <nl> <nl> # include " probe . h " <nl> # include " motion . h " <nl> # include " planner . h " <nl> + # include " temperature . h " <nl> <nl> # include " . . / Marlin . h " <nl> <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + float singlenozzle_swap_length = SINGLENOZZLE_SWAP_LENGTH ; <nl> + int16_t singlenozzle_prime_speed = SINGLENOZZLE_SWAP_PRIME_SPEED , <nl> + singlenozzle_retract_speed = SINGLENOZZLE_SWAP_RETRACT_SPEED ; <nl> + uint16_t singlenozzle_temp [ EXTRUDERS ] ; <nl> + # if FAN_COUNT > 0 <nl> + uint8_t singlenozzle_fan_speed [ EXTRUDERS ] ; <nl> + # endif <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + # include " . . / libs / point_t . h " <nl> + const point_t singlenozzle_change_point = SINGLENOZZLE_TOOLCHANGE_POSITION ; <nl> + # endif <nl> + # endif <nl> + <nl> # if ENABLED ( PARKING_EXTRUDER ) & & PARKING_EXTRUDER_SOLENOIDS_DELAY > 0 <nl> # include " . . / gcode / gcode . h " / / for dwell ( ) <nl> # endif <nl> void tool_change ( const uint8_t tmp_extruder , const float fr_mm_s / * = 0 . 0 * / , bool n <nl> UNUSED ( no_move ) ; <nl> <nl> # if ENABLED ( MK2_MULTIPLEXER ) <nl> - if ( tmp_extruder > = E_STEPPERS ) <nl> - return invalid_extruder_error ( tmp_extruder ) ; <nl> - <nl> + if ( tmp_extruder > = E_STEPPERS ) return invalid_extruder_error ( tmp_extruder ) ; <nl> select_multiplexed_stepper ( tmp_extruder ) ; <nl> # endif <nl> <nl> # if EXTRUDERS > 1 <nl> - / / Set the new active extruder <nl> - active_extruder = tmp_extruder ; <nl> - # endif <nl> + <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + <nl> + # if ENABLED ( PREVENT_COLD_EXTRUSION ) <nl> + if ( ! DEBUGGING ( DRYRUN ) & & thermalManager . targetTooColdToExtrude ( active_extruder ) ) { <nl> + SERIAL_ERROR_START ( ) ; <nl> + SERIAL_ERRORLNPGM ( MSG_HOTEND_TOO_COLD ) ; <nl> + return ; <nl> + } <nl> + # endif <nl> + <nl> + # if FAN_COUNT > 0 <nl> + singlenozzle_fan_speed [ active_extruder ] = fan_speed [ 0 ] ; <nl> + fan_speed [ 0 ] = singlenozzle_fan_speed [ tmp_extruder ] ; <nl> + # endif <nl> + <nl> + set_destination_from_current ( ) ; <nl> + <nl> + current_position [ Z_AXIS ] + = ( <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + singlenozzle_change_point . z <nl> + # else <nl> + SINGLENOZZLE_TOOLCHANGE_ZRAISE <nl> + # endif <nl> + ) ; <nl> + <nl> + planner . buffer_line ( current_position , planner . max_feedrate_mm_s [ Z_AXIS ] , active_extruder ) ; <nl> + <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + current_position [ X_AXIS ] = singlenozzle_change_point . x ; <nl> + current_position [ Y_AXIS ] = singlenozzle_change_point . y ; <nl> + planner . buffer_line ( current_position , planner . 
max_feedrate_mm_s [ Y_AXIS ] , active_extruder ) ; <nl> + # endif <nl> + <nl> + if ( singlenozzle_swap_length ) { <nl> + # if ENABLED ( ADVANCED_PAUSE_FEATURE ) <nl> + do_pause_e_move ( - singlenozzle_swap_length , MMM_TO_MMS ( singlenozzle_retract_speed ) ) ; <nl> + # else <nl> + current_position [ E_AXIS ] - = singlenozzle_swap_length / planner . e_factor [ active_extruder ] ; <nl> + planner . buffer_line ( current_position , MMM_TO_MMS ( singlenozzle_retract_speed ) , active_extruder ) ; <nl> + # endif <nl> + } <nl> + <nl> + singlenozzle_temp [ active_extruder ] = thermalManager . target_temperature [ 0 ] ; <nl> + if ( singlenozzle_temp [ tmp_extruder ] & & singlenozzle_temp [ tmp_extruder ] ! = singlenozzle_temp [ active_extruder ] ) { <nl> + thermalManager . setTargetHotend ( singlenozzle_temp [ tmp_extruder ] , 0 ) ; <nl> + # if ENABLED ( ULTRA_LCD ) <nl> + thermalManager . set_heating_message ( 0 ) ; <nl> + # endif <nl> + ( void ) thermalManager . wait_for_hotend ( 0 , false ) ; / / Wait for heating or cooling <nl> + } <nl> + <nl> + active_extruder = tmp_extruder ; <nl> + <nl> + if ( singlenozzle_swap_length ) { <nl> + # if ENABLED ( ADVANCED_PAUSE_FEATURE ) <nl> + do_pause_e_move ( singlenozzle_swap_length , singlenozzle_prime_speed ) ; <nl> + # else <nl> + current_position [ E_AXIS ] + = singlenozzle_swap_length / planner . e_factor [ tmp_extruder ] ; <nl> + planner . buffer_line ( current_position , singlenozzle_prime_speed , tmp_extruder ) ; <nl> + # endif <nl> + } <nl> + <nl> + # if ENABLED ( SINGLENOZZLE_SWAP_PARK ) <nl> + current_position [ X_AXIS ] = destination [ X_AXIS ] ; <nl> + current_position [ Y_AXIS ] = destination [ Y_AXIS ] ; <nl> + planner . buffer_line ( current_position , planner . max_feedrate_mm_s [ Y_AXIS ] , active_extruder ) ; <nl> + # endif <nl> + <nl> + do_blocking_move_to ( destination [ X_AXIS ] , destination [ Y_AXIS ] , destination [ Z_AXIS ] ) ; <nl> + <nl> + # else / / ! SINGLENOZZLE <nl> + <nl> + active_extruder = tmp_extruder ; <nl> + <nl> + # endif / / ! SINGLENOZZLE <nl> + <nl> + # endif / / EXTRUDERS > 1 <nl> <nl> # endif / / HOTENDS < = 1 <nl> <nl> mmm a / Marlin / src / module / tool_change . h <nl> ppp b / Marlin / src / module / tool_change . h <nl> <nl> <nl> # endif / / PARKING_EXTRUDER <nl> <nl> + # if ENABLED ( SINGLENOZZLE ) <nl> + extern float singlenozzle_swap_length ; <nl> + extern int16_t singlenozzle_prime_speed , <nl> + singlenozzle_retract_speed ; <nl> + extern uint16_t singlenozzle_temp [ EXTRUDERS ] ; <nl> + # if FAN_COUNT > 0 <nl> + extern uint8_t singlenozzle_fan_speed [ EXTRUDERS ] ; <nl> + # endif <nl> + # endif <nl> + <nl> / * * <nl> * Perform a tool - change , which may result in moving the <nl> * previous tool out of the way and the new tool into place . <nl> mmm a / buildroot / share / tests / STM32F1_tests <nl> ppp b / buildroot / share / tests / STM32F1_tests <nl> set - e <nl> <nl> restore_configs <nl> opt_set MOTHERBOARD BOARD_STM32F1R <nl> + opt_set EXTRUDERS 2 <nl> opt_enable EEPROM_SETTINGS EEPROM_CHITCHAT REPRAP_DISCOUNT_SMART_CONTROLLER SDSUPPORT \ <nl> - PAREN_COMMENTS GCODE_MOTION_MODES <nl> + PAREN_COMMENTS GCODE_MOTION_MODES SINGLENOZZLE SINGLENOZZLE_SWAP_LENGTH <nl> exec_test $ 1 $ 2 " STM32F1R EEPROM_SETTINGS EEPROM_CHITCHAT REPRAP_DISCOUNT_SMART_CONTROLLER SDSUPPORT PAREN_COMMENTS GCODE_MOTION_MODES " <nl> <nl> opt_enable SPINDLE_LASER_ENABLE <nl>
Single nozzle filament change ( )
MarlinFirmware/Marlin
74cd6cb4fc90e4c483e7a97a313c0257790f1375
2018-10-07T22:06:14Z
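The tool_change() hunk in the Marlin diff above boils down to a fixed choreography for a shared nozzle: raise Z, optionally park, retract the outgoing filament, retarget the single heater and wait, prime the incoming filament, then return to the resume point. Below is a minimal C++ sketch of that sequence; every helper name (raise_z, park_head, move_e, set_and_wait_temp, return_to_resume_point) is a hypothetical stand-in for Marlin's planner/thermalManager calls, not its real API.

    // A minimal sketch of the swap sequence from the tool_change() hunk above.
    // All helpers below are hypothetical stand-ins, not Marlin's real API.
    #include <cstdint>
    #include <cstdio>

    static void raise_z(float dz)             { std::printf("raise Z by %.1f mm\n", dz); }
    static void park_head()                   { std::printf("park head\n"); }
    static void move_e(float de, int16_t f)   { std::printf("move E %.1f mm at %d mm/min\n", de, f); }
    static void set_and_wait_temp(uint16_t t) { std::printf("wait for nozzle at %d C\n", int(t)); }
    static void return_to_resume_point()      { std::printf("return to print\n"); }

    // Per-"tool" temperature targets for the one physical nozzle (illustrative values).
    static uint16_t nozzle_temp_for[2] = {210, 235};

    // Mirrors the order of operations in the diff: raise, park, retract,
    // retarget/wait on the single heater, prime, move back.
    void single_nozzle_tool_change(uint8_t from, uint8_t to, float swap_length,
                                   int16_t retract_speed, int16_t prime_speed) {
      raise_z(2.0f);
      park_head();
      if (swap_length != 0.0f) move_e(-swap_length, retract_speed);  // unload old filament
      if (nozzle_temp_for[to] && nozzle_temp_for[to] != nozzle_temp_for[from])
        set_and_wait_temp(nozzle_temp_for[to]);                      // heat or cool as needed
      if (swap_length != 0.0f) move_e(+swap_length, prime_speed);    // load new filament
      return_to_resume_point();
    }

    int main() { single_nozzle_tool_change(0, 1, 80.0f, 3000, 600); }

Exposing swap_length and the two speeds as plain parameters mirrors how the commit surfaces them through M217, EEPROM, and the LCD menu.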
mmm a / tests / performance / decimal_casts . xml <nl> ppp b / tests / performance / decimal_casts . xml <nl> <nl> - < test max_ignored_relative_change = " 0 . 4 " > <nl> + < test > <nl> < settings > <nl> < max_memory_usage > 10G < / max_memory_usage > <nl> < / settings > <nl>
better test
ClickHouse/ClickHouse
7e984f877b2d7e2d031dd26c8bd491daae55e8f5
2020-08-26T16:21:26Z
mmm a / dlib / svm / structural_svm_distributed . h <nl> ppp b / dlib / svm / structural_svm_distributed . h <nl> <nl> # include " structural_svm_problem . h " <nl> # include " . . / bridge . h " <nl> # include " . . / smart_pointers . h " <nl> + # include " . . / misc_api . h " <nl> + # include " . . / statistics . h " <nl> <nl> <nl> # include " . . / threads . h " <nl> namespace dlib <nl> tsu_in msg ; <nl> tsu_out temp ; <nl> <nl> + timestamper ts ; <nl> + running_stats < double > with_buffer_time ; <nl> + running_stats < double > without_buffer_time ; <nl> + unsigned long num_iterations_executed = 0 ; <nl> + <nl> while ( in . dequeue ( msg ) ) <nl> { <nl> / / initialize the cache and compute psi_true . <nl> namespace dlib <nl> } <nl> else if ( msg . template contains < oracle_request < matrix_type > > ( ) ) <nl> { <nl> + + + num_iterations_executed ; <nl> + <nl> const oracle_request < matrix_type > & req = msg . template get < oracle_request < matrix_type > > ( ) ; <nl> <nl> oracle_response < matrix_type > & data = temp . template get < oracle_response < matrix_type > > ( ) ; <nl> namespace dlib <nl> <nl> data . num = problem . get_num_samples ( ) ; <nl> <nl> - / / how many samples to process in a single task ( aim for 100 jobs per thread ) <nl> - const long block_size = std : : max < long > ( 1 , data . num / ( 1 + tp . num_threads_in_pool ( ) * 100 ) ) ; <nl> + / / how many samples to process in a single task ( aim for 4 jobs per worker ) <nl> + const long num_workers = std : : max ( 1UL , tp . num_threads_in_pool ( ) ) ; <nl> + const long block_size = std : : max ( 1L , data . num / ( num_workers * 4 ) ) ; <nl> + <nl> + const uint64 start_time = ts . get_timestamp ( ) ; <nl> + <nl> + / / pick fastest buffering strategy <nl> + bool buffer_subgradients_locally = with_buffer_time . mean ( ) < without_buffer_time . mean ( ) ; <nl> + <nl> + / / every 50 iterations we should try to flip the buffering scheme to see if <nl> + / / doing it the other way might be better . <nl> + if ( ( num_iterations_executed % 50 ) = = 0 ) <nl> + { <nl> + buffer_subgradients_locally = ! buffer_subgradients_locally ; <nl> + } <nl> <nl> - binder b ( * this , req , data ) ; <nl> + binder b ( * this , req , data , buffer_subgradients_locally ) ; <nl> for ( long i = 0 ; i < data . num ; i + = block_size ) <nl> { <nl> tp . add_task ( b , & binder : : call_oracle , i , std : : min ( i + block_size , data . num ) ) ; <nl> } <nl> tp . wait_for_all_tasks ( ) ; <nl> <nl> + const uint64 stop_time = ts . get_timestamp ( ) ; <nl> + if ( buffer_subgradients_locally ) <nl> + with_buffer_time . add ( stop_time - start_time ) ; <nl> + else <nl> + without_buffer_time . add ( stop_time - start_time ) ; <nl> + <nl> out . enqueue ( temp ) ; <nl> } <nl> } <nl> namespace dlib <nl> binder ( <nl> const node_type & self_ , <nl> const impl : : oracle_request < matrix_type > & req_ , <nl> - impl : : oracle_response < matrix_type > & data_ <nl> - ) : self ( self_ ) , req ( req_ ) , data ( data_ ) { } <nl> + impl : : oracle_response < matrix_type > & data_ , <nl> + bool buffer_subgradients_locally_ <nl> + ) : self ( self_ ) , req ( req_ ) , data ( data_ ) , <nl> + buffer_subgradients_locally ( buffer_subgradients_locally_ ) { } <nl> <nl> void call_oracle ( <nl> long begin , <nl> long end <nl> ) <nl> { <nl> - / / If we are only going to call the separation oracle once then <nl> - / / don ' t run the slightly more complex for loop version of this code . 
<nl> - if ( end - begin < = 1 ) <nl> + / / If we are only going to call the separation oracle once then don ' t <nl> + / / run the slightly more complex for loop version of this code . Or if <nl> + / / we just don ' t want to run the complex buffering one . The code later <nl> + / / on decides if we should do the buffering based on how long it takes <nl> + / / to execute . We do this because , when the subgradients are really high <nl> + / / dimensional it can take a lot of time to add them together . So we <nl> + / / might want to avoid doing that . <nl> + if ( end - begin < = 1 | | ! buffer_subgradients_locally ) <nl> { <nl> scalar_type loss ; <nl> feature_vector_type ftemp ; <nl> - self . cache [ begin ] . separation_oracle_cached ( req . skip_cache , <nl> + for ( long i = begin ; i < end ; + + i ) <nl> + { <nl> + self . cache [ i ] . separation_oracle_cached ( req . skip_cache , <nl> req . cur_risk_lower_bound , <nl> req . current_solution , <nl> loss , <nl> ftemp ) ; <nl> <nl> - auto_mutex lock ( self . accum_mutex ) ; <nl> - data . loss + = loss ; <nl> - add_to ( data . subgradient , ftemp ) ; <nl> + auto_mutex lock ( self . accum_mutex ) ; <nl> + data . loss + = loss ; <nl> + add_to ( data . subgradient , ftemp ) ; <nl> + } <nl> } <nl> else <nl> { <nl> namespace dlib <nl> const node_type & self ; <nl> const impl : : oracle_request < matrix_type > & req ; <nl> impl : : oracle_response < matrix_type > & data ; <nl> + bool buffer_subgradients_locally ; <nl> } ; <nl> <nl> <nl>
Made the distributed structural svm tools use the same improved job / buffering
davisking/dlib
6e06b0bdf503578418d3fa8197c7262335c656c5
2013-01-26T22:49:43Z
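The notable idea in the dlib commit above is how it chooses between the two buffering schemes: it keeps a running mean of wall-clock time for each scheme, normally runs whichever has been faster so far, and deliberately flips to the other one every 50th iteration so the losing scheme keeps getting re-measured as the workload evolves. A compact C++ sketch of that explore/exploit loop, with RunStats standing in for dlib's running_stats and an empty lambda where the oracle pass would run:

    // Self-tuning strategy choice, as in the commit above: track the mean
    // elapsed time of each variant, run the faster one, re-explore periodically.
    #include <chrono>
    #include <cstdio>

    struct RunStats {                      // stand-in for dlib::running_stats<double>
      double sum = 0; long n = 0;
      void add(double x) { sum += x; ++n; }
      double mean() const { return n ? sum / n : 0; }
    };

    template <typename F>
    static double time_call(F f) {
      auto t0 = std::chrono::steady_clock::now();
      f();
      return std::chrono::duration<double>(std::chrono::steady_clock::now() - t0).count();
    }

    int main() {
      RunStats with_buffer, without_buffer;
      for (long iter = 1; iter <= 200; ++iter) {
        bool buffered = with_buffer.mean() < without_buffer.mean();
        if (iter % 50 == 0) buffered = !buffered;   // occasionally try the other scheme
        double t = time_call([&] { /* run the oracle pass one way or the other */ });
        (buffered ? with_buffer : without_buffer).add(t);
      }
      std::printf("buffered mean %.3g s, unbuffered mean %.3g s\n",
                  with_buffer.mean(), without_buffer.mean());
    }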
mmm a / include / swift / SILOptimizer / Utils / LoadStoreOptUtils . h <nl> ppp b / include / swift / SILOptimizer / Utils / LoadStoreOptUtils . h <nl> class LSBase { <nl> } <nl> <nl> / / / Print the LSBase . <nl> - virtual void print ( llvm : : raw_ostream & os , SILModule * Mod , <nl> - TypeExpansionContext context ) { <nl> + virtual void print ( llvm : : raw_ostream & os ) { <nl> os < < Base ; <nl> - Path . getValue ( ) . print ( os , * Mod , context ) ; <nl> + SILFunction * F = Base - > getFunction ( ) ; <nl> + if ( F ) { <nl> + Path . getValue ( ) . print ( os , F - > getModule ( ) , TypeExpansionContext ( * F ) ) ; <nl> + } <nl> } <nl> <nl> - virtual void dump ( SILModule * Mod , TypeExpansionContext context ) { <nl> - print ( llvm : : dbgs ( ) , Mod , context ) ; <nl> + virtual void dump ( ) { <nl> + print ( llvm : : dbgs ( ) ) ; <nl> } <nl> } ; <nl> <nl> class LSValue : public LSBase { <nl> return Path . getValue ( ) . createExtract ( Base , Inst , true ) ; <nl> } <nl> <nl> - void print ( llvm : : raw_ostream & os , SILModule * Mod , <nl> - TypeExpansionContext context ) { <nl> + void print ( llvm : : raw_ostream & os ) { <nl> if ( CoveringValue ) { <nl> os < < " Covering Value " ; <nl> return ; <nl> } <nl> - LSBase : : print ( os , Mod , context ) ; <nl> + LSBase : : print ( os ) ; <nl> } <nl> <nl> / / / Expand this SILValue to all individual fields it contains . <nl> mmm a / lib / SILOptimizer / Transforms / RedundantLoadElimination . cpp <nl> ppp b / lib / SILOptimizer / Transforms / RedundantLoadElimination . cpp <nl> bool RLEContext : : run ( ) { <nl> <nl> LLVM_DEBUG ( for ( unsigned i = 0 ; i < LocationVault . size ( ) ; + + i ) { <nl> llvm : : dbgs ( ) < < " LSLocation # " < < i ; <nl> - getLocation ( i ) . print ( llvm : : dbgs ( ) , & Fn - > getModule ( ) , <nl> - TypeExpansionContext ( * Fn ) ) ; <nl> + getLocation ( i ) . print ( llvm : : dbgs ( ) ) ; <nl> } ) ; <nl> <nl> if ( Optimistic ) <nl> mmm a / lib / SILOptimizer / UtilityPasses / LSLocationPrinter . cpp <nl> ppp b / lib / SILOptimizer / UtilityPasses / LSLocationPrinter . cpp <nl> class LSLocationPrinter : public SILModuleTransform { <nl> <nl> llvm : : outs ( ) < < " # " < < Counter + + < < II ; <nl> for ( auto & Loc : Locs ) { <nl> - Loc . print ( llvm : : outs ( ) , & Fn . getModule ( ) , TypeExpansionContext ( Fn ) ) ; <nl> + Loc . print ( llvm : : outs ( ) ) ; <nl> } <nl> Locs . clear ( ) ; <nl> } <nl> class LSLocationPrinter : public SILModuleTransform { <nl> LSLocation : : reduce ( L , & Fn . getModule ( ) , TypeExpansionContext ( Fn ) , SLocs ) ; <nl> llvm : : outs ( ) < < " # " < < Counter + + < < II ; <nl> for ( auto & Loc : SLocs ) { <nl> - Loc . print ( llvm : : outs ( ) , & Fn . getModule ( ) , TypeExpansionContext ( Fn ) ) ; <nl> + Loc . print ( llvm : : outs ( ) ) ; <nl> } <nl> L . reset ( ) ; <nl> Locs . clear ( ) ; <nl>
SILOptimizer : Simplify the LSBase : : print and dump functions .
apple/swift
9958b479fb06c7d8b0e546abcb284cfd9b6a419f
2020-05-14T12:38:44Z
mmm a / csharp / src / Google . Protobuf / WritingPrimitives . cs <nl> ppp b / csharp / src / Google . Protobuf / WritingPrimitives . cs <nl> public static void WriteRawLittleEndian64 ( ref Span < byte > buffer , ref WriterInter <nl> } <nl> } <nl> <nl> - [ MethodImpl ( MethodImplOptions . AggressiveInlining ) ] <nl> + / / This method is intentionally not marked as " AggressiveInlining " , because it slows down <nl> + / / serialization of messages with lots of empty fields . The likely explanation is that <nl> + / / the WriteRawTag invocations in the InternalWriteTo method get inlined too deep and that makes <nl> + / / skipping fields which are not present more expensive ( which is especially costly for <nl> + / / messages with lots of fields of which only a few are present ) . <nl> public static void WriteRawByte ( ref Span < byte > buffer , ref WriterInternalState state , byte value ) <nl> { <nl> if ( state . position = = state . limit ) <nl>
Speed up writing of messages with many fields
protocolbuffers/protobuf
468c3ba8c6b99882bb4bbd1a8d2af72693888fac
2020-06-12T09:34:42Z
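The comment added in the protobuf diff above records a general performance trade-off: force-inlining a buffer-write helper into every call site bloats the callers, and for messages that are mostly empty the extra code on the field-skipping path costs more than the avoided call. The commit itself is C#, but the shape of the fix can be sketched in C++ by keeping only the hot path small and pushing the refill machinery out of line; the Writer type here is illustrative, not protobuf's API.

    // Keep the hot write path tiny; force the cold refill path out of line.
    #include <cstdint>
    #include <cstdio>

    struct Writer {
      uint8_t buf[8];
      uint8_t *pos = buf, *limit = buf + sizeof buf;

    #if defined(_MSC_VER)
      __declspec(noinline)
    #else
      __attribute__((noinline))
    #endif
      void refill() {                 // cold path: pretend to flush, reuse the buffer
        std::printf("flush %d bytes\n", int(pos - buf));
        pos = buf;
      }

      // Hot path: stays a compare-and-store at every inlined call site
      // because the refill machinery above is kept out of line.
      void write_byte(uint8_t v) {
        if (pos == limit) refill();
        *pos++ = v;
      }
    };

    int main() {
      Writer w;
      for (int i = 0; i < 20; ++i) w.write_byte(uint8_t(i));
      w.refill();  // final flush
    }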
mmm a / lib / AST / Expr . cpp <nl> ppp b / lib / AST / Expr . cpp <nl> Expr * Expr : : getSemanticsProvidingExpr ( ) { <nl> Expr * Expr : : getValueProvidingExpr ( ) { <nl> Expr * E = getSemanticsProvidingExpr ( ) ; <nl> <nl> - if ( auto TE = dyn_cast < ForceTryExpr > ( this ) ) <nl> + if ( auto TE = dyn_cast < ForceTryExpr > ( E ) ) <nl> return TE - > getSubExpr ( ) - > getValueProvidingExpr ( ) ; <nl> <nl> / / TODO : <nl>
[ Sema ] Fix implementation of getValueProvidingExpr ( )
apple/swift
8f2790633f7b69eec62bcd52fa550537ddab01ed
2018-03-07T19:52:05Z
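The one-character fix above is worth spelling out: the function computed the semantics-providing expression E, then ran dyn_cast on this anyway, so a ForceTryExpr hidden behind a wrapper expression was never unwrapped. A self-contained C++ sketch of the same pitfall, using hypothetical Node/Paren/Forced types in place of Swift's Expr hierarchy:

    #include <cassert>

    // Hypothetical node types standing in for Swift's Expr hierarchy.
    struct Node {
      virtual ~Node() = default;
      Node *stripParens();   // analogous to getSemanticsProvidingExpr()
    };
    struct Paren  : Node { Node *inner = nullptr; };
    struct Forced : Node { Node *sub   = nullptr; };

    Node *Node::stripParens() {
      Node *n = this;
      while (auto *p = dynamic_cast<Paren *>(n)) n = p->inner;
      return n;
    }

    Node *valueProviding(Node *self) {
      Node *e = self->stripParens();
      // The buggy version tested `self` here instead of `e`, so a Forced
      // node wrapped in parens was never unwrapped.
      if (auto *f = dynamic_cast<Forced *>(e))
        return valueProviding(f->sub);
      return e;
    }

    int main() {
      Node leaf;
      Forced f; f.sub = &leaf;
      Paren p;  p.inner = &f;
      assert(valueProviding(&p) == &leaf);  // with the bug this returned &f
    }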
mmm a / BUILD <nl> ppp b / BUILD <nl> config_setting ( <nl> <nl> cc_library ( <nl> name = " port " , <nl> + srcs = [ <nl> + " upb / port . c " , <nl> + ] , <nl> textual_hdrs = [ <nl> " upb / port_def . inc " , <nl> " upb / port_undef . inc " , <nl> ] , <nl> - srcs = [ <nl> - " upb / port . c " , <nl> - ] , <nl> ) <nl> <nl> cc_library ( <nl> cc_library ( <nl> ] , <nl> copts = select ( { <nl> " : windows " : [ ] , <nl> - " / / conditions : default " : COPTS <nl> + " / / conditions : default " : COPTS , <nl> } ) , <nl> visibility = [ " / / visibility : public " ] , <nl> deps = [ " : port " ] , <nl> cc_library ( <nl> ] , <nl> copts = select ( { <nl> " : windows " : [ ] , <nl> - " / / conditions : default " : COPTS <nl> + " / / conditions : default " : COPTS , <nl> } ) , <nl> visibility = [ " / / visibility : public " ] , <nl> deps = [ <nl> + " : table " , <nl> " : upb " , <nl> ] , <nl> ) <nl> cc_library ( <nl> name = " reflection " , <nl> srcs = [ <nl> " upb / def . c " , <nl> + " upb / msg . h " , <nl> " upb / reflection . c " , <nl> ] , <nl> hdrs = [ <nl> cc_library ( <nl> ] , <nl> copts = select ( { <nl> " : windows " : [ ] , <nl> - " / / conditions : default " : COPTS <nl> + " / / conditions : default " : COPTS , <nl> } ) , <nl> visibility = [ " / / visibility : public " ] , <nl> deps = [ <nl> cc_library ( <nl> ] , <nl> visibility = [ " / / visibility : public " ] , <nl> deps = [ <nl> + " : port " , <nl> + " : reflection " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " json " , <nl> + srcs = [ <nl> + " upb / json_encode . c " , <nl> + ] , <nl> + hdrs = [ <nl> + " upb / json_encode . h " , <nl> + ] , <nl> + deps = [ <nl> + " : port " , <nl> " : reflection " , <nl> + " : upb " , <nl> ] , <nl> ) <nl> <nl> cc_library ( <nl> ] , <nl> copts = select ( { <nl> " : windows " : [ ] , <nl> - " / / conditions : default " : COPTS <nl> + " / / conditions : default " : COPTS , <nl> } ) , <nl> deps = [ <nl> - " : reflection " , <nl> " : port " , <nl> + " : reflection " , <nl> " : table " , <nl> " : upb " , <nl> ] , <nl> cc_library ( <nl> ] , <nl> copts = select ( { <nl> " : windows " : [ ] , <nl> - " / / conditions : default " : COPTS <nl> + " / / conditions : default " : COPTS , <nl> } ) , <nl> deps = [ <nl> " : descriptor_upbproto " , <nl> " : handlers " , <nl> - " : reflection " , <nl> " : port " , <nl> + " : reflection " , <nl> " : table " , <nl> " : upb " , <nl> ] , <nl> cc_library ( <nl> ] , <nl> copts = select ( { <nl> " : windows " : [ ] , <nl> - " / / conditions : default " : COPTS <nl> + " / / conditions : default " : COPTS , <nl> } ) , <nl> deps = [ <nl> " : upb " , <nl> cc_library ( <nl> hdrs = [ " upbc / generator . h " ] , <nl> copts = select ( { <nl> " : windows " : [ ] , <nl> - " / / conditions : default " : CPPOPTS <nl> + " / / conditions : default " : CPPOPTS , <nl> } ) , <nl> deps = [ <nl> " @ com_google_absl / / absl / base : core_headers " , <nl> cc_binary ( <nl> srcs = [ " upbc / main . 
cc " ] , <nl> copts = select ( { <nl> " : windows " : [ ] , <nl> - " / / conditions : default " : CPPOPTS <nl> + " / / conditions : default " : CPPOPTS , <nl> } ) , <nl> visibility = [ " / / visibility : public " ] , <nl> deps = [ <nl> cc_library ( <nl> ] , <nl> copts = select ( { <nl> " : windows " : [ ] , <nl> - " / / conditions : default " : CPPOPTS <nl> + " / / conditions : default " : CPPOPTS , <nl> } ) , <nl> deps = [ <nl> " : handlers " , <nl> cc_test ( <nl> ] , <nl> copts = select ( { <nl> " : windows " : [ ] , <nl> - " / / conditions : default " : COPTS <nl> + " / / conditions : default " : COPTS , <nl> } ) , <nl> deps = [ <nl> " : port " , <nl> cc_test ( <nl> srcs = [ " tests / test_generated_code . c " ] , <nl> deps = [ <nl> " : test_messages_proto3_proto_upb " , <nl> + " : empty_upbdefs_proto " , <nl> " : test_upbproto " , <nl> " : upb_test " , <nl> ] , <nl> ) <nl> <nl> + proto_library ( <nl> + name = " empty_proto " , <nl> + srcs = [ " tests / empty . proto " ] , <nl> + ) <nl> + <nl> upb_proto_reflection_library ( <nl> + name = " empty_upbdefs_proto " , <nl> + testonly = 1 , <nl> + deps = [ " : empty_proto " ] , <nl> + ) <nl> + <nl> + upb_proto_library ( <nl> name = " test_messages_proto3_proto_upb " , <nl> testonly = 1 , <nl> deps = [ " @ com_google_protobuf / / : test_messages_proto3_proto " ] , <nl> cc_test ( <nl> ] , <nl> copts = select ( { <nl> " : windows " : [ ] , <nl> - " / / conditions : default " : CPPOPTS <nl> + " / / conditions : default " : CPPOPTS , <nl> } ) , <nl> deps = [ <nl> " : handlers " , <nl> cc_test ( <nl> srcs = [ " tests / test_cpp . cc " ] , <nl> copts = select ( { <nl> " : windows " : [ ] , <nl> - " / / conditions : default " : CPPOPTS <nl> + " / / conditions : default " : CPPOPTS , <nl> } ) , <nl> deps = [ <nl> " : handlers " , <nl> cc_test ( <nl> srcs = [ " tests / test_table . cc " ] , <nl> copts = select ( { <nl> " : windows " : [ ] , <nl> - " / / conditions : default " : CPPOPTS <nl> + " / / conditions : default " : CPPOPTS , <nl> } ) , <nl> deps = [ <nl> " : port " , <nl> cc_binary ( <nl> srcs = [ " tests / file_descriptor_parsenew_fuzzer . cc " ] , <nl> copts = select ( { <nl> " : windows " : [ ] , <nl> - " / / conditions : default " : CPPOPTS <nl> + " / / conditions : default " : CPPOPTS , <nl> } ) + select ( { <nl> " / / conditions : default " : [ ] , <nl> " : fuzz " : [ " - fsanitize = fuzzer , address " ] , <nl> cc_test ( <nl> srcs = [ " tests / pb / test_encoder . cc " ] , <nl> copts = select ( { <nl> " : windows " : [ ] , <nl> - " / / conditions : default " : CPPOPTS <nl> + " / / conditions : default " : CPPOPTS , <nl> } ) , <nl> deps = [ <nl> " : descriptor_upbproto " , <nl> cc_test ( <nl> ] , <nl> copts = select ( { <nl> " : windows " : [ ] , <nl> - " / / conditions : default " : CPPOPTS <nl> + " / / conditions : default " : CPPOPTS , <nl> } ) , <nl> deps = [ <nl> " : test_json_upbproto " , <nl> cc_binary ( <nl> ] , <nl> copts = select ( { <nl> " : windows " : [ ] , <nl> - " / / conditions : default " : COPTS <nl> + " / / conditions : default " : COPTS , <nl> } ) + [ " - Ibazel - out / k8 - fastbuild / bin " ] , <nl> deps = [ <nl> " : conformance_proto_upb " , <nl> " : conformance_proto_upbdefs " , <nl> + " : json " , <nl> + " : reflection " , <nl> " : test_messages_proto2_upbdefs " , <nl> " : test_messages_proto3_upbdefs " , <nl> - " : reflection " , <nl> " : textformat " , <nl> " : upb " , <nl> ] , <nl> cc_library ( <nl> hdrs = [ " upb . 
h " ] , <nl> copts = select ( { <nl> " : windows " : [ ] , <nl> - " / / conditions : default " : COPTS <nl> + " / / conditions : default " : COPTS , <nl> } ) , <nl> ) <nl> <nl> cc_library ( <nl> <nl> cc_test ( <nl> name = " test_lua " , <nl> - linkstatic = 1 , <nl> srcs = [ " tests / bindings / lua / main . c " ] , <nl> data = [ <nl> - " @ com_google_protobuf / / : conformance_proto " , <nl> - " @ com_google_protobuf / / : descriptor_proto " , <nl> - " : descriptor_proto_lua " , <nl> - " : test_messages_proto3_proto_lua " , <nl> - " : test_proto_lua " , <nl> " tests / bindings / lua / test_upb . lua " , <nl> " third_party / lunit / console . lua " , <nl> " third_party / lunit / lunit . lua " , <nl> " upb / bindings / lua / upb . lua " , <nl> + " : descriptor_proto_lua " , <nl> + " : test_messages_proto3_proto_lua " , <nl> + " : test_proto_lua " , <nl> + " @ com_google_protobuf / / : conformance_proto " , <nl> + " @ com_google_protobuf / / : descriptor_proto " , <nl> ] , <nl> + linkstatic = 1 , <nl> deps = [ <nl> " : lupb " , <nl> " @ lua / / : liblua " , <nl> - ] <nl> + ] , <nl> ) <nl> <nl> cc_binary ( <nl> cc_binary ( <nl> srcs = [ " upb / bindings / lua / upbc . cc " ] , <nl> copts = select ( { <nl> " : windows " : [ ] , <nl> - " / / conditions : default " : CPPOPTS <nl> + " / / conditions : default " : CPPOPTS , <nl> } ) , <nl> visibility = [ " / / visibility : public " ] , <nl> deps = [ <nl> " @ com_google_absl / / absl / strings " , <nl> - " @ com_google_protobuf / / : protoc_lib " <nl> + " @ com_google_protobuf / / : protoc_lib " , <nl> ] , <nl> ) <nl> <nl> mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> target_link_libraries ( upb <nl> port ) <nl> add_library ( generated_code_support__only_for_generated_code_do_not_use__i_give_permission_to_break_me INTERFACE ) <nl> target_link_libraries ( generated_code_support__only_for_generated_code_do_not_use__i_give_permission_to_break_me INTERFACE <nl> + table <nl> upb ) <nl> add_library ( reflection <nl> upb / def . c <nl> + upb / msg . h <nl> upb / reflection . c <nl> upb / def . h <nl> upb / reflection . h ) <nl> add_library ( textformat <nl> upb / text_encode . c <nl> upb / text_encode . h ) <nl> target_link_libraries ( textformat <nl> + port <nl> reflection ) <nl> + add_library ( json <nl> + upb / json_encode . c <nl> + upb / json_encode . h ) <nl> + target_link_libraries ( json <nl> + port <nl> + reflection <nl> + upb ) <nl> add_library ( table INTERFACE ) <nl> target_link_libraries ( table INTERFACE <nl> port <nl> add_library ( handlers <nl> upb / handlers . h <nl> upb / sink . h ) <nl> target_link_libraries ( handlers <nl> - reflection <nl> port <nl> + reflection <nl> table <nl> upb ) <nl> add_library ( upb_pb <nl> add_library ( upb_pb <nl> target_link_libraries ( upb_pb <nl> descriptor_upbproto <nl> handlers <nl> - reflection <nl> port <nl> + reflection <nl> table <nl> upb ) <nl> add_library ( upb_json <nl> mmm a / tests / conformance_upb . c <nl> ppp b / tests / conformance_upb . c <nl> <nl> # include " upb / decode . h " <nl> # include " upb / encode . h " <nl> # include " upb / reflection . h " <nl> + # include " upb / json_encode . h " <nl> # include " upb / text_encode . 
h " <nl> <nl> int test_count = 0 ; <nl> void serialize_text ( const upb_msg * msg , const upb_msgdef * m , const ctx * c ) { <nl> c - > response , upb_strview_make ( data , len ) ) ; <nl> } <nl> <nl> + void serialize_json ( const upb_msg * msg , const upb_msgdef * m , const ctx * c ) { <nl> + size_t len ; <nl> + size_t len2 ; <nl> + int opts = 0 ; <nl> + char * data ; <nl> + upb_status status ; <nl> + <nl> + upb_status_clear ( & status ) ; <nl> + if ( ! conformance_ConformanceRequest_print_unknown_fields ( c - > request ) ) { <nl> + opts | = UPB_TXTENC_SKIPUNKNOWN ; <nl> + } <nl> + <nl> + len = upb_json_encode ( msg , m , c - > symtab , opts , NULL , 0 , & status ) ; <nl> + <nl> + if ( len = = - 1 ) { <nl> + static const char msg [ ] = " Error serializing . " ; <nl> + conformance_ConformanceResponse_set_serialize_error ( <nl> + c - > response , upb_strview_make ( msg , strlen ( msg ) ) ) ; <nl> + return ; <nl> + } <nl> + <nl> + data = upb_arena_malloc ( c - > arena , len + 1 ) ; <nl> + len2 = upb_json_encode ( msg , m , c - > symtab , opts , data , len + 1 , & status ) ; <nl> + assert ( len = = len2 ) ; <nl> + conformance_ConformanceResponse_set_json_payload ( <nl> + c - > response , upb_strview_make ( data , len ) ) ; <nl> + } <nl> + <nl> bool parse_input ( upb_msg * msg , const upb_msgdef * m , const ctx * c ) { <nl> switch ( conformance_ConformanceRequest_payload_case ( c - > request ) ) { <nl> case conformance_ConformanceRequest_payload_protobuf_payload : <nl> void write_output ( const upb_msg * msg , const upb_msgdef * m , const ctx * c ) { <nl> case conformance_TEXT_FORMAT : <nl> serialize_text ( msg , m , c ) ; <nl> break ; <nl> + case conformance_JSON : <nl> + serialize_json ( msg , m , c ) ; <nl> + break ; <nl> default : { <nl> static const char msg [ ] = " Unsupported output format . " ; <nl> conformance_ConformanceResponse_set_skipped ( <nl> new file mode 100644 <nl> index 00000000000 . . fb45796635b <nl> mmm / dev / null <nl> ppp b / tests / empty . proto <nl> <nl> + syntax = " proto2 " ; <nl> + <nl> new file mode 100644 <nl> index 00000000000 . . d8adf1b722b <nl> mmm / dev / null <nl> ppp b / upb / json_encode . c <nl> <nl> + <nl> + # include " upb / json_encode . h " <nl> + <nl> + # include < ctype . h > <nl> + # include < float . h > <nl> + # include < inttypes . h > <nl> + # include < stdarg . h > <nl> + # include < stdio . h > <nl> + # include < string . h > <nl> + # include < setjmp . h > <nl> + <nl> + # include " upb / decode . h " <nl> + # include " upb / reflection . h " <nl> + <nl> + # include " upb / port_def . 
inc " <nl> + <nl> + typedef struct { <nl> + char * buf , * ptr , * end ; <nl> + size_t overflow ; <nl> + int indent_depth ; <nl> + int options ; <nl> + const upb_symtab * ext_pool ; <nl> + jmp_buf err ; <nl> + upb_status * status ; <nl> + upb_arena * arena ; <nl> + } jsonenc ; <nl> + <nl> + static void jsonenc_msg ( jsonenc * e , const upb_msg * msg , const upb_msgdef * m ) ; <nl> + static void jsonenc_scalar ( jsonenc * e , upb_msgval val , const upb_fielddef * f ) ; <nl> + static void jsonenc_msgfield ( jsonenc * e , const upb_msg * msg , <nl> + const upb_msgdef * m ) ; <nl> + static void jsonenc_value ( jsonenc * e , const upb_msg * msg , const upb_msgdef * m ) ; <nl> + <nl> + static void jsonenc_err ( jsonenc * e , const char * msg ) { <nl> + upb_status_seterrmsg ( e - > status , msg ) ; <nl> + longjmp ( e - > err , 1 ) ; <nl> + } <nl> + <nl> + static void jsonenc_putbytes ( jsonenc * e , const void * data , size_t len ) { <nl> + size_t have = e - > end - e - > ptr ; <nl> + if ( UPB_LIKELY ( have > = len ) ) { <nl> + memcpy ( e - > ptr , data , len ) ; <nl> + e - > ptr + = len ; <nl> + } else { <nl> + memcpy ( e - > ptr , data , have ) ; <nl> + e - > ptr + = have ; <nl> + e - > overflow + = ( len - have ) ; <nl> + } <nl> + } <nl> + <nl> + static void jsonenc_putstr ( jsonenc * e , const char * str ) { <nl> + jsonenc_putbytes ( e , str , strlen ( str ) ) ; <nl> + } <nl> + <nl> + static void jsonenc_printf ( jsonenc * e , const char * fmt , . . . ) { <nl> + size_t n ; <nl> + size_t have = e - > end - e - > ptr ; <nl> + va_list args ; <nl> + <nl> + va_start ( args , fmt ) ; <nl> + n = _upb_vsnprintf ( e - > ptr , have , fmt , args ) ; <nl> + va_end ( args ) ; <nl> + <nl> + if ( UPB_LIKELY ( have > n ) ) { <nl> + e - > ptr + = n ; <nl> + } else { <nl> + e - > ptr + = have ; <nl> + e - > overflow + = ( n - have ) ; <nl> + } <nl> + } <nl> + <nl> + static void jsonenc_nanos ( jsonenc * e , int32_t nanos ) { <nl> + const char zeros [ 3 ] = " 000 " ; <nl> + <nl> + if ( nanos = = 0 ) return ; <nl> + if ( nanos < 0 | | nanos > = 1000000000 ) { <nl> + jsonenc_err ( e , " error formatting timestamp as JSON : invalid nanos " ) ; <nl> + } <nl> + <nl> + jsonenc_printf ( e , " % 09 " PRId32 , nanos ) ; <nl> + <nl> + / * Remove trailing zeros , 3 at a time . * / <nl> + while ( ( e - > ptr - e - > buf ) > = 3 & & memcmp ( e - > ptr , zeros , 3 ) = = 0 ) { <nl> + e - > ptr - = 3 ; <nl> + } <nl> + } <nl> + <nl> + static void jsonenc_timestamp ( jsonenc * e , const upb_msg * msg , <nl> + const upb_msgdef * m ) { <nl> + const upb_fielddef * seconds_f = upb_msgdef_itof ( m , 1 ) ; <nl> + const upb_fielddef * nanos_f = upb_msgdef_itof ( m , 2 ) ; <nl> + int64_t seconds = upb_msg_get ( msg , seconds_f ) . int64_val ; <nl> + int32_t nanos = upb_msg_get ( msg , nanos_f ) . int32_val ; <nl> + int L , N , I , J , K , hour , min , sec ; <nl> + <nl> + if ( seconds < - 62135596800 ) { <nl> + jsonenc_err ( e , <nl> + " error formatting timestamp as JSON : minimum acceptable value " <nl> + " is 0001 - 01 - 01T00 : 00 : 00Z " ) ; <nl> + } else if ( seconds > 253402300799 ) { <nl> + jsonenc_err ( e , <nl> + " error formatting timestamp as JSON : maximum acceptable value " <nl> + " is 9999 - 12 - 31T23 : 59 : 59Z " ) ; <nl> + } <nl> + <nl> + / * Julian Day - > Y / M / D , Algorithm from : <nl> + * Fliegel , H . F . , and Van Flandern , T . C . , " A Machine Algorithm for <nl> + * Processing Calendar Dates , " Communications of the Association of <nl> + * Computing Machines , vol . 11 ( 1968 ) , p . 657 . 
* / <nl> + L = ( seconds / 86400 ) + 2440588 ; <nl> + N = 4 * L / 146097 ; <nl> + L = L - ( 146097 * N + 3 ) / 4 ; <nl> + I = 4000 * ( L + 1 ) / 1461001 ; <nl> + L = L - 1461 * I / 4 + 31 ; <nl> + J = 80 * L / 2447 ; <nl> + K = L - 2447 * J / 80 ; <nl> + L = J / 11 ; <nl> + J = J + 2 - 12 * L ; <nl> + I = 100 * ( N - 49 ) + I + L ; <nl> + <nl> + sec = seconds % 60 ; <nl> + min = ( seconds / 60 ) % 60 ; <nl> + hour = ( seconds / 3600 ) % 24 ; <nl> + <nl> + jsonenc_printf ( e , " \ " % 04d - % 02d - % 02dT % 02d : % 02d : % 02d " , I , J , K , hour , min , sec ) ; <nl> + jsonenc_nanos ( e , nanos ) ; <nl> + jsonenc_putstr ( e , " Z \ " " ) ; <nl> + } <nl> + <nl> + static void jsonenc_duration ( jsonenc * e , const upb_msg * msg , const upb_msgdef * m ) { <nl> + const upb_fielddef * seconds_f = upb_msgdef_itof ( m , 1 ) ; <nl> + const upb_fielddef * nanos_f = upb_msgdef_itof ( m , 2 ) ; <nl> + int64_t seconds = upb_msg_get ( msg , seconds_f ) . int64_val ; <nl> + int32_t nanos = upb_msg_get ( msg , nanos_f ) . int32_val ; <nl> + <nl> + if ( seconds > 315576000000 | | seconds < - 315576000000 | | <nl> + ( seconds < 0 ) ! = ( nanos < 0 ) ) { <nl> + jsonenc_err ( e , " bad duration " ) ; <nl> + } <nl> + <nl> + jsonenc_printf ( e , " \ " % " PRId64 , seconds ) ; <nl> + jsonenc_nanos ( e , nanos ) ; <nl> + jsonenc_putstr ( e , " s \ " " ) ; <nl> + } <nl> + <nl> + static void jsonenc_enum ( int32_t val , const upb_fielddef * f , jsonenc * e ) { <nl> + const upb_enumdef * e_def = upb_fielddef_enumsubdef ( f ) ; <nl> + const char * name = upb_enumdef_iton ( e_def , val ) ; <nl> + <nl> + if ( name ) { <nl> + jsonenc_printf ( e , " \ " % s \ " " , name ) ; <nl> + } else { <nl> + jsonenc_printf ( e , " % " PRId32 , val ) ; <nl> + } <nl> + } <nl> + <nl> + static void jsonenc_bytes ( jsonenc * e , upb_strview str ) { <nl> + / * This is the regular base64 , not the " web - safe " version . * / <nl> + static const char base64 [ ] = <nl> + " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789 + / " ; <nl> + const char * ptr = str . data ; <nl> + const char * end = ptr + str . size ; <nl> + char buf [ 4 ] ; <nl> + <nl> + jsonenc_putstr ( e , " \ " " ) ; <nl> + <nl> + while ( end - ptr > = 3 ) { <nl> + buf [ 0 ] = base64 [ ptr [ 0 ] > > 2 ] ; <nl> + buf [ 1 ] = base64 [ ( ( ptr [ 0 ] & 0x3 ) < < 4 ) | ( ptr [ 1 ] > > 4 ) ] ; <nl> + buf [ 2 ] = base64 [ ( ( ptr [ 1 ] & 0xf ) < < 2 ) | ( ptr [ 2 ] > > 6 ) ] ; <nl> + buf [ 3 ] = base64 [ ptr [ 2 ] & 0x3f ] ; <nl> + jsonenc_putbytes ( e , buf , 4 ) ; <nl> + ptr + = 3 ; <nl> + } <nl> + <nl> + switch ( end - ptr ) { <nl> + case 2 : <nl> + buf [ 0 ] = base64 [ ptr [ 0 ] > > 2 ] ; <nl> + buf [ 1 ] = base64 [ ( ( ptr [ 0 ] & 0x3 ) < < 4 ) | ( ptr [ 1 ] > > 4 ) ] ; <nl> + buf [ 2 ] = base64 [ ( ptr [ 1 ] & 0xf ) < < 2 ] ; <nl> + buf [ 3 ] = ' = ' ; <nl> + jsonenc_putbytes ( e , buf , 4 ) ; <nl> + break ; <nl> + case 1 : <nl> + buf [ 0 ] = base64 [ ptr [ 0 ] > > 2 ] ; <nl> + buf [ 1 ] = base64 [ ( ( ptr [ 0 ] & 0x3 ) < < 4 ) ] ; <nl> + buf [ 2 ] = ' = ' ; <nl> + buf [ 3 ] = ' = ' ; <nl> + jsonenc_putbytes ( e , buf , 4 ) ; <nl> + break ; <nl> + } <nl> + <nl> + jsonenc_putstr ( e , " \ " " ) ; <nl> + } <nl> + <nl> + static void jsonenc_stringbody ( jsonenc * e , upb_strview str ) { <nl> + const char * ptr = str . data ; <nl> + const char * end = ptr + str . 
size ; <nl> + <nl> + while ( ptr < end ) { <nl> + switch ( * ptr ) { <nl> + case ' \ n ' : <nl> + jsonenc_putstr ( e , " \ \ n " ) ; <nl> + break ; <nl> + case ' \ r ' : <nl> + jsonenc_putstr ( e , " \ \ r " ) ; <nl> + break ; <nl> + case ' \ t ' : <nl> + jsonenc_putstr ( e , " \ \ t " ) ; <nl> + break ; <nl> + case ' \ " ' : <nl> + jsonenc_putstr ( e , " \ \ \ " " ) ; <nl> + break ; <nl> + case ' \ f ' : <nl> + jsonenc_putstr ( e , " \ f ' " ) ; <nl> + break ; <nl> + case ' \ b ' : <nl> + jsonenc_putstr ( e , " \ b ' " ) ; <nl> + break ; <nl> + case ' \ \ ' : <nl> + jsonenc_putstr ( e , " \ \ \ \ " ) ; <nl> + break ; <nl> + default : <nl> + if ( ( uint8_t ) * ptr < 0x20 ) { <nl> + jsonenc_printf ( e , " \ \ u % 04x " , ( int ) ( uint8_t ) * ptr ) ; <nl> + } else { <nl> + / * This could be a non - ASCII byte . We rely on the string being valid <nl> + * UTF - 8 . * / <nl> + jsonenc_putbytes ( e , ptr , 1 ) ; <nl> + } <nl> + break ; <nl> + } <nl> + ptr + + ; <nl> + } <nl> + } <nl> + <nl> + static void jsonenc_string ( jsonenc * e , upb_strview str ) { <nl> + jsonenc_putstr ( e , " \ " " ) ; <nl> + jsonenc_stringbody ( e , str ) ; <nl> + jsonenc_putstr ( e , " \ " " ) ; <nl> + } <nl> + <nl> + static void jsonenc_double ( jsonenc * e , const char * fmt , double val ) { <nl> + if ( val = = UPB_INFINITY ) { <nl> + jsonenc_putstr ( e , " \ " Infinity \ " " ) ; <nl> + } else if ( val = = - UPB_INFINITY ) { <nl> + jsonenc_putstr ( e , " \ " - Infinity \ " " ) ; <nl> + } else if ( val ! = val ) { <nl> + jsonenc_putstr ( e , " \ " NaN \ " " ) ; <nl> + } else { <nl> + jsonenc_printf ( e , fmt , val ) ; <nl> + } <nl> + } <nl> + <nl> + static void jsonenc_wrapper ( jsonenc * e , const upb_msg * msg , <nl> + const upb_msgdef * m ) { <nl> + const upb_fielddef * val_f = upb_msgdef_itof ( m , 1 ) ; <nl> + upb_msgval val = upb_msg_get ( m , val_f ) ; <nl> + jsonenc_scalar ( e , val , val_f ) ; <nl> + } <nl> + <nl> + const upb_msgdef * jsonenc_getanymsg ( jsonenc * e , upb_strview type_url ) { <nl> + / * Find last ' / ' , if any . * / <nl> + const char * end = type_url . data + type_url . size ; <nl> + const char * ptr = end ; <nl> + <nl> + if ( ! e - > ext_pool | | type_url . size = = 0 ) return NULL ; <nl> + <nl> + while ( true ) { <nl> + if ( - - ptr = = type_url . data ) { <nl> + / * Type URL must contain at least one ' / ' , with host before . * / <nl> + return NULL ; <nl> + } <nl> + if ( * ptr = = ' / ' ) { <nl> + ptr + + ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + return upb_symtab_lookupmsg2 ( e - > ext_pool , ptr , end - ptr ) ; <nl> + } <nl> + <nl> + static void jsonenc_any ( jsonenc * e , const upb_msg * msg , const upb_msgdef * m ) { <nl> + const upb_fielddef * type_url_f = upb_msgdef_itof ( m , 1 ) ; <nl> + const upb_fielddef * value_f = upb_msgdef_itof ( m , 1 ) ; <nl> + upb_strview type_url = upb_msg_get ( msg , type_url_f ) . str_val ; <nl> + upb_strview value = upb_msg_get ( msg , value_f ) . str_val ; <nl> + const upb_msgdef * any_m = jsonenc_getanymsg ( e , type_url ) ; <nl> + const upb_msglayout * any_layout = upb_msgdef_layout ( any_m ) ; <nl> + upb_msg * any = upb_msg_new ( any_m , e - > arena ) ; <nl> + <nl> + if ( ! upb_decode ( value . data , value . 
size , any , any_layout , e - > arena ) ) { <nl> + jsonenc_err ( e , " Error decoding message in Any " ) ; <nl> + } <nl> + <nl> + jsonenc_putstr ( e , " { \ " @ type \ " : " ) ; <nl> + jsonenc_string ( e , type_url ) ; <nl> + jsonenc_putstr ( e , " , " ) ; <nl> + <nl> + if ( upb_msgdef_wellknowntype ( m ) = = UPB_WELLKNOWN_UNSPECIFIED ) { <nl> + / * Regular messages : { " @ type " : " . . . " , " foo " : 1 , " bar " : 2 } * / <nl> + jsonenc_msg ( e , any , any_m ) ; <nl> + } else { <nl> + / * Well - known type : { " @ type " : " . . . " , " value " : < well - known encoding > } * / <nl> + jsonenc_putstr ( e , " value : " ) ; <nl> + jsonenc_msgfield ( e , any , any_m ) ; <nl> + } <nl> + <nl> + jsonenc_putstr ( e , " } " ) ; <nl> + } <nl> + <nl> + static void jsonenc_putsep ( jsonenc * e , const char * str , bool * first ) { <nl> + if ( * first ) { <nl> + * first = false ; <nl> + } else { <nl> + jsonenc_putstr ( e , str ) ; <nl> + } <nl> + } <nl> + <nl> + static void jsonenc_fieldpath ( jsonenc * e , upb_strview path ) { <nl> + const char * ptr = path . data ; <nl> + const char * end = ptr + path . size ; <nl> + <nl> + while ( ptr < end ) { <nl> + char ch = * ptr ; <nl> + if ( ch > = ' A ' & & ch < = ' Z ' ) { <nl> + jsonenc_err ( e , " Field mask element may not have upper - case letter . " ) ; <nl> + } else if ( ch = = ' _ ' ) { <nl> + if ( ptr = = end - 1 | | * ( ptr + 1 ) < ' a ' | | * ( ptr + 1 ) > ' z ' ) { <nl> + jsonenc_err ( e , " Underscore must be followed by a lowercase letter . " ) ; <nl> + } <nl> + } else { <nl> + jsonenc_putbytes ( e , & ch , 1 ) ; <nl> + } <nl> + ptr + + ; <nl> + } <nl> + } <nl> + <nl> + static void jsonenc_fieldmask ( jsonenc * e , const upb_msg * msg , <nl> + const upb_msgdef * m ) { <nl> + const upb_fielddef * paths_f = upb_msgdef_itof ( m , 1 ) ; <nl> + const upb_array * paths = upb_msg_get ( msg , paths_f ) . array_val ; <nl> + bool first = true ; <nl> + size_t i , n = 0 ; <nl> + <nl> + if ( paths ) n = upb_array_size ( paths ) ; <nl> + <nl> + jsonenc_putstr ( e , " \ " " ) ; <nl> + <nl> + for ( i = 0 ; i < n ; i + + ) { <nl> + jsonenc_putsep ( e , " , " , & first ) ; <nl> + jsonenc_fieldpath ( e , upb_array_get ( paths , i ) . str_val ) ; <nl> + } <nl> + <nl> + jsonenc_putstr ( e , " \ " " ) ; <nl> + } <nl> + <nl> + static void jsonenc_struct ( jsonenc * e , const upb_msg * msg , <nl> + const upb_msgdef * m ) { <nl> + const upb_fielddef * fields_f = upb_msgdef_itof ( m , 1 ) ; <nl> + const upb_map * fields = upb_msg_get ( msg , fields_f ) . map_val ; <nl> + const upb_msgdef * entry_m = upb_fielddef_msgsubdef ( fields_f ) ; <nl> + const upb_fielddef * value_f = upb_msgdef_itof ( entry_m , 2 ) ; <nl> + size_t iter = UPB_MAP_BEGIN ; <nl> + bool first = true ; <nl> + <nl> + jsonenc_putstr ( e , " { " ) ; <nl> + <nl> + while ( upb_mapiter_next ( fields , & iter ) ) { <nl> + upb_msgval key = upb_mapiter_key ( fields , iter ) ; <nl> + upb_msgval val = upb_mapiter_value ( fields , iter ) ; <nl> + <nl> + jsonenc_putsep ( e , " , " , & first ) ; <nl> + jsonenc_string ( e , key . str_val ) ; <nl> + jsonenc_putstr ( e , " : " ) ; <nl> + jsonenc_value ( e , val . 
msg_val , upb_fielddef_msgsubdef ( value_f ) ) ; <nl> + } <nl> + <nl> + jsonenc_putstr ( e , " } " ) ; <nl> + } <nl> + <nl> + static void jsonenc_listvalue ( jsonenc * e , const upb_msg * msg , <nl> + const upb_msgdef * m ) { <nl> + const upb_fielddef * values_f = upb_msgdef_itof ( m , 1 ) ; <nl> + const upb_msgdef * values_m = upb_fielddef_msgsubdef ( values_f ) ; <nl> + const upb_array * values = upb_msg_get ( msg , values_f ) . array_val ; <nl> + const size_t size = upb_array_size ( values ) ; <nl> + size_t i ; <nl> + bool first = true ; <nl> + <nl> + jsonenc_putstr ( e , " [ " ) ; <nl> + <nl> + for ( i = 0 ; i < size ; i + + ) { <nl> + upb_msgval elem = upb_array_get ( values , i ) ; <nl> + <nl> + jsonenc_putsep ( e , " , " , & first ) ; <nl> + jsonenc_value ( e , elem . msg_val , values_m ) ; <nl> + } <nl> + <nl> + jsonenc_putstr ( e , " ] " ) ; <nl> + } <nl> + <nl> + static void jsonenc_value ( jsonenc * e , const upb_msg * msg , const upb_msgdef * m ) { <nl> + / * TODO ( haberman ) : do we want a reflection method to get oneof case ? * / <nl> + size_t iter = UPB_MSG_BEGIN ; <nl> + const upb_fielddef * f ; <nl> + upb_msgval val ; <nl> + <nl> + if ( ! upb_msg_next ( msg , m , NULL , & f , & val , & iter ) ) { <nl> + jsonenc_err ( e , " No value set in Value proto " ) ; <nl> + } <nl> + <nl> + switch ( upb_fielddef_number ( f ) ) { <nl> + case 1 : <nl> + jsonenc_putstr ( e , " null " ) ; <nl> + break ; <nl> + case 2 : <nl> + jsonenc_double ( e , " % . 17g " , val . double_val ) ; <nl> + break ; <nl> + case 3 : <nl> + jsonenc_string ( e , val . str_val ) ; <nl> + break ; <nl> + case 4 : <nl> + jsonenc_putstr ( e , val . bool_val ? " true " : " false " ) ; <nl> + break ; <nl> + case 5 : <nl> + jsonenc_struct ( e , val . msg_val , upb_fielddef_msgsubdef ( f ) ) ; <nl> + break ; <nl> + case 6 : <nl> + jsonenc_listvalue ( e , val . msg_val , upb_fielddef_msgsubdef ( f ) ) ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + static void jsonenc_msgfield ( jsonenc * e , const upb_msg * msg , <nl> + const upb_msgdef * m ) { <nl> + switch ( upb_msgdef_wellknowntype ( m ) ) { <nl> + case UPB_WELLKNOWN_UNSPECIFIED : <nl> + jsonenc_msg ( e , msg , m ) ; <nl> + break ; <nl> + case UPB_WELLKNOWN_ANY : <nl> + jsonenc_any ( e , msg , m ) ; <nl> + break ; <nl> + case UPB_WELLKNOWN_FIELDMASK : <nl> + jsonenc_fieldmask ( e , msg , m ) ; <nl> + break ; <nl> + case UPB_WELLKNOWN_DURATION : <nl> + jsonenc_duration ( e , msg , m ) ; <nl> + break ; <nl> + case UPB_WELLKNOWN_TIMESTAMP : <nl> + jsonenc_timestamp ( e , msg , m ) ; <nl> + break ; <nl> + case UPB_WELLKNOWN_DOUBLEVALUE : <nl> + case UPB_WELLKNOWN_FLOATVALUE : <nl> + case UPB_WELLKNOWN_INT64VALUE : <nl> + case UPB_WELLKNOWN_UINT64VALUE : <nl> + case UPB_WELLKNOWN_INT32VALUE : <nl> + case UPB_WELLKNOWN_UINT32VALUE : <nl> + case UPB_WELLKNOWN_STRINGVALUE : <nl> + case UPB_WELLKNOWN_BYTESVALUE : <nl> + case UPB_WELLKNOWN_BOOLVALUE : <nl> + jsonenc_wrapper ( e , msg , m ) ; <nl> + break ; <nl> + case UPB_WELLKNOWN_VALUE : <nl> + jsonenc_value ( e , msg , m ) ; <nl> + break ; <nl> + case UPB_WELLKNOWN_LISTVALUE : <nl> + jsonenc_listvalue ( e , msg , m ) ; <nl> + break ; <nl> + case UPB_WELLKNOWN_STRUCT : <nl> + jsonenc_listvalue ( e , msg , m ) ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + static void jsonenc_scalar ( jsonenc * e , upb_msgval val , const upb_fielddef * f ) { <nl> + switch ( upb_fielddef_type ( f ) ) { <nl> + case UPB_TYPE_BOOL : <nl> + jsonenc_putstr ( e , val . bool_val ? 
" true " : " false " ) ; <nl> + break ; <nl> + case UPB_TYPE_FLOAT : <nl> + jsonenc_double ( e , " % . 9g " , val . float_val ) ; <nl> + break ; <nl> + case UPB_TYPE_DOUBLE : <nl> + jsonenc_double ( e , " % . 17g " , val . double_val ) ; <nl> + break ; <nl> + case UPB_TYPE_INT32 : <nl> + jsonenc_printf ( e , " % " PRId32 , val . int32_val ) ; <nl> + break ; <nl> + case UPB_TYPE_UINT32 : <nl> + jsonenc_printf ( e , " % " PRIu32 , val . uint32_val ) ; <nl> + break ; <nl> + case UPB_TYPE_INT64 : <nl> + jsonenc_printf ( e , " \ " % " PRId64 " \ " " , val . int64_val ) ; <nl> + break ; <nl> + case UPB_TYPE_UINT64 : <nl> + jsonenc_printf ( e , " \ " % " PRIu64 " \ " " , val . uint64_val ) ; <nl> + break ; <nl> + case UPB_TYPE_STRING : <nl> + jsonenc_string ( e , val . str_val ) ; <nl> + break ; <nl> + case UPB_TYPE_BYTES : <nl> + jsonenc_bytes ( e , val . str_val ) ; <nl> + break ; <nl> + case UPB_TYPE_ENUM : <nl> + jsonenc_enum ( val . int32_val , f , e ) ; <nl> + break ; <nl> + case UPB_TYPE_MESSAGE : <nl> + jsonenc_msgfield ( e , val . msg_val , upb_fielddef_msgsubdef ( f ) ) ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + static void jsonenc_mapkey ( jsonenc * e , upb_msgval val , const upb_fielddef * f ) { <nl> + jsonenc_putstr ( e , " \ " " ) ; <nl> + <nl> + switch ( upb_fielddef_type ( f ) ) { <nl> + case UPB_TYPE_BOOL : <nl> + jsonenc_putstr ( e , val . bool_val ? " true " : " false " ) ; <nl> + break ; <nl> + case UPB_TYPE_INT32 : <nl> + jsonenc_printf ( e , " % " PRId32 , val . int32_val ) ; <nl> + break ; <nl> + case UPB_TYPE_UINT32 : <nl> + jsonenc_printf ( e , " % " PRIu32 , val . uint32_val ) ; <nl> + break ; <nl> + case UPB_TYPE_INT64 : <nl> + jsonenc_printf ( e , " % " PRId64 , val . int64_val ) ; <nl> + break ; <nl> + case UPB_TYPE_UINT64 : <nl> + jsonenc_printf ( e , " % " PRIu64 , val . uint64_val ) ; <nl> + break ; <nl> + case UPB_TYPE_STRING : <nl> + jsonenc_stringbody ( e , val . 
str_val ) ; <nl> + default : <nl> + UPB_UNREACHABLE ( ) ; <nl> + } <nl> + <nl> + jsonenc_putstr ( e , " \ " : " ) ; <nl> + } <nl> + <nl> + static void jsonenc_array ( jsonenc * e , const upb_array * arr , <nl> + const upb_fielddef * f ) { <nl> + size_t i ; <nl> + size_t size = upb_array_size ( arr ) ; <nl> + bool first = true ; <nl> + <nl> + jsonenc_putstr ( e , " [ " ) ; <nl> + <nl> + for ( i = 0 ; i < size ; i + + ) { <nl> + jsonenc_putsep ( e , " , " , & first ) ; <nl> + jsonenc_scalar ( e , upb_array_get ( arr , i ) , f ) ; <nl> + } <nl> + <nl> + jsonenc_putstr ( e , " ] " ) ; <nl> + } <nl> + <nl> + static void jsonenc_map ( jsonenc * e , const upb_map * map , const upb_fielddef * f ) { <nl> + const upb_msgdef * entry = upb_fielddef_msgsubdef ( f ) ; <nl> + const upb_fielddef * key_f = upb_msgdef_itof ( entry , 1 ) ; <nl> + const upb_fielddef * val_f = upb_msgdef_itof ( entry , 2 ) ; <nl> + size_t iter = UPB_MAP_BEGIN ; <nl> + bool first = true ; <nl> + <nl> + jsonenc_putstr ( e , " { " ) ; <nl> + <nl> + while ( upb_mapiter_next ( map , & iter ) ) { <nl> + jsonenc_putsep ( e , " , " , & first ) ; <nl> + jsonenc_mapkey ( e , upb_mapiter_key ( map , iter ) , key_f ) ; <nl> + jsonenc_scalar ( e , upb_mapiter_value ( map , iter ) , val_f ) ; <nl> + } <nl> + <nl> + jsonenc_putstr ( e , " } " ) ; <nl> + } <nl> + <nl> + static void jsonenc_fieldval ( jsonenc * e , const upb_fielddef * f , <nl> + upb_msgval val , bool * first ) { <nl> + char buf [ 128 ] ; <nl> + const char * name ; <nl> + <nl> + if ( e - > options & UPB_JSONENC_PROTONAMES ) { <nl> + name = upb_fielddef_name ( f ) ; <nl> + } else { <nl> + / * TODO ( haberman ) : we need a better JSON name API . * / <nl> + upb_fielddef_getjsonname ( f , buf , sizeof ( buf ) ) ; <nl> + name = buf ; <nl> + } <nl> + <nl> + jsonenc_putsep ( e , " , " , first ) ; <nl> + jsonenc_printf ( e , " \ " % s \ " : " , name ) ; <nl> + <nl> + if ( upb_fielddef_ismap ( f ) ) { <nl> + jsonenc_map ( e , val . map_val , f ) ; <nl> + } else if ( upb_fielddef_isseq ( f ) ) { <nl> + jsonenc_array ( e , val . array_val , f ) ; <nl> + } else { <nl> + jsonenc_scalar ( e , val , f ) ; <nl> + } <nl> + } <nl> + <nl> + static void jsonenc_msg ( jsonenc * e , const upb_msg * msg , const upb_msgdef * m ) { <nl> + upb_msgval val ; <nl> + const upb_fielddef * f ; <nl> + bool first = true ; <nl> + <nl> + jsonenc_putstr ( e , " { " ) ; <nl> + <nl> + if ( e - > options & UPB_JSONENC_EMITDEFAULTS ) { <nl> + / * Iterate over all fields . * / <nl> + upb_msg_field_iter i ; <nl> + for ( upb_msg_field_begin ( & i , m ) ; ! upb_msg_field_done ( & i ) ; <nl> + upb_msg_field_next ( & i ) ) { <nl> + f = upb_msg_iter_field ( & i ) ; <nl> + jsonenc_fieldval ( e , f , upb_msg_get ( msg , f ) , & first ) ; <nl> + } <nl> + } else { <nl> + / * Iterate over non - empty fields . 
* / <nl> + size_t iter = UPB_MSG_BEGIN ; <nl> + while ( upb_msg_next ( msg , m , e - > ext_pool , & f , & val , & iter ) ) { <nl> + jsonenc_fieldval ( e , f , val , & first ) ; <nl> + } <nl> + } <nl> + <nl> + jsonenc_putstr ( e , " } " ) ; <nl> + } <nl> + <nl> + size_t jsonenc_nullz ( jsonenc * e , size_t size ) { <nl> + size_t ret = e - > ptr - e - > buf + e - > overflow ; <nl> + <nl> + if ( size > 0 ) { <nl> + if ( e - > ptr = = e - > end ) e - > ptr - - ; <nl> + * e - > ptr = ' \ 0 ' ; <nl> + } <nl> + <nl> + return ret ; <nl> + } <nl> + <nl> + size_t upb_json_encode ( const upb_msg * msg , const upb_msgdef * m , <nl> + const upb_symtab * ext_pool , int options , char * buf , <nl> + size_t size , upb_status * status ) { <nl> + jsonenc e ; <nl> + <nl> + e . buf = buf ; <nl> + e . ptr = buf ; <nl> + e . end = buf + size ; <nl> + e . overflow = 0 ; <nl> + e . options = options ; <nl> + e . ext_pool = ext_pool ; <nl> + e . status = status ; <nl> + <nl> + if ( setjmp ( e . err ) ) return - 1 ; <nl> + <nl> + jsonenc_msg ( & e , msg , m ) ; <nl> + return jsonenc_nullz ( & e , size ) ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 8396fbf80f8 <nl> mmm / dev / null <nl> ppp b / upb / json_encode . h <nl> <nl> + <nl> + # ifndef UPB_JSONENCODE_H_ <nl> + # define UPB_JSONENCODE_H_ <nl> + <nl> + # include " upb / def . h " <nl> + <nl> + # ifdef __cplusplus <nl> + extern " C " { <nl> + # endif <nl> + <nl> + enum { <nl> + / * When set , emits 0 / default values . TODO ( haberman ) : proto3 only ? * / <nl> + UPB_JSONENC_EMITDEFAULTS = 1 , <nl> + <nl> + / * When set , use normal ( snake_case ) field names instead of JSON ( camelCase ) <nl> + names . * / <nl> + UPB_JSONENC_PROTONAMES = 2 <nl> + } ; <nl> + <nl> + / * Encodes the given | msg | to JSON format . The message ' s reflection is given in <nl> + * | m | . The symtab in | ext_pool | is used to find extensions ( if NULL , extensions <nl> + * will not be printed ) . <nl> + * <nl> + * Output is placed in the given buffer , and always NULL - terminated . The output <nl> + * size ( excluding NULL ) is returned . This means that a return value > = | size | <nl> + * implies that the output was truncated . ( These are the same semantics as <nl> + * snprintf ( ) ) . * / <nl> + size_t upb_json_encode ( const upb_msg * msg , const upb_msgdef * m , <nl> + const upb_symtab * ext_pool , int options , char * buf , <nl> + size_t size , upb_status * status ) ; <nl> + <nl> + # ifdef __cplusplus <nl> + } / * extern " C " * / <nl> + # endif <nl> + <nl> + # endif / * UPB_JSONENCODE_H_ * / <nl> mmm a / upb / reflection . c <nl> ppp b / upb / reflection . c <nl> bool upb_msg_next ( const upb_msg * msg , const upb_msgdef * m , <nl> } <nl> / * Continue if NULL or 0 . * / <nl> if ( memcmp ( & test , & zero , sizeof ( test ) ) = = 0 ) continue ; <nl> + <nl> + / * Continue on empty array or map . * / <nl> + if ( upb_fielddef_ismap ( f ) ) { <nl> + if ( upb_map_size ( test . map_val ) = = 0 ) continue ; <nl> + } else if ( upb_fielddef_isseq ( f ) ) { <nl> + if ( upb_array_size ( test . array_val ) = = 0 ) continue ; <nl> + } <nl> } <nl> <nl> * out_val = val ; <nl> mmm a / upb / text_encode . c <nl> ppp b / upb / text_encode . c <nl> static void txtenc_string ( txtenc * e , upb_strview str , bool bytes ) { <nl> } else { <nl> txtenc_putbytes ( e , ptr , 1 ) ; <nl> } <nl> + break ; <nl> } <nl> ptr + + ; <nl> } <nl> mmm a / upbc / generator . cc <nl> ppp b / upbc / generator .
cc <nl> void WriteDefSource ( const protobuf : : FileDescriptor * file , Output & output ) { <nl> } <nl> output ( " \ n " ) ; <nl> <nl> - output ( " static const upb_msglayout * layouts [ $ 0 ] = { \ n " , file_messages . size ( ) ) ; <nl> - for ( auto message : file_messages ) { <nl> - output ( " & $ 0 , \ n " , MessageInit ( message ) ) ; <nl> + if ( ! file_messages . empty ( ) ) { <nl> + output ( " static const upb_msglayout * layouts [ $ 0 ] = { \ n " , file_messages . size ( ) ) ; <nl> + for ( auto message : file_messages ) { <nl> + output ( " & $ 0 , \ n " , MessageInit ( message ) ) ; <nl> + } <nl> + output ( " } ; \ n " ) ; <nl> + output ( " \ n " ) ; <nl> } <nl> - output ( " } ; \ n " ) ; <nl> - output ( " \ n " ) ; <nl> <nl> protobuf : : FileDescriptorProto file_proto ; <nl> file - > CopyTo ( & file_proto ) ; <nl> void WriteDefSource ( const protobuf : : FileDescriptor * file , Output & output ) { <nl> <nl> output ( " upb_def_init $ 0 = { \ n " , DefInitSymbol ( file ) ) ; <nl> output ( " deps , \ n " ) ; <nl> - output ( " layouts , \ n " ) ; <nl> + if ( file_messages . empty ( ) ) { <nl> + output ( " NULL , \ n " ) ; <nl> + } else { <nl> + output ( " layouts , \ n " ) ; <nl> + } <nl> output ( " \ " $ 0 \ " , \ n " , file - > name ( ) ) ; <nl> output ( " UPB_STRVIEW_INIT ( descriptor , $ 0 ) \ n " , file_data . size ( ) ) ; <nl> output ( " } ; \ n " ) ; <nl>
Squashed ' third_party / upb / ' changes from 02c89a8b15 . . e70853d71f
grpc/grpc
7eafc04dde203ad17cb454c64dccbe1468f68cb2
2020-02-21T19:49:53Z
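Note on the new API above: json_encode.h specifies snprintf()-style sizing, so a caller can measure with a NULL buffer first and then encode into an exact allocation. A minimal sketch of that two-pass pattern follows (hypothetical helper; `msg`, `m` and `symtab` are assumed to come from elsewhere, and `upb_status_clear` is assumed from upb's core header):

    #include <cstdlib>
    #include "upb/json_encode.h"

    // Sketch only: encode a message to a freshly malloc'd JSON string.
    char* encode_to_json(const upb_msg* msg, const upb_msgdef* m,
                         const upb_symtab* symtab) {
      upb_status status;
      upb_status_clear(&status);
      // Pass 1: a NULL buffer measures the required size (snprintf semantics).
      size_t size = upb_json_encode(msg, m, symtab, 0, NULL, 0, &status);
      if (size == (size_t)-1) return NULL;  // encoder reported an error
      // Pass 2: encode into an exact allocation, +1 for the terminator.
      char* buf = static_cast<char*>(malloc(size + 1));
      if (buf) upb_json_encode(msg, m, symtab, 0, buf, size + 1, &status);
      return buf;  // caller frees
    }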
mmm a / cmake / README . md <nl> ppp b / cmake / README . md <nl> and that the mingw libs ( ffmpeg , libdvd and others ) are built using <nl> # # # macOS <nl> <nl> For macOS the required dependencies can be found in <nl> - [ docs / README . osx ] ( https : / / github . com / xbmc / xbmc / tree / master / docs / README . osx ) . <nl> + [ docs / README . osx . md ] ( https : / / github . com / xbmc / xbmc / tree / master / docs / README . osx . md ) . <nl> <nl> On macOS it is necessary to build the dependencies in ` tools / depends ` using <nl> ` . / bootstrap & & . / configure - - host = < PLATFORM > & & make ` . The other steps such <nl> deleted file mode 100644 <nl> index e730cd617788 . . 000000000000 <nl> mmm a / docs / README . osx <nl> ppp / dev / null <nl> <nl> - TOC <nl> - 1 . Introduction <nl> - 2 . Getting the source code <nl> - 3 . Install required libs <nl> - 3 . 1 . Install Xcode <nl> - 3 . 1 . 1 Supported Xcode and OSX constellations <nl> - 3 . 2 . Install Kodi build depends <nl> - 3 . 2 . 1 Compiling as 32 Bit binary <nl> - 3 . 2 . 2 Compiling as 64 Bit binary <nl> - 3 . 2 . 3 Compile binary addons <nl> - 4 . How to compile and run <nl> - 4 . 1 Using XCode <nl> - 4 . 2 Using Command line <nl> - 5 . Packaging <nl> - <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - 1 . Introduction <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - <nl> - This is a platform port of Kodi for the Apple OSX operating system . 10 . 9 and 10 . 10 Intel development <nl> - platforms are supported . Xcode 6 or newer are the recommended versions . <nl> - There are 3 ways to build Kodi for Mac , from command - line with make , from command - line <nl> - using xcodebuild or from Xcode . <nl> - <nl> - The minimum version of OSX you need to run ( ! ) Kodi is 10 . 7 atm . <nl> - <nl> - Generally , Xcode is the easiest as it presents the build system in a GUI environment . <nl> - The command - line build is still under development . <nl> - <nl> - Kodi for Mac is composed of a main binary with numerous dynamic libraries and <nl> - codecs that support a multitude of music and video formats . <nl> - <nl> - On Mavericks ( OSX 10 . 9 . x ) we recommend using Xcode 6 . 1 . <nl> - On Yosemite ( OSX 10 . 10 . x ) we recommend using Xcode 6 . 4 . <nl> - On El Capitan ( OSX 10 . 11 . x ) we recommend using Xcode 7 . x or Xcode 8 . x . <nl> - On Sierra ( macOS 10 . 12 . x ) we recomment using Xcode 8 . x . <nl> - <nl> - NOTE TO NEW OS X USERS : All lines that are prefixed with the ' $ ' character are <nl> - commands that need to be typed into a Terminal window . Note that the ' $ ' <nl> - character itself should NOT be typed as part of the command . <nl> - <nl> - ATTENTION : You need to download and install XQuartz from https : / / xquartz . macosforge . org / landing / since <nl> - its not part of OSX anymore since 10 . 8 . <nl> - <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - 2 . Getting the source code <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - <nl> - $ cd $ HOME <nl> - $ git clone git : / / github . com / xbmc / xbmc . git Kodi <nl> - <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - 3 . 1 Install XCODE <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - See point 3 . 1 . 1 below for an updated list of supported / tested Xcode / osx constellations ! ! ! 
<nl> - <nl> - Install latest Xcode ( 6 . 4 . 0 ) . You can download it from the MacOSX AppStore ( Xcode ) . <nl> - <nl> - Xcode 6 . 4 runs on 10 . 10 and later ( at least Yosemite ) . <nl> - <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - 3 . 1 . 1 Supported Xcode and OSX constellations <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - As far as we know the compilation for mac osx should work with the following <nl> - constellations of Xcode and osx versions ( to be updated once we know more ) : <nl> - <nl> - 1 . XCode 6 . 0 . 1 against OSX SDK 10 . 9 ( M ) <nl> - 2 . XCode 6 . 1 . 0 against OSX SDK 10 . 10 ( Y ) <nl> - 3 . XCode 6 . 2 . 0 against OSX SDK 10 . 10 ( Y ) <nl> - 4 . XCode 6 . 3 . 0 against OSX SDK 10 . 10 ( Y ) <nl> - 5 . Xcode 6 . 4 . 0 against OSX SDK 10 . 10 ( Y ) <nl> - 6 . Xcode 7 . x against OSX SDK 10 . 11 ( EC ) <nl> - 7 . Xcode 8 . 0 against OSX SDK 10 . 12 ( EC ) <nl> - 8 . Xcode 8 . 0 against OSX SDK 10 . 12 ( S ) <nl> - <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - 3 . 2 Install Kodi build depends <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - The following commands will build using the latest OSX SDK found on your system . <nl> - <nl> - 3 . 2 . 1 Compiling as 32 Bit binary <nl> - $ cd $ HOME / Kodi <nl> - $ cd tools / depends <nl> - $ . / bootstrap <nl> - $ . / configure - - host = i386 - apple - darwin <nl> - $ make <nl> - <nl> - 3 . 2 . 2 Compiling as 64 Bit binary <nl> - $ cd $ HOME / Kodi <nl> - $ cd tools / depends <nl> - $ . / bootstrap <nl> - $ . / configure - - host = x86_64 - apple - darwin <nl> - $ make <nl> - <nl> - 3 . 2 . 3 Compile binary addons <nl> - $ make - C target / binary - addons <nl> - <nl> - NOTE : if you only want to build specific addons you can specify like this : <nl> - $ make - C target / binary - addons ADDONS = " pvr . hts pvr . dvblink " <nl> - <nl> - NOTE : You can speedup compilation on multicore systems by doing <nl> - " make - j < number of cores > " instead of " make " . For a dualcore this would read : <nl> - $ make - j2 <nl> - <nl> - ADVANCED developers only ! If you want to specify an OSX SDK version ( if <nl> - multiple versions are installed ) - then append it to the configure line <nl> - above ( example below would use OSX SDK 10 . 9 and build for 64bit ) : <nl> - <nl> - $ . / configure - - host = x86_64 - apple - darwin - - with - sdk = 10 . 9 <nl> - <nl> - Ensure that you also adapt the xcode project to use this SDK version . <nl> - <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - 4 . How to compile <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - Both Xcode and Terminal compilation require that build environment be setup <nl> - first . This is a simple step and involves the following : <nl> - <nl> - 4 . a Compilation by using command - line building via xcodebuild or <nl> - by compiling via Xcode GUI <nl> - <nl> - $ cd $ HOME / Kodi <nl> - $ make - C tools / depends / target / xbmc <nl> - $ make clean <nl> - $ make xcode_depends <nl> - <nl> - 4 . b Compilation by using command - line building via make ( experimental ) <nl> - <nl> - $ cd $ HOME / Kodi <nl> - $ make - C tools / depends / target / xbmc <nl> - $ make clean <nl> - <nl> - The configure operation will setup the build environment for codecs and <nl> - internal libraries that are used by Kodi . 
This step is required for both Xcode <nl> - and command - line building . The " make clean " ensures that there are no stale <nl> - binaries from git that might cause problems . <nl> - <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - 4 . 1 Using Xcode <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - Start XCode and open the Kodi project ( Kodi . xcodeproj ) located in $ HOME / Kodi . <nl> - For development , Kodi is run from the $ HOME / Kodi directory and needs to have <nl> - the KODI_HOME environment variable set to know where that directory is located . <nl> - To set KODI_HOME environment variable : <nl> - <nl> - Xcode 6 and later <nl> - Menu - > Product - > Edit Sheme - > " Run Kodi " / " Debug " - > Add KODI_HOME into <nl> - the List of " Environment Variables " . Set the value to the path to the Kodi <nl> - root folder . For example , " / Users / bigdog / Documents / Kodi " <nl> - <nl> - There are two build targets " Kodi " and " Kodi . app " ( each in 32Bit and 64Bit flavour ) <nl> - with debug and release settings . The " Kodi " target is used for rapid build and <nl> - debug cycles while the " Kodi . app " target is used to build a self contained <nl> - OSX application bundle . <nl> - <nl> - Set the build target to " Kodi " or " Kodi . app " and be sure to select the same <nl> - architecture as selected in step 3 . 2 ( either i386 for 32Bit or x86_64 for 64Bit ) , <nl> - then build . <nl> - <nl> - If you have selected a specific OSX SDK Version in step 3 . 2 then you might need <nl> - to adapt the active target to use the same OSX SDK version . Else build will fail <nl> - <nl> - The build process will take a long time when building the first time . <nl> - You can see the progress in " Build Results " . There are a large number of static <nl> - and dynamic libraries that will need to be built . Once these are built , <nl> - subsequent builds will be faster . <nl> - <nl> - After the build , you can ether run Kodi for Mac from Xcode or run it from <nl> - the command - line . If you run it from the command - line , make sure your set <nl> - the KODI_HOME environment variable ( export KODI_HOME = $ HOME / Kodi ) . Then , to <nl> - run the debug version : <nl> - <nl> - $ . / build / Debug / Kodi <nl> - <nl> - Or the release version : <nl> - <nl> - $ . / build / Release / Kodi <nl> - <nl> - You can also build via Xcode from the command - line using the following : <nl> - <nl> - $ xcodebuild - configuration Release ONLY_ACTIVE_ARCH = YES ARCHS = x86_64 VALID_ARCHS = x86_64 \ <nl> - - target " Kodi . app " - project Kodi . xcodeproj <nl> - <nl> - You can specify " Release " instead of " Debug " as a configuration . Be sure to set * _ARCHS <nl> - variables to the same architecture as selected in step 3 . 2 ( either i386 for 32Bit or x86_64 <nl> - for 64Bit ) . <nl> - <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - 4 . 2 Using Terminal ( command - line ) ( this is a work in progress and might fail ) <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - There are two methods , a ) make / Xcode and b ) make ( which might fail as it ' s under <nl> - construction ) . <nl> - <nl> - You could try xcodebuild from the command - line with altered SDK - like 10 . 9 in the example <nl> - below ( normally unneeded - for advanced developers ) . 
<nl> - <nl> - a ) <nl> - $ cd $ HOME / Kodi <nl> - $ export KODI_HOME = ` pwd ` <nl> - $ make xcode_depends <nl> - $ xcodebuild - sdk macosx10 . 9 - project Kodi . xcodeproj - target Kodi . app ONLY_ACTIVE_ARCH = YES \ <nl> - ARCHS = x86_64 VALID_ARCHS = x86_64 - configuration Release build <nl> - <nl> - b ) Building via make : <nl> - $ cd $ HOME / Kodi <nl> - $ export KODI_HOME = ` pwd ` <nl> - $ make <nl> - $ . / Kodi . bin <nl> - <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - 5 . Packaging <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - This section describes how to package Kodi in a disk image for <nl> - distribution . <nl> - <nl> - 1 . Build Kodi . app from XCode so that the application bundle is correctly updated . <nl> - <nl> - 2 . $ cd tools / darwin / packaging / osx <nl> - <nl> - 3 . $ chmod + x . / mkdmg - osx . sh & & . / mkdmg - osx . sh release <nl> - <nl> - 4 . Use release or debug - you have to be sure that you build the corresponding <nl> - version before . <nl> - <nl> - 6 . Find the corresponding dmg in the packaging dir . <nl> new file mode 100644 <nl> index 000000000000 . . 78ba32dfe1a1 <nl> mmm / dev / null <nl> ppp b / docs / README . osx . md <nl> <nl> + # Kodi for Apple macOS <nl> + <nl> + # # TOC <nl> + <nl> + 1 . [ Introduction ] ( # 1 - introduction ) <nl> + 2 . [ Getting the source code ] ( # 2 - getting - the - source - code ) <nl> + 3 . [ Install build dependencies ] ( # 3 - install - build - dependencies ) <nl> + 1 . [ Install Xcode ] ( # 31 - install - xcode ) <nl> + 2 . [ Install Kodi build depends ] ( # 32 - install - kodi - build - depends ) <nl> + 3 . [ Compile Kodi binary addons ] ( # 33 - compile - kodi - binary - addons ) <nl> + 4 . [ How to compile Kodi ] ( # 4 - how - to - compile - kodi ) <nl> + 1 . [ Using Xcode ( or xcodebuild ) ] ( # 41 - using - xcode - or - xcodebuild ) <nl> + 2 . [ Compilation using command - line ( make ) ] ( # 42 - compilation - using - command - line - make ) <nl> + 5 . [ Packaging ] ( # 5 - packaging ) <nl> + 6 . [ References ] ( # 6 - references ) <nl> + <nl> + # # 1 Introduction <nl> + <nl> + This is a platform port of Kodi for the Apple macOS operating system . <nl> + Starting with Kodi v18 the build system has been migrated from native Xcode to <nl> + CMake ( and generated project files ) . <nl> + <nl> + There are 3 ways to build Kodi for Mac : <nl> + <nl> + - Xcode IDE ( easiest as it presents the build system in a GUI environment ) <nl> + - command - line with xcodebuild <nl> + - command - line with make <nl> + <nl> + Kodi for Mac is composed of a main binary with numerous dynamic libraries and <nl> + codecs that support a multitude of music and video formats . <nl> + <nl> + The minimum version of OSX you need to run ( ! ) Kodi is 10 . 7 atm . <nl> + <nl> + - On Mavericks ( OSX 10 . 9 . x ) we recommend using Xcode 6 . 1 . <nl> + - On Yosemite ( OSX 10 . 10 . x ) we recommend using Xcode 6 . 4 . <nl> + - On El Capitan ( OSX 10 . 11 . x ) we recommend using Xcode 7 . x or Xcode 8 . x . <nl> + - On Sierra ( macOS 10 . 12 . x ) we recommend using Xcode 8 . x . <nl> + <nl> + * * ATTENTION * * : You need to download and install XQuartz from < https : / / xquartz . macosforge . org / landing / > <nl> + since it ' s not part of macOS anymore since 10 . 8 . <nl> + <nl> + # # 2 Getting the source code <nl> + <nl> + cd $ HOME <nl> + git clone git : / / github . com / xbmc / xbmc . 
git Kodi <nl> + <nl> + # # 3 Install build dependencies <nl> + <nl> + # # # 3 . 1 Install Xcode <nl> + <nl> + Install the Xcode version recommended for your macOS version . You can download <nl> + it either from the macOS AppStore ( Xcode ) or from the Apple Developer Homepage . <nl> + <nl> + As far as we know the compilation for macOS should work with the following <nl> + constellations of Xcode and macOS versions ( to be updated once we know more ) : <nl> + <nl> + 1 XCode 6 . 0 . 1 against OSX SDK 10 . 9 ( M ) <nl> + 2 XCode 6 . 1 . 0 against OSX SDK 10 . 10 ( Y ) <nl> + 3 XCode 6 . 2 . 0 against OSX SDK 10 . 10 ( Y ) <nl> + 4 XCode 6 . 3 . 0 against OSX SDK 10 . 10 ( Y ) <nl> + 5 Xcode 6 . 4 . 0 against OSX SDK 10 . 10 ( Y ) <nl> + 6 Xcode 7 . x against OSX SDK 10 . 11 ( EC ) <nl> + 7 Xcode 8 . 0 against OSX SDK 10 . 12 ( EC ) <nl> + 8 Xcode 8 . 0 against OSX SDK 10 . 12 ( S ) <nl> + <nl> + # # # 3 . 2 Install Kodi build depends <nl> + <nl> + Kodi requires a set of build dependencies to be built and installed before you <nl> + will be able to build the Kodi main binary . These , often just called * depends * , <nl> + are installed using the commands described below ( with the latest macOS SDK <nl> + found on your system ) . <nl> + <nl> + In order to speed up compilation it is recommended to use ` make - j $ ( getconf <nl> + _NPROCESSORS_ONLN ) ` instead of ` make ` to compile on all available processor <nl> + cores . The build machine can also be configured to do this automatically by <nl> + adding ` export MAKEFLAGS = " - j $ ( getconf _NPROCESSORS_ONLN ) " ` to your shell config <nl> + ( e . g . ` ~ / . bashrc ` ) . <nl> + <nl> + # # # # 3 . 2 . a Compiling as 64 bit libraries ( recommended for most users ) <nl> + <nl> + cd $ HOME / Kodi <nl> + cd tools / depends <nl> + . / bootstrap <nl> + . / configure - - host = x86_64 - apple - darwin <nl> + make <nl> + <nl> + # # # # 3 . 2 . b Compiling as 32 bit libraries <nl> + <nl> + cd $ HOME / Kodi <nl> + cd tools / depends <nl> + . / bootstrap <nl> + . / configure - - host = i386 - apple - darwin <nl> + make <nl> + <nl> + # # # # 3 . 2 . c Advanced topics <nl> + <nl> + The dependencies are built into ` tools / depends ` and installed into <nl> + ` / Users / Shared / xbmc - depends ` . <nl> + <nl> + * * ADVANCED developers only * * : If you want to specify a macOS SDK version ( if <nl> + multiple versions are installed ) - then append it to the configure line <nl> + above . The example below would use the macOS SDK 10 . 9 : <nl> + <nl> + . / configure - - host = x86_64 - apple - darwin - - with - sdk = 10 . 9 <nl> + <nl> + Ensure that you also adapt the Xcode project to use this SDK version . <nl> + <nl> + # # # 3 . 3 Compile Kodi binary addons <nl> + <nl> + Kodi maintains a set of binary addons ( PVR clients , Visualizations , Audio DSP <nl> + plugins and more ) . They can be built as shown below : <nl> + <nl> + cd $ HOME / Kodi <nl> + cd tools / depends <nl> + make - C target / binary - addons <nl> + <nl> + * * NOTE * * : If you only want to build specific addons you can specify like this : <nl> + <nl> + cd $ HOME / Kodi <nl> + cd tools / depends <nl> + make - C target / binary - addons ADDONS = " pvr . hts pvr . dvblink " <nl> + <nl> + # # 4 How to compile Kodi <nl> + <nl> + # # # 4 . 1 Using Xcode ( or xcodebuild ) <nl> + <nl> + # # # # 4 . 1 . 1 Generate CMake project files <nl> + <nl> + Before you can use Xcode to build Kodi , the Xcode project has to be generated <nl> + with CMake . 
Note that CMake is compiled as part of the depends and doesn ' t have <nl> + to be installed separately . Also a Toolchain file has been generated which is <nl> + used to configure CMake . <nl> + <nl> + mkdir $ HOME / Kodi / build <nl> + cd $ HOME / Kodi / build <nl> + / Users / Shared / xbmc - depends / buildtools - native / bin / cmake - G Xcode - DCMAKE_TOOLCHAIN_FILE = / Users / Shared / xbmc - depends / macosx10 . 12_x86_64 - target / share / Toolchain . cmake . . / project / cmake <nl> + <nl> + The toolchain file location differs depending on your macOS and SDK version and <nl> + you have to replace ` macosx10 . 12_x86_64 ` in the filename above with the correct <nl> + file on your system . Check the directory content to get the filename . <nl> + <nl> + # # # # 4 . 1 . 2 Compilation using Xcode <nl> + <nl> + Start Xcode and open the Kodi project ( kodi . xcodeproj ) located in <nl> + ` $ HOME / Kodi / build ` . <nl> + <nl> + If you have selected a specific OSX SDK Version in step 3 . 2 then you might need <nl> + to adapt the active target to use the same OSX SDK version . Otherwise the build will fail . <nl> + <nl> + The build process will take a long time when building the first time . <nl> + You can see the progress in " Build Results " . There are a large number of static <nl> + and dynamic libraries that will need to be built . Once these are built , <nl> + subsequent builds will be faster . <nl> + <nl> + After the build , you can either run Kodi for Mac from Xcode or run it from <nl> + the command - line . If you run it from the command - line : <nl> + <nl> + $ HOME / Kodi / build / Debug / kodi . bin <nl> + <nl> + Or the release version : <nl> + <nl> + $ HOME / Kodi / build / Release / kodi . bin <nl> + <nl> + Alternatively , you can also build via Xcode from the command - line with <nl> + xcodebuild , triggered by CMake : <nl> + <nl> + cd $ HOME / Kodi / build <nl> + cmake - - build . - - config " Debug " - - - verbose - jobs $ ( getconf _NPROCESSORS_ONLN ) <nl> + <nl> + You can specify ` Release ` instead of ` Debug ` as a configuration . <nl> + <nl> + # # # 4 . 2 Compilation using command - line ( make ) <nl> + <nl> + CMake is also able to generate a Makefile based project that can be used to <nl> + compile with make : <nl> + <nl> + mkdir $ HOME / Kodi / build <nl> + cd $ HOME / Kodi / build <nl> + / Users / Shared / xbmc - depends / buildtools - native / bin / cmake - DCMAKE_TOOLCHAIN_FILE = / Users / Shared / xbmc - depends / macosx10 . 12_x86_64 - target / share / Toolchain . cmake . . / project / cmake <nl> + make <nl> + <nl> + The ` - G ` parameter defines the type of the generated project . Here it either <nl> + has to be omitted , or specifically set to ` - G " Unix Makefiles " ` . <nl> + <nl> + # # 5 Packaging <nl> + <nl> + CMake generates a target called ` dmg ` which will package Kodi . app for <nl> + distribution . <nl> + <nl> + After Kodi has been built , the target can be triggered by selecting it in <nl> + Xcode , or if using makefiles by issuing : <nl> + <nl> + make dmg <nl> + <nl> + # # 6 References <nl> + <nl> + - [ project / cmake / README . md ] ( https : / / github . com / xbmc / xbmc / tree / master / project / cmake / README . md ) <nl> + - [ tools / depends / README ] ( https : / / github . com / xbmc / xbmc / tree / master / tools / depends / README ) <nl> + - [ MAC OS X section in forum . kodi . tv ] ( http : / / forum . kodi . tv / forumdisplay . php ? fid = 56 ) <nl>
[ docs ] Update README . osx
xbmc/xbmc
418f31e424b9b4bfcfd205f3f789cb9ae1d42bb5
2016-12-22T18:09:36Z
mmm a / dlib / statistics / statistics . h <nl> ppp b / dlib / statistics / statistics . h <nl> namespace dlib <nl> < < " \ n \ tthis : " < < this <nl> ) ; <nl> <nl> - return covariance ( ) / std : : sqrt ( variance_x ( ) * variance_y ( ) ) ; <nl> + T temp = std : : sqrt ( variance_x ( ) * variance_y ( ) ) ; <nl> + if ( temp ! = 0 ) <nl> + return covariance ( ) / temp ; <nl> + else <nl> + return 0 ; / / just say it ' s zero if there isn ' t any variance in x or y . <nl> } <nl> <nl> T variance_x ( <nl>
Avoid division by zero in running_scalar_covariance_decayed .
davisking/dlib
539993f359e37f93a00c38ccceb6d6a60832f0b0
2016-11-14T20:47:04Z
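The guard keeps the correlation total: when either series has zero variance the Pearson denominator is zero and the ratio is undefined, so the patch defines it as 0. The same logic in a standalone form (illustrative only, not dlib's API):

    #include <cmath>

    // Pearson correlation with the zero-variance guard from the patch:
    // report 0 instead of dividing by zero when an input is constant.
    double safe_correlation(double covariance, double variance_x, double variance_y) {
      const double denom = std::sqrt(variance_x * variance_y);
      return denom != 0 ? covariance / denom : 0;
    }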
mmm a / lib / Sema / CSSimplify . cpp <nl> ppp b / lib / Sema / CSSimplify . cpp <nl> ConstraintSystem : : simplifyKeyPathApplicationConstraint ( <nl> return SolutionKind : : Unsolved ; <nl> } ; <nl> <nl> - / / When locator points to a KeyPathDynamicMemberLookup , skip the <nl> + / / When locator points to a KeyPathDynamicMemberLookup , reject the <nl> / / key path application . <nl> - if ( locator . getBaseLocator ( ) - > isForKeyPathDynamicMemberLookup ( ) ) { <nl> + auto last = locator . last ( ) ; <nl> + if ( last & & last - > isKeyPathDynamicMember ( ) ) { <nl> return SolutionKind : : Error ; <nl> } <nl> <nl> mmm a / test / attr / attr_dynamic_member_lookup . swift <nl> ppp b / test / attr / attr_dynamic_member_lookup . swift <nl> func test_combination_of_keypath_and_string_lookups ( ) { <nl> _ = outer . hello . world / / Ok <nl> } <nl> } <nl> + <nl> + / / SR - 12626 <nl> + @ dynamicMemberLookup <nl> + struct SR12626 { <nl> + var i : Int <nl> + <nl> + subscript ( dynamicMember member : KeyPath < SR12626 , Int > ) - > Int { <nl> + get { self [ keyPath : member ] } <nl> + set { self [ keyPath : member ] = newValue } / / expected - error { { cannot assign through subscript : ' member ' is a read - only key path } } <nl> + } <nl> + } <nl> + <nl> + / / SR - 12425 <nl> + public struct SR12425_S { } <nl> + <nl> + @ dynamicMemberLookup <nl> + public struct SR12425_R { } <nl> + <nl> + internal var rightStructInstance : SR12425_R = SR12425_R ( ) <nl> + <nl> + public extension SR12425_R { <nl> + subscript < T > ( dynamicMember member : WritableKeyPath < SR12425_S , T > ) - > T { <nl> + / / TODO ( Diagnostics ) : bad diagnostic for member assign . <nl> + / / A better diagnostic would be : key path of type WritableKeyPath < SR12425_S , T > cannot be applied to a base of type SR12425_R <nl> + get { rightStructInstance [ keyPath : member ] } / / expected - error { { cannot convert return expression of type ' Any ? ' to return type ' T ' } } <nl> + set { rightStructInstance [ keyPath : member ] = newValue } / / expected - error { { type of expression is ambiguous without more context } } <nl> + } <nl> + } <nl>
[ tests ] Adding SR - 12626 and SR - 12425 test cases
apple/swift
ab81236c8cbc5da9b5200b50268fcde177630549
2020-04-20T16:46:38Z
mmm a / dbms / tests / queries / 0_stateless / 00224_distributed_aggregation_memory_efficient_and_overflows . reference <nl> ppp b / dbms / tests / queries / 0_stateless / 00224_distributed_aggregation_memory_efficient_and_overflows . reference <nl> <nl> - 0 2 <nl> - 1 2 <nl> - 2 2 <nl> - 3 2 <nl> - 4 2 <nl> - 5 2 <nl> - 6 2 <nl> - 7 2 <nl> - 8 2 <nl> - 9 2 <nl> + 0 <nl> + 0 <nl> + 0 <nl> + 0 <nl> + 0 <nl> + 0 <nl> + 0 <nl> + 0 <nl> + 0 <nl> + 0 <nl> <nl> - 0 200000 <nl> - 0 2 <nl> - 1 2 <nl> - 2 2 <nl> - 3 2 <nl> - 4 2 <nl> - 5 2 <nl> - 6 2 <nl> - 7 2 <nl> - 8 2 <nl> - 9 2 <nl> + 1 <nl> + 0 <nl> + 0 <nl> + 0 <nl> + 0 <nl> + 0 <nl> + 0 <nl> + 0 <nl> + 0 <nl> + 0 <nl> + 0 <nl> <nl> - 0 200000 <nl> + 1 <nl> mmm a / dbms / tests / queries / 0_stateless / 00224_distributed_aggregation_memory_efficient_and_overflows . sql <nl> ppp b / dbms / tests / queries / 0_stateless / 00224_distributed_aggregation_memory_efficient_and_overflows . sql <nl> <nl> DROP TABLE IF EXISTS test . numbers_100k_log ; <nl> CREATE TABLE test . numbers_100k_log ENGINE = Log AS SELECT * FROM system . numbers LIMIT 100000 ; <nl> <nl> - SELECT number , count ( ) FROM remote ( ' 127 . 0 . 0 . { 1 , 2 } ' , test . numbers_100k_log ) GROUP BY number WITH TOTALS ORDER BY number LIMIT 10 ; <nl> + SELECT count ( ) = 200000 FROM remote ( ' 127 . 0 . 0 . { 1 , 2 } ' , test . numbers_100k_log ) GROUP BY number WITH TOTALS ORDER BY number LIMIT 10 ; <nl> <nl> SET distributed_aggregation_memory_efficient = 1 , <nl> group_by_two_level_threshold = 1000 , <nl> SET distributed_aggregation_memory_efficient = 1 , <nl> max_rows_to_group_by = 1000 , <nl> totals_mode = ' after_having_auto ' ; <nl> <nl> - SELECT number , count ( ) FROM remote ( ' 127 . 0 . 0 . { 1 , 2 } ' , test . numbers_100k_log ) GROUP BY number WITH TOTALS ORDER BY number LIMIT 10 ; <nl> + SELECT count ( ) = 200000 FROM remote ( ' 127 . 0 . 0 . { 1 , 2 } ' , test . numbers_100k_log ) GROUP BY number WITH TOTALS ORDER BY number LIMIT 10 ; <nl> <nl> DROP TABLE test . numbers_100k_log ; <nl>
dbms : fixed test [ # METR - 17536 ] .
ClickHouse/ClickHouse
72a964d86558c1eb0718fa214fd0593d04c7d9cb
2015-09-19T09:44:02Z
mmm a / Source / CNTK / BrainScript / CNTKCoreLib / CNTK . core . bs <nl> ppp b / Source / CNTK / BrainScript / CNTKCoreLib / CNTK . core . bs <nl> EmbeddingLayer { outDim , # dimension of embeddi <nl> # in : [ ( shifting dims ) | ( reduction dim ) | | ( sample dims ) ] <nl> # kernel : [ ( filter dims ) | ( reduction dim ) | ( output dim ) | ] <nl> # out : [ ( shifting dims ) ] | | ( output dim ) | ( sample dims ) ] <nl> + # BUGBUG : filterShape should be first , so that numOutputChannels can default to 1 ( to denote a normal filter ) , and remaining parameters consistent with Times ( ) <nl> ConvolutionalLayer { numOutputChannels , # e . g . ( 1 ) or BS . Constants . None <nl> filterShape , # e . g . ( 3 : 3 ) <nl> bias = true , <nl> mmm a / bindings / python / cntk / blocks . py <nl> ppp b / bindings / python / cntk / blocks . py <nl> <nl> # blocks - - basic building blocks that are semantically not layers ( not used in a layered fashion ) <nl> # e . g . the LSTM <nl> <nl> - # TODO : clean up the dependencies <nl> + # TODO : further clean up the dependencies <nl> import numpy as np <nl> import sys <nl> import os <nl> import time <nl> - from cntk import DeviceDescriptor , Trainer , Axis , text_format_minibatch_source , StreamConfiguration , slice , sigmoid , tanh , past_value , future_value <nl> - from cntk . learner import sgd , fsadagrad , learning_rates_per_sample , momentums_per_sample <nl> - from cntk . ops import parameter , constant , input_variable , placeholder_variable , times , cross_entropy_with_softmax , combine , classification_error <nl> - import itertools <nl> + from cntk import parameter , constant , input_variable , placeholder_variable , combine <nl> + from cntk . ops import times , slice , sigmoid , tanh , past_value , future_value <nl> from cntk . utils . debughelpers import _name_node , _node_name , _node_description , _log_node <nl> from cntk . utils import Record , _as_tuple <nl> from cntk . initializer import glorot_uniform <nl> <nl> - abs_path = os . path . dirname ( os . path . abspath ( __file__ ) ) <nl> - sys . path . append ( os . path . join ( abs_path , " . . " , " . . " ) ) <nl> - <nl> # TODO : As you are on the level of cntk here , you could use relative imports : <nl> # from . ops . functions import Function <nl> # No - > SystemError : Parent module ' ' not loaded , cannot perform relative import <nl> mmm a / bindings / python / cntk / layers . py <nl> ppp b / bindings / python / cntk / layers . py <nl> <nl> # this is what we initialize weight matrices from by default <nl> from cntk . blocks import _default_initializer <nl> <nl> - # Linear - - create a fully - connected linear projection layer <nl> - # TODO : eliminate Linear ; only have Dense <nl> + # Dense - - create a fully - connected linear projection layer with optional non - linear activation <nl> # Note : shape may describe a tensor as well . 
<nl> # TODO : change to new random - init descriptor <nl> # inputRank given : number of zeroes to add to W ( mapRank must not be given ) <nl> # mapRank given : expand W to leave exactly mapRank axes ( inputRank must not be given ) <nl> # none given : expand W to all ( same as mapRank = 0 ) <nl> - def Dense ( shape , _inf , init = _default_initializer , activation = None , input_rank = None , map_rank = None , bias = True , init_bias = 0 ) : <nl> + def Dense ( shape , init = _default_initializer , activation = None , input_rank = None , map_rank = None , bias = True , init_bias = 0 ) : <nl> if activation is None : # TODO : change default to identity once we no longer need _inf <nl> - # activation = Identity ( _inf = shape ) <nl> activation = Identity ( ) <nl> <nl> out_shape = _as_tuple ( shape ) <nl> def Dense ( shape , _inf , init = _default_initializer , activation = None , input_rank = No <nl> # then Times ( W , x , outputRank = outputRank , inferInputRankToMap = inferInputRankToMap ) + b <nl> # else Times ( W , x , outputRank = outputRank , inferInputRankToMap = inferInputRankToMap ) <nl> <nl> - W = Parameter ( _inf . shape + out_shape , init = init , name = ' W ' ) <nl> - b = Parameter ( out_shape , init = init_bias , name = ' b ' ) if bias else None <nl> - x = Placeholder ( _inf = _inf , name = ' dense_arg ' ) <nl> + W = Parameter ( ( InferredDimension , ) + out_shape , init = init , name = ' W ' ) <nl> + b = Parameter ( out_shape , init = init_bias , name = ' b ' ) if bias else None <nl> + x = Placeholder ( name = ' dense_arg ' ) <nl> apply_x = Function . __matmul__ ( x , W ) + b if bias else \ <nl> Function . __matmul__ ( x , W ) <nl> _extend_Function ( apply_x ) # ( this gets us the > > operator - - TODO : remove once Function natively supports this ) <nl> def Dense ( shape , _inf , init = _default_initializer , activation = None , input_rank = No <nl> # TODO : Once _inf is gone , change interface to pass weights as a Constant , e . g . <nl> # Embedding ( shape , constant ( np . load ( ' PATH ' ) ) ) <nl> # Not nice since now we don ' t need the output shape either . Grmpf . <nl> - def Embedding ( shape , _inf , weights = None , init = _default_initializer , transpose = False ) : <nl> + def Embedding ( shape , weights = None , init = _default_initializer , transpose = False ) : <nl> shape = _as_tuple ( shape ) <nl> if weights is None : # no weights given : learn the embedding <nl> full_shape = ( InferredDimension , ) + shape <nl> def Embedding ( shape , _inf , weights = None , init = _default_initializer , transpose = Fa <nl> # TODO : infer full_shape from weights ? Which in turn should be a constant . . . lots of TODO here <nl> full_shape = ( shape + ( InferredDimension , ) ) if transpose else ( ( InferredDimension , ) + shape ) <nl> E = Constant ( full_shape , init = weights , name = ' E ' ) # TODO : can ' weights ' be a CNTK object already ? Then how to do this ? <nl> - # x = Placeholder ( _inf = _inf , name = ' embedding_arg ' ) <nl> x = Placeholder ( name = ' embedding_arg ' ) <nl> apply_x = Function . __matmul__ ( E , x ) if transpose else \ <nl> Function . __matmul__ ( x , E ) # x is expected to be sparse one - hot <nl> mmm a / bindings / python / cntk / models . py <nl> ppp b / bindings / python / cntk / models . py <nl> <nl> import sys <nl> import os <nl> import time <nl> - # from cntk import DeviceDescriptor , Trainer , Axis , text_format_minibatch_source , StreamConfiguration <nl> - # from cntk . 
learner import sgd , fsadagrad , learning_rates_per_sample , momentums_per_sample <nl> - # from cntk . ops import parameter , input_variable , placeholder_variable , times , cross_entropy_with_softmax , combine , classification_error <nl> from cntk . utils . debughelpers import _name_node , _node_name , _node_description , _log_node <nl> - from cntk . layers import * <nl> - from cntk . blocks import * <nl> - from cntk . blocks import _name_and_extend_Function , _wrap_rename_Function # ( debugging ) <nl> + # from cntk . layers import * <nl> + from cntk . blocks import Identity <nl> + from cntk . blocks import _wrap_rename_Function # ( debugging ) <nl> <nl> # Sequential - - composite that applies a sequence of functions onto an input <nl> # Sequential ( [ F , G , H ] ) = = = F > > G > > H <nl> # TODO : address this feedback : " I find this arbitrary . You can have Sequential as part of a bigger layer . Or you can view a linear layer already as a model ( which is part of the bigger model ) . " <nl> - def Sequential ( arrayOfFunctions , _inf ) : <nl> + def Sequential ( arrayOfFunctions ) : <nl> import functools # reduce ( ) <nl> - apply_x = functools . reduce ( lambda f , g : f > > g , arrayOfFunctions , Identity ( _inf = _inf ) ) <nl> + apply_x = functools . reduce ( lambda f , g : f > > g , arrayOfFunctions , Identity ( ) ) <nl> apply_x = _wrap_rename_Function ( apply_x , ' Sequential ' ) <nl> return apply_x ; <nl> mmm a / bindings / python / examples / SLUHandsOn / SLUHandsOn . py <nl> ppp b / bindings / python / examples / SLUHandsOn / SLUHandsOn . py <nl> def create_reader ( path ) : <nl> <nl> def create_model ( _inf ) : # TODO : all the _inf stuff will go away once dimension inference works . Should this be a function then ? <nl> return Sequential ( [ <nl> - Embedding ( emb_dim , _inf = _inf ) , <nl> + Embedding ( emb_dim ) , <nl> Recurrence ( LSTM ( shape = hidden_dim , _inf = _inf . with_shape ( emb_dim ) ) , _inf = _inf . with_shape ( emb_dim ) , go_backwards = False , <nl> # ) , <nl> initial_state = Constant ( 0 . 1 , shape = ( 1 ) ) ) , # ( this last option mimics a default in BS to recreate identical results ) <nl> - Dense ( label_dim , _inf = _inf . with_shape ( hidden_dim ) ) <nl> - ] , _inf = _inf ) <nl> + Dense ( label_dim ) <nl> + ] ) <nl> <nl> # # # # # # # # # # # # # # # # # # # # # # # # <nl> # train action # <nl>
removed _inf from Dense ( ) and Sequential ( ) ;
microsoft/CNTK
6c004f2a03ca0e93ab3b3e607d2bcbfe39b03ae6
2016-10-18T03:36:45Z
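The `Sequential` body shows that the whole combinator is just a fold of `>>` over the layer list, seeded with `Identity()`. The same shape in a C++ sketch (hypothetical, with `std::function` over doubles standing in for CNTK functions):

    #include <functional>
    #include <vector>

    using Fn = std::function<double(double)>;

    // Fold a list of functions into one composite, seeded with the identity:
    // sequential({F, G, H}) behaves like F >> G >> H.
    Fn sequential(const std::vector<Fn>& fns) {
      Fn acc = [](double x) { return x; };  // Identity()
      for (const Fn& f : fns) {
        Fn prev = acc;
        acc = [prev, f](double x) { return f(prev(x)); };  // prev >> f
      }
      return acc;
    }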
mmm a / tensorflow / python / ops / control_flow_ops . py <nl> ppp b / tensorflow / python / ops / control_flow_ops . py <nl> def _RemoveExternalControlEdges ( self , op ) : <nl> <nl> def AddInnerOp ( self , op ) : <nl> " " " Notifies a scope about an operator added to an inner scope . " " " <nl> - pass <nl> + if self . _outer_context : <nl> + self . _outer_context . AddInnerOp ( op ) <nl> <nl> def GetControlPivot ( self ) : <nl> " " " Returns the pivot node for this context , or None . " " " <nl>
Make ControlFlowContext . AddInnerOp recursively propagate the inner op to the enclosing context by default .
tensorflow/tensorflow
9b6b179fe33a0daab4c6b4c7314f77e49825f999
2017-09-26T20:36:40Z
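The one-line default turns `AddInnerOp` into a chain-up through nested control-flow scopes, so an op created deep inside while/cond nesting reaches every enclosing context without each subclass re-implementing the walk. The delegation pattern in isolation (hypothetical C++ types, not TensorFlow code):

    // Each scope forwards notifications outward by default; subclasses
    // that need extra bookkeeping can override and still chain up.
    struct Context {
      Context* outer_context = nullptr;
      virtual ~Context() = default;
      virtual void AddInnerOp(int op_id) {
        if (outer_context) outer_context->AddInnerOp(op_id);
      }
    };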
new file mode 100644 <nl> index 000000000000 . . fb6c95f55183 <nl> Binary files / dev / null and b / addons / skin . estouchy / media / flagging / video / av1 . png differ <nl> new file mode 100644 <nl> index 000000000000 . . fb6c95f55183 <nl> Binary files / dev / null and b / addons / skin . estuary / media / flags / videocodec / av1 . png differ <nl> mmm a / xbmc / GUIInfoManager . cpp <nl> ppp b / xbmc / GUIInfoManager . cpp <nl> const infomap container_str [ ] = { { " property " , CONTAINER_PROPERTY } , <nl> / / / _string_ , <nl> / / / @ return The video codec of the currently selected video . Common values : <nl> / / / - < b > 3iv2 < / b > <nl> + / / / - < b > av1 < / b > <nl> / / / - < b > avc1 < / b > <nl> / / / - < b > div2 < / b > <nl> / / / - < b > div3 < / b > <nl> mmm a / xbmc / cores / VideoPlayer / DVDDemuxers / DVDDemuxFFmpeg . cpp <nl> ppp b / xbmc / cores / VideoPlayer / DVDDemuxers / DVDDemuxFFmpeg . cpp <nl> std : : string CDVDDemuxFFmpeg : : GetStreamCodecName ( int iStreamId ) <nl> <nl> AVCodec * codec = avcodec_find_decoder ( stream - > codec ) ; <nl> if ( codec ) <nl> - strName = codec - > name ; <nl> + strName = avcodec_get_name ( codec - > id ) ; <nl> } <nl> return strName ; <nl> } <nl>
Merge pull request from jaylinski / flag / av1
xbmc/xbmc
d40a6cf390797ffb251bdc4060e0c71d96b7ed64
2020-05-24T15:52:10Z
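The demuxer change matters for the new flag images: `AVCodec::name` is the name of the decoder implementation, while `avcodec_get_name()` returns the canonical codec name that the skin flag files (such as `av1.png`) are keyed on. A small standalone illustration (assumes an FFmpeg development setup; the printed decoder name depends on how FFmpeg was built):

    extern "C" {
    #include <libavcodec/avcodec.h>
    }
    #include <cstdio>

    int main() {
      // The implementation name varies with the build (e.g. "libdav1d"),
      // while the canonical name is always "av1".
      const AVCodec* dec = avcodec_find_decoder(AV_CODEC_ID_AV1);
      if (dec) std::printf("decoder name:   %s\n", dec->name);
      std::printf("canonical name: %s\n", avcodec_get_name(AV_CODEC_ID_AV1));
      return 0;
    }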
mmm a / tensorflow / docs_src / get_started / premade_estimators . md <nl> ppp b / tensorflow / docs_src / get_started / premade_estimators . md <nl> other features so you can concentrate on your model . For more details see <nl> <nl> An Estimator is any class derived from @ { tf . estimator . Estimator } . TensorFlow <nl> provides a collection of <nl> - [ pre - made Estimators ] ( https : / / developers . google . com / machine - learning / glossary / # pre - made_Estimator ) <nl> + [ pre - made Estimators ] ( https : / / developers . google . com / machine - learning / glossary / # premade_Estimator ) <nl> ( for example , ` LinearRegressor ` ) to implement common ML algorithms . Beyond <nl> those , you may write your own <nl> [ custom Estimators ] ( https : / / developers . google . com / machine - learning / glossary / # custom_Estimator ) . <nl>
Fix broken link
tensorflow/tensorflow
643c14a80f961778e5a535f424aa9355ae26cdeb
2018-05-25T07:27:46Z
deleted file mode 100644 <nl> index 15e87443b5 . . 0000000000 <nl> mmm a / change / react - native - windows - 2020 - 10 - 23 - 17 - 54 - 31 - rnwDepsCheckDontElevate . json <nl> ppp / dev / null <nl> <nl> - { <nl> - " type " : " prerelease " , <nl> - " comment " : " Don ' t require elevation just to check dev dependencies " , <nl> - " packageName " : " react - native - windows " , <nl> - " email " : " asklar @ microsoft . com " , <nl> - " dependentChangeType " : " patch " , <nl> - " date " : " 2020 - 10 - 24T00 : 54 : 30 . 899Z " <nl> - } <nl> mmm a / packages / @ react - native - windows / tester / package . json <nl> ppp b / packages / @ react - native - windows / tester / package . json <nl> <nl> } , <nl> " peerDependencies " : { <nl> " react - native " : " 0 . 0 . 0 - 5bc67b658 " , <nl> - " react - native - windows " : " ^ 0 . 0 . 0 - canary . 188 " <nl> + " react - native - windows " : " ^ 0 . 0 . 0 - canary . 189 " <nl> } , <nl> " devDependencies " : { <nl> " @ rnw - scripts / eslint - config " : " 0 . 1 . 4 " , <nl> <nl> " just - scripts " : " ^ 0 . 44 . 7 " , <nl> " react - native " : " 0 . 0 . 0 - 5bc67b658 " , <nl> " react - native - platform - override " : " ^ 0 . 4 . 0 " , <nl> - " react - native - windows " : " ^ 0 . 0 . 0 - canary . 188 " , <nl> + " react - native - windows " : " ^ 0 . 0 . 0 - canary . 189 " , <nl> " typescript " : " ^ 3 . 8 . 3 " <nl> } <nl> } <nl> mmm a / packages / E2ETest / package . json <nl> ppp b / packages / E2ETest / package . json <nl> <nl> " prompt - sync " : " ^ 4 . 2 . 0 " , <nl> " react " : " 16 . 13 . 1 " , <nl> " react - native " : " 0 . 0 . 0 - 5bc67b658 " , <nl> - " react - native - windows " : " 0 . 0 . 0 - canary . 188 " <nl> + " react - native - windows " : " 0 . 0 . 0 - canary . 189 " <nl> } , <nl> " devDependencies " : { <nl> " @ babel / core " : " ^ 7 . 8 . 4 " , <nl> mmm a / packages / IntegrationTest / package . json <nl> ppp b / packages / IntegrationTest / package . json <nl> <nl> " chai " : " ^ 4 . 2 . 0 " , <nl> " react " : " 16 . 13 . 1 " , <nl> " react - native " : " 0 . 0 . 0 - 5bc67b658 " , <nl> - " react - native - windows " : " ^ 0 . 0 . 0 - canary . 188 " <nl> + " react - native - windows " : " ^ 0 . 0 . 0 - canary . 189 " <nl> } , <nl> " devDependencies " : { <nl> " @ babel / core " : " ^ 7 . 8 . 4 " , <nl> mmm a / packages / microsoft - reactnative - sampleapps / package . json <nl> ppp b / packages / microsoft - reactnative - sampleapps / package . json <nl> <nl> " dependencies " : { <nl> " react " : " 16 . 13 . 1 " , <nl> " react - native " : " 0 . 0 . 0 - 5bc67b658 " , <nl> - " react - native - windows " : " 0 . 0 . 0 - canary . 188 " <nl> + " react - native - windows " : " 0 . 0 . 0 - canary . 189 " <nl> } , <nl> " devDependencies " : { <nl> " @ babel / core " : " ^ 7 . 8 . 4 " , <nl> mmm a / packages / playground / package . json <nl> ppp b / packages / playground / package . json <nl> <nl> " @ react - native - windows / tester " : " 0 . 0 . 1 " , <nl> " react " : " 16 . 13 . 1 " , <nl> " react - native " : " 0 . 0 . 0 - 5bc67b658 " , <nl> - " react - native - windows " : " 0 . 0 . 0 - canary . 188 " <nl> + " react - native - windows " : " 0 . 0 . 0 - canary . 189 " <nl> } , <nl> " devDependencies " : { <nl> " @ babel / core " : " ^ 7 . 8 . 4 " , <nl> mmm a / vnext / CHANGELOG . json <nl> ppp b / vnext / CHANGELOG . json <nl> <nl> { <nl> " name " : " react - native - windows " , <nl> " entries " : [ <nl> + { <nl> + " date " : " Tue , 27 Oct 2020 05 : 04 : 48 GMT " , <nl> + " tag " : " react - native - windows_v0 . 
0 . 0 - canary . 189 " , <nl> + " version " : " 0 . 0 . 0 - canary . 189 " , <nl> + " comments " : { <nl> + " prerelease " : [ <nl> + { <nl> + " comment " : " Don ' t require elevation just to check dev dependencies " , <nl> + " author " : " asklar @ microsoft . com " , <nl> + " commit " : " a1d37568673072f9e7fbd7f44a52857c2dd6035b " , <nl> + " package " : " react - native - windows " <nl> + } <nl> + ] <nl> + } <nl> + } , <nl> { <nl> " date " : " Sat , 24 Oct 2020 05 : 07 : 36 GMT " , <nl> " tag " : " react - native - windows_v0 . 0 . 0 - canary . 188 " , <nl> mmm a / vnext / CHANGELOG . md <nl> ppp b / vnext / CHANGELOG . md <nl> <nl> # Change Log - react - native - windows <nl> <nl> - This log was last generated on Sat , 24 Oct 2020 05 : 07 : 36 GMT and should not be manually modified . <nl> + This log was last generated on Tue , 27 Oct 2020 05 : 04 : 48 GMT and should not be manually modified . <nl> <nl> < ! - - Start content - - > <nl> <nl> + # # 0 . 0 . 0 - canary . 189 <nl> + <nl> + Tue , 27 Oct 2020 05 : 04 : 48 GMT <nl> + <nl> + # # # Changes <nl> + <nl> + - Don ' t require elevation just to check dev dependencies ( asklar @ microsoft . com ) <nl> + <nl> # # 0 . 0 . 0 - canary . 188 <nl> <nl> Sat , 24 Oct 2020 05 : 07 : 36 GMT <nl> mmm a / vnext / package . json <nl> ppp b / vnext / package . json <nl> <nl> { <nl> " name " : " react - native - windows " , <nl> - " version " : " 0 . 0 . 0 - canary . 188 " , <nl> + " version " : " 0 . 0 . 0 - canary . 189 " , <nl> " license " : " MIT " , <nl> " repository " : { <nl> " type " : " git " , <nl>
applying package updates * * * NO_CI * * *
microsoft/react-native-windows
19854ad827679cf771089ce90b008a47c4435a4d
2020-10-27T05:04:48Z
mmm a / src / heap / mark - compact . cc <nl> ppp b / src / heap / mark - compact . cc <nl> void MarkCompactCollector : : ClearMarkbits ( ) { <nl> <nl> class MarkCompactCollector : : SweeperTask : public v8 : : Task { <nl> public : <nl> - SweeperTask ( Heap * heap , PagedSpace * space ) : heap_ ( heap ) , space_ ( space ) { } <nl> + SweeperTask ( Heap * heap , AllocationSpace space_to_start ) <nl> + : heap_ ( heap ) , space_to_start_ ( space_to_start ) { } <nl> <nl> virtual ~ SweeperTask ( ) { } <nl> <nl> private : <nl> / / v8 : : Task overrides . <nl> void Run ( ) override { <nl> - heap_ - > mark_compact_collector ( ) - > SweepInParallel ( space_ , 0 ) ; <nl> + DCHECK_GE ( space_to_start_ , FIRST_PAGED_SPACE ) ; <nl> + DCHECK_LE ( space_to_start_ , LAST_PAGED_SPACE ) ; <nl> + const int offset = space_to_start_ - FIRST_PAGED_SPACE ; <nl> + const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1 ; <nl> + for ( int i = 0 ; i < num_spaces ; i + + ) { <nl> + const int space_id = FIRST_PAGED_SPACE + ( ( i + offset ) % num_spaces ) ; <nl> + DCHECK_GE ( space_id , FIRST_PAGED_SPACE ) ; <nl> + DCHECK_LE ( space_id , LAST_PAGED_SPACE ) ; <nl> + heap_ - > mark_compact_collector ( ) - > SweepInParallel ( <nl> + heap_ - > paged_space ( space_id ) , 0 ) ; <nl> + } <nl> heap_ - > mark_compact_collector ( ) - > pending_sweeper_tasks_semaphore_ . Signal ( ) ; <nl> } <nl> <nl> Heap * heap_ ; <nl> - PagedSpace * space_ ; <nl> + AllocationSpace space_to_start_ ; <nl> <nl> DISALLOW_COPY_AND_ASSIGN ( SweeperTask ) ; <nl> } ; <nl> void MarkCompactCollector : : StartSweeperThreads ( ) { <nl> DCHECK ( free_list_code_space_ . get ( ) - > IsEmpty ( ) ) ; <nl> DCHECK ( free_list_map_space_ . get ( ) - > IsEmpty ( ) ) ; <nl> V8 : : GetCurrentPlatform ( ) - > CallOnBackgroundThread ( <nl> - new SweeperTask ( heap ( ) , heap ( ) - > old_space ( ) ) , <nl> - v8 : : Platform : : kShortRunningTask ) ; <nl> + new SweeperTask ( heap ( ) , OLD_SPACE ) , v8 : : Platform : : kShortRunningTask ) ; <nl> V8 : : GetCurrentPlatform ( ) - > CallOnBackgroundThread ( <nl> - new SweeperTask ( heap ( ) , heap ( ) - > code_space ( ) ) , <nl> - v8 : : Platform : : kShortRunningTask ) ; <nl> + new SweeperTask ( heap ( ) , CODE_SPACE ) , v8 : : Platform : : kShortRunningTask ) ; <nl> V8 : : GetCurrentPlatform ( ) - > CallOnBackgroundThread ( <nl> - new SweeperTask ( heap ( ) , heap ( ) - > map_space ( ) ) , <nl> - v8 : : Platform : : kShortRunningTask ) ; <nl> + new SweeperTask ( heap ( ) , MAP_SPACE ) , v8 : : Platform : : kShortRunningTask ) ; <nl> } <nl> <nl> <nl>
[ heap ] Collaborating sweeper tasks
v8/v8
8902513e4d324348758b9ee794d8e9b026a9e7af
2016-02-09T15:09:18Z
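Each `SweeperTask` now receives only a starting space and then sweeps every paged space in a rotated order, so a task that finishes its own space early collaborates on the others instead of idling. The rotation by itself (plain C++, integers standing in for the space ids):

    #include <cstdio>

    int main() {
      const int num_spaces = 3;  // stand-ins for OLD_SPACE, CODE_SPACE, MAP_SPACE
      for (int offset = 0; offset < num_spaces; ++offset) {  // one row per task
        for (int i = 0; i < num_spaces; ++i) {
          std::printf("%d ", (i + offset) % num_spaces);     // sweep order
        }
        std::printf("\n");
      }
      return 0;  // prints 0 1 2 / 1 2 0 / 2 0 1: every task visits every space
    }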
deleted file mode 100644 <nl> index c5741415d81 . . 00000000000 <nl> mmm a / admin / static / less / bootstrap / README . md <nl> ppp / dev / null <nl> <nl> - # Bootstrap ' s less files # <nl> - <nl> - # # Note # # <nl> - This is a fork of Bootstrap . <nl> - <nl> - The reason why we did a fork is because this version of Bootstrap does not compile with Less 1 . 4 <nl> - <nl> - From Less 1 . 4 changelog : <nl> - ( ~ " . myclass_ @ { index } " ) { . . . selector interpolation is deprecated , do this instead . myclass_ @ { index } { . . . . This works in 1 . 3 . 1 onwards . <nl> - <nl> - <nl> - This change has been done at the following lines : <nl> - Line 529 <nl> - ` ` ` <nl> - - ( ~ " . span @ { index } " ) { . span ( @ index ) ; } <nl> - + . span @ { index } { . span ( @ index ) ; } <nl> - ` ` ` <nl> - <nl> - Line 535 <nl> - ` ` ` <nl> - - ( ~ " . offset @ { index } " ) { . offset ( @ index ) ; } <nl> - + . offset @ { index } { . offset ( @ index ) ; } <nl> - ` ` ` <nl> - <nl> - Line 572 <nl> - ` ` ` <nl> - - ( ~ " > . span @ { index } " ) { . span ( @ index ) ; } <nl> - + > . span @ { index } { . span ( @ index ) ; } <nl> - ` ` ` <nl> - <nl> - Line 601 <nl> - ` ` ` <nl> - - ( ~ " input . span @ { index } , textarea . span @ { index } , . uneditable - input . span @ { index } " ) { . span ( @ index ) ; } <nl> - + input . span @ { index } , textarea . span @ { index } , . uneditable - input . span @ { index } { . span ( @ index ) ; } <nl> - ` ` ` <nl>
Rename the README file
rethinkdb/rethinkdb
9442a8ab7f83df815a1851eac61456bc8f2061d9
2013-06-27T01:43:53Z
mmm a / servers / visual / visual_server_raster . cpp <nl> ppp b / servers / visual / visual_server_raster . cpp <nl> void VisualServerRaster : : set_default_clear_color ( const Color & p_color ) { <nl> clear_color = p_color ; <nl> } <nl> <nl> - Color VisualServerRaster : : get_default_clear_color ( ) const { <nl> - <nl> - return clear_color ; <nl> - } <nl> - <nl> void VisualServerRaster : : set_boot_image ( const Image & p_image , const Color & p_color , bool p_scale ) { <nl> <nl> if ( p_image . empty ( ) ) <nl> mmm a / servers / visual / visual_server_raster . h <nl> ppp b / servers / visual / visual_server_raster . h <nl> class VisualServerRaster : public VisualServer { <nl> <nl> virtual void set_boot_image ( const Image & p_image , const Color & p_color , bool p_scale ) ; <nl> virtual void set_default_clear_color ( const Color & p_color ) ; <nl> - virtual Color get_default_clear_color ( ) const ; <nl> <nl> VisualServerRaster ( Rasterizer * p_rasterizer ) ; <nl> ~ VisualServerRaster ( ) ; <nl> mmm a / servers / visual / visual_server_wrap_mt . h <nl> ppp b / servers / visual / visual_server_wrap_mt . h <nl> class VisualServerWrapMT : public VisualServer { <nl> <nl> FUNC3 ( set_boot_image , const Image & , const Color & , bool ) ; <nl> FUNC1 ( set_default_clear_color , const Color & ) ; <nl> - FUNC0RC ( Color , get_default_clear_color ) ; <nl> <nl> FUNC0R ( RID , get_test_cube ) ; <nl> <nl> mmm a / servers / visual_server . cpp <nl> ppp b / servers / visual_server . cpp <nl> void VisualServer : : _bind_methods ( ) { <nl> ObjectTypeDB : : bind_method ( _MD ( " free_rid " ) , & VisualServer : : free ) ; <nl> <nl> ObjectTypeDB : : bind_method ( _MD ( " set_default_clear_color " ) , & VisualServer : : set_default_clear_color ) ; <nl> - ObjectTypeDB : : bind_method ( _MD ( " get_default_clear_color " ) , & VisualServer : : get_default_clear_color ) ; <nl> <nl> ObjectTypeDB : : bind_method ( _MD ( " get_render_info " ) , & VisualServer : : get_render_info ) ; <nl> <nl> mmm a / servers / visual_server . h <nl> ppp b / servers / visual_server . h <nl> class VisualServer : public Object { <nl> <nl> virtual void set_boot_image ( const Image & p_image , const Color & p_color , bool p_scale ) = 0 ; <nl> virtual void set_default_clear_color ( const Color & p_color ) = 0 ; <nl> - virtual Color get_default_clear_color ( ) const = 0 ; <nl> <nl> enum Features { <nl> FEATURE_SHADERS , <nl>
Revert " Add / expose VisualServer : : get_default_clear_color ( ) "
godotengine/godot
86b0669f4c1d10ddf393b3f627c1da7078fb4863
2017-01-02T19:32:52Z
mmm a / tests / cpp - tests / Classes / ExtensionsTest / CocoStudioActionTimelineTest / ActionTimelineTestScene . cpp <nl> ppp b / tests / cpp - tests / Classes / ExtensionsTest / CocoStudioActionTimelineTest / ActionTimelineTestScene . cpp <nl> void TestActionTimelineSkeleton : : onEnter ( ) <nl> { <nl> nestSkeleton - > removeFromParentAndCleanup ( false ) ; <nl> } <nl> - / / bug fixed while leftleg ' s child hide with leftleg ' s visible <nl> } ) ; <nl> } <nl> <nl>
remove comment in test case
cocos2d/cocos2d-x
d00e54ad8ad3b752bd05edc03c4242c3ec17d3dc
2015-08-03T08:55:55Z
mmm a / src / main . cpp <nl> ppp b / src / main . cpp <nl> bool CTransaction : : RemoveFromMemoryPool ( ) <nl> / / Remove transaction from memory pool <nl> CRITICAL_BLOCK ( cs_mapTransactions ) <nl> { <nl> - BOOST_FOREACH ( const CTxIn & txin , vin ) <nl> - mapNextTx . erase ( txin . prevout ) ; <nl> - mapTransactions . erase ( GetHash ( ) ) ; <nl> - nTransactionsUpdated + + ; <nl> - - - nPooledTx ; <nl> + uint256 hash = GetHash ( ) ; <nl> + if ( mapTransactions . count ( hash ) ) <nl> + { <nl> + BOOST_FOREACH ( const CTxIn & txin , vin ) <nl> + mapNextTx . erase ( txin . prevout ) ; <nl> + mapTransactions . erase ( hash ) ; <nl> + nTransactionsUpdated + + ; <nl> + - - nPooledTx ; <nl> + } <nl> } <nl> return true ; <nl> } <nl>
Merge pull request from dooglus / master
bitcoin/bitcoin
b0cfef3214036bce18daf746b8ff228111e1fdfc
2012-02-10T17:04:19Z
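The merged guard makes `RemoveFromMemoryPool` idempotent: the `mapNextTx` entries, the update counter and `nPooledTx` are only touched when the transaction is actually still in the pool, so a second removal cannot corrupt the bookkeeping. The pattern in isolation (hypothetical containers, not Bitcoin's types):

    #include <map>
    #include <string>

    std::map<std::string, int> pool;  // stand-in for mapTransactions
    int pooled_count = 0;             // stand-in for nPooledTx

    void remove_from_pool(const std::string& hash) {
      if (pool.count(hash)) {  // only adjust bookkeeping if still present
        pool.erase(hash);
        --pooled_count;        // a repeated call is now a harmless no-op
      }
    }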
mmm a / contracts / eosiolib / datastream . hpp <nl> ppp b / contracts / eosiolib / datastream . hpp <nl> namespace eosio { <nl> * / <nl> <nl> / * * <nl> - * % A data stream for reading and writing data in the form of bytes <nl> - * <nl> + * % A data stream for reading and writing data in the form of bytes <nl> + * <nl> * @ brief % A data stream for reading and writing data in the form of bytes . <nl> - * @ tparam T - Type of the datastream buffer <nl> + * @ tparam T - Type of the datastream buffer <nl> * / <nl> template < typename T > <nl> class datastream { <nl> public : <nl> / * * <nl> * Construct a new datastream object given the size of the buffer and start position of the buffer <nl> - * <nl> + * <nl> * @ brief Construct a new datastream object <nl> * @ param start - The start position of the buffer <nl> * @ param s - The size of the buffer <nl> class datastream { <nl> <nl> / * * <nl> * Skips a specified number of bytes from this stream <nl> - * <nl> + * <nl> * @ brief Skips a specific number of bytes from this stream <nl> * @ param s - The number of bytes to skip <nl> * / <nl> class datastream { <nl> <nl> / * * <nl> * Reads a specified number of bytes from the stream into a buffer <nl> - * <nl> + * <nl> * @ brief Reads a specified number of bytes from this stream into a buffer <nl> * @ param d - The pointer to the destination buffer <nl> * @ param s - the number of bytes to read <nl> class datastream { <nl> <nl> / * * <nl> * Writes a specified number of bytes into the stream from a buffer <nl> - * <nl> + * <nl> * @ brief Writes a specified number of bytes into the stream from a buffer <nl> * @ param d - The pointer to the source buffer <nl> * @ param s - The number of bytes to write <nl> class datastream { <nl> <nl> / * * <nl> * Writes a byte into the stream <nl> - * <nl> + * <nl> * @ brief Writes a byte into the stream <nl> * @ param c byte to write <nl> * @ return true <nl> class datastream { <nl> <nl> / * * <nl> * Reads a byte from the stream <nl> - * <nl> + * <nl> * @ brief Reads a byte from the stream <nl> * @ param c - The reference to destination byte <nl> * @ return true <nl> class datastream { <nl> <nl> / * * <nl> * Reads a byte from the stream <nl> - * <nl> + * <nl> * @ brief Reads a byte from the stream <nl> * @ param c - The reference to destination byte <nl> * @ return true <nl> class datastream { <nl> <nl> / * * <nl> * Retrieves the current position of the stream <nl> - * <nl> + * <nl> * @ brief Retrieves the current position of the stream <nl> * @ return T - The current position of the stream <nl> * / <nl> class datastream { <nl> <nl> / * * <nl> * Sets the position within the current stream <nl> - * <nl> + * <nl> * @ brief Sets the position within the current stream <nl> * @ param p - The offset relative to the origin <nl> * @ return true if p is within the range <nl> class datastream { <nl> <nl> / * * <nl> * Gets the position within the current stream <nl> - * <nl> + * <nl> * @ brief Gets the position within the current stream <nl> * @ return p - The position within the current stream <nl> * / <nl> class datastream { <nl> <nl> / * * <nl> * Returns the number of remaining bytes that can be read / skipped <nl> - * <nl> + * <nl> * @ brief Returns the number of remaining bytes that can be read / skipped <nl> * @ return size_t - The number of remaining bytes <nl> * / <nl> class datastream { <nl> private : <nl> / * * <nl> * The start position of the buffer <nl> - * <nl> + * <nl> * @ brief The start position of the buffer <nl> * / <nl> T _start ; <nl> / * * <nl> * The 
current position of the buffer <nl> - * <nl> + * <nl> * @ brief The current position of the buffer <nl> * / <nl> T _pos ; <nl> / * * <nl> * The end position of the buffer <nl> - * <nl> + * <nl> * @ brief The end position of the buffer <nl> * / <nl> T _end ; <nl> class datastream < size_t > { <nl> public : <nl> / * * <nl> * Construct a new specialized datastream object given the initial size <nl> - * <nl> + * <nl> * @ brief Construct a new specialized datastream object <nl> - * @ param init_size - The initial size <nl> + * @ param init_size - The initial size <nl> * / <nl> datastream ( size_t init_size = 0 ) : _size ( init_size ) { } <nl> <nl> / * * <nl> * Increment the size by s . This behaves the same as write ( const char * , size_t s ) . <nl> - * <nl> + * <nl> * @ brief Increase the size by s <nl> * @ param s - The amount of size to increase <nl> * @ return true <nl> class datastream < size_t > { <nl> inline bool skip ( size_t s ) { _size + = s ; return true ; } <nl> <nl> / * * <nl> - * Increment the size by s . This behaves the same as skip ( size_t s ) <nl> - * <nl> + * Increment the size by s . This behaves the same as skip ( size_t s ) <nl> + * <nl> * @ brief Increase the size by s <nl> * @ param s - The amount of size to increase <nl> * @ return true <nl> class datastream < size_t > { <nl> <nl> / * * <nl> * Increment the size by one <nl> - * <nl> + * <nl> * @ brief Increase the size by one <nl> * @ return true <nl> * / <nl> class datastream < size_t > { <nl> <nl> / * * <nl> * Check validity . It ' s always valid <nl> - * <nl> + * <nl> * @ brief Check validity <nl> * @ return true <nl> * / <nl> class datastream < size_t > { <nl> <nl> / * * <nl> * Set new size <nl> - * <nl> + * <nl> * @ brief Set new size <nl> * @ param p - The new size <nl> - * @ return true <nl> + * @ return true <nl> * / <nl> inline bool seekp ( size_t p ) { _size = p ; return true ; } <nl> <nl> / * * <nl> * Get the size <nl> - * <nl> + * <nl> * @ brief Get the size <nl> - * @ return size_t - The size <nl> + * @ return size_t - The size <nl> * / <nl> inline size_t tellp ( ) const { return _size ; } <nl> <nl> / * * <nl> * Always returns 0 <nl> - * <nl> + * <nl> * @ brief Always returns 0 <nl> * @ return size_t - 0 <nl> * / <nl> class datastream < size_t > { <nl> private : <nl> / * * <nl> * The size used to determine the final size of a serialized value . <nl> - * <nl> + * <nl> * @ brief The size used to determine the final size of a serialized value . 
<nl> * / <nl> size_t _size ; <nl> class datastream < size_t > { <nl> <nl> / * * <nl> * Serialize a public_key into a stream <nl> - * <nl> + * <nl> * @ brief Serialize a public_key <nl> * @ param ds - The stream to write <nl> * @ param pubkey - The value to serialize <nl> inline datastream < Stream > & operator < < ( datastream < Stream > & ds , const public_key p <nl> <nl> / * * <nl> * Deserialize a public_key from a stream <nl> - * <nl> + * <nl> * @ brief Deserialize a public_key <nl> * @ param ds - The stream to read <nl> * @ param pubkey - The destination for deserialized value <nl> inline datastream < Stream > & operator > > ( datastream < Stream > & ds , public_key & pubkey <nl> <nl> / * * <nl> * Serialize a key256 into a stream <nl> - * <nl> + * <nl> * @ brief Serialize a key256 <nl> * @ param ds - The stream to write <nl> * @ param d - The value to serialize <nl> inline datastream < Stream > & operator < < ( datastream < Stream > & ds , const key256 & d ) { <nl> <nl> / * * <nl> * Deserialize a key256 from a stream <nl> - * <nl> + * <nl> * @ brief Deserialize a key256 <nl> * @ param ds - The stream to read <nl> * @ param d - The destination for deserialized value <nl> inline datastream < Stream > & operator > > ( datastream < Stream > & ds , key256 & d ) { <nl> <nl> / * * <nl> * Serialize a bool into a stream <nl> - * <nl> + * <nl> * @ brief Serialize a bool into a stream <nl> * @ param ds - The stream to read <nl> * @ param d - The value to serialize <nl> inline datastream < Stream > & operator < < ( datastream < Stream > & ds , const bool & d ) { <nl> <nl> / * * <nl> * Deserialize a bool from a stream <nl> - * <nl> + * <nl> * @ brief Deserialize a bool <nl> * @ param ds - The stream to read <nl> * @ param d - The destination for deserialized value <nl> inline datastream < Stream > & operator > > ( datastream < Stream > & ds , bool & d ) { <nl> <nl> / * * <nl> * Serialize a checksum256 into a stream <nl> - * <nl> + * <nl> * @ brief Serialize a checksum256 <nl> * @ param ds - The stream to write <nl> * @ param d - The value to serialize <nl> inline datastream < Stream > & operator < < ( datastream < Stream > & ds , const checksum256 & <nl> <nl> / * * <nl> * Deserialize a checksum256 from a stream <nl> - * <nl> + * <nl> * @ brief Deserialize a checksum256 <nl> * @ param ds - The stream to read <nl> * @ param d - The destination for deserialized value <nl> inline datastream < Stream > & operator > > ( datastream < Stream > & ds , checksum256 & d ) { <nl> <nl> / * * <nl> * Serialize a string into a stream <nl> - * <nl> + * <nl> * @ brief Serialize a string <nl> * @ param ds - The stream to write <nl> * @ param v - The value to serialize <nl> - * @ tparam DataStream - Type of datastream <nl> + * @ tparam DataStream - Type of datastream <nl> * @ return DataStream & - Reference to the datastream <nl> * / <nl> template < typename DataStream > <nl> DataStream & operator < < ( DataStream & ds , const std : : string & v ) { <nl> <nl> / * * <nl> * Deserialize a string from a stream <nl> - * <nl> + * <nl> * @ brief Deserialize a string <nl> * @ param ds - The stream to read <nl> * @ param v - The destination for deserialized value <nl> - * @ tparam DataStream - Type of datastream <nl> + * @ tparam DataStream - Type of datastream <nl> * @ return DataStream & - Reference to the datastream <nl> * / <nl> template < typename DataStream > <nl> DataStream & operator > > ( DataStream & ds , std : : string & v ) { <nl> <nl> / * * <nl> * Serialize a fixed size array into a stream <nl> - * <nl> + * <nl> * 
@ brief Serialize a fixed size array <nl> * @ param ds - The stream to write <nl> * @ param v - The value to serialize <nl> - * @ tparam DataStream - Type of datastream <nl> + * @ tparam DataStream - Type of datastream <nl> * @ tparam T - Type of the object contained in the array <nl> * @ tparam N - Size of the array <nl> * @ return DataStream & - Reference to the datastream <nl> DataStream & operator < < ( DataStream & ds , const std : : array < T , N > & v ) { <nl> <nl> / * * <nl> * Deserialize a fixed size array from a stream <nl> - * <nl> + * <nl> * @ brief Deserialize a fixed size array <nl> * @ param ds - The stream to read <nl> * @ param v - The destination for deserialized value <nl> DataStream & operator > > ( DataStream & ds , std : : array < T , N > & v ) { <nl> namespace _datastream_detail { <nl> / * * <nl> * Check if type T is a pointer <nl> - * <nl> + * <nl> * @ brief Check if type T is a pointer <nl> * @ tparam T - The type to be checked <nl> * @ return true if T is a pointer <nl> namespace _datastream_detail { <nl> <nl> / * * <nl> * Check if type T is a primitive type <nl> - * <nl> + * <nl> * @ brief Check if type T is a primitive type <nl> * @ tparam T - The type to be checked <nl> * @ return true if T is a primitive type <nl> namespace _datastream_detail { <nl> <nl> / * * <nl> * Pointer should not be serialized , so this function will always throws an error <nl> - * <nl> + * <nl> * @ brief Deserialize a a pointer <nl> * @ param ds - The stream to read <nl> * @ tparam DataStream - Type of datastream <nl> DataStream & operator > > ( DataStream & ds , T ) { <nl> <nl> / * * <nl> * Serialize a fixed size array of non - primitive and non - pointer type <nl> - * <nl> + * <nl> * @ brief Serialize a fixed size array of non - primitive and non - pointer type <nl> * @ param ds - The stream to write <nl> * @ param v - The value to serialize <nl> - * @ tparam DataStream - Type of datastream <nl> + * @ tparam DataStream - Type of datastream <nl> * @ tparam T - Type of the pointer <nl> * @ return DataStream & - Reference to the datastream <nl> * / <nl> DataStream & operator < < ( DataStream & ds , const T ( & v ) [ N ] ) { <nl> <nl> / * * <nl> * Serialize a fixed size array of non - primitive type <nl> - * <nl> + * <nl> * @ brief Serialize a fixed size array of non - primitive type <nl> * @ param ds - The stream to write <nl> * @ param v - The value to serialize <nl> - * @ tparam DataStream - Type of datastream <nl> + * @ tparam DataStream - Type of datastream <nl> * @ tparam T - Type of the pointer <nl> * @ return DataStream & - Reference to the datastream <nl> * / <nl> DataStream & operator < < ( DataStream & ds , const T ( & v ) [ N ] ) { <nl> <nl> / * * <nl> * Deserialize a fixed size array of non - primitive and non - pointer type <nl> - * <nl> + * <nl> * @ brief Deserialize a fixed size array of non - primitive and non - pointer type <nl> * @ param ds - The stream to read <nl> * @ param v - The destination for deserialized value <nl> DataStream & operator > > ( DataStream & ds , T ( & v ) [ N ] ) { <nl> <nl> / * * <nl> * Deserialize a fixed size array of non - primitive type <nl> - * <nl> + * <nl> * @ brief Deserialize a fixed size array of non - primitive type <nl> * @ param ds - The stream to read <nl> * @ param v - The destination for deserialized value <nl> DataStream & operator > > ( DataStream & ds , T ( & v ) [ N ] ) { <nl> <nl> / * * <nl> * Serialize a vector of char <nl> - * <nl> + * <nl> * @ brief Serialize a vector of char <nl> * @ param ds - The stream to write <nl> * 
@ param v - The value to serialize <nl> - * @ tparam DataStream - Type of datastream <nl> + * @ tparam DataStream - Type of datastream <nl> * @ return DataStream & - Reference to the datastream <nl> * / <nl> template < typename DataStream > <nl> DataStream & operator < < ( DataStream & ds , const vector < char > & v ) { <nl> <nl> / * * <nl> * Serialize a vector <nl> - * <nl> + * <nl> * @ brief Serialize a vector <nl> * @ param ds - The stream to write <nl> * @ param v - The value to serialize <nl> - * @ tparam DataStream - Type of datastream <nl> + * @ tparam DataStream - Type of datastream <nl> * @ tparam T - Type of the object contained in the vector <nl> * @ return DataStream & - Reference to the datastream <nl> * / <nl> DataStream & operator < < ( DataStream & ds , const vector < T > & v ) { <nl> <nl> / * * <nl> * Deserialize a vector of char <nl> - * <nl> + * <nl> * @ brief Deserialize a vector of char <nl> * @ param ds - The stream to read <nl> * @ param v - The destination for deserialized value <nl> DataStream & operator > > ( DataStream & ds , vector < char > & v ) { <nl> } <nl> <nl> / * * <nl> - * Deserialize a vector <nl> - * <nl> + * Deserialize a vector <nl> + * <nl> * @ brief Deserialize a vector <nl> * @ param ds - The stream to read <nl> * @ param v - The destination for deserialized value <nl> DataStream & operator > > ( DataStream & ds , vector < T > & v ) { <nl> return ds ; <nl> } <nl> <nl> - < < < < < < < HEAD <nl> - / * * <nl> - * Serialize a map <nl> - * <nl> - * @ brief Serialize a map <nl> - * @ param ds - The stream to write <nl> - * @ param m - The value to serialize <nl> - * @ tparam DataStream - Type of datastream <nl> - * @ tparam K - Type of the key contained in the map <nl> - * @ tparam V - Type of the value contained in the map <nl> - * @ return DataStream & - Reference to the datastream <nl> - * / <nl> - = = = = = = = <nl> template < typename DataStream , typename T > <nl> DataStream & operator < < ( DataStream & ds , const std : : set < T > & s ) { <nl> ds < < unsigned_int ( s . size ( ) ) ; <nl> DataStream & operator > > ( DataStream & ds , std : : set < T > & s ) { <nl> return ds ; <nl> } <nl> <nl> - > > > > > > > origin / slim <nl> + / * * <nl> + * Serialize a map <nl> + * <nl> + * @ brief Serialize a map <nl> + * @ param ds - The stream to write <nl> + * @ param m - The value to serialize <nl> + * @ tparam DataStream - Type of datastream <nl> + * @ tparam K - Type of the key contained in the map <nl> + * @ tparam V - Type of the value contained in the map <nl> + * @ return DataStream & - Reference to the datastream <nl> + * / <nl> template < typename DataStream , typename K , typename V > <nl> DataStream & operator < < ( DataStream & ds , const std : : map < K , V > & m ) { <nl> ds < < unsigned_int ( m . 
size ( ) ) ; <nl> DataStream & operator < < ( DataStream & ds , const std : : map < K , V > & m ) { <nl> } <nl> <nl> / * * <nl> - * Deserialize a map <nl> - * <nl> + * Deserialize a map <nl> + * <nl> * @ brief Deserialize a map <nl> * @ param ds - The stream to read <nl> * @ param m - The destination for deserialized value <nl> DataStream & operator > > ( DataStream & ds , std : : map < K , V > & m ) { <nl> return ds ; <nl> } <nl> <nl> - < < < < < < < HEAD <nl> - / * * <nl> - * Serialize a flat map <nl> - * <nl> - * @ brief Serialize a flat map <nl> - * @ param ds - The stream to write <nl> - * @ param m - The value to serialize <nl> - * @ tparam DataStream - Type of datastream <nl> - * @ tparam K - Type of the key contained in the flat map <nl> - * @ tparam V - Type of the value contained in the flat map <nl> - * @ return DataStream & - Reference to the datastream <nl> - * / <nl> - = = = = = = = <nl> template < typename DataStream , typename T > <nl> DataStream & operator < < ( DataStream & ds , const boost : : container : : flat_set < T > & s ) { <nl> ds < < unsigned_int ( s . size ( ) ) ; <nl> DataStream & operator > > ( DataStream & ds , boost : : container : : flat_set < T > & s ) { <nl> return ds ; <nl> } <nl> <nl> - > > > > > > > origin / slim <nl> + <nl> + / * * <nl> + * Serialize a flat map <nl> + * <nl> + * @ brief Serialize a flat map <nl> + * @ param ds - The stream to write <nl> + * @ param m - The value to serialize <nl> + * @ tparam DataStream - Type of datastream <nl> + * @ tparam K - Type of the key contained in the flat map <nl> + * @ tparam V - Type of the value contained in the flat map <nl> + * @ return DataStream & - Reference to the datastream <nl> + * / <nl> template < typename DataStream , typename K , typename V > <nl> DataStream & operator < < ( DataStream & ds , const boost : : container : : flat_map < K , V > & m ) { <nl> ds < < unsigned_int ( m . size ( ) ) ; <nl> DataStream & operator < < ( DataStream & ds , const boost : : container : : flat_map < K , V > & m <nl> } <nl> <nl> / * * <nl> - * Deserialize a flat map <nl> - * <nl> + * Deserialize a flat map <nl> + * <nl> * @ brief Deserialize a flat map <nl> * @ param ds - The stream to read <nl> * @ param m - The destination for deserialized value <nl> DataStream & operator > > ( DataStream & ds , boost : : container : : flat_map < K , V > & m ) { <nl> <nl> / * * <nl> * Serialize a tuple <nl> - * <nl> + * <nl> * @ brief Serialize a tuple <nl> * @ param ds - The stream to write <nl> * @ param t - The value to serialize <nl> - * @ tparam DataStream - Type of datastream <nl> + * @ tparam DataStream - Type of datastream <nl> * @ tparam Args - Type of the objects contained in the tuple <nl> * @ return DataStream & - Reference to the datastream <nl> * / <nl> DataStream & operator < < ( DataStream & ds , const std : : tuple < Args . . . > & t ) { <nl> <nl> / * * <nl> * Deserialize a tuple <nl> - * <nl> + * <nl> * @ brief Deserialize a tuple <nl> * @ param ds - The stream to read <nl> * @ param t - The destination for deserialized value <nl> DataStream & operator > > ( DataStream & ds , std : : tuple < Args . . . 
> & t ) { <nl> <nl> / * * <nl> * Serialize a class <nl> - * <nl> + * <nl> * @ brief Serialize a class <nl> * @ param ds - The stream to write <nl> * @ param v - The value to serialize <nl> - * @ tparam DataStream - Type of datastream <nl> + * @ tparam DataStream - Type of datastream <nl> * @ tparam T - Type of class <nl> * @ return DataStream & - Reference to the datastream <nl> * / <nl> DataStream & operator < < ( DataStream & ds , const T & v ) { <nl> <nl> / * * <nl> * Deserialize a class <nl> - * <nl> + * <nl> * @ brief Deserialize a class <nl> * @ param ds - The stream to read <nl> * @ param v - The destination for deserialized value <nl> DataStream & operator > > ( DataStream & ds , T & v ) { <nl> <nl> / * * <nl> * Serialize a primitive type <nl> - * <nl> + * <nl> * @ brief Serialize a primitive type <nl> * @ param ds - The stream to write <nl> * @ param v - The value to serialize <nl> - * @ tparam DataStream - Type of datastream <nl> + * @ tparam DataStream - Type of datastream <nl> * @ tparam T - Type of the primitive type <nl> * @ return DataStream & - Reference to the datastream <nl> * / <nl> DataStream & operator < < ( DataStream & ds , const T & v ) { <nl> <nl> / * * <nl> * Deserialize a primitive type <nl> - * <nl> + * <nl> * @ brief Deserialize a primitive type <nl> * @ param ds - The stream to read <nl> * @ param v - The destination for deserialized value <nl> DataStream & operator > > ( DataStream & ds , T & v ) { <nl> <nl> / * * <nl> * Unpack data inside a fixed size buffer as T <nl> - * <nl> + * <nl> * @ brief Unpack data inside a fixed size buffer as T <nl> * @ tparam T - Type of the unpacked data <nl> * @ param buffer - Pointer to the buffer <nl> T unpack ( const char * buffer , size_t len ) { <nl> <nl> / * * <nl> * Unpack data inside a variable size buffer as T <nl> - * <nl> + * <nl> * @ brief Unpack data inside a variable size buffer as T <nl> * @ tparam T - Type of the unpacked data <nl> * @ param bytes - Buffer <nl> T unpack ( const vector < char > & bytes ) { <nl> <nl> / * * <nl> * Get the size of the packed data <nl> - * <nl> + * <nl> * @ brief Get the size of the packed data <nl> * @ tparam T - Type of the data to be packed <nl> * @ param value - Data to be packed <nl> size_t pack_size ( const T & value ) { <nl> <nl> / * * <nl> * Get packed data <nl> - * <nl> + * <nl> * @ brief Get packed data <nl> * @ tparam T - Type of the data to be packed <nl> * @ param value - Data to be packed <nl> bytes pack ( const T & value ) { <nl> <nl> / * * <nl> * Serialize a checksum160 type <nl> - * <nl> + * <nl> * @ brief Serialize a checksum160 type <nl> * @ param ds - The stream to write <nl> * @ param cs - The value to serialize <nl> inline datastream < Stream > & operator < < ( datastream < Stream > & ds , const checksum160 & <nl> <nl> / * * <nl> * Deserialize a checksum160 type <nl> - * <nl> + * <nl> * @ brief Deserialize a checksum160 type <nl> * @ param ds - The stream to read <nl> * @ param cs - The destination for deserialized value <nl> inline datastream < Stream > & operator > > ( datastream < Stream > & ds , checksum160 & cs ) { <nl> <nl> / * * <nl> * Serialize a checksum512 type <nl> - * <nl> + * <nl> * @ brief Serialize a checksum512 type <nl> * @ param ds - The stream to write <nl> * @ param cs - The value to serialize <nl> inline datastream < Stream > & operator < < ( datastream < Stream > & ds , const checksum512 & <nl> <nl> / * * <nl> * Deserialize a checksum512 type <nl> - * <nl> + * <nl> * @ brief Deserialize a checksum512 type <nl> * @ param ds - The stream to 
read <nl> * @ param cs - The destination for deserialized value <nl> mmm a / contracts / eosiolib / transaction . h <nl> ppp b / contracts / eosiolib / transaction . h <nl> extern " C " { <nl> <nl> / * * <nl> * Sends a deferred transaction . <nl> - * <nl> + * <nl> * @ brief Sends a deferred transaction . <nl> * @ param sender_id - ID of sender <nl> * @ param payer - Account paying for RAM <nl> - * @ param serialized_transaction - Pointer of serialized transaction to be deferred <nl> + * @ param serialized_transaction - Pointer of serialized transaction to be deferred <nl> * @ param size - Size to reserve <nl> * / <nl> void send_deferred ( const uint128_t & sender_id , account_name payer , const char * serialized_transaction , size_t size ) ; <nl> <nl> / * * <nl> * Cancels a deferred transaction . <nl> - * <nl> + * <nl> * @ brief Cancels a deferred transaction . <nl> * @ param sender_id - The id of the sender <nl> * <nl> * @ pre The deferred transaction ID exists . <nl> * @ pre The deferred transaction ID has not yet been published . <nl> * @ post Deferred transaction canceled . <nl> - * <nl> + * <nl> * Example : <nl> * @ code <nl> * id = 0xffffffffffffffff <nl> extern " C " { <nl> <nl> / * * <nl> * Access a copy of the currently executing transaction . <nl> - * <nl> + * <nl> * @ brief Access a copy of the currently executing transaction . <nl> * @ param buffer - a buffer to write the current transaction to <nl> * @ param size - the size of the buffer , 0 to return required size <nl> extern " C " { <nl> <nl> / * * <nl> * Gets the size of the currently executing transaction . <nl> - * <nl> + * <nl> * @ brief Gets the size of the currently executing transaction . <nl> * @ return size of the currently executing transaction <nl> * / <nl> extern " C " { <nl> <nl> / * * <nl> * Gets the block number used for TAPOS on the currently executing transaction . <nl> - * <nl> + * <nl> * @ brief Gets the block number used for TAPOS on the currently executing transaction . <nl> * @ return block number used for TAPOS on the currently executing transaction <nl> * Example : <nl> extern " C " { <nl> <nl> / * * <nl> * Gets the block prefix used for TAPOS on the currently executing transaction . <nl> - * <nl> + * <nl> * @ brief Gets the block prefix used for TAPOS on the currently executing transaction . <nl> * @ return block prefix used for TAPOS on the currently executing transaction <nl> * Example : <nl> extern " C " { <nl> <nl> / * * <nl> * Gets the expiration of the currently executing transaction . <nl> - * <nl> + * <nl> * @ brief Gets the expiration of the currently executing transaction . <nl> * @ return expiration of the currently executing transaction <nl> * Example : <nl> extern " C " { <nl> <nl> / * * <nl> * Retrieves the indicated action from the active transaction . <nl> - * <nl> + * <nl> * @ brief Retrieves the indicated action from the active transaction . <nl> * @ param type - 0 for context free action , 1 for action <nl> * @ param index - the index of the requested action <nl> extern " C " { <nl> <nl> / * * <nl> * Retrieve the signed_transaction . context_free_data [ index ] . <nl> - * <nl> + * <nl> * @ brief Retrieve the signed_transaction . context_free_data [ index ] . 
<nl> * @ param index - the index of the context_free_data entry to retrieve <nl> * @ param buff - output buff of the context_free_data entry <nl> extern " C " { <nl> * / <nl> int get_context_free_data ( uint32_t index , char * buff , size_t size ) ; <nl> <nl> - < < < < < < < HEAD <nl> - / * * <nl> - * Checks that prodived authorizations is enough to execute the transaction . <nl> - * <nl> - * @ brief Checks that prodived authorizations is enough to execute the transaction . <nl> - * @ param serialized_transaction - Pointer of serialized transaction to check authorization for <nl> - * @ param size - Size of transaction <nl> - * @ param permissions - Permissions to check for authorization <nl> - * @ param psize - Size of permissions <nl> - * / <nl> - void check_auth ( const char * serialized_transaction , size_t size , const char * permissions , size_t psize ) ; <nl> - <nl> - = = = = = = = <nl> - > > > > > > > origin / slim <nl> / / / @ } transactioncapi <nl> } <nl>
fixed missed merge conflicts in datastream . hpp and transaction . h
EOSIO/eos
853c0bf98304175aa994bf5c0e4997b1c3e33383
2018-05-10T10:55:08Z
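The conflict resolution above keeps the container serializers and restores their doc comments; each of them follows the same wire scheme, visible in the kept hunks: write the element count as an unsigned_int, then stream every element, and read them back in the same order. Below is a minimal self-contained sketch of that scheme for a map, assuming a raw uint32_t count where eosiolib uses its variable-length unsigned_int and a std::stringstream where eosiolib uses its templated byte-buffer datastream; it illustrates the pattern, not the library's actual types.

#include <cstdint>
#include <iostream>
#include <map>
#include <sstream>
#include <string>

// Sketch only: fixed-width count instead of eosio's varint unsigned_int.
template <typename T>
void write_pod(std::ostream& os, const T& v) {
  os.write(reinterpret_cast<const char*>(&v), sizeof(v));
}

template <typename T>
void read_pod(std::istream& is, T& v) {
  is.read(reinterpret_cast<char*>(&v), sizeof(v));
}

void write_str(std::ostream& os, const std::string& s) {
  write_pod(os, static_cast<uint32_t>(s.size()));
  os.write(s.data(), static_cast<std::streamsize>(s.size()));
}

std::string read_str(std::istream& is) {
  uint32_t n = 0;
  read_pod(is, n);
  std::string s(n, '\0');
  is.read(&s[0], n);
  return s;
}

// The map pattern from the diff: count first, then each key/value pair.
void write_map(std::ostream& os, const std::map<std::string, uint64_t>& m) {
  write_pod(os, static_cast<uint32_t>(m.size()));
  for (const auto& kv : m) {
    write_str(os, kv.first);
    write_pod(os, kv.second);
  }
}

std::map<std::string, uint64_t> read_map(std::istream& is) {
  uint32_t n = 0;
  read_pod(is, n);
  std::map<std::string, uint64_t> m;
  for (uint32_t i = 0; i < n; ++i) {
    std::string k = read_str(is);
    uint64_t v = 0;
    read_pod(is, v);
    m.emplace(std::move(k), v);
  }
  return m;
}

int main() {
  std::stringstream ss;
  write_map(ss, {{"alice", 10}, {"bob", 20}});
  auto m = read_map(ss);
  std::cout << m["alice"] << " " << m["bob"] << "\n";  // prints: 10 20
}

Round-tripping depends on writer and reader agreeing on the count encoding, which is also why the datastream<size_t> specialization kept in the diff only accumulates sizes: it lets pack_size() measure a value without actually serializing it.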
mmm a / src / mongo / s / server . cpp <nl> ppp b / src / mongo / s / server . cpp <nl> static bool runMongosServer ( bool doUpgrade ) { <nl> return false ; <nl> } <nl> <nl> + if ( doUpgrade ) { <nl> + log ( ) < < " Config database is at version v " < < CURRENT_CONFIG_VERSION ; <nl> + return true ; <nl> + } <nl> + <nl> configServer . reloadSettings ( ) ; <nl> <nl> init ( ) ; <nl>
SERVER - 13294 : mongos exit after - - upgrade
mongodb/mongo
288745abb978e8d444a4e4bcbbd35176f05d2a06
2014-03-20T16:31:50Z
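The fix is a flag-gated early return: when mongos is started with --upgrade, runMongosServer() now reports the config version and returns right after the config check instead of falling through to reloadSettings() and full startup. A hedged sketch of that shape, with illustrative names (run_mongos_server, check_and_upgrade_config, kCurrentConfigVersion) rather than mongo's real ones:

#include <iostream>

constexpr int kCurrentConfigVersion = 4;  // illustrative value

// Stand-in for the real upgrade/verify step against the config database.
bool check_and_upgrade_config() { return true; }

bool run_mongos_server(bool do_upgrade) {
  if (!check_and_upgrade_config())
    return false;

  if (do_upgrade) {
    // Upgrade was the requested job: report and stop before serving traffic.
    std::cout << "Config database is at version v" << kCurrentConfigVersion
              << "\n";
    return true;
  }

  // ... reload settings, initialize listeners, serve requests ...
  return true;
}

int main(int argc, char**) {
  // Treat any argument as --upgrade for the purpose of this sketch.
  return run_mongos_server(argc > 1) ? 0 : 1;
}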
mmm a / Makefile . in <nl> ppp b / Makefile . in <nl> JAVASCRIPT_JSLINT = \ <nl> @ srcdir @ / html / admin / js / arangodb / browser . js \ <nl> @ srcdir @ / js / actions / system / api - collection . js \ <nl> @ srcdir @ / js / actions / system / api - structure . js \ <nl> + @ srcdir @ / js / client / modules / org / arangodb . js \ <nl> @ srcdir @ / js / common / bootstrap / errors . js \ <nl> @ srcdir @ / js / common / bootstrap / module - console . js \ <nl> @ srcdir @ / js / common / bootstrap / module - fs . js \ <nl> JAVASCRIPT_JSLINT = \ <nl> @ srcdir @ / js / common / bootstrap / modules . js \ <nl> @ srcdir @ / js / common / bootstrap / monkeypatches . js \ <nl> @ srcdir @ / js / server / ArangoCollection . js \ <nl> + @ srcdir @ / js / server / ArangoStructure . js \ <nl> @ srcdir @ / js / server / modules / org / arangodb . js \ <nl> - @ srcdir @ / js / server / modules / org / arangodb / actions . js \ <nl> - @ srcdir @ / js / server / ArangoStructure . js <nl> + @ srcdir @ / js / server / modules / org / arangodb / validator . js \ <nl> + @ srcdir @ / js / server / modules / org / arangodb / formatter . js \ <nl> + @ srcdir @ / js / server / modules / org / arangodb / parser . js \ <nl> + @ srcdir @ / js / server / modules / org / arangodb / actions . js <nl> <nl> @ ENABLE_ALL_IN_ONE_ICU_TRUE @ ICUDIR = @ abs_top_srcdir @ / 3rdParty / icu / BUILD <nl> all : $ ( BUILT_SOURCES ) <nl> mmm a / js / Makefile . files <nl> ppp b / js / Makefile . files <nl> JAVASCRIPT_JSLINT = \ <nl> @ srcdir @ / html / admin / js / arangodb / browser . js \ <nl> @ srcdir @ / js / actions / system / api - collection . js \ <nl> @ srcdir @ / js / actions / system / api - structure . js \ <nl> + @ srcdir @ / js / client / modules / org / arangodb . js \ <nl> @ srcdir @ / js / common / bootstrap / errors . js \ <nl> @ srcdir @ / js / common / bootstrap / module - console . js \ <nl> @ srcdir @ / js / common / bootstrap / module - fs . js \ <nl> JAVASCRIPT_JSLINT = \ <nl> @ srcdir @ / js / common / bootstrap / modules . js \ <nl> @ srcdir @ / js / common / bootstrap / monkeypatches . js \ <nl> @ srcdir @ / js / server / ArangoCollection . js \ <nl> + @ srcdir @ / js / server / ArangoStructure . js \ <nl> @ srcdir @ / js / server / modules / org / arangodb . js \ <nl> - @ srcdir @ / js / server / modules / org / arangodb / actions . js \ <nl> - @ srcdir @ / js / server / ArangoStructure . js <nl> + @ srcdir @ / js / server / modules / org / arangodb / validator . js \ <nl> + @ srcdir @ / js / server / modules / org / arangodb / formatter . js \ <nl> + @ srcdir @ / js / server / modules / org / arangodb / parser . js \ <nl> + @ srcdir @ / js / server / modules / org / arangodb / actions . js <nl> <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> # # # @ brief executes jslint <nl> mmm a / js / client / modules / org / arangodb . js <nl> ppp b / js / client / modules / org / arangodb . 
js <nl> <nl> - / * jslint indent : 2 , <nl> - nomen : true , <nl> - maxlen : 100 , <nl> - sloppy : true , <nl> - vars : true , <nl> - white : true , <nl> - plusplus : true * / <nl> + / * jslint indent : 2 , nomen : true , maxlen : 100 , sloppy : true , vars : true , white : true , plusplus : true * / <nl> / * global require , exports * / <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> var internal = require ( " internal " ) ; <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> exports . db = internal . db ; <nl> + exports . ArangoCollection = internal . ArangoCollection ; <nl> + exports . ArangoError = internal . ArangoError ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ } <nl> mmm a / js / common / bootstrap / module - internal . js <nl> ppp b / js / common / bootstrap / module - internal . js <nl> <nl> <nl> if ( typeof PRETTY_PRINT ! = = " undefined " ) { <nl> internal . PRETTY_PRINT = PRETTY_PRINT ; <nl> + delete PRETTY_PRINT ; <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> old mode 100755 <nl> new mode 100644 <nl> mmm a / js / server / modules / org / arangodb . js <nl> ppp b / js / server / modules / org / arangodb . js <nl> <nl> - / * jslint indent : 2 , <nl> - nomen : true , <nl> - maxlen : 100 , <nl> - sloppy : true , <nl> - vars : true , <nl> - white : true , <nl> - plusplus : true * / <nl> + / * jslint indent : 2 , nomen : true , maxlen : 100 , sloppy : true , vars : true , white : true , plusplus : true * / <nl> / * global require , exports * / <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> var internal = require ( " internal " ) ; <nl> <nl> exports . db = internal . db ; <nl> exports . ArangoCollection = internal . ArangoCollection ; <nl> + exports . ArangoError = internal . ArangoError ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ } <nl> mmm a / js / server / modules / org / arangodb / formatter . js <nl> ppp b / js / server / modules / org / arangodb / formatter . 
js <nl> <nl> - / * jslint indent : 2 , <nl> - nomen : true , <nl> - maxlen : 100 , <nl> - sloppy : true , <nl> - vars : true , <nl> - white : true , <nl> - plusplus : true * / <nl> + / * jslint indent : 2 , nomen : true , maxlen : 100 , sloppy : true , vars : true , white : true , plusplus : true * / <nl> / * global require , exports * / <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> / / / @ author Copyright 2011 - 2012 , triAGENS GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + var arangodb = require ( " org / arangodb " ) ; <nl> + var actions = require ( " org / arangodb / actions " ) ; <nl> + <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - number parsers <nl> + / / - - SECTION - - number formatters <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> exports . number = function ( value , info , lang ) { <nl> result = String ( value ) ; <nl> } <nl> else { <nl> - error = new ArangoError ( ) ; <nl> - error . errorNum = internal . errors . ERROR_NOT_IMPLEMENTED . code ; <nl> + error = new arangodb . ArangoError ( ) ; <nl> + error . errorNum = actions . ERROR_NOT_IMPLEMENTED ; <nl> error . errorMessage = " format ' " + format + " ' not implemented " ; <nl> <nl> throw error ; <nl> exports . number = function ( value , info , lang ) { <nl> } <nl> <nl> return result ; <nl> - } <nl> + } ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ } <nl> mmm a / js / server / modules / org / arangodb / parser . js <nl> ppp b / js / server / modules / org / arangodb / parser . js <nl> <nl> - / * jslint indent : 2 , <nl> - nomen : true , <nl> - maxlen : 100 , <nl> - sloppy : true , <nl> - vars : true , <nl> - white : true , <nl> - plusplus : true * / <nl> + / * jslint indent : 2 , nomen : true , maxlen : 100 , sloppy : true , vars : true , white : true , plusplus : true * / <nl> / * global require , exports * / <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> / / / @ author Copyright 2011 - 2012 , triAGENS GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + var arangodb = require ( " org / arangodb " ) ; <nl> + var actions = require ( " org / arangodb / actions " ) ; <nl> + <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - number parsers <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> exports . number = function ( value , info , lang ) { <nl> format = info . 
format ; <nl> <nl> if ( format = = = " % d " ) { <nl> - result = parseInt ( value ) ; <nl> + result = parseInt ( value , 10 ) ; <nl> } <nl> else if ( format = = = " % f " ) { <nl> result = parseFloat ( value ) ; <nl> exports . number = function ( value , info , lang ) { <nl> result = parseInt ( value , 8 ) ; <nl> } <nl> else { <nl> - error = new ArangoError ( ) ; <nl> - error . errorNum = internal . errors . ERROR_NOT_IMPLEMENTED . code ; <nl> + error = new arangodb . ArangoError ( ) ; <nl> + error . errorNum = actions . ERROR_NOT_IMPLEMENTED ; <nl> error . errorMessage = " format ' " + format + " ' not implemented " ; <nl> <nl> throw error ; <nl> exports . number = function ( value , info , lang ) { <nl> } <nl> <nl> if ( result = = = null | | result = = = undefined | | isNaN ( result ) ) { <nl> - error = new ArangoError ( ) ; <nl> - error . errorNum = internal . errors . ERROR_ARANGO_PARSER_FAILED ; <nl> + error = new arangodb . ArangoError ( ) ; <nl> + error . errorNum = actions . ERROR_ARANGO_PARSER_FAILED ; <nl> error . errorMessage = " format ' " + format + " ' not implemented " ; <nl> <nl> throw error ; <nl> } <nl> <nl> return result ; <nl> - } <nl> + } ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ } <nl> mmm a / js / server / modules / org / arangodb / validator . js <nl> ppp b / js / server / modules / org / arangodb / validator . js <nl> <nl> - / * jslint indent : 2 , <nl> - nomen : true , <nl> - maxlen : 100 , <nl> - sloppy : true , <nl> - vars : true , <nl> - white : true , <nl> - plusplus : true * / <nl> + / * jslint indent : 2 , nomen : true , maxlen : 100 , sloppy : true , vars : true , white : true , plusplus : true * / <nl> / * global require , exports * / <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> / / / @ author Copyright 2011 - 2012 , triAGENS GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + var arangodb = require ( " org / arangodb " ) ; <nl> + var actions = require ( " org / arangodb / actions " ) ; <nl> + <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - number validators <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> <nl> exports . positiveNumber = function ( value , info , lang ) { <nl> if ( value < = 0 . 0 ) { <nl> - error = new ArangoError ( ) ; <nl> - error . errorNum = internal . errors . ERROR_ARANGO_VALIDATION_FAILED . code ; <nl> + var error = new arangodb . ArangoError ( ) ; <nl> + error . errorNum = actions . ERROR_ARANGO_VALIDATION_FAILED ; <nl> error . errorMessage = " number must be positive " ; <nl> <nl> throw error ; <nl> } <nl> - } <nl> + } ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief negative number <nl> exports . positiveNumber = function ( value , info , lang ) { <nl> <nl> exports . negativeNumber = function ( value , info , lang ) { <nl> if ( 0 . 0 < = value ) { <nl> - error = new ArangoError ( ) ; <nl> - error . errorNum = internal . errors . 
ERROR_ARANGO_VALIDATION_FAILED . code ; <nl> + var error = new arangodb . ArangoError ( ) ; <nl> + error . errorNum = actions . ERROR_ARANGO_VALIDATION_FAILED ; <nl> error . errorMessage = " number must be negative " ; <nl> <nl> throw error ; <nl> } <nl> - } <nl> + } ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief zero <nl> exports . negativeNumber = function ( value , info , lang ) { <nl> <nl> exports . zeroNumber = function ( value , info , lang ) { <nl> if ( value = = = 0 . 0 ) { <nl> - error = new ArangoError ( ) ; <nl> - error . errorNum = internal . errors . ERROR_ARANGO_VALIDATION_FAILED . code ; <nl> + var error = new arangodb . ArangoError ( ) ; <nl> + error . errorNum = actions . ERROR_ARANGO_VALIDATION_FAILED ; <nl> error . errorMessage = " number must be zero " ; <nl> <nl> throw error ; <nl> } <nl> - } <nl> + } ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief non - positive number <nl> exports . zeroNumber = function ( value , info , lang ) { <nl> <nl> exports . nonPositiveNumber = function ( value , info , lang ) { <nl> if ( 0 . 0 < value ) { <nl> - error = new ArangoError ( ) ; <nl> - error . errorNum = internal . errors . ERROR_ARANGO_VALIDATION_FAILED . code ; <nl> + var error = new arangodb . ArangoError ( ) ; <nl> + error . errorNum = actions . ERROR_ARANGO_VALIDATION_FAILED ; <nl> error . errorMessage = " number must be non - positive " ; <nl> <nl> throw error ; <nl> } <nl> - } <nl> + } ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief non - negative number <nl> exports . nonPositiveNumber = function ( value , info , lang ) { <nl> <nl> exports . nonNegativeNumber = function ( value , info , lang ) { <nl> if ( value < 0 . 0 ) { <nl> - error = new ArangoError ( ) ; <nl> - error . errorNum = internal . errors . ERROR_ARANGO_VALIDATION_FAILED . code ; <nl> + var error = new arangodb . ArangoError ( ) ; <nl> + error . errorNum = actions . ERROR_ARANGO_VALIDATION_FAILED ; <nl> error . errorMessage = " number must be non - negative " ; <nl> <nl> throw error ; <nl> } <nl> - } <nl> + } ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief zero <nl> exports . nonNegativeNumber = function ( value , info , lang ) { <nl> <nl> exports . nonZeroNumber = function ( value , info , lang ) { <nl> if ( value ! = = 0 . 0 ) { <nl> - error = new ArangoError ( ) ; <nl> - error . errorNum = internal . errors . ERROR_ARANGO_VALIDATION_FAILED . code ; <nl> + var error = new arangodb . ArangoError ( ) ; <nl> + error . errorNum = actions . ERROR_ARANGO_VALIDATION_FAILED ; <nl> error . errorMessage = " number must be non - zero " ; <nl> <nl> throw error ; <nl> } <nl> - } <nl> + } ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ } <nl>
fixed jslint warnings ( explicit parseInt radix , missing var declarations and semicolons , module - scoped error constructors )
arangodb/arangodb
f5b1476d78268b44062080763037e96ec03dddc6
2013-01-06T10:05:28Z
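Beyond style, the one behavior-relevant lint fix above is parseInt(value, 10): without an explicit radix, older JavaScript engines could parse a leading zero as octal. C and C++ carry the same trap in strtol, where base 0 auto-detects octal and hex prefixes while base 10 does not; a small sketch (in C++ to stay consistent with the other examples in this section):

#include <cstdlib>
#include <iostream>

int main() {
  const char* s = "010";
  // Base 0 auto-detects the prefix: "010" parses as octal, i.e. 8.
  long guessed = std::strtol(s, nullptr, 0);
  // An explicit base 10 parses it as decimal 10 - the analogue of the
  // parseInt(value, 10) fix in the diff above.
  long decimal = std::strtol(s, nullptr, 10);
  std::cout << guessed << " vs " << decimal << "\n";  // prints: 8 vs 10
}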
mmm a / test / cpp / qps / driver . cc <nl> ppp b / test / cpp / qps / driver . cc <nl> std : : unique_ptr < ScenarioResult > RunScenario ( <nl> stats . request_results ( i ) . count ( ) ; <nl> } <nl> result - > add_client_stats ( ) - > CopyFrom ( stats ) ; <nl> + / / That final status should be the last message on the client stream <nl> + / / GPR_ASSERT ( ! client - > stream - > Read ( & client_status ) ) ; <nl> } else { <nl> gpr_log ( GPR_ERROR , " Couldn ' t get final status from client % zu " , i ) ; <nl> } <nl> std : : unique_ptr < ScenarioResult > RunScenario ( <nl> } <nl> } <nl> <nl> - / / Collect servers ' final run results right after finishing server <nl> - for ( size_t i = 0 ; i < num_servers ; i + + ) { <nl> - auto server = & servers [ i ] ; <nl> - / / Read the server final status <nl> - if ( server - > stream - > Read ( & server_status ) ) { <nl> - gpr_log ( GPR_INFO , " Received final status from server % zu " , i ) ; <nl> - result - > add_server_stats ( ) - > CopyFrom ( server_status . stats ( ) ) ; <nl> - result - > add_server_cores ( server_status . cores ( ) ) ; <nl> - / / That final status should be the last message on the server stream <nl> - GPR_ASSERT ( ! server - > stream - > Read ( & server_status ) ) ; <nl> - } else { <nl> - gpr_log ( GPR_ERROR , " Couldn ' t get final status from server % zu " , i ) ; <nl> - } <nl> - } <nl> - <nl> / / Get final rpc status from clients <nl> for ( size_t i = 0 ; i < num_clients ; i + + ) { <nl> auto client = & clients [ i ] ; <nl> std : : unique_ptr < ScenarioResult > RunScenario ( <nl> } <nl> } <nl> <nl> + / / Post - processing the results summary <nl> + merged_latencies . FillProto ( result - > mutable_latencies ( ) ) ; <nl> + for ( std : : unordered_map < int , int64_t > : : iterator it = merged_statuses . begin ( ) ; <nl> + it ! = merged_statuses . end ( ) ; + + it ) { <nl> + RequestResultCount * rrc = result - > add_request_results ( ) ; <nl> + rrc - > set_status_code ( it - > first ) ; <nl> + rrc - > set_count ( it - > second ) ; <nl> + } <nl> + <nl> + / / Collect servers ' final run results right after finishing server <nl> + for ( size_t i = 0 ; i < num_servers ; i + + ) { <nl> + auto server = & servers [ i ] ; <nl> + / / Read the server final status <nl> + if ( server - > stream - > Read ( & server_status ) ) { <nl> + gpr_log ( GPR_INFO , " Received final status from server % zu " , i ) ; <nl> + result - > add_server_stats ( ) - > CopyFrom ( server_status . stats ( ) ) ; <nl> + result - > add_server_cores ( server_status . cores ( ) ) ; <nl> + / / That final status should be the last message on the server stream <nl> + GPR_ASSERT ( ! server - > stream - > Read ( & server_status ) ) ; <nl> + } else { <nl> + gpr_log ( GPR_ERROR , " Couldn ' t get final status from server % zu " , i ) ; <nl> + } <nl> + } <nl> + <nl> / / Get final rpc status from servers <nl> for ( size_t i = 0 ; i < num_servers ; i + + ) { <nl> auto server = & servers [ i ] ; <nl> std : : unique_ptr < ScenarioResult > RunScenario ( <nl> if ( g_inproc_servers ! = nullptr ) { <nl> delete g_inproc_servers ; <nl> } <nl> - <nl> - / / Post - processing the results summary <nl> - merged_latencies . FillProto ( result - > mutable_latencies ( ) ) ; <nl> - for ( std : : unordered_map < int , int64_t > : : iterator it = merged_statuses . begin ( ) ; <nl> - it ! = merged_statuses . 
end ( ) ; + + it ) { <nl> - RequestResultCount * rrc = result - > add_request_results ( ) ; <nl> - rrc - > set_status_code ( it - > first ) ; <nl> - rrc - > set_count ( it - > second ) ; <nl> - } <nl> postprocess_scenario_result ( result . get ( ) ) ; <nl> return result ; <nl> } <nl>
revert changes : post - process the results summary before collecting servers ' final stats
grpc/grpc
03ac8490ea01862b9bfa8f7094b1c63f12b2b81e
2019-10-28T17:20:21Z
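The revert restores the original ordering in RunScenario(): merge the per-status RPC tallies into the results summary first, then read the servers' final status messages. The summary fold itself is a plain map-to-rows copy; a stand-alone sketch of it, where ResultRow is a placeholder for the driver's RequestResultCount protobuf message:

#include <cstdint>
#include <iostream>
#include <unordered_map>
#include <vector>

// Placeholder for the RequestResultCount message filled in by the driver.
struct ResultRow {
  int status_code;
  int64_t count;
};

int main() {
  // Per-status RPC tallies merged from all clients, as in the driver.
  std::unordered_map<int, int64_t> merged_statuses = {{0, 9500}, {14, 500}};

  std::vector<ResultRow> request_results;
  request_results.reserve(merged_statuses.size());
  for (const auto& kv : merged_statuses)
    request_results.push_back({kv.first, kv.second});

  for (const auto& row : request_results)
    std::cout << "status " << row.status_code << ": " << row.count << "\n";
}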
mmm a / tensorflow / lite / experimental / micro / tools / make / third_party_downloads . inc <nl> ppp b / tensorflow / lite / experimental / micro / tools / make / third_party_downloads . inc <nl> LEON_BCC2_MD5 : = " cdf78082be4882da2a92c9baa82fe765 " <nl> TSIM_URL : = " https : / / www . gaisler . com / anonftp / tsim / tsim - eval - 2 . 0 . 63 . tar . gz " <nl> TSIM_MD5 : = " afa0095d3ed989a949e1467f94e41d2f " <nl> <nl> - CMSIS_URL : = " https : / / github . com / ARM - software / CMSIS_5 / archive / 01c7adb7685da540be9297b5a93e6640ea3333ce . zip " <nl> - CMSIS_MD5 : = " 3dec53cc74f1d5d79036952137be5d5e " <nl> + CMSIS_URL : = " https : / / github . com / ARM - software / CMSIS_5 / archive / 5deff575d14ed255616d23b61d7b95f3b6dd19a8 . zip " <nl> + CMSIS_MD5 : = " 3adb1a3d4b7aabfbb40972c609730c9e " <nl> <nl> AM_SDK_URL : = " http : / / s3 . asia . ambiqmicro . com / downloads / AmbiqSuite - Rel2 . 0 . 0 . zip " <nl> AM_SDK_MD5 : = " 70332bc6968602bd85bee600ca81d06f " <nl>
Update CMSIS dependency . Unblocks building CMSIS optimized kernels for 8 - bit integer models .
tensorflow/tensorflow
cf77678285c8411f1827e2831311a50fb966042c
2019-11-13T23:49:33Z
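Each third-party archive in the .inc file is pinned by URL plus MD5, so the download step can reject a stale or tampered archive before the build uses it. A sketch of such a check, assuming a POSIX host with the md5sum tool on PATH; both that assumption and the matches_md5 helper name are mine, not part of the TFLM makefiles:

#include <cstdio>
#include <iostream>
#include <string>

// Returns true when md5sum's output for the file starts with the expected
// 32-character hex digest. Relies on the external md5sum tool (assumption).
bool matches_md5(const std::string& path, const std::string& expected) {
  FILE* p = popen(("md5sum " + path).c_str(), "r");
  if (!p) return false;
  char digest[33] = {0};
  size_t n = fread(digest, 1, 32, p);
  pclose(p);
  return n == 32 && expected == digest;
}

int main() {
  // Digest taken from the CMSIS_MD5 value pinned in the diff above.
  if (!matches_md5("cmsis.zip", "3adb1a3d4b7aabfbb40972c609730c9e")) {
    std::cerr << "checksum mismatch, refusing to use archive\n";
    return 1;
  }
  return 0;
}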
mmm a / patches / chromium / . patches <nl> ppp b / patches / chromium / . patches <nl> fix_undo_redo_broken_in_webviews . patch <nl> fix_account_for_print_preview_disabled_when_printing_to_pdf . patch <nl> web_contents . patch <nl> ui_gtk_public_header . patch <nl> + layoutng_make_hittestresult_localpoint_for_inline_element . patch <nl> new file mode 100644 <nl> index 000000000000 . . e0cdaff2ac29 <nl> mmm / dev / null <nl> ppp b / patches / chromium / layoutng_make_hittestresult_localpoint_for_inline_element . patch <nl> <nl> + From 0000000000000000000000000000000000000000 Mon Sep 17 00 : 00 : 00 2001 <nl> + From : Yoshifumi Inoue < yosin @ chromium . org > <nl> + Date : Fri , 17 Apr 2020 09 : 22 : 24 + 0000 <nl> + Subject : Make HitTestResult : : LocalPoint ( ) for inline element as same as legacy <nl> + layout <nl> + <nl> + This patch changes | NGBoxFragmentPainter : : NodeAtPoint ( ) | to set offset in <nl> + containing block instead of offset in underlying element for inline element as <nl> + legacy layout to make hit testing on inline element with : : after pseudo class <nl> + with adapting | PositionForPoint ( ) | . <nl> + <nl> + The document [ 1 ] contains investigation notes of this CL . <nl> + <nl> + [ 1 ] https : / / bit . ly / 2REZ7P9 Hit Test with : : after <nl> + <nl> + Bug : 1043471 <nl> + Change - Id : I81ada0ccd7bff31a84ce4746785ea83eb175937c <nl> + Reviewed - on : https : / / chromium - review . googlesource . com / c / chromium / src / + / 2151775 <nl> + Commit - Queue : Koji Ishii < kojii @ chromium . org > <nl> + Auto - Submit : Yoshifumi Inoue < yosin @ chromium . org > <nl> + Reviewed - by : Koji Ishii < kojii @ chromium . org > <nl> + Cr - Commit - Position : refs / heads / master @ { # 759982 } <nl> + <nl> pppmmm a / third_party / blink / renderer / core / layout / hit_testing_test . cc <nl> ppp + b / third_party / blink / renderer / core / layout / hit_testing_test . cc <nl> + <nl> + / / found in the LICENSE file . <nl> + <nl> + # include " third_party / blink / renderer / core / css / css_property_names . h " <nl> + + # include " third_party / blink / renderer / core / editing / text_affinity . h " <nl> + # include " third_party / blink / renderer / core / testing / core_unit_test_helper . h " <nl> + <nl> + namespace blink { <nl> + <nl> + - class HitTestingTest : public RenderingTest { } ; <nl> + + class HitTestingTest : public RenderingTest { <nl> + + protected : <nl> + + bool LayoutNGEnabled ( ) const { <nl> + + return RuntimeEnabledFeatures : : LayoutNGEnabled ( ) ; <nl> + + } <nl> + + <nl> + + PositionWithAffinity HitTest ( const PhysicalOffset offset ) { <nl> + + const HitTestRequest hit_request ( HitTestRequest : : kActive ) ; <nl> + + const HitTestLocation hit_location ( offset ) ; <nl> + + HitTestResult hit_result ( hit_request , hit_location ) ; <nl> + + if ( ! GetLayoutView ( ) . HitTest ( hit_location , hit_result ) ) <nl> + + return PositionWithAffinity ( ) ; <nl> + + / / Simulate | PositionWithAffinityOfHitTestResult ( ) | in <nl> + + / / " selection_controller . cc " <nl> + + LayoutObject * const layout_object = <nl> + + hit_result . InnerPossiblyPseudoNode ( ) - > GetLayoutObject ( ) ; <nl> + + if ( ! layout_object ) <nl> + + return PositionWithAffinity ( ) ; <nl> + + return layout_object - > PositionForPoint ( hit_result . LocalPoint ( ) ) ; <nl> + + } <nl> + + } ; <nl> + + <nl> + + / / http : / / crbug . 
com / 1043471 <nl> + + TEST_F ( HitTestingTest , PseudoElementAfter ) { <nl> + + LoadAhem ( ) ; <nl> + + InsertStyleElement ( <nl> + + " body { margin : 0px ; font : 10px / 10px Ahem ; } " <nl> + + " # cd : : after { content : ' XYZ ' ; margin - left : 100px ; } " ) ; <nl> + + SetBodyInnerHTML ( " < div id = ab > ab < span id = cd > cd < / span > < / div > " ) ; <nl> + + const auto & text_ab = * To < Text > ( GetElementById ( " ab " ) - > firstChild ( ) ) ; <nl> + + const auto & text_cd = * To < Text > ( GetElementById ( " cd " ) - > lastChild ( ) ) ; <nl> + + <nl> + + EXPECT_EQ ( PositionWithAffinity ( Position ( text_ab , 0 ) ) , <nl> + + HitTest ( PhysicalOffset ( 5 , 5 ) ) ) ; <nl> + + / / Because of hit testing at " b " , position should be | kDownstream | . <nl> + + EXPECT_EQ ( PositionWithAffinity ( Position ( text_ab , 1 ) , <nl> + + LayoutNGEnabled ( ) ? TextAffinity : : kDownstream <nl> + + : TextAffinity : : kUpstream ) , <nl> + + HitTest ( PhysicalOffset ( 15 , 5 ) ) ) ; <nl> + + EXPECT_EQ ( PositionWithAffinity ( Position ( text_cd , 0 ) ) , <nl> + + HitTest ( PhysicalOffset ( 25 , 5 ) ) ) ; <nl> + + / / Because of hit testing at " d " , position should be | kDownstream | . <nl> + + EXPECT_EQ ( PositionWithAffinity ( Position ( text_cd , 1 ) , <nl> + + LayoutNGEnabled ( ) ? TextAffinity : : kDownstream <nl> + + : TextAffinity : : kUpstream ) , <nl> + + HitTest ( PhysicalOffset ( 35 , 5 ) ) ) ; <nl> + + / / Because of hit testing at right of < span cd > , result position should be <nl> + + / / | kUpstream | . <nl> + + EXPECT_EQ ( PositionWithAffinity ( Position ( text_cd , 2 ) , <nl> + + LayoutNGEnabled ( ) ? TextAffinity : : kUpstream <nl> + + : TextAffinity : : kDownstream ) , <nl> + + HitTest ( PhysicalOffset ( 45 , 5 ) ) ) ; <nl> + + EXPECT_EQ ( PositionWithAffinity ( Position ( text_cd , 2 ) , <nl> + + LayoutNGEnabled ( ) ? TextAffinity : : kUpstream <nl> + + : TextAffinity : : kDownstream ) , <nl> + + HitTest ( PhysicalOffset ( 55 , 5 ) ) ) ; <nl> + + EXPECT_EQ ( PositionWithAffinity ( Position ( text_cd , 2 ) , <nl> + + LayoutNGEnabled ( ) ? TextAffinity : : kUpstream <nl> + + : TextAffinity : : kDownstream ) , <nl> + + HitTest ( PhysicalOffset ( 65 , 5 ) ) ) ; <nl> + + } <nl> + <nl> + TEST_F ( HitTestingTest , OcclusionHitTest ) { <nl> + SetBodyInnerHTML ( R " HTML ( <nl> pppmmm a / third_party / blink / renderer / core / layout / ng / inline / ng_inline_cursor . cc <nl> ppp + b / third_party / blink / renderer / core / layout / ng / inline / ng_inline_cursor . cc <nl> + PositionWithAffinity NGInlineCursor : : PositionForPointInInlineBox ( <nl> + } <nl> + } <nl> + <nl> + + if ( container - > Type ( ) = = NGFragmentItem : : kLine ) { <nl> + + / / There are no inline items to hit in this line box , e . g . < span > with <nl> + + / / size and border . We try in lines before | this | line in the block . <nl> + + / / See editing / selection / last - empty - inline . html <nl> + + NGInlineCursor cursor ; <nl> + + cursor . MoveTo ( * this ) ; <nl> + + const PhysicalOffset point_in_line = <nl> + + point - Current ( ) . OffsetInContainerBlock ( ) ; <nl> + + for ( ; ; ) { <nl> + + cursor . MoveToPreviousLine ( ) ; <nl> + + if ( ! cursor ) <nl> + + break ; <nl> + + const PhysicalOffset adjusted_point = <nl> + + point_in_line + cursor . Current ( ) . OffsetInContainerBlock ( ) ; <nl> + + if ( auto position = cursor . 
PositionForPointInInlineBox ( adjusted_point ) ) <nl> + + return position ; <nl> + + } <nl> + + } <nl> + + <nl> + return PositionWithAffinity ( ) ; <nl> + } <nl> + <nl> pppmmm a / third_party / blink / renderer / core / paint / ng / ng_box_fragment_painter . cc <nl> ppp + b / third_party / blink / renderer / core / paint / ng / ng_box_fragment_painter . cc <nl> + bool NGBoxFragmentPainter : : NodeAtPoint ( const HitTestContext & hit_test , <nl> + if ( fragment . IsInlineBox ( ) ) <nl> + bounds_rect = PhysicalRect ( PixelSnappedIntRect ( bounds_rect ) ) ; <nl> + if ( hit_test . location . Intersects ( bounds_rect ) ) { <nl> + - if ( hit_test . AddNodeToResult ( fragment . NodeForHitTest ( ) , bounds_rect , <nl> + - physical_offset ) ) <nl> + - return true ; <nl> + + / / We set offset in container block instead of offset in | fragment | like <nl> + + / / | NGBoxFragmentPainter : : HitTestTextFragment ( ) | . <nl> + + / / See http : / / crbug . com / 1043471 <nl> + + if ( box_item_ & & box_item_ - > IsInlineBox ( ) ) { <nl> + + if ( hit_test . AddNodeToResult ( <nl> + + fragment . NodeForHitTest ( ) , bounds_rect , <nl> + + physical_offset - box_item_ - > OffsetInContainerBlock ( ) ) ) <nl> + + return true ; <nl> + + } else if ( paint_fragment_ & & <nl> + + paint_fragment_ - > PhysicalFragment ( ) . IsInline ( ) ) { <nl> + + if ( hit_test . AddNodeToResult ( <nl> + + fragment . NodeForHitTest ( ) , bounds_rect , <nl> + + physical_offset - paint_fragment_ - > OffsetInContainerBlock ( ) ) ) <nl> + + return true ; <nl> + + } else { <nl> + + if ( hit_test . AddNodeToResult ( fragment . NodeForHitTest ( ) , bounds_rect , <nl> + + physical_offset ) ) <nl> + + return true ; <nl> + + } <nl> + } <nl> + } <nl> + <nl> pppmmm a / third_party / blink / renderer / core / paint / ng / ng_paint_fragment . cc <nl> ppp + b / third_party / blink / renderer / core / paint / ng / ng_paint_fragment . cc <nl> + PositionWithAffinity NGPaintFragment : : PositionForPointInInlineLevelBox ( <nl> + return child_position . value ( ) ; <nl> + } <nl> + <nl> + + if ( PhysicalFragment ( ) . IsLineBox ( ) ) { <nl> + + / / There are no inline items to hit in this line box , e . g . < span > with <nl> + + / / size and border . We try in lines before | this | line in the block . <nl> + + / / See editing / selection / last - empty - inline . html <nl> + + NGInlineCursor cursor ( * Parent ( ) ) ; <nl> + + cursor . MoveTo ( * this ) ; <nl> + + const PhysicalOffset point_in_line = point - OffsetInContainerBlock ( ) ; <nl> + + for ( ; ; ) { <nl> + + cursor . MoveToPreviousLine ( ) ; <nl> + + if ( ! cursor ) <nl> + + break ; <nl> + + const NGPaintFragment & line = * cursor . CurrentPaintFragment ( ) ; <nl> + + const PhysicalOffset adjusted_point = <nl> + + point_in_line + line . OffsetInContainerBlock ( ) ; <nl> + + if ( auto position = line . PositionForPointInInlineLevelBox ( adjusted_point ) ) <nl> + + return position ; <nl> + + } <nl> + + } <nl> + + <nl> + return PositionWithAffinity ( ) ; <nl> + } <nl> + <nl>
fix : cherry - pick 04dab5a91b61 from chromium ( )
electron/electron
928e23a26368a795a759d10e524c78c1c0a7d23e
2020-04-21T01:33:40Z
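The core of the cherry-picked fix is a coordinate-space change: for inline boxes, NodeAtPoint() now records the hit point relative to the containing block, subtracting the fragment's OffsetInContainerBlock() before the local point reaches PositionForPoint(), matching legacy layout. A toy sketch of that subtraction, where PhysicalOffset is a two-field stand-in for Blink's type and the 100px offset echoes the margin-left in the test above:

#include <iostream>

// Minimal stand-in for Blink's PhysicalOffset.
struct PhysicalOffset {
  float x = 0, y = 0;
};

PhysicalOffset operator-(const PhysicalOffset& a, const PhysicalOffset& b) {
  return {a.x - b.x, a.y - b.y};
}

int main() {
  // Where the inline fragment sits inside its containing block, and where
  // the hit landed.
  PhysicalOffset offset_in_container_block{100, 0};  // e.g. margin-left: 100px
  PhysicalOffset physical_offset{135, 5};

  // Before the fix the recorded local point was fragment-relative; after it,
  // inline boxes report a containing-block-relative point instead.
  PhysicalOffset fragment_local = physical_offset;
  PhysicalOffset block_local = physical_offset - offset_in_container_block;

  std::cout << fragment_local.x << "," << fragment_local.y << " -> "
            << block_local.x << "," << block_local.y << "\n";  // 135,5 -> 35,5
}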
mmm a / js / client / modules / @ arangodb / test - utils . js <nl> ppp b / js / client / modules / @ arangodb / test - utils . js <nl> function performTests ( options , testList , testname , runFn , serverOptions , startS <nl> <nl> while ( first | | options . loopEternal ) { <nl> if ( ! continueTesting ) { <nl> - print ( ' oops ! ' ) ; <nl> - print ( ' Skipping , ' + te + ' server is gone . ' ) ; <nl> + print ( ' oops ! Skipping , ' + te + ' server is gone . ' ) ; <nl> <nl> results [ te ] = { <nl> status : false , <nl>
reduce lines in output : merge two consecutive print ( ) calls into one
arangodb/arangodb
6abef15252701762b4f879b5649b81bd44e759fd
2017-04-26T09:07:20Z
mmm a / etc / override . json <nl> ppp b / etc / override . json <nl> <nl> ] <nl> } , <nl> " Where . ComplexNested " : { <nl> - " end " : " 2015 - 09 - 15T12 : 10 : 35 . 012Z " , <nl> - " max " : 32 . 75997154310515 , <nl> + " end " : " 2015 - 08 - 21T01 : 01 : 56 . 247Z " , <nl> + " max " : 39 . 86797064420749 , <nl> " name " : " Where . ComplexNested " , <nl> - " order " : 1345 , <nl> + " order " : 1011 , <nl> " results " : { <nl> " 1 " : { <nl> " error_values " : [ <nl> <nl> 0 , <nl> 0 <nl> ] , <nl> - " ops_per_sec " : 13 . 095737299033477 , <nl> + " ops_per_sec " : 15 . 651827922691883 , <nl> " ops_per_sec_values " : [ <nl> - 12 . 295154301211847 , <nl> - 13 . 074360624146205 , <nl> - 13 . 104736429003005 , <nl> - 13 . 474821602279148 , <nl> - 13 . 529613538527167 <nl> + 15 . 191939157083253 , <nl> + 15 . 671807746991163 , <nl> + 15 . 787124021647944 , <nl> + 15 . 771620545989547 , <nl> + 15 . 836648141747505 <nl> ] <nl> } , <nl> " 2 " : { <nl> <nl> 0 , <nl> 0 <nl> ] , <nl> - " ops_per_sec " : 23 . 522559685182255 , <nl> + " ops_per_sec " : 27 . 797012083774867 , <nl> " ops_per_sec_values " : [ <nl> - 23 . 288160935921333 , <nl> - 23 . 894938104211157 , <nl> - 23 . 668979849216765 , <nl> - 23 . 51769825370221 , <nl> - 23 . 243021282859818 <nl> + 27 . 941180841861833 , <nl> + 27 . 6256050802494 , <nl> + 27 . 69238186559393 , <nl> + 27 . 901691851736345 , <nl> + 27 . 824200779432825 <nl> ] <nl> } , <nl> " 4 " : { <nl> <nl> 0 , <nl> 0 <nl> ] , <nl> - " ops_per_sec " : 31 . 72194357310742 , <nl> + " ops_per_sec " : 39 . 63527245662377 , <nl> " ops_per_sec_values " : [ <nl> - 30 . 93304709428353 , <nl> - 32 . 235952936299775 , <nl> - 31 . 766984237853816 , <nl> - 31 . 82576087783751 , <nl> - 31 . 84797271926248 <nl> + 39 . 22578162790542 , <nl> + 39 . 67070551290571 , <nl> + 39 . 6664779384325 , <nl> + 39 . 77193466717717 , <nl> + 39 . 84146253669803 <nl> ] <nl> } , <nl> " 8 " : { <nl> <nl> 0 , <nl> 0 <nl> ] , <nl> - " ops_per_sec " : 32 . 75997154310515 , <nl> + " ops_per_sec " : 39 . 86797064420749 , <nl> " ops_per_sec_values " : [ <nl> - 31 . 124252462150686 , <nl> - 33 . 43462133531621 , <nl> - 33 . 36167449853111 , <nl> - 32 . 862681737879136 , <nl> - 33 . 01662768164859 <nl> + 39 . 24212469420574 , <nl> + 40 . 21898748208538 , <nl> + 40 . 04707973764279 , <nl> + 39 . 91539501566288 , <nl> + 39 . 91626629144066 <nl> ] <nl> } , <nl> - " end " : " 2015 - 09 - 15T12 : 05 : 13 . 775Z " , <nl> - " start " : " 2015 - 09 - 15T11 : 59 : 51 . 617Z " <nl> + " end " : " 2015 - 08 - 21T00 : 56 : 24 . 997Z " , <nl> + " start " : " 2015 - 08 - 21T00 : 50 : 52 . 856Z " <nl> } , <nl> - " revision " : " a9c87bd28a3e9c7c28fa60df2d0861a607ce9a7f " , <nl> - " tag " : " 3 . 1 . 8 - Baseline " , <nl> + " revision " : " 7d7f4fb3b6f6a171eacf53384053df0fe728db42 " , <nl> + " tag " : " 3 . 1 . 7 - Baseline " , <nl> " threads " : [ <nl> " 1 " , <nl> " 2 " , <nl> " 4 " , <nl> " 8 " <nl> + ] , <nl> + " ticket " : [ <nl> + " SERVER - 19901 " <nl> ] <nl> } , <nl> " Where . Mixed " : { <nl> <nl> ] <nl> } , <nl> " Where . ReallyBigNestedComparison . Where " : { <nl> - " end " : " 2015 - 09 - 15T12 : 10 : 35 . 012Z " , <nl> - " max " : 90 . 94029488735745 , <nl> + " end " : " 2015 - 08 - 21T01 : 01 : 56 . 247Z " , <nl> + " max " : 172 . 4404401788601 , <nl> " name " : " Where . ReallyBigNestedComparison . Where " , <nl> - " order " : 1345 , <nl> + " order " : 1011 , <nl> " results " : { <nl> " 1 " : { <nl> " error_values " : [ <nl> <nl> 0 , <nl> 0 <nl> ] , <nl> - " ops_per_sec " : 36 . 
68897462484265 , <nl> + " ops_per_sec " : 65 . 40985522330645 , <nl> " ops_per_sec_values " : [ <nl> - 36 . 79064781732484 , <nl> - 36 . 616303329456784 , <nl> - 36 . 662961183687614 , <nl> - 36 . 69321540452974 , <nl> - 36 . 68174538921425 <nl> + 66 . 08003829847296 , <nl> + 65 . 10423167520578 , <nl> + 65 . 35161366524225 , <nl> + 65 . 3268339459805 , <nl> + 65 . 18655853163078 <nl> ] <nl> } , <nl> " 2 " : { <nl> <nl> 0 , <nl> 0 <nl> ] , <nl> - " ops_per_sec " : 66 . 3110214236034 , <nl> + " ops_per_sec " : 120 . 91552048368399 , <nl> " ops_per_sec_values " : [ <nl> - 64 . 9068546815164 , <nl> - 66 . 77218665357533 , <nl> - 66 . 67779953707694 , <nl> - 67 . 6094360435676 , <nl> - 65 . 58883020228073 <nl> + 120 . 67401122607389 , <nl> + 120 . 59294191872554 , <nl> + 119 . 7441307414317 , <nl> + 123 . 39474875734523 , <nl> + 120 . 17176977484361 <nl> ] <nl> } , <nl> " 4 " : { <nl> <nl> 0 , <nl> 0 <nl> ] , <nl> - " ops_per_sec " : 90 . 94029488735745 , <nl> + " ops_per_sec " : 169 . 44134766053267 , <nl> " ops_per_sec_values " : [ <nl> - 89 . 61533165322801 , <nl> - 92 . 29134547852964 , <nl> - 91 . 02272899287034 , <nl> - 90 . 61371373833347 , <nl> - 91 . 15835457382576 <nl> + 168 . 92838573271143 , <nl> + 170 . 16433411348564 , <nl> + 169 . 07058517915317 , <nl> + 169 . 66863177899452 , <nl> + 169 . 37480149831862 <nl> ] <nl> } , <nl> " 8 " : { <nl> <nl> 0 , <nl> 0 <nl> ] , <nl> - " ops_per_sec " : 90 . 54814644162953 , <nl> + " ops_per_sec " : 172 . 4404401788601 , <nl> " ops_per_sec_values " : [ <nl> - 87 . 89904685829055 , <nl> - 90 . 95787560450326 , <nl> - 91 . 2829531956063 , <nl> - 91 . 70717562102396 , <nl> - 90 . 89368092872351 <nl> + 168 . 78590043414823 , <nl> + 171 . 5940371072105 , <nl> + 174 . 40545181476347 , <nl> + 173 . 6075936040897 , <nl> + 173 . 80921793408854 <nl> ] <nl> } , <nl> - " end " : " 2015 - 09 - 15T12 : 10 : 35 . 012Z " , <nl> - " start " : " 2015 - 09 - 15T12 : 05 : 13 . 775Z " <nl> + " end " : " 2015 - 08 - 21T01 : 01 : 56 . 247Z " , <nl> + " start " : " 2015 - 08 - 21T00 : 56 : 24 . 997Z " <nl> } , <nl> - " revision " : " a9c87bd28a3e9c7c28fa60df2d0861a607ce9a7f " , <nl> - " tag " : " 3 . 1 . 8 - Baseline " , <nl> + " revision " : " 7d7f4fb3b6f6a171eacf53384053df0fe728db42 " , <nl> + " tag " : " 3 . 1 . 7 - Baseline " , <nl> " threads " : [ <nl> " 1 " , <nl> " 2 " , <nl> " 4 " , <nl> " 8 " <nl> + ] , <nl> + " ticket " : [ <nl> + " SERVER - 19901 " <nl> ] <nl> } , <nl> " Where . SimpleNested . Where " : { <nl> - " end " : " 2015 - 09 - 15T12 : 10 : 35 . 012Z " , <nl> - " max " : 697 . 0417333446017 , <nl> + " end " : " 2015 - 08 - 21T01 : 01 : 56 . 247Z " , <nl> + " max " : 918 . 8564767605206 , <nl> " name " : " Where . SimpleNested . Where " , <nl> - " order " : 1345 , <nl> + " order " : 1011 , <nl> " results " : { <nl> " 1 " : { <nl> " error_values " : [ <nl> <nl> 0 , <nl> 0 <nl> ] , <nl> - " ops_per_sec " : 241 . 78949945149867 , <nl> + " ops_per_sec " : 300 . 46818108364266 , <nl> " ops_per_sec_values " : [ <nl> - 242 . 3096645599243 , <nl> - 241 . 91499107213727 , <nl> - 241 . 32760171948416 , <nl> - 241 . 75977117407663 , <nl> - 241 . 6354687318711 <nl> + 302 . 76173091721205 , <nl> + 298 . 9870837579817 , <nl> + 299 . 2893376675012 , <nl> + 300 . 94294121834497 , <nl> + 300 . 3598118571735 <nl> ] <nl> } , <nl> " 2 " : { <nl> <nl> 0 , <nl> 0 <nl> ] , <nl> - " ops_per_sec " : 442 . 51704017006796 , <nl> + " ops_per_sec " : 554 . 8372966126933 , <nl> " ops_per_sec_values " : [ <nl> - 444 . 3226878523137 , <nl> - 438 . 24077858383566 , <nl> - 445 . 
66548348809386 , <nl> - 440 . 2825562769243 , <nl> - 444 . 0736946491718 <nl> + 557 . 3943214953682 , <nl> + 550 . 9657299315983 , <nl> + 557 . 2586791989552 , <nl> + 553 . 6726118277962 , <nl> + 554 . 895140609749 <nl> ] <nl> } , <nl> " 4 " : { <nl> <nl> 0 , <nl> 0 <nl> ] , <nl> - " ops_per_sec " : 663 . 515405092767 , <nl> + " ops_per_sec " : 860 . 106481962812 , <nl> " ops_per_sec_values " : [ <nl> - 663 . 614889475 , <nl> - 665 . 5057045625332 , <nl> - 662 . 612224956052 , <nl> - 662 . 6992565348955 , <nl> - 663 . 1449499353544 <nl> + 860 . 8819891559986 , <nl> + 860 . 4493612979376 , <nl> + 862 . 0822119113818 , <nl> + 856 . 7087631951839 , <nl> + 860 . 4100842535579 <nl> ] <nl> } , <nl> " 8 " : { <nl> <nl> 0 , <nl> 0 <nl> ] , <nl> - " ops_per_sec " : 697 . 0417333446017 , <nl> + " ops_per_sec " : 918 . 8564767605206 , <nl> " ops_per_sec_values " : [ <nl> - 696 . 3778590882589 , <nl> - 697 . 4745161594557 , <nl> - 692 . 9023924058854 , <nl> - 698 . 3653115499629 , <nl> - 700 . 0885875194458 <nl> + 912 . 5172468552668 , <nl> + 919 . 3331515003613 , <nl> + 922 . 877959113335 , <nl> + 916 . 8946611615779 , <nl> + 922 . 6593651720623 <nl> ] <nl> } , <nl> - " end " : " 2015 - 09 - 15T11 : 49 : 48 . 151Z " , <nl> - " start " : " 2015 - 09 - 15T11 : 47 : 47 . 475Z " <nl> + " end " : " 2015 - 08 - 21T00 : 40 : 49 . 247Z " , <nl> + " start " : " 2015 - 08 - 21T00 : 38 : 46 . 818Z " <nl> } , <nl> - " revision " : " a9c87bd28a3e9c7c28fa60df2d0861a607ce9a7f " , <nl> - " tag " : " 3 . 1 . 8 - Baseline " , <nl> + " revision " : " 7d7f4fb3b6f6a171eacf53384053df0fe728db42 " , <nl> + " tag " : " 3 . 1 . 7 - Baseline " , <nl> " threads " : [ <nl> " 1 " , <nl> " 2 " , <nl> " 4 " , <nl> " 8 " <nl> + ] , <nl> + " ticket " : [ <nl> + " SERVER - 19901 " <nl> ] <nl> } <nl> } <nl>
SERVER - 20564 reset overrides for $ where tests
mongodb/mongo
7a4af12aa859f5567550f715a35f5935f35a810a
2015-11-18T16:31:34Z
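For reference, within each of these override entries the per-thread "ops_per_sec" appears to be the arithmetic mean of its five "ops_per_sec_values" samples, and the top-level "max" is the largest of those means across thread counts (39.86797064420749 above is exactly the 8-thread mean). A minimal C++ sketch of that relationship, using the 1-thread samples copied from the Where.ComplexNested entry:

#include <cstdio>
#include <numeric>
#include <vector>

int main() {
  // 1-thread samples from the Where.ComplexNested override entry.
  const std::vector<double> samples = {
      15.191939157083253, 15.671807746991163, 15.787124021647944,
      15.771620545989547, 15.836648141747505};
  // The mean of the samples reproduces the stored ops_per_sec field.
  const double mean =
      std::accumulate(samples.begin(), samples.end(), 0.0) / samples.size();
  std::printf("ops_per_sec = %.15f\n", mean);  // 15.651827922691882
  return 0;
}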
mmm a / python / caffe / _caffe . cpp <nl> ppp b / python / caffe / _caffe . cpp <nl> BOOST_PYTHON_MODULE ( _caffe ) { <nl> . def ( " _set_input_arrays " , & Net_SetInputArrays , <nl> bp : : with_custodian_and_ward < 1 , 2 , bp : : with_custodian_and_ward < 1 , 3 > > ( ) ) <nl> . def ( " save " , & Net_Save ) ; <nl> + bp : : register_ptr_to_python < shared_ptr < Net < Dtype > > > ( ) ; <nl> <nl> bp : : class_ < Blob < Dtype > , shared_ptr < Blob < Dtype > > , boost : : noncopyable > ( <nl> " Blob " , bp : : no_init ) <nl> BOOST_PYTHON_MODULE ( _caffe ) { <nl> NdarrayCallPolicies ( ) ) ) <nl> . add_property ( " diff " , bp : : make_function ( & Blob < Dtype > : : mutable_cpu_diff , <nl> NdarrayCallPolicies ( ) ) ) ; <nl> + bp : : register_ptr_to_python < shared_ptr < Blob < Dtype > > > ( ) ; <nl> <nl> bp : : class_ < Layer < Dtype > , shared_ptr < PythonLayer < Dtype > > , <nl> boost : : noncopyable > ( " Layer " , bp : : init < const LayerParameter & > ( ) ) <nl> BOOST_PYTHON_MODULE ( _caffe ) { <nl> . def ( " step " , & Solver < Dtype > : : Step ) <nl> . def ( " restore " , & Solver < Dtype > : : Restore ) <nl> . def ( " snapshot " , & Solver < Dtype > : : Snapshot ) ; <nl> + bp : : register_ptr_to_python < shared_ptr < Solver < Dtype > > > ( ) ; <nl> <nl> bp : : class_ < SGDSolver < Dtype > , bp : : bases < Solver < Dtype > > , <nl> shared_ptr < SGDSolver < Dtype > > , boost : : noncopyable > ( <nl>
Merge pull request from errord / fix - boost - shared_ptr - caffe - Blob - float - no - register
BVLC/caffe
c2354b98609916dd9176fbacde4e93184a6996d0
2016-03-04T00:42:00Z
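The three register_ptr_to_python calls added above teach Boost.Python how to convert shared_ptr<Net<Dtype>>, shared_ptr<Blob<Dtype>>, and shared_ptr<Solver<Dtype>> return values into Python objects; when no such converter is registered, a wrapped function returning one of these raises "TypeError: No to_python (by-value) converter found". A minimal self-contained sketch of the same registration pattern (Widget and make_widget are hypothetical stand-ins, not Caffe types):

#include <boost/make_shared.hpp>
#include <boost/noncopyable.hpp>
#include <boost/python.hpp>
#include <boost/shared_ptr.hpp>

namespace bp = boost::python;

struct Widget {
  int value;
  explicit Widget(int v) : value(v) {}
};

// A factory returning a shared_ptr, as Caffe's accessors do for nets and blobs.
boost::shared_ptr<Widget> make_widget() { return boost::make_shared<Widget>(7); }

BOOST_PYTHON_MODULE(example) {
  bp::class_<Widget, boost::shared_ptr<Widget>, boost::noncopyable>(
      "Widget", bp::init<int>())
      .def_readonly("value", &Widget::value);
  // Registers the boost::shared_ptr<Widget> -> Python conversion; without it,
  // calling make_widget() from Python can fail with the TypeError above.
  bp::register_ptr_to_python<boost::shared_ptr<Widget>>();

  bp::def("make_widget", &make_widget);
}

After building the module, `import example; example.make_widget().value` should return 7 from Python.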
mmm a / drivers / gles3 / rasterizer_scene_gles3 . cpp <nl> ppp b / drivers / gles3 / rasterizer_scene_gles3 . cpp <nl> bool RasterizerSceneGLES3 : : _setup_material ( RasterizerStorageGLES3 : : Material * p_m <nl> <nl> if ( state . current_depth_draw ! = p_material - > shader - > spatial . depth_draw_mode ) { <nl> switch ( p_material - > shader - > spatial . depth_draw_mode ) { <nl> - case RasterizerStorageGLES3 : : Shader : : Spatial : : DEPTH_DRAW_ALPHA_PREPASS : <nl> + case RasterizerStorageGLES3 : : Shader : : Spatial : : DEPTH_DRAW_ALPHA_PREPASS : { <nl> + glDepthMask ( GL_TRUE ) ; <nl> + / / If some transparent objects write to depth , we need to re - copy depth texture when we need it <nl> + if ( p_alpha_pass & & ! state . used_depth_prepass ) { <nl> + state . prepared_depth_texture = false ; <nl> + } <nl> + } break ; <nl> case RasterizerStorageGLES3 : : Shader : : Spatial : : DEPTH_DRAW_OPAQUE : { <nl> <nl> glDepthMask ( ! p_alpha_pass ) ; <nl> } break ; <nl> case RasterizerStorageGLES3 : : Shader : : Spatial : : DEPTH_DRAW_ALWAYS : { <nl> glDepthMask ( GL_TRUE ) ; <nl> + / / If some transparent objects write to depth , we need to re - copy depth texture when we need it <nl> + if ( p_alpha_pass ) { <nl> + state . prepared_depth_texture = false ; <nl> + } <nl> } break ; <nl> case RasterizerStorageGLES3 : : Shader : : Spatial : : DEPTH_DRAW_NEVER : { <nl> glDepthMask ( GL_FALSE ) ; <nl> void RasterizerSceneGLES3 : : render_scene ( const Transform & p_cam_transform , const <nl> return ; <nl> } <nl> <nl> + if ( env & & ( env - > dof_blur_far_enabled | | env - > dof_blur_near_enabled ) & & storage - > frame . current_rt & & storage - > frame . current_rt - > buffers . active ) <nl> + _prepare_depth_texture ( ) ; <nl> _post_process ( env , p_cam_projection ) ; <nl> / / Needed only for debugging <nl> / * if ( shadow_atlas & & storage - > frame . current_rt ) { <nl>
Merge pull request from raphael10241024 / fix_dof
godotengine/godot
afecc0bc2283dfbfbce70ae95c28e0ed4ecd4160
2019-08-16T21:20:22Z
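The fix above is a cache-invalidation flag: the post-process depth-of-field blur samples a copied depth texture, so whenever a transparent-pass material also writes depth (DEPTH_DRAW_ALPHA_PREPASS with no depth prepass, or DEPTH_DRAW_ALWAYS), state.prepared_depth_texture is cleared and _prepare_depth_texture() re-copies depth before _post_process runs. A minimal sketch of that dirty-flag pattern, with simplified names (SceneState and both functions below are illustrative, not Godot's actual API):

// Tracks whether the copied depth texture still matches the framebuffer.
struct SceneState {
  bool prepared_depth_texture = false;
};

// Call whenever a material in the alpha pass writes depth: the cached copy
// no longer matches the framebuffer contents, so mark it stale.
void on_transparent_depth_write(SceneState &state) {
  state.prepared_depth_texture = false;
}

// Call before any effect (e.g. DOF blur) samples scene depth; the copy is
// redone lazily, and only when a writer invalidated it.
void prepare_depth_texture(SceneState &state) {
  if (!state.prepared_depth_texture) {
    // ... copy the depth attachment here (glBlitFramebuffer or similar) ...
    state.prepared_depth_texture = true;
  }
}

The same idea generalizes to any cached derived resource: writers mark the cache stale, and readers rebuild it lazily just before use.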
mmm a / include / osquery / database / db_handle . h <nl> ppp b / include / osquery / database / db_handle . h <nl> extern const std : : string kEvents ; <nl> / / DBHandle RAII singleton <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + class DBHandle ; <nl> + typedef std : : shared_ptr < DBHandle > DBHandleRef ; <nl> + <nl> / * * <nl> * @ brief RAII singleton around RocksDB database access . <nl> * <nl> class DBHandle { <nl> * <nl> * @ return a shared pointer to an instance of DBHandle <nl> * / <nl> - static std : : shared_ptr < DBHandle > getInstance ( ) ; <nl> + static DBHandleRef getInstance ( ) ; <nl> <nl> / * * <nl> * @ brief Check the sanity of the database configuration options <nl> class DBHandle { <nl> * <nl> * @ return a shared pointer to an instance of DBHandle <nl> * / <nl> - static std : : shared_ptr < DBHandle > getInstanceAtPath ( const std : : string & path ) ; <nl> + static DBHandleRef getInstanceAtPath ( const std : : string & path ) ; <nl> <nl> / * * <nl> * @ brief A method which gets you an in - memory RocksDB instance . <nl> class DBHandle { <nl> * <nl> * @ return a shared pointer to an instance of DBHandle <nl> * / <nl> - static std : : shared_ptr < DBHandle > getInstanceInMemory ( ) ; <nl> + static DBHandleRef getInstanceInMemory ( ) ; <nl> <nl> / * * <nl> * @ brief A method which allows you to configure various aspects of RocksDB <nl> class DBHandle { <nl> * <nl> * @ return a shared pointer to an instance of DBHandle <nl> * / <nl> - static std : : shared_ptr < DBHandle > getInstance ( const std : : string & path , <nl> - bool in_memory ) ; <nl> + static DBHandleRef getInstance ( const std : : string & path , bool in_memory ) ; <nl> <nl> / * * <nl> * @ brief Private helper around accessing the column family handle for a <nl> mmm a / include / osquery / database / query . h <nl> ppp b / include / osquery / database / query . h <nl> <nl> # include < string > <nl> # include < vector > <nl> <nl> - # include < osquery / core . h > <nl> + # include < osquery / status . h > <nl> # include < osquery / database / db_handle . h > <nl> # include < osquery / database / results . h > <nl> <nl> class Query { <nl> * Given a query , this constructor calculates the value of columnFamily_ , <nl> * which can be accessed via the getColumnFamilyName getter method . <nl> * <nl> - * @ param q a SheduledQuery struct which represents the query which <nl> - * you would like to interact with <nl> + * @ param q a SheduledQuery struct <nl> * / <nl> - explicit Query ( const std : : string & name , ScheduledQuery q ) <nl> + explicit Query ( const std : : string & name , const ScheduledQuery & q ) <nl> : query_ ( q ) , name_ ( name ) { } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> class Query { <nl> * This method retrieves the data from RocksDB and returns the data in a <nl> * HistoricalQueryResults struct . 
<nl> * <nl> - * @ param hQR a reference to a HistoricalQueryResults struct which will be <nl> - * populated with results if the osquery : : Status indicates the operation was <nl> - * successful <nl> + * @ param hQR the output HistoricalQueryResults struct <nl> * <nl> - * @ return an instance of osquery : : Status indicating the success or failure <nl> - * of the operation <nl> + * @ return the success or failure of the operation <nl> * / <nl> - Status getHistoricalQueryResults ( HistoricalQueryResults & hQR ) ; <nl> + / / Status getHistoricalQueryResults ( HistoricalQueryResults & hQR ) ; <nl> + Status getPreviousQueryResults ( QueryData & results ) ; <nl> <nl> private : <nl> / * * <nl> * @ brief Serialize the data in RocksDB into a useful data structure using a <nl> * custom database handle <nl> * <nl> - * This method is the same as getHistoricalQueryResults ( ) , but with the <nl> + * This method is the same as getHistoricalQueryResults , but with the <nl> * addition of a parameter which allows you to pass a custom RocksDB <nl> - * database handle . This version of getHistoricalQueryResults should only be <nl> - * used internally and by unit tests . <nl> - * <nl> - * @ param hQR a reference to a HistoricalQueryResults struct which will be <nl> - * populated with results if the osquery : : Status indicates the operation was <nl> - * successful @ param db the RocksDB database handle to use to acquire the <nl> - * relevant data <nl> + * database handle . <nl> * <nl> + * @ param hQR the output HistoricalQueryResults struct <nl> * @ param db a shared pointer to a custom DBHandle <nl> * <nl> - * @ return an instance of osquery : : Status indicating the success or failure <nl> - * of the operation <nl> - * <nl> + * @ return the success or failure of the operation <nl> * @ see getHistoricalQueryResults <nl> * / <nl> - Status getHistoricalQueryResults ( HistoricalQueryResults & hQR , <nl> - std : : shared_ptr < DBHandle > db ) ; <nl> + / / Status getHistoricalQueryResults ( HistoricalQueryResults & hQR , <nl> + / / std : : shared_ptr < DBHandle > db ) ; <nl> + Status getPreviousQueryResults ( QueryData & results , DBHandleRef db ) ; <nl> <nl> public : <nl> / * * <nl> class Query { <nl> * @ param db a custom RocksDB database handle <nl> * <nl> * @ return a vector containing the string names of all scheduled queries <nl> - * which currently exist in the database <nl> * <nl> * @ see getStoredQueryNames ( ) <nl> * / <nl> - static std : : vector < std : : string > getStoredQueryNames ( <nl> - std : : shared_ptr < DBHandle > db ) ; <nl> + static std : : vector < std : : string > getStoredQueryNames ( DBHandleRef db ) ; <nl> <nl> public : <nl> / * * <nl> * @ brief Accessor method for checking if a given scheduled query exists in <nl> * the database <nl> * <nl> - * @ return a boolean indicating whether or not the scheduled query which is <nl> - * being operated on already exists in the database <nl> + * @ return does the scheduled query which is already exists in the database <nl> * / <nl> bool isQueryNameInDatabase ( ) ; <nl> <nl> class Query { <nl> * <nl> * @ param db a custom RocksDB database handle <nl> * <nl> - * @ return a boolean indicating whether or not the scheduled query which is <nl> - * being operated on already exists in the database <nl> + * @ return does the scheduled query which is already exists in the database <nl> * / <nl> - bool isQueryNameInDatabase ( std : : shared_ptr < DBHandle > db ) ; <nl> + bool isQueryNameInDatabase ( DBHandleRef db ) ; <nl> <nl> public : <nl> / * * <nl> 
class Query { <nl> * @ return an instance of osquery : : Status indicating the success or failure <nl> * of the operation <nl> * / <nl> - Status addNewResults ( const QueryData & qd , int unix_time ) ; <nl> + Status addNewResults ( const QueryData & qd ) ; <nl> <nl> private : <nl> / * * <nl> class Query { <nl> * @ return an instance of osquery : : Status indicating the success or failure <nl> * of the operation <nl> * / <nl> - Status addNewResults ( const QueryData & qd , <nl> - int unix_time , <nl> - std : : shared_ptr < DBHandle > db ) ; <nl> + Status addNewResults ( const QueryData & qd , DBHandleRef db ) ; <nl> <nl> public : <nl> / * * <nl> - * @ brief Add a new set of results to the persistant storage and get back <nl> - * the diff results . <nl> + * @ brief Add a new set of results to the persistent storage and get back <nl> + * the differential results . <nl> * <nl> - * Given the results of the execution of a scheduled query , add the results <nl> + * Given the results of an execution of a scheduled query , add the results <nl> * to the database using addNewResults and get back a data structure <nl> * indicating what rows in the query ' s results have changed . <nl> * <nl> - * @ param qd the QueryData object , which has the results of the query which <nl> - * you would like to store <nl> - * @ param dr a reference to a DiffResults object , which will be populated <nl> - * with the difference of the execution which is currently in the database <nl> - * and the execution you just put in the database <nl> - * @ param unix_time the time that the query was executed <nl> + * @ param qd the QueryData object containing query results to store <nl> + * @ param dr an output to a DiffResults object populated based on last run <nl> * <nl> - * @ return an instance of osquery : : Status indicating the success or failure <nl> - * of the operation <nl> + * @ return the success or failure of the operation <nl> * / <nl> - Status addNewResults ( const QueryData & qd , DiffResults & dr , int unix_time ) ; <nl> + Status addNewResults ( const QueryData & qd , DiffResults & dr ) ; <nl> <nl> private : <nl> / * * <nl> - * @ brief Add a new set of results to the persistant storage and get back <nl> - * the diff results , using a custom database handle . <nl> + * @ brief Add a new set of results to the persistent storage and get back <nl> + * the differential results , using a custom database handle . <nl> * <nl> - * This method is the same as addNewResults ( ) , but with the addition of a <nl> + * This method is the same as Query : : addNewResults , but with the addition of a <nl> * parameter which allows you to pass a custom RocksDB database handle <nl> * <nl> - * @ param qd the QueryData object , which has the results of the query which <nl> - * you would like to store <nl> - * @ param dr a reference to a DiffResults object , which will be populated <nl> - * with the difference of the execution which is currently in the database <nl> - * and the execution you just put in the database <nl> - * @ param calculate_diff a boolean indicating whether or not you ' d like to <nl> - * calculate the diff result to be stored in the dr parameter . 
<nl> - * @ param unix_time the time that the query was executed <nl> + * @ param qd the QueryData object containing query results to store <nl> + * @ param dr an output to a DiffResults object populated based on last run <nl> * <nl> - * @ return an instance of osquery : : Status indicating the success or failure <nl> - * of the operation <nl> + * @ return the success or failure of the operation <nl> * / <nl> Status addNewResults ( const QueryData & qd , <nl> DiffResults & dr , <nl> bool calculate_diff , <nl> - int unix_time , <nl> - std : : shared_ptr < DBHandle > db ) ; <nl> + DBHandleRef db ) ; <nl> <nl> public : <nl> / * * <nl> * @ brief A getter for the most recent result set for a scheduled query <nl> * <nl> - * @ param qd the QueryData object which will be populated if all operations <nl> - * are successful <nl> + * @ param qd the output QueryData object <nl> * <nl> - * @ return an instance of osquery : : Status indicating the success or failure <nl> - * of the operation <nl> + * @ return the success or failure of the operation <nl> * / <nl> - osquery : : Status getCurrentResults ( QueryData & qd ) ; <nl> + Status getCurrentResults ( QueryData & qd ) ; <nl> <nl> private : <nl> / * * <nl> * @ brief A getter for the most recent result set for a scheduled query , <nl> * but with the addition of a parameter which allows you to pass a custom <nl> - * RocksDB database handle <nl> + * RocksDB database handle . <nl> * <nl> - * This method is the same as getCurrentResults ( ) , but with addition of a <nl> - * parameter which allows you to pass a custom RocksDB database handle <nl> + * This method is the same as Query : : getCurrentResults , but with addition of a <nl> + * parameter which allows you to pass a custom RocksDB database handle . <nl> * <nl> - * @ param qd the QueryData object which will be populated if all operations <nl> - * are successful <nl> + * @ param qd the output QueryData object <nl> * @ param db a custom RocksDB database handle <nl> * <nl> - * @ return an instance of osquery : : Status indicating the success or failure <nl> - * of the operation <nl> + * @ return the success or failure of the operation <nl> * / <nl> - Status getCurrentResults ( QueryData & qd , std : : shared_ptr < DBHandle > db ) ; <nl> + Status getCurrentResults ( QueryData & qd , DBHandleRef db ) ; <nl> <nl> private : <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> class Query { <nl> FRIEND_TEST ( QueryTests , test_is_query_name_in_database ) ; <nl> FRIEND_TEST ( QueryTests , test_get_stored_query_names ) ; <nl> FRIEND_TEST ( QueryTests , test_get_executions ) ; <nl> - FRIEND_TEST ( QueryTests , test_get_current_results ) ; <nl> - FRIEND_TEST ( QueryTests , test_get_historical_query_results ) ; <nl> + FRIEND_TEST ( QueryTests , test_get_query_results ) ; <nl> FRIEND_TEST ( QueryTests , test_query_name_not_found_in_db ) ; <nl> } ; <nl> } <nl> mmm a / include / osquery / database / results . h <nl> ppp b / include / osquery / database / results . h <nl> <nl> <nl> # include < osquery / status . 
h > <nl> <nl> + namespace pt = boost : : property_tree ; <nl> + <nl> namespace osquery { <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> typedef std : : map < std : : string , RowData > Row ; <nl> * @ brief Serialize a Row into a property tree <nl> * <nl> * @ param r the Row to serialize <nl> - * @ param tree a reference to a property tree which , if all operations are <nl> - * completed successfully , the contents of Row will be serialized into <nl> + * @ param tree the output property tree <nl> * <nl> - * @ return an instance of osquery : : Status , indicating the success or failure <nl> - * of the operation <nl> + * @ return Status indicating the success or failure of the operation <nl> * / <nl> Status serializeRow ( const Row & r , boost : : property_tree : : ptree & tree ) ; <nl> <nl> Status serializeRow ( const Row & r , boost : : property_tree : : ptree & tree ) ; <nl> * @ brief Serialize a Row object into a JSON string <nl> * <nl> * @ param r the Row to serialize <nl> - * @ param json a reference to a string which , if all operations are completed <nl> - * successfully , the contents of Row will be serialized into <nl> + * @ param json the output JSON string <nl> * <nl> - * @ return an instance of osquery : : Status , indicating the success or failure <nl> - * of the operation <nl> + * @ return Status indicating the success or failure of the operation <nl> * / <nl> Status serializeRowJSON ( const Row & r , std : : string & json ) ; <nl> <nl> + / * * <nl> + * @ brief Deserialize a Row object from a property tree <nl> + * <nl> + * @ param tree the input property tree <nl> + * @ param r the output Row structure <nl> + * <nl> + * @ return Status indicating the success or failure of the operation <nl> + * / <nl> Status deserializeRow ( const boost : : property_tree : : ptree & tree , Row & r ) ; <nl> + <nl> + / * * <nl> + * @ brief Deserialize a Row object from a JSON string <nl> + * <nl> + * @ param json the input JSON string <nl> + * @ param r the output Row structure <nl> + * <nl> + * @ return Status indicating the success or failure of the operation <nl> + * / <nl> Status deserializeRowJSON ( const std : : string & json , Row & r ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> typedef std : : vector < Row > QueryData ; <nl> * @ brief Serialize a QueryData object into a property tree <nl> * <nl> * @ param q the QueryData to serialize <nl> - * @ param tree a reference to a property tree which , if all operations are <nl> - * completed successfully , the contents of QueryData will be serialized into <nl> + * @ param tree the output property tree <nl> * <nl> - * @ return an instance of osquery : : Status , indicating the success or failure <nl> - * of the operation <nl> + * @ return Status indicating the success or failure of the operation <nl> * / <nl> - Status serializeQueryData ( const QueryData & q , <nl> - boost : : property_tree : : ptree & tree ) ; <nl> + Status serializeQueryData ( const QueryData & q , pt : : ptree & tree ) ; <nl> + <nl> + / * * <nl> + * @ brief Serialize a QueryData object into a JSON string <nl> + * <nl> + * @ param q the QueryData to serialize <nl> + * @ param json the output JSON string <nl> + * <nl> + * @ return Status indicating the success or failure of the operation <nl> + * / <nl> + Status 
serializeQueryDataJSON ( const QueryData & q , std : : string & json ) ; <nl> + <nl> + Status deserializeQueryData ( const pt : : ptree & tree , QueryData & qd ) ; <nl> + Status deserializeQueryDataJSON ( const std : : string & json , QueryData & qd ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / DiffResults <nl> struct DiffResults { <nl> * @ brief Serialize a DiffResults object into a property tree <nl> * <nl> * @ param d the DiffResults to serialize <nl> - * @ param tree a reference to a property tree which , if all operations are <nl> - * completed successfully , the contents of DiffResults will be serialized into <nl> + * @ param tree the output property tree <nl> * <nl> - * @ return an instance of osquery : : Status , indicating the success or failure <nl> - * of the operation <nl> + * @ return Status indicating the success or failure of the operation <nl> * / <nl> - Status serializeDiffResults ( const DiffResults & d , <nl> - boost : : property_tree : : ptree & tree ) ; <nl> + Status serializeDiffResults ( const DiffResults & d , pt : : ptree & tree ) ; <nl> <nl> / * * <nl> * @ brief Serialize a DiffResults object into a JSON string <nl> * <nl> * @ param d the DiffResults to serialize <nl> - * @ param json a reference to a string which , if all operations are completed <nl> - * successfully , the contents of DiffResults will be serialized into <nl> + * @ param json the output JSON string <nl> * <nl> * @ return an instance of osquery : : Status , indicating the success or failure <nl> * of the operation <nl> Status serializeDiffResultsJSON ( const DiffResults & d , std : : string & json ) ; <nl> * / <nl> DiffResults diff ( const QueryData & old_ , const QueryData & new_ ) ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / HistoricalQueryResults <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - / * * <nl> - * @ brief A representation of scheduled query ' s historical results on disk <nl> - * <nl> - * In practice , a HistoricalQueryResults object is generated after inspecting <nl> - * the persistent data storage . <nl> - * / <nl> - struct HistoricalQueryResults { <nl> - / * * <nl> - * @ brief the most recent results in the database <nl> - * <nl> - * mostRecentResults - > first is the timestamp of the most recent results and <nl> - * mostRecentResults - > second is the query result data of the most recent <nl> - * / <nl> - std : : pair < int , QueryData > mostRecentResults ; <nl> - <nl> - / / / equals operator <nl> - bool operator = = ( const HistoricalQueryResults & comp ) const { <nl> - return ( comp . mostRecentResults = = mostRecentResults ) ; <nl> - } <nl> - <nl> - / / / not equals operator <nl> - bool operator ! = ( const HistoricalQueryResults & comp ) const { <nl> - return ! 
( * this = = comp ) ; <nl> - } <nl> - } ; <nl> - <nl> - / * * <nl> - * @ brief Serialize a HistoricalQueryResults object into a property tree <nl> - * <nl> - * @ param r the HistoricalQueryResults to serialize <nl> - * @ param tree a reference to a property tree which , if all operations are <nl> - * completed successfully , the contents of HistoricalQueryResults will be <nl> - * serialized into <nl> - * <nl> - * @ return an instance of osquery : : Status , indicating the success or failure <nl> - * of the operation <nl> - * / <nl> - Status serializeHistoricalQueryResults ( const HistoricalQueryResults & r , <nl> - boost : : property_tree : : ptree & tree ) ; <nl> - <nl> - / * * <nl> - * @ brief Serialize a HistoricalQueryResults object into a JSON string <nl> - * <nl> - * @ param r the HistoricalQueryResults to serialize <nl> - * @ param json a reference to a string which , if all operations are completed <nl> - * successfully , the contents of HistoricalQueryResults will be serialized <nl> - * into <nl> - * <nl> - * @ return an instance of osquery : : Status , indicating the success or failure <nl> - * of the operation <nl> - * / <nl> - Status serializeHistoricalQueryResultsJSON ( const HistoricalQueryResults & r , <nl> - std : : string & json ) ; <nl> - <nl> - / * * <nl> - * @ brief Deserialize a property tree into a HistoricalQueryResults object <nl> - * <nl> - * @ param tree a property tree which contains a serialized <nl> - * HistoricalQueryResults <nl> - * @ param r a reference to a HistoricalQueryResults object which , if all <nl> - * operations are completed successfully , the contents of tree will be <nl> - * serialized into <nl> - * <nl> - * @ return an instance of osquery : : Status , indicating the success or failure <nl> - * of the operation <nl> - * / <nl> - Status deserializeHistoricalQueryResults ( <nl> - const boost : : property_tree : : ptree & tree , HistoricalQueryResults & r ) ; <nl> - <nl> - / * * <nl> - * @ brief Deserialize JSON into a HistoricalQueryResults object <nl> - * <nl> - * @ param json a string which contains a serialized HistoricalQueryResults <nl> - * @ param r a reference to a HistoricalQueryResults object which , if all <nl> - * operations are completed successfully , the contents of json will be <nl> - * serialized into <nl> - * <nl> - * @ return an instance of osquery : : Status , indicating the success or failure <nl> - * of the operation <nl> - * / <nl> - Status deserializeHistoricalQueryResultsJSON ( const std : : string & json , <nl> - HistoricalQueryResults & r ) ; <nl> - <nl> / * * <nl> * @ brief Add a Row to a QueryData if the Row hasn ' t appeared in the QueryData <nl> * already <nl> Status deserializeHistoricalQueryResultsJSON ( const std : : string & json , <nl> * @ param q the QueryData list to append to <nl> * @ param r the Row to add to q <nl> * <nl> - * @ return true if the Row was added to the QueryData , false if it wasn ' t <nl> + * @ return true if the Row was added to the QueryData , false if it was not <nl> * / <nl> bool addUniqueRowToQueryData ( QueryData & q , const Row & r ) ; <nl> <nl> bool addUniqueRowToQueryData ( QueryData & q , const Row & r ) ; <nl> * @ param oldData the old QueryData to copy <nl> * @ param newData the new escaped QueryData object <nl> * / <nl> - void escapeQueryData ( const osquery : : QueryData & oldData , osquery : : QueryData & newData ) ; <nl> + void escapeQueryData ( const QueryData & oldData , QueryData & newData ) ; <nl> <nl> / * * <nl> * @ brief represents the relevant parameters of a scheduled 
query . <nl> struct ScheduledQuery { <nl> int interval ; <nl> / / / A temporary splayed internal . <nl> int splayed_interval ; <nl> + / / / Set of query options . <nl> + std : : map < std : : string , bool > options ; <nl> <nl> / / / equals operator <nl> bool operator = = ( const ScheduledQuery & comp ) const { <nl> struct ScheduledQuery { <nl> } ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / ScheduledQueryLogItem <nl> + / / QueryLogItem <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> / * * <nl> - * @ brief A data structure which represents data to log in the event of an <nl> - * operating system state change <nl> + * @ brief Query results from a schedule , snapshot , or ad - hoc execution . <nl> * <nl> * When a scheduled query yields new results , we need to log that information <nl> - * to our upstream logging receiver . The data that needs to be logged is the <nl> - * entire DiffResults set as well as some additional metadata . <nl> + * to our upstream logging receiver . A QueryLogItem contains metadata and <nl> + * results in potentially - differential form for a logger . <nl> * / <nl> - struct ScheduledQueryLogItem { <nl> - / / / The data which was changed as a result of the scheduled query <nl> - DiffResults diffResults ; <nl> + struct QueryLogItem { <nl> + / / / Differential results from the query , only added is filled in for snapshots <nl> + / / / and ad - hoc queries . <nl> + DiffResults results ; <nl> <nl> - / / / The name of the scheduled query <nl> + / / / The name of the scheduled query . <nl> std : : string name ; <nl> <nl> - / / / The identifier ( hostname , or uuid ) of the host on which the query was <nl> - / / / executed <nl> - std : : string hostIdentifier ; <nl> + / / / The identifier ( hostname , or uuid ) of the host <nl> + std : : string identifier ; <nl> <nl> - / / / The time that the query was executed , in unix time <nl> - int unixTime ; <nl> + / / / The time that the query was executed , seconds as UNIX time . <nl> + int time ; <nl> <nl> - / / / The time that the query was executed , in ASCII <nl> - std : : string calendarTime ; <nl> + / / / The time that the query was executed , an ASCII string . <nl> + std : : string calendar_time ; <nl> <nl> / / / equals operator <nl> - bool operator = = ( const ScheduledQueryLogItem & comp ) const { <nl> - return ( comp . diffResults = = diffResults ) & & ( comp . name = = name ) ; <nl> + bool operator = = ( const QueryLogItem & comp ) const { <nl> + return ( comp . results = = results ) & & ( comp . name = = name ) ; <nl> } <nl> <nl> / / / not equals operator <nl> - bool operator ! = ( const ScheduledQueryLogItem & comp ) const { <nl> - return ! ( * this = = comp ) ; <nl> - } <nl> + bool operator ! = ( const QueryLogItem & comp ) const { return ! 
( * this = = comp ) ; } <nl> } ; <nl> <nl> / * * <nl> - * @ brief Serialize a ScheduledQueryLogItem object into a property tree <nl> + * @ brief Serialize a QueryLogItem object into a property tree <nl> * <nl> - * @ param i the ScheduledQueryLogItem to serialize <nl> - * @ param tree a reference to a property tree which , if all operations are <nl> - * completed successfully , the contents of ScheduledQueryLogItem will be <nl> - * serialized into <nl> + * @ param item the QueryLogItem to serialize <nl> + * @ param tree the output property tree <nl> * <nl> - * @ return an instance of osquery : : Status , indicating the success or failure <nl> - * of the operation <nl> + * @ return Status indicating the success or failure of the operation <nl> * / <nl> - Status serializeScheduledQueryLogItem ( const ScheduledQueryLogItem & i , <nl> - boost : : property_tree : : ptree & tree ) ; <nl> + Status serializeQueryLogItem ( const QueryLogItem & item , pt : : ptree & tree ) ; <nl> <nl> / * * <nl> - * @ brief Serialize a ScheduledQueryLogItem object into a JSON string <nl> + * @ brief Serialize a QueryLogItem object into a JSON string <nl> * <nl> - * @ param i the ScheduledQueryLogItem to serialize <nl> - * @ param json a reference to a string which , if all operations are completed <nl> - * successfully , the contents of ScheduledQueryLogItem will be serialized into <nl> + * @ param item the QueryLogItem to serialize <nl> + * @ param json the output JSON string <nl> * <nl> - * @ return an instance of osquery : : Status , indicating the success or failure <nl> - * of the operation <nl> + * @ return Status indicating the success or failure of the operation <nl> * / <nl> - Status serializeScheduledQueryLogItemJSON ( const ScheduledQueryLogItem & i , <nl> - std : : string & json ) ; <nl> + Status serializeQueryLogItemJSON ( const QueryLogItem & item , std : : string & json ) ; <nl> + <nl> + Status deserializeQueryLogItem ( const pt : : ptree & tree , QueryLogItem & item ) ; <nl> + Status deserializeQueryLogItemJSON ( const std : : string & json , QueryLogItem & item ) ; <nl> <nl> / * * <nl> - * @ brief Serialize a ScheduledQueryLogItem object into a property tree <nl> + * @ brief Serialize a QueryLogItem object into a property tree <nl> * of events , a list of actions . <nl> * <nl> - * @ param item the ScheduledQueryLogItem to serialize <nl> - * @ param tree a reference to a property tree which , if all operations are <nl> - * completed successfully , the contents of ScheduledQueryLogItem will be <nl> - * serialized into <nl> + * @ param item the QueryLogItem to serialize <nl> + * @ param tree the output property tree <nl> * <nl> - * @ return an instance of osquery : : Status , indicating the success or failure <nl> - * of the operation <nl> + * @ return Status indicating the success or failure of the operation <nl> * / <nl> - Status serializeScheduledQueryLogItemAsEvents ( <nl> - const ScheduledQueryLogItem & item , boost : : property_tree : : ptree & tree ) ; <nl> + Status serializeQueryLogItemAsEvents ( const QueryLogItem & item , pt : : ptree & tree ) ; <nl> <nl> / * * <nl> - * @ brief Serialize a ScheduledQueryLogItem object into a JSON string of events , <nl> + * @ brief Serialize a QueryLogItem object into a JSON string of events , <nl> * a list of actions . 
<nl> * <nl> - * @ param i the ScheduledQueryLogItem to serialize <nl> - * @ param json a reference to a string which , if all operations are completed <nl> - * successfully , the contents of ScheduledQueryLogItem will be serialized into <nl> + * @ param i the QueryLogItem to serialize <nl> + * @ param json the output JSON string <nl> * <nl> - * @ return an instance of osquery : : Status , indicating the success or failure <nl> - * of the operation <nl> + * @ return Status indicating the success or failure of the operation <nl> * / <nl> - Status serializeScheduledQueryLogItemAsEventsJSON ( <nl> - const ScheduledQueryLogItem & i , std : : string & json ) ; <nl> + Status serializeQueryLogItemAsEventsJSON ( const QueryLogItem & i , <nl> + std : : string & json ) ; <nl> } <nl> mmm a / include / osquery / logger . h <nl> ppp b / include / osquery / logger . h <nl> class LoggerPlugin : public Plugin { <nl> virtual Status logStatus ( const std : : vector < StatusLogLine > & log ) { <nl> return Status ( 1 , " Not enabled " ) ; <nl> } <nl> + <nl> + virtual Status logEvent ( const QueryLogItem & event ) { <nl> + return Status ( 1 , " Not used " ) ; <nl> + } <nl> + <nl> + virtual Status logSnapshot ( const QueryLogItem & snapshot ) { <nl> + return Status ( 1 , " Not used " ) ; <nl> + } <nl> + <nl> + virtual Status logHealth ( const QueryLogItem & health ) { <nl> + return Status ( 1 , " Not used " ) ; <nl> + } <nl> } ; <nl> <nl> / / / Start status logging to a buffer until the logger plugin is online . <nl> void initLogger ( const std : : string & name , bool forward_all = false ) ; <nl> * log normal osquery operations , use Google Logging . <nl> * <nl> * @ param s the string to log <nl> + * @ param category a category / metadata key <nl> * <nl> - * @ return an instance of osquery : : Status , indicating the success or failure <nl> - * of the operation . <nl> + * @ return Status indicating the success or failure of the operation <nl> * / <nl> - Status logString ( const std : : string & s ) ; <nl> + Status logString ( const std : : string & message , const std : : string & category ) ; <nl> <nl> / * * <nl> * @ brief Log a string using a specific logger receiver . <nl> Status logString ( const std : : string & s ) ; <nl> * Note that this method should only be used to log results . If you ' d like to <nl> * log normal osquery operations , use Google Logging . <nl> * <nl> - * @ param s the string to log <nl> + * @ param message the string to log <nl> + * @ param category a category / metadata key <nl> * @ param receiver a string representing the log receiver to use <nl> * <nl> - * @ return an instance of osquery : : Status , indicating the success or failure <nl> - * of the operation . <nl> + * @ return Status indicating the success or failure of the operation <nl> * / <nl> - Status logString ( const std : : string & s , const std : : string & receiver ) ; <nl> + Status logString ( const std : : string & message , <nl> + const std : : string & category , <nl> + const std : : string & receiver ) ; <nl> <nl> / * * <nl> - * @ brief Directly log results of scheduled queries to the default receiver <nl> + * @ brief Log results of scheduled queries to the default receiver <nl> * <nl> * @ param item a struct representing the results of a scheduled query <nl> * <nl> - * @ return an instance of osquery : : Status , indicating the success or failure <nl> - * of the operation . 
<nl> + * @ return Status indicating the success or failure of the operation <nl> * / <nl> - Status logScheduledQueryLogItem ( const ScheduledQueryLogItem & item ) ; <nl> + Status logQueryLogItem ( const QueryLogItem & item ) ; <nl> <nl> / * * <nl> - * @ brief Directly log results of scheduled queries to a specified receiver <nl> + * @ brief Log results of scheduled queries to a specified receiver <nl> * <nl> * @ param item a struct representing the results of a scheduled query <nl> * @ param receiver a string representing the log receiver to use <nl> * <nl> - * @ return an instance of osquery : : Status , indicating the success or failure <nl> - * of the operation . <nl> + * @ return Status indicating the success or failure of the operation <nl> + * / <nl> + Status logQueryLogItem ( const QueryLogItem & item , const std : : string & receiver ) ; <nl> + <nl> + / * * <nl> + * @ brief Log raw results from a query ( or a snapshot scheduled query ) . <nl> + * <nl> + * @ param results the unmangled results from the query planner . <nl> + * <nl> + * @ return Status indicating the success or failure of the operation <nl> + * / <nl> + Status logSnapshotQuery ( const QueryLogItem & item ) ; <nl> + <nl> + / * * <nl> + * @ brief Log the worker ' s health along with health of each query . <nl> + * <nl> + * @ param results the query results from the osquery schedule appended with a <nl> + * row of health from the worker . <nl> + * <nl> + * @ return Status indicating the success or failure of the operation <nl> * / <nl> - Status logScheduledQueryLogItem ( const ScheduledQueryLogItem & item , <nl> - const std : : string & receiver ) ; <nl> + Status logHealthStatus ( const QueryLogItem & item ) ; <nl> <nl> / * * <nl> * @ brief Logger plugin registry . <nl> mmm a / osquery / config / config . cpp <nl> ppp b / osquery / config / config . cpp <nl> Status Config : : update ( const std : : map < std : : string , std : : string > & config ) { <nl> ConfigData conf ; <nl> for ( const auto & source : config ) { <nl> if ( Registry : : external ( ) ) { <nl> - VLOG ( 1 ) < < " Updating extension config source : " < < source . first ; <nl> + VLOG ( 1 ) < < " Updating extension config with source : " < < source . first ; <nl> } else { <nl> - VLOG ( 1 ) < < " Updating config source : " < < source . first ; <nl> + VLOG ( 1 ) < < " Updating config with source : " < < source . first ; <nl> } <nl> getInstance ( ) . raw_ [ source . first ] = source . second ; <nl> } <nl> inline void mergeOption ( const tree_node & option , ConfigData & conf ) { <nl> conf . all_data . add_child ( " options . " + key , option . second ) ; <nl> } <nl> <nl> - / / inline void mergeScheduledQuery ( const tree_node & node , ConfigData & conf ) { <nl> inline void mergeScheduledQuery ( const std : : string & name , <nl> const tree_node & node , <nl> ConfigData & conf ) { <nl> inline void mergeScheduledQuery ( const std : : string & name , <nl> ScheduledQuery query ; <nl> query . query = node . second . get < std : : string > ( " query " , " " ) ; <nl> query . interval = node . second . get < int > ( " interval " , 0 ) ; <nl> + / / This is a candidate for a catch - all iterator with a catch for boolean type . <nl> + query . options [ " snapshot " ] = node . second . get < bool > ( " snapshot " , false ) ; <nl> <nl> / / Check if this query exists , if so , check if it was changed . <nl> if ( conf . schedule . count ( name ) > 0 ) { <nl> mmm a / osquery / core / test_util . cpp <nl> ppp b / osquery / core / test_util . 
cpp <nl> <nl> <nl> # include " osquery / core / test_util . h " <nl> <nl> - namespace pt = boost : : property_tree ; <nl> - <nl> namespace osquery { <nl> <nl> QueryData getTestDBExpectedResults ( ) { <nl> std : : vector < std : : pair < std : : string , QueryData > > getTestDBResultStream ( ) { <nl> return results ; <nl> } <nl> <nl> - osquery : : ScheduledQuery getOsqueryScheduledQuery ( ) { <nl> + ScheduledQuery getOsqueryScheduledQuery ( ) { <nl> ScheduledQuery sq ; <nl> sq . query = " SELECT filename FROM fs WHERE path = ' / bin ' ORDER BY filename " ; <nl> sq . interval = 5 ; <nl> return sq ; <nl> } <nl> <nl> - std : : pair < boost : : property_tree : : ptree , Row > getSerializedRow ( ) { <nl> + std : : pair < pt : : ptree , Row > getSerializedRow ( ) { <nl> Row r ; <nl> r [ " foo " ] = " bar " ; <nl> r [ " meaning_of_life " ] = " 42 " ; <nl> std : : pair < boost : : property_tree : : ptree , Row > getSerializedRow ( ) { <nl> return std : : make_pair ( arr , r ) ; <nl> } <nl> <nl> - std : : pair < boost : : property_tree : : ptree , QueryData > getSerializedQueryData ( ) { <nl> + std : : pair < pt : : ptree , QueryData > getSerializedQueryData ( ) { <nl> auto r = getSerializedRow ( ) ; <nl> QueryData q = { r . second , r . second } ; <nl> pt : : ptree arr ; <nl> std : : pair < boost : : property_tree : : ptree , QueryData > getSerializedQueryData ( ) { <nl> return std : : make_pair ( arr , q ) ; <nl> } <nl> <nl> - std : : pair < boost : : property_tree : : ptree , DiffResults > getSerializedDiffResults ( ) { <nl> + std : : pair < pt : : ptree , DiffResults > getSerializedDiffResults ( ) { <nl> auto qd = getSerializedQueryData ( ) ; <nl> DiffResults diff_results ; <nl> diff_results . added = qd . second ; <nl> std : : pair < boost : : property_tree : : ptree , DiffResults > getSerializedDiffResults ( ) { <nl> <nl> std : : pair < std : : string , DiffResults > getSerializedDiffResultsJSON ( ) { <nl> auto results = getSerializedDiffResults ( ) ; <nl> - <nl> std : : ostringstream ss ; <nl> pt : : write_json ( ss , results . first , false ) ; <nl> - <nl> return std : : make_pair ( ss . str ( ) , results . second ) ; <nl> } <nl> <nl> - std : : pair < pt : : ptree , HistoricalQueryResults > <nl> - getSerializedHistoricalQueryResults ( ) { <nl> - auto qd = getSerializedQueryData ( ) ; <nl> - auto dr = getSerializedDiffResults ( ) ; <nl> - HistoricalQueryResults r ; <nl> - r . mostRecentResults . first = 2 ; <nl> - r . mostRecentResults . second = qd . second ; <nl> - <nl> - pt : : ptree root ; <nl> - <nl> - pt : : ptree mostRecentResults ; <nl> - mostRecentResults . add_child ( " 2 " , qd . first ) ; <nl> - root . add_child ( " mostRecentResults " , mostRecentResults ) ; <nl> - <nl> - return std : : make_pair ( root , r ) ; <nl> - } <nl> - <nl> - std : : pair < std : : string , HistoricalQueryResults > <nl> - getSerializedHistoricalQueryResultsJSON ( ) { <nl> - auto results = getSerializedHistoricalQueryResults ( ) ; <nl> - <nl> + std : : pair < std : : string , QueryData > getSerializedQueryDataJSON ( ) { <nl> + auto results = getSerializedQueryData ( ) ; <nl> std : : ostringstream ss ; <nl> pt : : write_json ( ss , results . first , false ) ; <nl> - <nl> return std : : make_pair ( ss . str ( ) , results . 
second ) ; <nl> } <nl> <nl> - std : : pair < boost : : property_tree : : ptree , ScheduledQueryLogItem > <nl> - getSerializedScheduledQueryLogItem ( ) { <nl> - ScheduledQueryLogItem i ; <nl> + std : : pair < pt : : ptree , QueryLogItem > getSerializedQueryLogItem ( ) { <nl> + QueryLogItem i ; <nl> pt : : ptree root ; <nl> auto dr = getSerializedDiffResults ( ) ; <nl> - i . diffResults = dr . second ; <nl> + i . results = dr . second ; <nl> i . name = " foobar " ; <nl> - i . calendarTime = " Mon Aug 25 12 : 10 : 57 2014 " ; <nl> - i . unixTime = 1408993857 ; <nl> - i . hostIdentifier = " foobaz " ; <nl> + i . calendar_time = " Mon Aug 25 12 : 10 : 57 2014 " ; <nl> + i . time = 1408993857 ; <nl> + i . identifier = " foobaz " ; <nl> root . add_child ( " diffResults " , dr . first ) ; <nl> root . put < std : : string > ( " name " , " foobar " ) ; <nl> root . put < std : : string > ( " hostIdentifier " , " foobaz " ) ; <nl> getSerializedScheduledQueryLogItem ( ) { <nl> return std : : make_pair ( root , i ) ; <nl> } <nl> <nl> - std : : pair < std : : string , ScheduledQueryLogItem > <nl> - getSerializedScheduledQueryLogItemJSON ( ) { <nl> - auto results = getSerializedScheduledQueryLogItem ( ) ; <nl> + std : : pair < std : : string , QueryLogItem > getSerializedQueryLogItemJSON ( ) { <nl> + auto results = getSerializedQueryLogItem ( ) ; <nl> <nl> std : : ostringstream ss ; <nl> pt : : write_json ( ss , results . first , false ) ; <nl> mmm a / osquery / core / test_util . h <nl> ppp b / osquery / core / test_util . h <nl> <nl> # include < osquery / database . h > <nl> # include < osquery / filesystem . h > <nl> <nl> + namespace pt = boost : : property_tree ; <nl> + <nl> namespace osquery { <nl> <nl> / / / Any SQL - dependent tests should use kTestQuery for a pre - populated example . <nl> const std : : string kTestWorkingDirectory = " / tmp / osquery - tests / " ; <nl> / / / A fake directory tree should be used for filesystem iterator testing . <nl> const std : : string kFakeDirectory = kTestWorkingDirectory + " fstree " ; <nl> <nl> + ScheduledQuery getOsqueryScheduledQuery ( ) ; <nl> + <nl> / / getTestDBExpectedResults returns the results of kTestQuery of the table that <nl> / / initially gets returned from createTestDB ( ) <nl> QueryData getTestDBExpectedResults ( ) ; <nl> QueryData getTestDBExpectedResults ( ) ; <nl> / / need to be performed on the dataset to make the results be pair . second <nl> std : : vector < std : : pair < std : : string , QueryData > > getTestDBResultStream ( ) ; <nl> <nl> - / / getOsqueryScheduledQuery returns a test scheduled query which would normally <nl> - / / be returned via the config <nl> - ScheduledQuery getOsqueryScheduledQuery ( ) ; <nl> - <nl> / / getSerializedRow ( ) return an std : : pair where pair - > first is a string which <nl> / / should serialize to pair - > second . pair - > second should deserialize <nl> / / to pair - > first <nl> - std : : pair < boost : : property_tree : : ptree , Row > getSerializedRow ( ) ; <nl> + std : : pair < pt : : ptree , Row > getSerializedRow ( ) ; <nl> <nl> / / getSerializedQueryData ( ) return an std : : pair where pair - > first is a string <nl> / / which should serialize to pair - > second . 
pair - > second should <nl> / / deserialize to pair - > first <nl> - std : : pair < boost : : property_tree : : ptree , QueryData > getSerializedQueryData ( ) ; <nl> + std : : pair < pt : : ptree , QueryData > getSerializedQueryData ( ) ; <nl> + std : : pair < std : : string , QueryData > getSerializedQueryDataJSON ( ) ; <nl> <nl> / / getSerializedDiffResults ( ) return an std : : pair where pair - > first is a string <nl> / / which should serialize to pair - > second . pair - > second should <nl> / / deserialize to pair - > first <nl> - std : : pair < boost : : property_tree : : ptree , DiffResults > getSerializedDiffResults ( ) ; <nl> - <nl> + std : : pair < pt : : ptree , DiffResults > getSerializedDiffResults ( ) ; <nl> std : : pair < std : : string , DiffResults > getSerializedDiffResultsJSON ( ) ; <nl> <nl> - / / getSerializedHistoricalQueryResults ( ) return an std : : pair where pair - > first <nl> + / / getSerializedQueryLogItem ( ) return an std : : pair where pair - > first <nl> / / is a string which should serialize to pair - > second . pair - > second <nl> / / should deserialize to pair - > first <nl> - std : : pair < boost : : property_tree : : ptree , HistoricalQueryResults > <nl> - getSerializedHistoricalQueryResults ( ) ; <nl> - <nl> - std : : pair < std : : string , HistoricalQueryResults > <nl> - getSerializedHistoricalQueryResultsJSON ( ) ; <nl> - <nl> - / / getSerializedScheduledQueryLogItem ( ) return an std : : pair where pair - > first <nl> - / / is a string which should serialize to pair - > second . pair - > second <nl> - / / should deserialize to pair - > first <nl> - std : : pair < boost : : property_tree : : ptree , ScheduledQueryLogItem > <nl> - getSerializedScheduledQueryLogItem ( ) ; <nl> - <nl> - std : : pair < std : : string , ScheduledQueryLogItem > <nl> - getSerializedScheduledQueryLogItemJSON ( ) ; <nl> + std : : pair < pt : : ptree , QueryLogItem > getSerializedQueryLogItem ( ) ; <nl> + std : : pair < std : : string , QueryLogItem > getSerializedQueryLogItemJSON ( ) ; <nl> <nl> / / generate content for a PEM - encoded certificate <nl> std : : string getCACertificateContent ( ) ; <nl> mmm a / osquery / database / query . cpp <nl> ppp b / osquery / database / query . cpp <nl> <nl> <nl> namespace osquery { <nl> <nl> - const std : : string kQueryNameNotFoundError = " query name not found in database " ; <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / Getters and setters <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> int Query : : getInterval ( ) { return query_ . 
interval ; } <nl> / / Data access methods <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - Status Query : : getHistoricalQueryResults ( HistoricalQueryResults & hQR ) { <nl> - return getHistoricalQueryResults ( hQR , DBHandle : : getInstance ( ) ) ; <nl> + Status Query : : getPreviousQueryResults ( QueryData & results ) { <nl> + return getPreviousQueryResults ( results , DBHandle : : getInstance ( ) ) ; <nl> } <nl> <nl> - Status Query : : getHistoricalQueryResults ( HistoricalQueryResults & hQR , <nl> - std : : shared_ptr < DBHandle > db ) { <nl> - if ( isQueryNameInDatabase ( ) ) { <nl> - std : : string raw ; <nl> - auto get_status = db - > Get ( kQueries , name_ , raw ) ; <nl> - if ( get_status . ok ( ) ) { <nl> - auto deserialize_status = deserializeHistoricalQueryResultsJSON ( raw , hQR ) ; <nl> - if ( ! deserialize_status . ok ( ) ) { <nl> - return deserialize_status ; <nl> - } <nl> - } else { <nl> - return get_status ; <nl> - } <nl> - } else { <nl> - return Status ( 1 , kQueryNameNotFoundError ) ; <nl> + Status Query : : getPreviousQueryResults ( QueryData & results , DBHandleRef db ) { <nl> + if ( ! isQueryNameInDatabase ( ) ) { <nl> + return Status ( 1 , " Query name not found in database " ) ; <nl> + } <nl> + <nl> + std : : string raw ; <nl> + auto status = db - > Get ( kQueries , name_ , raw ) ; <nl> + if ( ! status . ok ( ) ) { <nl> + return status ; <nl> + } <nl> + <nl> + status = deserializeQueryDataJSON ( raw , results ) ; <nl> + if ( ! status . ok ( ) ) { <nl> + return status ; <nl> } <nl> return Status ( 0 , " OK " ) ; <nl> } <nl> std : : vector < std : : string > Query : : getStoredQueryNames ( ) { <nl> return getStoredQueryNames ( DBHandle : : getInstance ( ) ) ; <nl> } <nl> <nl> - std : : vector < std : : string > Query : : getStoredQueryNames ( <nl> - std : : shared_ptr < DBHandle > db ) { <nl> + std : : vector < std : : string > Query : : getStoredQueryNames ( DBHandleRef db ) { <nl> std : : vector < std : : string > results ; <nl> db - > Scan ( kQueries , results ) ; <nl> return results ; <nl> bool Query : : isQueryNameInDatabase ( ) { <nl> return isQueryNameInDatabase ( DBHandle : : getInstance ( ) ) ; <nl> } <nl> <nl> - bool Query : : isQueryNameInDatabase ( std : : shared_ptr < DBHandle > db ) { <nl> + bool Query : : isQueryNameInDatabase ( DBHandleRef db ) { <nl> auto names = Query : : getStoredQueryNames ( db ) ; <nl> return std : : find ( names . begin ( ) , names . end ( ) , name_ ) ! = names . 
end ( ) ; <nl> } <nl> <nl> - Status Query : : addNewResults ( const osquery : : QueryData & qd , int unix_time ) { <nl> - return addNewResults ( qd , unix_time , DBHandle : : getInstance ( ) ) ; <nl> + Status Query : : addNewResults ( const osquery : : QueryData & qd ) { <nl> + return addNewResults ( qd , DBHandle : : getInstance ( ) ) ; <nl> } <nl> <nl> - Status Query : : addNewResults ( const QueryData & qd , <nl> - int unix_time , <nl> - std : : shared_ptr < DBHandle > db ) { <nl> + Status Query : : addNewResults ( const QueryData & qd , DBHandleRef db ) { <nl> DiffResults dr ; <nl> - return addNewResults ( qd , dr , false , unix_time , db ) ; <nl> + return addNewResults ( qd , dr , false , db ) ; <nl> } <nl> <nl> - osquery : : Status Query : : addNewResults ( const osquery : : QueryData & qd , <nl> - osquery : : DiffResults & dr , <nl> - int unix_time ) { <nl> - return addNewResults ( qd , dr , true , unix_time , DBHandle : : getInstance ( ) ) ; <nl> + Status Query : : addNewResults ( const QueryData & qd , DiffResults & dr ) { <nl> + return addNewResults ( qd , dr , true , DBHandle : : getInstance ( ) ) ; <nl> } <nl> <nl> - osquery : : Status Query : : addNewResults ( const osquery : : QueryData & qd , <nl> - osquery : : DiffResults & dr , <nl> - bool calculate_diff , <nl> - int unix_time , <nl> - std : : shared_ptr < DBHandle > db ) { <nl> - HistoricalQueryResults hQR ; <nl> - auto hqr_status = getHistoricalQueryResults ( hQR , db ) ; <nl> - if ( ! hqr_status . ok ( ) & & hqr_status . toString ( ) ! = kQueryNameNotFoundError ) { <nl> - return hqr_status ; <nl> - } <nl> - <nl> - QueryData escaped_qd ; <nl> - / / remove all non - ascii characters from the string <nl> - escapeQueryData ( qd , escaped_qd ) ; <nl> - <nl> + Status Query : : addNewResults ( const QueryData & current_qd , <nl> + DiffResults & dr , <nl> + bool calculate_diff , <nl> + DBHandleRef db ) { <nl> + / / Get the rows from the last run of this query name . <nl> + QueryData previous_qd ; <nl> + auto status = getPreviousQueryResults ( previous_qd ) ; <nl> + <nl> + / / Sanitize all non - ASCII characters from the query data values . <nl> + QueryData escaped_current_qd ; <nl> + escapeQueryData ( current_qd , escaped_current_qd ) ; <nl> + / / Calculate the differential between previous and current query results . <nl> if ( calculate_diff ) { <nl> - dr = diff ( hQR . mostRecentResults . second , escaped_qd ) ; <nl> + dr = diff ( previous_qd , escaped_current_qd ) ; <nl> } <nl> - hQR . mostRecentResults . first = unix_time ; <nl> - hQR . mostRecentResults . second = escaped_qd ; <nl> + <nl> + / / Replace the " previous " query data with the current . <nl> std : : string json ; <nl> - auto serialize_status = serializeHistoricalQueryResultsJSON ( hQR , json ) ; <nl> - if ( ! serialize_status . ok ( ) ) { <nl> - return serialize_status ; <nl> - } <nl> - auto put_status = db - > Put ( kQueries , name_ , json ) ; <nl> - if ( ! put_status . ok ( ) ) { <nl> - return put_status ; <nl> + status = serializeQueryDataJSON ( escaped_current_qd , json ) ; <nl> + if ( ! status . 
ok ( ) ) { <nl> + return status ; <nl> } <nl> - return Status ( 0 , " OK " ) ; <nl> - } <nl> <nl> - osquery : : Status Query : : getCurrentResults ( osquery : : QueryData & qd ) { <nl> - return getCurrentResults ( qd , DBHandle : : getInstance ( ) ) ; <nl> - } <nl> - <nl> - Status Query : : getCurrentResults ( QueryData & qd , std : : shared_ptr < DBHandle > db ) { <nl> - HistoricalQueryResults hQR ; <nl> - auto s = getHistoricalQueryResults ( hQR , db ) ; <nl> - if ( s . ok ( ) ) { <nl> - qd = hQR . mostRecentResults . second ; <nl> + status = db - > Put ( kQueries , name_ , json ) ; <nl> + if ( ! status . ok ( ) ) { <nl> + return status ; <nl> } <nl> - return s ; <nl> + return Status ( 0 , " OK " ) ; <nl> } <nl> } <nl> mmm a / osquery / database / results . cpp <nl> ppp b / osquery / database / results . cpp <nl> <nl> # include < osquery / logger . h > <nl> <nl> namespace pt = boost : : property_tree ; <nl> - using osquery : : Status ; <nl> - typedef unsigned char byte ; <nl> <nl> namespace osquery { <nl> <nl> + typedef unsigned char byte ; <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / Row - the representation of a row in a set of database results . Row is a <nl> / / simple map where individual column names are keys , which map to the Row ' s <nl> namespace osquery { <nl> <nl> std : : string escapeNonPrintableBytes ( const std : : string & data ) { <nl> std : : string escaped ; <nl> - char const hex_chars [ 16 ] = { ' 0 ' , <nl> - ' 1 ' , <nl> - ' 2 ' , <nl> - ' 3 ' , <nl> - ' 4 ' , <nl> - ' 5 ' , <nl> - ' 6 ' , <nl> - ' 7 ' , <nl> - ' 8 ' , <nl> - ' 9 ' , <nl> - ' A ' , <nl> - ' B ' , <nl> - ' C ' , <nl> - ' D ' , <nl> - ' E ' , <nl> - ' F ' } ; <nl> + / / clang - format off <nl> + char const hex_chars [ 16 ] = { <nl> + ' 0 ' , ' 1 ' , ' 2 ' , ' 3 ' , ' 4 ' , ' 5 ' , ' 6 ' , ' 7 ' , ' 8 ' , ' 9 ' , <nl> + ' A ' , ' B ' , ' C ' , ' D ' , ' E ' , ' F ' , <nl> + } ; <nl> + / / clang - format on <nl> for ( int i = 0 ; i < data . length ( ) ; i + + ) { <nl> if ( ( ( byte ) data [ i ] ) < 0x20 | | ( ( byte ) data [ i ] ) > = 0x80 ) { <nl> escaped + = " \ \ x " ; <nl> Status deserializeRow ( const pt : : ptree & tree , Row & r ) { <nl> r [ i . first ] = i . second . data ( ) ; <nl> } <nl> } <nl> - return Status ( 0 , " OK " ) ; <nl> } catch ( const std : : exception & e ) { <nl> LOG ( ERROR ) < < e . what ( ) ; <nl> return Status ( 1 , e . what ( ) ) ; <nl> } <nl> + return Status ( 0 , " OK " ) ; <nl> } <nl> <nl> Status deserializeRowJSON ( const std : : string & json , Row & r ) { <nl> Status serializeQueryData ( const QueryData & q , pt : : ptree & tree ) { <nl> return Status ( 0 , " OK " ) ; <nl> } <nl> <nl> + Status serializeQueryDataJSON ( const QueryData & q , std : : string & json ) { <nl> + pt : : ptree tree ; <nl> + try { <nl> + auto status = serializeQueryData ( q , tree ) ; <nl> + if ( ! status . ok ( ) ) { <nl> + return status ; <nl> + } <nl> + std : : ostringstream ss ; <nl> + pt : : write_json ( ss , tree , false ) ; <nl> + json = ss . str ( ) ; <nl> + } catch ( const std : : exception & e ) { <nl> + return Status ( 1 , e . what ( ) ) ; <nl> + } <nl> + return Status ( 0 , " OK " ) ; <nl> + } <nl> + <nl> + Status deserializeQueryData ( const pt : : ptree & tree , QueryData & qd ) { <nl> + try { <nl> + for ( const auto & i : tree ) { <nl> + Row r ; <nl> + auto status = deserializeRow ( i . second , r ) ; <nl> + if ( ! status . 
ok ( ) ) { <nl> + return status ; <nl> + } <nl> + qd . push_back ( r ) ; <nl> + } <nl> + } catch ( const std : : exception & e ) { <nl> + return Status ( 1 , e . what ( ) ) ; <nl> + } <nl> + return Status ( 0 , " OK " ) ; <nl> + } <nl> + <nl> + Status deserializeQueryDataJSON ( const std : : string & json , QueryData & qd ) { <nl> + pt : : ptree tree ; <nl> + try { <nl> + std : : stringstream j ; <nl> + j < < json ; <nl> + pt : : read_json ( j , tree ) ; <nl> + } catch ( const std : : exception & e ) { <nl> + return Status ( 1 , e . what ( ) ) ; <nl> + } <nl> + return deserializeQueryData ( tree , qd ) ; <nl> + } <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / DiffResults - the representation of two diffed QueryData result sets . <nl> / / Given and old and new QueryData , DiffResults indicates the " added " subset <nl> Status serializeQueryData ( const QueryData & q , pt : : ptree & tree ) { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> Status serializeDiffResults ( const DiffResults & d , pt : : ptree & tree ) { <nl> - try { <nl> - pt : : ptree added ; <nl> - auto added_status = serializeQueryData ( d . added , added ) ; <nl> - if ( ! added_status . ok ( ) ) { <nl> - return added_status ; <nl> + pt : : ptree added ; <nl> + auto status = serializeQueryData ( d . added , added ) ; <nl> + if ( ! status . ok ( ) ) { <nl> + return status ; <nl> + } <nl> + tree . add_child ( " added " , added ) ; <nl> + <nl> + pt : : ptree removed ; <nl> + status = serializeQueryData ( d . removed , removed ) ; <nl> + if ( ! status . ok ( ) ) { <nl> + return status ; <nl> + } <nl> + tree . add_child ( " removed " , removed ) ; <nl> + return Status ( 0 , " OK " ) ; <nl> + } <nl> + <nl> + Status deserializeDiffResults ( const pt : : ptree & tree , DiffResults & dr ) { <nl> + if ( tree . count ( " added " ) > 0 ) { <nl> + auto status = deserializeQueryData ( tree . get_child ( " added " ) , dr . added ) ; <nl> + if ( ! status . ok ( ) ) { <nl> + return status ; <nl> } <nl> - tree . add_child ( " added " , added ) ; <nl> + } <nl> <nl> - pt : : ptree removed ; <nl> - auto removed_status = serializeQueryData ( d . removed , removed ) ; <nl> - if ( ! removed_status . ok ( ) ) { <nl> - return removed_status ; <nl> + if ( tree . count ( " removed " ) > 0 ) { <nl> + auto status = deserializeQueryData ( tree . get_child ( " removed " ) , dr . removed ) ; <nl> + if ( ! status . ok ( ) ) { <nl> + return status ; <nl> } <nl> - tree . add_child ( " removed " , removed ) ; <nl> - } catch ( const std : : exception & e ) { <nl> - return Status ( 1 , e . what ( ) ) ; <nl> } <nl> return Status ( 0 , " OK " ) ; <nl> } <nl> <nl> Status serializeDiffResultsJSON ( const DiffResults & d , std : : string & json ) { <nl> + pt : : ptree tree ; <nl> + auto status = serializeDiffResults ( d , tree ) ; <nl> + if ( ! status . ok ( ) ) { <nl> + return status ; <nl> + } <nl> + <nl> try { <nl> - pt : : ptree tree ; <nl> - auto s = serializeDiffResults ( d , tree ) ; <nl> - if ( ! s . ok ( ) ) { <nl> - return s ; <nl> - } <nl> std : : ostringstream ss ; <nl> pt : : write_json ( ss , tree , false ) ; <nl> json = ss . 
str ( ) ; <nl> Status serializeDiffResultsJSON ( const DiffResults & d , std : : string & json ) { <nl> return Status ( 0 , " OK " ) ; <nl> } <nl> <nl> - DiffResults diff ( const QueryData & old_ , const QueryData & new_ ) { <nl> + DiffResults diff ( const QueryData & old , const QueryData & current ) { <nl> DiffResults r ; <nl> QueryData overlap ; <nl> <nl> - for ( const auto & i : new_ ) { <nl> - auto item = std : : find ( old_ . begin ( ) , old_ . end ( ) , i ) ; <nl> - if ( item ! = old_ . end ( ) ) { <nl> + for ( const auto & i : current ) { <nl> + auto item = std : : find ( old . begin ( ) , old . end ( ) , i ) ; <nl> + if ( item ! = old . end ( ) ) { <nl> overlap . push_back ( i ) ; <nl> } else { <nl> r . added . push_back ( i ) ; <nl> DiffResults diff ( const QueryData & old_ , const QueryData & new_ ) { <nl> } <nl> <nl> std : : multiset < Row > overlap_set ( overlap . begin ( ) , overlap . end ( ) ) ; <nl> - <nl> - std : : multiset < Row > old_set ( old_ . begin ( ) , old_ . end ( ) ) ; <nl> - <nl> + std : : multiset < Row > old_set ( old . begin ( ) , old . end ( ) ) ; <nl> std : : set_difference ( old_set . begin ( ) , <nl> old_set . end ( ) , <nl> overlap_set . begin ( ) , <nl> overlap_set . end ( ) , <nl> std : : back_inserter ( r . removed ) ) ; <nl> - <nl> return r ; <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / HistoricalQueryResults - the representation of the historical results of <nl> - / / a particlar scheduled database query . <nl> + / / QueryLogItem - the representation of a log result occuring when a <nl> + / / scheduled query yields operating system state change . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - Status serializeHistoricalQueryResultsJSON ( const HistoricalQueryResults & r , <nl> - std : : string & json ) { <nl> - try { <nl> - pt : : ptree tree ; <nl> - auto s = serializeHistoricalQueryResults ( r , tree ) ; <nl> - if ( ! s . ok ( ) ) { <nl> - return s ; <nl> - } <nl> - std : : ostringstream ss ; <nl> - pt : : write_json ( ss , tree , false ) ; <nl> - json = ss . str ( ) ; <nl> - } catch ( const std : : exception & e ) { <nl> - return Status ( 1 , e . what ( ) ) ; <nl> + Status serializeQueryLogItem ( const QueryLogItem & i , pt : : ptree & tree ) { <nl> + pt : : ptree diff_results ; <nl> + auto status = serializeDiffResults ( i . results , diff_results ) ; <nl> + if ( ! status . ok ( ) ) { <nl> + return status ; <nl> } <nl> + <nl> + tree . add_child ( " diffResults " , diff_results ) ; <nl> + tree . put < std : : string > ( " name " , i . name ) ; <nl> + tree . put < std : : string > ( " hostIdentifier " , i . identifier ) ; <nl> + tree . put < std : : string > ( " calendarTime " , i . calendar_time ) ; <nl> + tree . put < int > ( " unixTime " , i . time ) ; <nl> return Status ( 0 , " OK " ) ; <nl> } <nl> <nl> - Status serializeHistoricalQueryResults ( const HistoricalQueryResults & r , <nl> - pt : : ptree & tree ) { <nl> - try { <nl> - pt : : ptree mostRecentResults ; <nl> + Status serializeQueryLogItemJSON ( const QueryLogItem & i , std : : string & json ) { <nl> + pt : : ptree tree ; <nl> + auto status = serializeQueryLogItem ( i , tree ) ; <nl> + if ( ! status . 
ok ( ) ) { <nl> + return status ; <nl> + } <nl> <nl> - pt : : ptree most_recent_serialized ; <nl> - auto mrr_status = <nl> - serializeQueryData ( r . mostRecentResults . second , most_recent_serialized ) ; <nl> - if ( ! mrr_status . ok ( ) ) { <nl> - return mrr_status ; <nl> - } <nl> - mostRecentResults . add_child ( <nl> - boost : : lexical_cast < std : : string > ( r . mostRecentResults . first ) , <nl> - most_recent_serialized ) ; <nl> - tree . add_child ( " mostRecentResults " , mostRecentResults ) ; <nl> + try { <nl> + std : : ostringstream ss ; <nl> + pt : : write_json ( ss , tree , false ) ; <nl> + json = ss . str ( ) ; <nl> } catch ( const std : : exception & e ) { <nl> return Status ( 1 , e . what ( ) ) ; <nl> } <nl> return Status ( 0 , " OK " ) ; <nl> } <nl> <nl> - Status deserializeHistoricalQueryResults ( const pt : : ptree & tree , <nl> - HistoricalQueryResults & r ) { <nl> - try { <nl> - for ( const auto & v : tree . get_child ( " mostRecentResults " ) ) { <nl> - try { <nl> - int execution = boost : : lexical_cast < int > ( v . first ) ; <nl> - r . mostRecentResults . first = execution ; <nl> - } catch ( const boost : : bad_lexical_cast & e ) { <nl> - return Status ( 1 , e . what ( ) ) ; <nl> - } <nl> - <nl> - QueryData q ; <nl> - for ( const auto & each : v . second ) { <nl> - Row row_ ; <nl> - for ( const auto & item : each . second ) { <nl> - row_ [ item . first ] = item . second . get_value < std : : string > ( ) ; <nl> - } <nl> - q . push_back ( row_ ) ; <nl> - } <nl> - r . mostRecentResults . second = q ; <nl> + Status deserializeQueryLogItem ( const pt : : ptree & tree , QueryLogItem & item ) { <nl> + if ( tree . count ( " diffResults " ) > 0 ) { <nl> + auto status = <nl> + deserializeDiffResults ( tree . get_child ( " diffResults " ) , item . results ) ; <nl> + if ( ! status . ok ( ) ) { <nl> + return status ; <nl> } <nl> - <nl> - return Status ( 0 , " OK " ) ; <nl> - } catch ( const std : : exception & e ) { <nl> - LOG ( ERROR ) < < e . what ( ) ; <nl> - return Status ( 1 , e . what ( ) ) ; <nl> } <nl> + <nl> + item . name = tree . get < std : : string > ( " name " , " " ) ; <nl> + item . identifier = tree . get < std : : string > ( " hostIdentifier " , " " ) ; <nl> + item . calendar_time = tree . get < std : : string > ( " calendarTime " , " " ) ; <nl> + item . time = tree . get < int > ( " unixTime " , 0 ) ; <nl> + return Status ( 0 , " OK " ) ; <nl> } <nl> <nl> - Status deserializeHistoricalQueryResultsJSON ( const std : : string & json , <nl> - HistoricalQueryResults & r ) { <nl> + Status deserializeQueryLogItemJSON ( const std : : string & json , <nl> + QueryLogItem & item ) { <nl> pt : : ptree tree ; <nl> try { <nl> std : : stringstream j ; <nl> Status deserializeHistoricalQueryResultsJSON ( const std : : string & json , <nl> } catch ( const std : : exception & e ) { <nl> return Status ( 1 , e . what ( ) ) ; <nl> } <nl> - return deserializeHistoricalQueryResults ( tree , r ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / ScheduledQueryLogItem - the representation of a log result occuring when a <nl> - / / scheduled query yields operating system state change . 
<nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - Status serializeScheduledQueryLogItem ( const ScheduledQueryLogItem & i , <nl> - boost : : property_tree : : ptree & tree ) { <nl> - try { <nl> - pt : : ptree diffResults ; <nl> - auto diff_results_status = serializeDiffResults ( i . diffResults , diffResults ) ; <nl> - if ( ! diff_results_status . ok ( ) ) { <nl> - return diff_results_status ; <nl> - } <nl> - <nl> - tree . add_child ( " diffResults " , diffResults ) ; <nl> - tree . put < std : : string > ( " name " , i . name ) ; <nl> - tree . put < std : : string > ( " hostIdentifier " , i . hostIdentifier ) ; <nl> - tree . put < std : : string > ( " calendarTime " , i . calendarTime ) ; <nl> - tree . put < int > ( " unixTime " , i . unixTime ) ; <nl> - } catch ( const std : : exception & e ) { <nl> - return Status ( 1 , e . what ( ) ) ; <nl> - } <nl> - return Status ( 0 , " OK " ) ; <nl> + return deserializeQueryLogItem ( tree , item ) ; <nl> } <nl> <nl> - Status serializeEvent ( const ScheduledQueryLogItem & item , <nl> - const boost : : property_tree : : ptree & event , <nl> - boost : : property_tree : : ptree & tree ) { <nl> + Status serializeEvent ( const QueryLogItem & item , <nl> + const pt : : ptree & event , <nl> + pt : : ptree & tree ) { <nl> tree . put < std : : string > ( " name " , item . name ) ; <nl> - tree . put < std : : string > ( " hostIdentifier " , item . hostIdentifier ) ; <nl> - tree . put < std : : string > ( " calendarTime " , item . calendarTime ) ; <nl> - tree . put < int > ( " unixTime " , item . unixTime ) ; <nl> + tree . put < std : : string > ( " hostIdentifier " , item . identifier ) ; <nl> + tree . put < std : : string > ( " calendarTime " , item . calendar_time ) ; <nl> + tree . put < int > ( " unixTime " , item . time ) ; <nl> <nl> pt : : ptree columns ; <nl> for ( auto & i : event ) { <nl> Status serializeEvent ( const ScheduledQueryLogItem & item , <nl> return Status ( 0 , " OK " ) ; <nl> } <nl> <nl> - Status serializeScheduledQueryLogItemAsEvents ( <nl> - const ScheduledQueryLogItem & item , boost : : property_tree : : ptree & tree ) { <nl> - try { <nl> - pt : : ptree diff_results ; <nl> - auto status = serializeDiffResults ( item . diffResults , diff_results ) ; <nl> - if ( ! status . ok ( ) ) { <nl> - return status ; <nl> - } <nl> + Status serializeQueryLogItemAsEvents ( const QueryLogItem & i , pt : : ptree & tree ) { <nl> + pt : : ptree diff_results ; <nl> + auto status = serializeDiffResults ( i . results , diff_results ) ; <nl> + if ( ! status . ok ( ) ) { <nl> + return status ; <nl> + } <nl> <nl> - for ( auto & i : diff_results ) { <nl> - for ( auto & j : i . second ) { <nl> - pt : : ptree event ; <nl> - serializeEvent ( item , j . second , event ) ; <nl> - event . put < std : : string > ( " action " , i . first ) ; <nl> - tree . push_back ( std : : make_pair ( " " , event ) ) ; <nl> - } <nl> + for ( auto & action : diff_results ) { <nl> + for ( auto & row : action . second ) { <nl> + pt : : ptree event ; <nl> + serializeEvent ( i , row . second , event ) ; <nl> + event . put < std : : string > ( " action " , action . first ) ; <nl> + tree . push_back ( std : : make_pair ( " " , event ) ) ; <nl> } <nl> - } catch ( const std : : exception & e ) { <nl> - return Status ( 1 , e . 
what ( ) ) ; <nl> } <nl> - <nl> return Status ( 0 , " OK " ) ; <nl> } <nl> <nl> - Status serializeScheduledQueryLogItemAsEventsJSON ( <nl> - const ScheduledQueryLogItem & i , std : : string & json ) { <nl> + Status serializeQueryLogItemAsEventsJSON ( const QueryLogItem & i , <nl> + std : : string & json ) { <nl> + pt : : ptree tree ; <nl> + auto status = serializeQueryLogItemAsEvents ( i , tree ) ; <nl> + if ( ! status . ok ( ) ) { <nl> + return status ; <nl> + } <nl> + <nl> try { <nl> - pt : : ptree tree ; <nl> - auto s = serializeScheduledQueryLogItemAsEvents ( i , tree ) ; <nl> - if ( ! s . ok ( ) ) { <nl> - return s ; <nl> - } <nl> std : : ostringstream ss ; <nl> for ( auto & event : tree ) { <nl> pt : : write_json ( ss , event . second , false ) ; <nl> Status serializeScheduledQueryLogItemAsEventsJSON ( <nl> return Status ( 0 , " OK " ) ; <nl> } <nl> <nl> - Status serializeScheduledQueryLogItemJSON ( const ScheduledQueryLogItem & i , <nl> - std : : string & json ) { <nl> - try { <nl> - pt : : ptree tree ; <nl> - auto s = serializeScheduledQueryLogItem ( i , tree ) ; <nl> - if ( ! s . ok ( ) ) { <nl> - return s ; <nl> - } <nl> - std : : ostringstream ss ; <nl> - pt : : write_json ( ss , tree , false ) ; <nl> - json = ss . str ( ) ; <nl> - } catch ( const std : : exception & e ) { <nl> - return Status ( 1 , e . what ( ) ) ; <nl> - } <nl> - return Status ( 0 , " OK " ) ; <nl> - } <nl> - <nl> bool addUniqueRowToQueryData ( QueryData & q , const Row & r ) { <nl> if ( std : : find ( q . begin ( ) , q . end ( ) , r ) ! = q . end ( ) ) { <nl> return false ; <nl> mmm a / osquery / database / tests / query_tests . cpp <nl> ppp b / osquery / database / tests / query_tests . cpp <nl> namespace osquery { <nl> <nl> class QueryTests : public testing : : Test { <nl> public : <nl> - void SetUp ( ) { db = DBHandle : : getInstanceAtPath ( kTestingQueryDBPath ) ; } <nl> + void SetUp ( ) { db_ = DBHandle : : getInstanceAtPath ( kTestingQueryDBPath ) ; } <nl> void TearDown ( ) { boost : : filesystem : : remove_all ( kTestingQueryDBPath ) ; } <nl> <nl> public : <nl> - std : : shared_ptr < DBHandle > db ; <nl> + std : : shared_ptr < DBHandle > db_ ; <nl> } ; <nl> <nl> TEST_F ( QueryTests , test_get_column_family_name ) { <nl> TEST_F ( QueryTests , test_private_members ) { <nl> } <nl> <nl> TEST_F ( QueryTests , test_add_and_get_current_results ) { <nl> + / / Test adding a " current " set of results to a scheduled query instance . <nl> auto query = getOsqueryScheduledQuery ( ) ; <nl> auto cf = Query ( " foobar " , query ) ; <nl> - auto s = cf . addNewResults ( getTestDBExpectedResults ( ) , std : : time ( 0 ) , db ) ; <nl> - EXPECT_TRUE ( s . ok ( ) ) ; <nl> - EXPECT_EQ ( s . toString ( ) , " OK " ) ; <nl> + auto status = cf . addNewResults ( getTestDBExpectedResults ( ) , db_ ) ; <nl> + EXPECT_TRUE ( status . ok ( ) ) ; <nl> + EXPECT_EQ ( status . toString ( ) , " OK " ) ; <nl> + <nl> + / / Simulate results from several schedule runs , calculate differentials . <nl> for ( auto result : getTestDBResultStream ( ) ) { <nl> + / / Get the results from the previous query execution ( from RocksDB ) . <nl> + QueryData previous_qd ; <nl> + auto status = cf . getPreviousQueryResults ( previous_qd , db_ ) ; <nl> + EXPECT_TRUE ( status . ok ( ) ) ; <nl> + EXPECT_EQ ( status . toString ( ) , " OK " ) ; <nl> + <nl> + / / Add the " current " results and output the differentials . <nl> DiffResults dr ; <nl> - HistoricalQueryResults hQR ; <nl> - auto hqr_status = cf . 
getHistoricalQueryResults ( hQR , db ) ; <nl> - EXPECT_TRUE ( hqr_status . ok ( ) ) ; <nl> - EXPECT_EQ ( hqr_status . toString ( ) , " OK " ) ; <nl> - auto s = cf . addNewResults ( result . second , dr , true , std : : time ( 0 ) , db ) ; <nl> + auto s = cf . addNewResults ( result . second , dr , true , db_ ) ; <nl> EXPECT_TRUE ( s . ok ( ) ) ; <nl> - DiffResults expected = diff ( hQR . mostRecentResults . second , result . second ) ; <nl> + <nl> + / / Call the diffing utility directly . <nl> + DiffResults expected = diff ( previous_qd , result . second ) ; <nl> EXPECT_EQ ( dr , expected ) ; <nl> + <nl> + / / After Query : : addNewResults the previous results are now current . <nl> QueryData qd ; <nl> - cf . getCurrentResults ( qd , db ) ; <nl> + cf . getPreviousQueryResults ( qd , db_ ) ; <nl> EXPECT_EQ ( qd , result . second ) ; <nl> } <nl> } <nl> <nl> - TEST_F ( QueryTests , test_get_historical_query_results ) { <nl> - auto hQR = getSerializedHistoricalQueryResultsJSON ( ) ; <nl> + TEST_F ( QueryTests , test_get_query_results ) { <nl> + / / Grab an expected set of query data and add it as the previous result . <nl> + auto encoded_qd = getSerializedQueryDataJSON ( ) ; <nl> auto query = getOsqueryScheduledQuery ( ) ; <nl> - auto put_status = db - > Put ( kQueries , " foobar " , hQR . first ) ; <nl> - EXPECT_TRUE ( put_status . ok ( ) ) ; <nl> - EXPECT_EQ ( put_status . toString ( ) , " OK " ) ; <nl> + auto status = db_ - > Put ( kQueries , " foobar " , encoded_qd . first ) ; <nl> + EXPECT_TRUE ( status . ok ( ) ) ; <nl> + <nl> + / / Use the Query retrieval API to check the now " previous " result . <nl> + QueryData previous_qd ; <nl> auto cf = Query ( " foobar " , query ) ; <nl> - HistoricalQueryResults from_db ; <nl> - auto query_status = cf . getHistoricalQueryResults ( from_db , db ) ; <nl> - EXPECT_TRUE ( query_status . ok ( ) ) ; <nl> - EXPECT_EQ ( query_status . toString ( ) , " OK " ) ; <nl> - EXPECT_EQ ( from_db , hQR . second ) ; <nl> + status = cf . getPreviousQueryResults ( previous_qd , db_ ) ; <nl> + EXPECT_TRUE ( status . ok ( ) ) ; <nl> } <nl> <nl> TEST_F ( QueryTests , test_query_name_not_found_in_db ) { <nl> - HistoricalQueryResults from_db ; <nl> + / / Try to retrieve results from a query that has not executed . <nl> + QueryData previous_qd ; <nl> auto query = getOsqueryScheduledQuery ( ) ; <nl> auto cf = Query ( " not_a_real_query " , query ) ; <nl> - auto query_status = cf . getHistoricalQueryResults ( from_db , db ) ; <nl> - EXPECT_FALSE ( query_status . ok ( ) ) ; <nl> - EXPECT_EQ ( query_status . toString ( ) , " query name not found in database " ) ; <nl> + auto status = cf . getPreviousQueryResults ( previous_qd , db_ ) ; <nl> + EXPECT_FALSE ( status . ok ( ) ) ; <nl> } <nl> <nl> TEST_F ( QueryTests , test_is_query_name_in_database ) { <nl> auto query = getOsqueryScheduledQuery ( ) ; <nl> auto cf = Query ( " foobar " , query ) ; <nl> - auto hQR = getSerializedHistoricalQueryResultsJSON ( ) ; <nl> - auto put_status = db - > Put ( kQueries , " foobar " , hQR . first ) ; <nl> - EXPECT_TRUE ( put_status . ok ( ) ) ; <nl> - EXPECT_EQ ( put_status . toString ( ) , " OK " ) ; <nl> - EXPECT_TRUE ( cf . isQueryNameInDatabase ( db ) ) ; <nl> + auto encoded_qd = getSerializedQueryDataJSON ( ) ; <nl> + auto status = db_ - > Put ( kQueries , " foobar " , encoded_qd . first ) ; <nl> + EXPECT_TRUE ( status . ok ( ) ) ; <nl> + / / Now test that the query name exists . <nl> + EXPECT_TRUE ( cf . 
isQueryNameInDatabase ( db_ ) ) ; <nl> } <nl> <nl> TEST_F ( QueryTests , test_get_stored_query_names ) { <nl> auto query = getOsqueryScheduledQuery ( ) ; <nl> auto cf = Query ( " foobar " , query ) ; <nl> - auto hQR = getSerializedHistoricalQueryResultsJSON ( ) ; <nl> - auto put_status = db - > Put ( kQueries , " foobar " , hQR . first ) ; <nl> - EXPECT_TRUE ( put_status . ok ( ) ) ; <nl> - EXPECT_EQ ( put_status . toString ( ) , " OK " ) ; <nl> - auto names = cf . getStoredQueryNames ( db ) ; <nl> + auto encoded_qd = getSerializedQueryDataJSON ( ) ; <nl> + auto status = db_ - > Put ( kQueries , " foobar " , encoded_qd . first ) ; <nl> + EXPECT_TRUE ( status . ok ( ) ) ; <nl> + <nl> + / / Stored query names is a factory method included alongside every query . <nl> + / / It will include the set of query names with existing " previous " results . <nl> + auto names = cf . getStoredQueryNames ( db_ ) ; <nl> auto in_vector = std : : find ( names . begin ( ) , names . end ( ) , " foobar " ) ; <nl> EXPECT_NE ( in_vector , names . end ( ) ) ; <nl> } <nl> - <nl> - TEST_F ( QueryTests , test_get_current_results ) { <nl> - auto hQR = getSerializedHistoricalQueryResultsJSON ( ) ; <nl> - auto query = getOsqueryScheduledQuery ( ) ; <nl> - auto put_status = db - > Put ( kQueries , " foobar " , hQR . first ) ; <nl> - EXPECT_TRUE ( put_status . ok ( ) ) ; <nl> - EXPECT_EQ ( put_status . toString ( ) , " OK " ) ; <nl> - auto cf = Query ( " foobar " , query ) ; <nl> - QueryData qd ; <nl> - auto query_status = cf . getCurrentResults ( qd , db ) ; <nl> - EXPECT_TRUE ( query_status . ok ( ) ) ; <nl> - EXPECT_EQ ( query_status . toString ( ) , " OK " ) ; <nl> - EXPECT_EQ ( qd , hQR . second . mostRecentResults . second ) ; <nl> - } <nl> } <nl> mmm a / osquery / database / tests / results_tests . cpp <nl> ppp b / osquery / database / tests / results_tests . cpp <nl> TEST_F ( ResultsTests , test_serialize_row ) { <nl> EXPECT_EQ ( results . first , tree ) ; <nl> } <nl> <nl> + TEST_F ( ResultsTests , test_deserialize_row_json ) { <nl> + auto results = getSerializedRow ( ) ; <nl> + std : : string input ; <nl> + serializeRowJSON ( results . second , input ) ; <nl> + <nl> + / / Pull the serialized JSON back into a Row output container . <nl> + Row output ; <nl> + auto s = deserializeRowJSON ( input , output ) ; <nl> + EXPECT_TRUE ( s . ok ( ) ) ; <nl> + / / The output container should match the input row . <nl> + EXPECT_EQ ( output , results . second ) ; <nl> + } <nl> + <nl> TEST_F ( ResultsTests , test_serialize_query_data ) { <nl> auto results = getSerializedQueryData ( ) ; <nl> pt : : ptree tree ; <nl> TEST_F ( ResultsTests , test_serialize_query_data ) { <nl> EXPECT_EQ ( results . first , tree ) ; <nl> } <nl> <nl> + TEST_F ( ResultsTests , test_serialize_query_data_json ) { <nl> + auto results = getSerializedQueryDataJSON ( ) ; <nl> + std : : string json ; <nl> + auto s = serializeQueryDataJSON ( results . second , json ) ; <nl> + EXPECT_TRUE ( s . ok ( ) ) ; <nl> + EXPECT_EQ ( s . toString ( ) , " OK " ) ; <nl> + EXPECT_EQ ( results . first , json ) ; <nl> + } <nl> + <nl> + TEST_F ( ResultsTests , test_deserialize_query_data_json ) { <nl> + auto results = getSerializedQueryDataJSON ( ) ; <nl> + <nl> + / / Pull the serialized JSON back into a QueryData output container . <nl> + QueryData output ; <nl> + auto s = deserializeQueryDataJSON ( results . first , output ) ; <nl> + EXPECT_TRUE ( s . ok ( ) ) ; <nl> + / / The output container should match the input query data . <nl> + EXPECT_EQ ( output , results . 
second ) ; <nl> + } <nl> + <nl> TEST_F ( ResultsTests , test_serialize_diff_results ) { <nl> auto results = getSerializedDiffResults ( ) ; <nl> pt : : ptree tree ; <nl> TEST_F ( ResultsTests , test_serialize_diff_results_json ) { <nl> EXPECT_EQ ( results . first , json ) ; <nl> } <nl> <nl> - TEST_F ( ResultsTests , test_serialize_historical_query_results ) { <nl> - auto results = getSerializedHistoricalQueryResults ( ) ; <nl> + TEST_F ( ResultsTests , test_serialize_query_log_item ) { <nl> + auto results = getSerializedQueryLogItem ( ) ; <nl> pt : : ptree tree ; <nl> - auto s = serializeHistoricalQueryResults ( results . second , tree ) ; <nl> + auto s = serializeQueryLogItem ( results . second , tree ) ; <nl> EXPECT_TRUE ( s . ok ( ) ) ; <nl> EXPECT_EQ ( s . toString ( ) , " OK " ) ; <nl> EXPECT_EQ ( results . first , tree ) ; <nl> } <nl> <nl> - TEST_F ( ResultsTests , test_serialize_historical_query_results_json ) { <nl> - auto results = getSerializedHistoricalQueryResultsJSON ( ) ; <nl> + TEST_F ( ResultsTests , test_serialize_query_log_item_json ) { <nl> + auto results = getSerializedQueryLogItemJSON ( ) ; <nl> std : : string json ; <nl> - auto s = serializeHistoricalQueryResultsJSON ( results . second , json ) ; <nl> + auto s = serializeQueryLogItemJSON ( results . second , json ) ; <nl> EXPECT_TRUE ( s . ok ( ) ) ; <nl> EXPECT_EQ ( s . toString ( ) , " OK " ) ; <nl> EXPECT_EQ ( results . first , json ) ; <nl> } <nl> <nl> - TEST_F ( ResultsTests , test_deserialize_historical_query_results ) { <nl> - auto results = getSerializedHistoricalQueryResults ( ) ; <nl> - HistoricalQueryResults r ; <nl> - auto s = deserializeHistoricalQueryResults ( results . first , r ) ; <nl> - EXPECT_EQ ( results . second , r ) ; <nl> - EXPECT_EQ ( results . second . mostRecentResults , r . mostRecentResults ) ; <nl> - EXPECT_TRUE ( s . ok ( ) ) ; <nl> - EXPECT_EQ ( s . toString ( ) , " OK " ) ; <nl> - } <nl> - <nl> - TEST_F ( ResultsTests , test_deserialize_historical_query_results_json ) { <nl> - auto results = getSerializedHistoricalQueryResultsJSON ( ) ; <nl> - HistoricalQueryResults r ; <nl> - auto s = deserializeHistoricalQueryResultsJSON ( results . first , r ) ; <nl> - EXPECT_EQ ( results . second , r ) ; <nl> - EXPECT_EQ ( results . second . mostRecentResults , r . mostRecentResults ) ; <nl> - EXPECT_TRUE ( s . ok ( ) ) ; <nl> - EXPECT_EQ ( s . toString ( ) , " OK " ) ; <nl> - } <nl> + TEST_F ( ResultsTests , test_deserialize_query_log_item_json ) { <nl> + auto results = getSerializedQueryLogItemJSON ( ) ; <nl> <nl> - TEST_F ( ResultsTests , test_serialize_scheduled_query_log_item ) { <nl> - auto results = getSerializedScheduledQueryLogItem ( ) ; <nl> - pt : : ptree tree ; <nl> - auto s = serializeScheduledQueryLogItem ( results . second , tree ) ; <nl> + / / Pull the serialized JSON back into a QueryLogItem output container . <nl> + QueryLogItem output ; <nl> + auto s = deserializeQueryLogItemJSON ( results . first , output ) ; <nl> EXPECT_TRUE ( s . ok ( ) ) ; <nl> - EXPECT_EQ ( s . toString ( ) , " OK " ) ; <nl> - EXPECT_EQ ( results . first , tree ) ; <nl> - } <nl> - <nl> - TEST_F ( ResultsTests , test_serialize_scheduled_query_log_item_json ) { <nl> - auto results = getSerializedScheduledQueryLogItemJSON ( ) ; <nl> - std : : string json ; <nl> - auto s = serializeScheduledQueryLogItemJSON ( results . second , json ) ; <nl> - EXPECT_TRUE ( s . ok ( ) ) ; <nl> - EXPECT_EQ ( s . toString ( ) , " OK " ) ; <nl> - EXPECT_EQ ( results . 
first , json ) ; <nl> + / / The output container should match the input query data . <nl> + EXPECT_EQ ( output , results . second ) ; <nl> } <nl> <nl> TEST_F ( ResultsTests , test_unicode_to_ascii_conversion ) { <nl> mmm a / osquery / dispatcher / scheduler . cpp <nl> ppp b / osquery / dispatcher / scheduler . cpp <nl> Status getHostIdentifier ( std : : string & ident ) { <nl> auto status = db - > Scan ( kConfigurations , results ) ; <nl> <nl> if ( ! status . ok ( ) ) { <nl> - VLOG ( 1 ) < < " Could not access database , using hostname as the host " <nl> - " identifier " ; <nl> + VLOG ( 1 ) < < " Could not access database ; using hostname as host identifier " ; <nl> ident = osquery : : getHostname ( ) ; <nl> return Status ( 0 , " OK " ) ; <nl> } <nl> Status getHostIdentifier ( std : : string & ident ) { <nl> results . end ( ) ) { <nl> status = db - > Get ( kConfigurations , " hostIdentifier " , ident ) ; <nl> if ( ! status . ok ( ) ) { <nl> - VLOG ( 1 ) < < " Could not access database , using hostname as the host " <nl> - " identifier " ; <nl> + VLOG ( 1 ) < < " Could not access database ; using hostname as host identifier " ; <nl> ident = osquery : : getHostname ( ) ; <nl> } <nl> return status ; <nl> Status getHostIdentifier ( std : : string & ident ) { <nl> <nl> / / There was no uuid stored in the database , generate one and store it . <nl> ident = osquery : : generateHostUuid ( ) ; <nl> - VLOG ( 1 ) < < " Using uuid " < < ident < < " to identify this host " ; <nl> + VLOG ( 1 ) < < " Using uuid " < < ident < < " as host identifier " ; <nl> return db - > Put ( kConfigurations , " hostIdentifier " , ident ) ; <nl> } <nl> <nl> void launchQuery ( const std : : string & name , const ScheduledQuery & query ) { <nl> - LOG ( INFO ) < < " Executing query : " < < query . query ; <nl> - int unix_time = std : : time ( 0 ) ; <nl> + / / Execute the scheduled query and create a named query object . <nl> + VLOG ( 1 ) < < " Executing query : " < < query . query ; <nl> auto sql = SQL ( query . query ) ; <nl> if ( ! sql . ok ( ) ) { <nl> LOG ( ERROR ) < < " Error executing query ( " < < query . query <nl> void launchQuery ( const std : : string & name , const ScheduledQuery & query ) { <nl> return ; <nl> } <nl> <nl> + / / Fill in a host identifier fields based on configuration or availability . <nl> + std : : string ident ; <nl> + auto status = getHostIdentifier ( ident ) ; <nl> + if ( ! status . ok ( ) | | ident . empty ( ) ) { <nl> + ident = " < unknown > " ; <nl> + } <nl> + <nl> + / / A query log item contains an optional set of differential results or <nl> + / / a copy of the most - recent execution alongside some query metadata . <nl> + QueryLogItem item ; <nl> + item . name = name ; <nl> + item . identifier = ident ; <nl> + item . time = osquery : : getUnixTime ( ) ; <nl> + item . calendar_time = osquery : : getAsciiTime ( ) ; <nl> + <nl> + if ( query . options . count ( " snapshot " ) & & query . options . at ( " snapshot " ) ) { <nl> + / / This is a snapshot query , emit results with a differential or state . <nl> + item . results . added = std : : move ( sql . rows ( ) ) ; <nl> + logSnapshotQuery ( item ) ; <nl> + return ; <nl> + } <nl> + <nl> + / / Create a database - backed set of query results . <nl> auto dbQuery = Query ( name , query ) ; <nl> DiffResults diff_results ; <nl> - auto status = dbQuery . addNewResults ( sql . rows ( ) , diff_results , unix_time ) ; <nl> + / / Add this execution ' s set of results to the database - tracked named query . 
<nl> + / / We can then ask for a differential from the last time this named query <nl> + / / was executed by exact matching each row . <nl> + status = dbQuery . addNewResults ( sql . rows ( ) , diff_results ) ; <nl> if ( ! status . ok ( ) ) { <nl> LOG ( ERROR ) < < " Error adding new results to database : " < < status . what ( ) ; <nl> return ; <nl> void launchQuery ( const std : : string & name , const ScheduledQuery & query ) { <nl> return ; <nl> } <nl> <nl> - ScheduledQueryLogItem item ; <nl> - item . diffResults = diff_results ; <nl> - item . name = name ; <nl> - <nl> - std : : string ident ; <nl> - status = getHostIdentifier ( ident ) ; <nl> - if ( status . ok ( ) ) { <nl> - item . hostIdentifier = ident ; <nl> - } else if ( ident . empty ( ) ) { <nl> - ident = " < unknown > " ; <nl> - } <nl> - <nl> - item . unixTime = osquery : : getUnixTime ( ) ; <nl> - item . calendarTime = osquery : : getAsciiTime ( ) ; <nl> - <nl> - VLOG ( 1 ) < < " Found results for query " < < name < < " for host : " < < ident ; <nl> - status = logScheduledQueryLogItem ( item ) ; <nl> + VLOG ( 1 ) < < " Found results for query ( " < < name < < " ) for host : " < < ident ; <nl> + item . results = diff_results ; <nl> + status = logQueryLogItem ( item ) ; <nl> if ( ! status . ok ( ) ) { <nl> - LOG ( ERROR ) < < " Error logging the results of query \ " " < < query . query < < " \ " " <nl> - < < " : " < < status . toString ( ) ; <nl> + LOG ( ERROR ) < < " Error logging the results of query ( " < < query . query <nl> + < < " ) : " < < status . toString ( ) ; <nl> } <nl> } <nl> <nl> void SchedulerRunner : : enter ( ) { <nl> } <nl> } <nl> } <nl> - / / Put the thread into an interruptable sleep without a config instance . <nl> + / / Put the thread into an interruptible sleep without a config instance . <nl> osquery : : interruptableSleep ( interval_ * 1000 ) ; <nl> } <nl> } <nl> mmm a / osquery / logger / logger . cpp <nl> ppp b / osquery / logger / logger . cpp <nl> void initLogger ( const std : : string & name , bool forward_all ) { <nl> serializeIntermediateLog ( intermediate_logs , request ) ; <nl> auto status = Registry : : call ( " logger " , request ) ; <nl> if ( status . ok ( ) | | forward_all ) { <nl> - / / When init returns success we reenabled the log sink in forwarding <nl> + / / When init returns success we re - enabled the log sink in forwarding <nl> / / mode . Now , Glog status logs are buffered and sent to logStatus . <nl> BufferedLogSink : : forward ( true ) ; <nl> BufferedLogSink : : enable ( ) ; <nl> void BufferedLogSink : : send ( google : : LogSeverity severity , <nl> <nl> Status LoggerPlugin : : call ( const PluginRequest & request , <nl> PluginResponse & response ) { <nl> + QueryLogItem item ; <nl> std : : vector < StatusLogLine > intermediate_logs ; <nl> if ( request . count ( " string " ) > 0 ) { <nl> - return this - > logString ( request . at ( " string " ) ) ; <nl> + auto status = Status ( 0 , " OK " ) ; <nl> + if ( request . count ( " category " ) & & request . at ( " category " ) = = " event " ) { <nl> + / / Optionally overload the logEvent method , but receive a duplicate . <nl> + / / message to log string . <nl> + deserializeQueryLogItemJSON ( request . at ( " event " ) , item ) ; <nl> + status = this - > logEvent ( item ) ; <nl> + } <nl> + <nl> + if ( status . ok ( ) ) { <nl> + return this - > logString ( request . at ( " string " ) ) ; <nl> + } else { <nl> + return status ; <nl> + } <nl> + } else if ( request . count ( " snapshot " ) > 0 ) { <nl> + deserializeQueryLogItemJSON ( request . 
at ( " snapshot " ) , item ) ; <nl> + return this - > logSnapshot ( item ) ; <nl> + } else if ( request . count ( " health " ) > 0 ) { <nl> + deserializeQueryLogItemJSON ( request . at ( " health " ) , item ) ; <nl> + return this - > logHealth ( item ) ; <nl> } else if ( request . count ( " init " ) > 0 ) { <nl> deserializeIntermediateLog ( request , intermediate_logs ) ; <nl> return this - > init ( request . at ( " init " ) , intermediate_logs ) ; <nl> Status LoggerPlugin : : call ( const PluginRequest & request , <nl> } <nl> } <nl> <nl> - Status logString ( const std : : string & s ) { <nl> - return logString ( s , Registry : : getActive ( " logger " ) ) ; <nl> + Status logString ( const std : : string & message , const std : : string & category ) { <nl> + return logString ( message , category , Registry : : getActive ( " logger " ) ) ; <nl> } <nl> <nl> - Status logString ( const std : : string & s , const std : : string & receiver ) { <nl> + Status logString ( const std : : string & message , <nl> + const std : : string & category , <nl> + const std : : string & receiver ) { <nl> if ( ! Registry : : exists ( " logger " , receiver ) ) { <nl> LOG ( ERROR ) < < " Logger receiver " < < receiver < < " not found " ; <nl> return Status ( 1 , " Logger receiver not found " ) ; <nl> } <nl> <nl> - auto status = Registry : : call ( " logger " , receiver , { { " string " , s } } ) ; <nl> + auto status = Registry : : call ( <nl> + " logger " , receiver , { { " string " , message } , { " category " , category } } ) ; <nl> return Status ( 0 , " OK " ) ; <nl> } <nl> <nl> - Status logScheduledQueryLogItem ( const osquery : : ScheduledQueryLogItem & results ) { <nl> - return logScheduledQueryLogItem ( results , Registry : : getActive ( " logger " ) ) ; <nl> + Status logQueryLogItem ( const QueryLogItem & results ) { <nl> + return logQueryLogItem ( results , Registry : : getActive ( " logger " ) ) ; <nl> } <nl> <nl> - Status logScheduledQueryLogItem ( const osquery : : ScheduledQueryLogItem & results , <nl> - const std : : string & receiver ) { <nl> + Status logQueryLogItem ( const QueryLogItem & results , <nl> + const std : : string & receiver ) { <nl> std : : string json ; <nl> Status status ; <nl> if ( FLAGS_log_result_events ) { <nl> - status = serializeScheduledQueryLogItemAsEventsJSON ( results , json ) ; <nl> + status = serializeQueryLogItemAsEventsJSON ( results , json ) ; <nl> } else { <nl> - status = serializeScheduledQueryLogItemJSON ( results , json ) ; <nl> + status = serializeQueryLogItemJSON ( results , json ) ; <nl> } <nl> if ( ! status . ok ( ) ) { <nl> return status ; <nl> } <nl> - return logString ( json , receiver ) ; <nl> + return logString ( json , " event " , receiver ) ; <nl> + } <nl> + <nl> + Status logSnapshotQuery ( const QueryLogItem & item ) { <nl> + std : : string json ; <nl> + if ( ! serializeQueryLogItemJSON ( item , json ) ) { <nl> + return Status ( 1 , " Could not serialize snapshot " ) ; <nl> + } <nl> + return Registry : : call ( " logger " , { { " snapshot " , json } } ) ; <nl> + } <nl> + <nl> + Status logHealthStatus ( const QueryLogItem & item ) { <nl> + std : : string json ; <nl> + if ( ! serializeQueryLogItemJSON ( item , json ) ) { <nl> + return Status ( 1 , " Could not serialize health " ) ; <nl> + } <nl> + return Registry : : call ( " logger " , { { " health " , json } } ) ; <nl> } <nl> } <nl> mmm a / osquery / logger / tests / logger_tests . cpp <nl> ppp b / osquery / logger / tests / logger_tests . 
cpp <nl> class LoggerTests : public testing : : Test { <nl> <nl> / / Count calls to logStatus <nl> static int statuses_logged ; <nl> + / / Count added and removed snapshot rows <nl> + static int snapshot_rows_added ; <nl> + static int snapshot_rows_removed ; <nl> + / / Count the added health status rows <nl> + static int health_status_rows ; <nl> <nl> private : <nl> / / / Save the status of logging before running tests , restore afterward . <nl> std : : vector < std : : string > LoggerTests : : log_lines ; <nl> StatusLogLine LoggerTests : : last_status ; <nl> std : : vector < std : : string > LoggerTests : : status_messages ; <nl> int LoggerTests : : statuses_logged = 0 ; <nl> + int LoggerTests : : snapshot_rows_added = 0 ; <nl> + int LoggerTests : : snapshot_rows_removed = 0 ; <nl> + int LoggerTests : : health_status_rows = 0 ; <nl> <nl> class TestLoggerPlugin : public LoggerPlugin { <nl> public : <nl> class TestLoggerPlugin : public LoggerPlugin { <nl> return Status ( 0 , " OK " ) ; <nl> } <nl> <nl> + Status logSnapshot ( const QueryLogItem & snapshot ) { <nl> + LoggerTests : : snapshot_rows_added + = snapshot . results . added . size ( ) ; <nl> + LoggerTests : : snapshot_rows_removed + = snapshot . results . removed . size ( ) ; <nl> + return Status ( 0 , " OK " ) ; <nl> + } <nl> + <nl> + Status logHealth ( const QueryLogItem & health ) { <nl> + LoggerTests : : health_status_rows + = health . results . added . size ( ) ; <nl> + return Status ( 0 , " OK " ) ; <nl> + } <nl> + <nl> virtual ~ TestLoggerPlugin ( ) { } <nl> } ; <nl> <nl> TEST_F ( LoggerTests , test_logger_variations ) { <nl> / / does NOT handle Glog logs , there will be no statuses logged . <nl> EXPECT_EQ ( LoggerTests : : statuses_logged , 0 ) ; <nl> } <nl> + <nl> + TEST_F ( LoggerTests , test_logger_snapshots ) { <nl> + / / A snapshot query should not include removed items . <nl> + QueryLogItem item ; <nl> + item . name = " test_query " ; <nl> + item . identifier = " unknown_test_host " ; <nl> + item . time = 0 ; <nl> + item . calendar_time = " no_time " ; <nl> + <nl> + / / Add a fake set of results . <nl> + item . results . added . push_back ( { { " test_column " , " test_value " } } ) ; <nl> + logSnapshotQuery ( item ) ; <nl> + <nl> + / / Expect the plugin to optionally handle snapshot logging . <nl> + EXPECT_EQ ( LoggerTests : : snapshot_rows_added , 1 ) ; <nl> + <nl> + / / Add the same item as a health status log item . <nl> + logHealthStatus ( item ) ; <nl> + EXPECT_EQ ( LoggerTests : : health_status_rows , 1 ) ; <nl> + } <nl> } <nl>
Allow snapshot scheduled items
osquery/osquery
b66a350526764af232507af1dbd2c6f46ce12a7d
2015-04-29T22:55:00Z
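A minimal Python sketch of the exact-match differential the osquery commit computes: diff() keeps rows found in both runs as overlap and derives removals with multiset semantics (std::multiset plus std::set_difference in the C++). The dict rows and the sorted-items hashing key below are illustrative devices of this sketch, not osquery API.

from collections import Counter

def diff_query_data(old_rows, current_rows):
    # Exact row matching with multiset semantics: a duplicate row is
    # only counted as "kept" as many times as it appeared before.
    old_counts = Counter(tuple(sorted(r.items())) for r in old_rows)
    added, overlap = [], Counter()
    for row in current_rows:
        key = tuple(sorted(row.items()))
        if old_counts[key] - overlap[key] > 0:
            overlap[key] += 1      # row survived from the previous run
        else:
            added.append(row)      # row is new in this run
    removed = []
    for row in old_rows:
        key = tuple(sorted(row.items()))
        if overlap[key] > 0:
            overlap[key] -= 1      # matched by a current row
        else:
            removed.append(row)    # row vanished since the previous run
    return added, removed

# One row changed between two runs of the same scheduled query.
previous = [{"pid": "1", "name": "launchd"}, {"pid": "7", "name": "sshd"}]
current = [{"pid": "1", "name": "launchd"}, {"pid": "9", "name": "sshd"}]
print(diff_query_data(previous, current))
# -> added: the pid-9 sshd row; removed: the pid-7 sshd row

This is also why Query::addNewResults only needs to persist the latest QueryData per query name: the differential is recomputed against that single previous snapshot on every scheduled run.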
mmm a / trunk / research / api - server / server . py <nl> ppp b / trunk / research / api - server / server . py <nl> def __init__ ( self ) : <nl> <nl> self . public_ip = cherrypy . request . remote . ip <nl> self . heartbeat = time . time ( ) <nl> + <nl> + self . clients = 0 <nl> <nl> def dead ( self ) : <nl> dead_time_seconds = 10 <nl> def json_dump ( self ) : <nl> data [ " public_ip " ] = self . public_ip <nl> data [ " heartbeat " ] = self . heartbeat <nl> data [ " heartbeat_h " ] = time . strftime ( " % Y - % m - % d % H : % M : % S " , time . localtime ( self . heartbeat ) ) <nl> + data [ " clients " ] = self . clients <nl> return data <nl> <nl> ' ' ' <nl> def __json_dump_nodes ( self , peers ) : <nl> for node in peers : <nl> data . append ( node . json_dump ( ) ) <nl> return data <nl> + <nl> + def __select_peer ( self , peers , ip ) : <nl> + target = None <nl> + for peer in peers : <nl> + if target is None or target . clients > peer . clients : <nl> + target = peer <nl> + if target is None : <nl> + return None <nl> + target . clients + = 1 <nl> + return target . ip <nl> <nl> def GET ( self , type = None , format = None , origin = None , vhost = None , port = None , stream = None ) : <nl> enable_crossdomain ( ) <nl> def GET ( self , type = None , format = None , origin = None , vhost = None , port = None , stream <nl> server = origin <nl> peers = self . __get_peers_for_play ( ip ) <nl> if len ( peers ) > 0 : <nl> - server = peers [ 0 ] . ip <nl> + server = self . __select_peer ( peers , ip ) <nl> if type = = " hls " : <nl> hls_url = " http : / / % s : % s / % s . m3u8 " % ( server , port , stream ) <nl> hls_url = hls_url . replace ( " . m3u8 . m3u8 " , " . m3u8 " ) <nl> def PUT ( self ) : <nl> <nl> node . heartbeat = time . time ( ) <nl> node . srs_status = str ( json_req [ " srs_status " ] ) <nl> + node . ip = str ( json_req [ " ip " ] ) <nl> + node . public_ip = cherrypy . request . remote . ip <nl> + # reset if restart . <nl> + if node . srs_status ! = " running " : <nl> + node . clients = 0 <nl> <nl> self . __refresh_nodes ( ) <nl> peers = self . __get_peers ( node ) <nl>
Update api: select the peer with the lowest load
ossrs/srs
b85e667991dfc10628f95b738bb23c88c3b89dd7
2014-04-21T01:11:08Z
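The __select_peer method added above is a least-connections pick: among the candidate edge peers it returns the one with the fewest tracked clients and charges the new client to it. A self-contained Python sketch of the same policy; plain dicts stand in for the node objects, and the count is only approximate, since the PUT handler resets clients to 0 whenever a node reports a non-running srs_status.

def select_peer(peers):
    # Pick the peer with the fewest clients; the strict ">" means
    # ties keep the earliest candidate, as in the original loop.
    target = None
    for peer in peers:
        if target is None or target["clients"] > peer["clients"]:
            target = peer
    if target is None:
        return None
    target["clients"] += 1         # charge the new viewer to this edge
    return target["ip"]

peers = [{"ip": "10.0.0.1", "clients": 3},
         {"ip": "10.0.0.2", "clients": 1}]
print(select_peer(peers))          # 10.0.0.2, the least-loaded edge
print(peers[1]["clients"])         # now 2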
mmm a / s / commands_admin . cpp <nl> ppp b / s / commands_admin . cpp <nl> namespace mongo { <nl> <nl> if ( host = = " localhost " | | host . find ( " localhost : " ) = = 0 | | <nl> host = = " 127 . 0 . 0 . 1 " | | host . find ( " 127 . 0 . 0 . 1 : " ) = = 0 ) { <nl> - if ( cmdObj [ " allowLocal " ] . type ( ) ! = Bool | | <nl> - ! cmdObj [ " allowLocal " ] . boolean ( ) ) { <nl> + if ( ! cmdObj [ " allowLocal " ] . trueValue ( ) ) { <nl> errmsg = <nl> " can ' t use localhost as a shard since all shards need to communicate . " <nl> " allowLocal to override for testing " ; <nl>
Make addshard less strict (SHARDING-75)
mongodb/mongo
1ed6940015c5b54acc6452c539a93ae8fa2e67e1
2010-01-26T18:49:55Z
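The addshard change above replaces a strict type check (allowLocal had to be a BSON Bool set to true) with BSONElement::trueValue(), so any truthy element now grants the override. A rough Python sketch of the relaxed guard; true_value only approximates BSON truthiness here, and the function names are this sketch's, not mongo's.

def true_value(elem):
    # Approximation of BSONElement::trueValue(): a missing element is
    # false, and otherwise ordinary truthiness decides (so 1, True,
    # 1.0 all pass where the old code demanded a strict Bool true).
    return bool(elem)

def check_add_shard(cmd_obj):
    host = cmd_obj.get("host", "")
    is_local = host in ("localhost", "127.0.0.1") or \
        host.startswith(("localhost:", "127.0.0.1:"))
    if is_local and not true_value(cmd_obj.get("allowLocal")):
        raise ValueError("can't use localhost as a shard since all "
                         "shards need to communicate; allowLocal to "
                         "override for testing")
    return True

print(check_add_shard({"host": "localhost:27018", "allowLocal": 1}))
# True: a numeric allowLocal now passes; the old Bool-only check
# would have rejected it.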
mmm a / utils / build - script <nl> ppp b / utils / build - script <nl> class BuildScriptInvocation ( object ) : <nl> " - - darwin - deployment - version - watchos = % s " % ( <nl> args . darwin_deployment_version_watchos ) , <nl> " - - cmake " , toolchain . cmake , <nl> + " - - cmake - c - compiler " , toolchain . cc , <nl> + " - - cmake - cxx - compiler " , toolchain . cxx , <nl> " - - cmark - build - type " , args . cmark_build_variant , <nl> " - - llvm - build - type " , args . llvm_build_variant , <nl> " - - swift - build - type " , args . swift_build_variant , <nl> def handle_swiftenv_args ( args ) : <nl> sys . exit ( 2 ) # 2 is the same as ` argparse ` error exit code . <nl> <nl> if args . swiftenv_path is not None : <nl> + # mkdir the swiftenv_path <nl> + if not os . path . exists ( args . swiftenv_path ) : <nl> + os . makedirs ( args . swiftenv_path ) <nl> + <nl> # Only use swiftenv_make / swiftenv_script if one is provided <nl> # utils / swiftenv - make is default <nl> if args . swiftenv_make is None : <nl> iterations with - O " , <nl> " directory . " . format ( android . adb . commands . DEVICE_TEMP_DIR ) , <nl> default = android . adb . commands . DEVICE_TEMP_DIR , <nl> metavar = " PATH " ) <nl> + <nl> + parser . add_argument ( <nl> + " - - cmake - c - compiler " , <nl> + help = " the absolute path to CC , the ' clang ' compiler for the host " <nl> + " platform . Default is auto detected . " , <nl> + type = arguments . type . executable , <nl> + metavar = " PATH " ) <nl> + parser . add_argument ( <nl> + " - - cmake - cxx - compiler " , <nl> + help = " the absolute path to CXX , the ' clang + + ' compiler for the host " <nl> + " platform . Default is auto detected . " , <nl> + type = arguments . type . executable , <nl> + metavar = " PATH " ) <nl> parser . add_argument ( <nl> " - - swiftenv - path " , <nl> help = " the absolute path to a directory containing replacement compiler commands " ) <nl> iterations with - O " , <nl> # Abstracted swiftenv_args for - - preset and not <nl> handle_swiftenv_args ( args ) <nl> <nl> + # Let args . cmake_c_compiler win over swiftenv ' s cc / cxx <nl> + if args . cmake_c_compiler is None and args . swiftenv_path is not None : <nl> + if os . path . exists ( args . swiftenv_path + " / clang " ) : <nl> + args . cmake_c_compiler = args . swiftenv_path + " / clang " <nl> + if args . cmake_cxx_compiler is None and args . swiftenv_path is not None : <nl> + if os . path . exists ( args . swiftenv_path + " / clang + + " ) : <nl> + args . cmake_cxx_compiler = args . swiftenv_path + " / clang + + " <nl> + <nl> + if args . cmake_c_compiler is not None : <nl> + toolchain . cc = args . cmake_c_compiler <nl> + if args . cmake_cxx_compiler is not None : <nl> + toolchain . cxx = args . cmake_cxx_compiler <nl> + <nl> if args . cmake is not None : <nl> toolchain . cmake = args . cmake <nl> <nl> mmm a / utils / build - script - impl <nl> ppp b / utils / build - script - impl <nl> check_swiftenv_args <nl> # If we are using a swiftenv , do this next block <nl> if [ " $ { SWIFTENV_PATH } " ] ; then <nl> <nl> - if [ ! - d " $ { SWIFTENV_PATH } " ] ; then <nl> + if [ [ ! - d " $ { SWIFTENV_PATH } " ] ] ; then <nl> echo " $ { COMMAND_NAME } : SWIFTENV_PATH does not exist , it will be created " <nl> fi <nl> if [ [ " $ { SWIFTENV_RECREATE } " ] ] ; then <nl> if [ " $ { SWIFTENV_PATH } " ] ; then <nl> fi <nl> <nl> # redo the swiftenv by deleting , mkdir , and running the SWIFTENV_MAKE script <nl> - if [ ! 
- d " $ { SWIFTENV_PATH } " ] | | [ [ " $ { SWIFTENV_RECREATE } " ] ] ; then <nl> + if [ [ ! - d " $ { SWIFTENV_PATH } " ] ] | | [ [ " $ { SWIFTENV_RECREATE } " ] ] ; then <nl> if [ ! - f " $ { SWIFTENV_MAKE } " ] ; then <nl> echo " $ { COMMAND_NAME } error : SWIFTENV_MAKE is not set . " <nl> exit 2 <nl> if [ " $ { SWIFTENV_PATH } " ] ; then <nl> exit 2 <nl> fi <nl> <nl> - call rm - rf " $ { SWIFTENV_PATH } " <nl> - call mkdir - p " $ { SWIFTENV_PATH } " <nl> - echo " $ { COMMAND_NAME } : Calling \ " $ { SWIFTENV_MAKE } \ " \ " $ { SWIFTENV_PATH } \ " \ " $ { SWIFTENV_SCRIPT } \ " " <nl> - call " $ { SWIFTENV_MAKE } " " $ { SWIFTENV_PATH } " " $ { SWIFTENV_SCRIPT } " <nl> + if [ [ - d " $ { SWIFTENV_PATH } " ] ] & & [ [ " $ { SWIFTENV_RECREATE } " ] ] ; then <nl> + echo " $ { COMMAND_NAME } : SWIFTENV_PATH does exists , it will be deleted and recreated " <nl> + call rm - rf " $ { SWIFTENV_PATH } " <nl> + echo " $ { COMMAND_NAME } : Calling mkdir on SWIFTENV_PATH : $ { SWIFTENV_PATH } " <nl> + call mkdir - p " $ { SWIFTENV_PATH } " <nl> + echo " $ { COMMAND_NAME } : Calling \ " $ { SWIFTENV_MAKE } \ " \ " $ { SWIFTENV_PATH } \ " \ " $ { SWIFTENV_SCRIPT } \ " " <nl> + call " $ { SWIFTENV_MAKE } " " $ { SWIFTENV_PATH } " " $ { SWIFTENV_SCRIPT } " <nl> + fi <nl> + <nl> + else <nl> + echo " $ { COMMAND_NAME } : SWIFTENV_PATH exists , not recreating the SWIFTENV " <nl> fi <nl> fi <nl> <nl> function set_cmake_tools { <nl> if [ " $ { CMAKE_C_COMPILER } " ] ; then cmake_options = ( " $ { cmake_options [ @ ] } " - DCMAKE_C_COMPILER : PATH = " $ { CMAKE_C_COMPILER } " ) ; fi <nl> if [ " $ { CMAKE_CXX_COMPILER } " ] ; then cmake_options = ( " $ { cmake_options [ @ ] } " - DCMAKE_CXX_COMPILER : PATH = " $ { CMAKE_CXX_COMPILER } " ) ; fi <nl> if [ " $ { CMAKE_AR } " ] ; then cmake_options = ( " $ { cmake_options [ @ ] } " - DCMAKE_AR : PATH = " $ { CMAKE_AR } " ) ; fi <nl> - if [ " $ { CMAKE_LD } " ] ; then cmake_options = ( " $ { cmake_options [ @ ] } " - DCMAKE_LD : PATH = " $ { CMAKE_LD } " ) ; fi <nl> if [ " $ { CMAKE_RANLIB } " ] ; then cmake_options = ( " $ { cmake_options [ @ ] } " - DCMAKE_RANLIB : PATH = " $ { CMAKE_RANLIB } " ) ; fi <nl> if [ " $ { CMAKE_LIPO } " ] ; then cmake_options = ( " $ { cmake_options [ @ ] } " - DCMAKE_LIPO : PATH = " $ { CMAKE_LIPO } " ) ; fi <nl> - if [ " $ { CMAKE_LIBTOOL } " ] ; then cmake_options = ( " $ { cmake_options [ @ ] } " - DCMAKE_LIBTOOL : PATH = " $ { CMAKE_LIBTOOL } " ) ; fi <nl> if [ " $ { CMAKE_INSTALL_NAME_TOOL } " ] ; then cmake_options = ( " $ { cmake_options [ @ ] } " - DCMAKE_INSTALL_NAME_TOOL : PATH = " $ { CMAKE_INSTALL_NAME_TOOL } " ) ; fi <nl> if [ " $ { CMAKE_CODESIGN } " ] ; then cmake_options = ( " $ { cmake_options [ @ ] } " - DCMAKE_CODESIGN : PATH = " $ { CMAKE_CODESIGN } " ) ; fi <nl> if [ " $ { CMAKE_PYTHON } " ] ; then cmake_options = ( " $ { cmake_options [ @ ] } " - DPYTHON_EXECUTABLE : PATH = " $ { CMAKE_PYTHON } " ) ; fi <nl> for host in " $ { ALL_HOSTS [ @ ] } " ; do <nl> build_targets = ( llvm - tblgen clang - headers ) <nl> fi <nl> <nl> - set_cmake_tools <nl> + if [ " $ { CMAKE_LIBTOOL } " ] ; then <nl> + cmake_options = ( <nl> + " $ { cmake_options [ @ ] } " <nl> + - DCMAKE_LIBTOOL : PATH = " $ { CMAKE_LIBTOOL } " <nl> + ) <nl> + fi <nl> <nl> # Note : we set the variable : <nl> # <nl> mmm a / utils / swift_build_support / swift_build_support / cmake . py <nl> ppp b / utils / swift_build_support / swift_build_support / cmake . py <nl> def common_options ( self ) : <nl> if args . 
export_compile_commands : <nl> define ( " CMAKE_EXPORT_COMPILE_COMMANDS " , " ON " ) <nl> <nl> + if args . distcc : <nl> + define ( " CMAKE_C_COMPILER : PATH " , toolchain . distcc ) <nl> + define ( " CMAKE_C_COMPILER_ARG1 " , toolchain . cc ) <nl> + define ( " CMAKE_CXX_COMPILER : PATH " , toolchain . distcc ) <nl> + define ( " CMAKE_CXX_COMPILER_ARG1 " , toolchain . cxx ) <nl> + else : <nl> + define ( " CMAKE_C_COMPILER : PATH " , toolchain . cc ) <nl> + define ( " CMAKE_CXX_COMPILER : PATH " , toolchain . cxx ) <nl> + <nl> if args . cmake_generator = = ' Xcode ' : <nl> define ( " CMAKE_CONFIGURATION_TYPES " , <nl> " Debug ; Release ; MinSizeRel ; RelWithDebInfo " ) <nl> mmm a / utils / swift_build_support / tests / test_cmake . py <nl> ppp b / utils / swift_build_support / tests / test_cmake . py <nl> def mock_distcc_path ( self ) : <nl> def default_args ( self ) : <nl> " " " Return new args object with default values <nl> " " " <nl> - return Namespace ( enable_asan = False , <nl> + return Namespace ( cmake_c_compiler = " / path / to / clang " , <nl> + cmake_cxx_compiler = " / path / to / clang + + " , <nl> + enable_asan = False , <nl> enable_ubsan = False , <nl> enable_tsan = False , <nl> export_compile_commands = False , <nl> def cmake ( self , args ) : <nl> " " " Return new CMake object initialized with given args <nl> " " " <nl> toolchain = host_toolchain ( ) <nl> + toolchain . cc = args . cmake_c_compiler <nl> + toolchain . cxx = args . cmake_cxx_compiler <nl> if args . distcc : <nl> toolchain . distcc = self . mock_distcc_path ( ) <nl> toolchain . ninja = self . which_ninja ( args ) <nl> def test_common_options_defaults ( self ) : <nl> self . assertEqual ( <nl> list ( cmake . common_options ( ) ) , <nl> [ " - G " , " Ninja " , <nl> + " - DCMAKE_C_COMPILER : PATH = / path / to / clang " , <nl> + " - DCMAKE_CXX_COMPILER : PATH = / path / to / clang + + " , <nl> " - DCMAKE_MAKE_PROGRAM = " + self . which_ninja ( args ) ] ) <nl> <nl> def test_common_options_asan ( self ) : <nl> def test_common_options_asan ( self ) : <nl> list ( cmake . common_options ( ) ) , <nl> [ " - G " , " Ninja " , <nl> " - DLLVM_USE_SANITIZER = Address " , <nl> + " - DCMAKE_C_COMPILER : PATH = / path / to / clang " , <nl> + " - DCMAKE_CXX_COMPILER : PATH = / path / to / clang + + " , <nl> " - DCMAKE_MAKE_PROGRAM = " + self . which_ninja ( args ) ] ) <nl> <nl> def test_common_options_ubsan ( self ) : <nl> def test_common_options_ubsan ( self ) : <nl> list ( cmake . common_options ( ) ) , <nl> [ " - G " , " Ninja " , <nl> " - DLLVM_USE_SANITIZER = Undefined " , <nl> + " - DCMAKE_C_COMPILER : PATH = / path / to / clang " , <nl> + " - DCMAKE_CXX_COMPILER : PATH = / path / to / clang + + " , <nl> " - DCMAKE_MAKE_PROGRAM = " + self . which_ninja ( args ) ] ) <nl> <nl> def test_common_options_tsan ( self ) : <nl> def test_common_options_tsan ( self ) : <nl> list ( cmake . common_options ( ) ) , <nl> [ " - G " , " Ninja " , <nl> " - DLLVM_USE_SANITIZER = Thread " , <nl> + " - DCMAKE_C_COMPILER : PATH = / path / to / clang " , <nl> + " - DCMAKE_CXX_COMPILER : PATH = / path / to / clang + + " , <nl> " - DCMAKE_MAKE_PROGRAM = " + self . which_ninja ( args ) ] ) <nl> <nl> def test_common_options_asan_ubsan ( self ) : <nl> def test_common_options_asan_ubsan ( self ) : <nl> list ( cmake . 
common_options ( ) ) , <nl> [ " - G " , " Ninja " , <nl> " - DLLVM_USE_SANITIZER = Address ; Undefined " , <nl> + " - DCMAKE_C_COMPILER : PATH = / path / to / clang " , <nl> + " - DCMAKE_CXX_COMPILER : PATH = / path / to / clang + + " , <nl> " - DCMAKE_MAKE_PROGRAM = " + self . which_ninja ( args ) ] ) <nl> <nl> def test_common_options_ubsan_tsan ( self ) : <nl> def test_common_options_ubsan_tsan ( self ) : <nl> list ( cmake . common_options ( ) ) , <nl> [ " - G " , " Ninja " , <nl> " - DLLVM_USE_SANITIZER = Undefined ; Thread " , <nl> + " - DCMAKE_C_COMPILER : PATH = / path / to / clang " , <nl> + " - DCMAKE_CXX_COMPILER : PATH = / path / to / clang + + " , <nl> " - DCMAKE_MAKE_PROGRAM = " + self . which_ninja ( args ) ] ) <nl> <nl> def test_common_options_asan_ubsan_tsan ( self ) : <nl> def test_common_options_asan_ubsan_tsan ( self ) : <nl> list ( cmake . common_options ( ) ) , <nl> [ " - G " , " Ninja " , <nl> " - DLLVM_USE_SANITIZER = Address ; Undefined ; Thread " , <nl> + " - DCMAKE_C_COMPILER : PATH = / path / to / clang " , <nl> + " - DCMAKE_CXX_COMPILER : PATH = / path / to / clang + + " , <nl> " - DCMAKE_MAKE_PROGRAM = " + self . which_ninja ( args ) ] ) <nl> <nl> def test_common_options_export_compile_commands ( self ) : <nl> def test_common_options_export_compile_commands ( self ) : <nl> list ( cmake . common_options ( ) ) , <nl> [ " - G " , " Ninja " , <nl> " - DCMAKE_EXPORT_COMPILE_COMMANDS = ON " , <nl> + " - DCMAKE_C_COMPILER : PATH = / path / to / clang " , <nl> + " - DCMAKE_CXX_COMPILER : PATH = / path / to / clang + + " , <nl> " - DCMAKE_MAKE_PROGRAM = " + self . which_ninja ( args ) ] ) <nl> <nl> def test_common_options_distcc ( self ) : <nl> def test_common_options_distcc ( self ) : <nl> self . assertEqual ( <nl> list ( cmake . common_options ( ) ) , <nl> [ " - G " , " Ninja " , <nl> + " - DCMAKE_C_COMPILER : PATH = " + self . mock_distcc_path ( ) , <nl> + " - DCMAKE_C_COMPILER_ARG1 = / path / to / clang " , <nl> + " - DCMAKE_CXX_COMPILER : PATH = " + self . mock_distcc_path ( ) , <nl> + " - DCMAKE_CXX_COMPILER_ARG1 = / path / to / clang + + " , <nl> " - DCMAKE_MAKE_PROGRAM = " + self . which_ninja ( args ) ] ) <nl> <nl> def test_common_options_xcode ( self ) : <nl> def test_common_options_xcode ( self ) : <nl> self . assertEqual ( <nl> list ( cmake . common_options ( ) ) , <nl> [ " - G " , " Xcode " , <nl> + " - DCMAKE_C_COMPILER : PATH = / path / to / clang " , <nl> + " - DCMAKE_CXX_COMPILER : PATH = / path / to / clang + + " , <nl> " - DCMAKE_CONFIGURATION_TYPES = " + <nl> " Debug ; Release ; MinSizeRel ; RelWithDebInfo " ] ) <nl> <nl> def test_common_options_clang_compiler_version ( self ) : <nl> self . assertEqual ( <nl> list ( cmake . common_options ( ) ) , <nl> [ " - G " , " Ninja " , <nl> + " - DCMAKE_C_COMPILER : PATH = / path / to / clang " , <nl> + " - DCMAKE_CXX_COMPILER : PATH = / path / to / clang + + " , <nl> " - DLLVM_VERSION_MAJOR : STRING = 3 " , <nl> " - DLLVM_VERSION_MINOR : STRING = 8 " , <nl> " - DLLVM_VERSION_PATCH : STRING = 0 " , <nl> def test_common_options_build_ninja ( self ) : <nl> self . assertEqual ( <nl> list ( cmake . common_options ( ) ) , <nl> [ " - G " , " Ninja " , <nl> + " - DCMAKE_C_COMPILER : PATH = / path / to / clang " , <nl> + " - DCMAKE_CXX_COMPILER : PATH = / path / to / clang + + " , <nl> " - DCMAKE_MAKE_PROGRAM = " + self . 
which_ninja ( args ) ] ) <nl> <nl> def test_common_options_full ( self ) : <nl> def test_common_options_full ( self ) : <nl> [ " - G " , " Xcode " , <nl> " - DLLVM_USE_SANITIZER = Address ; Undefined " , <nl> " - DCMAKE_EXPORT_COMPILE_COMMANDS = ON " , <nl> + " - DCMAKE_C_COMPILER : PATH = " + self . mock_distcc_path ( ) , <nl> + " - DCMAKE_C_COMPILER_ARG1 = / path / to / clang " , <nl> + " - DCMAKE_CXX_COMPILER : PATH = " + self . mock_distcc_path ( ) , <nl> + " - DCMAKE_CXX_COMPILER_ARG1 = / path / to / clang + + " , <nl> " - DCMAKE_CONFIGURATION_TYPES = " + <nl> " Debug ; Release ; MinSizeRel ; RelWithDebInfo " , <nl> " - DLLVM_VERSION_MAJOR : STRING = 3 " , <nl> mmm a / utils / swiftenv - script <nl> ppp b / utils / swiftenv - script <nl> function find_real_command { <nl> fi <nl> case " $ command_name " in <nl> clang | clang + + | ld | swift | swiftc | lipo | ar | codesign | dsymutil | libtool | ranlib | strip | llvm - tblgen | python ) " $ { find_util } " " $ { command_name } " ; ; <nl> - * ) echo " $ 1 " ; ; <nl> + * ) echo " $ { command_name } " ; ; <nl> esac <nl> } <nl> function find_o_target { # last - o wins , can short circuit for faster compiles <nl> function find_o_target { # last - o wins , can short circuit for faster compiles <nl> esac ; done <nl> echo " $ { o_target } " <nl> } <nl> - function find_swift_targets { <nl> + function find_module_target { <nl> while test $ # - gt 0 ; do <nl> case " $ 1 " in <nl> - - o ) <nl> - local out = " $ 2 " ; <nl> - outfiles = ( " $ { outfiles [ @ ] } " " $ { out } " ) <nl> - if [ [ " $ 2 " = = * . swiftmodule ] ] ; then <nl> - outfiles = ( " $ { outfiles [ @ ] } " " $ { out : 0 : $ { # out } - 12 } . swiftdoc " ) <nl> - fi <nl> - shift ; shift ; ; <nl> - - output - file - map ) <nl> - outfiles = ( " $ { outfiles [ @ ] } " " $ 2 " ) ; shift ; shift ; ; <nl> - - emit - module - path ) <nl> - local module_path = " $ 2 " ; <nl> - outfiles = ( " $ { outfiles [ @ ] } " " $ { module_path } " ) <nl> - outfiles = ( " $ { outfiles [ @ ] } " " $ { module_path : 0 : $ { # module_path } - 12 } . swiftdoc " ) <nl> - shift ; shift ; ; <nl> - - emit - objc - header - path ) <nl> - outfiles = ( " $ { outfiles [ @ ] } " " $ 2 " ) ; shift ; shift ; ; <nl> + - emit - module - path ) echo " $ 2 " ; break ; ; <nl> * ) shift ; ; <nl> esac ; done <nl> } <nl> + function emit_module_p { # emit - o ' s arg + swiftmodule <nl> + while test $ # - gt 0 ; do <nl> + case " $ 1 " in <nl> + - emit - module ) return 0 ; ; <nl> + * ) shift ; ; <nl> + esac ; done ; return 1 <nl> + } <nl> function find_command_targets { # sets : outfiles <nl> local name = " $ 1 " <nl> local args = ( " $ { @ : 2 } " ) <nl> case " $ name " in <nl> clang | clang + + | ld | libtool | clang - tblgen | llvm - tblgen | lipo | gyb ) <nl> outfiles = ( " $ { outfiles [ @ ] } " $ ( find_o_target " $ { args [ @ ] } " ) ) ; ; <nl> - swift | swiftc ) find_swift_targets " $ { args [ @ ] } " ; ; <nl> + swift | swiftc ) <nl> + outfiles = ( " $ { outfiles [ @ ] } " $ ( find_o_target " $ { args [ @ ] } " ) ) ; <nl> + if emit_module_p " $ { args [ @ ] } " ; then <nl> + local last_outfile ; last_outfile = ( " $ { outfiles [ @ ] : - 1 } " ) <nl> + outfiles = ( " $ { outfiles [ @ ] } " " $ { last_outfile : 0 : $ { # last_outfile } - 12 } . 
swiftdoc " ) ; fi <nl> + find_module_target " $ { args [ @ ] } " ; ; <nl> ranlib ) outfiles = ( " $ { outfiles [ @ ] } " $ ( first " $ { args [ @ ] } " ) ) ; ; <nl> ar ) outfiles = ( " $ { outfiles [ @ ] } " $ ( second " $ { args [ @ ] } " ) ) ; ; <nl> codesign | dsymutil | install_name_tool | strip ) outfiles = ( " $ { outfiles [ @ ] } " $ ( last " $ { args [ @ ] } " ) ) ; ; <nl>
Merge pull request from apple / revert - 8268 - swiftenv - fixes
apple/swift
7a1a41ff614abd157415984839229797ded8926a
2017-03-23T00:00:47Z