diff (stringlengths 41–2.03M) | msg (stringlengths 1–1.5k ⌀) | repo (stringlengths 5–40) | sha (stringlengths 40–40) | time (stringlengths 20–20) |
---|---|---|---|---|
mmm a / src / qt / bitcoin . cpp <nl> ppp b / src / qt / bitcoin . cpp <nl> void BitcoinApplication : : parameterSetup ( ) <nl> m_node . initParameterInteraction ( ) ; <nl> } <nl> <nl> - void BitcoinApplication : : SetPrune ( bool prune ) <nl> + void BitcoinApplication : : InitializePruneSetting ( bool prune ) <nl> { <nl> / / If prune is set , intentionally override existing prune size with <nl> / / the default size since this is called when choosing a new datadir . <nl> int GuiMain ( int argc , char * argv [ ] ) <nl> <nl> if ( did_show_intro ) { <nl> / / Store intro dialog settings other than datadir ( network specific ) <nl> - app . SetPrune ( prune ) ; <nl> + app . InitializePruneSetting ( prune ) ; <nl> } <nl> <nl> if ( gArgs . GetBoolArg ( " - splash " , DEFAULT_SPLASHSCREEN ) & & ! gArgs . GetBoolArg ( " - min " , false ) ) <nl> mmm a / src / qt / bitcoin . h <nl> ppp b / src / qt / bitcoin . h <nl> class BitcoinApplication : public QApplication <nl> void parameterSetup ( ) ; <nl> / / / Create options model <nl> void createOptionsModel ( bool resetSettings ) ; <nl> - / / / Update prune value <nl> - void SetPrune ( bool prune ) ; <nl> + / / / Initialize prune setting <nl> + void InitializePruneSetting ( bool prune ) ; <nl> / / / Create main window <nl> void createWindow ( const NetworkStyle * networkStyle ) ; <nl> / / / Create splash screen <nl>
|
qt : Rename SetPrune ( ) to InitializePruneSetting ( )
|
bitcoin/bitcoin
|
af112ab62895b145660f4cd7ff842e9cfea2a530
|
2020-01-07T22:16:37Z
|
mmm a / src / library_browser . js <nl> ppp b / src / library_browser . js <nl> mergeInto ( LibraryManager . library , { <nl> # endif <nl> <nl> contextHandle = GL . createContext ( canvas , contextAttributes ) ; <nl> - ctx = GL . getContext ( contextHandle ) . GLctx ; <nl> + if ( contextHandle ) { <nl> + ctx = GL . getContext ( contextHandle ) . GLctx ; <nl> + } <nl> / / Set the background of the WebGL canvas to black <nl> canvas . style . backgroundColor = " black " ; <nl> } else { <nl>
|
Merge pull request from Daft - Freak / patch - 3
|
emscripten-core/emscripten
|
36b36ea5a3681fa5deef9f674eca29835ef4d94c
|
2014-09-02T19:13:39Z
|
mmm a / benchmark / opperf / nd_operations / array_rearrange . py <nl> ppp b / benchmark / opperf / nd_operations / array_rearrange . py <nl> <nl> <nl> def run_rearrange_operators_benchmarks ( ctx = mx . cpu ( ) , dtype = ' float32 ' , profiler = ' native ' , warmup = 25 , runs = 100 ) : <nl> " " " Runs benchmarks with the given context and precision ( dtype ) for all the <nl> - rearrange operators in MXNet . <nl> + rearrange operators in MXNet . <nl> <nl> Parameters <nl> mmmmmmmmm - <nl> def run_rearrange_operators_benchmarks ( ctx = mx . cpu ( ) , dtype = ' float32 ' , profiler = ' <nl> Dictionary of results . Key - > Name of the operator , Value - > Benchmark results . <nl> <nl> " " " <nl> - # Fetch all optimizer operators <nl> + # Fetch all array rerrange operators <nl> mx_rearrange_ops = get_all_rearrange_operators ( ) <nl> <nl> # Run benchmarks <nl> new file mode 100644 <nl> index 00000000000 . . 9d894087343 <nl> mmm / dev / null <nl> ppp b / benchmark / opperf / nd_operations / nn_loss_operators . py <nl> <nl> + # Licensed to the Apache Software Foundation ( ASF ) under one <nl> + # or more contributor license agreements . See the NOTICE file <nl> + # distributed with this work for additional information <nl> + # regarding copyright ownership . The ASF licenses this file <nl> + # to you under the Apache License , Version 2 . 0 ( the <nl> + # " License " ) ; you may not use this file except in compliance <nl> + # with the License . You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , <nl> + # software distributed under the License is distributed on an <nl> + # " AS IS " BASIS , WITHOUT WARRANTIES OR CONDITIONS OF ANY <nl> + # KIND , either express or implied . See the License for the <nl> + # specific language governing permissions and limitations <nl> + # under the License . <nl> + <nl> + import mxnet as mx <nl> + from benchmark . opperf . utils . benchmark_utils import run_op_benchmarks <nl> + from benchmark . opperf . utils . op_registry_utils import get_all_loss_operators <nl> + <nl> + " " " Performance benchmark tests for MXNet Neural Network Loss Operators <nl> + <nl> + 1 . smooth_l1 <nl> + 2 . CTCLoss <nl> + 3 . MakeLoss <nl> + 4 . softmax_cross_entropy <nl> + " " " <nl> + <nl> + <nl> + def run_loss_operators_benchmarks ( ctx = mx . cpu ( ) , dtype = ' float32 ' , profiler = ' native ' , warmup = 25 , runs = 100 ) : <nl> + " " " Runs benchmarks with the given context and precision ( dtype ) for all the <nl> + Neural Network loss operators in MXNet . <nl> + <nl> + Parameters <nl> + mmmmmmmmm - <nl> + ctx : mx . ctx <nl> + Context to run benchmarks <nl> + dtype : str , default ' float32 ' <nl> + Precision to use for benchmarks <nl> + profiler : str , default ' native ' <nl> + Type of Profiler to use ( native / python ) <nl> + warmup : int , default 25 <nl> + Number of times to run for warmup <nl> + runs : int , default 100 <nl> + Number of runs to capture benchmark results <nl> + <nl> + Returns <nl> + mmmmmm - <nl> + Dictionary of results . Key - > Name of the operator , Value - > Benchmark results . <nl> + <nl> + " " " <nl> + # Fetch all loss operators <nl> + mx_loss_ops = get_all_loss_operators ( ) <nl> + <nl> + # Run benchmarks <nl> + mx_loss_op_results = run_op_benchmarks ( mx_loss_ops , dtype , ctx , profiler , warmup , runs ) <nl> + return mx_loss_op_results <nl> mmm a / benchmark / opperf / opperf . 
py <nl> ppp b / benchmark / opperf / opperf . py <nl> <nl> from benchmark . opperf . nd_operations . nn_basic_operators import run_nn_basic_operators_benchmarks <nl> from benchmark . opperf . nd_operations . nn_optimizer_operators import run_optimizer_operators_benchmarks <nl> from benchmark . opperf . nd_operations . array_rearrange import run_rearrange_operators_benchmarks <nl> + from benchmark . opperf . nd_operations . nn_loss_operators import run_loss_operators_benchmarks <nl> <nl> from benchmark . opperf . utils . common_utils import merge_map_list , save_to_file <nl> from benchmark . opperf . utils . op_registry_utils import get_operators_with_no_benchmark , \ <nl> def run_all_mxnet_operator_benchmarks ( ctx = mx . cpu ( ) , dtype = ' float32 ' , profiler = ' n <nl> <nl> # Run all Optimizer operations benchmarks with default input values <nl> mxnet_operator_benchmark_results . append ( run_optimizer_operators_benchmarks ( ctx = ctx , dtype = dtype , profiler = profiler ) ) <nl> + <nl> # Run all Transpose Convolution operations benchmarks with default input values <nl> mxnet_operator_benchmark_results . append ( run_transpose_convolution_operators_benchmarks ( ctx = ctx , dtype = dtype , profiler = profiler ) ) <nl> <nl> + # Run all NN loss operations benchmarks with default input values <nl> + mxnet_operator_benchmark_results . append ( run_loss_operators_benchmarks ( ctx = ctx , dtype = dtype , profiler = profiler ) ) <nl> + <nl> # * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * PREPARE FINAL RESULTS * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> final_benchmark_result_map = merge_map_list ( mxnet_operator_benchmark_results ) <nl> return final_benchmark_result_map <nl> mmm a / benchmark / opperf / rules / default_params . py <nl> ppp b / benchmark / opperf / rules / default_params . py <nl> <nl> DEFAULT_DIM_1 = [ 0 ] <nl> DEFAULT_DIM_2 = [ 1 ] <nl> <nl> + # For loss operators <nl> + DEFAULT_DATA_3d = [ ( 1024 , 100 , 100 ) ] <nl> + DEFAULT_LABEL = [ ( 100 , 100 ) ] <nl> + DEFAULT_DATA_SMCE = [ ( 1024 , 1024 ) ] <nl> + DEFAULT_LABEL_SMCE = [ ( 1024 , ) ] <nl> + <nl> # Default Inputs . MXNet Op Param Name to Default Input mapping <nl> DEFAULTS_INPUTS = { " data " : DEFAULT_DATA , <nl> " sample " : DEFAULT_SAMPLE , <nl> <nl> " dim1 " : DEFAULT_DIM_1 , <nl> " dim2 " : DEFAULT_DIM_2 , <nl> " block_size " : DEFAULT_BLOCK_SIZE , <nl> - " args " : DEFAULT_ARGS } <nl> + " args " : DEFAULT_ARGS , <nl> + " data_smce " : DEFAULT_DATA_SMCE , <nl> + " data_3d " : DEFAULT_DATA_3d , <nl> + " label_smce " : DEFAULT_LABEL_SMCE , <nl> + " label " : DEFAULT_LABEL } <nl> <nl> <nl> # These are names of MXNet operator parameters that is of type NDArray . <nl> <nl> " mu " , " sigma " , " lam " , " alpha " , " beta " , " gamma " , " k " , " p " , <nl> " low " , " high " , " weight " , " bias " , " moving_mean " , " moving_var " , <nl> " weight " , " weight32 " , " grad " , " mean " , " var " , " mom " , " n " , " d " , <nl> - " v " , " z " , " g " , " delta " , " args " ] <nl> + " v " , " z " , " g " , " delta " , " args " , " label " ] <nl> mmm a / benchmark / opperf / utils / benchmark_utils . py <nl> ppp b / benchmark / opperf / utils / benchmark_utils . py <nl> <nl> from . profiler_utils import cpp_profile , python_profile <nl> <nl> <nl> + no_backward = [ ' softmax_cross_entropy ' ] <nl> + <nl> def _prepare_op_inputs ( inputs , run_backward , dtype , ctx ) : <nl> mx . random . 
seed ( 41 ) <nl> kwargs_list = [ ] <nl> def run_op_benchmarks ( ops , dtype , ctx , profiler , warmup , runs ) : <nl> for op , op_params in ops . items ( ) : <nl> # Prepare inputs for the operator <nl> inputs = prepare_op_inputs ( op , op_params ) <nl> + <nl> + # setting backward false for ops with known issue <nl> + if op in no_backward : <nl> + op_params [ " has_backward " ] = False <nl> + <nl> # Run benchmarks <nl> cur_op_res = run_performance_test ( op_params [ " nd_op_handle " ] , <nl> run_backward = op_params [ " has_backward " ] , <nl> mmm a / benchmark / opperf / utils / op_registry_utils . py <nl> ppp b / benchmark / opperf / utils / op_registry_utils . py <nl> def prepare_op_inputs ( op , arg_params ) : <nl> # 4d tensor is needed only by following two ops <nl> ops_4d = [ ' depth_to_space ' , ' space_to_depth ' ] <nl> <nl> + # 3d tensor is needed by following ops <nl> + ops_3d = [ ' CTCLoss ' , ' ctc_loss ' ] <nl> + <nl> # Prepare op to default input mapping <nl> arg_values = { } <nl> for arg_name , arg_type in zip ( arg_params [ " params " ] [ " arg_names " ] , <nl> def prepare_op_inputs ( op , arg_params ) : <nl> arg_values [ arg_name ] = DEFAULTS_INPUTS [ arg_name + " _nd " ] <nl> elif " NDArray " in arg_type and op in ops_4d and arg_name + " _4d " in DEFAULTS_INPUTS : <nl> arg_values [ arg_name ] = DEFAULTS_INPUTS [ arg_name + " _4d " ] <nl> + elif " NDArray " in arg_type and op in ops_3d and arg_name + " _3d " in DEFAULTS_INPUTS : <nl> + arg_values [ arg_name ] = DEFAULTS_INPUTS [ arg_name + " _3d " ] <nl> + elif " NDArray " in arg_type and op = = ' softmax_cross_entropy ' : <nl> + arg_values [ arg_name ] = DEFAULTS_INPUTS [ arg_name + " _smce " ] <nl> elif arg_name in DEFAULTS_INPUTS : <nl> arg_values [ arg_name ] = DEFAULTS_INPUTS [ arg_name ] <nl> elif " float " in arg_type and arg_name + " _float " in DEFAULTS_INPUTS : <nl> def get_all_rearrange_operators ( ) : <nl> if op_name in rearrange_ops and op_name not in unique_ops : <nl> rearrange_mx_operators [ op_name ] = mx_operators [ op_name ] <nl> return rearrange_mx_operators <nl> - <nl> + <nl> + def get_all_loss_operators ( ) : <nl> + " " " Gets all Neural Network loss operators registered with MXNet . <nl> + <nl> + Returns <nl> + mmmmmm - <nl> + { " operator_name " : { " has_backward " , " nd_op_handle " , " params " } } <nl> + " " " <nl> + loss_ops = [ ' smooth_l1 ' , ' CTCLoss ' , ' ctc_loss ' , ' MakeLoss ' , ' softmax_cross_entropy ' ] <nl> + <nl> + # Get all mxnet operators <nl> + mx_operators = _get_all_mxnet_operators ( ) <nl> + <nl> + # Filter for NN Loss operators <nl> + loss_mx_operators = { } <nl> + for op_name , op_params in mx_operators . items ( ) : <nl> + if op_name in loss_ops and op_name not in unique_ops : <nl> + loss_mx_operators [ op_name ] = mx_operators [ op_name ] <nl> + return loss_mx_operators <nl> + <nl> <nl> def get_operators_with_no_benchmark ( operators_with_benchmark ) : <nl> " " " Gets all MXNet operators with not benchmark . <nl> mmm a / benchmark / opperf / utils / profiler_utils . py <nl> ppp b / benchmark / opperf / utils / profiler_utils . 
py <nl> def _get_operator_profile ( operator_name , operator_profile_results ) : <nl> # alias map : dictionary of the form { " alias " : " registered_name " } <nl> # allows to retrieve alias operator profile from the profiler results <nl> # TODO handling - " identity " : " _copy " <nl> - alias_map = { " broadcast_plus " : " broadcast_add " , " broadcast_minus " : " broadcast_sub " , " flatten " : " Flatten " , " max_axis " : " max " , <nl> - " swapaxes " : " SwapAxis " , " flip " : " reverse " , " reshape " : " Reshape " , " crop " : " slice " , " sum_axis " : " sum " , " min_axis " : " min " } <nl> + alias_map = { " broadcast_plus " : " broadcast_add " , " broadcast_minus " : " broadcast_sub " , " flatten " : " Flatten " , " max_axis " : " max " , <nl> + " swapaxes " : " SwapAxis " , " flip " : " reverse " , " reshape " : " Reshape " , " crop " : " slice " , " sum_axis " : " sum " , " min_axis " : " min " , " CTCLoss " : " ctc_loss " } <nl> <nl> op_name = None <nl> <nl>
|
[ OpPerf ] Add Neural network loss ops ( )
|
apache/incubator-mxnet
|
88cc54d6153fba046f167d8d1780561fecead2f7
|
2020-02-03T18:24:17Z
|
mmm a / validation - test / compiler_crashers_2_fixed / 0128 - rdar35088384 . swift <nl> ppp b / validation - test / compiler_crashers_2_fixed / 0128 - rdar35088384 . swift <nl> <nl> - / / RUN : % swift - target - frontend - typecheck - verify % s <nl> + / / RUN : % target - swift - frontend - typecheck - verify % s <nl> <nl> protocol Command { } <nl> <nl>
|
% swift - target - frontend = > % target - swift - frontend .
|
apple/swift
|
13d19e5a23dd03161c18e8eb10e5e8623b6a32b2
|
2018-01-16T18:48:04Z
|
mmm a / src / bitcoinrpc . cpp <nl> ppp b / src / bitcoinrpc . cpp <nl> using namespace boost ; <nl> using namespace boost : : asio ; <nl> using namespace json_spirit ; <nl> <nl> - void ThreadRPCServer2 ( void * parg ) ; <nl> - <nl> static std : : string strRPCUserColonPass ; <nl> <nl> - const Object emptyobj ; <nl> - <nl> - void ThreadRPCServer3 ( void * parg ) ; <nl> + / / These are created by StartRPCThreads , destroyed in StopRPCThreads <nl> + static asio : : io_service * rpc_io_service = NULL ; <nl> + static ssl : : context * rpc_ssl_context = NULL ; <nl> + static boost : : thread_group * rpc_worker_group = NULL ; <nl> <nl> static inline unsigned short GetDefaultRPCPort ( ) <nl> { <nl> class AcceptedConnectionImpl : public AcceptedConnection <nl> iostreams : : stream < SSLIOStreamDevice < Protocol > > _stream ; <nl> } ; <nl> <nl> - void ThreadRPCServer ( void * parg ) <nl> - { <nl> - / / Make this thread recognisable as the RPC listener <nl> - RenameThread ( " bitcoin - rpclist " ) ; <nl> - <nl> - try <nl> - { <nl> - vnThreadsRunning [ THREAD_RPCLISTENER ] + + ; <nl> - ThreadRPCServer2 ( parg ) ; <nl> - vnThreadsRunning [ THREAD_RPCLISTENER ] - - ; <nl> - } <nl> - catch ( std : : exception & e ) { <nl> - vnThreadsRunning [ THREAD_RPCLISTENER ] - - ; <nl> - PrintException ( & e , " ThreadRPCServer ( ) " ) ; <nl> - } catch ( . . . ) { <nl> - vnThreadsRunning [ THREAD_RPCLISTENER ] - - ; <nl> - PrintException ( NULL , " ThreadRPCServer ( ) " ) ; <nl> - } <nl> - printf ( " ThreadRPCServer exited \ n " ) ; <nl> - } <nl> + void ServiceConnection ( AcceptedConnection * conn ) ; <nl> <nl> / / Forward declaration required for RPCListen <nl> template < typename Protocol , typename SocketAcceptorService > <nl> static void RPCAcceptHandler ( boost : : shared_ptr < basic_socket_acceptor < Protocol , <nl> AcceptedConnection * conn , <nl> const boost : : system : : error_code & error ) <nl> { <nl> - vnThreadsRunning [ THREAD_RPCLISTENER ] + + ; <nl> - <nl> / / Immediately start accepting new connections , except when we ' re cancelled or our socket is closed . <nl> - if ( error ! = asio : : error : : operation_aborted <nl> - & & acceptor - > is_open ( ) ) <nl> + if ( error ! = asio : : error : : operation_aborted & & acceptor - > is_open ( ) ) <nl> RPCListen ( acceptor , context , fUseSSL ) ; <nl> <nl> AcceptedConnectionImpl < ip : : tcp > * tcp_conn = dynamic_cast < AcceptedConnectionImpl < ip : : tcp > * > ( conn ) ; <nl> static void RPCAcceptHandler ( boost : : shared_ptr < basic_socket_acceptor < Protocol , <nl> / / Restrict callers by IP . It is important to <nl> / / do this before starting client thread , to filter out <nl> / / certain DoS and misbehaving clients . <nl> - else if ( tcp_conn <nl> - & & ! ClientAllowed ( tcp_conn - > peer . address ( ) ) ) <nl> + else if ( tcp_conn & & ! ClientAllowed ( tcp_conn - > peer . address ( ) ) ) <nl> { <nl> / / Only send a 403 if we ' re not using SSL to prevent a DoS during the SSL handshake . <nl> if ( ! fUseSSL ) <nl> conn - > stream ( ) < < HTTPReply ( HTTP_FORBIDDEN , " " , false ) < < std : : flush ; <nl> delete conn ; <nl> } <nl> - <nl> - / / start HTTP client thread <nl> - else if ( ! 
NewThread ( ThreadRPCServer3 , conn ) ) { <nl> - printf ( " Failed to create RPC server client thread \ n " ) ; <nl> + else { <nl> + ServiceConnection ( conn ) ; <nl> + conn - > close ( ) ; <nl> delete conn ; <nl> } <nl> - <nl> - vnThreadsRunning [ THREAD_RPCLISTENER ] - - ; <nl> } <nl> <nl> - void ThreadRPCServer2 ( void * parg ) <nl> + void StartRPCThreads ( ) <nl> { <nl> - printf ( " ThreadRPCServer started \ n " ) ; <nl> - <nl> strRPCUserColonPass = mapArgs [ " - rpcuser " ] + " : " + mapArgs [ " - rpcpassword " ] ; <nl> if ( ( mapArgs [ " - rpcpassword " ] = = " " ) | | <nl> ( mapArgs [ " - rpcuser " ] = = mapArgs [ " - rpcpassword " ] ) ) <nl> void ThreadRPCServer2 ( void * parg ) <nl> return ; <nl> } <nl> <nl> - const bool fUseSSL = GetBoolArg ( " - rpcssl " ) ; <nl> + assert ( rpc_io_service = = NULL ) ; <nl> + rpc_io_service = new asio : : io_service ( ) ; <nl> + rpc_ssl_context = new ssl : : context ( * rpc_io_service , ssl : : context : : sslv23 ) ; <nl> <nl> - asio : : io_service io_service ; <nl> + const bool fUseSSL = GetBoolArg ( " - rpcssl " ) ; <nl> <nl> - ssl : : context context ( io_service , ssl : : context : : sslv23 ) ; <nl> if ( fUseSSL ) <nl> { <nl> - context . set_options ( ssl : : context : : no_sslv2 ) ; <nl> + rpc_ssl_context - > set_options ( ssl : : context : : no_sslv2 ) ; <nl> <nl> filesystem : : path pathCertFile ( GetArg ( " - rpcsslcertificatechainfile " , " server . cert " ) ) ; <nl> if ( ! pathCertFile . is_complete ( ) ) pathCertFile = filesystem : : path ( GetDataDir ( ) ) / pathCertFile ; <nl> - if ( filesystem : : exists ( pathCertFile ) ) context . use_certificate_chain_file ( pathCertFile . string ( ) ) ; <nl> + if ( filesystem : : exists ( pathCertFile ) ) rpc_ssl_context - > use_certificate_chain_file ( pathCertFile . string ( ) ) ; <nl> else printf ( " ThreadRPCServer ERROR : missing server certificate file % s \ n " , pathCertFile . string ( ) . c_str ( ) ) ; <nl> <nl> filesystem : : path pathPKFile ( GetArg ( " - rpcsslprivatekeyfile " , " server . pem " ) ) ; <nl> if ( ! pathPKFile . is_complete ( ) ) pathPKFile = filesystem : : path ( GetDataDir ( ) ) / pathPKFile ; <nl> - if ( filesystem : : exists ( pathPKFile ) ) context . use_private_key_file ( pathPKFile . string ( ) , ssl : : context : : pem ) ; <nl> + if ( filesystem : : exists ( pathPKFile ) ) rpc_ssl_context - > use_private_key_file ( pathPKFile . string ( ) , ssl : : context : : pem ) ; <nl> else printf ( " ThreadRPCServer ERROR : missing server private key file % s \ n " , pathPKFile . string ( ) . c_str ( ) ) ; <nl> <nl> string strCiphers = GetArg ( " - rpcsslciphers " , " TLSv1 + HIGH : ! SSLv2 : ! aNULL : ! eNULL : ! AH : ! 3DES : @ STRENGTH " ) ; <nl> - SSL_CTX_set_cipher_list ( context . impl ( ) , strCiphers . c_str ( ) ) ; <nl> + SSL_CTX_set_cipher_list ( rpc_ssl_context - > impl ( ) , strCiphers . c_str ( ) ) ; <nl> } <nl> <nl> / / Try a dual IPv6 / IPv4 socket , falling back to separate IPv4 and IPv6 sockets <nl> void ThreadRPCServer2 ( void * parg ) <nl> asio : : ip : : address bindAddress = loopback ? 
asio : : ip : : address_v6 : : loopback ( ) : asio : : ip : : address_v6 : : any ( ) ; <nl> ip : : tcp : : endpoint endpoint ( bindAddress , GetArg ( " - rpcport " , GetDefaultRPCPort ( ) ) ) ; <nl> boost : : system : : error_code v6_only_error ; <nl> - boost : : shared_ptr < ip : : tcp : : acceptor > acceptor ( new ip : : tcp : : acceptor ( io_service ) ) ; <nl> - <nl> - boost : : signals2 : : signal < void ( ) > StopRequests ; <nl> + boost : : shared_ptr < ip : : tcp : : acceptor > acceptor ( new ip : : tcp : : acceptor ( * rpc_io_service ) ) ; <nl> <nl> bool fListening = false ; <nl> std : : string strerr ; <nl> void ThreadRPCServer2 ( void * parg ) <nl> acceptor - > bind ( endpoint ) ; <nl> acceptor - > listen ( socket_base : : max_connections ) ; <nl> <nl> - RPCListen ( acceptor , context , fUseSSL ) ; <nl> - / / Cancel outstanding listen - requests for this acceptor when shutting down <nl> - StopRequests . connect ( signals2 : : slot < void ( ) > ( <nl> - static_cast < void ( ip : : tcp : : acceptor : : * ) ( ) > ( & ip : : tcp : : acceptor : : close ) , acceptor . get ( ) ) <nl> - . track ( acceptor ) ) ; <nl> + RPCListen ( acceptor , * rpc_ssl_context , fUseSSL ) ; <nl> <nl> fListening = true ; <nl> } <nl> void ThreadRPCServer2 ( void * parg ) <nl> bindAddress = loopback ? asio : : ip : : address_v4 : : loopback ( ) : asio : : ip : : address_v4 : : any ( ) ; <nl> endpoint . address ( bindAddress ) ; <nl> <nl> - acceptor . reset ( new ip : : tcp : : acceptor ( io_service ) ) ; <nl> + acceptor . reset ( new ip : : tcp : : acceptor ( * rpc_io_service ) ) ; <nl> acceptor - > open ( endpoint . protocol ( ) ) ; <nl> acceptor - > set_option ( boost : : asio : : ip : : tcp : : acceptor : : reuse_address ( true ) ) ; <nl> acceptor - > bind ( endpoint ) ; <nl> acceptor - > listen ( socket_base : : max_connections ) ; <nl> <nl> - RPCListen ( acceptor , context , fUseSSL ) ; <nl> - / / Cancel outstanding listen - requests for this acceptor when shutting down <nl> - StopRequests . connect ( signals2 : : slot < void ( ) > ( <nl> - static_cast < void ( ip : : tcp : : acceptor : : * ) ( ) > ( & ip : : tcp : : acceptor : : close ) , acceptor . get ( ) ) <nl> - . track ( acceptor ) ) ; <nl> + RPCListen ( acceptor , * rpc_ssl_context , fUseSSL ) ; <nl> <nl> fListening = true ; <nl> } <nl> void ThreadRPCServer2 ( void * parg ) <nl> return ; <nl> } <nl> <nl> - vnThreadsRunning [ THREAD_RPCLISTENER ] - - ; <nl> - while ( ! fShutdown ) <nl> - io_service . 
run_one ( ) ; <nl> - vnThreadsRunning [ THREAD_RPCLISTENER ] + + ; <nl> - StopRequests ( ) ; <nl> + rpc_worker_group = new boost : : thread_group ( ) ; <nl> + for ( int i = 0 ; i < GetArg ( " - rpcthreads " , 4 ) ; i + + ) <nl> + rpc_worker_group - > create_thread ( boost : : bind ( & asio : : io_service : : run , rpc_io_service ) ) ; <nl> + } <nl> + <nl> + void StopRPCThreads ( ) <nl> + { <nl> + if ( rpc_io_service = = NULL ) return ; <nl> + <nl> + rpc_io_service - > stop ( ) ; <nl> + rpc_worker_group - > join_all ( ) ; <nl> + delete rpc_worker_group ; rpc_worker_group = NULL ; <nl> + delete rpc_ssl_context ; rpc_ssl_context = NULL ; <nl> + delete rpc_io_service ; rpc_io_service = NULL ; <nl> } <nl> <nl> class JSONRequest <nl> static string JSONRPCExecBatch ( const Array & vReq ) <nl> return write_string ( Value ( ret ) , false ) + " \ n " ; <nl> } <nl> <nl> - static CCriticalSection cs_THREAD_RPCHANDLER ; <nl> - <nl> - void ThreadRPCServer3 ( void * parg ) <nl> + void ServiceConnection ( AcceptedConnection * conn ) <nl> { <nl> - / / Make this thread recognisable as the RPC handler <nl> - RenameThread ( " bitcoin - rpchand " ) ; <nl> - <nl> - { <nl> - LOCK ( cs_THREAD_RPCHANDLER ) ; <nl> - vnThreadsRunning [ THREAD_RPCHANDLER ] + + ; <nl> - } <nl> - AcceptedConnection * conn = ( AcceptedConnection * ) parg ; <nl> - <nl> bool fRun = true ; <nl> - loop { <nl> - if ( fShutdown | | ! fRun ) <nl> - { <nl> - conn - > close ( ) ; <nl> - delete conn ; <nl> - { <nl> - LOCK ( cs_THREAD_RPCHANDLER ) ; <nl> - - - vnThreadsRunning [ THREAD_RPCHANDLER ] ; <nl> - } <nl> - return ; <nl> - } <nl> - <nl> + while ( fRun ) <nl> + { <nl> int nProto = 0 ; <nl> map < string , string > mapHeaders ; <nl> string strRequest , strMethod , strURI ; <nl> void ThreadRPCServer3 ( void * parg ) <nl> break ; <nl> } <nl> } <nl> - <nl> - delete conn ; <nl> - { <nl> - LOCK ( cs_THREAD_RPCHANDLER ) ; <nl> - vnThreadsRunning [ THREAD_RPCHANDLER ] - - ; <nl> - } <nl> } <nl> <nl> json_spirit : : Value CRPCTable : : execute ( const std : : string & strMethod , const json_spirit : : Array & params ) const <nl> mmm a / src / bitcoinrpc . h <nl> ppp b / src / bitcoinrpc . h <nl> enum RPCErrorCode <nl> <nl> json_spirit : : Object JSONRPCError ( int code , const std : : string & message ) ; <nl> <nl> - void ThreadRPCServer ( void * parg ) ; <nl> + void StartRPCThreads ( ) ; <nl> + void StopRPCThreads ( ) ; <nl> int CommandLineRPC ( int argc , char * argv [ ] ) ; <nl> <nl> / * * Convert parameter values for RPC call from strings to command - specific JSON objects . * / <nl> mmm a / src / checkqueue . h <nl> ppp b / src / checkqueue . h <nl> template < typename T > class CCheckQueue { <nl> / / Master thread blocks on this when out of work <nl> boost : : condition_variable condMaster ; <nl> <nl> - / / Quit method blocks on this until all workers are gone <nl> - boost : : condition_variable condQuit ; <nl> - <nl> / / The queue of elements to be processed . <nl> / / As the order of booleans doesn ' t matter , it is used as a LIFO ( stack ) <nl> std : : vector < T > queue ; <nl> template < typename T > class CCheckQueue { <nl> while ( queue . empty ( ) ) { <nl> if ( ( fMaster | | fQuit ) & & nTodo = = 0 ) { <nl> nTotal - - ; <nl> - if ( nTotal = = 0 ) <nl> - condQuit . notify_one ( ) ; <nl> bool fRet = fAllOk ; <nl> / / reset the status for new work later <nl> if ( fMaster ) <nl> template < typename T > class CCheckQueue { <nl> condWorker . 
notify_all ( ) ; <nl> } <nl> <nl> - / / Shut the queue down <nl> - void Quit ( ) { <nl> - boost : : unique_lock < boost : : mutex > lock ( mutex ) ; <nl> - fQuit = true ; <nl> - / / No need to wake the master , as he will quit automatically when all jobs are <nl> - / / done . <nl> - condWorker . notify_all ( ) ; <nl> - <nl> - while ( nTotal > 0 ) <nl> - condQuit . wait ( lock ) ; <nl> - } <nl> - <nl> ~ CCheckQueue ( ) { <nl> - Quit ( ) ; <nl> } <nl> <nl> friend class CCheckQueueControl < T > ; <nl> mmm a / src / init . cpp <nl> ppp b / src / init . cpp <nl> void Shutdown ( void * parg ) <nl> <nl> / / Make this thread recognisable as the shutdown thread <nl> RenameThread ( " bitcoin - shutoff " ) ; <nl> - <nl> - bool fFirstThread = false ; <nl> - { <nl> - TRY_LOCK ( cs_Shutdown , lockShutdown ) ; <nl> - if ( lockShutdown ) <nl> - { <nl> - fFirstThread = ! fTaken ; <nl> - fTaken = true ; <nl> - } <nl> - } <nl> - static bool fExit ; <nl> - if ( fFirstThread ) <nl> + nTransactionsUpdated + + ; <nl> + StopRPCThreads ( ) ; <nl> + bitdb . Flush ( false ) ; <nl> + StopNode ( ) ; <nl> { <nl> fShutdown = true ; <nl> fRequestShutdown = true ; <nl> nTransactionsUpdated + + ; <nl> + StopRPCThreads ( ) ; <nl> bitdb . Flush ( false ) ; <nl> - { <nl> - LOCK ( cs_main ) ; <nl> - ThreadScriptCheckQuit ( ) ; <nl> - } <nl> StopNode ( ) ; <nl> { <nl> LOCK ( cs_main ) ; <nl> void Shutdown ( void * parg ) <nl> void DetectShutdownThread ( boost : : thread_group * threadGroup ) <nl> { <nl> while ( fRequestShutdown = = false ) <nl> - Sleep ( 200 ) ; <nl> + MilliSleep ( 200 ) ; <nl> threadGroup - > interrupt_all ( ) ; <nl> } <nl> <nl> std : : string HelpMessage ( ) <nl> " - rpcport = < port > " + _ ( " Listen for JSON - RPC connections on < port > ( default : 8332 or testnet : 18332 ) " ) + " \ n " + <nl> " - rpcallowip = < ip > " + _ ( " Allow JSON - RPC connections from specified IP address " ) + " \ n " + <nl> " - rpcconnect = < ip > " + _ ( " Send commands to node running on < ip > ( default : 127 . 0 . 0 . 1 ) " ) + " \ n " + <nl> + " - rpcthreads = < n > " + _ ( " Use this mean threads to service RPC calls ( default : 4 ) " ) + " \ n " + <nl> " - blocknotify = < cmd > " + _ ( " Execute command when the best block changes ( % s in cmd is replaced by block hash ) " ) + " \ n " + <nl> " - walletnotify = < cmd > " + _ ( " Execute command when a wallet transaction changes ( % s in cmd is replaced by TxID ) " ) + " \ n " + <nl> " - alertnotify = < cmd > " + _ ( " Execute command when a relevant alert is received ( % s in cmd is replaced by message ) " ) + " \ n " + <nl> struct CImportingNow <nl> } <nl> } ; <nl> <nl> - struct CImportData { <nl> - std : : vector < boost : : filesystem : : path > vFiles ; <nl> - } ; <nl> - <nl> - void ThreadImport ( void * data ) { <nl> - CImportData * import = reinterpret_cast < CImportData * > ( data ) ; <nl> <nl> + void ThreadImport ( std : : vector < boost : : filesystem : : path > vImportFiles ) <nl> + { <nl> RenameThread ( " bitcoin - loadblk " ) ; <nl> <nl> - vnThreadsRunning [ THREAD_IMPORT ] + + ; <nl> - <nl> / / - reindex <nl> if ( fReindex ) { <nl> CImportingNow imp ; <nl> int nFile = 0 ; <nl> - while ( ! fRequestShutdown ) { <nl> + while ( true ) { <nl> CDiskBlockPos pos ( nFile , 0 ) ; <nl> FILE * file = OpenBlockFile ( pos , true ) ; <nl> if ( ! file ) <nl> void ThreadImport ( void * data ) { <nl> LoadExternalBlockFile ( file , & pos ) ; <nl> nFile + + ; <nl> } <nl> - if ( ! 
fRequestShutdown ) { <nl> - pblocktree - > WriteReindexing ( false ) ; <nl> - fReindex = false ; <nl> - printf ( " Reindexing finished \ n " ) ; <nl> - / / To avoid ending up in a situation without genesis block , re - try initializing ( no - op if reindexing worked ) : <nl> - InitBlockIndex ( ) ; <nl> - } <nl> + pblocktree - > WriteReindexing ( false ) ; <nl> + fReindex = false ; <nl> + printf ( " Reindexing finished \ n " ) ; <nl> + / / To avoid ending up in a situation without genesis block , re - try initializing ( no - op if reindexing worked ) : <nl> + InitBlockIndex ( ) ; <nl> } <nl> <nl> / / hardcoded $ DATADIR / bootstrap . dat <nl> filesystem : : path pathBootstrap = GetDataDir ( ) / " bootstrap . dat " ; <nl> - if ( filesystem : : exists ( pathBootstrap ) & & ! fRequestShutdown ) { <nl> + if ( filesystem : : exists ( pathBootstrap ) ) { <nl> FILE * file = fopen ( pathBootstrap . string ( ) . c_str ( ) , " rb " ) ; <nl> if ( file ) { <nl> CImportingNow imp ; <nl> void ThreadImport ( void * data ) { <nl> } <nl> <nl> / / - loadblock = <nl> - BOOST_FOREACH ( boost : : filesystem : : path & path , import - > vFiles ) { <nl> - if ( fRequestShutdown ) <nl> - break ; <nl> + BOOST_FOREACH ( boost : : filesystem : : path & path , vImportFiles ) { <nl> FILE * file = fopen ( path . string ( ) . c_str ( ) , " rb " ) ; <nl> if ( file ) { <nl> CImportingNow imp ; <nl> void ThreadImport ( void * data ) { <nl> LoadExternalBlockFile ( file ) ; <nl> } <nl> } <nl> - <nl> - delete import ; <nl> - <nl> - vnThreadsRunning [ THREAD_IMPORT ] - - ; <nl> } <nl> <nl> / * * Initialize bitcoin . <nl> bool AppInit2 ( boost : : thread_group & threadGroup ) <nl> if ( nScriptCheckThreads ) { <nl> printf ( " Using % u threads for script verification \ n " , nScriptCheckThreads ) ; <nl> for ( int i = 0 ; i < nScriptCheckThreads - 1 ; i + + ) <nl> - NewThread ( ThreadScriptCheck , NULL ) ; <nl> + threadGroup . create_thread ( & ThreadScriptCheck ) ; <nl> } <nl> <nl> int64 nStart ; <nl> bool AppInit2 ( boost : : thread_group & threadGroup ) <nl> fNoListen = ! GetBoolArg ( " - listen " , true ) ; <nl> fDiscover = GetBoolArg ( " - discover " , true ) ; <nl> fNameLookup = GetBoolArg ( " - dns " , true ) ; <nl> - # ifdef USE_UPNP <nl> - fUseUPnP = GetBoolArg ( " - upnp " , USE_UPNP ) ; <nl> - # endif <nl> <nl> bool fBound = false ; <nl> if ( ! fNoListen ) { <nl> bool AppInit2 ( boost : : thread_group & threadGroup ) <nl> if ( ! ConnectBestBlock ( state ) ) <nl> strErrors < < " Failed to connect best block " ; <nl> <nl> - CImportData * pimport = new CImportData ( ) ; <nl> + std : : vector < boost : : filesystem : : path > vImportFiles ; <nl> if ( mapArgs . count ( " - loadblock " ) ) <nl> { <nl> BOOST_FOREACH ( string strFile , mapMultiArgs [ " - loadblock " ] ) <nl> - pimport - > vFiles . push_back ( strFile ) ; <nl> + vImportFiles . push_back ( strFile ) ; <nl> } <nl> - NewThread ( ThreadImport , pimport ) ; <nl> + threadGroup . create_thread ( boost : : bind ( & ThreadImport , vImportFiles ) ) ; <nl> <nl> / / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Step 10 : load peers <nl> <nl> bool AppInit2 ( boost : : thread_group & threadGroup ) <nl> printf ( " mapWallet . size ( ) = % " PRIszu " \ n " , pwalletMain - > mapWallet . size ( ) ) ; <nl> printf ( " mapAddressBook . size ( ) = % " PRIszu " \ n " , pwalletMain - > mapAddressBook . size ( ) ) ; <nl> <nl> - if ( ! NewThread ( StartNode , NULL ) ) <nl> + if ( ! 
NewThread ( StartNode , ( void * ) & threadGroup ) ) <nl> InitError ( _ ( " Error : could not start node " ) ) ; <nl> <nl> if ( fServer ) <nl> - NewThread ( ThreadRPCServer , NULL ) ; <nl> + StartRPCThreads ( ) ; <nl> <nl> / / Generate coins in the background <nl> GenerateBitcoins ( GetBoolArg ( " - gen " , false ) , pwalletMain ) ; <nl> mmm a / src / main . cpp <nl> ppp b / src / main . cpp <nl> bool FindUndoPos ( CValidationState & state , int nFile , CDiskBlockPos & pos , unsigne <nl> <nl> static CCheckQueue < CScriptCheck > scriptcheckqueue ( 128 ) ; <nl> <nl> - void ThreadScriptCheck ( void * ) { <nl> - vnThreadsRunning [ THREAD_SCRIPTCHECK ] + + ; <nl> + void ThreadScriptCheck ( ) { <nl> RenameThread ( " bitcoin - scriptch " ) ; <nl> scriptcheckqueue . Thread ( ) ; <nl> - vnThreadsRunning [ THREAD_SCRIPTCHECK ] - - ; <nl> - } <nl> - <nl> - void ThreadScriptCheckQuit ( ) { <nl> - scriptcheckqueue . Quit ( ) ; <nl> } <nl> <nl> bool CBlock : : ConnectBlock ( CValidationState & state , CBlockIndex * pindex , CCoinsViewCache & view , bool fJustCheck ) <nl> bool LoadExternalBlockFile ( FILE * fileIn , CDiskBlockPos * dbp ) <nl> } <nl> } <nl> uint64 nRewind = blkdat . GetPos ( ) ; <nl> - while ( blkdat . good ( ) & & ! blkdat . eof ( ) & & ! fRequestShutdown ) { <nl> + while ( blkdat . good ( ) & & ! blkdat . eof ( ) ) { <nl> + boost : : this_thread : : interruption_point ( ) ; <nl> + <nl> blkdat . SetPos ( nRewind ) ; <nl> nRewind + + ; / / start one byte further next time , in case of failure <nl> blkdat . SetLimit ( ) ; / / remove former limit <nl> mmm a / src / main . h <nl> ppp b / src / main . h <nl> CBlockIndex * FindBlockByHeight ( int nHeight ) ; <nl> bool ProcessMessages ( CNode * pfrom ) ; <nl> / * * Send queued protocol messages to be sent to a give node * / <nl> bool SendMessages ( CNode * pto , bool fSendTrickle ) ; <nl> - / * * Run the importer thread , which deals with reindexing , loading bootstrap . dat , and whatever is passed to - loadblock * / <nl> - void ThreadImport ( void * parg ) ; <nl> / * * Run an instance of the script checking thread * / <nl> - void ThreadScriptCheck ( void * parg ) ; <nl> - / * * Stop the script checking threads * / <nl> - void ThreadScriptCheckQuit ( ) ; <nl> + void ThreadScriptCheck ( ) ; <nl> / * * Run the miner threads * / <nl> void GenerateBitcoins ( bool fGenerate , CWallet * pwallet ) ; <nl> / * * Generate a new block , without valid proof - of - work * / <nl> mmm a / src / net . cpp <nl> ppp b / src / net . cpp <nl> using namespace boost ; <nl> <nl> static const int MAX_OUTBOUND_CONNECTIONS = 8 ; <nl> <nl> - void ThreadMessageHandler2 ( void * parg ) ; <nl> - void ThreadSocketHandler2 ( void * parg ) ; <nl> - void ThreadOpenConnections2 ( void * parg ) ; <nl> - void ThreadOpenAddedConnections2 ( void * parg ) ; <nl> - # ifdef USE_UPNP <nl> - void ThreadMapPort2 ( void * parg ) ; <nl> - # endif <nl> - void ThreadDNSAddressSeed2 ( void * parg ) ; <nl> bool OpenNetworkConnection ( const CAddress & addrConnect , CSemaphoreGrant * grantOutbound = NULL , const char * strDest = NULL , bool fOneShot = false ) ; <nl> <nl> <nl> struct LocalServiceInfo { <nl> / / Global state variables <nl> / / <nl> bool fDiscover = true ; <nl> - bool fUseUPnP = false ; <nl> uint64 nLocalServices = NODE_NETWORK ; <nl> static CCriticalSection cs_mapLocalHost ; <nl> static map < CNetAddr , LocalServiceInfo > mapLocalHost ; <nl> void SocketSendData ( CNode * pnode ) <nl> pnode - > vSendMsg . erase ( pnode - > vSendMsg . 
begin ( ) , it ) ; <nl> } <nl> <nl> - void ThreadSocketHandler ( void * parg ) <nl> - { <nl> - / / Make this thread recognisable as the networking thread <nl> - RenameThread ( " bitcoin - net " ) ; <nl> - <nl> - try <nl> - { <nl> - vnThreadsRunning [ THREAD_SOCKETHANDLER ] + + ; <nl> - ThreadSocketHandler2 ( parg ) ; <nl> - vnThreadsRunning [ THREAD_SOCKETHANDLER ] - - ; <nl> - } <nl> - catch ( std : : exception & e ) { <nl> - vnThreadsRunning [ THREAD_SOCKETHANDLER ] - - ; <nl> - PrintException ( & e , " ThreadSocketHandler ( ) " ) ; <nl> - } catch ( . . . ) { <nl> - vnThreadsRunning [ THREAD_SOCKETHANDLER ] - - ; <nl> - throw ; / / support pthread_cancel ( ) <nl> - } <nl> - printf ( " ThreadSocketHandler exited \ n " ) ; <nl> - } <nl> - <nl> static list < CNode * > vNodesDisconnected ; <nl> <nl> - void ThreadSocketHandler2 ( void * parg ) <nl> + void ThreadSocketHandler ( ) <nl> { <nl> - printf ( " ThreadSocketHandler started \ n " ) ; <nl> unsigned int nPrevNodeCount = 0 ; <nl> loop <nl> { <nl> void ThreadSocketHandler2 ( void * parg ) <nl> } <nl> } <nl> <nl> - vnThreadsRunning [ THREAD_SOCKETHANDLER ] - - ; <nl> int nSelect = select ( have_fds ? hSocketMax + 1 : 0 , <nl> & fdsetRecv , & fdsetSend , & fdsetError , & timeout ) ; <nl> - vnThreadsRunning [ THREAD_SOCKETHANDLER ] + + ; <nl> - if ( fShutdown ) <nl> - return ; <nl> + boost : : this_thread : : interruption_point ( ) ; <nl> + <nl> if ( nSelect = = SOCKET_ERROR ) <nl> { <nl> if ( have_fds ) <nl> void ThreadSocketHandler2 ( void * parg ) <nl> } <nl> BOOST_FOREACH ( CNode * pnode , vNodesCopy ) <nl> { <nl> - if ( fShutdown ) <nl> - return ; <nl> + boost : : this_thread : : interruption_point ( ) ; <nl> <nl> / / <nl> / / Receive <nl> void ThreadSocketHandler2 ( void * parg ) <nl> <nl> <nl> # ifdef USE_UPNP <nl> - void ThreadMapPort ( void * parg ) <nl> + void ThreadMapPort ( ) <nl> { <nl> - / / Make this thread recognisable as the UPnP thread <nl> - RenameThread ( " bitcoin - UPnP " ) ; <nl> - <nl> - try <nl> - { <nl> - vnThreadsRunning [ THREAD_UPNP ] + + ; <nl> - ThreadMapPort2 ( parg ) ; <nl> - vnThreadsRunning [ THREAD_UPNP ] - - ; <nl> - } <nl> - catch ( std : : exception & e ) { <nl> - vnThreadsRunning [ THREAD_UPNP ] - - ; <nl> - PrintException ( & e , " ThreadMapPort ( ) " ) ; <nl> - } catch ( . . . ) { <nl> - vnThreadsRunning [ THREAD_UPNP ] - - ; <nl> - PrintException ( NULL , " ThreadMapPort ( ) " ) ; <nl> - } <nl> - printf ( " ThreadMapPort exited \ n " ) ; <nl> - } <nl> - <nl> - void ThreadMapPort2 ( void * parg ) <nl> - { <nl> - printf ( " ThreadMapPort started \ n " ) ; <nl> - <nl> std : : string port = strprintf ( " % u " , GetListenPort ( ) ) ; <nl> const char * multicastif = 0 ; <nl> const char * minissdpdpath = 0 ; <nl> void ThreadMapPort2 ( void * parg ) <nl> } <nl> <nl> string strDesc = " Bitcoin " + FormatFullVersion ( ) ; <nl> - # ifndef UPNPDISCOVER_SUCCESS <nl> - / * miniupnpc 1 . 5 * / <nl> - r = UPNP_AddPortMapping ( urls . controlURL , data . first . servicetype , <nl> - port . c_str ( ) , port . c_str ( ) , lanaddr , strDesc . c_str ( ) , " TCP " , 0 ) ; <nl> - # else <nl> - / * miniupnpc 1 . 6 * / <nl> - r = UPNP_AddPortMapping ( urls . controlURL , data . first . servicetype , <nl> - port . c_str ( ) , port . c_str ( ) , lanaddr , strDesc . c_str ( ) , " TCP " , 0 , " 0 " ) ; <nl> - # endif <nl> <nl> - if ( r ! = UPNPCOMMAND_SUCCESS ) <nl> - printf ( " AddPortMapping ( % s , % s , % s ) failed with code % d ( % s ) \ n " , <nl> - port . c_str ( ) , port . 
c_str ( ) , lanaddr , r , strupnperror ( r ) ) ; <nl> - else <nl> - printf ( " UPnP Port Mapping successful . \ n " ) ; <nl> - int i = 1 ; <nl> - loop { <nl> - if ( fShutdown | | ! fUseUPnP ) <nl> - { <nl> - r = UPNP_DeletePortMapping ( urls . controlURL , data . first . servicetype , port . c_str ( ) , " TCP " , 0 ) ; <nl> - printf ( " UPNP_DeletePortMapping ( ) returned : % d \ n " , r ) ; <nl> - freeUPNPDevlist ( devlist ) ; devlist = 0 ; <nl> - FreeUPNPUrls ( & urls ) ; <nl> - return ; <nl> - } <nl> - if ( i % 600 = = 0 ) / / Refresh every 20 minutes <nl> - { <nl> + try { <nl> + loop { <nl> # ifndef UPNPDISCOVER_SUCCESS <nl> / * miniupnpc 1 . 5 * / <nl> r = UPNP_AddPortMapping ( urls . controlURL , data . first . servicetype , <nl> void ThreadMapPort2 ( void * parg ) <nl> port . c_str ( ) , port . c_str ( ) , lanaddr , r , strupnperror ( r ) ) ; <nl> else <nl> printf ( " UPnP Port Mapping successful . \ n " ) ; ; <nl> + <nl> + MilliSleep ( 20 * 60 * 1000 ) ; / / Refresh every 20 minutes <nl> } <nl> - MilliSleep ( 2000 ) ; <nl> - i + + ; <nl> + } <nl> + catch ( boost : : thread_interrupted ) <nl> + { <nl> + r = UPNP_DeletePortMapping ( urls . controlURL , data . first . servicetype , port . c_str ( ) , " TCP " , 0 ) ; <nl> + printf ( " UPNP_DeletePortMapping ( ) returned : % d \ n " , r ) ; <nl> + freeUPNPDevlist ( devlist ) ; devlist = 0 ; <nl> + FreeUPNPUrls ( & urls ) ; <nl> + throw ; <nl> } <nl> } else { <nl> printf ( " No valid UPnP IGDs found \ n " ) ; <nl> freeUPNPDevlist ( devlist ) ; devlist = 0 ; <nl> if ( r ! = 0 ) <nl> FreeUPNPUrls ( & urls ) ; <nl> - loop { <nl> - if ( fShutdown | | ! fUseUPnP ) <nl> - return ; <nl> - MilliSleep ( 2000 ) ; <nl> - } <nl> } <nl> } <nl> <nl> - void MapPort ( ) <nl> + void MapPort ( bool fUseUPnP ) <nl> { <nl> - if ( fUseUPnP & & vnThreadsRunning [ THREAD_UPNP ] < 1 ) <nl> + static boost : : thread * upnp_thread = NULL ; <nl> + <nl> + if ( fUseUPnP ) <nl> { <nl> - if ( ! NewThread ( ThreadMapPort , NULL ) ) <nl> - printf ( " Error : ThreadMapPort ( ThreadMapPort ) failed \ n " ) ; <nl> + if ( upnp_thread ) { <nl> + upnp_thread - > interrupt ( ) ; <nl> + upnp_thread - > join ( ) ; <nl> + delete upnp_thread ; <nl> + } <nl> + upnp_thread = new boost : : thread ( boost : : bind ( & TraceThread < boost : : function < void ( ) > > , " upnp " , & ThreadMapPort ) ) ; <nl> + } <nl> + else if ( upnp_thread ) { <nl> + upnp_thread - > interrupt ( ) ; <nl> + upnp_thread - > join ( ) ; <nl> + delete upnp_thread ; <nl> + upnp_thread = NULL ; <nl> } <nl> } <nl> + <nl> # else <nl> - void MapPort ( ) <nl> + void MapPort ( bool ) <nl> { <nl> / / Intentionally left blank . <nl> } <nl> static const char * strTestNetDNSSeed [ ] [ 2 ] = { <nl> { NULL , NULL } <nl> } ; <nl> <nl> - void ThreadDNSAddressSeed ( void * parg ) <nl> - { <nl> - / / Make this thread recognisable as the DNS seeding thread <nl> - RenameThread ( " bitcoin - dnsseed " ) ; <nl> - <nl> - try <nl> - { <nl> - vnThreadsRunning [ THREAD_DNSSEED ] + + ; <nl> - ThreadDNSAddressSeed2 ( parg ) ; <nl> - vnThreadsRunning [ THREAD_DNSSEED ] - - ; <nl> - } <nl> - catch ( std : : exception & e ) { <nl> - vnThreadsRunning [ THREAD_DNSSEED ] - - ; <nl> - PrintException ( & e , " ThreadDNSAddressSeed ( ) " ) ; <nl> - } catch ( . . . 
) { <nl> - vnThreadsRunning [ THREAD_DNSSEED ] - - ; <nl> - throw ; / / support pthread_cancel ( ) <nl> - } <nl> - printf ( " ThreadDNSAddressSeed exited \ n " ) ; <nl> - } <nl> - <nl> - void ThreadDNSAddressSeed2 ( void * parg ) <nl> + void ThreadDNSAddressSeed ( ) <nl> { <nl> static const char * ( * strDNSSeed ) [ 2 ] = fTestNet ? strTestNetDNSSeed : strMainNetDNSSeed ; <nl> <nl> - printf ( " ThreadDNSAddressSeed started \ n " ) ; <nl> int found = 0 ; <nl> <nl> printf ( " Loading addresses from DNS seeds ( could take a while ) \ n " ) ; <nl> void DumpAddresses ( ) <nl> addrman . size ( ) , GetTimeMillis ( ) - nStart ) ; <nl> } <nl> <nl> - void ThreadDumpAddress2 ( void * parg ) <nl> - { <nl> - printf ( " ThreadDumpAddress started \ n " ) ; <nl> - <nl> - vnThreadsRunning [ THREAD_DUMPADDRESS ] + + ; <nl> - while ( ! fShutdown ) <nl> - { <nl> - DumpAddresses ( ) ; <nl> - vnThreadsRunning [ THREAD_DUMPADDRESS ] - - ; <nl> - MilliSleep ( 100000 ) ; <nl> - vnThreadsRunning [ THREAD_DUMPADDRESS ] + + ; <nl> - } <nl> - vnThreadsRunning [ THREAD_DUMPADDRESS ] - - ; <nl> - } <nl> - <nl> - void ThreadDumpAddress ( void * parg ) <nl> - { <nl> - / / Make this thread recognisable as the address dumping thread <nl> - RenameThread ( " bitcoin - adrdump " ) ; <nl> - <nl> - try <nl> - { <nl> - ThreadDumpAddress2 ( parg ) ; <nl> - } <nl> - catch ( std : : exception & e ) { <nl> - PrintException ( & e , " ThreadDumpAddress ( ) " ) ; <nl> - } <nl> - printf ( " ThreadDumpAddress exited \ n " ) ; <nl> - } <nl> - <nl> - void ThreadOpenConnections ( void * parg ) <nl> - { <nl> - / / Make this thread recognisable as the connection opening thread <nl> - RenameThread ( " bitcoin - opencon " ) ; <nl> - <nl> - try <nl> - { <nl> - vnThreadsRunning [ THREAD_OPENCONNECTIONS ] + + ; <nl> - ThreadOpenConnections2 ( parg ) ; <nl> - vnThreadsRunning [ THREAD_OPENCONNECTIONS ] - - ; <nl> - } <nl> - catch ( std : : exception & e ) { <nl> - vnThreadsRunning [ THREAD_OPENCONNECTIONS ] - - ; <nl> - PrintException ( & e , " ThreadOpenConnections ( ) " ) ; <nl> - } catch ( . . . ) { <nl> - vnThreadsRunning [ THREAD_OPENCONNECTIONS ] - - ; <nl> - PrintException ( NULL , " ThreadOpenConnections ( ) " ) ; <nl> - } <nl> - printf ( " ThreadOpenConnections exited \ n " ) ; <nl> - } <nl> - <nl> void static ProcessOneShot ( ) <nl> { <nl> string strDest ; <nl> void static ProcessOneShot ( ) <nl> } <nl> } <nl> <nl> - void ThreadOpenConnections2 ( void * parg ) <nl> + void ThreadOpenConnections ( ) <nl> { <nl> - printf ( " ThreadOpenConnections started \ n " ) ; <nl> - <nl> / / Connect to specific addresses <nl> if ( mapArgs . count ( " - connect " ) & & mapMultiArgs [ " - connect " ] . size ( ) > 0 ) <nl> { <nl> void ThreadOpenConnections2 ( void * parg ) <nl> for ( int i = 0 ; i < 10 & & i < nLoop ; i + + ) <nl> { <nl> MilliSleep ( 500 ) ; <nl> - if ( fShutdown ) <nl> - return ; <nl> } <nl> } <nl> MilliSleep ( 500 ) ; <nl> void ThreadOpenConnections2 ( void * parg ) <nl> { <nl> ProcessOneShot ( ) ; <nl> <nl> - vnThreadsRunning [ THREAD_OPENCONNECTIONS ] - - ; <nl> MilliSleep ( 500 ) ; <nl> - vnThreadsRunning [ THREAD_OPENCONNECTIONS ] + + ; <nl> - if ( fShutdown ) <nl> - return ; <nl> <nl> - <nl> - vnThreadsRunning [ THREAD_OPENCONNECTIONS ] - - ; <nl> CSemaphoreGrant grant ( * semOutbound ) ; <nl> - vnThreadsRunning [ THREAD_OPENCONNECTIONS ] + + ; <nl> - if ( fShutdown ) <nl> - return ; <nl> + boost : : this_thread : : interruption_point ( ) ; <nl> <nl> / / Add seed nodes if IRC isn ' t working <nl> if ( addrman . 
size ( ) = = 0 & & ( GetTime ( ) - nStart > 60 ) & & ! fTestNet ) <nl> void ThreadOpenConnections2 ( void * parg ) <nl> } <nl> } <nl> <nl> - void ThreadOpenAddedConnections ( void * parg ) <nl> + void ThreadOpenAddedConnections ( ) <nl> { <nl> - / / Make this thread recognisable as the connection opening thread <nl> - RenameThread ( " bitcoin - opencon " ) ; <nl> - <nl> - try <nl> - { <nl> - vnThreadsRunning [ THREAD_ADDEDCONNECTIONS ] + + ; <nl> - ThreadOpenAddedConnections2 ( parg ) ; <nl> - vnThreadsRunning [ THREAD_ADDEDCONNECTIONS ] - - ; <nl> - } <nl> - catch ( std : : exception & e ) { <nl> - vnThreadsRunning [ THREAD_ADDEDCONNECTIONS ] - - ; <nl> - PrintException ( & e , " ThreadOpenAddedConnections ( ) " ) ; <nl> - } catch ( . . . ) { <nl> - vnThreadsRunning [ THREAD_ADDEDCONNECTIONS ] - - ; <nl> - PrintException ( NULL , " ThreadOpenAddedConnections ( ) " ) ; <nl> - } <nl> - printf ( " ThreadOpenAddedConnections exited \ n " ) ; <nl> - } <nl> - <nl> - void ThreadOpenAddedConnections2 ( void * parg ) <nl> - { <nl> - printf ( " ThreadOpenAddedConnections started \ n " ) ; <nl> - <nl> { <nl> LOCK ( cs_vAddedNodes ) ; <nl> vAddedNodes = mapMultiArgs [ " - addnode " ] ; <nl> } <nl> <nl> if ( HaveNameProxy ( ) ) { <nl> - while ( ! fShutdown ) { <nl> + while ( true ) { <nl> list < string > lAddresses ( 0 ) ; <nl> { <nl> LOCK ( cs_vAddedNodes ) ; <nl> void ThreadOpenAddedConnections2 ( void * parg ) <nl> CSemaphoreGrant grant ( * semOutbound ) ; <nl> OpenNetworkConnection ( addr , & grant , strAddNode . c_str ( ) ) ; <nl> MilliSleep ( 500 ) ; <nl> - if ( fShutdown ) <nl> - return ; <nl> } <nl> - vnThreadsRunning [ THREAD_ADDEDCONNECTIONS ] - - ; <nl> MilliSleep ( 120000 ) ; / / Retry every 2 minutes <nl> - vnThreadsRunning [ THREAD_ADDEDCONNECTIONS ] + + ; <nl> } <nl> - return ; <nl> } <nl> <nl> for ( unsigned int i = 0 ; true ; i + + ) <nl> void ThreadOpenAddedConnections2 ( void * parg ) <nl> CSemaphoreGrant grant ( * semOutbound ) ; <nl> OpenNetworkConnection ( CAddress ( vserv [ i % vserv . size ( ) ] ) , & grant ) ; <nl> MilliSleep ( 500 ) ; <nl> - if ( fShutdown ) <nl> - return ; <nl> } <nl> - if ( fShutdown ) <nl> - return ; <nl> - vnThreadsRunning [ THREAD_ADDEDCONNECTIONS ] - - ; <nl> MilliSleep ( 120000 ) ; / / Retry every 2 minutes <nl> - vnThreadsRunning [ THREAD_ADDEDCONNECTIONS ] + + ; <nl> - if ( fShutdown ) <nl> - return ; <nl> } <nl> } <nl> <nl> bool OpenNetworkConnection ( const CAddress & addrConnect , CSemaphoreGrant * grantOu <nl> / / <nl> / / Initiate outbound network connection <nl> / / <nl> - if ( fShutdown ) <nl> - return false ; <nl> + boost : : this_thread : : interruption_point ( ) ; <nl> if ( ! strDest ) <nl> if ( IsLocal ( addrConnect ) | | <nl> FindNode ( ( CNetAddr ) addrConnect ) | | CNode : : IsBanned ( addrConnect ) | | <nl> bool OpenNetworkConnection ( const CAddress & addrConnect , CSemaphoreGrant * grantOu <nl> if ( strDest & & FindNode ( strDest ) ) <nl> return false ; <nl> <nl> - vnThreadsRunning [ THREAD_OPENCONNECTIONS ] - - ; <nl> CNode * pnode = ConnectNode ( addrConnect , strDest ) ; <nl> - vnThreadsRunning [ THREAD_OPENCONNECTIONS ] + + ; <nl> - if ( fShutdown ) <nl> - return false ; <nl> + boost : : this_thread : : interruption_point ( ) ; <nl> + <nl> if ( ! 
pnode ) <nl> return false ; <nl> if ( grantOutbound ) <nl> bool OpenNetworkConnection ( const CAddress & addrConnect , CSemaphoreGrant * grantOu <nl> <nl> <nl> <nl> - <nl> - void ThreadMessageHandler ( void * parg ) <nl> - { <nl> - / / Make this thread recognisable as the message handling thread <nl> - RenameThread ( " bitcoin - msghand " ) ; <nl> - <nl> - try <nl> - { <nl> - vnThreadsRunning [ THREAD_MESSAGEHANDLER ] + + ; <nl> - ThreadMessageHandler2 ( parg ) ; <nl> - vnThreadsRunning [ THREAD_MESSAGEHANDLER ] - - ; <nl> - } <nl> - catch ( std : : exception & e ) { <nl> - vnThreadsRunning [ THREAD_MESSAGEHANDLER ] - - ; <nl> - PrintException ( & e , " ThreadMessageHandler ( ) " ) ; <nl> - } catch ( . . . ) { <nl> - vnThreadsRunning [ THREAD_MESSAGEHANDLER ] - - ; <nl> - PrintException ( NULL , " ThreadMessageHandler ( ) " ) ; <nl> - } <nl> - printf ( " ThreadMessageHandler exited \ n " ) ; <nl> - } <nl> - <nl> - void ThreadMessageHandler2 ( void * parg ) <nl> + void ThreadMessageHandler ( ) <nl> { <nl> - printf ( " ThreadMessageHandler started \ n " ) ; <nl> SetThreadPriority ( THREAD_PRIORITY_BELOW_NORMAL ) ; <nl> - while ( ! fShutdown ) <nl> + while ( true ) <nl> { <nl> vector < CNode * > vNodesCopy ; <nl> { <nl> void ThreadMessageHandler2 ( void * parg ) <nl> if ( ! ProcessMessages ( pnode ) ) <nl> pnode - > CloseSocketDisconnect ( ) ; <nl> } <nl> - if ( fShutdown ) <nl> - return ; <nl> + boost : : this_thread : : interruption_point ( ) ; <nl> <nl> / / Send messages <nl> { <nl> void ThreadMessageHandler2 ( void * parg ) <nl> if ( lockSend ) <nl> SendMessages ( pnode , pnode = = pnodeTrickle ) ; <nl> } <nl> - if ( fShutdown ) <nl> - return ; <nl> + boost : : this_thread : : interruption_point ( ) ; <nl> } <nl> <nl> { <nl> void ThreadMessageHandler2 ( void * parg ) <nl> pnode - > Release ( ) ; <nl> } <nl> <nl> - / / Wait and allow messages to bunch up . <nl> - / / Reduce vnThreadsRunning so StopNode has permission to exit while <nl> - / / we ' re sleeping , but we must always check fShutdown after doing this . <nl> - vnThreadsRunning [ THREAD_MESSAGEHANDLER ] - - ; <nl> MilliSleep ( 100 ) ; <nl> - if ( fRequestShutdown ) <nl> - StartShutdown ( ) ; <nl> - vnThreadsRunning [ THREAD_MESSAGEHANDLER ] + + ; <nl> - if ( fShutdown ) <nl> - return ; <nl> } <nl> } <nl> <nl> void static Discover ( ) <nl> <nl> void StartNode ( void * parg ) <nl> { <nl> + boost : : thread_group * threadGroup = ( boost : : thread_group * ) parg ; <nl> + <nl> / / Make this thread recognisable as the startup thread <nl> RenameThread ( " bitcoin - start " ) ; <nl> <nl> void StartNode ( void * parg ) <nl> if ( ! GetBoolArg ( " - dnsseed " , true ) ) <nl> printf ( " DNS seeding disabled \ n " ) ; <nl> else <nl> - if ( ! NewThread ( ThreadDNSAddressSeed , NULL ) ) <nl> - printf ( " Error : NewThread ( ThreadDNSAddressSeed ) failed \ n " ) ; <nl> + threadGroup - > create_thread ( boost : : bind ( & TraceThread < boost : : function < void ( ) > > , " dnsseed " , & ThreadDNSAddressSeed ) ) ; <nl> <nl> / / Map ports with UPnP <nl> - if ( fUseUPnP ) <nl> - MapPort ( ) ; <nl> + MapPort ( GetBoolArg ( " - upnp " , USE_UPNP ) ) ; <nl> <nl> / / Send and receive from sockets , accept connections <nl> - if ( ! NewThread ( ThreadSocketHandler , NULL ) ) <nl> - printf ( " Error : NewThread ( ThreadSocketHandler ) failed \ n " ) ; <nl> + threadGroup - > create_thread ( boost : : bind ( & TraceThread < void ( * ) ( ) > , " net " , & ThreadSocketHandler ) ) ; <nl> <nl> / / Initiate outbound connections from - addnode <nl> - if ( ! 
NewThread ( ThreadOpenAddedConnections , NULL ) ) <nl> - printf ( " Error : NewThread ( ThreadOpenAddedConnections ) failed \ n " ) ; <nl> + threadGroup - > create_thread ( boost : : bind ( & TraceThread < void ( * ) ( ) > , " addcon " , & ThreadOpenAddedConnections ) ) ; <nl> <nl> / / Initiate outbound connections <nl> - if ( ! NewThread ( ThreadOpenConnections , NULL ) ) <nl> - printf ( " Error : NewThread ( ThreadOpenConnections ) failed \ n " ) ; <nl> + threadGroup - > create_thread ( boost : : bind ( & TraceThread < void ( * ) ( ) > , " opencon " , & ThreadOpenConnections ) ) ; <nl> <nl> / / Process messages <nl> - if ( ! NewThread ( ThreadMessageHandler , NULL ) ) <nl> - printf ( " Error : NewThread ( ThreadMessageHandler ) failed \ n " ) ; <nl> + threadGroup - > create_thread ( boost : : bind ( & TraceThread < void ( * ) ( ) > , " msghand " , & ThreadMessageHandler ) ) ; <nl> <nl> / / Dump network addresses <nl> - if ( ! NewThread ( ThreadDumpAddress , NULL ) ) <nl> - printf ( " Error ; NewThread ( ThreadDumpAddress ) failed \ n " ) ; <nl> + threadGroup - > create_thread ( boost : : bind ( & LoopForever < void ( * ) ( ) > , " dumpaddr " , & DumpAddresses , 10000 ) ) ; <nl> } <nl> <nl> bool StopNode ( ) <nl> { <nl> printf ( " StopNode ( ) \ n " ) ; <nl> GenerateBitcoins ( false , NULL ) ; <nl> + MapPort ( false ) ; <nl> fShutdown = true ; <nl> nTransactionsUpdated + + ; <nl> int64 nStart = GetTime ( ) ; <nl> bool StopNode ( ) <nl> break ; <nl> MilliSleep ( 20 ) ; <nl> } while ( true ) ; <nl> - if ( vnThreadsRunning [ THREAD_SOCKETHANDLER ] > 0 ) printf ( " ThreadSocketHandler still running \ n " ) ; <nl> - if ( vnThreadsRunning [ THREAD_OPENCONNECTIONS ] > 0 ) printf ( " ThreadOpenConnections still running \ n " ) ; <nl> - if ( vnThreadsRunning [ THREAD_MESSAGEHANDLER ] > 0 ) printf ( " ThreadMessageHandler still running \ n " ) ; <nl> - if ( vnThreadsRunning [ THREAD_RPCLISTENER ] > 0 ) printf ( " ThreadRPCListener still running \ n " ) ; <nl> - if ( vnThreadsRunning [ THREAD_RPCHANDLER ] > 0 ) printf ( " ThreadsRPCServer still running \ n " ) ; <nl> - # ifdef USE_UPNP <nl> - if ( vnThreadsRunning [ THREAD_UPNP ] > 0 ) printf ( " ThreadMapPort still running \ n " ) ; <nl> - # endif <nl> - if ( vnThreadsRunning [ THREAD_DNSSEED ] > 0 ) printf ( " ThreadDNSAddressSeed still running \ n " ) ; <nl> - if ( vnThreadsRunning [ THREAD_ADDEDCONNECTIONS ] > 0 ) printf ( " ThreadOpenAddedConnections still running \ n " ) ; <nl> - if ( vnThreadsRunning [ THREAD_DUMPADDRESS ] > 0 ) printf ( " ThreadDumpAddresses still running \ n " ) ; <nl> - while ( vnThreadsRunning [ THREAD_MESSAGEHANDLER ] > 0 | | vnThreadsRunning [ THREAD_RPCHANDLER ] > 0 ) <nl> - MilliSleep ( 20 ) ; <nl> MilliSleep ( 50 ) ; <nl> DumpAddresses ( ) ; <nl> <nl> mmm a / src / net . h <nl> ppp b / src / net . 
h <nl> void AddressCurrentlyConnected ( const CService & addr ) ; <nl> CNode * FindNode ( const CNetAddr & ip ) ; <nl> CNode * FindNode ( const CService & ip ) ; <nl> CNode * ConnectNode ( CAddress addrConnect , const char * strDest = NULL , int64 nTimeout = 0 ) ; <nl> - void MapPort ( ) ; <nl> + void MapPort ( bool fUseUPnP ) ; <nl> unsigned short GetListenPort ( ) ; <nl> bool BindListenPort ( const CService & bindAddr , std : : string & strError = REF ( std : : string ( ) ) ) ; <nl> void StartNode ( void * parg ) ; <nl> CAddress GetLocalAddress ( const CNetAddr * paddrPeer = NULL ) ; <nl> / * * Thread types * / <nl> enum threadId <nl> { <nl> - THREAD_SOCKETHANDLER , <nl> - THREAD_OPENCONNECTIONS , <nl> - THREAD_MESSAGEHANDLER , <nl> - THREAD_RPCLISTENER , <nl> - THREAD_UPNP , <nl> - THREAD_DNSSEED , <nl> - THREAD_ADDEDCONNECTIONS , <nl> - THREAD_DUMPADDRESS , <nl> - THREAD_RPCHANDLER , <nl> - THREAD_IMPORT , <nl> - THREAD_SCRIPTCHECK , <nl> - <nl> THREAD_MAX <nl> } ; <nl> <nl> extern bool fDiscover ; <nl> - extern bool fUseUPnP ; <nl> extern uint64 nLocalServices ; <nl> extern uint64 nLocalHostNonce ; <nl> extern boost : : array < int , THREAD_MAX > vnThreadsRunning ; <nl> mmm a / src / qt / optionsmodel . cpp <nl> ppp b / src / qt / optionsmodel . cpp <nl> bool OptionsModel : : setData ( const QModelIndex & index , const QVariant & value , in <nl> settings . setValue ( " fMinimizeToTray " , fMinimizeToTray ) ; <nl> break ; <nl> case MapPortUPnP : <nl> - fUseUPnP = value . toBool ( ) ; <nl> - settings . setValue ( " fUseUPnP " , fUseUPnP ) ; <nl> - MapPort ( ) ; <nl> + settings . setValue ( " fUseUPnP " , value . toBool ( ) ) ; <nl> + MapPort ( value . toBool ( ) ) ; <nl> break ; <nl> case MinimizeOnClose : <nl> fMinimizeOnClose = value . toBool ( ) ; <nl> mmm a / src / test / test_bitcoin . cpp <nl> ppp b / src / test / test_bitcoin . cpp <nl> extern void noui_connect ( ) ; <nl> struct TestingSetup { <nl> CCoinsViewDB * pcoinsdbview ; <nl> boost : : filesystem : : path pathTemp ; <nl> + boost : : thread_group threadGroup ; <nl> <nl> TestingSetup ( ) { <nl> fPrintToDebugger = true ; / / don ' t want to write to debug . log file <nl> struct TestingSetup { <nl> RegisterWallet ( pwalletMain ) ; <nl> nScriptCheckThreads = 3 ; <nl> for ( int i = 0 ; i < nScriptCheckThreads - 1 ; i + + ) <nl> - NewThread ( ThreadScriptCheck , NULL ) ; <nl> + threadGroup . create_thread ( & ThreadScriptCheck ) ; <nl> } <nl> ~ TestingSetup ( ) <nl> { <nl> - ThreadScriptCheckQuit ( ) ; <nl> + threadGroup . interrupt_all ( ) ; <nl> + threadGroup . join_all ( ) ; <nl> delete pwalletMain ; <nl> pwalletMain = NULL ; <nl> delete pcoinsTip ; <nl>
|
Port Thread* methods to boost::thread_group
|
bitcoin/bitcoin
|
21eb5adadbe3110a8708f2570185566e1f137a49
|
2013-04-03T23:57:13Z
|
mmm a / src / net . cpp <nl> ppp b / src / net . cpp <nl> bool CNode : : ReceiveMsgBytes ( const char * pch , unsigned int nBytes ) <nl> return false ; <nl> <nl> if ( msg . in_data & & msg . hdr . nMessageSize > MAX_PROTOCOL_MESSAGE_LENGTH ) { <nl> - LogPrint ( " net " , " Oversized message from peer = % i , disconnecting " , GetId ( ) ) ; <nl> + LogPrint ( " net " , " Oversized message from peer = % i , disconnecting \ n " , GetId ( ) ) ; <nl> return false ; <nl> } <nl> <nl>
|
Make sure LogPrint strings are line-terminated
|
bitcoin/bitcoin
|
9bebf60698c34502319d6a8218f2ee0c4fa72ef6
|
2015-08-29T16:40:13Z
|
mmm a / tensorflow / lite / delegates / nnapi / acceleration_test_list . cc <nl> ppp b / tensorflow / lite / delegates / nnapi / acceleration_test_list . cc <nl> SelectOpTest / . + , 29 <nl> # slice_test <nl> - SliceOpTest / SliceOpTest / IndexInt64 / . + <nl> - SliceOpTest / SliceOpTest / SliceString / . + <nl> + - SliceOpTest / SliceOpTest / SliceInt64 / . + <nl> + - SliceOpTest / SliceOpTest / SliceBool / . + <nl> # Only constant tensors <nl> SliceOpTest / SliceOpTest / . + / 0 , 29 <nl> <nl>
|
Internal test failure resolved
|
tensorflow/tensorflow
|
b1d722304b28c0b69c36b9090f04d50faf52ca84
|
2020-07-03T15:40:51Z
|
mmm a / src / CipherDialog . cpp <nl> ppp b / src / CipherDialog . cpp <nl> <nl> # include < QPushButton > <nl> # include < QRegExpValidator > <nl> <nl> + # include < QtCore / qmath . h > <nl> + <nl> CipherDialog : : CipherDialog ( QWidget * parent , bool encrypt ) : <nl> QDialog ( parent ) , <nl> ui ( new Ui : : CipherDialog ) , <nl> CipherDialog : : CipherDialog ( QWidget * parent , bool encrypt ) : <nl> { <nl> ui - > setupUi ( this ) ; <nl> <nl> + int minimumPageSizeExponent = 9 ; <nl> + int maximumPageSizeExponent = 16 ; <nl> + int defaultPageSizeExponent = 10 ; <nl> + <nl> + for ( int exponent = minimumPageSizeExponent ; exponent < = maximumPageSizeExponent ; exponent + + ) <nl> + { <nl> + int pageSize = static_cast < int > ( qPow ( 2 , exponent ) ) ; <nl> + ui - > comboPageSize - > addItem ( QLocale ( ) . toString ( pageSize ) , pageSize ) ; <nl> + <nl> + if ( exponent = = defaultPageSizeExponent ) <nl> + ui - > comboPageSize - > setCurrentIndex ( exponent - minimumPageSizeExponent ) ; <nl> + } <nl> + <nl> + ui - > comboPageSize - > setMinimumWidth ( ui - > editPassword - > width ( ) ) ; <nl> + <nl> if ( encrypt ) <nl> { <nl> ui - > labelDialogDescription - > setText ( tr ( " Please set a key to encrypt the database . \ nNote that if you change any of the other , optional , settings you ' ll need " <nl> QString CipherDialog : : password ( ) const <nl> <nl> int CipherDialog : : pageSize ( ) const <nl> { <nl> - return ui - > spinPageSize - > value ( ) ; <nl> + return ui - > comboPageSize - > itemData ( ui - > comboPageSize - > currentIndex ( ) ) . toInt ( ) ; <nl> } <nl> <nl> void CipherDialog : : checkInputFields ( ) <nl> mmm a / src / CipherDialog . ui <nl> ppp b / src / CipherDialog . ui <nl> <nl> < / widget > <nl> < / item > <nl> < item row = " 2 " column = " 1 " > <nl> - < widget class = " QSpinBox " name = " spinPageSize " > <nl> - < property name = " minimum " > <nl> - < number > 512 < / number > <nl> - < / property > <nl> - < property name = " maximum " > <nl> - < number > 65536 < / number > <nl> - < / property > <nl> - < property name = " value " > <nl> - < number > 1024 < / number > <nl> - < / property > <nl> - < / widget > <nl> + < widget class = " QComboBox " name = " comboPageSize " / > <nl> < / item > <nl> < / layout > <nl> < / item > <nl> <nl> < tabstop > editPassword < / tabstop > <nl> < tabstop > comboKeyFormat < / tabstop > <nl> < tabstop > editPassword2 < / tabstop > <nl> - < tabstop > spinPageSize < / tabstop > <nl> + < tabstop > comboPageSize < / tabstop > <nl> < / tabstops > <nl> < resources / > <nl> < connections > <nl>
|
Make sure only powers of two are entered for the page size ()
|
sqlitebrowser/sqlitebrowser
|
9c2cec628b9e8fef8dd5f5a39f701662cd2ee3b8
|
2018-06-08T14:31:11Z
|
mmm a / redis - desktop - manager / source / redis / RedisConnectionOverSsh . cpp <nl> ppp b / redis - desktop - manager / source / redis / RedisConnectionOverSsh . cpp <nl> void RedisConnectionOverSsh : : OnSocketReadyRead ( ) <nl> <nl> void RedisConnectionOverSsh : : OnAuthRequired ( QList < QxtSshClient : : AuthenticationMethod > authMethods ) <nl> { <nl> - int size = authMethods . size ( ) ; <nl> - <nl> } <nl> <nl> QString RedisConnectionOverSsh : : getLastError ( ) <nl> QVariant RedisConnectionOverSsh : : execute ( QString command ) <nl> QByteArray byteArray = formattedCommand . toUtf8 ( ) ; <nl> const char * cString = byteArray . constData ( ) ; <nl> <nl> - int result = socket - > write ( cString , byteArray . size ( ) ) ; <nl> + socket - > write ( cString , byteArray . size ( ) ) ; <nl> <nl> / / wait for ready read <nl> syncTimer . start ( config . executeTimeout ) ; <nl>
|
Fixed warnings
|
uglide/RedisDesktopManager
|
a5dfe18d87ad877bf257e9e16fe65c37998ece57
|
2013-10-07T14:19:28Z
|
mmm a / folly / synchronization / test / BarrierTest . cpp <nl> ppp b / folly / synchronization / test / BarrierTest . cpp <nl> <nl> <nl> # include < folly / portability / GTest . h > <nl> <nl> - using namespace folly ; <nl> using namespace folly : : test ; <nl> <nl> class BarrierTest : public testing : : Test { } ; <nl>
|
Remove dead includes in folly/synchronization
|
facebook/folly
|
135cff30a54b77523ff404a269a960ad981ff8df
|
2020-01-28T01:26:08Z
|
mmm a / hphp / runtime / ext / ext_process . cpp <nl> ppp b / hphp / runtime / ext / ext_process . cpp <nl> struct SignalHandlers final : RequestEventHandler { <nl> handlers . reset ( ) ; <nl> / / restore the old signal mask , thus unblock those that should be <nl> pthread_sigmask ( SIG_SETMASK , & oldSet , NULL ) ; <nl> + inited . store ( true ) ; <nl> } <nl> void requestShutdown ( ) override { <nl> / / block all signals <nl> struct SignalHandlers final : RequestEventHandler { <nl> pthread_sigmask ( SIG_BLOCK , & set , NULL ) ; <nl> <nl> handlers . reset ( ) ; <nl> + inited . store ( false ) ; <nl> } <nl> - <nl> Array handlers ; <nl> int signaled [ _NSIG ] ; <nl> sigset_t oldSet ; <nl> + std : : atomic < bool > inited ; <nl> } ; <nl> IMPLEMENT_STATIC_REQUEST_LOCAL ( SignalHandlers , s_signal_handlers ) ; <nl> <nl> - / / We must register the s_signal_handlers RequestEventHandler <nl> - / / immediately : otherwise , pcntl_signal_handler might try to register <nl> - / / it while processing a signal , which means calling malloc to insert <nl> - / / it into various vectors and sets , which is not ok from a signal <nl> - / / handler . <nl> - static InitFiniNode initSignalHandler ( <nl> - [ ] { s_signal_handlers . get ( ) ; } , <nl> - InitFiniNode : : When : : ThreadInit <nl> - ) ; <nl> + static bool signalHandlersInited ( ) { <nl> + return s_signal_handlers . getInited ( ) & & s_signal_handlers - > inited . load ( ) ; <nl> + } <nl> <nl> static void pcntl_signal_handler ( int signo ) { <nl> - if ( signo > 0 & & signo < _NSIG & & ! g_context . isNull ( ) ) { <nl> + if ( signo > 0 & & signo < _NSIG & & signalHandlersInited ( ) ) { <nl> s_signal_handlers - > signaled [ signo ] = 1 ; <nl> RequestInjectionData & data = ThreadInfo : : s_threadInfo . getNoCheck ( ) - > <nl> m_reqInjectionData ; <nl> class SignalHandlersStaticInitializer { <nl> static SignalHandlersStaticInitializer s_signal_handlers_initializer ; <nl> <nl> bool f_pcntl_signal_dispatch ( ) { <nl> + if ( ! signalHandlersInited ( ) ) return true ; <nl> int * signaled = s_signal_handlers - > signaled ; <nl> for ( int i = 0 ; i < _NSIG ; i + + ) { <nl> if ( signaled [ i ] ) { <nl> mmm a / hphp / runtime / ext / icu / icu . cpp <nl> ppp b / hphp / runtime / ext / icu / icu . cpp <nl> void IntlError : : clearError ( bool clearGlobalError / * = true * / ) { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / INI Setting <nl> <nl> - / * gcc 4 . 7 doesn ' t support thread_locale storage <nl> - * required for dynamic initializers ( like std : : string ) <nl> - * So wrap it up in a RequestEventHandler until we set <nl> - * gcc 4 . 8 as our minimum version <nl> - * / <nl> - struct DefaultLocale final : RequestEventHandler { <nl> - void requestInit ( ) override { } <nl> - void requestShutdown ( ) override { } <nl> - std : : string m_defaultLocale ; <nl> - } ; <nl> - IMPLEMENT_STATIC_REQUEST_LOCAL ( DefaultLocale , s_default_locale ) ; <nl> + static __thread std : : string * s_defaultLocale ; <nl> <nl> void IntlExtension : : bindIniSettings ( ) { <nl> + / / TODO : t5226715 We shouldn ' t need to check s_defaultLocale here , <nl> + / / but right now this is called for every request . <nl> + if ( s_defaultLocale ) return ; <nl> + s_defaultLocale = new std : : string ; <nl> IniSetting : : Bind ( this , IniSetting : : PHP_INI_ALL , <nl> " intl . 
default_locale " , " " , <nl> - & s_default_locale - > m_defaultLocale ) ; <nl> + s_defaultLocale ) ; <nl> + } <nl> + <nl> + void IntlExtension : : threadShutdown ( ) { <nl> + delete s_defaultLocale ; <nl> + s_defaultLocale = nullptr ; <nl> } <nl> <nl> const String GetDefaultLocale ( ) { <nl> - String locale ( s_default_locale - > m_defaultLocale ) ; <nl> - if ( locale . empty ( ) ) { <nl> - locale = String ( uloc_getDefault ( ) , CopyString ) ; <nl> + assert ( s_defaultLocale ) ; <nl> + if ( s_defaultLocale - > empty ( ) ) { <nl> + return String ( uloc_getDefault ( ) , CopyString ) ; <nl> } <nl> - return locale ; <nl> + return * s_defaultLocale ; <nl> } <nl> <nl> bool SetDefaultLocale ( const String & locale ) { <nl> - s_default_locale - > m_defaultLocale = locale . toCppString ( ) ; <nl> + assert ( s_defaultLocale ) ; <nl> + * s_defaultLocale = locale . toCppString ( ) ; <nl> return true ; <nl> } <nl> <nl> mmm a / hphp / runtime / ext / icu / icu . h <nl> ppp b / hphp / runtime / ext / icu / icu . h <nl> class IntlExtension : public Extension { <nl> void threadInit ( ) override { <nl> bindIniSettings ( ) ; <nl> } <nl> - <nl> + void threadShutdown ( ) override ; <nl> private : <nl> void bindIniSettings ( ) ; <nl> void bindConstants ( ) ; <nl>
|
Fix some request locals
|
facebook/hhvm
|
4d23eaef8ca6e19e6f284bb916ff14aecb612c1e
|
2014-10-20T21:00:26Z
|
mmm a / hphp / runtime / ext / asio / blockable_wait_handle . ext_hhvm . cpp <nl> ppp b / hphp / runtime / ext / asio / blockable_wait_handle . ext_hhvm . cpp <nl> <nl> <nl> namespace HPHP { <nl> <nl> - HPHP : : VM : : Instance * new_BlockableWaitHandle_Instance ( HPHP : : VM : : Class * cls ) { <nl> - size_t nProps = cls - > numDeclProperties ( ) ; <nl> - size_t builtinPropSize = sizeof ( c_BlockableWaitHandle ) - sizeof ( ObjectData ) ; <nl> - size_t size = HPHP : : VM : : Instance : : sizeForNProps ( nProps ) + builtinPropSize ; <nl> - HPHP : : VM : : Instance * inst = ( HPHP : : VM : : Instance * ) ALLOCOBJSZ ( size ) ; <nl> - new ( ( void * ) inst ) c_BlockableWaitHandle ( cls ) ; <nl> - return inst ; <nl> - } <nl> - <nl> IMPLEMENT_CLASS ( BlockableWaitHandle ) ; <nl> / * <nl> void HPHP : : c_BlockableWaitHandle : : t___construct ( ) <nl> mmm a / hphp / runtime / ext / asio / static_wait_handle . ext_hhvm . cpp <nl> ppp b / hphp / runtime / ext / asio / static_wait_handle . ext_hhvm . cpp <nl> <nl> <nl> namespace HPHP { <nl> <nl> - HPHP : : VM : : Instance * new_StaticWaitHandle_Instance ( HPHP : : VM : : Class * cls ) { <nl> - size_t nProps = cls - > numDeclProperties ( ) ; <nl> - size_t builtinPropSize = sizeof ( c_StaticWaitHandle ) - sizeof ( ObjectData ) ; <nl> - size_t size = HPHP : : VM : : Instance : : sizeForNProps ( nProps ) + builtinPropSize ; <nl> - HPHP : : VM : : Instance * inst = ( HPHP : : VM : : Instance * ) ALLOCOBJSZ ( size ) ; <nl> - new ( ( void * ) inst ) c_StaticWaitHandle ( cls ) ; <nl> - return inst ; <nl> - } <nl> - <nl> IMPLEMENT_CLASS ( StaticWaitHandle ) ; <nl> / * <nl> void HPHP : : c_StaticWaitHandle : : t___construct ( ) <nl> mmm a / hphp / runtime / ext / asio / wait_handle . ext_hhvm . cpp <nl> ppp b / hphp / runtime / ext / asio / wait_handle . ext_hhvm . cpp <nl> <nl> <nl> namespace HPHP { <nl> <nl> - HPHP : : VM : : Instance * new_WaitHandle_Instance ( HPHP : : VM : : Class * cls ) { <nl> - size_t nProps = cls - > numDeclProperties ( ) ; <nl> - size_t builtinPropSize = sizeof ( c_WaitHandle ) - sizeof ( ObjectData ) ; <nl> - size_t size = HPHP : : VM : : Instance : : sizeForNProps ( nProps ) + builtinPropSize ; <nl> - HPHP : : VM : : Instance * inst = ( HPHP : : VM : : Instance * ) ALLOCOBJSZ ( size ) ; <nl> - new ( ( void * ) inst ) c_WaitHandle ( cls ) ; <nl> - return inst ; <nl> - } <nl> - <nl> IMPLEMENT_CLASS ( WaitHandle ) ; <nl> / * <nl> void HPHP : : c_WaitHandle : : t___construct ( ) <nl> mmm a / hphp / runtime / ext / asio / waitable_wait_handle . ext_hhvm . cpp <nl> ppp b / hphp / runtime / ext / asio / waitable_wait_handle . ext_hhvm . cpp <nl> <nl> <nl> namespace HPHP { <nl> <nl> - HPHP : : VM : : Instance * new_WaitableWaitHandle_Instance ( HPHP : : VM : : Class * cls ) { <nl> - size_t nProps = cls - > numDeclProperties ( ) ; <nl> - size_t builtinPropSize = sizeof ( c_WaitableWaitHandle ) - sizeof ( ObjectData ) ; <nl> - size_t size = HPHP : : VM : : Instance : : sizeForNProps ( nProps ) + builtinPropSize ; <nl> - HPHP : : VM : : Instance * inst = ( HPHP : : VM : : Instance * ) ALLOCOBJSZ ( size ) ; <nl> - new ( ( void * ) inst ) c_WaitableWaitHandle ( cls ) ; <nl> - return inst ; <nl> - } <nl> - <nl> IMPLEMENT_CLASS ( WaitableWaitHandle ) ; <nl> / * <nl> void HPHP : : c_WaitableWaitHandle : : t___construct ( ) <nl> mmm a / hphp / runtime / ext_hhvm / ext_hhvm_infotabs . cpp <nl> ppp b / hphp / runtime / ext_hhvm / ext_hhvm_infotabs . 
cpp <nl> TypedValue * fg_nzuncompress ( VM : : ActRec * ar ) ; <nl> TypedValue * fg_lz4compress ( VM : : ActRec * ar ) ; <nl> TypedValue * fg_lz4hccompress ( VM : : ActRec * ar ) ; <nl> TypedValue * fg_lz4uncompress ( VM : : ActRec * ar ) ; <nl> - VM : : Instance * new_WaitHandle_Instance ( VM : : Class * ) ; <nl> TypedValue * tg_10WaitHandle___construct ( VM : : ActRec * ar ) ; <nl> TypedValue * tg_10WaitHandle_import ( VM : : ActRec * ar ) ; <nl> TypedValue * tg_10WaitHandle_join ( VM : : ActRec * ar ) ; <nl> TypedValue * tg_10WaitHandle_isFailed ( VM : : ActRec * ar ) ; <nl> TypedValue * tg_10WaitHandle_getID ( VM : : ActRec * ar ) ; <nl> TypedValue * tg_10WaitHandle_getName ( VM : : ActRec * ar ) ; <nl> TypedValue * tg_10WaitHandle_getExceptionIfFailed ( VM : : ActRec * ar ) ; <nl> - VM : : Instance * new_StaticWaitHandle_Instance ( VM : : Class * ) ; <nl> TypedValue * tg_16StaticWaitHandle___construct ( VM : : ActRec * ar ) ; <nl> VM : : Instance * new_StaticResultWaitHandle_Instance ( VM : : Class * ) ; <nl> TypedValue * tg_22StaticResultWaitHandle___construct ( VM : : ActRec * ar ) ; <nl> TypedValue * tg_22StaticResultWaitHandle_create ( VM : : ActRec * ar ) ; <nl> VM : : Instance * new_StaticExceptionWaitHandle_Instance ( VM : : Class * ) ; <nl> TypedValue * tg_25StaticExceptionWaitHandle___construct ( VM : : ActRec * ar ) ; <nl> TypedValue * tg_25StaticExceptionWaitHandle_create ( VM : : ActRec * ar ) ; <nl> - VM : : Instance * new_WaitableWaitHandle_Instance ( VM : : Class * ) ; <nl> TypedValue * tg_18WaitableWaitHandle___construct ( VM : : ActRec * ar ) ; <nl> TypedValue * tg_18WaitableWaitHandle_getParents ( VM : : ActRec * ar ) ; <nl> TypedValue * tg_18WaitableWaitHandle_getStackTrace ( VM : : ActRec * ar ) ; <nl> - VM : : Instance * new_BlockableWaitHandle_Instance ( VM : : Class * ) ; <nl> TypedValue * tg_19BlockableWaitHandle___construct ( VM : : ActRec * ar ) ; <nl> VM : : Instance * new_ContinuationWaitHandle_Instance ( VM : : Class * ) ; <nl> TypedValue * tg_22ContinuationWaitHandle___construct ( VM : : ActRec * ar ) ; <nl> static const HhbcExtMethodInfo hhbc_ext_methods_XMLWriter [ ] = { <nl> <nl> const long long hhbc_ext_class_count = 71 ; <nl> const HhbcExtClassInfo hhbc_ext_classes [ ] = { <nl> - { " WaitHandle " , new_WaitHandle_Instance , sizeof ( c_WaitHandle ) , hhbc_ext_method_count_WaitHandle , hhbc_ext_methods_WaitHandle , & c_WaitHandle : : s_cls } , <nl> - { " StaticWaitHandle " , new_StaticWaitHandle_Instance , sizeof ( c_StaticWaitHandle ) , hhbc_ext_method_count_StaticWaitHandle , hhbc_ext_methods_StaticWaitHandle , & c_StaticWaitHandle : : s_cls } , <nl> + { " WaitHandle " , nullptr , sizeof ( c_WaitHandle ) , hhbc_ext_method_count_WaitHandle , hhbc_ext_methods_WaitHandle , & c_WaitHandle : : s_cls } , <nl> + { " StaticWaitHandle " , nullptr , sizeof ( c_StaticWaitHandle ) , hhbc_ext_method_count_StaticWaitHandle , hhbc_ext_methods_StaticWaitHandle , & c_StaticWaitHandle : : s_cls } , <nl> { " StaticResultWaitHandle " , new_StaticResultWaitHandle_Instance , sizeof ( c_StaticResultWaitHandle ) , hhbc_ext_method_count_StaticResultWaitHandle , hhbc_ext_methods_StaticResultWaitHandle , & c_StaticResultWaitHandle : : s_cls } , <nl> { " StaticExceptionWaitHandle " , new_StaticExceptionWaitHandle_Instance , sizeof ( c_StaticExceptionWaitHandle ) , hhbc_ext_method_count_StaticExceptionWaitHandle , hhbc_ext_methods_StaticExceptionWaitHandle , & c_StaticExceptionWaitHandle : : s_cls } , <nl> - { " WaitableWaitHandle " , new_WaitableWaitHandle_Instance , sizeof ( 
c_WaitableWaitHandle ) , hhbc_ext_method_count_WaitableWaitHandle , hhbc_ext_methods_WaitableWaitHandle , & c_WaitableWaitHandle : : s_cls } , <nl> - { " BlockableWaitHandle " , new_BlockableWaitHandle_Instance , sizeof ( c_BlockableWaitHandle ) , hhbc_ext_method_count_BlockableWaitHandle , hhbc_ext_methods_BlockableWaitHandle , & c_BlockableWaitHandle : : s_cls } , <nl> + { " WaitableWaitHandle " , nullptr , sizeof ( c_WaitableWaitHandle ) , hhbc_ext_method_count_WaitableWaitHandle , hhbc_ext_methods_WaitableWaitHandle , & c_WaitableWaitHandle : : s_cls } , <nl> + { " BlockableWaitHandle " , nullptr , sizeof ( c_BlockableWaitHandle ) , hhbc_ext_method_count_BlockableWaitHandle , hhbc_ext_methods_BlockableWaitHandle , & c_BlockableWaitHandle : : s_cls } , <nl> { " ContinuationWaitHandle " , new_ContinuationWaitHandle_Instance , sizeof ( c_ContinuationWaitHandle ) , hhbc_ext_method_count_ContinuationWaitHandle , hhbc_ext_methods_ContinuationWaitHandle , & c_ContinuationWaitHandle : : s_cls } , <nl> { " GenArrayWaitHandle " , new_GenArrayWaitHandle_Instance , sizeof ( c_GenArrayWaitHandle ) , hhbc_ext_method_count_GenArrayWaitHandle , hhbc_ext_methods_GenArrayWaitHandle , & c_GenArrayWaitHandle : : s_cls } , <nl> { " SetResultToRefWaitHandle " , new_SetResultToRefWaitHandle_Instance , sizeof ( c_SetResultToRefWaitHandle ) , hhbc_ext_method_count_SetResultToRefWaitHandle , hhbc_ext_methods_SetResultToRefWaitHandle , & c_SetResultToRefWaitHandle : : s_cls } , <nl> mmm a / hphp / runtime / ext_hhvm / gen_ext_hhvm . php <nl> ppp b / hphp / runtime / ext_hhvm / gen_ext_hhvm . php <nl> function phase2 ( ) { <nl> / / avoid multiple definition issues when an extension class is <nl> / / spread among a few cpp files . <nl> if ( $ obj - > name = = " __construct " ) { <nl> - emit_ctor_helper ( $ ext_hhvm_cpp , $ cname ) ; <nl> + if ( ! ( $ cls_info [ ' flags ' ] & IsAbstract ) ) { <nl> + emit_ctor_helper ( $ ext_hhvm_cpp , $ cname ) ; <nl> + } <nl> if ( $ cls_info [ ' flags ' ] & NoDefaultSweep ) { <nl> fwrite ( $ ext_hhvm_cpp , <nl> " IMPLEMENT_CLASS_NO_DEFAULT_SWEEP ( $ cname ) ; \ n " ) ; <nl> mmm a / hphp / runtime / ext_hhvm / gen_infotabs . php <nl> ppp b / hphp / runtime / ext_hhvm / gen_infotabs . php <nl> function main ( ) { <nl> " ( VM : : ActRec * ar ) ; \ n " ) ; <nl> } <nl> foreach ( $ ext_class_info as $ cname = > $ cls_info ) { <nl> - fwrite ( $ outfile , <nl> - " VM : : Instance * new_ " . $ cname . " _Instance ( " . <nl> - " VM : : Class * ) ; \ n " ) ; <nl> + if ( ! ( $ cls_info [ ' flags ' ] & IsAbstract ) ) { <nl> + fwrite ( $ outfile , <nl> + " VM : : Instance * new_ " . $ cname . " _Instance ( " . <nl> + " VM : : Class * ) ; \ n " ) ; <nl> + } <nl> foreach ( $ cls_info [ ' methods ' ] as $ obj ) { <nl> fwrite ( $ outfile , " TypedValue * tg_ " . getUniqueFuncName ( $ obj ) . <nl> " ( VM : : ActRec * ar ) ; \ n " ) ; <nl> function main ( ) { <nl> fwrite ( $ outfile , " , \ n " ) ; <nl> } <nl> $ firstParam = false ; <nl> - fwrite ( $ outfile , ' { " ' . $ cname . ' " , new_ ' . $ cname . ' _Instance ' . <nl> + $ constructor = ( $ cls_info [ ' flags ' ] & IsAbstract ) <nl> + ? ' nullptr ' : ' new_ ' . $ cname . ' _Instance ' ; <nl> + fwrite ( $ outfile , ' { " ' . $ cname . ' " , ' . $ constructor . <nl> ' , sizeof ( c_ ' . $ cname . ' ) ' . <nl> ' , hhbc_ext_method_count_ ' . $ cname . <nl> ' , hhbc_ext_methods_ ' . $ cname . <nl>
|
Do not generate new instance helper for abstract extension classes
|
facebook/hhvm
|
6aa91b97c8df81c6d5e291454b25a5da6cf531b9
|
2013-03-09T01:52:39Z
|
mmm a / documentation / sphinx / source / release - notes . rst <nl> ppp b / documentation / sphinx / source / release - notes . rst <nl> Bindings <nl> <nl> * API version updated to 600 . There are no changes since API version 520 . <nl> * Several cases where functions in go might previously cause a panic now return a non - ` ` nil ` ` error . ` ( PR # 532 ) < https : / / github . com / apple / foundationdb / pull / 532 > ` _ <nl> + * C API calls made on the network thread could be reordered with calls made from other threads . [ 6 . 0 . 2 ] ` ( Issue # 518 ) < https : / / github . com / apple / foundationdb / issues / 518 > ` _ <nl> <nl> Other Changes <nl> mmmmmmmmmmmm - <nl> mmm a / flow / Net2 . actor . cpp <nl> ppp b / flow / Net2 . actor . cpp <nl> void Net2 : : onMainThread ( Promise < Void > & & signal , int taskID ) { <nl> <nl> if ( thread_network = = this ) <nl> { <nl> + processThreadReady ( ) ; <nl> this - > ready . push ( OrderedTask ( priority - ( + + tasksIssued ) , taskID , p ) ) ; <nl> } else { <nl> if ( threadReady . push ( OrderedTask ( priority , taskID , p ) ) ) <nl>
|
Merge pull request from ajbeamon/on-main-thread-fix
|
apple/foundationdb
|
19f9cd25a0dd85b509fb19efc363a0d77f74333d
|
2018-07-16T23:20:07Z
|
mmm a / AUTHORS <nl> ppp b / AUTHORS <nl> Developers : <nl> <nl> mchinen <nl> fix emulator issue for OpenGL ES 2 . 0 on Android <nl> + Fix missing protocol method warning for iOS 6 . 0 addition <nl> <nl> DenizPiri <nl> use CCLOG to implement print ( ) function in LUA <nl>
|
Update AUTHORS
|
cocos2d/cocos2d-x
|
587dfb8e9f6bf29c640f99e47b102f8440ea2eb2
|
2013-05-04T15:56:29Z
|
mmm a / tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . py_func . md <nl> ppp b / tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . py_func . md <nl> <nl> - # # # ` tf . py_func ( func , inp , Tout , name = None ) ` { # py_func } <nl> + # # # ` tf . py_func ( func , inp , Tout , stateful = True , name = None ) ` { # py_func } <nl> <nl> Wraps a python function and uses it as a tensorflow op . <nl> <nl> sinh ( x ) as an op in the graph . <nl> * < b > ` inp ` < / b > : A list of ` Tensor ` . <nl> * < b > ` Tout ` < / b > : A list of tensorflow data types indicating what ` func ` <nl> returns . <nl> + * < b > ` stateful ` < / b > : A boolean indicating whether the function should be considered <nl> + stateful or stateless . I . e . whether it , given the same input , will <nl> + return the same output and at the same time does not change state <nl> + in an observable way . Optimizations such as common subexpression <nl> + elimination are only possible when operations are stateless . <nl> * < b > ` name ` < / b > : A name for the operation ( optional ) . <nl> <nl> # # # # # Returns : <nl> mmm a / tensorflow / g3doc / api_docs / python / script_ops . md <nl> ppp b / tensorflow / g3doc / api_docs / python / script_ops . md <nl> TensorFlow operators . <nl> # # Other Functions and Classes <nl> - - - <nl> <nl> - # # # ` tf . py_func ( func , inp , Tout , name = None ) ` { # py_func } <nl> + # # # ` tf . py_func ( func , inp , Tout , stateful = True , name = None ) ` { # py_func } <nl> <nl> Wraps a python function and uses it as a tensorflow op . <nl> <nl> sinh ( x ) as an op in the graph . <nl> * < b > ` inp ` < / b > : A list of ` Tensor ` . <nl> * < b > ` Tout ` < / b > : A list of tensorflow data types indicating what ` func ` <nl> returns . <nl> + * < b > ` stateful ` < / b > : A boolean indicating whether the function should be considered <nl> + stateful or stateless . I . e . whether it , given the same input , will <nl> + return the same output and at the same time does not change state <nl> + in an observable way . Optimizations such as common subexpression <nl> + elimination are only possible when operations are stateless . <nl> * < b > ` name ` < / b > : A name for the operation ( optional ) . <nl> <nl> # # # # # Returns : <nl>
|
Update generated Python Op docs.
|
tensorflow/tensorflow
|
ccb41b81b5cbc0490899a5280c04c7be5a351c20
|
2016-07-26T15:48:42Z
|
mmm a / tensorflow / python / kernel_tests / fifo_queue_test . py <nl> ppp b / tensorflow / python / kernel_tests / fifo_queue_test . py <nl> <nl> @ test_util . run_v1_only ( " FIFOQueue removed from v2 " ) <nl> class FIFOQueueTest ( test . TestCase ) : <nl> <nl> - def setUp ( self ) : <nl> - # We need each thread to keep its own device stack or the device scopes <nl> - # won ' t be properly nested . <nl> - ops . get_default_graph ( ) . switch_to_thread_local ( ) <nl> - <nl> def testConstructor ( self ) : <nl> with ops . Graph ( ) . as_default ( ) : <nl> q = data_flow_ops . FIFOQueue ( 10 , dtypes_lib . float32 , name = " Q " ) <nl> def testEnqueueDictWithoutNames ( self ) : <nl> q . enqueue_many ( { " a " : [ 12 . 0 , 13 . 0 ] } ) <nl> <nl> def testParallelEnqueue ( self ) : <nl> + # We need each thread to keep its own device stack or the device scopes <nl> + # won ' t be properly nested . <nl> + ops . get_default_graph ( ) . switch_to_thread_local ( ) <nl> with self . cached_session ( ) as sess : <nl> q = data_flow_ops . FIFOQueue ( 10 , dtypes_lib . float32 ) <nl> elems = [ 10 . 0 , 20 . 0 , 30 . 0 , 40 . 0 , 50 . 0 , 60 . 0 , 70 . 0 , 80 . 0 , 90 . 0 , 100 . 0 ] <nl> def enqueue ( enqueue_op ) : <nl> self . assertItemsEqual ( elems , results ) <nl> <nl> def testParallelDequeue ( self ) : <nl> + # We need each thread to keep its own device stack or the device scopes <nl> + # won ' t be properly nested . <nl> + ops . get_default_graph ( ) . switch_to_thread_local ( ) <nl> with self . cached_session ( ) as sess : <nl> q = data_flow_ops . FIFOQueue ( 10 , dtypes_lib . float32 ) <nl> elems = [ 10 . 0 , 20 . 0 , 30 . 0 , 40 . 0 , 50 . 0 , 60 . 0 , 70 . 0 , 80 . 0 , 90 . 0 , 100 . 0 ] <nl> def testDequeueHalf ( self ) : <nl> self . assertEqual ( [ elems [ i ] ] , vals ) <nl> <nl> def testEnqueueAndBlockingDequeue ( self ) : <nl> + # We need each thread to keep its own device stack or the device scopes <nl> + # won ' t be properly nested . <nl> + ops . get_default_graph ( ) . switch_to_thread_local ( ) <nl> with self . cached_session ( ) as sess : <nl> q = data_flow_ops . FIFOQueue ( 3 , dtypes_lib . float32 ) <nl> elems = [ 10 . 0 , 20 . 0 , 30 . 0 ] <nl> def testEnqueueDequeueManyWrongShape ( self ) : <nl> self . evaluate ( dequeued_t ) <nl> <nl> def testParallelEnqueueMany ( self ) : <nl> + # We need each thread to keep its own device stack or the device scopes <nl> + # won ' t be properly nested . <nl> + ops . get_default_graph ( ) . switch_to_thread_local ( ) <nl> with self . cached_session ( ) as sess : <nl> q = data_flow_ops . FIFOQueue ( 1000 , dtypes_lib . float32 , shapes = ( ) ) <nl> elems = [ 10 . 0 * x for x in range ( 100 ) ] <nl> def enqueue ( ) : <nl> self . assertItemsEqual ( dequeued_t . eval ( ) , elems * 10 ) <nl> <nl> def testParallelDequeueMany ( self ) : <nl> + # We need each thread to keep its own device stack or the device scopes <nl> + # won ' t be properly nested . <nl> + ops . get_default_graph ( ) . switch_to_thread_local ( ) <nl> with self . cached_session ( ) as sess : <nl> q = data_flow_ops . FIFOQueue ( 1000 , dtypes_lib . float32 , shapes = ( ) ) <nl> elems = [ 10 . 0 * x for x in range ( 1000 ) ] <nl> def dequeue ( ) : <nl> self . assertItemsEqual ( elems , dequeued_elems ) <nl> <nl> def testParallelDequeueUpTo ( self ) : <nl> + # We need each thread to keep its own device stack or the device scopes <nl> + # won ' t be properly nested . <nl> + ops . get_default_graph ( ) . switch_to_thread_local ( ) <nl> with self . 
cached_session ( ) as sess : <nl> q = data_flow_ops . FIFOQueue ( 1000 , dtypes_lib . float32 , shapes = ( ) ) <nl> elems = [ 10 . 0 * x for x in range ( 1000 ) ] <nl> def dequeue ( ) : <nl> self . assertItemsEqual ( elems , dequeued_elems ) <nl> <nl> def testParallelEnqueueAndDequeue ( self ) : <nl> + # We need each thread to keep its own device stack or the device scopes <nl> + # won ' t be properly nested . <nl> + ops . get_default_graph ( ) . switch_to_thread_local ( ) <nl> with self . cached_session ( ) as sess : <nl> q = data_flow_ops . FIFOQueue ( 50 , dtypes_lib . float32 , shapes = ( ) ) <nl> initial_elements = [ 10 . 0 ] * 49 <nl> def enqueue ( ) : <nl> self . assertEqual ( 0 , q . size ( ) . eval ( ) ) <nl> <nl> def testBlockingDequeueMany ( self ) : <nl> + # We need each thread to keep its own device stack or the device scopes <nl> + # won ' t be properly nested . <nl> + ops . get_default_graph ( ) . switch_to_thread_local ( ) <nl> with self . cached_session ( ) as sess : <nl> q = data_flow_ops . FIFOQueue ( 10 , dtypes_lib . float32 , ( ) ) <nl> elems = [ 10 . 0 , 20 . 0 , 30 . 0 , 40 . 0 ] <nl> def dequeue ( ) : <nl> self . assertAllEqual ( elems , dequeued_elems ) <nl> <nl> def testBlockingDequeueUpTo ( self ) : <nl> + # We need each thread to keep its own device stack or the device scopes <nl> + # won ' t be properly nested . <nl> + ops . get_default_graph ( ) . switch_to_thread_local ( ) <nl> with self . cached_session ( ) as sess : <nl> q = data_flow_ops . FIFOQueue ( 10 , dtypes_lib . float32 , ( ) ) <nl> elems = [ 10 . 0 , 20 . 0 , 30 . 0 , 40 . 0 ] <nl> def testDequeueFromClosedQueue ( self ) : <nl> self . evaluate ( dequeued_t ) <nl> <nl> def testBlockingDequeueFromClosedQueue ( self ) : <nl> + # We need each thread to keep its own device stack or the device scopes <nl> + # won ' t be properly nested . <nl> + ops . get_default_graph ( ) . switch_to_thread_local ( ) <nl> with self . cached_session ( ) as sess : <nl> q = data_flow_ops . FIFOQueue ( 10 , dtypes_lib . float32 ) <nl> elems = [ 10 . 0 , 20 . 0 , 30 . 0 , 40 . 0 ] <nl> def dequeue ( ) : <nl> dequeue_thread . join ( ) <nl> <nl> def testBlockingDequeueFromClosedEmptyQueue ( self ) : <nl> + # We need each thread to keep its own device stack or the device scopes <nl> + # won ' t be properly nested . <nl> + ops . get_default_graph ( ) . switch_to_thread_local ( ) <nl> with self . cached_session ( ) as sess : <nl> q = data_flow_ops . FIFOQueue ( 10 , dtypes_lib . float32 ) <nl> close_op = q . close ( ) <nl> def dequeue ( ) : <nl> dequeue_thread . join ( ) <nl> <nl> def testBlockingDequeueManyFromClosedQueue ( self ) : <nl> + # We need each thread to keep its own device stack or the device scopes <nl> + # won ' t be properly nested . <nl> + ops . get_default_graph ( ) . switch_to_thread_local ( ) <nl> with self . cached_session ( ) as sess : <nl> q = data_flow_ops . FIFOQueue ( 10 , dtypes_lib . float32 , ( ) ) <nl> elems = [ 10 . 0 , 20 . 0 , 30 . 0 , 40 . 0 ] <nl> def dequeue ( ) : <nl> dequeue_thread . join ( ) <nl> <nl> def testBlockingDequeueManyButNotAllFromClosedQueue ( self ) : <nl> + # We need each thread to keep its own device stack or the device scopes <nl> + # won ' t be properly nested . <nl> + ops . get_default_graph ( ) . switch_to_thread_local ( ) <nl> with self . cached_session ( ) as sess : <nl> q = data_flow_ops . FIFOQueue ( 10 , dtypes_lib . float32 , ( ) ) <nl> elems = [ 10 . 0 , 20 . 0 , 30 . 0 , 40 . 0 ] <nl> def dequeue ( ) : <nl> dequeue_thread . 
join ( ) <nl> <nl> def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany ( self ) : <nl> - with self . cached_session ( ) as sess : <nl> + with ops . Graph ( ) . as_default ( ) , self . session ( ) as sess : <nl> q = data_flow_ops . FIFOQueue ( 4 , dtypes_lib . float32 , ( ) ) <nl> elems = [ 10 . 0 , 20 . 0 , 30 . 0 , 40 . 0 ] <nl> enqueue_op = q . enqueue_many ( ( elems , ) ) <nl> def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany ( self ) : <nl> cleanup_dequeue_t = q . dequeue ( ) <nl> <nl> def enqueue ( ) : <nl> - self . evaluate ( enqueue_op ) <nl> + sess . run ( enqueue_op ) <nl> <nl> def dequeue ( ) : <nl> - self . assertAllEqual ( elems [ 0 : 3 ] , self . evaluate ( dequeued_t ) ) <nl> + self . assertAllEqual ( elems [ 0 : 3 ] , sess . run ( dequeued_t ) ) <nl> with self . assertRaises ( errors_impl . OutOfRangeError ) : <nl> - self . evaluate ( dequeued_t ) <nl> - self . assertEqual ( elems [ 3 ] , self . evaluate ( cleanup_dequeue_t ) ) <nl> + sess . run ( dequeued_t ) <nl> + self . assertEqual ( elems [ 3 ] , sess . run ( cleanup_dequeue_t ) ) <nl> <nl> def close ( ) : <nl> - self . evaluate ( close_op ) <nl> + sess . run ( close_op ) <nl> <nl> enqueue_thread = self . checkedThread ( target = enqueue ) <nl> enqueue_thread . start ( ) <nl> def dequeue ( ) : <nl> self . assertEqual ( 0 , q . size ( ) . eval ( ) ) <nl> <nl> def testBlockingDequeueManyFromClosedEmptyQueue ( self ) : <nl> + # We need each thread to keep its own device stack or the device scopes <nl> + # won ' t be properly nested . <nl> + ops . get_default_graph ( ) . switch_to_thread_local ( ) <nl> with self . cached_session ( ) as sess : <nl> q = data_flow_ops . FIFOQueue ( 10 , dtypes_lib . float32 , ( ) ) <nl> close_op = q . close ( ) <nl> def dequeue ( ) : <nl> dequeue_thread . join ( ) <nl> <nl> def testBlockingDequeueUpToFromClosedEmptyQueue ( self ) : <nl> + # We need each thread to keep its own device stack or the device scopes <nl> + # won ' t be properly nested . <nl> + ops . get_default_graph ( ) . switch_to_thread_local ( ) <nl> with self . cached_session ( ) as sess : <nl> q = data_flow_ops . FIFOQueue ( 10 , dtypes_lib . float32 , ( ) ) <nl> close_op = q . close ( ) <nl> def testEnqueueManyToClosedQueue ( self ) : <nl> enqueue_op . run ( ) <nl> <nl> def testBlockingEnqueueToFullQueue ( self ) : <nl> + # We need each thread to keep its own device stack or the device scopes <nl> + # won ' t be properly nested . <nl> + ops . get_default_graph ( ) . switch_to_thread_local ( ) <nl> with self . cached_session ( ) as sess : <nl> q = data_flow_ops . FIFOQueue ( 4 , dtypes_lib . float32 ) <nl> elems = [ 10 . 0 , 20 . 0 , 30 . 0 , 40 . 0 ] <nl> def blocking_enqueue ( ) : <nl> thread . join ( ) <nl> <nl> def testBlockingEnqueueManyToFullQueue ( self ) : <nl> + # We need each thread to keep its own device stack or the device scopes <nl> + # won ' t be properly nested . <nl> + ops . get_default_graph ( ) . switch_to_thread_local ( ) <nl> with self . cached_session ( ) as sess : <nl> q = data_flow_ops . FIFOQueue ( 4 , dtypes_lib . float32 ) <nl> elems = [ 10 . 0 , 20 . 0 , 30 . 0 , 40 . 0 ] <nl> def blocking_enqueue ( ) : <nl> thread . join ( ) <nl> <nl> def testBlockingEnqueueBeforeClose ( self ) : <nl> + # We need each thread to keep its own device stack or the device scopes <nl> + # won ' t be properly nested . <nl> + ops . get_default_graph ( ) . switch_to_thread_local ( ) <nl> with self . cached_session ( ) as sess : <nl> q = data_flow_ops . FIFOQueue ( 4 , dtypes_lib . 
float32 ) <nl> elems = [ 10 . 0 , 20 . 0 , 30 . 0 , 40 . 0 ] <nl> def blocking_enqueue ( ) : <nl> <nl> # The close_op should run after the blocking_enqueue_op has blocked . <nl> # TODO ( mrry ) : Figure out how to do this without sleeping . <nl> - time . sleep ( 0 . 1 ) <nl> + time . sleep ( 0 . 2 ) <nl> <nl> def close ( ) : <nl> self . evaluate ( close_op ) <nl> def close ( ) : <nl> self . assertEqual ( 0 , q . size ( ) . eval ( ) ) <nl> <nl> def testBlockingEnqueueManyBeforeClose ( self ) : <nl> - with self . cached_session ( ) as sess : <nl> + # We need each thread to keep its own device stack or the device scopes <nl> + # won ' t be properly nested . <nl> + ops . get_default_graph ( ) . switch_to_thread_local ( ) <nl> + with self . session ( ) as sess : <nl> q = data_flow_ops . FIFOQueue ( 4 , dtypes_lib . float32 ) <nl> elems = [ 10 . 0 , 20 . 0 , 30 . 0 ] <nl> enqueue_op = q . enqueue_many ( ( elems , ) ) <nl> def testBlockingEnqueueManyBeforeClose ( self ) : <nl> enqueue_op . run ( ) <nl> <nl> def blocking_enqueue ( ) : <nl> - self . evaluate ( blocking_enqueue_op ) <nl> + sess . run ( blocking_enqueue_op ) <nl> <nl> enqueue_thread = self . checkedThread ( target = blocking_enqueue ) <nl> enqueue_thread . start ( ) <nl> def blocking_enqueue ( ) : <nl> time . sleep ( 0 . 1 ) <nl> <nl> def close ( ) : <nl> - self . evaluate ( close_op ) <nl> + sess . run ( close_op ) <nl> <nl> close_thread = self . checkedThread ( target = close ) <nl> close_thread . start ( ) <nl> def testSelectQueueOutOfRange ( self ) : <nl> <nl> def _blockingDequeue ( self , sess , dequeue_op ) : <nl> with self . assertRaisesOpError ( " was cancelled " ) : <nl> - self . evaluate ( dequeue_op ) <nl> + sess . run ( dequeue_op ) <nl> <nl> def _blockingDequeueMany ( self , sess , dequeue_many_op ) : <nl> with self . assertRaisesOpError ( " was cancelled " ) : <nl> - self . evaluate ( dequeue_many_op ) <nl> + sess . run ( dequeue_many_op ) <nl> <nl> def _blockingEnqueue ( self , sess , enqueue_op ) : <nl> with self . assertRaisesOpError ( " was cancelled " ) : <nl> - self . evaluate ( enqueue_op ) <nl> + sess . run ( enqueue_op ) <nl> <nl> def _blockingEnqueueMany ( self , sess , enqueue_many_op ) : <nl> with self . assertRaisesOpError ( " was cancelled " ) : <nl> - self . evaluate ( enqueue_many_op ) <nl> + sess . run ( enqueue_many_op ) <nl> <nl> def testResetOfBlockingOperation ( self ) : <nl> - with self . cached_session ( ) as sess : <nl> + with self . session ( ) as sess : <nl> q_empty = data_flow_ops . FIFOQueue ( 5 , dtypes_lib . float32 , ( ) ) <nl> dequeue_op = q_empty . dequeue ( ) <nl> dequeue_many_op = q_empty . dequeue_many ( 1 ) <nl> def testResetOfBlockingOperation ( self ) : <nl> for t in threads : <nl> t . join ( ) <nl> <nl> + # Create a new session that hasn ' t been closed , so cached_session <nl> + # isn ' t messed up . <nl> + with self . session ( ) as sess : <nl> + pass <nl> + <nl> def testBigEnqueueMany ( self ) : <nl> with self . cached_session ( ) as sess : <nl> q = data_flow_ops . FIFOQueue ( 5 , dtypes_lib . int32 , ( ( ) , ) ) <nl>
|
Fix flakiness of kernel_tests:fifo_queue_test.
|
tensorflow/tensorflow
|
6dfff4f02535e8f9d91aa3a2d193b7ab354489d7
|
2019-03-23T00:37:10Z
|
mmm a / src / cascadia / TerminalApp / defaults . json <nl> ppp b / src / cascadia / TerminalApp / defaults . json <nl> <nl> ] , <nl> " keybindings " : <nl> [ <nl> - { " command " : " closePane " , " keys " : [ " ctrl + shift + w " ] } , <nl> - { " command " : " closeWindow " , " keys " : [ " alt + f4 " ] } , <nl> - { " command " : " copy " , " keys " : [ " ctrl + shift + c " ] } , <nl> - { " command " : " duplicateTab " , " keys " : [ " ctrl + shift + d " ] } , <nl> - { " command " : " newTab " , " keys " : [ " ctrl + shift + t " ] } , <nl> + { " command " : " closePane " , " keys " : [ " ctrl + shift + w " ] } , <nl> + { " command " : " closeWindow " , " keys " : [ " alt + f4 " ] } , <nl> + { " command " : " copy " , " keys " : [ " ctrl + shift + c " ] } , <nl> + { " command " : " decreaseFontSize " , " keys " : [ " ctrl + - " ] } , <nl> + { " command " : " duplicateTab " , " keys " : [ " ctrl + shift + d " ] } , <nl> + { " command " : " increaseFontSize " , " keys " : [ " ctrl + = " ] } , <nl> + { " command " : { " action " : " moveFocus " , " direction " : " down " } , " keys " : [ " alt + down " ] } , <nl> + { " command " : { " action " : " moveFocus " , " direction " : " left " } , " keys " : [ " alt + left " ] } , <nl> + { " command " : { " action " : " moveFocus " , " direction " : " right " } , " keys " : [ " alt + right " ] } , <nl> + { " command " : { " action " : " moveFocus " , " direction " : " up " } , " keys " : [ " alt + up " ] } , <nl> + { " command " : " newTab " , " keys " : [ " ctrl + shift + t " ] } , <nl> { " command " : { " action " : " newTab " , " index " : 0 } , " keys " : [ " ctrl + shift + 1 " ] } , <nl> { " command " : { " action " : " newTab " , " index " : 1 } , " keys " : [ " ctrl + shift + 2 " ] } , <nl> { " command " : { " action " : " newTab " , " index " : 2 } , " keys " : [ " ctrl + shift + 3 " ] } , <nl> <nl> { " command " : { " action " : " newTab " , " index " : 6 } , " keys " : [ " ctrl + shift + 7 " ] } , <nl> { " command " : { " action " : " newTab " , " index " : 7 } , " keys " : [ " ctrl + shift + 8 " ] } , <nl> { " command " : { " action " : " newTab " , " index " : 8 } , " keys " : [ " ctrl + shift + 9 " ] } , <nl> - { " command " : " nextTab " , " keys " : [ " ctrl + tab " ] } , <nl> - { " command " : " openNewTabDropdown " , " keys " : [ " ctrl + shift + space " ] } , <nl> - { " command " : " openSettings " , " keys " : [ " ctrl + , " ] } , <nl> - { " command " : " paste " , " keys " : [ " ctrl + shift + v " ] } , <nl> - { " command " : " prevTab " , " keys " : [ " ctrl + shift + tab " ] } , <nl> - { " command " : " scrollDown " , " keys " : [ " ctrl + shift + down " ] } , <nl> - { " command " : " scrollDownPage " , " keys " : [ " ctrl + shift + pgdn " ] } , <nl> - { " command " : " scrollUp " , " keys " : [ " ctrl + shift + up " ] } , <nl> - { " command " : " scrollUpPage " , " keys " : [ " ctrl + shift + pgup " ] } , <nl> + { " command " : " nextTab " , " keys " : [ " ctrl + tab " ] } , <nl> + { " command " : " openNewTabDropdown " , " keys " : [ " ctrl + shift + space " ] } , <nl> + { " command " : " openSettings " , " keys " : [ " ctrl + , " ] } , <nl> + { " command " : " paste " , " keys " : [ " ctrl + shift + v " ] } , <nl> + { " command " : " prevTab " , " keys " : [ " ctrl + shift + tab " ] } , <nl> + { " command " : { " action " : " resizePane " , " direction " : " down " } , " keys " : [ " alt + shift + down " ] } , <nl> + { " command " : { " action " : " resizePane " , " direction " : " left " } , " keys " : [ " alt + shift + 
left " ] } , <nl> + { " command " : { " action " : " resizePane " , " direction " : " right " } , " keys " : [ " alt + shift + right " ] } , <nl> + { " command " : { " action " : " resizePane " , " direction " : " up " } , " keys " : [ " alt + shift + up " ] } , <nl> + { " command " : " scrollDown " , " keys " : [ " ctrl + shift + down " ] } , <nl> + { " command " : " scrollDownPage " , " keys " : [ " ctrl + shift + pgdn " ] } , <nl> + { " command " : " scrollUp " , " keys " : [ " ctrl + shift + up " ] } , <nl> + { " command " : " scrollUpPage " , " keys " : [ " ctrl + shift + pgup " ] } , <nl> + { " command " : " splitHorizontal " , " keys " : [ " alt + shift + - " ] } , <nl> + { " command " : " splitVertical " , " keys " : [ " alt + shift + plus " ] } , <nl> { " command " : { " action " : " switchToTab " , " index " : 0 } , " keys " : [ " ctrl + alt + 1 " ] } , <nl> { " command " : { " action " : " switchToTab " , " index " : 1 } , " keys " : [ " ctrl + alt + 2 " ] } , <nl> { " command " : { " action " : " switchToTab " , " index " : 2 } , " keys " : [ " ctrl + alt + 3 " ] } , <nl> <nl> { " command " : { " action " : " switchToTab " , " index " : 6 } , " keys " : [ " ctrl + alt + 7 " ] } , <nl> { " command " : { " action " : " switchToTab " , " index " : 7 } , " keys " : [ " ctrl + alt + 8 " ] } , <nl> { " command " : { " action " : " switchToTab " , " index " : 8 } , " keys " : [ " ctrl + alt + 9 " ] } , <nl> - { " command " : " decreaseFontSize " , " keys " : [ " ctrl + - " ] } , <nl> - { " command " : " increaseFontSize " , " keys " : [ " ctrl + = " ] } , <nl> - { " command " : " toggleFullscreen " , " keys " : [ " alt + enter " ] } , <nl> - { " command " : " toggleFullscreen " , " keys " : [ " f11 " ] } <nl> + { " command " : " toggleFullscreen " , " keys " : [ " alt + enter " ] } , <nl> + { " command " : " toggleFullscreen " , " keys " : [ " f11 " ] } <nl> ] <nl> } <nl>
|
Added default keybindings for panes ()
|
microsoft/terminal
|
d1e9de788297af959a35b0dcc6abb36dce13a672
|
2019-11-19T18:00:47Z
|
mmm a / src / app / modules / gui . cpp <nl> ppp b / src / app / modules / gui . cpp <nl> void exit_module_gui ( ) <nl> static void load_gui_config ( int & w , int & h , bool & maximized , <nl> std : : string & windowLayout ) <nl> { <nl> - w = get_config_int ( " GfxMode " , " Width " , 0 ) ; <nl> - h = get_config_int ( " GfxMode " , " Height " , 0 ) ; <nl> + gfx : : Size defSize = she : : instance ( ) - > defaultNewDisplaySize ( ) ; <nl> + <nl> + w = get_config_int ( " GfxMode " , " Width " , defSize . w ) ; <nl> + h = get_config_int ( " GfxMode " , " Height " , defSize . h ) ; <nl> maximized = get_config_bool ( " GfxMode " , " Maximized " , false ) ; <nl> windowLayout = get_config_string ( " GfxMode " , " WindowLayout " , " " ) ; <nl> } <nl> mmm a / src / she / alleg4 / she . cpp <nl> ppp b / src / she / alleg4 / she . cpp <nl> class Alleg4System : public CommonSystem { <nl> / / Do nothing <nl> } <nl> <nl> + gfx : : Size defaultNewDisplaySize ( ) override { <nl> + return gfx : : Size ( 0 , 0 ) ; <nl> + } <nl> + <nl> Display * defaultDisplay ( ) override { <nl> return unique_display ; <nl> } <nl> mmm a / src / she / skia / skia_system . h <nl> ppp b / src / she / skia / skia_system . h <nl> class SkiaSystem : public CommonSystem { <nl> m_gpuAcceleration = state ; <nl> } <nl> <nl> + gfx : : Size defaultNewDisplaySize ( ) override { <nl> + gfx : : Size sz ; <nl> + # ifdef _WIN32 <nl> + sz . w = GetSystemMetrics ( SM_CXMAXIMIZED ) ; <nl> + sz . h = GetSystemMetrics ( SM_CYMAXIMIZED ) ; <nl> + sz . w - = GetSystemMetrics ( SM_CXSIZEFRAME ) * 4 ; <nl> + sz . h - = GetSystemMetrics ( SM_CYSIZEFRAME ) * 4 ; <nl> + sz . w = MAX ( 0 , sz . w ) ; <nl> + sz . h = MAX ( 0 , sz . h ) ; <nl> + # endif <nl> + return sz ; <nl> + } <nl> + <nl> Display * defaultDisplay ( ) override { <nl> return m_defaultDisplay ; <nl> } <nl> mmm a / src / she / system . h <nl> ppp b / src / she / system . h <nl> namespace she { <nl> virtual EventQueue * eventQueue ( ) = 0 ; <nl> virtual bool gpuAcceleration ( ) const = 0 ; <nl> virtual void setGpuAcceleration ( bool state ) = 0 ; <nl> + virtual gfx : : Size defaultNewDisplaySize ( ) = 0 ; <nl> virtual Display * defaultDisplay ( ) = 0 ; <nl> virtual Display * createDisplay ( int width , int height , int scale ) = 0 ; <nl> virtual Surface * createSurface ( int width , int height ) = 0 ; <nl> mmm a / src / she / win / window . h <nl> ppp b / src / she / win / window . h <nl> namespace she { <nl> <nl> SetWindowLongPtr ( hwnd , GWLP_USERDATA , LONG_PTR ( self ) ) ; <nl> <nl> + / / Center the window <nl> + RECT workarea ; <nl> + if ( SystemParametersInfo ( SPI_GETWORKAREA , 0 , ( PVOID ) & workarea , 0 ) ) { <nl> + SetWindowPos ( hwnd , nullptr , <nl> + ( workarea . right - workarea . left ) / 2 - width / 2 , <nl> + ( workarea . bottom - workarea . top ) / 2 - height / 2 , 0 , 0 , <nl> + SWP_NOSIZE | <nl> + SWP_NOSENDCHANGING | <nl> + SWP_NOOWNERZORDER | <nl> + SWP_NOZORDER | <nl> + SWP_NOREDRAW ) ; <nl> + } <nl> + <nl> / / Set scroll info to receive WM_HSCROLL / VSCROLL events ( events <nl> / / generated by some trackpad drivers ) . <nl> SCROLLINFO si ; <nl>
|
Improve default window size on Skia/Win port (fix)
|
aseprite/aseprite
|
a6544d92afda30ab679d1cc42a26a222672ce87c
|
2015-12-28T21:02:07Z
|
mmm a / Code / CryEngine / CryAudioSystem / ATL . cpp <nl> ppp b / Code / CryEngine / CryAudioSystem / ATL . cpp <nl> inline ERequestResult ConvertToRequestResult ( ERequestStatus const eAudioRequestS <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> CAudioTranslationLayer : : CAudioTranslationLayer ( ) <nl> - : m_audioObjectMgr ( m_audioEventMgr , m_audioStandaloneFileMgr ) <nl> + : m_audioObjectMgr ( m_audioEventMgr , m_audioStandaloneFileMgr , m_audioListenerMgr ) <nl> , m_fileCacheMgr ( m_preloadRequests ) <nl> , m_xmlProcessor ( m_triggers , m_parameters , m_switches , m_environments , m_preloadRequests , m_fileCacheMgr , m_internalControls ) <nl> { <nl> ERequestStatus CAudioTranslationLayer : : SetImpl ( IAudioImpl * const pImpl ) <nl> / / ! \ note We create the global audio object lazily to make sure it <nl> / / ! gets created on the audio thread so that we can assert that all <nl> / / ! pooled allocations happen on the audio thread ( and that it is <nl> - / / ! therefore safe to use singlethreaded pool allocator <nl> + / / ! therefore safe to use single - threaded pool allocator <nl> / / ! implementations ) . <nl> - m_pGlobalAudioObject = new CATLAudioObject ( nullptr ) ; <nl> + m_pGlobalAudioObject = new CATLAudioObject ( nullptr , ZERO ) ; <nl> <nl> # if defined ( INCLUDE_AUDIO_PRODUCTION_CODE ) <nl> m_pGlobalAudioObject - > m_name = " Global Audio Object " ; <nl> mmm a / Code / CryEngine / CryAudioSystem / ATLAudioObject . cpp <nl> ppp b / Code / CryEngine / CryAudioSystem / ATLAudioObject . cpp <nl> SwitchStateId CryAudio : : CATLAudioObject : : s_occlusionTypeStateIds [ eOcclusionType_ <nl> } ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - CATLAudioObject : : CATLAudioObject ( Impl : : IAudioObject * const pImplData / * = nullptr * / ) <nl> + CATLAudioObject : : CATLAudioObject ( Impl : : IAudioObject * const pImplData , Vec3 const & audioListenerPosition ) <nl> : m_pImplData ( pImplData ) <nl> - , m_propagationProcessor ( m_attributes . transformation ) <nl> + , m_propagationProcessor ( m_attributes . transformation , audioListenerPosition ) <nl> { <nl> m_propagationProcessor . Init ( this ) ; <nl> } <nl> mmm a / Code / CryEngine / CryAudioSystem / ATLAudioObject . h <nl> ppp b / Code / CryEngine / CryAudioSystem / ATLAudioObject . h <nl> class CATLAudioObject final : public IObject , public CPoolObject < CATLAudioObject <nl> { <nl> public : <nl> <nl> - explicit CATLAudioObject ( Impl : : IAudioObject * const pImplData = nullptr ) ; <nl> + explicit CATLAudioObject ( Impl : : IAudioObject * const pImplData , Vec3 const & audioListenerPosition ) ; <nl> <nl> CATLAudioObject ( CATLAudioObject const & ) = delete ; <nl> CATLAudioObject ( CATLAudioObject & & ) = delete ; <nl> mmm a / Code / CryEngine / CryAudioSystem / AudioCVars . cpp <nl> ppp b / Code / CryEngine / CryAudioSystem / AudioCVars . cpp <nl> void CAudioCVars : : RegisterVariables ( ) <nl> m_audioEventPoolSize = 256 ; <nl> m_audioStandaloneFilePoolSize = 1 ; <nl> m_audioProxiesInitType = 0 ; <nl> - m_occlusionMaxDistance = 500 . 0f ; <nl> m_occlusionMaxSyncDistance = 10 . 0f ; <nl> m_occlusionHighDistance = 10 . 0f ; <nl> m_occlusionMediumDistance = 80 . 
0f ; <nl> void CAudioCVars : : RegisterVariables ( ) <nl> m_audioEventPoolSize = 256 ; <nl> m_audioStandaloneFilePoolSize = 1 ; <nl> m_audioProxiesInitType = 0 ; <nl> - m_occlusionMaxDistance = 500 . 0f ; <nl> m_occlusionMaxSyncDistance = 10 . 0f ; <nl> m_occlusionHighDistance = 10 . 0f ; <nl> m_occlusionMediumDistance = 80 . 0f ; <nl> void CAudioCVars : : RegisterVariables ( ) <nl> m_audioEventPoolSize = 256 ; <nl> m_audioStandaloneFilePoolSize = 1 ; <nl> m_audioProxiesInitType = 0 ; <nl> - m_occlusionMaxDistance = 500 . 0f ; <nl> m_occlusionMaxSyncDistance = 10 . 0f ; <nl> m_occlusionHighDistance = 10 . 0f ; <nl> m_occlusionMediumDistance = 80 . 0f ; <nl> void CAudioCVars : : RegisterVariables ( ) <nl> m_audioEventPoolSize = 256 ; <nl> m_audioStandaloneFilePoolSize = 1 ; <nl> m_audioProxiesInitType = 0 ; <nl> - m_occlusionMaxDistance = 500 . 0f ; <nl> m_occlusionMaxSyncDistance = 10 . 0f ; <nl> m_occlusionHighDistance = 10 . 0f ; <nl> m_occlusionMediumDistance = 80 . 0f ; <nl> void CAudioCVars : : RegisterVariables ( ) <nl> m_audioEventPoolSize = 256 ; <nl> m_audioStandaloneFilePoolSize = 1 ; <nl> m_audioProxiesInitType = 0 ; <nl> - m_occlusionMaxDistance = 500 . 0f ; <nl> m_occlusionMaxSyncDistance = 10 . 0f ; <nl> m_occlusionHighDistance = 10 . 0f ; <nl> m_occlusionMediumDistance = 80 . 0f ; <nl> void CAudioCVars : : RegisterVariables ( ) <nl> m_audioEventPoolSize = 256 ; <nl> m_audioStandaloneFilePoolSize = 1 ; <nl> m_audioProxiesInitType = 0 ; <nl> - m_occlusionMaxDistance = 500 . 0f ; <nl> m_occlusionMaxSyncDistance = 10 . 0f ; <nl> m_occlusionHighDistance = 10 . 0f ; <nl> m_occlusionMediumDistance = 80 . 0f ; <nl> void CAudioCVars : : RegisterVariables ( ) <nl> m_audioEventPoolSize = 256 ; <nl> m_audioStandaloneFilePoolSize = 1 ; <nl> m_audioProxiesInitType = 0 ; <nl> - m_occlusionMaxDistance = 500 . 0f ; <nl> m_occlusionMaxSyncDistance = 10 . 0f ; <nl> m_occlusionHighDistance = 10 . 0f ; <nl> m_occlusionMediumDistance = 80 . 0f ; <nl> void CAudioCVars : : RegisterVariables ( ) <nl> # endif <nl> <nl> REGISTER_CVAR2 ( " s_OcclusionMaxDistance " , & m_occlusionMaxDistance , m_occlusionMaxDistance , VF_CHEAT | VF_CHEAT_NOCHECK , <nl> - " Obstruction / Occlusion is not calculated for the sounds , whose distance to the listener is greater than this value . Setting this value to 0 disables obstruction / occlusion calculations . \ n " <nl> + " Occlusion is not calculated for audio objects , whose distance to the listener is greater than this value . Setting this value to 0 disables obstruction / occlusion calculations . \ n " <nl> " Usage : s_OcclusionMaxDistance [ 0 / . . . ] \ n " <nl> " Default : 500 m \ n " ) ; <nl> <nl> + REGISTER_CVAR2 ( " s_OcclusionMinDistance " , & m_occlusionMinDistance , m_occlusionMinDistance , VF_CHEAT | VF_CHEAT_NOCHECK , <nl> + " Occlusion is not calculated for audio objects , whose distance to the listener is smaller than this value . \ n " <nl> + " Usage : s_OcclusionMinDistance [ 0 / . . . ] \ n " <nl> + " Default : 0 . 1 m \ n " ) ; <nl> + <nl> REGISTER_CVAR2 ( " s_OcclusionMaxSyncDistance " , & m_occlusionMaxSyncDistance , m_occlusionMaxSyncDistance , VF_CHEAT | VF_CHEAT_NOCHECK , <nl> " Physics rays are processed synchronously for the sounds that are closer to the listener than this value , and asynchronously for the rest ( possible performance optimization ) . \ n " <nl> " Usage : s_OcclusionMaxSyncDistance [ 0 / . . . ] \ n " <nl> void CAudioCVars : : UnregisterVariables ( ) <nl> if ( pConsole ! 
= nullptr ) <nl> { <nl> pConsole - > UnregisterVariable ( " s_OcclusionMaxDistance " ) ; <nl> + pConsole - > UnregisterVariable ( " s_OcclusionMinDistance " ) ; <nl> pConsole - > UnregisterVariable ( " s_OcclusionMaxSyncDistance " ) ; <nl> pConsole - > UnregisterVariable ( " s_OcclusionHighDistance " ) ; <nl> pConsole - > UnregisterVariable ( " s_OcclusionMediumDistance " ) ; <nl> mmm a / Code / CryEngine / CryAudioSystem / AudioCVars . h <nl> ppp b / Code / CryEngine / CryAudioSystem / AudioCVars . h <nl> class CAudioCVars final <nl> int m_audioProxiesInitType = 0 ; <nl> int m_tickWithMainThread = 0 ; <nl> <nl> - float m_occlusionMaxDistance = 0 . 0f ; <nl> + float m_occlusionMaxDistance = 500 . 0f ; <nl> + float m_occlusionMinDistance = 0 . 1f ; <nl> float m_occlusionMaxSyncDistance = 0 . 0f ; <nl> float m_occlusionHighDistance = 0 . 0f ; <nl> float m_occlusionMediumDistance = 0 . 0f ; <nl> mmm a / Code / CryEngine / CryAudioSystem / AudioObjectManager . cpp <nl> ppp b / Code / CryEngine / CryAudioSystem / AudioObjectManager . cpp <nl> <nl> # include " AudioObjectManager . h " <nl> # include " AudioEventManager . h " <nl> # include " AudioStandaloneFileManager . h " <nl> + # include " AudioListenerManager . h " <nl> # include " ATLAudioObject . h " <nl> # include " AudioCVars . h " <nl> # include " IAudioImpl . h " <nl> using namespace CryAudio ; <nl> using namespace CryAudio : : Impl ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - CAudioObjectManager : : CAudioObjectManager ( CAudioEventManager & audioEventMgr , CAudioStandaloneFileManager & audioStandaloneFileMgr ) <nl> + CAudioObjectManager : : CAudioObjectManager ( <nl> + CAudioEventManager & audioEventMgr , <nl> + CAudioStandaloneFileManager & audioStandaloneFileMgr , <nl> + CAudioListenerManager const & listenerManager ) <nl> : m_pImpl ( nullptr ) <nl> , m_timeSinceLastControlsUpdate ( 0 . 0f ) <nl> , m_audioEventMgr ( audioEventMgr ) <nl> , m_audioStandaloneFileMgr ( audioStandaloneFileMgr ) <nl> + , m_listenerManager ( listenerManager ) <nl> { <nl> } <nl> <nl> void CAudioObjectManager : : Update ( float const deltaTime , SObject3DAttributes cons <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> CATLAudioObject * CAudioObjectManager : : ConstructAudioObject ( char const * const szName ) <nl> { <nl> - CATLAudioObject * const pAudioObject = new CATLAudioObject ( m_pImpl - > ConstructAudioObject ( szName ) ) ; <nl> + CATLAudioObject * const pAudioObject = new CATLAudioObject ( m_pImpl - > ConstructAudioObject ( szName ) , m_listenerManager . GetActiveListenerAttributes ( ) . transformation . GetPosition ( ) ) ; <nl> pAudioObject - > SetFlag ( eAudioObjectFlags_DoNotRelease ) ; <nl> <nl> # if defined ( INCLUDE_AUDIO_PRODUCTION_CODE ) <nl> mmm a / Code / CryEngine / CryAudioSystem / AudioObjectManager . h <nl> ppp b / Code / CryEngine / CryAudioSystem / AudioObjectManager . 
h <nl> namespace CryAudio <nl> class CATLAudioObject ; <nl> class CAudioEventManager ; <nl> class CAudioStandaloneFileManager ; <nl> + class CAudioListenerManager ; <nl> <nl> namespace Impl <nl> { <nl> class CAudioObjectManager final <nl> <nl> using ConstructedAudioObjectsList = std : : list < CATLAudioObject * > ; <nl> <nl> - explicit CAudioObjectManager ( CAudioEventManager & audioEventMgr , CAudioStandaloneFileManager & audioStandaloneFileMgr ) ; <nl> + explicit CAudioObjectManager ( <nl> + CAudioEventManager & audioEventMgr , <nl> + CAudioStandaloneFileManager & audioStandaloneFileMgr , <nl> + CAudioListenerManager const & listenerManager ) ; <nl> ~ CAudioObjectManager ( ) ; <nl> <nl> CAudioObjectManager ( CAudioObjectManager const & ) = delete ; <nl> class CAudioObjectManager final <nl> <nl> CAudioEventManager & m_audioEventMgr ; <nl> CAudioStandaloneFileManager & m_audioStandaloneFileMgr ; <nl> + CAudioListenerManager const & m_listenerManager ; <nl> } ; <nl> } / / namespace CryAudio <nl> mmm a / Code / CryEngine / CryAudioSystem / PropagationProcessor . cpp <nl> ppp b / Code / CryEngine / CryAudioSystem / PropagationProcessor . cpp <nl> int CPropagationProcessor : : OnObstructionTest ( EventPhys const * pEvent ) <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - CPropagationProcessor : : CPropagationProcessor ( CObjectTransformation const & transformation ) <nl> + CPropagationProcessor : : CPropagationProcessor ( CObjectTransformation const & transformation , Vec3 const & audioListenerPosition ) <nl> : m_obstruction ( 0 . 0f ) <nl> , m_occlusion ( 0 . 0f ) <nl> , m_occlusionMultiplier ( 1 . 0f ) <nl> CPropagationProcessor : : CPropagationProcessor ( CObjectTransformation const & transf <nl> s_numRaySamplePositionsHigh = s_raySamplePositionsHigh . size ( ) ; <nl> } <nl> <nl> + m_currentListenerDistance = audioListenerPosition . GetDistance ( m_transformation . GetPosition ( ) ) ; <nl> m_raysOcclusion . resize ( s_numRaySamplePositionsHigh , 0 . 0f ) ; <nl> m_raysInfo . reserve ( s_numConcurrentRaysHigh ) ; <nl> } <nl> void CPropagationProcessor : : Update ( <nl> <nl> if ( CanRunObstructionOcclusion ( ) ) <nl> { <nl> - if ( m_currentListenerDistance < g_audioCVars . m_occlusionMaxDistance ) <nl> + if ( m_currentListenerDistance < g_audioCVars . m_occlusionHighDistance ) <nl> { <nl> - if ( m_currentListenerDistance < g_audioCVars . m_occlusionHighDistance ) <nl> - { <nl> - m_occlusionTypeWhenAdaptive = eOcclusionType_High ; <nl> - } <nl> - else if ( m_currentListenerDistance < g_audioCVars . m_occlusionMediumDistance ) <nl> - { <nl> - m_occlusionTypeWhenAdaptive = eOcclusionType_Medium ; <nl> - } <nl> - else <nl> - { <nl> - m_occlusionTypeWhenAdaptive = eOcclusionType_Low ; <nl> - } <nl> - <nl> - RunObstructionQuery ( audioListenerPosition ) ; <nl> + m_occlusionTypeWhenAdaptive = eOcclusionType_High ; <nl> + } <nl> + else if ( m_currentListenerDistance < g_audioCVars . m_occlusionMediumDistance ) <nl> + { <nl> + m_occlusionTypeWhenAdaptive = eOcclusionType_Medium ; <nl> } <nl> else <nl> { <nl> - m_obstruction = 0 . 0f ; <nl> - m_occlusion = 0 . 0f ; <nl> + m_occlusionTypeWhenAdaptive = eOcclusionType_Low ; <nl> } <nl> + <nl> + RunObstructionQuery ( audioListenerPosition ) ; <nl> + } <nl> + else <nl> + { <nl> + m_obstruction = 0 . 0f ; <nl> + m_occlusion = 0 . 
0f ; <nl> } <nl> <nl> # if defined ( INCLUDE_AUDIO_PRODUCTION_CODE ) <nl> void CPropagationProcessor : : SetOcclusionType ( EOcclusionType const occlusionType , <nl> m_obstruction = 0 . 0f ; <nl> m_occlusion = 0 . 0f ; <nl> <nl> + / / First time run is synchronous and center ray only to get a quick initial value to start from . <nl> + Vec3 const direction ( m_transformation . GetPosition ( ) - audioListenerPosition ) ; <nl> + m_currentListenerDistance = direction . GetLength ( ) ; <nl> + <nl> if ( CanRunObstructionOcclusion ( ) ) <nl> { <nl> - / / First time run is synchronous and center ray only to get a quick initial value to start from . <nl> - Vec3 const direction ( m_transformation . GetPosition ( ) - audioListenerPosition ) ; <nl> - m_currentListenerDistance = direction . GetLength ( ) ; <nl> - CRY_ASSERT_MESSAGE ( m_currentListenerDistance , " distance to listener must never be 0 here " ) ; <nl> - <nl> - if ( m_currentListenerDistance < g_audioCVars . m_occlusionMaxDistance ) <nl> + Vec3 directionNormalized ( direction / m_currentListenerDistance ) ; <nl> + Vec3 const finalDirection ( direction - ( directionNormalized * g_audioCVars . m_occlusionRayLengthOffset ) ) ; <nl> + <nl> + CAudioRayInfo & rayInfo = m_raysInfo [ 0 ] ; <nl> + static int const physicsFlags = ent_water | ent_static | ent_sleeping_rigid | ent_rigid | ent_terrain ; <nl> + rayInfo . numHits = static_cast < size_t > ( gEnv - > pPhysicalWorld - > RayWorldIntersection ( <nl> + audioListenerPosition , <nl> + finalDirection , <nl> + physicsFlags , <nl> + rwi_pierceability0 , <nl> + rayInfo . hits , <nl> + static_cast < int > ( s_maxRayHits ) , <nl> + nullptr , <nl> + 0 , <nl> + & rayInfo , <nl> + PHYS_FOREIGN_ID_SOUND_OBSTRUCTION ) ) ; <nl> + <nl> + rayInfo . numHits = min ( rayInfo . numHits + 1 , s_maxRayHits ) ; <nl> + float totalOcclusion = 0 . 0f ; <nl> + <nl> + if ( rayInfo . numHits > 0 ) <nl> { <nl> - Vec3 directionNormalized ( direction / m_currentListenerDistance ) ; <nl> - Vec3 const finalDirection ( direction - ( directionNormalized * g_audioCVars . m_occlusionRayLengthOffset ) ) ; <nl> - <nl> - CAudioRayInfo & rayInfo = m_raysInfo [ 0 ] ; <nl> - static int const physicsFlags = ent_water | ent_static | ent_sleeping_rigid | ent_rigid | ent_terrain ; <nl> - rayInfo . numHits = static_cast < size_t > ( gEnv - > pPhysicalWorld - > RayWorldIntersection ( <nl> - audioListenerPosition , <nl> - finalDirection , <nl> - physicsFlags , <nl> - rwi_pierceability0 , <nl> - rayInfo . hits , <nl> - static_cast < int > ( s_maxRayHits ) , <nl> - nullptr , <nl> - 0 , <nl> - & rayInfo , <nl> - PHYS_FOREIGN_ID_SOUND_OBSTRUCTION ) ) ; <nl> - <nl> - rayInfo . numHits = min ( rayInfo . numHits + 1 , s_maxRayHits ) ; <nl> - float totalOcclusion = 0 . 0f ; <nl> - <nl> - if ( rayInfo . numHits > 0 ) <nl> + ISurfaceTypeManager * const pSurfaceTypeManager = gEnv - > p3DEngine - > GetMaterialManager ( ) - > GetSurfaceTypeManager ( ) ; <nl> + CRY_ASSERT ( rayInfo . numHits < = s_maxRayHits ) ; <nl> + <nl> + for ( size_t i = 0 ; i < rayInfo . numHits ; + + i ) <nl> { <nl> - ISurfaceTypeManager * const pSurfaceTypeManager = gEnv - > p3DEngine - > GetMaterialManager ( ) - > GetSurfaceTypeManager ( ) ; <nl> - CRY_ASSERT ( rayInfo . numHits < = s_maxRayHits ) ; <nl> + float const distance = rayInfo . hits [ i ] . dist ; <nl> <nl> - for ( size_t i = 0 ; i < rayInfo . numHits ; + + i ) <nl> + if ( distance > 0 . 0f ) <nl> { <nl> - float const distance = rayInfo . hits [ i ] . 
dist ; <nl> + ISurfaceType * const pMat = pSurfaceTypeManager - > GetSurfaceType ( rayInfo . hits [ i ] . surface_idx ) ; <nl> <nl> - if ( distance > 0 . 0f ) <nl> + if ( pMat ! = nullptr ) <nl> { <nl> - ISurfaceType * const pMat = pSurfaceTypeManager - > GetSurfaceType ( rayInfo . hits [ i ] . surface_idx ) ; <nl> - <nl> - if ( pMat ! = nullptr ) <nl> - { <nl> - ISurfaceType : : SPhysicalParams const & physParams = pMat - > GetPhyscalParams ( ) ; <nl> - totalOcclusion + = physParams . sound_obstruction ; <nl> - } <nl> + ISurfaceType : : SPhysicalParams const & physParams = pMat - > GetPhyscalParams ( ) ; <nl> + totalOcclusion + = physParams . sound_obstruction ; <nl> } <nl> } <nl> } <nl> + } <nl> <nl> - m_occlusion = clamp_tpl ( totalOcclusion , 0 . 0f , 1 . 0f ) * m_occlusionMultiplier ; <nl> + m_occlusion = clamp_tpl ( totalOcclusion , 0 . 0f , 1 . 0f ) * m_occlusionMultiplier ; <nl> <nl> - for ( auto & rayOcclusion : m_raysOcclusion ) <nl> - { <nl> - rayOcclusion = m_occlusion ; <nl> - } <nl> + for ( auto & rayOcclusion : m_raysOcclusion ) <nl> + { <nl> + rayOcclusion = m_occlusion ; <nl> } <nl> } <nl> else <nl> void CPropagationProcessor : : SetOcclusionType ( EOcclusionType const occlusionType , <nl> } <nl> } <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + bool CryAudio : : CPropagationProcessor : : CanRunObstructionOcclusion ( ) const <nl> + { <nl> + return <nl> + m_occlusionType ! = eOcclusionType_None & & <nl> + m_occlusionType ! = eOcclusionType_Ignore & & <nl> + m_currentListenerDistance > g_audioCVars . m_occlusionMinDistance & & <nl> + m_currentListenerDistance < g_audioCVars . m_occlusionMaxDistance & & <nl> + s_bCanIssueRWIs ; <nl> + } <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> void CPropagationProcessor : : GetPropagationData ( SATLSoundPropagationData & propagationData ) const <nl> { <nl> size_t CPropagationProcessor : : s_totalAsyncPhysRays = 0 ; <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> void CPropagationProcessor : : DrawObstructionRays ( IRenderAuxGeom & auxGeom ) const <nl> { <nl> - if ( CanRunObstructionOcclusion ( ) & & ( m_currentListenerDistance < g_audioCVars . m_occlusionMaxDistance ) ) <nl> + if ( CanRunObstructionOcclusion ( ) ) <nl> { <nl> size_t const numConcurrentRays = GetNumConcurrentRays ( ) ; <nl> CRY_ASSERT ( numConcurrentRays > 0 ) ; <nl> mmm a / Code / CryEngine / CryAudioSystem / PropagationProcessor . h <nl> ppp b / Code / CryEngine / CryAudioSystem / PropagationProcessor . 
h <nl> class CPropagationProcessor <nl> typedef std : : vector < CAudioRayInfo > RayInfoVec ; <nl> typedef std : : vector < float > RayOcclusionVec ; <nl> <nl> - CPropagationProcessor ( CObjectTransformation const & transformation ) ; <nl> + CPropagationProcessor ( CObjectTransformation const & transformation , Vec3 const & audioListenerPosition ) ; <nl> ~ CPropagationProcessor ( ) ; <nl> <nl> void Init ( CATLAudioObject * pAudioObject ) ; <nl> class CPropagationProcessor <nl> <nl> void Update ( float const deltaTime , float const distance , Vec3 const & audioListenerPosition ) ; <nl> void SetOcclusionType ( EOcclusionType const occlusionType , Vec3 const & audioListenerPosition ) ; <nl> - bool CanRunObstructionOcclusion ( ) const { return s_bCanIssueRWIs & & m_occlusionType ! = eOcclusionType_None & & m_occlusionType ! = eOcclusionType_Ignore ; } <nl> + bool CanRunObstructionOcclusion ( ) const ; <nl> void GetPropagationData ( SATLSoundPropagationData & propagationData ) const ; <nl> void ProcessPhysicsRay ( CAudioRayInfo * const pAudioRayInfo ) ; <nl> void ReleasePendingRays ( ) ; <nl> mmm a / Code / CryEngine / CryAudioSystem / implementations / CryAudioImplWwise / ATLEntities . cpp <nl> ppp b / Code / CryEngine / CryAudioSystem / implementations / CryAudioImplWwise / ATLEntities . cpp <nl> namespace Impl <nl> namespace Wwise <nl> { <nl> <nl> - AkGameObjectID SAudioObject : : s_dummyGameObjectId = static_cast < AkGameObjectID > ( - 2 ) ; <nl> + AkGameObjectID CAudioObject : : s_dummyGameObjectId = static_cast < AkGameObjectID > ( - 2 ) ; <nl> <nl> / / AK callbacks <nl> void EndEventCallback ( AkCallbackType callbackType , AkCallbackInfo * pCallbackInfo ) <nl> void PrepareEventCallback ( <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - ERequestStatus SAudioObject : : Update ( ) <nl> + ERequestStatus CAudioObject : : Update ( ) <nl> { <nl> ERequestStatus result = eRequestStatus_Failure ; <nl> <nl> - if ( bNeedsToUpdateEnvironments ) <nl> + if ( m_bNeedsToUpdateEnvironments ) <nl> { <nl> result = PostEnvironmentAmounts ( ) ; <nl> } <nl> ERequestStatus SAudioObject : : Update ( ) <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - ERequestStatus SAudioObject : : Set3DAttributes ( SObject3DAttributes const & attributes ) <nl> + ERequestStatus CAudioObject : : Set3DAttributes ( SObject3DAttributes const & attributes ) <nl> { <nl> AkSoundPosition soundPos ; <nl> FillAKObjectPosition ( attributes . transformation , soundPos ) ; <nl> <nl> - AKRESULT const wwiseResult = AK : : SoundEngine : : SetPosition ( id , soundPos ) ; <nl> + AKRESULT const wwiseResult = AK : : SoundEngine : : SetPosition ( m_id , soundPos ) ; <nl> <nl> if ( ! IS_WWISE_OK ( wwiseResult ) ) <nl> { <nl> ERequestStatus SAudioObject : : Set3DAttributes ( SObject3DAttributes const & attribut <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - ERequestStatus SAudioObject : : SetEnvironment ( IAudioEnvironment const * const pIAudioEnvironment , float const amount ) <nl> + ERequestStatus CAudioObject : : SetEnvironment ( IAudioEnvironment const * const pIAudioEnvironment , float const amount ) <nl> { <nl> static float const envEpsilon = 0 . 
0001f ; <nl> <nl> ERequestStatus SAudioObject : : SetEnvironment ( IAudioEnvironment const * const pIAud <nl> { <nl> case eWwiseAudioEnvironmentType_AuxBus : <nl> { <nl> - float const currentAmount = stl : : find_in_map ( environemntImplAmounts , pEnvironment - > busId , - 1 . 0f ) ; <nl> + float const currentAmount = stl : : find_in_map ( m_environemntImplAmounts , pEnvironment - > busId , - 1 . 0f ) ; <nl> <nl> if ( ( currentAmount = = - 1 . 0f ) | | ( fabs ( currentAmount - amount ) > envEpsilon ) ) <nl> { <nl> - environemntImplAmounts [ pEnvironment - > busId ] = amount ; <nl> - bNeedsToUpdateEnvironments = true ; <nl> + m_environemntImplAmounts [ pEnvironment - > busId ] = amount ; <nl> + m_bNeedsToUpdateEnvironments = true ; <nl> } <nl> <nl> result = eRequestStatus_Success ; <nl> ERequestStatus SAudioObject : : SetEnvironment ( IAudioEnvironment const * const pIAud <nl> { <nl> AkRtpcValue rtpcValue = static_cast < AkRtpcValue > ( pEnvironment - > multiplier * amount + pEnvironment - > shift ) ; <nl> <nl> - AKRESULT const wwiseResult = AK : : SoundEngine : : SetRTPCValue ( pEnvironment - > rtpcId , rtpcValue , id ) ; <nl> + AKRESULT const wwiseResult = AK : : SoundEngine : : SetRTPCValue ( pEnvironment - > rtpcId , rtpcValue , m_id ) ; <nl> <nl> if ( IS_WWISE_OK ( wwiseResult ) ) <nl> { <nl> ERequestStatus SAudioObject : : SetEnvironment ( IAudioEnvironment const * const pIAud <nl> " Wwise failed to set the Rtpc % u to value % f on object % u in SetEnvironement ( ) " , <nl> pEnvironment - > rtpcId , <nl> rtpcValue , <nl> - id ) ; <nl> + m_id ) ; <nl> } <nl> break ; <nl> <nl> ERequestStatus SAudioObject : : SetEnvironment ( IAudioEnvironment const * const pIAud <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - ERequestStatus SAudioObject : : SetParameter ( IParameter const * const pAudioRtpc , float const value ) <nl> + ERequestStatus CAudioObject : : SetParameter ( IParameter const * const pAudioRtpc , float const value ) <nl> { <nl> ERequestStatus result = eRequestStatus_Failure ; <nl> SAudioRtpc const * const pAKRtpcData = static_cast < SAudioRtpc const * const > ( pAudioRtpc ) ; <nl> ERequestStatus SAudioObject : : SetParameter ( IParameter const * const pAudioRtpc , fl <nl> { <nl> AkRtpcValue rtpcValue = static_cast < AkRtpcValue > ( pAKRtpcData - > mult * value + pAKRtpcData - > shift ) ; <nl> <nl> - AKRESULT const wwiseResult = AK : : SoundEngine : : SetRTPCValue ( pAKRtpcData - > id , rtpcValue , id ) ; <nl> + AKRESULT const wwiseResult = AK : : SoundEngine : : SetRTPCValue ( pAKRtpcData - > id , rtpcValue , m_id ) ; <nl> <nl> if ( IS_WWISE_OK ( wwiseResult ) ) <nl> { <nl> ERequestStatus SAudioObject : : SetParameter ( IParameter const * const pAudioRtpc , fl <nl> " Wwise failed to set the Rtpc % " PRISIZE_T " to value % f on object % " PRISIZE_T , <nl> pAKRtpcData - > id , <nl> static_cast < AkRtpcValue > ( value ) , <nl> - id ) ; <nl> + m_id ) ; <nl> } <nl> } <nl> else <nl> ERequestStatus SAudioObject : : SetParameter ( IParameter const * const pAudioRtpc , fl <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - ERequestStatus SAudioObject : : SetSwitchState ( IAudioSwitchState const * const pIAudioSwitchState ) <nl> + ERequestStatus CAudioObject : : SetSwitchState ( IAudioSwitchState const * const pIAudioSwitchState ) <nl> { <nl> 
ERequestStatus result = eRequestStatus_Failure ; <nl> <nl> ERequestStatus SAudioObject : : SetSwitchState ( IAudioSwitchState const * const pIAud <nl> { <nl> case eWwiseSwitchType_Switch : <nl> { <nl> - AkGameObjectID const gameObjectId = id ! = AK_INVALID_GAME_OBJECT ? id : s_dummyGameObjectId ; <nl> + AkGameObjectID const gameObjectId = m_id ! = AK_INVALID_GAME_OBJECT ? m_id : s_dummyGameObjectId ; <nl> <nl> AKRESULT const wwiseResult = AK : : SoundEngine : : SetSwitch ( <nl> pSwitchState - > switchId , <nl> ERequestStatus SAudioObject : : SetSwitchState ( IAudioSwitchState const * const pIAud <nl> AKRESULT const wwiseResult = AK : : SoundEngine : : SetRTPCValue ( <nl> pSwitchState - > switchId , <nl> static_cast < AkRtpcValue > ( pSwitchState - > rtpcValue ) , <nl> - id ) ; <nl> + m_id ) ; <nl> <nl> if ( IS_WWISE_OK ( wwiseResult ) ) <nl> { <nl> ERequestStatus SAudioObject : : SetSwitchState ( IAudioSwitchState const * const pIAud <nl> " Wwise failed to set the Rtpc % " PRISIZE_T " to value % f on object % " PRISIZE_T , <nl> pSwitchState - > switchId , <nl> static_cast < AkRtpcValue > ( pSwitchState - > rtpcValue ) , <nl> - id ) ; <nl> + m_id ) ; <nl> } <nl> <nl> break ; <nl> ERequestStatus SAudioObject : : SetSwitchState ( IAudioSwitchState const * const pIAud <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - ERequestStatus SAudioObject : : SetObstructionOcclusion ( float const obstruction , float const occlusion ) <nl> + ERequestStatus CAudioObject : : SetObstructionOcclusion ( float const obstruction , float const occlusion ) <nl> { <nl> ERequestStatus result = eRequestStatus_Failure ; <nl> <nl> AKRESULT const wwiseResult = AK : : SoundEngine : : SetObjectObstructionAndOcclusion ( <nl> - id , <nl> + m_id , <nl> 0 , / / only set the obstruction / occlusion for the default listener for now <nl> static_cast < AkReal32 > ( occlusion ) , / / Currently used on obstruction until the ATL produces a correct obstruction value . <nl> static_cast < AkReal32 > ( occlusion ) ) ; <nl> ERequestStatus SAudioObject : : SetObstructionOcclusion ( float const obstruction , fl <nl> " Wwise failed to set Obstruction % f and Occlusion % f on object % " PRISIZE_T , <nl> obstruction , <nl> occlusion , <nl> - id ) ; <nl> + m_id ) ; <nl> } <nl> <nl> return result ; <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - ERequestStatus SAudioObject : : ExecuteTrigger ( IAudioTrigger const * const pIAudioTrigger , IAudioEvent * const pIAudioEvent ) <nl> + ERequestStatus CAudioObject : : ExecuteTrigger ( IAudioTrigger const * const pIAudioTrigger , IAudioEvent * const pIAudioEvent ) <nl> { <nl> <nl> ERequestStatus result = eRequestStatus_Failure ; <nl> ERequestStatus SAudioObject : : ExecuteTrigger ( IAudioTrigger const * const pIAudioTr <nl> { <nl> AkGameObjectID gameObjectId = AK_INVALID_GAME_OBJECT ; <nl> <nl> - if ( id ! = AK_INVALID_GAME_OBJECT ) <nl> + if ( m_id ! 
= AK_INVALID_GAME_OBJECT ) <nl> { <nl> - gameObjectId = id ; <nl> + gameObjectId = m_id ; <nl> PostEnvironmentAmounts ( ) ; <nl> } <nl> else <nl> { <nl> / / If ID is invalid , then it is the global audio object <nl> - gameObjectId = SAudioObject : : s_dummyGameObjectId ; <nl> + gameObjectId = CAudioObject : : s_dummyGameObjectId ; <nl> } <nl> <nl> AkPlayingID const id = AK : : SoundEngine : : PostEvent ( pAudioTrigger - > id , gameObjectId , AK_EndOfEvent , & EndEventCallback , pAudioEvent ) ; <nl> ERequestStatus SAudioObject : : ExecuteTrigger ( IAudioTrigger const * const pIAudioTr <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - ERequestStatus SAudioObject : : StopAllTriggers ( ) <nl> + ERequestStatus CAudioObject : : StopAllTriggers ( ) <nl> { <nl> - AkGameObjectID const gameObjectId = id ! = AK_INVALID_GAME_OBJECT ? id : SAudioObject : : s_dummyGameObjectId ; <nl> + AkGameObjectID const gameObjectId = m_id ! = AK_INVALID_GAME_OBJECT ? m_id : CAudioObject : : s_dummyGameObjectId ; <nl> AK : : SoundEngine : : StopAll ( gameObjectId ) ; <nl> return eRequestStatus_Success ; <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - ERequestStatus SAudioObject : : PostEnvironmentAmounts ( ) <nl> + ERequestStatus CAudioObject : : PostEnvironmentAmounts ( ) <nl> { <nl> <nl> ERequestStatus result = eRequestStatus_Failure ; <nl> AkAuxSendValue auxValues [ AK_MAX_AUX_PER_OBJ ] ; <nl> uint32 auxIndex = 0 ; <nl> <nl> - SAudioObject : : EnvironmentImplMap : : iterator iEnvPair = environemntImplAmounts . begin ( ) ; <nl> - SAudioObject : : EnvironmentImplMap : : const_iterator const iEnvStart = environemntImplAmounts . begin ( ) ; <nl> - SAudioObject : : EnvironmentImplMap : : const_iterator const iEnvEnd = environemntImplAmounts . end ( ) ; <nl> + CAudioObject : : EnvironmentImplMap : : iterator iEnvPair = m_environemntImplAmounts . begin ( ) ; <nl> + CAudioObject : : EnvironmentImplMap : : const_iterator const iEnvStart = m_environemntImplAmounts . begin ( ) ; <nl> + CAudioObject : : EnvironmentImplMap : : const_iterator const iEnvEnd = m_environemntImplAmounts . end ( ) ; <nl> <nl> - if ( environemntImplAmounts . size ( ) < = AK_MAX_AUX_PER_OBJ ) <nl> + if ( m_environemntImplAmounts . size ( ) < = AK_MAX_AUX_PER_OBJ ) <nl> { <nl> for ( ; iEnvPair ! = iEnvEnd ; + + auxIndex ) <nl> { <nl> ERequestStatus SAudioObject : : PostEnvironmentAmounts ( ) <nl> / / If an amount is zero , we still want to send it to the middleware , but we also want to remove it from the map . <nl> if ( fAmount = = 0 . 0f ) <nl> { <nl> - environemntImplAmounts . erase ( iEnvPair + + ) ; <nl> + m_environemntImplAmounts . erase ( iEnvPair + + ) ; <nl> } <nl> else <nl> { <nl> ERequestStatus SAudioObject : : PostEnvironmentAmounts ( ) <nl> { <nl> if ( iEnvPair - > second = = 0 . 0f ) <nl> { <nl> - environemntImplAmounts . erase ( iEnvPair + + ) ; <nl> + m_environemntImplAmounts . 
erase ( iEnvPair + + ) ; <nl> } <nl> else <nl> { <nl> ERequestStatus SAudioObject : : PostEnvironmentAmounts ( ) <nl> <nl> CRY_ASSERT ( auxIndex < = AK_MAX_AUX_PER_OBJ ) ; <nl> <nl> - AKRESULT const wwiseResult = AK : : SoundEngine : : SetGameObjectAuxSendValues ( id , auxValues , auxIndex ) ; <nl> + AKRESULT const wwiseResult = AK : : SoundEngine : : SetGameObjectAuxSendValues ( m_id , auxValues , auxIndex ) ; <nl> <nl> if ( IS_WWISE_OK ( wwiseResult ) ) <nl> { <nl> ERequestStatus SAudioObject : : PostEnvironmentAmounts ( ) <nl> } <nl> else <nl> { <nl> - g_audioImplLogger . Log ( eAudioLogType_Warning , " Wwise SetGameObjectAuxSendValues failed on object % " PRISIZE_T " with AKRESULT : % d " , id , wwiseResult ) ; <nl> + g_audioImplLogger . Log ( eAudioLogType_Warning , " Wwise SetGameObjectAuxSendValues failed on object % " PRISIZE_T " with AKRESULT : % d " , m_id , wwiseResult ) ; <nl> } <nl> <nl> - bNeedsToUpdateEnvironments = false ; <nl> + m_bNeedsToUpdateEnvironments = false ; <nl> <nl> return result ; <nl> } <nl> mmm a / Code / CryEngine / CryAudioSystem / implementations / CryAudioImplWwise / ATLEntities . h <nl> ppp b / Code / CryEngine / CryAudioSystem / implementations / CryAudioImplWwise / ATLEntities . h <nl> namespace Impl <nl> namespace Wwise <nl> { <nl> <nl> - struct SAudioObject final : public IAudioObject , public CPoolObject < SAudioObject > <nl> + class CAudioObject final : public IAudioObject , public CPoolObject < CAudioObject > <nl> { <nl> + public : <nl> + <nl> typedef std : : map < AkAuxBusID , float > EnvironmentImplMap ; <nl> <nl> - explicit SAudioObject ( AkGameObjectID const _id ) <nl> - : id ( _id ) <nl> - , bNeedsToUpdateEnvironments ( false ) <nl> + explicit CAudioObject ( AkGameObjectID const id ) <nl> + : m_id ( id ) <nl> + , m_bNeedsToUpdateEnvironments ( false ) <nl> { } <nl> <nl> - virtual ~ SAudioObject ( ) override = default ; <nl> + virtual ~ CAudioObject ( ) override = default ; <nl> <nl> - SAudioObject ( SAudioObject const & ) = delete ; <nl> - SAudioObject ( SAudioObject & & ) = delete ; <nl> - SAudioObject & operator = ( SAudioObject const & ) = delete ; <nl> - SAudioObject & operator = ( SAudioObject & & ) = delete ; <nl> + CAudioObject ( CAudioObject const & ) = delete ; <nl> + CAudioObject ( CAudioObject & & ) = delete ; <nl> + CAudioObject & operator = ( CAudioObject const & ) = delete ; <nl> + CAudioObject & operator = ( CAudioObject & & ) = delete ; <nl> <nl> / / IAudioObject <nl> virtual ERequestStatus Update ( ) override ; <nl> struct SAudioObject final : public IAudioObject , public CPoolObject < SAudioObject <nl> virtual ERequestStatus StopFile ( IAudioStandaloneFile * const pIFile ) override { return eRequestStatus_Success ; } <nl> / / ~ IAudioObject <nl> <nl> - AkGameObjectID const id ; <nl> - bool bNeedsToUpdateEnvironments ; <nl> - EnvironmentImplMap environemntImplAmounts ; <nl> + AkGameObjectID const m_id ; <nl> + bool m_bNeedsToUpdateEnvironments ; <nl> + EnvironmentImplMap m_environemntImplAmounts ; <nl> <nl> static AkGameObjectID s_dummyGameObjectId ; <nl> <nl> private : <nl> + <nl> ERequestStatus PostEnvironmentAmounts ( ) ; <nl> } ; <nl> <nl> mmm a / Code / CryEngine / CryAudioSystem / implementations / CryAudioImplWwise / AudioImpl . cpp <nl> ppp b / Code / CryEngine / CryAudioSystem / implementations / CryAudioImplWwise / AudioImpl . cpp <nl> ERequestStatus CAudioImpl : : Init ( uint32 const audioObjectPoolSize , uint32 const e <nl> / / we will need to shut down what has been initialized so far . 
Therefore make sure to call Shutdown ( ) before returning eARS_FAILURE ! <nl> <nl> MEMSTAT_CONTEXT ( EMemStatContextTypes : : MSC_Other , 0 , " Wwise Audio Object Pool " ) ; <nl> - SAudioObject : : CreateAllocator ( audioObjectPoolSize ) ; <nl> + CAudioObject : : CreateAllocator ( audioObjectPoolSize ) ; <nl> <nl> MEMSTAT_CONTEXT ( EMemStatContextTypes : : MSC_Other , 0 , " Wwise Audio Event Pool " ) ; <nl> SAudioEvent : : CreateAllocator ( eventPoolSize ) ; <nl> ERequestStatus CAudioImpl : : Init ( uint32 const audioObjectPoolSize , uint32 const e <nl> / / } <nl> <nl> / / Register the DummyGameObject used for the events that don ' t need a location in the game world <nl> - wwiseResult = AK : : SoundEngine : : RegisterGameObj ( SAudioObject : : s_dummyGameObjectId , " DummyObject " ) ; <nl> + wwiseResult = AK : : SoundEngine : : RegisterGameObj ( CAudioObject : : s_dummyGameObjectId , " DummyObject " ) ; <nl> <nl> if ( wwiseResult ! = AK_Success ) <nl> { <nl> ERequestStatus CAudioImpl : : ShutDown ( ) <nl> if ( AK : : SoundEngine : : IsInitialized ( ) ) <nl> { <nl> / / UnRegister the DummyGameObject <nl> - wwiseResult = AK : : SoundEngine : : UnregisterGameObj ( SAudioObject : : s_dummyGameObjectId ) ; <nl> + wwiseResult = AK : : SoundEngine : : UnregisterGameObj ( CAudioObject : : s_dummyGameObjectId ) ; <nl> <nl> if ( wwiseResult ! = AK_Success ) <nl> { <nl> ERequestStatus CAudioImpl : : Release ( ) <nl> <nl> delete this ; <nl> <nl> - SAudioObject : : FreeMemoryPool ( ) ; <nl> + CAudioObject : : FreeMemoryPool ( ) ; <nl> SAudioEvent : : FreeMemoryPool ( ) ; <nl> <nl> return eRequestStatus_Success ; <nl> char const * const CAudioImpl : : GetAudioFileLocation ( SAudioFileEntryInfo * const pF <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> IAudioObject * CAudioImpl : : ConstructGlobalAudioObject ( ) <nl> { <nl> - return new SAudioObject ( AK_INVALID_GAME_OBJECT ) ; <nl> + return new CAudioObject ( AK_INVALID_GAME_OBJECT ) ; <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> IAudioObject * CAudioImpl : : ConstructAudioObject ( char const * const szAudioObjectNa <nl> AK : : SoundEngine : : RegisterGameObj ( id ) ; <nl> # endif / / INCLUDE_WWISE_IMPL_PRODUCTION_CODE <nl> <nl> - return static_cast < IAudioObject * > ( new SAudioObject ( id ) ) ; <nl> + return static_cast < IAudioObject * > ( new CAudioObject ( id ) ) ; <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> void CAudioImpl : : DestructAudioObject ( IAudioObject const * const pAudioObject ) <nl> { <nl> - SAudioObject const * pWwiseAudioObject = static_cast < SAudioObject const * > ( pAudioObject ) ; <nl> - AKRESULT const wwiseResult = AK : : SoundEngine : : UnregisterGameObj ( pWwiseAudioObject - > id ) ; <nl> + CAudioObject const * pWwiseAudioObject = static_cast < CAudioObject const * > ( pAudioObject ) ; <nl> + AKRESULT const wwiseResult = AK : : SoundEngine : : UnregisterGameObj ( pWwiseAudioObject - > m_id ) ; <nl> if ( ! IS_WWISE_OK ( wwiseResult ) ) <nl> { <nl> g_audioImplLogger . 
Log ( eAudioLogType_Warning , " Wwise UnregisterGameObj failed with AKRESULT : % d " , wwiseResult ) ; <nl> void CAudioImpl : : GetMemoryInfo ( SAudioImplMemoryInfo & memoryInfo ) const <nl> memoryInfo . secondaryPoolAllocations = 0 ; <nl> # endif / / PROVIDE_AUDIO_IMPL_SECONDARY_POOL <nl> { <nl> - auto & allocator = SAudioObject : : GetAllocator ( ) ; <nl> + auto & allocator = CAudioObject : : GetAllocator ( ) ; <nl> auto mem = allocator . GetTotalMemory ( ) ; <nl> auto pool = allocator . GetCounts ( ) ; <nl> memoryInfo . poolUsedObjects = pool . nUsed ; <nl> mmm a / Code / CryPlugins / CryDefaultEntities / Module / Audio / AudioAreaAmbienceEntity . cpp <nl> ppp b / Code / CryPlugins / CryDefaultEntities / Module / Audio / AudioAreaAmbienceEntity . cpp <nl> void CAudioAreaAmbienceEntity : : ProcessEvent ( SEntityEvent & event ) <nl> m_areaState = EAreaState : : Inside ; <nl> <nl> UpdateRtpc ( 1 . f ) ; <nl> - DisableObstruction ( ) ; <nl> } <nl> break ; <nl> case ENTITY_EVENT_MOVEINSIDEAREA : <nl> void CAudioAreaAmbienceEntity : : ProcessEvent ( SEntityEvent & event ) <nl> case ENTITY_EVENT_LEAVEAREA : <nl> { <nl> m_areaState = EAreaState : : Outside ; <nl> - SetObstruction ( ) ; <nl> } <nl> break ; <nl> case ENTITY_EVENT_LEAVENEARAREA : <nl> void CAudioAreaAmbienceEntity : : OnResetState ( ) <nl> gEnv - > pAudioSystem - > GetAudioTriggerId ( m_stopTriggerName , m_stopTriggerId ) ; <nl> gEnv - > pAudioSystem - > GetAudioParameterId ( m_rtpcName , m_rtpcId ) ; <nl> gEnv - > pAudioSystem - > GetAudioParameterId ( m_globalRtpcName , m_globalRtpcId ) ; <nl> - <nl> gEnv - > pAudioSystem - > GetAudioEnvironmentId ( m_environmentName , m_environmentId ) ; <nl> <nl> - m_obstructionSwitchId = AudioEntitiesUtils : : GetObstructionOcclusionSwitch ( ) ; <nl> - <nl> const auto & stateIds = AudioEntitiesUtils : : GetObstructionOcclusionStateIds ( ) ; <nl> - audioProxy . SetSwitchState ( m_obstructionSwitchId , stateIds [ m_obstructionType ] ) ; <nl> + audioProxy . SetSwitchState ( AudioEntitiesUtils : : GetObstructionOcclusionSwitch ( ) , stateIds [ m_obstructionType ] ) ; <nl> <nl> audioProxy . SetFadeDistance ( m_rtpcDistance ) ; <nl> audioProxy . SetEnvironmentFadeDistance ( m_environmentDistance ) ; <nl> void CAudioAreaAmbienceEntity : : UpdateFadeValue ( float distance ) <nl> } <nl> } <nl> <nl> - void CAudioAreaAmbienceEntity : : SetObstruction ( ) <nl> - { <nl> - auto pAudioProxy = GetEntity ( ) - > GetComponent < IEntityAudioComponent > ( ) ; <nl> - if ( pAudioProxy = = nullptr ) <nl> - return ; <nl> - <nl> - const auto & stateIds = AudioEntitiesUtils : : GetObstructionOcclusionStateIds ( ) ; <nl> - pAudioProxy - > SetSwitchState ( m_obstructionSwitchId , stateIds [ m_obstructionType ] ) ; <nl> - } <nl> - <nl> - void CAudioAreaAmbienceEntity : : DisableObstruction ( ) <nl> - { <nl> - auto pAudioProxy = GetEntity ( ) - > GetComponent < IEntityAudioComponent > ( ) ; <nl> - if ( pAudioProxy = = nullptr ) <nl> - return ; <nl> - <nl> - const auto & stateIds = AudioEntitiesUtils : : GetObstructionOcclusionStateIds ( ) ; <nl> - pAudioProxy - > SetSwitchState ( m_obstructionSwitchId , stateIds [ 0 ] ) ; <nl> - } <nl> - <nl> void CAudioAreaAmbienceEntity : : SerializeProperties ( Serialization : : IArchive & archive ) <nl> { <nl> archive ( m_bEnabled , " Enabled " , " Enabled " ) ; <nl> mmm a / Code / CryPlugins / CryDefaultEntities / Module / Audio / AudioAreaAmbienceEntity . h <nl> ppp b / Code / CryPlugins / CryDefaultEntities / Module / Audio / AudioAreaAmbienceEntity . 
h <nl> class CAudioAreaAmbienceEntity final <nl> <nl> const bool IsPlaying ( ) const { return m_playingTriggerId ! = CryAudio : : InvalidControlId ; } <nl> <nl> - void SetObstruction ( ) ; <nl> - void DisableObstruction ( ) ; <nl> - <nl> protected : <nl> CryAudio : : ControlId m_playTriggerId = CryAudio : : InvalidControlId ; <nl> CryAudio : : ControlId m_stopTriggerId = CryAudio : : InvalidControlId ; <nl> class CAudioAreaAmbienceEntity final <nl> <nl> CryAudio : : ControlId m_environmentId = CryAudio : : InvalidControlId ; <nl> <nl> - CryAudio : : ControlId m_obstructionSwitchId = CryAudio : : InvalidControlId ; <nl> - <nl> EAreaState m_areaState = EAreaState : : Outside ; <nl> float m_fadeValue = 0 . 0f ; <nl> <nl> mmm a / Code / CryPlugins / CryDefaultEntities / Module / Audio / AudioAreaEntity . cpp <nl> ppp b / Code / CryPlugins / CryDefaultEntities / Module / Audio / AudioAreaEntity . cpp <nl> void CAudioAreaEntity : : ProcessEvent ( SEntityEvent & event ) <nl> m_fadeValue = 1 . 0f ; <nl> ActivateFlowNodeOutput ( eOutputPorts_OnNearToInside , TFlowInputData ( true ) ) ; <nl> ActivateFlowNodeOutput ( eOutputPorts_FadeValue , TFlowInputData ( m_fadeValue ) ) ; <nl> - UpdateObstruction ( ) ; <nl> break ; <nl> <nl> case ENTITY_EVENT_MOVEINSIDEAREA : <nl> void CAudioAreaEntity : : ProcessEvent ( SEntityEvent & event ) <nl> case ENTITY_EVENT_LEAVEAREA : <nl> m_areaState = EAreaState : : Near ; <nl> ActivateFlowNodeOutput ( eOutputPorts_OnInsideToNear , TFlowInputData ( true ) ) ; <nl> - UpdateObstruction ( ) ; <nl> break ; <nl> <nl> case ENTITY_EVENT_LEAVENEARAREA : <nl> void CAudioAreaEntity : : OnResetState ( ) <nl> ControlId environmentId = InvalidEnvironmentId ; <nl> gEnv - > pAudioSystem - > GetAudioTriggerId ( m_environmentName , environmentId ) ; <nl> <nl> - / / Reset values <nl> + const auto & stateIds = AudioEntitiesUtils : : GetObstructionOcclusionStateIds ( ) ; <nl> + m_pProxy - > SetSwitchState ( AudioEntitiesUtils : : GetObstructionOcclusionSwitch ( ) , stateIds [ m_obstructionType ] ) ; <nl> + <nl> m_pProxy - > SetFadeDistance ( m_fadeDistance ) ; <nl> m_pProxy - > SetEnvironmentFadeDistance ( m_environmentFadeDistance ) ; <nl> <nl> void CAudioAreaEntity : : OnResetState ( ) <nl> { <nl> SetEnvironmentId ( InvalidEnvironmentId ) ; <nl> } <nl> - <nl> - UpdateObstruction ( ) ; <nl> } <nl> <nl> void CAudioAreaEntity : : SetEnvironmentId ( const ControlId environmentId ) <nl> void CAudioAreaEntity : : SetEnvironmentId ( const ControlId environmentId ) <nl> <nl> } <nl> <nl> - void CAudioAreaEntity : : UpdateObstruction ( ) <nl> - { <nl> - const auto & stateIds = AudioEntitiesUtils : : GetObstructionOcclusionStateIds ( ) ; <nl> - if ( m_areaState = = EAreaState : : Near ) <nl> - { <nl> - / / Enable obstruction <nl> - m_pProxy - > SetSwitchState ( AudioEntitiesUtils : : GetObstructionOcclusionSwitch ( ) , stateIds [ m_obstructionType ] ) ; <nl> - } <nl> - else if ( m_areaState = = EAreaState : : Inside ) <nl> - { <nl> - / / Disable obstruction <nl> - m_pProxy - > SetSwitchState ( AudioEntitiesUtils : : GetObstructionOcclusionSwitch ( ) , stateIds [ eOcclusionType_Ignore ] ) ; <nl> - } <nl> - } <nl> - <nl> void CAudioAreaEntity : : UpdateFadeValue ( const float distance ) <nl> { <nl> if ( ! m_bEnabled ) <nl> mmm a / Code / CryPlugins / CryDefaultEntities / Module / Audio / AudioAreaEntity . h <nl> ppp b / Code / CryPlugins / CryDefaultEntities / Module / Audio / AudioAreaEntity . 
h <nl> class CAudioAreaEntity final <nl> <nl> private : <nl> void SetEnvironmentId ( const CryAudio : : ControlId environmentId ) ; <nl> - void UpdateObstruction ( ) ; <nl> void UpdateFadeValue ( const float distance ) ; <nl> <nl> bool m_bEnabled = true ; <nl>
|
! XI Copying / / ce / audio to main ( / / ce / main ) ( 1479592 , 1479596 ) ( Approved by thomasw )
|
CRYTEK/CRYENGINE
|
3ec8b9183dbce7368e4bec493638211a51feaf47
|
2016-12-21T14:50:24Z
|
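The CRYENGINE record above adds an `s_OcclusionMinDistance` cvar and moves the distance checks into `CanRunObstructionOcclusion()`, so occlusion ray casts are skipped both for audio objects very close to the listener and for those beyond the maximum distance, while the adaptive mode picks its ray budget from distance bands. A minimal sketch of that gating in Python, using the default cvar values visible in the diff; the function names and band labels are illustrative, not engine API:

```python
# Illustrative sketch of the distance-gated occlusion logic from the diff above.
# The thresholds mirror the defaults shown there (min 0.1 m, max 500 m,
# high 10 m, medium 80 m); everything else is hypothetical.

OCCLUSION_MIN_DISTANCE = 0.1
OCCLUSION_MAX_DISTANCE = 500.0
OCCLUSION_HIGH_DISTANCE = 10.0
OCCLUSION_MEDIUM_DISTANCE = 80.0


def can_run_occlusion(occlusion_type: str, listener_distance: float,
                      can_issue_rays: bool = True) -> bool:
    """Mirrors the shape of CPropagationProcessor::CanRunObstructionOcclusion()."""
    return (
        occlusion_type not in ("None", "Ignore")
        and OCCLUSION_MIN_DISTANCE < listener_distance < OCCLUSION_MAX_DISTANCE
        and can_issue_rays
    )


def adaptive_occlusion_type(listener_distance: float) -> str:
    """Pick the ray-count quality band from the distance to the listener."""
    if listener_distance < OCCLUSION_HIGH_DISTANCE:
        return "High"
    if listener_distance < OCCLUSION_MEDIUM_DISTANCE:
        return "Medium"
    return "Low"


if __name__ == "__main__":
    for d in (0.05, 5.0, 50.0, 200.0, 600.0):
        print(d, can_run_occlusion("Adaptive", d), adaptive_occlusion_type(d))
```

Objects closer than the minimum or farther than the maximum simply keep obstruction and occlusion at zero, which is exactly what the rewritten `Update()` path in the diff does.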
mmm a / Source / Readers / CompositeDataReader / CompositeDataReader . cpp <nl> ppp b / Source / Readers / CompositeDataReader / CompositeDataReader . cpp <nl> <nl> <nl> namespace Microsoft { namespace MSR { namespace CNTK { <nl> <nl> - CompositeDataReader : : CompositeDataReader ( const std : : string & precision ) : m_layout ( make_shared < MBLayout > ( ) ) , <nl> - m_precision ( precision ) , <nl> + CompositeDataReader : : CompositeDataReader ( const ConfigParameters & config , MemoryProviderPtr provider ) : m_layout ( make_shared < MBLayout > ( ) ) , <nl> m_corpus ( std : : make_shared < CorpusDescriptor > ( ) ) , <nl> - m_endOfEpoch ( false ) <nl> + m_provider ( provider ) <nl> { <nl> - } <nl> - <nl> - void CompositeDataReader : : Init ( const ConfigParameters & config ) <nl> - { <nl> - m_provider = std : : make_shared < HeapMemoryProvider > ( ) ; <nl> - <nl> - / / if prefetch - launching asynchronously , <nl> - / / otherwise deferring - synchronous execution during . get ( ) call <nl> - bool prefetch = config ( L " prefetch " , true ) ; <nl> - m_launchType = prefetch ? launch : : async : launch : : deferred ; <nl> - <nl> - / / Layout can be asked before actual reading . <nl> - / / TODO : should be gone when SGD changed . <nl> - m_layout - > Init ( 0 , 0 ) ; <nl> - <nl> / / Identifying packing mode . <nl> bool frameMode = config ( L " frameMode " , true ) ; <nl> bool truncated = config ( L " truncated " , false ) ; <nl> void CompositeDataReader : : Init ( const ConfigParameters & config ) <nl> else if ( truncated ) <nl> { <nl> m_packingMode = PackingMode : : truncated ; <nl> + m_truncationLength = config ( L " truncationLength " , 0 ) ; <nl> + if ( m_truncationLength = = 0 ) <nl> + { <nl> + InvalidArgument ( " Truncation length cannot be 0 . " ) ; <nl> + } <nl> } <nl> else <nl> { <nl> void CompositeDataReader : : Init ( const ConfigParameters & config ) <nl> } <nl> } <nl> <nl> - void CompositeDataReader : : StartMinibatchLoop ( size_t mbSize , size_t epoch , size_t requestedEpochSamples ) <nl> - { <nl> - return StartDistributedMinibatchLoop ( mbSize , epoch , 0 , 1 , requestedEpochSamples ) ; <nl> - } <nl> - <nl> - void CompositeDataReader : : StartDistributedMinibatchLoop ( <nl> - size_t requestedMBSize , <nl> - size_t epoch , <nl> - size_t subsetNum , <nl> - size_t numSubsets , <nl> - size_t requestedEpochSamples / * = requestDataSize * / ) <nl> - { <nl> - EpochConfiguration config ; <nl> - config . m_workerRank = subsetNum ; <nl> - config . m_numberOfWorkers = numSubsets ; <nl> - config . m_minibatchSizeInSamples = requestedMBSize ; <nl> - config . m_totalEpochSizeInSamples = requestedEpochSamples ; <nl> - config . m_epochIndex = epoch ; <nl> - <nl> - / / Make sure there are no outstanding reads . <nl> - if ( m_prefetchTask . valid ( ) ) <nl> - { <nl> - m_prefetchTask . wait ( ) ; <nl> - } <nl> - <nl> - m_endOfEpoch = false ; <nl> - <nl> - / / Nothing is running , let ' s reconfigure the packer according to the new epoch . <nl> - StartEpoch ( config ) ; <nl> - <nl> - / / Ok , start reading in sync or async manner . <nl> - m_prefetchTask = std : : async ( m_launchType , [ this ] ( ) <nl> - { <nl> - return m_packer - > ReadMinibatch ( ) ; <nl> - } ) ; <nl> - } <nl> - <nl> - bool CompositeDataReader : : GetMinibatch ( StreamMinibatchInputs & matrices ) <nl> - { <nl> - if ( m_endOfEpoch ) <nl> - { <nl> - return false ; <nl> - } <nl> - <nl> - / / Check that all matrices have the same device id . <nl> - / / If not we should inject the IMemoryProvider per stream . 
<nl> - int deviceId = matrices . begin ( ) - > second . matrix - > GetDeviceId ( ) ; <nl> - for ( auto mx : matrices ) <nl> - { <nl> - if ( mx . second . matrix - > GetDeviceId ( ) ! = deviceId ) <nl> - { <nl> - assert ( false ) ; <nl> - } <nl> - } <nl> - <nl> - assert ( m_prefetchTask . valid ( ) ) ; <nl> - <nl> - Minibatch minibatch = m_prefetchTask . get ( ) ; <nl> - if ( minibatch . m_endOfEpoch ) <nl> - { <nl> - m_endOfEpoch = true ; <nl> - if ( minibatch . m_data . empty ( ) ) <nl> - { <nl> - return false ; <nl> - } <nl> - } <nl> - <nl> - if ( ! minibatch . m_data . empty ( ) ) <nl> - { <nl> - / / TODO : Use alternating pinned buffer in the packer , do not copy anything , but pack into the pinned memory . <nl> - / / Copy returned minibatch to the matrices . <nl> - for ( const auto & mx : matrices ) <nl> - { <nl> - assert ( m_nameToStreamId . find ( mx . first ) ! = m_nameToStreamId . end ( ) ) ; <nl> - size_t streamId = m_nameToStreamId [ mx . first ] ; <nl> - <nl> - const auto & stream = minibatch . m_data [ streamId ] ; <nl> - m_layout - > CopyFrom ( stream - > m_layout ) ; <nl> - <nl> - size_t columnNumber = m_layout - > GetNumCols ( ) ; <nl> - size_t rowNumber = m_streams [ streamId ] - > m_sampleLayout - > GetNumElements ( ) ; <nl> - <nl> - if ( m_precision = = " float " ) <nl> - { <nl> - auto * data = reinterpret_cast < const float * > ( stream - > m_data ) ; <nl> - matrices . GetInputMatrix < float > ( mx . first ) . SetValue ( rowNumber , columnNumber , mx . second . matrix - > GetDeviceId ( ) , const_cast < float * > ( data ) , matrixFlagNormal ) ; <nl> - } <nl> - else <nl> - { <nl> - assert ( m_precision = = " double " ) ; <nl> - auto * data = reinterpret_cast < const double * > ( stream - > m_data ) ; <nl> - matrices . GetInputMatrix < double > ( mx . first ) . SetValue ( rowNumber , columnNumber , mx . second . matrix - > GetDeviceId ( ) , const_cast < double * > ( data ) , matrixFlagNormal ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - m_prefetchTask = std : : async ( m_launchType , [ this ] ( ) <nl> - { <nl> - return m_packer - > ReadMinibatch ( ) ; <nl> - } ) ; <nl> - <nl> - return ! minibatch . m_data . empty ( ) ; <nl> - } <nl> - <nl> - bool CompositeDataReader : : DataEnd ( ) <nl> + std : : vector < StreamDescriptionPtr > CompositeDataReader : : GetStreamDescriptions ( ) <nl> { <nl> - / / Note : Return value never used . <nl> - return false ; <nl> + return m_streams ; <nl> } <nl> <nl> - void CompositeDataReader : : CopyMBLayoutTo ( MBLayoutPtr layout ) <nl> + Minibatch CompositeDataReader : : ReadMinibatch ( ) <nl> { <nl> - layout - > CopyFrom ( m_layout ) ; <nl> - } <nl> - <nl> - size_t CompositeDataReader : : GetNumParallelSequences ( ) <nl> - { <nl> - return m_layout - > GetNumParallelSequences ( ) ; <nl> + return m_packer - > ReadMinibatch ( ) ; <nl> } <nl> <nl> void CompositeDataReader : : CreateDeserializers ( const ConfigParameters & readerConfig ) <nl> IDataDeserializerPtr CompositeDataReader : : CreateDeserializer ( const ConfigParamet <nl> return IDataDeserializerPtr ( d ) ; <nl> } <nl> <nl> - void CompositeDataReader : : StartEpoch ( const EpochConfiguration & config ) <nl> + void CompositeDataReader : : StartEpoch ( const EpochConfiguration & cfg ) <nl> { <nl> + EpochConfiguration config = cfg ; <nl> + <nl> if ( config . m_totalEpochSizeInSamples < = 0 ) <nl> { <nl> RuntimeError ( " Unsupported minibatch size ' % d ' . " , ( int ) config . 
m_totalEpochSizeInSamples ) ; <nl> void CompositeDataReader : : StartEpoch ( const EpochConfiguration & config ) <nl> break ; <nl> case PackingMode : : truncated : <nl> { <nl> + config . m_truncationSize = m_truncationLength ; <nl> m_packer = std : : make_shared < TruncatedBPTTPacker > ( <nl> m_provider , <nl> m_randomizer , <nl> void CompositeDataReader : : StartEpoch ( const EpochConfiguration & config ) <nl> default : <nl> LogicError ( " Unsupported type of packer ' % d ' . " , ( int ) m_packingMode ) ; <nl> } <nl> - } <nl> <nl> - } } } <nl> + m_packer - > StartEpoch ( config ) ; <nl> + } <nl> <nl> + } } } <nl> \ No newline at end of file <nl> mmm a / Source / Readers / CompositeDataReader / CompositeDataReader . h <nl> ppp b / Source / Readers / CompositeDataReader / CompositeDataReader . h <nl> <nl> # include < string > <nl> # include < future > <nl> # include " DataReader . h " <nl> + # include < Reader . h > <nl> <nl> namespace Microsoft { namespace MSR { namespace CNTK { <nl> <nl> struct Minibatch ; <nl> / / TODO : Add transformers as the next step . <nl> / / TODO : Same code as in ReaderLib shim , the one in the ReaderLib will be deleted as the next step . <nl> / / TODO : Change this interface when SGD is changed . <nl> - class CompositeDataReader : public IDataReader , protected Plugin , public ScriptableObjects : : Object <nl> + class CompositeDataReader : public Reader , protected Plugin <nl> { <nl> public : <nl> - CompositeDataReader ( const std : : string & precision ) ; <nl> + CompositeDataReader ( const ConfigParameters & parameters , MemoryProviderPtr provider ) ; <nl> <nl> - / / Currently we do not support BS configuration . <nl> - virtual void Init ( const ScriptableObjects : : IConfigRecord & / * config * / ) override <nl> - { <nl> - assert ( false ) ; <nl> - } <nl> - <nl> - virtual void Init ( const ConfigParameters & config ) override ; <nl> + / / Describes the streams this reader produces . <nl> + std : : vector < StreamDescriptionPtr > GetStreamDescriptions ( ) override ; <nl> <nl> - virtual void Destroy ( ) override <nl> - { <nl> - delete this ; <nl> - } <nl> + / / Starts a new epoch with the provided configuration <nl> + void StartEpoch ( const EpochConfiguration & config ) override ; <nl> <nl> - virtual void StartMinibatchLoop ( size_t mbSize , size_t epoch , size_t requestedEpochSamples = requestDataSize ) override ; <nl> - virtual void StartDistributedMinibatchLoop ( size_t requestedMBSize , size_t epoch , size_t subsetNum , size_t numSubsets , size_t requestedEpochSamples ) override ; <nl> - <nl> - virtual bool SupportsDistributedMBRead ( ) const override <nl> - { <nl> - return true ; <nl> - } <nl> - <nl> - virtual bool GetMinibatch ( StreamMinibatchInputs & matrices ) override ; <nl> - virtual bool DataEnd ( ) override ; <nl> - void CopyMBLayoutTo ( MBLayoutPtr ) override ; <nl> - virtual size_t GetNumParallelSequences ( ) override ; <nl> + / / Reads a minibatch that contains data across all streams . <nl> + Minibatch ReadMinibatch ( ) override ; <nl> <nl> private : <nl> void CreateDeserializers ( const ConfigParameters & readerConfig ) ; <nl> IDataDeserializerPtr CreateDeserializer ( const ConfigParameters & readerConfig , bool primary ) ; <nl> - void StartEpoch ( const EpochConfiguration & config ) ; <nl> + <nl> <nl> enum class PackingMode <nl> { <nl> mmm a / Source / Readers / CompositeDataReader / Exports . cpp <nl> ppp b / Source / Readers / CompositeDataReader / Exports . cpp <nl> <nl> # define DATAREADER_EXPORTS <nl> # include " DataReader . 
h " <nl> # include " CompositeDataReader . h " <nl> + # include " ReaderShim . h " <nl> + # include " HeapMemoryProvider . h " <nl> <nl> namespace Microsoft { namespace MSR { namespace CNTK { <nl> <nl> + auto factory = [ ] ( const ConfigParameters & parameters ) - > ReaderPtr <nl> + { <nl> + return std : : make_shared < CompositeDataReader > ( parameters , std : : make_shared < HeapMemoryProvider > ( ) ) ; <nl> + } ; <nl> + <nl> extern " C " DATAREADER_API void GetReaderF ( IDataReader * * preader ) <nl> { <nl> - * preader = new CompositeDataReader ( " float " ) ; <nl> + * preader = new ReaderShim < float > ( factory ) ; <nl> } <nl> <nl> extern " C " DATAREADER_API void GetReaderD ( IDataReader * * preader ) <nl> { <nl> - * preader = new CompositeDataReader ( " double " ) ; <nl> + * preader = new ReaderShim < double > ( factory ) ; <nl> } <nl> <nl> } } } <nl> mmm a / Tests / EndToEndTests / Speech / ExperimentalHtkmlfReader / LSTM / FullUtterance / cntk . cntk <nl> ppp b / Tests / EndToEndTests / Speech / ExperimentalHtkmlfReader / LSTM / FullUtterance / cntk . cntk <nl> speechTrain = [ <nl> ] <nl> <nl> # define network using BrainScript <nl> - BrainScriptNetworkBuilder = [ <nl> + BrainScriptNetworkBuilder = [ <nl> <nl> # import some namespaces <nl> # TODO : allow to say import BS . RNNs LSTMP or import BS . RNNs to import all ( literally creates new dict members mirroring those ) <nl> - RecurrentLSTMP = BS . RNNs . RecurrentLSTMP <nl> + RecurrentLSTMP2 = BS . RNNs . RecurrentLSTMP2 <nl> Parameters = BS . Parameters <nl> <nl> useSelfStabilization = true <nl> speechTrain = [ <nl> labelDim = 132 <nl> <nl> / / hidden dimensions <nl> - cellDim = 1024 <nl> - hiddenDim = 256 <nl> + innerCellDim = 1024 <nl> + hiddenDim = 256 <nl> numLSTMLayers = 3 / / number of hidden LSTM model layers <nl> <nl> / / features <nl> speechTrain = [ <nl> / / define the stack of hidden LSTM layers <nl> LSTMoutput [ k : 1 . . numLSTMLayers ] = <nl> if k = = 1 <nl> - then RecurrentLSTMP ( baseFeatDim , hiddenDim , cellDim , featNorm , enableSelfStabilization = useSelfStabilization ) <nl> - else RecurrentLSTMP ( hiddenDim , hiddenDim , cellDim , LSTMoutput [ k - 1 ] , enableSelfStabilization = useSelfStabilization ) <nl> + then RecurrentLSTMP2 ( hiddenDim , cellDim = innerCellDim , featNorm , inputDim = baseFeatDim , enableSelfStabilization = useSelfStabilization ) . h <nl> + else RecurrentLSTMP2 ( hiddenDim , cellDim = innerCellDim , LSTMoutput [ k - 1 ] , inputDim = hiddenDim , enableSelfStabilization = useSelfStabilization ) . h <nl> <nl> / / and add a softmax layer on top <nl> - W ( x ) = Parameters . WeightParam ( labelDim , hiddenDim ) * Parameters . Stabilize ( x , enabled = useSelfStabilization ) <nl> - B = Parameters . BiasParam ( labelDim ) <nl> + W ( x ) = Parameters . WeightParam ( labelDim , hiddenDim ) * Parameters . Stabilize ( x , enabled = useSelfStabilization ) <nl> + B = Parameters . BiasParam ( labelDim ) <nl> <nl> z = W ( LSTMoutput [ numLSTMLayers ] ) + B ; / / top - level input to Softmax <nl> <nl> / / training <nl> - cr = CrossEntropyWithSoftmax ( labels , z , tag = ' criterion ' ) / / this is the objective <nl> - Err = ErrorPrediction ( labels , z , tag = ' eval ' ) / / this also gets tracked <nl> + useExplicitCriterion = true <nl> + crNode = CrossEntropyWithSoftmax ( labels , z ) / / this is the objective , as a node <nl> + crExplicit = - ( ReducePlus ( labels . 
* LogSoftmax ( z ) ) ) / / manually - defined per - sample objective <nl> + cr = Pass ( if useExplicitCriterion then crExplicit else crNode , tag = ' criterion ' ) <nl> + Err = ErrorPrediction ( labels , z , tag = ' evaluation ' ) / / this also gets tracked <nl> <nl> / / decoding <nl> logPrior = LogPrior ( labels ) <nl> - ScaledLogLikelihood = Minus ( z , logPrior , tag = ' output ' ) / / sadly we can ' t say x - y since we want to assign a tag <nl> + ScaledLogLikelihood = Minus ( z , logPrior , tag = ' output ' ) / / sadly we can ' t say x - y since we want to assign a tag <nl> ] <nl> ] <nl>
|
Adapting to the latest support of different MBLayouts
|
microsoft/CNTK
|
6fbce83eebbdbf69c782632aadc17c408a84ade5
|
2016-05-02T07:55:11Z
|
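The CNTK record above swaps the built-in `CrossEntropyWithSoftmax` node for an explicitly written criterion, `-(ReducePlus(labels .* LogSoftmax(z)))`. For a one-hot label the two expressions give the same number; a small NumPy check of that identity (plain NumPy, not CNTK; the seed, the 132-class dimension taken from the config, and the label index are arbitrary):

```python
# Numeric check that the explicit BrainScript criterion
#   crExplicit = -(ReducePlus(labels .* LogSoftmax(z)))
# matches what CrossEntropyWithSoftmax computes for a one-hot label.
import numpy as np

def log_softmax(z):
    z = z - z.max()                      # stabilize before exponentiating
    return z - np.log(np.exp(z).sum())

rng = np.random.default_rng(0)
z = rng.normal(size=132)                 # logits, labelDim = 132 as in the config
labels = np.zeros(132)
labels[17] = 1.0                         # one-hot ground truth (index is arbitrary)

explicit = -(labels * log_softmax(z)).sum()
builtin_equivalent = -log_softmax(z)[17]  # negative log-probability of the true class

assert np.isclose(explicit, builtin_equivalent)
print(explicit, builtin_equivalent)
```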
mmm a / src / test / data / script_valid . json <nl> ppp b / src / test / data / script_valid . json <nl> <nl> [ " 2147483647 " , " 1ADD 1 " ] , <nl> [ " - 2147483647 " , " 1ADD 1 " ] , <nl> <nl> + [ " 1 " , " 0x02 0x0100 EQUAL NOT " , " Not the same byte array . . . " ] , <nl> + [ " 1 " , " 0x02 0x0100 NUMEQUAL " , " . . . but they are numerically equal " ] , <nl> + [ " 11 " , " 0x4c 0x03 0x0b0000 NUMEQUAL " ] , <nl> + [ " 0 " , " 0x01 0x80 EQUAL NOT " ] , <nl> + [ " 0 " , " 0x01 0x80 NUMEQUAL " , " Zero numerically equals negative zero " ] , <nl> + [ " 0 " , " 0x02 0x0080 NUMEQUAL " ] , <nl> + [ " 0x03 0x000080 " , " 0x04 0x00000080 NUMEQUAL " ] , <nl> + [ " 0x03 0x100080 " , " 0x04 0x10000080 NUMEQUAL " ] , <nl> + [ " 0x03 0x100000 " , " 0x04 0x10000000 NUMEQUAL " ] , <nl> <nl> [ " NOP " , " NOP 1 " , " The following tests check the if ( stack . size ( ) < N ) tests in each opcode " ] , <nl> [ " 1 " , " IF 1 ENDIF " , " They are here to catch copy - and - paste errors " ] , <nl>
|
More unit tests for NUMEQUAL
|
bitcoin/bitcoin
|
bdd34642dc7e6d5c2d8c7cf6b53e6041b89bf022
|
2013-08-06T06:06:07Z
|
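The bitcoin record above adds test vectors where two stack items differ as byte arrays yet compare equal under NUMEQUAL, because script numbers are little-endian with the sign carried in the top bit of the last byte: 0x0100 still decodes to 1, and 0x80 is negative zero. A small stand-alone decoder that reproduces those comparisons (an illustration of the encoding, not code taken from Bitcoin Core):

```python
# Decode Bitcoin script integers (CScriptNum encoding): little-endian magnitude
# with the sign in the high bit of the last byte, and the empty array meaning 0.

def scriptnum_to_int(data: bytes) -> int:
    if not data:
        return 0
    value = int.from_bytes(data, "little")
    if data[-1] & 0x80:                              # sign bit set -> negative
        value &= ~(0x80 << (8 * (len(data) - 1)))    # clear the sign bit
        value = -value
    return value

cases = [
    (bytes([0x01]), bytes([0x01, 0x00])),   # "1" vs 0x0100: same number, longer encoding
    (bytes([]),     bytes([0x80])),         # zero vs one-byte negative zero
    (bytes([]),     bytes([0x00, 0x80])),   # zero vs two-byte negative zero
]

for a, b in cases:
    print(a.hex() or "(empty)", b.hex(),
          "EQUAL:", a == b,
          "NUMEQUAL:", scriptnum_to_int(a) == scriptnum_to_int(b))
```

Each case prints EQUAL False but NUMEQUAL True, which is the behaviour the new vectors assert.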
mmm a / atom / browser / ui / file_dialog_gtk . cc <nl> ppp b / atom / browser / ui / file_dialog_gtk . cc <nl> class FileChooserDialog { <nl> gtk_file_chooser_set_current_folder ( GTK_FILE_CHOOSER ( dialog_ ) , <nl> settings . default_path . value ( ) . c_str ( ) ) ; <nl> } else { <nl> - gtk_file_chooser_set_current_folder ( GTK_FILE_CHOOSER ( dialog_ ) , <nl> - settings . default_path . DirName ( ) . value ( ) . c_str ( ) ) ; <nl> + if ( settings . default_path . IsAbsolute ( ) ) { <nl> + gtk_file_chooser_set_current_folder ( <nl> + GTK_FILE_CHOOSER ( dialog_ ) , <nl> + settings . default_path . DirName ( ) . value ( ) . c_str ( ) ) ; <nl> + } <nl> + <nl> gtk_file_chooser_set_current_name ( GTK_FILE_CHOOSER ( dialog_ ) , <nl> settings . default_path . BaseName ( ) . value ( ) . c_str ( ) ) ; <nl> } <nl> mmm a / atom / browser / ui / file_dialog_mac . mm <nl> ppp b / atom / browser / ui / file_dialog_mac . mm <nl> void SetupDialog ( NSSavePanel * dialog , <nl> if ( base : : DirectoryExists ( settings . default_path ) ) { <nl> default_dir = base : : SysUTF8ToNSString ( settings . default_path . value ( ) ) ; <nl> } else { <nl> - default_dir = <nl> - base : : SysUTF8ToNSString ( settings . default_path . DirName ( ) . value ( ) ) ; <nl> + if ( settings . default_path . IsAbsolute ( ) ) { <nl> + default_dir = <nl> + base : : SysUTF8ToNSString ( settings . default_path . DirName ( ) . value ( ) ) ; <nl> + } <nl> + <nl> default_filename = <nl> base : : SysUTF8ToNSString ( settings . default_path . BaseName ( ) . value ( ) ) ; <nl> } <nl> mmm a / atom / browser / ui / file_dialog_win . cc <nl> ppp b / atom / browser / ui / file_dialog_win . cc <nl> class FileDialog { <nl> } <nl> } <nl> <nl> - SetDefaultFolder ( settings . default_path ) ; <nl> + if ( settings . default_path . IsAbsolute ( ) ) { <nl> + SetDefaultFolder ( settings . default_path ) ; <nl> + } <nl> } <nl> <nl> bool Show ( atom : : NativeWindow * parent_window ) { <nl> mmm a / docs / api / dialog . md <nl> ppp b / docs / api / dialog . md <nl> shown . <nl> * ` browserWindow ` BrowserWindow ( optional ) <nl> * ` options ` Object <nl> * ` title ` String ( optional ) <nl> - * ` defaultPath ` String ( optional ) <nl> + * ` defaultPath ` String ( optional ) - Absolute directory path , absolute file <nl> + path , or file name to use by default . <nl> * ` buttonLabel ` String ( optional ) - Custom label for the confirmation button , when <nl> left empty the default label will be used . <nl> * ` filters ` [ FileFilter [ ] ] ( structures / file - filter . md ) ( optional ) <nl>
|
Use last selected directory when using default file name in dialog . showSaveDialog ( )
|
electron/electron
|
19555bbab2ae6681e61547a5f912260d65f13243
|
2017-05-25T23:18:23Z
|
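The electron record above makes the GTK, macOS and Windows dialogs treat `defaultPath` the same way: an existing directory sets the dialog folder outright; an absolute file path sets the folder to its parent and the file name to its base name; a bare file name only sets the name, so the dialog keeps the last directory the user picked. A compact sketch of that branching, where the `Dialog` class and the `is_dir` flag are stand-ins for the platform APIs, not real electron code:

```python
# Sketch of the defaultPath handling the three platform dialogs converge on.
import os

class Dialog:
    def __init__(self):
        self.folder = None       # None -> platform keeps its last-used directory
        self.filename = None

def apply_default_path(dialog: Dialog, default_path: str, is_dir: bool) -> None:
    if is_dir:                                   # stands in for DirectoryExists()
        dialog.folder = default_path
    else:
        if os.path.isabs(default_path):          # only absolute paths override the folder
            dialog.folder = os.path.dirname(default_path)
        dialog.filename = os.path.basename(default_path)

d = Dialog()
apply_default_path(d, "report.csv", is_dir=False)       # bare name: folder stays None
print(d.folder, d.filename)

d = Dialog()
apply_default_path(d, "/home/user/report.csv", is_dir=False)
print(d.folder, d.filename)
```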
old mode 100755 <nl> new mode 100644 <nl> old mode 100755 <nl> new mode 100644 <nl> old mode 100755 <nl> new mode 100644 <nl> index 950485eeb . . da40f7025 <nl> mmm a / benchs / bench_gpu_sift1m . py <nl> ppp b / benchs / bench_gpu_sift1m . py <nl> <nl> - # ! / usr / bin / env python2 <nl> - <nl> # Copyright ( c ) Facebook , Inc . and its affiliates . <nl> # <nl> # This source code is licensed under the MIT license found in the <nl> # LICENSE file in the root directory of this source tree . <nl> <nl> - from __future__ import print_function <nl> import os <nl> import time <nl> import numpy as np <nl> old mode 100755 <nl> new mode 100644 <nl> index 6f1fe2900 . . 36971451e <nl> mmm a / benchs / bench_hnsw . py <nl> ppp b / benchs / bench_hnsw . py <nl> <nl> - # ! / usr / bin / env python2 <nl> - <nl> # Copyright ( c ) Facebook , Inc . and its affiliates . <nl> # <nl> # This source code is licensed under the MIT license found in the <nl> # LICENSE file in the root directory of this source tree . <nl> <nl> - from __future__ import print_function <nl> import time <nl> import sys <nl> import numpy as np <nl> old mode 100755 <nl> new mode 100644 <nl> new file mode 100644 <nl> index 000000000 . . 3ad2bbd23 <nl> mmm / dev / null <nl> ppp b / benchs / bench_partition . py <nl> <nl> + <nl> + import time <nl> + import faiss <nl> + import numpy as np <nl> + <nl> + def do_partition ( n , qin , maxval = 65536 , seed = 123 , id_type = ' int64 ' ) : <nl> + print ( <nl> + f " n = { n } qin = { qin } maxval = { maxval } id_type = { id_type } " , <nl> + end = " \ t " , flush = True <nl> + ) <nl> + <nl> + # print ( " seed = " , seed ) <nl> + rs = np . random . RandomState ( seed ) <nl> + vals = rs . randint ( maxval , size = n ) . astype ( ' uint16 ' ) <nl> + ids = ( rs . permutation ( n ) + 12345 ) . astype ( id_type ) <nl> + <nl> + sp = faiss . swig_ptr <nl> + <nl> + tab_a = faiss . AlignedTableUint16 ( ) <nl> + faiss . copy_array_to_AlignedTable ( vals , tab_a ) <nl> + <nl> + nrun = 2000 <nl> + <nl> + times = [ ] <nl> + nerr = 0 <nl> + stats = faiss . cvar . partition_stats <nl> + stats . reset ( ) <nl> + for _run in range ( nrun ) : <nl> + faiss . copy_array_to_AlignedTable ( vals , tab_a ) <nl> + t0 = time . time ( ) <nl> + # print ( " tab a type " , tab_a . get ( ) ) <nl> + if type ( qin ) = = int : <nl> + q = qin <nl> + faiss . CMax_uint16_partition_fuzzy ( <nl> + tab_a . get ( ) , sp ( ids ) , n , q , q , None ) <nl> + else : <nl> + q_min , q_max = qin <nl> + q = np . array ( [ - 1 ] , dtype = ' uint64 ' ) <nl> + thresh2 = faiss . CMax_uint16_partition_fuzzy ( <nl> + tab_a . get ( ) , sp ( ids ) , n , <nl> + q_min , q_max , sp ( q ) <nl> + ) <nl> + q = q [ 0 ] <nl> + <nl> + if not ( q_min < = q < = q_max ) : <nl> + nerr + = 1 <nl> + <nl> + t1 = time . time ( ) <nl> + <nl> + times . append ( t1 - t0 ) <nl> + <nl> + times = np . array ( times [ 100 : ] ) * 1000000 <nl> + <nl> + <nl> + print ( <nl> + f " times { times . mean ( ) : . 3f } µs ( ± { times . std ( ) : . 4f } µs ) nerr = { nerr } " <nl> + f " bissect { stats . bissect_cycles / 1e6 : . 3f } Mcy " <nl> + f " compress { stats . compress_cycles / 1e6 : . 
3f } Mcy " <nl> + ) <nl> + <nl> + do_partition ( 200 , ( 100 , 100 ) ) <nl> + do_partition ( 200 , ( 100 , 150 ) ) <nl> + do_partition ( 2000 , ( 1000 , 1000 ) ) <nl> + do_partition ( 2000 , ( 1000 , 1500 ) ) <nl> + do_partition ( 20000 , ( 10000 , 10000 ) ) <nl> + do_partition ( 20000 , ( 10000 , 15000 ) ) <nl> + <nl> + <nl> + do_partition ( 200 , ( 100 , 100 ) , id_type = ' int32 ' ) <nl> + do_partition ( 200 , ( 100 , 150 ) , id_type = ' int32 ' ) <nl> + do_partition ( 2000 , ( 1000 , 1000 ) , id_type = ' int32 ' ) <nl> + do_partition ( 2000 , ( 1000 , 1500 ) , id_type = ' int32 ' ) <nl> + do_partition ( 20000 , ( 10000 , 10000 ) , id_type = ' int32 ' ) <nl> + do_partition ( 20000 , ( 10000 , 15000 ) , id_type = ' int32 ' ) <nl> old mode 100755 <nl> new mode 100644 <nl> index 4c777076e . . 0396b56b5 <nl> mmm a / benchs / bench_polysemous_1bn . py <nl> ppp b / benchs / bench_polysemous_1bn . py <nl> <nl> - # ! / usr / bin / env python2 <nl> - <nl> # Copyright ( c ) Facebook , Inc . and its affiliates . <nl> # <nl> # This source code is licensed under the MIT license found in the <nl> # LICENSE file in the root directory of this source tree . <nl> <nl> - from __future__ import print_function <nl> import os <nl> import sys <nl> import time <nl> old mode 100755 <nl> new mode 100644 <nl> old mode 100755 <nl> new mode 100644 <nl> index 6d012a7b7 . . 4809f67a3 <nl> mmm a / benchs / bench_scalar_quantizer . py <nl> ppp b / benchs / bench_scalar_quantizer . py <nl> <nl> - # ! / usr / bin / env python2 <nl> - <nl> # Copyright ( c ) Facebook , Inc . and its affiliates . <nl> # <nl> # This source code is licensed under the MIT license found in the <nl> # LICENSE file in the root directory of this source tree . <nl> <nl> - from __future__ import print_function <nl> import time <nl> import numpy as np <nl> import faiss <nl> old mode 100755 <nl> new mode 100644 <nl> old mode 100755 <nl> new mode 100644 <nl> mmm a / conda / faiss / meta . yaml <nl> ppp b / conda / faiss / meta . yaml <nl> outputs : <nl> - scipy <nl> - pytorch <nl> commands : <nl> - - python - m unittest discover - s tests - p " test_ * " <nl> - - python - m unittest discover - s tests - p " torch_ * " <nl> + - python - X faulthandler - m unittest discover - v - s tests - p " test_ * " <nl> + - python - X faulthandler - m unittest discover - v - s tests - p " torch_ * " <nl> source_files : <nl> - tests / <nl> mmm a / contrib / README . md <nl> ppp b / contrib / README . md <nl> Numpy ndarrays can continue to be used in the Faiss python interface after impor <nl> <nl> Tested in ` tests / test_contrib_torch . py ` ( CPU ) and ` gpu / test / test_contrib_torch_gpu . py ` ( GPU ) . <nl> <nl> + # # # inspect_tools . py <nl> + <nl> + Functions to inspect C + + objects wrapped by SWIG . Most often this just means reading <nl> + fields and converting them to the proper python array . <nl> + <nl> + <nl> # # # datasets . py <nl> <nl> ( may require h5py ) <nl> mmm a / contrib / exhaustive_search . py <nl> ppp b / contrib / exhaustive_search . py <nl> <nl> <nl> def knn_ground_truth ( xq , db_iterator , k ) : <nl> " " " Computes the exact KNN search results for a dataset that possibly <nl> - does not fit in RAM but for whihch we have an iterator that <nl> + does not fit in RAM but for which we have an iterator that <nl> returns it block by block . <nl> " " " <nl> t0 = time . time ( ) <nl> def knn_ground_truth ( xq , db_iterator , k ) : <nl> <nl> return rh . D , rh . I <nl> <nl> - def knn ( xq , xb , k , distance_type = faiss . 
METRIC_L2 ) : <nl> - " " " wrapper around the faiss knn functions without index " " " <nl> - nq , d = xq . shape <nl> - nb , d2 = xb . shape <nl> - assert d = = d2 <nl> - <nl> - I = np . empty ( ( nq , k ) , dtype = ' int64 ' ) <nl> - D = np . empty ( ( nq , k ) , dtype = ' float32 ' ) <nl> - <nl> - if distance_type = = faiss . METRIC_L2 : <nl> - heaps = faiss . float_maxheap_array_t ( ) <nl> - heaps . k = k <nl> - heaps . nh = nq <nl> - heaps . val = faiss . swig_ptr ( D ) <nl> - heaps . ids = faiss . swig_ptr ( I ) <nl> - faiss . knn_L2sqr ( <nl> - faiss . swig_ptr ( xq ) , faiss . swig_ptr ( xb ) , <nl> - d , nq , nb , heaps <nl> - ) <nl> - elif distance_type = = faiss . METRIC_INNER_PRODUCT : <nl> - heaps = faiss . float_minheap_array_t ( ) <nl> - heaps . k = k <nl> - heaps . nh = nq <nl> - heaps . val = faiss . swig_ptr ( D ) <nl> - heaps . ids = faiss . swig_ptr ( I ) <nl> - faiss . knn_inner_product ( <nl> - faiss . swig_ptr ( xq ) , faiss . swig_ptr ( xb ) , <nl> - d , nq , nb , heaps <nl> - ) <nl> - return D , I <nl> + # knn function used to be here <nl> + knn = faiss . knn <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 000000000 . . e3a1d164f <nl> mmm / dev / null <nl> ppp b / contrib / inspect_tools . py <nl> <nl> + # Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + # <nl> + # This source code is licensed under the MIT license found in the <nl> + # LICENSE file in the root directory of this source tree . <nl> + <nl> + import numpy as np <nl> + import faiss <nl> + <nl> + def get_invlist ( invlists , l ) : <nl> + " " " returns the inverted lists content as a pair of ( list_ids , list_codes ) . <nl> + The codes are reshaped to a proper size <nl> + " " " <nl> + invlists = faiss . downcast_InvertedLists ( invlists ) <nl> + ls = invlists . list_size ( l ) <nl> + list_ids = np . zeros ( ls , dtype = ' int64 ' ) <nl> + ids = codes = None <nl> + try : <nl> + ids = invlists . get_ids ( l ) <nl> + faiss . memcpy ( faiss . swig_ptr ( list_ids ) , ids , list_ids . nbytes ) <nl> + codes = invlists . get_codes ( l ) <nl> + if invlists . code_size ! = faiss . InvertedLists . INVALID_CODE_SIZE : <nl> + list_codes = np . zeros ( ( ls , invlists . code_size ) , dtype = ' uint8 ' ) <nl> + else : <nl> + # it ' s a BlockInvertedLists <nl> + npb = invlists . n_per_block <nl> + bs = invlists . block_size <nl> + ls_round = ( ls + npb - 1 ) / / npb <nl> + list_codes = np . zeros ( ( ls_round , bs / / npb , npb ) , dtype = ' uint8 ' ) <nl> + faiss . memcpy ( faiss . swig_ptr ( list_codes ) , codes , list_codes . nbytes ) <nl> + finally : <nl> + if ids is not None : <nl> + invlists . release_ids ( l , ids ) <nl> + if codes is not None : <nl> + invlists . release_codes ( l , codes ) <nl> + return list_ids , list_codes <nl> + <nl> + <nl> + def get_invlist_sizes ( invlists ) : <nl> + " " " return the array of sizes of the inverted lists " " " <nl> + return np . array ( [ <nl> + invlists . list_size ( i ) <nl> + for i in range ( invlists . nlist ) <nl> + ] , dtype = ' int64 ' ) <nl> + <nl> + <nl> + def print_object_fields ( obj ) : <nl> + " " " list values all fields of an object known to SWIG " " " <nl> + <nl> + for name in obj . __class__ . __swig_getmethods__ : <nl> + print ( f " { name } = { getattr ( obj , name ) } " ) <nl> + <nl> + <nl> + def get_pq_centroids ( pq ) : <nl> + " " " return the PQ centroids as an array " " " <nl> + cen = faiss . vector_to_array ( pq . centroids ) <nl> + return cen . reshape ( pq . M , pq . ksub , pq . 
dsub ) <nl> + <nl> + <nl> + def get_LinearTransform_matrix ( pca ) : <nl> + " " " extract matrix + bias from the PCA object <nl> + works for any linear transform ( OPQ , random rotation , etc . ) <nl> + " " " <nl> + b = faiss . vector_to_array ( pca . b ) <nl> + A = faiss . vector_to_array ( pca . A ) . reshape ( pca . d_out , pca . d_in ) <nl> + return A , b <nl> deleted file mode 100644 <nl> index 968e39f0a . . 000000000 <nl> mmm a / contrib / ivf_tools . py <nl> ppp / dev / null <nl> <nl> - # Copyright ( c ) Facebook , Inc . and its affiliates . <nl> - # <nl> - # This source code is licensed under the MIT license found in the <nl> - # LICENSE file in the root directory of this source tree . <nl> - <nl> - import numpy as np <nl> - import faiss <nl> - <nl> - def get_invlist ( invlists , l ) : <nl> - " " " returns the inverted lists content . " " " <nl> - ls = invlists . list_size ( l ) <nl> - list_ids = np . zeros ( ls , dtype = ' int64 ' ) <nl> - ids = codes = None <nl> - try : <nl> - ids = invlists . get_ids ( l ) <nl> - faiss . memcpy ( faiss . swig_ptr ( list_ids ) , ids , list_ids . nbytes ) <nl> - codes = invlists . get_codes ( l ) <nl> - list_codes = np . zeros ( ( ls , invlists . code_size ) , dtype = ' uint8 ' ) <nl> - faiss . memcpy ( faiss . swig_ptr ( list_codes ) , codes , list_codes . nbytes ) <nl> - finally : <nl> - if ids is not None : <nl> - invlists . release_ids ( l , ids ) <nl> - if codes is not None : <nl> - invlists . release_codes ( l , codes ) <nl> - return list_ids , list_codes <nl> mmm a / faiss / CMakeLists . txt <nl> ppp b / faiss / CMakeLists . txt <nl> <nl> add_library ( faiss <nl> AutoTune . cpp <nl> Clustering . cpp <nl> - DirectMap . cpp <nl> IVFlib . cpp <nl> Index . cpp <nl> Index2Layer . cpp <nl> add_library ( faiss <nl> IndexIVFFlat . cpp <nl> IndexIVFPQ . cpp <nl> IndexIVFPQR . cpp <nl> + IndexIVFPQFastScan . cpp <nl> + IndexPQFastScan . cpp <nl> IndexIVFSpectralHash . cpp <nl> IndexLSH . cpp <nl> IndexLattice . cpp <nl> add_library ( faiss <nl> IndexReplicas . cpp <nl> IndexScalarQuantizer . cpp <nl> IndexShards . cpp <nl> - InvertedLists . cpp <nl> MatrixStats . cpp <nl> MetaIndexes . cpp <nl> VectorTransform . cpp <nl> add_library ( faiss <nl> impl / ScalarQuantizer . cpp <nl> impl / index_read . cpp <nl> impl / index_write . cpp <nl> + impl / pq4_fast_scan . cpp <nl> + impl / pq4_fast_scan_search_1 . cpp <nl> + impl / pq4_fast_scan_search_qbs . cpp <nl> impl / io . cpp <nl> impl / lattice_Zn . cpp <nl> + invlists / DirectMap . cpp <nl> + invlists / InvertedLists . cpp <nl> + invlists / BlockInvertedLists . cpp <nl> + invlists / InvertedListsIOHook . cpp <nl> utils / Heap . cpp <nl> utils / WorkerThread . cpp <nl> utils / distances . cpp <nl> add_library ( faiss <nl> utils / extra_distances . cpp <nl> utils / hamming . cpp <nl> utils / partitioning . cpp <nl> + utils / quantize_lut . cpp <nl> utils / random . cpp <nl> utils / utils . cpp <nl> ) <nl> add_library ( faiss <nl> set ( FAISS_HEADERS <nl> AutoTune . h <nl> Clustering . h <nl> - DirectMap . h <nl> IVFlib . h <nl> Index . h <nl> Index2Layer . h <nl> set ( FAISS_HEADERS <nl> IndexIVFFlat . h <nl> IndexIVFPQ . h <nl> IndexIVFPQR . h <nl> + IndexIVFPQFastScan . h <nl> + IndexPQFastScan . h <nl> IndexIVFSpectralHash . h <nl> IndexLSH . h <nl> IndexLattice . h <nl> set ( FAISS_HEADERS <nl> IndexReplicas . h <nl> IndexScalarQuantizer . h <nl> IndexShards . h <nl> - InvertedLists . h <nl> MatrixStats . h <nl> MetaIndexes . h <nl> MetricType . h <nl> set ( FAISS_HEADERS <nl> impl / io . 
h <nl> impl / io_macros . h <nl> impl / lattice_Zn . h <nl> + impl / pq4_fast_scan . h <nl> + impl / simd_result_handlers . h <nl> impl / platform_macros . h <nl> + invlists / InvertedLists . h <nl> + invlists / BlockInvertedLists . h <nl> + invlists / DirectMap . h <nl> + invlists / InvertedListsIOHook . h <nl> utils / Heap . h <nl> + utils / AlignedTable . h <nl> utils / WorkerThread . h <nl> utils / distances . h <nl> utils / extra_distances . h <nl> set ( FAISS_HEADERS <nl> utils / hamming . h <nl> utils / ordered_key_value . h <nl> utils / partitioning . h <nl> + utils / quantize_lut . h <nl> utils / random . h <nl> utils / simdlib . h <nl> + utils / simdlib_emulated . h <nl> + utils / simdlib_avx2 . h <nl> utils / utils . h <nl> ) <nl> <nl> if ( NOT WIN32 ) <nl> - target_sources ( faiss PRIVATE OnDiskInvertedLists . cpp ) <nl> - list ( APPEND FAISS_HEADERS OnDiskInvertedLists . h ) <nl> + target_sources ( faiss PRIVATE invlists / OnDiskInvertedLists . cpp ) <nl> + list ( APPEND FAISS_HEADERS invlists / OnDiskInvertedLists . h ) <nl> endif ( ) <nl> <nl> if ( FAISS_OPT_LEVEL STREQUAL " avx2 " ) <nl> - target_compile_options ( faiss PRIVATE $ < $ < COMPILE_LANGUAGE : CXX > : - mavx2 - mf16c - mpopcnt > ) <nl> + target_compile_options ( faiss PRIVATE $ < $ < COMPILE_LANGUAGE : CXX > : - mavx2 - mfma - mf16c - mpopcnt > ) <nl> set_target_properties ( faiss PROPERTIES OUTPUT_NAME " faiss_avx2 " ) <nl> elseif ( FAISS_OPT_LEVEL STREQUAL " sse4 " ) <nl> target_compile_options ( faiss PRIVATE $ < $ < COMPILE_LANGUAGE : CXX > : - msse4 - mpopcnt > ) <nl> mmm a / faiss / IndexBinaryHash . cpp <nl> ppp b / faiss / IndexBinaryHash . cpp <nl> <nl> <nl> # include < faiss / impl / AuxIndexStructures . h > <nl> # include < faiss / impl / FaissAssert . h > <nl> - <nl> - # ifdef _MSC_VER <nl> - # include < intrin . h > <nl> - <nl> - static inline int __builtin_ctzll ( uint64_t x ) { <nl> - unsigned long ret ; <nl> - _BitScanForward64 ( & ret , x ) ; <nl> - return ( int ) ret ; <nl> - } <nl> - # endif / / _MSC_VER <nl> + # include < faiss / impl / platform_macros . h > <nl> <nl> namespace faiss { <nl> <nl> mmm a / faiss / IndexFlat . cpp <nl> ppp b / faiss / IndexFlat . cpp <nl> IndexRefineFlat : : IndexRefineFlat ( Index * base_index ) : <nl> " base_index should be empty in the beginning " ) ; <nl> } <nl> <nl> + <nl> + IndexRefineFlat : : IndexRefineFlat ( Index * base_index , const float * xb ) : <nl> + Index ( base_index - > d , base_index - > metric_type ) , <nl> + refine_index ( base_index - > d , base_index - > metric_type ) , <nl> + base_index ( base_index ) , own_fields ( false ) , <nl> + k_factor ( 1 ) <nl> + { <nl> + is_trained = base_index - > is_trained ; <nl> + refine_index . add ( base_index - > ntotal , xb ) ; <nl> + ntotal = base_index - > ntotal ; <nl> + } <nl> + <nl> + <nl> + <nl> IndexRefineFlat : : IndexRefineFlat ( ) { <nl> base_index = nullptr ; <nl> own_fields = false ; <nl> void IndexRefineFlat : : reset ( ) <nl> } <nl> <nl> namespace { <nl> + <nl> typedef faiss : : Index : : idx_t idx_t ; <nl> <nl> template < class C > <nl> static void reorder_2_heaps ( <nl> } <nl> <nl> <nl> - } <nl> + } / / anonymous namespace <nl> <nl> <nl> void IndexRefineFlat : : search ( <nl> mmm a / faiss / IndexFlat . h <nl> ppp b / faiss / IndexFlat . 
h <nl> struct IndexRefineFlat : Index { <nl> / / / the base_index ( should be > = 1 ) <nl> float k_factor ; <nl> <nl> + / / / intitialize from empty index <nl> explicit IndexRefineFlat ( Index * base_index ) ; <nl> <nl> + / / / initialize from index and corresponding data <nl> + IndexRefineFlat ( Index * base_index , const float * xb ) ; <nl> + <nl> IndexRefineFlat ( ) ; <nl> <nl> void train ( idx_t n , const float * x ) override ; <nl> mmm a / faiss / IndexIVF . cpp <nl> ppp b / faiss / IndexIVF . cpp <nl> void IndexIVF : : replace_invlists ( InvertedLists * il , bool own ) <nl> { <nl> if ( own_invlists ) { <nl> delete invlists ; <nl> + invlists = nullptr ; <nl> } <nl> / / FAISS_THROW_IF_NOT ( ntotal = = 0 ) ; <nl> if ( il ) { <nl> - FAISS_THROW_IF_NOT ( il - > nlist = = nlist & & <nl> - il - > code_size = = code_size ) ; <nl> + FAISS_THROW_IF_NOT ( il - > nlist = = nlist ) ; <nl> + FAISS_THROW_IF_NOT ( <nl> + il - > code_size = = code_size | | <nl> + il - > code_size = = InvertedLists : : INVALID_CODE_SIZE <nl> + ) ; <nl> } <nl> invlists = il ; <nl> own_invlists = own ; <nl> mmm a / faiss / IndexIVF . h <nl> ppp b / faiss / IndexIVF . h <nl> <nl> # include < stdint . h > <nl> <nl> # include < faiss / Index . h > <nl> - # include < faiss / InvertedLists . h > <nl> - # include < faiss / DirectMap . h > <nl> + # include < faiss / invlists / InvertedLists . h > <nl> + # include < faiss / invlists / DirectMap . h > <nl> # include < faiss / Clustering . h > <nl> # include < faiss / impl / platform_macros . h > <nl> # include < faiss / utils / Heap . h > <nl> struct IndexIVF : Index , Level1Quantizer { <nl> * instead of ids ( used for reranking ) . <nl> * @ param params used to override the object ' s search parameters <nl> * / <nl> - virtual void search_preassigned ( idx_t n , const float * x , idx_t k , <nl> - const idx_t * assign , <nl> - const float * centroid_dis , <nl> - float * distances , idx_t * labels , <nl> - bool store_pairs , <nl> - const IVFSearchParameters * params = nullptr <nl> - ) const ; <nl> + virtual void search_preassigned ( <nl> + idx_t n , const float * x , idx_t k , <nl> + const idx_t * assign , const float * centroid_dis , <nl> + float * distances , idx_t * labels , <nl> + bool store_pairs , <nl> + const IVFSearchParameters * params = nullptr <nl> + ) const ; <nl> <nl> / * * assign the vectors , then call search_preassign * / <nl> void search ( idx_t n , const float * x , idx_t k , <nl> struct IndexIVF : Index , Level1Quantizer { <nl> void range_search ( idx_t n , const float * x , float radius , <nl> RangeSearchResult * result ) const override ; <nl> <nl> - void range_search_preassigned ( idx_t nx , const float * x , float radius , <nl> - const idx_t * keys , const float * coarse_dis , <nl> - RangeSearchResult * result , <nl> - bool store_pairs = false , <nl> - const IVFSearchParameters * params = nullptr ) const ; <nl> + void range_search_preassigned ( <nl> + idx_t nx , const float * x , float radius , <nl> + const idx_t * keys , const float * coarse_dis , <nl> + RangeSearchResult * result , <nl> + bool store_pairs = false , <nl> + const IVFSearchParameters * params = nullptr ) const ; <nl> <nl> / / / get a scanner for this index ( store_pairs means ignore labels ) <nl> virtual InvertedListScanner * get_InvertedListScanner ( <nl> mmm a / faiss / IndexIVFPQ . cpp <nl> ppp b / faiss / IndexIVFPQ . 
cpp <nl> void IndexIVFPQ : : reconstruct_from_offset ( int64_t list_no , int64_t offset , <nl> <nl> <nl> / / / 2G by default , accommodates tables up to PQ32 w / 65536 centroids <nl> - size_t IndexIVFPQ : : precomputed_table_max_bytes = ( ( size_t ) 1 ) < < 31 ; <nl> + size_t precomputed_table_max_bytes = ( ( size_t ) 1 ) < < 31 ; <nl> <nl> / * * Precomputed tables for residuals <nl> * <nl> size_t IndexIVFPQ : : precomputed_table_max_bytes = ( ( size_t ) 1 ) < < 31 ; <nl> * is faster when the length of the lists is > ksub * M . <nl> * / <nl> <nl> - void IndexIVFPQ : : precompute_table ( ) <nl> + void initialize_IVFPQ_precomputed_table ( <nl> + int & use_precomputed_table , <nl> + const Index * quantizer , <nl> + const ProductQuantizer & pq , <nl> + AlignedTable < float > & precomputed_table , <nl> + bool verbose <nl> + ) <nl> { <nl> - if ( use_precomputed_table = = - 1 ) <nl> + size_t nlist = quantizer - > ntotal ; <nl> + size_t d = quantizer - > d ; <nl> + FAISS_THROW_IF_NOT ( d = = pq . d ) ; <nl> + <nl> + if ( use_precomputed_table = = - 1 ) { <nl> + precomputed_table . resize ( 0 ) ; <nl> return ; <nl> + } <nl> <nl> if ( use_precomputed_table = = 0 ) { / / then choose the type of table <nl> if ( quantizer - > metric_type = = METRIC_INNER_PRODUCT ) { <nl> void IndexIVFPQ : : precompute_table ( ) <nl> printf ( " IndexIVFPQ : : precompute_table : precomputed " <nl> " tables not needed for inner product quantizers \ n " ) ; <nl> } <nl> + precomputed_table . resize ( 0 ) ; <nl> return ; <nl> } <nl> const MultiIndexQuantizer * miq = <nl> void IndexIVFPQ : : precompute_table ( ) <nl> <nl> } <nl> <nl> + void IndexIVFPQ : : precompute_table ( ) <nl> + { <nl> + initialize_IVFPQ_precomputed_table ( <nl> + use_precomputed_table , quantizer , pq , precomputed_table , <nl> + verbose <nl> + ) ; <nl> + } <nl> + <nl> + <nl> + <nl> namespace { <nl> <nl> using idx_t = Index : : idx_t ; <nl> struct QueryTables { <nl> } else if ( use_precomputed_table = = 1 ) { <nl> dis0 = coarse_dis ; <nl> <nl> - fvec_madd ( pq . M * pq . ksub , <nl> - & ivfpq . precomputed_table [ key * pq . ksub * pq . M ] , <nl> - - 2 . 0 , sim_table_2 , <nl> - sim_table ) ; <nl> - <nl> + fvec_madd ( <nl> + pq . M * pq . ksub , <nl> + ivfpq . precomputed_table . data ( ) + key * pq . ksub * pq . M , <nl> + - 2 . 0 , sim_table_2 , <nl> + sim_table <nl> + ) ; <nl> <nl> if ( polysemous_ht ! = 0 ) { <nl> ivfpq . quantizer - > compute_residual ( qi , residual_vec , key ) ; <nl> struct QueryTables { <nl> k > > = cpq . nbits ; <nl> <nl> / / get corresponding table <nl> - const float * pc = & ivfpq . precomputed_table <nl> - [ ( ki * pq . M + cm * Mf ) * pq . ksub ] ; <nl> + const float * pc = ivfpq . precomputed_table . data ( ) + <nl> + ( ki * pq . M + cm * Mf ) * pq . ksub ; <nl> <nl> if ( polysemous_ht = = 0 ) { <nl> <nl> struct QueryTables { <nl> if ( use_precomputed_table = = 1 ) { <nl> dis0 = coarse_dis ; <nl> <nl> - const float * s = & ivfpq . precomputed_table [ key * pq . ksub * pq . M ] ; <nl> + const float * s = ivfpq . precomputed_table . data ( ) + <nl> + key * pq . ksub * pq . M ; <nl> for ( int m = 0 ; m < pq . M ; m + + ) { <nl> sim_table_ptrs [ m ] = s ; <nl> s + = pq . ksub ; <nl> struct QueryTables { <nl> int ki = k & ( ( uint64_t ( 1 ) < < cpq . nbits ) - 1 ) ; <nl> k > > = cpq . nbits ; <nl> <nl> - const float * pc = & ivfpq . precomputed_table <nl> - [ ( ki * pq . M + cm * Mf ) * pq . ksub ] ; <nl> + const float * pc = ivfpq . precomputed_table . data ( ) + <nl> + ( ki * pq . M + cm * Mf ) * pq . 
ksub ; <nl> <nl> for ( int m = m0 ; m < m0 + Mf ; m + + ) { <nl> sim_table_ptrs [ m ] = pc ; <nl> mmm a / faiss / IndexIVFPQ . h <nl> ppp b / faiss / IndexIVFPQ . h <nl> <nl> # include < faiss / IndexIVF . h > <nl> # include < faiss / IndexPQ . h > <nl> # include < faiss / impl / platform_macros . h > <nl> - <nl> + # include < faiss / utils / AlignedTable . h > <nl> <nl> namespace faiss { <nl> <nl> struct IVFPQSearchParameters : IVFSearchParameters { <nl> } ; <nl> <nl> <nl> + <nl> + FAISS_API extern size_t precomputed_table_max_bytes ; <nl> + <nl> + <nl> / * * Inverted file with Product Quantizer encoding . Each residual <nl> * vector is encoded as a product quantizer code . <nl> * / <nl> - struct FAISS_API IndexIVFPQ : IndexIVF { <nl> + struct IndexIVFPQ : IndexIVF { <nl> bool by_residual ; / / / < Encode residual or plain vector ? <nl> <nl> ProductQuantizer pq ; / / / < produces the codes <nl> struct FAISS_API IndexIVFPQ : IndexIVF { <nl> <nl> / * * Precompute table that speed up query preprocessing at some <nl> * memory cost ( used only for by_residual with L2 metric ) <nl> - * = - 1 : force disable <nl> - * = 0 : decide heuristically ( default : use tables only if they are <nl> - * < precomputed_tables_max_bytes ) <nl> - * = 1 : tables that work for all quantizers ( size 256 * nlist * M ) <nl> - * = 2 : specific version for MultiIndexQuantizer ( much more compact ) <nl> * / <nl> int use_precomputed_table ; <nl> - static size_t precomputed_table_max_bytes ; <nl> <nl> / / / if use_precompute_table <nl> / / / size nlist * pq . M * pq . ksub <nl> - std : : vector < float > precomputed_table ; <nl> + AlignedTable < float > precomputed_table ; <nl> <nl> IndexIVFPQ ( <nl> Index * quantizer , size_t d , size_t nlist , <nl> struct FAISS_API IndexIVFPQ : IndexIVF { <nl> <nl> } ; <nl> <nl> + / * * Pre - compute distance tables for IVFPQ with by - residual and METRIC_L2 <nl> + * <nl> + * @ param use_precomputed_table ( I / O ) <nl> + * = - 1 : force disable <nl> + * = 0 : decide heuristically ( default : use tables only if they are <nl> + * < precomputed_tables_max_bytes ) , set use_precomputed_table on output <nl> + * = 1 : tables that work for all quantizers ( size 256 * nlist * M ) <nl> + * = 2 : specific version for MultiIndexQuantizer ( much more compact ) <nl> + * @ param precomputed_table precomputed table to intialize <nl> + * / <nl> + <nl> + void initialize_IVFPQ_precomputed_table ( <nl> + int & use_precomputed_table , <nl> + const Index * quantizer , <nl> + const ProductQuantizer & pq , <nl> + AlignedTable < float > & precomputed_table , <nl> + bool verbose <nl> + ) ; <nl> <nl> / / / statistics are robust to internal threading , but not if <nl> / / / IndexIVFPQ : : search_preassigned is called by multiple threads <nl> new file mode 100644 <nl> index 000000000 . . 414990495 <nl> mmm / dev / null <nl> ppp b / faiss / IndexIVFPQFastScan . cpp <nl> <nl> + / * * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the root directory of this source tree . <nl> + * / <nl> + <nl> + # include < faiss / IndexIVFPQFastScan . h > <nl> + <nl> + # include < cassert > <nl> + # include < cstdio > <nl> + # include < inttypes . h > <nl> + <nl> + # include < omp . h > <nl> + <nl> + # include < memory > <nl> + <nl> + # include < faiss / impl / FaissAssert . h > <nl> + # include < faiss / utils / utils . h > <nl> + # include < faiss / utils / distances . 
h > <nl> + # include < faiss / utils / simdlib . h > <nl> + # include < faiss / impl / AuxIndexStructures . h > <nl> + <nl> + # include < faiss / invlists / BlockInvertedLists . h > <nl> + <nl> + # include < faiss / impl / simd_result_handlers . h > <nl> + # include < faiss / utils / quantize_lut . h > <nl> + # include < faiss / impl / pq4_fast_scan . h > <nl> + <nl> + namespace faiss { <nl> + <nl> + using namespace simd_result_handlers ; <nl> + <nl> + <nl> + inline size_t roundup ( size_t a , size_t b ) { <nl> + return ( a + b - 1 ) / b * b ; <nl> + } <nl> + <nl> + <nl> + IndexIVFPQFastScan : : IndexIVFPQFastScan ( <nl> + Index * quantizer , size_t d , size_t nlist , <nl> + size_t M , size_t nbits_per_idx , <nl> + MetricType metric , int bbs ) : <nl> + IndexIVF ( quantizer , d , nlist , 0 , metric ) , <nl> + pq ( d , M , nbits_per_idx ) , <nl> + bbs ( bbs ) <nl> + { <nl> + FAISS_THROW_IF_NOT ( nbits_per_idx = = 4 ) ; <nl> + M2 = roundup ( pq . M , 2 ) ; <nl> + by_residual = false ; / / set to false by default because it ' s much faster <nl> + is_trained = false ; <nl> + code_size = pq . code_size ; <nl> + <nl> + replace_invlists ( <nl> + new BlockInvertedLists ( nlist , bbs , bbs * M2 / 2 ) , <nl> + true <nl> + ) ; <nl> + } <nl> + <nl> + IndexIVFPQFastScan : : IndexIVFPQFastScan ( ) <nl> + { <nl> + by_residual = false ; <nl> + bbs = 0 ; <nl> + M2 = 0 ; <nl> + } <nl> + <nl> + <nl> + IndexIVFPQFastScan : : IndexIVFPQFastScan ( const IndexIVFPQ & orig , int bbs ) : <nl> + IndexIVF ( <nl> + orig . quantizer , orig . d , orig . nlist , <nl> + orig . pq . code_size , orig . metric_type ) , <nl> + pq ( orig . pq ) , <nl> + bbs ( bbs ) <nl> + { <nl> + FAISS_THROW_IF_NOT ( orig . pq . nbits = = 4 ) ; <nl> + <nl> + by_residual = orig . by_residual ; <nl> + ntotal = orig . ntotal ; <nl> + is_trained = orig . is_trained ; <nl> + nprobe = orig . nprobe ; <nl> + size_t M = pq . M ; <nl> + <nl> + M2 = roundup ( M , 2 ) ; <nl> + <nl> + replace_invlists ( <nl> + new BlockInvertedLists ( orig . nlist , bbs , bbs * M2 / 2 ) , <nl> + true <nl> + ) ; <nl> + <nl> + precomputed_table . resize ( orig . precomputed_table . size ( ) ) ; <nl> + <nl> + if ( precomputed_table . nbytes ( ) > 0 ) { <nl> + memcpy ( precomputed_table . get ( ) , orig . precomputed_table . data ( ) , <nl> + precomputed_table . nbytes ( ) <nl> + ) ; <nl> + } <nl> + <nl> + for ( size_t i = 0 ; i < nlist ; i + + ) { <nl> + size_t nb = orig . invlists - > list_size ( i ) ; <nl> + size_t nb2 = roundup ( nb , bbs ) ; <nl> + AlignedTable < uint8_t > tmp ( nb2 * M2 / 2 ) ; <nl> + pq4_pack_codes ( <nl> + InvertedLists : : ScopedCodes ( orig . invlists , i ) . get ( ) , <nl> + nb , M , nb2 , bbs , M2 , <nl> + tmp . get ( ) <nl> + ) ; <nl> + invlists - > add_entries ( <nl> + i , nb , <nl> + InvertedLists : : ScopedIds ( orig . invlists , i ) . get ( ) , <nl> + tmp . get ( ) <nl> + ) ; <nl> + } <nl> + <nl> + orig_invlists = orig . invlists ; <nl> + } <nl> + <nl> + <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Training <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + void IndexIVFPQFastScan : : train_residual ( idx_t n , const float * x_in ) <nl> + { <nl> + <nl> + const float * x = fvecs_maybe_subsample ( <nl> + d , ( size_t * ) & n , pq . cp . max_points_per_centroid * pq . ksub , <nl> + x_in , verbose , pq . cp . 
seed ) ; <nl> + <nl> + std : : unique_ptr < float [ ] > del_x ; <nl> + if ( x ! = x_in ) { <nl> + del_x . reset ( ( float * ) x ) ; <nl> + } <nl> + <nl> + const float * trainset ; <nl> + AlignedTable < float > residuals ; <nl> + <nl> + if ( by_residual ) { <nl> + if ( verbose ) printf ( " computing residuals \ n " ) ; <nl> + std : : vector < idx_t > assign ( n ) ; <nl> + quantizer - > assign ( n , x , assign . data ( ) ) ; <nl> + residuals . resize ( n * d ) ; <nl> + for ( idx_t i = 0 ; i < n ; i + + ) { <nl> + quantizer - > compute_residual ( <nl> + x + i * d , <nl> + residuals . data ( ) + i * d , <nl> + assign [ i ] <nl> + ) ; <nl> + } <nl> + trainset = residuals . data ( ) ; <nl> + } else { <nl> + trainset = x ; <nl> + } <nl> + <nl> + if ( verbose ) { <nl> + printf ( " training % zdx % zd product quantizer on % zd vectors in % dD \ n " , <nl> + pq . M , pq . ksub , n , d ) ; <nl> + } <nl> + pq . verbose = verbose ; <nl> + pq . train ( n , trainset ) ; <nl> + <nl> + if ( by_residual & & metric_type = = METRIC_L2 ) { <nl> + precompute_table ( ) ; <nl> + } <nl> + <nl> + } <nl> + <nl> + void IndexIVFPQFastScan : : precompute_table ( ) <nl> + { <nl> + initialize_IVFPQ_precomputed_table ( <nl> + use_precomputed_table , <nl> + quantizer , pq , precomputed_table , verbose <nl> + ) ; <nl> + } <nl> + <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Code management functions <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + <nl> + <nl> + void IndexIVFPQFastScan : : encode_vectors ( <nl> + idx_t n , const float * x , const idx_t * list_nos , <nl> + uint8_t * codes , bool include_listnos ) const <nl> + { <nl> + <nl> + if ( by_residual ) { <nl> + AlignedTable < float > residuals ( n * d ) ; <nl> + for ( size_t i = 0 ; i < n ; i + + ) { <nl> + if ( list_nos [ i ] < 0 ) { <nl> + memset ( residuals . data ( ) + i * d , 0 , sizeof ( residuals [ 0 ] ) * d ) ; <nl> + } else { <nl> + quantizer - > compute_residual ( <nl> + x + i * d , residuals . data ( ) + i * d , list_nos [ i ] ) ; <nl> + } <nl> + } <nl> + pq . compute_codes ( residuals . data ( ) , codes , n ) ; <nl> + } else { <nl> + pq . compute_codes ( x , codes , n ) ; <nl> + } <nl> + <nl> + if ( include_listnos ) { <nl> + size_t coarse_size = coarse_code_size ( ) ; <nl> + for ( idx_t i = n - 1 ; i > = 0 ; i - - ) { <nl> + uint8_t * code = codes + i * ( coarse_size + code_size ) ; <nl> + memmove ( code + coarse_size , <nl> + codes + i * code_size , code_size ) ; <nl> + encode_listno ( list_nos [ i ] , code ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> + <nl> + void IndexIVFPQFastScan : : add_with_ids ( <nl> + idx_t n , const float * x , const idx_t * xids ) { <nl> + <nl> + / / copied from IndexIVF : : add_with_ids mmm > <nl> + <nl> + / / do some blocking to avoid excessive allocs <nl> + idx_t bs = 65536 ; <nl> + if ( n > bs ) { <nl> + for ( idx_t i0 = 0 ; i0 < n ; i0 + = bs ) { <nl> + idx_t i1 = std : : min ( n , i0 + bs ) ; <nl> + if ( verbose ) { <nl> + printf ( <nl> + " IndexIVFPQFastScan : : add_with_ids % " <nl> + PRId64 " : % " PRId64 " \ n " , i0 , i1 <nl> + ) ; <nl> + } <nl> + add_with_ids ( i1 - i0 , x + i0 * d , <nl> + xids ? xids + i0 : nullptr ) ; <nl> + } <nl> + return ; <nl> + } <nl> + InterruptCallback : : check ( ) ; <nl> + <nl> + AlignedTable < uint8_t > codes ( n * code_size ) ; <nl> + <nl> + FAISS_THROW_IF_NOT ( is_trained ) ; <nl> + direct_map . 
check_can_add ( xids ) ; <nl> + <nl> + std : : unique_ptr < idx_t [ ] > idx ( new idx_t [ n ] ) ; <nl> + quantizer - > assign ( n , x , idx . get ( ) ) ; <nl> + size_t nadd = 0 , nminus1 = 0 ; <nl> + <nl> + for ( size_t i = 0 ; i < n ; i + + ) { <nl> + if ( idx [ i ] < 0 ) nminus1 + + ; <nl> + } <nl> + <nl> + AlignedTable < uint8_t > flat_codes ( n * code_size ) ; <nl> + encode_vectors ( n , x , idx . get ( ) , flat_codes . get ( ) ) ; <nl> + <nl> + DirectMapAdd dm_adder ( direct_map , n , xids ) ; <nl> + <nl> + / / < mmm <nl> + <nl> + BlockInvertedLists * bil = dynamic_cast < BlockInvertedLists * > ( invlists ) ; <nl> + FAISS_THROW_IF_NOT_MSG ( bil , " only block inverted lists supported " ) ; <nl> + <nl> + / / prepare batches <nl> + std : : vector < idx_t > order ( n ) ; <nl> + for ( idx_t i = 0 ; i < n ; i + + ) { order [ i ] = i ; } <nl> + <nl> + / / TODO should not need stable <nl> + std : : stable_sort ( order . begin ( ) , order . end ( ) , <nl> + [ & idx ] ( idx_t a , idx_t b ) { <nl> + return idx [ a ] < idx [ b ] ; <nl> + } <nl> + ) ; <nl> + <nl> + / / TODO parallelize <nl> + idx_t i0 = 0 ; <nl> + while ( i0 < n ) { <nl> + idx_t list_no = idx [ order [ i0 ] ] ; <nl> + idx_t i1 = i0 + 1 ; <nl> + while ( i1 < n & & idx [ order [ i1 ] ] = = list_no ) { <nl> + i1 + + ; <nl> + } <nl> + <nl> + if ( list_no = = - 1 ) { <nl> + i0 = i1 ; <nl> + continue ; <nl> + } <nl> + <nl> + / / make linear array <nl> + AlignedTable < uint8_t > list_codes ( ( i1 - i0 ) * code_size ) ; <nl> + size_t list_size = bil - > list_size ( list_no ) ; <nl> + <nl> + bil - > resize ( list_no , list_size + i1 - i0 ) ; <nl> + <nl> + for ( idx_t i = i0 ; i < i1 ; i + + ) { <nl> + size_t ofs = list_size + i - i0 ; <nl> + idx_t id = xids ? xids [ order [ i ] ] : ntotal + order [ i ] ; <nl> + dm_adder . add ( order [ i ] , list_no , ofs ) ; <nl> + bil - > ids [ list_no ] [ ofs ] = id ; <nl> + memcpy ( <nl> + list_codes . data ( ) + ( i - i0 ) * code_size , <nl> + flat_codes . data ( ) + order [ i ] * code_size , <nl> + code_size <nl> + ) ; <nl> + nadd + + ; <nl> + } <nl> + pq4_pack_codes_range ( <nl> + list_codes . data ( ) , pq . M , <nl> + list_size , list_size + i1 - i0 , <nl> + bbs , M2 , bil - > codes [ list_no ] . data ( ) <nl> + ) ; <nl> + <nl> + i0 = i1 ; <nl> + } <nl> + <nl> + ntotal + = n ; <nl> + <nl> + } <nl> + <nl> + <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * search <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + <nl> + namespace { <nl> + <nl> + / / from impl / ProductQuantizer . cpp <nl> + template < class C , typename dis_t > <nl> + void pq_estimators_from_tables_generic ( <nl> + const ProductQuantizer & pq , size_t nbits , <nl> + const uint8_t * codes , size_t ncodes , <nl> + const dis_t * dis_table , const int64_t * ids , <nl> + float dis0 , <nl> + size_t k , typename C : : T * heap_dis , int64_t * heap_ids ) <nl> + { <nl> + using accu_t = typename C : : T ; <nl> + const size_t M = pq . M ; <nl> + const size_t ksub = pq . ksub ; <nl> + for ( size_t j = 0 ; j < ncodes ; + + j ) { <nl> + PQDecoderGeneric decoder ( <nl> + codes + j * pq . code_size , nbits <nl> + ) ; <nl> + accu_t dis = dis0 ; <nl> + const dis_t * dt = dis_table ; <nl> + for ( size_t m = 0 ; m < M ; m + + ) { <nl> + uint64_t c = decoder . 
decode ( ) ; <nl> + dis + = dt [ c ] ; <nl> + dt + = ksub ; <nl> + } <nl> + <nl> + if ( C : : cmp ( heap_dis [ 0 ] , dis ) ) { <nl> + heap_pop < C > ( k , heap_dis , heap_ids ) ; <nl> + heap_push < C > ( k , heap_dis , heap_ids , dis , ids [ j ] ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + using idx_t = Index : : idx_t ; <nl> + using namespace quantize_lut ; <nl> + <nl> + void fvec_madd_avx ( <nl> + size_t n , const float * a , <nl> + float bf , const float * b , float * c ) <nl> + { <nl> + assert ( is_aligned_pointer ( a ) ) ; <nl> + assert ( is_aligned_pointer ( b ) ) ; <nl> + assert ( is_aligned_pointer ( c ) ) ; <nl> + assert ( n % 8 = = 0 ) ; <nl> + simd8float32 bf8 ( bf ) ; <nl> + n / = 8 ; <nl> + for ( size_t i = 0 ; i < n ; i + + ) { <nl> + simd8float32 ai ( a ) ; <nl> + simd8float32 bi ( b ) ; <nl> + <nl> + simd8float32 ci = fmadd ( bf8 , bi , ai ) ; <nl> + ci . store ( c ) ; <nl> + c + = 8 ; <nl> + a + = 8 ; <nl> + b + = 8 ; <nl> + } <nl> + <nl> + } <nl> + <nl> + } / / anonymous namespace <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Look - Up Table functions <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + <nl> + void IndexIVFPQFastScan : : compute_LUT ( <nl> + size_t n , const float * x , <nl> + const idx_t * coarse_ids , const float * coarse_dis , <nl> + AlignedTable < float > & dis_tables , <nl> + AlignedTable < float > & biases <nl> + ) const <nl> + { <nl> + const IndexIVFPQFastScan & ivfpq = * this ; <nl> + size_t dim12 = pq . ksub * pq . M ; <nl> + size_t d = pq . d ; <nl> + size_t nprobe = ivfpq . nprobe ; <nl> + <nl> + if ( ivfpq . by_residual ) { <nl> + <nl> + if ( ivfpq . metric_type = = METRIC_L2 ) { <nl> + <nl> + dis_tables . resize ( n * nprobe * dim12 ) ; <nl> + <nl> + if ( ivfpq . use_precomputed_table = = 1 ) { <nl> + biases . resize ( n * nprobe ) ; <nl> + memcpy ( biases . get ( ) , coarse_dis , sizeof ( float ) * n * nprobe ) ; <nl> + <nl> + AlignedTable < float > ip_table ( n * dim12 ) ; <nl> + pq . compute_inner_prod_tables ( n , x , ip_table . get ( ) ) ; <nl> + <nl> + # pragma omp parallel if ( n * nprobe > 8000 ) <nl> + for ( idx_t i = 0 ; i < n ; i + + ) { <nl> + for ( idx_t j = 0 ; j < nprobe ; j + + ) { <nl> + size_t ij = i * nprobe + j ; <nl> + <nl> + fvec_madd_avx ( <nl> + dim12 , <nl> + precomputed_table . get ( ) + coarse_ids [ ij ] * dim12 , <nl> + - 2 , ip_table . get ( ) + i * dim12 , <nl> + dis_tables . get ( ) + ij * dim12 <nl> + ) ; <nl> + } <nl> + } <nl> + <nl> + } else { <nl> + <nl> + std : : unique_ptr < float [ ] > xrel ( new float [ n * nprobe * d ] ) ; <nl> + biases . resize ( n * nprobe ) ; <nl> + memset ( biases . get ( ) , 0 , sizeof ( float ) * n * nprobe ) ; <nl> + <nl> + # pragma omp parallel if ( n > 8000 ) <nl> + for ( idx_t i = 0 ; i < n ; i + + ) { <nl> + for ( idx_t j = 0 ; j < nprobe ; j + + ) { <nl> + ivfpq . quantizer - > compute_residual ( <nl> + x + i * d , & xrel [ ( i * nprobe + j ) * d ] , <nl> + coarse_ids [ i * nprobe + j ] ) ; <nl> + } <nl> + } <nl> + <nl> + pq . compute_distance_tables ( <nl> + n * nprobe , xrel . get ( ) , dis_tables . get ( ) ) ; <nl> + <nl> + } <nl> + <nl> + } else if ( ivfpq . metric_type = = METRIC_INNER_PRODUCT ) { <nl> + dis_tables . resize ( n * dim12 ) ; <nl> + pq . compute_inner_prod_tables ( n , x , dis_tables . get ( ) ) ; <nl> + / / compute_inner_prod_tables ( pq , n , x , dis_tables . 
get ( ) ) ; <nl> + <nl> + biases . resize ( n * nprobe ) ; <nl> + memcpy ( biases . get ( ) , coarse_dis , sizeof ( float ) * n * nprobe ) ; <nl> + } else { <nl> + FAISS_THROW_FMT ( " metric % d not supported " , ivfpq . metric_type ) ; <nl> + } <nl> + <nl> + } else { <nl> + dis_tables . resize ( n * dim12 ) ; <nl> + if ( ivfpq . metric_type = = METRIC_L2 ) { <nl> + pq . compute_distance_tables ( n , x , dis_tables . get ( ) ) ; <nl> + } else if ( ivfpq . metric_type = = METRIC_INNER_PRODUCT ) { <nl> + pq . compute_inner_prod_tables ( n , x , dis_tables . get ( ) ) ; <nl> + } else { <nl> + FAISS_THROW_FMT ( " metric % d not supported " , ivfpq . metric_type ) ; <nl> + } <nl> + } <nl> + <nl> + } <nl> + <nl> + void IndexIVFPQFastScan : : compute_LUT_uint8 ( <nl> + size_t n , const float * x , <nl> + const idx_t * coarse_ids , const float * coarse_dis , <nl> + AlignedTable < uint8_t > & dis_tables , <nl> + AlignedTable < uint16_t > & biases , <nl> + float * normalizers <nl> + ) const { <nl> + const IndexIVFPQFastScan & ivfpq = * this ; <nl> + AlignedTable < float > dis_tables_float ; <nl> + AlignedTable < float > biases_float ; <nl> + <nl> + uint64_t t0 = get_cy ( ) ; <nl> + compute_LUT ( <nl> + n , x , <nl> + coarse_ids , coarse_dis , <nl> + dis_tables_float , biases_float <nl> + ) ; <nl> + IVFFastScan_stats . t_compute_distance_tables + = get_cy ( ) - t0 ; <nl> + <nl> + bool lut_is_3d = ivfpq . by_residual & & ivfpq . metric_type = = METRIC_L2 ; <nl> + size_t dim123 = pq . ksub * pq . M ; <nl> + size_t dim123_2 = pq . ksub * M2 ; <nl> + if ( lut_is_3d ) { <nl> + dim123 * = nprobe ; <nl> + dim123_2 * = nprobe ; <nl> + } <nl> + dis_tables . resize ( n * dim123_2 ) ; <nl> + if ( biases_float . get ( ) ) { <nl> + biases . resize ( n * nprobe ) ; <nl> + } <nl> + uint64_t t1 = get_cy ( ) ; <nl> + <nl> + # pragma omp parallel if ( n > 8000 ) <nl> + for ( size_t i = 0 ; i < n ; i + + ) { <nl> + const float * t_in = dis_tables_float . get ( ) + i * dim123 ; <nl> + const float * b_in = nullptr ; <nl> + uint8_t * t_out = dis_tables . get ( ) + i * dim123_2 ; <nl> + uint16_t * b_out = nullptr ; <nl> + if ( biases_float . get ( ) ) { <nl> + b_in = biases_float . get ( ) + i * nprobe ; <nl> + b_out = biases . get ( ) + i * nprobe ; <nl> + } <nl> + <nl> + quantize_LUT_and_bias ( <nl> + nprobe , pq . M , pq . ksub , lut_is_3d , <nl> + t_in , b_in , <nl> + t_out , M2 , b_out , <nl> + normalizers + 2 * i , normalizers + 2 * i + 1 <nl> + ) ; <nl> + } <nl> + IVFFastScan_stats . 
t_round + = get_cy ( ) - t1 ; <nl> + <nl> + } <nl> + <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Search functions <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + template < bool is_max > <nl> + void IndexIVFPQFastScan : : search_dispatch_implem ( <nl> + idx_t n , <nl> + const float * x , <nl> + idx_t k , <nl> + float * distances , <nl> + idx_t * labels ) const <nl> + { <nl> + using Cfloat = typename std : : conditional < is_max , <nl> + CMax < float , int64_t > , CMin < float , int64_t > > : : type ; <nl> + <nl> + using C = typename std : : conditional < is_max , <nl> + CMax < uint16_t , int64_t > , CMin < uint16_t , int64_t > > : : type ; <nl> + <nl> + if ( n = = 0 ) { <nl> + return ; <nl> + } <nl> + <nl> + / / actual implementation used <nl> + int impl = implem ; <nl> + <nl> + if ( impl = = 0 ) { <nl> + if ( bbs = = 32 ) { <nl> + impl = 12 ; <nl> + } else { <nl> + impl = 10 ; <nl> + } <nl> + if ( k > 20 ) { <nl> + impl + + ; <nl> + } <nl> + } <nl> + <nl> + if ( impl = = 1 ) { <nl> + search_implem_1 < Cfloat > ( n , x , k , distances , labels ) ; <nl> + } else if ( impl = = 2 ) { <nl> + search_implem_2 < C > ( n , x , k , distances , labels ) ; <nl> + <nl> + } else if ( impl > = 10 & & impl < = 13 ) { <nl> + size_t ndis = 0 , nlist_visited = 0 ; <nl> + int nt = std : : min ( omp_get_max_threads ( ) , int ( n ) ) ; <nl> + if ( nt < 2 ) { <nl> + if ( impl = = 12 | | impl = = 13 ) { <nl> + search_implem_12 < C > <nl> + ( n , x , k , distances , labels , impl , & ndis , & nlist_visited ) ; <nl> + } else { <nl> + search_implem_10 < C > <nl> + ( n , x , k , distances , labels , impl , & ndis , & nlist_visited ) ; <nl> + } <nl> + } else { <nl> + / / explicitly slice over threads <nl> + # pragma omp parallel for num_threads ( nt ) reduction ( + : ndis , nlist_visited ) <nl> + for ( int slice = 0 ; slice < nt ; slice + + ) { <nl> + idx_t i0 = n * slice / nt ; <nl> + idx_t i1 = n * ( slice + 1 ) / nt ; <nl> + float * dis_i = distances + i0 * k ; <nl> + idx_t * lab_i = labels + i0 * k ; <nl> + if ( impl = = 12 | | impl = = 13 ) { <nl> + search_implem_12 < C > ( <nl> + i1 - i0 , x + i0 * d , k , dis_i , lab_i , <nl> + impl , & ndis , & nlist_visited <nl> + ) ; <nl> + } else { <nl> + search_implem_10 < C > ( <nl> + i1 - i0 , x + i0 * d , k , dis_i , lab_i , <nl> + impl , & ndis , & nlist_visited <nl> + ) ; <nl> + } <nl> + } <nl> + } <nl> + indexIVF_stats . nq + = n ; <nl> + indexIVF_stats . ndis + = ndis ; <nl> + indexIVF_stats . 
nlist + = nlist_visited ; <nl> + } else { <nl> + FAISS_THROW_FMT ( " implem % d does not exist " , implem ) ; <nl> + } <nl> + <nl> + } <nl> + <nl> + <nl> + void IndexIVFPQFastScan : : search ( <nl> + idx_t n , const float * x , idx_t k , <nl> + float * distances , idx_t * labels ) const <nl> + { <nl> + if ( metric_type = = METRIC_L2 ) { <nl> + search_dispatch_implem < true > ( n , x , k , distances , labels ) ; <nl> + } else { <nl> + search_dispatch_implem < false > ( n , x , k , distances , labels ) ; <nl> + } <nl> + } <nl> + <nl> + template < class C > <nl> + void IndexIVFPQFastScan : : search_implem_1 ( <nl> + idx_t n , const float * x , idx_t k , <nl> + float * distances , idx_t * labels ) const <nl> + { <nl> + FAISS_THROW_IF_NOT ( orig_invlists ) ; <nl> + <nl> + std : : unique_ptr < idx_t [ ] > coarse_ids ( new idx_t [ n * nprobe ] ) ; <nl> + std : : unique_ptr < float [ ] > coarse_dis ( new float [ n * nprobe ] ) ; <nl> + <nl> + quantizer - > search ( n , x , nprobe , coarse_dis . get ( ) , coarse_ids . get ( ) ) ; <nl> + <nl> + size_t dim12 = pq . ksub * pq . M ; <nl> + AlignedTable < float > dis_tables ; <nl> + AlignedTable < float > biases ; <nl> + <nl> + compute_LUT ( <nl> + n , x , <nl> + coarse_ids . get ( ) , coarse_dis . get ( ) , <nl> + dis_tables , biases <nl> + ) ; <nl> + <nl> + bool single_LUT = ! ( by_residual & & metric_type = = METRIC_L2 ) ; <nl> + <nl> + size_t ndis = 0 , nlist_visited = 0 ; <nl> + <nl> + # pragma omp parallel for reduction ( + : ndis , nlist_visited ) <nl> + for ( idx_t i = 0 ; i < n ; i + + ) { <nl> + int64_t * heap_ids = labels + i * k ; <nl> + float * heap_dis = distances + i * k ; <nl> + heap_heapify < C > ( k , heap_dis , heap_ids ) ; <nl> + float * LUT = nullptr ; <nl> + <nl> + if ( single_LUT ) { <nl> + LUT = dis_tables . get ( ) + i * dim12 ; <nl> + } <nl> + for ( idx_t j = 0 ; j < nprobe ; j + + ) { <nl> + if ( ! single_LUT ) { <nl> + LUT = dis_tables . get ( ) + ( i * nprobe + j ) * dim12 ; <nl> + } <nl> + idx_t list_no = coarse_ids [ i * nprobe + j ] ; <nl> + if ( list_no < 0 ) continue ; <nl> + size_t ls = orig_invlists - > list_size ( list_no ) ; <nl> + if ( ls = = 0 ) continue ; <nl> + InvertedLists : : ScopedCodes codes ( orig_invlists , list_no ) ; <nl> + InvertedLists : : ScopedIds ids ( orig_invlists , list_no ) ; <nl> + <nl> + float bias = biases . get ( ) ? biases [ i * nprobe + j ] : 0 ; <nl> + <nl> + pq_estimators_from_tables_generic < C > ( <nl> + pq , pq . nbits , codes . get ( ) , ls , <nl> + LUT , ids . get ( ) , bias , <nl> + k , heap_dis , heap_ids <nl> + ) ; <nl> + nlist_visited + + ; <nl> + ndis + + ; <nl> + } <nl> + heap_reorder < C > ( k , heap_dis , heap_ids ) ; <nl> + } <nl> + indexIVF_stats . nq + = n ; <nl> + indexIVF_stats . ndis + = ndis ; <nl> + indexIVF_stats . nlist + = nlist_visited ; <nl> + } <nl> + <nl> + template < class C > <nl> + void IndexIVFPQFastScan : : search_implem_2 ( <nl> + idx_t n , const float * x , idx_t k , <nl> + float * distances , idx_t * labels ) const <nl> + { <nl> + FAISS_THROW_IF_NOT ( orig_invlists ) ; <nl> + <nl> + std : : unique_ptr < idx_t [ ] > coarse_ids ( new idx_t [ n * nprobe ] ) ; <nl> + std : : unique_ptr < float [ ] > coarse_dis ( new float [ n * nprobe ] ) ; <nl> + <nl> + quantizer - > search ( n , x , nprobe , coarse_dis . get ( ) , coarse_ids . get ( ) ) ; <nl> + <nl> + size_t dim12 = pq . 
ksub * M2 ; <nl> + AlignedTable < uint8_t > dis_tables ; <nl> + AlignedTable < uint16_t > biases ; <nl> + std : : unique_ptr < float [ ] > normalizers ( new float [ 2 * n ] ) ; <nl> + <nl> + compute_LUT_uint8 ( <nl> + n , x , <nl> + coarse_ids . get ( ) , coarse_dis . get ( ) , <nl> + dis_tables , biases , <nl> + normalizers . get ( ) <nl> + ) ; <nl> + <nl> + <nl> + bool single_LUT = ! ( by_residual & & metric_type = = METRIC_L2 ) ; <nl> + <nl> + size_t ndis = 0 , nlist_visited = 0 ; <nl> + <nl> + # pragma omp parallel for reduction ( + : ndis , nlist_visited ) <nl> + for ( idx_t i = 0 ; i < n ; i + + ) { <nl> + std : : vector < uint16_t > tmp_dis ( k ) ; <nl> + int64_t * heap_ids = labels + i * k ; <nl> + uint16_t * heap_dis = tmp_dis . data ( ) ; <nl> + heap_heapify < C > ( k , heap_dis , heap_ids ) ; <nl> + const uint8_t * LUT = nullptr ; <nl> + <nl> + if ( single_LUT ) { <nl> + LUT = dis_tables . get ( ) + i * dim12 ; <nl> + } <nl> + for ( idx_t j = 0 ; j < nprobe ; j + + ) { <nl> + if ( ! single_LUT ) { <nl> + LUT = dis_tables . get ( ) + ( i * nprobe + j ) * dim12 ; <nl> + } <nl> + idx_t list_no = coarse_ids [ i * nprobe + j ] ; <nl> + if ( list_no < 0 ) continue ; <nl> + size_t ls = orig_invlists - > list_size ( list_no ) ; <nl> + if ( ls = = 0 ) continue ; <nl> + InvertedLists : : ScopedCodes codes ( orig_invlists , list_no ) ; <nl> + InvertedLists : : ScopedIds ids ( orig_invlists , list_no ) ; <nl> + <nl> + uint16_t bias = biases . get ( ) ? biases [ i * nprobe + j ] : 0 ; <nl> + <nl> + pq_estimators_from_tables_generic < C > ( <nl> + pq , pq . nbits , codes . get ( ) , ls , <nl> + LUT , ids . get ( ) , bias , <nl> + k , heap_dis , heap_ids <nl> + ) ; <nl> + <nl> + nlist_visited + + ; <nl> + ndis + = ls ; <nl> + } <nl> + heap_reorder < C > ( k , heap_dis , heap_ids ) ; <nl> + / / convert distances to float <nl> + { <nl> + float one_a = 1 / normalizers [ 2 * i ] , b = normalizers [ 2 * i + 1 ] ; <nl> + if ( skip & 16 ) { <nl> + one_a = 1 ; <nl> + b = 0 ; <nl> + } <nl> + float * heap_dis_float = distances + i * k ; <nl> + for ( int j = 0 ; j < k ; j + + ) { <nl> + heap_dis_float [ j ] = b + heap_dis [ j ] * one_a ; <nl> + } <nl> + } <nl> + } <nl> + indexIVF_stats . nq + = n ; <nl> + indexIVF_stats . ndis + = ndis ; <nl> + indexIVF_stats . nlist + = nlist_visited ; <nl> + } <nl> + <nl> + <nl> + <nl> + template < class C > <nl> + void IndexIVFPQFastScan : : search_implem_10 ( <nl> + idx_t n , const float * x , idx_t k , <nl> + float * distances , idx_t * labels , <nl> + int impl , size_t * ndis_out , size_t * nlist_out ) const <nl> + { <nl> + memset ( distances , - 1 , sizeof ( float ) * k * n ) ; <nl> + memset ( labels , - 1 , sizeof ( idx_t ) * k * n ) ; <nl> + <nl> + using HeapHC = HeapHandler < C , true > ; <nl> + using ReservoirHC = ReservoirHandler < C , true > ; <nl> + using SingleResultHC = SingleResultHandler < C , true > ; <nl> + <nl> + <nl> + std : : unique_ptr < idx_t [ ] > coarse_ids ( new idx_t [ n * nprobe ] ) ; <nl> + std : : unique_ptr < float [ ] > coarse_dis ( new float [ n * nprobe ] ) ; <nl> + <nl> + uint64_t times [ 10 ] ; <nl> + memset ( times , 0 , sizeof ( times ) ) ; <nl> + int ti = 0 ; <nl> + # define TIC times [ ti + + ] = get_cy ( ) <nl> + TIC ; <nl> + <nl> + quantizer - > search ( n , x , nprobe , coarse_dis . get ( ) , coarse_ids . get ( ) ) ; <nl> + <nl> + TIC ; <nl> + <nl> + size_t dim12 = pq . 
ksub * M2 ; <nl> + AlignedTable < uint8_t > dis_tables ; <nl> + AlignedTable < uint16_t > biases ; <nl> + std : : unique_ptr < float [ ] > normalizers ( new float [ 2 * n ] ) ; <nl> + <nl> + compute_LUT_uint8 ( <nl> + n , x , <nl> + coarse_ids . get ( ) , coarse_dis . get ( ) , <nl> + dis_tables , biases , normalizers . get ( ) <nl> + ) ; <nl> + <nl> + TIC ; <nl> + <nl> + bool single_LUT = ! ( by_residual & & metric_type = = METRIC_L2 ) ; <nl> + <nl> + TIC ; <nl> + size_t ndis = 0 , nlist_visited = 0 ; <nl> + <nl> + { <nl> + AlignedTable < uint16_t > tmp_distances ( k ) ; <nl> + for ( idx_t i = 0 ; i < n ; i + + ) { <nl> + const uint8_t * LUT = nullptr ; <nl> + int qmap1 [ 1 ] = { 0 } ; <nl> + std : : unique_ptr < SIMDResultHandler < C , true > > handler ; <nl> + <nl> + if ( k = = 1 ) { <nl> + handler . reset ( new SingleResultHC ( 1 , 0 ) ) ; <nl> + } else if ( impl = = 10 ) { <nl> + handler . reset ( new HeapHC ( 1 , tmp_distances . get ( ) , labels + i * k , k , 0 ) ) ; <nl> + } else if ( impl = = 11 ) { <nl> + handler . reset ( new ReservoirHC ( 1 , 0 , k , 2 * k ) ) ; <nl> + } else { <nl> + FAISS_THROW_MSG ( " invalid " ) ; <nl> + } <nl> + <nl> + handler - > q_map = qmap1 ; <nl> + <nl> + if ( single_LUT ) { <nl> + LUT = dis_tables . get ( ) + i * dim12 ; <nl> + } <nl> + for ( idx_t j = 0 ; j < nprobe ; j + + ) { <nl> + size_t ij = i * nprobe + j ; <nl> + if ( ! single_LUT ) { <nl> + LUT = dis_tables . get ( ) + ij * dim12 ; <nl> + } <nl> + if ( biases . get ( ) ) { <nl> + handler - > dbias = biases . get ( ) + ij ; <nl> + } <nl> + <nl> + idx_t list_no = coarse_ids [ ij ] ; <nl> + if ( list_no < 0 ) continue ; <nl> + size_t ls = invlists - > list_size ( list_no ) ; <nl> + if ( ls = = 0 ) continue ; <nl> + <nl> + InvertedLists : : ScopedCodes codes ( invlists , list_no ) ; <nl> + InvertedLists : : ScopedIds ids ( invlists , list_no ) ; <nl> + <nl> + handler - > ntotal = ls ; <nl> + handler - > id_map = ids . get ( ) ; <nl> + <nl> + # define DISPATCH ( classHC ) \ <nl> + if ( auto * res = dynamic_cast < classHC * > ( handler . get ( ) ) ) { \ <nl> + pq4_accumulate_loop ( \ <nl> + 1 , roundup ( ls , bbs ) , bbs , M2 , \ <nl> + codes . get ( ) , LUT , \ <nl> + * res \ <nl> + ) ; \ <nl> + } <nl> + DISPATCH ( HeapHC ) <nl> + else DISPATCH ( ReservoirHC ) <nl> + else DISPATCH ( SingleResultHC ) <nl> + # undef DISPATCH <nl> + <nl> + nlist_visited + + ; <nl> + ndis + + ; <nl> + } <nl> + <nl> + handler - > to_flat_arrays ( <nl> + distances + i * k , labels + i * k , <nl> + skip & 16 ? nullptr : normalizers . get ( ) + i * 2 <nl> + ) ; <nl> + } <nl> + } <nl> + * ndis_out = ndis ; <nl> + * nlist_out = nlist ; <nl> + } <nl> + <nl> + <nl> + <nl> + template < class C > <nl> + void IndexIVFPQFastScan : : search_implem_12 ( <nl> + idx_t n , const float * x , idx_t k , <nl> + float * distances , idx_t * labels , <nl> + int impl , size_t * ndis_out , size_t * nlist_out ) const <nl> + { <nl> + if ( n = = 0 ) { / / does not work well with reservoir <nl> + return ; <nl> + } <nl> + FAISS_THROW_IF_NOT ( bbs = = 32 ) ; <nl> + <nl> + std : : unique_ptr < idx_t [ ] > coarse_ids ( new idx_t [ n * nprobe ] ) ; <nl> + std : : unique_ptr < float [ ] > coarse_dis ( new float [ n * nprobe ] ) ; <nl> + <nl> + uint64_t times [ 10 ] ; <nl> + memset ( times , 0 , sizeof ( times ) ) ; <nl> + int ti = 0 ; <nl> + # define TIC times [ ti + + ] = get_cy ( ) <nl> + TIC ; <nl> + <nl> + quantizer - > search ( n , x , nprobe , coarse_dis . get ( ) , coarse_ids . get ( ) ) ; <nl> + <nl> + TIC ; <nl> + <nl> + size_t dim12 = pq . 
ksub * M2 ; <nl> + AlignedTable < uint8_t > dis_tables ; <nl> + AlignedTable < uint16_t > biases ; <nl> + std : : unique_ptr < float [ ] > normalizers ( new float [ 2 * n ] ) ; <nl> + <nl> + compute_LUT_uint8 ( <nl> + n , x , <nl> + coarse_ids . get ( ) , coarse_dis . get ( ) , <nl> + dis_tables , biases , normalizers . get ( ) <nl> + ) ; <nl> + <nl> + TIC ; <nl> + <nl> + struct QC { <nl> + int qno ; <nl> + int list_no ; <nl> + int rank ; <nl> + } ; <nl> + bool single_LUT = ! ( by_residual & & metric_type = = METRIC_L2 ) ; <nl> + <nl> + std : : vector < QC > qcs ; <nl> + { <nl> + int ij = 0 ; <nl> + for ( int i = 0 ; i < n ; i + + ) { <nl> + for ( int j = 0 ; j < nprobe ; j + + ) { <nl> + if ( coarse_ids [ ij ] > = 0 ) { <nl> + qcs . push_back ( QC { i , int ( coarse_ids [ ij ] ) , int ( j ) } ) ; <nl> + } <nl> + ij + + ; <nl> + } <nl> + } <nl> + std : : sort ( <nl> + qcs . begin ( ) , qcs . end ( ) , <nl> + [ ] ( const QC & a , const QC & b ) { <nl> + return a . list_no < b . list_no ; <nl> + } <nl> + ) ; <nl> + } <nl> + TIC ; <nl> + <nl> + std : : unique_ptr < SIMDResultHandler < C , true > > handler ; <nl> + AlignedTable < uint16_t > tmp_distances ; <nl> + <nl> + using HeapHC = HeapHandler < C , true > ; <nl> + using ReservoirHC = ReservoirHandler < C , true > ; <nl> + using SingleResultHC = SingleResultHandler < C , true > ; <nl> + <nl> + if ( k = = 1 ) { <nl> + handler . reset ( new SingleResultHC ( n , 0 ) ) ; <nl> + } else if ( impl = = 12 ) { <nl> + tmp_distances . resize ( n * k ) ; <nl> + handler . reset ( new HeapHC ( n , tmp_distances . get ( ) , labels , k , 0 ) ) ; <nl> + } else if ( impl = = 13 ) { <nl> + handler . reset ( new ReservoirHC ( n , 0 , k , 2 * k ) ) ; <nl> + } <nl> + <nl> + int qbs2 = this - > qbs2 ? this - > qbs2 : 11 ; <nl> + <nl> + std : : vector < uint16_t > tmp_bias ; <nl> + if ( biases . get ( ) ) { <nl> + tmp_bias . resize ( qbs2 ) ; <nl> + handler - > dbias = tmp_bias . data ( ) ; <nl> + } <nl> + TIC ; <nl> + <nl> + size_t ndis = 0 ; <nl> + <nl> + size_t i0 = 0 ; <nl> + uint64_t t_copy_pack = 0 , t_scan = 0 ; <nl> + while ( i0 < qcs . size ( ) ) { <nl> + uint64_t tt0 = get_cy ( ) ; <nl> + <nl> + int list_no = qcs [ i0 ] . list_no ; <nl> + size_t i1 = i0 + 1 ; <nl> + <nl> + while ( i1 < qcs . size ( ) & & i1 < i0 + qbs2 ) { <nl> + if ( qcs [ i1 ] . list_no ! = list_no ) { <nl> + break ; <nl> + } <nl> + i1 + + ; <nl> + } <nl> + <nl> + size_t list_size = invlists - > list_size ( list_no ) ; <nl> + <nl> + if ( list_size = = 0 ) { <nl> + i0 = i1 ; <nl> + continue ; <nl> + } <nl> + <nl> + int nc = i1 - i0 ; <nl> + <nl> + std : : vector < int > q_map ( nc ) , lut_entries ( nc ) ; <nl> + AlignedTable < uint8_t > LUT ( nc * dim12 ) ; <nl> + memset ( LUT . get ( ) , - 1 , nc * dim12 ) ; <nl> + int qbs = pq4_preferred_qbs ( nc ) ; <nl> + <nl> + for ( size_t i = i0 ; i < i1 ; i + + ) { <nl> + const QC & qc = qcs [ i ] ; <nl> + q_map [ i - i0 ] = qc . qno ; <nl> + int ij = qc . qno * nprobe + qc . rank ; <nl> + lut_entries [ i - i0 ] = single_LUT ? qc . qno : ij ; <nl> + if ( biases . get ( ) ) { <nl> + tmp_bias [ i - i0 ] = biases [ ij ] ; <nl> + } <nl> + } <nl> + pq4_pack_LUT_qbs_q_map ( <nl> + qbs , M2 , dis_tables . get ( ) , lut_entries . data ( ) , <nl> + LUT . get ( ) <nl> + ) ; <nl> + <nl> + ndis + = ( i1 - i0 ) * list_size ; <nl> + <nl> + InvertedLists : : ScopedCodes codes ( invlists , list_no ) ; <nl> + InvertedLists : : ScopedIds ids ( invlists , list_no ) ; <nl> + <nl> + handler - > ntotal = list_size ; <nl> + handler - > q_map = q_map . 
data ( ) ; <nl> + handler - > id_map = ids . get ( ) ; <nl> + uint64_t tt1 = get_cy ( ) ; <nl> + <nl> + # define DISPATCH ( classHC ) \ <nl> + if ( auto * res = dynamic_cast < classHC * > ( handler . get ( ) ) ) { \ <nl> + pq4_accumulate_loop_qbs ( \ <nl> + qbs , list_size , M2 , \ <nl> + codes . get ( ) , LUT . get ( ) , \ <nl> + * res \ <nl> + ) ; \ <nl> + } <nl> + DISPATCH ( HeapHC ) <nl> + else DISPATCH ( ReservoirHC ) <nl> + else DISPATCH ( SingleResultHC ) <nl> + <nl> + uint64_t tt2 = get_cy ( ) ; <nl> + t_copy_pack + = tt1 - tt0 ; <nl> + t_scan + = tt2 - tt1 ; <nl> + i0 = i1 ; <nl> + } <nl> + TIC ; <nl> + <nl> + / / labels is the same array <nl> + handler - > to_flat_arrays ( <nl> + distances , labels , <nl> + skip & 16 ? nullptr : normalizers . get ( ) <nl> + ) ; <nl> + <nl> + TIC ; <nl> + <nl> + / / these stats are not thread - safe <nl> + <nl> + for ( int i = 1 ; i < ti ; i + + ) { <nl> + IVFFastScan_stats . times [ i ] + = times [ i ] - times [ i - 1 ] ; <nl> + } <nl> + IVFFastScan_stats . t_copy_pack + = t_copy_pack ; <nl> + IVFFastScan_stats . t_scan + = t_scan ; <nl> + <nl> + if ( auto * rh = dynamic_cast < ReservoirHC * > ( handler . get ( ) ) ) { <nl> + for ( int i = 0 ; i < 4 ; i + + ) { <nl> + IVFFastScan_stats . reservoir_times [ i ] + = rh - > times [ i ] ; <nl> + } <nl> + } <nl> + <nl> + * ndis_out = ndis ; <nl> + * nlist_out = nlist ; <nl> + <nl> + } <nl> + <nl> + <nl> + IVFFastScanStats IVFFastScan_stats ; <nl> + <nl> + } / / namespace faiss <nl> new file mode 100644 <nl> index 000000000 . . d9c9e661d <nl> mmm / dev / null <nl> ppp b / faiss / IndexIVFPQFastScan . h <nl> <nl> + / * * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the root directory of this source tree . <nl> + * / <nl> + <nl> + # pragma once <nl> + <nl> + # include < memory > <nl> + <nl> + # include < faiss / IndexIVFPQ . h > <nl> + # include < faiss / impl / ProductQuantizer . h > <nl> + # include < faiss / utils / AlignedTable . h > <nl> + <nl> + namespace faiss { <nl> + <nl> + <nl> + / * * Fast scan version of IVFPQ . Works for 4 - bit PQ for now . <nl> + * <nl> + * The codes in the inverted lists are not stored sequentially but <nl> + * grouped in blocks of size bbs . This makes it possible to very quickly <nl> + * compute distances with SIMD instructions . <nl> + * <nl> + * Implementations ( implem ) : <nl> + * 0 : auto - select implementation ( default ) <nl> + * 1 : orig ' s search , re - implemented <nl> + * 2 : orig ' s search , re - ordered by invlist <nl> + * 10 : optimizer int16 search , collect results in heap , no qbs <nl> + * 11 : idem , collect results in reservoir <nl> + * 12 : optimizer int16 search , collect results in heap , uses qbs <nl> + * 13 : idem , collect results in reservoir <nl> + * / <nl> + <nl> + struct IndexIVFPQFastScan : IndexIVF { <nl> + <nl> + bool by_residual ; / / / < Encode residual or plain vector ? <nl> + ProductQuantizer pq ; / / / < produces the codes <nl> + <nl> + / / size of the kernel <nl> + int bbs ; / / set at build time <nl> + <nl> + / / M rounded up to a multiple of 2 <nl> + size_t M2 ; <nl> + <nl> + / / / precomputed tables management <nl> + int use_precomputed_table = 0 ; <nl> + / / / if use_precompute_table size ( nlist , pq . M , pq . 
ksub ) <nl> + AlignedTable < float > precomputed_table ; <nl> + <nl> + / / search - time implementation <nl> + int implem = 0 ; <nl> + / / skip some parts of the computation ( for timing ) <nl> + int skip = 0 ; <nl> + <nl> + / / batching factors at search time ( 0 = default ) <nl> + int qbs = 0 ; <nl> + size_t qbs2 = 0 ; <nl> + <nl> + IndexIVFPQFastScan ( <nl> + Index * quantizer , size_t d , size_t nlist , <nl> + size_t M , size_t nbits_per_idx , <nl> + MetricType metric = METRIC_L2 , int bbs = 32 ) ; <nl> + <nl> + IndexIVFPQFastScan ( ) ; <nl> + <nl> + / / built from an IndexIVFPQ <nl> + explicit IndexIVFPQFastScan ( const IndexIVFPQ & orig , int bbs = 32 ) ; <nl> + <nl> + / / / orig ' s inverted lists ( for debugging ) <nl> + InvertedLists * orig_invlists = nullptr ; <nl> + <nl> + void train_residual ( idx_t n , const float * x ) override ; <nl> + <nl> + / / / build precomputed table , possibly updating use_precomputed_table <nl> + void precompute_table ( ) ; <nl> + <nl> + / / / same as the regular IVFPQ encoder . The codes are not reorganized by <nl> + / / / blocks a that point <nl> + void encode_vectors ( <nl> + idx_t n , const float * x , <nl> + const idx_t * list_nos , uint8_t * codes , <nl> + bool include_listno = false ) const override ; <nl> + <nl> + void add_with_ids ( <nl> + idx_t n , const float * x , const idx_t * xids ) override ; <nl> + <nl> + void search ( <nl> + idx_t n , const float * x , idx_t k , <nl> + float * distances , idx_t * labels ) const override ; <nl> + <nl> + / / prepare look - up tables <nl> + <nl> + void compute_LUT ( <nl> + size_t n , const float * x , <nl> + const idx_t * coarse_ids , const float * coarse_dis , <nl> + AlignedTable < float > & dis_tables , <nl> + AlignedTable < float > & biases <nl> + ) const ; <nl> + <nl> + void compute_LUT_uint8 ( <nl> + size_t n , const float * x , <nl> + const idx_t * coarse_ids , const float * coarse_dis , <nl> + AlignedTable < uint8_t > & dis_tables , <nl> + AlignedTable < uint16_t > & biases , <nl> + float * normalizers <nl> + ) const ; <nl> + <nl> + / / internal search funcs <nl> + <nl> + template < bool is_max > <nl> + void search_dispatch_implem ( <nl> + idx_t n , const float * x , idx_t k , <nl> + float * distances , idx_t * labels ) const ; <nl> + <nl> + template < class C > <nl> + void search_implem_1 ( <nl> + idx_t n , const float * x , idx_t k , <nl> + float * distances , idx_t * labels ) const ; <nl> + <nl> + template < class C > <nl> + void search_implem_2 ( <nl> + idx_t n , const float * x , idx_t k , <nl> + float * distances , idx_t * labels ) const ; <nl> + <nl> + / / implem 10 and 12 are not multithreaded internally , so <nl> + / / export search stats <nl> + template < class C > <nl> + void search_implem_10 ( <nl> + idx_t n , const float * x , idx_t k , <nl> + float * distances , idx_t * labels , <nl> + int impl , size_t * ndis_out , size_t * nlist_out ) const ; <nl> + <nl> + template < class C > <nl> + void search_implem_12 ( <nl> + idx_t n , const float * x , idx_t k , <nl> + float * distances , idx_t * labels , <nl> + int impl , size_t * ndis_out , size_t * nlist_out ) const ; <nl> + <nl> + <nl> + <nl> + } ; <nl> + <nl> + struct IVFFastScanStats { <nl> + uint64_t times [ 10 ] ; <nl> + uint64_t t_compute_distance_tables , t_round ; <nl> + uint64_t t_copy_pack , t_scan , t_to_flat ; <nl> + uint64_t reservoir_times [ 4 ] ; <nl> + <nl> + double Mcy_at ( int i ) { <nl> + return times [ i ] / ( 1000 * 1000 . 
0 ) ; <nl> + } <nl> + <nl> + double Mcy_reservoir_at ( int i ) { <nl> + return reservoir_times [ i ] / ( 1000 * 1000 . 0 ) ; <nl> + } <nl> + IVFFastScanStats ( ) { reset ( ) ; } <nl> + void reset ( ) { <nl> + memset ( this , 0 , sizeof ( * this ) ) ; <nl> + } <nl> + } ; <nl> + <nl> + FAISS_API extern IVFFastScanStats IVFFastScan_stats ; <nl> + <nl> + <nl> + } / / namespace faiss <nl> new file mode 100644 <nl> index 000000000 . . dc8295f97 <nl> mmm / dev / null <nl> ppp b / faiss / IndexPQFastScan . cpp <nl> <nl> + / * * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the root directory of this source tree . <nl> + * / <nl> + <nl> + <nl> + # include < faiss / IndexPQFastScan . h > <nl> + <nl> + # include < cassert > <nl> + # include < memory > <nl> + # include < limits . h > <nl> + <nl> + # include < omp . h > <nl> + <nl> + # include < faiss / impl / FaissAssert . h > <nl> + # include < faiss / utils / utils . h > <nl> + # include < faiss / utils / random . h > <nl> + <nl> + # include < faiss / impl / simd_result_handlers . h > <nl> + # include < faiss / utils / quantize_lut . h > <nl> + # include < faiss / impl / pq4_fast_scan . h > <nl> + <nl> + <nl> + namespace faiss { <nl> + <nl> + using namespace simd_result_handlers ; <nl> + <nl> + inline size_t roundup ( size_t a , size_t b ) { <nl> + return ( a + b - 1 ) / b * b ; <nl> + } <nl> + <nl> + IndexPQFastScan : : IndexPQFastScan ( <nl> + int d , size_t M , size_t nbits , <nl> + MetricType metric , <nl> + int bbs ) : <nl> + Index ( d , metric ) , pq ( d , M , nbits ) , <nl> + bbs ( bbs ) , ntotal2 ( 0 ) , M2 ( roundup ( M , 2 ) ) <nl> + { <nl> + is_trained = false ; <nl> + } <nl> + <nl> + IndexPQFastScan : : IndexPQFastScan ( ) : <nl> + bbs ( 0 ) , ntotal2 ( 0 ) , M2 ( 0 ) <nl> + { } <nl> + <nl> + IndexPQFastScan : : IndexPQFastScan ( const IndexPQ & orig , int bbs ) : <nl> + Index ( orig . d , orig . metric_type ) , <nl> + pq ( orig . pq ) , <nl> + bbs ( bbs ) <nl> + { <nl> + FAISS_THROW_IF_NOT ( orig . pq . nbits = = 4 ) ; <nl> + ntotal = orig . ntotal ; <nl> + is_trained = orig . is_trained ; <nl> + orig_codes = orig . codes . data ( ) ; <nl> + <nl> + qbs = 0 ; / / means use default <nl> + <nl> + / / pack the codes <nl> + <nl> + size_t M = pq . M ; <nl> + <nl> + FAISS_THROW_IF_NOT ( bbs % 32 = = 0 ) ; <nl> + M2 = roundup ( M , 2 ) ; <nl> + ntotal2 = roundup ( ntotal , bbs ) ; <nl> + <nl> + codes . resize ( ntotal2 * M2 / 2 ) ; <nl> + <nl> + / / printf ( " M = % d M2 = % d code_size = % d \ n " , M , M2 , pq . code_size ) ; <nl> + pq4_pack_codes ( <nl> + orig . codes . data ( ) , <nl> + ntotal , M , <nl> + ntotal2 , bbs , M2 , <nl> + codes . get ( ) <nl> + ) ; <nl> + } <nl> + <nl> + void IndexPQFastScan : : train ( idx_t n , const float * x ) <nl> + { <nl> + if ( is_trained ) { <nl> + return ; <nl> + } <nl> + pq . train ( n , x ) ; <nl> + is_trained = true ; <nl> + } <nl> + <nl> + <nl> + void IndexPQFastScan : : add ( idx_t n , const float * x ) { <nl> + FAISS_THROW_IF_NOT ( is_trained ) ; <nl> + AlignedTable < uint8_t > tmp_codes ( n * pq . code_size ) ; <nl> + pq . compute_codes ( x , tmp_codes . get ( ) , n ) ; <nl> + ntotal2 = roundup ( ntotal + n , bbs ) ; <nl> + size_t new_size = ntotal2 * M2 / 2 ; <nl> + size_t old_size = codes . size ( ) ; <nl> + if ( new_size > old_size ) { <nl> + codes . resize ( new_size ) ; <nl> + memset ( codes . 
get ( ) + old_size , 0 , new_size - old_size ) ; <nl> + } <nl> + pq4_pack_codes_range ( <nl> + tmp_codes . get ( ) , pq . M , ntotal , ntotal + n , <nl> + bbs , M2 , codes . get ( ) <nl> + ) ; <nl> + ntotal + = n ; <nl> + } <nl> + <nl> + void IndexPQFastScan : : reset ( ) <nl> + { <nl> + codes . resize ( 0 ) ; <nl> + ntotal = 0 ; <nl> + } <nl> + <nl> + <nl> + <nl> + namespace { <nl> + <nl> + / / from impl / ProductQuantizer . cpp <nl> + template < class C , typename dis_t > <nl> + void pq_estimators_from_tables_generic ( <nl> + const ProductQuantizer & pq , size_t nbits , <nl> + const uint8_t * codes , size_t ncodes , <nl> + const dis_t * dis_table , size_t k , <nl> + typename C : : T * heap_dis , int64_t * heap_ids ) <nl> + { <nl> + using accu_t = typename C : : T ; <nl> + const size_t M = pq . M ; <nl> + const size_t ksub = pq . ksub ; <nl> + for ( size_t j = 0 ; j < ncodes ; + + j ) { <nl> + PQDecoderGeneric decoder ( <nl> + codes + j * pq . code_size , nbits <nl> + ) ; <nl> + accu_t dis = 0 ; <nl> + const dis_t * __restrict dt = dis_table ; <nl> + for ( size_t m = 0 ; m < M ; m + + ) { <nl> + uint64_t c = decoder . decode ( ) ; <nl> + dis + = dt [ c ] ; <nl> + dt + = ksub ; <nl> + } <nl> + <nl> + if ( C : : cmp ( heap_dis [ 0 ] , dis ) ) { <nl> + heap_pop < C > ( k , heap_dis , heap_ids ) ; <nl> + heap_push < C > ( k , heap_dis , heap_ids , dis , j ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> + } / / anonymous namespace <nl> + <nl> + <nl> + using namespace quantize_lut ; <nl> + <nl> + void IndexPQFastScan : : compute_quantized_LUT ( <nl> + idx_t n , const float * x , <nl> + uint8_t * lut , float * normalizers ) const <nl> + { <nl> + size_t dim12 = pq . ksub * pq . M ; <nl> + std : : unique_ptr < float [ ] > dis_tables ( new float [ n * dim12 ] ) ; <nl> + if ( metric_type = = METRIC_L2 ) { <nl> + pq . compute_distance_tables ( n , x , dis_tables . get ( ) ) ; <nl> + } else { <nl> + pq . compute_inner_prod_tables ( n , x , dis_tables . get ( ) ) ; <nl> + } <nl> + <nl> + for ( uint64_t i = 0 ; i < n ; i + + ) { <nl> + round_uint8_per_column ( <nl> + dis_tables . get ( ) + i * dim12 , pq . M , pq . ksub , <nl> + & normalizers [ 2 * i ] , & normalizers [ 2 * i + 1 ] <nl> + ) ; <nl> + } <nl> + <nl> + for ( uint64_t i = 0 ; i < n ; i + + ) { <nl> + const float * t_in = dis_tables . get ( ) + i * dim12 ; <nl> + uint8_t * t_out = lut + i * M2 * pq . ksub ; <nl> + <nl> + for ( int j = 0 ; j < dim12 ; j + + ) { <nl> + t_out [ j ] = int ( t_in [ j ] ) ; <nl> + } <nl> + memset ( t_out + dim12 , 0 , ( M2 - pq . M ) * pq . 
ksub ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Search driver routine <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + <nl> + void IndexPQFastScan : : search ( <nl> + idx_t n , const float * x , idx_t k , <nl> + float * distances , idx_t * labels ) const <nl> + { <nl> + if ( metric_type = = METRIC_L2 ) { <nl> + search_dispatch_implem < true > ( n , x , k , distances , labels ) ; <nl> + } else { <nl> + search_dispatch_implem < false > ( n , x , k , distances , labels ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + template < bool is_max > <nl> + void IndexPQFastScan : : search_dispatch_implem ( <nl> + idx_t n , <nl> + const float * x , <nl> + idx_t k , <nl> + float * distances , <nl> + idx_t * labels ) const <nl> + { <nl> + using Cfloat = typename std : : conditional < is_max , <nl> + CMax < float , int64_t > , CMin < float , int64_t > > : : type ; <nl> + <nl> + using C = typename std : : conditional < is_max , <nl> + CMax < uint16_t , int > , CMin < uint16_t , int > > : : type ; <nl> + <nl> + if ( n = = 0 ) { <nl> + return ; <nl> + } <nl> + <nl> + / / actual implementation used <nl> + int impl = implem ; <nl> + <nl> + if ( impl = = 0 ) { <nl> + if ( bbs = = 32 ) { <nl> + impl = 12 ; <nl> + } else { <nl> + impl = 14 ; <nl> + } <nl> + if ( k > 20 ) { <nl> + impl + + ; <nl> + } <nl> + } <nl> + <nl> + if ( implem = = 1 ) { <nl> + FAISS_THROW_IF_NOT ( orig_codes ) ; <nl> + FAISS_THROW_IF_NOT ( is_max ) ; <nl> + float_maxheap_array_t res = { <nl> + size_t ( n ) , size_t ( k ) , labels , distances } ; <nl> + pq . search ( x , n , orig_codes , ntotal , & res , true ) ; <nl> + } else if ( implem = = 2 | | implem = = 3 | | implem = = 4 ) { <nl> + FAISS_THROW_IF_NOT ( orig_codes ) ; <nl> + <nl> + size_t dim12 = pq . ksub * pq . M ; <nl> + std : : unique_ptr < float [ ] > dis_tables ( new float [ n * dim12 ] ) ; <nl> + if ( is_max ) { <nl> + pq . compute_distance_tables ( n , x , dis_tables . get ( ) ) ; <nl> + } else { <nl> + pq . compute_inner_prod_tables ( n , x , dis_tables . get ( ) ) ; <nl> + } <nl> + <nl> + std : : vector < float > normalizers ( n * 2 ) ; <nl> + <nl> + if ( implem = = 2 ) { <nl> + / / default float <nl> + } else if ( implem = = 3 | | implem = = 4 ) { <nl> + for ( uint64_t i = 0 ; i < n ; i + + ) { <nl> + round_uint8_per_column ( <nl> + dis_tables . get ( ) + i * dim12 , pq . M , <nl> + pq . ksub , <nl> + & normalizers [ 2 * i ] , & normalizers [ 2 * i + 1 ] <nl> + ) ; <nl> + } <nl> + } <nl> + <nl> + for ( int64_t i = 0 ; i < n ; i + + ) { <nl> + int64_t * heap_ids = labels + i * k ; <nl> + float * heap_dis = distances + i * k ; <nl> + <nl> + heap_heapify < Cfloat > ( k , heap_dis , heap_ids ) ; <nl> + <nl> + pq_estimators_from_tables_generic < Cfloat > ( <nl> + pq , pq . nbits , orig_codes , ntotal , <nl> + dis_tables . 
get ( ) + i * dim12 , <nl> + k , heap_dis , heap_ids <nl> + ) ; <nl> + <nl> + heap_reorder < Cfloat > ( k , heap_dis , heap_ids ) ; <nl> + <nl> + if ( implem = = 4 ) { <nl> + float a = normalizers [ 2 * i ] ; <nl> + float b = normalizers [ 2 * i + 1 ] ; <nl> + <nl> + for ( int j = 0 ; j < k ; j + + ) { <nl> + heap_dis [ j ] = heap_dis [ j ] / a + b ; <nl> + } <nl> + } <nl> + } <nl> + } else if ( impl > = 12 & & impl < = 15 ) { <nl> + FAISS_THROW_IF_NOT ( ntotal < INT_MAX ) ; <nl> + int nt = std : : min ( omp_get_max_threads ( ) , int ( n ) ) ; <nl> + if ( nt < 2 ) { <nl> + if ( impl = = 12 | | impl = = 13 ) { <nl> + search_implem_12 < C > ( n , x , k , distances , labels , impl ) ; <nl> + } else { <nl> + search_implem_14 < C > ( n , x , k , distances , labels , impl ) ; <nl> + } <nl> + } else { <nl> + / / explicitly slice over threads <nl> + # pragma omp parallel for num_threads ( nt ) <nl> + for ( int slice = 0 ; slice < nt ; slice + + ) { <nl> + idx_t i0 = n * slice / nt ; <nl> + idx_t i1 = n * ( slice + 1 ) / nt ; <nl> + float * dis_i = distances + i0 * k ; <nl> + idx_t * lab_i = labels + i0 * k ; <nl> + if ( impl = = 12 | | impl = = 13 ) { <nl> + search_implem_12 < C > ( <nl> + i1 - i0 , x + i0 * d , k , dis_i , lab_i , impl ) ; <nl> + } else { <nl> + search_implem_14 < C > ( <nl> + i1 - i0 , x + i0 * d , k , dis_i , lab_i , impl ) ; <nl> + } <nl> + } <nl> + } <nl> + } else { <nl> + FAISS_THROW_FMT ( " invalid implem % d impl = % d " , implem , impl ) ; <nl> + } <nl> + <nl> + } <nl> + <nl> + template < class C > <nl> + void IndexPQFastScan : : search_implem_12 ( <nl> + idx_t n , const float * x , idx_t k , <nl> + float * distances , idx_t * labels , <nl> + int impl ) const <nl> + { <nl> + <nl> + FAISS_THROW_IF_NOT ( bbs = = 32 ) ; <nl> + <nl> + / / handle qbs2 blocking by recursive call <nl> + int64_t qbs2 = this - > qbs = = 0 ? 11 : pq4_qbs_to_nq ( this - > qbs ) ; <nl> + if ( n > qbs2 ) { <nl> + for ( int64_t i0 = 0 ; i0 < n ; i0 + = qbs2 ) { <nl> + int64_t i1 = std : : min ( i0 + qbs2 , n ) ; <nl> + search_implem_12 < C > ( <nl> + i1 - i0 , x + d * i0 , k , <nl> + distances + i0 * k , labels + i0 * k , impl <nl> + ) ; <nl> + } <nl> + return ; <nl> + } <nl> + <nl> + size_t dim12 = pq . ksub * M2 ; <nl> + AlignedTable < uint8_t > quantized_dis_tables ( n * dim12 ) ; <nl> + std : : unique_ptr < float [ ] > normalizers ( new float [ 2 * n ] ) ; <nl> + <nl> + if ( skip & 1 ) { <nl> + quantized_dis_tables . clear ( ) ; <nl> + } else { <nl> + compute_quantized_LUT ( <nl> + n , x , quantized_dis_tables . get ( ) , normalizers . get ( ) <nl> + ) ; <nl> + } <nl> + <nl> + AlignedTable < uint8_t > LUT ( n * dim12 ) ; <nl> + <nl> + / / block sizes are encoded in qbs , 4 bits at a time <nl> + <nl> + / / caution : we override an object field <nl> + int qbs = this - > qbs ; <nl> + <nl> + if ( n ! = pq4_qbs_to_nq ( qbs ) ) { <nl> + qbs = pq4_preferred_qbs ( n ) ; <nl> + } <nl> + <nl> + int LUT_nq = pq4_pack_LUT_qbs ( <nl> + qbs , M2 , quantized_dis_tables . get ( ) , LUT . get ( ) <nl> + ) ; <nl> + FAISS_THROW_IF_NOT ( LUT_nq = = n ) ; <nl> + <nl> + if ( k = = 1 ) { <nl> + SingleResultHandler < C > handler ( n , ntotal ) ; <nl> + if ( skip & 4 ) { <nl> + / / pass <nl> + } else { <nl> + handler . disable = bool ( skip & 2 ) ; <nl> + pq4_accumulate_loop_qbs ( <nl> + qbs , ntotal2 , M2 , <nl> + codes . get ( ) , LUT . get ( ) , <nl> + handler <nl> + ) ; <nl> + } <nl> + <nl> + handler . to_flat_arrays ( distances , labels , normalizers . 
get ( ) ) ; <nl> + <nl> + } else if ( impl = = 12 ) { <nl> + <nl> + std : : vector < uint16_t > tmp_dis ( n * k ) ; <nl> + std : : vector < int32_t > tmp_ids ( n * k ) ; <nl> + <nl> + if ( skip & 4 ) { <nl> + / / skip <nl> + } else { <nl> + HeapHandler < C > handler ( n , tmp_dis . data ( ) , tmp_ids . data ( ) , k , ntotal ) ; <nl> + handler . disable = bool ( skip & 2 ) ; <nl> + <nl> + pq4_accumulate_loop_qbs ( <nl> + qbs , ntotal2 , M2 , <nl> + codes . get ( ) , LUT . get ( ) , <nl> + handler <nl> + ) ; <nl> + <nl> + if ( ! ( skip & 8 ) ) { <nl> + handler . to_flat_arrays ( distances , labels , normalizers . get ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + } else { / / impl = = 13 <nl> + <nl> + ReservoirHandler < C > handler ( n , ntotal , k , 2 * k ) ; <nl> + handler . disable = bool ( skip & 2 ) ; <nl> + <nl> + if ( skip & 4 ) { <nl> + / / skip <nl> + } else { <nl> + pq4_accumulate_loop_qbs ( <nl> + qbs , ntotal2 , M2 , <nl> + codes . get ( ) , LUT . get ( ) , <nl> + handler <nl> + ) ; <nl> + } <nl> + <nl> + if ( ! ( skip & 8 ) ) { <nl> + handler . to_flat_arrays ( distances , labels , normalizers . get ( ) ) ; <nl> + } <nl> + <nl> + FastScan_stats . t0 + = handler . times [ 0 ] ; <nl> + FastScan_stats . t1 + = handler . times [ 1 ] ; <nl> + FastScan_stats . t2 + = handler . times [ 2 ] ; <nl> + FastScan_stats . t3 + = handler . times [ 3 ] ; <nl> + <nl> + } <nl> + } <nl> + <nl> + FastScanStats FastScan_stats ; <nl> + <nl> + template < class C > <nl> + void IndexPQFastScan : : search_implem_14 ( <nl> + idx_t n , const float * x , idx_t k , <nl> + float * distances , idx_t * labels , int impl ) const <nl> + { <nl> + <nl> + FAISS_THROW_IF_NOT ( bbs % 32 = = 0 ) ; <nl> + <nl> + int qbs2 = qbs = = 0 ? 4 : qbs ; <nl> + <nl> + / / handle qbs2 blocking by recursive call <nl> + if ( n > qbs2 ) { <nl> + for ( int64_t i0 = 0 ; i0 < n ; i0 + = qbs2 ) { <nl> + int64_t i1 = std : : min ( i0 + qbs2 , n ) ; <nl> + search_implem_14 < C > ( <nl> + i1 - i0 , x + d * i0 , k , <nl> + distances + i0 * k , labels + i0 * k , impl <nl> + ) ; <nl> + } <nl> + return ; <nl> + } <nl> + <nl> + size_t dim12 = pq . ksub * M2 ; <nl> + AlignedTable < uint8_t > quantized_dis_tables ( n * dim12 ) ; <nl> + std : : unique_ptr < float [ ] > normalizers ( new float [ 2 * n ] ) ; <nl> + <nl> + if ( skip & 1 ) { <nl> + quantized_dis_tables . clear ( ) ; <nl> + } else { <nl> + compute_quantized_LUT ( <nl> + n , x , quantized_dis_tables . get ( ) , normalizers . get ( ) <nl> + ) ; <nl> + } <nl> + <nl> + AlignedTable < uint8_t > LUT ( n * dim12 ) ; <nl> + pq4_pack_LUT ( n , M2 , quantized_dis_tables . get ( ) , LUT . get ( ) ) ; <nl> + <nl> + if ( k = = 1 ) { <nl> + SingleResultHandler < C > handler ( n , ntotal ) ; <nl> + if ( skip & 4 ) { <nl> + / / pass <nl> + } else { <nl> + handler . disable = bool ( skip & 2 ) ; <nl> + pq4_accumulate_loop ( <nl> + n , ntotal2 , bbs , M2 , <nl> + codes . get ( ) , LUT . get ( ) , <nl> + handler <nl> + ) ; <nl> + } <nl> + handler . to_flat_arrays ( distances , labels , normalizers . get ( ) ) ; <nl> + <nl> + } else if ( impl = = 14 ) { <nl> + <nl> + std : : vector < uint16_t > tmp_dis ( n * k ) ; <nl> + std : : vector < int32_t > tmp_ids ( n * k ) ; <nl> + <nl> + if ( skip & 4 ) { <nl> + / / skip <nl> + } else if ( k > 1 ) { <nl> + HeapHandler < C > handler ( n , tmp_dis . data ( ) , tmp_ids . data ( ) , k , ntotal ) ; <nl> + handler . disable = bool ( skip & 2 ) ; <nl> + <nl> + pq4_accumulate_loop ( <nl> + n , ntotal2 , bbs , M2 , <nl> + codes . get ( ) , LUT . 
get ( ) , <nl> + handler <nl> + ) ; <nl> + <nl> + if ( ! ( skip & 8 ) ) { <nl> + handler . to_flat_arrays ( distances , labels , normalizers . get ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + } else { / / impl = = 15 <nl> + <nl> + ReservoirHandler < C > handler ( n , ntotal , k , 2 * k ) ; <nl> + handler . disable = bool ( skip & 2 ) ; <nl> + <nl> + if ( skip & 4 ) { <nl> + / / skip <nl> + } else { <nl> + pq4_accumulate_loop ( <nl> + n , ntotal2 , bbs , M2 , <nl> + codes . get ( ) , LUT . get ( ) , <nl> + handler <nl> + ) ; <nl> + } <nl> + <nl> + if ( ! ( skip & 8 ) ) { <nl> + handler . to_flat_arrays ( distances , labels , normalizers . get ( ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> + } / / namespace faiss <nl> new file mode 100644 <nl> index 000000000 . . ee32f2fdb <nl> mmm / dev / null <nl> ppp b / faiss / IndexPQFastScan . h <nl> <nl> + / * * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the root directory of this source tree . <nl> + * / <nl> + <nl> + <nl> + # pragma once <nl> + <nl> + # include < faiss / IndexPQ . h > <nl> + # include < faiss / impl / ProductQuantizer . h > <nl> + # include < faiss / utils / AlignedTable . h > <nl> + <nl> + <nl> + namespace faiss { <nl> + <nl> + <nl> + / * * Fast scan version of IndexPQ . Works for 4 - bit PQ for now . <nl> + * <nl> + * The codes are not stored sequentially but grouped in blocks of size bbs . <nl> + * This makes it possible to compute distances quickly with SIMD instructions . <nl> + * <nl> + * Implementations : <nl> + * 12 : blocked loop with internal loop on Q with qbs <nl> + * 13 : same with reservoir accumulator to store results <nl> + * 14 : no qbs with heap accumulator <nl> + * 15 : no qbs with reservoir accumulator <nl> + * / <nl> + <nl> + struct IndexPQFastScan : Index { <nl> + ProductQuantizer pq ; <nl> + <nl> + / / implementation to select <nl> + int implem = 0 ; <nl> + / / skip some parts of the computation ( for timing ) <nl> + int skip = 0 ; <nl> + <nl> + / / size of the kernel <nl> + int bbs ; / / set at build time <nl> + int qbs = 0 ; / / query block size 0 = use default <nl> + <nl> + / / packed version of the codes <nl> + size_t ntotal2 ; <nl> + size_t M2 ; <nl> + <nl> + AlignedTable < uint8_t > codes ; <nl> + <nl> + / / this is for testing purposes only ( set when initialized by IndexPQ ) <nl> + const uint8_t * orig_codes = nullptr ; <nl> + <nl> + IndexPQFastScan ( <nl> + int d , size_t M , size_t nbits , <nl> + MetricType metric = METRIC_L2 , <nl> + int bbs = 32 <nl> + ) ; <nl> + <nl> + IndexPQFastScan ( ) ; <nl> + <nl> + / / / build from an existing IndexPQ <nl> + explicit IndexPQFastScan ( const IndexPQ & orig , int bbs = 32 ) ; <nl> + <nl> + void train ( idx_t n , const float * x ) override ; <nl> + void add ( idx_t n , const float * x ) override ; <nl> + void reset ( ) override ; <nl> + void search ( <nl> + idx_t n , <nl> + const float * x , <nl> + idx_t k , <nl> + float * distances , <nl> + idx_t * labels ) const override ; <nl> + <nl> + / / called by search function <nl> + void compute_quantized_LUT ( <nl> + idx_t n , const float * x , <nl> + uint8_t * lut , float * normalizers ) const ; <nl> + <nl> + template < bool is_max > <nl> + void search_dispatch_implem ( <nl> + idx_t n , const float * x , idx_t k , <nl> + float * distances , idx_t * labels ) const ; <nl> + <nl> + template < class C > <nl> + void search_implem_2 ( <nl> + idx_t n , const float * x , idx_t k , <nl> + 
float * distances , idx_t * labels ) const ; <nl> + <nl> + template < class C > <nl> + void search_implem_12 ( <nl> + idx_t n , const float * x , idx_t k , <nl> + float * distances , idx_t * labels , int impl ) const ; <nl> + <nl> + template < class C > <nl> + void search_implem_14 ( <nl> + idx_t n , const float * x , idx_t k , <nl> + float * distances , idx_t * labels , int impl ) const ; <nl> + <nl> + } ; <nl> + <nl> + struct FastScanStats { <nl> + uint64_t t0 , t1 , t2 , t3 ; <nl> + FastScanStats ( ) { reset ( ) ; } <nl> + void reset ( ) { <nl> + memset ( this , 0 , sizeof ( * this ) ) ; <nl> + } <nl> + } ; <nl> + <nl> + FAISS_API extern FastScanStats FastScan_stats ; <nl> + <nl> + } / / namespace faiss <nl> mmm a / faiss / gpu / impl / IVFBase . cu <nl> ppp b / faiss / gpu / impl / IVFBase . cu <nl> <nl> <nl> <nl> # include < faiss / gpu / impl / IVFBase . cuh > <nl> - # include < faiss / InvertedLists . h > <nl> + # include < faiss / invlists / InvertedLists . h > <nl> # include < faiss / gpu / GpuResources . h > <nl> # include < faiss / gpu / impl / FlatIndex . cuh > <nl> # include < faiss / gpu / impl / IVFAppend . cuh > <nl> mmm a / faiss / gpu / test / TestUtils . h <nl> ppp b / faiss / gpu / test / TestUtils . h <nl> <nl> <nl> # include < faiss / impl / FaissAssert . h > <nl> # include < faiss / Index . h > <nl> - # include < faiss / InvertedLists . h > <nl> + # include < faiss / invlists / InvertedLists . h > <nl> # include < initializer_list > <nl> # include < gtest / gtest . h > <nl> # include < cstring > <nl> mmm a / faiss / impl / index_read . cpp <nl> ppp b / faiss / impl / index_read . cpp <nl> <nl> # include < sys / types . h > <nl> # include < sys / stat . h > <nl> <nl> - # ifndef _MSC_VER <nl> - # include < sys / mman . h > <nl> - # endif / / ! _MSC_VER <nl> - <nl> # include < faiss / impl / FaissAssert . h > <nl> # include < faiss / impl / io . h > <nl> # include < faiss / impl / io_macros . h > <nl> # include < faiss / utils / hamming . h > <nl> <nl> + # include < faiss / invlists / InvertedListsIOHook . h > <nl> + <nl> # include < faiss / IndexFlat . h > <nl> # include < faiss / VectorTransform . h > <nl> # include < faiss / IndexPreTransform . h > <nl> <nl> # include < faiss / IndexScalarQuantizer . h > <nl> # include < faiss / IndexHNSW . h > <nl> # include < faiss / IndexLattice . h > <nl> + # include < faiss / IndexPQFastScan . h > <nl> + # include < faiss / IndexIVFPQFastScan . h > <nl> + <nl> # include < faiss / IndexBinaryFlat . h > <nl> # include < faiss / IndexBinaryFromFloat . h > <nl> # include < faiss / IndexBinaryHNSW . h > <nl> # include < faiss / IndexBinaryIVF . h > <nl> # include < faiss / IndexBinaryHash . h > <nl> <nl> - # ifndef _MSC_VER <nl> - # include < faiss / OnDiskInvertedLists . h > <nl> - # endif / / ! _MSC_VER <nl> - <nl> - <nl> namespace faiss { <nl> <nl> <nl> VectorTransform * read_VectorTransform ( IOReader * f ) { <nl> } <nl> vt = itqt ; <nl> } else { <nl> - FAISS_THROW_MSG ( " fourcc not recognized " ) ; <nl> + FAISS_THROW_FMT ( <nl> + " fourcc % ud ( \ " % s \ " ) not recognized " , <nl> + h , fourcc_inv_printable ( h ) . c_str ( ) <nl> + ) ; <nl> } <nl> READ1 ( vt - > d_in ) ; <nl> READ1 ( vt - > d_out ) ; <nl> static void read_ArrayInvertedLists_sizes ( <nl> sizes [ idsizes [ j ] ] = idsizes [ j + 1 ] ; <nl> } <nl> } else { <nl> - FAISS_THROW_MSG ( " invalid list_type " ) ; <nl> + FAISS_THROW_FMT ( <nl> + " list_type % ud ( \ " % s \ " ) not recognized " , <nl> + list_type , fourcc_inv_printable ( list_type ) . 
c_str ( ) <nl> + ) ; <nl> } <nl> } <nl> <nl> InvertedLists * read_InvertedLists ( IOReader * f , int io_flags ) { <nl> } <nl> return ails ; <nl> <nl> - # ifdef _MSC_VER <nl> - } else { <nl> - FAISS_THROW_MSG ( " Unsupported inverted list format for Windows " ) ; <nl> - } <nl> - # else <nl> } else if ( h = = fourcc ( " ilar " ) & & ( io_flags & IO_FLAG_SKIP_IVF_DATA ) ) { <nl> / / code is always ilxx where xx is specific to the type of invlists we want <nl> / / so we get the 16 high bits from the io_flag and the 16 low bits as " il " <nl> InvertedLists * read_InvertedLists ( IOReader * f , int io_flags ) { <nl> } else { <nl> return InvertedListsIOHook : : lookup ( h ) - > read ( f , io_flags ) ; <nl> } <nl> - # endif / / ! _MSC_VER <nl> <nl> } <nl> <nl> InvertedLists * read_InvertedLists ( IOReader * f , int io_flags ) { <nl> static void read_InvertedLists ( <nl> IndexIVF * ivf , IOReader * f , int io_flags ) { <nl> InvertedLists * ils = read_InvertedLists ( f , io_flags ) ; <nl> - FAISS_THROW_IF_NOT ( ! ils | | ( ils - > nlist = = ivf - > nlist & & <nl> - ils - > code_size = = ivf - > code_size ) ) ; <nl> + if ( ils ) { <nl> + FAISS_THROW_IF_NOT ( ils - > nlist = = ivf - > nlist ) ; <nl> + FAISS_THROW_IF_NOT ( ils - > code_size = = InvertedLists : : INVALID_CODE_SIZE | | <nl> + ils - > code_size = = ivf - > code_size ) ; <nl> + } <nl> ivf - > invlists = ils ; <nl> ivf - > own_invlists = true ; <nl> } <nl> Index * read_index ( IOReader * f , int io_flags ) { <nl> dynamic_cast < IndexPQ * > ( idxhnsw - > storage ) - > pq . compute_sdc_table ( ) ; <nl> } <nl> idx = idxhnsw ; <nl> + } else if ( h = = fourcc ( " IPfs " ) ) { <nl> + IndexPQFastScan * idxpqfs = new IndexPQFastScan ( ) ; <nl> + read_index_header ( idxpqfs , f ) ; <nl> + read_ProductQuantizer ( & idxpqfs - > pq , f ) ; <nl> + READ1 ( idxpqfs - > implem ) ; <nl> + READ1 ( idxpqfs - > bbs ) ; <nl> + READ1 ( idxpqfs - > qbs ) ; <nl> + READ1 ( idxpqfs - > ntotal2 ) ; <nl> + READ1 ( idxpqfs - > M2 ) ; <nl> + READVECTOR ( idxpqfs - > codes ) ; <nl> + idx = idxpqfs ; <nl> + <nl> + } else if ( h = = fourcc ( " IwPf " ) ) { <nl> + IndexIVFPQFastScan * ivpq = new IndexIVFPQFastScan ( ) ; <nl> + read_ivf_header ( ivpq , f ) ; <nl> + READ1 ( ivpq - > by_residual ) ; <nl> + READ1 ( ivpq - > code_size ) ; <nl> + READ1 ( ivpq - > bbs ) ; <nl> + READ1 ( ivpq - > M2 ) ; <nl> + READ1 ( ivpq - > implem ) ; <nl> + READ1 ( ivpq - > qbs2 ) ; <nl> + read_ProductQuantizer ( & ivpq - > pq , f ) ; <nl> + read_InvertedLists ( ivpq , f , io_flags ) ; <nl> + ivpq - > precompute_table ( ) ; <nl> + idx = ivpq ; <nl> } else { <nl> - FAISS_THROW_FMT ( " Index type 0x % 08x not supported \ n " , h ) ; <nl> + FAISS_THROW_FMT ( <nl> + " Index type 0x % 08x ( \ " % s \ " ) not recognized " , <nl> + h , fourcc_inv_printable ( h ) . c_str ( ) <nl> + ) ; <nl> idx = nullptr ; <nl> } <nl> return idx ; <nl> IndexBinary * read_index_binary ( IOReader * f , int io_flags ) { <nl> } <nl> idx = idxmh ; <nl> } else { <nl> - FAISS_THROW_FMT ( " Index type 0x % 08x not supported \ n " , h ) ; <nl> + FAISS_THROW_FMT ( <nl> + " Index type % 08x ( \ " % s \ " ) not recognized " , <nl> + h , fourcc_inv_printable ( h ) . 
c_str ( ) <nl> + ) ; <nl> idx = nullptr ; <nl> } <nl> return idx ; <nl> IndexBinary * read_index_binary ( const char * fname , int io_flags ) { <nl> return idx ; <nl> } <nl> <nl> - # ifndef _MSC_VER <nl> - <nl> - / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> - * InvertedListIOHook ' s <nl> - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> - <nl> - InvertedListsIOHook : : InvertedListsIOHook ( <nl> - const std : : string & key , const std : : string & classname ) : <nl> - key ( key ) , classname ( classname ) <nl> - { } <nl> - <nl> - namespace { <nl> - <nl> - / / / std : : vector that deletes its contents <nl> - struct IOHookTable : std : : vector < InvertedListsIOHook * > { <nl> - <nl> - IOHookTable ( ) { <nl> - push_back ( new OnDiskInvertedListsIOHook ( ) ) ; <nl> - } <nl> - <nl> - ~ IOHookTable ( ) { <nl> - for ( auto x : * this ) { <nl> - delete x ; <nl> - } <nl> - } <nl> - } ; <nl> - <nl> - static IOHookTable InvertedListsIOHook_table ; <nl> - <nl> - } / / anonymous namepsace <nl> - <nl> - InvertedListsIOHook * InvertedListsIOHook : : lookup ( int h ) <nl> - { <nl> - for ( const auto & callback : InvertedListsIOHook_table ) { <nl> - if ( h = = fourcc ( callback - > key ) ) { <nl> - return callback ; <nl> - } <nl> - } <nl> - FAISS_THROW_FMT ( " read_InvertedLists : could not load ArrayInvertedLists as % 04x " , h ) ; <nl> - } <nl> - <nl> - InvertedListsIOHook * InvertedListsIOHook : : lookup_classname ( const std : : string & classname ) <nl> - { <nl> - for ( const auto & callback : InvertedListsIOHook_table ) { <nl> - if ( callback - > classname = = classname ) { <nl> - return callback ; <nl> - } <nl> - } <nl> - FAISS_THROW_FMT ( " read_InvertedLists : could not find classname % s " , classname . c_str ( ) ) ; <nl> - } <nl> - <nl> - void InvertedListsIOHook : : add_callback ( InvertedListsIOHook * cb ) <nl> - { <nl> - InvertedListsIOHook_table . push_back ( cb ) ; <nl> - } <nl> - <nl> - void InvertedListsIOHook : : print_callbacks ( ) <nl> - { <nl> - printf ( " registered % zd InvertedListsIOHooks : \ n " , <nl> - InvertedListsIOHook_table . size ( ) ) ; <nl> - for ( const auto & cb : InvertedListsIOHook_table ) { <nl> - printf ( " % 08x % s % s \ n " , <nl> - fourcc ( cb - > key . c_str ( ) ) , <nl> - cb - > key . c_str ( ) , <nl> - cb - > classname . c_str ( ) ) ; <nl> - } <nl> - } <nl> - <nl> - # endif / / ! _MSC_VER <nl> - <nl> <nl> <nl> } / / namespace faiss <nl> mmm a / faiss / impl / index_write . cpp <nl> ppp b / faiss / impl / index_write . cpp <nl> <nl> # include < sys / types . h > <nl> # include < sys / stat . h > <nl> <nl> - # ifndef _MSC_VER <nl> - # include < sys / mman . h > <nl> - # endif / / ! _MSC_VER <nl> + # include < faiss / invlists / InvertedListsIOHook . h > <nl> <nl> # include < faiss / impl / FaissAssert . h > <nl> # include < faiss / impl / io . h > <nl> <nl> # include < faiss / IndexScalarQuantizer . h > <nl> # include < faiss / IndexHNSW . h > <nl> # include < faiss / IndexLattice . h > <nl> + # include < faiss / IndexPQFastScan . h > <nl> + # include < faiss / IndexIVFPQFastScan . h > <nl> <nl> # include < faiss / IndexBinaryFlat . h > <nl> # include < faiss / IndexBinaryFromFloat . h > <nl> <nl> # include < faiss / IndexBinaryIVF . h > <nl> # include < faiss / IndexBinaryHash . h > <nl> <nl> - # ifndef _MSC_VER <nl> - # include < faiss / OnDiskInvertedLists . h > <nl> - # endif / / ! 
_MSC_VER <nl> - <nl> - <nl> / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> * The I / O format is the content of the class . For objects that are <nl> * inherited , like Index , a 4 - character - code ( fourcc ) indicates which <nl> <nl> * or deprecated fields ) , the fourcc can be replaced . New code should <nl> * be able to read the old fourcc and fill in new classes . <nl> * <nl> - * TODO : serialization to strings for use in Python pickle or Torch <nl> - * serialization . <nl> - * <nl> * TODO : in this file , the read functions that encouter errors may <nl> * leak memory . <nl> * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> void write_InvertedLists ( const InvertedLists * ils , IOWriter * f ) { <nl> WRITEANDCHECK ( ails - > ids [ i ] . data ( ) , n ) ; <nl> } <nl> } <nl> - # ifndef _MSC_VER <nl> - } else { <nl> <nl> + } else { <nl> InvertedListsIOHook : : lookup_classname ( <nl> typeid ( * ils ) . name ( ) ) - > write ( ils , f ) ; <nl> - <nl> - / * <nl> - fprintf ( stderr , " WARN ! write_InvertedLists : unsupported invlist type , " <nl> - " saving null invlist \ n " ) ; <nl> - uint32_t h = fourcc ( " il00 " ) ; <nl> - WRITE1 ( h ) ; <nl> - * / <nl> - # endif / / ! _MSC_VER <nl> } <nl> } <nl> <nl> void write_index ( const Index * idx , IOWriter * f ) { <nl> write_index_header ( idxhnsw , f ) ; <nl> write_HNSW ( & idxhnsw - > hnsw , f ) ; <nl> write_index ( idxhnsw - > storage , f ) ; <nl> + } else if ( const IndexPQFastScan * idxpqfs = <nl> + dynamic_cast < const IndexPQFastScan * > ( idx ) ) { <nl> + uint32_t h = fourcc ( " IPfs " ) ; <nl> + WRITE1 ( h ) ; <nl> + write_index_header ( idxpqfs , f ) ; <nl> + write_ProductQuantizer ( & idxpqfs - > pq , f ) ; <nl> + WRITE1 ( idxpqfs - > implem ) ; <nl> + WRITE1 ( idxpqfs - > bbs ) ; <nl> + WRITE1 ( idxpqfs - > qbs ) ; <nl> + WRITE1 ( idxpqfs - > ntotal2 ) ; <nl> + WRITE1 ( idxpqfs - > M2 ) ; <nl> + WRITEVECTOR ( idxpqfs - > codes ) ; <nl> + } else if ( const IndexIVFPQFastScan * ivpq = <nl> + dynamic_cast < const IndexIVFPQFastScan * > ( idx ) ) { <nl> + uint32_t h = fourcc ( " IwPf " ) ; <nl> + WRITE1 ( h ) ; <nl> + write_ivf_header ( ivpq , f ) ; <nl> + WRITE1 ( ivpq - > by_residual ) ; <nl> + WRITE1 ( ivpq - > code_size ) ; <nl> + WRITE1 ( ivpq - > bbs ) ; <nl> + WRITE1 ( ivpq - > M2 ) ; <nl> + WRITE1 ( ivpq - > implem ) ; <nl> + WRITE1 ( ivpq - > qbs2 ) ; <nl> + write_ProductQuantizer ( & ivpq - > pq , f ) ; <nl> + write_InvertedLists ( ivpq - > invlists , f ) ; <nl> } else { <nl> - FAISS_THROW_MSG ( " don ' t know how to serialize this type of index " ) ; <nl> + FAISS_THROW_MSG ( " don ' t know how to serialize this type of index " ) ; <nl> } <nl> } <nl> <nl> mmm a / faiss / impl / io . cpp <nl> ppp b / faiss / impl / io . cpp <nl> BufferedIOWriter : : ~ BufferedIOWriter ( ) <nl> <nl> <nl> uint32_t fourcc ( const char sx [ 4 ] ) { <nl> - assert ( 4 = = strlen ( sx ) ) ; <nl> + FAISS_THROW_IF_NOT ( 4 = = strlen ( sx ) ) ; <nl> const unsigned char * x = ( unsigned char * ) sx ; <nl> return x [ 0 ] | x [ 1 ] < < 8 | x [ 2 ] < < 16 | x [ 3 ] < < 24 ; <nl> } <nl> <nl> uint32_t fourcc ( const std : : string & sx ) { <nl> - assert ( sx . length ( ) = = 4 ) ; <nl> + FAISS_THROW_IF_NOT ( sx . length ( ) = = 4 ) ; <nl> const unsigned char * x = ( unsigned char * ) sx . 
c_str ( ) ; <nl> return x [ 0 ] | x [ 1 ] < < 8 | x [ 2 ] < < 16 | x [ 3 ] < < 24 ; <nl> } <nl> <nl> + void fourcc_inv ( uint32_t x , char str [ 5 ] ) { <nl> + * ( uint32_t * ) str = x ; <nl> + str [ 5 ] = 0 ; <nl> + } <nl> + <nl> + std : : string fourcc_inv ( uint32_t x ) { <nl> + char str [ 5 ] ; <nl> + fourcc_inv ( x , str ) ; <nl> + return std : : string ( str ) ; <nl> + } <nl> + <nl> + <nl> + std : : string fourcc_inv_printable ( uint32_t x ) { <nl> + char cstr [ 5 ] ; <nl> + fourcc_inv ( x , cstr ) ; <nl> + std : : string str = " " ; <nl> + for ( int i = 0 ; i < 4 ; i + + ) { <nl> + uint8_t c = cstr [ i ] ; <nl> + if ( 32 < = c & & c < 127 ) { <nl> + str + = c ; <nl> + } else { <nl> + char buf [ 10 ] ; <nl> + sprintf ( buf , " \ \ x % 02x " , c ) ; <nl> + str + = buf ; <nl> + } <nl> + } <nl> + return str ; <nl> + } <nl> + <nl> + <nl> + <nl> <nl> } / / namespace faiss <nl> mmm a / faiss / impl / io . h <nl> ppp b / faiss / impl / io . h <nl> struct BufferedIOWriter : IOWriter { <nl> uint32_t fourcc ( const char sx [ 4 ] ) ; <nl> uint32_t fourcc ( const std : : string & sx ) ; <nl> <nl> + / / decoding of fourcc ( int32 - > string ) <nl> + void fourcc_inv ( uint32_t x , char str [ 5 ] ) ; <nl> + std : : string fourcc_inv ( uint32_t x ) ; <nl> + std : : string fourcc_inv_printable ( uint32_t x ) ; <nl> + <nl> <nl> } / / namespace faiss <nl> mmm a / faiss / impl / lattice_Zn . cpp <nl> ppp b / faiss / impl / lattice_Zn . cpp <nl> <nl> # include < algorithm > <nl> <nl> # include < faiss / utils / distances . h > <nl> + # include < faiss / impl / platform_macros . h > <nl> <nl> - # ifdef _MSC_VER <nl> - <nl> - # include < intrin . h > <nl> - <nl> - static inline int __builtin_ctzll ( uint64_t x ) { <nl> - unsigned long ret ; <nl> - _BitScanForward64 ( & ret , x ) ; <nl> - return ( int ) ret ; <nl> - } <nl> - <nl> - static inline int __builtin_clzll ( uint64_t x ) { <nl> - return ( int ) __lzcnt64 ( x ) ; <nl> - } <nl> - <nl> - # endif / / _MSC_VER <nl> <nl> namespace faiss { <nl> <nl> mmm a / faiss / impl / platform_macros . h <nl> ppp b / faiss / impl / platform_macros . h <nl> <nl> <nl> # pragma once <nl> <nl> + <nl> # ifdef _MSC_VER <nl> <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Windows specific macros <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + <nl> # ifdef FAISS_MAIN_LIB <nl> # define FAISS_API __declspec ( dllexport ) <nl> # else / / _FAISS_MAIN_LIB <nl> <nl> <nl> # define __PRETTY_FUNCTION__ __FUNCSIG__ <nl> <nl> + # define posix_memalign ( p , a , s ) ( ( ( * ( p ) ) = _aligned_malloc ( ( s ) , ( a ) ) ) , * ( p ) ? 0 : errno ) <nl> + # define posix_memalign_free _aligned_free <nl> + <nl> + / / aligned should be in front of the declaration <nl> + # define ALIGNED ( x ) __declspec ( align ( x ) ) <nl> + <nl> + / / redefine the GCC intrinsics with Windows equivalents <nl> + <nl> + # include < intrin . 
h > <nl> + <nl> + inline int __builtin_ctzll ( uint64_t x ) { <nl> + unsigned long ret ; <nl> + _BitScanForward64 ( & ret , x ) ; <nl> + return ( int ) ret ; <nl> + } <nl> + <nl> + inline int __builtin_ctz ( unsigned long x ) { <nl> + unsigned long ret ; <nl> + _BitScanForward ( & ret , x ) ; <nl> + return ( int ) ret ; <nl> + } <nl> + <nl> + inline int __builtin_clzll ( uint64_t x ) { <nl> + return ( int ) __lzcnt64 ( x ) ; <nl> + } <nl> + <nl> + # define __builtin_popcountl __popcnt64 <nl> + <nl> # else <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Linux and OSX <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> <nl> # define FAISS_API <nl> + # define posix_memalign_free free <nl> + <nl> + / / aligned should be * in front * of the declaration , for compatibility with windows <nl> + # define ALIGNED ( x ) __attribute__ ( ( aligned ( x ) ) ) <nl> <nl> # endif / / _MSC_VER <nl> + <nl> + <nl> + <nl> new file mode 100644 <nl> index 000000000 . . 74d284c58 <nl> mmm / dev / null <nl> ppp b / faiss / impl / pq4_fast_scan . cpp <nl> <nl> + / * * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the root directory of this source tree . <nl> + * / <nl> + <nl> + # include < faiss / impl / pq4_fast_scan . h > <nl> + # include < faiss / impl / FaissAssert . h > <nl> + # include < faiss / impl / simd_result_handlers . h > <nl> + <nl> + # include < array > <nl> + <nl> + <nl> + namespace faiss { <nl> + <nl> + <nl> + using namespace simd_result_handlers ; <nl> + <nl> + <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Packing functions for codes <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + <nl> + <nl> + namespace { <nl> + <nl> + / * extract the column starting at ( i , j ) <nl> + * from packed matrix src of size ( m , n ) * / <nl> + template < typename T , class TA > <nl> + void get_matrix_column ( <nl> + T * src , <nl> + size_t m , size_t n , <nl> + int64_t i , int64_t j , <nl> + TA & dest ) { <nl> + for ( int64_t k = 0 ; k < dest . 
size ( ) ; k + + ) { <nl> + if ( k + i > = 0 & & k + i < m ) { <nl> + dest [ k ] = src [ ( k + i ) * n + j ] ; <nl> + } else { <nl> + dest [ k ] = 0 ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + } / / anonymous namespace <nl> + <nl> + <nl> + void pq4_pack_codes ( <nl> + const uint8_t * codes , <nl> + size_t ntotal , size_t M , <nl> + size_t nb , size_t bbs , size_t nsq , <nl> + uint8_t * blocks <nl> + ) <nl> + { <nl> + FAISS_THROW_IF_NOT ( bbs % 32 = = 0 ) ; <nl> + FAISS_THROW_IF_NOT ( nb % bbs = = 0 ) ; <nl> + FAISS_THROW_IF_NOT ( nsq % 2 = = 0 ) ; <nl> + <nl> + memset ( blocks , 0 , nb * nsq / 2 ) ; <nl> + const uint8_t perm0 [ 16 ] = <nl> + { 0 , 8 , 1 , 9 , 2 , 10 , 3 , 11 , <nl> + 4 , 12 , 5 , 13 , 6 , 14 , 7 , 15 } ; <nl> + <nl> + uint8_t * codes2 = blocks ; <nl> + for ( size_t i0 = 0 ; i0 < nb ; i0 + = bbs ) { <nl> + for ( int sq = 0 ; sq < nsq ; sq + = 2 ) { <nl> + for ( size_t i = 0 ; i < bbs ; i + = 32 ) { <nl> + std : : array < uint8_t , 32 > c , c0 , c1 ; <nl> + get_matrix_column ( <nl> + codes , ntotal , <nl> + ( M + 1 ) / 2 , <nl> + i0 + i , sq / 2 , c <nl> + ) ; <nl> + for ( int j = 0 ; j < 32 ; j + + ) { <nl> + c0 [ j ] = c [ j ] & 15 ; <nl> + c1 [ j ] = c [ j ] > > 4 ; <nl> + } <nl> + for ( int j = 0 ; j < 16 ; j + + ) { <nl> + uint8_t d0 , d1 ; <nl> + d0 = c0 [ perm0 [ j ] ] | ( c0 [ perm0 [ j ] + 16 ] < < 4 ) ; <nl> + d1 = c1 [ perm0 [ j ] ] | ( c1 [ perm0 [ j ] + 16 ] < < 4 ) ; <nl> + codes2 [ j ] = d0 ; <nl> + codes2 [ j + 16 ] = d1 ; <nl> + } <nl> + codes2 + = 32 ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + void pq4_pack_codes_range ( <nl> + const uint8_t * codes , <nl> + size_t M , <nl> + size_t i0 , size_t i1 , <nl> + size_t bbs , size_t M2 , <nl> + uint8_t * blocks <nl> + ) { <nl> + const uint8_t perm0 [ 16 ] = <nl> + { 0 , 8 , 1 , 9 , 2 , 10 , 3 , 11 , <nl> + 4 , 12 , 5 , 13 , 6 , 14 , 7 , 15 } ; <nl> + <nl> + / / range of affected blocks <nl> + size_t block0 = i0 / bbs ; <nl> + size_t block1 = ( ( i1 - 1 ) / bbs ) + 1 ; <nl> + <nl> + for ( size_t b = block0 ; b < block1 ; b + + ) { <nl> + uint8_t * codes2 = blocks + b * bbs * M2 / 2 ; <nl> + int64_t i_base = b * bbs - i0 ; <nl> + for ( int sq = 0 ; sq < M2 ; sq + = 2 ) { <nl> + for ( size_t i = 0 ; i < bbs ; i + = 32 ) { <nl> + std : : array < uint8_t , 32 > c , c0 , c1 ; <nl> + get_matrix_column ( <nl> + codes , i1 - i0 , <nl> + ( M + 1 ) / 2 , <nl> + i_base + i , sq / 2 , c <nl> + ) ; <nl> + for ( int j = 0 ; j < 32 ; j + + ) { <nl> + c0 [ j ] = c [ j ] & 15 ; <nl> + c1 [ j ] = c [ j ] > > 4 ; <nl> + } <nl> + for ( int j = 0 ; j < 16 ; j + + ) { <nl> + uint8_t d0 , d1 ; <nl> + d0 = c0 [ perm0 [ j ] ] | ( c0 [ perm0 [ j ] + 16 ] < < 4 ) ; <nl> + d1 = c1 [ perm0 [ j ] ] | ( c1 [ perm0 [ j ] + 16 ] < < 4 ) ; <nl> + codes2 [ j ] | = d0 ; <nl> + codes2 [ j + 16 ] | = d1 ; <nl> + } <nl> + codes2 + = 32 ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + } <nl> + <nl> + <nl> + uint8_t pq4_get_packed_element ( <nl> + const uint8_t * data , size_t bbs , size_t nsq , <nl> + size_t i , size_t sq <nl> + ) { <nl> + / / move to correct bbs - sized block <nl> + data + = ( i / bbs * ( nsq / 2 ) + sq / 2 ) * bbs ; <nl> + sq = sq & 1 ; <nl> + i = i % bbs ; <nl> + <nl> + / / another step <nl> + data + = ( i / 32 ) * 32 ; <nl> + i = i % 32 ; <nl> + <nl> + if ( sq = = 1 ) { <nl> + data + = 16 ; <nl> + } <nl> + const uint8_t iperm0 [ 16 ] = <nl> + { 0 , 2 , 4 , 6 , 8 , 10 , 12 , 14 , <nl> + 1 , 3 , 5 , 7 , 9 , 11 , 13 , 15 } ; <nl> + if ( i < 16 ) { <nl> + return data [ iperm0 [ i ] ] & 15 ; <nl> + } else { <nl> + return data [ iperm0 [ 
i - 16 ] ] > > 4 ; <nl> + } <nl> + <nl> + } <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Packing functions for Look - Up Tables ( LUT ) <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + <nl> + <nl> + <nl> + void pq4_pack_LUT ( <nl> + int nq , int nsq , <nl> + const uint8_t * src , <nl> + uint8_t * dest ) <nl> + { <nl> + <nl> + for ( int q = 0 ; q < nq ; q + + ) { <nl> + for ( int sq = 0 ; sq < nsq ; sq + = 2 ) { <nl> + memcpy ( <nl> + dest + ( sq / 2 * nq + q ) * 32 , <nl> + src + ( q * nsq + sq ) * 16 , <nl> + 16 <nl> + ) ; <nl> + memcpy ( <nl> + dest + ( sq / 2 * nq + q ) * 32 + 16 , <nl> + src + ( q * nsq + sq + 1 ) * 16 , <nl> + 16 <nl> + ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> + int pq4_pack_LUT_qbs ( <nl> + int qbs , int nsq , <nl> + const uint8_t * src , <nl> + uint8_t * dest ) <nl> + { <nl> + FAISS_THROW_IF_NOT ( nsq % 2 = = 0 ) ; <nl> + size_t dim12 = 16 * nsq ; <nl> + int i0 = 0 ; <nl> + int qi = qbs ; <nl> + while ( qi ) { <nl> + int nq = qi & 15 ; <nl> + qi > > = 4 ; <nl> + pq4_pack_LUT ( <nl> + nq , nsq , <nl> + src + i0 * dim12 , <nl> + dest + i0 * dim12 <nl> + ) ; <nl> + i0 + = nq ; <nl> + } <nl> + return i0 ; <nl> + } <nl> + <nl> + <nl> + namespace { <nl> + <nl> + void pack_LUT_1_q_map ( <nl> + int nq , const int * q_map , <nl> + int nsq , <nl> + const uint8_t * src , <nl> + uint8_t * dest ) <nl> + { <nl> + <nl> + for ( int qi = 0 ; qi < nq ; qi + + ) { <nl> + int q = q_map [ qi ] ; <nl> + for ( int sq = 0 ; sq < nsq ; sq + = 2 ) { <nl> + memcpy ( <nl> + dest + ( sq / 2 * nq + qi ) * 32 , <nl> + src + ( q * nsq + sq ) * 16 , <nl> + 16 <nl> + ) ; <nl> + memcpy ( <nl> + dest + ( sq / 2 * nq + qi ) * 32 + 16 , <nl> + src + ( q * nsq + sq + 1 ) * 16 , <nl> + 16 <nl> + ) ; <nl> + } <nl> + } <nl> + <nl> + } <nl> + <nl> + } / / anonymous namespace <nl> + <nl> + int pq4_pack_LUT_qbs_q_map ( <nl> + int qbs , int nsq , <nl> + const uint8_t * src , <nl> + const int * q_map , <nl> + uint8_t * dest ) <nl> + { <nl> + FAISS_THROW_IF_NOT ( nsq % 2 = = 0 ) ; <nl> + size_t dim12 = 16 * nsq ; <nl> + int i0 = 0 ; <nl> + int qi = qbs ; <nl> + while ( qi ) { <nl> + int nq = qi & 15 ; <nl> + qi > > = 4 ; <nl> + pack_LUT_1_q_map ( <nl> + nq , q_map + i0 , nsq , <nl> + src , <nl> + dest + i0 * dim12 <nl> + ) ; <nl> + i0 + = nq ; <nl> + } <nl> + return i0 ; <nl> + } <nl> + <nl> + <nl> + <nl> + } / / namespace faiss <nl> new file mode 100644 <nl> index 000000000 . . fb3e32ef2 <nl> mmm / dev / null <nl> ppp b / faiss / impl / pq4_fast_scan . h <nl> <nl> + / * * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the root directory of this source tree . <nl> + * / <nl> + <nl> + # pragma once <nl> + <nl> + # include < cstdint > <nl> + # include < cstdlib > <nl> + <nl> + / * * PQ4 SIMD packing and accumulation functions <nl> + * <nl> + * The basic kernel accumulates nq query vectors with bbs = nb * 2 * 16 vectors <nl> + * and produces an output matrix for that . It is interesting for nq * nb < = 4 , <nl> + * otherwise register spilling becomes too large . <nl> + * <nl> + * The implementation of these functions is spread over 3 cpp files to reduce <nl> + * parallel compile times . Templates are instanciated explicitly . 
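<nl> + * The three cpp files in this diff are pq4_fast_scan . cpp ( code and LUT <nl> + * packing ) , pq4_fast_scan_search_1 . cpp ( accumulation kernels for fixed <nl> + * query / block sizes ) and pq4_fast_scan_search_qbs . cpp ( accumulation <nl> + * kernels driven by the qbs encoding ) . 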
<nl> + * / <nl> + <nl> + <nl> + namespace faiss { <nl> + <nl> + <nl> + / * * Pack codes for consumption by the SIMD kernels . <nl> + * The unused bytes are set to 0 . <nl> + * <nl> + * @ param codes input codes , size ( ntotal , ceil ( M / 2 ) ) <nl> + * @ param nototal number of input codes <nl> + * @ param nb output number of codes ( ntotal rounded up to a multiple of <nl> + * bbs ) <nl> + * @ param M2 number of sub - quantizers ( = M rounded up to a muliple of 2 ) <nl> + * @ param bbs size of database blocks ( multiple of 32 ) <nl> + * @ param blocks output array , size nb * nsq / 2 . <nl> + * / <nl> + void pq4_pack_codes ( <nl> + const uint8_t * codes , <nl> + size_t ntotal , size_t M , <nl> + size_t nb , size_t bbs , size_t M2 , <nl> + uint8_t * blocks <nl> + ) ; <nl> + <nl> + / * * Same as pack_codes but write in a given range of the output , <nl> + * leaving the rest untouched . Assumes allocated entries are 0 on input . <nl> + * <nl> + * @ param codes input codes , size ( i1 - i0 , ceil ( M / 2 ) ) <nl> + * @ param i0 first output code to write <nl> + * @ param i1 last output code to write <nl> + * @ param blocks output array , size at least ceil ( i1 / bbs ) * bbs * nsq / 2 <nl> + * / <nl> + void pq4_pack_codes_range ( <nl> + const uint8_t * codes , <nl> + size_t M , <nl> + size_t i0 , size_t i1 , <nl> + size_t bbs , size_t M2 , <nl> + uint8_t * blocks <nl> + ) ; <nl> + <nl> + / * * get a single element from a packed codes table <nl> + * <nl> + * @ param i vector id <nl> + * @ param sq subquantizer ( < nsq ) <nl> + * / <nl> + uint8_t pq4_get_packed_element ( <nl> + const uint8_t * data , size_t bbs , size_t nsq , <nl> + size_t i , size_t sq <nl> + ) ; <nl> + <nl> + / * * Pack Look - up table for consumption by the kernel . <nl> + * <nl> + * @ param nq number of queries <nl> + * @ param nsq number of sub - quantizers ( muliple of 2 ) <nl> + * @ param src input array , size ( nq , 16 ) <nl> + * @ param dest output array , size ( nq , 16 ) <nl> + * / <nl> + void pq4_pack_LUT ( <nl> + int nq , int nsq , <nl> + const uint8_t * src , <nl> + uint8_t * dest <nl> + ) ; <nl> + <nl> + <nl> + <nl> + / * * Loop over database elements and accumulate results into result handler <nl> + * <nl> + * @ param nq number of queries <nl> + * @ param nb number of database elements <nl> + * @ param bbs size of database blocks ( multiple of 32 ) <nl> + * @ param nsq number of sub - quantizers ( muliple of 2 ) <nl> + * @ param codes packed codes array <nl> + * @ param LUT packed look - up table <nl> + * / <nl> + template < class ResultHandler > <nl> + void pq4_accumulate_loop ( <nl> + int nq , <nl> + size_t nb , int bbs , <nl> + int nsq , <nl> + const uint8_t * codes , <nl> + const uint8_t * LUT , <nl> + ResultHandler & res ) ; <nl> + <nl> + <nl> + <nl> + / * qbs versions , supported only for bbs = 32 . <nl> + * <nl> + * The kernel function runs the kernel for * several * query blocks <nl> + * and bbs database vectors . The sizes of the blocks are encoded in qbs as <nl> + * base - 16 digits . <nl> + * <nl> + * For example , qbs = 0x1223 means that the kernel will be run 4 times , the <nl> + * first time with 3 query vectors , second time with 2 query vectors , then 2 <nl> + * vectors again and finally with 1 query vector . The output block will thus be <nl> + * nq = 3 + 2 + 2 + 1 = 6 queries . For a given total block size , the optimal <nl> + * decomposition into sub - blocks ( measured empirically ) is given by <nl> + * preferred_qbs ( ) . 
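<nl> + * The helper pq4_qbs_to_nq ( ) recovers the total number of queries from <nl> + * such an encoding ( for example , 3 + 2 + 2 = 7 for qbs = 0x223 ) . 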
<nl> + * / <nl> + <nl> + <nl> + / * compute the number of queries from a base - 16 decomposition * / <nl> + int pq4_qbs_to_nq ( int qbs ) ; <nl> + <nl> + / * * return the preferred decomposition in blocks for a nb of queries . * / <nl> + int pq4_preferred_qbs ( int nq ) ; <nl> + <nl> + / * * Pack Look - up table for consumption by the kernel . <nl> + * <nl> + * @ param qbs 4 - bit encoded number of query blocks , the total number of <nl> + * queries handled ( nq ) is deduced from it <nl> + * @ param nsq number of sub - quantizers ( muliple of 2 ) <nl> + * @ param src input array , size ( nq , 16 ) <nl> + * @ param dest output array , size ( nq , 16 ) <nl> + * @ return nq <nl> + * / <nl> + int pq4_pack_LUT_qbs ( <nl> + int fqbs , int nsq , <nl> + const uint8_t * src , <nl> + uint8_t * dest <nl> + ) ; <nl> + <nl> + / * * Same as pq4_pack_LUT_qbs , except the source vectors are remapped with q_map * / <nl> + int pq4_pack_LUT_qbs_q_map ( <nl> + int qbs , int nsq , <nl> + const uint8_t * src , <nl> + const int * q_map , <nl> + uint8_t * dest ) ; <nl> + <nl> + / * * Run accumulation loop . <nl> + * <nl> + * @ param qbs 4 - bit encded number of queries <nl> + * @ param nb number of database codes ( mutliple of bbs ) <nl> + * @ param nsq number of sub - quantizers <nl> + * @ param codes encoded database vectors ( packed ) <nl> + * @ param LUT look - up table ( packed ) <nl> + * @ param res call - back for the resutls <nl> + * / <nl> + template < class ResultHandler > <nl> + void pq4_accumulate_loop_qbs ( <nl> + int qbs , <nl> + size_t nb , <nl> + int nsq , <nl> + const uint8_t * codes , <nl> + const uint8_t * LUT , <nl> + ResultHandler & res ) ; <nl> + <nl> + <nl> + <nl> + } / / namespace faiss <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 000000000 . . f40691597 <nl> mmm / dev / null <nl> ppp b / faiss / impl / pq4_fast_scan_search_1 . cpp <nl> <nl> + / * * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the root directory of this source tree . <nl> + * / <nl> + <nl> + <nl> + # include < faiss / impl / pq4_fast_scan . h > <nl> + <nl> + # include < faiss / impl / FaissAssert . h > <nl> + # include < faiss / impl / simd_result_handlers . h > <nl> + <nl> + <nl> + namespace faiss { <nl> + <nl> + <nl> + using namespace simd_result_handlers ; <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * accumulation functions <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + namespace { <nl> + <nl> + / * <nl> + * The computation kernel <nl> + * It accumulates results for NQ queries and BB * 32 database elements <nl> + * writes results in a ResultHandler <nl> + * / <nl> + <nl> + template < int NQ , int BB , class ResultHandler > <nl> + void kernel_accumulate_block ( <nl> + int nsq , <nl> + const uint8_t * codes , <nl> + const uint8_t * LUT , <nl> + ResultHandler & res ) <nl> + { <nl> + / / distance accumulators <nl> + simd16uint16 accu [ NQ ] [ BB ] [ 4 ] ; <nl> + <nl> + for ( int q = 0 ; q < NQ ; q + + ) { <nl> + for ( int b = 0 ; b < BB ; b + + ) { <nl> + accu [ q ] [ b ] [ 0 ] . clear ( ) ; <nl> + accu [ q ] [ b ] [ 1 ] . clear ( ) ; <nl> + accu [ q ] [ b ] [ 2 ] . clear ( ) ; <nl> + accu [ q ] [ b ] [ 3 ] . 
clear ( ) ; <nl> + } <nl> + } <nl> + <nl> + for ( int sq = 0 ; sq < nsq ; sq + = 2 ) { <nl> + simd32uint8 lut_cache [ NQ ] ; <nl> + for ( int q = 0 ; q < NQ ; q + + ) { <nl> + lut_cache [ q ] = simd32uint8 ( LUT ) ; <nl> + LUT + = 32 ; <nl> + } <nl> + <nl> + for ( int b = 0 ; b < BB ; b + + ) { <nl> + simd32uint8 c = simd32uint8 ( codes ) ; <nl> + codes + = 32 ; <nl> + simd32uint8 mask ( 15 ) ; <nl> + simd32uint8 chi = simd32uint8 ( simd16uint16 ( c ) > > 4 ) & mask ; <nl> + simd32uint8 clo = c & mask ; <nl> + <nl> + for ( int q = 0 ; q < NQ ; q + + ) { <nl> + simd32uint8 lut = lut_cache [ q ] ; <nl> + simd32uint8 res0 = lut . lookup_2_lanes ( clo ) ; <nl> + simd32uint8 res1 = lut . lookup_2_lanes ( chi ) ; <nl> + <nl> + accu [ q ] [ b ] [ 0 ] + = simd16uint16 ( res0 ) ; <nl> + accu [ q ] [ b ] [ 1 ] + = simd16uint16 ( res0 ) > > 8 ; <nl> + <nl> + accu [ q ] [ b ] [ 2 ] + = simd16uint16 ( res1 ) ; <nl> + accu [ q ] [ b ] [ 3 ] + = simd16uint16 ( res1 ) > > 8 ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + for ( int q = 0 ; q < NQ ; q + + ) { <nl> + for ( int b = 0 ; b < BB ; b + + ) { <nl> + <nl> + accu [ q ] [ b ] [ 0 ] - = accu [ q ] [ b ] [ 1 ] < < 8 ; <nl> + simd16uint16 dis0 = combine2x2 ( accu [ q ] [ b ] [ 0 ] , accu [ q ] [ b ] [ 1 ] ) ; <nl> + <nl> + accu [ q ] [ b ] [ 2 ] - = accu [ q ] [ b ] [ 3 ] < < 8 ; <nl> + simd16uint16 dis1 = combine2x2 ( accu [ q ] [ b ] [ 2 ] , accu [ q ] [ b ] [ 3 ] ) ; <nl> + <nl> + res . handle ( q , b , dis0 , dis1 ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + } <nl> + <nl> + <nl> + template < int NQ , int BB , class ResultHandler > <nl> + void accumulate_fixed_blocks ( <nl> + size_t nb , <nl> + int nsq , <nl> + const uint8_t * codes , <nl> + const uint8_t * LUT , <nl> + ResultHandler & res ) <nl> + { <nl> + constexpr int bbs = 32 * BB ; <nl> + for ( int64_t j0 = 0 ; j0 < nb ; j0 + = bbs ) { <nl> + FixedStorageHandler < NQ , 2 * BB > res2 ; <nl> + kernel_accumulate_block < NQ , BB > ( nsq , codes , LUT , res2 ) ; <nl> + res . set_block_origin ( 0 , j0 ) ; <nl> + res2 . 
to_other_handler ( res ) ; <nl> + codes + = bbs * nsq / 2 ; <nl> + } <nl> + } <nl> + <nl> + <nl> + } / / anonymous namespace <nl> + <nl> + template < class ResultHandler > <nl> + void pq4_accumulate_loop ( <nl> + int nq , <nl> + size_t nb , int bbs , <nl> + int nsq , <nl> + const uint8_t * codes , <nl> + const uint8_t * LUT , <nl> + ResultHandler & res ) <nl> + { <nl> + FAISS_THROW_IF_NOT ( is_aligned_pointer ( codes ) ) ; <nl> + FAISS_THROW_IF_NOT ( is_aligned_pointer ( LUT ) ) ; <nl> + FAISS_THROW_IF_NOT ( bbs % 32 = = 0 ) ; <nl> + FAISS_THROW_IF_NOT ( nb % bbs = = 0 ) ; <nl> + <nl> + # define DISPATCH ( NQ , BB ) \ <nl> + case NQ * 1000 + BB : \ <nl> + accumulate_fixed_blocks < NQ , BB > ( nb , nsq , codes , LUT , res ) ; \ <nl> + break <nl> + <nl> + switch ( nq * 1000 + bbs / 32 ) { <nl> + DISPATCH ( 1 , 1 ) ; <nl> + DISPATCH ( 1 , 2 ) ; <nl> + DISPATCH ( 1 , 3 ) ; <nl> + DISPATCH ( 1 , 4 ) ; <nl> + DISPATCH ( 1 , 5 ) ; <nl> + DISPATCH ( 2 , 1 ) ; <nl> + DISPATCH ( 2 , 2 ) ; <nl> + DISPATCH ( 3 , 1 ) ; <nl> + DISPATCH ( 4 , 1 ) ; <nl> + default : <nl> + FAISS_THROW_FMT ( " nq = % d bbs = % d not instantiated " , nq , bbs ) ; <nl> + } <nl> + # undef DISPATCH <nl> + <nl> + } <nl> + <nl> + / / explicit template instantiations <nl> + <nl> + <nl> + <nl> + <nl> + # define INSTANTIATE_ACCUMULATE ( TH , C , with_id_map ) \ <nl> + template void pq4_accumulate_loop < TH < C , with_id_map > > \ <nl> + ( int , size_t , int , int , const uint8_t * , const uint8_t * , TH < C , with_id_map > & ) ; <nl> + <nl> + # define INSTANTIATE_3 ( C , with_id_map ) \ <nl> + INSTANTIATE_ACCUMULATE ( SingleResultHandler , C , with_id_map ) \ <nl> + INSTANTIATE_ACCUMULATE ( HeapHandler , C , with_id_map ) \ <nl> + INSTANTIATE_ACCUMULATE ( ReservoirHandler , C , with_id_map ) \ <nl> + <nl> + using Csi = CMax < uint16_t , int > ; <nl> + INSTANTIATE_3 ( Csi , false ) ; <nl> + using CsiMin = CMin < uint16_t , int > ; <nl> + INSTANTIATE_3 ( CsiMin , false ) ; <nl> + <nl> + using Csl = CMax < uint16_t , int64_t > ; <nl> + INSTANTIATE_3 ( Csl , true ) ; <nl> + using CslMin = CMin < uint16_t , int64_t > ; <nl> + INSTANTIATE_3 ( CslMin , true ) ; <nl> + <nl> + <nl> + <nl> + } / / namespace faiss <nl> + <nl> new file mode 100644 <nl> index 000000000 . . a600f2993 <nl> mmm / dev / null <nl> ppp b / faiss / impl / pq4_fast_scan_search_qbs . cpp <nl> <nl> + / * * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the root directory of this source tree . <nl> + * / <nl> + <nl> + # include < faiss / impl / pq4_fast_scan . h > <nl> + <nl> + # include < faiss / impl / FaissAssert . h > <nl> + # include < faiss / utils / simdlib . h > <nl> + # include < faiss / impl / simd_result_handlers . 
h > <nl> + <nl> + <nl> + namespace faiss { <nl> + <nl> + <nl> + using namespace simd_result_handlers ; <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Accumulation functions <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + namespace { <nl> + <nl> + / * <nl> + * The computation kernel <nl> + * It accumulates results for NQ queries and 2 * 16 database elements <nl> + * writes results in a ResultHandler <nl> + * / <nl> + <nl> + template < int NQ , class ResultHandler > <nl> + void kernel_accumulate_block ( <nl> + int nsq , <nl> + const uint8_t * codes , <nl> + const uint8_t * LUT , <nl> + ResultHandler & res ) <nl> + { <nl> + / / dummy alloc to keep the windows compiler happy <nl> + constexpr int NQA = NQ > 0 ? NQ : 1 ; <nl> + / / distance accumulators <nl> + simd16uint16 accu [ NQA ] [ 4 ] ; <nl> + <nl> + for ( int q = 0 ; q < NQ ; q + + ) { <nl> + for ( int b = 0 ; b < 4 ; b + + ) { <nl> + accu [ q ] [ b ] . clear ( ) ; <nl> + } <nl> + } <nl> + <nl> + / / _mm_prefetch ( codes + 768 , 0 ) ; <nl> + for ( int sq = 0 ; sq < nsq ; sq + = 2 ) { <nl> + <nl> + / / prefetch <nl> + simd32uint8 c ( codes ) ; <nl> + codes + = 32 ; <nl> + <nl> + simd32uint8 mask ( 0xf ) ; <nl> + / / shift op does not exist for int8 . . . <nl> + simd32uint8 chi = simd32uint8 ( simd16uint16 ( c ) > > 4 ) & mask ; <nl> + simd32uint8 clo = c & mask ; <nl> + <nl> + for ( int q = 0 ; q < NQ ; q + + ) { <nl> + / / load LUTs for 2 quantizers <nl> + simd32uint8 lut ( LUT ) ; <nl> + LUT + = 32 ; <nl> + <nl> + simd32uint8 res0 = lut . lookup_2_lanes ( clo ) ; <nl> + simd32uint8 res1 = lut . lookup_2_lanes ( chi ) ; <nl> + <nl> + accu [ q ] [ 0 ] + = simd16uint16 ( res0 ) ; <nl> + accu [ q ] [ 1 ] + = simd16uint16 ( res0 ) > > 8 ; <nl> + <nl> + accu [ q ] [ 2 ] + = simd16uint16 ( res1 ) ; <nl> + accu [ q ] [ 3 ] + = simd16uint16 ( res1 ) > > 8 ; <nl> + } <nl> + } <nl> + <nl> + for ( int q = 0 ; q < NQ ; q + + ) { <nl> + accu [ q ] [ 0 ] - = accu [ q ] [ 1 ] < < 8 ; <nl> + simd16uint16 dis0 = combine2x2 ( accu [ q ] [ 0 ] , accu [ q ] [ 1 ] ) ; <nl> + accu [ q ] [ 2 ] - = accu [ q ] [ 3 ] < < 8 ; <nl> + simd16uint16 dis1 = combine2x2 ( accu [ q ] [ 2 ] , accu [ q ] [ 3 ] ) ; <nl> + res . handle ( q , 0 , dis0 , dis1 ) ; <nl> + } <nl> + <nl> + } <nl> + <nl> + / / handle at most 4 blocks of queries <nl> + template < int QBS , class ResultHandler > <nl> + void accumulate_q_4step ( <nl> + size_t ntotal2 , <nl> + int nsq , <nl> + const uint8_t * codes , <nl> + const uint8_t * LUT0 , <nl> + ResultHandler & res ) <nl> + { <nl> + <nl> + constexpr int Q1 = QBS & 15 ; <nl> + constexpr int Q2 = ( QBS > > 4 ) & 15 ; <nl> + constexpr int Q3 = ( QBS > > 8 ) & 15 ; <nl> + constexpr int Q4 = ( QBS > > 12 ) & 15 ; <nl> + constexpr int SQ = Q1 + Q2 + Q3 + Q4 ; <nl> + <nl> + for ( int64_t j0 = 0 ; j0 < ntotal2 ; j0 + = 32 ) { <nl> + FixedStorageHandler < SQ , 2 > res2 ; <nl> + const uint8_t * LUT = LUT0 ; <nl> + kernel_accumulate_block < Q1 > ( nsq , codes , LUT , res2 ) ; <nl> + LUT + = Q1 * nsq * 16 ; <nl> + if ( Q2 > 0 ) { <nl> + res2 . set_block_origin ( Q1 , 0 ) ; <nl> + kernel_accumulate_block < Q2 > ( nsq , codes , LUT , res2 ) ; <nl> + LUT + = Q2 * nsq * 16 ; <nl> + } <nl> + if ( Q3 > 0 ) { <nl> + res2 . 
set_block_origin ( Q1 + Q2 , 0 ) ; <nl> + kernel_accumulate_block < Q3 > ( nsq , codes , LUT , res2 ) ; <nl> + LUT + = Q3 * nsq * 16 ; <nl> + } <nl> + if ( Q4 > 0 ) { <nl> + res2 . set_block_origin ( Q1 + Q2 + Q3 , 0 ) ; <nl> + kernel_accumulate_block < Q4 > ( nsq , codes , LUT , res2 ) ; <nl> + } <nl> + res . set_block_origin ( 0 , j0 ) ; <nl> + res2 . to_other_handler ( res ) ; <nl> + codes + = 32 * nsq / 2 ; <nl> + } <nl> + } <nl> + <nl> + <nl> + <nl> + <nl> + template < int NQ , class ResultHandler > <nl> + void kernel_accumulate_block_loop ( <nl> + size_t ntotal2 , <nl> + int nsq , <nl> + const uint8_t * codes , <nl> + const uint8_t * LUT , <nl> + ResultHandler & res ) <nl> + { <nl> + <nl> + for ( int64_t j0 = 0 ; j0 < ntotal2 ; j0 + = 32 ) { <nl> + res . set_block_origin ( 0 , j0 ) ; <nl> + kernel_accumulate_block < NQ , ResultHandler > <nl> + ( nsq , codes + j0 * nsq / 2 , LUT , res ) ; <nl> + } <nl> + <nl> + } <nl> + <nl> + / / non - template version of accumulate kernel - - dispatches dynamically <nl> + template < class ResultHandler > <nl> + void accumulate ( <nl> + int nq , <nl> + size_t ntotal2 , <nl> + int nsq , <nl> + const uint8_t * codes , <nl> + const uint8_t * LUT , <nl> + ResultHandler & res ) <nl> + { <nl> + <nl> + assert ( nsq % 2 = = 0 ) ; <nl> + assert ( is_aligned_pointer ( codes ) ) ; <nl> + assert ( is_aligned_pointer ( LUT ) ) ; <nl> + <nl> + # define DISPATCH ( NQ ) \ <nl> + case NQ : \ <nl> + kernel_accumulate_block_loop < NQ , ResultHandler > \ <nl> + ( ntotal2 , nsq , codes , LUT , res ) ; \ <nl> + return <nl> + <nl> + switch ( nq ) { <nl> + DISPATCH ( 1 ) ; <nl> + DISPATCH ( 2 ) ; <nl> + DISPATCH ( 3 ) ; <nl> + DISPATCH ( 4 ) ; <nl> + } <nl> + FAISS_THROW_FMT ( " accumulate nq = % d not instanciated " , <nl> + nq ) ; <nl> + <nl> + # undef DISPATCH <nl> + } <nl> + <nl> + <nl> + } / / anonumous namespace <nl> + <nl> + <nl> + <nl> + template < class ResultHandler > <nl> + void pq4_accumulate_loop_qbs ( <nl> + int qbs , <nl> + size_t ntotal2 , <nl> + int nsq , <nl> + const uint8_t * codes , <nl> + const uint8_t * LUT0 , <nl> + ResultHandler & res ) <nl> + { <nl> + <nl> + assert ( nsq % 2 = = 0 ) ; <nl> + assert ( is_aligned_pointer ( codes ) ) ; <nl> + assert ( is_aligned_pointer ( LUT0 ) ) ; <nl> + <nl> + / / try out optimized versions <nl> + switch ( qbs ) { <nl> + # define DISPATCH ( QBS ) \ <nl> + case QBS : accumulate_q_4step < QBS > \ <nl> + ( ntotal2 , nsq , codes , LUT0 , res ) ; \ <nl> + return ; <nl> + DISPATCH ( 0x3333 ) ; / / 12 <nl> + DISPATCH ( 0x2333 ) ; / / 11 <nl> + DISPATCH ( 0x2233 ) ; / / 10 <nl> + DISPATCH ( 0x333 ) ; / / 9 <nl> + DISPATCH ( 0x2223 ) ; / / 9 <nl> + DISPATCH ( 0x233 ) ; / / 8 <nl> + DISPATCH ( 0x1223 ) ; / / 8 <nl> + DISPATCH ( 0x223 ) ; / / 7 <nl> + DISPATCH ( 0x34 ) ; / / 7 <nl> + DISPATCH ( 0x133 ) ; / / 7 <nl> + DISPATCH ( 0x6 ) ; / / 6 <nl> + DISPATCH ( 0x33 ) ; / / 6 <nl> + DISPATCH ( 0x123 ) ; / / 6 <nl> + DISPATCH ( 0x222 ) ; / / 6 <nl> + DISPATCH ( 0x23 ) ; / / 5 <nl> + DISPATCH ( 0x5 ) ; / / 5 <nl> + DISPATCH ( 0x13 ) ; / / 4 <nl> + DISPATCH ( 0x22 ) ; / / 4 <nl> + DISPATCH ( 0x4 ) ; / / 4 <nl> + DISPATCH ( 0x3 ) ; / / 3 <nl> + DISPATCH ( 0x21 ) ; / / 3 <nl> + DISPATCH ( 0x2 ) ; / / 2 <nl> + DISPATCH ( 0x1 ) ; / / 1 <nl> + # undef DISPATCH <nl> + } <nl> + <nl> + / / default implementation where qbs is not known at compile time <nl> + <nl> + for ( int64_t j0 = 0 ; j0 < ntotal2 ; j0 + = 32 ) { <nl> + const uint8_t * LUT = LUT0 ; <nl> + int qi = qbs ; <nl> + int i0 = 0 ; <nl> + while ( qi ) { <nl> + int nq = qi & 15 ; 
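            // nq is the next base-16 digit of qbs: the number of query vectors
            // in this sub-block, handled by one dispatched kernel call below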
<nl> + qi > > = 4 ; <nl> + res . set_block_origin ( i0 , j0 ) ; <nl> + # define DISPATCH ( NQ ) \ <nl> + case NQ : \ <nl> + kernel_accumulate_block < NQ , ResultHandler > \ <nl> + ( nsq , codes , LUT , res ) ; \ <nl> + break <nl> + switch ( nq ) { <nl> + DISPATCH ( 1 ) ; <nl> + DISPATCH ( 2 ) ; <nl> + DISPATCH ( 3 ) ; <nl> + DISPATCH ( 4 ) ; <nl> + # undef DISPATCH <nl> + default : <nl> + FAISS_THROW_FMT ( " accumulate nq = % d not instanciated " , <nl> + nq ) ; <nl> + } <nl> + i0 + = nq ; <nl> + LUT + = nq * nsq * 16 ; <nl> + } <nl> + codes + = 32 * nsq / 2 ; <nl> + } <nl> + } <nl> + <nl> + <nl> + <nl> + / / explicit template instantiations <nl> + <nl> + <nl> + # define INSTANTIATE_ACCUMULATE_Q ( RH ) \ <nl> + template void pq4_accumulate_loop_qbs < RH > \ <nl> + ( int , size_t , int , const uint8_t * , const uint8_t * , RH & ) ; <nl> + <nl> + using Csi = CMax < uint16_t , int > ; <nl> + INSTANTIATE_ACCUMULATE_Q ( SingleResultHandler < Csi > ) <nl> + INSTANTIATE_ACCUMULATE_Q ( HeapHandler < Csi > ) <nl> + INSTANTIATE_ACCUMULATE_Q ( ReservoirHandler < Csi > ) <nl> + using Csi2 = CMin < uint16_t , int > ; <nl> + INSTANTIATE_ACCUMULATE_Q ( SingleResultHandler < Csi2 > ) <nl> + INSTANTIATE_ACCUMULATE_Q ( HeapHandler < Csi2 > ) <nl> + INSTANTIATE_ACCUMULATE_Q ( ReservoirHandler < Csi2 > ) <nl> + <nl> + using Cfl = CMax < uint16_t , int64_t > ; <nl> + using HHCsl = HeapHandler < Cfl , true > ; <nl> + using RHCsl = ReservoirHandler < Cfl , true > ; <nl> + using SHCsl = SingleResultHandler < Cfl , true > ; <nl> + INSTANTIATE_ACCUMULATE_Q ( HHCsl ) <nl> + INSTANTIATE_ACCUMULATE_Q ( RHCsl ) <nl> + INSTANTIATE_ACCUMULATE_Q ( SHCsl ) <nl> + using Cfl2 = CMin < uint16_t , int64_t > ; <nl> + using HHCsl2 = HeapHandler < Cfl2 , true > ; <nl> + using RHCsl2 = ReservoirHandler < Cfl2 , true > ; <nl> + using SHCsl2 = SingleResultHandler < Cfl2 , true > ; <nl> + INSTANTIATE_ACCUMULATE_Q ( HHCsl2 ) <nl> + INSTANTIATE_ACCUMULATE_Q ( RHCsl2 ) <nl> + INSTANTIATE_ACCUMULATE_Q ( SHCsl2 ) <nl> + <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Packing functions <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + int pq4_qbs_to_nq ( int qbs ) { <nl> + int i0 = 0 ; <nl> + int qi = qbs ; <nl> + while ( qi ) { <nl> + int nq = qi & 15 ; <nl> + qi > > = 4 ; <nl> + i0 + = nq ; <nl> + } <nl> + return i0 ; <nl> + } <nl> + <nl> + <nl> + <nl> + void accumulate_to_mem ( <nl> + int nq , <nl> + size_t ntotal2 , <nl> + int nsq , <nl> + const uint8_t * codes , <nl> + const uint8_t * LUT , <nl> + uint16_t * accu ) <nl> + { <nl> + FAISS_THROW_IF_NOT ( ntotal2 % 32 = = 0 ) ; <nl> + StoreResultHandler handler ( accu , ntotal2 ) ; <nl> + accumulate ( nq , ntotal2 , nsq , codes , LUT , handler ) ; <nl> + } <nl> + <nl> + <nl> + int pq4_preferred_qbs ( int n ) { <nl> + / / from timmings in P141901742 , P141902828 <nl> + static int map [ 12 ] = { <nl> + 0 , 1 , 2 , 3 , 0x13 , <nl> + 0x23 , 0x33 , 0x223 , 0x233 , 0x333 , <nl> + 0x2233 , 0x2333 <nl> + } ; <nl> + if ( n < = 11 ) { <nl> + return map [ n ] ; <nl> + } else if ( n < = 24 ) { <nl> + / / override qbs : all first stages with 3 steps <nl> + / / then 1 stage with the rest <nl> + int nbit = 4 * ( n / 3 ) ; / / nbits with only 3s <nl> + int qbs = 0x33333333 & ( ( 1 < < nbit ) - 1 ) ; <nl> + qbs | = ( n % 3 ) < < nbit ; <nl> + return qbs ; <nl> + } else { <nl> + FAISS_THROW_FMT ( " number of 
queries % d too large " , n ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + <nl> + } / / namespace faiss <nl> + <nl> new file mode 100644 <nl> index 000000000 . . 6209da52b <nl> mmm / dev / null <nl> ppp b / faiss / impl / simd_result_handlers . h <nl> <nl> + / * * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the root directory of this source tree . <nl> + * / <nl> + <nl> + # pragma once <nl> + <nl> + # include < vector > <nl> + # include < algorithm > <nl> + # include < type_traits > <nl> + <nl> + # include < faiss / utils / Heap . h > <nl> + # include < faiss / utils / simdlib . h > <nl> + <nl> + # include < faiss / utils / AlignedTable . h > <nl> + # include < faiss / utils / partitioning . h > <nl> + # include < faiss / impl / platform_macros . h > <nl> + <nl> + / * * This file contains callbacks for kernels that compute distances . <nl> + * <nl> + * The SIMDResultHandler object is intended to be templated and inlined . <nl> + * Methods : <nl> + * - handle ( ) : called when 32 distances are computed and provided in two <nl> + * simd16uint16 . ( q , b ) indicate which entry it is in the block . <nl> + * - set_block_origin ( ) : set the sub - matrix that is being computed <nl> + * / <nl> + <nl> + namespace faiss { <nl> + <nl> + namespace simd_result_handlers { <nl> + <nl> + <nl> + / * * Dummy structure that just computes a checksum on results <nl> + * ( to avoid the computation to be optimized away ) * / <nl> + struct DummyResultHandler { <nl> + size_t cs = 0 ; <nl> + <nl> + void handle ( size_t q , size_t b , simd16uint16 d0 , simd16uint16 d1 ) { <nl> + cs + = q * 123 + b * 789 + d0 . get_scalar_0 ( ) + d1 . get_scalar_0 ( ) ; <nl> + } <nl> + <nl> + void set_block_origin ( size_t , size_t ) { <nl> + } <nl> + } ; <nl> + <nl> + / * * memorize results in a nq - by - nb matrix . <nl> + * <nl> + * j0 is the current upper - left block of the matrix <nl> + * / <nl> + struct StoreResultHandler { <nl> + uint16_t * data ; <nl> + size_t ld ; / / total number of columns <nl> + size_t i0 = 0 ; <nl> + size_t j0 = 0 ; <nl> + <nl> + StoreResultHandler ( uint16_t * data , size_t ld ) : <nl> + data ( data ) , ld ( ld ) { <nl> + } <nl> + <nl> + void handle ( size_t q , size_t b , simd16uint16 d0 , simd16uint16 d1 ) { <nl> + size_t ofs = ( q + i0 ) * ld + j0 + b * 32 ; <nl> + d0 . store ( data + ofs ) ; <nl> + d1 . store ( data + ofs + 16 ) ; <nl> + } <nl> + <nl> + void set_block_origin ( size_t i0 , size_t j0 ) { <nl> + this - > i0 = i0 ; <nl> + this - > j0 = j0 ; <nl> + } <nl> + } ; <nl> + <nl> + <nl> + / * * stores results in fixed - size matrix . * / <nl> + template < int NQ , int BB > <nl> + struct FixedStorageHandler { <nl> + simd16uint16 dis [ NQ ] [ BB ] ; <nl> + int i0 = 0 ; <nl> + <nl> + void handle ( int q , int b , simd16uint16 d0 , simd16uint16 d1 ) { <nl> + dis [ q + i0 ] [ 2 * b ] = d0 ; <nl> + dis [ q + i0 ] [ 2 * b + 1 ] = d1 ; <nl> + } <nl> + <nl> + void set_block_origin ( size_t i0 , size_t j0 ) { <nl> + this - > i0 = i0 ; <nl> + assert ( j0 = = 0 ) ; <nl> + } <nl> + <nl> + template < class OtherResultHandler > <nl> + void to_other_handler ( OtherResultHandler & other ) const { <nl> + for ( int q = 0 ; q < NQ ; q + + ) { <nl> + for ( int b = 0 ; b < BB ; b + = 2 ) { <nl> + other . 
handle ( q , b / 2 , dis [ q ] [ b ] , dis [ q ] [ b + 1 ] ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + } ; <nl> + <nl> + <nl> + / * * Record origin of current block * / <nl> + template < class C , bool with_id_map > <nl> + struct SIMDResultHandler { <nl> + using TI = typename C : : TI ; <nl> + <nl> + bool disable = false ; <nl> + <nl> + int64_t i0 = 0 ; / / query origin <nl> + int64_t j0 = 0 ; / / db origin <nl> + size_t ntotal ; / / ignore excess elements after ntotal <nl> + <nl> + / / / these fields are used mainly for the IVF variants ( with_id_map = true ) <nl> + const TI * id_map ; / / map offset in invlist to vector id <nl> + const int * q_map ; / / map q to global query <nl> + const uint16_t * dbias ; / / table of biases to add to each query <nl> + <nl> + explicit SIMDResultHandler ( size_t ntotal ) : <nl> + ntotal ( ntotal ) , id_map ( nullptr ) , q_map ( nullptr ) , dbias ( nullptr ) <nl> + { } <nl> + <nl> + void set_block_origin ( size_t i0 , size_t j0 ) { <nl> + this - > i0 = i0 ; <nl> + this - > j0 = j0 ; <nl> + } <nl> + <nl> + <nl> + / / adjust handler data for IVF . <nl> + void adjust_with_origin ( size_t & q , simd16uint16 & d0 , simd16uint16 & d1 ) <nl> + { <nl> + q + = i0 ; <nl> + <nl> + if ( dbias ) { <nl> + simd16uint16 dbias16 ( dbias [ q ] ) ; <nl> + d0 + = dbias16 ; <nl> + d1 + = dbias16 ; <nl> + } <nl> + <nl> + if ( with_id_map ) { / / FIXME test on q_map instead <nl> + q = q_map [ q ] ; <nl> + } <nl> + } <nl> + <nl> + / / compute and adjust idx <nl> + int64_t adjust_id ( size_t b , size_t j ) { <nl> + int64_t idx = j0 + 32 * b + j ; <nl> + if ( with_id_map ) { <nl> + idx = id_map [ idx ] ; <nl> + } <nl> + return idx ; <nl> + } <nl> + <nl> + / / / return binary mask of elements below thr in ( d0 , d1 ) <nl> + / / / inverse_test returns elements above <nl> + uint32_t get_lt_mask ( <nl> + uint16_t thr , size_t b , <nl> + simd16uint16 d0 , simd16uint16 d1 <nl> + ) { <nl> + simd16uint16 thr16 ( thr ) ; <nl> + uint32_t lt_mask ; <nl> + <nl> + constexpr bool keep_min = C : : is_max ; <nl> + if ( keep_min ) { <nl> + lt_mask = ~ cmp_ge32 ( d0 , d1 , thr16 ) ; <nl> + } else { <nl> + lt_mask = ~ cmp_le32 ( d0 , d1 , thr16 ) ; <nl> + } <nl> + <nl> + if ( lt_mask = = 0 ) { <nl> + return 0 ; <nl> + } <nl> + uint64_t idx = j0 + b * 32 ; <nl> + if ( idx + 32 > ntotal ) { <nl> + if ( idx > = ntotal ) { <nl> + return 0 ; <nl> + } <nl> + int nbit = ( ntotal - idx ) ; <nl> + lt_mask & = ( uint32_t ( 1 ) < < nbit ) - 1 ; <nl> + } <nl> + return lt_mask ; <nl> + } <nl> + <nl> + virtual void to_flat_arrays ( <nl> + float * distances , int64_t * labels , <nl> + const float * normalizers = nullptr <nl> + ) = 0 ; <nl> + <nl> + virtual ~ SIMDResultHandler ( ) { } <nl> + <nl> + } ; <nl> + <nl> + <nl> + / * * Special version for k = 1 * / <nl> + template < class C , bool with_id_map = false > <nl> + struct SingleResultHandler : SIMDResultHandler < C , with_id_map > { <nl> + using T = typename C : : T ; <nl> + using TI = typename C : : TI ; <nl> + <nl> + struct Result { <nl> + T val ; <nl> + TI id ; <nl> + } ; <nl> + std : : vector < Result > results ; <nl> + <nl> + SingleResultHandler ( size_t nq , size_t ntotal ) : <nl> + SIMDResultHandler < C , with_id_map > ( ntotal ) , results ( nq ) <nl> + { <nl> + for ( int i = 0 ; i < nq ; i + + ) { <nl> + Result res = { C : : neutral ( ) , - 1 } ; <nl> + results [ i ] = res ; <nl> + } <nl> + } <nl> + <nl> + void handle ( size_t q , size_t b , simd16uint16 d0 , simd16uint16 d1 ) { <nl> + if ( this - > disable ) { <nl> + return ; <nl> + } <nl> + <nl> + 
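        // fold the query-block origin, optional per-query bias and q_map
        // remapping into (q, d0, d1); lanes that beat the current best are then
        // extracted one by one from the comparison mask with __builtin_ctz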
this - > adjust_with_origin ( q , d0 , d1 ) ; <nl> + <nl> + Result & res = results [ q ] ; <nl> + uint32_t lt_mask = this - > get_lt_mask ( res . val , b , d0 , d1 ) ; <nl> + if ( ! lt_mask ) { <nl> + return ; <nl> + } <nl> + <nl> + ALIGNED ( 32 ) uint16_t d32tab [ 32 ] ; <nl> + d0 . store ( d32tab ) ; <nl> + d1 . store ( d32tab + 16 ) ; <nl> + <nl> + while ( lt_mask ) { <nl> + / / find first non - zero <nl> + int j = __builtin_ctz ( lt_mask ) ; <nl> + lt_mask - = 1 < < j ; <nl> + T dis = d32tab [ j ] ; <nl> + if ( C : : cmp ( res . val , dis ) ) { <nl> + res . val = dis ; <nl> + res . id = this - > adjust_id ( b , j ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void to_flat_arrays ( <nl> + float * distances , int64_t * labels , <nl> + const float * normalizers = nullptr <nl> + ) override { <nl> + for ( int q = 0 ; q < results . size ( ) ; q + + ) { <nl> + if ( ! normalizers ) { <nl> + distances [ q ] = results [ q ] . val ; <nl> + } else { <nl> + float one_a = 1 / normalizers [ 2 * q ] ; <nl> + float b = normalizers [ 2 * q + 1 ] ; <nl> + distances [ q ] = b + results [ q ] . val * one_a ; <nl> + } <nl> + labels [ q ] = results [ q ] . id ; <nl> + } <nl> + } <nl> + <nl> + } ; <nl> + <nl> + / * * Structure that collects results in a min - or max - heap * / <nl> + template < class C , bool with_id_map = false > <nl> + struct HeapHandler : SIMDResultHandler < C , with_id_map > { <nl> + using T = typename C : : T ; <nl> + using TI = typename C : : TI ; <nl> + <nl> + int nq ; <nl> + T * heap_dis_tab ; <nl> + TI * heap_ids_tab ; <nl> + <nl> + int64_t k ; / / number of results to keep <nl> + <nl> + HeapHandler ( <nl> + int nq , <nl> + T * heap_dis_tab , TI * heap_ids_tab , <nl> + size_t k , size_t ntotal <nl> + ) : <nl> + SIMDResultHandler < C , with_id_map > ( ntotal ) , nq ( nq ) , <nl> + heap_dis_tab ( heap_dis_tab ) , heap_ids_tab ( heap_ids_tab ) , k ( k ) <nl> + { <nl> + for ( int q = 0 ; q < nq ; q + + ) { <nl> + T * heap_dis_in = heap_dis_tab + q * k ; <nl> + TI * heap_ids_in = heap_ids_tab + q * k ; <nl> + heap_heapify < C > ( k , heap_dis_in , heap_ids_in ) ; <nl> + } <nl> + } <nl> + <nl> + void handle ( size_t q , size_t b , simd16uint16 d0 , simd16uint16 d1 ) { <nl> + if ( this - > disable ) { <nl> + return ; <nl> + } <nl> + <nl> + this - > adjust_with_origin ( q , d0 , d1 ) ; <nl> + <nl> + T * heap_dis = heap_dis_tab + q * k ; <nl> + TI * heap_ids = heap_ids_tab + q * k ; <nl> + <nl> + uint16_t cur_thresh = heap_dis [ 0 ] < 65536 ? ( uint16_t ) ( heap_dis [ 0 ] ) : <nl> + 0xffff ; <nl> + <nl> + / / here we handle the reverse comparison case as well <nl> + uint32_t lt_mask = this - > get_lt_mask ( cur_thresh , b , d0 , d1 ) ; <nl> + <nl> + if ( ! lt_mask ) { <nl> + return ; <nl> + } <nl> + <nl> + ALIGNED ( 32 ) uint16_t d32tab [ 32 ] ; <nl> + d0 . store ( d32tab ) ; <nl> + d1 . 
store ( d32tab + 16 ) ; <nl> + <nl> + while ( lt_mask ) { <nl> + / / find first non - zero <nl> + int j = __builtin_ctz ( lt_mask ) ; <nl> + lt_mask - = 1 < < j ; <nl> + T dis = d32tab [ j ] ; <nl> + if ( C : : cmp ( heap_dis [ 0 ] , dis ) ) { <nl> + int64_t idx = this - > adjust_id ( b , j ) ; <nl> + heap_pop < C > ( k , heap_dis , heap_ids ) ; <nl> + heap_push < C > ( k , heap_dis , heap_ids , dis , idx ) ; <nl> + } <nl> + } <nl> + <nl> + } <nl> + <nl> + void to_flat_arrays ( <nl> + float * distances , int64_t * labels , <nl> + const float * normalizers = nullptr <nl> + ) override { <nl> + <nl> + for ( int q = 0 ; q < nq ; q + + ) { <nl> + T * heap_dis_in = heap_dis_tab + q * k ; <nl> + TI * heap_ids_in = heap_ids_tab + q * k ; <nl> + heap_reorder < C > ( k , heap_dis_in , heap_ids_in ) ; <nl> + int64_t * heap_ids = labels + q * k ; <nl> + float * heap_dis = distances + q * k ; <nl> + <nl> + float one_a = 1 . 0 , b = 0 . 0 ; <nl> + if ( normalizers ) { <nl> + one_a = 1 / normalizers [ 2 * q ] ; <nl> + b = normalizers [ 2 * q + 1 ] ; <nl> + } <nl> + for ( int j = 0 ; j < k ; j + + ) { <nl> + heap_ids [ j ] = heap_ids_in [ j ] ; <nl> + heap_dis [ j ] = heap_dis_in [ j ] * one_a + b ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + } ; <nl> + <nl> + <nl> + / * * Simple top - N implementation using a reservoir . <nl> + * <nl> + * Results are stored when they are below the threshold until the capacity is <nl> + * reached . Then a partition sort is used to update the threshold . * / <nl> + <nl> + namespace { <nl> + <nl> + uint64_t get_cy ( ) { <nl> + # ifdef MICRO_BENCHMARK <nl> + uint32_t high , low ; <nl> + asm volatile ( " rdtsc \ n \ t " <nl> + : " = a " ( low ) , <nl> + " = d " ( high ) ) ; <nl> + return ( ( uint64_t ) high < < 32 ) | ( low ) ; <nl> + # else <nl> + return 0 ; <nl> + # endif <nl> + } <nl> + <nl> + } / / anonymous namespace <nl> + <nl> + template < class C > <nl> + struct ReservoirTopN { <nl> + using T = typename C : : T ; <nl> + using TI = typename C : : TI ; <nl> + <nl> + T * vals ; <nl> + TI * ids ; <nl> + <nl> + size_t i ; / / number of stored elements <nl> + size_t n ; / / number of requested elements <nl> + size_t capacity ; / / size of storage <nl> + size_t cycles = 0 ; <nl> + <nl> + T threshold ; / / current threshold <nl> + <nl> + ReservoirTopN ( <nl> + size_t n , size_t capacity , <nl> + T * vals , TI * ids <nl> + ) : <nl> + vals ( vals ) , ids ( ids ) , <nl> + i ( 0 ) , n ( n ) , capacity ( capacity ) { <nl> + assert ( n < capacity ) ; <nl> + threshold = C : : neutral ( ) ; <nl> + } <nl> + <nl> + void add ( T val , TI id ) { <nl> + if ( C : : cmp ( threshold , val ) ) { <nl> + if ( i = = capacity ) { <nl> + shrink_fuzzy ( ) ; <nl> + } <nl> + vals [ i ] = val ; <nl> + ids [ i ] = id ; <nl> + i + + ; <nl> + } <nl> + } <nl> + <nl> + / / / shrink number of stored elements to n <nl> + void shrink_xx ( ) { <nl> + uint64_t t0 = get_cy ( ) ; <nl> + qselect ( vals , ids , i , n ) ; <nl> + i = n ; / / forget all elements above i = n <nl> + threshold = C : : Crev : : neutral ( ) ; <nl> + for ( size_t j = 0 ; j < n ; j + + ) { <nl> + if ( C : : cmp ( vals [ j ] , threshold ) ) { <nl> + threshold = vals [ j ] ; <nl> + } <nl> + } <nl> + cycles + = get_cy ( ) - t0 ; <nl> + } <nl> + <nl> + void shrink ( ) { <nl> + uint64_t t0 = get_cy ( ) ; <nl> + threshold = partition < C > ( vals , ids , i , n ) ; <nl> + i = n ; <nl> + cycles + = get_cy ( ) - t0 ; <nl> + } <nl> + <nl> + void shrink_fuzzy ( ) { <nl> + uint64_t t0 = get_cy ( ) ; <nl> + assert ( i = = capacity ) ; <nl> + threshold = 
partition_fuzzy < C > ( <nl> + vals , ids , capacity , n , ( capacity + n ) / 2 , <nl> + & i ) ; <nl> + cycles + = get_cy ( ) - t0 ; <nl> + } <nl> + } ; <nl> + <nl> + <nl> + / * * Handler built from several ReservoirTopN ( one per query ) * / <nl> + template < class C , bool with_id_map = false > <nl> + struct ReservoirHandler : SIMDResultHandler < C , with_id_map > { <nl> + using T = typename C : : T ; <nl> + using TI = typename C : : TI ; <nl> + <nl> + size_t capacity ; / / rounded up to multiple of 16 <nl> + std : : vector < TI > all_ids ; <nl> + AlignedTable < T > all_vals ; <nl> + <nl> + std : : vector < ReservoirTopN < C > > reservoirs ; <nl> + <nl> + uint64_t times [ 4 ] ; <nl> + <nl> + ReservoirHandler ( size_t nq , size_t ntotal , size_t n , size_t capacity_in ) : <nl> + SIMDResultHandler < C , with_id_map > ( ntotal ) , capacity ( ( capacity_in + 15 ) & ~ 15 ) , <nl> + all_ids ( nq * capacity ) , all_vals ( nq * capacity ) <nl> + { <nl> + assert ( capacity % 16 = = 0 ) ; <nl> + for ( size_t i = 0 ; i < nq ; i + + ) { <nl> + reservoirs . emplace_back ( <nl> + n , capacity , <nl> + all_vals . get ( ) + i * capacity , <nl> + all_ids . data ( ) + i * capacity <nl> + ) ; <nl> + } <nl> + times [ 0 ] = times [ 1 ] = times [ 2 ] = times [ 3 ] = 0 ; <nl> + } <nl> + <nl> + <nl> + void handle ( size_t q , size_t b , simd16uint16 d0 , simd16uint16 d1 ) { <nl> + uint64_t t0 = get_cy ( ) ; <nl> + if ( this - > disable ) { <nl> + return ; <nl> + } <nl> + this - > adjust_with_origin ( q , d0 , d1 ) ; <nl> + <nl> + ReservoirTopN < C > & res = reservoirs [ q ] ; <nl> + uint32_t lt_mask = this - > get_lt_mask ( res . threshold , b , d0 , d1 ) ; <nl> + uint64_t t1 = get_cy ( ) ; <nl> + times [ 0 ] + = t1 - t0 ; <nl> + <nl> + if ( ! lt_mask ) { <nl> + return ; <nl> + } <nl> + ALIGNED ( 32 ) uint16_t d32tab [ 32 ] ; <nl> + d0 . store ( d32tab ) ; <nl> + d1 . store ( d32tab + 16 ) ; <nl> + <nl> + while ( lt_mask ) { <nl> + / / find first non - zero <nl> + int j = __builtin_ctz ( lt_mask ) ; <nl> + lt_mask - = 1 < < j ; <nl> + T dis = d32tab [ j ] ; <nl> + res . add ( dis , this - > adjust_id ( b , j ) ) ; <nl> + } <nl> + times [ 1 ] + = get_cy ( ) - t1 ; <nl> + } <nl> + <nl> + <nl> + void to_flat_arrays ( <nl> + float * distances , int64_t * labels , <nl> + const float * normalizers = nullptr <nl> + ) override { <nl> + using Cf = typename std : : conditional < <nl> + C : : is_max , <nl> + CMax < float , int64_t > , CMin < float , int64_t > > : : type ; <nl> + <nl> + uint64_t t0 = get_cy ( ) ; <nl> + uint64_t t3 = 0 ; <nl> + std : : vector < int > perm ( reservoirs [ 0 ] . n ) ; <nl> + for ( int q = 0 ; q < reservoirs . size ( ) ; q + + ) { <nl> + ReservoirTopN < C > & res = reservoirs [ q ] ; <nl> + size_t n = res . n ; <nl> + <nl> + if ( res . i > res . n ) { <nl> + res . shrink ( ) ; <nl> + } <nl> + int64_t * heap_ids = labels + q * n ; <nl> + float * heap_dis = distances + q * n ; <nl> + <nl> + float one_a = 1 . 0 , b = 0 . 0 ; <nl> + if ( normalizers ) { <nl> + one_a = 1 / normalizers [ 2 * q ] ; <nl> + b = normalizers [ 2 * q + 1 ] ; <nl> + } <nl> + for ( int i = 0 ; i < res . i ; i + + ) { <nl> + perm [ i ] = i ; <nl> + } <nl> + / / indirect sort of result arrays <nl> + std : : sort ( <nl> + perm . begin ( ) , perm . begin ( ) + res . i , <nl> + [ & res ] ( int i , int j ) { <nl> + return C : : cmp ( res . vals [ j ] , res . vals [ i ] ) ; <nl> + } <nl> + ) ; <nl> + for ( int i = 0 ; i < res . i ; i + + ) { <nl> + heap_dis [ i ] = res . 
vals [ perm [ i ] ] * one_a + b ; <nl> + heap_ids [ i ] = res . ids [ perm [ i ] ] ; <nl> + } <nl> + <nl> + / / possibly add empty results <nl> + heap_heapify < Cf > ( n - res . i , heap_dis + res . i , heap_ids + res . i ) ; <nl> + <nl> + t3 + = res . cycles ; <nl> + } <nl> + times [ 2 ] + = get_cy ( ) - t0 ; <nl> + times [ 3 ] + = t3 ; <nl> + } <nl> + <nl> + } ; <nl> + <nl> + <nl> + } / / namespace simd_result_handlers <nl> + <nl> + <nl> + } / / namespace faiss <nl> mmm a / faiss / index_factory . cpp <nl> ppp b / faiss / index_factory . cpp <nl> <nl> # include < faiss / IndexScalarQuantizer . h > <nl> # include < faiss / IndexHNSW . h > <nl> # include < faiss / IndexLattice . h > <nl> + # include < faiss / IndexPQFastScan . h > <nl> + # include < faiss / IndexIVFPQFastScan . h > <nl> <nl> # include < faiss / IndexBinaryFlat . h > <nl> # include < faiss / IndexBinaryHNSW . h > <nl> char get_trains_alone ( const Index * coarse_quantizer ) { <nl> } <nl> <nl> <nl> - } <nl> + } / / anonymous namespace <nl> <nl> Index * index_factory ( int d , const char * description_in , MetricType metric ) <nl> { <nl> Index * index_factory ( int d , const char * description_in , MetricType metric ) <nl> metric = = METRIC_INNER_PRODUCT ) ; <nl> VTChain vts ; <nl> Index * coarse_quantizer = nullptr ; <nl> + std : : unique_ptr < Index > parenthesis_index ; <nl> Index * index = nullptr ; <nl> bool add_idmap = false ; <nl> bool make_IndexRefineFlat = false ; <nl> Index * index_factory ( int d , const char * description_in , MetricType metric ) <nl> std : : string description ( description_in ) ; <nl> char * ptr ; <nl> <nl> + if ( description . find ( ' ( ' ) ! = std : : string : : npos ) { <nl> + / / then we make a sub - index and remove the ( ) from the description <nl> + int i0 = description . find ( ' ( ' ) ; <nl> + int i1 = description . find ( ' ) ' ) ; <nl> + FAISS_THROW_IF_NOT_MSG ( <nl> + i1 ! = std : : string : : npos , " string must contain closing parenthesis " ) ; <nl> + std : : string sub_description = description . substr ( i0 + 1 , i1 - i0 - 1 ) ; <nl> + / / printf ( " substring = % s \ n " , sub_description . c_str ( ) ) ; <nl> + <nl> + parenthesis_index . reset ( index_factory ( d , sub_description . c_str ( ) , metric ) ) ; <nl> + <nl> + description = description . erase ( i0 , i1 - i0 + 1 ) ; <nl> + <nl> + / / printf ( " new description = % s \ n " , description . c_str ( ) ) ; <nl> + <nl> + } <nl> + <nl> int64_t ncentroids = - 1 ; <nl> bool use_2layer = false ; <nl> int hnsw_M = - 1 ; <nl> Index * index_factory ( int d , const char * description_in , MetricType metric ) <nl> int d_out , opq_M , nbit , M , M2 , pq_m , ncent , r2 ; <nl> std : : string stok ( tok ) ; <nl> nbit = 8 ; <nl> + int bbs = - 1 ; <nl> + char c ; <nl> <nl> / / to avoid mem leaks with exceptions : <nl> / / do all tests before any instanciation <nl> Index * index_factory ( int d , const char * description_in , MetricType metric ) <nl> <nl> } else if ( ! coarse_quantizer & & <nl> sscanf ( tok , " IVF % " PRId64 , & ncentroids ) = = 1 ) { <nl> - if ( metric = = METRIC_L2 ) { <nl> + if ( parenthesis_index ) { <nl> + coarse_quantizer_1 = parenthesis_index . release ( ) ; <nl> + } else if ( metric = = METRIC_L2 ) { <nl> coarse_quantizer_1 = new IndexFlatL2 ( d ) ; <nl> } else { <nl> coarse_quantizer_1 = new IndexFlatIP ( d ) ; <nl> } <nl> + <nl> } else if ( ! 
coarse_quantizer & & sscanf ( tok , " IMI2x % d " , & nbit ) = = 1 ) { <nl> FAISS_THROW_IF_NOT_MSG ( metric = = METRIC_L2 , <nl> " MultiIndex not implemented for inner prod search " ) ; <nl> Index * index_factory ( int d , const char * description_in , MetricType metric ) <nl> del_coarse_quantizer . release ( ) ; <nl> index_ivf - > own_fields = true ; <nl> index_1 = index_ivf ; <nl> + } else if ( ! index & & ( <nl> + sscanf ( tok , " PQ % dx4fs_ % d " , & M , & bbs ) = = 2 | | <nl> + ( sscanf ( tok , " PQ % dx4f % c " , & M , & c ) = = 2 & & c = = ' s ' ) ) ) { <nl> + if ( bbs = = - 1 ) { <nl> + bbs = 32 ; <nl> + } <nl> + if ( coarse_quantizer ) { <nl> + IndexIVFPQFastScan * index_ivf = new IndexIVFPQFastScan ( <nl> + coarse_quantizer , d , ncentroids , M , 4 , metric , bbs <nl> + ) ; <nl> + index_ivf - > quantizer_trains_alone = <nl> + get_trains_alone ( coarse_quantizer ) ; <nl> + index_ivf - > metric_type = metric ; <nl> + index_ivf - > cp . spherical = metric = = METRIC_INNER_PRODUCT ; <nl> + del_coarse_quantizer . release ( ) ; <nl> + index_ivf - > own_fields = true ; <nl> + index_1 = index_ivf ; <nl> + } else { <nl> + IndexPQFastScan * index_pq = new IndexPQFastScan ( <nl> + d , M , nbit , metric , bbs <nl> + ) ; <nl> + index_1 = index_pq ; <nl> + } <nl> } else if ( ! index & & ( sscanf ( tok , " PQ % dx % d " , & M , & nbit ) = = 2 | | <nl> sscanf ( tok , " PQ % d " , & M ) = = 1 | | <nl> sscanf ( tok , " PQ % dnp " , & M ) = = 1 ) ) { <nl> mmm a / faiss / index_io . h <nl> ppp b / faiss / index_io . h <nl> void write_InvertedLists ( const InvertedLists * ils , IOWriter * f ) ; <nl> InvertedLists * read_InvertedLists ( IOReader * reader , int io_flags = 0 ) ; <nl> <nl> <nl> - # ifndef _MSC_VER <nl> - / * * Callbacks to handle other types of InvertedList objects . <nl> - * <nl> - * The callbacks should be registered with add_callback before calling <nl> - * read_index or read_InvertedLists . The callbacks for <nl> - * OnDiskInvertedLists are registrered by default . The invlist type is <nl> - * identified by : <nl> - * <nl> - * - the key ( a fourcc ) at read time <nl> - * - the class name ( as given by typeid . name ) at write time <nl> - * / <nl> - struct InvertedListsIOHook { <nl> - const std : : string key ; / / / < string version of the fourcc <nl> - const std : : string classname ; / / / < typeid . name <nl> - <nl> - InvertedListsIOHook ( const std : : string & key , const std : : string & classname ) ; <nl> - <nl> - / / / write the index to the IOWriter ( including the fourcc ) <nl> - virtual void write ( const InvertedLists * ils , IOWriter * f ) const = 0 ; <nl> - <nl> - / / / called when the fourcc matches this class ' s fourcc <nl> - virtual InvertedLists * read ( IOReader * f , int io_flags ) const = 0 ; <nl> - <nl> - / * * read from a ArrayInvertedLists into this invertedlist type . 
<nl> - * For this to work , the callback has to be enabled and the io_flag has to be set to <nl> - * IO_FLAG_SKIP_IVF_DATA | ( 16 upper bits of the fourcc ) <nl> - * / <nl> - virtual InvertedLists * read_ArrayInvertedLists ( <nl> - IOReader * f , int io_flags , <nl> - size_t nlist , size_t code_size , <nl> - const std : : vector < size_t > & sizes ) const = 0 ; <nl> - <nl> - virtual ~ InvertedListsIOHook ( ) { } <nl> - <nl> - / * * * * * * * * * * * * * * * * * * * * * * * * * * * * Manage the set of callbacks * * * * * * / <nl> - <nl> - / / transfers ownership <nl> - static void add_callback ( InvertedListsIOHook * ) ; <nl> - static void print_callbacks ( ) ; <nl> - static InvertedListsIOHook * lookup ( int h ) ; <nl> - static InvertedListsIOHook * lookup_classname ( const std : : string & classname ) ; <nl> - <nl> - } ; <nl> - <nl> - # endif / / ! _MSC_VER <nl> - <nl> - <nl> } / / namespace faiss <nl> <nl> <nl> new file mode 100644 <nl> index 000000000 . . 05b96c57c <nl> mmm / dev / null <nl> ppp b / faiss / invlists / BlockInvertedLists . cpp <nl> <nl> + / * * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the root directory of this source tree . <nl> + * / <nl> + <nl> + # include < faiss / invlists / BlockInvertedLists . h > <nl> + <nl> + # include < faiss / impl / FaissAssert . h > <nl> + <nl> + # include < faiss / impl / io . h > <nl> + # include < faiss / impl / io_macros . h > <nl> + <nl> + <nl> + namespace faiss { <nl> + <nl> + BlockInvertedLists : : BlockInvertedLists ( <nl> + size_t nlist , size_t n_per_block , <nl> + size_t block_size ) : <nl> + InvertedLists ( nlist , InvertedLists : : INVALID_CODE_SIZE ) , <nl> + n_per_block ( n_per_block ) , block_size ( block_size ) <nl> + { <nl> + ids . resize ( nlist ) ; <nl> + codes . resize ( nlist ) ; <nl> + } <nl> + <nl> + BlockInvertedLists : : BlockInvertedLists ( ) : <nl> + InvertedLists ( 0 , InvertedLists : : INVALID_CODE_SIZE ) , <nl> + n_per_block ( 0 ) , block_size ( 0 ) <nl> + { } <nl> + <nl> + <nl> + size_t BlockInvertedLists : : add_entries ( <nl> + size_t list_no , size_t n_entry , <nl> + const idx_t * ids_in , const uint8_t * code ) <nl> + { <nl> + if ( n_entry = = 0 ) return 0 ; <nl> + FAISS_THROW_IF_NOT ( list_no < nlist ) ; <nl> + size_t o = ids [ list_no ] . size ( ) ; <nl> + FAISS_THROW_IF_NOT ( o = = 0 ) ; / / not clear how we should handle subsequent adds <nl> + ids [ list_no ] . resize ( o + n_entry ) ; <nl> + memcpy ( & ids [ list_no ] [ o ] , ids_in , sizeof ( ids_in [ 0 ] ) * n_entry ) ; <nl> + <nl> + / / copy whole blocks <nl> + size_t n_block = ( n_entry + n_per_block - 1 ) / n_per_block ; <nl> + codes [ list_no ] . resize ( n_block * block_size ) ; <nl> + memcpy ( & codes [ list_no ] [ o * code_size ] , code , n_block * block_size ) ; <nl> + return o ; <nl> + } <nl> + <nl> + size_t BlockInvertedLists : : list_size ( size_t list_no ) const <nl> + { <nl> + assert ( list_no < nlist ) ; <nl> + return ids [ list_no ] . size ( ) ; <nl> + } <nl> + <nl> + const uint8_t * BlockInvertedLists : : get_codes ( size_t list_no ) const <nl> + { <nl> + assert ( list_no < nlist ) ; <nl> + return codes [ list_no ] . get ( ) ; <nl> + } <nl> + <nl> + const InvertedLists : : idx_t * BlockInvertedLists : : get_ids ( size_t list_no ) const <nl> + { <nl> + assert ( list_no < nlist ) ; <nl> + return ids [ list_no ] . 
data ( ) ; <nl> + } <nl> + <nl> + void BlockInvertedLists : : resize ( size_t list_no , size_t new_size ) <nl> + { <nl> + ids [ list_no ] . resize ( new_size ) ; <nl> + size_t prev_nbytes = codes [ list_no ] . size ( ) ; <nl> + size_t n_block = ( new_size + n_per_block - 1 ) / n_per_block ; <nl> + size_t new_nbytes = n_block * block_size ; <nl> + codes [ list_no ] . resize ( new_nbytes ) ; <nl> + if ( prev_nbytes < new_nbytes ) { <nl> + / / set new elements to 0 <nl> + memset ( <nl> + codes [ list_no ] . data ( ) + prev_nbytes , 0 , <nl> + new_nbytes - prev_nbytes <nl> + ) ; <nl> + } <nl> + } <nl> + <nl> + void BlockInvertedLists : : update_entries ( <nl> + size_t , size_t , size_t , <nl> + const idx_t * , const uint8_t * ) <nl> + { <nl> + FAISS_THROW_MSG ( " not impemented " ) ; <nl> + / * <nl> + assert ( list_no < nlist ) ; <nl> + assert ( n_entry + offset < = ids [ list_no ] . size ( ) ) ; <nl> + memcpy ( & ids [ list_no ] [ offset ] , ids_in , sizeof ( ids_in [ 0 ] ) * n_entry ) ; <nl> + memcpy ( & codes [ list_no ] [ offset * code_size ] , codes_in , code_size * n_entry ) ; <nl> + * / <nl> + } <nl> + <nl> + <nl> + BlockInvertedLists : : ~ BlockInvertedLists ( ) <nl> + { } <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * IO hook implementation <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + BlockInvertedListsIOHook : : BlockInvertedListsIOHook ( ) : <nl> + InvertedListsIOHook ( " ilbl " , typeid ( BlockInvertedLists ) . name ( ) ) <nl> + { } <nl> + <nl> + <nl> + void BlockInvertedListsIOHook : : write ( const InvertedLists * ils_in , IOWriter * f ) const <nl> + { <nl> + uint32_t h = fourcc ( " ilbl " ) ; <nl> + WRITE1 ( h ) ; <nl> + const BlockInvertedLists * il = <nl> + dynamic_cast < const BlockInvertedLists * > ( ils_in ) ; <nl> + WRITE1 ( il - > nlist ) ; <nl> + WRITE1 ( il - > code_size ) ; <nl> + WRITE1 ( il - > n_per_block ) ; <nl> + WRITE1 ( il - > block_size ) ; <nl> + <nl> + for ( size_t i = 0 ; i < il - > nlist ; i + + ) { <nl> + WRITEVECTOR ( il - > ids [ i ] ) ; <nl> + WRITEVECTOR ( il - > codes [ i ] ) ; <nl> + } <nl> + } <nl> + <nl> + InvertedLists * BlockInvertedListsIOHook : : read ( IOReader * f , int / * io_flags * / ) const <nl> + { <nl> + BlockInvertedLists * il = new BlockInvertedLists ( ) ; <nl> + READ1 ( il - > nlist ) ; <nl> + READ1 ( il - > code_size ) ; <nl> + READ1 ( il - > n_per_block ) ; <nl> + READ1 ( il - > block_size ) ; <nl> + <nl> + il - > ids . resize ( il - > nlist ) ; <nl> + il - > codes . resize ( il - > nlist ) ; <nl> + <nl> + for ( size_t i = 0 ; i < il - > nlist ; i + + ) { <nl> + READVECTOR ( il - > ids [ i ] ) ; <nl> + READVECTOR ( il - > codes [ i ] ) ; <nl> + } <nl> + <nl> + return il ; <nl> + } <nl> + <nl> + <nl> + <nl> + <nl> + } / / namespace faiss <nl> new file mode 100644 <nl> index 000000000 . . d2c9f747c <nl> mmm / dev / null <nl> ppp b / faiss / invlists / BlockInvertedLists . h <nl> <nl> + / * * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the root directory of this source tree . <nl> + * / <nl> + <nl> + <nl> + # pragma once <nl> + <nl> + # include < faiss / invlists / InvertedLists . h > <nl> + # include < faiss / invlists / InvertedListsIOHook . h > <nl> + # include < faiss / utils / AlignedTable . h > <nl> + # include < faiss / index_io . 
h > <nl> + <nl> + namespace faiss { <nl> + <nl> + / * * Inverted Lists that are organized by blocks . <nl> + * <nl> + * Different from the regular inverted lists , the codes are organized by blocks <nl> + * of size block_size bytes that reprsent a set of n_per_block . Therefore , code <nl> + * allocations are always rounded up to block_size bytes . The codes are also <nl> + * aligned on 32 - byte boundaries for use with SIMD . <nl> + * <nl> + * To avoid misinterpretations , the code_size is set to ( size_t ) ( - 1 ) , even if <nl> + * arguably the amount of memory consumed by code is block_size / n_per_block . <nl> + * <nl> + * The writing functions add_entries and update_entries operate on block - aligned <nl> + * data . <nl> + * / <nl> + struct BlockInvertedLists : InvertedLists { <nl> + <nl> + size_t n_per_block ; / / nb of vectors stored per block <nl> + size_t block_size ; / / nb bytes per block <nl> + <nl> + std : : vector < AlignedTable < uint8_t > > codes ; <nl> + std : : vector < std : : vector < idx_t > > ids ; <nl> + <nl> + <nl> + BlockInvertedLists ( <nl> + size_t nlist , size_t vec_per_block , <nl> + size_t block_size <nl> + ) ; <nl> + <nl> + BlockInvertedLists ( ) ; <nl> + <nl> + size_t list_size ( size_t list_no ) const override ; <nl> + const uint8_t * get_codes ( size_t list_no ) const override ; <nl> + const idx_t * get_ids ( size_t list_no ) const override ; <nl> + <nl> + / / works only on empty BlockInvertedLists <nl> + / / the codes should be of size ceil ( n_entry / n_per_block ) * block_size <nl> + / / and padded with 0s <nl> + size_t add_entries ( <nl> + size_t list_no , size_t n_entry , <nl> + const idx_t * ids , const uint8_t * code ) override ; <nl> + <nl> + / / / not implemented <nl> + void update_entries ( size_t list_no , size_t offset , size_t n_entry , <nl> + const idx_t * ids , const uint8_t * code ) override ; <nl> + <nl> + / / also pads new data with 0s <nl> + void resize ( size_t list_no , size_t new_size ) override ; <nl> + <nl> + ~ BlockInvertedLists ( ) override ; <nl> + <nl> + } ; <nl> + <nl> + struct BlockInvertedListsIOHook : InvertedListsIOHook { <nl> + BlockInvertedListsIOHook ( ) ; <nl> + void write ( const InvertedLists * ils , IOWriter * f ) const override ; <nl> + InvertedLists * read ( IOReader * f , int io_flags ) const override ; <nl> + } ; <nl> + <nl> + <nl> + } / / namespace faiss <nl> similarity index 99 % <nl> rename from faiss / DirectMap . cpp <nl> rename to faiss / invlists / DirectMap . cpp <nl> mmm a / faiss / DirectMap . cpp <nl> ppp b / faiss / invlists / DirectMap . cpp <nl> <nl> <nl> / / - * - c + + - * - <nl> <nl> - # include < faiss / DirectMap . h > <nl> + # include < faiss / invlists / DirectMap . h > <nl> <nl> # include < cstdio > <nl> # include < cassert > <nl> similarity index 98 % <nl> rename from faiss / DirectMap . h <nl> rename to faiss / invlists / DirectMap . h <nl> mmm a / faiss / DirectMap . h <nl> ppp b / faiss / invlists / DirectMap . h <nl> <nl> # ifndef FAISS_DIRECT_MAP_H <nl> # define FAISS_DIRECT_MAP_H <nl> <nl> - # include < faiss / InvertedLists . h > <nl> + # include < faiss / invlists / InvertedLists . h > <nl> # include < unordered_map > <nl> <nl> <nl> similarity index 99 % <nl> rename from faiss / InvertedLists . cpp <nl> rename to faiss / invlists / InvertedLists . cpp <nl> mmm a / faiss / InvertedLists . cpp <nl> ppp b / faiss / invlists / InvertedLists . cpp <nl> <nl> <nl> / / - * - c + + - * - <nl> <nl> - # include < faiss / InvertedLists . 
h > <nl> + # include < faiss / invlists / InvertedLists . h > <nl> <nl> # include < cstdio > <nl> <nl> similarity index 97 % <nl> rename from faiss / InvertedLists . h <nl> rename to faiss / invlists / InvertedLists . h <nl> mmm a / faiss / InvertedLists . h <nl> ppp b / faiss / invlists / InvertedLists . h <nl> struct InvertedLists { <nl> <nl> InvertedLists ( size_t nlist , size_t code_size ) ; <nl> <nl> + / / / used for BlockInvertedLists , where the codes are packed into groups <nl> + / / / and the individual code size is meaningless <nl> + static const size_t INVALID_CODE_SIZE = static_cast < size_t > ( - 1 ) ; <nl> + <nl> / * * * * * * * * * * * * * * * * * * * * * * * * * <nl> * Read only functions * / <nl> <nl> struct ArrayInvertedLists : InvertedLists { <nl> <nl> void resize ( size_t list_no , size_t new_size ) override ; <nl> <nl> - virtual ~ ArrayInvertedLists ( ) ; <nl> + ~ ArrayInvertedLists ( ) override ; <nl> } ; <nl> <nl> / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> new file mode 100644 <nl> index 000000000 . . 2a34ca765 <nl> mmm / dev / null <nl> ppp b / faiss / invlists / InvertedListsIOHook . cpp <nl> <nl> + / * * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the root directory of this source tree . <nl> + * / <nl> + <nl> + # include < faiss / invlists / InvertedListsIOHook . h > <nl> + <nl> + # include < faiss / impl / io . h > <nl> + # include < faiss / impl / io_macros . h > <nl> + # include < faiss / impl / FaissAssert . h > <nl> + <nl> + # include < faiss / invlists / BlockInvertedLists . h > <nl> + <nl> + # ifndef _MSC_VER <nl> + # include < faiss / invlists / OnDiskInvertedLists . h > <nl> + # endif / / ! _MSC_VER <nl> + <nl> + <nl> + namespace faiss { <nl> + <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * InvertedListIOHook ' s <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + InvertedListsIOHook : : InvertedListsIOHook ( <nl> + const std : : string & key , const std : : string & classname ) : <nl> + key ( key ) , classname ( classname ) <nl> + { } <nl> + <nl> + namespace { <nl> + <nl> + / / / std : : vector that deletes its contents <nl> + struct IOHookTable : std : : vector < InvertedListsIOHook * > { <nl> + <nl> + IOHookTable ( ) { <nl> + # ifndef _MSC_VER <nl> + push_back ( new OnDiskInvertedListsIOHook ( ) ) ; <nl> + # endif <nl> + push_back ( new BlockInvertedListsIOHook ( ) ) ; <nl> + } <nl> + <nl> + ~ IOHookTable ( ) { <nl> + for ( auto x : * this ) { <nl> + delete x ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + static IOHookTable InvertedListsIOHook_table ; <nl> + <nl> + } / / anonymous namepsace <nl> + <nl> + InvertedListsIOHook * InvertedListsIOHook : : lookup ( int h ) <nl> + { <nl> + for ( const auto & callback : InvertedListsIOHook_table ) { <nl> + if ( h = = fourcc ( callback - > key ) ) { <nl> + return callback ; <nl> + } <nl> + } <nl> + FAISS_THROW_FMT ( <nl> + " read_InvertedLists : could not load ArrayInvertedLists as " <nl> + " % 08x ( \ " % s \ " ) " , h , fourcc_inv_printable ( h ) . 
c_str ( ) <nl> + ) ; <nl> + } <nl> + <nl> + InvertedListsIOHook * InvertedListsIOHook : : lookup_classname ( const std : : string & classname ) <nl> + { <nl> + for ( const auto & callback : InvertedListsIOHook_table ) { <nl> + if ( callback - > classname = = classname ) { <nl> + return callback ; <nl> + } <nl> + } <nl> + FAISS_THROW_FMT ( <nl> + " read_InvertedLists : could not find classname % s " , <nl> + classname . c_str ( ) <nl> + ) ; <nl> + } <nl> + <nl> + void InvertedListsIOHook : : add_callback ( InvertedListsIOHook * cb ) <nl> + { <nl> + InvertedListsIOHook_table . push_back ( cb ) ; <nl> + } <nl> + <nl> + void InvertedListsIOHook : : print_callbacks ( ) <nl> + { <nl> + printf ( " registered % zd InvertedListsIOHooks : \ n " , <nl> + InvertedListsIOHook_table . size ( ) ) ; <nl> + for ( const auto & cb : InvertedListsIOHook_table ) { <nl> + printf ( " % 08x % s % s \ n " , <nl> + fourcc ( cb - > key . c_str ( ) ) , <nl> + cb - > key . c_str ( ) , <nl> + cb - > classname . c_str ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + InvertedLists * InvertedListsIOHook : : read_ArrayInvertedLists ( <nl> + IOReader * , int , <nl> + size_t , size_t , <nl> + const std : : vector < size_t > & ) const <nl> + { <nl> + FAISS_THROW_FMT ( " read to array not implemented for % s " , classname . c_str ( ) ) ; <nl> + } <nl> + <nl> + } / / namespace faiss <nl> new file mode 100644 <nl> index 000000000 . . 4bf42d713 <nl> mmm / dev / null <nl> ppp b / faiss / invlists / InvertedListsIOHook . h <nl> <nl> + / * * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the root directory of this source tree . <nl> + * / <nl> + <nl> + # pragma once <nl> + <nl> + # include < string > <nl> + # include < faiss / invlists / InvertedLists . h > <nl> + # include < faiss / impl / io . h > <nl> + <nl> + <nl> + namespace faiss { <nl> + <nl> + <nl> + / * * Callbacks to handle other types of InvertedList objects . <nl> + * <nl> + * The callbacks should be registered with add_callback before calling <nl> + * read_index or read_InvertedLists . The callbacks for <nl> + * OnDiskInvertedLists are registrered by default . The invlist type is <nl> + * identified by : <nl> + * <nl> + * - the key ( a fourcc ) at read time <nl> + * - the class name ( as given by typeid . name ) at write time <nl> + * / <nl> + struct InvertedListsIOHook { <nl> + const std : : string key ; / / / < string version of the fourcc <nl> + const std : : string classname ; / / / < typeid . name <nl> + <nl> + InvertedListsIOHook ( const std : : string & key , const std : : string & classname ) ; <nl> + <nl> + / / / write the index to the IOWriter ( including the fourcc ) <nl> + virtual void write ( const InvertedLists * ils , IOWriter * f ) const = 0 ; <nl> + <nl> + / / / called when the fourcc matches this class ' s fourcc <nl> + virtual InvertedLists * read ( IOReader * f , int io_flags ) const = 0 ; <nl> + <nl> + / * * read from a ArrayInvertedLists into this invertedlist type . 
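 * (this lets an index stored with the default ArrayInvertedLists format be
 * loaded directly into a specialized invlist implementation)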
<nl> + * For this to work , the callback has to be enabled and the io_flag has to be set to <nl> + * IO_FLAG_SKIP_IVF_DATA | ( 16 upper bits of the fourcc ) <nl> + * <nl> + * ( default implementation fails ) <nl> + * / <nl> + virtual InvertedLists * read_ArrayInvertedLists ( <nl> + IOReader * f , int io_flags , <nl> + size_t nlist , size_t code_size , <nl> + const std : : vector < size_t > & sizes ) const ; <nl> + <nl> + virtual ~ InvertedListsIOHook ( ) { } <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * Manage the set of callbacks * * * * * * / <nl> + <nl> + / / transfers ownership <nl> + static void add_callback ( InvertedListsIOHook * ) ; <nl> + static void print_callbacks ( ) ; <nl> + static InvertedListsIOHook * lookup ( int h ) ; <nl> + static InvertedListsIOHook * lookup_classname ( const std : : string & classname ) ; <nl> + <nl> + } ; <nl> + <nl> + } / / namespace faiss <nl> \ No newline at end of file <nl> similarity index 98 % <nl> rename from faiss / OnDiskInvertedLists . cpp <nl> rename to faiss / invlists / OnDiskInvertedLists . cpp <nl> mmm a / faiss / OnDiskInvertedLists . cpp <nl> ppp b / faiss / invlists / OnDiskInvertedLists . cpp <nl> <nl> <nl> / / - * - c + + - * - <nl> <nl> - # include < faiss / OnDiskInvertedLists . h > <nl> + # include < faiss / invlists / OnDiskInvertedLists . h > <nl> <nl> # include < pthread . h > <nl> <nl> struct LockLevels { <nl> <nl> void print ( ) { <nl> pthread_mutex_lock ( & mutex1 ) ; <nl> - printf ( " State : level3_in_use = % d n_level2 = % d level1_holders : [ " , level3_in_use , n_level2 ) ; <nl> + printf ( " State : level3_in_use = % d n_level2 = % d level1_holders : [ " , <nl> + int ( level3_in_use ) , n_level2 ) ; <nl> for ( int k : level1_holders ) { <nl> printf ( " % d " , k ) ; <nl> } <nl> void OnDiskInvertedLists : : update_totsize ( size_t new_size ) <nl> / / unmap file <nl> if ( ptr ! = nullptr ) { <nl> int err = munmap ( ptr , totsize ) ; <nl> - FAISS_THROW_IF_NOT_FMT ( err = = 0 , " munmap error : % s " , <nl> - strerror ( errno ) ) ; <nl> + FAISS_THROW_IF_NOT_FMT ( err = = 0 , " munmap error : % s " , strerror ( errno ) ) ; <nl> } <nl> if ( totsize = = 0 ) { <nl> / / must create file before truncating it <nl> size_t OnDiskInvertedLists : : allocate_slot ( size_t capacity ) { <nl> if ( it = = slots . end ( ) ) { <nl> / / not enough capacity <nl> size_t new_size = totsize = = 0 ? 32 : totsize * 2 ; <nl> - while ( new_size - totsize < capacity ) <nl> + while ( new_size - totsize < capacity ) { <nl> new_size * = 2 ; <nl> + } <nl> locks - > lock_3 ( ) ; <nl> update_totsize ( new_size ) ; <nl> locks - > unlock_3 ( ) ; <nl> similarity index 98 % <nl> rename from faiss / OnDiskInvertedLists . h <nl> rename to faiss / invlists / OnDiskInvertedLists . h <nl> mmm a / faiss / OnDiskInvertedLists . h <nl> ppp b / faiss / invlists / OnDiskInvertedLists . h <nl> <nl> # include < typeinfo > <nl> <nl> # include < faiss / IndexIVF . h > <nl> - <nl> + # include < faiss / invlists / InvertedListsIOHook . h > <nl> # include < faiss / index_io . h > <nl> <nl> namespace faiss { <nl> struct OnDiskInvertedLists : InvertedLists { <nl> <nl> void prefetch_lists ( const idx_t * list_nos , int nlist ) const override ; <nl> <nl> - virtual ~ OnDiskInvertedLists ( ) ; <nl> + ~ OnDiskInvertedLists ( ) override ; <nl> <nl> / / private <nl> <nl> mmm a / faiss / python / __init__ . py <nl> ppp b / faiss / python / __init__ . 
py <nl> def replacement_function ( * args ) : <nl> add_ref_in_method ( IndexPreTransform , ' prepend_transform ' , 0 ) <nl> add_ref_in_constructor ( IndexIVFPQ , 0 ) <nl> add_ref_in_constructor ( IndexIVFPQR , 0 ) <nl> + add_ref_in_constructor ( IndexIVFPQFastScan , 0 ) <nl> add_ref_in_constructor ( Index2Layer , 0 ) <nl> add_ref_in_constructor ( Level1Quantizer , 0 ) <nl> add_ref_in_constructor ( IndexIVFScalarQuantizer , 0 ) <nl> def replacement_function ( * args ) : <nl> add_ref_in_constructor ( IndexHNSW , 0 ) <nl> add_ref_in_method ( IndexShards , ' add_shard ' , 0 ) <nl> add_ref_in_method ( IndexBinaryShards , ' add_shard ' , 0 ) <nl> - add_ref_in_constructor ( IndexRefineFlat , 0 ) <nl> + # add_ref_in_constructor ( IndexRefineFlat , 0 ) <nl> + <nl> add_ref_in_constructor ( IndexBinaryIVF , 0 ) <nl> add_ref_in_constructor ( IndexBinaryFromFloat , 0 ) <nl> add_ref_in_constructor ( IndexBinaryIDMap , 0 ) <nl> def replacement_function ( * args ) : <nl> # seems really marginal . . . <nl> # remove_ref_from_method ( IndexReplicas , ' removeIndex ' , 0 ) <nl> <nl> + def handle_IndexRefineFlat ( the_class ) : <nl> + <nl> + original_init = the_class . __init__ <nl> + <nl> + def replacement_init ( self , * args ) : <nl> + if len ( args ) = = 2 : <nl> + index , xb = args <nl> + assert xb . shape = = ( index . ntotal , index . d ) <nl> + xb = swig_ptr ( xb ) <nl> + args = ( index , xb ) <nl> + <nl> + original_init ( self , * args ) <nl> + self . referenced_objects = [ args [ 0 ] ] <nl> + <nl> + the_class . __init__ = replacement_init <nl> + <nl> + handle_IndexRefineFlat ( IndexRefineFlat ) <nl> + <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> # GPU functions <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> def copy_array_to_vector ( a , v ) : <nl> if n > 0 : <nl> memcpy ( v . data ( ) , swig_ptr ( a ) , a . nbytes ) <nl> <nl> + # same for AlignedTable <nl> + <nl> + def copy_array_to_AlignedTable ( a , v ) : <nl> + n , = a . shape <nl> + # TODO check class name <nl> + assert v . itemsize ( ) = = a . itemsize <nl> + v . resize ( n ) <nl> + if n > 0 : <nl> + memcpy ( v . get ( ) , swig_ptr ( a ) , a . nbytes ) <nl> + <nl> + def array_to_AlignedTable ( a ) : <nl> + if a . dtype = = ' uint16 ' : <nl> + v = AlignedTableUint16 ( a . size ) <nl> + elif a . dtype = = ' uint8 ' : <nl> + v = AlignedTableUint8 ( a . size ) <nl> + else : <nl> + assert False <nl> + copy_array_to_AlignedTable ( a , v ) <nl> + return v <nl> + <nl> + def AlignedTable_to_array ( v ) : <nl> + " " " convert an AlignedTable to a numpy array " " " <nl> + classname = v . __class__ . __name__ <nl> + assert classname . startswith ( ' AlignedTable ' ) <nl> + dtype = classname [ 12 : ] . lower ( ) <nl> + a = np . empty ( v . size ( ) , dtype = dtype ) <nl> + if a . size > 0 : <nl> + memcpy ( swig_ptr ( a ) , v . data ( ) , a . nbytes ) <nl> + return a <nl> <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> # Wrapper for a few functions <nl> def eval_intersection ( I1 , I2 ) : <nl> def normalize_L2 ( x ) : <nl> fvec_renorm_L2 ( x . shape [ 1 ] , x . 
shape [ 0 ] , swig_ptr ( x ) ) <nl> <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> # MapLong2Long interface <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> <nl> def replacement_map_add ( self , keys , vals ) : <nl> n , = keys . shape <nl> def replacement_map_search_multiple ( self , keys ) : <nl> replace_method ( MapLong2Long , ' add ' , replacement_map_add ) <nl> replace_method ( MapLong2Long , ' search_multiple ' , replacement_map_search_multiple ) <nl> <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + # search_with_parameters interface <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + <nl> search_with_parameters_c = search_with_parameters <nl> <nl> def search_with_parameters ( index , x , k , params = None , output_stats = False ) : <nl> def range_search_with_parameters ( index , x , radius , params = None , output_stats = Fal <nl> return lims , Dout , Iout , stats <nl> <nl> <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + # KNN function <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + <nl> + def knn ( xq , xb , k , distance_type = METRIC_L2 ) : <nl> + " " " wrapper around the faiss knn functions without index " " " <nl> + nq , d = xq . shape <nl> + nb , d2 = xb . shape <nl> + assert d = = d2 <nl> + <nl> + I = np . empty ( ( nq , k ) , dtype = ' int64 ' ) <nl> + D = np . empty ( ( nq , k ) , dtype = ' float32 ' ) <nl> + <nl> + if distance_type = = METRIC_L2 : <nl> + heaps = float_maxheap_array_t ( ) <nl> + heaps . k = k <nl> + heaps . nh = nq <nl> + heaps . val = swig_ptr ( D ) <nl> + heaps . ids = swig_ptr ( I ) <nl> + knn_L2sqr ( <nl> + swig_ptr ( xq ) , swig_ptr ( xb ) , <nl> + d , nq , nb , heaps <nl> + ) <nl> + elif distance_type = = METRIC_INNER_PRODUCT : <nl> + heaps = float_minheap_array_t ( ) <nl> + heaps . k = k <nl> + heaps . nh = nq <nl> + heaps . val = swig_ptr ( D ) <nl> + heaps . ids = swig_ptr ( I ) <nl> + knn_inner_product ( <nl> + swig_ptr ( xq ) , swig_ptr ( xb ) , <nl> + d , nq , nb , heaps <nl> + ) <nl> + return D , I <nl> + <nl> + <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> # Kmeans object <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> mmm a / faiss / python / python_callbacks . h <nl> ppp b / faiss / python / python_callbacks . h <nl> <nl> <nl> # include " Python . h " <nl> # include < faiss / impl / io . h > <nl> - # include < faiss / InvertedLists . h > <nl> + # include < faiss / invlists / InvertedLists . h > <nl> <nl> / / all callbacks have to acquire the GIL on input <nl> <nl> mmm a / faiss / python / swigfaiss . swig <nl> ppp b / faiss / python / swigfaiss . swig <nl> <nl> # pragma SWIG nowarn = 389 <nl> # pragma SWIG nowarn = 341 <nl> # pragma SWIG nowarn = 512 <nl> + # pragma SWIG nowarn = 362 <nl> <nl> % include < stdint . i > <nl> <nl> typedef uint64_t size_t ; <nl> <nl> + <nl> # define __restrict <nl> <nl> <nl> typedef uint64_t size_t ; <nl> # include < faiss / Index2Layer . h > <nl> # include < faiss / IndexIVFPQR . h > <nl> # include < faiss / IndexIVFFlat . h > <nl> + <nl> + # include < faiss / IndexPQFastScan . 
h > <nl> + # include < faiss / IndexIVFPQFastScan . h > <nl> + # include < faiss / utils / quantize_lut . h > <nl> + <nl> # include < faiss / IndexScalarQuantizer . h > <nl> # include < faiss / IndexIVFSpectralHash . h > <nl> # include < faiss / impl / ThreadedIndex . h > <nl> typedef uint64_t size_t ; <nl> # include < faiss / utils / extra_distances . h > <nl> # include < faiss / utils / random . h > <nl> # include < faiss / utils / Heap . h > <nl> + # include < faiss / utils / AlignedTable . h > <nl> # include < faiss / utils / partitioning . h > <nl> # include < faiss / impl / AuxIndexStructures . h > <nl> <nl> + # include < faiss / invlists / BlockInvertedLists . h > <nl> + <nl> # ifndef _MSC_VER <nl> - # include < faiss / OnDiskInvertedLists . h > <nl> + # include < faiss / invlists / OnDiskInvertedLists . h > <nl> # endif / / ! _MSC_VER <nl> <nl> # include < faiss / Clustering . h > <nl> namespace std { <nl> <nl> % ignore * : : cmp ; <nl> <nl> - % include < faiss / utils / ordered_key_value . h > <nl> - % include < faiss / utils / Heap . h > <nl> - % include < faiss / utils / partitioning . h > <nl> - % include < faiss / utils / hamming . h > <nl> + % include < faiss / utils / ordered_key_value . h > <nl> + % include < faiss / utils / Heap . h > <nl> + <nl> + / / this ignore seems to be ignored , so disable W362 above <nl> + % ignore faiss : : AlignedTable : : operator = ; <nl> + <nl> + % include < faiss / utils / AlignedTable . h > <nl> + % include < faiss / utils / partitioning . h > <nl> + % include < faiss / utils / hamming . h > <nl> <nl> int get_num_gpus ( ) ; <nl> void gpu_profiler_start ( ) ; <nl> void gpu_sync_all_devices ( ) <nl> % include < faiss / IndexLSH . h > <nl> % include < faiss / impl / PolysemousTraining . h > <nl> % include < faiss / IndexPQ . h > <nl> - % include < faiss / InvertedLists . h > <nl> - % include < faiss / DirectMap . h > <nl> + % include < faiss / impl / io . h > <nl> + <nl> + % include < faiss / invlists / InvertedLists . h > <nl> + % include < faiss / invlists / InvertedListsIOHook . h > <nl> + % ignore BlockInvertedListsIOHook ; <nl> + % include < faiss / invlists / BlockInvertedLists . h > <nl> + % include < faiss / invlists / DirectMap . h > <nl> % ignore InvertedListScanner ; <nl> % ignore BinaryInvertedListScanner ; <nl> % include < faiss / IndexIVF . h > <nl> void gpu_sync_all_devices ( ) <nl> # ifndef SWIGWIN <nl> % warnfilter ( 401 ) faiss : : OnDiskInvertedListsIOHook ; <nl> % ignore OnDiskInvertedListsIOHook ; <nl> - % include < faiss / OnDiskInvertedLists . h > <nl> + % include < faiss / invlists / OnDiskInvertedLists . h > <nl> # endif / / ! SWIGWIN <nl> <nl> % include < faiss / impl / lattice_Zn . h > <nl> void gpu_sync_all_devices ( ) <nl> % include < faiss / IndexIVFPQR . h > <nl> % include < faiss / Index2Layer . h > <nl> <nl> + % include < faiss / IndexPQFastScan . h > <nl> + % include < faiss / IndexIVFPQFastScan . h > <nl> + % include < faiss / utils / quantize_lut . h > <nl> + <nl> % include < faiss / IndexBinary . h > <nl> % include < faiss / IndexBinaryFlat . h > <nl> % include < faiss / IndexBinaryIVF . 
h > <nl> void gpu_sync_all_devices ( ) <nl> DOWNCAST2 ( IndexReplicas , IndexReplicasTemplateT_faiss__Index_t ) <nl> DOWNCAST ( IndexIVFPQR ) <nl> DOWNCAST ( IndexIVFPQ ) <nl> + DOWNCAST ( IndexIVFPQFastScan ) <nl> DOWNCAST ( IndexIVFSpectralHash ) <nl> DOWNCAST ( IndexIVFScalarQuantizer ) <nl> DOWNCAST ( IndexIVFFlatDedup ) <nl> void gpu_sync_all_devices ( ) <nl> DOWNCAST ( IndexIVF ) <nl> DOWNCAST ( IndexFlat ) <nl> DOWNCAST ( IndexRefineFlat ) <nl> + DOWNCAST ( IndexPQFastScan ) <nl> DOWNCAST ( IndexPQ ) <nl> DOWNCAST ( IndexScalarQuantizer ) <nl> DOWNCAST ( IndexLSH ) <nl> void gpu_sync_all_devices ( ) <nl> <nl> % typemap ( out ) faiss : : InvertedLists * { <nl> DOWNCAST ( ArrayInvertedLists ) <nl> + DOWNCAST ( BlockInvertedLists ) <nl> # ifndef SWIGWIN <nl> DOWNCAST ( OnDiskInvertedLists ) <nl> # endif / / ! SWIGWIN <nl> faiss : : InvertedLists * downcast_InvertedLists ( faiss : : InvertedLists * il ) <nl> } <nl> % } <nl> <nl> - % include < faiss / impl / io . h > <nl> % include < faiss / index_io . h > <nl> % include < faiss / clone_index . h > <nl> % newobject index_factory ; <nl> REV_SWIG_PTR ( uint64_t , NPY_UINT64 ) ; <nl> % template ( CMax_float_partition_fuzzy ) <nl> faiss : : partition_fuzzy < faiss : : CMax < float , int64_t > > ; <nl> <nl> + % template ( AlignedTableUint8 ) faiss : : AlignedTable < uint8_t > ; <nl> + % template ( AlignedTableUint16 ) faiss : : AlignedTable < uint16_t > ; <nl> + % template ( AlignedTableFloat32 ) faiss : : AlignedTable < float > ; <nl> + <nl> + % inline % { <nl> + <nl> + / / SWIG seems to have has some trouble resolving the template type here , so <nl> + / / declare explicitly <nl> + uint16_t CMax_uint16_partition_fuzzy ( <nl> + uint16_t * vals , int64_t * ids , size_t n , <nl> + size_t q_min , size_t q_max , size_t * q_out ) <nl> + { <nl> + return faiss : : partition_fuzzy < faiss : : CMax < unsigned short , int64_t > > ( <nl> + vals , ids , n , q_min , q_max , q_out ) ; <nl> + } <nl> + <nl> + uint16_t CMin_uint16_partition_fuzzy ( <nl> + uint16_t * vals , int64_t * ids , size_t n , <nl> + size_t q_min , size_t q_max , size_t * q_out ) <nl> + { <nl> + return faiss : : partition_fuzzy < faiss : : CMin < unsigned short , int64_t > > ( <nl> + vals , ids , n , q_min , q_max , q_out ) ; <nl> + } <nl> + <nl> + / / and overload with the int32 version <nl> + <nl> + uint16_t CMax_uint16_partition_fuzzy ( <nl> + uint16_t * vals , int * ids , size_t n , <nl> + size_t q_min , size_t q_max , size_t * q_out ) <nl> + { <nl> + return faiss : : partition_fuzzy < faiss : : CMax < unsigned short , int > > ( <nl> + vals , ids , n , q_min , q_max , q_out ) ; <nl> + } <nl> + <nl> + uint16_t CMin_uint16_partition_fuzzy ( <nl> + uint16_t * vals , int * ids , size_t n , <nl> + size_t q_min , size_t q_max , size_t * q_out ) <nl> + { <nl> + return faiss : : partition_fuzzy < faiss : : CMin < unsigned short , int > > ( <nl> + vals , ids , n , q_min , q_max , q_out ) ; <nl> + } <nl> + <nl> + % } <nl> + <nl> / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> * Expose a few basic functions <nl> * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> void * cast_integer_to_void_ptr ( long long x ) { <nl> % } <nl> <nl> <nl> - <nl> / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> * Range search interface <nl> * * 
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> new file mode 100644 <nl> index 000000000 . . 6b66b515b <nl> mmm / dev / null <nl> ppp b / faiss / utils / AlignedTable . h <nl> <nl> + / * * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the root directory of this source tree . <nl> + * / <nl> + <nl> + <nl> + # pragma once <nl> + <nl> + # include < cstdint > <nl> + # include < cstdlib > <nl> + # include < cassert > <nl> + # include < cstring > <nl> + <nl> + # include < algorithm > <nl> + <nl> + # include < faiss / impl / platform_macros . h > <nl> + <nl> + namespace faiss { <nl> + <nl> + template < int A = 32 > <nl> + inline bool is_aligned_pointer ( const void * x ) <nl> + { <nl> + size_t xi = ( size_t ) x ; <nl> + return xi % A = = 0 ; <nl> + } <nl> + <nl> + / / class that manages suitably aligned arrays for SIMD <nl> + / / T should be a POV type . The default alignment is 32 for AVX <nl> + template < class T , int A = 32 > <nl> + struct AlignedTableTightAlloc { <nl> + T * ptr ; <nl> + size_t numel ; <nl> + <nl> + AlignedTableTightAlloc ( ) : ptr ( nullptr ) , numel ( 0 ) <nl> + { } <nl> + <nl> + explicit AlignedTableTightAlloc ( size_t n ) : ptr ( nullptr ) , numel ( 0 ) <nl> + { resize ( n ) ; } <nl> + <nl> + size_t itemsize ( ) const { return sizeof ( T ) ; } <nl> + <nl> + void resize ( size_t n ) { <nl> + if ( numel = = n ) { <nl> + return ; <nl> + } <nl> + T * new_ptr ; <nl> + if ( n > 0 ) { <nl> + int ret = posix_memalign ( ( void * * ) & new_ptr , A , n * sizeof ( T ) ) ; <nl> + if ( ret ! = 0 ) { <nl> + throw std : : bad_alloc ( ) ; <nl> + } <nl> + if ( numel > 0 ) { <nl> + memcpy ( new_ptr , ptr , sizeof ( T ) * std : : min ( numel , n ) ) ; <nl> + } <nl> + } else { <nl> + new_ptr = nullptr ; <nl> + } <nl> + numel = n ; <nl> + free ( ptr ) ; <nl> + ptr = new_ptr ; <nl> + } <nl> + <nl> + void clear ( ) { memset ( ptr , 0 , nbytes ( ) ) ; } <nl> + size_t size ( ) const { return numel ; } <nl> + size_t nbytes ( ) const { return numel * sizeof ( T ) ; } <nl> + <nl> + T * get ( ) { return ptr ; } <nl> + const T * get ( ) const { return ptr ; } <nl> + T * data ( ) { return ptr ; } <nl> + const T * data ( ) const { return ptr ; } <nl> + T & operator [ ] ( size_t i ) { return ptr [ i ] ; } <nl> + T operator [ ] ( size_t i ) const { return ptr [ i ] ; } <nl> + <nl> + ~ AlignedTableTightAlloc ( ) { posix_memalign_free ( ptr ) ; } <nl> + <nl> + AlignedTableTightAlloc < T , A > & operator = <nl> + ( const AlignedTableTightAlloc < T , A > & other ) { <nl> + resize ( other . numel ) ; <nl> + memcpy ( ptr , other . 
ptr , sizeof ( T ) * numel ) ; <nl> + return * this ; <nl> + } <nl> + <nl> + AlignedTableTightAlloc ( const AlignedTableTightAlloc < T , A > & other ) { <nl> + * this = other ; <nl> + } <nl> + <nl> + } ; <nl> + <nl> + / / same as AlignedTableTightAlloc , but with geometric re - allocation <nl> + template < class T , int A = 32 > <nl> + struct AlignedTable { <nl> + AlignedTableTightAlloc < T , A > tab ; <nl> + size_t numel = 0 ; <nl> + <nl> + static size_t round_capacity ( size_t n ) { <nl> + if ( n = = 0 ) { <nl> + return 0 ; <nl> + } <nl> + if ( n < 8 * A ) { <nl> + return 8 * A ; <nl> + } <nl> + size_t capacity = 8 * A ; <nl> + while ( capacity < n ) { <nl> + capacity * = 2 ; <nl> + } <nl> + return capacity ; <nl> + } <nl> + <nl> + AlignedTable ( ) { } <nl> + <nl> + explicit AlignedTable ( size_t n ) : <nl> + tab ( round_capacity ( n ) ) , <nl> + numel ( n ) <nl> + { } <nl> + <nl> + size_t itemsize ( ) const { return sizeof ( T ) ; } <nl> + <nl> + void resize ( size_t n ) { <nl> + tab . resize ( round_capacity ( n ) ) ; <nl> + numel = n ; <nl> + } <nl> + <nl> + void clear ( ) { tab . clear ( ) ; } <nl> + size_t size ( ) const { return numel ; } <nl> + size_t nbytes ( ) const { return numel * sizeof ( T ) ; } <nl> + <nl> + T * get ( ) { return tab . get ( ) ; } <nl> + const T * get ( ) const { return tab . get ( ) ; } <nl> + T * data ( ) { return tab . get ( ) ; } <nl> + const T * data ( ) const { return tab . get ( ) ; } <nl> + T & operator [ ] ( size_t i ) { return tab . ptr [ i ] ; } <nl> + T operator [ ] ( size_t i ) const { return tab . ptr [ i ] ; } <nl> + <nl> + / / assign and copy constructor should work as expected <nl> + <nl> + } ; <nl> + <nl> + <nl> + } / / namespace faiss <nl> mmm a / faiss / utils / distances_simd . cpp <nl> ppp b / faiss / utils / distances_simd . cpp <nl> void pq2_8cents_table ( <nl> } <nl> } <nl> <nl> + simd8float32 load_simd8float32_partial ( const float * x , int n ) { <nl> + ALIGNED ( 32 ) float tmp [ 8 ] = { 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 } ; <nl> + float * wp = tmp ; <nl> + for ( int i = 0 ; i < n ; i + + ) { <nl> + * wp + + = * x + + ; <nl> + } <nl> + return simd8float32 ( tmp ) ; <nl> + } <nl> <nl> } / / anonymous namespace <nl> <nl> <nl> + <nl> + <nl> void compute_PQ_dis_tables_dsub2 ( <nl> size_t d , size_t ksub , const float * all_centroids , <nl> size_t nx , const float * x , <nl> void compute_PQ_dis_tables_dsub2 ( <nl> } <nl> for ( size_t i = 0 ; i < nx ; i + + ) { <nl> simd8float32 xi ; <nl> - xi . loadu ( x + i * d + m0 * 2 ) ; <nl> + if ( m1 = = m0 + 4 ) { <nl> + xi . loadu ( x + i * d + m0 * 2 ) ; <nl> + } else { <nl> + xi = load_simd8float32_partial ( x + i * d + m0 * 2 , 2 * ( m1 - m0 ) ) ; <nl> + } <nl> + <nl> if ( is_inner_product ) { <nl> pq2_8cents_table < true > ( <nl> centroids , xi , <nl> mmm a / faiss / utils / hamming . h <nl> ppp b / faiss / utils / hamming . h <nl> <nl> <nl> # include < stdint . h > <nl> <nl> - # ifdef _MSC_VER <nl> - # include < intrin . h > <nl> - # define __builtin_popcountl __popcnt64 <nl> - # endif / / _MSC_VER <nl> - <nl> # include < faiss / impl / platform_macros . h > <nl> # include < faiss / utils / Heap . h > <nl> <nl> mmm a / faiss / utils / partitioning . cpp <nl> ppp b / faiss / utils / partitioning . cpp <nl> <nl> # include < cassert > <nl> <nl> # include < faiss / impl / FaissAssert . h > <nl> - <nl> + # include < faiss / utils / AlignedTable . h > <nl> # include < faiss / utils / ordered_key_value . h > <nl> + # include < faiss / utils / simdlib . 
h > <nl> + <nl> + # include < faiss / impl / platform_macros . h > <nl> <nl> namespace faiss { <nl> <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Internal routines <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + <nl> namespace partitioning { <nl> <nl> template < typename T > <nl> typename C : : T sample_threshold_median3 ( <nl> } else if ( vi ! = 0 ) { <nl> return val3 [ 0 ] ; <nl> } else { <nl> - FAISS_THROW_MSG ( " too few values to compute a median " ) ; <nl> + return thresh_inf ; <nl> + / / FAISS_THROW_MSG ( " too few values to compute a median " ) ; <nl> } <nl> } <nl> <nl> typename C : : T partition_fuzzy_median3 ( <nl> <nl> / / here we use bissection with a median of 3 to find the threshold and <nl> / / compress the arrays afterwards . So it ' s a n * log ( n ) algoirithm rather than <nl> - / / qselect ' s O ( n ) but it avoids compressing the array . <nl> + / / qselect ' s O ( n ) but it avoids shuffling around the array . <nl> <nl> FAISS_THROW_IF_NOT ( n > = 3 ) ; <nl> <nl> typename C : : T partition_fuzzy_median3 ( <nl> count_lt_and_eq < C > ( vals , n , thresh , n_lt , n_eq ) ; <nl> <nl> IFV printf ( " thresh = % g [ % g % g ] n_lt = % ld n_eq = % ld , q = % ld : % ld / % ld \ n " , <nl> - thresh , thresh_inf , thresh_sup , n_lt , n_eq , q_min , q_max , n ) ; <nl> + float ( thresh ) , float ( thresh_inf ) , float ( thresh_sup ) , <nl> + long ( n_lt ) , long ( n_eq ) , long ( q_min ) , long ( q_max ) , long ( n ) ) ; <nl> <nl> if ( n_lt < = q_min ) { <nl> if ( n_lt + n_eq > = q_min ) { <nl> typename C : : T partition_fuzzy_median3 ( <nl> } <nl> <nl> / / FIXME avoid a second pass over the array to sample the threshold <nl> - IFV printf ( " sample thresh in [ % g % g ] \ n " , thresh_inf , thresh_sup ) ; <nl> - thresh = sample_threshold_median3 < C > ( vals , n , thresh_inf , thresh_sup ) ; <nl> + IFV printf ( " sample thresh in [ % g % g ] \ n " , float ( thresh_inf ) , float ( thresh_sup ) ) ; <nl> + T new_thresh = sample_threshold_median3 < C > ( vals , n , thresh_inf , thresh_sup ) ; <nl> + if ( new_thresh = = thresh_inf ) { <nl> + / / then there is nothing between thresh_inf and thresh_sup <nl> + break ; <nl> + } <nl> + thresh = new_thresh ; <nl> } <nl> <nl> int64_t n_eq_1 = q - n_lt ; <nl> <nl> - IFV printf ( " shrink : thresh = % g n_eq_1 = % ld \ n " , thresh , n_eq_1 ) ; <nl> + IFV printf ( " shrink : thresh = % g n_eq_1 = % ld \ n " , float ( thresh ) , long ( n_eq_1 ) ) ; <nl> <nl> if ( n_eq_1 < 0 ) { / / happens when > q elements are at lower bound <nl> q = q_min ; <nl> typename C : : T partition_fuzzy_median3 ( <nl> } / / namespace partitioning <nl> <nl> <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * SIMD routines when vals is an aligned array of uint16_t <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + <nl> + namespace simd_partitioning { <nl> + <nl> + <nl> + <nl> + void find_minimax ( <nl> + const uint16_t * vals , size_t n , <nl> + uint16_t & smin , uint16_t & smax <nl> + ) { <nl> + <nl> + simd16uint16 vmin ( 0xffff ) , vmax ( 0 ) ; <nl> + for ( size_t i = 0 ; i + 15 < n ; i + = 16 ) { <nl> + simd16uint16 v ( vals + i ) ; <nl> + vmin . 
accu_min ( v ) ; <nl> + vmax . accu_max ( v ) ; <nl> + } <nl> + <nl> + ALIGNED ( 32 ) uint16_t tab32 [ 32 ] ; <nl> + vmin . store ( tab32 ) ; <nl> + vmax . store ( tab32 + 16 ) ; <nl> + <nl> + smin = tab32 [ 0 ] , smax = tab32 [ 16 ] ; <nl> + <nl> + for ( int i = 1 ; i < 16 ; i + + ) { <nl> + smin = std : : min ( smin , tab32 [ i ] ) ; <nl> + smax = std : : max ( smax , tab32 [ i + 16 ] ) ; <nl> + } <nl> + <nl> + / / missing values <nl> + for ( size_t i = ( n & ~ 15 ) ; i < n ; i + + ) { <nl> + smin = std : : min ( smin , vals [ i ] ) ; <nl> + smax = std : : max ( smax , vals [ i ] ) ; <nl> + } <nl> + <nl> + } <nl> + <nl> + <nl> + / / max func differentiates between CMin and CMax ( keep lowest or largest ) <nl> + template < class C > <nl> + simd16uint16 max_func ( simd16uint16 v , simd16uint16 thr16 ) { <nl> + constexpr bool is_max = C : : is_max ; <nl> + if ( is_max ) { <nl> + return max ( v , thr16 ) ; <nl> + } else { <nl> + return min ( v , thr16 ) ; <nl> + } <nl> + } <nl> + <nl> + template < class C > <nl> + void count_lt_and_eq ( <nl> + const uint16_t * vals , int n , uint16_t thresh , <nl> + size_t & n_lt , size_t & n_eq <nl> + ) { <nl> + n_lt = n_eq = 0 ; <nl> + simd16uint16 thr16 ( thresh ) ; <nl> + <nl> + size_t n1 = n / 16 ; <nl> + <nl> + for ( size_t i = 0 ; i < n1 ; i + + ) { <nl> + simd16uint16 v ( vals ) ; <nl> + vals + = 16 ; <nl> + simd16uint16 eqmask = ( v = = thr16 ) ; <nl> + simd16uint16 max2 = max_func < C > ( v , thr16 ) ; <nl> + simd16uint16 gemask = ( v = = max2 ) ; <nl> + uint32_t bits = get_MSBs ( uint16_to_uint8_saturate ( eqmask , gemask ) ) ; <nl> + int i_eq = __builtin_popcount ( bits & 0x00ff00ff ) ; <nl> + int i_ge = __builtin_popcount ( bits ) - i_eq ; <nl> + n_eq + = i_eq ; <nl> + n_lt + = 16 - i_ge ; <nl> + } <nl> + <nl> + for ( size_t i = n1 * 16 ; i < n ; i + + ) { <nl> + uint16_t v = * vals + + ; <nl> + if ( C : : cmp ( thresh , v ) ) { <nl> + n_lt + + ; <nl> + } else if ( v = = thresh ) { <nl> + n_eq + + ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> + <nl> + / * compress separated values and ids table , keeping all values < thresh and at <nl> + * most n_eq equal values * / <nl> + template < class C > <nl> + int simd_compress_array ( <nl> + uint16_t * vals , typename C : : TI * ids , size_t n , uint16_t thresh , int n_eq <nl> + ) { <nl> + simd16uint16 thr16 ( thresh ) ; <nl> + simd16uint16 mixmask ( 0xff00 ) ; <nl> + <nl> + int wp = 0 ; <nl> + size_t i0 ; <nl> + <nl> + / / loop while there are eqs to collect <nl> + for ( i0 = 0 ; i0 + 15 < n & & n_eq > 0 ; i0 + = 16 ) { <nl> + simd16uint16 v ( vals + i0 ) ; <nl> + simd16uint16 max2 = max_func < C > ( v , thr16 ) ; <nl> + simd16uint16 gemask = ( v = = max2 ) ; <nl> + simd16uint16 eqmask = ( v = = thr16 ) ; <nl> + uint32_t bits = get_MSBs ( blendv ( <nl> + simd32uint8 ( eqmask ) , simd32uint8 ( gemask ) , simd32uint8 ( mixmask ) ) ) ; <nl> + bits ^ = 0xAAAAAAAA ; <nl> + / / bit 2 * i : eq <nl> + / / bit 2 * i + 1 : lt <nl> + <nl> + while ( bits ) { <nl> + int j = __builtin_ctz ( bits ) & ( ~ 1 ) ; <nl> + bool is_eq = ( bits > > j ) & 1 ; <nl> + bool is_lt = ( bits > > j ) & 2 ; <nl> + bits & = ~ ( 3 < < j ) ; <nl> + j > > = 1 ; <nl> + <nl> + if ( is_lt ) { <nl> + vals [ wp ] = vals [ i0 + j ] ; <nl> + ids [ wp ] = ids [ i0 + j ] ; <nl> + wp + + ; <nl> + } else if ( is_eq & & n_eq > 0 ) { <nl> + vals [ wp ] = vals [ i0 + j ] ; <nl> + ids [ wp ] = ids [ i0 + j ] ; <nl> + wp + + ; <nl> + n_eq - - ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / / handle remaining , only striclty lt ones . 
<nl> + for ( ; i0 + 15 < n ; i0 + = 16 ) { <nl> + simd16uint16 v ( vals + i0 ) ; <nl> + simd16uint16 max2 = max_func < C > ( v , thr16 ) ; <nl> + simd16uint16 gemask = ( v = = max2 ) ; <nl> + uint32_t bits = ~ get_MSBs ( simd32uint8 ( gemask ) ) ; <nl> + <nl> + while ( bits ) { <nl> + int j = __builtin_ctz ( bits ) ; <nl> + bits & = ~ ( 3 < < j ) ; <nl> + j > > = 1 ; <nl> + <nl> + vals [ wp ] = vals [ i0 + j ] ; <nl> + ids [ wp ] = ids [ i0 + j ] ; <nl> + wp + + ; <nl> + } <nl> + } <nl> + <nl> + / / end with scalar <nl> + for ( int i = ( n & ~ 15 ) ; i < n ; i + + ) { <nl> + if ( C : : cmp ( thresh , vals [ i ] ) ) { <nl> + vals [ wp ] = vals [ i ] ; <nl> + ids [ wp ] = ids [ i ] ; <nl> + wp + + ; <nl> + } else if ( vals [ i ] = = thresh & & n_eq > 0 ) { <nl> + vals [ wp ] = vals [ i ] ; <nl> + ids [ wp ] = ids [ i ] ; <nl> + wp + + ; <nl> + n_eq - - ; <nl> + } <nl> + } <nl> + assert ( n_eq = = 0 ) ; <nl> + return wp ; <nl> + } <nl> + <nl> + / / # define MICRO_BENCHMARK <nl> + <nl> + static uint64_t get_cy ( ) { <nl> + # ifdef MICRO_BENCHMARK <nl> + uint32_t high , low ; <nl> + asm volatile ( " rdtsc \ n \ t " <nl> + : " = a " ( low ) , <nl> + " = d " ( high ) ) ; <nl> + return ( ( uint64_t ) high < < 32 ) | ( low ) ; <nl> + # else <nl> + return 0 ; <nl> + # endif <nl> + } <nl> + <nl> + <nl> + <nl> + # define IFV if ( false ) <nl> + <nl> + template < class C > <nl> + uint16_t simd_partition_fuzzy_with_bounds ( <nl> + uint16_t * vals , typename C : : TI * ids , size_t n , <nl> + size_t q_min , size_t q_max , size_t * q_out , <nl> + uint16_t s0i , uint16_t s1i ) <nl> + { <nl> + <nl> + if ( q_min = = 0 ) { <nl> + if ( q_out ) { <nl> + * q_out = 0 ; <nl> + } <nl> + return 0 ; <nl> + } <nl> + if ( q_max > = n ) { <nl> + if ( q_out ) { <nl> + * q_out = q_max ; <nl> + } <nl> + return 0xffff ; <nl> + } <nl> + if ( s0i = = s1i ) { <nl> + if ( q_out ) { <nl> + * q_out = q_min ; <nl> + } <nl> + return s0i ; <nl> + } <nl> + uint64_t t0 = get_cy ( ) ; <nl> + <nl> + / / lower bound inclusive , upper exclusive <nl> + size_t s0 = s0i , s1 = s1i + 1 ; <nl> + <nl> + IFV printf ( " bounds : % ld % ld \ n " , s0 , s1 - 1 ) ; <nl> + <nl> + int thresh ; <nl> + size_t n_eq = 0 , n_lt = 0 ; <nl> + size_t q = 0 ; <nl> + <nl> + for ( int it = 0 ; it < 200 ; it + + ) { <nl> + / / while ( s0 + 1 < s1 ) { <nl> + thresh = ( s0 + s1 ) / 2 ; <nl> + count_lt_and_eq < C > ( vals , n , thresh , n_lt , n_eq ) ; <nl> + <nl> + IFV printf ( " [ % ld % ld ] thresh = % d n_lt = % ld n_eq = % ld , q = % ld : % ld / % ld \ n " , <nl> + s0 , s1 , thresh , n_lt , n_eq , q_min , q_max , n ) ; <nl> + if ( n_lt < = q_min ) { <nl> + if ( n_lt + n_eq > = q_min ) { <nl> + q = q_min ; <nl> + break ; <nl> + } else { <nl> + if ( C : : is_max ) { <nl> + s0 = thresh ; <nl> + } else { <nl> + s1 = thresh ; <nl> + } <nl> + } <nl> + } else if ( n_lt < = q_max ) { <nl> + q = n_lt ; <nl> + break ; <nl> + } else { <nl> + if ( C : : is_max ) { <nl> + s1 = thresh ; <nl> + } else { <nl> + s0 = thresh ; <nl> + } <nl> + } <nl> + <nl> + } <nl> + <nl> + uint64_t t1 = get_cy ( ) ; <nl> + <nl> + / / number of equal values to keep <nl> + int64_t n_eq_1 = q - n_lt ; <nl> + <nl> + IFV printf ( " shrink : thresh = % d q = % ld n_eq_1 = % ld \ n " , thresh , q , n_eq_1 ) ; <nl> + if ( n_eq_1 < 0 ) { / / happens when > q elements are at lower bound <nl> + assert ( s0 + 1 = = s1 ) ; <nl> + q = q_min ; <nl> + if ( C : : is_max ) { <nl> + thresh - - ; <nl> + } else { <nl> + thresh + + ; <nl> + } <nl> + n_eq_1 = q ; <nl> + IFV printf ( " override : thresh = % d n_eq_1 = 
% ld \ n " , thresh , n_eq_1 ) ; <nl> + } else { <nl> + assert ( n_eq_1 < = n_eq ) ; <nl> + } <nl> + <nl> + size_t wp = simd_compress_array < C > ( vals , ids , n , thresh , n_eq_1 ) ; <nl> + <nl> + IFV printf ( " wp = % ld \ n " , wp ) ; <nl> + assert ( wp = = q ) ; <nl> + if ( q_out ) { <nl> + * q_out = q ; <nl> + } <nl> + <nl> + uint64_t t2 = get_cy ( ) ; <nl> + <nl> + partition_stats . bissect_cycles + = t1 - t0 ; <nl> + partition_stats . compress_cycles + = t2 - t1 ; <nl> + <nl> + return thresh ; <nl> + } <nl> + <nl> + <nl> + template < class C > <nl> + uint16_t simd_partition_fuzzy_with_bounds_histogram ( <nl> + uint16_t * vals , typename C : : TI * ids , size_t n , <nl> + size_t q_min , size_t q_max , size_t * q_out , <nl> + uint16_t s0i , uint16_t s1i ) <nl> + { <nl> + <nl> + if ( q_min = = 0 ) { <nl> + if ( q_out ) { <nl> + * q_out = 0 ; <nl> + } <nl> + return 0 ; <nl> + } <nl> + if ( q_max > = n ) { <nl> + if ( q_out ) { <nl> + * q_out = q_max ; <nl> + } <nl> + return 0xffff ; <nl> + } <nl> + if ( s0i = = s1i ) { <nl> + if ( q_out ) { <nl> + * q_out = q_min ; <nl> + } <nl> + return s0i ; <nl> + } <nl> + <nl> + IFV printf ( " partition fuzzy , q = % ld : % ld / % ld , bounds = % d % d \ n " , <nl> + q_min , q_max , n , s0i , s1i ) ; <nl> + <nl> + if ( ! C : : is_max ) { <nl> + IFV printf ( " revert due to CMin , q_min : q_max - > % ld : % ld \ n " , q_min , q_max ) ; <nl> + q_min = n - q_min ; <nl> + q_max = n - q_max ; <nl> + } <nl> + <nl> + / / lower and upper bound of range , inclusive <nl> + int s0 = s0i , s1 = s1i ; <nl> + / / number of values < s0 and > s1 <nl> + size_t n_lt = 0 , n_gt = 0 ; <nl> + <nl> + / / output of loop : <nl> + int thresh ; / / final threshold <nl> + uint64_t tot_eq = 0 ; / / total nb of equal values <nl> + uint64_t n_eq = 0 ; / / nb of equal values to keep <nl> + size_t q ; / / final quantile <nl> + <nl> + / / buffer for the histograms <nl> + int hist [ 16 ] ; <nl> + <nl> + for ( int it = 0 ; it < 20 ; it + + ) { <nl> + / / otherwise we would be done already <nl> + <nl> + int shift = 0 ; <nl> + <nl> + IFV printf ( " it % d bounds : % d % d n_lt = % ld n_gt = % ld \ n " , <nl> + it , s0 , s1 , n_lt , n_gt ) ; <nl> + <nl> + int maxval = s1 - s0 ; <nl> + <nl> + while ( maxval > 15 ) { <nl> + shift + + ; <nl> + maxval > > = 1 ; <nl> + } <nl> + <nl> + IFV printf ( " histogram shift % d maxval % d ? = % d \ n " , <nl> + shift , maxval , int ( ( s1 - s0 ) > > shift ) ) ; <nl> + <nl> + if ( maxval > 7 ) { <nl> + simd_histogram_16 ( vals , n , s0 , shift , hist ) ; <nl> + } else { <nl> + simd_histogram_8 ( vals , n , s0 , shift , hist ) ; <nl> + } <nl> + IFV { <nl> + int sum = n_lt + n_gt ; <nl> + printf ( " n_lt = % ld hist = [ " , n_lt ) ; <nl> + for ( int i = 0 ; i < = maxval ; i + + ) { <nl> + printf ( " % d " , hist [ i ] ) ; <nl> + sum + = hist [ i ] ; <nl> + } <nl> + printf ( " ] n_gt = % ld sum = % d \ n " , n_gt , sum ) ; <nl> + assert ( sum = = n ) ; <nl> + } <nl> + <nl> + size_t sum_below = n_lt ; <nl> + int i ; <nl> + for ( i = 0 ; i < = maxval ; i + + ) { <nl> + sum_below + = hist [ i ] ; <nl> + if ( sum_below > = q_min ) { <nl> + break ; <nl> + } <nl> + } <nl> + IFV printf ( " i = % d sum_below = % ld \ n " , i , sum_below ) ; <nl> + if ( i < = maxval ) { <nl> + s0 = s0 + ( i < < shift ) ; <nl> + s1 = s0 + ( 1 < < shift ) - 1 ; <nl> + n_lt = sum_below - hist [ i ] ; <nl> + n_gt = n - sum_below ; <nl> + } else { <nl> + assert ( ! 
" not implemented " ) ; <nl> + } <nl> + <nl> + IFV printf ( " new bin : s0 = % d s1 = % d n_lt = % ld n_gt = % ld \ n " , s0 , s1 , n_lt , n_gt ) ; <nl> + <nl> + if ( s1 > s0 ) { <nl> + if ( n_lt > = q_min & & q_max > = n_lt ) { <nl> + IFV printf ( " FOUND1 \ n " ) ; <nl> + thresh = s0 ; <nl> + q = n_lt ; <nl> + break ; <nl> + } <nl> + <nl> + size_t n_lt_2 = n - n_gt ; <nl> + if ( n_lt_2 > = q_min & & q_max > = n_lt_2 ) { <nl> + thresh = s1 + 1 ; <nl> + q = n_lt_2 ; <nl> + IFV printf ( " FOUND2 \ n " ) ; <nl> + break ; <nl> + } <nl> + } else { <nl> + thresh = s0 ; <nl> + q = q_min ; <nl> + tot_eq = n - n_gt - n_lt ; <nl> + n_eq = q_min - n_lt ; <nl> + IFV printf ( " FOUND3 \ n " ) ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + IFV printf ( " end bissection : thresh = % d q = % ld n_eq = % ld \ n " , thresh , q , n_eq ) ; <nl> + <nl> + if ( ! C : : is_max ) { <nl> + if ( n_eq = = 0 ) { <nl> + thresh - - ; <nl> + } else { <nl> + / / thresh unchanged <nl> + n_eq = tot_eq - n_eq ; <nl> + } <nl> + q = n - q ; <nl> + IFV printf ( " revert due to CMin , q - > % ld n_eq - > % ld \ n " , q , n_eq ) ; <nl> + } <nl> + <nl> + size_t wp = simd_compress_array < C > ( vals , ids , n , thresh , n_eq ) ; <nl> + IFV printf ( " wp = % ld ? = % ld \ n " , wp , q ) ; <nl> + assert ( wp = = q ) ; <nl> + if ( q_out ) { <nl> + * q_out = wp ; <nl> + } <nl> + <nl> + return thresh ; <nl> + } <nl> + <nl> + <nl> + <nl> + template < class C > <nl> + uint16_t simd_partition_fuzzy ( <nl> + uint16_t * vals , typename C : : TI * ids , size_t n , <nl> + size_t q_min , size_t q_max , size_t * q_out <nl> + ) { <nl> + <nl> + assert ( is_aligned_pointer ( vals ) ) ; <nl> + <nl> + uint16_t s0i , s1i ; <nl> + find_minimax ( vals , n , s0i , s1i ) ; <nl> + / / QSelect_stats . t0 + = get_cy ( ) - t0 ; <nl> + <nl> + return simd_partition_fuzzy_with_bounds < C > ( <nl> + vals , ids , n , q_min , q_max , q_out , s0i , s1i ) ; <nl> + } <nl> + <nl> + <nl> + <nl> + template < class C > <nl> + uint16_t simd_partition ( uint16_t * vals , typename C : : TI * ids , size_t n , size_t q ) { <nl> + <nl> + assert ( is_aligned_pointer ( vals ) ) ; <nl> + <nl> + if ( q = = 0 ) { <nl> + return 0 ; <nl> + } <nl> + if ( q > = n ) { <nl> + return 0xffff ; <nl> + } <nl> + <nl> + uint16_t s0i , s1i ; <nl> + find_minimax ( vals , n , s0i , s1i ) ; <nl> + <nl> + return simd_partition_fuzzy_with_bounds < C > ( <nl> + vals , ids , n , q , q , nullptr , s0i , s1i ) ; <nl> + } <nl> + <nl> + template < class C > <nl> + uint16_t simd_partition_with_bounds ( <nl> + uint16_t * vals , typename C : : TI * ids , size_t n , size_t q , <nl> + uint16_t s0i , uint16_t s1i ) <nl> + { <nl> + return simd_partition_fuzzy_with_bounds < C > ( <nl> + vals , ids , n , q , q , nullptr , s0i , s1i ) ; <nl> + } <nl> + <nl> + } / / namespace simd_partitioning <nl> + <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Driver routine <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + <nl> template < class C > <nl> typename C : : T partition_fuzzy ( <nl> typename C : : T * vals , typename C : : TI * ids , size_t n , <nl> size_t q_min , size_t q_max , size_t * q_out ) <nl> { <nl> - <nl> + / / the code below compiles and runs without AVX2 but it ' s slower than <nl> + / / the scalar implementation <nl> + # ifdef __AVX2__ <nl> + constexpr bool is_uint16 = std : : is_same < 
typename C : : T , uint16_t > : : value ; <nl> + if ( is_uint16 & & is_aligned_pointer ( vals ) ) { <nl> + return simd_partitioning : : simd_partition_fuzzy < C > ( <nl> + ( uint16_t * ) vals , ids , n , q_min , q_max , q_out ) ; <nl> + } <nl> + # endif <nl> return partitioning : : partition_fuzzy_median3 < C > ( <nl> vals , ids , n , q_min , q_max , q_out ) ; <nl> } <nl> template float partition_fuzzy < CMax < float , int64_t > > ( <nl> float * vals , int64_t * ids , size_t n , <nl> size_t q_min , size_t q_max , size_t * q_out ) ; <nl> <nl> + template uint16_t partition_fuzzy < CMin < uint16_t , int64_t > > ( <nl> + uint16_t * vals , int64_t * ids , size_t n , <nl> + size_t q_min , size_t q_max , size_t * q_out ) ; <nl> + <nl> + template uint16_t partition_fuzzy < CMax < uint16_t , int64_t > > ( <nl> + uint16_t * vals , int64_t * ids , size_t n , <nl> + size_t q_min , size_t q_max , size_t * q_out ) ; <nl> + <nl> + template uint16_t partition_fuzzy < CMin < uint16_t , int > > ( <nl> + uint16_t * vals , int * ids , size_t n , <nl> + size_t q_min , size_t q_max , size_t * q_out ) ; <nl> + <nl> + template uint16_t partition_fuzzy < CMax < uint16_t , int > > ( <nl> + uint16_t * vals , int * ids , size_t n , <nl> + size_t q_min , size_t q_max , size_t * q_out ) ; <nl> + <nl> + <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Histogram subroutines <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + # ifdef __AVX2__ <nl> + / / / FIXME when MSB of uint16 is set <nl> + / / this code does not compile properly with GCC 7 . 4 . 0 <nl> + <nl> + namespace { <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * 8 bins <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + simd32uint8 accu4to8 ( simd16uint16 a4 ) { <nl> + simd16uint16 mask4 ( 0x0f0f ) ; <nl> + <nl> + simd16uint16 a8_0 = a4 & mask4 ; <nl> + simd16uint16 a8_1 = ( a4 > > 4 ) & mask4 ; <nl> + <nl> + return simd32uint8 ( _mm256_hadd_epi16 ( a8_0 . i , a8_1 . i ) ) ; <nl> + } <nl> + <nl> + <nl> + simd16uint16 accu8to16 ( simd32uint8 a8 ) { <nl> + simd16uint16 mask8 ( 0x00ff ) ; <nl> + <nl> + simd16uint16 a8_0 = simd16uint16 ( a8 ) & mask8 ; <nl> + simd16uint16 a8_1 = ( simd16uint16 ( a8 ) > > 8 ) & mask8 ; <nl> + <nl> + return simd16uint16 ( _mm256_hadd_epi16 ( a8_0 . i , a8_1 . i ) ) ; <nl> + } <nl> + <nl> + <nl> + static const simd32uint8 shifts ( _mm256_setr_epi8 ( <nl> + 1 , 16 , 0 , 0 , 4 , 64 , 0 , 0 , <nl> + 0 , 0 , 1 , 16 , 0 , 0 , 4 , 64 , <nl> + 1 , 16 , 0 , 0 , 4 , 64 , 0 , 0 , <nl> + 0 , 0 , 1 , 16 , 0 , 0 , 4 , 64 <nl> + ) ) ; <nl> + <nl> + / / 2 - bit accumulator : we can add only up to 3 elements <nl> + / / on output we return 2 * 4 - bit results <nl> + / / preproc returns either an index in 0 . . 
7 or 0xffff <nl> + / / that yeilds a 0 when used in the table look - up <nl> + template < int N , class Preproc > <nl> + void compute_accu2 ( <nl> + const uint16_t * & data , <nl> + Preproc & pp , <nl> + simd16uint16 & a4lo , simd16uint16 & a4hi <nl> + ) { <nl> + simd16uint16 mask2 ( 0x3333 ) ; <nl> + simd16uint16 a2 ( ( uint16_t ) 0 ) ; / / 2 - bit accu <nl> + for ( int j = 0 ; j < N ; j + + ) { <nl> + simd16uint16 v ( data ) ; <nl> + data + = 16 ; <nl> + v = pp ( v ) ; <nl> + / / 0x800 - > force second half of table <nl> + simd16uint16 idx = v | ( v < < 8 ) | simd16uint16 ( 0x800 ) ; <nl> + a2 + = simd16uint16 ( shifts . lookup_2_lanes ( simd32uint8 ( idx ) ) ) ; <nl> + } <nl> + a4lo + = a2 & mask2 ; <nl> + a4hi + = ( a2 > > 2 ) & mask2 ; <nl> + } <nl> + <nl> + <nl> + template < class Preproc > <nl> + simd16uint16 histogram_8 ( <nl> + const uint16_t * data , Preproc pp , <nl> + size_t n_in ) { <nl> + <nl> + assert ( n_in % 16 = = 0 ) ; <nl> + int n = n_in / 16 ; <nl> + <nl> + simd32uint8 a8lo ( 0 ) ; <nl> + simd32uint8 a8hi ( 0 ) ; <nl> + <nl> + for ( int i0 = 0 ; i0 < n ; i0 + = 15 ) { <nl> + simd16uint16 a4lo ( 0 ) ; / / 4 - bit accus <nl> + simd16uint16 a4hi ( 0 ) ; <nl> + <nl> + int i1 = std : : min ( i0 + 15 , n ) ; <nl> + int i ; <nl> + for ( i = i0 ; i + 2 < i1 ; i + = 3 ) { <nl> + compute_accu2 < 3 > ( data , pp , a4lo , a4hi ) ; / / adds 3 max <nl> + } <nl> + switch ( i1 - i ) { <nl> + case 2 : <nl> + compute_accu2 < 2 > ( data , pp , a4lo , a4hi ) ; <nl> + break ; <nl> + case 1 : <nl> + compute_accu2 < 1 > ( data , pp , a4lo , a4hi ) ; <nl> + break ; <nl> + } <nl> + <nl> + a8lo + = accu4to8 ( a4lo ) ; <nl> + a8hi + = accu4to8 ( a4hi ) ; <nl> + } <nl> + <nl> + / / move to 16 - bit accu <nl> + simd16uint16 a16lo = accu8to16 ( a8lo ) ; <nl> + simd16uint16 a16hi = accu8to16 ( a8hi ) ; <nl> + <nl> + simd16uint16 a16 = simd16uint16 ( _mm256_hadd_epi16 ( a16lo . i , a16hi . i ) ) ; <nl> + <nl> + / / the 2 lanes must still be combined <nl> + return a16 ; <nl> + } <nl> + <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * 16 bins <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + <nl> + <nl> + static const simd32uint8 shifts2 ( _mm256_setr_epi8 ( <nl> + 1 , 2 , 4 , 8 , 16 , 32 , 64 , ( char ) 128 , <nl> + 1 , 2 , 4 , 8 , 16 , 32 , 64 , ( char ) 128 , <nl> + 1 , 2 , 4 , 8 , 16 , 32 , 64 , ( char ) 128 , <nl> + 1 , 2 , 4 , 8 , 16 , 32 , 64 , ( char ) 128 <nl> + ) ) ; <nl> + <nl> + <nl> + simd32uint8 shiftr_16 ( simd32uint8 x , int n ) <nl> + { <nl> + return simd32uint8 ( simd16uint16 ( x ) > > n ) ; <nl> + } <nl> + <nl> + <nl> + inline simd32uint8 combine_2x2 ( simd32uint8 a , simd32uint8 b ) { <nl> + <nl> + __m256i a1b0 = _mm256_permute2f128_si256 ( a . i , b . i , 0x21 ) ; <nl> + __m256i a0b1 = _mm256_blend_epi32 ( a . i , b . i , 0xF0 ) ; <nl> + <nl> + return simd32uint8 ( a1b0 ) + simd32uint8 ( a0b1 ) ; <nl> + } <nl> + <nl> + <nl> + / / 2 - bit accumulator : we can add only up to 3 elements <nl> + / / on output we return 2 * 4 - bit results <nl> + template < int N , class Preproc > <nl> + void compute_accu2_16 ( <nl> + const uint16_t * & data , Preproc pp , <nl> + simd32uint8 & a4_0 , simd32uint8 & a4_1 , <nl> + simd32uint8 & a4_2 , simd32uint8 & a4_3 <nl> + ) { <nl> + simd32uint8 mask1 ( 0x55 ) ; <nl> + simd32uint8 a2_0 ; / / 2 - bit accu <nl> + simd32uint8 a2_1 ; / / 2 - bit accu <nl> + a2_0 . 
clear ( ) ; a2_1 . clear ( ) ; <nl> + <nl> + for ( int j = 0 ; j < N ; j + + ) { <nl> + simd16uint16 v ( data ) ; <nl> + data + = 16 ; <nl> + v = pp ( v ) ; <nl> + <nl> + simd16uint16 idx = v | ( v < < 8 ) ; <nl> + simd32uint8 a1 = shifts2 . lookup_2_lanes ( simd32uint8 ( idx ) ) ; <nl> + / / contains 0s for out - of - bounds elements <nl> + <nl> + simd16uint16 lt8 = ( v > > 3 ) = = simd16uint16 ( 0 ) ; <nl> + lt8 . i = _mm256_xor_si256 ( lt8 . i , _mm256_set1_epi16 ( 0xff00 ) ) ; <nl> + <nl> + a1 = a1 & lt8 ; <nl> + <nl> + a2_0 + = a1 & mask1 ; <nl> + a2_1 + = shiftr_16 ( a1 , 1 ) & mask1 ; <nl> + } <nl> + simd32uint8 mask2 ( 0x33 ) ; <nl> + <nl> + a4_0 + = a2_0 & mask2 ; <nl> + a4_1 + = a2_1 & mask2 ; <nl> + a4_2 + = shiftr_16 ( a2_0 , 2 ) & mask2 ; <nl> + a4_3 + = shiftr_16 ( a2_1 , 2 ) & mask2 ; <nl> + <nl> + } <nl> + <nl> + <nl> + simd32uint8 accu4to8_2 ( simd32uint8 a4_0 , simd32uint8 a4_1 ) { <nl> + simd32uint8 mask4 ( 0x0f ) ; <nl> + <nl> + simd32uint8 a8_0 = combine_2x2 ( <nl> + a4_0 & mask4 , <nl> + shiftr_16 ( a4_0 , 4 ) & mask4 <nl> + ) ; <nl> + <nl> + simd32uint8 a8_1 = combine_2x2 ( <nl> + a4_1 & mask4 , <nl> + shiftr_16 ( a4_1 , 4 ) & mask4 <nl> + ) ; <nl> + <nl> + return simd32uint8 ( _mm256_hadd_epi16 ( a8_0 . i , a8_1 . i ) ) ; <nl> + } <nl> + <nl> + <nl> + <nl> + template < class Preproc > <nl> + simd16uint16 histogram_16 ( const uint16_t * data , Preproc pp , size_t n_in ) { <nl> + <nl> + assert ( n_in % 16 = = 0 ) ; <nl> + int n = n_in / 16 ; <nl> + <nl> + simd32uint8 a8lo ( ( uint8_t ) 0 ) ; <nl> + simd32uint8 a8hi ( ( uint8_t ) 0 ) ; <nl> + <nl> + for ( int i0 = 0 ; i0 < n ; i0 + = 7 ) { <nl> + simd32uint8 a4_0 ( 0 ) ; / / 0 , 4 , 8 , 12 <nl> + simd32uint8 a4_1 ( 0 ) ; / / 1 , 5 , 9 , 13 <nl> + simd32uint8 a4_2 ( 0 ) ; / / 2 , 6 , 10 , 14 <nl> + simd32uint8 a4_3 ( 0 ) ; / / 3 , 7 , 11 , 15 <nl> + <nl> + int i1 = std : : min ( i0 + 7 , n ) ; <nl> + int i ; <nl> + for ( i = i0 ; i + 2 < i1 ; i + = 3 ) { <nl> + compute_accu2_16 < 3 > ( data , pp , a4_0 , a4_1 , a4_2 , a4_3 ) ; <nl> + } <nl> + switch ( i1 - i ) { <nl> + case 2 : <nl> + compute_accu2_16 < 2 > ( data , pp , a4_0 , a4_1 , a4_2 , a4_3 ) ; <nl> + break ; <nl> + case 1 : <nl> + compute_accu2_16 < 1 > ( data , pp , a4_0 , a4_1 , a4_2 , a4_3 ) ; <nl> + break ; <nl> + } <nl> + <nl> + a8lo + = accu4to8_2 ( a4_0 , a4_1 ) ; <nl> + a8hi + = accu4to8_2 ( a4_2 , a4_3 ) ; <nl> + } <nl> + <nl> + / / move to 16 - bit accu <nl> + simd16uint16 a16lo = accu8to16 ( a8lo ) ; <nl> + simd16uint16 a16hi = accu8to16 ( a8hi ) ; <nl> + <nl> + simd16uint16 a16 = simd16uint16 ( _mm256_hadd_epi16 ( a16lo . i , a16hi . i ) ) ; <nl> + <nl> + __m256i perm32 = _mm256_setr_epi32 ( <nl> + 0 , 2 , 4 , 6 , 1 , 3 , 5 , 7 <nl> + ) ; <nl> + a16 . i = _mm256_permutevar8x32_epi32 ( a16 . i , perm32 ) ; <nl> + <nl> + return a16 ; <nl> + } <nl> + <nl> + struct PreprocNOP { <nl> + simd16uint16 operator ( ) ( simd16uint16 x ) { <nl> + return x ; <nl> + } <nl> + <nl> + } ; <nl> + <nl> + <nl> + template < int shift , int nbin > <nl> + struct PreprocMinShift { <nl> + simd16uint16 min16 ; <nl> + simd16uint16 max16 ; <nl> + <nl> + explicit PreprocMinShift ( uint16_t min ) { <nl> + min16 . set1 ( min ) ; <nl> + int vmax0 = std : : min ( ( nbin < < shift ) + min , 65536 ) ; <nl> + uint16_t vmax = uint16_t ( vmax0 - 1 - min ) ; <nl> + max16 . 
set1 ( vmax ) ; / / vmax inclusive <nl> + } <nl> + <nl> + simd16uint16 operator ( ) ( simd16uint16 x ) { <nl> + x = x - min16 ; <nl> + simd16uint16 mask = ( x = = max ( x , max16 ) ) - ( x = = max16 ) ; <nl> + return ( x > > shift ) | mask ; <nl> + } <nl> + <nl> + } ; <nl> + <nl> + / * unbounded versions of the functions * / <nl> + <nl> + void simd_histogram_8_unbounded ( <nl> + const uint16_t * data , int n , <nl> + int * hist ) <nl> + { <nl> + PreprocNOP pp ; <nl> + simd16uint16 a16 = histogram_8 ( data , pp , ( n & ~ 15 ) ) ; <nl> + <nl> + ALIGNED ( 32 ) uint16_t a16_tab [ 16 ] ; <nl> + a16 . store ( a16_tab ) ; <nl> + <nl> + for ( int i = 0 ; i < 8 ; i + + ) { <nl> + hist [ i ] = a16_tab [ i ] + a16_tab [ i + 8 ] ; <nl> + } <nl> + <nl> + for ( int i = ( n & ~ 15 ) ; i < n ; i + + ) { <nl> + hist [ data [ i ] ] + + ; <nl> + } <nl> + <nl> + } <nl> + <nl> + <nl> + void simd_histogram_16_unbounded ( <nl> + const uint16_t * data , int n , <nl> + int * hist ) <nl> + { <nl> + <nl> + simd16uint16 a16 = histogram_16 ( data , PreprocNOP ( ) , ( n & ~ 15 ) ) ; <nl> + <nl> + ALIGNED ( 32 ) uint16_t a16_tab [ 16 ] ; <nl> + a16 . store ( a16_tab ) ; <nl> + <nl> + for ( int i = 0 ; i < 16 ; i + + ) { <nl> + hist [ i ] = a16_tab [ i ] ; <nl> + } <nl> + <nl> + for ( int i = ( n & ~ 15 ) ; i < n ; i + + ) { <nl> + hist [ data [ i ] ] + + ; <nl> + } <nl> + <nl> + } <nl> + <nl> + <nl> + <nl> + } / / anonymous namespace <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Driver routines <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + void simd_histogram_8 ( <nl> + const uint16_t * data , int n , <nl> + uint16_t min , int shift , <nl> + int * hist ) <nl> + { <nl> + if ( shift < 0 ) { <nl> + simd_histogram_8_unbounded ( data , n , hist ) ; <nl> + return ; <nl> + } <nl> + <nl> + simd16uint16 a16 ; <nl> + <nl> + # define DISPATCH ( s ) \ <nl> + case s : \ <nl> + a16 = histogram_8 ( data , PreprocMinShift < s , 8 > ( min ) , ( n & ~ 15 ) ) ; \ <nl> + break <nl> + <nl> + switch ( shift ) { <nl> + DISPATCH ( 0 ) ; <nl> + DISPATCH ( 1 ) ; <nl> + DISPATCH ( 2 ) ; <nl> + DISPATCH ( 3 ) ; <nl> + DISPATCH ( 4 ) ; <nl> + DISPATCH ( 5 ) ; <nl> + DISPATCH ( 6 ) ; <nl> + DISPATCH ( 7 ) ; <nl> + DISPATCH ( 8 ) ; <nl> + DISPATCH ( 9 ) ; <nl> + DISPATCH ( 10 ) ; <nl> + DISPATCH ( 11 ) ; <nl> + DISPATCH ( 12 ) ; <nl> + DISPATCH ( 13 ) ; <nl> + default : <nl> + FAISS_THROW_FMT ( " dispatch for shift = % d not instantiated " , shift ) ; <nl> + } <nl> + # undef DISPATCH <nl> + <nl> + ALIGNED ( 32 ) uint16_t a16_tab [ 16 ] ; <nl> + a16 . 
store ( a16_tab ) ; <nl> + <nl> + for ( int i = 0 ; i < 8 ; i + + ) { <nl> + hist [ i ] = a16_tab [ i ] + a16_tab [ i + 8 ] ; <nl> + } <nl> + <nl> + / / complete with remaining bins <nl> + for ( int i = ( n & ~ 15 ) ; i < n ; i + + ) { <nl> + if ( data [ i ] < min ) continue ; <nl> + uint16_t v = data [ i ] - min ; <nl> + v > > = shift ; <nl> + if ( v < 8 ) hist [ v ] + + ; <nl> + } <nl> + <nl> + } <nl> + <nl> + <nl> + <nl> + void simd_histogram_16 ( <nl> + const uint16_t * data , int n , <nl> + uint16_t min , int shift , <nl> + int * hist ) <nl> + { <nl> + if ( shift < 0 ) { <nl> + simd_histogram_16_unbounded ( data , n , hist ) ; <nl> + return ; <nl> + } <nl> + <nl> + simd16uint16 a16 ; <nl> + <nl> + # define DISPATCH ( s ) \ <nl> + case s : \ <nl> + a16 = histogram_16 ( data , PreprocMinShift < s , 16 > ( min ) , ( n & ~ 15 ) ) ; \ <nl> + break <nl> + <nl> + switch ( shift ) { <nl> + DISPATCH ( 0 ) ; <nl> + DISPATCH ( 1 ) ; <nl> + DISPATCH ( 2 ) ; <nl> + DISPATCH ( 3 ) ; <nl> + DISPATCH ( 4 ) ; <nl> + DISPATCH ( 5 ) ; <nl> + DISPATCH ( 6 ) ; <nl> + DISPATCH ( 7 ) ; <nl> + DISPATCH ( 8 ) ; <nl> + DISPATCH ( 9 ) ; <nl> + DISPATCH ( 10 ) ; <nl> + DISPATCH ( 11 ) ; <nl> + DISPATCH ( 12 ) ; <nl> + default : <nl> + FAISS_THROW_FMT ( " dispatch for shift = % d not instantiated " , shift ) ; <nl> + } <nl> + # undef DISPATCH <nl> + <nl> + ALIGNED ( 32 ) uint16_t a16_tab [ 16 ] ; <nl> + a16 . store ( a16_tab ) ; <nl> + <nl> + for ( int i = 0 ; i < 16 ; i + + ) { <nl> + hist [ i ] = a16_tab [ i ] ; <nl> + } <nl> + <nl> + for ( int i = ( n & ~ 15 ) ; i < n ; i + + ) { <nl> + if ( data [ i ] < min ) continue ; <nl> + uint16_t v = data [ i ] - min ; <nl> + v > > = shift ; <nl> + if ( v < 16 ) hist [ v ] + + ; <nl> + } <nl> + <nl> + } <nl> + <nl> + <nl> + / / no AVX2 <nl> + # else <nl> + <nl> + <nl> + <nl> + void simd_histogram_16 ( <nl> + const uint16_t * data , int n , <nl> + uint16_t min , int shift , <nl> + int * hist ) <nl> + { <nl> + memset ( hist , 0 , sizeof ( * hist ) * 16 ) ; <nl> + if ( shift < 0 ) { <nl> + for ( size_t i = 0 ; i < n ; i + + ) { <nl> + hist [ data [ i ] ] + + ; <nl> + } <nl> + } else { <nl> + int vmax0 = std : : min ( ( 16 < < shift ) + min , 65536 ) ; <nl> + uint16_t vmax = uint16_t ( vmax0 - 1 - min ) ; <nl> + <nl> + for ( size_t i = 0 ; i < n ; i + + ) { <nl> + uint16_t v = data [ i ] ; <nl> + v - = min ; <nl> + if ( ! ( v < = vmax ) ) <nl> + continue ; <nl> + v > > = shift ; <nl> + hist [ v ] + + ; <nl> + <nl> + / * <nl> + if ( data [ i ] < min ) continue ; <nl> + uint16_t v = data [ i ] - min ; <nl> + v > > = shift ; <nl> + if ( v < 16 ) hist [ v ] + + ; <nl> + * / <nl> + } <nl> + } <nl> + <nl> + } <nl> + <nl> + void simd_histogram_8 ( <nl> + const uint16_t * data , int n , <nl> + uint16_t min , int shift , <nl> + int * hist ) <nl> + { <nl> + memset ( hist , 0 , sizeof ( * hist ) * 8 ) ; <nl> + if ( shift < 0 ) { <nl> + for ( size_t i = 0 ; i < n ; i + + ) { <nl> + hist [ data [ i ] ] + + ; <nl> + } <nl> + } else { <nl> + for ( size_t i = 0 ; i < n ; i + + ) { <nl> + if ( data [ i ] < min ) continue ; <nl> + uint16_t v = data [ i ] - min ; <nl> + v > > = shift ; <nl> + if ( v < 8 ) hist [ v ] + + ; <nl> + } <nl> + } <nl> + <nl> + } <nl> + <nl> + <nl> + # endif <nl> + <nl> + <nl> + void PartitionStats : : reset ( ) { <nl> + memset ( this , 0 , sizeof ( * this ) ) ; <nl> + } <nl> + <nl> + PartitionStats partition_stats ; <nl> + <nl> <nl> <nl> } / / namespace faiss <nl> mmm a / faiss / utils / partitioning . h <nl> ppp b / faiss / utils / partitioning . 
h <nl> <nl> # include < stdint . h > <nl> # include < stdio . h > <nl> <nl> + # include < faiss / impl / platform_macros . h > <nl> + <nl> namespace faiss { <nl> <nl> <nl> typename C : : T partition_fuzzy ( <nl> typename C : : T * vals , typename C : : TI * ids , size_t n , <nl> size_t q_min , size_t q_max , size_t * q_out ) ; <nl> <nl> + / * * simplified interface for when the parition is not fuzzy * / <nl> + template < class C > <nl> + inline typename C : : T partition ( <nl> + typename C : : T * vals , typename C : : TI * ids , size_t n , <nl> + size_t q ) <nl> + { <nl> + return partition_fuzzy < C > ( vals , ids , n , q , q , nullptr ) ; <nl> + } <nl> + <nl> + / * * low level SIMD histogramming functions * / <nl> + <nl> + / * * 8 - bin histogram of ( x - min ) > > shift <nl> + * values outside the range are ignored . <nl> + * the data table should be aligned on 32 bytes * / <nl> + void simd_histogram_8 ( <nl> + const uint16_t * data , int n , <nl> + uint16_t min , int shift , <nl> + int * hist ) ; <nl> + <nl> + / * * same for 16 - bin histogram * / <nl> + void simd_histogram_16 ( <nl> + const uint16_t * data , int n , <nl> + uint16_t min , int shift , <nl> + int * hist ) ; <nl> + <nl> + <nl> + struct PartitionStats { <nl> + uint64_t bissect_cycles ; <nl> + uint64_t compress_cycles ; <nl> + <nl> + PartitionStats ( ) { reset ( ) ; } <nl> + void reset ( ) ; <nl> + } ; <nl> + <nl> + / / global var that collects them all <nl> + FAISS_API extern PartitionStats partition_stats ; <nl> + <nl> <nl> <nl> } / / namespace faiss <nl> new file mode 100644 <nl> index 000000000 . . 63fa97b69 <nl> mmm / dev / null <nl> ppp b / faiss / utils / quantize_lut . cpp <nl> <nl> + / * * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the root directory of this source tree . <nl> + * / <nl> + <nl> + <nl> + # include < faiss / utils / quantize_lut . h > <nl> + <nl> + # include < cmath > <nl> + # include < cstring > <nl> + # include < vector > <nl> + # include < algorithm > <nl> + <nl> + # include < faiss / impl / FaissAssert . 
h > <nl> + <nl> + <nl> + namespace faiss { <nl> + <nl> + <nl> + namespace quantize_lut { <nl> + <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Quantize look - up tables <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + namespace { <nl> + <nl> + float round_uint8_and_mul ( float * tab , size_t n ) { <nl> + float max = 0 ; <nl> + for ( int i = 0 ; i < n ; i + + ) { <nl> + if ( fabs ( tab [ i ] ) > max ) { <nl> + max = fabs ( tab [ i ] ) ; <nl> + } <nl> + } <nl> + float multiplier = 127 / max ; <nl> + for ( int i = 0 ; i < n ; i + + ) { <nl> + tab [ i ] = floorf ( tab [ i ] * multiplier + 128 ) ; <nl> + } <nl> + return multiplier ; <nl> + } <nl> + <nl> + <nl> + float tab_min ( const float * tab , size_t n ) { <nl> + float min = HUGE_VAL ; <nl> + for ( int i = 0 ; i < n ; i + + ) { <nl> + if ( tab [ i ] < min ) min = tab [ i ] ; <nl> + } <nl> + return min ; <nl> + } <nl> + <nl> + float tab_max ( const float * tab , size_t n ) { <nl> + float max = - HUGE_VAL ; <nl> + for ( int i = 0 ; i < n ; i + + ) { <nl> + if ( tab [ i ] > max ) max = tab [ i ] ; <nl> + } <nl> + return max ; <nl> + } <nl> + <nl> + void round_tab ( float * tab , size_t n , float a , float bi ) { <nl> + for ( int i = 0 ; i < n ; i + + ) { <nl> + tab [ i ] = floorf ( ( tab [ i ] - bi ) * a + 0 . 5 ) ; <nl> + } <nl> + } <nl> + <nl> + template < typename T > <nl> + void round_tab ( const float * tab , size_t n , float a , float bi , T * tab_out ) { <nl> + for ( int i = 0 ; i < n ; i + + ) { <nl> + tab_out [ i ] = ( T ) floorf ( ( tab [ i ] - bi ) * a + 0 . 5 ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + <nl> + } / / anonymous namespace <nl> + <nl> + void round_uint8_per_column ( <nl> + float * tab , size_t n , size_t d , <nl> + float * a_out , float * b_out ) <nl> + { <nl> + float max_span = 0 ; <nl> + std : : vector < float > mins ( n ) ; <nl> + for ( int i = 0 ; i < n ; i + + ) { <nl> + mins [ i ] = tab_min ( tab + i * d , d ) ; <nl> + float span = tab_max ( tab + i * d , d ) - mins [ i ] ; <nl> + if ( span > max_span ) { <nl> + max_span = span ; <nl> + } <nl> + } <nl> + float a = 255 / max_span ; <nl> + float b = 0 ; <nl> + for ( int i = 0 ; i < n ; i + + ) { <nl> + b + = mins [ i ] ; <nl> + round_tab ( tab + i * d , d , a , mins [ i ] ) ; <nl> + } <nl> + if ( a_out ) * a_out = a ; <nl> + if ( b_out ) * b_out = b ; <nl> + } <nl> + <nl> + void round_uint8_per_column_multi ( <nl> + float * tab , size_t m , size_t n , size_t d , <nl> + float * a_out , float * b_out ) <nl> + { <nl> + float max_span = 0 ; <nl> + std : : vector < float > mins ( n ) ; <nl> + for ( int i = 0 ; i < n ; i + + ) { <nl> + float min_i = HUGE_VAL ; <nl> + float max_i = - HUGE_VAL ; <nl> + for ( int j = 0 ; j < m ; j + + ) { <nl> + min_i = std : : min ( min_i , tab_min ( tab + ( j * n + i ) * d , d ) ) ; <nl> + max_i = std : : max ( max_i , tab_max ( tab + ( j * n + i ) * d , d ) ) ; <nl> + } <nl> + mins [ i ] = min_i ; <nl> + float span = max_i - min_i ; <nl> + if ( span > max_span ) { <nl> + max_span = span ; <nl> + } <nl> + } <nl> + float a = 255 / max_span ; <nl> + float b = 0 ; <nl> + for ( int i = 0 ; i < n ; i + + ) { <nl> + b + = mins [ i ] ; <nl> + for ( int j = 0 ; j < m ; j + + ) { <nl> + round_tab ( tab + ( j * n + i ) * d , d , a , mins [ i ] ) ; <nl> + } <nl> + } <nl> + if ( a_out ) * a_out = a ; <nl> + if ( b_out ) * b_out = b ; <nl> + } <nl> + <nl> + <nl> + / / translation of <nl> 
+ / / https : / / github . com / fairinternal / faiss_improvements / blob / 7122c3cc6ddb0a371d8aa6f1309cd8bcf2335e61 / LUT_quantization . ipynb <nl> + void quantize_LUT_and_bias ( <nl> + size_t nprobe , size_t M , size_t ksub , <nl> + bool lut_is_3d , <nl> + const float * LUT , <nl> + const float * bias , <nl> + uint8_t * LUTq , size_t M2 , <nl> + uint16_t * biasq , <nl> + float * a_out , float * b_out ) <nl> + { <nl> + float a , b ; <nl> + if ( ! bias ) { <nl> + FAISS_THROW_IF_NOT ( ! lut_is_3d ) ; <nl> + std : : vector < float > mins ( M ) ; <nl> + float max_span_LUT = - HUGE_VAL , max_span_dis = 0 ; <nl> + b = 0 ; <nl> + for ( int i = 0 ; i < M ; i + + ) { <nl> + mins [ i ] = tab_min ( LUT + i * ksub , ksub ) ; <nl> + float span = tab_max ( LUT + i * ksub , ksub ) - mins [ i ] ; <nl> + max_span_LUT = std : : max ( max_span_LUT , span ) ; <nl> + max_span_dis + = span ; <nl> + b + = mins [ i ] ; <nl> + } <nl> + a = std : : min ( 255 / max_span_LUT , 65535 / max_span_dis ) ; <nl> + <nl> + for ( int i = 0 ; i < M ; i + + ) { <nl> + round_tab ( LUT + i * ksub , ksub , a , mins [ i ] , LUTq + i * ksub ) ; <nl> + } <nl> + memset ( LUTq + M * ksub , 0 , ksub * ( M2 - M ) ) ; <nl> + } else if ( ! lut_is_3d ) { <nl> + std : : vector < float > mins ( M ) ; <nl> + float max_span_LUT = - HUGE_VAL , max_span_dis ; <nl> + float bias_min = tab_min ( bias , nprobe ) ; <nl> + float bias_max = tab_max ( bias , nprobe ) ; <nl> + max_span_dis = bias_max - bias_min ; <nl> + b = 0 ; <nl> + for ( int i = 0 ; i < M ; i + + ) { <nl> + mins [ i ] = tab_min ( LUT + i * ksub , ksub ) ; <nl> + float span = tab_max ( LUT + i * ksub , ksub ) - mins [ i ] ; <nl> + max_span_LUT = std : : max ( max_span_LUT , span ) ; <nl> + max_span_dis + = span ; <nl> + b + = mins [ i ] ; <nl> + } <nl> + a = std : : min ( 255 / max_span_LUT , 65535 / max_span_dis ) ; <nl> + b + = bias_min ; <nl> + <nl> + for ( int i = 0 ; i < M ; i + + ) { <nl> + round_tab ( LUT + i * ksub , ksub , a , mins [ i ] , LUTq + i * ksub ) ; <nl> + } <nl> + memset ( LUTq + M * ksub , 0 , ksub * ( M2 - M ) ) ; <nl> + round_tab ( bias , nprobe , a , bias_min , biasq ) ; <nl> + <nl> + } else if ( biasq ) { <nl> + std : : vector < float > mins ( nprobe * M ) ; <nl> + std : : vector < float > bias2 ( nprobe ) ; <nl> + float bias_min = tab_min ( bias , nprobe ) ; <nl> + float max_span_LUT = - HUGE_VAL , max_span_dis = - HUGE_VAL ; <nl> + <nl> + b = HUGE_VAL ; <nl> + size_t ij = 0 ; <nl> + for ( int j = 0 ; j < nprobe ; j + + ) { <nl> + float max_span_dis_j = bias [ j ] - bias_min ; <nl> + float b2j = bias [ j ] ; <nl> + for ( int i = 0 ; i < M ; i + + ) { <nl> + mins [ ij ] = tab_min ( LUT + ij * ksub , ksub ) ; <nl> + float span = tab_max ( LUT + ij * ksub , ksub ) - mins [ ij ] ; <nl> + max_span_LUT = std : : max ( max_span_LUT , span ) ; <nl> + max_span_dis_j + = span ; <nl> + b2j + = mins [ ij ] ; <nl> + ij + + ; <nl> + } <nl> + max_span_dis = std : : max ( max_span_dis , max_span_dis_j ) ; <nl> + bias2 [ j ] = b2j ; <nl> + b = std : : min ( b , b2j ) ; <nl> + } <nl> + <nl> + a = std : : min ( 255 / max_span_LUT , 65535 / max_span_dis ) ; <nl> + <nl> + ij = 0 ; <nl> + size_t ij_2 = 0 ; <nl> + for ( int j = 0 ; j < nprobe ; j + + ) { <nl> + for ( int i = 0 ; i < M ; i + + ) { <nl> + round_tab ( LUT + ij * ksub , ksub , a , mins [ ij ] , LUTq + ij_2 * ksub ) ; <nl> + ij + + ; ij_2 + + ; <nl> + } <nl> + memset ( LUTq + ij_2 * ksub , 0 , ksub * ( M2 - M ) ) ; <nl> + ij_2 + = M2 - M ; <nl> + } <nl> + <nl> + round_tab ( bias2 . 
data ( ) , nprobe , a , b , biasq ) ; <nl> + <nl> + } else { / / ! biasq <nl> + / / then we integrate the bias into the LUTs <nl> + std : : vector < float > LUT2_storage ( nprobe * M * ksub ) ; <nl> + float * LUT2 = LUT2_storage . data ( ) ; <nl> + size_t ijc = 0 ; <nl> + for ( int j = 0 ; j < nprobe ; j + + ) { <nl> + float bias_j = bias [ j ] / M ; <nl> + for ( int i = 0 ; i < M ; i + + ) { <nl> + for ( int c = 0 ; c < ksub ; c + + ) { <nl> + LUT2 [ ijc ] = LUT [ ijc ] + bias_j ; <nl> + ijc + + ; <nl> + } <nl> + } <nl> + } <nl> + std : : vector < float > mins ( M , HUGE_VAL ) , maxs ( M , - HUGE_VAL ) ; <nl> + size_t ij = 0 ; <nl> + for ( int j = 0 ; j < nprobe ; j + + ) { <nl> + for ( int i = 0 ; i < M ; i + + ) { <nl> + mins [ i ] = std : : min ( mins [ i ] , tab_min ( LUT2 + ij * ksub , ksub ) ) ; <nl> + maxs [ i ] = std : : max ( maxs [ i ] , tab_max ( LUT2 + ij * ksub , ksub ) ) ; <nl> + ij + + ; <nl> + } <nl> + } <nl> + <nl> + float max_span = - HUGE_VAL ; <nl> + b = 0 ; <nl> + for ( int i = 0 ; i < M ; i + + ) { <nl> + float span = maxs [ i ] - mins [ i ] ; <nl> + max_span = std : : max ( max_span , span ) ; <nl> + b + = mins [ i ] ; <nl> + } <nl> + a = 255 / max_span ; <nl> + ij = 0 ; <nl> + size_t ij_2 = 0 ; <nl> + for ( int j = 0 ; j < nprobe ; j + + ) { <nl> + for ( int i = 0 ; i < M ; i + + ) { <nl> + round_tab ( LUT2 + ij * ksub , ksub , a , mins [ i ] , LUTq + ij_2 * ksub ) ; <nl> + ij + + ; ij_2 + + ; <nl> + } <nl> + memset ( LUTq + ij_2 * ksub , 0 , ksub * ( M2 - M ) ) ; <nl> + ij_2 + = M2 - M ; <nl> + } <nl> + } <nl> + if ( a_out ) * a_out = a ; <nl> + if ( b_out ) * b_out = b ; <nl> + } <nl> + <nl> + <nl> + } / / namespace quantize_lut <nl> + <nl> + } / / namespace faiss <nl> new file mode 100644 <nl> index 000000000 . . afdbd1f8d <nl> mmm / dev / null <nl> ppp b / faiss / utils / quantize_lut . h <nl> <nl> + / * * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the root directory of this source tree . <nl> + * / <nl> + <nl> + <nl> + # pragma once <nl> + <nl> + <nl> + # include < cstdio > <nl> + # include < cstdint > <nl> + <nl> + namespace faiss { <nl> + <nl> + / * * Functions to quantize PQ floating - point Look Up Tables ( LUT ) to uint8 , and <nl> + * biases to uint16 . The accumulation is supposed to take place in uint16 . <nl> + * The quantization coefficients are float ( a , b ) such that <nl> + * <nl> + * original_value = quantized_value * a / b <nl> + * <nl> + * The hardest part of the quantization is with multiple LUTs that need to be <nl> + * added up together . In that case , coefficient a has to be chosen so that <nl> + * the sum fits in a uint16 accumulator . <nl> + * / <nl> + <nl> + namespace quantize_lut { <nl> + <nl> + / * affine quantizer , a and b are the affine coefficients , marginalize over d <nl> + * <nl> + * @ param tab input / output , size ( n , d ) <nl> + * / <nl> + void round_uint8_per_column ( <nl> + float * tab , size_t n , size_t d , <nl> + float * a_out = nullptr , <nl> + float * b_out = nullptr <nl> + ) ; <nl> + <nl> + <nl> + / * affine quantizer , a and b are the affine coefficients <nl> + * <nl> + * @ param tab input / output , size ( m , n , d ) <nl> + * / <nl> + void round_uint8_per_column_multi ( <nl> + float * tab , size_t m , size_t n , size_t d , <nl> + float * a_out = nullptr , float * b_out = nullptr ) ; <nl> + <nl> + / * * LUT quantization to uint8 and bias to uint16 . 
<nl> + * <nl> + * ( nprobe , M , ksub , lut_is_3d ) determine the size of the LUT <nl> + * <nl> + * LUT input : <nl> + * - 2D size ( M , ksub ) : single matrix per probe ( lut_is_3d = false ) <nl> + * - 3D size ( nprobe , M , ksub ) : separate LUT per probe ( lut_is_3d = true ) <nl> + * bias input : <nl> + * - nullptr : bias is 0 <nl> + * - size ( nprobe ) : one bias per probe <nl> + * Output : <nl> + * - LUTq uint8 version of the LUT ( M size is rounded up to M2 ) <nl> + * - biasq ( or nullptr ) : uint16 version of the bias <nl> + * - a , b : scalars to approximate the true distance <nl> + * / <nl> + <nl> + void quantize_LUT_and_bias ( <nl> + size_t nprobe , size_t M , size_t ksub , <nl> + bool lut_is_3d , <nl> + const float * LUT , <nl> + const float * bias , <nl> + uint8_t * LUTq , size_t M2 , <nl> + uint16_t * biasq , <nl> + float * a_out = nullptr , float * b_out = nullptr <nl> + ) ; <nl> + <nl> + <nl> + } / / namespace quantize_lut <nl> + <nl> + } / / namespace faiss <nl> + <nl> + <nl> mmm a / faiss / utils / simdlib . h <nl> ppp b / faiss / utils / simdlib . h <nl> <nl> - # pragma once <nl> - <nl> - # include < string > <nl> - # include < cstdint > <nl> + / * * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the root directory of this source tree . <nl> + * / <nl> <nl> - namespace faiss { <nl> + # pragma once <nl> <nl> - # ifdef __AVX2__ <nl> <nl> - # include < immintrin . h > <nl> <nl> - / * * Simple wrapper around the AVX 256 - bit registers <nl> + / * * Abstractions for 256 - bit registers <nl> * <nl> * The objective is to separate the different interpretations of the same <nl> * registers ( as a vector of uint8 , uint16 or uint32 ) , to provide printing <nl> - * functions , and to give more readable names to the AVX intrinsics . It does not <nl> - * pretend to be exhausitve , functions are added as needed . <nl> + * functions . 
<nl> * / <nl> <nl> - / / / 256 - bit representation without interpretation as a vector <nl> - struct simd256bit { <nl> - <nl> - union { <nl> - __m256i i ; <nl> - __m256 f ; <nl> - } ; <nl> - <nl> - simd256bit ( ) { } <nl> - <nl> - simd256bit ( __m256i i ) : i ( i ) { } <nl> - simd256bit ( __m256 f ) : f ( f ) { } <nl> - <nl> - simd256bit ( const void * x ) : <nl> - i ( _mm256_load_si256 ( ( __m256i const * ) x ) ) <nl> - { } <nl> - <nl> - void clear ( ) { <nl> - i = _mm256_setzero_si256 ( ) ; <nl> - } <nl> - <nl> - void storeu ( void * ptr ) const { <nl> - _mm256_storeu_si256 ( ( __m256i * ) ptr , i ) ; <nl> - } <nl> - <nl> - void loadu ( const void * ptr ) { <nl> - i = _mm256_loadu_si256 ( ( __m256i * ) ptr ) ; <nl> - } <nl> - <nl> - void store ( void * ptr ) const { <nl> - _mm256_store_si256 ( ( __m256i * ) ptr , i ) ; <nl> - } <nl> - <nl> - void bin ( char bits [ 257 ] ) const { <nl> - char bytes [ 32 ] ; <nl> - storeu ( ( void * ) bytes ) ; <nl> - for ( int i = 0 ; i < 256 ; i + + ) { <nl> - bits [ i ] = ' 0 ' + ( ( bytes [ i / 8 ] > > ( i % 8 ) ) & 1 ) ; <nl> - } <nl> - bits [ 256 ] = 0 ; <nl> - } <nl> - <nl> - std : : string bin ( ) const { <nl> - char bits [ 257 ] ; <nl> - bin ( bits ) ; <nl> - return std : : string ( bits ) ; <nl> - } <nl> - <nl> - } ; <nl> - <nl> - <nl> - / / / vector of 16 elements in uint16 <nl> - struct simd16uint16 : simd256bit { <nl> - simd16uint16 ( ) { } <nl> - <nl> - simd16uint16 ( int x ) : simd256bit ( _mm256_set1_epi16 ( x ) ) { } <nl> - <nl> - simd16uint16 ( uint16_t x ) : simd256bit ( _mm256_set1_epi16 ( x ) ) { } <nl> - <nl> - simd16uint16 ( simd256bit x ) : simd256bit ( x ) { } <nl> - <nl> - simd16uint16 ( const uint16_t * x ) : simd256bit ( ( const void * ) x ) { } <nl> - <nl> - std : : string elements_to_string ( const char * fmt ) const { <nl> - uint16_t bytes [ 16 ] ; <nl> - storeu ( ( void * ) bytes ) ; <nl> - char res [ 1000 ] , * ptr = res ; <nl> - for ( int i = 0 ; i < 16 ; i + + ) { <nl> - ptr + = sprintf ( ptr , fmt , bytes [ i ] ) ; <nl> - } <nl> - / / strip last , <nl> - ptr [ - 1 ] = 0 ; <nl> - return std : : string ( res ) ; <nl> - } <nl> - <nl> - std : : string hex ( ) const { <nl> - return elements_to_string ( " % 02x , " ) ; <nl> - } <nl> - <nl> - std : : string dec ( ) const { <nl> - return elements_to_string ( " % 3d , " ) ; <nl> - } <nl> - <nl> - void set1 ( uint16_t x ) { <nl> - i = _mm256_set1_epi16 ( ( short ) x ) ; <nl> - } <nl> - <nl> - / / shift must be known at compile time <nl> - simd16uint16 operator > > ( const int shift ) const { <nl> - return simd16uint16 ( _mm256_srli_epi16 ( i , shift ) ) ; <nl> - } <nl> - <nl> - / / shift must be known at compile time <nl> - simd16uint16 operator < < ( const int shift ) const { <nl> - return simd16uint16 ( _mm256_slli_epi16 ( i , shift ) ) ; <nl> - } <nl> - <nl> - simd16uint16 operator + = ( simd16uint16 other ) { <nl> - i = _mm256_add_epi16 ( i , other . i ) ; <nl> - return * this ; <nl> - } <nl> - <nl> - simd16uint16 operator - = ( simd16uint16 other ) { <nl> - i = _mm256_sub_epi16 ( i , other . i ) ; <nl> - return * this ; <nl> - } <nl> - <nl> - simd16uint16 operator + ( simd16uint16 other ) const { <nl> - return simd16uint16 ( _mm256_add_epi16 ( i , other . i ) ) ; <nl> - } <nl> - <nl> - simd16uint16 operator - ( simd16uint16 other ) const { <nl> - return simd16uint16 ( _mm256_sub_epi16 ( i , other . i ) ) ; <nl> - } <nl> - <nl> - simd16uint16 operator & ( simd256bit other ) const { <nl> - return simd16uint16 ( _mm256_and_si256 ( i , other . 
i ) ) ; <nl> - } <nl> - <nl> - simd16uint16 operator | ( simd256bit other ) const { <nl> - return simd16uint16 ( _mm256_or_si256 ( i , other . i ) ) ; <nl> - } <nl> - <nl> - simd16uint16 operator = = ( simd256bit other ) const { <nl> - return simd16uint16 ( _mm256_cmpeq_epi16 ( i , other . i ) ) ; <nl> - } <nl> - <nl> - / / get scalar at index 0 <nl> - uint16_t get_scalar_0 ( ) const { <nl> - return _mm256_extract_epi16 ( i , 0 ) ; <nl> - } <nl> - <nl> - / / mask of elements where this > = thresh <nl> - / / 2 bit per component : 16 * 2 = 32 bit <nl> - uint32_t ge_mask ( simd16uint16 thresh ) const { <nl> - __m256i j = thresh . i ; <nl> - __m256i max = _mm256_max_epu16 ( i , j ) ; <nl> - __m256i ge = _mm256_cmpeq_epi16 ( i , max ) ; <nl> - return _mm256_movemask_epi8 ( ge ) ; <nl> - } <nl> - <nl> - uint32_t le_mask ( simd16uint16 thresh ) const { <nl> - return thresh . ge_mask ( * this ) ; <nl> - } <nl> - <nl> - uint32_t gt_mask ( simd16uint16 thresh ) const { <nl> - return ~ le_mask ( thresh ) ; <nl> - } <nl> - <nl> - bool all_gt ( simd16uint16 thresh ) const { <nl> - return le_mask ( thresh ) = = 0 ; <nl> - } <nl> - <nl> - / / for debugging only <nl> - uint16_t operator [ ] ( int i ) const { <nl> - uint16_t tab [ 16 ] __attribute__ ( ( aligned ( 32 ) ) ) ; <nl> - store ( tab ) ; <nl> - return tab [ i ] ; <nl> - } <nl> - <nl> - void accu_min ( simd16uint16 incoming ) { <nl> - i = _mm256_min_epu16 ( i , incoming . i ) ; <nl> - } <nl> - <nl> - void accu_max ( simd16uint16 incoming ) { <nl> - i = _mm256_max_epu16 ( i , incoming . i ) ; <nl> - } <nl> - <nl> - <nl> - } ; <nl> - <nl> - / / decompose in 128 - lanes : a = ( a0 , a1 ) , b = ( b0 , b1 ) <nl> - / / return ( a0 + a1 , b0 + b1 ) <nl> - / / TODO find a better name <nl> - inline simd16uint16 combine2x2 ( simd16uint16 a , simd16uint16 b ) { <nl> - <nl> - __m256i a1b0 = _mm256_permute2f128_si256 ( a . i , b . i , 0x21 ) ; <nl> - __m256i a0b1 = _mm256_blend_epi32 ( a . i , b . i , 0xF0 ) ; <nl> - <nl> - return simd16uint16 ( a1b0 ) + simd16uint16 ( a0b1 ) ; <nl> - } <nl> - <nl> - / / compare d0 and d1 to thr , return 32 bits corresponding to the concatenation <nl> - / / of d0 and d1 with thr <nl> - inline uint32_t cmp_ge32 ( simd16uint16 d0 , simd16uint16 d1 , simd16uint16 thr ) { <nl> - <nl> - __m256i max0 = _mm256_max_epu16 ( d0 . i , thr . i ) ; <nl> - __m256i ge0 = _mm256_cmpeq_epi16 ( d0 . i , max0 ) ; <nl> - <nl> - __m256i max1 = _mm256_max_epu16 ( d1 . i , thr . i ) ; <nl> - __m256i ge1 = _mm256_cmpeq_epi16 ( d1 . i , max1 ) ; <nl> - <nl> - __m256i ge01 = _mm256_packs_epi16 ( ge0 , ge1 ) ; <nl> - <nl> - / / easier than manipulating bit fields afterwards <nl> - ge01 = _mm256_permute4x64_epi64 ( ge01 , 0 | ( 2 < < 2 ) | ( 1 < < 4 ) | ( 3 < < 6 ) ) ; <nl> - uint32_t ge = _mm256_movemask_epi8 ( ge01 ) ; <nl> - <nl> - return ge ; <nl> - } <nl> - <nl> - <nl> - inline uint32_t cmp_le32 ( simd16uint16 d0 , simd16uint16 d1 , simd16uint16 thr ) { <nl> - <nl> - __m256i max0 = _mm256_min_epu16 ( d0 . i , thr . i ) ; <nl> - __m256i ge0 = _mm256_cmpeq_epi16 ( d0 . i , max0 ) ; <nl> - <nl> - __m256i max1 = _mm256_min_epu16 ( d1 . i , thr . i ) ; <nl> - __m256i ge1 = _mm256_cmpeq_epi16 ( d1 . 
i , max1 ) ; <nl> - <nl> - __m256i ge01 = _mm256_packs_epi16 ( ge0 , ge1 ) ; <nl> - <nl> - / / easier than manipulating bit fields afterwards <nl> - ge01 = _mm256_permute4x64_epi64 ( ge01 , 0 | ( 2 < < 2 ) | ( 1 < < 4 ) | ( 3 < < 6 ) ) ; <nl> - uint32_t ge = _mm256_movemask_epi8 ( ge01 ) ; <nl> - <nl> - return ge ; <nl> - } <nl> - <nl> - <nl> - <nl> - / / vector of 32 unsigned 8 - bit integers <nl> - struct simd32uint8 : simd256bit { <nl> - <nl> - simd32uint8 ( ) { } <nl> - <nl> - simd32uint8 ( int x ) : simd256bit ( _mm256_set1_epi8 ( x ) ) { } <nl> - <nl> - simd32uint8 ( uint8_t x ) : simd256bit ( _mm256_set1_epi8 ( x ) ) { } <nl> - <nl> - simd32uint8 ( simd256bit x ) : simd256bit ( x ) { } <nl> - <nl> - simd32uint8 ( const uint8_t * x ) : simd256bit ( ( const void * ) x ) { } <nl> - <nl> - std : : string elements_to_string ( const char * fmt ) const { <nl> - uint8_t bytes [ 32 ] ; <nl> - storeu ( ( void * ) bytes ) ; <nl> - char res [ 1000 ] , * ptr = res ; <nl> - for ( int i = 0 ; i < 32 ; i + + ) { <nl> - ptr + = sprintf ( ptr , fmt , bytes [ i ] ) ; <nl> - } <nl> - / / strip last , <nl> - ptr [ - 1 ] = 0 ; <nl> - return std : : string ( res ) ; <nl> - } <nl> - <nl> - std : : string hex ( ) const { <nl> - return elements_to_string ( " % 02x , " ) ; <nl> - } <nl> - <nl> - std : : string dec ( ) const { <nl> - return elements_to_string ( " % 3d , " ) ; <nl> - } <nl> - <nl> - void set1 ( uint8_t x ) { <nl> - i = _mm256_set1_epi8 ( ( char ) x ) ; <nl> - } <nl> - <nl> - simd32uint8 operator & ( simd256bit other ) const { <nl> - return simd32uint8 ( _mm256_and_si256 ( i , other . i ) ) ; <nl> - } <nl> - <nl> - simd32uint8 operator + ( simd32uint8 other ) const { <nl> - return simd32uint8 ( _mm256_add_epi8 ( i , other . i ) ) ; <nl> - } <nl> - <nl> - simd32uint8 lookup_2_lanes ( simd32uint8 idx ) const { <nl> - return simd32uint8 ( _mm256_shuffle_epi8 ( i , idx . i ) ) ; <nl> - } <nl> - <nl> - / / extract + 0 - extend lane <nl> - / / this operation is slow ( 3 cycles ) <nl> - simd16uint16 lane0_as_uint16 ( ) const { <nl> - __m128i x = _mm256_extracti128_si256 ( i , 0 ) ; <nl> - return simd16uint16 ( _mm256_cvtepu8_epi16 ( x ) ) ; <nl> - } <nl> - <nl> - simd16uint16 lane1_as_uint16 ( ) const { <nl> - __m128i x = _mm256_extracti128_si256 ( i , 1 ) ; <nl> - return simd16uint16 ( _mm256_cvtepu8_epi16 ( x ) ) ; <nl> - } <nl> - <nl> - simd32uint8 operator + = ( simd32uint8 other ) { <nl> - i = _mm256_add_epi8 ( i , other . 
i ) ; <nl> - return * this ; <nl> - } <nl> - <nl> - / / for debugging only <nl> - uint8_t operator [ ] ( int i ) const { <nl> - uint8_t tab [ 32 ] __attribute__ ( ( aligned ( 32 ) ) ) ; <nl> - store ( tab ) ; <nl> - return tab [ i ] ; <nl> - } <nl> - <nl> - } ; <nl> - <nl> - / / / vector of 8 unsigned 32 - bit integers <nl> - struct simd8uint32 : simd256bit { <nl> - simd8uint32 ( ) { } <nl> - <nl> - <nl> - simd8uint32 ( uint32_t x ) : simd256bit ( _mm256_set1_epi32 ( x ) ) { } <nl> - <nl> - simd8uint32 ( simd256bit x ) : simd256bit ( x ) { } <nl> - <nl> - simd8uint32 ( const uint8_t * x ) : simd256bit ( ( const void * ) x ) { } <nl> - <nl> - std : : string elements_to_string ( const char * fmt ) const { <nl> - uint32_t bytes [ 8 ] ; <nl> - storeu ( ( void * ) bytes ) ; <nl> - char res [ 1000 ] , * ptr = res ; <nl> - for ( int i = 0 ; i < 8 ; i + + ) { <nl> - ptr + = sprintf ( ptr , fmt , bytes [ i ] ) ; <nl> - } <nl> - / / strip last , <nl> - ptr [ - 1 ] = 0 ; <nl> - return std : : string ( res ) ; <nl> - } <nl> - <nl> - std : : string hex ( ) const { <nl> - return elements_to_string ( " % 08x , " ) ; <nl> - } <nl> - <nl> - std : : string dec ( ) const { <nl> - return elements_to_string ( " % 10d , " ) ; <nl> - } <nl> - <nl> - void set1 ( uint32_t x ) { <nl> - i = _mm256_set1_epi32 ( ( int ) x ) ; <nl> - } <nl> - <nl> - } ; <nl> - <nl> - struct simd8float32 : simd256bit { <nl> - <nl> - simd8float32 ( ) { } <nl> - <nl> - simd8float32 ( simd256bit x ) : simd256bit ( x ) { } <nl> - <nl> - simd8float32 ( float x ) : simd256bit ( _mm256_set1_ps ( x ) ) { } <nl> - <nl> - simd8float32 ( const float * x ) : simd256bit ( _mm256_load_ps ( x ) ) { } <nl> - <nl> - simd8float32 operator * ( simd8float32 other ) const { <nl> - return simd8float32 ( _mm256_mul_ps ( f , other . f ) ) ; <nl> - } <nl> - <nl> - simd8float32 operator + ( simd8float32 other ) const { <nl> - return simd8float32 ( _mm256_add_ps ( f , other . f ) ) ; <nl> - } <nl> - <nl> - simd8float32 operator - ( simd8float32 other ) const { <nl> - return simd8float32 ( _mm256_sub_ps ( f , other . f ) ) ; <nl> - } <nl> - <nl> - std : : string tostring ( ) const { <nl> - float tab [ 8 ] ; <nl> - storeu ( ( void * ) tab ) ; <nl> - char res [ 1000 ] , * ptr = res ; <nl> - for ( int i = 0 ; i < 8 ; i + + ) { <nl> - ptr + = sprintf ( ptr , " % g , " , tab [ i ] ) ; <nl> - } <nl> - / / strip last , <nl> - ptr [ - 1 ] = 0 ; <nl> - return std : : string ( res ) ; <nl> - } <nl> - <nl> - } ; <nl> - <nl> - inline simd8float32 hadd ( simd8float32 a , simd8float32 b ) { <nl> - return simd8float32 ( _mm256_hadd_ps ( a . f , b . f ) ) ; <nl> - } <nl> - <nl> - inline simd8float32 unpacklo ( simd8float32 a , simd8float32 b ) { <nl> - return simd8float32 ( _mm256_unpacklo_ps ( a . f , b . f ) ) ; <nl> - } <nl> - <nl> - inline simd8float32 unpackhi ( simd8float32 a , simd8float32 b ) { <nl> - return simd8float32 ( _mm256_unpackhi_ps ( a . f , b . f ) ) ; <nl> - } <nl> - <nl> + # ifdef __AVX2__ <nl> <nl> - / / compute a * b + c <nl> - inline simd8float32 fmadd ( simd8float32 a , simd8float32 b , simd8float32 c ) { <nl> - return simd8float32 ( _mm256_fmadd_ps ( a . f , b . f , c . f ) ) ; <nl> - } <nl> + # include < faiss / utils / simdlib_avx2 . h > <nl> <nl> # else <nl> <nl> - / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> - * Eventullay it would be good to have a scalar emulation of SIMD <nl> - * registers . Leave empty for now . 
<nl> - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + / / emulated = all operations are implemented as scalars <nl> + # include < faiss / utils / simdlib_emulated . h > <nl> <nl> + / / FIXME : make a SSE version <nl> + / / is this ever going to happen ? We will probably rather implement AVX512 <nl> <nl> # endif <nl> - <nl> - } / / namespace faiss <nl> new file mode 100644 <nl> index 000000000 . . d0f8df215 <nl> mmm / dev / null <nl> ppp b / faiss / utils / simdlib_avx2 . h <nl> <nl> + / * * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the root directory of this source tree . <nl> + * / <nl> + <nl> + # pragma once <nl> + <nl> + # include < string > <nl> + # include < cstdint > <nl> + <nl> + # include < immintrin . h > <nl> + <nl> + # include < faiss / impl / platform_macros . h > <nl> + <nl> + namespace faiss { <nl> + <nl> + <nl> + / * * Simple wrapper around the AVX 256 - bit registers <nl> + * <nl> + * The objective is to separate the different interpretations of the same <nl> + * registers ( as a vector of uint8 , uint16 or uint32 ) , to provide printing <nl> + * functions , and to give more readable names to the AVX intrinsics . It does not <nl> + * pretend to be exhausitve , functions are added as needed . <nl> + * / <nl> + <nl> + / / / 256 - bit representation without interpretation as a vector <nl> + struct simd256bit { <nl> + <nl> + union { <nl> + __m256i i ; <nl> + __m256 f ; <nl> + } ; <nl> + <nl> + simd256bit ( ) { } <nl> + <nl> + explicit simd256bit ( __m256i i ) : i ( i ) { } <nl> + <nl> + explicit simd256bit ( __m256 f ) : f ( f ) { } <nl> + <nl> + explicit simd256bit ( const void * x ) : <nl> + i ( _mm256_load_si256 ( ( __m256i const * ) x ) ) <nl> + { } <nl> + <nl> + void clear ( ) { <nl> + i = _mm256_setzero_si256 ( ) ; <nl> + } <nl> + <nl> + void storeu ( void * ptr ) const { <nl> + _mm256_storeu_si256 ( ( __m256i * ) ptr , i ) ; <nl> + } <nl> + <nl> + void loadu ( const void * ptr ) { <nl> + i = _mm256_loadu_si256 ( ( __m256i * ) ptr ) ; <nl> + } <nl> + <nl> + void store ( void * ptr ) const { <nl> + _mm256_store_si256 ( ( __m256i * ) ptr , i ) ; <nl> + } <nl> + <nl> + void bin ( char bits [ 257 ] ) const { <nl> + char bytes [ 32 ] ; <nl> + storeu ( ( void * ) bytes ) ; <nl> + for ( int i = 0 ; i < 256 ; i + + ) { <nl> + bits [ i ] = ' 0 ' + ( ( bytes [ i / 8 ] > > ( i % 8 ) ) & 1 ) ; <nl> + } <nl> + bits [ 256 ] = 0 ; <nl> + } <nl> + <nl> + std : : string bin ( ) const { <nl> + char bits [ 257 ] ; <nl> + bin ( bits ) ; <nl> + return std : : string ( bits ) ; <nl> + } <nl> + <nl> + } ; <nl> + <nl> + <nl> + / / / vector of 16 elements in uint16 <nl> + struct simd16uint16 : simd256bit { <nl> + simd16uint16 ( ) { } <nl> + <nl> + explicit simd16uint16 ( __m256i i ) : simd256bit ( i ) { } <nl> + <nl> + explicit simd16uint16 ( int x ) : simd256bit ( _mm256_set1_epi16 ( x ) ) { } <nl> + <nl> + explicit simd16uint16 ( uint16_t x ) : simd256bit ( _mm256_set1_epi16 ( x ) ) { } <nl> + <nl> + explicit simd16uint16 ( simd256bit x ) : simd256bit ( x ) { } <nl> + <nl> + explicit simd16uint16 ( const uint16_t * x ) : simd256bit ( ( const void * ) x ) { } <nl> + <nl> + std : : string elements_to_string ( const char * fmt ) const { <nl> + uint16_t bytes [ 16 ] ; <nl> + storeu ( ( void * ) bytes ) ; <nl> + char res [ 1000 ] ; <nl> + char * ptr = res ; <nl> + for ( int i 
= 0 ; i < 16 ; i + + ) { <nl> + ptr + = sprintf ( ptr , fmt , bytes [ i ] ) ; <nl> + } <nl> + / / strip last , <nl> + ptr [ - 1 ] = 0 ; <nl> + return std : : string ( res ) ; <nl> + } <nl> + <nl> + std : : string hex ( ) const { <nl> + return elements_to_string ( " % 02x , " ) ; <nl> + } <nl> + <nl> + std : : string dec ( ) const { <nl> + return elements_to_string ( " % 3d , " ) ; <nl> + } <nl> + <nl> + void set1 ( uint16_t x ) { <nl> + i = _mm256_set1_epi16 ( ( short ) x ) ; <nl> + } <nl> + <nl> + / / shift must be known at compile time <nl> + simd16uint16 operator > > ( const int shift ) const { <nl> + return simd16uint16 ( _mm256_srli_epi16 ( i , shift ) ) ; <nl> + } <nl> + <nl> + / / shift must be known at compile time <nl> + simd16uint16 operator < < ( const int shift ) const { <nl> + return simd16uint16 ( _mm256_slli_epi16 ( i , shift ) ) ; <nl> + } <nl> + <nl> + simd16uint16 operator + = ( simd16uint16 other ) { <nl> + i = _mm256_add_epi16 ( i , other . i ) ; <nl> + return * this ; <nl> + } <nl> + <nl> + simd16uint16 operator - = ( simd16uint16 other ) { <nl> + i = _mm256_sub_epi16 ( i , other . i ) ; <nl> + return * this ; <nl> + } <nl> + <nl> + simd16uint16 operator + ( simd16uint16 other ) const { <nl> + return simd16uint16 ( _mm256_add_epi16 ( i , other . i ) ) ; <nl> + } <nl> + <nl> + simd16uint16 operator - ( simd16uint16 other ) const { <nl> + return simd16uint16 ( _mm256_sub_epi16 ( i , other . i ) ) ; <nl> + } <nl> + <nl> + simd16uint16 operator & ( simd256bit other ) const { <nl> + return simd16uint16 ( _mm256_and_si256 ( i , other . i ) ) ; <nl> + } <nl> + <nl> + simd16uint16 operator | ( simd256bit other ) const { <nl> + return simd16uint16 ( _mm256_or_si256 ( i , other . i ) ) ; <nl> + } <nl> + <nl> + / / returns binary masks <nl> + simd16uint16 operator = = ( simd256bit other ) const { <nl> + return simd16uint16 ( _mm256_cmpeq_epi16 ( i , other . i ) ) ; <nl> + } <nl> + <nl> + simd16uint16 operator ~ ( ) const { <nl> + return simd16uint16 ( _mm256_xor_si256 ( i , _mm256_set1_epi32 ( - 1 ) ) ) ; <nl> + } <nl> + <nl> + / / get scalar at index 0 <nl> + uint16_t get_scalar_0 ( ) const { <nl> + return _mm256_extract_epi16 ( i , 0 ) ; <nl> + } <nl> + <nl> + / / mask of elements where this > = thresh <nl> + / / 2 bit per component : 16 * 2 = 32 bit <nl> + uint32_t ge_mask ( simd16uint16 thresh ) const { <nl> + __m256i j = thresh . i ; <nl> + __m256i max = _mm256_max_epu16 ( i , j ) ; <nl> + __m256i ge = _mm256_cmpeq_epi16 ( i , max ) ; <nl> + return _mm256_movemask_epi8 ( ge ) ; <nl> + } <nl> + <nl> + uint32_t le_mask ( simd16uint16 thresh ) const { <nl> + return thresh . ge_mask ( * this ) ; <nl> + } <nl> + <nl> + uint32_t gt_mask ( simd16uint16 thresh ) const { <nl> + return ~ le_mask ( thresh ) ; <nl> + } <nl> + <nl> + bool all_gt ( simd16uint16 thresh ) const { <nl> + return le_mask ( thresh ) = = 0 ; <nl> + } <nl> + <nl> + / / for debugging only <nl> + uint16_t operator [ ] ( int i ) const { <nl> + ALIGNED ( 32 ) uint16_t tab [ 16 ] ; <nl> + store ( tab ) ; <nl> + return tab [ i ] ; <nl> + } <nl> + <nl> + void accu_min ( simd16uint16 incoming ) { <nl> + i = _mm256_min_epu16 ( i , incoming . i ) ; <nl> + } <nl> + <nl> + void accu_max ( simd16uint16 incoming ) { <nl> + i = _mm256_max_epu16 ( i , incoming . i ) ; <nl> + } <nl> + <nl> + } ; <nl> + <nl> + / / not really a std : : min because it returns an elementwise min <nl> + inline simd16uint16 min ( simd16uint16 a , simd16uint16 b ) { <nl> + return simd16uint16 ( _mm256_min_epu16 ( a . i , b . 
i ) ) ; <nl> + } <nl> + <nl> + inline simd16uint16 max ( simd16uint16 a , simd16uint16 b ) { <nl> + return simd16uint16 ( _mm256_max_epu16 ( a . i , b . i ) ) ; <nl> + } <nl> + <nl> + <nl> + <nl> + / / decompose in 128 - lanes : a = ( a0 , a1 ) , b = ( b0 , b1 ) <nl> + / / return ( a0 + a1 , b0 + b1 ) <nl> + / / TODO find a better name <nl> + inline simd16uint16 combine2x2 ( simd16uint16 a , simd16uint16 b ) { <nl> + <nl> + __m256i a1b0 = _mm256_permute2f128_si256 ( a . i , b . i , 0x21 ) ; <nl> + __m256i a0b1 = _mm256_blend_epi32 ( a . i , b . i , 0xF0 ) ; <nl> + <nl> + return simd16uint16 ( a1b0 ) + simd16uint16 ( a0b1 ) ; <nl> + } <nl> + <nl> + / / compare d0 and d1 to thr , return 32 bits corresponding to the concatenation <nl> + / / of d0 and d1 with thr <nl> + inline uint32_t cmp_ge32 ( simd16uint16 d0 , simd16uint16 d1 , simd16uint16 thr ) { <nl> + <nl> + __m256i max0 = _mm256_max_epu16 ( d0 . i , thr . i ) ; <nl> + __m256i ge0 = _mm256_cmpeq_epi16 ( d0 . i , max0 ) ; <nl> + <nl> + __m256i max1 = _mm256_max_epu16 ( d1 . i , thr . i ) ; <nl> + __m256i ge1 = _mm256_cmpeq_epi16 ( d1 . i , max1 ) ; <nl> + <nl> + __m256i ge01 = _mm256_packs_epi16 ( ge0 , ge1 ) ; <nl> + <nl> + / / easier than manipulating bit fields afterwards <nl> + ge01 = _mm256_permute4x64_epi64 ( ge01 , 0 | ( 2 < < 2 ) | ( 1 < < 4 ) | ( 3 < < 6 ) ) ; <nl> + uint32_t ge = _mm256_movemask_epi8 ( ge01 ) ; <nl> + <nl> + return ge ; <nl> + } <nl> + <nl> + <nl> + inline uint32_t cmp_le32 ( simd16uint16 d0 , simd16uint16 d1 , simd16uint16 thr ) { <nl> + <nl> + __m256i max0 = _mm256_min_epu16 ( d0 . i , thr . i ) ; <nl> + __m256i ge0 = _mm256_cmpeq_epi16 ( d0 . i , max0 ) ; <nl> + <nl> + __m256i max1 = _mm256_min_epu16 ( d1 . i , thr . i ) ; <nl> + __m256i ge1 = _mm256_cmpeq_epi16 ( d1 . i , max1 ) ; <nl> + <nl> + __m256i ge01 = _mm256_packs_epi16 ( ge0 , ge1 ) ; <nl> + <nl> + / / easier than manipulating bit fields afterwards <nl> + ge01 = _mm256_permute4x64_epi64 ( ge01 , 0 | ( 2 < < 2 ) | ( 1 < < 4 ) | ( 3 < < 6 ) ) ; <nl> + uint32_t ge = _mm256_movemask_epi8 ( ge01 ) ; <nl> + <nl> + return ge ; <nl> + } <nl> + <nl> + <nl> + / / vector of 32 unsigned 8 - bit integers <nl> + struct simd32uint8 : simd256bit { <nl> + <nl> + <nl> + simd32uint8 ( ) { } <nl> + <nl> + explicit simd32uint8 ( __m256i i ) : simd256bit ( i ) { } <nl> + <nl> + explicit simd32uint8 ( int x ) : simd256bit ( _mm256_set1_epi8 ( x ) ) { } <nl> + <nl> + explicit simd32uint8 ( uint8_t x ) : simd256bit ( _mm256_set1_epi8 ( x ) ) { } <nl> + <nl> + explicit simd32uint8 ( simd256bit x ) : simd256bit ( x ) { } <nl> + <nl> + explicit simd32uint8 ( const uint8_t * x ) : simd256bit ( ( const void * ) x ) { } <nl> + <nl> + std : : string elements_to_string ( const char * fmt ) const { <nl> + uint8_t bytes [ 32 ] ; <nl> + storeu ( ( void * ) bytes ) ; <nl> + char res [ 1000 ] ; <nl> + char * ptr = res ; <nl> + for ( int i = 0 ; i < 32 ; i + + ) { <nl> + ptr + = sprintf ( ptr , fmt , bytes [ i ] ) ; <nl> + } <nl> + / / strip last , <nl> + ptr [ - 1 ] = 0 ; <nl> + return std : : string ( res ) ; <nl> + } <nl> + <nl> + std : : string hex ( ) const { <nl> + return elements_to_string ( " % 02x , " ) ; <nl> + } <nl> + <nl> + std : : string dec ( ) const { <nl> + return elements_to_string ( " % 3d , " ) ; <nl> + } <nl> + <nl> + void set1 ( uint8_t x ) { <nl> + i = _mm256_set1_epi8 ( ( char ) x ) ; <nl> + } <nl> + <nl> + simd32uint8 operator & ( simd256bit other ) const { <nl> + return simd32uint8 ( _mm256_and_si256 ( i , other . 
i ) ) ; <nl> + } <nl> + <nl> + simd32uint8 operator + ( simd32uint8 other ) const { <nl> + return simd32uint8 ( _mm256_add_epi8 ( i , other . i ) ) ; <nl> + } <nl> + <nl> + simd32uint8 lookup_2_lanes ( simd32uint8 idx ) const { <nl> + return simd32uint8 ( _mm256_shuffle_epi8 ( i , idx . i ) ) ; <nl> + } <nl> + <nl> + / / extract + 0 - extend lane <nl> + / / this operation is slow ( 3 cycles ) <nl> + simd16uint16 lane0_as_uint16 ( ) const { <nl> + __m128i x = _mm256_extracti128_si256 ( i , 0 ) ; <nl> + return simd16uint16 ( _mm256_cvtepu8_epi16 ( x ) ) ; <nl> + } <nl> + <nl> + simd16uint16 lane1_as_uint16 ( ) const { <nl> + __m128i x = _mm256_extracti128_si256 ( i , 1 ) ; <nl> + return simd16uint16 ( _mm256_cvtepu8_epi16 ( x ) ) ; <nl> + } <nl> + <nl> + simd32uint8 operator + = ( simd32uint8 other ) { <nl> + i = _mm256_add_epi8 ( i , other . i ) ; <nl> + return * this ; <nl> + } <nl> + <nl> + / / for debugging only <nl> + uint8_t operator [ ] ( int i ) const { <nl> + ALIGNED ( 32 ) uint8_t tab [ 32 ] ; <nl> + store ( tab ) ; <nl> + return tab [ i ] ; <nl> + } <nl> + <nl> + } ; <nl> + <nl> + / / convert with saturation <nl> + / / careful : this does not cross lanes , so the order is weird <nl> + inline simd32uint8 uint16_to_uint8_saturate ( simd16uint16 a , simd16uint16 b ) { <nl> + return simd32uint8 ( _mm256_packs_epi16 ( a . i , b . i ) ) ; <nl> + } <nl> + <nl> + / / / get most significant bit of each byte <nl> + inline uint32_t get_MSBs ( simd32uint8 a ) { <nl> + return _mm256_movemask_epi8 ( a . i ) ; <nl> + } <nl> + <nl> + / / / use MSB of each byte of mask to select a byte between a and b <nl> + inline simd32uint8 blendv ( simd32uint8 a , simd32uint8 b , simd32uint8 mask ) { <nl> + return simd32uint8 ( _mm256_blendv_epi8 ( a . i , b . i , mask . i ) ) ; <nl> + } <nl> + <nl> + <nl> + <nl> + / / / vector of 8 unsigned 32 - bit integers <nl> + struct simd8uint32 : simd256bit { <nl> + simd8uint32 ( ) { } <nl> + <nl> + explicit simd8uint32 ( __m256i i ) : simd256bit ( i ) { } <nl> + <nl> + explicit simd8uint32 ( uint32_t x ) : simd256bit ( _mm256_set1_epi32 ( x ) ) { } <nl> + <nl> + explicit simd8uint32 ( simd256bit x ) : simd256bit ( x ) { } <nl> + <nl> + explicit simd8uint32 ( const uint8_t * x ) : simd256bit ( ( const void * ) x ) { } <nl> + <nl> + std : : string elements_to_string ( const char * fmt ) const { <nl> + uint32_t bytes [ 8 ] ; <nl> + storeu ( ( void * ) bytes ) ; <nl> + char res [ 1000 ] ; <nl> + char * ptr = res ; <nl> + for ( int i = 0 ; i < 8 ; i + + ) { <nl> + ptr + = sprintf ( ptr , fmt , bytes [ i ] ) ; <nl> + } <nl> + / / strip last , <nl> + ptr [ - 1 ] = 0 ; <nl> + return std : : string ( res ) ; <nl> + } <nl> + <nl> + std : : string hex ( ) const { <nl> + return elements_to_string ( " % 08x , " ) ; <nl> + } <nl> + <nl> + std : : string dec ( ) const { <nl> + return elements_to_string ( " % 10d , " ) ; <nl> + } <nl> + <nl> + void set1 ( uint32_t x ) { <nl> + i = _mm256_set1_epi32 ( ( int ) x ) ; <nl> + } <nl> + <nl> + } ; <nl> + <nl> + struct simd8float32 : simd256bit { <nl> + <nl> + simd8float32 ( ) { } <nl> + <nl> + <nl> + explicit simd8float32 ( simd256bit x ) : simd256bit ( x ) { } <nl> + <nl> + explicit simd8float32 ( __m256 x ) : simd256bit ( x ) { } <nl> + <nl> + explicit simd8float32 ( float x ) : simd256bit ( _mm256_set1_ps ( x ) ) { } <nl> + <nl> + explicit simd8float32 ( const float * x ) : simd256bit ( _mm256_load_ps ( x ) ) { } <nl> + <nl> + simd8float32 operator * ( simd8float32 other ) const { <nl> + return simd8float32 ( _mm256_mul_ps ( f , other . 
f ) ) ; <nl> + } <nl> + <nl> + simd8float32 operator + ( simd8float32 other ) const { <nl> + return simd8float32 ( _mm256_add_ps ( f , other . f ) ) ; <nl> + } <nl> + <nl> + simd8float32 operator - ( simd8float32 other ) const { <nl> + return simd8float32 ( _mm256_sub_ps ( f , other . f ) ) ; <nl> + } <nl> + <nl> + std : : string tostring ( ) const { <nl> + float tab [ 8 ] ; <nl> + storeu ( ( void * ) tab ) ; <nl> + char res [ 1000 ] ; <nl> + char * ptr = res ; <nl> + for ( int i = 0 ; i < 8 ; i + + ) { <nl> + ptr + = sprintf ( ptr , " % g , " , tab [ i ] ) ; <nl> + } <nl> + / / strip last , <nl> + ptr [ - 1 ] = 0 ; <nl> + return std : : string ( res ) ; <nl> + } <nl> + <nl> + } ; <nl> + <nl> + inline simd8float32 hadd ( simd8float32 a , simd8float32 b ) { <nl> + return simd8float32 ( _mm256_hadd_ps ( a . f , b . f ) ) ; <nl> + } <nl> + <nl> + inline simd8float32 unpacklo ( simd8float32 a , simd8float32 b ) { <nl> + return simd8float32 ( _mm256_unpacklo_ps ( a . f , b . f ) ) ; <nl> + } <nl> + <nl> + inline simd8float32 unpackhi ( simd8float32 a , simd8float32 b ) { <nl> + return simd8float32 ( _mm256_unpackhi_ps ( a . f , b . f ) ) ; <nl> + } <nl> + <nl> + / / compute a * b + c <nl> + inline simd8float32 fmadd ( simd8float32 a , simd8float32 b , simd8float32 c ) { <nl> + return simd8float32 ( _mm256_fmadd_ps ( a . f , b . f , c . f ) ) ; <nl> + } <nl> + <nl> + <nl> + } / / namespace faiss <nl> new file mode 100644 <nl> index 000000000 . . 94103ec4f <nl> mmm / dev / null <nl> ppp b / faiss / utils / simdlib_emulated . h <nl> <nl> + / * * <nl> + * Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + * <nl> + * This source code is licensed under the MIT license found in the <nl> + * LICENSE file in the root directory of this source tree . <nl> + * / <nl> + <nl> + # pragma once <nl> + <nl> + # include < string > <nl> + # include < cstdint > <nl> + # include < cstring > <nl> + # include < functional > <nl> + # include < algorithm > <nl> + <nl> + namespace faiss { <nl> + <nl> + <nl> + struct simd256bit { <nl> + <nl> + union { <nl> + uint8_t u8 [ 32 ] ; <nl> + uint16_t u16 [ 16 ] ; <nl> + uint32_t u32 [ 8 ] ; <nl> + float f32 [ 8 ] ; <nl> + } ; <nl> + <nl> + simd256bit ( ) { } <nl> + <nl> + explicit simd256bit ( const void * x ) <nl> + { <nl> + memcpy ( u8 , x , 32 ) ; <nl> + } <nl> + <nl> + void clear ( ) { <nl> + memset ( u8 , 0 , 32 ) ; <nl> + } <nl> + <nl> + void storeu ( void * ptr ) const { <nl> + memcpy ( ptr , u8 , 32 ) ; <nl> + } <nl> + <nl> + void loadu ( const void * ptr ) { <nl> + memcpy ( u8 , ptr , 32 ) ; <nl> + } <nl> + <nl> + void store ( void * ptr ) const { <nl> + storeu ( ptr ) ; <nl> + } <nl> + <nl> + void bin ( char bits [ 257 ] ) const { <nl> + const char * bytes = ( char * ) this - > u8 ; <nl> + for ( int i = 0 ; i < 256 ; i + + ) { <nl> + bits [ i ] = ' 0 ' + ( ( bytes [ i / 8 ] > > ( i % 8 ) ) & 1 ) ; <nl> + } <nl> + bits [ 256 ] = 0 ; <nl> + } <nl> + <nl> + std : : string bin ( ) const { <nl> + char bits [ 257 ] ; <nl> + bin ( bits ) ; <nl> + return std : : string ( bits ) ; <nl> + } <nl> + <nl> + } ; <nl> + <nl> + <nl> + <nl> + <nl> + / / / vector of 16 elements in uint16 <nl> + struct simd16uint16 : simd256bit { <nl> + simd16uint16 ( ) { } <nl> + <nl> + explicit simd16uint16 ( int x ) { <nl> + set1 ( x ) ; <nl> + } <nl> + <nl> + explicit simd16uint16 ( uint16_t x ) { <nl> + set1 ( x ) ; <nl> + } <nl> + <nl> + explicit simd16uint16 ( simd256bit x ) : simd256bit ( x ) { } <nl> + <nl> + explicit simd16uint16 ( const uint16_t * x ) : simd256bit ( ( const void * ) 
x ) { } <nl> + <nl> + std : : string elements_to_string ( const char * fmt ) const { <nl> + char res [ 1000 ] , * ptr = res ; <nl> + for ( int i = 0 ; i < 16 ; i + + ) { <nl> + ptr + = sprintf ( ptr , fmt , u16 [ i ] ) ; <nl> + } <nl> + / / strip last , <nl> + ptr [ - 1 ] = 0 ; <nl> + return std : : string ( res ) ; <nl> + } <nl> + <nl> + std : : string hex ( ) const { <nl> + return elements_to_string ( " % 02x , " ) ; <nl> + } <nl> + <nl> + std : : string dec ( ) const { <nl> + return elements_to_string ( " % 3d , " ) ; <nl> + } <nl> + <nl> + static simd16uint16 unary_func ( <nl> + simd16uint16 a , std : : function < uint16_t ( uint16_t ) > f ) <nl> + { <nl> + simd16uint16 c ; <nl> + for ( int j = 0 ; j < 16 ; j + + ) { <nl> + c . u16 [ j ] = f ( a . u16 [ j ] ) ; <nl> + } <nl> + return c ; <nl> + } <nl> + <nl> + <nl> + static simd16uint16 binary_func ( <nl> + simd16uint16 a , simd16uint16 b , <nl> + std : : function < uint16_t ( uint16_t , uint16_t ) > f ) <nl> + { <nl> + simd16uint16 c ; <nl> + for ( int j = 0 ; j < 16 ; j + + ) { <nl> + c . u16 [ j ] = f ( a . u16 [ j ] , b . u16 [ j ] ) ; <nl> + } <nl> + return c ; <nl> + } <nl> + <nl> + void set1 ( uint16_t x ) { <nl> + for ( int i = 0 ; i < 16 ; i + + ) { <nl> + u16 [ i ] = x ; <nl> + } <nl> + } <nl> + <nl> + / / shift must be known at compile time <nl> + simd16uint16 operator > > ( const int shift ) const { <nl> + return unary_func ( * this , [ shift ] ( uint16_t a ) { return a > > shift ; } ) ; <nl> + } <nl> + <nl> + <nl> + / / shift must be known at compile time <nl> + simd16uint16 operator < < ( const int shift ) const { <nl> + return unary_func ( * this , [ shift ] ( uint16_t a ) { return a < < shift ; } ) ; <nl> + } <nl> + <nl> + simd16uint16 operator + = ( simd16uint16 other ) { <nl> + * this = * this + other ; <nl> + return * this ; <nl> + } <nl> + <nl> + simd16uint16 operator - = ( simd16uint16 other ) { <nl> + * this = * this - other ; <nl> + return * this ; <nl> + } <nl> + <nl> + simd16uint16 operator + ( simd16uint16 other ) const { <nl> + return binary_func ( * this , other , <nl> + [ ] ( uint16_t a , uint16_t b ) { return a + b ; } <nl> + ) ; <nl> + } <nl> + <nl> + simd16uint16 operator - ( simd16uint16 other ) const { <nl> + return binary_func ( * this , other , <nl> + [ ] ( uint16_t a , uint16_t b ) { return a - b ; } <nl> + ) ; <nl> + } <nl> + <nl> + simd16uint16 operator & ( simd256bit other ) const { <nl> + return binary_func ( * this , simd16uint16 ( other ) , <nl> + [ ] ( uint16_t a , uint16_t b ) { return a & b ; } <nl> + ) ; <nl> + } <nl> + <nl> + simd16uint16 operator | ( simd256bit other ) const { <nl> + return binary_func ( * this , simd16uint16 ( other ) , <nl> + [ ] ( uint16_t a , uint16_t b ) { return a | b ; } <nl> + ) ; <nl> + } <nl> + <nl> + / / returns binary masks <nl> + simd16uint16 operator = = ( simd16uint16 other ) const { <nl> + return binary_func ( * this , other , <nl> + [ ] ( uint16_t a , uint16_t b ) { return a = = b ? 0xffff : 0 ; } <nl> + ) ; <nl> + } <nl> + <nl> + simd16uint16 operator ~ ( ) const { <nl> + return unary_func ( * this , [ ] ( uint16_t a ) { return ~ a ; } ) ; <nl> + } <nl> + <nl> + / / get scalar at index 0 <nl> + uint16_t get_scalar_0 ( ) const { <nl> + return u16 [ 0 ] ; <nl> + } <nl> + <nl> + / / mask of elements where this > = thresh <nl> + / / 2 bit per component : 16 * 2 = 32 bit <nl> + uint32_t ge_mask ( simd16uint16 thresh ) const { <nl> + uint32_t gem = 0 ; <nl> + for ( int j = 0 ; j < 16 ; j + + ) { <nl> + if ( u16 [ j ] > = thresh . 
u16 [ j ] ) { <nl> + gem | = 3 < < ( j * 2 ) ; <nl> + } <nl> + } <nl> + return gem ; <nl> + } <nl> + <nl> + uint32_t le_mask ( simd16uint16 thresh ) const { <nl> + return thresh . ge_mask ( * this ) ; <nl> + } <nl> + <nl> + uint32_t gt_mask ( simd16uint16 thresh ) const { <nl> + return ~ le_mask ( thresh ) ; <nl> + } <nl> + <nl> + bool all_gt ( simd16uint16 thresh ) const { <nl> + return le_mask ( thresh ) = = 0 ; <nl> + } <nl> + <nl> + / / for debugging only <nl> + uint16_t operator [ ] ( int i ) const { <nl> + return u16 [ i ] ; <nl> + } <nl> + <nl> + void accu_min ( simd16uint16 incoming ) { <nl> + for ( int j = 0 ; j < 16 ; j + + ) { <nl> + if ( incoming . u16 [ j ] < u16 [ j ] ) { <nl> + u16 [ j ] = incoming . u16 [ j ] ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void accu_max ( simd16uint16 incoming ) { <nl> + for ( int j = 0 ; j < 16 ; j + + ) { <nl> + if ( incoming . u16 [ j ] > u16 [ j ] ) { <nl> + u16 [ j ] = incoming . u16 [ j ] ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + } ; <nl> + <nl> + <nl> + / / not really a std : : min because it returns an elementwise min <nl> + inline simd16uint16 min ( simd16uint16 av , simd16uint16 bv ) { <nl> + return simd16uint16 : : binary_func ( av , bv , <nl> + [ ] ( uint16_t a , uint16_t b ) { return std : : min ( a , b ) ; } <nl> + ) ; <nl> + } <nl> + <nl> + inline simd16uint16 max ( simd16uint16 av , simd16uint16 bv ) { <nl> + return simd16uint16 : : binary_func ( av , bv , <nl> + [ ] ( uint16_t a , uint16_t b ) { return std : : max ( a , b ) ; } <nl> + ) ; <nl> + } <nl> + <nl> + / / decompose in 128 - lanes : a = ( a0 , a1 ) , b = ( b0 , b1 ) <nl> + / / return ( a0 + a1 , b0 + b1 ) <nl> + / / TODO find a better name <nl> + inline simd16uint16 combine2x2 ( simd16uint16 a , simd16uint16 b ) { <nl> + simd16uint16 c ; <nl> + for ( int j = 0 ; j < 8 ; j + + ) { <nl> + c . u16 [ j ] = a . u16 [ j ] + a . u16 [ j + 8 ] ; <nl> + c . u16 [ j + 8 ] = b . u16 [ j ] + b . u16 [ j + 8 ] ; <nl> + } <nl> + return c ; <nl> + } <nl> + <nl> + / / compare d0 and d1 to thr , return 32 bits corresponding to the concatenation <nl> + / / of d0 and d1 with thr <nl> + inline uint32_t cmp_ge32 ( simd16uint16 d0 , simd16uint16 d1 , simd16uint16 thr ) { <nl> + uint32_t gem = 0 ; <nl> + for ( int j = 0 ; j < 16 ; j + + ) { <nl> + if ( d0 . u16 [ j ] > = thr . u16 [ j ] ) { <nl> + gem | = 1 < < j ; <nl> + } <nl> + if ( d1 . u16 [ j ] > = thr . u16 [ j ] ) { <nl> + gem | = 1 < < ( j + 16 ) ; <nl> + } <nl> + } <nl> + return gem ; <nl> + } <nl> + <nl> + <nl> + inline uint32_t cmp_le32 ( simd16uint16 d0 , simd16uint16 d1 , simd16uint16 thr ) { <nl> + uint32_t gem = 0 ; <nl> + for ( int j = 0 ; j < 16 ; j + + ) { <nl> + if ( d0 . u16 [ j ] < = thr . u16 [ j ] ) { <nl> + gem | = 1 < < j ; <nl> + } <nl> + if ( d1 . u16 [ j ] < = thr . 
u16 [ j ] ) { <nl> + gem | = 1 < < ( j + 16 ) ; <nl> + } <nl> + } <nl> + return gem ; <nl> + } <nl> + <nl> + <nl> + <nl> + / / vector of 32 unsigned 8 - bit integers <nl> + struct simd32uint8 : simd256bit { <nl> + <nl> + simd32uint8 ( ) { } <nl> + <nl> + explicit simd32uint8 ( int x ) { set1 ( x ) ; } <nl> + <nl> + explicit simd32uint8 ( uint8_t x ) { set1 ( x ) ; } <nl> + <nl> + explicit simd32uint8 ( simd256bit x ) : simd256bit ( x ) { } <nl> + <nl> + explicit simd32uint8 ( const uint8_t * x ) : simd256bit ( ( const void * ) x ) { } <nl> + <nl> + std : : string elements_to_string ( const char * fmt ) const { <nl> + char res [ 1000 ] , * ptr = res ; <nl> + for ( int i = 0 ; i < 32 ; i + + ) { <nl> + ptr + = sprintf ( ptr , fmt , u8 [ i ] ) ; <nl> + } <nl> + / / strip last , <nl> + ptr [ - 1 ] = 0 ; <nl> + return std : : string ( res ) ; <nl> + } <nl> + <nl> + std : : string hex ( ) const { <nl> + return elements_to_string ( " % 02x , " ) ; <nl> + } <nl> + <nl> + std : : string dec ( ) const { <nl> + return elements_to_string ( " % 3d , " ) ; <nl> + } <nl> + <nl> + void set1 ( uint8_t x ) { <nl> + for ( int j = 0 ; j < 32 ; j + + ) { <nl> + u8 [ j ] = x ; <nl> + } <nl> + } <nl> + <nl> + static simd32uint8 binary_func ( <nl> + simd32uint8 a , simd32uint8 b , <nl> + std : : function < uint8_t ( uint8_t , uint8_t ) > f ) <nl> + { <nl> + simd32uint8 c ; <nl> + for ( int j = 0 ; j < 32 ; j + + ) { <nl> + c . u8 [ j ] = f ( a . u8 [ j ] , b . u8 [ j ] ) ; <nl> + } <nl> + return c ; <nl> + } <nl> + <nl> + <nl> + simd32uint8 operator & ( simd256bit other ) const { <nl> + return binary_func ( * this , simd32uint8 ( other ) , <nl> + [ ] ( uint8_t a , uint8_t b ) { return a & b ; } <nl> + ) ; <nl> + } <nl> + <nl> + simd32uint8 operator + ( simd32uint8 other ) const { <nl> + return binary_func ( * this , other , <nl> + [ ] ( uint8_t a , uint8_t b ) { return a + b ; } <nl> + ) ; <nl> + } <nl> + <nl> + / / The very important operation that everything relies on <nl> + simd32uint8 lookup_2_lanes ( simd32uint8 idx ) const { <nl> + simd32uint8 c ; <nl> + for ( int j = 0 ; j < 32 ; j + + ) { <nl> + if ( idx . u8 [ j ] & 0x80 ) { <nl> + c . u8 [ j ] = 0 ; <nl> + } else { <nl> + uint8_t i = idx . u8 [ j ] & 15 ; <nl> + if ( j < 16 ) { <nl> + c . u8 [ j ] = u8 [ i ] ; <nl> + } else { <nl> + c . u8 [ j ] = u8 [ 16 + i ] ; <nl> + } <nl> + } <nl> + } <nl> + return c ; <nl> + } <nl> + <nl> + / / extract + 0 - extend lane <nl> + / / this operation is slow ( 3 cycles ) <nl> + <nl> + simd32uint8 operator + = ( simd32uint8 other ) { <nl> + * this = * this + other ; <nl> + return * this ; <nl> + } <nl> + <nl> + / / for debugging only <nl> + uint8_t operator [ ] ( int i ) const { <nl> + return u8 [ i ] ; <nl> + } <nl> + <nl> + } ; <nl> + <nl> + <nl> + / / convert with saturation <nl> + / / careful : this does not cross lanes , so the order is weird <nl> + inline simd32uint8 uint16_to_uint8_saturate ( simd16uint16 a , simd16uint16 b ) { <nl> + simd32uint8 c ; <nl> + <nl> + auto saturate_16_to_8 = [ ] ( uint16_t x ) { <nl> + return x > = 256 ? 0xff : x ; <nl> + } ; <nl> + <nl> + for ( int i = 0 ; i < 8 ; i + + ) { <nl> + c . u8 [ i ] = saturate_16_to_8 ( a . u16 [ i ] ) ; <nl> + c . u8 [ 8 + i ] = saturate_16_to_8 ( b . u16 [ i ] ) ; <nl> + c . u8 [ 16 + i ] = saturate_16_to_8 ( a . u16 [ 8 + i ] ) ; <nl> + c . u8 [ 24 + i ] = saturate_16_to_8 ( b . 
u16 [ 8 + i ] ) ; <nl> + } <nl> + return c ; <nl> + } <nl> + <nl> + / / / get most significant bit of each byte <nl> + inline uint32_t get_MSBs ( simd32uint8 a ) { <nl> + uint32_t res = 0 ; <nl> + for ( int i = 0 ; i < 32 ; i + + ) { <nl> + if ( a . u8 [ i ] & 0x80 ) { <nl> + res | = 1 < < i ; <nl> + } <nl> + } <nl> + return res ; <nl> + } <nl> + <nl> + / / / use MSB of each byte of mask to select a byte between a and b <nl> + inline simd32uint8 blendv ( simd32uint8 a , simd32uint8 b , simd32uint8 mask ) { <nl> + simd32uint8 c ; <nl> + for ( int i = 0 ; i < 32 ; i + + ) { <nl> + if ( mask . u8 [ i ] & 0x80 ) { <nl> + c . u8 [ i ] = b . u8 [ i ] ; <nl> + } else { <nl> + c . u8 [ i ] = a . u8 [ i ] ; <nl> + } <nl> + } <nl> + return c ; <nl> + } <nl> + <nl> + <nl> + <nl> + <nl> + / / / vector of 8 unsigned 32 - bit integers <nl> + struct simd8uint32 : simd256bit { <nl> + simd8uint32 ( ) { } <nl> + <nl> + <nl> + explicit simd8uint32 ( uint32_t x ) { set1 ( x ) ; } <nl> + <nl> + explicit simd8uint32 ( simd256bit x ) : simd256bit ( x ) { } <nl> + <nl> + explicit simd8uint32 ( const uint8_t * x ) : simd256bit ( ( const void * ) x ) { } <nl> + <nl> + std : : string elements_to_string ( const char * fmt ) const { <nl> + char res [ 1000 ] , * ptr = res ; <nl> + for ( int i = 0 ; i < 8 ; i + + ) { <nl> + ptr + = sprintf ( ptr , fmt , u32 [ i ] ) ; <nl> + } <nl> + / / strip last , <nl> + ptr [ - 1 ] = 0 ; <nl> + return std : : string ( res ) ; <nl> + } <nl> + <nl> + std : : string hex ( ) const { <nl> + return elements_to_string ( " % 08x , " ) ; <nl> + } <nl> + <nl> + std : : string dec ( ) const { <nl> + return elements_to_string ( " % 10d , " ) ; <nl> + } <nl> + <nl> + void set1 ( uint32_t x ) { <nl> + for ( int i = 0 ; i < 8 ; i + + ) { <nl> + u32 [ i ] = x ; <nl> + } <nl> + } <nl> + <nl> + } ; <nl> + <nl> + struct simd8float32 : simd256bit { <nl> + <nl> + simd8float32 ( ) { } <nl> + <nl> + explicit simd8float32 ( simd256bit x ) : simd256bit ( x ) { } <nl> + <nl> + explicit simd8float32 ( float x ) { set1 ( x ) ; } <nl> + <nl> + explicit simd8float32 ( const float * x ) { loadu ( ( void * ) x ) ; } <nl> + <nl> + void set1 ( float x ) { <nl> + for ( int i = 0 ; i < 8 ; i + + ) { <nl> + f32 [ i ] = x ; <nl> + } <nl> + } <nl> + <nl> + static simd8float32 binary_func ( <nl> + simd8float32 a , simd8float32 b , <nl> + std : : function < float ( float , float ) > f ) <nl> + { <nl> + simd8float32 c ; <nl> + for ( int j = 0 ; j < 8 ; j + + ) { <nl> + c . f32 [ j ] = f ( a . f32 [ j ] , b . 
f32 [ j ] ) ; <nl> + } <nl> + return c ; <nl> + } <nl> + <nl> + simd8float32 operator * ( simd8float32 other ) const { <nl> + return binary_func ( * this , other , <nl> + [ ] ( float a , float b ) { return a * b ; } <nl> + ) ; <nl> + } <nl> + <nl> + simd8float32 operator + ( simd8float32 other ) const { <nl> + return binary_func ( * this , other , <nl> + [ ] ( float a , float b ) { return a + b ; } <nl> + ) ; <nl> + } <nl> + <nl> + simd8float32 operator - ( simd8float32 other ) const { <nl> + return binary_func ( * this , other , <nl> + [ ] ( float a , float b ) { return a - b ; } <nl> + ) ; <nl> + } <nl> + <nl> + std : : string tostring ( ) const { <nl> + char res [ 1000 ] , * ptr = res ; <nl> + for ( int i = 0 ; i < 8 ; i + + ) { <nl> + ptr + = sprintf ( ptr , " % g , " , f32 [ i ] ) ; <nl> + } <nl> + / / strip last , <nl> + ptr [ - 1 ] = 0 ; <nl> + return std : : string ( res ) ; <nl> + } <nl> + <nl> + } ; <nl> + <nl> + <nl> + / / hadd does not cross lanes <nl> + inline simd8float32 hadd ( simd8float32 a , simd8float32 b ) { <nl> + simd8float32 c ; <nl> + c . f32 [ 0 ] = a . f32 [ 0 ] + a . f32 [ 1 ] ; <nl> + c . f32 [ 1 ] = a . f32 [ 2 ] + a . f32 [ 3 ] ; <nl> + c . f32 [ 2 ] = b . f32 [ 0 ] + b . f32 [ 1 ] ; <nl> + c . f32 [ 3 ] = b . f32 [ 2 ] + b . f32 [ 3 ] ; <nl> + <nl> + c . f32 [ 4 ] = a . f32 [ 4 ] + a . f32 [ 5 ] ; <nl> + c . f32 [ 5 ] = a . f32 [ 6 ] + a . f32 [ 7 ] ; <nl> + c . f32 [ 6 ] = b . f32 [ 4 ] + b . f32 [ 5 ] ; <nl> + c . f32 [ 7 ] = b . f32 [ 6 ] + b . f32 [ 7 ] ; <nl> + <nl> + return c ; <nl> + } <nl> + <nl> + inline simd8float32 unpacklo ( simd8float32 a , simd8float32 b ) { <nl> + simd8float32 c ; <nl> + c . f32 [ 0 ] = a . f32 [ 0 ] ; <nl> + c . f32 [ 1 ] = b . f32 [ 0 ] ; <nl> + c . f32 [ 2 ] = a . f32 [ 1 ] ; <nl> + c . f32 [ 3 ] = b . f32 [ 1 ] ; <nl> + <nl> + c . f32 [ 4 ] = a . f32 [ 4 ] ; <nl> + c . f32 [ 5 ] = b . f32 [ 4 ] ; <nl> + c . f32 [ 6 ] = a . f32 [ 5 ] ; <nl> + c . f32 [ 7 ] = b . f32 [ 5 ] ; <nl> + <nl> + return c ; <nl> + } <nl> + <nl> + inline simd8float32 unpackhi ( simd8float32 a , simd8float32 b ) { <nl> + simd8float32 c ; <nl> + c . f32 [ 0 ] = a . f32 [ 2 ] ; <nl> + c . f32 [ 1 ] = b . f32 [ 2 ] ; <nl> + c . f32 [ 2 ] = a . f32 [ 3 ] ; <nl> + c . f32 [ 3 ] = b . f32 [ 3 ] ; <nl> + <nl> + c . f32 [ 4 ] = a . f32 [ 6 ] ; <nl> + c . f32 [ 5 ] = b . f32 [ 6 ] ; <nl> + c . f32 [ 6 ] = a . f32 [ 7 ] ; <nl> + c . f32 [ 7 ] = b . f32 [ 7 ] ; <nl> + <nl> + return c ; <nl> + } <nl> + <nl> + / / compute a * b + c <nl> + inline simd8float32 fmadd ( simd8float32 a , simd8float32 b , simd8float32 c ) { <nl> + simd8float32 res ; <nl> + for ( int i = 0 ; i < 8 ; i + + ) { <nl> + res . f32 [ i ] = a . f32 [ i ] * b . f32 [ i ] + c . f32 [ i ] ; <nl> + } <nl> + return res ; <nl> + } <nl> + <nl> + <nl> + <nl> + } / / namespace faiss <nl> mmm a / tests / test_build_blocks . py <nl> ppp b / tests / test_build_blocks . py <nl> def test_int64 ( self ) : <nl> faiss . vector_to_array ( idx . id_map ) <nl> <nl> <nl> - class TestPartitioning ( unittest . TestCase ) : <nl> - <nl> - def do_partition ( self , n , q , maxval = None , seed = None ) : <nl> - if seed is None : <nl> - for i in range ( 50 ) : <nl> - self . do_partition ( n , q , maxval , i + 1234 ) <nl> - # print ( " seed = " , seed ) <nl> - rs = np . random . RandomState ( seed ) <nl> - if maxval is None : <nl> - vals = rs . rand ( n ) . astype ( ' float32 ' ) <nl> - else : <nl> - vals = rs . randint ( maxval , size = n ) . astype ( ' float32 ' ) <nl> - <nl> - ids = ( rs . permutation ( n ) + 12345 ) . 
astype ( ' int64 ' ) <nl> - dic = dict ( zip ( ids , vals ) ) <nl> - <nl> - # print ( " seed = " , seed , " q = " , q , " n = " , n ) <nl> - # print ( vals ) <nl> - # print ( ids ) <nl> - <nl> - vals_orig = vals . copy ( ) <nl> - <nl> - sp = faiss . swig_ptr <nl> - if type ( q ) = = int : <nl> - faiss . CMax_float_partition_fuzzy ( <nl> - sp ( vals ) , sp ( ids ) , n , <nl> - q , q , None <nl> - ) <nl> - else : <nl> - q_min , q_max = q <nl> - q = np . array ( [ - 1 ] , dtype = ' uint64 ' ) <nl> - faiss . CMax_float_partition_fuzzy ( <nl> - sp ( vals ) , sp ( ids ) , n , <nl> - q_min , q_max , sp ( q ) <nl> - ) <nl> - q = q [ 0 ] <nl> - assert q_min < = q < = q_max <nl> - <nl> - o = vals_orig . argsort ( ) <nl> - thresh = vals_orig [ o [ q ] ] <nl> - n_eq = ( vals_orig [ o [ : q ] ] = = thresh ) . sum ( ) <nl> - <nl> - for i in range ( q ) : <nl> - self . assertEqual ( vals [ i ] , dic [ ids [ i ] ] ) <nl> - self . assertLessEqual ( vals [ i ] , thresh ) <nl> - if vals [ i ] = = thresh : <nl> - n_eq - = 1 <nl> - self . assertEqual ( n_eq , 0 ) <nl> - <nl> - def test_partition ( self ) : <nl> - self . do_partition ( 160 , 80 ) <nl> - <nl> - def test_partition_manydups ( self ) : <nl> - self . do_partition ( 160 , 80 , maxval = 16 ) <nl> - <nl> - def test_partition_lowq ( self ) : <nl> - self . do_partition ( 160 , 10 , maxval = 16 ) <nl> - <nl> - def test_partition_highq ( self ) : <nl> - self . do_partition ( 165 , 155 , maxval = 16 ) <nl> - <nl> - def test_partition_q10 ( self ) : <nl> - self . do_partition ( 32 , 10 , maxval = 500 ) <nl> - <nl> - def test_partition_q10_dups ( self ) : <nl> - self . do_partition ( 32 , 10 , maxval = 16 ) <nl> - <nl> - def test_partition_q10_fuzzy ( self ) : <nl> - self . do_partition ( 32 , ( 10 , 15 ) , maxval = 500 ) <nl> - <nl> - def test_partition_fuzzy ( self ) : <nl> - self . do_partition ( 160 , ( 70 , 80 ) , maxval = 500 ) <nl> - <nl> - def test_partition_fuzzy_2 ( self ) : <nl> - self . do_partition ( 160 , ( 70 , 80 ) ) <nl> - <nl> if __name__ = = ' __main__ ' : <nl> unittest . main ( ) <nl> mmm a / tests / test_contrib . py <nl> ppp b / tests / test_contrib . py <nl> <nl> import platform <nl> <nl> from faiss . contrib import datasets <nl> + from faiss . contrib import inspect_tools <nl> <nl> from common import get_dataset_2 <nl> try : <nl> def test_knn_cpu ( self ) : <nl> <nl> assert np . all ( Inew = = Iref ) <nl> assert np . allclose ( Dref , Dnew ) <nl> + <nl> + <nl> + class TestInspect ( unittest . TestCase ) : <nl> + <nl> + def test_LinearTransform ( self ) : <nl> + # training data <nl> + xt = np . random . rand ( 1000 , 20 ) . astype ( ' float32 ' ) <nl> + # test data <nl> + x = np . random . rand ( 10 , 20 ) . astype ( ' float32 ' ) <nl> + # make the PCA matrix <nl> + pca = faiss . PCAMatrix ( 20 , 10 ) <nl> + pca . train ( xt ) <nl> + # apply it to test data <nl> + yref = pca . apply_py ( x ) <nl> + <nl> + A , b = inspect_tools . get_LinearTransform_matrix ( pca ) <nl> + <nl> + # verify <nl> + ynew = x @ A . T + b <nl> + np . testing . assert_array_almost_equal ( yref , ynew ) <nl> mmm a / tests / test_factory . py <nl> ppp b / tests / test_factory . py <nl> def test_factory_HNSW_newstyle ( self ) : <nl> indexpq = faiss . downcast_index ( index . storage ) <nl> assert not indexpq . do_polysemous_training <nl> <nl> + def test_factory_fast_scan ( self ) : <nl> + index = faiss . index_factory ( 56 , " PQ28x4fs " ) <nl> + self . assertEqual ( index . bbs , 32 ) <nl> + index = faiss . index_factory ( 56 , " PQ28x4fs_64 " ) <nl> + self . 
assertEqual ( index . bbs , 64 ) <nl> + index = faiss . index_factory ( 56 , " IVF50 , PQ28x4fs_64 " , faiss . METRIC_INNER_PRODUCT ) <nl> + self . assertEqual ( index . bbs , 64 ) <nl> + self . assertEqual ( index . nlist , 50 ) <nl> + self . assertTrue ( index . cp . spherical ) <nl> + index = faiss . index_factory ( 56 , " PQ28x4fs , RFlat " ) <nl> + self . assertEqual ( index . k_factor , 1 . 0 ) <nl> + <nl> + def test_parenthesis ( self ) : <nl> + index = faiss . index_factory ( 50 , " IVF32 ( PQ25 ) , Flat " ) <nl> + quantizer = faiss . downcast_index ( index . quantizer ) <nl> + self . assertEqual ( quantizer . pq . M , 25 ) <nl> + <nl> <nl> class TestCloneSize ( unittest . TestCase ) : <nl> <nl> new file mode 100644 <nl> index 000000000 . . fa5cf7d1b <nl> mmm / dev / null <nl> ppp b / tests / test_fast_scan . py <nl> <nl> + # Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + # <nl> + # This source code is licensed under the MIT license found in the <nl> + # LICENSE file in the root directory of this source tree . <nl> + <nl> + <nl> + import unittest <nl> + <nl> + import numpy as np <nl> + import faiss <nl> + <nl> + from faiss . contrib import datasets <nl> + import platform <nl> + <nl> + <nl> + class TestSearch ( unittest . TestCase ) : <nl> + <nl> + <nl> + def test_PQ4_accuracy ( self ) : <nl> + ds = datasets . SyntheticDataset ( 32 , 2000 , 5000 , 1000 ) <nl> + <nl> + index_gt = faiss . IndexFlatL2 ( 32 ) <nl> + index_gt . add ( ds . get_database ( ) ) <nl> + Dref , Iref = index_gt . search ( ds . get_queries ( ) , 10 ) <nl> + <nl> + index = faiss . index_factory ( 32 , ' PQ16x4 ' ) <nl> + index . train ( ds . get_train ( ) ) <nl> + index . add ( ds . get_database ( ) ) <nl> + Da , Ia = index . search ( ds . get_queries ( ) , 10 ) <nl> + <nl> + nq = Iref . shape [ 0 ] <nl> + recall_at_1 = ( Iref [ : , 0 ] = = Ia [ : , 0 ] ) . sum ( ) / nq <nl> + assert recall_at_1 > 0 . 6 <nl> + # print ( f ' recall @ 1 = { recall_at_1 : . 3f } ' ) <nl> + <nl> + <nl> + class TestRounding ( unittest . TestCase ) : <nl> + <nl> + def do_test_rounding ( self , implem = 4 , metric = faiss . METRIC_L2 ) : <nl> + ds = datasets . SyntheticDataset ( 32 , 2000 , 5000 , 200 ) <nl> + <nl> + index = faiss . index_factory ( 32 , ' PQ16x4 ' , metric ) <nl> + index . train ( ds . get_train ( ) ) <nl> + index . add ( ds . get_database ( ) ) <nl> + Dref , Iref = index . search ( ds . get_queries ( ) , 10 ) <nl> + nq = Iref . shape [ 0 ] <nl> + <nl> + index2 = faiss . IndexPQFastScan ( index ) <nl> + <nl> + # simply repro normal search <nl> + index2 . implem = 2 <nl> + D2 , I2 = index2 . search ( ds . get_queries ( ) , 10 ) <nl> + np . testing . assert_array_equal ( I2 , Iref ) <nl> + np . testing . assert_array_equal ( D2 , Dref ) <nl> + <nl> + # rounded LUT with correction <nl> + index2 . implem = implem <nl> + D4 , I4 = index2 . search ( ds . get_queries ( ) , 10 ) <nl> + # check accuracy of indexes <nl> + recalls = { } <nl> + for rank in 1 , 10 : <nl> + recalls [ rank ] = ( Iref [ : , : 1 ] = = I4 [ : , : rank ] ) . sum ( ) / nq <nl> + <nl> + min_r1 = 0 . 98 if metric = = faiss . METRIC_INNER_PRODUCT else 0 . 99 <nl> + self . assertGreater ( recalls [ 1 ] , min_r1 ) <nl> + self . assertGreater ( recalls [ 10 ] , 0 . 995 ) <nl> + # check accuracy of distances <nl> + # err3 = ( ( D3 - D2 ) * * 2 ) . sum ( ) <nl> + err4 = ( ( D4 - D2 ) * * 2 ) . sum ( ) <nl> + nf = ( D2 * * 2 ) . sum ( ) <nl> + self . assertLess ( err4 , nf * 1e - 4 ) <nl> + <nl> + def test_implem_4 ( self ) : <nl> + self . 
do_test_rounding ( 4 ) <nl> + <nl> + def test_implem_4_ip ( self ) : <nl> + self . do_test_rounding ( 4 , faiss . METRIC_INNER_PRODUCT ) <nl> + <nl> + def test_implem_12 ( self ) : <nl> + self . do_test_rounding ( 12 ) <nl> + <nl> + def test_implem_12_ip ( self ) : <nl> + self . do_test_rounding ( 12 , faiss . METRIC_INNER_PRODUCT ) <nl> + <nl> + def test_implem_14 ( self ) : <nl> + self . do_test_rounding ( 14 ) <nl> + <nl> + def test_implem_14_ip ( self ) : <nl> + self . do_test_rounding ( 12 , faiss . METRIC_INNER_PRODUCT ) <nl> + <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + # Kernel unit test <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + <nl> + <nl> + <nl> + def reference_accu ( codes , LUT ) : <nl> + nq , nsp , is_16 = LUT . shape <nl> + nb , nsp_2 = codes . shape <nl> + assert is_16 = = 16 <nl> + assert nsp_2 = = nsp / / 2 <nl> + accu = np . zeros ( ( nq , nb ) , ' uint16 ' ) <nl> + for i in range ( nq ) : <nl> + for j in range ( nb ) : <nl> + a = np . uint16 ( 0 ) <nl> + for sp in range ( 0 , nsp , 2 ) : <nl> + c = codes [ j , sp / / 2 ] <nl> + a + = LUT [ i , sp , c & 15 ] . astype ( ' uint16 ' ) <nl> + a + = LUT [ i , sp + 1 , c > > 4 ] . astype ( ' uint16 ' ) <nl> + accu [ i , j ] = a <nl> + return accu <nl> + <nl> + <nl> + # disabled because the function to write to mem is not implemented currently <nl> + class ThisIsNotATestLoop5 : # ( unittest . TestCase ) : <nl> + <nl> + def do_loop5_kernel ( self , nq , bb ) : <nl> + " " " unit test for the accumulation kernel " " " <nl> + nb = bb * 32 # databse size <nl> + nsp = 24 # number of sub - quantizers <nl> + <nl> + rs = np . random . RandomState ( 123 ) <nl> + codes = rs . randint ( 256 , size = ( nb , nsp / / 2 ) ) . astype ( ' uint8 ' ) <nl> + LUT = rs . randint ( 256 , size = ( nq , nsp , 16 ) ) . astype ( ' uint8 ' ) <nl> + accu_ref = reference_accu ( codes , LUT ) <nl> + <nl> + def to_A ( x ) : <nl> + return faiss . array_to_AlignedTable ( x . ravel ( ) ) <nl> + <nl> + sp = faiss . swig_ptr <nl> + <nl> + LUT_a = faiss . AlignedTableUint8 ( LUT . size ) <nl> + faiss . pq4_pack_LUT ( <nl> + nq , nsp , sp ( LUT ) , <nl> + LUT_a . get ( ) <nl> + ) <nl> + <nl> + codes_a = faiss . AlignedTableUint8 ( codes . size ) <nl> + faiss . pq4_pack_codes ( <nl> + sp ( codes ) , <nl> + nb , nsp , nb , nb , nsp , <nl> + codes_a . get ( ) <nl> + ) <nl> + <nl> + accu_a = faiss . AlignedTableUint16 ( nq * nb ) <nl> + accu_a . clear ( ) <nl> + faiss . loop5_kernel_accumulate_1_block_to_mem ( <nl> + nq , nb , nsp , codes_a . get ( ) , LUT_a . get ( ) , accu_a . get ( ) <nl> + ) <nl> + accu = faiss . AlignedTable_to_array ( accu_a ) . reshape ( nq , nb ) <nl> + np . testing . assert_array_equal ( accu_ref , accu ) <nl> + <nl> + def test_11 ( self ) : <nl> + self . do_loop5_kernel ( 1 , 1 ) <nl> + <nl> + def test_21 ( self ) : <nl> + self . do_loop5_kernel ( 2 , 1 ) <nl> + <nl> + def test_12 ( self ) : <nl> + self . do_loop5_kernel ( 1 , 2 ) <nl> + <nl> + def test_22 ( self ) : <nl> + self . 
do_loop5_kernel ( 2 , 2 ) <nl> + <nl> + <nl> + <nl> + <nl> + <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + # Tests for various IndexPQFastScan implementations <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + <nl> + def verify_with_draws ( testcase , Dref , Iref , Dnew , Inew ) : <nl> + " " " verify a list of results where there are draws in the distances ( because <nl> + they are integer ) . " " " <nl> + np . testing . assert_array_almost_equal ( Dref , Dnew , decimal = 5 ) <nl> + # here we have to be careful because of draws <nl> + for i in range ( len ( Iref ) ) : <nl> + if np . all ( Iref [ i ] = = Inew [ i ] ) : # easy case <nl> + continue <nl> + # we can deduce nothing about the latest line <nl> + skip_dis = Dref [ i , - 1 ] <nl> + for dis in np . unique ( Dref ) : <nl> + if dis = = skip_dis : <nl> + continue <nl> + mask = Dref [ i , : ] = = dis <nl> + testcase . assertEqual ( set ( Iref [ i , mask ] ) , set ( Inew [ i , mask ] ) ) <nl> + <nl> + <nl> + class TestImplems ( unittest . TestCase ) : <nl> + <nl> + def __init__ ( self , * args ) : <nl> + unittest . TestCase . __init__ ( self , * args ) <nl> + self . cache = { } <nl> + self . k = 10 <nl> + <nl> + def get_index ( self , d , metric ) : <nl> + if ( d , metric ) not in self . cache : <nl> + ds = datasets . SyntheticDataset ( d , 1000 , 2000 , 200 ) <nl> + target_size = d / / 2 <nl> + index = faiss . index_factory ( d , ' PQ % dx4 ' % target_size , metric ) <nl> + index . train ( ds . get_train ( ) ) <nl> + index . add ( ds . get_database ( ) ) <nl> + <nl> + index2 = faiss . IndexPQFastScan ( index ) <nl> + # uint8 LUT but no SIMD <nl> + index2 . implem = 4 <nl> + Dref , Iref = index2 . search ( ds . get_queries ( ) , 10 ) <nl> + <nl> + self . cache [ ( d , metric ) ] = ( ds , index , Dref , Iref ) <nl> + <nl> + return self . cache [ ( d , metric ) ] <nl> + <nl> + def do_with_params ( self , d , params , metric = faiss . METRIC_L2 ) : <nl> + ds , index , Dref , Iref = self . get_index ( d , metric ) <nl> + <nl> + index2 = self . build_fast_scan_index ( index , params ) <nl> + <nl> + Dnew , Inew = index2 . search ( ds . get_queries ( ) , self . k ) <nl> + <nl> + Dref = Dref [ : , : self . k ] <nl> + Iref = Iref [ : , : self . k ] <nl> + <nl> + verify_with_draws ( self , Dref , Iref , Dnew , Inew ) <nl> + <nl> + <nl> + def build_fast_scan_index ( self , index , params ) : <nl> + index2 = faiss . IndexPQFastScan ( index ) <nl> + index2 . implem = 5 <nl> + return index2 <nl> + <nl> + <nl> + <nl> + class TestImplem12 ( TestImplems ) : <nl> + <nl> + def build_fast_scan_index ( self , index , qbs ) : <nl> + index2 = faiss . IndexPQFastScan ( index ) <nl> + index2 . qbs = qbs <nl> + index2 . implem = 12 <nl> + return index2 <nl> + <nl> + def test_qbs7 ( self ) : <nl> + self . do_with_params ( 32 , 0x223 ) <nl> + <nl> + def test_qbs7b ( self ) : <nl> + self . do_with_params ( 32 , 0x133 ) <nl> + <nl> + def test_qbs6 ( self ) : <nl> + self . do_with_params ( 32 , 0x33 ) <nl> + <nl> + def test_qbs6_ip ( self ) : <nl> + self . do_with_params ( 32 , 0x33 , faiss . METRIC_INNER_PRODUCT ) <nl> + <nl> + def test_qbs6b ( self ) : <nl> + # test codepath where qbs is not known at compile time <nl> + self . do_with_params ( 32 , 0x1113 ) <nl> + <nl> + def test_qbs6_odd_dim ( self ) : <nl> + self . 
do_with_params ( 30 , 0x33 ) <nl> + <nl> + <nl> + <nl> + <nl> + class TestImplem13 ( TestImplems ) : <nl> + <nl> + def build_fast_scan_index ( self , index , qbs ) : <nl> + index2 = faiss . IndexPQFastScan ( index ) <nl> + index2 . qbs = qbs <nl> + index2 . implem = 13 <nl> + return index2 <nl> + <nl> + def test_qbs7 ( self ) : <nl> + self . do_with_params ( 32 , 0x223 ) <nl> + <nl> + def test_qbs7_k1 ( self ) : <nl> + self . k = 1 <nl> + self . do_with_params ( 32 , 0x223 ) <nl> + <nl> + <nl> + class TestImplem14 ( TestImplems ) : <nl> + <nl> + def build_fast_scan_index ( self , index , params ) : <nl> + qbs , bbs = params <nl> + index2 = faiss . IndexPQFastScan ( index , bbs ) <nl> + index2 . qbs = qbs <nl> + index2 . implem = 14 <nl> + return index2 <nl> + <nl> + def test_1_32 ( self ) : <nl> + self . do_with_params ( 32 , ( 1 , 32 ) ) <nl> + <nl> + def test_1_64 ( self ) : <nl> + self . do_with_params ( 32 , ( 1 , 64 ) ) <nl> + <nl> + def test_2_32 ( self ) : <nl> + self . do_with_params ( 32 , ( 2 , 32 ) ) <nl> + <nl> + def test_2_64 ( self ) : <nl> + self . do_with_params ( 32 , ( 2 , 64 ) ) <nl> + <nl> + def test_qbs_1_32_k1 ( self ) : <nl> + self . k = 1 <nl> + self . do_with_params ( 32 , ( 1 , 32 ) ) <nl> + <nl> + def test_qbs_1_64_k1 ( self ) : <nl> + self . k = 1 <nl> + self . do_with_params ( 32 , ( 1 , 64 ) ) <nl> + <nl> + def test_1_32_odd_dim ( self ) : <nl> + self . do_with_params ( 30 , ( 1 , 32 ) ) <nl> + <nl> + def test_1_64_odd_dim ( self ) : <nl> + self . do_with_params ( 30 , ( 1 , 64 ) ) <nl> + <nl> + <nl> + class TestImplem15 ( TestImplems ) : <nl> + <nl> + def build_fast_scan_index ( self , index , params ) : <nl> + qbs , bbs = params <nl> + index2 = faiss . IndexPQFastScan ( index , bbs ) <nl> + index2 . qbs = qbs <nl> + index2 . implem = 15 <nl> + return index2 <nl> + <nl> + def test_1_32 ( self ) : <nl> + self . do_with_params ( 32 , ( 1 , 32 ) ) <nl> + <nl> + def test_2_64 ( self ) : <nl> + self . do_with_params ( 32 , ( 2 , 64 ) ) <nl> + <nl> + @ unittest . skipIf ( platform . system ( ) = = " Windows " , " heap corruption on windows " ) <nl> + class TestAdd ( unittest . TestCase ) : <nl> + <nl> + def do_test_add ( self , d , bbs ) : <nl> + <nl> + ds = datasets . SyntheticDataset ( d , 2000 , 5000 , 200 ) <nl> + <nl> + index = faiss . index_factory ( d , f ' PQ { d / / 2 } x4np ' ) <nl> + index . train ( ds . get_train ( ) ) <nl> + <nl> + xb = ds . get_database ( ) <nl> + index . add ( xb [ : 1235 ] ) <nl> + <nl> + index2 = faiss . IndexPQFastScan ( index , bbs ) <nl> + index2 . add ( xb [ 1235 : ] ) <nl> + new_codes = faiss . AlignedTable_to_array ( index2 . codes ) <nl> + <nl> + index . add ( xb [ 1235 : ] ) <nl> + index3 = faiss . IndexPQFastScan ( index , bbs ) <nl> + ref_codes = faiss . AlignedTable_to_array ( index3 . codes ) <nl> + self . assertEqual ( index3 . ntotal , index2 . ntotal ) <nl> + <nl> + np . testing . assert_array_equal ( ref_codes , new_codes ) <nl> + <nl> + def test_add ( self ) : <nl> + self . do_test_add ( 32 , 32 ) <nl> + <nl> + def test_add_bbs64 ( self ) : <nl> + self . do_test_add ( 32 , 64 ) <nl> + <nl> + def test_add_odd_d ( self ) : <nl> + self . do_test_add ( 30 , 64 ) <nl> + <nl> + def test_constructor ( self ) : <nl> + d = 32 <nl> + ds = datasets . SyntheticDataset ( d , 2000 , 5000 , 200 ) <nl> + <nl> + index = faiss . index_factory ( d , f ' PQ { d / / 2 } x4np ' ) <nl> + index . train ( ds . get_train ( ) ) <nl> + index . add ( ds . get_database ( ) ) <nl> + Dref , Iref = index . search ( ds . 
get_queries ( ) , 10 ) <nl> + nq = Iref . shape [ 0 ] <nl> + <nl> + index2 = faiss . IndexPQFastScan ( d , d / / 2 , 4 ) <nl> + index2 . train ( ds . get_train ( ) ) <nl> + index2 . add ( ds . get_database ( ) ) <nl> + Dnew , Inew = index2 . search ( ds . get_queries ( ) , 10 ) <nl> + <nl> + recall_at_1 = ( Iref [ : , 0 ] = = Inew [ : , 0 ] ) . sum ( ) / nq <nl> + <nl> + self . assertGreater ( recall_at_1 , 0 . 99 ) <nl> + <nl> + data = faiss . serialize_index ( index2 ) <nl> + index3 = faiss . deserialize_index ( data ) <nl> + <nl> + self . assertEqual ( index2 . implem , index3 . implem ) <nl> + <nl> + D3 , I3 = index3 . search ( ds . get_queries ( ) , 10 ) <nl> + np . testing . assert_array_equal ( D3 , Dnew ) <nl> + np . testing . assert_array_equal ( I3 , Inew ) <nl> new file mode 100644 <nl> index 000000000 . . 4bead1530 <nl> mmm / dev / null <nl> ppp b / tests / test_fast_scan_ivf . py <nl> <nl> + # Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + # <nl> + # This source code is licensed under the MIT license found in the <nl> + # LICENSE file in the root directory of this source tree . <nl> + <nl> + <nl> + import unittest <nl> + import platform <nl> + <nl> + import numpy as np <nl> + import faiss <nl> + <nl> + from faiss . contrib import datasets <nl> + from faiss . contrib . inspect_tools import get_invlist <nl> + <nl> + <nl> + class TestLUTQuantization ( unittest . TestCase ) : <nl> + <nl> + def compute_dis_float ( self , codes , LUT , bias ) : <nl> + nprobe , nt , M = codes . shape <nl> + dis = np . zeros ( ( nprobe , nt ) , dtype = ' float32 ' ) <nl> + if bias is not None : <nl> + dis [ : ] = bias . reshape ( - 1 , 1 ) <nl> + <nl> + if LUT . ndim = = 2 : <nl> + LUTp = LUT <nl> + <nl> + for p in range ( nprobe ) : <nl> + if LUT . ndim = = 3 : <nl> + LUTp = LUT [ p ] <nl> + <nl> + for i in range ( nt ) : <nl> + dis [ p , i ] + = LUTp [ np . arange ( M ) , codes [ p , i ] ] . sum ( ) <nl> + <nl> + return dis <nl> + <nl> + def compute_dis_quant ( self , codes , LUT , bias , a , b ) : <nl> + nprobe , nt , M = codes . shape <nl> + dis = np . zeros ( ( nprobe , nt ) , dtype = ' uint16 ' ) <nl> + if bias is not None : <nl> + dis [ : ] = bias . reshape ( - 1 , 1 ) <nl> + <nl> + if LUT . ndim = = 2 : <nl> + LUTp = LUT <nl> + <nl> + for p in range ( nprobe ) : <nl> + if LUT . ndim = = 3 : <nl> + LUTp = LUT [ p ] <nl> + <nl> + for i in range ( nt ) : <nl> + dis [ p , i ] + = LUTp [ np . arange ( M ) , codes [ p , i ] ] . astype ( ' uint16 ' ) . sum ( ) <nl> + <nl> + return dis / a + b <nl> + <nl> + def do_test ( self , LUT , bias , nprobe , alt_3d = False ) : <nl> + M , ksub = LUT . shape [ - 2 : ] <nl> + nt = 200 <nl> + <nl> + rs = np . random . RandomState ( 123 ) <nl> + codes = rs . randint ( ksub , size = ( nprobe , nt , M ) ) . astype ( ' uint8 ' ) <nl> + <nl> + dis_ref = self . compute_dis_float ( codes , LUT , bias ) <nl> + <nl> + LUTq = np . zeros ( LUT . shape , dtype = ' uint8 ' ) <nl> + biasq = ( <nl> + np . zeros ( bias . shape , dtype = ' uint16 ' ) <nl> + if ( bias is not None ) and not alt_3d else None <nl> + ) <nl> + atab = np . zeros ( 1 , dtype = ' float32 ' ) <nl> + btab = np . zeros ( 1 , dtype = ' float32 ' ) <nl> + <nl> + def sp ( x ) : <nl> + return faiss . swig_ptr ( x ) if x is not None else None <nl> + <nl> + faiss . quantize_LUT_and_bias ( <nl> + nprobe , M , ksub , LUT . 
ndim = = 3 , <nl> + sp ( LUT ) , sp ( bias ) , sp ( LUTq ) , M , sp ( biasq ) , <nl> + sp ( atab ) , sp ( btab ) <nl> + ) <nl> + a = atab [ 0 ] <nl> + b = btab [ 0 ] <nl> + dis_new = self . compute_dis_quant ( codes , LUTq , biasq , a , b ) <nl> + <nl> + # print ( a , b , dis_ref . sum ( ) ) <nl> + avg_realtive_error = np . abs ( dis_new - dis_ref ) . sum ( ) / dis_ref . sum ( ) <nl> + # print ( ' a = ' , a , ' avg_relative_error = ' , avg_realtive_error ) <nl> + self . assertLess ( avg_realtive_error , 0 . 0005 ) <nl> + <nl> + def test_no_residual_ip ( self ) : <nl> + ksub = 16 <nl> + M = 20 <nl> + nprobe = 10 <nl> + rs = np . random . RandomState ( 1234 ) <nl> + LUT = rs . rand ( M , ksub ) . astype ( ' float32 ' ) <nl> + bias = None <nl> + <nl> + self . do_test ( LUT , bias , nprobe ) <nl> + <nl> + def test_by_residual_ip ( self ) : <nl> + ksub = 16 <nl> + M = 20 <nl> + nprobe = 10 <nl> + rs = np . random . RandomState ( 1234 ) <nl> + LUT = rs . rand ( M , ksub ) . astype ( ' float32 ' ) <nl> + bias = rs . rand ( nprobe ) . astype ( ' float32 ' ) <nl> + bias * = 10 <nl> + <nl> + self . do_test ( LUT , bias , nprobe ) <nl> + <nl> + def test_by_residual_L2 ( self ) : <nl> + ksub = 16 <nl> + M = 20 <nl> + nprobe = 10 <nl> + rs = np . random . RandomState ( 1234 ) <nl> + LUT = rs . rand ( nprobe , M , ksub ) . astype ( ' float32 ' ) <nl> + bias = rs . rand ( nprobe ) . astype ( ' float32 ' ) <nl> + bias * = 10 <nl> + <nl> + self . do_test ( LUT , bias , nprobe ) <nl> + <nl> + def test_by_residual_L2_v2 ( self ) : <nl> + ksub = 16 <nl> + M = 20 <nl> + nprobe = 10 <nl> + rs = np . random . RandomState ( 1234 ) <nl> + LUT = rs . rand ( nprobe , M , ksub ) . astype ( ' float32 ' ) <nl> + bias = rs . rand ( nprobe ) . astype ( ' float32 ' ) <nl> + bias * = 10 <nl> + <nl> + self . do_test ( LUT , bias , nprobe , alt_3d = True ) <nl> + <nl> + <nl> + <nl> + <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + # Tests for various IndexPQFastScan implementations <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + <nl> + def verify_with_draws ( testcase , Dref , Iref , Dnew , Inew ) : <nl> + " " " verify a list of results where there are draws in the distances ( because <nl> + they are integer ) . " " " <nl> + np . testing . assert_array_almost_equal ( Dref , Dnew , decimal = 5 ) <nl> + # here we have to be careful because of draws <nl> + for i in range ( len ( Iref ) ) : <nl> + if np . all ( Iref [ i ] = = Inew [ i ] ) : # easy case <nl> + continue <nl> + # we can deduce nothing about the latest line <nl> + skip_dis = Dref [ i , - 1 ] <nl> + for dis in np . unique ( Dref ) : <nl> + if dis = = skip_dis : continue <nl> + mask = Dref [ i , : ] = = dis <nl> + testcase . assertEqual ( set ( Iref [ i , mask ] ) , set ( Inew [ i , mask ] ) ) <nl> + <nl> + def three_metrics ( Dref , Iref , Dnew , Inew ) : <nl> + nq = Iref . shape [ 0 ] <nl> + recall_at_1 = ( Iref [ : , 0 ] = = Inew [ : , 0 ] ) . sum ( ) / nq <nl> + recall_at_10 = ( Iref [ : , : 1 ] = = Inew [ : , : 10 ] ) . sum ( ) / nq <nl> + ninter = 0 <nl> + for i in range ( nq ) : <nl> + ninter + = len ( np . 
intersect1d ( Inew [ i ] , Iref [ i ] ) ) <nl> + intersection_at_10 = ninter / nq <nl> + return recall_at_1 , recall_at_10 , intersection_at_10 <nl> + <nl> + <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + # Tests for various IndexIVFPQFastScan implementations <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + <nl> + class TestIVFImplem1 ( unittest . TestCase ) : <nl> + " " " Verify implem 1 ( search from original invlists ) <nl> + against IndexIVFPQ " " " <nl> + <nl> + def do_test ( self , by_residual , metric_type = faiss . METRIC_L2 , <nl> + use_precomputed_table = 0 ) : <nl> + ds = datasets . SyntheticDataset ( 32 , 2000 , 5000 , 1000 ) <nl> + <nl> + index = faiss . index_factory ( 32 , " IVF32 , PQ16x4np " , metric_type ) <nl> + index . use_precomputed_table <nl> + index . use_precomputed_table = use_precomputed_table <nl> + index . train ( ds . get_train ( ) ) <nl> + index . add ( ds . get_database ( ) ) <nl> + index . nprobe = 4 <nl> + index . by_residual = by_residual <nl> + Da , Ia = index . search ( ds . get_queries ( ) , 10 ) <nl> + <nl> + index2 = faiss . IndexIVFPQFastScan ( index ) <nl> + index2 . implem = 1 <nl> + Db , Ib = index2 . search ( ds . get_queries ( ) , 10 ) <nl> + # self . assertLess ( ( Ia ! = Ib ) . sum ( ) , Ia . size * 0 . 005 ) <nl> + np . testing . assert_array_equal ( Ia , Ib ) <nl> + np . testing . assert_almost_equal ( Da , Db , decimal = 5 ) <nl> + <nl> + def test_no_residual ( self ) : <nl> + self . do_test ( False ) <nl> + <nl> + def test_by_residual ( self ) : <nl> + self . do_test ( True ) <nl> + <nl> + def test_by_residual_no_precomputed ( self ) : <nl> + self . do_test ( True , use_precomputed_table = - 1 ) <nl> + <nl> + def test_no_residual_ip ( self ) : <nl> + self . do_test ( False , faiss . METRIC_INNER_PRODUCT ) <nl> + <nl> + def test_by_residual_ip ( self ) : <nl> + self . do_test ( True , faiss . METRIC_INNER_PRODUCT ) <nl> + <nl> + <nl> + <nl> + class TestIVFImplem2 ( unittest . TestCase ) : <nl> + " " " Verify implem 2 ( search with original invlists with uint8 LUTs ) <nl> + against IndexIVFPQ . Entails some loss in accuracy . " " " <nl> + <nl> + def eval_quant_loss ( self , by_residual , metric = faiss . METRIC_L2 ) : <nl> + ds = datasets . SyntheticDataset ( 32 , 2000 , 5000 , 1000 ) <nl> + <nl> + index = faiss . index_factory ( 32 , " IVF32 , PQ16x4np " , metric ) <nl> + index . train ( ds . get_train ( ) ) <nl> + index . add ( ds . get_database ( ) ) <nl> + index . nprobe = 4 <nl> + index . by_residual = by_residual <nl> + Da , Ia = index . search ( ds . get_queries ( ) , 10 ) <nl> + <nl> + # loss due to int8 quantization of LUTs <nl> + index2 = faiss . IndexIVFPQFastScan ( index ) <nl> + index2 . implem = 2 <nl> + Db , Ib = index2 . search ( ds . get_queries ( ) , 10 ) <nl> + <nl> + m3 = three_metrics ( Da , Ia , Db , Ib ) <nl> + <nl> + <nl> + # print ( by_residual , metric , recall_at_1 , recall_at_10 , intersection_at_10 ) <nl> + ref_results = { <nl> + ( True , 1 ) : [ 0 . 985 , 1 . 0 , 9 . 872 ] , <nl> + ( True , 0 ) : [ 0 . 987 , 1 . 0 , 9 . 914 ] , <nl> + ( False , 1 ) : [ 0 . 991 , 1 . 0 , 9 . 907 ] , <nl> + ( False , 0 ) : [ 0 . 986 , 1 . 0 , 9 . 917 ] , <nl> + } <nl> + <nl> + ref = ref_results [ ( by_residual , metric ) ] <nl> + <nl> + self . assertGreaterEqual ( m3 [ 0 ] , ref [ 0 ] * 0 . 995 ) <nl> + self . assertGreaterEqual ( m3 [ 1 ] , ref [ 1 ] * 0 . 
995 ) <nl> + self . assertGreaterEqual ( m3 [ 2 ] , ref [ 2 ] * 0 . 995 ) <nl> + <nl> + <nl> + def test_qloss_no_residual ( self ) : <nl> + self . eval_quant_loss ( False ) <nl> + <nl> + def test_qloss_by_residual ( self ) : <nl> + self . eval_quant_loss ( True ) <nl> + <nl> + def test_qloss_no_residual_ip ( self ) : <nl> + self . eval_quant_loss ( False , faiss . METRIC_INNER_PRODUCT ) <nl> + <nl> + def test_qloss_by_residual_ip ( self ) : <nl> + self . eval_quant_loss ( True , faiss . METRIC_INNER_PRODUCT ) <nl> + <nl> + class TestEquivPQ ( unittest . TestCase ) : <nl> + <nl> + def test_equiv_pq ( self ) : <nl> + ds = datasets . SyntheticDataset ( 32 , 2000 , 200 , 4 ) <nl> + <nl> + index = faiss . index_factory ( 32 , " IVF1 , PQ16x4np " ) <nl> + index . by_residual = False <nl> + # force coarse quantizer <nl> + index . quantizer . add ( np . zeros ( ( 1 , 32 ) , dtype = ' float32 ' ) ) <nl> + index . train ( ds . get_train ( ) ) <nl> + index . add ( ds . get_database ( ) ) <nl> + Dref , Iref = index . search ( ds . get_queries ( ) , 4 ) <nl> + <nl> + index_pq = faiss . index_factory ( 32 , " PQ16x4np " ) <nl> + index_pq . pq = index . pq <nl> + index_pq . is_trained = True <nl> + index_pq . codes = faiss . downcast_InvertedLists ( <nl> + index . invlists ) . codes . at ( 0 ) <nl> + index_pq . ntotal = index . ntotal <nl> + Dnew , Inew = index_pq . search ( ds . get_queries ( ) , 4 ) <nl> + <nl> + np . testing . assert_array_equal ( Iref , Inew ) <nl> + np . testing . assert_array_equal ( Dref , Dnew ) <nl> + <nl> + index_pq2 = faiss . IndexPQFastScan ( index_pq ) <nl> + index_pq2 . implem = 12 <nl> + Dref , Iref = index_pq2 . search ( ds . get_queries ( ) , 4 ) <nl> + <nl> + index2 = faiss . IndexIVFPQFastScan ( index ) <nl> + index2 . implem = 12 <nl> + Dnew , Inew = index2 . search ( ds . get_queries ( ) , 4 ) <nl> + np . testing . assert_array_equal ( Iref , Inew ) <nl> + np . testing . assert_array_equal ( Dref , Dnew ) <nl> + <nl> + <nl> + class TestIVFImplem12 ( unittest . TestCase ) : <nl> + <nl> + IMPLEM = 12 <nl> + <nl> + def do_test ( self , by_residual , metric = faiss . METRIC_L2 , d = 32 ) : <nl> + ds = datasets . SyntheticDataset ( d , 2000 , 5000 , 200 ) <nl> + <nl> + index = faiss . index_factory ( d , f " IVF32 , PQ { d / / 2 } x4np " , metric ) <nl> + # force coarse quantizer <nl> + # index . quantizer . add ( np . zeros ( ( 1 , 32 ) , dtype = ' float32 ' ) ) <nl> + index . by_residual = by_residual <nl> + index . train ( ds . get_train ( ) ) <nl> + index . add ( ds . get_database ( ) ) <nl> + index . nprobe = 4 <nl> + <nl> + index2 = faiss . IndexIVFPQFastScan ( index ) <nl> + index2 . implem = 2 <nl> + Dref , Iref = index2 . search ( ds . get_queries ( ) , 4 ) <nl> + index2 = faiss . IndexIVFPQFastScan ( index ) <nl> + index2 . implem = self . IMPLEM <nl> + Dnew , Inew = index2 . search ( ds . get_queries ( ) , 4 ) <nl> + <nl> + verify_with_draws ( self , Dref , Iref , Dnew , Inew ) <nl> + <nl> + stats = faiss . cvar . indexIVF_stats <nl> + stats . reset ( ) <nl> + <nl> + # also verify with single result <nl> + Dnew , Inew = index2 . search ( ds . get_queries ( ) , 1 ) <nl> + for q in range ( len ( Dref ) ) : <nl> + if Dref [ q , 1 ] = = Dref [ q , 0 ] : <nl> + # then we cannot conclude <nl> + continue <nl> + self . assertEqual ( Iref [ q , 0 ] , Inew [ q , 0 ] ) <nl> + np . testing . assert_almost_equal ( Dref [ q , 0 ] , Dnew [ q , 0 ] , decimal = 5 ) <nl> + <nl> + self . assertGreater ( stats . 
ndis , 0 ) <nl> + <nl> + def test_no_residual ( self ) : <nl> + self . do_test ( False ) <nl> + <nl> + def test_by_residual ( self ) : <nl> + self . do_test ( True ) <nl> + <nl> + def test_no_residual_ip ( self ) : <nl> + self . do_test ( False , metric = faiss . METRIC_INNER_PRODUCT ) <nl> + <nl> + def test_by_residual_ip ( self ) : <nl> + self . do_test ( True , metric = faiss . METRIC_INNER_PRODUCT ) <nl> + <nl> + def test_no_residual_odd_dim ( self ) : <nl> + self . do_test ( False , d = 30 ) <nl> + <nl> + def test_by_residual_odd_dim ( self ) : <nl> + self . do_test ( True , d = 30 ) <nl> + <nl> + <nl> + class TestIVFImplem10 ( TestIVFImplem12 ) : <nl> + IMPLEM = 10 <nl> + <nl> + <nl> + class TestIVFImplem11 ( TestIVFImplem12 ) : <nl> + IMPLEM = 11 <nl> + <nl> + class TestIVFImplem13 ( TestIVFImplem12 ) : <nl> + IMPLEM = 13 <nl> + <nl> + <nl> + @ unittest . skipIf ( platform . system ( ) = = " Windows " , " heap corruption on windows " ) <nl> + class TestAdd ( unittest . TestCase ) : <nl> + <nl> + def do_test ( self , by_residual = False , metric = faiss . METRIC_L2 , d = 32 , bbs = 32 ) : <nl> + bbs = 32 <nl> + ds = datasets . SyntheticDataset ( d , 2000 , 5000 , 200 ) <nl> + <nl> + index = faiss . index_factory ( d , f " IVF32 , PQ { d / / 2 } x4np " , metric ) <nl> + index . by_residual = by_residual <nl> + index . train ( ds . get_train ( ) ) <nl> + index . nprobe = 4 <nl> + <nl> + xb = ds . get_database ( ) <nl> + index . add ( xb [ : 1235 ] ) <nl> + <nl> + index2 = faiss . IndexIVFPQFastScan ( index , bbs ) <nl> + <nl> + index . add ( xb [ 1235 : ] ) <nl> + index3 = faiss . IndexIVFPQFastScan ( index , bbs ) <nl> + Dref , Iref = index3 . search ( ds . get_queries ( ) , 10 ) <nl> + <nl> + index2 . add ( xb [ 1235 : ] ) <nl> + Dnew , Inew = index2 . search ( ds . get_queries ( ) , 10 ) <nl> + <nl> + np . testing . assert_array_equal ( Dref , Dnew ) <nl> + np . testing . assert_array_equal ( Iref , Inew ) <nl> + <nl> + # direct verification of code content . Not sure the test is correct <nl> + # if codes are shuffled . <nl> + for list_no in range ( 32 ) : <nl> + ref_ids , ref_codes = get_invlist ( index3 . invlists , list_no ) <nl> + new_ids , new_codes = get_invlist ( index2 . invlists , list_no ) <nl> + self . assertEqual ( set ( ref_ids ) , set ( new_ids ) ) <nl> + new_code_per_id = { <nl> + new_ids [ i ] : new_codes [ i / / bbs , : , i % bbs ] <nl> + for i in range ( new_ids . size ) <nl> + } <nl> + for i , the_id in enumerate ( ref_ids ) : <nl> + ref_code_i = ref_codes [ i / / bbs , : , i % bbs ] <nl> + new_code_i = new_code_per_id [ the_id ] <nl> + np . testing . assert_array_equal ( ref_code_i , new_code_i ) <nl> + <nl> + <nl> + def test_add ( self ) : <nl> + self . do_test ( ) <nl> + <nl> + def test_odd_d ( self ) : <nl> + self . do_test ( d = 30 ) <nl> + <nl> + def test_bbs64 ( self ) : <nl> + self . do_test ( bbs = 64 ) <nl> + <nl> + <nl> + class TestTraining ( unittest . TestCase ) : <nl> + <nl> + def do_test ( self , by_residual = False , metric = faiss . METRIC_L2 , d = 32 , bbs = 32 ) : <nl> + bbs = 32 <nl> + ds = datasets . SyntheticDataset ( d , 2000 , 5000 , 200 ) <nl> + <nl> + index = faiss . index_factory ( d , f " IVF32 , PQ { d / / 2 } x4np " , metric ) <nl> + index . by_residual = by_residual <nl> + index . train ( ds . get_train ( ) ) <nl> + index . add ( ds . get_database ( ) ) <nl> + index . nprobe = 4 <nl> + Dref , Iref = index . search ( ds . get_queries ( ) , 10 ) <nl> + <nl> + index2 = faiss . IndexIVFPQFastScan ( <nl> + index . 
quantizer , d , 32 , d / / 2 , 4 , metric , bbs ) <nl> + index2 . by_residual = by_residual <nl> + index2 . train ( ds . get_train ( ) ) <nl> + <nl> + index2 . add ( ds . get_database ( ) ) <nl> + index2 . nprobe = 4 <nl> + Dnew , Inew = index2 . search ( ds . get_queries ( ) , 10 ) <nl> + <nl> + m3 = three_metrics ( Dref , Iref , Dnew , Inew ) <nl> + # print ( ( by_residual , metric , d ) , " : " , m3 ) <nl> + ref_m3_tab = { <nl> + ( True , 1 , 32 ) : ( 0 . 995 , 1 . 0 , 9 . 91 ) , <nl> + ( True , 0 , 32 ) : ( 0 . 99 , 1 . 0 , 9 . 91 ) , <nl> + ( True , 1 , 30 ) : ( 0 . 99 , 1 . 0 , 9 . 885 ) , <nl> + ( False , 1 , 32 ) : ( 0 . 99 , 1 . 0 , 9 . 875 ) , <nl> + ( False , 0 , 32 ) : ( 0 . 99 , 1 . 0 , 9 . 92 ) , <nl> + ( False , 1 , 30 ) : ( 1 . 0 , 1 . 0 , 9 . 895 ) <nl> + } <nl> + ref_m3 = ref_m3_tab [ ( by_residual , metric , d ) ] <nl> + self . assertGreater ( m3 [ 0 ] , ref_m3 [ 0 ] * 0 . 99 ) <nl> + self . assertGreater ( m3 [ 1 ] , ref_m3 [ 1 ] * 0 . 99 ) <nl> + self . assertGreater ( m3 [ 2 ] , ref_m3 [ 2 ] * 0 . 99 ) <nl> + <nl> + # Test I / O <nl> + data = faiss . serialize_index ( index2 ) <nl> + index3 = faiss . deserialize_index ( data ) <nl> + D3 , I3 = index3 . search ( ds . get_queries ( ) , 10 ) <nl> + <nl> + np . testing . assert_array_equal ( I3 , Inew ) <nl> + np . testing . assert_array_equal ( D3 , Dnew ) <nl> + <nl> + def test_no_residual ( self ) : <nl> + self . do_test ( by_residual = False ) <nl> + <nl> + def test_by_residual ( self ) : <nl> + self . do_test ( by_residual = True ) <nl> + <nl> + def test_no_residual_ip ( self ) : <nl> + self . do_test ( by_residual = False , metric = faiss . METRIC_INNER_PRODUCT ) <nl> + <nl> + def test_by_residual_ip ( self ) : <nl> + self . do_test ( by_residual = True , metric = faiss . METRIC_INNER_PRODUCT ) <nl> + <nl> + def test_no_residual_odd_dim ( self ) : <nl> + self . do_test ( by_residual = False , d = 30 ) <nl> + <nl> + def test_by_residual_odd_dim ( self ) : <nl> + self . do_test ( by_residual = True , d = 30 ) <nl> mmm a / tests / test_index_accuracy . py <nl> ppp b / tests / test_index_accuracy . py <nl> def test_sh ( self ) : <nl> assert abs ( ninter - self . ref_results [ key ] ) < = 4 <nl> <nl> <nl> - if __name__ = = ' __main__ ' : <nl> - unittest . main ( ) <nl> + class TestRefine ( unittest . TestCase ) : <nl> + <nl> + def do_test ( self , metric ) : <nl> + d = 32 <nl> + xt , xb , xq = get_dataset_2 ( d , 2000 , 1000 , 200 ) <nl> + index1 = faiss . index_factory ( d , " PQ4x4np " , metric ) <nl> + <nl> + Dref , Iref = faiss . knn ( xq , xb , 10 , metric ) <nl> + <nl> + index1 . train ( xt ) <nl> + index1 . add ( xb ) <nl> + <nl> + D1 , I1 = index1 . search ( xq , 100 ) <nl> + <nl> + recall1 = ( I1 = = Iref [ : , : 1 ] ) . sum ( ) <nl> + <nl> + # add refine index on top <nl> + index2 = faiss . IndexRefineFlat ( index1 , xb ) <nl> + index2 . k_factor = 10 . 0 <nl> + D2 , I2 = index2 . search ( xq , 10 ) <nl> + <nl> + # check distance is computed properly <nl> + for i in range ( len ( xq ) ) : <nl> + x1 = xq [ i ] <nl> + x2 = xb [ I2 [ i , 5 ] ] <nl> + if metric = = faiss . METRIC_L2 : <nl> + dref = ( ( x1 - x2 ) * * 2 ) . sum ( ) <nl> + else : <nl> + dref = np . dot ( x1 , x2 ) <nl> + np . testing . assert_almost_equal ( dref , D2 [ i , 5 ] , decimal = 5 ) <nl> + <nl> + # check that with refinement , the recall @ 10 is the same as <nl> + # the original recall @ 100 <nl> + recall2 = ( I2 = = Iref [ : , : 1 ] ) . sum ( ) <nl> + # print ( " recalls " , recall1 , recall2 ) <nl> + self . 
assertEquals ( recall1 , recall2 ) <nl> + <nl> + def test_IP ( self ) : <nl> + self . do_test ( faiss . METRIC_INNER_PRODUCT ) <nl> + <nl> + def test_L2 ( self ) : <nl> + self . do_test ( faiss . METRIC_L2 ) <nl> mmm a / tests / test_merge . cpp <nl> ppp b / tests / test_merge . cpp <nl> <nl> # include < faiss / IndexFlat . h > <nl> # include < faiss / MetaIndexes . h > <nl> # include < faiss / IndexPreTransform . h > <nl> - # include < faiss / OnDiskInvertedLists . h > <nl> + # include < faiss / invlists / OnDiskInvertedLists . h > <nl> # include < faiss / IVFlib . h > <nl> <nl> <nl> mmm a / tests / test_ondisk_ivf . cpp <nl> ppp b / tests / test_ondisk_ivf . cpp <nl> <nl> <nl> # include < gtest / gtest . h > <nl> <nl> - # include < faiss / OnDiskInvertedLists . h > <nl> + # include < faiss / invlists / OnDiskInvertedLists . h > <nl> # include < faiss / IndexIVFFlat . h > <nl> # include < faiss / IndexFlat . h > <nl> # include < faiss / utils / random . h > <nl> new file mode 100644 <nl> index 000000000 . . c3950410e <nl> mmm / dev / null <nl> ppp b / tests / test_partition . py <nl> <nl> + # Copyright ( c ) Facebook , Inc . and its affiliates . <nl> + # <nl> + # This source code is licensed under the MIT license found in the <nl> + # LICENSE file in the root directory of this source tree . <nl> + <nl> + import numpy as np <nl> + <nl> + import faiss <nl> + import unittest <nl> + <nl> + <nl> + <nl> + class PartitionTests : <nl> + <nl> + def test_partition ( self ) : <nl> + self . do_partition ( 160 , 80 ) <nl> + <nl> + def test_partition_manydups ( self ) : <nl> + self . do_partition ( 160 , 80 , maxval = 16 ) <nl> + <nl> + def test_partition_lowq ( self ) : <nl> + self . do_partition ( 160 , 10 , maxval = 16 ) <nl> + <nl> + def test_partition_highq ( self ) : <nl> + self . do_partition ( 165 , 155 , maxval = 16 ) <nl> + <nl> + def test_partition_q10 ( self ) : <nl> + self . do_partition ( 32 , 10 , maxval = 500 ) <nl> + <nl> + def test_partition_q10_dups ( self ) : <nl> + self . do_partition ( 32 , 10 , maxval = 16 ) <nl> + <nl> + def test_partition_q10_fuzzy ( self ) : <nl> + self . do_partition ( 32 , ( 10 , 15 ) , maxval = 500 ) <nl> + <nl> + def test_partition_fuzzy ( self ) : <nl> + self . do_partition ( 160 , ( 70 , 80 ) , maxval = 500 ) <nl> + <nl> + def test_partition_fuzzy_2 ( self ) : <nl> + self . do_partition ( 160 , ( 70 , 80 ) ) <nl> + <nl> + <nl> + <nl> + class TestPartitioningFloat ( unittest . TestCase , PartitionTests ) : <nl> + <nl> + def do_partition ( self , n , q , maxval = None , seed = None ) : <nl> + if seed is None : <nl> + for i in range ( 50 ) : <nl> + self . do_partition ( n , q , maxval , i + 1234 ) <nl> + # print ( " seed = " , seed ) <nl> + rs = np . random . RandomState ( seed ) <nl> + if maxval is None : <nl> + vals = rs . rand ( n ) . astype ( ' float32 ' ) <nl> + else : <nl> + vals = rs . randint ( maxval , size = n ) . astype ( ' float32 ' ) <nl> + <nl> + ids = ( rs . permutation ( n ) + 12345 ) . astype ( ' int64 ' ) <nl> + dic = dict ( zip ( ids , vals ) ) <nl> + <nl> + vals_orig = vals . copy ( ) <nl> + <nl> + sp = faiss . swig_ptr <nl> + if type ( q ) = = int : <nl> + faiss . CMax_float_partition_fuzzy ( <nl> + sp ( vals ) , sp ( ids ) , n , <nl> + q , q , None <nl> + ) <nl> + else : <nl> + q_min , q_max = q <nl> + q = np . array ( [ - 1 ] , dtype = ' uint64 ' ) <nl> + faiss . 
CMax_float_partition_fuzzy ( <nl> + sp ( vals ) , sp ( ids ) , n , <nl> + q_min , q_max , sp ( q ) <nl> + ) <nl> + q = q [ 0 ] <nl> + assert q_min < = q < = q_max <nl> + <nl> + o = vals_orig . argsort ( ) <nl> + thresh = vals_orig [ o [ q ] ] <nl> + n_eq = ( vals_orig [ o [ : q ] ] = = thresh ) . sum ( ) <nl> + <nl> + for i in range ( q ) : <nl> + self . assertEqual ( vals [ i ] , dic [ ids [ i ] ] ) <nl> + self . assertLessEqual ( vals [ i ] , thresh ) <nl> + if vals [ i ] = = thresh : <nl> + n_eq - = 1 <nl> + self . assertEqual ( n_eq , 0 ) <nl> + <nl> + <nl> + class TestPartitioningFloatMin ( unittest . TestCase , PartitionTests ) : <nl> + <nl> + def do_partition ( self , n , q , maxval = None , seed = None ) : <nl> + if seed is None : <nl> + for i in range ( 50 ) : <nl> + self . do_partition ( n , q , maxval , i + 1234 ) <nl> + # print ( " seed = " , seed ) <nl> + rs = np . random . RandomState ( seed ) <nl> + if maxval is None : <nl> + vals = rs . rand ( n ) . astype ( ' float32 ' ) <nl> + mirval = 1 . 0 <nl> + else : <nl> + vals = rs . randint ( maxval , size = n ) . astype ( ' float32 ' ) <nl> + mirval = 65536 <nl> + <nl> + ids = ( rs . permutation ( n ) + 12345 ) . astype ( ' int64 ' ) <nl> + dic = dict ( zip ( ids , vals ) ) <nl> + <nl> + vals_orig = vals . copy ( ) <nl> + <nl> + vals [ : ] = mirval - vals <nl> + <nl> + sp = faiss . swig_ptr <nl> + if type ( q ) = = int : <nl> + faiss . CMin_float_partition_fuzzy ( <nl> + sp ( vals ) , sp ( ids ) , n , <nl> + q , q , None <nl> + ) <nl> + else : <nl> + q_min , q_max = q <nl> + q = np . array ( [ - 1 ] , dtype = ' uint64 ' ) <nl> + faiss . CMin_float_partition_fuzzy ( <nl> + sp ( vals ) , sp ( ids ) , n , <nl> + q_min , q_max , sp ( q ) <nl> + ) <nl> + q = q [ 0 ] <nl> + assert q_min < = q < = q_max <nl> + <nl> + vals [ : ] = mirval - vals <nl> + <nl> + o = vals_orig . argsort ( ) <nl> + thresh = vals_orig [ o [ q ] ] <nl> + n_eq = ( vals_orig [ o [ : q ] ] = = thresh ) . sum ( ) <nl> + <nl> + for i in range ( q ) : <nl> + np . testing . assert_almost_equal ( vals [ i ] , dic [ ids [ i ] ] , decimal = 5 ) <nl> + self . assertLessEqual ( vals [ i ] , thresh ) <nl> + if vals [ i ] = = thresh : <nl> + n_eq - = 1 <nl> + self . assertEqual ( n_eq , 0 ) <nl> + <nl> + <nl> + class TestPartitioningUint16 ( unittest . TestCase , PartitionTests ) : <nl> + <nl> + def do_partition ( self , n , q , maxval = 65536 , seed = None ) : <nl> + if seed is None : <nl> + for i in range ( 50 ) : <nl> + self . do_partition ( n , q , maxval , i + 1234 ) <nl> + <nl> + # print ( " seed = " , seed ) <nl> + rs = np . random . RandomState ( seed ) <nl> + vals = rs . randint ( maxval , size = n ) . astype ( ' uint16 ' ) <nl> + ids = ( rs . permutation ( n ) + 12345 ) . astype ( ' int64 ' ) <nl> + dic = dict ( zip ( ids , vals ) ) <nl> + <nl> + sp = faiss . swig_ptr <nl> + vals_orig = vals . copy ( ) <nl> + <nl> + tab_a = faiss . AlignedTableUint16 ( ) <nl> + faiss . copy_array_to_AlignedTable ( vals , tab_a ) <nl> + <nl> + # print ( " tab a type " , tab_a . get ( ) ) <nl> + if type ( q ) = = int : <nl> + thresh2 = faiss . CMax_uint16_partition_fuzzy ( <nl> + tab_a . get ( ) , sp ( ids ) , n , q , q , None ) <nl> + else : <nl> + q_min , q_max = q <nl> + q = np . array ( [ - 1 ] , dtype = ' uint64 ' ) <nl> + thresh2 = faiss . CMax_uint16_partition_fuzzy ( <nl> + tab_a . get ( ) , sp ( ids ) , n , <nl> + q_min , q_max , sp ( q ) <nl> + ) <nl> + q = q [ 0 ] <nl> + assert q_min < = q < = q_max <nl> + <nl> + vals = faiss . 
AlignedTable_to_array ( tab_a ) <nl> + <nl> + o = vals_orig . argsort ( ) <nl> + thresh = vals_orig [ o [ q ] ] <nl> + n_eq = ( vals_orig [ o [ : q ] ] = = thresh ) . sum ( ) <nl> + <nl> + for i in range ( q ) : <nl> + self . assertEqual ( vals [ i ] , dic [ ids [ i ] ] ) <nl> + self . assertLessEqual ( vals [ i ] , thresh ) <nl> + if vals [ i ] = = thresh : <nl> + n_eq - = 1 <nl> + self . assertEqual ( n_eq , 0 ) <nl> + <nl> + <nl> + <nl> + class TestPartitioningUint16Min ( unittest . TestCase , PartitionTests ) : <nl> + <nl> + def do_partition ( self , n , q , maxval = 65536 , seed = None ) : <nl> + # seed = 1235 <nl> + if seed is None : <nl> + for i in range ( 50 ) : <nl> + self . do_partition ( n , q , maxval , i + 1234 ) <nl> + # print ( " seed = " , seed ) <nl> + rs = np . random . RandomState ( seed ) <nl> + vals = rs . randint ( maxval , size = n ) . astype ( ' uint16 ' ) <nl> + ids = ( rs . permutation ( n ) + 12345 ) . astype ( ' int64 ' ) <nl> + dic = dict ( zip ( ids , vals ) ) <nl> + <nl> + sp = faiss . swig_ptr <nl> + vals_orig = vals . copy ( ) <nl> + <nl> + tab_a = faiss . AlignedTableUint16 ( ) <nl> + vals_inv = ( 65535 - vals ) . astype ( ' uint16 ' ) <nl> + faiss . copy_array_to_AlignedTable ( vals_inv , tab_a ) <nl> + <nl> + # print ( " tab a type " , tab_a . get ( ) ) <nl> + if type ( q ) = = int : <nl> + thresh2 = faiss . CMin_uint16_partition_fuzzy ( <nl> + tab_a . get ( ) , sp ( ids ) , n , q , q , None ) <nl> + else : <nl> + q_min , q_max = q <nl> + q = np . array ( [ - 1 ] , dtype = ' uint64 ' ) <nl> + thresh2 = faiss . CMin_uint16_partition_fuzzy ( <nl> + tab_a . get ( ) , sp ( ids ) , n , <nl> + q_min , q_max , sp ( q ) <nl> + ) <nl> + q = q [ 0 ] <nl> + assert q_min < = q < = q_max <nl> + <nl> + vals_inv = faiss . AlignedTable_to_array ( tab_a ) <nl> + vals = 65535 - vals_inv <nl> + <nl> + o = vals_orig . argsort ( ) <nl> + thresh = vals_orig [ o [ q ] ] <nl> + n_eq = ( vals_orig [ o [ : q ] ] = = thresh ) . sum ( ) <nl> + <nl> + for i in range ( q ) : <nl> + self . assertEqual ( vals [ i ] , dic [ ids [ i ] ] ) <nl> + self . assertLessEqual ( vals [ i ] , thresh ) <nl> + if vals [ i ] = = thresh : <nl> + n_eq - = 1 <nl> + self . assertEqual ( n_eq , 0 ) <nl> + <nl> + <nl> + class TestHistograms ( unittest . TestCase ) : <nl> + <nl> + def do_test ( self , nbin , n ) : <nl> + rs = np . random . RandomState ( 123 ) <nl> + tab = rs . randint ( nbin , size = n ) . astype ( ' uint16 ' ) <nl> + ref_histogram = np . bincount ( tab , minlength = nbin ) <nl> + <nl> + tab_a = faiss . AlignedTableUint16 ( ) <nl> + faiss . copy_array_to_AlignedTable ( tab , tab_a ) <nl> + <nl> + sp = faiss . swig_ptr <nl> + hist = np . zeros ( nbin , ' int32 ' ) <nl> + if nbin = = 8 : <nl> + faiss . simd_histogram_8 ( tab_a . get ( ) , n , 0 , - 1 , sp ( hist ) ) <nl> + elif nbin = = 16 : <nl> + faiss . simd_histogram_16 ( tab_a . get ( ) , n , 0 , - 1 , sp ( hist ) ) <nl> + else : <nl> + raise AssertionError ( ) <nl> + np . testing . assert_array_equal ( hist , ref_histogram ) <nl> + <nl> + def test_8bin_even ( self ) : <nl> + self . do_test ( 8 , 5 * 16 ) <nl> + <nl> + def test_8bin_odd ( self ) : <nl> + self . do_test ( 8 , 123 ) <nl> + <nl> + def test_16bin_even ( self ) : <nl> + self . do_test ( 16 , 5 * 16 ) <nl> + <nl> + def test_16bin_odd ( self ) : <nl> + self . do_test ( 16 , 123 ) <nl> + <nl> + <nl> + def do_test_bounded ( self , nbin , n , shift = 2 , minv = 500 , rspan = None , seed = None ) : <nl> + if seed is None : <nl> + for run in range ( 50 ) : <nl> + self . 
do_test_bounded ( nbin , n , shift , minv , rspan , seed = 123 + run ) <nl> + return <nl> + <nl> + if rspan is None : <nl> + rmin , rmax = 0 , nbin * 6 <nl> + else : <nl> + rmin , rmax = rspan <nl> + <nl> + rs = np . random . RandomState ( seed ) <nl> + tab = rs . randint ( rmin , rmax , size = n ) . astype ( ' uint16 ' ) <nl> + bc = np . bincount ( tab , minlength = 65536 ) <nl> + <nl> + binsize = 1 < < shift <nl> + ref_histogram = bc [ minv : minv + binsize * nbin ] <nl> + <nl> + def pad_and_reshape ( x , m , n ) : <nl> + xout = np . zeros ( m * n , dtype = x . dtype ) <nl> + xout [ : x . size ] = x <nl> + return xout . reshape ( m , n ) <nl> + <nl> + ref_histogram = pad_and_reshape ( ref_histogram , nbin , binsize ) <nl> + ref_histogram = ref_histogram . sum ( 1 ) <nl> + <nl> + tab_a = faiss . AlignedTableUint16 ( ) <nl> + faiss . copy_array_to_AlignedTable ( tab , tab_a ) <nl> + sp = faiss . swig_ptr <nl> + <nl> + hist = np . zeros ( nbin , ' int32 ' ) <nl> + if nbin = = 8 : <nl> + faiss . simd_histogram_8 ( <nl> + tab_a . get ( ) , n , minv , shift , sp ( hist ) <nl> + ) <nl> + elif nbin = = 16 : <nl> + faiss . simd_histogram_16 ( <nl> + tab_a . get ( ) , n , minv , shift , sp ( hist ) <nl> + ) <nl> + else : <nl> + raise AssertionError ( ) <nl> + <nl> + np . testing . assert_array_equal ( hist , ref_histogram ) <nl> + <nl> + def test_8bin_even_bounded ( self ) : <nl> + self . do_test_bounded ( 8 , 22 * 16 ) <nl> + <nl> + def test_8bin_odd_bounded ( self ) : <nl> + self . do_test_bounded ( 8 , 10000 ) <nl> + <nl> + def test_16bin_even_bounded ( self ) : <nl> + self . do_test_bounded ( 16 , 22 * 16 ) <nl> + <nl> + def test_16bin_odd_bounded ( self ) : <nl> + self . do_test_bounded ( 16 , 10000 ) <nl> + <nl> + def test_16bin_bounded_bigrange ( self ) : <nl> + self . do_test_bounded ( 16 , 1000 , shift = 12 , rspan = ( 10 , 65500 ) ) <nl> + <nl> + def test_8bin_bounded_bigrange ( self ) : <nl> + self . do_test_bounded ( 8 , 1000 , shift = 13 , rspan = ( 10 , 65500 ) ) <nl> + <nl> + def test_16bin_bounded_bigrange_2 ( self ) : <nl> + self . do_test_bounded ( 16 , 10 , shift = 12 , rspan = ( 65000 , 65500 ) ) <nl> + <nl> + def test_16bin_bounded_shift0 ( self ) : <nl> + self . do_test_bounded ( 16 , 10000 , shift = 0 , rspan = ( 10 , 65500 ) ) <nl> + <nl> + def test_8bin_bounded_shift0 ( self ) : <nl> + self . do_test_bounded ( 8 , 10000 , shift = 0 , rspan = ( 10 , 65500 ) ) <nl> + <nl> + def test_16bin_bounded_ignore_out_range ( self ) : <nl> + self . do_test_bounded ( 16 , 10000 , shift = 5 , rspan = ( 100 , 20000 ) , minv = 300 ) <nl> + <nl> + def test_8bin_bounded_ignore_out_range ( self ) : <nl> + self . do_test_bounded ( 8 , 10000 , shift = 5 , rspan = ( 100 , 20000 ) , minv = 300 ) <nl>
| Implementation of PQ4 search with SIMD instructions ( ) | facebookresearch/faiss | 6d0bc58db656e73aae4d301f1e341115da122576 | 2020-12-03T18:06:38Z |
mmm a / configure . ac <nl> ppp b / configure . ac <nl> AC_PROG_CC <nl> AC_PROG_CXX <nl> AC_LANG ( [ C + + ] ) <nl> ACX_USE_SYSTEM_EXTENSIONS <nl> - AM_PROG_AR <nl> AM_CONDITIONAL ( GCC , test " $ GCC " = yes ) # let the Makefile know if we ' re gcc <nl> <nl> # test_util . cc takes forever to compile with GCC and optimization turned on . <nl>
| Remove AM_PROG_AR . | protocolbuffers/protobuf | ce58c88019e66b6c2eacd1f161041692b98050c1 | 2014-08-05T21:22:07Z |
mmm a / Telegram / lib_base <nl> ppp b / Telegram / lib_base <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit baae6cdd9ba5216732222e7dec9a76b9ea3a7c83 <nl> + Subproject commit afb1b61161d6e534c35382b91dc24c7225ed3e5a <nl>
| Fix links parsing . | telegramdesktop/tdesktop | 38d799de24e21eda113b32035fce195938949711 | 2020-01-17T11:14:30Z |
mmm a / src / cpp / server / server . cc <nl> ppp b / src / cpp / server / server . cc <nl> class Server : : AsyncRequest GRPC_FINAL : public CompletionQueueTag { <nl> request_ ( request ) , <nl> stream_ ( stream ) , <nl> call_cq_ ( call_cq ) , <nl> - notification_cq_ ( notification_cq ) , <nl> ctx_ ( ctx ) , <nl> generic_ctx_ ( nullptr ) , <nl> server_ ( server ) , <nl> class Server : : AsyncRequest GRPC_FINAL : public CompletionQueueTag { <nl> request_ ( nullptr ) , <nl> stream_ ( stream ) , <nl> call_cq_ ( call_cq ) , <nl> - notification_cq_ ( notification_cq ) , <nl> ctx_ ( nullptr ) , <nl> generic_ctx_ ( ctx ) , <nl> server_ ( server ) , <nl> class Server : : AsyncRequest GRPC_FINAL : public CompletionQueueTag { <nl> grpc : : protobuf : : Message * const request_ ; <nl> ServerAsyncStreamingInterface * const stream_ ; <nl> CompletionQueue * const call_cq_ ; <nl> - ServerCompletionQueue * const notification_cq_ ; <nl> ServerContext * const ctx_ ; <nl> GenericServerContext * const generic_ctx_ ; <nl> Server * const server_ ; <nl> void Server : : RunRpc ( ) { <nl> { <nl> grpc : : unique_lock < grpc : : mutex > lock ( mu_ ) ; <nl> if ( ! shutdown_ ) { <nl> - mrd - > Request ( server_ ) ; <nl> + mrd - > Request ( server_ , cq_ . cq ( ) ) ; <nl> } <nl> } <nl> cd . Run ( ) ; <nl>
| Correct C + + build errors | grpc/grpc | a33acb702150d63b895626bbe5a5619360421e7d | 2015-05-08T15:02:55Z |
mmm a / hphp / util / address - range . h <nl> ppp b / hphp / util / address - range . h <nl> struct RangeState { <nl> / / running ) to allocate without adding new mappings . <nl> bool trivial ( size_t size , size_t align , Direction d ) const { <nl> auto const mask = align - 1 ; <nl> - assert ( ( align & mask ) = = 0 ) ; <nl> + assertx ( ( align & mask ) = = 0 ) ; <nl> if ( d = = Direction : : LowToHigh ) { <nl> auto const use = low_use . load ( std : : memory_order_acquire ) ; <nl> auto const aligned = ( use + mask ) & ~ mask ; <nl> struct RangeState { <nl> / / Whether free space in this range is insufficient for the allocation . <nl> bool infeasible ( size_t size , size_t align , Direction d ) const { <nl> auto const mask = align - 1 ; <nl> - assert ( ( align & mask ) = = 0 ) ; <nl> + assertx ( ( align & mask ) = = 0 ) ; <nl> if ( d = = Direction : : LowToHigh ) { <nl> auto const newUse = <nl> ( ( low_use . load ( std : : memory_order_acquire ) + mask ) & ~ mask ) + size ; <nl> struct RangeState { <nl> auto const mapFrontier = low_map . load ( std : : memory_order_acquire ) ; <nl> auto oldUse = low_use . load ( std : : memory_order_acquire ) ; <nl> auto const mask = align - 1 ; <nl> - assert ( ( align & mask ) = = 0 ) ; <nl> + assertx ( ( align & mask ) = = 0 ) ; <nl> do { <nl> auto const aligned = ( oldUse + mask ) & ~ mask ; <nl> auto const newUse = aligned + size ; <nl> struct RangeState { <nl> auto const mapFrontier = high_map . load ( std : : memory_order_acquire ) ; <nl> auto oldUse = high_use . load ( std : : memory_order_acquire ) ; <nl> auto const mask = align - 1 ; <nl> - assert ( ( align & mask ) = = 0 ) ; <nl> + assertx ( ( align & mask ) = = 0 ) ; <nl> do { <nl> auto const newUse = ( oldUse - size ) & ~ mask ; <nl> / / Need to add more mapping . <nl> struct RangeState { <nl> / / the operation was successful . <nl> bool tryFreeLow ( void * ptr , size_t size ) { <nl> auto const p = reinterpret_cast < uintptr_t > ( ptr ) ; <nl> - assert ( p < low_use . load ( std : : memory_order_relaxed ) ) ; <nl> - assert ( p > = low ( ) ) ; <nl> + assertx ( p < low_use . load ( std : : memory_order_relaxed ) ) ; <nl> + assertx ( p > = low ( ) ) ; <nl> uintptr_t expected = p + size ; <nl> return low_use . compare_exchange_strong ( expected , p , <nl> std : : memory_order_relaxed ) ; <nl>
| Don ' t use assert ( ) - use assertx ( ) instead . | facebook/hhvm | 5ffcf000400bf0af2b0847c25d0e52ae4bf3cea8 | 2020-09-16T21:52:22Z |
mmm a / PowerEditor / src / ScitillaComponent / FindReplaceDlg . cpp <nl> ppp b / PowerEditor / src / ScitillaComponent / FindReplaceDlg . cpp <nl> void Finder : : add ( FoundInfo fi , SearchResultMarking mi , const TCHAR * foundline ) <nl> { <nl> _pMainFoundInfos - > push_back ( fi ) ; <nl> <nl> - NativeLangSpeaker * pNativeSpeaker = ( NppParameters : : getInstance ( ) ) . getNativeLangSpeaker ( ) ; <nl> - static generic_string lineStr = pNativeSpeaker - > getLocalizedStrFromID ( " find - result - line - prefix " , TEXT ( " Line " ) ) ; <nl> generic_string str = TEXT ( " \ t " ) ; <nl> - str + = lineStr ; <nl> + str + = _prefixLineStr ; <nl> str + = TEXT ( " " ) ; <nl> <nl> TCHAR lnb [ 16 ] ; <nl> void Finder : : copy ( ) <nl> <nl> void Finder : : beginNewFilesSearch ( ) <nl> { <nl> - / / _scintView . execute ( SCI_SETLEXER , SCLEX_NULL ) ; <nl> + NativeLangSpeaker * pNativeSpeaker = ( NppParameters : : getInstance ( ) ) . getNativeLangSpeaker ( ) ; <nl> + _prefixLineStr = pNativeSpeaker - > getLocalizedStrFromID ( " find - result - line - prefix " , TEXT ( " Line " ) ) ; <nl> + <nl> <nl> _scintView . execute ( SCI_SETCURRENTPOS , 0 ) ; <nl> _pMainFoundInfos = _pMainFoundInfos = = & _foundInfos1 ? & _foundInfos2 : & _foundInfos1 ; <nl> mmm a / PowerEditor / src / ScitillaComponent / FindReplaceDlg . h <nl> ppp b / PowerEditor / src / ScitillaComponent / FindReplaceDlg . h <nl> protected : <nl> int _lastSearchHeaderPos = 0 ; <nl> <nl> bool _canBeVolatiled = true ; <nl> - <nl> bool _longLinesAreWrapped = false ; <nl> <nl> + generic_string _prefixLineStr ; <nl> + <nl> void setFinderReadOnly ( bool isReadOnly ) { <nl> _scintView . execute ( SCI_SETREADONLY , isReadOnly ) ; <nl> } ; <nl> mmm a / scintilla / lexers / LexSearchResult . cxx <nl> ppp b / scintilla / lexers / LexSearchResult . cxx <nl> static void ColouriseSearchResultLine ( SearchResultMarkings * pMarkings , char * lin <nl> <nl> SearchResultMarking mi = pMarkings - > _markings [ linenum ] ; <nl> <nl> - currentPos + = 2 ; / / skip " : " <nl> size_t match_start = startLine + mi . _start - 1 ; <nl> size_t match_end = startLine + mi . _end - 1 ; <nl> <nl>
| Enhance localization of " Line " of Search result | notepad-plus-plus/notepad-plus-plus | 6c3ecacdb7cf3d301b4b2fc74f0770ff4bf13ab5 | 2020-12-09T15:25:35Z |
mmm a / Installation / Jenkins / Jenkinsfile <nl> ppp b / Installation / Jenkins / Jenkinsfile <nl> <nl> node { <nl> if ( isUnix ( ) ) { <nl> echo ' Hello Unix ' <nl> - mkdir ( ' build ' ) <nl> + folder ( ' build ' ) <nl> dir ( ' build ' ) { <nl> / / s ome block <nl> sh ' cmake . . ' <nl>
| next test | arangodb/arangodb | 2cbef516eb33625bae5c52866b5879880e2c78e9 | 2016-07-08T09:26:32Z |
new file mode 100644 <nl> index 0000000000 . . 79cec4fba1 <nl> mmm / dev / null <nl> ppp b / tests / swoole_socket_coro / fd . phpt <nl> <nl> + - - TEST - - <nl> + swoole_socket_coro : fd <nl> + - - SKIPIF - - <nl> + < ? php require __DIR__ . ' / . . / include / skipif . inc ' ; ? > <nl> + - - FILE - - <nl> + < ? php <nl> + require __DIR__ . ' / . . / include / bootstrap . php ' ; <nl> + $ sockets = [ ] ; <nl> + for ( $ n = MAX_REQUESTS ; $ n - - ; ) { <nl> + $ sockets [ ] = new Swoole \ Coroutine \ Socket ( AF_INET , SOCK_STREAM , IPPROTO_IP ) ; <nl> + if ( count ( $ sockets ) > 1 ) { <nl> + assert ( end ( $ sockets ) - > fd = = = prev ( $ sockets ) - > fd + 1 ) ; <nl> + } <nl> + } <nl> + echo " DONE \ n " ; <nl> + ? > <nl> + - - EXPECT - - <nl> + DONE <nl>
| Add socket fd test . | swoole/swoole-src | 2442ba8ef506d84af702aef4ed281fa61f58a414 | 2019-03-05T13:01:26Z |
mmm a / include / swift / AST / DiagnosticsParse . def <nl> ppp b / include / swift / AST / DiagnosticsParse . def <nl> ERROR ( expected_parameter_type , PointsToFirstBadToken , <nl> ERROR ( expected_parameter_name , PointsToFirstBadToken , <nl> " expected parameter name followed by ' : ' " , ( ) ) <nl> ERROR ( expected_parameter_colon , PointsToFirstBadToken , <nl> - " expected ' : ' following argumant label and parameter name " , ( ) ) <nl> + " expected ' : ' following argument label and parameter name " , ( ) ) <nl> ERROR ( missing_parameter_type , PointsToFirstBadToken , <nl> " parameter requires an explicit type " , ( ) ) <nl> ERROR ( multiple_parameter_ellipsis , none , <nl>
| AST : Fix a typo in a diagnostic | apple/swift | c25e41c34c13009610e0efbd8af281d292d606d3 | 2017-10-25T03:45:49Z |
mmm a / tools / clang_tidy . py <nl> ppp b / tools / clang_tidy . py <nl> def run_shell_command ( arguments ) : <nl> try : <nl> output = subprocess . check_output ( arguments ) . decode ( ) . strip ( ) <nl> except subprocess . CalledProcessError : <nl> - raise RuntimeError ( " Error executing { } : { } " . format ( " " . join ( arguments ) , output ) ) <nl> + _ , error , _ = sys . exc_info ( ) <nl> + error_output = error . output . decode ( ) . strip ( ) <nl> + raise RuntimeError ( " Error executing { } : { } " . format ( " " . join ( arguments ) , error_output ) ) <nl> <nl> return output <nl> <nl>
|
Fix clang_tidy . py
|
pytorch/pytorch
|
09369fa9d79b549e3502ec7bb25aa1daf3a11916
|
2018-11-09T19:46:50Z
|
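A minimal standalone sketch of the error-handling pattern in the clang_tidy.py fix above: the original code referenced a variable that is never assigned when check_output raises, while the fix reads the captured output from the exception object itself. The `except ... as error` binding here is equivalent to the sys.exc_info() retrieval used in the diff; the commented call at the end is only an illustration.

import subprocess

def run_shell_command(arguments):
    try:
        return subprocess.check_output(arguments).decode().strip()
    except subprocess.CalledProcessError as error:
        # error.output carries whatever stdout the failing command produced,
        # which is what the fixed code reports instead of the unassigned `output`.
        error_output = error.output.decode().strip()
        raise RuntimeError("Error executing {}: {}".format(" ".join(arguments), error_output))

# run_shell_command(["false"])  # raises RuntimeError instead of tripping over the unassigned variable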
mmm a / Makefile <nl> ppp b / Makefile <nl> ifeq ( $ ( USE_OPENMP ) , 1 ) <nl> endif <nl> <nl> ifeq ( $ ( USE_CUDNN ) , 1 ) <nl> - CFLAGS + = - DMSHADOW_USE_CUDNN = 1 - I $ ( USE_CUDNN_PATH ) <nl> - LDFLAGS + = - L $ ( USE_CUDNN_PATH ) - lcudnn <nl> + CFLAGS + = - DMSHADOW_USE_CUDNN = 1 <nl> + LDFLAGS + = - lcudnn <nl> endif <nl> <nl> ifeq ( $ ( USE_THREADED_ENGINE ) , 1 ) <nl> mmm a / make / config . mk <nl> ppp b / make / config . mk <nl> USE_CUDNN = 0 <nl> <nl> # add the path to CUDNN libary to link and compile flag <nl> # if you do not need that , or do not have that , leave it as NONE <nl> - USE_CUDNN_PATH = NONE <nl> + # ( NOTE : not enable at this moment ) <nl> + # USE_CUDNN_PATH = NONE <nl> <nl> # whether use opencv during compilation <nl> # you can disable it , however , you will not able to use <nl>
|
comment out USE_CUDNN_PATH
|
apache/incubator-mxnet
|
468397116e8fc69633c087fbc306f9d1d9a11766
|
2015-09-23T18:42:35Z
|
mmm a / example / cifar10 / cifar10 . py <nl> ppp b / example / cifar10 / cifar10 . py <nl> <nl> import sys <nl> sys . path . append ( " . . / . . / tests / python " ) <nl> import get_data <nl> - <nl> + import time <nl> <nl> " " " <nl> CXXNET Result : <nl> def ConvFactory ( * * kwargs ) : <nl> param = copy . copy ( kwargs ) <nl> act = param [ " act_type " ] <nl> del param [ " act_type " ] <nl> + param [ " workspace " ] = 512 <nl> param [ " name " ] = " conv % d " % conv_cnt <nl> - param [ " nstep " ] = 64 <nl> conv = mx . symbol . Convolution ( * * param ) <nl> bn = mx . symbol . BatchNorm ( data = conv , name = " bn % d " % conv_cnt ) <nl> relu = mx . symbol . Activation ( data = bn , name = " % s % d " % ( act , conv_cnt ) , act_type = act ) <nl> def DownsampleFactory ( data , ch_3x3 , stride = 2 ) : <nl> param [ " num_filter " ] = ch_3x3 <nl> param [ " act_type " ] = " relu " <nl> param [ " data " ] = data <nl> - param [ " nstep " ] = 100 <nl> param [ " pad " ] = ( 1 , 1 ) <nl> conv3x3 = ConvFactory ( * * param ) <nl> # pool <nl> del param [ " num_filter " ] <nl> del param [ " act_type " ] <nl> - del param [ " nstep " ] <nl> del param [ " pad " ] <nl> param [ " pool_type " ] = " max " <nl> param [ " name " ] = " pool % d " % pool_cnt <nl> def SimpleFactory ( data , ch_1x1 , ch_3x3 ) : <nl> param [ " stride " ] = ( 1 , 1 ) <nl> param [ " act_type " ] = " relu " <nl> param [ " data " ] = data <nl> - param [ " nstep " ] = 128 <nl> conv1x1 = ConvFactory ( * * param ) <nl> <nl> # 3x3 <nl> def RandomInit ( narray ) : <nl> in3a = SimpleFactory ( conv1 , 32 , 32 ) <nl> in3b = SimpleFactory ( in3a , 32 , 48 ) <nl> in3c = DownsampleFactory ( in3b , 80 ) <nl> - in4a = SimpleFactory ( in3c , 112 , 38 ) <nl> + in4a = SimpleFactory ( in3c , 112 , 48 ) <nl> in4b = SimpleFactory ( in4a , 96 , 64 ) <nl> in4c = SimpleFactory ( in4b , 80 , 80 ) <nl> in4d = SimpleFactory ( in4c , 48 , 96 ) <nl> def RandomInit ( narray ) : <nl> fc = mx . symbol . FullyConnected ( data = flatten , num_hidden = 10 , name = " fc1 " ) <nl> loss = mx . symbol . Softmax ( data = fc , name = " sm " ) <nl> <nl> - args_list = loss . list_arguments ( ) <nl> <nl> + epoch = 9 <nl> + lr = 0 . 05 <nl> + wd = 0 . 0001 <nl> + momentum = 0 . 9 <nl> <nl> batch_size = 128 <nl> data_shape = ( batch_size , 3 , 28 , 28 ) <nl> - arg_shapes , out_shapes , aux_shapes = loss . infer_shape ( data = data_shape ) <nl> <nl> - arg_narrays = [ mx . narray . zeros ( shape , ctx = mx . Context ( " gpu " ) ) for shape in arg_shapes ] <nl> - grad_narrays = [ mx . narray . zeros ( shape , ctx = mx . Context ( " gpu " ) ) for shape in arg_shapes ] <nl> - mom_narrays = [ mx . narray . zeros ( shape , ctx = mx . Context ( " gpu " ) ) for shape in arg_shapes ] <nl> - aux_narrays = [ mx . narray . zeros ( shape , ctx = mx . Context ( " gpu " ) ) for shape in aux_shapes ] <nl> + in_data = mx . narray . empty ( data_shape , mx . gpu ( ) ) <nl> + executor = loss . simple_bind ( mx . gpu ( ) , { " data " : in_data } ) <nl> + out_narray = executor . heads ( ) [ 0 ] <nl> + pred = mx . narray . zeros ( out_narray . shape ) <nl> <nl> - inputs = dict ( zip ( args_list , arg_narrays ) ) <nl> + arg_narrays , grad_narrays = executor . list_arguments ( ) <nl> + momentum_narrays = [ mx . narray . zeros ( item . shape , mx . gpu ( ) ) for item in grad_narrays ] <nl> <nl> - name2shape = dict ( zip ( args_list , arg_shapes ) ) <nl> - pred = mx . narray . zeros ( out_shapes [ 0 ] ) <nl> + inputs = dict ( zip ( loss . 
list_arguments ( ) , arg_narrays ) ) <nl> + block = zip ( grad_narrays , arg_narrays , momentum_narrays ) <nl> <nl> np . random . seed ( 0 ) <nl> # set random weight <nl> <nl> - for name , narray in inputs . items ( ) : <nl> + for name , narray in zip ( loss . list_arguments ( ) , arg_narrays ) : <nl> if " weight " in name : <nl> narray [ : ] = np . random . uniform ( - 0 . 1 , 0 . 1 , narray . shape ) <nl> if " bias " in name : <nl> def RandomInit ( narray ) : <nl> if " beta " in name : <nl> narray [ : ] = 0 . 0 <nl> <nl> - # bind executer <nl> - # TODO ( bing ) : think of a better bind interface <nl> - executor = loss . bind ( mx . Context ( ' gpu ' ) , arg_narrays , grad_narrays , ' write ' , aux_narrays ) <nl> - # update <nl> - <nl> - out_narray = executor . heads ( ) [ 0 ] <nl> - <nl> - epoch = 9 <nl> - lr = 0 . 05 <nl> - wd = 0 . 0001 <nl> - momentum = 0 . 9 <nl> - <nl> def Update ( grad , weight , mom ) : <nl> mom [ : ] * = momentum <nl> mom [ : ] + = - lr * ( grad / batch_size + wd * weight ) <nl> weight [ : ] + = mom <nl> <nl> - block = list ( zip ( grad_narrays , arg_narrays , mom_narrays ) ) <nl> - <nl> # check data <nl> get_data . GetCifar10 ( ) <nl> <nl> def Update ( grad , weight , mom ) : <nl> batch_size = batch_size , <nl> nthread = 1 ) <nl> <nl> - tmp_label = mx . narray . zeros ( name2shape [ " sm_label " ] ) <nl> + tmp_label = mx . narray . zeros ( inputs [ " sm_label " ] . shape ) <nl> <nl> - def progress ( count , total , suffix = ' ' ) : <nl> - bar_len = 80 <nl> + def progress ( count , total , epoch , toc ) : <nl> + bar_len = 60 <nl> filled_len = int ( round ( bar_len * count / float ( total ) ) ) <nl> <nl> percents = round ( 100 . 0 * count / float ( total ) , 1 ) <nl> bar = ' = ' * filled_len + ' - ' * ( bar_len - filled_len ) <nl> - <nl> + tic = time . time ( ) <nl> + speed = batch_size / float ( tic - toc ) <nl> + suffix = " Epoch % d , Speed : % . 2f pic / sec " % ( epoch , speed ) <nl> sys . stdout . write ( ' [ % s ] % s % s . . . % s \ r ' % ( bar , percents , ' % ' , suffix ) ) <nl> <nl> def test_cifar ( ) : <nl> def test_cifar ( ) : <nl> val_nbatch = 0 <nl> all_train_bacth = 50000 / float ( batch_size ) <nl> for data , label in train_dataiter : <nl> - progress ( train_nbatch , all_train_bacth , " Epoch % d " % i ) <nl> + toc = time . time ( ) <nl> label = label . asnumpy ( ) . flatten ( ) <nl> tmp_label [ : ] = label <nl> inputs [ " data " ] [ : ] = data <nl> def test_cifar ( ) : <nl> <nl> for grad , weight , mom in block : <nl> Update ( grad , weight , mom ) <nl> + progress ( train_nbatch , all_train_bacth , i , toc ) <nl> <nl> # evaluate <nl> for data , label in test_dataiter : <nl> mmm a / python / mxnet / __init__ . py <nl> ppp b / python / mxnet / __init__ . py <nl> <nl> " " " <nl> from __future__ import absolute_import <nl> <nl> - from . context import Context , current_context <nl> + from . context import Context , current_context , cpu , gpu <nl> from . base import MXNetError <nl> from . import narray <nl> from . import symbol <nl> mmm a / python / mxnet / context . py <nl> ppp b / python / mxnet / context . py <nl> def __exit__ ( self , ptype , value , trace ) : <nl> # initialize the default context in Context <nl> Context . 
default_ctx = Context ( ' cpu ' , 0 ) <nl> <nl> + def cpu ( device_id = 0 ) : <nl> + " " " <nl> + Return CPU context <nl> + <nl> + Parameters <nl> + mmmmmmmmm - <nl> + device_id : int ( default = 0 ) <nl> + the device id of the device , needed for GPU <nl> + <nl> + Returns <nl> + mmmmmmmmm <nl> + A cpu context <nl> + " " " <nl> + return Context ( ' cpu ' , device_id ) <nl> + <nl> + def gpu ( device_id = 0 ) : <nl> + " " " <nl> + Return CPU context <nl> + <nl> + Parameters <nl> + mmmmmmmmm - <nl> + device_id : int ( default = 0 ) <nl> + the device id of the device , needed for GPU <nl> + <nl> + Returns <nl> + mmmmmmmmm <nl> + A cpu context <nl> + " " " <nl> + return Context ( ' gpu ' , device_id ) <nl> + <nl> def current_context ( ) : <nl> " " " Return the current context . <nl> <nl> mmm a / python / mxnet / executor . py <nl> ppp b / python / mxnet / executor . py <nl> def __init__ ( self , handle ) : <nl> if not isinstance ( handle , ExecutorHandle ) : <nl> raise TypeError ( " Handle type error " ) <nl> self . handle = handle <nl> + self . arg_narrays = [ ] <nl> + self . grad_narrays = [ ] <nl> + self . auxiliary_states = [ ] <nl> + <nl> + def list_arguments ( self , with_grad = True ) : <nl> + " " " Return arguments ( and grad for arguments ) <nl> + <nl> + Parameters <nl> + mmmmmmmmm - <nl> + with_grad : bool <nl> + whether return args with grad <nl> + <nl> + Returns <nl> + mmmmmm - <nl> + if with_grad = True , return ( args , grad ) pair list <nl> + otherwise return args list only <nl> + Note : args sequence is same to symbol . list_arguments ( ) <nl> + " " " <nl> + if with_grad : <nl> + return self . arg_narrays , self . grad_narrays <nl> + else : <nl> + return self . arg_narrays <nl> + <nl> + def list_auxiliary_states ( ) : <nl> + " " " Return auxiliary states of executor <nl> + Note : auxiliary states is same to symbol . list_auxiliary_states ( ) <nl> + " " " <nl> + return self . auxiliary_states <nl> <nl> def forward ( self , is_train = True ) : <nl> " " " Do forward . <nl> mmm a / python / mxnet / narray . py <nl> ppp b / python / mxnet / narray . py <nl> def zeros ( shape , ctx = None ) : <nl> out : Array <nl> The created NArray . <nl> " " " <nl> - if ctx is None : <nl> - ctx = Context . default_ctx <nl> - arr = NArray ( handle = _new_alloc_handle ( shape , ctx , False ) ) <nl> + arr = empty ( shape , ctx ) <nl> arr [ : ] = 0 . 0 <nl> return arr <nl> <nl> def ones ( shape , ctx = None ) : <nl> out : Array <nl> The created NArray . <nl> " " " <nl> - if ctx is None : <nl> - ctx = Context . default_ctx <nl> - arr = NArray ( handle = _new_alloc_handle ( shape , ctx , False ) ) <nl> + arr = empty ( shape , ctx ) <nl> arr [ : ] = 1 . 0 <nl> return arr <nl> <nl> <nl> - <nl> - <nl> def array ( source_array , ctx = None ) : <nl> " " " Create a new NArray that copies content from source_array . <nl> <nl> mmm a / python / mxnet / symbol . py <nl> ppp b / python / mxnet / symbol . py <nl> <nl> from . base import NArrayHandle , ExecutorHandle , SymbolHandle <nl> from . base import check_call <nl> from . context import Context <nl> - from . narray import NArray <nl> + from . narray import NArray , zeros <nl> from . 
executor import Executor <nl> <nl> <nl> def _get_narray_handle ( arg_key , args , arg_names , allow_missing ) : <nl> raise TypeError ( ' Only Accept list of NArrays or dict of str - > NArray ' ) <nl> return c_array ( NArrayHandle , arg_handles ) <nl> <nl> + def simple_bind ( self , ctx , args , grad_req = ' write ' ) : <nl> + " " " Simply bind current symbol to get an executor <nl> + Parameters <nl> + mmmmmmmmm - <nl> + ctx : Context <nl> + The device context the generated executor to run on . <nl> + <nl> + args : list of NArray or dict of str - > NArray <nl> + Input arguments to the symbol . <nl> + - type is dict of str - > NArray , then it maps the name of arguments <nl> + to the corresponding NArray , <nl> + - Not all the arguments must be provided . <nl> + Returns <nl> + mmmmmm - <nl> + executor : mxnet . Executor <nl> + The generated Executor <nl> + " " " <nl> + if not isinstance ( args , dict ) : <nl> + raise TypeError ( " args must be dict of str - > NArray " ) <nl> + input_shapes = dict ( ( arr [ 0 ] , arr [ 1 ] . shape ) for arr in args . items ( ) ) <nl> + arg_shapes , out_shapes , aux_shapes = self . infer_shape ( * * input_shapes ) <nl> + if arg_shapes = = None : <nl> + raise ValueError ( " Input node is not complete " ) <nl> + # alloc space <nl> + arg_narrays = [ ] <nl> + for name , shape in zip ( self . list_arguments ( ) , arg_shapes ) : <nl> + if name in args : <nl> + arg_narrays . append ( args [ name ] ) <nl> + else : <nl> + arg_narrays . append ( zeros ( shape , ctx ) ) <nl> + # TODO ( bing ) : specail treat input data grad <nl> + grad_narrays = [ zeros ( shape , ctx ) for shape in arg_shapes ] <nl> + aux_narrays = [ zeros ( shape , ctx ) for shape in aux_shapes ] <nl> + executor = self . bind ( ctx , arg_narrays , grad_narrays , grad_req , aux_narrays ) <nl> + executor . arg_narrays = arg_narrays <nl> + executor . grad_narrays = grad_narrays <nl> + executor . auxiliary_states = aux_narrays <nl> + <nl> + return executor <nl> + <nl> def bind ( self , ctx , args , args_grad = None , grad_req = ' write ' , aux_states = None ) : <nl> " " " Bind current symbol to get an executor . <nl> <nl> mmm a / src / operator / batch_norm - inl . h <nl> ppp b / src / operator / batch_norm - inl . h <nl> class BatchNormProp : public OperatorProperty { <nl> <nl> Operator * CreateOperator ( Context ctx ) const ; <nl> <nl> + std : : vector < ResourceRequest > BackwardResource ( ) const override { <nl> + return { Resource : : kTempSpace } ; <nl> + } <nl> + <nl> private : <nl> BatchNormParam param_ ; <nl> } ; / / class BatchNormProp <nl> mmm a / src / operator / convolution - inl . h <nl> ppp b / src / operator / convolution - inl . h <nl> struct ConvolutionParam : public dmlc : : Parameter < ConvolutionParam > { <nl> TShape pad ; <nl> uint32_t num_filter ; <nl> uint32_t num_group ; <nl> - uint32_t nstep ; <nl> + uint32_t workspace ; <nl> bool no_bias ; <nl> DMLC_DECLARE_PARAMETER ( ConvolutionParam ) { <nl> int shape [ ] = { 1 , 1 } ; <nl> struct ConvolutionParam : public dmlc : : Parameter < ConvolutionParam > { <nl> . describe ( " convolution filter ( channel ) number " ) ; <nl> DMLC_DECLARE_FIELD ( num_group ) . set_default ( 1 ) <nl> . describe ( " number of groups partition " ) ; <nl> - DMLC_DECLARE_FIELD ( nstep ) . set_default ( 2 ) . set_range ( 1 , 10000 ) <nl> - . describe ( " process n images once " ) ; <nl> + DMLC_DECLARE_FIELD ( workspace ) . set_default ( 128 ) . set_range ( 1 , 10000 ) <nl> + . 
describe ( " Tmp workspace for convolution ( MB ) " ) ; <nl> DMLC_DECLARE_FIELD ( no_bias ) . set_default ( false ) <nl> . describe ( " Whether to disable bias parameter . " ) ; <nl> } <nl> class ConvolutionOp : public Operator { <nl> Tensor < xpu , 4 > out = out_data [ kOut ] . get < xpu , 4 , real_t > ( s ) ; <nl> this - > InitTemp ( ctx , data . shape_ , out . shape_ ) ; <nl> const index_t nbatch = data . size ( 0 ) ; <nl> - for ( index_t i = 0 ; i < nbatch ; i + = param_ . nstep ) { <nl> - const index_t step = std : : min ( param_ . nstep , nbatch - i ) ; <nl> + for ( index_t i = 0 ; i < nbatch ; i + = nstep_ ) { <nl> + const index_t step = std : : min ( nstep_ , nbatch - i ) ; <nl> temp_col_ . Resize ( mshadow : : Shape2 ( shape_colunit_ [ 0 ] , <nl> shape_colunit_ [ 1 ] * step ) ) ; <nl> temp_dst_ . Resize ( mshadow : : Shape3 ( shape_dstunit_ [ 0 ] , <nl> class ConvolutionOp : public Operator { <nl> Tensor < xpu , 3 > gwmat = in_grad [ kWeight ] . get_with_shape < xpu , 3 , real_t > ( wmat_shape , s ) ; <nl> this - > InitTemp ( ctx , data . shape_ , grad . shape_ ) ; <nl> const index_t nbatch = data . size ( 0 ) ; <nl> - for ( index_t i = 0 ; i < nbatch ; i + = param_ . nstep ) { <nl> - const index_t step = std : : min ( param_ . nstep , nbatch - i ) ; <nl> + for ( index_t i = 0 ; i < nbatch ; i + = nstep_ ) { <nl> + const index_t step = std : : min ( nstep_ , nbatch - i ) ; <nl> temp_col_ . Resize ( Shape2 ( shape_colunit_ [ 0 ] , <nl> shape_colunit_ [ 1 ] * step ) ) ; <nl> temp_dst_ . Resize ( Shape3 ( shape_dstunit_ [ 0 ] , <nl> class ConvolutionOp : public Operator { <nl> shape_dstunit_ = mshadow : : Shape3 ( param_ . num_group , <nl> param_ . num_filter / param_ . num_group , <nl> oshape [ 2 ] * oshape [ 3 ] ) ; <nl> - int nop = ( ishape [ 0 ] + param_ . nstep - 1 ) / param_ . nstep ; <nl> - param_ . nstep = ( ishape [ 0 ] + nop - 1 ) / nop ; <nl> + const uint32_t workspace_size = param_ . workspace < < 18 ; <nl> + nstep_ = std : : max ( std : : min ( static_cast < index_t > ( workspace_size / shape_colunit_ . Size ( ) ) , <nl> + ishape [ 0 ] ) , 1U ) ; <nl> + int nop = ( ishape [ 0 ] + nstep_ - 1 ) / nstep_ ; <nl> + nstep_ = ( ishape [ 0 ] + nop - 1 ) / nop ; <nl> mshadow : : Stream < xpu > * s = ctx . get_stream < xpu > ( ) ; <nl> temp_col_ . set_stream ( s ) ; <nl> temp_dst_ . set_stream ( s ) ; <nl> temp_col_ . Resize ( mshadow : : Shape2 ( shape_colunit_ [ 0 ] , <nl> - shape_colunit_ [ 1 ] * param_ . nstep ) ) ; <nl> + shape_colunit_ [ 1 ] * nstep_ ) ) ; <nl> temp_dst_ . Resize ( mshadow : : Shape3 ( shape_dstunit_ [ 0 ] , <nl> shape_dstunit_ [ 1 ] , <nl> - shape_dstunit_ [ 2 ] * param_ . 
nstep ) ) ; <nl> + shape_dstunit_ [ 2 ] * nstep_ ) ) ; <nl> } <nl> <nl> ConvolutionParam param_ ; <nl> class ConvolutionOp : public Operator { <nl> mshadow : : TensorContainer < xpu , 3 > temp_dst_ ; <nl> mshadow : : Shape < 2 > shape_colunit_ ; <nl> mshadow : : Shape < 3 > shape_dstunit_ ; <nl> + index_t nstep_ ; <nl> } ; / / class ConvolutionOp <nl> <nl> template < typename xpu > <nl> class ConvolutionProp : public OperatorProperty { <nl> <nl> Operator * CreateOperator ( Context ctx ) const ; <nl> <nl> + std : : vector < ResourceRequest > ForwardResource ( ) const override { <nl> + return { Resource : : kTempSpace } ; <nl> + } <nl> + <nl> + std : : vector < ResourceRequest > BackwardResource ( ) const override { <nl> + return { Resource : : kTempSpace } ; <nl> + } <nl> + <nl> private : <nl> ConvolutionParam param_ ; <nl> } ; / / class ConvolutionProp <nl> mmm a / tests / python / test_conv . py <nl> ppp b / tests / python / test_conv . py <nl> def CalAcc ( out , label ) : <nl> # symbol net <nl> batch_size = 100 <nl> data = mx . symbol . Variable ( ' data ' ) <nl> - conv1 = mx . symbol . Convolution ( data = data , name = ' conv1 ' , num_filter = 32 , kernel = ( 3 , 3 ) , stride = ( 2 , 2 ) , nstep = 100 ) <nl> + conv1 = mx . symbol . Convolution ( data = data , name = ' conv1 ' , num_filter = 32 , kernel = ( 3 , 3 ) , stride = ( 2 , 2 ) ) <nl> bn1 = mx . symbol . BatchNorm ( data = conv1 , name = " bn1 " ) <nl> act1 = mx . symbol . Activation ( data = bn1 , name = ' relu1 ' , act_type = " relu " ) <nl> mp1 = mx . symbol . Pooling ( data = act1 , name = ' mp1 ' , kernel = ( 2 , 2 ) , stride = ( 2 , 2 ) , pool_type = ' max ' ) <nl> <nl> - conv2 = mx . symbol . Convolution ( data = mp1 , name = ' conv2 ' , num_filter = 32 , kernel = ( 3 , 3 ) , stride = ( 2 , 2 ) , nstep = 100 ) <nl> + conv2 = mx . symbol . Convolution ( data = mp1 , name = ' conv2 ' , num_filter = 32 , kernel = ( 3 , 3 ) , stride = ( 2 , 2 ) ) <nl> bn2 = mx . symbol . BatchNorm ( data = conv2 , name = " bn2 " ) <nl> act2 = mx . symbol . Activation ( data = bn2 , name = ' relu2 ' , act_type = " relu " ) <nl> mp2 = mx . symbol . Pooling ( data = act2 , name = ' mp2 ' , kernel = ( 2 , 2 ) , stride = ( 2 , 2 ) , pool_type = ' max ' ) <nl>
|
simple bind
|
apache/incubator-mxnet
|
3e8e1e063f33cbd5cbf844d630fe7f1648bd7957
|
2015-09-09T18:13:37Z
|
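The simple_bind helper added in the MXNet diff above reduces to: infer every argument shape from the supplied input shapes, allocate zero-filled buffers for the arguments the caller did not pass in plus one gradient buffer per argument, then fall through to the ordinary bind call. A rough framework-free sketch of that allocation step, using numpy stand-ins rather than the MXNet API (the names and shapes in the comment are made up):

import numpy as np

def simple_bind_sketch(arg_names, arg_shapes, provided):
    # provided maps a subset of arg_names to caller-supplied arrays (e.g. the input data).
    arg_arrays, grad_arrays = [], []
    for name, shape in zip(arg_names, arg_shapes):
        arg_arrays.append(provided.get(name, np.zeros(shape, dtype=np.float32)))
        grad_arrays.append(np.zeros(shape, dtype=np.float32))
    return arg_arrays, grad_arrays

# simple_bind_sketch(["data", "fc1_weight"], [(128, 3, 28, 28), (10, 2352)], {"data": batch})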
mmm a / src / fsck / checker . cc <nl> ppp b / src / fsck / checker . cc <nl> void check_and_load_diff_log ( slicecx_t & cx , diff_log_errors * errs ) { <nl> struct node_error { <nl> block_id_t block_id ; <nl> btree_block_t : : error block_not_found_error ; / / must be none <nl> - bool block_underfull : 1 ; / / should be false <nl> bool bad_magic : 1 ; / / should be false <nl> bool noncontiguous_offsets : 1 ; / / should be false <nl> bool value_out_of_buf : 1 ; / / must be false <nl> struct node_error { <nl> std : : string msg ; <nl> <nl> explicit node_error ( block_id_t block_id ) : block_id ( block_id ) , block_not_found_error ( btree_block_t : : none ) , <nl> - block_underfull ( false ) , bad_magic ( false ) , <nl> + bad_magic ( false ) , <nl> noncontiguous_offsets ( false ) , value_out_of_buf ( false ) , <nl> keys_too_big ( false ) , keys_in_wrong_slice ( false ) , <nl> out_of_order ( false ) , value_errors_exist ( false ) , <nl> last_internal_node_key_nonempty ( false ) { } <nl> <nl> bool is_bad ( ) const { <nl> - return block_not_found_error ! = btree_block_t : : none | | block_underfull | | bad_magic <nl> + return block_not_found_error ! = btree_block_t : : none | | bad_magic <nl> | | noncontiguous_offsets | | value_out_of_buf | | keys_too_big | | keys_in_wrong_slice <nl> | | out_of_order | | value_errors_exist | | ! msg . empty ( ) ; <nl> } <nl> void check_subtree ( slicecx_t & cx , block_id_t id , const btree_key_t * lo , const bt <nl> / / TODO LOOF : This is memcached - specific , and heh , that ' s bad . <nl> value_sizer_t < memcached_value_t > sizer ( cx . block_size ( ) ) ; <nl> <nl> - if ( lo ! = NULL & & hi ! = NULL ) { <nl> - / / ( We ' re happy with an underfull root block . ) <nl> - / / TODO LOOF : is is_underfull a safe function for fsck to call ? <nl> - if ( node : : is_underfull ( & sizer , reinterpret_cast < node_t * > ( node . buf ) ) ) { <nl> - node_err . block_underfull = true ; <nl> - } <nl> - } <nl> - <nl> if ( reinterpret_cast < node_t * > ( node . buf ) - > magic = = sizer . btree_leaf_magic ( ) ) { <nl> check_subtree_leaf_node ( cx , reinterpret_cast < leaf_node_t * > ( node . buf ) , lo , hi , errs , & node_err ) ; <nl> } else if ( reinterpret_cast < internal_node_t * > ( node . buf ) - > magic = = internal_node_t : : expected_magic ) { <nl> bool report_subtree_errors ( const subtree_errors * errs ) { <nl> if ( e . block_not_found_error ! = btree_block_t : : none ) { <nl> printf ( " block not found : % s \ n " , btree_block_t : : error_name ( e . block_not_found_error ) ) ; <nl> } else { <nl> - printf ( " % s % s % s % s % s % s % s % s % s % s \ n " , <nl> - e . block_underfull ? " block_underfull " : " " , <nl> + printf ( " % s % s % s % s % s % s % s % s % s \ n " , <nl> e . bad_magic ? " bad_magic " : " " , <nl> e . noncontiguous_offsets ? " noncontiguous_offsets " : " " , <nl> e . value_out_of_buf ? " value_out_of_buf " : " " , <nl>
|
Removed underfullness check in fsck , since it was wrong .
|
rethinkdb/rethinkdb
|
e1d43bbbf2dfbb0fcc21ce15f18bc6f44e7d37ab
|
2011-08-17T22:11:19Z
|
mmm a / modules / core / src / convert . cpp <nl> ppp b / modules / core / src / convert . cpp <nl> static bool ocl_LUT ( InputArray _src , InputArray _lut , OutputArray _dst ) <nl> int sdepth = _src . depth ( ) ; <nl> <nl> UMat src = _src . getUMat ( ) , lut = _lut . getUMat ( ) ; <nl> - int dtype = CV_MAKETYPE ( ddepth , dcn ) ; <nl> - _dst . create ( src . size ( ) , dtype ) ; <nl> + _dst . create ( src . size ( ) , CV_MAKETYPE ( ddepth , dcn ) ) ; <nl> UMat dst = _dst . getUMat ( ) ; <nl> <nl> - size_t globalSize [ 2 ] = { dst . cols , dst . rows / 2 } ; <nl> + size_t globalSize [ 2 ] = { dst . cols , ( dst . rows + 3 ) / 4 } ; <nl> <nl> - cv : : String build_opt = format ( " - D dcn = % d - D lcn = % d - D srcT = % s - D dstT = % s " , dcn , lcn , <nl> + ocl : : Kernel k ( " LUT " , ocl : : core : : lut_oclsrc , format ( " - D dcn = % d - D lcn = % d - D srcT = % s - D dstT = % s " , dcn , lcn , <nl> ocl : : typeToStr ( sdepth ) , ocl : : memopTypeToStr ( ddepth ) <nl> - ) ; <nl> - <nl> - ocl : : Kernel kernel ; <nl> - if ( ( 4 = = lcn ) & & ( CV_8U = = sdepth ) ) <nl> - kernel . create ( " LUTC4 " , ocl : : core : : lut_oclsrc , build_opt ) ; <nl> - else if ( ( 3 = = lcn ) & & ( CV_8U = = sdepth ) ) <nl> - kernel . create ( " LUTC3 " , ocl : : core : : lut_oclsrc , build_opt ) ; <nl> - else <nl> - kernel . create ( " LUT " , ocl : : core : : lut_oclsrc , build_opt ) ; <nl> - if ( kernel . empty ( ) ) <nl> + ) ) ; <nl> + if ( k . empty ( ) ) <nl> return false ; <nl> <nl> - kernel . args ( ocl : : KernelArg : : ReadOnlyNoSize ( src ) , ocl : : KernelArg : : ReadOnlyNoSize ( lut ) , <nl> + k . args ( ocl : : KernelArg : : ReadOnlyNoSize ( src ) , ocl : : KernelArg : : ReadOnlyNoSize ( lut ) , <nl> ocl : : KernelArg : : WriteOnly ( dst ) ) ; <nl> <nl> - return kernel . run ( 2 , globalSize , NULL , true ) ; <nl> + return k . run ( 2 , globalSize , NULL , false ) ; <nl> } <nl> <nl> # endif <nl> mmm a / modules / core / src / opencl / lut . cl <nl> ppp b / modules / core / src / opencl / lut . cl <nl> <nl> / / <nl> / / <nl> <nl> - __kernel void LUTC4 ( __global const uchar * srcptr , int src_step , int src_offset , <nl> - __global const uchar * lutptr , int lut_step , int lut_offset , <nl> - __global uchar * dstptr , int dst_step , int dst_offset , int rows , int cols ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = 2 * get_global_id ( 1 ) ; <nl> - <nl> - __global const dstT * lut = ( __global const dstT * ) ( lutptr + lut_offset ) ; <nl> - <nl> - __local dstT lut_l [ 256 * lcn ] ; <nl> - int init = mad24 ( get_local_id ( 1 ) , get_local_size ( 0 ) , get_local_id ( 0 ) ) ; <nl> - int step = get_local_size ( 0 ) * get_local_size ( 1 ) ; <nl> - <nl> - for ( int i = init ; i < 256 * lcn ; i + = step ) <nl> - { <nl> - lut_l [ i + 0 ] = lut [ i + 0 ] ; <nl> - } <nl> - barrier ( CLK_LOCAL_MEM_FENCE ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - int src_index = mad24 ( y , src_step , mad24 ( x , ( int ) sizeof ( srcT ) * dcn , src_offset ) ) ; <nl> - int dst_index = mad24 ( y , dst_step , mad24 ( x , ( int ) sizeof ( dstT ) * dcn , dst_offset ) ) ; <nl> - <nl> - __global const uchar4 * src = ( __global const uchar4 * ) ( srcptr + src_index ) ; <nl> - int4 idx = convert_int4 ( src [ 0 ] ) * lcn + ( int4 ) ( 0 , 1 , 2 , 3 ) ; <nl> - __global dstT * dst = ( __global dstT * ) ( dstptr + dst_index ) ; <nl> - <nl> - dst [ 0 ] = lut_l [ idx . x ] ; <nl> - dst [ 1 ] = lut_l [ idx . y ] ; <nl> - dst [ 2 ] = lut_l [ idx . z ] ; <nl> - dst [ 3 ] = lut_l [ idx . 
w ] ; <nl> - <nl> - if ( y < rows - 1 ) <nl> - { <nl> - src = ( __global const uchar4 * ) ( srcptr + src_index + src_step ) ; <nl> - idx = convert_int4 ( src [ 0 ] ) * lcn + ( int4 ) ( 0 , 1 , 2 , 3 ) ; <nl> - dst = ( __global dstT * ) ( dstptr + dst_index + dst_step ) ; <nl> - <nl> - dst [ 0 ] = lut_l [ idx . x ] ; <nl> - dst [ 1 ] = lut_l [ idx . y ] ; <nl> - dst [ 2 ] = lut_l [ idx . z ] ; <nl> + # if lcn = = 1 <nl> + # if dcn = = 4 <nl> + # define LUT_OP ( num ) \ <nl> + uchar4 idx = vload4 ( 0 , ( __global const uchar * ) ( srcptr + src_index + num * src_step ) ) ; \ <nl> + dst = ( __global dstT * ) ( dstptr + dst_index + num * dst_step ) ; \ <nl> + dst [ 0 ] = lut_l [ idx . x ] ; \ <nl> + dst [ 1 ] = lut_l [ idx . y ] ; \ <nl> + dst [ 2 ] = lut_l [ idx . z ] ; \ <nl> dst [ 3 ] = lut_l [ idx . w ] ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - __kernel void LUTC3 ( __global const uchar * srcptr , int src_step , int src_offset , <nl> - __global const uchar * lutptr , int lut_step , int lut_offset , <nl> - __global uchar * dstptr , int dst_step , int dst_offset , int rows , int cols ) <nl> - { <nl> - int x = get_global_id ( 0 ) ; <nl> - int y = 2 * get_global_id ( 1 ) ; <nl> - <nl> - __global const dstT * lut = ( __global const dstT * ) ( lutptr + lut_offset ) ; <nl> - <nl> - __local dstT lut_l [ 256 * lcn ] ; <nl> - int init = mad24 ( get_local_id ( 1 ) , get_local_size ( 0 ) , get_local_id ( 0 ) ) ; <nl> - int step = get_local_size ( 0 ) * get_local_size ( 1 ) ; <nl> - <nl> - for ( int i = init ; i < 256 * lcn ; i + = step ) <nl> - { <nl> - lut_l [ i + 0 ] = lut [ i + 0 ] ; <nl> - } <nl> - barrier ( CLK_LOCAL_MEM_FENCE ) ; <nl> - <nl> - if ( x < cols & & y < rows ) <nl> - { <nl> - int src_index = mad24 ( y , src_step , mad24 ( x , ( int ) sizeof ( srcT ) * dcn , src_offset ) ) ; <nl> - int dst_index = mad24 ( y , dst_step , mad24 ( x , ( int ) sizeof ( dstT ) * dcn , dst_offset ) ) ; <nl> - <nl> - uchar3 src_pixel = vload3 ( 0 , ( __global const uchar * ) ( srcptr + src_index ) ) ; <nl> - int3 idx = convert_int3 ( src_pixel ) * lcn + ( int3 ) ( 0 , 1 , 2 ) ; <nl> - __global dstT * dst = ( __global dstT * ) ( dstptr + dst_index ) ; <nl> - <nl> - dst [ 0 ] = lut_l [ idx . x ] ; <nl> - dst [ 1 ] = lut_l [ idx . y ] ; <nl> - dst [ 2 ] = lut_l [ idx . z ] ; <nl> - if ( y < rows - 1 ) <nl> - { <nl> - uchar3 src_pixel = vload3 ( 0 , ( __global const uchar * ) ( srcptr + src_index + src_step ) ) ; <nl> - idx = convert_int3 ( src_pixel ) * lcn + ( int3 ) ( 0 , 1 , 2 ) ; <nl> - dst = ( __global dstT * ) ( dstptr + dst_index + dst_step ) ; <nl> - <nl> - dst [ 0 ] = lut_l [ idx . x ] ; <nl> + # elif dcn = = 3 <nl> + # define LUT_OP ( num ) \ <nl> + uchar3 idx = vload3 ( 0 , ( __global const uchar * ) ( srcptr + src_index + num * src_step ) ) ; \ <nl> + dst = ( __global dstT * ) ( dstptr + dst_index + num * dst_step ) ; \ <nl> + dst [ 0 ] = lut_l [ idx . x ] ; \ <nl> + dst [ 1 ] = lut_l [ idx . y ] ; \ <nl> + dst [ 2 ] = lut_l [ idx . z ] ; <nl> + # elif dcn = = 2 <nl> + # define LUT_OP ( num ) \ <nl> + uchar2 idx = vload2 ( 0 , ( __global const uchar * ) ( srcptr + src_index + num * src_step ) ) ; \ <nl> + dst = ( __global dstT * ) ( dstptr + dst_index + num * dst_step ) ; \ <nl> + dst [ 0 ] = lut_l [ idx . x ] ; \ <nl> dst [ 1 ] = lut_l [ idx . 
y ] ; <nl> + # elif dcn = = 1 <nl> + # define LUT_OP ( num ) \ <nl> + uchar idx = ( __global const uchar * ) ( srcptr + src_index + num * src_step ) [ 0 ] ; \ <nl> + dst = ( __global dstT * ) ( dstptr + dst_index + num * dst_step ) ; \ <nl> + dst [ 0 ] = lut_l [ idx ] ; <nl> + # else <nl> + # define LUT_OP ( num ) \ <nl> + src = ( __global const srcT * ) ( srcptr + src_index + num * src_step ) ; \ <nl> + dst = ( __global dstT * ) ( dstptr + dst_index + num * dst_step ) ; \ <nl> + for ( int cn = 0 ; cn < dcn ; + + cn ) \ <nl> + dst [ cn ] = lut_l [ src [ cn ] ] ; <nl> + # endif <nl> + # else <nl> + # if dcn = = 4 <nl> + # define LUT_OP ( num ) \ <nl> + uchar4 src_pixel = vload4 ( 0 , ( __global const uchar * ) ( srcptr + src_index + num * src_step ) ) ; \ <nl> + int4 idx = convert_int4 ( src_pixel ) * lcn + ( int4 ) ( 0 , 1 , 2 , 3 ) ; \ <nl> + dst = ( __global dstT * ) ( dstptr + dst_index + num * dst_step ) ; \ <nl> + dst [ 0 ] = lut_l [ idx . x ] ; \ <nl> + dst [ 1 ] = lut_l [ idx . y ] ; \ <nl> + dst [ 2 ] = lut_l [ idx . z ] ; \ <nl> + dst [ 3 ] = lut_l [ idx . w ] ; <nl> + # elif dcn = = 3 <nl> + # define LUT_OP ( num ) \ <nl> + uchar3 src_pixel = vload3 ( 0 , ( __global const uchar * ) ( srcptr + src_index + num * src_step ) ) ; \ <nl> + int3 idx = convert_int3 ( src_pixel ) * lcn + ( int3 ) ( 0 , 1 , 2 ) ; \ <nl> + dst = ( __global dstT * ) ( dstptr + dst_index + num * dst_step ) ; \ <nl> + dst [ 0 ] = lut_l [ idx . x ] ; \ <nl> + dst [ 1 ] = lut_l [ idx . y ] ; \ <nl> dst [ 2 ] = lut_l [ idx . z ] ; <nl> - } <nl> + # elif dcn = = 2 <nl> + # define LUT_OP ( num ) \ <nl> + uchar2 src_pixel = vload2 ( 0 , ( __global const uchar * ) ( srcptr + src_index + num * src_step ) ) ; \ <nl> + int2 idx = convert_int2 ( src_pixel ) * lcn + ( int2 ) ( 0 , 1 ) ; \ <nl> + dst = ( __global dstT * ) ( dstptr + dst_index + num * dst_step ) ; \ <nl> + dst [ 0 ] = lut_l [ idx . x ] ; \ <nl> + dst [ 1 ] = lut_l [ idx . 
y ] ; <nl> + # elif dcn = = 1 / / error case ( 1 < lcn ) = = > lcn = = scn = = dcn <nl> + # define LUT_OP ( num ) \ <nl> + uchar idx = ( __global const uchar * ) ( srcptr + src_index + num * src_step ) [ 0 ] ; \ <nl> + dst = ( __global dstT * ) ( dstptr + dst_index + num * dst_step ) ; \ <nl> + dst [ 0 ] = lut_l [ idx ] ; <nl> + # else <nl> + # define LUT_OP ( num ) \ <nl> + src = ( __global const srcT * ) ( srcptr + src_index + num * src_step ) ; \ <nl> + dst = ( __global dstT * ) ( dstptr + dst_index + num * dst_step ) ; \ <nl> + for ( int cn = 0 ; cn < dcn ; + + cn ) \ <nl> + dst [ cn ] = lut_l [ mad24 ( src [ cn ] , lcn , cn ) ] ; <nl> + # endif <nl> + # endif <nl> + <nl> + # define LOCAL_LUT_INIT \ <nl> + { \ <nl> + __global const dstT * lut = ( __global const dstT * ) ( lutptr + lut_offset ) ; \ <nl> + int init = mad24 ( ( int ) get_local_id ( 1 ) , ( int ) get_local_size ( 0 ) , ( int ) get_local_id ( 0 ) ) ; \ <nl> + int step = get_local_size ( 0 ) * get_local_size ( 1 ) ; \ <nl> + for ( int i = init ; i < 256 * lcn ; i + = step ) \ <nl> + { \ <nl> + lut_l [ i ] = lut [ i ] ; \ <nl> + } \ <nl> + barrier ( CLK_LOCAL_MEM_FENCE ) ; \ <nl> } <nl> - } <nl> <nl> __kernel void LUT ( __global const uchar * srcptr , int src_step , int src_offset , <nl> __global const uchar * lutptr , int lut_step , int lut_offset , <nl> __global uchar * dstptr , int dst_step , int dst_offset , int rows , int cols ) <nl> { <nl> - __global const dstT * lut = ( __global const dstT * ) ( lutptr + lut_offset ) ; <nl> - <nl> __local dstT lut_l [ 256 * lcn ] ; <nl> - int init = mad24 ( get_local_id ( 1 ) , get_local_size ( 0 ) , get_local_id ( 0 ) ) ; <nl> - int step = get_local_size ( 0 ) * get_local_size ( 1 ) ; <nl> - <nl> - for ( int i = init ; i < 256 * lcn ; i + = step ) <nl> - { <nl> - lut_l [ i + 0 ] = lut [ i + 0 ] ; <nl> - } <nl> - barrier ( CLK_LOCAL_MEM_FENCE ) ; <nl> + LOCAL_LUT_INIT ; <nl> <nl> int x = get_global_id ( 0 ) ; <nl> - int y = 2 * get_global_id ( 1 ) ; <nl> + int y = 4 * get_global_id ( 1 ) ; <nl> <nl> if ( x < cols & & y < rows ) <nl> { <nl> int src_index = mad24 ( y , src_step , mad24 ( x , ( int ) sizeof ( srcT ) * dcn , src_offset ) ) ; <nl> - <nl> - __global const srcT * src = ( __global const srcT * ) ( srcptr + src_index ) ; <nl> - __global const dstT * lut = ( __global const dstT * ) ( lutptr + lut_offset ) ; <nl> - <nl> int dst_index = mad24 ( y , dst_step , mad24 ( x , ( int ) sizeof ( dstT ) * dcn , dst_offset ) ) ; <nl> - __global dstT * dst = ( __global dstT * ) ( dstptr + dst_index ) ; <nl> + __global const srcT * src ; __global dstT * dst ; <nl> <nl> - # if lcn = = 1 <nl> - # pragma unroll <nl> - for ( int cn = 0 ; cn < dcn ; + + cn ) <nl> - dst [ cn ] = lut_l [ src [ cn ] ] ; <nl> - # else / / lcn = = scn = = dcn <nl> - # pragma unroll <nl> - for ( int cn = 0 ; cn < dcn ; + + cn ) <nl> - dst [ cn ] = lut_l [ mad24 ( src [ cn ] , lcn , cn ) ] ; <nl> - # endif <nl> + LUT_OP ( 0 ) ; <nl> if ( y < rows - 1 ) <nl> { <nl> - src = ( __global const srcT * ) ( srcptr + src_index + src_step ) ; <nl> - dst = ( __global dstT * ) ( dstptr + dst_index + dst_step ) ; <nl> - <nl> - # if lcn = = 1 <nl> - # pragma unroll <nl> - for ( int cn = 0 ; cn < dcn ; + + cn ) <nl> - dst [ cn ] = lut_l [ src [ cn ] ] ; <nl> - # else / / lcn = = scn = = dcn <nl> - # pragma unroll <nl> - for ( int cn = 0 ; cn < dcn ; + + cn ) <nl> - dst [ cn ] = lut_l [ mad24 ( src [ cn ] , lcn , cn ) ] ; <nl> - # endif <nl> + LUT_OP ( 1 ) ; <nl> + if ( y < rows - 2 ) <nl> + { <nl> + LUT_OP ( 2 ) ; <nl> + if ( y < rows 
- 3 ) <nl> + { <nl> + LUT_OP ( 3 ) ; <nl> + } <nl> + } <nl> } <nl> + <nl> } <nl> } <nl>
|
Use 4 pixels for one unit . Some ocl code refactoring
|
opencv/opencv
|
72727111c71404d6597617cca490ea610b7a71e3
|
2014-05-26T12:52:59Z
|
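The rewritten lut.cl above folds every channel count into one LUT_OP macro; the two indexing schemes it distinguishes are a single shared 256-entry table (lcn == 1) and one interleaved table per channel (lcn == dcn), where the flat index is src*lcn + cn. Both are easy to mimic in a few lines of numpy; the arrays below are made-up test data, not anything taken from OpenCV:

import numpy as np

src = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)          # H x W x C input
lut_shared = np.arange(256, dtype=np.uint8)                              # lcn == 1
lut_per_ch = np.random.randint(0, 256, size=(256, 3), dtype=np.uint8)    # lcn == C, lut[value, channel]

dst_shared = lut_shared[src]             # dst[y, x, c] = lut[src[y, x, c]]
cn = np.arange(src.shape[2])
dst_per_ch = lut_per_ch[src, cn]         # dst[y, x, c] = lut[src[y, x, c], c], i.e. flat index src*lcn + cn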
mmm a / xbmc / pvr / windows / GUIWindowPVRBase . cpp <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRBase . cpp <nl> void CGUIWindowPVRBase : : SetGroup ( CPVRChannelGroupPtr group ) <nl> / / we need to register the window to receive changes from the new group <nl> m_group - > RegisterObserver ( this ) ; <nl> g_PVRManager . SetPlayingGroup ( m_group ) ; <nl> - Update ( ) ; <nl> + Refresh ( ) ; <nl> } <nl> } <nl> <nl> bool CGUIWindowPVRBase : : ActionDeleteChannel ( CFileItem * item ) <nl> return false ; <nl> <nl> g_PVRChannelGroups - > GetGroupAll ( channel - > IsRadio ( ) ) - > RemoveFromGroup ( * channel ) ; <nl> - Update ( ) ; <nl> + Refresh ( true ) ; <nl> <nl> return true ; <nl> } <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRBase . h <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRBase . h <nl> namespace PVR <nl> public : <nl> virtual void OnInitWindow ( void ) ; <nl> virtual bool OnMessage ( CGUIMessage & message ) ; <nl> - virtual bool Update ( const std : : string & strDirectory = " " , bool updateFilterPath = true ) ; <nl> + virtual bool Update ( const std : : string & strDirectory , bool updateFilterPath = true ) ; <nl> virtual void UpdateButtons ( void ) ; <nl> virtual bool OnAction ( const CAction & action ) ; <nl> virtual bool OnBack ( int actionID ) ; <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRChannels . cpp <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRChannels . cpp <nl> bool CGUIWindowPVRChannels : : Update ( const std : : string & strDirectory , bool updateF <nl> / * show the visible channels instead * / <nl> m_bShowHiddenChannels = false ; <nl> lock . Leave ( ) ; <nl> - Update ( ) ; <nl> + Refresh ( true ) ; <nl> } <nl> <nl> return bReturn ; <nl> bool CGUIWindowPVRChannels : : OnMessage ( CGUIMessage & message ) <nl> case ObservableMessageChannelGroupReset : <nl> { <nl> if ( IsActive ( ) ) <nl> - Update ( ) ; <nl> + Refresh ( true ) ; <nl> bReturn = true ; <nl> break ; <nl> } <nl> bool CGUIWindowPVRChannels : : OnContextButtonHide ( CFileItem * item , CONTEXT_BUTTON <nl> return bReturn ; <nl> <nl> g_PVRManager . GetPlayingGroup ( m_bRadio ) - > RemoveFromGroup ( * channel ) ; <nl> - Update ( ) ; <nl> + Refresh ( true ) ; <nl> <nl> bReturn = true ; <nl> } <nl> bool CGUIWindowPVRChannels : : OnContextButtonLock ( CFileItem * item , CONTEXT_BUTTON <nl> return bReturn ; <nl> <nl> group - > ToggleChannelLocked ( * item ) ; <nl> - Update ( ) ; <nl> + Refresh ( true ) ; <nl> <nl> bReturn = true ; <nl> } <nl> bool CGUIWindowPVRChannels : : OnContextButtonMove ( CFileItem * item , CONTEXT_BUTTON <nl> if ( newIndex ! = channel - > ChannelNumber ( ) ) <nl> { <nl> g_PVRManager . GetPlayingGroup ( ) - > MoveChannel ( channel - > ChannelNumber ( ) , newIndex ) ; <nl> - Update ( ) ; <nl> + Refresh ( true ) ; <nl> } <nl> <nl> bReturn = true ; <nl> bool CGUIWindowPVRChannels : : OnContextButtonSetThumb ( CFileItem * item , CONTEXT_BUT <nl> <nl> channelPtr - > SetIconPath ( strThumb , true ) ; <nl> channelPtr - > Persist ( ) ; <nl> - Update ( ) ; <nl> + Refresh ( true ) ; <nl> } <nl> <nl> bReturn = true ; <nl> bool CGUIWindowPVRChannels : : OnContextButtonShowHidden ( CFileItem * item , CONTEXT_B <nl> if ( button = = CONTEXT_BUTTON_SHOW_HIDDEN ) <nl> { <nl> m_bShowHiddenChannels = ! m_bShowHiddenChannels ; <nl> - Update ( ) ; <nl> + Refresh ( true ) ; <nl> bReturn = true ; <nl> } <nl> <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRChannels . h <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRChannels . 
h <nl> namespace PVR <nl> bool OnMessage ( CGUIMessage & message ) ; <nl> void GetContextButtons ( int itemNumber , CContextButtons & buttons ) ; <nl> bool OnContextButton ( int itemNumber , CONTEXT_BUTTON button ) ; <nl> - bool Update ( const std : : string & strDirectory = " " , bool updateFilterPath = true ) ; <nl> + bool Update ( const std : : string & strDirectory , bool updateFilterPath = true ) ; <nl> void UpdateButtons ( void ) ; <nl> void ResetObservers ( void ) ; <nl> void UnregisterObservers ( void ) ; <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRGuide . cpp <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRGuide . cpp <nl> bool CGUIWindowPVRGuide : : OnMessage ( CGUIMessage & message ) <nl> { <nl> / / let ' s set the view mode first before update <nl> CGUIWindowPVRBase : : OnMessage ( message ) ; <nl> - Update ( ) ; <nl> + Refresh ( true ) ; <nl> bReturn = true ; <nl> } <nl> break ; <nl> bool CGUIWindowPVRGuide : : OnMessage ( CGUIMessage & message ) <nl> m_bUpdateRequired = true ; <nl> / * update the current window if the EPG timeline view is visible * / <nl> if ( IsActive ( ) & & m_viewControl . GetCurrentControl ( ) = = GUIDE_VIEW_TIMELINE ) <nl> - Update ( ) ; <nl> + Refresh ( true ) ; <nl> bReturn = true ; <nl> break ; <nl> } <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRGuide . h <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRGuide . h <nl> namespace PVR <nl> bool OnAction ( const CAction & action ) ; <nl> void GetContextButtons ( int itemNumber , CContextButtons & buttons ) ; <nl> bool OnContextButton ( int itemNumber , CONTEXT_BUTTON button ) ; <nl> - bool Update ( const std : : string & strDirectory = " " , bool updateFilterPath = true ) ; <nl> + bool Update ( const std : : string & strDirectory , bool updateFilterPath = true ) ; <nl> void ResetObservers ( void ) ; <nl> void UnregisterObservers ( void ) ; <nl> <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRRecordings . cpp <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRRecordings . cpp <nl> bool CGUIWindowPVRRecordings : : OnMessage ( CGUIMessage & message ) <nl> case ObservableMessageTimersReset : <nl> { <nl> if ( IsActive ( ) ) <nl> - Update ( ) ; <nl> + Refresh ( true ) ; <nl> bReturn = true ; <nl> break ; <nl> } <nl> bool CGUIWindowPVRRecordings : : OnContextButtonRename ( CFileItem * item , CONTEXT_BUT <nl> if ( CGUIKeyboardFactory : : ShowAndGetInput ( strNewName , g_localizeStrings . Get ( 19041 ) , false ) ) <nl> { <nl> if ( g_PVRRecordings - > RenameRecording ( * item , strNewName ) ) <nl> - Update ( ) ; <nl> + Refresh ( true ) ; <nl> } <nl> } <nl> <nl> bool CGUIWindowPVRRecordings : : OnContextButtonMarkWatched ( const CFileItemPtr & ite <nl> g_PVRRecordings - > SetRecordingsPlayCount ( item , 1 ) ; <nl> m_viewControl . SetSelectedItem ( newSelection ) ; <nl> <nl> - Update ( ) ; <nl> + Refresh ( true ) ; <nl> } <nl> <nl> if ( button = = CONTEXT_BUTTON_MARK_UNWATCHED ) <nl> bool CGUIWindowPVRRecordings : : OnContextButtonMarkWatched ( const CFileItemPtr & ite <nl> <nl> g_PVRRecordings - > SetRecordingsPlayCount ( item , 0 ) ; <nl> <nl> - Update ( ) ; <nl> + Refresh ( true ) ; <nl> } <nl> <nl> return bReturn ; <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRRecordings . h <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRRecordings . 
h <nl> namespace PVR <nl> bool OnAction ( const CAction & action ) ; <nl> void GetContextButtons ( int itemNumber , CContextButtons & buttons ) ; <nl> bool OnContextButton ( int itemNumber , CONTEXT_BUTTON button ) ; <nl> - bool Update ( const std : : string & strDirectory = " " , bool updateFilterPath = true ) ; <nl> + bool Update ( const std : : string & strDirectory , bool updateFilterPath = true ) ; <nl> void UnregisterObservers ( void ) ; <nl> void ResetObservers ( void ) ; <nl> <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRSearch . cpp <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRSearch . cpp <nl> bool CGUIWindowPVRSearch : : OnContextButtonClear ( CFileItem * item , CONTEXT_BUTTON b <nl> m_bSearchConfirmed = false ; <nl> m_searchfilter . Reset ( ) ; <nl> <nl> - Update ( ) ; <nl> + Refresh ( true ) ; <nl> } <nl> <nl> return bReturn ; <nl> void CGUIWindowPVRSearch : : ShowSearchResults ( ) <nl> if ( pDlgInfo - > IsConfirmed ( ) ) <nl> { <nl> m_bSearchConfirmed = true ; <nl> - Update ( ) ; <nl> + Refresh ( true ) ; <nl> } <nl> } <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRSearch . h <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRSearch . h <nl> namespace PVR <nl> bool OnMessage ( CGUIMessage & message ) ; <nl> void GetContextButtons ( int itemNumber , CContextButtons & buttons ) ; <nl> bool OnContextButton ( int itemNumber , CONTEXT_BUTTON button ) ; <nl> - bool Update ( const std : : string & strDirectory = " " , bool updateFilterPath = true ) ; <nl> + bool Update ( const std : : string & strDirectory , bool updateFilterPath = true ) ; <nl> <nl> private : <nl> void Search ( void ) ; <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRTimers . cpp <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRTimers . cpp <nl> bool CGUIWindowPVRTimers : : OnMessage ( CGUIMessage & message ) <nl> case ObservableMessageTimersReset : <nl> { <nl> if ( IsActive ( ) ) <nl> - Update ( ) ; <nl> + Refresh ( true ) ; <nl> bReturn = true ; <nl> break ; <nl> } <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRTimers . h <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRTimers . h <nl> namespace PVR <nl> bool OnMessage ( CGUIMessage & message ) ; <nl> void GetContextButtons ( int itemNumber , CContextButtons & buttons ) ; <nl> bool OnContextButton ( int itemNumber , CONTEXT_BUTTON button ) ; <nl> - bool Update ( const std : : string & strDirectory = " " , bool updateFilterPath = true ) ; <nl> + bool Update ( const std : : string & strDirectory , bool updateFilterPath = true ) ; <nl> void UnregisterObservers ( void ) ; <nl> void ResetObservers ( void ) ; <nl> <nl>
|
[ pvr ] use Refresh ( ) instead of Update ( ) to refresh directory listing
|
xbmc/xbmc
|
11beba9f17a1e1dc47eab80000c06a2a1307ba61
|
2014-07-14T16:16:26Z
|
mmm a / src / debug / debug . cc <nl> ppp b / src / debug / debug . cc <nl> BreakLocation : : BreakLocation ( Handle < DebugInfo > debug_info , DebugBreakType type , <nl> SharedFunctionInfo * shared = debug_info - > shared ( ) ; <nl> if ( shared - > HasSourceCode ( ) ) { <nl> return_position = <nl> - std : : max ( shared - > end_position ( ) - shared - > start_position ( ) - 1 , 0 ) ; <nl> + std : : max ( shared - > end_position ( ) - 1 , shared - > start_position ( ) ) ; <nl> } <nl> / / TODO ( yangguo ) : find out why return position is wrong for liveedit . <nl> position_ = return_position ; <nl> - statement_position = return_position ; <nl> + statement_position_ = return_position ; <nl> } <nl> } <nl> <nl> BreakLocation : : Iterator * BreakLocation : : GetIterator ( <nl> } <nl> <nl> BreakLocation : : Iterator : : Iterator ( Handle < DebugInfo > debug_info ) <nl> - : debug_info_ ( debug_info ) , <nl> - break_index_ ( - 1 ) , <nl> - position_ ( 1 ) , <nl> - statement_position_ ( 1 ) { } <nl> + : debug_info_ ( debug_info ) , break_index_ ( - 1 ) { <nl> + position_ = debug_info - > shared ( ) - > start_position ( ) ; <nl> + statement_position_ = position_ ; <nl> + } <nl> <nl> BreakLocation : : CodeIterator : : CodeIterator ( Handle < DebugInfo > debug_info , <nl> BreakLocatorType type ) <nl> BreakLocation : : CodeIterator : : CodeIterator ( Handle < DebugInfo > debug_info , <nl> reloc_iterator_ ( debug_info - > abstract_code ( ) - > GetCode ( ) , <nl> GetModeMask ( type ) ) , <nl> source_position_iterator_ ( <nl> - debug_info - > abstract_code ( ) - > GetCode ( ) - > source_position_table ( ) ) , <nl> - start_position_ ( debug_info_ - > shared ( ) - > start_position ( ) ) { <nl> + debug_info - > abstract_code ( ) - > GetCode ( ) - > source_position_table ( ) ) { <nl> / / There is at least one break location . <nl> DCHECK ( ! Done ( ) ) ; <nl> Next ( ) ; <nl> void BreakLocation : : CodeIterator : : Next ( ) { <nl> int offset = code_offset ( ) ; <nl> while ( ! source_position_iterator_ . done ( ) & & <nl> source_position_iterator_ . code_offset ( ) < = offset ) { <nl> - position_ = source_position_iterator_ . source_position ( ) - start_position_ ; <nl> + position_ = source_position_iterator_ . source_position ( ) ; <nl> if ( source_position_iterator_ . is_statement ( ) ) { <nl> statement_position_ = position_ ; <nl> } <nl> BreakLocation : : BytecodeArrayIterator : : BytecodeArrayIterator ( <nl> source_position_iterator_ ( debug_info - > abstract_code ( ) <nl> - > GetBytecodeArray ( ) <nl> - > source_position_table ( ) ) , <nl> - break_locator_type_ ( type ) , <nl> - start_position_ ( debug_info - > shared ( ) - > start_position ( ) ) { <nl> + break_locator_type_ ( type ) { <nl> / / There is at least one break location . <nl> DCHECK ( ! Done ( ) ) ; <nl> Next ( ) ; <nl> void BreakLocation : : BytecodeArrayIterator : : Next ( ) { <nl> if ( ! first ) source_position_iterator_ . Advance ( ) ; <nl> first = false ; <nl> if ( Done ( ) ) return ; <nl> - position_ = source_position_iterator_ . source_position ( ) - start_position_ ; <nl> + position_ = source_position_iterator_ . source_position ( ) ; <nl> if ( source_position_iterator_ . is_statement ( ) ) { <nl> statement_position_ = position_ ; <nl> } <nl> bool Debug : : SetBreakPointForScript ( Handle < Script > script , <nl> <nl> / / Find position within function . The script position might be before the <nl> / / source position of the first function . 
<nl> - int position ; <nl> if ( shared - > start_position ( ) > * source_position ) { <nl> - position = 0 ; <nl> - } else { <nl> - position = * source_position - shared - > start_position ( ) ; <nl> + * source_position = shared - > start_position ( ) ; <nl> } <nl> <nl> Handle < DebugInfo > debug_info ( shared - > GetDebugInfo ( ) ) ; <nl> - / / Source positions starts with zero . <nl> - DCHECK ( position > = 0 ) ; <nl> <nl> / / Find the break point and change it . <nl> BreakLocation location = <nl> - BreakLocation : : FromPosition ( debug_info , position , alignment ) ; <nl> + BreakLocation : : FromPosition ( debug_info , * source_position , alignment ) ; <nl> location . SetBreakPoint ( break_point_object ) ; <nl> <nl> feature_tracker ( ) - > Track ( DebugFeatureTracker : : kBreakPoint ) ; <nl> <nl> - position = ( alignment = = STATEMENT_ALIGNED ) ? location . statement_position ( ) <nl> - : location . position ( ) ; <nl> - <nl> - * source_position = position + shared - > start_position ( ) ; <nl> + * source_position = ( alignment = = STATEMENT_ALIGNED ) <nl> + ? location . statement_position ( ) <nl> + : location . position ( ) ; <nl> <nl> / / At least one active break point now . <nl> DCHECK ( debug_info - > GetBreakPointCount ( ) > 0 ) ; <nl> mmm a / src / debug / debug . h <nl> ppp b / src / debug / debug . h <nl> class BreakLocation { <nl> <nl> RelocIterator reloc_iterator_ ; <nl> SourcePositionTableIterator source_position_iterator_ ; <nl> - int start_position_ ; <nl> DISALLOW_COPY_AND_ASSIGN ( CodeIterator ) ; <nl> } ; <nl> <nl> class BreakLocation { <nl> <nl> SourcePositionTableIterator source_position_iterator_ ; <nl> BreakLocatorType break_locator_type_ ; <nl> - int start_position_ ; <nl> DISALLOW_COPY_AND_ASSIGN ( BytecodeArrayIterator ) ; <nl> } ; <nl> <nl> mmm a / src / debug / debug . js <nl> ppp b / src / debug / debug . js <nl> Debug . setListener = function ( listener , opt_data ) { <nl> } ; <nl> <nl> <nl> - Debug . breakLocations = function ( f , opt_position_aligment ) { <nl> - if ( ! IS_FUNCTION ( f ) ) throw MakeTypeError ( kDebuggerType ) ; <nl> - var position_aligment = IS_UNDEFINED ( opt_position_aligment ) <nl> - ? Debug . BreakPositionAlignment . Statement : opt_position_aligment ; <nl> - return % GetBreakLocations ( f , position_aligment ) ; <nl> - } ; <nl> - <nl> / / Returns a Script object . If the parameter is a function the return value <nl> / / is the script in which the function is defined . If the parameter is a string <nl> / / the return value is the script for which the script name has that string <nl> Debug . setBreakPoint = function ( func , opt_line , opt_column , opt_condition ) { <nl> if ( % FunctionIsAPIFunction ( func ) ) { <nl> throw MakeError ( kDebugger , ' Cannot set break point in native code . ' ) ; <nl> } <nl> - / / Find source position relative to start of the function <nl> - var break_position = <nl> + / / Find source position . <nl> + var source_position = <nl> this . findFunctionSourceLocation ( func , opt_line , opt_column ) . position ; <nl> - var source_position = break_position - this . sourcePosition ( func ) ; <nl> / / Find the script for the function . <nl> var script = % FunctionGetScript ( func ) ; <nl> / / Break in builtin JavaScript code is not supported . <nl> Debug . setBreakPoint = function ( func , opt_line , opt_column , opt_condition ) { <nl> / / If the script for the function has a name convert this to a script break <nl> / / point . <nl> if ( script & & script . 
id ) { <nl> - / / Adjust the source position to be script relative . <nl> - source_position + = % FunctionGetScriptSourcePosition ( func ) ; <nl> / / Find line and column for the position in the script and set a script <nl> / / break point from that . <nl> var location = script . locationFromPosition ( source_position , false ) ; <nl> Debug . setBreakPoint = function ( func , opt_line , opt_column , opt_condition ) { <nl> var break_point = MakeBreakPoint ( source_position ) ; <nl> var actual_position = <nl> % SetFunctionBreakPoint ( func , source_position , break_point ) ; <nl> - actual_position + = this . sourcePosition ( func ) ; <nl> var actual_location = script . locationFromPosition ( actual_position , true ) ; <nl> break_point . actual_location = { line : actual_location . line , <nl> column : actual_location . column , <nl> Debug . isBreakOnUncaughtException = function ( ) { <nl> Debug . showBreakPoints = function ( f , full , opt_position_alignment ) { <nl> if ( ! IS_FUNCTION ( f ) ) throw MakeError ( kDebuggerType ) ; <nl> var source = full ? this . scriptSource ( f ) : this . source ( f ) ; <nl> - var offset = full ? this . sourcePosition ( f ) : 0 ; <nl> - var locations = this . breakLocations ( f , opt_position_alignment ) ; <nl> + var offset = full ? 0 : this . sourcePosition ( f ) ; <nl> + var position_alignment = IS_UNDEFINED ( opt_position_alignment ) <nl> + ? Debug . BreakPositionAlignment . Statement : opt_position_alignment ; <nl> + var locations = % GetBreakLocations ( f , position_alignment ) ; <nl> if ( ! locations ) return source ; <nl> locations . sort ( function ( x , y ) { return x - y ; } ) ; <nl> var result = " " ; <nl> mmm a / test / cctest / heap / test - heap . cc <nl> ppp b / test / cctest / heap / test - heap . cc <nl> TEST ( TestCodeFlushingIncrementalAbort ) { <nl> / / Enable the debugger and add a breakpoint while incremental marking <nl> / / is running so that incremental marking aborts and code flushing is <nl> / / disabled . <nl> - int position = 0 ; <nl> + int position = function - > shared ( ) - > start_position ( ) ; <nl> Handle < Object > breakpoint_object ( Smi : : FromInt ( 0 ) , isolate ) ; <nl> EnableDebugger ( CcTest : : isolate ( ) ) ; <nl> isolate - > debug ( ) - > SetBreakPoint ( function , breakpoint_object , & position ) ; <nl> mmm a / test / cctest / test - debug . cc <nl> ppp b / test / cctest / test - debug . cc <nl> static bool HasDebugInfo ( v8 : : Local < v8 : : Function > fun ) { <nl> return shared - > HasDebugInfo ( ) ; <nl> } <nl> <nl> - <nl> - / / Set a break point in a function and return the associated break point <nl> - / / number . <nl> - static int SetBreakPoint ( Handle < v8 : : internal : : JSFunction > fun , int position ) { <nl> + / / Set a break point in a function with a position relative to function start , <nl> + / / and return the associated break point number . 
<nl> + static int SetBreakPoint ( v8 : : Local < v8 : : Function > fun , int position ) { <nl> + i : : Handle < i : : JSFunction > function = <nl> + i : : Handle < i : : JSFunction > : : cast ( v8 : : Utils : : OpenHandle ( * fun ) ) ; <nl> + position + = function - > shared ( ) - > start_position ( ) ; <nl> static int break_point = 0 ; <nl> - v8 : : internal : : Isolate * isolate = fun - > GetIsolate ( ) ; <nl> + v8 : : internal : : Isolate * isolate = function - > GetIsolate ( ) ; <nl> v8 : : internal : : Debug * debug = isolate - > debug ( ) ; <nl> debug - > SetBreakPoint ( <nl> - fun , <nl> + function , <nl> Handle < Object > ( v8 : : internal : : Smi : : FromInt ( + + break_point ) , isolate ) , <nl> & position ) ; <nl> return break_point ; <nl> } <nl> <nl> <nl> - / / Set a break point in a function and return the associated break point <nl> - / / number . <nl> - static int SetBreakPoint ( v8 : : Local < v8 : : Function > fun , int position ) { <nl> - return SetBreakPoint ( <nl> - i : : Handle < i : : JSFunction > : : cast ( v8 : : Utils : : OpenHandle ( * fun ) ) , position ) ; <nl> - } <nl> - <nl> - <nl> / / Set a break point in a function using the Debug object and return the <nl> / / associated break point number . <nl> static int SetBreakPointFromJS ( v8 : : Isolate * isolate , <nl> TEST ( BreakLocationIterator ) { <nl> TestBreakLocation : : Iterator * iterator = <nl> TestBreakLocation : : GetIterator ( debug_info , i : : ALL_BREAK_LOCATIONS ) ; <nl> CHECK ( iterator - > GetBreakLocation ( ) . IsDebuggerStatement ( ) ) ; <nl> - CHECK_EQ ( 7 , iterator - > GetBreakLocation ( ) . position ( ) ) ; <nl> + CHECK_EQ ( 17 , iterator - > GetBreakLocation ( ) . position ( ) ) ; <nl> iterator - > Next ( ) ; <nl> CHECK ( iterator - > GetBreakLocation ( ) . IsDebugBreakSlot ( ) ) ; <nl> - CHECK_EQ ( 22 , iterator - > GetBreakLocation ( ) . position ( ) ) ; <nl> + CHECK_EQ ( 32 , iterator - > GetBreakLocation ( ) . position ( ) ) ; <nl> iterator - > Next ( ) ; <nl> CHECK ( iterator - > GetBreakLocation ( ) . IsCall ( ) ) ; <nl> - CHECK_EQ ( 22 , iterator - > GetBreakLocation ( ) . position ( ) ) ; <nl> + CHECK_EQ ( 32 , iterator - > GetBreakLocation ( ) . position ( ) ) ; <nl> iterator - > Next ( ) ; <nl> CHECK ( iterator - > GetBreakLocation ( ) . IsDebuggerStatement ( ) ) ; <nl> - CHECK_EQ ( 37 , iterator - > GetBreakLocation ( ) . position ( ) ) ; <nl> + CHECK_EQ ( 47 , iterator - > GetBreakLocation ( ) . position ( ) ) ; <nl> iterator - > Next ( ) ; <nl> CHECK ( iterator - > GetBreakLocation ( ) . IsReturn ( ) ) ; <nl> - CHECK_EQ ( 50 , iterator - > GetBreakLocation ( ) . position ( ) ) ; <nl> + CHECK_EQ ( 60 , iterator - > GetBreakLocation ( ) . position ( ) ) ; <nl> iterator - > Next ( ) ; <nl> CHECK ( iterator - > Done ( ) ) ; <nl> delete iterator ; <nl> <nl> iterator = TestBreakLocation : : GetIterator ( debug_info , i : : CALLS_AND_RETURNS ) ; <nl> CHECK ( iterator - > GetBreakLocation ( ) . IsCall ( ) ) ; <nl> - CHECK_EQ ( 22 , iterator - > GetBreakLocation ( ) . position ( ) ) ; <nl> + CHECK_EQ ( 32 , iterator - > GetBreakLocation ( ) . position ( ) ) ; <nl> iterator - > Next ( ) ; <nl> CHECK ( iterator - > GetBreakLocation ( ) . IsReturn ( ) ) ; <nl> - CHECK_EQ ( 50 , iterator - > GetBreakLocation ( ) . position ( ) ) ; <nl> + CHECK_EQ ( 60 , iterator - > GetBreakLocation ( ) . position ( ) ) ; <nl> iterator - > Next ( ) ; <nl> CHECK ( iterator - > Done ( ) ) ; <nl> delete iterator ; <nl>
|
[ debugger ] use absolute source positions for break locations .
|
v8/v8
|
ad4eb051e7315915ee9dac72c82b4af94952ab77
|
2016-07-21T06:30:40Z
|
mmm a / contrib / seeds / makeseeds . py <nl> ppp b / contrib / seeds / makeseeds . py <nl> def filtermultiport ( ips ) : <nl> # Based on Greg Maxwell ' s seed_filter . py <nl> def filterbyasn ( ips , max_per_asn , max_total ) : <nl> # Sift out ips by type <nl> - ips_ipv4 = [ ip for ip in ips if ip [ ' net ' ] = = ' ipv4 ' ] <nl> - ips_ipv6 = [ ip for ip in ips if ip [ ' net ' ] = = ' ipv6 ' ] <nl> + ips_ipv46 = [ ip for ip in ips if ip [ ' net ' ] in [ ' ipv4 ' , ' ipv6 ' ] ] <nl> ips_onion = [ ip for ip in ips if ip [ ' net ' ] = = ' onion ' ] <nl> <nl> - # Filter IPv4 by ASN <nl> + # Filter IPv46 by ASN <nl> result = [ ] <nl> asn_count = { } <nl> - for ip in ips_ipv4 : <nl> + for ip in ips_ipv46 : <nl> if len ( result ) = = max_total : <nl> break <nl> try : <nl> - asn = int ( [ x . to_text ( ) for x in dns . resolver . query ( ' . ' . join ( reversed ( ip [ ' ip ' ] . split ( ' . ' ) ) ) + ' . origin . asn . cymru . com ' , ' TXT ' ) . response . answer ] [ 0 ] . split ( ' \ " ' ) [ 1 ] . split ( ' ' ) [ 0 ] ) <nl> + if ip [ ' net ' ] = = ' ipv4 ' : <nl> + ipaddr = ip [ ' ip ' ] <nl> + prefix = ' . origin ' <nl> + else : # http : / / www . team - cymru . com / IP - ASN - mapping . html <nl> + res = str ( ) # 2001 : 4860 : b002 : 23 : : 68 <nl> + for nb in ip [ ' ip ' ] . split ( ' : ' ) [ : 4 ] : # pick the first 4 nibbles <nl> + for c in nb . zfill ( 4 ) : # right padded with ' 0 ' <nl> + res + = c + ' . ' # 2001 4860 b002 0023 <nl> + ipaddr = res . rstrip ( ' . ' ) # 2 . 0 . 0 . 1 . 4 . 8 . 6 . 0 . b . 0 . 0 . 2 . 0 . 0 . 2 . 3 <nl> + prefix = ' . origin6 ' <nl> + <nl> + asn = int ( [ x . to_text ( ) for x in dns . resolver . query ( ' . ' . join ( <nl> + reversed ( ipaddr . split ( ' . ' ) ) ) + prefix + ' . asn . cymru . com ' , <nl> + ' TXT ' ) . response . answer ] [ 0 ] . split ( ' \ " ' ) [ 1 ] . split ( ' ' ) [ 0 ] ) <nl> if asn not in asn_count : <nl> asn_count [ asn ] = 0 <nl> if asn_count [ asn ] = = max_per_asn : <nl> def filterbyasn ( ips , max_per_asn , max_total ) : <nl> except : <nl> sys . stderr . write ( ' ERR : Could not resolve ASN for " ' + ip [ ' ip ' ] + ' " \ n ' ) <nl> <nl> - # TODO : filter IPv6 by ASN <nl> - <nl> - # Add back non - IPv4 <nl> - result . extend ( ips_ipv6 ) <nl> + # Add back Onions <nl> result . extend ( ips_onion ) <nl> return result <nl> <nl>
|
Filter IPv6 by ASN
|
bitcoin/bitcoin
|
316b8b2339efa131fc39f050ee0c9fe5291572b7
|
2019-04-17T17:32:05Z
|
mmm a / tools / osx / osx - depends / python26 / Makefile <nl> ppp b / tools / osx / osx - depends / python26 / Makefile <nl> SOURCE = $ ( LIBNAME ) - $ ( VERSION ) <nl> BASE_URL = http : / / mirrors . xbmc . org / build - deps / darwin - libs <nl> ARCHIVE = $ ( SOURCE ) . tar . bz2 <nl> TARBALLS_LOCATION = / Users / Shared / xbmc - depends / tarballs <nl> + <nl> RETRIEVE_TOOL = / usr / bin / curl <nl> RETRIEVE_TOOL_FLAGS = - Ls - - create - dirs - - output $ ( TARBALLS_LOCATION ) / $ ( ARCHIVE ) <nl> ARCHIVE_TOOL = tar <nl> ARCHIVE_TOOL_FLAGS = xf <nl> # configuration settings <nl> export EXTRA_CFLAGS = $ ( CFLAGS ) <nl> CONFIGURE = . / configure - - prefix = $ ( PREFIX ) - - host = $ ( HOST ) - - enable - shared \ <nl> - - - disable - toolbox - glue <nl> + - - disable - toolbox - glue - - enable - unicode = ucs4 <nl> <nl> LIBDYLIB = $ ( SOURCE ) / libpython2 . 6 . dylib <nl> <nl> $ ( LIBDYLIB ) : $ ( TARBALLS_LOCATION ) / $ ( ARCHIVE ) <nl> $ ( ARCHIVE_TOOL ) $ ( ARCHIVE_TOOL_FLAGS ) $ ( TARBALLS_LOCATION ) / $ ( ARCHIVE ) <nl> echo $ ( SOURCE ) > . gitignore <nl> cd $ ( SOURCE ) ; $ ( CONFIGURE ) <nl> + # python2 . 6 has an issue detecting and using the same version of openssl in configure and setup . py <nl> + # this forces python2 . 6 hashlib to be compatible with osx 10 . 4 boxes . <nl> + sed - ie " s | openssl_ver > = 0x00907000 ) | openssl_ver > = 0x00907000 and False ) | " " $ ( SOURCE ) / setup . py " <nl> + sed - ie " s | ( openssl_ver < 0x00908000 ) | ( True or openssl_ver < 0x00908000 ) | " " $ ( SOURCE ) / setup . py " <nl> cd $ ( SOURCE ) ; make <nl> <nl> . installed : <nl>
|
[ osx ] fixed , python2 . 6 issues with hashlib under 10 . 4
|
xbmc/xbmc
|
e1db89524d19d14cb5fda49ced3df4bb35e5bf1a
|
2011-03-17T20:42:59Z
|
mmm a / tensorflow / core / kernels / BUILD <nl> ppp b / tensorflow / core / kernels / BUILD <nl> tf_kernel_libraries ( <nl> ] , <nl> deps = [ <nl> " : bounds_check " , <nl> + " : constant_op " , <nl> " : fill_functor " , <nl> " : transpose_functor " , <nl> " / / tensorflow / core : core_cpu " , <nl> tf_kernel_libraries ( <nl> ] , <nl> deps = [ <nl> " : bounds_check " , <nl> + " : constant_op " , <nl> " : cwise_op " , <nl> " : fill_functor " , <nl> " : scatter_op " , <nl>
|
Fix linker errors by adding a dependence on constant_op to targets using fill_functor . This is needed because the templates in fill_functor . h are instantiated in constant_op . cc .
|
tensorflow/tensorflow
|
5bffa7d239f0482d52388fad4bce4840951aeaf8
|
2016-06-24T21:31:46Z
|
new file mode 100644 <nl> index 0000000000 . . c2c7f56884 <nl> mmm / dev / null <nl> ppp b / libraries / ESP8266WebServer / examples / PathArgServer / PathArgServer . ino <nl> <nl> + # include < ESP8266WiFi . h > <nl> + # include < WiFiClient . h > <nl> + # include < ESP8266WebServer . h > <nl> + # include < ESP8266mDNS . h > <nl> + <nl> + # include < uri / UriBraces . h > <nl> + # include < uri / UriRegex . h > <nl> + <nl> + # ifndef STASSID <nl> + # define STASSID " your - ssid " <nl> + # define STAPSK " your - password " <nl> + # endif <nl> + <nl> + const char * ssid = STASSID ; <nl> + const char * password = STAPSK ; <nl> + <nl> + ESP8266WebServer server ( 80 ) ; <nl> + <nl> + void setup ( void ) { <nl> + Serial . begin ( 115200 ) ; <nl> + WiFi . mode ( WIFI_STA ) ; <nl> + WiFi . begin ( ssid , password ) ; <nl> + Serial . println ( " " ) ; <nl> + <nl> + / / Wait for connection <nl> + while ( WiFi . status ( ) ! = WL_CONNECTED ) { <nl> + delay ( 500 ) ; <nl> + Serial . print ( " . " ) ; <nl> + } <nl> + Serial . println ( " " ) ; <nl> + Serial . print ( " Connected to " ) ; <nl> + Serial . println ( ssid ) ; <nl> + Serial . print ( " IP address : " ) ; <nl> + Serial . println ( WiFi . localIP ( ) ) ; <nl> + <nl> + if ( MDNS . begin ( " esp8266 " ) ) { <nl> + Serial . println ( " MDNS responder started " ) ; <nl> + } <nl> + <nl> + server . on ( " / " , [ ] ( ) { <nl> + server . send ( 200 , " text / plain " , " hello from esp8266 ! " ) ; <nl> + } ) ; <nl> + <nl> + server . on ( UriBraces ( " / users / { } " ) , [ ] ( ) { <nl> + String user = server . pathArg ( 0 ) ; <nl> + server . send ( 200 , " text / plain " , " User : ' " + user + " ' " ) ; <nl> + } ) ; <nl> + <nl> + server . on ( UriRegex ( " ^ \ \ / users \ \ / ( [ 0 - 9 ] + ) \ \ / devices \ \ / ( [ 0 - 9 ] + ) $ " ) , [ ] ( ) { <nl> + String user = server . pathArg ( 0 ) ; <nl> + String device = server . pathArg ( 1 ) ; <nl> + server . send ( 200 , " text / plain " , " User : ' " + user + " ' and Device : ' " + device + " ' " ) ; <nl> + } ) ; <nl> + <nl> + server . begin ( ) ; <nl> + Serial . println ( " HTTP server started " ) ; <nl> + } <nl> + <nl> + void loop ( void ) { <nl> + server . handleClient ( ) ; <nl> + } <nl> mmm a / libraries / ESP8266WebServer / src / ESP8266WebServer - impl . h <nl> ppp b / libraries / ESP8266WebServer / src / ESP8266WebServer - impl . 
h <nl> void ESP8266WebServerTemplate < ServerType > : : requestAuthentication ( HTTPAuthMethod <nl> } <nl> <nl> template < typename ServerType > <nl> - void ESP8266WebServerTemplate < ServerType > : : on ( const String & uri , ESP8266WebServerTemplate < ServerType > : : THandlerFunction handler ) { <nl> + void ESP8266WebServerTemplate < ServerType > : : on ( const Uri & uri , ESP8266WebServerTemplate < ServerType > : : THandlerFunction handler ) { <nl> on ( uri , HTTP_ANY , handler ) ; <nl> } <nl> <nl> template < typename ServerType > <nl> - void ESP8266WebServerTemplate < ServerType > : : on ( const String & uri , HTTPMethod method , ESP8266WebServerTemplate < ServerType > : : THandlerFunction fn ) { <nl> + void ESP8266WebServerTemplate < ServerType > : : on ( const Uri & uri , HTTPMethod method , ESP8266WebServerTemplate < ServerType > : : THandlerFunction fn ) { <nl> on ( uri , method , fn , _fileUploadHandler ) ; <nl> } <nl> <nl> template < typename ServerType > <nl> - void ESP8266WebServerTemplate < ServerType > : : on ( const String & uri , HTTPMethod method , ESP8266WebServerTemplate < ServerType > : : THandlerFunction fn , ESP8266WebServerTemplate < ServerType > : : THandlerFunction ufn ) { <nl> + void ESP8266WebServerTemplate < ServerType > : : on ( const Uri & uri , HTTPMethod method , ESP8266WebServerTemplate < ServerType > : : THandlerFunction fn , ESP8266WebServerTemplate < ServerType > : : THandlerFunction ufn ) { <nl> _addRequestHandler ( new FunctionRequestHandler < ServerType > ( fn , ufn , uri , method ) ) ; <nl> } <nl> <nl> void ESP8266WebServerTemplate < ServerType > : : _streamFileCore ( const size_t fileSize <nl> send ( 200 , contentType , emptyString ) ; <nl> } <nl> <nl> + template < typename ServerType > <nl> + const String & ESP8266WebServerTemplate < ServerType > : : pathArg ( unsigned int i ) const { <nl> + if ( _currentHandler ! = nullptr ) <nl> + return _currentHandler - > pathArg ( i ) ; <nl> + return emptyString ; <nl> + } <nl> <nl> template < typename ServerType > <nl> const String & ESP8266WebServerTemplate < ServerType > : : arg ( const String & name ) const { <nl> mmm a / libraries / ESP8266WebServer / src / ESP8266WebServer . h <nl> ppp b / libraries / ESP8266WebServer / src / ESP8266WebServer . h <nl> <nl> # include < ESP8266WiFi . h > <nl> # include < FS . h > <nl> # include " detail / mimetable . h " <nl> + # include " Uri . 
h " <nl> <nl> enum HTTPMethod { HTTP_ANY , HTTP_GET , HTTP_HEAD , HTTP_POST , HTTP_PUT , HTTP_PATCH , HTTP_DELETE , HTTP_OPTIONS } ; <nl> enum HTTPUploadStatus { UPLOAD_FILE_START , UPLOAD_FILE_WRITE , UPLOAD_FILE_END , <nl> class ESP8266WebServerTemplate <nl> void requestAuthentication ( HTTPAuthMethod mode = BASIC_AUTH , const char * realm = NULL , const String & authFailMsg = String ( " " ) ) ; <nl> <nl> typedef std : : function < void ( void ) > THandlerFunction ; <nl> - void on ( const String & uri , THandlerFunction handler ) ; <nl> - void on ( const String & uri , HTTPMethod method , THandlerFunction fn ) ; <nl> - void on ( const String & uri , HTTPMethod method , THandlerFunction fn , THandlerFunction ufn ) ; <nl> + void on ( const Uri & uri , THandlerFunction handler ) ; <nl> + void on ( const Uri & uri , HTTPMethod method , THandlerFunction fn ) ; <nl> + void on ( const Uri & uri , HTTPMethod method , THandlerFunction fn , THandlerFunction ufn ) ; <nl> void addHandler ( RequestHandlerType * handler ) ; <nl> void serveStatic ( const char * uri , fs : : FS & fs , const char * path , const char * cache_header = NULL ) ; <nl> void onNotFound ( THandlerFunction fn ) ; / / called when handler is not assigned <nl> class ESP8266WebServerTemplate <nl> / / Allows setting server options ( i . e . SSL keys ) by the instantiator <nl> ServerType & getServer ( ) { return _server ; } <nl> <nl> + const String & pathArg ( unsigned int i ) const ; / / get request path argument by number <nl> const String & arg ( const String & name ) const ; / / get request argument value by name <nl> const String & arg ( int i ) const ; / / get request argument value by number <nl> const String & argName ( int i ) const ; / / get request argument name by number <nl> new file mode 100644 <nl> index 0000000000 . . cb9b68c051 <nl> mmm / dev / null <nl> ppp b / libraries / ESP8266WebServer / src / Uri . h <nl> <nl> + # ifndef URI_H <nl> + # define URI_H <nl> + <nl> + # include < Arduino . h > <nl> + # include < vector > <nl> + <nl> + class Uri { <nl> + <nl> + protected : <nl> + const String _uri ; <nl> + <nl> + public : <nl> + Uri ( const char * uri ) : _uri ( uri ) { } <nl> + Uri ( const String & uri ) : _uri ( uri ) { } <nl> + virtual ~ Uri ( ) { } <nl> + <nl> + virtual Uri * clone ( ) const { <nl> + return new Uri ( _uri ) ; <nl> + } ; <nl> + <nl> + virtual bool canHandle ( const String & requestUri , __attribute__ ( ( unused ) ) std : : vector < String > & pathArgs ) { <nl> + return _uri = = requestUri ; <nl> + } <nl> + } ; <nl> + <nl> + # endif <nl> mmm a / libraries / ESP8266WebServer / src / detail / RequestHandler . h <nl> ppp b / libraries / ESP8266WebServer / src / detail / RequestHandler . h <nl> <nl> # define REQUESTHANDLER_H <nl> <nl> # include < ESP8266WebServer . h > <nl> + # include < vector > <nl> + # include < assert . h > <nl> <nl> template < typename ServerType > <nl> class RequestHandler { <nl> class RequestHandler { <nl> <nl> private : <nl> RequestHandler < ServerType > * _next = nullptr ; <nl> + <nl> + protected : <nl> + std : : vector < String > pathArgs ; <nl> + <nl> + public : <nl> + const String & pathArg ( unsigned int i ) { <nl> + assert ( i < pathArgs . size ( ) ) ; <nl> + return pathArgs [ i ] ; <nl> + } <nl> } ; <nl> <nl> # endif / / REQUESTHANDLER_H <nl> mmm a / libraries / ESP8266WebServer / src / detail / RequestHandlersImpl . h <nl> ppp b / libraries / ESP8266WebServer / src / detail / RequestHandlersImpl . h <nl> <nl> # include " RequestHandler . h " <nl> # include " mimetable . 
h " <nl> # include " WString . h " <nl> + # include " Uri . h " <nl> <nl> using namespace mime ; <nl> <nl> template < typename ServerType > <nl> class FunctionRequestHandler : public RequestHandler < ServerType > { <nl> using WebServerType = ESP8266WebServerTemplate < ServerType > ; <nl> public : <nl> - FunctionRequestHandler ( typename WebServerType : : THandlerFunction fn , typename WebServerType : : THandlerFunction ufn , const String & uri , HTTPMethod method ) <nl> + FunctionRequestHandler ( typename WebServerType : : THandlerFunction fn , typename WebServerType : : THandlerFunction ufn , const Uri & uri , HTTPMethod method ) <nl> : _fn ( fn ) <nl> , _ufn ( ufn ) <nl> - , _uri ( uri ) <nl> + , _uri ( uri . clone ( ) ) <nl> , _method ( method ) <nl> { <nl> } <nl> <nl> + ~ FunctionRequestHandler ( ) { <nl> + delete _uri ; <nl> + } <nl> + <nl> bool canHandle ( HTTPMethod requestMethod , String requestUri ) override { <nl> if ( _method ! = HTTP_ANY & & _method ! = requestMethod ) <nl> return false ; <nl> <nl> - if ( requestUri ! = _uri ) <nl> - return false ; <nl> - <nl> - return true ; <nl> + return _uri - > canHandle ( requestUri , RequestHandler < ServerType > : : pathArgs ) ; <nl> } <nl> <nl> bool canUpload ( String requestUri ) override { <nl> class FunctionRequestHandler : public RequestHandler < ServerType > { <nl> protected : <nl> typename WebServerType : : THandlerFunction _fn ; <nl> typename WebServerType : : THandlerFunction _ufn ; <nl> - String _uri ; <nl> + Uri * _uri ; <nl> HTTPMethod _method ; <nl> } ; <nl> <nl> new file mode 100644 <nl> index 0000000000 . . 29652efc7f <nl> mmm / dev / null <nl> ppp b / libraries / ESP8266WebServer / src / uri / UriBraces . h <nl> <nl> + # ifndef URI_BRACES_H <nl> + # define URI_BRACES_H <nl> + <nl> + # include " Uri . h " <nl> + <nl> + class UriBraces : public Uri { <nl> + <nl> + public : <nl> + explicit UriBraces ( const char * uri ) : Uri ( uri ) { } ; <nl> + explicit UriBraces ( const String & uri ) : Uri ( uri ) { } ; <nl> + <nl> + Uri * clone ( ) const override final { <nl> + return new UriBraces ( _uri ) ; <nl> + } ; <nl> + <nl> + bool canHandle ( const String & requestUri , std : : vector < String > & pathArgs ) override final { <nl> + if ( Uri : : canHandle ( requestUri , pathArgs ) ) <nl> + return true ; <nl> + <nl> + pathArgs . clear ( ) ; <nl> + <nl> + size_t uriLength = _uri . length ( ) ; <nl> + unsigned int requestUriIndex = 0 ; <nl> + for ( unsigned int i = 0 ; i < uriLength ; i + + , requestUriIndex + + ) { <nl> + char uriChar = _uri [ i ] ; <nl> + char requestUriChar = requestUri [ requestUriIndex ] ; <nl> + <nl> + if ( uriChar = = requestUriChar ) <nl> + continue ; <nl> + if ( uriChar ! = ' { ' ) <nl> + return false ; <nl> + <nl> + i + = 2 ; / / index of char after ' } ' <nl> + if ( i > = uriLength ) { <nl> + / / there is no char after ' } ' <nl> + pathArgs . push_back ( requestUri . substring ( requestUriIndex ) ) ; <nl> + return pathArgs . back ( ) . indexOf ( " / " ) = = - 1 ; / / path argument may not contain a ' / ' <nl> + } <nl> + else <nl> + { <nl> + char charEnd = _uri [ i ] ; <nl> + int uriIndex = requestUri . indexOf ( charEnd , requestUriIndex ) ; <nl> + if ( uriIndex < 0 ) <nl> + return false ; <nl> + pathArgs . push_back ( requestUri . substring ( requestUriIndex , uriIndex ) ) ; <nl> + requestUriIndex = ( unsigned int ) uriIndex ; <nl> + } <nl> + } <nl> + <nl> + return requestUriIndex > = requestUri . length ( ) ; <nl> + } <nl> + } ; <nl> + <nl> + # endif <nl> new file mode 100644 <nl> index 0000000000 . . 
1e222cbabd <nl> mmm / dev / null <nl> ppp b / libraries / ESP8266WebServer / src / uri / UriGlob . h <nl> <nl> + # ifndef URI_GLOB_H <nl> + # define URI_GLOB_H <nl> + <nl> + # include " Uri . h " <nl> + # include < fnmatch . h > <nl> + <nl> + class UriGlob : public Uri { <nl> + <nl> + public : <nl> + explicit UriGlob ( const char * uri ) : Uri ( uri ) { } ; <nl> + explicit UriGlob ( const String & uri ) : Uri ( uri ) { } ; <nl> + <nl> + Uri * clone ( ) const override final { <nl> + return new UriGlob ( _uri ) ; <nl> + } ; <nl> + <nl> + bool canHandle ( const String & requestUri , __attribute__ ( ( unused ) ) std : : vector < String > & pathArgs ) override final { <nl> + return fnmatch ( _uri . c_str ( ) , requestUri . c_str ( ) , 0 ) = = 0 ; <nl> + } <nl> + } ; <nl> + <nl> + # endif <nl> new file mode 100644 <nl> index 0000000000 . . eef1b516d4 <nl> mmm / dev / null <nl> ppp b / libraries / ESP8266WebServer / src / uri / UriRegex . h <nl> <nl> + # ifndef URI_REGEX_H <nl> + # define URI_REGEX_H <nl> + <nl> + # include " Uri . h " <nl> + # include < regex . h > <nl> + # include < assert . h > <nl> + <nl> + # ifndef REGEX_MAX_GROUPS <nl> + # define REGEX_MAX_GROUPS 10 <nl> + # endif <nl> + <nl> + class UriRegex : public Uri { <nl> + <nl> + private : <nl> + regex_t _regexCompiled ; <nl> + <nl> + public : <nl> + explicit UriRegex ( const char * uri ) : Uri ( uri ) { <nl> + assert ( regcomp ( & _regexCompiled , uri , REG_EXTENDED ) = = 0 ) ; <nl> + } ; <nl> + explicit UriRegex ( const String & uri ) : UriRegex ( uri . c_str ( ) ) { } ; <nl> + <nl> + ~ UriRegex ( ) { <nl> + regfree ( & _regexCompiled ) ; <nl> + } <nl> + <nl> + Uri * clone ( ) const override final { <nl> + return new UriRegex ( _uri ) ; <nl> + } ; <nl> + <nl> + bool canHandle ( const String & requestUri , std : : vector < String > & pathArgs ) override final { <nl> + if ( Uri : : canHandle ( requestUri , pathArgs ) ) <nl> + return true ; <nl> + <nl> + regmatch_t groupArray [ REGEX_MAX_GROUPS ] ; <nl> + if ( regexec ( & _regexCompiled , requestUri . c_str ( ) , REGEX_MAX_GROUPS , groupArray , 0 ) = = 0 ) { <nl> + / / matches <nl> + pathArgs . clear ( ) ; <nl> + <nl> + unsigned int g = 1 ; <nl> + for ( ; g < REGEX_MAX_GROUPS ; g + + ) { <nl> + if ( groupArray [ g ] . rm_so = = ( long int ) - 1 ) <nl> + break ; / / No more groups <nl> + <nl> + pathArgs . push_back ( requestUri . substring ( groupArray [ g ] . rm_so , groupArray [ g ] . rm_eo ) ) ; <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> + return false ; <nl> + } <nl> + } ; <nl> + <nl> + # endif <nl>
|
Add Uri with support for regexUri and globUri ( )
|
esp8266/Arduino
|
4eca62cb530297d98c00f463dba03113811a444e
|
2020-02-22T19:51:47Z
|
mmm a / src / core / hle / service / audio / audout_u . cpp <nl> ppp b / src / core / hle / service / audio / audout_u . cpp <nl> class IAudioOut final : public ServiceFramework < IAudioOut > { <nl> <nl> private : <nl> void GetAudioOutState ( Kernel : : HLERequestContext & ctx ) { <nl> - LOG_DEBUG ( Service_Audio , " called " ) ; <nl> + NGLOG_DEBUG ( Service_Audio , " called " ) ; <nl> IPC : : ResponseBuilder rb { ctx , 3 } ; <nl> rb . Push ( RESULT_SUCCESS ) ; <nl> rb . Push ( static_cast < u32 > ( audio_out_state ) ) ; <nl> } <nl> <nl> void StartAudioOut ( Kernel : : HLERequestContext & ctx ) { <nl> - LOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> + NGLOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> <nl> / / Start audio <nl> audio_out_state = AudioState : : Started ; <nl> class IAudioOut final : public ServiceFramework < IAudioOut > { <nl> } <nl> <nl> void StopAudioOut ( Kernel : : HLERequestContext & ctx ) { <nl> - LOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> + NGLOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> <nl> / / Stop audio <nl> audio_out_state = AudioState : : Stopped ; <nl> class IAudioOut final : public ServiceFramework < IAudioOut > { <nl> } <nl> <nl> void RegisterBufferEvent ( Kernel : : HLERequestContext & ctx ) { <nl> - LOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> + NGLOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> <nl> IPC : : ResponseBuilder rb { ctx , 2 , 1 } ; <nl> rb . Push ( RESULT_SUCCESS ) ; <nl> class IAudioOut final : public ServiceFramework < IAudioOut > { <nl> } <nl> <nl> void AppendAudioOutBuffer ( Kernel : : HLERequestContext & ctx ) { <nl> - LOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> + NGLOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> IPC : : RequestParser rp { ctx } ; <nl> <nl> const u64 key { rp . Pop < u64 > ( ) } ; <nl> class IAudioOut final : public ServiceFramework < IAudioOut > { <nl> } <nl> <nl> void GetReleasedAudioOutBuffer ( Kernel : : HLERequestContext & ctx ) { <nl> - LOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> + NGLOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> <nl> / / TODO ( st4rk ) : This is how libtransistor currently implements the <nl> / / GetReleasedAudioOutBuffer , it should return the key ( a VAddr ) to the app and this address <nl> class IAudioOut final : public ServiceFramework < IAudioOut > { <nl> } ; <nl> <nl> void AudOutU : : ListAudioOuts ( Kernel : : HLERequestContext & ctx ) { <nl> - LOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> + NGLOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> IPC : : RequestParser rp { ctx } ; <nl> <nl> const std : : string audio_interface = " AudioInterface " ; <nl> void AudOutU : : ListAudioOuts ( Kernel : : HLERequestContext & ctx ) { <nl> } <nl> <nl> void AudOutU : : OpenAudioOut ( Kernel : : HLERequestContext & ctx ) { <nl> - LOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> + NGLOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> <nl> if ( ! audio_out_interface ) { <nl> audio_out_interface = std : : make_shared < IAudioOut > ( ) ; <nl> mmm a / src / core / hle / service / audio / audren_u . cpp <nl> ppp b / src / core / hle / service / audio / audren_u . cpp <nl> class IAudioRenderer final : public ServiceFramework < IAudioRenderer > { <nl> } <nl> <nl> void RequestUpdateAudioRenderer ( Kernel : : HLERequestContext & ctx ) { <nl> - LOG_DEBUG ( Service_Audio , " % s " , ctx . Description ( ) . 
c_str ( ) ) ; <nl> + NGLOG_DEBUG ( Service_Audio , " { } " , ctx . Description ( ) ) ; <nl> AudioRendererResponseData response_data { } ; <nl> <nl> response_data . section_0_size = <nl> class IAudioRenderer final : public ServiceFramework < IAudioRenderer > { <nl> <nl> rb . Push ( RESULT_SUCCESS ) ; <nl> <nl> - LOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> + NGLOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> } <nl> <nl> void StartAudioRenderer ( Kernel : : HLERequestContext & ctx ) { <nl> class IAudioRenderer final : public ServiceFramework < IAudioRenderer > { <nl> <nl> rb . Push ( RESULT_SUCCESS ) ; <nl> <nl> - LOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> + NGLOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> } <nl> <nl> void StopAudioRenderer ( Kernel : : HLERequestContext & ctx ) { <nl> class IAudioRenderer final : public ServiceFramework < IAudioRenderer > { <nl> <nl> rb . Push ( RESULT_SUCCESS ) ; <nl> <nl> - LOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> + NGLOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> } <nl> <nl> void QuerySystemEvent ( Kernel : : HLERequestContext & ctx ) { <nl> class IAudioRenderer final : public ServiceFramework < IAudioRenderer > { <nl> rb . Push ( RESULT_SUCCESS ) ; <nl> rb . PushCopyObjects ( system_event ) ; <nl> <nl> - LOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> + NGLOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> } <nl> <nl> struct AudioRendererStateEntry { <nl> class IAudioDevice final : public ServiceFramework < IAudioDevice > { <nl> <nl> private : <nl> void ListAudioDeviceName ( Kernel : : HLERequestContext & ctx ) { <nl> - LOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> + NGLOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> IPC : : RequestParser rp { ctx } ; <nl> <nl> const std : : string audio_interface = " AudioInterface " ; <nl> class IAudioDevice final : public ServiceFramework < IAudioDevice > { <nl> } <nl> <nl> void SetAudioDeviceOutputVolume ( Kernel : : HLERequestContext & ctx ) { <nl> - LOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> + NGLOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> <nl> IPC : : RequestParser rp { ctx } ; <nl> f32 volume = static_cast < f32 > ( rp . Pop < u32 > ( ) ) ; <nl> class IAudioDevice final : public ServiceFramework < IAudioDevice > { <nl> } <nl> <nl> void GetActiveAudioDeviceName ( Kernel : : HLERequestContext & ctx ) { <nl> - LOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> + NGLOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> IPC : : RequestParser rp { ctx } ; <nl> <nl> const std : : string audio_interface = " AudioDevice " ; <nl> class IAudioDevice final : public ServiceFramework < IAudioDevice > { <nl> } <nl> <nl> void QueryAudioDeviceSystemEvent ( Kernel : : HLERequestContext & ctx ) { <nl> - LOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> + NGLOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> <nl> buffer_event - > Signal ( ) ; <nl> <nl> class IAudioDevice final : public ServiceFramework < IAudioDevice > { <nl> } <nl> <nl> void GetActiveChannelCount ( Kernel : : HLERequestContext & ctx ) { <nl> - LOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> + NGLOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> IPC : : ResponseBuilder rb { ctx , 3 } ; <nl> rb . Push ( RESULT_SUCCESS ) ; <nl> rb . 
Push < u32 > ( 1 ) ; <nl> void AudRenU : : OpenAudioRenderer ( Kernel : : HLERequestContext & ctx ) { <nl> rb . Push ( RESULT_SUCCESS ) ; <nl> rb . PushIpcInterface < Audio : : IAudioRenderer > ( ) ; <nl> <nl> - LOG_DEBUG ( Service_Audio , " called " ) ; <nl> + NGLOG_DEBUG ( Service_Audio , " called " ) ; <nl> } <nl> <nl> void AudRenU : : GetAudioRendererWorkBufferSize ( Kernel : : HLERequestContext & ctx ) { <nl> void AudRenU : : GetAudioRendererWorkBufferSize ( Kernel : : HLERequestContext & ctx ) { <nl> rb . Push ( RESULT_SUCCESS ) ; <nl> rb . Push < u64 > ( 0x400 ) ; <nl> <nl> - LOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> + NGLOG_WARNING ( Service_Audio , " ( STUBBED ) called " ) ; <nl> } <nl> <nl> void AudRenU : : GetAudioDevice ( Kernel : : HLERequestContext & ctx ) { <nl> void AudRenU : : GetAudioDevice ( Kernel : : HLERequestContext & ctx ) { <nl> rb . Push ( RESULT_SUCCESS ) ; <nl> rb . PushIpcInterface < Audio : : IAudioDevice > ( ) ; <nl> <nl> - LOG_DEBUG ( Service_Audio , " called " ) ; <nl> + NGLOG_DEBUG ( Service_Audio , " called " ) ; <nl> } <nl> <nl> } / / namespace Service : : Audio <nl>
|
audio : Move logging macros over to new fmt - compatible ones
|
yuzu-emu/yuzu
|
8d32bf9a96bf8eea855d3ad1614e3f7f4f27ff8d
|
2018-04-24T14:18:09Z
|
mmm a / selfdrive / common / util . c <nl> ppp b / selfdrive / common / util . c <nl> void * read_file ( const char * path , size_t * out_len ) { <nl> long f_len = ftell ( f ) ; <nl> rewind ( f ) ; <nl> <nl> - / / calloc one extra byte so the file will always be NULL terminated <nl> + / / malloc one extra byte so the file will always be NULL terminated <nl> / / cl_cached_program_from_file relies on this <nl> - char * buf = ( char * ) calloc ( f_len + 1 , 1 ) ; <nl> + char * buf = ( char * ) malloc ( f_len + 1 ) ; <nl> assert ( buf ) ; <nl> <nl> size_t num_read = fread ( buf , f_len , 1 , f ) ; <nl> void * read_file ( const char * path , size_t * out_len ) { <nl> return NULL ; <nl> } <nl> <nl> + buf [ f_len ] = ' \ 0 ' ; <nl> if ( out_len ) { <nl> * out_len = f_len ; <nl> } <nl>
|
use malloc ( )
|
commaai/openpilot
|
b678449707ae69b593f0f9b3f8600c7efa3c040a
|
2020-06-12T01:06:47Z
|
mmm a / lib / IDE / CodeCompletion . cpp <nl> ppp b / lib / IDE / CodeCompletion . cpp <nl> class CompletionLookup final : public swift : : VisibleDeclConsumer { <nl> <nl> static bool getPositionInTupleExpr ( DeclContext & DC , Expr * Target , <nl> TupleExpr * Tuple , unsigned & Pos , <nl> - bool & HasName , <nl> - llvm : : SmallVectorImpl < Type > & TupleEleTypes ) { <nl> + bool & HasName ) { <nl> auto & SM = DC . getASTContext ( ) . SourceMgr ; <nl> Pos = 0 ; <nl> for ( auto E : Tuple - > getElements ( ) ) { <nl> if ( SM . isBeforeInBuffer ( E - > getEndLoc ( ) , Target - > getStartLoc ( ) ) ) { <nl> - TupleEleTypes . push_back ( E - > getType ( ) ) ; <nl> Pos + + ; <nl> continue ; <nl> } <nl> class CompletionLookup final : public swift : : VisibleDeclConsumer { <nl> } <nl> } <nl> <nl> - static bool collectPossibleArgTypes ( DeclContext & DC , CallExpr * CallE , Expr * CCExpr , <nl> - SmallVectorImpl < Type > & PossibleTypes , <nl> - unsigned & Position , bool & HasName , <nl> - bool RemoveUnlikelyOverloads ) { <nl> - if ( auto Ty = CallE - > getFn ( ) - > getType ( ) ) { <nl> - if ( auto FT = Ty - > getAs < FunctionType > ( ) ) { <nl> - PossibleTypes . push_back ( FT - > getInput ( ) ) ; <nl> + static bool collectionInputTypes ( DeclContext & DC , CallExpr * callExpr , <nl> + SmallVectorImpl < Type > & possibleTypes ) { <nl> + auto * fnExpr = callExpr - > getFn ( ) ; <nl> + <nl> + if ( auto type = fnExpr - > getType ( ) ) { <nl> + if ( auto * funcType = type - > getAs < AnyFunctionType > ( ) ) <nl> + possibleTypes . push_back ( funcType - > getInput ( ) ) ; <nl> + } else if ( auto * DRE = dyn_cast < DeclRefExpr > ( fnExpr ) ) { <nl> + if ( auto * decl = DRE - > getDecl ( ) ) { <nl> + auto declType = decl - > getInterfaceType ( ) ; <nl> + if ( auto * funcType = declType - > getAs < AnyFunctionType > ( ) ) <nl> + possibleTypes . push_back ( funcType - > getInput ( ) ) ; <nl> } <nl> - } <nl> - if ( auto TAG = dyn_cast < TupleExpr > ( CallE - > getArg ( ) ) ) { <nl> - llvm : : SmallVector < Type , 3 > TupleEleTypesBeforeTarget ; <nl> - if ( ! getPositionInTupleExpr ( DC , CCExpr , TAG , Position , HasName , <nl> - TupleEleTypesBeforeTarget ) ) <nl> - return false ; <nl> - if ( PossibleTypes . empty ( ) & & <nl> - ! typeCheckUnresolvedExpr ( DC , CallE - > getArg ( ) , CallE , PossibleTypes ) ) <nl> - return false ; <nl> - if ( RemoveUnlikelyOverloads ) { <nl> - removeUnlikelyOverloads ( PossibleTypes , TupleEleTypesBeforeTarget , & DC ) ; <nl> - return ! PossibleTypes . empty ( ) ; <nl> + } else if ( auto * OSRE = dyn_cast < OverloadSetRefExpr > ( fnExpr ) ) { <nl> + for ( auto * decl : OSRE - > getDecls ( ) ) { <nl> + auto declType = decl - > getInterfaceType ( ) ; <nl> + if ( auto * funcType = declType - > getAs < AnyFunctionType > ( ) ) <nl> + possibleTypes . push_back ( funcType - > getInput ( ) ) ; <nl> } <nl> - } else if ( isa < ParenExpr > ( CallE - > getArg ( ) ) ) { <nl> - Position = 0 ; <nl> - HasName = false ; <nl> - if ( PossibleTypes . empty ( ) & & <nl> - ! typeCheckUnresolvedExpr ( DC , CallE - > getArg ( ) , CallE , PossibleTypes ) ) <nl> + } else { <nl> + ConcreteDeclRef ref = nullptr ; <nl> + auto fnType = getTypeOfCompletionContextExpr ( DC . getASTContext ( ) , <nl> + & DC , CompletionTypeCheckKind : : Normal , <nl> + fnExpr , ref ) ; <nl> + <nl> + if ( ! fnType ) <nl> return false ; <nl> - } else <nl> + <nl> + if ( auto * AFT = ( * fnType ) - > getAs < AnyFunctionType > ( ) ) <nl> + possibleTypes . push_back ( AFT - > getInput ( ) ) ; <nl> + } <nl> + <nl> + return ! 
possibleTypes . empty ( ) ; <nl> + } <nl> + <nl> + static bool collectPossibleArgTypes ( DeclContext & DC , CallExpr * CallE , <nl> + Expr * CCExpr , <nl> + SmallVectorImpl < Type > & PossibleTypes , <nl> + unsigned & Position , bool & HasName ) { <nl> + if ( ! collectionInputTypes ( DC , CallE , PossibleTypes ) ) <nl> return false ; <nl> - return true ; <nl> + <nl> + if ( auto * tuple = dyn_cast < TupleExpr > ( CallE - > getArg ( ) ) ) { <nl> + for ( unsigned i = 0 , n = tuple - > getNumElements ( ) ; i ! = n ; + + i ) { <nl> + if ( isa < CodeCompletionExpr > ( tuple - > getElement ( i ) ) ) { <nl> + HasName = ! tuple - > getElementName ( i ) . empty ( ) ; <nl> + Position = i ; <nl> + return true ; <nl> + } <nl> + } <nl> + <nl> + return getPositionInTupleExpr ( DC , CCExpr , tuple , Position , HasName ) ; <nl> + } else if ( auto * paren = dyn_cast < ParenExpr > ( CallE - > getArg ( ) ) ) { <nl> + HasName = false ; <nl> + Position = 0 ; <nl> + return true ; <nl> + } <nl> + <nl> + return false ; <nl> } <nl> <nl> static bool <nl> class CompletionLookup final : public swift : : VisibleDeclConsumer { <nl> unsigned Position ; <nl> bool HasName ; <nl> if ( collectPossibleArgTypes ( DC , CallE , CCExpr , PossibleTypes , Position , <nl> - HasName , true ) ) { <nl> + HasName ) ) { <nl> collectArgumentExpectation ( Position , HasName , PossibleTypes , <nl> CCExpr - > getStartLoc ( ) , ExpectedTypes , ExpectedNames ) ; <nl> return ! ExpectedTypes . empty ( ) | | ! ExpectedNames . empty ( ) ; <nl> class CompletionLookup final : public swift : : VisibleDeclConsumer { <nl> SmallVector < Type , 2 > PossibleTypes ; <nl> unsigned Position ; <nl> bool HasName ; <nl> - return collectPossibleArgTypes ( DC , CallE , CCExpr , PossibleTypes , Position , <nl> - HasName , true ) & & <nl> - lookupArgCompletionsAtPosition ( Position , HasName , PossibleTypes , <nl> - CCExpr - > getStartLoc ( ) ) ; <nl> + bool hasPossibleArgTypes = collectPossibleArgTypes ( DC , CallE , CCExpr , <nl> + PossibleTypes , Position , <nl> + HasName ) ; <nl> + bool hasCompletions = lookupArgCompletionsAtPosition ( Position , HasName , <nl> + PossibleTypes , <nl> + CCExpr - > getStartLoc ( ) ) ; <nl> + <nl> + return hasPossibleArgTypes & & hasCompletions ; <nl> } <nl> <nl> void getTypeContextEnumElementCompletions ( SourceLoc Loc ) { <nl> class CodeCompletionTypeContextAnalyzer { <nl> if ( SM . isBeforeInBuffer ( AE - > getEqualLoc ( ) , ParsedExpr - > getStartLoc ( ) ) ) { <nl> <nl> / / The destination is of the expected type . <nl> - Callback ( AE - > getDest ( ) - > getType ( ) ) ; <nl> + auto * destExpr = AE - > getDest ( ) ; <nl> + if ( auto type = destExpr - > getType ( ) ) { <nl> + Callback ( type ) ; <nl> + } else if ( auto * DRE = dyn_cast < DeclRefExpr > ( destExpr ) ) { <nl> + if ( auto * decl = DRE - > getDecl ( ) ) <nl> + Callback ( decl - > getInterfaceType ( ) ) ; <nl> + } <nl> } <nl> break ; <nl> } <nl> void CodeCompletionCallbacksImpl : : doneParsing ( ) { <nl> } <nl> case CompletionKind : : AssignmentRHS : { <nl> SourceLoc Loc = P . Context . SourceMgr . getCodeCompletionLoc ( ) ; <nl> - if ( auto destType = AssignmentExpr - > getDest ( ) - > getType ( ) ) <nl> + if ( auto destType = ParsedExpr - > getType ( ) ) <nl> Lookup . setExpectedTypes ( destType - > getRValueType ( ) ) ; <nl> Lookup . getValueCompletionsInDeclContext ( Loc , DefaultFilter ) ; <nl> break ; <nl> mmm a / lib / Sema / CSApply . cpp <nl> ppp b / lib / Sema / CSApply . 
cpp <nl> namespace { <nl> } <nl> <nl> Expr * visitTypeExpr ( TypeExpr * expr ) { <nl> - auto toType = simplifyType ( expr - > getTypeLoc ( ) . getType ( ) ) ; <nl> + auto toType = simplifyType ( cs . getType ( expr - > getTypeLoc ( ) ) ) ; <nl> expr - > getTypeLoc ( ) . setType ( toType , / * validated = * / true ) ; <nl> cs . setType ( expr , MetatypeType : : get ( toType ) ) ; <nl> - <nl> return expr ; <nl> } <nl> <nl> namespace { <nl> Expr * visitIsExpr ( IsExpr * expr ) { <nl> / / Turn the subexpression into an rvalue . <nl> auto & tc = cs . getTypeChecker ( ) ; <nl> - auto toType = simplifyType ( expr - > getCastTypeLoc ( ) . getType ( ) ) ; <nl> + auto toType = simplifyType ( cs . getType ( expr - > getCastTypeLoc ( ) ) ) ; <nl> auto sub = cs . coerceToRValue ( expr - > getSubExpr ( ) ) ; <nl> <nl> checkForImportedUsedConformances ( toType ) ; <nl> namespace { <nl> sub , expr - > getLoc ( ) , SourceLoc ( ) , <nl> TypeLoc : : withoutLoc ( toType ) ) ; <nl> cs . setType ( cast , toOptType ) ; <nl> + cs . setType ( cast - > getCastTypeLoc ( ) , toType ) ; <nl> if ( expr - > isImplicit ( ) ) <nl> cast - > setImplicit ( ) ; <nl> <nl> namespace { <nl> <nl> Expr * visitCoerceExpr ( CoerceExpr * expr , Optional < unsigned > choice ) { <nl> / / Simplify the type we ' re casting to . <nl> - auto toType = simplifyType ( expr - > getCastTypeLoc ( ) . getType ( ) ) ; <nl> + auto toType = simplifyType ( cs . getType ( expr - > getCastTypeLoc ( ) ) ) ; <nl> expr - > getCastTypeLoc ( ) . setType ( toType , / * validated = * / true ) ; <nl> checkForImportedUsedConformances ( toType ) ; <nl> <nl> namespace { <nl> / / Most of the logic for dealing with ForcedCheckedCastExpr . <nl> Expr * handleForcedCheckedCastExpr ( ForcedCheckedCastExpr * expr ) { <nl> / / Simplify the type we ' re casting to . <nl> - auto toType = simplifyType ( expr - > getCastTypeLoc ( ) . getType ( ) ) ; <nl> + auto toType = simplifyType ( cs . getType ( expr - > getCastTypeLoc ( ) ) ) ; <nl> expr - > getCastTypeLoc ( ) . setType ( toType , / * validated = * / true ) ; <nl> checkForImportedUsedConformances ( toType ) ; <nl> <nl> namespace { <nl> auto * result = new ( tc . Context ) CoerceExpr ( sub , expr - > getLoc ( ) , <nl> expr - > getCastTypeLoc ( ) ) ; <nl> cs . setType ( result , toType ) ; <nl> + cs . setType ( result - > getCastTypeLoc ( ) , toType ) ; <nl> unsigned disjunctionChoice = <nl> ( castKind = = CheckedCastKind : : Coercion ? 0 : 1 ) ; <nl> return visitCoerceExpr ( result , disjunctionChoice ) ; <nl> namespace { <nl> Expr * handleConditionalCheckedCastExpr ( ConditionalCheckedCastExpr * expr , <nl> bool isInsideIsExpr = false ) { <nl> / / Simplify the type we ' re casting to . <nl> - auto toType = simplifyType ( expr - > getCastTypeLoc ( ) . getType ( ) ) ; <nl> + auto toType = simplifyType ( cs . getType ( expr - > getCastTypeLoc ( ) ) ) ; <nl> checkForImportedUsedConformances ( toType ) ; <nl> expr - > getCastTypeLoc ( ) . setType ( toType , / * validated = * / true ) ; <nl> <nl> namespace { <nl> auto * coerce = new ( tc . Context ) CoerceExpr ( sub , expr - > getLoc ( ) , <nl> expr - > getCastTypeLoc ( ) ) ; <nl> cs . setType ( coerce , toType ) ; <nl> + cs . setType ( coerce - > getCastTypeLoc ( ) , toType ) ; <nl> unsigned disjunctionChoice = <nl> ( castKind = = CheckedCastKind : : Coercion ? 
0 : 1 ) ; <nl> Expr * result = visitCoerceExpr ( coerce , disjunctionChoice ) ; <nl> namespace { <nl> bool allIndexesHashable = true ; <nl> ArrayRef < TupleTypeElt > indexTypes ; <nl> TupleTypeElt singleIndexTypeBuf ; <nl> - if ( auto tup = component . getIndexExpr ( ) - > getType ( ) <nl> + if ( auto tup = cs . getType ( component . getIndexExpr ( ) ) <nl> - > getAs < TupleType > ( ) ) { <nl> indexTypes = tup - > getElements ( ) ; <nl> } else { <nl> - singleIndexTypeBuf = component . getIndexExpr ( ) - > getType ( ) ; <nl> + singleIndexTypeBuf = cs . getType ( component . getIndexExpr ( ) ) ; <nl> indexTypes = singleIndexTypeBuf ; <nl> } <nl> <nl> Expr * ExprRewriter : : coerceToType ( Expr * expr , Type toType , <nl> / / swap the order so that we load first and force the result . <nl> cs . propagateLValueAccessKind ( expr , AccessKind : : Read ) ; <nl> if ( auto * forceExpr = dyn_cast < ForceValueExpr > ( expr ) ) { <nl> - fromType = forceExpr - > getSubExpr ( ) - > getType ( ) - > getRValueType ( ) ; <nl> + fromType = cs . getType ( forceExpr - > getSubExpr ( ) ) - > getRValueType ( ) ; <nl> auto * loadExpr = cs . cacheType ( <nl> new ( tc . Context ) LoadExpr ( forceExpr - > getSubExpr ( ) , fromType ) ) ; <nl> auto * newForceValue = new ( tc . Context ) <nl> ForceValueExpr ( loadExpr , forceExpr - > getLoc ( ) , <nl> forceExpr - > isForceOfImplicitlyUnwrappedOptional ( ) ) ; <nl> - cs . setType ( newForceValue , loadExpr - > getType ( ) - > getOptionalObjectType ( ) ) ; <nl> + cs . setType ( newForceValue , <nl> + cs . getType ( loadExpr ) - > getOptionalObjectType ( ) ) ; <nl> expr = newForceValue ; <nl> } else { <nl> expr = cs . cacheType ( new ( tc . Context ) <nl> namespace { <nl> } else { <nl> / / For other closures , type - check the body once we ' ve finished with <nl> / / the expression . <nl> + cs . setExprTypes ( closure ) ; <nl> ClosuresToTypeCheck . push_back ( closure ) ; <nl> } <nl> <nl> mmm a / lib / Sema / CSDiag . cpp <nl> ppp b / lib / Sema / CSDiag . cpp <nl> static void eraseOpenedExistentials ( Expr * & expr , ConstraintSystem & CS ) { <nl> return type ; <nl> } ) ; <nl> CS . setType ( expr , type ) ; <nl> + / / Set new type to the expression directly . <nl> + expr - > setType ( type ) ; <nl> <nl> return expr ; <nl> } <nl> bool FailureDiagnosis : : visitInOutExpr ( InOutExpr * IOE ) { <nl> bool FailureDiagnosis : : visitCoerceExpr ( CoerceExpr * CE ) { <nl> / / Coerce the input to whatever type is specified by the CoerceExpr . <nl> auto expr = typeCheckChildIndependently ( CE - > getSubExpr ( ) , <nl> - CE - > getCastTypeLoc ( ) . getType ( ) , <nl> + CS . getType ( CE - > getCastTypeLoc ( ) ) , <nl> CTP_CoerceOperand ) ; <nl> if ( ! expr ) <nl> return true ; <nl> static void noteArchetypeSource ( const TypeLoc & loc , ArchetypeType * archetype , <nl> / / ` Pair < Any , Any > ` . <nl> / / Right now we only handle this when the type that ' s at fault is the <nl> / / top - level type passed to this function . <nl> - if ( loc . getType ( ) . isNull ( ) ) { <nl> - return ; <nl> - } <nl> - <nl> + auto type = loc . getType ( ) ; <nl> + if ( ! type ) <nl> + type = cs . getType ( loc ) ; <nl> + <nl> ArrayRef < Type > genericArgs ; <nl> - if ( auto * boundGenericTy = loc . getType ( ) - > getAs < BoundGenericType > ( ) ) { <nl> + <nl> + if ( auto * boundGenericTy = type - > getAs < BoundGenericType > ( ) ) { <nl> if ( boundGenericTy - > getDecl ( ) = = FoundDecl ) <nl> genericArgs = boundGenericTy - > getGenericArgs ( ) ; <nl> } <nl> mmm a / lib / Sema / CSGen . 
cpp <nl> ppp b / lib / Sema / CSGen . cpp <nl> namespace { <nl> <nl> auto locator = CS . getConstraintLocator ( E ) ; <nl> type = CS . openUnboundGenericType ( type , locator ) ; <nl> - E - > getTypeLoc ( ) . setType ( type , / * validated = * / true ) ; <nl> + CS . setType ( E - > getTypeLoc ( ) , type ) ; <nl> return MetatypeType : : get ( type ) ; <nl> } <nl> <nl> namespace { <nl> / / Open the type we ' re casting to . <nl> auto toType = CS . openUnboundGenericType ( expr - > getCastTypeLoc ( ) . getType ( ) , <nl> CS . getConstraintLocator ( expr ) ) ; <nl> - expr - > getCastTypeLoc ( ) . setType ( toType , / * validated = * / true ) ; <nl> + CS . setType ( expr - > getCastTypeLoc ( ) , toType ) ; <nl> <nl> auto fromType = CS . getType ( fromExpr ) ; <nl> auto locator = CS . getConstraintLocator ( expr ) ; <nl> namespace { <nl> / / Open the type we ' re casting to . <nl> auto toType = CS . openUnboundGenericType ( expr - > getCastTypeLoc ( ) . getType ( ) , <nl> CS . getConstraintLocator ( expr ) ) ; <nl> - expr - > getCastTypeLoc ( ) . setType ( toType , / * validated = * / true ) ; <nl> + CS . setType ( expr - > getCastTypeLoc ( ) , toType ) ; <nl> <nl> auto fromType = CS . getType ( expr - > getSubExpr ( ) ) ; <nl> auto locator = CS . getConstraintLocator ( expr ) ; <nl> namespace { <nl> / / Open the type we ' re casting to . <nl> auto toType = CS . openUnboundGenericType ( expr - > getCastTypeLoc ( ) . getType ( ) , <nl> CS . getConstraintLocator ( expr ) ) ; <nl> - expr - > getCastTypeLoc ( ) . setType ( toType , / * validated = * / true ) ; <nl> + CS . setType ( expr - > getCastTypeLoc ( ) , toType ) ; <nl> <nl> auto fromType = CS . getType ( fromExpr ) ; <nl> auto locator = CS . getConstraintLocator ( expr ) ; <nl> namespace { <nl> / / FIXME : Locator for the cast type ? <nl> auto toType = CS . openUnboundGenericType ( expr - > getCastTypeLoc ( ) . getType ( ) , <nl> CS . getConstraintLocator ( expr ) ) ; <nl> - expr - > getCastTypeLoc ( ) . setType ( toType , / * validated = * / true ) ; <nl> + CS . setType ( expr - > getCastTypeLoc ( ) , toType ) ; <nl> <nl> / / Add a checked cast constraint . <nl> auto fromType = CS . getType ( expr - > getSubExpr ( ) ) ; <nl> namespace { <nl> <nl> auto * TE = TypeExpr : : createImplicit ( joinTy , CS . getASTContext ( ) ) ; <nl> CS . cacheType ( TE ) ; <nl> + CS . setType ( TE - > getTypeLoc ( ) , joinTy ) ; <nl> <nl> auto * DSE = new ( CS . getASTContext ( ) ) <nl> DotSelfExpr ( TE , SourceLoc ( ) , SourceLoc ( ) , CS . getType ( TE ) ) ; <nl> bool swift : : typeCheckUnresolvedExpr ( DeclContext & DC , <nl> SmallVectorImpl < Type > & PossibleTypes ) { <nl> PrettyStackTraceExpr stackTrace ( DC . getASTContext ( ) , <nl> " type - checking unresolved member " , Parent ) ; <nl> + <nl> ConstraintSystemOptions Options = ConstraintSystemFlags : : AllowFixes ; <nl> auto * TC = static_cast < TypeChecker * > ( DC . getASTContext ( ) . getLazyResolver ( ) ) ; <nl> ConstraintSystem CS ( * TC , & DC , Options ) ; <nl> bool swift : : typeCheckUnresolvedExpr ( DeclContext & DC , <nl> } <nl> <nl> SmallVector < Solution , 3 > solutions ; <nl> - if ( CS . solve ( Parent , solutions , FreeTypeVariableBinding : : Allow ) ) { <nl> + if ( CS . solve ( Parent , solutions , FreeTypeVariableBinding : : UnresolvedType ) ) { <nl> return false ; <nl> } <nl> <nl> mmm a / lib / Sema / ConstraintSystem . h <nl> ppp b / lib / Sema / ConstraintSystem . 
h <nl> class ConstraintSystem { <nl> / / / run through various diagnostics passes without actually mutating <nl> / / / the types on the expression nodes . <nl> llvm : : DenseMap < const Expr * , TypeBase * > ExprTypes ; <nl> + llvm : : DenseMap < const TypeLoc * , TypeBase * > TypeLocTypes ; <nl> <nl> / / / Maps closure parameters to type variables . <nl> llvm : : DenseMap < const ParamDecl * , TypeVariableType * > <nl> class ConstraintSystem { <nl> if ( ExcludeRoot & & expr = = RootExpr ) <nl> return expr ; <nl> <nl> - assert ( ( ! expr - > getType ( ) | | CS . getType ( expr ) - > isEqual ( expr - > getType ( ) ) ) <nl> - & & " Mismatched types ! " ) ; <nl> + / / assert ( ( ! expr - > getType ( ) | | CS . getType ( expr ) - > isEqual ( expr - > getType ( ) ) ) <nl> + / / & & " Mismatched types ! " ) ; <nl> assert ( ! CS . getType ( expr ) - > hasTypeVariable ( ) & & <nl> " Should not write type variable into expression ! " ) ; <nl> expr - > setType ( CS . getType ( expr ) ) ; <nl> class ConstraintSystem { <nl> / / " Expected type to be invariant ! " ) ; <nl> <nl> ExprTypes [ E ] = T . getPointer ( ) ; <nl> + } <nl> <nl> - / / FIXME : Temporary until all references to expression types are <nl> - / / updated . <nl> - E - > setType ( T ) ; <nl> + void setType ( TypeLoc & L , Type T ) { <nl> + assert ( T & & " Expected non - null type ! " ) ; <nl> + TypeLocTypes [ & L ] = T . getPointer ( ) ; <nl> } <nl> <nl> / / / Check to see if we have a type for an expression . <nl> class ConstraintSystem { <nl> return ExprTypes . find ( E ) ! = ExprTypes . end ( ) ; <nl> } <nl> <nl> + bool hasType ( const TypeLoc & L ) const { <nl> + return TypeLocTypes . find ( & L ) ! = TypeLocTypes . end ( ) ; <nl> + } <nl> + <nl> / / / Get the type for an expression . <nl> Type getType ( const Expr * E ) const { <nl> assert ( hasType ( E ) & & " Expected type to have been set ! " ) ; <nl> class ConstraintSystem { <nl> return ExprTypes . find ( E ) - > second ; <nl> } <nl> <nl> + Type getType ( const TypeLoc & L ) const { <nl> + assert ( hasType ( L ) & & " Expected type to have been set ! " ) ; <nl> + return TypeLocTypes . find ( & L ) - > second ; <nl> + } <nl> + <nl> / / / Cache the type of the expression argument and return that same <nl> / / / argument . <nl> template < typename T > <nl> mmm a / test / Compatibility / tuple_arguments_3 . swift <nl> ppp b / test / Compatibility / tuple_arguments_3 . 
swift <nl> do { <nl> } <nl> <nl> do { <nl> - var a = 3 / / expected - warning { { variable ' a ' was never mutated ; consider changing to ' let ' constant } } <nl> - var b = 4 / / expected - warning { { variable ' b ' was never mutated ; consider changing to ' let ' constant } } <nl> - var c = ( 3 ) / / expected - warning { { variable ' c ' was never mutated ; consider changing to ' let ' constant } } <nl> - var d = ( a , b ) / / expected - warning { { variable ' d ' was never mutated ; consider changing to ' let ' constant } } <nl> + var a = 3 <nl> + var b = 4 <nl> + var c = ( 3 ) <nl> + var d = ( a , b ) <nl> <nl> concrete ( a ) <nl> concrete ( ( a ) ) <nl> do { <nl> } <nl> <nl> do { <nl> - var a = 3 / / expected - warning { { variable ' a ' was never mutated ; consider changing to ' let ' constant } } <nl> - var b = 4 / / expected - warning { { variable ' b ' was never mutated ; consider changing to ' let ' constant } } <nl> - var c = ( a , b ) / / expected - warning { { variable ' c ' was never mutated ; consider changing to ' let ' constant } } <nl> + var a = 3 <nl> + var b = 4 <nl> + var c = ( a , b ) <nl> <nl> _ = InitTwo ( a , b ) <nl> _ = InitTwo ( ( a , b ) ) / / expected - error { { missing argument for parameter # 2 in call } } <nl> do { <nl> <nl> do { <nl> / / TODO : Restore regressed diagnostics rdar : / / problem / 31724211 <nl> - var a = 3 / / e / xpected - warning { { variable ' a ' was never mutated ; consider changing to ' let ' constant } } <nl> - var b = 4 / / e / xpected - warning { { variable ' b ' was never mutated ; consider changing to ' let ' constant } } <nl> - var d = ( a , b ) / / e / xpected - warning { { variable ' d ' was never mutated ; consider changing to ' let ' constant } } <nl> + var a = 3 <nl> + var b = 4 <nl> + var d = ( a , b ) <nl> <nl> var s1 = SubscriptTwo ( ) <nl> _ = s1 [ a , b ] <nl> do { <nl> } <nl> <nl> do { <nl> - var a = 3 / / expected - warning { { variable ' a ' was never mutated ; consider changing to ' let ' constant } } <nl> - var b = 4 / / expected - warning { { variable ' b ' was never mutated ; consider changing to ' let ' constant } } <nl> - var c = ( a , b ) / / expected - warning { { variable ' c ' was never mutated ; consider changing to ' let ' constant } } <nl> + var a = 3 <nl> + var b = 4 <nl> + var c = ( a , b ) <nl> <nl> / / _ = GenericInit < ( Int , Int ) > ( a , b ) / / Crashes in Swift 3 <nl> _ = GenericInit < ( Int , Int ) > ( ( a , b ) ) / / expected - error { { expression type ' GenericInit < ( Int , Int ) > ' is ambiguous without more context } } <nl> do { <nl> <nl> do { <nl> / / TODO : Restore regressed diagnostics rdar : / / problem / 31724211 <nl> - var a = 3 . 0 / / e / xpected - warning { { variable ' a ' was never mutated ; consider changing to ' let ' constant } } <nl> - var b = 4 . 0 / / e / xpected - warning { { variable ' b ' was never mutated ; consider changing to ' let ' constant } } <nl> - var d = ( a , b ) / / e / xpected - warning { { variable ' d ' was never mutated ; consider changing to ' let ' constant } } <nl> + var a = 3 . 0 <nl> + var b = 4 . 0 <nl> + var d = ( a , b ) <nl> <nl> var s1 = GenericSubscript < ( Double , Double ) > ( ) <nl> _ = s1 [ a , b ] <nl> mmm a / test / Compatibility / tuple_arguments_4 . swift <nl> ppp b / test / Compatibility / tuple_arguments_4 . 
swift <nl> do { <nl> } <nl> <nl> do { <nl> - var a = 3 / / expected - warning { { variable ' a ' was never mutated ; consider changing to ' let ' constant } } <nl> - var b = 4 / / expected - warning { { variable ' b ' was never mutated ; consider changing to ' let ' constant } } <nl> - var c = ( 3 ) / / expected - warning { { variable ' c ' was never mutated ; consider changing to ' let ' constant } } <nl> - var d = ( a , b ) / / expected - warning { { variable ' d ' was never mutated ; consider changing to ' let ' constant } } <nl> + var a = 3 <nl> + var b = 4 <nl> + var c = ( 3 ) <nl> + var d = ( a , b ) <nl> <nl> concrete ( a ) <nl> concrete ( ( a ) ) <nl> do { <nl> } <nl> <nl> do { <nl> - var a = 3 / / expected - warning { { variable ' a ' was never mutated ; consider changing to ' let ' constant } } <nl> - var b = 4 / / expected - warning { { variable ' b ' was never mutated ; consider changing to ' let ' constant } } <nl> - var c = ( a , b ) / / expected - warning { { variable ' c ' was never mutated ; consider changing to ' let ' constant } } <nl> + var a = 3 <nl> + var b = 4 <nl> + var c = ( a , b ) <nl> <nl> _ = InitTwo ( a , b ) <nl> _ = InitTwo ( ( a , b ) ) / / expected - error { { missing argument for parameter # 2 in call } } <nl> do { <nl> } <nl> <nl> do { <nl> - var a = 3 / / expected - warning { { variable ' a ' was never mutated ; consider changing to ' let ' constant } } <nl> - var b = 4 / / expected - warning { { variable ' b ' was never mutated ; consider changing to ' let ' constant } } <nl> - var c = ( a , b ) / / expected - warning { { variable ' c ' was never mutated ; consider changing to ' let ' constant } } <nl> + var a = 3 <nl> + var b = 4 <nl> + var c = ( a , b ) <nl> <nl> _ = GenericInit < ( Int , Int ) > ( a , b ) / / expected - error { { extra argument in call } } <nl> _ = GenericInit < ( Int , Int ) > ( ( a , b ) ) <nl> mmm a / test / Constraints / generics . swift <nl> ppp b / test / Constraints / generics . swift <nl> func r22459135 ( ) { <nl> <nl> / / < rdar : / / problem / 19710848 > QoI : Friendlier error message for " [ ] as Set " <nl> / / < rdar : / / problem / 22326930 > QoI : " argument for generic parameter ' Element ' could not be inferred " lacks context <nl> - _ = [ ] as Set / / expected - error { { generic parameter ' Element ' could not be inferred in cast to ' Set < _ > } } expected - note { { explicitly specify the generic arguments to fix this issue } } { { 14 - 14 = < < # Element : Hashable # > > } } <nl> + _ = [ ] as Set / / expected - error { { generic parameter ' Element ' could not be inferred in cast to ' Set ' } } expected - note { { explicitly specify the generic arguments to fix this issue } } { { 14 - 14 = < < # Element : Hashable # > > } } <nl> <nl> <nl> / / < rdar : / / problem / 22509125 > QoI : Error when unable to infer generic archetype lacks greatness <nl> mmm a / test / Constraints / trailing_closures_objc . swift <nl> ppp b / test / Constraints / trailing_closures_objc . swift <nl> func foo ( options : [ AVMediaSelectionOption ] ) { <nl> func rdar28004686 ( a : [ IndexPath ] ) { <nl> _ = a . 
sorted { ( lhs : NSIndexPath , rhs : NSIndexPath ) - > Bool in true } <nl> / / expected - error @ - 1 { { ' NSIndexPath ' is not convertible to ' IndexPath ' } } <nl> - / / expected - error @ - 2 { { call can throw , but it is not marked with ' try ' and the error is not handled } } <nl> - / / The second error is erroneous : rdar : / / 36744895 <nl> } <nl> <nl> class Test : NSObject { <nl> mmm a / test / Constraints / tuple_arguments . swift <nl> ppp b / test / Constraints / tuple_arguments . swift <nl> do { <nl> } <nl> <nl> do { <nl> - var a = 3 / / expected - warning { { variable ' a ' was never mutated ; consider changing to ' let ' constant } } <nl> - var b = 4 / / expected - warning { { variable ' b ' was never mutated ; consider changing to ' let ' constant } } <nl> - var c = ( 3 ) / / expected - warning { { variable ' c ' was never mutated ; consider changing to ' let ' constant } } <nl> - var d = ( a , b ) / / expected - warning { { variable ' d ' was never mutated ; consider changing to ' let ' constant } } <nl> + var a = 3 <nl> + var b = 4 <nl> + var c = ( 3 ) <nl> + var d = ( a , b ) <nl> <nl> concrete ( a ) <nl> concrete ( ( a ) ) <nl> do { <nl> } <nl> <nl> do { <nl> - var a = 3 / / expected - warning { { variable ' a ' was never mutated ; consider changing to ' let ' constant } } <nl> - var b = 4 / / expected - warning { { variable ' b ' was never mutated ; consider changing to ' let ' constant } } <nl> - var c = ( a , b ) / / expected - warning { { variable ' c ' was never mutated ; consider changing to ' let ' constant } } <nl> + var a = 3 <nl> + var b = 4 <nl> + var c = ( a , b ) <nl> <nl> _ = InitTwo ( a , b ) <nl> _ = InitTwo ( ( a , b ) ) / / expected - error { { missing argument for parameter # 2 in call } } <nl> do { <nl> } <nl> <nl> do { <nl> - var a = 3 / / expected - warning { { variable ' a ' was never mutated ; consider changing to ' let ' constant } } <nl> - var b = 4 / / expected - warning { { variable ' b ' was never mutated ; consider changing to ' let ' constant } } <nl> - var c = ( a , b ) / / expected - warning { { variable ' c ' was never mutated ; consider changing to ' let ' constant } } <nl> + var a = 3 <nl> + var b = 4 <nl> + var c = ( a , b ) <nl> <nl> _ = GenericInit < ( Int , Int ) > ( a , b ) / / expected - error { { extra argument in call } } <nl> _ = GenericInit < ( Int , Int ) > ( ( a , b ) ) <nl> mmm a / test / IDE / complete_constructor . swift <nl> ppp b / test / IDE / complete_constructor . swift <nl> func testExplicitConstructors3P ( ) { <nl> / / EXPLICIT_CONSTRUCTORS_3P : Begin completions <nl> / / EXPLICIT_CONSTRUCTORS_3P - DAG : Decl [ Constructor ] / CurrNominal : [ ' ( ' ] { # ( a ) : Int # } [ ' ) ' ] [ # ExplicitConstructors3 # ] { { ; name = . + $ } } <nl> / / EXPLICIT_CONSTRUCTORS_3P - DAG : Decl [ Constructor ] / CurrNominal : [ ' ( ' ] { # a : Int # } , { # b : Float # } [ ' ) ' ] [ # ExplicitConstructors3 # ] { { ; name = . + $ } } <nl> - / / EXPLICIT_CONSTRUCTORS_3P - DAG : Decl [ FreeFunction ] / CurrModule / NotRecommended / TypeRelation [ Invalid ] : freeFunc ( ) [ # Void # ] { { ; name = . + $ } } <nl> / / EXPLICIT_CONSTRUCTORS_3P : End completions <nl> } <nl> <nl> mmm a / test / IDE / complete_enum_elements . swift <nl> ppp b / test / IDE / complete_enum_elements . swift <nl> enum BazEnum < T > { <nl> / / BAZ_T_ENUM_NO_DOT - NEXT : Decl [ StaticVar ] / CurrNominal : . staticVar [ # Int # ] { { ; name = . + $ } } <nl> / / BAZ_T_ENUM_NO_DOT - NEXT : Decl [ StaticVar ] / CurrNominal : . staticVarT [ # _ # ] { { ; name = . 
+ $ } } <nl> / / BAZ_T_ENUM_NO_DOT - NEXT : Decl [ StaticMethod ] / CurrNominal : . bazStaticFunc ( ) [ # Void # ] { { ; name = . + $ } } <nl> - / / BAZ_T_ENUM_NO_DOT - NEXT : Decl [ InfixOperatorFunction ] / OtherModule [ Swift ] : = = { # Any . Type ? # } [ # Bool # ] ; name = = = Any . Type ? <nl> - / / BAZ_T_ENUM_NO_DOT - NEXT : Decl [ InfixOperatorFunction ] / OtherModule [ Swift ] : ! = { # Any . Type ? # } [ # Bool # ] ; name = ! = Any . Type ? <nl> / / BAZ_T_ENUM_NO_DOT - NEXT : End completions <nl> <nl> / / BAZ_INT_ENUM_DOT : Begin completions , 6 items <nl> mmm a / test / IDE / complete_from_swift_module . swift <nl> ppp b / test / IDE / complete_from_swift_module . swift <nl> func testCompleteModuleQualified3 ( ) { <nl> / / MODULE_QUALIFIED_3 : Begin completions <nl> / / MODULE_QUALIFIED_3 - NEXT : Decl [ Constructor ] / CurrNominal : ( { # t : _ # } ) [ # BarGenericSwiftStruct1 < _ > # ] ; name = ( t : _ ) <nl> / / MODULE_QUALIFIED_3 - NEXT : Decl [ InstanceMethod ] / CurrNominal : . bar1InstanceFunc ( { # self : BarGenericSwiftStruct1 < _ > # } ) [ # ( ) - > Void # ] ; name = bar1InstanceFunc ( BarGenericSwiftStruct1 < _ > ) <nl> - / / MODULE_QUALIFIED_3 : Decl [ InfixOperatorFunction ] / OtherModule [ Swift ] : ! = { # Any . Type ? # } [ # Bool # ] ; <nl> / / MODULE_QUALIFIED_3 : End completions <nl> <nl> func testCompleteModuleQualified4 ( ) { <nl> func testCompleteModuleQualified4 ( ) { <nl> / / MODULE_QUALIFIED_4 : Begin completions <nl> / / MODULE_QUALIFIED_4 - NEXT : Decl [ Constructor ] / CurrNominal : ( { # t : _ # } , { # u : _ # } ) [ # BarGenericSwiftStruct2 < _ , _ > # ] ; name = ( t : _ , u : _ ) <nl> / / MODULE_QUALIFIED_4 - NEXT : Decl [ InstanceMethod ] / CurrNominal : . bar2InstanceFunc ( { # self : BarGenericSwiftStruct2 < _ , _ > # } ) [ # ( ) - > Void # ] ; name = bar2InstanceFunc ( BarGenericSwiftStruct2 < _ , _ > ) <nl> - / / MODULE_QUALIFIED_4 : Decl [ InfixOperatorFunction ] / OtherModule [ Swift ] : ! = { # Any . Type ? # } [ # Bool # ] ; <nl> / / MODULE_QUALIFIED_4 - NEXT : End completions <nl> <nl> func testCompleteModuleQualified5 ( ) { <nl> mmm a / test / IDE / complete_member_decls_from_parent_decl_context . swift <nl> ppp b / test / IDE / complete_member_decls_from_parent_decl_context . swift <nl> struct NestedOuter1 { <nl> / / NESTED_NOMINAL_DECL_A_4 - DAG : Decl [ Struct ] / CurrModule : NestedOuter1 [ # NestedOuter1 # ] { { ; name = . + $ } } <nl> / / NESTED_NOMINAL_DECL_A_4 : End completions <nl> <nl> - NestedInnerA ( ) # ^ NESTED_NOMINAL_DECL_A_5 ^ # <nl> + NestedInnerA ( aInstanceVar : 42 ) # ^ NESTED_NOMINAL_DECL_A_5 ^ # <nl> / / NESTED_NOMINAL_DECL_A_5 : Begin completions , 4 items <nl> / / NESTED_NOMINAL_DECL_A_5 - NEXT : Decl [ InstanceMethod ] / CurrNominal : . aTestInstanceFunc ( ) [ # Void # ] { { ; name = . + $ } } <nl> / / NESTED_NOMINAL_DECL_A_5 - NEXT : Decl [ InstanceVar ] / CurrNominal : . aInstanceVar [ # Int # ] { { ; name = . + $ } } <nl> struct NestedOuter1 { <nl> / / NESTED_NOMINAL_DECL_B_4 - DAG : Decl [ Struct ] / CurrModule : NestedOuter1 [ # NestedOuter1 # ] { { ; name = . + $ } } <nl> / / NESTED_NOMINAL_DECL_B_4 : End completions <nl> <nl> - NestedInnerB ( ) # ^ NESTED_NOMINAL_DECL_B_5 ^ # <nl> + NestedInnerB ( bInstanceVar : 42 ) # ^ NESTED_NOMINAL_DECL_B_5 ^ # <nl> / / NESTED_NOMINAL_DECL_B_5 : Begin completions , 4 items <nl> / / NESTED_NOMINAL_DECL_B_5 - DAG : Decl [ InstanceMethod ] / CurrNominal : . bTestInstanceFunc ( ) [ # Void # ] { { ; name = . 
+ $ } } <nl> / / NESTED_NOMINAL_DECL_B_5 - DAG : Decl [ InstanceVar ] / CurrNominal : . bInstanceVar [ # Int # ] { { ; name = . + $ } } <nl> func testOuterC ( ) { <nl> / / NESTED_NOMINAL_DECL_C_4 - DAG : Decl [ Struct ] / Local : NestedInnerC [ # NestedInnerC # ] { { ; name = . + $ } } <nl> / / NESTED_NOMINAL_DECL_C_4 : End completions <nl> <nl> - NestedInnerC ( ) # ^ NESTED_NOMINAL_DECL_C_5 ^ # <nl> + NestedInnerC ( cInstanceVar : 42 ) # ^ NESTED_NOMINAL_DECL_C_5 ^ # <nl> / / NESTED_NOMINAL_DECL_C_5 : Begin completions , 4 items <nl> / / NESTED_NOMINAL_DECL_C_5 - NEXT : Decl [ InstanceMethod ] / CurrNominal : . cTestInstanceFunc ( ) [ # Void # ] { { ; name = . + $ } } <nl> / / NESTED_NOMINAL_DECL_C_5 - NEXT : Decl [ InstanceVar ] / CurrNominal : . cInstanceVar [ # Int # ] { { ; name = . + $ } } <nl> mmm a / test / IDE / complete_value_expr . swift <nl> ppp b / test / IDE / complete_value_expr . swift <nl> func testSwitch1 ( ) { <nl> / / = = = mmm Helper types that are used in this test <nl> <nl> struct FooGenericStruct < T > { <nl> + init ( ) { } <nl> init ( t : T ) { fooInstanceVarT = t } <nl> <nl> var fooInstanceVarT : T <nl> func testInsideFunctionCall2 ( ) { <nl> var a = FooStruct ( ) <nl> a . instanceFunc1 ( # ^ INSIDE_FUNCTION_CALL_2 ^ # <nl> / / INSIDE_FUNCTION_CALL_2 : Begin completions <nl> - / / FIXME : we should print the non - API param name rdar : / / 20962472 <nl> - / / INSIDE_FUNCTION_CALL_2 - DAG : Pattern / CurrModule : [ ' ( ' ] { # Int # } [ ' ) ' ] [ # Void # ] { { ; name = . + $ } } <nl> + / / INSIDE_FUNCTION_CALL_2 - DAG : Pattern / CurrModule : [ ' ( ' ] { # ( a ) : Int # } [ ' ) ' ] [ # Void # ] { { ; name = . + $ } } <nl> / / INSIDE_FUNCTION_CALL_2 - DAG : Decl [ GlobalVar ] / CurrModule : fooObject [ # FooStruct # ] { { ; name = . + $ } } <nl> / / INSIDE_FUNCTION_CALL_2 : End completions <nl> } <nl> <nl> func testInsideFunctionCall3 ( ) { <nl> FooStruct ( ) . instanceFunc1 ( 42 , # ^ INSIDE_FUNCTION_CALL_3 ^ # <nl> - / / INSIDE_FUNCTION_CALL_3 : Begin completions <nl> - / / FIXME : There should be no results here because the function call <nl> - / / unambiguously resolves to overload that takes 1 argument . <nl> - / / INSIDE_FUNCTION_CALL_3 - DAG : Decl [ GlobalVar ] / CurrModule : fooObject [ # FooStruct # ] { { ; name = . + $ } } <nl> - / / INSIDE_FUNCTION_CALL_3 : End completions <nl> + / / INSIDE_FUNCTION_CALL_3 - NOT : Begin Completions <nl> } <nl> <nl> func testInsideFunctionCall4 ( ) { <nl> var a = FooStruct ( ) <nl> a . instanceFunc2 ( # ^ INSIDE_FUNCTION_CALL_4 ^ # <nl> / / INSIDE_FUNCTION_CALL_4 : Begin completions <nl> - / / FIXME : we should print the non - API param name rdar : / / 20962472 <nl> - / / INSIDE_FUNCTION_CALL_4 - DAG : Pattern / CurrModule : [ ' ( ' ] { # Int # } , { # b : & Double # } [ ' ) ' ] [ # Void # ] { { ; name = . + $ } } <nl> + / / INSIDE_FUNCTION_CALL_4 - DAG : Pattern / CurrModule : [ ' ( ' ] { # ( a ) : Int # } , { # b : & Double # } [ ' ) ' ] [ # Void # ] { { ; name = . + $ } } <nl> / / INSIDE_FUNCTION_CALL_4 - DAG : Decl [ GlobalVar ] / CurrModule : fooObject [ # FooStruct # ] { { ; name = . + $ } } <nl> / / INSIDE_FUNCTION_CALL_4 : End completions <nl> } <nl> func testInsideFunctionCall7 ( ) { <nl> var a = FooStruct ( ) <nl> a . 
instanceFunc8 ( # ^ INSIDE_FUNCTION_CALL_7 ^ # <nl> / / INSIDE_FUNCTION_CALL_7 : Begin completions <nl> - / / FIXME : we should print the non - API param name rdar : / / 20962472 <nl> - / / INSIDE_FUNCTION_CALL_7 : Pattern / CurrModule : [ ' ( ' ] { # ( Int , Int ) # } [ ' ) ' ] [ # Void # ] { { ; name = . + $ } } <nl> + / / INSIDE_FUNCTION_CALL_7 : Pattern / CurrModule : [ ' ( ' ] { # ( a ) : ( Int , Int ) # } [ ' ) ' ] [ # Void # ] { { ; name = . + $ } } <nl> / / INSIDE_FUNCTION_CALL_7 : End completions <nl> } <nl> <nl> func testInsideFunctionCall9 ( _ x : inout FooStruct ) { <nl> x . instanceFunc1 ( # ^ INSIDE_FUNCTION_CALL_9 ^ # ) <nl> / / Annotated ' ) ' <nl> / / INSIDE_FUNCTION_CALL_9 : Begin completions <nl> - / / INSIDE_FUNCTION_CALL_9 - DAG : Pattern / CurrModule : [ ' ( ' ] { # Int # } [ ' ) ' ] [ # Void # ] { { ; name = . + $ } } <nl> + / / INSIDE_FUNCTION_CALL_9 - DAG : Pattern / CurrModule : [ ' ( ' ] { # ( a ) : Int # } [ ' ) ' ] [ # Void # ] { { ; name = . + $ } } <nl> / / INSIDE_FUNCTION_CALL_9 - DAG : Decl [ GlobalVar ] / CurrModule : fooObject [ # FooStruct # ] { { ; name = . + $ } } <nl> / / INSIDE_FUNCTION_CALL_9 : End completions <nl> } <nl> func testInsideFunctionCall10 ( _ x : inout FooStruct ) { <nl> x . instanceFunc2 ( # ^ INSIDE_FUNCTION_CALL_10 ^ # ) <nl> / / Annotated ' ) ' <nl> / / INSIDE_FUNCTION_CALL_10 : Begin completions <nl> - / / INSIDE_FUNCTION_CALL_10 - DAG : Pattern / CurrModule : [ ' ( ' ] { # Int # } , { # b : & Double # } [ ' ) ' ] [ # Void # ] { { ; name = . + $ } } <nl> + / / INSIDE_FUNCTION_CALL_10 - DAG : Pattern / CurrModule : [ ' ( ' ] { # ( a ) : Int # } , { # b : & Double # } [ ' ) ' ] [ # Void # ] { { ; name = . + $ } } <nl> / / INSIDE_FUNCTION_CALL_10 - DAG : Decl [ GlobalVar ] / CurrModule : fooObject [ # FooStruct # ] { { ; name = . + $ } } <nl> / / INSIDE_FUNCTION_CALL_10 : End completions <nl> } <nl> func testInsideVarargFunctionCall1 ( ) { <nl> var a = FooStruct ( ) <nl> a . varargInstanceFunc0 ( # ^ INSIDE_VARARG_FUNCTION_CALL_1 ^ # <nl> / / INSIDE_VARARG_FUNCTION_CALL_1 : Begin completions <nl> - / / FIXME : we should print the non - API param name rdar : / / 20962472 <nl> - / / INSIDE_VARARG_FUNCTION_CALL_1 - DAG : Pattern / CurrModule : [ ' ( ' ] { # Int . . . # } [ ' ) ' ] [ # Void # ] { { ; name = . + $ } } <nl> + / / INSIDE_VARARG_FUNCTION_CALL_1 - DAG : Pattern / CurrModule : [ ' ( ' ] { # ( v ) : Int . . . # } [ ' ) ' ] [ # Void # ] { { ; name = . + $ } } <nl> / / INSIDE_VARARG_FUNCTION_CALL_1 - DAG : Decl [ GlobalVar ] / CurrModule : fooObject [ # FooStruct # ] { { ; name = . + $ } } <nl> / / INSIDE_VARARG_FUNCTION_CALL_1 : End completions <nl> } <nl> func testInsideOverloadedFunctionCall1 ( ) { <nl> func testInsideFunctionCallOnClassInstance1 ( _ a : FooClass ) { <nl> a . fooClassInstanceFunc1 ( # ^ INSIDE_FUNCTION_CALL_ON_CLASS_INSTANCE_1 ^ # <nl> / / INSIDE_FUNCTION_CALL_ON_CLASS_INSTANCE_1 : Begin completions <nl> - / / FIXME : we should print the non - API param name rdar : / / 20962472 <nl> - / / INSIDE_FUNCTION_CALL_ON_CLASS_INSTANCE_1 - DAG : Pattern / CurrModule : [ ' ( ' ] { # Int # } [ ' ) ' ] [ # Void # ] { { ; name = . + $ } } <nl> + / / INSIDE_FUNCTION_CALL_ON_CLASS_INSTANCE_1 - DAG : Pattern / CurrModule : [ ' ( ' ] { # ( a ) : Int # } [ ' ) ' ] [ # Void # ] { { ; name = . + $ } } <nl> / / INSIDE_FUNCTION_CALL_ON_CLASS_INSTANCE_1 - DAG : Decl [ GlobalVar ] / CurrModule : fooObject [ # FooStruct # ] { { ; name = . 
+ $ } } <nl> / / INSIDE_FUNCTION_CALL_ON_CLASS_INSTANCE_1 : End completions <nl> } <nl> func testFuncParenPattern3 ( _ fpp : inout FuncParenPattern ) { <nl> / / = = = mmm Check that we can code complete after function calls <nl> <nl> struct SomeBuilder { <nl> - init ( a : Int ) { } <nl> + init ( _ a : Int ) { } <nl> func doFoo ( ) - > SomeBuilder { return self } <nl> func doBar ( ) - > SomeBuilder { return self } <nl> func doBaz ( _ z : Double ) - > SomeBuilder { return self } <nl> func testResolveGenericParams1 ( ) { <nl> <nl> FooGenericStruct < FooStruct > # ^ RESOLVE_GENERIC_PARAMS_1_STATIC ^ # <nl> / / RESOLVE_GENERIC_PARAMS_1_STATIC : Begin completions <nl> + / / RESOLVE_GENERIC_PARAMS_1_STATIC - NEXT : Decl [ Constructor ] / CurrNominal : ( ) [ # FooGenericStruct < FooStruct > # ] ; name = ( ) <nl> / / RESOLVE_GENERIC_PARAMS_1_STATIC - NEXT : Decl [ Constructor ] / CurrNominal : ( { # t : FooStruct # } ) [ # FooGenericStruct < FooStruct > # ] { { ; name = . + $ } } <nl> / / RESOLVE_GENERIC_PARAMS_1_STATIC - NEXT : Decl [ InstanceMethod ] / CurrNominal : . fooVoidInstanceFunc1 ( { # self : & FooGenericStruct < FooStruct > # } ) [ # ( FooStruct ) - > Void # ] { { ; name = . + $ } } <nl> / / RESOLVE_GENERIC_PARAMS_1_STATIC - NEXT : Decl [ InstanceMethod ] / CurrNominal : . fooTInstanceFunc1 ( { # self : & FooGenericStruct < FooStruct > # } ) [ # ( FooStruct ) - > FooStruct # ] { { ; name = . + $ } } <nl> func testResolveGenericParams2 < Foo : FooProtocol > ( _ foo : Foo ) { <nl> <nl> FooGenericStruct < Foo > # ^ RESOLVE_GENERIC_PARAMS_2_STATIC ^ # <nl> / / RESOLVE_GENERIC_PARAMS_2_STATIC : Begin completions <nl> + / / RESOLVE_GENERIC_PARAMS_2_STATIC - NEXT : Decl [ Constructor ] / CurrNominal : ( ) [ # FooGenericStruct < FooProtocol > # ] ; name = ( ) <nl> / / RESOLVE_GENERIC_PARAMS_2_STATIC - NEXT : Decl [ Constructor ] / CurrNominal : ( { # t : FooProtocol # } ) [ # FooGenericStruct < FooProtocol > # ] { { ; name = . + $ } } <nl> / / RESOLVE_GENERIC_PARAMS_2_STATIC - NEXT : Decl [ InstanceMethod ] / CurrNominal : . fooVoidInstanceFunc1 ( { # self : & FooGenericStruct < FooProtocol > # } ) [ # ( FooProtocol ) - > Void # ] { { ; name = . + $ } } <nl> / / RESOLVE_GENERIC_PARAMS_2_STATIC - NEXT : Decl [ InstanceMethod ] / CurrNominal : . fooTInstanceFunc1 ( { # self : & FooGenericStruct < FooProtocol > # } ) [ # ( FooProtocol ) - > FooProtocol # ] { { ; name = . + $ } } <nl> struct TestResolveGenericParams3_4 < T > { <nl> / / RESOLVE_GENERIC_PARAMS_3 - NEXT : End completions <nl> <nl> FooGenericStruct < FooStruct > # ^ RESOLVE_GENERIC_PARAMS_3_STATIC ^ # <nl> - / / RESOLVE_GENERIC_PARAMS_3_STATIC : Begin completions , 9 items <nl> + / / RESOLVE_GENERIC_PARAMS_3_STATIC : Begin completions , 10 items <nl> + / / RESOLVE_GENERIC_PARAMS_3_STATIC - NEXT : Decl [ Constructor ] / CurrNominal : ( ) [ # FooGenericStruct < FooStruct > # ] ; name = ( ) <nl> / / RESOLVE_GENERIC_PARAMS_3_STATIC - NEXT : Decl [ Constructor ] / CurrNominal : ( { # t : FooStruct # } ) [ # FooGenericStruct < FooStruct > # ] { { ; name = . + $ } } <nl> / / RESOLVE_GENERIC_PARAMS_3_STATIC - NEXT : Decl [ InstanceMethod ] / CurrNominal : . fooVoidInstanceFunc1 ( { # self : & FooGenericStruct < FooStruct > # } ) [ # ( FooStruct ) - > Void # ] { { ; name = . + $ } } <nl> / / RESOLVE_GENERIC_PARAMS_3_STATIC - NEXT : Decl [ InstanceMethod ] / CurrNominal : . fooTInstanceFunc1 ( { # self : & FooGenericStruct < FooStruct > # } ) [ # ( FooStruct ) - > FooStruct # ] { { ; name = . 
+ $ } } <nl> struct TestResolveGenericParams3_4 < T > { <nl> } <nl> <nl> func testResolveGenericParams4 ( _ t : T ) { <nl> - FooGenericStruct < T > ( t ) # ^ RESOLVE_GENERIC_PARAMS_4 ^ # <nl> + FooGenericStruct < T > ( t : t ) # ^ RESOLVE_GENERIC_PARAMS_4 ^ # <nl> / / RESOLVE_GENERIC_PARAMS_4 : Begin completions <nl> / / RESOLVE_GENERIC_PARAMS_4 - NEXT : Decl [ InstanceVar ] / CurrNominal : . fooInstanceVarT [ # T # ] { { ; name = . + $ } } <nl> / / RESOLVE_GENERIC_PARAMS_4 - NEXT : Decl [ InstanceVar ] / CurrNominal : . fooInstanceVarTBrackets [ # [ T ] # ] { { ; name = . + $ } } <nl> struct TestResolveGenericParams3_4 < T > { <nl> <nl> FooGenericStruct < T > # ^ RESOLVE_GENERIC_PARAMS_4_STATIC ^ # <nl> / / RESOLVE_GENERIC_PARAMS_4_STATIC : Begin completions <nl> + / / RESOLVE_GENERIC_PARAMS_4_STATIC - NEXT : Decl [ Constructor ] / CurrNominal : ( ) [ # FooGenericStruct < T > # ] ; name = ( ) <nl> / / RESOLVE_GENERIC_PARAMS_4_STATIC - NEXT : Decl [ Constructor ] / CurrNominal : ( { # t : T # } ) [ # FooGenericStruct < T > # ] { { ; name = . + $ } } <nl> / / RESOLVE_GENERIC_PARAMS_4_STATIC - NEXT : Decl [ InstanceMethod ] / CurrNominal : . fooVoidInstanceFunc1 ( { # self : & FooGenericStruct < T > # } ) [ # ( T ) - > Void # ] { { ; name = . + $ } } <nl> / / RESOLVE_GENERIC_PARAMS_4_STATIC - NEXT : Decl [ InstanceMethod ] / CurrNominal : . fooTInstanceFunc1 ( { # self : & FooGenericStruct < T > # } ) [ # ( T ) - > T # ] { { ; name = . + $ } } <nl> struct TestResolveGenericParams3_4 < T > { <nl> } <nl> <nl> func testResolveGenericParams5 < U > ( _ u : U ) { <nl> - FooGenericStruct < U > ( u ) # ^ RESOLVE_GENERIC_PARAMS_5 ^ # <nl> + FooGenericStruct < U > ( t : u ) # ^ RESOLVE_GENERIC_PARAMS_5 ^ # <nl> / / RESOLVE_GENERIC_PARAMS_5 : Begin completions <nl> / / RESOLVE_GENERIC_PARAMS_5 - NEXT : Decl [ InstanceVar ] / CurrNominal : . fooInstanceVarT [ # U # ] { { ; name = . + $ } } <nl> / / RESOLVE_GENERIC_PARAMS_5 - NEXT : Decl [ InstanceVar ] / CurrNominal : . fooInstanceVarTBrackets [ # [ U ] # ] { { ; name = . + $ } } <nl> struct TestResolveGenericParams3_4 < T > { <nl> <nl> FooGenericStruct < U > # ^ RESOLVE_GENERIC_PARAMS_5_STATIC ^ # <nl> / / RESOLVE_GENERIC_PARAMS_5_STATIC : Begin completions <nl> + / / RESOLVE_GENERIC_PARAMS_5_STATIC - NEXT : Decl [ Constructor ] / CurrNominal : ( ) [ # FooGenericStruct < U > # ] ; name = ( ) <nl> / / RESOLVE_GENERIC_PARAMS_5_STATIC - NEXT : Decl [ Constructor ] / CurrNominal : ( { # t : U # } ) [ # FooGenericStruct < U > # ] { { ; name = . + $ } } <nl> / / RESOLVE_GENERIC_PARAMS_5_STATIC - NEXT : Decl [ InstanceMethod ] / CurrNominal : . fooVoidInstanceFunc1 ( { # self : & FooGenericStruct < U > # } ) [ # ( U ) - > Void # ] { { ; name = . + $ } } <nl> / / RESOLVE_GENERIC_PARAMS_5_STATIC - NEXT : Decl [ InstanceMethod ] / CurrNominal : . fooTInstanceFunc1 ( { # self : & FooGenericStruct < U > # } ) [ # ( U ) - > U # ] { { ; name = . + $ } } <nl> similarity index 82 % <nl> rename from validation - test / compiler_crashers / 28823 - impl - getgraphindex - typevariables - size - out - of - bounds - index . swift <nl> rename to validation - test / compiler_crashers_fixed / 28823 - impl - getgraphindex - typevariables - size - out - of - bounds - index . swift <nl> mmm a / validation - test / compiler_crashers / 28823 - impl - getgraphindex - typevariables - size - out - of - bounds - index . swift <nl> ppp b / validation - test / compiler_crashers_fixed / 28823 - impl - getgraphindex - typevariables - size - out - of - bounds - index . 
swift <nl> <nl> / / See https : / / swift . org / LICENSE . txt for license information <nl> / / See https : / / swift . org / CONTRIBUTORS . txt for the list of Swift project authors <nl> <nl> - / / REQUIRES : asserts <nl> - / / RUN : not - - crash % target - swift - frontend % s - emit - ir <nl> + <nl> + / / RUN : not % target - swift - frontend % s - emit - ir <nl> { { } as ManagedBuffer } { <nl> similarity index 83 % <nl> rename from validation - test / compiler_crashers / 28856 - typevariables - impl - getgraphindex - typevar - type - variable - mismatch . swift <nl> rename to validation - test / compiler_crashers_fixed / 28856 - typevariables - impl - getgraphindex - typevar - type - variable - mismatch . swift <nl> mmm a / validation - test / compiler_crashers / 28856 - typevariables - impl - getgraphindex - typevar - type - variable - mismatch . swift <nl> ppp b / validation - test / compiler_crashers_fixed / 28856 - typevariables - impl - getgraphindex - typevar - type - variable - mismatch . swift <nl> <nl> / / See https : / / swift . org / LICENSE . txt for license information <nl> / / See https : / / swift . org / CONTRIBUTORS . txt for the list of Swift project authors <nl> <nl> - / / REQUIRES : asserts <nl> - / / RUN : not - - crash % target - swift - frontend % s - emit - ir <nl> + <nl> + / / RUN : not % target - swift - frontend % s - emit - ir <nl> { [ Int ? as ? ManagedBuffer } { <nl> similarity index 83 % <nl> rename from validation - test / compiler_crashers / 28868 - known - typebindings - end . swift <nl> rename to validation - test / compiler_crashers_fixed / 28868 - known - typebindings - end . swift <nl> mmm a / validation - test / compiler_crashers / 28868 - known - typebindings - end . swift <nl> ppp b / validation - test / compiler_crashers_fixed / 28868 - known - typebindings - end . swift <nl> <nl> / / See https : / / swift . org / LICENSE . txt for license information <nl> / / See https : / / swift . org / CONTRIBUTORS . txt for the list of Swift project authors <nl> <nl> - / / REQUIRES : asserts <nl> - / / RUN : not - - crash % target - swift - frontend % s - emit - ir <nl> + <nl> + / / RUN : not % target - swift - frontend % s - emit - ir <nl> struct B { func a { a ( Int ? as ? ManagedBuffer { <nl>
|
Merge remote - tracking branch ' origin / master ' into master - llvm - swift5 - transition
|
apple/swift
|
a6cd2a88728f1dc27ef459691e946e706c15e9e4
|
2018-02-14T05:58:26Z
|
mmm a / modules / tools / open_space_visualization / distance_approach_python_interface . py <nl> ppp b / modules / tools / open_space_visualization / distance_approach_python_interface . py <nl> <nl> from ctypes import c_double <nl> from ctypes import c_ushort <nl> from ctypes import c_void_p <nl> + from ctypes import cdll , POINTER <nl> <nl> <nl> lib = cdll . LoadLibrary ( <nl>
|
tools : added dependency to distance_approach interface
|
ApolloAuto/apollo
|
45b646e2c1d5937655b9cb3bfc3bf3e60b425e4b
|
2020-02-26T01:43:47Z
|
mmm a / tensorflow / workspace . bzl <nl> ppp b / tensorflow / workspace . bzl <nl> def tf_repositories ( path_prefix = " " , tf_repo_name = " " ) : <nl> ) <nl> <nl> # Check out LLVM and MLIR from llvm - project . <nl> - LLVM_COMMIT = " 3667d87a33d3c8d4072a41fd84bb880c59347dc0 " <nl> - LLVM_SHA256 = " 89e0523c771c55bbf0aea55fffffa5fb071ec3788a92f25fda98389a8dfd880e " <nl> + LLVM_COMMIT = " 300156932321a8b34b46d6a890cce0699525ed20 " <nl> + LLVM_SHA256 = " 932ca27455d396ba8be72032cbc7e284f882761b0dd9d33af14e354f8f9b5718 " <nl> LLVM_URLS = [ <nl> " https : / / storage . googleapis . com / mirror . tensorflow . org / github . com / llvm / llvm - project / archive / { commit } . tar . gz " . format ( commit = LLVM_COMMIT ) , <nl> " https : / / github . com / llvm / llvm - project / archive / { commit } . tar . gz " . format ( commit = LLVM_COMMIT ) , <nl>
|
Integrate LLVM at https : / / github . com / llvm / llvm - project / commit / 300156932321
|
tensorflow/tensorflow
|
b6980e052907899efa32f2fbf3971a643b3528f6
|
2020-07-14T12:16:26Z
|
mmm a / src / compiler . cc <nl> ppp b / src / compiler . cc <nl> static Handle < SharedFunctionInfo > MakeFunctionInfo ( CompilationInfo * info ) { <nl> / / the instances of the function . <nl> SetExpectedNofPropertiesFromEstimate ( result , lit - > expected_property_count ( ) ) ; <nl> <nl> + script - > set_compilation_state ( <nl> + Smi : : FromInt ( Script : : COMPILATION_STATE_COMPILED ) ) ; <nl> + <nl> # ifdef ENABLE_DEBUGGER_SUPPORT <nl> / / Notify debugger <nl> isolate - > debugger ( ) - > OnAfterCompile ( <nl> mmm a / src / factory . cc <nl> ppp b / src / factory . cc <nl> Handle < Script > Factory : : NewScript ( Handle < String > source ) { <nl> script - > set_context_data ( heap - > undefined_value ( ) ) ; <nl> script - > set_type ( Smi : : FromInt ( Script : : TYPE_NORMAL ) ) ; <nl> script - > set_compilation_type ( Smi : : FromInt ( Script : : COMPILATION_TYPE_HOST ) ) ; <nl> + script - > set_compilation_state ( <nl> + Smi : : FromInt ( Script : : COMPILATION_STATE_INITIAL ) ) ; <nl> script - > set_wrapper ( * wrapper ) ; <nl> script - > set_line_ends ( heap - > undefined_value ( ) ) ; <nl> script - > set_eval_from_shared ( heap - > undefined_value ( ) ) ; <nl> mmm a / src / mirror - debugger . js <nl> ppp b / src / mirror - debugger . js <nl> ScriptMirror . prototype . source = function ( ) { <nl> } ; <nl> <nl> <nl> + ScriptMirror . prototype . setSource = function ( source ) { <nl> + % DebugSetScriptSource ( this . script_ , source ) ; <nl> + } ; <nl> + <nl> + <nl> ScriptMirror . prototype . lineOffset = function ( ) { <nl> return this . script_ . line_offset ; <nl> } ; <nl> mmm a / src / objects - inl . h <nl> ppp b / src / objects - inl . h <nl> ACCESSORS ( Script , context_data , Object , kContextOffset ) <nl> ACCESSORS ( Script , wrapper , Foreign , kWrapperOffset ) <nl> ACCESSORS_TO_SMI ( Script , type , kTypeOffset ) <nl> ACCESSORS_TO_SMI ( Script , compilation_type , kCompilationTypeOffset ) <nl> + ACCESSORS_TO_SMI ( Script , compilation_state , kCompilationStateOffset ) <nl> ACCESSORS ( Script , line_ends , Object , kLineEndsOffset ) <nl> ACCESSORS ( Script , eval_from_shared , Object , kEvalFromSharedOffset ) <nl> ACCESSORS_TO_SMI ( Script , eval_from_instructions_offset , <nl> mmm a / src / objects . h <nl> ppp b / src / objects . h <nl> class Script : public Struct { <nl> COMPILATION_TYPE_EVAL = 1 <nl> } ; <nl> <nl> + / / Script compilation state . <nl> + enum CompilationState { <nl> + COMPILATION_STATE_INITIAL = 0 , <nl> + COMPILATION_STATE_COMPILED = 1 <nl> + } ; <nl> + <nl> / / [ source ] : the script source . <nl> DECL_ACCESSORS ( source , Object ) <nl> <nl> class Script : public Struct { <nl> / / [ compilation ] : how the the script was compiled . <nl> DECL_ACCESSORS ( compilation_type , Smi ) <nl> <nl> + / / [ is_compiled ] : determines whether the script has already been compiled . <nl> + DECL_ACCESSORS ( compilation_state , Smi ) <nl> + <nl> / / [ line_ends ] : FixedArray of line ends positions . 
<nl> DECL_ACCESSORS ( line_ends , Object ) <nl> <nl> class Script : public Struct { <nl> static const int kWrapperOffset = kContextOffset + kPointerSize ; <nl> static const int kTypeOffset = kWrapperOffset + kPointerSize ; <nl> static const int kCompilationTypeOffset = kTypeOffset + kPointerSize ; <nl> - static const int kLineEndsOffset = kCompilationTypeOffset + kPointerSize ; <nl> + static const int kCompilationStateOffset = <nl> + kCompilationTypeOffset + kPointerSize ; <nl> + static const int kLineEndsOffset = kCompilationStateOffset + kPointerSize ; <nl> static const int kIdOffset = kLineEndsOffset + kPointerSize ; <nl> static const int kEvalFromSharedOffset = kIdOffset + kPointerSize ; <nl> static const int kEvalFrominstructionsOffsetOffset = <nl> mmm a / src / runtime . cc <nl> ppp b / src / runtime . cc <nl> RUNTIME_FUNCTION ( MaybeObject * , Runtime_DebugGetPrototype ) { <nl> } <nl> <nl> <nl> + / / Patches script source ( should be called upon BeforeCompile event ) . <nl> + RUNTIME_FUNCTION ( MaybeObject * , Runtime_DebugSetScriptSource ) { <nl> + HandleScope scope ( isolate ) ; <nl> + ASSERT ( args . length ( ) = = 2 ) ; <nl> + <nl> + CONVERT_ARG_HANDLE_CHECKED ( JSValue , script_wrapper , 0 ) ; <nl> + Handle < String > source ( String : : cast ( args [ 1 ] ) ) ; <nl> + <nl> + RUNTIME_ASSERT ( script_wrapper - > value ( ) - > IsScript ( ) ) ; <nl> + Handle < Script > script ( Script : : cast ( script_wrapper - > value ( ) ) ) ; <nl> + <nl> + int compilation_state = Smi : : cast ( script - > compilation_state ( ) ) - > value ( ) ; <nl> + RUNTIME_ASSERT ( compilation_state = = Script : : COMPILATION_STATE_INITIAL ) ; <nl> + script - > set_source ( * source ) ; <nl> + <nl> + return isolate - > heap ( ) - > undefined_value ( ) ; <nl> + } <nl> + <nl> + <nl> RUNTIME_FUNCTION ( MaybeObject * , Runtime_SystemBreak ) { <nl> ASSERT ( args . length ( ) = = 0 ) ; <nl> CPU : : DebugBreak ( ) ; <nl> mmm a / src / runtime . h <nl> ppp b / src / runtime . h <nl> namespace internal { <nl> F ( DebugReferencedBy , 3 , 1 ) \ <nl> F ( DebugConstructedBy , 2 , 1 ) \ <nl> F ( DebugGetPrototype , 1 , 1 ) \ <nl> + F ( DebugSetScriptSource , 2 , 1 ) \ <nl> F ( SystemBreak , 0 , 1 ) \ <nl> F ( DebugDisassembleFunction , 1 , 1 ) \ <nl> F ( DebugDisassembleConstructor , 1 , 1 ) \ <nl> new file mode 100644 <nl> index 00000000000 . . 34ae8488a4d <nl> mmm / dev / null <nl> ppp b / test / mjsunit / debug - set - script - source . js <nl> <nl> + / / Copyright 2012 the V8 project authors . All rights reserved . <nl> + / / Redistribution and use in source and binary forms , with or without <nl> + / / modification , are permitted provided that the following conditions are <nl> + / / met : <nl> + / / <nl> + / / * Redistributions of source code must retain the above copyright <nl> + / / notice , this list of conditions and the following disclaimer . <nl> + / / * Redistributions in binary form must reproduce the above <nl> + / / copyright notice , this list of conditions and the following <nl> + / / disclaimer in the documentation and / or other materials provided <nl> + / / with the distribution . <nl> + / / * Neither the name of Google Inc . nor the names of its <nl> + / / contributors may be used to endorse or promote products derived <nl> + / / from this software without specific prior written permission . 
<nl> + / / <nl> + / / THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + / / " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + / / LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + / / A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + / / OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + / / SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + / / LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + / / DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + / / THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + / / ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + / / OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + / / Flags : - - expose - debug - as debug <nl> + / / Get the Debug object exposed from the debug context global object . <nl> + Debug = debug . Debug <nl> + <nl> + var script_number = 0 ; <nl> + var script_names = [ ] ; <nl> + var exception = null ; <nl> + <nl> + function listener ( event , exec_state , event_data , data ) { <nl> + if ( event = = Debug . DebugEvent . BeforeCompile ) { <nl> + event_data . script ( ) . setSource ( event_data . script ( ) . source ( ) + <nl> + " / / @ sourceURL = proper_location_ " + ( + + script_number ) ) ; <nl> + } else if ( event = = Debug . DebugEvent . AfterCompile ) { <nl> + try { <nl> + event_data . script ( ) . setSource ( " a = 1 / / @ sourceURL = wrong_location " ) ; <nl> + } catch ( e ) { <nl> + exception = e ; <nl> + } <nl> + script_names . push ( event_data . script ( ) . name ( ) ) ; <nl> + } <nl> + } ; <nl> + <nl> + <nl> + / / Add the debug event listener . <nl> + Debug . setListener ( listener ) ; <nl> + <nl> + / / Compile different sources . <nl> + eval ( ' a = 1 ' ) ; <nl> + eval ( ' ( function ( ) { } ) ' ) ; <nl> + <nl> + assertEquals ( 2 , script_names . length ) ; <nl> + assertEquals ( " proper_location_1 " , script_names [ 0 ] ) ; <nl> + assertEquals ( " proper_location_2 " , script_names [ 1 ] ) ; <nl> + <nl> + assertEquals ( " illegal access " , exception ) ; <nl> + <nl> + Debug . setListener ( null ) ; <nl>
|
Debugger : add ability to set script source from within OnBeforeCompile .
|
v8/v8
|
100bc51eaebf12c9feafa3fb17f66de2464a3f6a
|
2012-03-15T11:51:26Z
|
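A minimal standalone C++ sketch of the guard the v8 record above introduces: patching a script's source is refused once its compilation state has moved past the initial state, which is exactly what the new `Runtime_DebugSetScriptSource` assert enforces. The `Script` class, method names, and the "illegal access" message below are illustrative stand-ins for this entry only, not V8's actual heap objects or runtime API.

```cpp
#include <iostream>
#include <stdexcept>
#include <string>
#include <utility>

// Illustrative stand-in for a script object whose source may only be
// patched before it has been compiled (mirrors the COMPILATION_STATE_* idea).
class Script {
 public:
  enum class CompilationState { kInitial, kCompiled };

  explicit Script(std::string source) : source_(std::move(source)) {}

  // Allowed only while the script is still in the initial state.
  void SetSource(std::string source) {
    if (state_ != CompilationState::kInitial) {
      throw std::runtime_error("illegal access");  // patching after compile is rejected
    }
    source_ = std::move(source);
  }

  // Compiling transitions the state, so later SetSource calls fail.
  void Compile() { state_ = CompilationState::kCompiled; }

  const std::string& source() const { return source_; }

 private:
  std::string source_;
  CompilationState state_ = CompilationState::kInitial;
};

int main() {
  Script script("a = 1");
  script.SetSource("a = 1 //@ sourceURL=proper_location_1");  // ok: not compiled yet
  script.Compile();
  try {
    script.SetSource("a = 1 //@ sourceURL=wrong_location");   // rejected
  } catch (const std::runtime_error& e) {
    std::cout << "rejected: " << e.what() << "\n";
  }
  std::cout << "final source: " << script.source() << "\n";
}
```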
mmm a / test / common / event / dispatcher_impl_test . cc <nl> ppp b / test / common / event / dispatcher_impl_test . cc <nl> <nl> + # include < condition_variable > <nl> # include < functional > <nl> + # include < mutex > <nl> <nl> + # include " common / common / thread . h " <nl> # include " common / event / dispatcher_impl . h " <nl> <nl> # include " test / mocks / common . h " <nl> class TestDeferredDeletable : public DeferredDeletable { <nl> std : : function < void ( ) > on_destroy_ ; <nl> } ; <nl> <nl> - TEST ( DispatcherImplTest , DeferredDelete ) { <nl> + TEST ( DeferredDeleteTest , DeferredDelete ) { <nl> InSequence s ; <nl> DispatcherImpl dispatcher ; <nl> ReadyWatcher watcher1 ; <nl> TEST ( DispatcherImplTest , DeferredDelete ) { <nl> dispatcher . clearDeferredDeleteList ( ) ; <nl> } <nl> <nl> + class DispatcherImplTest : public : : testing : : Test { <nl> + protected : <nl> + DispatcherImplTest ( ) : dispatcher_ ( std : : make_unique < DispatcherImpl > ( ) ) , work_finished_ ( false ) { <nl> + dispatcher_thread_ = std : : make_unique < Thread : : Thread > ( [ this ] ( ) { <nl> + / / Must create a keepalive timer to keep the dispatcher from exiting . <nl> + std : : chrono : : milliseconds time_interval ( 500 ) ; <nl> + keepalive_timer_ = dispatcher_ - > createTimer ( <nl> + [ this , time_interval ] ( ) { keepalive_timer_ - > enableTimer ( time_interval ) ; } ) ; <nl> + keepalive_timer_ - > enableTimer ( time_interval ) ; <nl> + <nl> + dispatcher_ - > run ( Dispatcher : : RunType : : Block ) ; <nl> + } ) ; <nl> + } <nl> + <nl> + ~ DispatcherImplTest ( ) override { <nl> + dispatcher_ - > exit ( ) ; <nl> + dispatcher_thread_ - > join ( ) ; <nl> + } <nl> + <nl> + std : : unique_ptr < Thread : : Thread > dispatcher_thread_ ; <nl> + DispatcherPtr dispatcher_ ; <nl> + std : : mutex mu_ ; <nl> + std : : condition_variable cv_ ; <nl> + <nl> + bool work_finished_ ; <nl> + TimerPtr keepalive_timer_ ; <nl> + } ; <nl> + <nl> + TEST_F ( DispatcherImplTest , Post ) { <nl> + dispatcher_ - > post ( [ this ] ( ) { <nl> + { <nl> + std : : lock_guard < std : : mutex > lock ( mu_ ) ; <nl> + work_finished_ = true ; <nl> + } <nl> + cv_ . notify_one ( ) ; <nl> + } ) ; <nl> + <nl> + std : : unique_lock < std : : mutex > lock ( mu_ ) ; <nl> + <nl> + cv_ . wait ( lock , [ this ] ( ) { return work_finished_ ; } ) ; <nl> + } <nl> + <nl> + TEST_F ( DispatcherImplTest , Timer ) { <nl> + TimerPtr timer ; <nl> + dispatcher_ - > post ( [ this , & timer ] ( ) { <nl> + { <nl> + std : : lock_guard < std : : mutex > lock ( mu_ ) ; <nl> + timer = dispatcher_ - > createTimer ( [ this ] ( ) { <nl> + { <nl> + std : : lock_guard < std : : mutex > lock ( mu_ ) ; <nl> + work_finished_ = true ; <nl> + } <nl> + cv_ . notify_one ( ) ; <nl> + } ) ; <nl> + } <nl> + cv_ . notify_one ( ) ; <nl> + } ) ; <nl> + <nl> + std : : unique_lock < std : : mutex > lock ( mu_ ) ; <nl> + cv_ . wait ( lock , [ & timer ] ( ) { return timer ! = nullptr ; } ) ; <nl> + timer - > enableTimer ( std : : chrono : : milliseconds ( 50 ) ) ; <nl> + <nl> + cv_ . wait ( lock , [ this ] ( ) { return work_finished_ ; } ) ; <nl> + } <nl> + <nl> } / / namespace Event <nl> } / / namespace Envoy <nl>
|
dispatcher : add simple unit tests ( )
|
envoyproxy/envoy
|
820fb7b38980a07a00fe47cecec2bce15e864a0a
|
2017-12-19T04:23:45Z
|
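The envoy record above drives a dispatcher on its own thread and synchronizes with it through a mutex/condition_variable handshake: work is post()ed to the loop and the test blocks until the posted callback flips a flag. A self-contained sketch of that handshake, assuming a plain task-queue worker thread; the `MiniDispatcher` type below is a hypothetical stand-in, not Envoy's `DispatcherImpl`, and the keepalive-timer detail of the real test is not modeled.

```cpp
#include <condition_variable>
#include <functional>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

// Hypothetical stand-in for an event-loop object: runs posted callbacks on
// its own thread until it is destroyed.
class MiniDispatcher {
 public:
  MiniDispatcher() : thread_([this] { run(); }) {}

  ~MiniDispatcher() {
    {
      std::lock_guard<std::mutex> lock(mu_);
      exiting_ = true;
    }
    cv_.notify_one();
    thread_.join();
  }

  // Enqueue a callback to be executed on the dispatcher thread.
  void post(std::function<void()> cb) {
    {
      std::lock_guard<std::mutex> lock(mu_);
      tasks_.push(std::move(cb));
    }
    cv_.notify_one();
  }

 private:
  void run() {
    std::unique_lock<std::mutex> lock(mu_);
    while (true) {
      cv_.wait(lock, [this] { return exiting_ || !tasks_.empty(); });
      if (exiting_ && tasks_.empty()) return;
      auto task = std::move(tasks_.front());
      tasks_.pop();
      lock.unlock();
      task();          // run the callback outside the queue lock
      lock.lock();
    }
  }

  std::mutex mu_;
  std::condition_variable cv_;
  std::queue<std::function<void()>> tasks_;
  bool exiting_ = false;
  std::thread thread_;
};

int main() {
  // Same handshake as the Post test: the waiting thread blocks on a
  // condition_variable until the posted callback sets work_finished.
  std::mutex mu;
  std::condition_variable cv;
  bool work_finished = false;

  MiniDispatcher dispatcher;
  dispatcher.post([&] {
    {
      std::lock_guard<std::mutex> lock(mu);
      work_finished = true;
    }
    cv.notify_one();
  });

  std::unique_lock<std::mutex> lock(mu);
  cv.wait(lock, [&] { return work_finished; });
  std::cout << "posted work observed\n";
}
```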
mmm a / src / RemoteDatabase . cpp <nl> ppp b / src / RemoteDatabase . cpp <nl> void RemoteDatabase : : gotError ( QNetworkReply * reply , const QList < QSslError > & erro <nl> QMessageBox : : warning ( 0 , qApp - > applicationName ( ) , message ) ; <nl> <nl> / / Delete reply later , i . e . after returning from this slot function <nl> - m_progress - > reset ( ) ; <nl> + if ( m_progress ) <nl> + m_progress - > reset ( ) ; <nl> reply - > deleteLater ( ) ; <nl> } <nl> <nl>
|
dbhub : Fix possible crash
|
sqlitebrowser/sqlitebrowser
|
4dc52865962414b2754c5a6db6ad68c0958f0eb0
|
2017-09-25T12:36:31Z
|
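The sqlitebrowser record above guards a lazily created progress dialog before resetting it in an error handler. A tiny sketch of why the unguarded call can crash and how the null check fixes it, using a plain `std::unique_ptr` member in place of the Qt dialog; `RemoteFetcher` and `Progress` below are illustrative names only.

```cpp
#include <iostream>
#include <memory>

// Stand-in for a progress dialog that is only created once a transfer starts.
struct Progress {
  void reset() { std::cout << "progress reset\n"; }
};

class RemoteFetcher {
 public:
  void startDownload() { progress_ = std::make_unique<Progress>(); }

  // Error handler that may run before any download ever started.
  void onError() {
    if (progress_)         // the fix: without this check, progress_->reset()
      progress_->reset();  // would dereference a null pointer and crash
    std::cout << "error reported\n";
  }

 private:
  std::unique_ptr<Progress> progress_;  // null until startDownload()
};

int main() {
  RemoteFetcher fetcher;
  fetcher.onError();        // safe even though no download was started
  fetcher.startDownload();
  fetcher.onError();        // resets the existing progress dialog
}
```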
mmm a / include / internal / catch_commandline . cpp <nl> ppp b / include / internal / catch_commandline . cpp <nl> <nl> <nl> # include " catch_string_manip . h " <nl> <nl> + # include " catch_interfaces_registry_hub . h " <nl> + # include " catch_interfaces_reporter . h " <nl> + <nl> # include < fstream > <nl> # include < ctime > <nl> <nl> namespace Catch { <nl> return ParserResult : : runtimeError ( " Unrecognised verbosity , ' " + verbosity + " ' " ) ; <nl> return ParserResult : : ok ( ParseResultType : : Matched ) ; <nl> } ; <nl> + auto const setReporter = [ & ] ( std : : string const & reporter ) { <nl> + IReporterRegistry : : FactoryMap const & factories = getRegistryHub ( ) . getReporterRegistry ( ) . getFactories ( ) ; <nl> + <nl> + auto lcReporter = toLower ( reporter ) ; <nl> + auto result = factories . find ( lcReporter ) ; <nl> + <nl> + if ( factories . end ( ) ! = result ) <nl> + config . reporterName = lcReporter ; <nl> + else <nl> + return ParserResult : : runtimeError ( " Unrecognized reporter , ' " + reporter + " ' . Check available with - - list - reporters " ) ; <nl> + return ParserResult : : ok ( ParseResultType : : Matched ) ; <nl> + } ; <nl> <nl> auto cli <nl> = ExeName ( config . processName ) <nl> namespace Catch { <nl> | Opt ( config . outputFilename , " filename " ) <nl> [ " - o " ] [ " - - out " ] <nl> ( " output filename " ) <nl> - | Opt ( config . reporterName , " name " ) <nl> + | Opt ( setReporter , " name " ) <nl> [ " - r " ] [ " - - reporter " ] <nl> ( " reporter to use ( defaults to console ) " ) <nl> | Opt ( config . name , " name " ) <nl> mmm a / projects / SelfTest / Baselines / compact . sw . approved . txt <nl> ppp b / projects / SelfTest / Baselines / compact . sw . approved . txt <nl> CmdLine . tests . cpp : < line number > : passed : config . reporterName = = " xml " for : " xml " <nl> CmdLine . tests . cpp : < line number > : passed : cli . parse ( { " test " , " - - reporter " , " junit " } ) for : { ? } <nl> CmdLine . tests . cpp : < line number > : passed : config . reporterName = = " junit " for : " junit " = = " junit " <nl> CmdLine . tests . cpp : < line number > : passed : ! ( cli . parse ( { " test " , " - r " , " xml " , " - r " , " junit " } ) ) for : ! { ? } <nl> + CmdLine . tests . cpp : < line number > : passed : ! result for : true <nl> + CmdLine . tests . cpp : < line number > : passed : result . errorMessage ( ) , Contains ( " Unrecognized reporter " ) for : " Unrecognized reporter , ' unsupported ' . Check available with - - list - reporters " contains : " Unrecognized reporter " <nl> CmdLine . tests . cpp : < line number > : passed : cli . parse ( { " test " , " - b " } ) for : { ? } <nl> CmdLine . tests . cpp : < line number > : passed : config . shouldDebugBreak = = true for : true = = true <nl> CmdLine . tests . cpp : < line number > : passed : cli . parse ( { " test " , " - - break " } ) for : { ? } <nl> mmm a / projects / SelfTest / Baselines / console . std . approved . txt <nl> ppp b / projects / SelfTest / Baselines / console . std . approved . 
txt <nl> due to unexpected exception with message : <nl> <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> test cases : 215 | 162 passed | 49 failed | 4 failed as expected <nl> - assertions : 1231 | 1102 passed | 108 failed | 21 failed as expected <nl> + assertions : 1233 | 1104 passed | 108 failed | 21 failed as expected <nl> <nl> mmm a / projects / SelfTest / Baselines / console . sw . approved . txt <nl> ppp b / projects / SelfTest / Baselines / console . sw . approved . txt <nl> PASSED : <nl> with expansion : <nl> ! { ? } <nl> <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + Process can be configured on command line <nl> + reporter <nl> + must match one of the available ones <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + CmdLine . tests . cpp : < line number > <nl> + . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + <nl> + CmdLine . tests . cpp : < line number > : <nl> + PASSED : <nl> + CHECK ( ! result ) <nl> + with expansion : <nl> + true <nl> + <nl> + CmdLine . tests . cpp : < line number > : <nl> + PASSED : <nl> + REQUIRE_THAT ( result . errorMessage ( ) , Contains ( " Unrecognized reporter " ) ) <nl> + with expansion : <nl> + " Unrecognized reporter , ' unsupported ' . Check available with - - list - reporters " <nl> + contains : " Unrecognized reporter " <nl> + <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> Process can be configured on command line <nl> debugger <nl> PASSED : <nl> <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> test cases : 215 | 149 passed | 62 failed | 4 failed as expected <nl> - assertions : 1245 | 1102 passed | 122 failed | 21 failed as expected <nl> + assertions : 1247 | 1104 passed | 122 failed | 21 failed as expected <nl> <nl> mmm a / projects / SelfTest / Baselines / junit . sw . approved . txt <nl> ppp b / projects / SelfTest / Baselines / junit . sw . approved . txt <nl> <nl> < ? xml version = " 1 . 0 " encoding = " UTF - 8 " ? > <nl> < testsuitesloose text artifact <nl> > <nl> - < testsuite name = " < exe - name > " errors = " 17 " failures = " 106 " tests = " 1246 " hostname = " tbd " time = " { duration } " timestamp = " { iso8601 - timestamp } " > <nl> + < testsuite name = " < exe - name > " errors = " 17 " failures = " 106 " tests = " 1248 " hostname = " tbd " time = " { duration } " timestamp = " { iso8601 - timestamp } " > <nl> < testcase classname = " < exe - name > . global " name = " # A test name that starts with a # " time = " { duration } " / > <nl> < testcase classname = " < exe - name > . global " name = " # 1005 : Comparing pointer to int and long ( NULL can be either on various systems ) " time = " { duration } " / > <nl> < testcase classname = " < exe - name > . global " name = " # 1027 " time = " { duration } " / > <nl> Message . tests . cpp : < line number > <nl> < testcase classname = " < exe - name > . global " name = " Process can be configured on command line / reporter / - r / xml " time = " { duration } " / > <nl> < testcase classname = " < exe - name > . 
global " name = " Process can be configured on command line / reporter / - - reporter / junit " time = " { duration } " / > <nl> < testcase classname = " < exe - name > . global " name = " Process can be configured on command line / reporter / Only one reporter is accepted " time = " { duration } " / > <nl> + < testcase classname = " < exe - name > . global " name = " Process can be configured on command line / reporter / must match one of the available ones " time = " { duration } " / > <nl> < testcase classname = " < exe - name > . global " name = " Process can be configured on command line / debugger / - b " time = " { duration } " / > <nl> < testcase classname = " < exe - name > . global " name = " Process can be configured on command line / debugger / - - break " time = " { duration } " / > <nl> < testcase classname = " < exe - name > . global " name = " Process can be configured on command line / abort / - a aborts after first failure " time = " { duration } " / > <nl> mmm a / projects / SelfTest / Baselines / xml . sw . approved . txt <nl> ppp b / projects / SelfTest / Baselines / xml . sw . approved . txt <nl> <nl> < / Section > <nl> < OverallResults successes = " 1 " failures = " 0 " expectedFailures = " 0 " / > <nl> < / Section > <nl> + < Section name = " reporter " filename = " projects / < exe - name > / IntrospectiveTests / CmdLine . tests . cpp " > <nl> + < Section name = " must match one of the available ones " filename = " projects / < exe - name > / IntrospectiveTests / CmdLine . tests . cpp " > <nl> + < Expression success = " true " type = " CHECK " filename = " projects / < exe - name > / IntrospectiveTests / CmdLine . tests . cpp " > <nl> + < Original > <nl> + ! result <nl> + < / Original > <nl> + < Expanded > <nl> + true <nl> + < / Expanded > <nl> + < / Expression > <nl> + < Expression success = " true " type = " REQUIRE_THAT " filename = " projects / < exe - name > / IntrospectiveTests / CmdLine . tests . cpp " > <nl> + < Original > <nl> + result . errorMessage ( ) , Contains ( " Unrecognized reporter " ) <nl> + < / Original > <nl> + < Expanded > <nl> + " Unrecognized reporter , ' unsupported ' . Check available with - - list - reporters " contains : " Unrecognized reporter " <nl> + < / Expanded > <nl> + < / Expression > <nl> + < OverallResults successes = " 2 " failures = " 0 " expectedFailures = " 0 " / > <nl> + < / Section > <nl> + < OverallResults successes = " 2 " failures = " 0 " expectedFailures = " 0 " / > <nl> + < / Section > <nl> < Section name = " debugger " filename = " projects / < exe - name > / IntrospectiveTests / CmdLine . tests . cpp " > <nl> < Section name = " - b " filename = " projects / < exe - name > / IntrospectiveTests / CmdLine . tests . cpp " > <nl> < Expression success = " true " type = " CHECK " filename = " projects / < exe - name > / IntrospectiveTests / CmdLine . tests . cpp " > <nl> loose text artifact <nl> < / Section > <nl> < OverallResult success = " true " / > <nl> < / TestCase > <nl> - < OverallResults successes = " 1102 " failures = " 123 " expectedFailures = " 21 " / > <nl> + < OverallResults successes = " 1104 " failures = " 123 " expectedFailures = " 21 " / > <nl> < / Group > <nl> - < OverallResults successes = " 1102 " failures = " 122 " expectedFailures = " 21 " / > <nl> + < OverallResults successes = " 1104 " failures = " 122 " expectedFailures = " 21 " / > <nl> < / Catch > <nl> mmm a / projects / SelfTest / IntrospectiveTests / CmdLine . tests . cpp <nl> ppp b / projects / SelfTest / IntrospectiveTests / CmdLine . tests . 
cpp <nl> TEST_CASE ( " Process can be configured on command line " , " [ config ] [ command - line ] " <nl> CHECK ( config . processName = = " " ) ; <nl> } <nl> <nl> - <nl> SECTION ( " default - no arguments " ) { <nl> auto result = cli . parse ( { " test " } ) ; <nl> CHECK ( result ) ; <nl> TEST_CASE ( " Process can be configured on command line " , " [ config ] [ command - line ] " <nl> SECTION ( " Only one reporter is accepted " ) { <nl> REQUIRE_FALSE ( cli . parse ( { " test " , " - r " , " xml " , " - r " , " junit " } ) ) ; <nl> } <nl> - } <nl> + SECTION ( " must match one of the available ones " ) { <nl> + auto result = cli . parse ( { " test " , " - - reporter " , " unsupported " } ) ; <nl> + CHECK ( ! result ) ; <nl> <nl> + # ifndef CATCH_CONFIG_DISABLE_MATCHERS <nl> + REQUIRE_THAT ( result . errorMessage ( ) , Contains ( " Unrecognized reporter " ) ) ; <nl> + # endif <nl> + } <nl> + } <nl> <nl> SECTION ( " debugger " ) { <nl> SECTION ( " - b " ) { <nl>
|
Catch ' s CLI now checks whether requested reporter exists
|
catchorg/Catch2
|
8b3c09c137b532ca1480d267ae9dc367b889a408
|
2018-10-25T13:43:30Z
|
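The Catch2 record above swaps a raw string option for a validating callback: the requested reporter name is lowercased, looked up in the reporter-factory registry, and rejected with a pointer to --list-reporters when unknown. A rough standalone sketch of that validate-against-a-registry step; `ParserResult` and the factory map below are simplified stand-ins rather than Catch2's Clara types.

```cpp
#include <algorithm>
#include <cctype>
#include <iostream>
#include <map>
#include <string>

// Reduced stand-in for Clara's ParserResult: either ok or a runtime error text.
struct ParserResult {
  bool ok;
  std::string error;
  static ParserResult Ok() { return {true, ""}; }
  static ParserResult RuntimeError(std::string msg) { return {false, std::move(msg)}; }
};

static std::string toLower(std::string s) {
  std::transform(s.begin(), s.end(), s.begin(),
                 [](unsigned char c) { return static_cast<char>(std::tolower(c)); });
  return s;
}

// Validate the requested reporter against the known factories before storing it.
ParserResult setReporter(const std::map<std::string, std::string>& factories,
                         std::string& reporterName, const std::string& requested) {
  const std::string lc = toLower(requested);
  if (factories.find(lc) == factories.end()) {
    return ParserResult::RuntimeError("Unrecognized reporter, '" + requested +
                                      "'. Check available with --list-reporters");
  }
  reporterName = lc;
  return ParserResult::Ok();
}

int main() {
  // Keys play the role of registered reporter factories.
  const std::map<std::string, std::string> factories = {
      {"console", ""}, {"xml", ""}, {"junit", ""}};

  std::string reporterName = "console";
  ParserResult good = setReporter(factories, reporterName, "JUnit");
  std::cout << (good.ok ? "accepted: " + reporterName : good.error) << "\n";

  ParserResult bad = setReporter(factories, reporterName, "unsupported");
  std::cout << (bad.ok ? "accepted: " + reporterName : bad.error) << "\n";
}
```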
mmm a / system / include / emscripten / em_asm . h <nl> ppp b / system / include / emscripten / em_asm . h <nl> <nl> / / C + + needs to support vararg template parameter packs , e . g . like in <nl> / / tests / core / test_em_asm_parameter_pack . cpp . Because of that , a macro - only <nl> / / approach doesn ' t work ( a macro applied to a parameter pack would expand <nl> - / / incorrectly ) . So we can use a template function instead to build a <nl> - / / std : : string , and convert that to a C string . <nl> - / / String builder class is so the _sig functions can be mutually recursive . <nl> - class __em_asm_sig_builder { <nl> - private : <nl> - static char sig_char ( float ) { <nl> - return ' d ' ; <nl> - } <nl> - static char sig_char ( double ) { <nl> - return ' d ' ; <nl> - } <nl> - static char sig_char ( int ) { <nl> - return ' i ' ; <nl> - } <nl> - static char sig_char ( unsigned ) { <nl> - return ' i ' ; <nl> - } <nl> - static char sig_char ( long ) { <nl> - return ' i ' ; <nl> - } <nl> - static char sig_char ( unsigned long ) { <nl> - return ' i ' ; <nl> - } <nl> - template < typename T > <nl> - static char sig_char ( T * arg ) { <nl> - return ' i ' ; <nl> - } <nl> - <nl> - template < typename . . . Args > <nl> - struct inner { <nl> - char buffer [ sizeof . . . ( Args ) + 1 ] ; <nl> - } ; <nl> - public : <nl> - template < typename . . . Args > <nl> - static const inner < Args . . . > __em_asm_sig ( Args . . . args ) { <nl> - inner < Args . . . > temp ; <nl> - char buf [ sizeof . . . ( Args ) + 1 ] = { sig_char ( args ) . . . , 0 } ; <nl> - for ( int i = 0 ; i < sizeof . . . ( Args ) + 1 ; + + i ) { <nl> - temp . buffer [ i ] = buf [ i ] ; <nl> - } <nl> - return temp ; <nl> - } <nl> + / / incorrectly ) . So we can use a template class instead to build a temporary <nl> + / / buffer of characters . <nl> + <nl> + / / As emscripten is require to build successfully with - std = c + + 03 , we cannot <nl> + / / use std : : tuple or std : : integral_constant . Using C + + 11 features is only a <nl> + / / warning in modern Clang , which are ignored in system headers . 
<nl> + template < typename , typename = void > struct __em_asm_sig { } ; <nl> + template < > struct __em_asm_sig < float > { static const char value = ' d ' ; } ; <nl> + template < > struct __em_asm_sig < double > { static const char value = ' d ' ; } ; <nl> + template < > struct __em_asm_sig < char > { static const char value = ' i ' ; } ; <nl> + template < > struct __em_asm_sig < signed char > { static const char value = ' i ' ; } ; <nl> + template < > struct __em_asm_sig < unsigned char > { static const char value = ' i ' ; } ; <nl> + template < > struct __em_asm_sig < short > { static const char value = ' i ' ; } ; <nl> + template < > struct __em_asm_sig < unsigned short > { static const char value = ' i ' ; } ; <nl> + template < > struct __em_asm_sig < int > { static const char value = ' i ' ; } ; <nl> + template < > struct __em_asm_sig < unsigned int > { static const char value = ' i ' ; } ; <nl> + template < > struct __em_asm_sig < long > { static const char value = ' i ' ; } ; <nl> + template < > struct __em_asm_sig < unsigned long > { static const char value = ' i ' ; } ; <nl> + template < > struct __em_asm_sig < bool > { static const char value = ' i ' ; } ; <nl> + template < > struct __em_asm_sig < wchar_t > { static const char value = ' i ' ; } ; <nl> + template < typename T > struct __em_asm_sig < T * > { static const char value = ' i ' ; } ; <nl> + <nl> + / / Explicit support for enums , they ' re passed as int via variadic arguments . <nl> + template < bool > struct __em_asm_if { } ; <nl> + template < > struct __em_asm_if < true > { typedef void type ; } ; <nl> + template < typename T > struct __em_asm_sig < T , typename __em_asm_if < __is_enum ( T ) > : : type > { <nl> + static const char value = ' i ' ; <nl> } ; <nl> <nl> + / / Instead of std : : tuple <nl> + template < typename . . . Args > <nl> + struct __em_asm_type_tuple { } ; <nl> + <nl> + / / Instead of std : : make_tuple <nl> + template < typename . . . Args > <nl> + __em_asm_type_tuple < Args . . . > __em_asm_make_type_tuple ( Args . . . args ) { <nl> + return { } ; <nl> + } <nl> + <nl> + template < typename > <nl> + struct __em_asm_sig_builder { } ; <nl> + <nl> + template < typename . . . Args > <nl> + struct __em_asm_sig_builder < __em_asm_type_tuple < Args . . . > > { <nl> + static const char buffer [ sizeof . . . ( Args ) + 1 ] ; <nl> + } ; <nl> + <nl> + template < typename . . . Args > <nl> + const char __em_asm_sig_builder < __em_asm_type_tuple < Args . . . > > : : buffer [ ] = { __em_asm_sig < Args > : : value . . . , 0 } ; <nl> + <nl> + / / We move to type level with decltype ( make_tuple ( . . . ) ) to avoid double <nl> + / / evaluation of arguments . Use __typeof__ instead of decltype , though , <nl> + / / because the header should be able to compile with clang ' s - std = c + + 03 . <nl> # define _EM_ASM_PREP_ARGS ( . . . ) \ <nl> - , __em_asm_sig_builder : : __em_asm_sig ( __VA_ARGS__ ) . buffer , # # __VA_ARGS__ <nl> + , __em_asm_sig_builder < __typeof__ ( __em_asm_make_type_tuple ( __VA_ARGS__ ) ) > : : buffer , # # __VA_ARGS__ <nl> <nl> extern " C " { <nl> # endif / / __cplusplus <nl> new file mode 100644 <nl> index 00000000000 . . e59c541eb4d <nl> mmm / dev / null <nl> ppp b / tests / core / test_em_asm_arguments_side_effects . cpp <nl> <nl> + / / Copyright 2019 The Emscripten Authors . All rights reserved . <nl> + / / Emscripten is available under two separate licenses , the MIT license and the <nl> + / / University of Illinois / NCSA Open Source License . 
Both these licenses can be <nl> + / / found in the LICENSE file . <nl> + <nl> + # include < stdio . h > <nl> + # include < emscripten . h > <nl> + <nl> + int main ( int argc , char * * argv ) { <nl> + int counter_1 = 3 , counter_2 = 4 ; <nl> + / / https : / / github . com / emscripten - core / emscripten / issues / 9030 <nl> + printf ( " counter_1 = % d , counter_2 = % d \ n " , counter_1 , counter_2 ) ; <nl> + int result = EM_ASM_INT ( { <nl> + return $ 0 * 10 + $ 1 ; <nl> + } , counter_1 + + , counter_2 + + ) ; <nl> + printf ( " counter_1 + + * 10 + counter_2 + + = % d \ n " , result ) ; <nl> + printf ( " counter_1 = % d , counter_2 = % d \ n " , counter_1 , counter_2 ) ; <nl> + return 0 ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . ccde896f032 <nl> mmm / dev / null <nl> ppp b / tests / core / test_em_asm_arguments_side_effects . out <nl> <nl> + counter_1 = 3 , counter_2 = 4 <nl> + counter_1 + + * 10 + counter_2 + + = 34 <nl> + counter_1 = 4 , counter_2 = 5 <nl> mmm a / tests / test_core . py <nl> ppp b / tests / test_core . py <nl> def test_em_asm_parameter_pack ( self ) : <nl> self . emcc_args + = [ ' - std = c + + 11 ' ] <nl> self . do_run_in_out_file_test ( ' tests ' , ' core ' , ' test_em_asm_parameter_pack ' ) <nl> <nl> + def test_em_asm_arguments_side_effects ( self ) : <nl> + self . do_run_in_out_file_test ( ' tests ' , ' core ' , ' test_em_asm_arguments_side_effects ' ) <nl> + self . do_run_in_out_file_test ( ' tests ' , ' core ' , ' test_em_asm_arguments_side_effects ' , force_c = True ) <nl> + <nl> @ parameterized ( { <nl> ' normal ' : ( [ ] , ) , <nl> ' linked ' : ( [ ' - s ' , ' MAIN_MODULE ' ] , ) , <nl>
|
Fix : allow side effects for ` EM_ASM ` arguments ( )
|
emscripten-core/emscripten
|
4ea632510a0c6623b002c280bb1c25b168e894c0
|
2019-07-29T21:05:00Z
|
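The emscripten record above derives the EM_ASM signature string purely from argument types: per-type template specializations supply 'i'/'d' characters, a pack expansion fills a static buffer, and decltype keeps the argument expressions unevaluated, so side effects such as counter++ fire only where the caller evaluates them. A small C++11 sketch of that trait-plus-pack-expansion idea, with simplified names and without the enum/bool/wchar_t specializations of the real header.

```cpp
#include <iostream>

// Map a type to its signature character, in the spirit of __em_asm_sig.
template <typename T> struct sig_char;
template <> struct sig_char<int>    { static const char value = 'i'; };
template <> struct sig_char<long>   { static const char value = 'i'; };
template <> struct sig_char<float>  { static const char value = 'd'; };
template <> struct sig_char<double> { static const char value = 'd'; };
template <typename T> struct sig_char<T*> { static const char value = 'i'; };

// Stand-ins for __em_asm_type_tuple / __em_asm_make_type_tuple.
template <typename... Args> struct type_tuple {};
template <typename... Args> type_tuple<Args...> make_type_tuple(Args...);  // declaration only

// Fill a static character buffer from the pack, one char per argument type.
template <typename> struct sig_builder;
template <typename... Args>
struct sig_builder<type_tuple<Args...>> {
  static const char buffer[sizeof...(Args) + 1];
};
template <typename... Args>
const char sig_builder<type_tuple<Args...>>::buffer[] = {sig_char<Args>::value..., 0};

// decltype is an unevaluated context, so the argument expressions are never
// executed here; only their types feed the signature.
#define SIGNATURE_OF(...) (sig_builder<decltype(make_type_tuple(__VA_ARGS__))>::buffer)

int main() {
  int counter = 3;
  double scale = 1.5;
  const char* text = "hi";
  std::cout << SIGNATURE_OF(counter, scale, text) << "\n";   // prints "idi"
  std::cout << SIGNATURE_OF(counter++, counter++) << "\n";   // prints "ii"; no increments happen
  std::cout << "counter = " << counter << "\n";              // still 3
}
```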
new file mode 100644 <nl> index 000000000000 . . edfbc35db22d <nl> mmm / dev / null <nl> ppp b / jstests / noPassthrough / index_build_continuous_drain_secondary . js <nl> <nl> + / * * <nl> + * Tests that secondaries drain side writes while waiting for the primary to commit an index build . <nl> + * <nl> + * This test does not make very many correctness assertions because this exercises a performance <nl> + * optimization . Instead we log the time difference between how long the primary and secondary took <nl> + * to complete the index builds . The expectation is that these values are close to each other . <nl> + * <nl> + * @ tags : [ requires_replication ] <nl> + * <nl> + * / <nl> + ( function ( ) { <nl> + load ( " jstests / noPassthrough / libs / index_build . js " ) ; <nl> + <nl> + const replSet = new ReplSetTest ( { <nl> + nodes : [ <nl> + { } , <nl> + { <nl> + / / Disallow elections on secondary . <nl> + rsConfig : { <nl> + priority : 0 , <nl> + votes : 0 , <nl> + } , <nl> + } , <nl> + ] <nl> + } ) ; <nl> + <nl> + replSet . startSet ( ) ; <nl> + replSet . initiate ( ) ; <nl> + <nl> + const primary = replSet . getPrimary ( ) ; <nl> + if ( ! IndexBuildTest . supportsTwoPhaseIndexBuild ( primary ) ) { <nl> + jsTestLog ( ' Skipping test because two phase index builds are not supported . ' ) ; <nl> + replSet . stopSet ( ) ; <nl> + return ; <nl> + } <nl> + <nl> + const dbName = ' test ' ; <nl> + const primaryDB = primary . getDB ( dbName ) ; <nl> + const coll = primaryDB . test ; <nl> + <nl> + let insertDocs = function ( numDocs ) { <nl> + const bulk = coll . initializeUnorderedBulkOp ( ) ; <nl> + for ( let i = 0 ; i < numDocs ; i + + ) { <nl> + bulk . insert ( { a : i , b : i } ) ; <nl> + } <nl> + assert . commandWorked ( bulk . execute ( ) ) ; <nl> + } ; <nl> + insertDocs ( 10000 ) ; <nl> + replSet . awaitReplication ( ) ; <nl> + <nl> + / / Start and pause the index build on the primary so that it does not start collection scanning . <nl> + IndexBuildTest . pauseIndexBuilds ( primary ) ; <nl> + const createIdx = IndexBuildTest . startIndexBuild ( primary , coll . getFullName ( ) , { a : 1 , b : 1 } ) ; <nl> + <nl> + const secondary = replSet . getSecondary ( ) ; <nl> + const secondaryDB = secondary . getDB ( dbName ) ; <nl> + <nl> + / / Wait until the secondary reports that it is ready to commit . <nl> + / / " Index build waiting for next action before completing final phase " <nl> + checkLog . containsJson ( secondary , 3856203 ) ; <nl> + <nl> + / / Insert a high volume of documents . Since the secondary has reported that it is ready to commit , <nl> + / / the expectation is that the secondary will intercept and drain these writes as they are <nl> + / / replicated from primary . <nl> + insertDocs ( 50000 ) ; <nl> + / / " index build : drained side writes " <nl> + checkLog . containsJson ( secondary , 20689 ) ; <nl> + <nl> + / / Record how long it takes for the index build to complete from this point onward . <nl> + let start = new Date ( ) ; <nl> + IndexBuildTest . resumeIndexBuilds ( primary ) ; <nl> + <nl> + / / Wait for index build to finish on primary . <nl> + createIdx ( ) ; <nl> + let primaryEnd = new Date ( ) ; <nl> + <nl> + / / Wait for the index build to complete on the secondary . <nl> + IndexBuildTest . waitForIndexBuildToStop ( secondaryDB ) ; <nl> + let secondaryEnd = new Date ( ) ; <nl> + <nl> + / / We don ' t make any assertions about these times , just report them for informational purposes . 
The <nl> + / / expectation is that they are as close to each other as possible , which would suggest that the <nl> + / / secondary does not spend more time completing the index than the primary . <nl> + jsTestLog ( " these values should be similar : " ) ; <nl> + jsTestLog ( " elapsed on primary : " + ( primaryEnd - start ) ) ; <nl> + jsTestLog ( " elapsed on secondary : " + ( secondaryEnd - start ) ) ; <nl> + <nl> + IndexBuildTest . assertIndexes ( coll , 2 , [ ' _id_ ' , ' a_1_b_1 ' ] ) ; <nl> + replSet . stopSet ( ) ; <nl> + } ) ( ) ; <nl> mmm a / src / mongo / db / index_builds_coordinator . cpp <nl> ppp b / src / mongo / db / index_builds_coordinator . cpp <nl> void IndexBuildsCoordinator : : _buildIndexSinglePhase ( <nl> boost : : optional < Lock : : CollectionLock > * exclusiveCollectionLock ) { <nl> _scanCollectionAndInsertKeysIntoSorter ( opCtx , replState , exclusiveCollectionLock ) ; <nl> _insertKeysFromSideTablesWithoutBlockingWrites ( opCtx , replState ) ; <nl> + _insertKeysFromSideTablesBlockingWrites ( opCtx , replState ) ; <nl> _signalPrimaryForCommitReadiness ( opCtx , replState ) ; <nl> _waitForNextIndexBuildAction ( opCtx , replState ) ; <nl> _insertKeysFromSideTablesAndCommit ( <nl> void IndexBuildsCoordinator : : _buildIndexTwoPhase ( <nl> <nl> _scanCollectionAndInsertKeysIntoSorter ( opCtx , replState , exclusiveCollectionLock ) ; <nl> _insertKeysFromSideTablesWithoutBlockingWrites ( opCtx , replState ) ; <nl> + _insertKeysFromSideTablesBlockingWrites ( opCtx , replState ) ; <nl> <nl> _signalPrimaryForCommitReadiness ( opCtx , replState ) ; <nl> auto commitIndexBuildTimestamp = _waitForNextIndexBuildAction ( opCtx , replState ) ; <nl> void IndexBuildsCoordinator : : _insertKeysFromSideTablesWithoutBlockingWrites ( <nl> LOGV2 ( 20666 , " Hanging after index build first drain " ) ; <nl> hangAfterIndexBuildFirstDrain . pauseWhileSet ( ) ; <nl> } <nl> - <nl> + } <nl> + void IndexBuildsCoordinator : : _insertKeysFromSideTablesBlockingWrites ( <nl> + OperationContext * opCtx , std : : shared_ptr < ReplIndexBuildState > replState ) { <nl> + const NamespaceStringOrUUID dbAndUUID ( replState - > dbName , replState - > collectionUUID ) ; <nl> / / Perform the second drain while stopping writes on the collection . <nl> { <nl> opCtx - > recoveryUnit ( ) - > abandonSnapshot ( ) ; <nl> mmm a / src / mongo / db / index_builds_coordinator . h <nl> ppp b / src / mongo / db / index_builds_coordinator . h <nl> class IndexBuildsCoordinator { <nl> * / <nl> void _insertKeysFromSideTablesWithoutBlockingWrites ( <nl> OperationContext * opCtx , std : : shared_ptr < ReplIndexBuildState > replState ) ; <nl> + void _insertKeysFromSideTablesBlockingWrites ( OperationContext * opCtx , <nl> + std : : shared_ptr < ReplIndexBuildState > replState ) ; <nl> <nl> / * * <nl> * Reads the commit ready members list for index build UUID in ' replState ' from <nl> class IndexBuildsCoordinator { <nl> virtual void _signalPrimaryForCommitReadiness ( <nl> OperationContext * opCtx , std : : shared_ptr < ReplIndexBuildState > replState ) = 0 ; <nl> <nl> + / * * <nl> + * Drains the side - writes table periodically while waiting for the IndexBuildAction to be ready . <nl> + * / <nl> + virtual IndexBuildAction _drainSideWritesUntilNextActionIsAvailable ( <nl> + OperationContext * opCtx , std : : shared_ptr < ReplIndexBuildState > replState ) = 0 ; <nl> + <nl> / * * <nl> * Both primary and secondaries will wait on ' ReplIndexBuildState : : waitForNextAction ' future for <nl> * commit or abort index build signal . 
<nl> mmm a / src / mongo / db / index_builds_coordinator_mongod . cpp <nl> ppp b / src / mongo / db / index_builds_coordinator_mongod . cpp <nl> void IndexBuildsCoordinatorMongod : : _signalPrimaryForCommitReadiness ( <nl> return ; <nl> } <nl> <nl> + IndexBuildAction IndexBuildsCoordinatorMongod : : _drainSideWritesUntilNextActionIsAvailable ( <nl> + OperationContext * opCtx , std : : shared_ptr < ReplIndexBuildState > replState ) { <nl> + auto future = [ & ] { <nl> + stdx : : unique_lock < Latch > lk ( replState - > mutex ) ; <nl> + invariant ( replState - > waitForNextAction ) ; <nl> + return replState - > waitForNextAction - > getFuture ( ) ; <nl> + } ( ) ; <nl> + <nl> + / / Waits until the promise is fulfilled or the deadline expires . <nl> + IndexBuildAction nextAction ; <nl> + auto waitUntilNextActionIsReady = [ & ] ( ) { <nl> + / / Don ' t perform a blocking wait while holding locks or storage engine resources . <nl> + opCtx - > recoveryUnit ( ) - > abandonSnapshot ( ) ; <nl> + Lock : : TempRelease release ( opCtx - > lockState ( ) ) ; <nl> + <nl> + auto deadline = Date_t : : now ( ) + Milliseconds ( 1000 ) ; <nl> + auto timeoutError = opCtx - > getTimeoutError ( ) ; <nl> + <nl> + try { <nl> + nextAction = <nl> + opCtx - > runWithDeadline ( deadline , timeoutError , [ & ] { return future . get ( opCtx ) ; } ) ; <nl> + } catch ( const ExceptionForCat < ErrorCategory : : ExceededTimeLimitError > & e ) { <nl> + if ( e . code ( ) = = timeoutError ) { <nl> + return false ; <nl> + } <nl> + throw ; <nl> + } <nl> + return true ; <nl> + } ; <nl> + <nl> + / / Continuously drain incoming writes until the future is ready . This is an optimization that <nl> + / / allows the critical section of committing , which must drain the remainder of the side writes , <nl> + / / to be as short as possible . <nl> + while ( ! waitUntilNextActionIsReady ( ) ) { <nl> + _insertKeysFromSideTablesWithoutBlockingWrites ( opCtx , replState ) ; <nl> + } <nl> + return nextAction ; <nl> + } <nl> + <nl> Timestamp IndexBuildsCoordinatorMongod : : _waitForNextIndexBuildAction ( <nl> OperationContext * opCtx , std : : shared_ptr < ReplIndexBuildState > replState ) { <nl> Timestamp commitIndexBuildTimestamp ; <nl> <nl> - / / Yield locks and storage engine resources before blocking . <nl> - opCtx - > recoveryUnit ( ) - > abandonSnapshot ( ) ; <nl> - Lock : : TempRelease release ( opCtx - > lockState ( ) ) ; <nl> - <nl> LOGV2 ( 3856203 , <nl> " Index build waiting for next action before completing final phase : { buildUUID } " , <nl> " buildUUID " _attr = replState - > buildUUID ) ; <nl> <nl> while ( true ) { <nl> - / / Future wait should ignore state transition . <nl> - invariant ( ! opCtx - > lockState ( ) - > isRSTLLocked ( ) , <nl> - str : : stream ( ) <nl> - < < " failed to yield locks for index build while waiting for commit or abort : " <nl> - < < replState - > buildUUID ) ; <nl> - <nl> - / / future wait should get interrupted if the node shutdowns . <nl> - const auto nextAction = replState - > waitForNextAction - > getFuture ( ) . get ( opCtx ) ; <nl> + / / Future wait can be interrupted . This function will yield locks while waiting for the <nl> + / / future to be fulfilled . 
<nl> + const auto nextAction = _drainSideWritesUntilNextActionIsAvailable ( opCtx , replState ) ; <nl> LOGV2 ( 3856204 , <nl> " Index build received signal for build uuid : { buildUUID } , action : { action } " , <nl> " buildUUID " _attr = replState - > buildUUID , <nl> Timestamp IndexBuildsCoordinatorMongod : : _waitForNextIndexBuildAction ( <nl> <nl> bool needsToRetryWait = false ; <nl> <nl> - / / Reacquire RSTL lock <nl> + / / Ensure RSTL is acquired before checking replication state . This is only necessary for <nl> + / / single - phase builds on secondaries . Everywhere else , the RSTL is already held and this <nl> + / / should never block . <nl> repl : : ReplicationStateTransitionLockGuard rstl ( opCtx , MODE_IX ) ; <nl> + <nl> const NamespaceStringOrUUID dbAndUUID ( replState - > dbName , replState - > collectionUUID ) ; <nl> auto replCoord = repl : : ReplicationCoordinator : : get ( opCtx ) ; <nl> auto isMaster = replCoord - > canAcceptWritesFor ( opCtx , dbAndUUID ) ; <nl> mmm a / src / mongo / db / index_builds_coordinator_mongod . h <nl> ppp b / src / mongo / db / index_builds_coordinator_mongod . h <nl> class IndexBuildsCoordinatorMongod : public IndexBuildsCoordinator { <nl> void _signalPrimaryForCommitReadiness ( OperationContext * opCtx , <nl> std : : shared_ptr < ReplIndexBuildState > replState ) override ; <nl> <nl> + IndexBuildAction _drainSideWritesUntilNextActionIsAvailable ( <nl> + OperationContext * opCtx , std : : shared_ptr < ReplIndexBuildState > replState ) override ; <nl> + <nl> Timestamp _waitForNextIndexBuildAction ( OperationContext * opCtx , <nl> std : : shared_ptr < ReplIndexBuildState > replState ) override ; <nl> <nl> mmm a / src / mongo / embedded / index_builds_coordinator_embedded . h <nl> ppp b / src / mongo / embedded / index_builds_coordinator_embedded . h <nl> class IndexBuildsCoordinatorEmbedded : public IndexBuildsCoordinator { <nl> void _signalPrimaryForCommitReadiness ( OperationContext * opCtx , <nl> std : : shared_ptr < ReplIndexBuildState > replState ) override ; <nl> <nl> + IndexBuildAction _drainSideWritesUntilNextActionIsAvailable ( <nl> + OperationContext * opCtx , std : : shared_ptr < ReplIndexBuildState > replState ) { <nl> + return { } ; <nl> + } ; <nl> + <nl> Timestamp _waitForNextIndexBuildAction ( OperationContext * opCtx , <nl> std : : shared_ptr < ReplIndexBuildState > replState ) override ; <nl> } ; <nl>
|
SERVER - 39458 Continuously drain side writes while waiting for next index build action
|
mongodb/mongo
|
ca49965d8d68cc853e466ba741df08bb248d46f6
|
2020-04-03T14:06:26Z
|
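The secondary-side change in the record above hinges on one pattern: instead of blocking indefinitely on the commit/abort future, the builder waits in short slices and drains the side-writes table after every timeout, so the final drain that runs while writes are blocked has almost nothing left to do. The sketch below is a minimal, self-contained illustration of that wait-with-deadline-and-drain loop using plain `std::future`; the `Action` enum, the `sideWrites` deque, and the timing constants are illustrative stand-ins, not MongoDB's actual types.

```cpp
// Minimal sketch of "drain while waiting": poll a future with a short
// deadline and do incremental work on every timeout.
#include <chrono>
#include <deque>
#include <future>
#include <iostream>
#include <mutex>
#include <thread>

enum class Action { kCommit, kAbort };

int main() {
    std::promise<Action> nextAction;
    std::future<Action> signal = nextAction.get_future();

    std::mutex mu;
    std::deque<int> sideWrites;  // stand-in for the side-writes table

    // Producer: keeps generating concurrent writes, then signals commit.
    std::thread producer([&] {
        for (int i = 0; i < 50; ++i) {
            { std::lock_guard<std::mutex> lk(mu); sideWrites.push_back(i); }
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
        }
        nextAction.set_value(Action::kCommit);
    });

    auto drain = [&] {
        std::lock_guard<std::mutex> lk(mu);
        std::size_t n = sideWrites.size();
        sideWrites.clear();  // "apply" the buffered writes to the index
        return n;
    };

    // Wait in 100ms slices; drain between timeouts instead of letting the
    // backlog accumulate until the signal arrives.
    while (signal.wait_for(std::chrono::milliseconds(100)) !=
           std::future_status::ready) {
        std::cout << "drained " << drain() << " writes while waiting\n";
    }

    const Action action = signal.get();
    std::cout << "final drain: " << drain() << " writes, action="
              << (action == Action::kCommit ? "commit" : "abort") << "\n";

    producer.join();
    return 0;
}
```

Polling in slices costs a few extra wakeups but keeps the backlog bounded, which is why the primary and secondary completion times reported by the new test are expected to stay close.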
mmm a / docs / SIL . rst <nl> ppp b / docs / SIL . rst <nl> Basic Blocks <nl> <nl> sil - basic - block : : = sil - label sil - instruction - def * sil - terminator <nl> sil - label : : = sil - identifier ( ' ( ' sil - argument ( ' , ' sil - argument ) * ' ) ' ) ? ' : ' <nl> - sil - argument : : = sil - value - name ' : ' sil - type <nl> + sil - value - ownership - kind : : = @ owned <nl> + sil - value - ownership - kind : : = @ guaranteed <nl> + sil - value - ownership - kind : : = @ unowned <nl> + sil - argument : : = sil - value - name ' : ' sil - value - ownership - kind ? sil - type <nl> <nl> sil - instruction - result : : = sil - value - name <nl> sil - instruction - result : : = ' ( ' ( sil - value - name ( ' , ' sil - value - name ) * ) ? ' ) ' <nl> block : : <nl> Arguments to the entry point basic block , which has no predecessor , <nl> are bound by the function ' s caller : : <nl> <nl> - sil @ foo : $ ( Int ) - > Int { <nl> + sil @ foo : $ @ convention ( thin ) ( Int ) - > Int { <nl> bb0 ( % x : $ Int ) : <nl> return % x : $ Int <nl> } <nl> <nl> - sil @ bar : $ ( Int , Int ) - > ( ) { <nl> + sil @ bar : $ @ convention ( thin ) ( Int , Int ) - > ( ) { <nl> bb0 ( % x : $ Int , % y : $ Int ) : <nl> % foo = function_ref @ foo <nl> % 1 = apply % foo ( % x ) : $ ( Int ) - > Int <nl> are bound by the function ' s caller : : <nl> return % 3 : $ ( ) <nl> } <nl> <nl> + When a function is in Ownership SSA , arguments additionally have an explicit <nl> + annotated convention that describe the ownership semantics of the argument <nl> + value : : <nl> + <nl> + sil [ ossa ] @ baz : $ @ convention ( thin ) ( Int , @ owned String , @ guaranteed String , @ unowned String ) - > ( ) { <nl> + bb0 ( % x : $ Int , % y : @ owned $ String , % z : @ guaranteed $ String , % w : @ unowned $ String ) : <nl> + . . . <nl> + } <nl> + <nl> + Note that the first argument ( ` ` % x ` ` ) has an implicit ownership kind of <nl> + ` ` @ none ` ` since all trivial values have ` ` @ none ` ` ownership . <nl> <nl> Debug Information <nl> ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ <nl> of functions returning uninhabited types . An ` ` unreachable ` ` instruction that <nl> survives guaranteed DCE and is not immediately preceded by a no - return <nl> application is a dataflow error . <nl> <nl> + Ownership SSA <nl> + mmmmmmmmmmmm - <nl> + <nl> + A SILFunction marked with the ` ` [ ossa ] ` ` function attribute is considered to be <nl> + in Ownership SSA form . Ownership SSA is an augmented version of SSA that <nl> + enforces ownership invariants by imbuing value - operand edges with semantic <nl> + ownership information . All SIL values are statically assigned an ownership kind <nl> + that defines the ownership semantics that the value models . All SIL operands <nl> + that use a SIL value are required to be able to be semantically partitioned in <nl> + between " normal uses " that just require the value to be live and " consuming <nl> + uses " that end the lifetime of the value and after which the value can no longer <nl> + be used . Since operands that are consuming uses end a value ' s lifetime , <nl> + naturally we must have that the consuming use points jointly post - dominate all <nl> + non - consuming use points and that a value must be consumed exactly once along <nl> + all reachable program paths , preventing leaks and use - after - frees . 
As an <nl> + example , consider the following SIL example with partitioned defs / uses annotated <nl> + inline : : <nl> + <nl> + sil @ stash_and_cast : $ @ convention ( thin ) ( @ owned Klass ) - > @ owned SuperKlass { <nl> + bb0 ( % kls1 : @ owned $ Klass ) : / / Definition of % kls1 <nl> + <nl> + / / " Normal Use " kls1 . <nl> + / / Definition of % kls2 . <nl> + % kls2 = copy_value % kls1 : $ Klass <nl> + <nl> + / / " Consuming Use " of % kls2 to store it into a global . Stores in ossa are <nl> + / / consuming since memory is generally assumed to have " owned " <nl> + / / semantics . After this instruction executes , we can no longer use % kls2 <nl> + / / without triggering an ownership violation . <nl> + store % kls2 to [ init ] % globalMem : $ * Klass <nl> + <nl> + / / " Consuming Use " of % kls1 . <nl> + / / Definition of % kls1Casted . <nl> + % kls1Casted = upcast % kls1 : $ Klass to $ SuperKlass <nl> + <nl> + / / " Consuming Use " of % kls1Casted <nl> + return % kls1Casted : $ SuperKlass <nl> + } <nl> + <nl> + Notice how every value in the SIL above has a partionable set of uses with <nl> + normal uses always before consuming uses . Any such violations of ownership <nl> + semantics would trigger a static SILVerifier error allowing us to know that we <nl> + do not have any leaks or use - after - frees in the above code . <nl> + <nl> + The semantics in the previous example is of just one form of ownership semantics <nl> + supported : " owned " semantics . In SIL , we support four different ownership kinds : <nl> + <nl> + * * * None * * . This is used to represent values that do not require memory <nl> + management and are outside of Ownership SSA invariants . Examples : trivial <nl> + values ( e . x . : Int , Float ) , non - payloaded cases of non - trivial enums ( e . x . : <nl> + Optional < T > . none ) , all address types . <nl> + <nl> + * * * Owned * * . A value that exists independently of any other value and is <nl> + consumed exactly once along all paths through a function by either a <nl> + destroy_value ( actually destroying the value ) or by a consuming instruction <nl> + that rebinds the value in some manner ( e . x . : apply , casts , store ) . <nl> + <nl> + * * * Guaranteed * * . A value with a scoped lifetime whose liveness is dependent on <nl> + the lifetime of some other " base " owned or guaranteed value . Consumed by <nl> + end_borrow instructions . The " base " value is statically guaranteed to be live <nl> + at all of the value ' s paired end_borrow instructions . <nl> + <nl> + * * * Unowned * * . A value that is only guaranteed to be instantaneously valid and <nl> + must be copied before the value is used in an ` ` @ owned ` ` or ` ` @ guaranteed ` ` <nl> + context . This is needed both to model argument values with the ObjC unsafe <nl> + unowned argument convention and also to model the ownership resulting from <nl> + bitcasting a trivial type to a non - trivial type . This value should never be <nl> + consumed . <nl> + <nl> + We describe each of these semantics in more detail below . <nl> + <nl> + Value Ownership Kind <nl> + ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ <nl> + <nl> + Owned <nl> + ` ` ` ` ` <nl> + <nl> + Owned ownership models " move only " values . We require that each such value is <nl> + consumed exactly once along all program paths . The IR verifier will flag values <nl> + that are not consumed along a path as a leak and any double consumes as <nl> + use - after - frees . 
We model move operations via " forwarding uses " such as casts <nl> + and transforming terminators ( e . x . : ` switch_enum ` _ , ` checked_cast_br ` _ ) that <nl> + transform the input value , consuming it in the process , and producing a new <nl> + transformed owned value as a result . <nl> + <nl> + Putting this all together , one can view each owned SIL value as being <nl> + effectively a " move only value " except when explicitly copied by a <nl> + copy_value . This of course implies that ARC operations can be assumed to only <nl> + semantically effect the specific value that they are applied to / and / that each <nl> + ARC constraint is able to be verified independently for each owned SILValue <nl> + derived from the ARC object . As an example , consider the following Swift / SIL : : <nl> + <nl> + / / testcase . swift . <nl> + func doSomething ( x : Klass ) - > OtherKlass ? { <nl> + return x as ? OtherKlass <nl> + } <nl> + <nl> + / / testcase . sil . A possible SILGen lowering <nl> + sil [ ossa ] @ doSomething : $ @ convention ( thin ) ( @ guaranteed Klass ) - > ( ) { <nl> + bb0 ( % 0 : @ guaranteed Klass ) : <nl> + / / Definition of ' % 1 ' <nl> + % 1 = copy_value % 0 : $ Klass <nl> + <nl> + / / Consume ' % 1 ' . This means ' % 1 ' can no longer be used after this point . We <nl> + / / rebind ' % 1 ' in the destination blocks ( bbYes , bbNo ) . <nl> + checked_cast_br % 1 : $ Klass to $ OtherKlass , bbYes , bbNo <nl> + <nl> + bbYes ( % 2 : @ owned $ OtherKlass ) : / / On success , the checked_cast_br forwards <nl> + / / ' % 1 ' into ' % 2 ' after casting to OtherKlass . <nl> + <nl> + / / Forward ' % 2 ' into ' % 3 ' . ' % 2 ' can not be used past this point in the <nl> + / / function . <nl> + % 3 = enum $ Optional < OtherKlass > , case # Optional . some ! enumelt , % 2 : $ OtherKlass <nl> + <nl> + / / Forward ' % 3 ' into the branch . ' % 3 ' can not be used past this point . <nl> + br bbEpilog ( % 3 : $ Optional < OtherKlass > ) <nl> + <nl> + bbNo ( % 3 : @ owned $ Klass ) : / / On failure , since we consumed ' % 1 ' already , we <nl> + / / return the original ' % 1 ' as a new value ' % 3 ' <nl> + / / so we can use it below . <nl> + / / Actually destroy the underlying copy ( ` ` % 1 ` ` ) created by the copy_value <nl> + / / in bb0 . <nl> + destroy_value % 3 : $ Klass <nl> + <nl> + / / We want to return nil here . So we create a new non - payloaded enum and <nl> + / / pass it off to bbEpilog . <nl> + % 4 = enum $ Optional < OtherKlass > , case # Optional . none ! enumelt <nl> + br bbEpilog ( % 4 : $ Optional < OtherKlass > ) <nl> + <nl> + bbEpilog ( % 5 : @ owned $ Optional < OtherKlass > ) : <nl> + / / Consumes ' % 5 ' to return to caller . <nl> + return % 5 : $ Optional < OtherKlass > <nl> + } <nl> + <nl> + Notice how our individual copy ( ` ` % 1 ` ` ) threads its way through the IR using <nl> + forwarding of ` ` @ owned ` ` ownership . These forwarding operations partition the <nl> + lifetime of the result of the copy_value into a set of disjoint individual owned <nl> + lifetimes ( ` ` % 2 ` ` , ` ` % 3 ` ` , ` ` % 5 ` ` ) . <nl> + <nl> + Guaranteed <nl> + ` ` ` ` ` ` ` ` ` ` <nl> + <nl> + Guaranteed ownership models values that have a scoped dependent lifetime on a <nl> + " base value " with owned or guaranteed ownership . Due to this lifetime <nl> + dependence , the base value is required to be statically live over the entire <nl> + scope where the guaranteed value is valid . <nl> + <nl> + These explicit scopes are introduced into SIL by begin scope instructions ( e . x . 
: <nl> + ` begin_borrow ` _ , ` load_borrow ` _ ) that are paired with sets of jointly <nl> + post - dominating scope ending instructions ( e . x . : ` end_borrow ` _ ) : : <nl> + <nl> + sil [ ossa ] @ guaranteed_values : $ @ convention ( thin ) ( @ owned Klass ) - > ( ) { <nl> + bb0 ( % 0 : @ owned $ Klass ) : <nl> + % 1 = begin_borrow % 0 : $ Klass <nl> + cond_br . . . , bb1 , bb2 <nl> + <nl> + bb1 : <nl> + . . . <nl> + end_borrow % 1 : $ Klass <nl> + destroy_value % 0 : $ Klass <nl> + br bb3 <nl> + <nl> + bb2 : <nl> + . . . <nl> + end_borrow % 1 : $ Klass <nl> + destroy_value % 0 : $ Klass <nl> + br bb3 <nl> + <nl> + bb3 : <nl> + . . . <nl> + } <nl> + <nl> + Notice how the ` end_borrow ` _ allow for a SIL generator to communicate to <nl> + optimizations that they can never shrink the lifetime of ` ` % 0 ` ` by moving <nl> + ` destroy_value ` _ above ` ` % 1 ` ` . <nl> + <nl> + Values with guaranteed ownership follow a dataflow rule that states that <nl> + non - consuming " forwarding " uses of the guaranteed value are also guaranteed and <nl> + are recursively validated as being in the original values scope . This was a <nl> + choice we made to reduce idempotent scopes in the IR : : <nl> + <nl> + sil [ ossa ] @ get_first_elt : $ @ convention ( thin ) ( @ guaranteed ( String , String ) ) - > @ owned String { <nl> + bb0 ( % 0 : @ guaranteed $ ( String , String ) ) : <nl> + / / % 1 is validated as if it was apart of % 0 and does not need its own begin_borrow / end_borrow . <nl> + % 1 = tuple_extract % 0 : $ ( String , String ) <nl> + / / So this copy_value is treated as a use of % 0 . <nl> + % 2 = copy_value % 1 : $ String <nl> + return % 2 : $ String <nl> + } <nl> + <nl> + None <nl> + ` ` ` ` <nl> + <nl> + Values with None ownership are inert values that exist outside of the guarantees <nl> + of Ownership SSA . Some examples of such values are : <nl> + <nl> + * Trivially typed values such as : Int , Float , Double <nl> + * Non - payloaded non - trivial enums . <nl> + * Address types . <nl> + <nl> + Since values with none ownership exist outside of ownership SSA , they can be <nl> + used like normal SSA without violating ownership SSA invariants . This does not <nl> + mean that code does not potentially violate other SIL rules ( consider memory <nl> + lifetime invariants ) : : <nl> + <nl> + sil @ none_values : $ @ convention ( thin ) ( Int , @ in Klass ) - > Int { <nl> + bb0 ( % 0 : $ Int , % 1 : $ * Klass ) : <nl> + <nl> + / / % 0 , % 1 are normal SSA values that can be used anywhere in the function <nl> + / / without breaking Ownership SSA invariants . It could violate other <nl> + / / invariants if for instance , we load from % 1 after we destroy the object <nl> + / / there . <nl> + destroy_addr % 1 : $ * Klass <nl> + <nl> + / / If uncommented , this would violate memory lifetime invariants due to <nl> + / / the ` ` destroy_addr % 1 ` ` above . But this would not violate the rules of <nl> + / / Ownership SSA since addresses exist outside of the guarantees of <nl> + / / Ownership SSA . <nl> + / / <nl> + / / % 2 = load [ take ] % 1 : $ * Klass <nl> + <nl> + / / I can return this object without worrying about needing to copy since <nl> + / / none objects can be arbitrarily returned . <nl> + return % 0 : $ Int <nl> + } <nl> + <nl> + Unowned <nl> + ` ` ` ` ` ` ` <nl> + <nl> + This is a form of ownership that is used to model two different use cases : <nl> + <nl> + * Arguments of functions with ObjC convention . 
This convention requires the <nl> + callee to copy the value before using it ( preferably before any other code <nl> + runs ) . We do not model this flow sensitive property in SIL today , but we do <nl> + not allow for unowned values to be passed as owned or guaranteed values <nl> + without copying it first . <nl> + <nl> + * Values that are a conversion from a trivial value with None ownership to a <nl> + non - trivial value . As an example of this consider an unsafe bit cast of a <nl> + trivial pointer to a class . In that case , since we have no reason to assume <nl> + that the object will remain alive , we need to make a copy of the value . <nl> + <nl> Runtime Failure <nl> mmmmmmmmmmmmmmm <nl> <nl>
|
Merge pull request from gottesmm / pr - a7cb4e56fe39ba260ca4bb9090ec74f9b3537eaa
|
apple/swift
|
6687fca84d28ec8ba278e8791ccdbcb6e3b986db
|
2020-07-16T06:51:38Z
|
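The `@owned` rules documented in the record above amount to move-only value semantics: a value is consumed exactly once, and any additional lifetime must come from an explicit `copy_value` before the consuming use. The C++ sketch below mirrors the `stash_and_cast` example with `std::unique_ptr` as a rough analogy only; `Klass`, `upcastLike`, and the variable names are invented for illustration, and unlike the SIL verifier, a C++ compiler will not reject a use after the move.

```cpp
// Rough analogy: an @owned SIL value behaves like a move-only C++ value.
#include <iostream>
#include <memory>
#include <string>
#include <utility>

struct Klass {
    std::string name;
};

// A "consuming, forwarding use": ownership of the argument is forwarded into
// the returned value, like the upcast in the SIL example.
std::unique_ptr<Klass> upcastLike(std::unique_ptr<Klass> k) {
    return k;
}

int main() {
    auto kls1 = std::make_unique<Klass>(Klass{"instance"});  // definition (owned)

    // Explicit copy before a consuming use, analogous to copy_value.
    auto kls2 = std::make_unique<Klass>(*kls1);

    auto stashed = std::move(kls2);             // consuming use of kls2 (the "store")
    auto casted = upcastLike(std::move(kls1));  // consuming use of kls1 (the "upcast")

    // From here on kls1 and kls2 must not be used again; SIL enforces this
    // statically, C++ merely leaves them in a moved-from state.
    std::cout << stashed->name << " / " << casted->name << "\n";
    return 0;
}
```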
new file mode 100644 <nl> index 00000000000 . . 7db60bcce91 <nl> mmm / dev / null <nl> ppp b / docs / maintainers / pr - review - checklist . md <nl> <nl> + Vcpkg PR Checklist <nl> + = = = = = = = = = = = = = = = = = = = = = <nl> + Revision : 0 <nl> + <nl> + # # Overview <nl> + This document provides an annotated checklist which vcpkg team members use to apply the " reviewed " label on incoming pull requests . If a pull request violates any of these points , we may ask contributors to make necessary changes before we can merge the changeset . <nl> + <nl> + Feel free to create an issue or pull request if you feel that this checklist can be improved . Please increment the revision number when modifying the checklist content . <nl> + <nl> + # # Checklist <nl> + You can link any of these checklist items in a GitHub comment by copying the link address attached to each item code . <nl> + <nl> + < details id = c000001 > <nl> + < summary > < a href = # c000001 > c000001 < / a > : No deprecated helper functions are used < / summary > <nl> + <nl> + See our [ Maintainer Guidelines and Policies ] ( maintainer - guide . md # Avoid - deprecated - helper - functions ) for more information . <nl> + <nl> + < / details > <nl> + <nl> + < details id = c000002 > <nl> + < summary > < a href = # c000002 > c000002 < / a > : Control Version field is updated < / summary > <nl> + <nl> + See our [ Maintainer Guidelines and Policies ] ( maintainer - guide . md # versioning ) for more information . <nl> + <nl> + < / details > <nl> + <nl> + < details id = c000003 > <nl> + < summary > < a href = # c000003 > c000003 < / a > : New ports contain a Description field written in English < / summary > <nl> + <nl> + A description only one or a few sentences long is helpful . Consider using the library ' s official description from their ` README . md ` or similar if possible . Automatic translations are acceptable and we are happy to clean up translations to English for our contributors . <nl> + <nl> + See our [ CONTROL file documentation ] ( https : / / github . com / grdowns / vcpkg / blob / pr - checklist / docs / maintainers / control - files . md # description ) for more information . <nl> + <nl> + < / details > <nl> + <nl> + < details id = c000004 > <nl> + < summary > < a href = # c000004 > c000004 < / a > : No unnecessary comments are present in the changeset < / summary > <nl> + <nl> + See our [ Maintainer Guidelines and Policies ] ( maintainer - guide . md # Avoid - excessive - comments - in - portfiles ) for more information . <nl> + <nl> + < / details > <nl> + <nl> + < details id = c000005 > <nl> + < summary > < a href = # c000005 > c000005 < / a > : Downloaded archives are versioned if available < / summary > <nl> + <nl> + To ensure archive content does not change , downloaded archives should preferably have an associated version tag that can be incremented alongside the port version . <nl> + <nl> + < / details > <nl> + <nl> + < details id = c000006 > <nl> + < summary > < a href = # c000006 > c000006 < / a > : New ports pass CI checks for triplets that the library officially supports < / summary > <nl> + <nl> + To ensure vcpkg ports are of high quality , we ask that incoming ports support the official platforms for the library in question .
<nl> + <nl> + < / details > <nl> + <nl> + < details id = c000007 > <nl> + < summary > < a href = # c000007 > c000007 < / a > : Patches fix issues that are vcpkg - specific only < / summary > <nl> + <nl> + If possible , patches to the library source code should be upstreamed to the library ' s official repository . Opening up a pull request on the library ' s repository will help to improve the library for everyone , not just vcpkg users . <nl> + <nl> + < / details > <nl> + <nl> + < details id = c000008 > <nl> + < summary > < a href = # c000008 > c000008 < / a > : New ports download source code from the official source if available < / summary > <nl> + <nl> + To respect library authors and keep code secure , please have ports download source code from the official source . We may make exceptions if the original source code is not available and there is substantial community interest in maintaining the library in question . <nl> + <nl> + < / details > <nl> + <nl> + < details id = c000009 > <nl> + < summary > < a href = # c000009 > c000009 < / a > : Ports and port features are named correctly < / summary > <nl> + <nl> + For user accessibility , we prefer names of ports and port features to be intuitive and close to their counterparts in official sources and other package managers . If you are unsure about the naming of a port or port feature , we recommend checking repology . org , packages . ubuntu . com , or searching for additional information using a search engine . We can also help our contributors with this , so feel free to ask for naming suggestions if you are unsure . <nl> + <nl> + < / details > <nl> + <nl> + < details id = c000010 > <nl> + < summary > < a href = # c000010 > c000010 < / a > : Library targets are exported when appropriate < / summary > <nl> + <nl> + To provide users with a seamless build system integration , please be sure to export and provide a means of finding the library targets intended to be used downstream . Targets not meant to be exported should be marked private and not exported . <nl> + <nl> + < / details > <nl> + <nl> + < details id = c000011 > <nl> + < summary > < a href = # c000011 > c000011 < / a > : Ports do not use applications which modify the user ' s system < / summary > <nl> + <nl> + Ports should uphold vcpkg ' s contract of not modifying the user ' s system by avoiding applications which do so . Examples of these applications are ` sudo ` , ` apt ` , ` brew ` , or ` pip ` . Please use an alternative to these types of programs wherever possible . <nl> + <nl> + < / details > <nl> + <nl> + < details id = c000012 > <nl> + < summary > < a href = # c000012 > c000012 < / a > : Ports with system dependencies include an information message during installation < / summary > <nl> + <nl> + Some ports have library and tool dependencies that do not exist within vcpkg . For these missing dependencies , we ask that contributors add a message to the top of the port ' s ` portfile . cmake ` stating the missing dependencies and how to acquire them . We ask that the message be displayed before any major work is done to ensure that users can " early out " of the installation process as soon as possible in case they are missing the dependency . <nl> + <nl> + Example : <nl> + ` ` ` cmake <nl> + message ( <nl> + " $ { PORT } currently requires the following libraries from the system package manager : <nl> + autoconf libtool <nl> + These can be installed on Ubuntu systems via sudo apt install autoconf libtool " <nl> + ) <nl> + ` ` ` <nl> + <nl> + < / details > <nl>
|
[ vcpkg ] Initialize PR review checklist ( )
|
microsoft/vcpkg
|
beacecf48de4ad023be4f7b0e75d979d00f8b16c
|
2019-12-10T01:32:42Z
|
mmm a / torch / functional . py <nl> ppp b / torch / functional . py <nl> def tensordot ( a , b , dims = 2 ) : <nl> Args : <nl> a ( Tensor ) : Left tensor to contract <nl> b ( Tensor ) : Right tensor to contract <nl> - dims ( int or tuple of two lists of integers ) : number of dimensions to <nl> + dims ( int or Tuple [ List [ int ] ] containing two lists ) : number of dimensions to <nl> contract or explicit lists of dimensions for : attr : ` a ` and <nl> : attr : ` b ` respectively <nl> <nl> def tensordot ( a , b , dims = 2 ) : <nl> [ 3 . 3161 , 0 . 0704 , 5 . 0187 , - 0 . 4079 , - 4 . 3126 , 4 . 8744 ] , <nl> [ 0 . 8223 , 3 . 9445 , 3 . 2168 , - 0 . 2400 , 3 . 4117 , 1 . 7780 ] ] ) <nl> <nl> + > > > a = torch . randn ( 3 , 5 , 4 , 6 ) <nl> + > > > b = torch . randn ( 6 , 4 , 5 , 3 ) <nl> + > > > torch . tensordot ( a , b , dims = ( [ 2 , 1 , 3 ] , [ 1 , 2 , 0 ] ) ) <nl> + tensor ( [ [ 7 . 7193 , - 2 . 4867 , - 10 . 3204 ] , <nl> + [ 1 . 5513 , - 14 . 4737 , - 6 . 5113 ] , <nl> + [ - 0 . 2850 , 4 . 2573 , - 3 . 5997 ] ] ) <nl> " " " <nl> if not torch . jit . is_scripting ( ) : <nl> if ( type ( a ) is not Tensor or type ( b ) is not Tensor ) and has_torch_function ( ( a , b ) ) : <nl>
|
[ docs ] Add 3D reduction example to tensordot docs ( )
|
pytorch/pytorch
|
78f055272ce9d46f5aa84a4c7b626625495c9d80
|
2020-10-05T22:36:59Z
|
mmm a / db / dbcommands . cpp <nl> ppp b / db / dbcommands . cpp <nl> namespace mongo { <nl> } <nl> <nl> list < BSONObj > all ; <nl> - auto_ptr < DBClientCursor > i = db . getIndexes ( toDeleteNs ) ; <nl> + auto_ptr < DBClientCursor > i = db . query ( dbname + " . system . indexes " , BSON ( " ns " < < toDeleteNs ) , 0 , 0 , 0 , QueryOption_SlaveOk ) ; <nl> BSONObjBuilder b ; <nl> while ( i - > more ( ) ) { <nl> BSONObj o = i - > next ( ) . removeField ( " v " ) . getOwned ( ) ; <nl> namespace mongo { <nl> <nl> for ( list < BSONObj > : : iterator i = all . begin ( ) ; i ! = all . end ( ) ; i + + ) { <nl> BSONObj o = * i ; <nl> + log ( 1 ) < < " reIndex ns : " < < toDeleteNs < < " index : " < < o < < endl ; <nl> theDataFileMgr . insertWithObjMod ( Namespace ( toDeleteNs . c_str ( ) ) . getSisterNS ( " system . indexes " ) . c_str ( ) , o , true ) ; <nl> } <nl> <nl> mmm a / shell / mongo_vstudio . cpp <nl> ppp b / shell / mongo_vstudio . cpp <nl> const StringData _jscode_raw_utils_sh = <nl> " return res ; \ n " <nl> " } \ n " <nl> " \ n " <nl> + " \ n " <nl> + " sh . _dataFormat = function ( bytes ) { \ n " <nl> + " if ( bytes < 1024 ) return Math . floor ( bytes ) + \ " b \ " \ n " <nl> + " if ( bytes < 1024 * 1024 ) return Math . floor ( bytes / 1024 ) + \ " kb \ " \ n " <nl> + " if ( bytes < 1024 * 1024 * 1024 ) return Math . floor ( ( Math . floor ( bytes / 1024 ) / 1024 ) * 100 ) / 100 + \ " Mb \ " \ n " <nl> + " return Math . floor ( ( Math . floor ( bytes / ( 1024 * 1024 ) ) / 1024 ) * 100 ) / 100 + \ " Gb \ " \ n " <nl> + " } \ n " <nl> + " \ n " <nl> + " sh . _collRE = function ( coll ) { \ n " <nl> + " return RegExp ( \ " ^ \ " + ( coll + \ " \ " ) . replace ( / \ \ . / g , \ " \ \ \ \ . \ " ) + \ " - . * \ " ) \ n " <nl> + " } \ n " <nl> + " \ n " <nl> + " sh . _pchunk = function ( chunk ) { \ n " <nl> + " return \ " [ \ " + tojson ( chunk . min ) + \ " - > \ " + tojson ( chunk . max ) + \ " ] \ " \ n " <nl> + " } \ n " <nl> + " \ n " <nl> " sh . help = function ( ) { \ n " <nl> " print ( \ " \ \ tsh . addShard ( host ) server : port OR setname / server : port \ " ) \ n " <nl> " print ( \ " \ \ tsh . enableSharding ( dbname ) enables sharding on the database dbname \ " ) \ n " <nl> const StringData _jscode_raw_collection = <nl> " print ( \ " \ \ tdb . \ " + shortName + \ " . update ( query , object [ , upsert_bool , multi_bool ] ) \ " ) ; \ n " <nl> " print ( \ " \ \ tdb . \ " + shortName + \ " . validate ( < full > ) - SLOW \ " ) ; ; \ n " <nl> " print ( \ " \ \ tdb . \ " + shortName + \ " . getShardVersion ( ) - only for use with sharding \ " ) ; \ n " <nl> + " print ( \ " \ \ tdb . \ " + shortName + \ " . getShardDistribution ( ) - prints statistics about data distribution in the cluster \ " ) ; \ n " <nl> + " print ( \ " \ \ tdb . \ " + shortName + \ " . getSplitKeysForChunks ( < maxChunkSize > ) - calculates split points over all chunks and returns splitter function \ " ) ; \ n " <nl> " return __magicNoPrint ; \ n " <nl> " } \ n " <nl> " \ n " <nl> const StringData _jscode_raw_collection = <nl> " } \ n " <nl> " return ret ; \ n " <nl> " } \ n " <nl> + " \ n " <nl> + " \ n " <nl> + " / / Sharding additions \ n " <nl> + " \ n " <nl> + " / * \ n " <nl> + " Usage : \ n " <nl> + " \ n " <nl> + " mongo < mongos > \ n " <nl> + " > load ( ' path - to - file / shardingAdditions . js ' ) \ n " <nl> + " Loading custom sharding extensions . . . \ n " <nl> + " true \ n " <nl> + " \ n " <nl> + " > var collection = db . getMongo ( ) . getCollection ( \ " foo . 
bar \ " ) \ n " <nl> + " > collection . getShardDistribution ( ) / / prints statistics related to the collection ' s data distribution \ n " <nl> + " \ n " <nl> + " > collection . getSplitKeysForChunks ( ) / / generates split points for all chunks in the collection , based on the \ n " <nl> + " / / default maxChunkSize or alternately a specified chunk size \ n " <nl> + " > collection . getSplitKeysForChunks ( 10 ) / / Mb \ n " <nl> + " \ n " <nl> + " > var splitter = collection . getSplitKeysForChunks ( ) / / by default , the chunks are not split , the keys are just \ n " <nl> + " / / found . A splitter function is returned which will actually \ n " <nl> + " / / do the splits . \ n " <nl> + " \ n " <nl> + " > splitter ( ) / / ! Actually executes the splits on the cluster ! \ n " <nl> + " \ n " <nl> + " * / \ n " <nl> + " \ n " <nl> + " DBCollection . prototype . getShardDistribution = function ( ) { \ n " <nl> + " \ n " <nl> + " var stats = this . stats ( ) \ n " <nl> + " \ n " <nl> + " if ( ! stats . sharded ) { \ n " <nl> + " print ( \ " Collection \ " + this + \ " is not sharded . \ " ) \ n " <nl> + " return \ n " <nl> + " } \ n " <nl> + " \ n " <nl> + " var config = this . getMongo ( ) . getDB ( \ " config \ " ) \ n " <nl> + " \ n " <nl> + " var numChunks = 0 \ n " <nl> + " \ n " <nl> + " for ( var shard in stats . shards ) { \ n " <nl> + " \ n " <nl> + " var shardDoc = config . shards . findOne ( { _id : shard } ) \ n " <nl> + " \ n " <nl> + " print ( \ " \ \ nShard \ " + shard + \ " at \ " + shardDoc . host ) \ n " <nl> + " \ n " <nl> + " var shardStats = stats . shards [ shard ] \ n " <nl> + " \ n " <nl> + " var chunks = config . chunks . find ( { _id : sh . _collRE ( coll ) , shard : shard } ) . toArray ( ) \ n " <nl> + " \ n " <nl> + " numChunks + = chunks . length \ n " <nl> + " \ n " <nl> + " var estChunkData = shardStats . size / chunks . length \ n " <nl> + " var estChunkCount = Math . floor ( shardStats . count / chunks . length ) \ n " <nl> + " \ n " <nl> + " print ( \ " data : \ " + sh . _dataFormat ( shardStats . size ) + \ n " <nl> + " \ " docs : \ " + shardStats . count + \ n " <nl> + " \ " chunks : \ " + chunks . length ) \ n " <nl> + " print ( \ " estimated data per chunk : \ " + sh . _dataFormat ( estChunkData ) ) \ n " <nl> + " print ( \ " estimated docs per chunk : \ " + estChunkCount ) \ n " <nl> + " \ n " <nl> + " } \ n " <nl> + " \ n " <nl> + " print ( \ " \ \ nTotals \ " ) \ n " <nl> + " print ( \ " data : \ " + sh . _dataFormat ( stats . size ) + \ n " <nl> + " \ " docs : \ " + stats . count + \ n " <nl> + " \ " chunks : \ " + numChunks ) \ n " <nl> + " for ( var shard in stats . shards ) { \ n " <nl> + " \ n " <nl> + " var shardStats = stats . shards [ shard ] \ n " <nl> + " \ n " <nl> + " var estDataPercent = Math . floor ( shardStats . size / stats . size * 100 ) / 100 \ n " <nl> + " var estDocPercent = Math . floor ( shardStats . count / stats . count * 100 ) / 100 \ n " <nl> + " \ n " <nl> + " print ( \ " Shard \ " + shard + \ " data : \ " + estDataPercent + \ " % , docs : \ " + estDocPercent + \ " % \ " + \ n " <nl> + " \ " , avg obj size : \ " + sh . _dataFormat ( stats . shards [ shard ] . avgObjSize ) ) \ n " <nl> + " } \ n " <nl> + " \ n " <nl> + " print ( \ " \ \ n \ " ) \ n " <nl> + " \ n " <nl> + " } \ n " <nl> + " \ n " <nl> + " \ n " <nl> + " DBCollection . prototype . getSplitKeysForChunks = function ( chunkSize ) { \ n " <nl> + " \ n " <nl> + " var stats = this . stats ( ) \ n " <nl> + " \ n " <nl> + " if ( ! stats . 
sharded ) { \ n " <nl> + " print ( \ " Collection \ " + this + \ " is not sharded . \ " ) \ n " <nl> + " return \ n " <nl> + " } \ n " <nl> + " \ n " <nl> + " var config = this . getMongo ( ) . getDB ( \ " config \ " ) \ n " <nl> + " \ n " <nl> + " if ( ! chunkSize ) { \ n " <nl> + " chunkSize = config . settings . findOne ( { _id : \ " chunksize \ " } ) . value \ n " <nl> + " print ( \ " Chunk size not set , using default of \ " + chunkSize + \ " Mb \ " ) \ n " <nl> + " } \ n " <nl> + " else { \ n " <nl> + " print ( \ " Using chunk size of \ " + chunkSize + \ " Mb \ " ) \ n " <nl> + " } \ n " <nl> + " \ n " <nl> + " var shardDocs = config . shards . find ( ) . toArray ( ) \ n " <nl> + " \ n " <nl> + " var allSplitPoints = { } \ n " <nl> + " var numSplits = 0 \ n " <nl> + " \ n " <nl> + " for ( var i = 0 ; i < shardDocs . length ; i + + ) { \ n " <nl> + " \ n " <nl> + " var shardDoc = shardDocs [ i ] \ n " <nl> + " var shard = shardDoc . _id \ n " <nl> + " var host = shardDoc . host \ n " <nl> + " var sconn = new Mongo ( host ) \ n " <nl> + " \ n " <nl> + " var chunks = config . chunks . find ( { _id : sh . _collRE ( this ) , shard : shard } ) . toArray ( ) \ n " <nl> + " \ n " <nl> + " print ( \ " \ \ nGetting split points for chunks on shard \ " + shard + \ " at \ " + host ) \ n " <nl> + " \ n " <nl> + " var splitPoints = [ ] \ n " <nl> + " \ n " <nl> + " for ( var j = 0 ; j < chunks . length ; j + + ) { \ n " <nl> + " var chunk = chunks [ j ] \ n " <nl> + " var result = sconn . getDB ( \ " admin \ " ) . runCommand ( { splitVector : this + \ " \ " , min : chunk . min , max : chunk . max , maxChunkSize : chunkSize } ) \ n " <nl> + " if ( ! result . ok ) { \ n " <nl> + " print ( \ " Had trouble getting split keys for chunk \ " + sh . _pchunk ( chunk ) + \ " : \ \ n \ " ) \ n " <nl> + " printjson ( result ) \ n " <nl> + " } \ n " <nl> + " else { \ n " <nl> + " splitPoints = splitPoints . concat ( result . splitKeys ) \ n " <nl> + " \ n " <nl> + " if ( result . splitKeys . length > 0 ) \ n " <nl> + " print ( \ " Added \ " + result . splitKeys . length + \ " split points for chunk \ " + sh . _pchunk ( chunk ) ) \ n " <nl> + " } \ n " <nl> + " } \ n " <nl> + " \ n " <nl> + " print ( \ " Total splits for shard \ " + shard + \ " : \ " + splitPoints . length ) \ n " <nl> + " \ n " <nl> + " numSplits + = splitPoints . length \ n " <nl> + " allSplitPoints [ shard ] = splitPoints \ n " <nl> + " \ n " <nl> + " } \ n " <nl> + " \ n " <nl> + " / / Get most recent migration \ n " <nl> + " var migration = config . changelog . find ( { what : / ^ move . * / } ) . sort ( { time : - 1 } ) . limit ( 1 ) . toArray ( ) \ n " <nl> + " if ( migration . length = = 0 ) \ n " <nl> + " print ( \ " \ \ nNo migrations found in changelog . \ " ) \ n " <nl> + " else { \ n " <nl> + " migration = migration [ 0 ] \ n " <nl> + " print ( \ " \ \ nMost recent migration activity was on \ " + migration . ns + \ " at \ " + migration . time ) \ n " <nl> + " } \ n " <nl> + " \ n " <nl> + " var admin = this . getMongo ( ) . getDB ( \ " admin \ " ) \ n " <nl> + " var coll = this \ n " <nl> + " var splitFunction = function ( ) { \ n " <nl> + " \ n " <nl> + " / / Turn off the balancer , just to be safe \ n " <nl> + " print ( \ " Turning off balancer . . . \ " ) \ n " <nl> + " config . settings . update ( { _id : \ " balancer \ " } , { $ set : { stopped : true } } , true ) \ n " <nl> + " print ( \ " Sleeping for 30s to allow balancers to detect change . To be extra safe , check config . 
changelog \ " + \ n " <nl> + " \ " for recent migrations . \ " ) \ n " <nl> + " sleep ( 30000 ) \ n " <nl> + " \ n " <nl> + " for ( shard in allSplitPoints ) { \ n " <nl> + " for ( var i = 0 ; i < allSplitPoints [ shard ] . length ; i + + ) { \ n " <nl> + " var splitKey = allSplitPoints [ shard ] [ i ] \ n " <nl> + " print ( \ " Splitting at \ " + tojson ( splitKey ) ) \ n " <nl> + " printjson ( admin . runCommand ( { split : coll + \ " \ " , middle : splitKey } ) ) \ n " <nl> + " } \ n " <nl> + " } \ n " <nl> + " \ n " <nl> + " print ( \ " Turning the balancer back on . \ " ) \ n " <nl> + " config . settings . update ( { _id : \ " balancer \ " } , { $ set : { stopped : false } } ) \ n " <nl> + " sleep ( 1 ) \ n " <nl> + " } \ n " <nl> + " \ n " <nl> + " print ( \ " \ \ nGenerated \ " + numSplits + \ " split keys , run output function to perform splits . \ \ n \ " + \ n " <nl> + " \ " ex : \ \ n \ " + \ n " <nl> + " \ " > var splitter = < collection > . getSplitKeysForChunks ( ) \ \ n \ " + \ n " <nl> + " \ " > splitter ( ) / / Execute splits on cluster ! \ \ n \ " ) \ n " <nl> + " \ n " <nl> + " return splitFunction \ n " <nl> + " \ n " <nl> + " } \ n " <nl> + " \ n " <nl> + " \ n " <nl> + " \ n " <nl> + " \ n " <nl> ; <nl> extern const JSFile collection ; <nl> const JSFile collection = { " shell / collection . js " , _jscode_raw_collection } ; <nl>
|
fix reIndex on secondaries SERVER - 3866
|
mongodb/mongo
|
b88b810d28fc0060e3405702dcb492dfe9355af2
|
2011-09-14T21:57:47Z
|
mmm a / . gitignore <nl> ppp b / . gitignore <nl> ipch / <nl> / Mac / <nl> / Telegram / * . xcodeproj / xcuserdata / <nl> / Telegram / * . xcodeproj / project . xcworkspace / <nl> + <nl> + / Telegram / * . user . * <nl> + / Linux / <nl> + / Telegram / Makefile <nl> mmm a / Telegram / MetaEmoji . pro <nl> ppp b / Telegram / MetaEmoji . pro <nl> QT + = core <nl> <nl> CONFIG ( debug , debug | release ) { <nl> DEFINES + = _DEBUG <nl> - OBJECTS_DIR = . / . . / Mac / DebugIntermediateEmoji <nl> + OBJECTS_DIR = . / . . / Linux / DebugIntermediateEmoji <nl> MOC_DIR = . / GeneratedFiles / Debug <nl> - DESTDIR = . / . . / Mac / DebugEmoji <nl> + DESTDIR = . / . . / Linux / DebugEmoji <nl> } <nl> CONFIG ( release , debug | release ) { <nl> - OBJECTS_DIR = . / . . / Mac / ReleaseIntermediateEmoji <nl> + OBJECTS_DIR = . / . . / Linux / ReleaseIntermediateEmoji <nl> MOC_DIR = . / GeneratedFiles / Release <nl> - DESTDIR = . / . . / Mac / ReleaseEmoji <nl> + DESTDIR = . / . . / Linux / ReleaseEmoji <nl> } <nl> <nl> macx { <nl> mmm a / Telegram / MetaLang . pro <nl> ppp b / Telegram / MetaLang . pro <nl> T + = core <nl> <nl> CONFIG ( debug , debug | release ) { <nl> DEFINES + = _DEBUG <nl> - OBJECTS_DIR = . / . . / Mac / DebugIntermediateLang <nl> + OBJECTS_DIR = . / . . / Linux / DebugIntermediateLang <nl> MOC_DIR = . / GeneratedFiles / Debug <nl> - DESTDIR = . / . . / Mac / DebugLang <nl> + DESTDIR = . / . . / Linux / DebugLang <nl> } <nl> CONFIG ( release , debug | release ) { <nl> - OBJECTS_DIR = . / . . / Mac / ReleaseIntermediateLang <nl> + OBJECTS_DIR = . / . . / Linux / ReleaseIntermediateLang <nl> MOC_DIR = . / GeneratedFiles / Release <nl> - DESTDIR = . / . . / Mac / ReleaseLang <nl> + DESTDIR = . / . . / Linux / ReleaseLang <nl> } <nl> <nl> macx { <nl> mmm a / Telegram / MetaStyle . pro <nl> ppp b / Telegram / MetaStyle . pro <nl> QT + = core <nl> <nl> CONFIG ( debug , debug | release ) { <nl> DEFINES + = _DEBUG <nl> - OBJECTS_DIR = . / . . / Mac / DebugIntermediateStyle <nl> + OBJECTS_DIR = . / . . / Linux / DebugIntermediateStyle <nl> MOC_DIR = . / GeneratedFiles / Debug <nl> - DESTDIR = . / . . / Mac / DebugStyle <nl> + DESTDIR = . / . . / Linux / DebugStyle <nl> } <nl> CONFIG ( release , debug | release ) { <nl> - OBJECTS_DIR = . / . . / Mac / ReleaseIntermediateStyle <nl> + OBJECTS_DIR = . / . . / Linux / ReleaseIntermediateStyle <nl> MOC_DIR = . / GeneratedFiles / Release <nl> - DESTDIR = . / . . / Mac / ReleaseStyle <nl> + DESTDIR = . / . . / Linux / ReleaseStyle <nl> } <nl> <nl> macx { <nl> mmm a / Telegram / Packer . pro <nl> ppp b / Telegram / Packer . pro <nl> QT + = core <nl> <nl> CONFIG ( debug , debug | release ) { <nl> DEFINES + = _DEBUG <nl> - OBJECTS_DIR = . / . . / Mac / DebugIntermediatePacker <nl> + OBJECTS_DIR = . / . . / Linux / DebugIntermediatePacker <nl> MOC_DIR = . / GeneratedFiles / Debug <nl> - DESTDIR = . / . . / Mac / DebugPacker <nl> + DESTDIR = . / . . / Linux / DebugPacker <nl> } <nl> CONFIG ( release , debug | release ) { <nl> - OBJECTS_DIR = . / . . / Mac / ReleaseIntermediatePacker <nl> + OBJECTS_DIR = . / . . / Linux / ReleaseIntermediatePacker <nl> MOC_DIR = . / GeneratedFiles / Release <nl> - DESTDIR = . / . . / Mac / ReleasePacker <nl> + DESTDIR = . / . . / Linux / ReleasePacker <nl> } <nl> <nl> macx { <nl> mmm a / Telegram / SourceFiles / app . cpp <nl> ppp b / Telegram / SourceFiles / app . cpp <nl> namespace App { <nl> case mtpc_userStatusOnline : data - > onlineTill = status - > c_userStatusOnline ( ) . vexpires . 
v ; break ; <nl> } <nl> <nl> - if ( data - > contact < 0 & & ! data - > phone . isEmpty ( ) & & ( data - > id & 0xFFFFFFFF ) ! = MTP : : authedId ( ) ) { <nl> + if ( data - > contact < 0 & & ! data - > phone . isEmpty ( ) & & int32 ( data - > id & 0xFFFFFFFF ) ! = MTP : : authedId ( ) ) { <nl> data - > contact = 0 ; <nl> } <nl> if ( data - > contact > 0 & & ! wasContact ) { <nl> mmm a / Telegram / SourceFiles / boxes / aboutbox . cpp <nl> ppp b / Telegram / SourceFiles / boxes / aboutbox . cpp <nl> void AboutBox : : paintEvent ( QPaintEvent * e ) { <nl> <nl> p . setPen ( st : : black - > p ) ; <nl> p . setFont ( st : : aboutHeaderFont - > f ) ; <nl> - p . drawText ( ( _width - ( _headerWidth + _subheaderWidth ) ) / 2 , st : : aboutHeaderTop + st : : aboutHeaderFont - > ascent , qsl ( " Telegram " ) ) ; <nl> + p . drawText ( ( _width - ( _headerWidth + _subheaderWidth ) ) / 2 , st : : aboutHeaderTop + st : : aboutHeaderFont - > ascent , qsl ( " Telegram " ) ) ; <nl> <nl> p . setFont ( st : : aboutSubheaderFont - > f ) ; <nl> - p . drawText ( ( _width - ( _headerWidth + _subheaderWidth ) ) / 2 + _headerWidth , st : : aboutHeaderTop + st : : aboutSubheaderFont - > ascent , qsl ( " Desktop " ) ) ; <nl> + p . drawText ( ( _width - ( _headerWidth + _subheaderWidth ) ) / 2 + _headerWidth , st : : aboutHeaderTop + st : : aboutSubheaderFont - > ascent , qsl ( " Desktop " ) ) ; <nl> <nl> p . setFont ( st : : aboutVersionFont - > f ) ; <nl> p . setPen ( st : : aboutVersionColor - > p ) ; <nl> - p . drawText ( ( _width - _versionWidth ) / 2 , st : : aboutVersionTop + st : : aboutVersionFont - > ascent , _versionText ) ; <nl> + p . drawText ( ( _width - _versionWidth ) / 2 , st : : aboutVersionTop + st : : aboutVersionFont - > ascent , _versionText ) ; <nl> } <nl> } else { <nl> p . setOpacity ( a_opacity . current ( ) ) ; <nl> mmm a / Telegram / SourceFiles / gui / style_core . cpp <nl> ppp b / Telegram / SourceFiles / gui / style_core . cpp <nl> namespace style { <nl> modified [ _flags ] = Font ( this ) ; <nl> <nl> f . setPixelSize ( size ) ; <nl> - f . setBold ( _flags & FontBold ) ; <nl> + f . setBold ( _flags & FontBold ) ; <nl> f . setItalic ( _flags & FontItalic ) ; <nl> f . setUnderline ( _flags & FontUnderline ) ; <nl> f . setStyleStrategy ( QFont : : PreferQuality ) ; <nl> mmm a / Telegram / SourceFiles / gui / text . cpp <nl> ppp b / Telegram / SourceFiles / gui / text . cpp <nl> class TextPainter { <nl> break ; <nl> } <nl> } / * * / <nl> - for ( ; _lineEnd > _lineStart + 1 ; - - _lineEnd ) { <nl> + for ( ; _lineEnd > _lineStart ; - - _lineEnd ) { <nl> QChar ch = _t - > _text . at ( _lineEnd - 1 ) ; <nl> - if ( ch ! = QChar : : Space & & ch ! = QChar : : LineFeed ) { <nl> + if ( ( ch ! = QChar : : Space | | _lineEnd = = _lineStart + 1 ) & & ch ! = QChar : : LineFeed ) { <nl> break ; <nl> } <nl> } / * * / <nl> mmm a / Telegram / SourceFiles / logs . cpp <nl> ppp b / Telegram / SourceFiles / logs . cpp <nl> Full license : https : / / github . com / telegramdesktop / tdesktop / blob / master / LICENSE <nl> Copyright ( c ) 2014 John Preston , https : / / tdesktop . com <nl> * / <nl> # include " stdafx . h " <nl> + # include < iostream > <nl> # include " pspecific . h " <nl> <nl> namespace { <nl> void debugLogWrite ( const char * file , int32 line , const QString & v ) { <nl> OutputDebugString ( reinterpret_cast < const wchar_t * > ( msg . 
utf16 ( ) ) ) ; <nl> # elif defined Q_OS_MAC <nl> objc_outputDebugString ( msg ) ; <nl> + # elif defined Q_OS_LINUX & & defined _DEBUG <nl> + std : : cout < < msg . toUtf8 ( ) . constData ( ) ; <nl> # endif <nl> } <nl> } <nl> mmm a / Telegram / SourceFiles / mtproto / mtp . cpp <nl> ppp b / Telegram / SourceFiles / mtproto / mtp . cpp <nl> namespace { <nl> <nl> bool onErrorDefault ( mtpRequestId requestId , const RPCError & error ) { <nl> const QString & err ( error . type ( ) ) ; <nl> - QRegularExpressionMatch m ; ; <nl> + QRegularExpressionMatch m ; <nl> if ( ( m = QRegularExpression ( " ^ ( FILE | PHONE | NETWORK | USER ) _MIGRATE_ ( \ \ d + ) $ " ) . match ( err ) ) . hasMatch ( ) ) { <nl> if ( ! requestId ) return false ; <nl> <nl> mmm a / Telegram / SourceFiles / mtproto / mtpConnection . cpp <nl> ppp b / Telegram / SourceFiles / mtproto / mtpConnection . cpp <nl> void MTProtoConnectionPrivate : : authKeyCreated ( ) { <nl> <nl> void MTProtoConnectionPrivate : : clearAuthKeyData ( ) { <nl> if ( authKeyData ) { <nl> - # ifdef Q_OS_WIN <nl> - SecureZeroMemory ( authKeyData , sizeof ( AuthKeyCreateData ) ) ; <nl> + # ifdef Q_OS_WIN / / TODO <nl> + / / SecureZeroMemory ( authKeyData , sizeof ( AuthKeyCreateData ) ) ; <nl> # else <nl> - memset ( authKeyData , 0 , sizeof ( AuthKeyCreateData ) ) ; <nl> + / / memset ( authKeyData , 0 , sizeof ( AuthKeyCreateData ) ) ; <nl> # endif <nl> - delete authKeyData ; <nl> + delete authKeyData ; <nl> authKeyData = 0 ; <nl> } <nl> } <nl> mmm a / Telegram / SourceFiles / pspecific . h <nl> ppp b / Telegram / SourceFiles / pspecific . h <nl> Copyright ( c ) 2014 John Preston , https : / / tdesktop . com <nl> # endif <nl> <nl> # ifdef Q_OS_LINUX <nl> - <nl> + # include " pspecific_linux . h " <nl> # endif <nl> <nl> # ifdef Q_OS_WIN <nl> new file mode 100644 <nl> index 00000000000 . . 0efa1335050 <nl> mmm / dev / null <nl> ppp b / Telegram / SourceFiles / pspecific_linux . cpp <nl> <nl> + / * <nl> + This file is part of Telegram Desktop , <nl> + an unofficial desktop messaging app , see https : / / telegram . org <nl> + <nl> + Telegram Desktop is free software : you can redistribute it and / or modify <nl> + it under the terms of the GNU General Public License as published by <nl> + the Free Software Foundation , either version 3 of the License , or <nl> + ( at your option ) any later version . <nl> + <nl> + It is distributed in the hope that it will be useful , <nl> + but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + GNU General Public License for more details . <nl> + <nl> + Full license : https : / / github . com / telegramdesktop / tdesktop / blob / master / LICENSE <nl> + Copyright ( c ) 2014 John Preston , https : / / tdesktop . com <nl> + * / <nl> + # include " stdafx . h " <nl> + # include " pspecific . h " <nl> + <nl> + # include " lang . h " <nl> + # include " application . h " <nl> + # include " mainwidget . h " <nl> + <nl> + namespace { <nl> + bool frameless = true ; <nl> + bool finished = true ; <nl> + <nl> + class _PsEventFilter : public QAbstractNativeEventFilter { <nl> + public : <nl> + _PsEventFilter ( ) { <nl> + } <nl> + <nl> + bool nativeEventFilter ( const QByteArray & eventType , void * message , long * result ) { <nl> + Window * wnd = Application : : wnd ( ) ; <nl> + if ( ! 
wnd ) return false ; <nl> + <nl> + return false ; <nl> + } <nl> + } ; <nl> + _PsEventFilter * _psEventFilter = 0 ; <nl> + <nl> + } ; <nl> + <nl> + PsMainWindow : : PsMainWindow ( QWidget * parent ) : QMainWindow ( parent ) , <nl> + posInited ( false ) , trayIcon ( 0 ) , trayIconMenu ( 0 ) , icon256 ( qsl ( " : / gui / art / iconround256 . png " ) ) { <nl> + connect ( & psIdleTimer , SIGNAL ( timeout ( ) ) , this , SLOT ( psIdleTimeout ( ) ) ) ; <nl> + psIdleTimer . setSingleShot ( false ) ; <nl> + connect ( & notifyWaitTimer , SIGNAL ( timeout ( ) ) , this , SLOT ( psNotifyFire ( ) ) ) ; <nl> + notifyWaitTimer . setSingleShot ( true ) ; <nl> + } <nl> + <nl> + void PsMainWindow : : psNotIdle ( ) const { <nl> + psIdleTimer . stop ( ) ; <nl> + if ( psIdle ) { <nl> + psIdle = false ; <nl> + if ( App : : main ( ) ) App : : main ( ) - > setOnline ( ) ; <nl> + if ( App : : wnd ( ) ) App : : wnd ( ) - > checkHistoryActivation ( ) ; <nl> + } <nl> + } <nl> + <nl> + void PsMainWindow : : psIdleTimeout ( ) { <nl> + int64 idleTime = 0 ; / / objc_idleTime ( ) ; <nl> + if ( idleTime > = 0 ) { <nl> + if ( idleTime < = IdleMsecs ) { <nl> + psNotIdle ( ) ; <nl> + } <nl> + } else { / / error <nl> + psNotIdle ( ) ; <nl> + } <nl> + } <nl> + <nl> + bool PsMainWindow : : psIsOnline ( int state ) const { <nl> + if ( state < 0 ) state = this - > windowState ( ) ; <nl> + if ( state & Qt : : WindowMinimized ) { <nl> + return false ; <nl> + } else if ( ! isVisible ( ) ) { <nl> + return false ; <nl> + } <nl> + int64 idleTime = 0 ; / / objc_idleTime ( ) ; <nl> + LOG ( ( " App Info : idle time % 1 " ) . arg ( idleTime ) ) ; <nl> + if ( idleTime > = 0 ) { <nl> + if ( idleTime > IdleMsecs ) { <nl> + if ( ! psIdle ) { <nl> + psIdle = true ; <nl> + psIdleTimer . start ( 900 ) ; <nl> + } <nl> + return false ; <nl> + } else { <nl> + psNotIdle ( ) ; <nl> + } <nl> + } else { / / error <nl> + psNotIdle ( ) ; <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + bool PsMainWindow : : psIsActive ( int state ) const { <nl> + if ( state < 0 ) state = this - > windowState ( ) ; <nl> + return isActiveWindow ( ) & & isVisible ( ) & & ! ( state & Qt : : WindowMinimized ) & & ! psIdle ; <nl> + } <nl> + <nl> + void PsMainWindow : : psRefreshTaskbarIcon ( ) { <nl> + } <nl> + <nl> + void PsMainWindow : : psUpdateWorkmode ( ) { <nl> + } <nl> + <nl> + void PsMainWindow : : psUpdateCounter ( ) { <nl> + int32 counter = App : : histories ( ) . unreadFull ; <nl> + <nl> + setWindowTitle ( ( counter > 0 ) ? qsl ( " Telegram ( % 1 ) " ) . arg ( counter ) : qsl ( " Telegram " ) ) ; <nl> + <nl> + QString cnt = ( counter < 1000 ) ? QString ( " % 1 " ) . arg ( counter ) : QString ( " . . % 1 " ) . arg ( counter % 100 , 2 , 10 , QChar ( ' 0 ' ) ) ; <nl> + / / _private . setWindowBadge ( counter ? cnt : QString ( ) ) ; <nl> + } <nl> + <nl> + void PsMainWindow : : psInitSize ( ) { <nl> + setMinimumWidth ( st : : wndMinWidth ) ; <nl> + setMinimumHeight ( st : : wndMinHeight ) ; <nl> + <nl> + TWindowPos pos ( cWindowPos ( ) ) ; <nl> + QRect avail ( QDesktopWidget ( ) . availableGeometry ( ) ) ; <nl> + bool maximized = false ; <nl> + QRect geom ( avail . x ( ) + ( avail . width ( ) - st : : wndDefWidth ) / 2 , avail . y ( ) + ( avail . height ( ) - st : : wndDefHeight ) / 2 , st : : wndDefWidth , st : : wndDefHeight ) ; <nl> + if ( pos . w & & pos . h ) { <nl> + QList < QScreen * > screens = App : : app ( ) - > screens ( ) ; <nl> + for ( QList < QScreen * > : : const_iterator i = screens . cbegin ( ) , e = screens . cend ( ) ; i ! 
= e ; + + i ) { <nl> + QByteArray name = ( * i ) - > name ( ) . toUtf8 ( ) ; <nl> + if ( pos . moncrc = = hashCrc32 ( name . constData ( ) , name . size ( ) ) ) { <nl> + QRect screen ( ( * i ) - > geometry ( ) ) ; <nl> + int32 w = screen . width ( ) , h = screen . height ( ) ; <nl> + if ( w > = st : : wndMinWidth & & h > = st : : wndMinHeight ) { <nl> + if ( pos . w > w ) pos . w = w ; <nl> + if ( pos . h > h ) pos . h = h ; <nl> + pos . x + = screen . x ( ) ; <nl> + pos . y + = screen . y ( ) ; <nl> + if ( pos . x < screen . x ( ) + screen . width ( ) - 10 & & pos . y < screen . y ( ) + screen . height ( ) - 10 ) { <nl> + geom = QRect ( pos . x , pos . y , pos . w , pos . h ) ; <nl> + } <nl> + } <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + if ( pos . y < 0 ) pos . y = 0 ; <nl> + maximized = pos . maximized ; <nl> + } <nl> + setGeometry ( geom ) ; <nl> + } <nl> + <nl> + void PsMainWindow : : psInitFrameless ( ) { <nl> + psUpdatedPositionTimer . setSingleShot ( true ) ; <nl> + connect ( & psUpdatedPositionTimer , SIGNAL ( timeout ( ) ) , this , SLOT ( psSavePosition ( ) ) ) ; <nl> + <nl> + if ( frameless ) { <nl> + / / setWindowFlags ( Qt : : FramelessWindowHint ) ; <nl> + } <nl> + <nl> + connect ( windowHandle ( ) , SIGNAL ( windowStateChanged ( Qt : : WindowState ) ) , this , SLOT ( psStateChanged ( Qt : : WindowState ) ) ) ; <nl> + } <nl> + <nl> + void PsMainWindow : : psSavePosition ( Qt : : WindowState state ) { <nl> + if ( state = = Qt : : WindowActive ) state = windowHandle ( ) - > windowState ( ) ; <nl> + if ( state = = Qt : : WindowMinimized | | ! posInited ) return ; <nl> + <nl> + TWindowPos pos ( cWindowPos ( ) ) , curPos = pos ; <nl> + <nl> + if ( state = = Qt : : WindowMaximized ) { <nl> + curPos . maximized = 1 ; <nl> + } else { <nl> + QRect r ( geometry ( ) ) ; <nl> + curPos . x = r . x ( ) ; <nl> + curPos . y = r . y ( ) ; <nl> + curPos . w = r . width ( ) ; <nl> + curPos . h = r . height ( ) ; <nl> + curPos . maximized = 0 ; <nl> + } <nl> + <nl> + int px = curPos . x + curPos . w / 2 , py = curPos . y + curPos . h / 2 , d = 0 ; <nl> + QScreen * chosen = 0 ; <nl> + QList < QScreen * > screens = App : : app ( ) - > screens ( ) ; <nl> + for ( QList < QScreen * > : : const_iterator i = screens . cbegin ( ) , e = screens . cend ( ) ; i ! = e ; + + i ) { <nl> + int dx = ( * i ) - > geometry ( ) . x ( ) + ( * i ) - > geometry ( ) . width ( ) / 2 - px ; if ( dx < 0 ) dx = - dx ; <nl> + int dy = ( * i ) - > geometry ( ) . y ( ) + ( * i ) - > geometry ( ) . height ( ) / 2 - py ; if ( dy < 0 ) dy = - dy ; <nl> + if ( ! chosen | | dx + dy < d ) { <nl> + d = dx + dy ; <nl> + chosen = * i ; <nl> + } <nl> + } <nl> + if ( chosen ) { <nl> + curPos . x - = chosen - > geometry ( ) . x ( ) ; <nl> + curPos . y - = chosen - > geometry ( ) . y ( ) ; <nl> + QByteArray name = chosen - > name ( ) . toUtf8 ( ) ; <nl> + curPos . moncrc = hashCrc32 ( name . constData ( ) , name . size ( ) ) ; <nl> + } <nl> + <nl> + if ( curPos . w > = st : : wndMinWidth & & curPos . h > = st : : wndMinHeight ) { <nl> + if ( curPos . x ! = pos . x | | curPos . y ! = pos . y | | curPos . w ! = pos . w | | curPos . h ! = pos . h | | curPos . moncrc ! = pos . moncrc | | curPos . maximized ! = pos . maximized ) { <nl> + cSetWindowPos ( curPos ) ; <nl> + App : : writeConfig ( ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void PsMainWindow : : psUpdatedPosition ( ) { <nl> + psUpdatedPositionTimer . 
start ( 4000 ) ; <nl> + } <nl> + <nl> + void PsMainWindow : : psStateChanged ( Qt : : WindowState state ) { <nl> + psUpdateSysMenu ( state ) ; <nl> + psUpdateMargins ( ) ; <nl> + / / if ( state = = Qt : : WindowMinimized & & GetWindowLong ( ps_hWnd , GWL_HWNDPARENT ) ) { <nl> + / / minimizeToTray ( ) ; <nl> + / / } <nl> + psSavePosition ( state ) ; <nl> + } <nl> + <nl> + void PsMainWindow : : psFirstShow ( ) { <nl> + finished = false ; <nl> + <nl> + psUpdateMargins ( ) ; <nl> + <nl> + bool showShadows = true ; <nl> + <nl> + show ( ) ; <nl> + / / _private . enableShadow ( winId ( ) ) ; <nl> + if ( cWindowPos ( ) . maximized ) { <nl> + setWindowState ( Qt : : WindowMaximized ) ; <nl> + } <nl> + <nl> + if ( cFromAutoStart ( ) ) { <nl> + if ( cStartMinimized ( ) ) { <nl> + setWindowState ( Qt : : WindowMinimized ) ; <nl> + if ( cWorkMode ( ) = = dbiwmTrayOnly | | cWorkMode ( ) = = dbiwmWindowAndTray ) { <nl> + hide ( ) ; <nl> + } else { <nl> + show ( ) ; <nl> + } <nl> + showShadows = false ; <nl> + } else { <nl> + show ( ) ; <nl> + } <nl> + } else { <nl> + show ( ) ; <nl> + } <nl> + posInited = true ; <nl> + } <nl> + <nl> + bool PsMainWindow : : psHandleTitle ( ) { <nl> + return false ; <nl> + } <nl> + <nl> + void PsMainWindow : : psInitSysMenu ( ) { <nl> + } <nl> + <nl> + void PsMainWindow : : psUpdateSysMenu ( Qt : : WindowState state ) { <nl> + } <nl> + <nl> + void PsMainWindow : : psUpdateMargins ( ) { <nl> + } <nl> + <nl> + void PsMainWindow : : psFlash ( ) { <nl> + / / _private . startBounce ( ) ; <nl> + } <nl> + <nl> + PsMainWindow : : ~ PsMainWindow ( ) { <nl> + finished = true ; <nl> + psClearNotifyFast ( ) ; <nl> + } <nl> + <nl> + void PsMainWindow : : psNotify ( History * history , MsgId msgId ) { <nl> + if ( App : : quiting ( ) | | ! history - > notifyFrom ) return ; <nl> + <nl> + bool haveSetting = ( history - > peer - > notify ! = UnknownNotifySettings ) ; <nl> + if ( haveSetting ) { <nl> + if ( history - > peer - > notify ! = EmptyNotifySettings & & history - > peer - > notify - > mute > unixtime ( ) ) { <nl> + history - > clearNotifyFrom ( ) ; <nl> + return ; <nl> + } <nl> + } else { <nl> + App : : wnd ( ) - > getNotifySetting ( MTP_inputNotifyPeer ( history - > peer - > input ) ) ; <nl> + } <nl> + <nl> + uint64 ms = getms ( ) + NotifyWaitTimeout ; <nl> + notifyWhenAlerts [ history ] . insert ( ms ) ; <nl> + if ( cDesktopNotify ( ) ) { <nl> + NotifyWhenMaps : : iterator i = notifyWhenMaps . find ( history ) ; <nl> + if ( i = = notifyWhenMaps . end ( ) ) { <nl> + i = notifyWhenMaps . insert ( history , NotifyWhenMap ( ) ) ; <nl> + } <nl> + if ( i . value ( ) . constFind ( msgId ) = = i . value ( ) . cend ( ) ) { <nl> + i . value ( ) . insert ( msgId , ms ) ; <nl> + } <nl> + NotifyWaiters * addTo = haveSetting ? & notifyWaiters : & notifySettingWaiters ; <nl> + if ( addTo - > constFind ( history ) = = addTo - > cend ( ) ) { <nl> + addTo - > insert ( history , NotifyWaiter ( msgId , ms ) ) ; <nl> + } <nl> + } <nl> + if ( haveSetting ) { <nl> + if ( ! notifyWaitTimer . isActive ( ) ) { <nl> + notifyWaitTimer . start ( NotifyWaitTimeout ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void PsMainWindow : : psNotifyFire ( ) { <nl> + psShowNextNotify ( ) ; <nl> + } <nl> + <nl> + void PsMainWindow : : psNotifySettingGot ( ) { <nl> + int32 t = unixtime ( ) ; <nl> + for ( NotifyWaiters : : iterator i = notifySettingWaiters . begin ( ) ; i ! = notifySettingWaiters . end ( ) ; ) { <nl> + History * history = i . 
key ( ) ; <nl> + if ( history - > peer - > notify = = UnknownNotifySettings ) { <nl> + + + i ; <nl> + } else { <nl> + if ( history - > peer - > notify = = EmptyNotifySettings | | history - > peer - > notify - > mute < = t ) { <nl> + notifyWaiters . insert ( i . key ( ) , i . value ( ) ) ; <nl> + } <nl> + i = notifySettingWaiters . erase ( i ) ; <nl> + } <nl> + } <nl> + notifyWaitTimer . stop ( ) ; <nl> + psShowNextNotify ( ) ; <nl> + } <nl> + <nl> + void PsMainWindow : : psClearNotify ( History * history ) { <nl> + if ( ! history ) { <nl> + for ( PsNotifyWindows : : const_iterator i = notifyWindows . cbegin ( ) , e = notifyWindows . cend ( ) ; i ! = e ; + + i ) { <nl> + ( * i ) - > unlinkHistory ( ) ; <nl> + } <nl> + / / _private . clearNotifies ( ) ; <nl> + for ( NotifyWhenMaps : : const_iterator i = notifyWhenMaps . cbegin ( ) , e = notifyWhenMaps . cend ( ) ; i ! = e ; + + i ) { <nl> + i . key ( ) - > clearNotifyFrom ( ) ; <nl> + } <nl> + notifyWaiters . clear ( ) ; <nl> + notifySettingWaiters . clear ( ) ; <nl> + notifyWhenMaps . clear ( ) ; <nl> + return ; <nl> + } <nl> + notifyWaiters . remove ( history ) ; <nl> + notifySettingWaiters . remove ( history ) ; <nl> + for ( PsNotifyWindows : : const_iterator i = notifyWindows . cbegin ( ) , e = notifyWindows . cend ( ) ; i ! = e ; + + i ) { <nl> + ( * i ) - > unlinkHistory ( history ) ; <nl> + } <nl> + / / _private . clearNotifies ( history - > peer - > id ) ; <nl> + notifyWhenMaps . remove ( history ) ; <nl> + notifyWhenAlerts . remove ( history ) ; <nl> + } <nl> + <nl> + void PsMainWindow : : psClearNotifyFast ( ) { <nl> + notifyWaiters . clear ( ) ; <nl> + notifySettingWaiters . clear ( ) ; <nl> + for ( PsNotifyWindows : : const_iterator i = notifyWindows . cbegin ( ) , e = notifyWindows . cend ( ) ; i ! = e ; + + i ) { <nl> + ( * i ) - > deleteLater ( ) ; <nl> + } <nl> + / / _private . clearNotifies ( ) ; <nl> + notifyWindows . clear ( ) ; <nl> + notifyWhenMaps . clear ( ) ; <nl> + notifyWhenAlerts . clear ( ) ; <nl> + } <nl> + <nl> + void PsMainWindow : : psActivateNotifies ( ) { <nl> + if ( cCustomNotifies ( ) ) { <nl> + for ( PsNotifyWindows : : const_iterator i = notifyWindows . cbegin ( ) , e = notifyWindows . cend ( ) ; i ! = e ; + + i ) { <nl> + / / _private . activateWnd ( ( * i ) - > winId ( ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + namespace { <nl> + QRect _monitorRect ; <nl> + uint64 _monitorLastGot = 0 ; <nl> + QRect _desktopRect ( ) { <nl> + uint64 tnow = getms ( ) ; <nl> + if ( tnow > _monitorLastGot + 1000 | | tnow < _monitorLastGot ) { <nl> + _monitorLastGot = tnow ; <nl> + _monitorRect = QApplication : : desktop ( ) - > availableGeometry ( App : : wnd ( ) ) ; <nl> + } <nl> + return _monitorRect ; <nl> + } <nl> + } <nl> + <nl> + void PsMainWindow : : psShowNextNotify ( PsNotifyWindow * remove ) { <nl> + if ( App : : quiting ( ) ) return ; <nl> + <nl> + int32 count = NotifyWindows ; <nl> + if ( remove ) { <nl> + for ( PsNotifyWindows : : iterator i = notifyWindows . begin ( ) , e = notifyWindows . end ( ) ; i ! = e ; + + i ) { <nl> + if ( ( * i ) = = remove ) { <nl> + notifyWindows . erase ( i ) ; <nl> + break ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + uint64 ms = getms ( ) , nextAlert = 0 ; <nl> + bool alert = false ; <nl> + for ( NotifyWhenAlerts : : iterator i = notifyWhenAlerts . begin ( ) ; i ! = notifyWhenAlerts . end ( ) ; ) { <nl> + while ( ! i . value ( ) . isEmpty ( ) & & * i . value ( ) . begin ( ) < = ms ) { <nl> + i . value ( ) . erase ( i . value ( ) . begin ( ) ) ; <nl> + NotifySettingsPtr n = i . 
key ( ) - > peer - > notify ; <nl> + if ( n = = EmptyNotifySettings | | ( n ! = UnknownNotifySettings & & n - > mute < = unixtime ( ) ) ) { <nl> + alert = true ; <nl> + } <nl> + } <nl> + if ( i . value ( ) . isEmpty ( ) ) { <nl> + i = notifyWhenAlerts . erase ( i ) ; <nl> + } else { <nl> + if ( ! nextAlert | | nextAlert > * i . value ( ) . begin ( ) ) { <nl> + nextAlert = * i . value ( ) . begin ( ) ; <nl> + } <nl> + + + i ; <nl> + } <nl> + } <nl> + if ( alert ) { <nl> + psFlash ( ) ; <nl> + App : : playSound ( ) ; <nl> + } <nl> + <nl> + if ( cCustomNotifies ( ) ) { <nl> + for ( PsNotifyWindows : : const_iterator i = notifyWindows . cbegin ( ) , e = notifyWindows . cend ( ) ; i ! = e ; + + i ) { <nl> + int32 ind = ( * i ) - > index ( ) ; <nl> + if ( ind < 0 ) continue ; <nl> + - - count ; <nl> + } <nl> + } <nl> + if ( count < = 0 | | ! cDesktopNotify ( ) ) { <nl> + if ( nextAlert ) { <nl> + notifyWaitTimer . start ( nextAlert - ms ) ; <nl> + } <nl> + return ; <nl> + } <nl> + <nl> + QRect r = _desktopRect ( ) ; <nl> + int32 x = r . x ( ) + r . width ( ) - st : : notifyWidth - st : : notifyDeltaX , y = r . y ( ) + r . height ( ) - st : : notifyHeight - st : : notifyDeltaY ; <nl> + while ( count > 0 ) { <nl> + uint64 next = 0 ; <nl> + HistoryItem * notifyItem = 0 ; <nl> + NotifyWaiters : : iterator notifyWaiter ; <nl> + for ( NotifyWaiters : : iterator i = notifyWaiters . begin ( ) ; i ! = notifyWaiters . end ( ) ; + + i ) { <nl> + History * history = i . key ( ) ; <nl> + if ( history - > notifyFrom & & history - > notifyFrom - > id ! = i . value ( ) . msg ) { <nl> + NotifyWhenMaps : : iterator j = notifyWhenMaps . find ( history ) ; <nl> + if ( j = = notifyWhenMaps . end ( ) ) { <nl> + history - > clearNotifyFrom ( ) ; <nl> + i = notifyWaiters . erase ( i ) ; <nl> + continue ; <nl> + } <nl> + do { <nl> + NotifyWhenMap : : const_iterator k = j . value ( ) . constFind ( history - > notifyFrom - > id ) ; <nl> + if ( k ! = j . value ( ) . cend ( ) ) { <nl> + i . value ( ) . msg = k . key ( ) ; <nl> + i . value ( ) . when = k . value ( ) ; <nl> + break ; <nl> + } <nl> + history - > getNextNotifyFrom ( ) ; <nl> + } while ( history - > notifyFrom ) ; <nl> + } <nl> + if ( ! history - > notifyFrom ) { <nl> + notifyWhenMaps . remove ( history ) ; <nl> + i = notifyWaiters . erase ( i ) ; <nl> + continue ; <nl> + } <nl> + uint64 when = i . value ( ) . when ; <nl> + if ( ! notifyItem | | next > when ) { <nl> + next = when ; <nl> + notifyItem = history - > notifyFrom ; <nl> + notifyWaiter = i ; <nl> + } <nl> + } <nl> + if ( notifyItem ) { <nl> + if ( next > ms ) { <nl> + if ( nextAlert & & nextAlert < next ) { <nl> + next = nextAlert ; <nl> + nextAlert = 0 ; <nl> + } <nl> + notifyWaitTimer . start ( next - ms ) ; <nl> + break ; <nl> + } else { <nl> + if ( cCustomNotifies ( ) ) { <nl> + PsNotifyWindow * notify = new PsNotifyWindow ( notifyItem , x , y ) ; <nl> + notifyWindows . push_back ( notify ) ; <nl> + / / notify - > hide ( ) ; <nl> + / / _private . holdOnTop ( notify - > winId ( ) ) ; <nl> + / / notify - > show ( ) ; <nl> + / / _private . showOverAll ( notify - > winId ( ) ) ; <nl> + - - count ; <nl> + } else { <nl> + / / _private . 
showNotify ( notifyItem - > history ( ) - > peer - > id , notifyItem - > history ( ) - > peer - > name , notifyItem - > notificationHeader ( ) , notifyItem - > notificationText ( ) ) ; <nl> + } <nl> + <nl> + uint64 ms = getms ( ) ; <nl> + History * history = notifyItem - > history ( ) ; <nl> + history - > getNextNotifyFrom ( ) ; <nl> + NotifyWhenMaps : : iterator j = notifyWhenMaps . find ( history ) ; <nl> + if ( j = = notifyWhenMaps . end ( ) | | ! history - > notifyFrom ) { <nl> + history - > clearNotifyFrom ( ) ; <nl> + notifyWaiters . erase ( notifyWaiter ) ; <nl> + if ( j ! = notifyWhenMaps . end ( ) ) notifyWhenMaps . erase ( j ) ; <nl> + continue ; <nl> + } <nl> + j . value ( ) . remove ( notifyItem - > id ) ; <nl> + do { <nl> + NotifyWhenMap : : const_iterator k = j . value ( ) . constFind ( history - > notifyFrom - > id ) ; <nl> + if ( k ! = j . value ( ) . cend ( ) ) { <nl> + notifyWaiter . value ( ) . msg = k . key ( ) ; <nl> + notifyWaiter . value ( ) . when = k . value ( ) ; <nl> + break ; <nl> + } <nl> + history - > getNextNotifyFrom ( ) ; <nl> + } while ( history - > notifyFrom ) ; <nl> + if ( ! history - > notifyFrom ) { <nl> + notifyWaiters . erase ( notifyWaiter ) ; <nl> + notifyWhenMaps . erase ( j ) ; <nl> + continue ; <nl> + } <nl> + } <nl> + } else { <nl> + break ; <nl> + } <nl> + } <nl> + if ( nextAlert ) { <nl> + notifyWaitTimer . start ( nextAlert - ms ) ; <nl> + } <nl> + <nl> + count = NotifyWindows - count ; <nl> + for ( PsNotifyWindows : : const_iterator i = notifyWindows . cbegin ( ) , e = notifyWindows . cend ( ) ; i ! = e ; + + i ) { <nl> + int32 ind = ( * i ) - > index ( ) ; <nl> + if ( ind < 0 ) continue ; <nl> + - - count ; <nl> + ( * i ) - > moveTo ( x , y - count * ( st : : notifyHeight + st : : notifyDeltaY ) ) ; <nl> + } <nl> + } <nl> + <nl> + void PsMainWindow : : psStopHiding ( ) { <nl> + if ( cCustomNotifies ( ) ) { <nl> + for ( PsNotifyWindows : : const_iterator i = notifyWindows . cbegin ( ) , e = notifyWindows . cend ( ) ; i ! = e ; + + i ) { <nl> + ( * i ) - > stopHiding ( ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void PsMainWindow : : psStartHiding ( ) { <nl> + if ( cCustomNotifies ( ) ) { <nl> + for ( PsNotifyWindows : : const_iterator i = notifyWindows . cbegin ( ) , e = notifyWindows . cend ( ) ; i ! = e ; + + i ) { <nl> + ( * i ) - > startHiding ( ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void PsMainWindow : : psUpdateNotifies ( ) { <nl> + if ( cCustomNotifies ( ) ) { <nl> + for ( PsNotifyWindows : : const_iterator i = notifyWindows . cbegin ( ) , e = notifyWindows . cend ( ) ; i ! = e ; + + i ) { <nl> + ( * i ) - > updatePeerPhoto ( ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + PsNotifyWindow : : PsNotifyWindow ( HistoryItem * item , int32 x , int32 y ) : history ( item - > history ( ) ) , / / started ( GetTickCount ( ) ) , <nl> + close ( this , st : : notifyClose ) , alphaDuration ( st : : notifyFastAnim ) , posDuration ( st : : notifyFastAnim ) , hiding ( false ) , _index ( 0 ) , aOpacity ( 0 ) , aOpacityFunc ( st : : notifyFastAnimFunc ) , aY ( y + st : : notifyHeight + st : : notifyDeltaY ) { <nl> + <nl> + int32 w = st : : notifyWidth , h = st : : notifyHeight ; <nl> + QImage img ( w * cIntRetinaFactor ( ) , h * cIntRetinaFactor ( ) , QImage : : Format_ARGB32_Premultiplied ) ; <nl> + if ( cRetina ( ) ) img . setDevicePixelRatio ( cRetinaFactor ( ) ) ; <nl> + img . fill ( st : : notifyBG - > c ) ; <nl> + <nl> + { <nl> + QPainter p ( & img ) ; <nl> + p . setPen ( st : : notifyBorder - > p ) ; <nl> + p . 
setBrush ( Qt : : NoBrush ) ; <nl> + p . drawRect ( 0 , 0 , w - 1 , h - 1 ) ; <nl> + <nl> + if ( history - > peer - > photo - > loaded ( ) ) { <nl> + p . drawPixmap ( st : : notifyPhotoPos . x ( ) , st : : notifyPhotoPos . y ( ) , history - > peer - > photo - > pix ( st : : notifyPhotoSize ) ) ; <nl> + } else { <nl> + MTP : : clearLoaderPriorities ( ) ; <nl> + peerPhoto = history - > peer - > photo ; <nl> + peerPhoto - > load ( true , true ) ; <nl> + } <nl> + <nl> + int32 itemWidth = w - st : : notifyPhotoPos . x ( ) - st : : notifyPhotoSize - st : : notifyTextLeft - st : : notifyClosePos . x ( ) - st : : notifyClose . width ; <nl> + <nl> + QRect rectForName ( st : : notifyPhotoPos . x ( ) + st : : notifyPhotoSize + st : : notifyTextLeft , st : : notifyTextTop , itemWidth , st : : msgNameFont - > height ) ; <nl> + if ( history - > peer - > chat ) { <nl> + p . drawPixmap ( QPoint ( rectForName . left ( ) + st : : dlgChatImgLeft , rectForName . top ( ) + st : : dlgChatImgTop ) , App : : sprite ( ) , st : : dlgChatImg ) ; <nl> + rectForName . setLeft ( rectForName . left ( ) + st : : dlgChatImgSkip ) ; <nl> + } <nl> + <nl> + QDateTime now ( QDateTime : : currentDateTime ( ) ) , lastTime ( item - > date ) ; <nl> + QDate nowDate ( now . date ( ) ) , lastDate ( lastTime . date ( ) ) ; <nl> + QString dt = lastTime . toString ( qsl ( " hh : mm " ) ) ; <nl> + int32 dtWidth = st : : dlgHistFont - > m . width ( dt ) ; <nl> + rectForName . setWidth ( rectForName . width ( ) - dtWidth - st : : dlgDateSkip ) ; <nl> + p . setFont ( st : : dlgDateFont - > f ) ; <nl> + p . setPen ( st : : dlgDateColor - > p ) ; <nl> + p . drawText ( rectForName . left ( ) + rectForName . width ( ) + st : : dlgDateSkip , rectForName . top ( ) + st : : dlgHistFont - > ascent , dt ) ; <nl> + <nl> + const HistoryItem * textCachedFor = 0 ; <nl> + Text itemTextCache ( itemWidth ) ; <nl> + bool active = false ; <nl> + item - > drawInDialog ( p , QRect ( st : : notifyPhotoPos . x ( ) + st : : notifyPhotoSize + st : : notifyTextLeft , st : : notifyItemTop + st : : msgNameFont - > height , itemWidth , 2 * st : : dlgFont - > height ) , active , textCachedFor , itemTextCache ) ; <nl> + <nl> + p . setPen ( st : : dlgNameColor - > p ) ; <nl> + history - > nameText . drawElided ( p , rectForName . left ( ) , rectForName . top ( ) , rectForName . width ( ) ) ; <nl> + } <nl> + pm = QPixmap : : fromImage ( img ) ; <nl> + <nl> + hideTimer . setSingleShot ( true ) ; <nl> + connect ( & hideTimer , SIGNAL ( timeout ( ) ) , this , SLOT ( hideByTimer ( ) ) ) ; <nl> + <nl> + inputTimer . setSingleShot ( true ) ; <nl> + connect ( & inputTimer , SIGNAL ( timeout ( ) ) , this , SLOT ( checkLastInput ( ) ) ) ; <nl> + <nl> + connect ( & close , SIGNAL ( clicked ( ) ) , this , SLOT ( unlinkHistory ( ) ) ) ; <nl> + close . setAcceptBoth ( true ) ; <nl> + close . move ( w - st : : notifyClose . width - st : : notifyClosePos . x ( ) , st : : notifyClosePos . y ( ) ) ; <nl> + close . show ( ) ; <nl> + <nl> + aY . start ( y ) ; <nl> + setGeometry ( x , aY . current ( ) , st : : notifyWidth , st : : notifyHeight ) ; <nl> + <nl> + aOpacity . start ( 1 ) ; <nl> + setWindowFlags ( Qt : : Tool | Qt : : WindowStaysOnTopHint | Qt : : FramelessWindowHint ) ; <nl> + setAttribute ( Qt : : WA_MacAlwaysShowToolWindow ) ; <nl> + <nl> + show ( ) ; <nl> + <nl> + setWindowOpacity ( aOpacity . 
current ( ) ) ; <nl> + <nl> + alphaDuration = posDuration = st : : notifyFastAnim ; <nl> + anim : : start ( this ) ; <nl> + <nl> + checkLastInput ( ) ; <nl> + } <nl> + <nl> + void PsNotifyWindow : : checkLastInput ( ) { <nl> + / / TODO <nl> + if ( true ) { <nl> + hideTimer . start ( st : : notifyWaitLongHide ) ; <nl> + } else { <nl> + inputTimer . start ( 300 ) ; <nl> + } <nl> + } <nl> + <nl> + void PsNotifyWindow : : moveTo ( int32 x , int32 y , int32 index ) { <nl> + if ( index > = 0 ) { <nl> + _index = index ; <nl> + } <nl> + move ( x , aY . current ( ) ) ; <nl> + aY . start ( y ) ; <nl> + aOpacity . restart ( ) ; <nl> + posDuration = st : : notifyFastAnim ; <nl> + anim : : start ( this ) ; <nl> + } <nl> + <nl> + void PsNotifyWindow : : updatePeerPhoto ( ) { <nl> + if ( ! peerPhoto - > isNull ( ) & & peerPhoto - > loaded ( ) ) { <nl> + QImage img ( pm . toImage ( ) ) ; <nl> + { <nl> + QPainter p ( & img ) ; <nl> + p . drawPixmap ( st : : notifyPhotoPos . x ( ) , st : : notifyPhotoPos . y ( ) , peerPhoto - > pix ( st : : notifyPhotoSize ) ) ; <nl> + } <nl> + peerPhoto = ImagePtr ( ) ; <nl> + pm = QPixmap : : fromImage ( img ) ; <nl> + update ( ) ; <nl> + } <nl> + } <nl> + <nl> + void PsNotifyWindow : : unlinkHistory ( History * hist ) { <nl> + if ( ! hist | | hist = = history ) { <nl> + animHide ( st : : notifyFastAnim , st : : notifyFastAnimFunc ) ; <nl> + history = 0 ; <nl> + App : : wnd ( ) - > psShowNextNotify ( ) ; <nl> + } <nl> + } <nl> + <nl> + void PsNotifyWindow : : enterEvent ( QEvent * / * e * / ) { <nl> + if ( ! history ) return ; <nl> + if ( App : : wnd ( ) ) App : : wnd ( ) - > psStopHiding ( ) ; <nl> + } <nl> + <nl> + void PsNotifyWindow : : leaveEvent ( QEvent * / * e * / ) { <nl> + if ( ! history ) return ; <nl> + App : : wnd ( ) - > psStartHiding ( ) ; <nl> + } <nl> + <nl> + void PsNotifyWindow : : startHiding ( ) { <nl> + hideTimer . start ( st : : notifyWaitShortHide ) ; <nl> + } <nl> + <nl> + void PsNotifyWindow : : mousePressEvent ( QMouseEvent * e ) { <nl> + if ( ! history ) return ; <nl> + if ( e - > button ( ) = = Qt : : RightButton ) { <nl> + unlinkHistory ( ) ; <nl> + } else if ( history ) { <nl> + App : : wnd ( ) - > showFromTray ( ) ; <nl> + App : : wnd ( ) - > hideSettings ( ) ; <nl> + App : : main ( ) - > showPeer ( history - > peer - > id , false , true ) ; <nl> + unlinkHistory ( ) ; <nl> + e - > ignore ( ) ; <nl> + } <nl> + } <nl> + <nl> + void PsNotifyWindow : : paintEvent ( QPaintEvent * e ) { <nl> + QPainter p ( this ) ; <nl> + p . drawPixmap ( 0 , 0 , pm ) ; <nl> + } <nl> + <nl> + void PsNotifyWindow : : animHide ( float64 duration , anim : : transition func ) { <nl> + if ( ! history ) return ; <nl> + alphaDuration = duration ; <nl> + aOpacityFunc = func ; <nl> + aOpacity . start ( 0 ) ; <nl> + aY . restart ( ) ; <nl> + hiding = true ; <nl> + anim : : start ( this ) ; <nl> + } <nl> + <nl> + void PsNotifyWindow : : stopHiding ( ) { <nl> + if ( ! history ) return ; <nl> + alphaDuration = st : : notifyFastAnim ; <nl> + aOpacityFunc = st : : notifyFastAnimFunc ; <nl> + aOpacity . start ( 1 ) ; <nl> + aY . restart ( ) ; <nl> + hiding = false ; <nl> + hideTimer . stop ( ) ; <nl> + anim : : start ( this ) ; <nl> + } <nl> + <nl> + void PsNotifyWindow : : hideByTimer ( ) { <nl> + if ( ! 
history ) return ; <nl> + animHide ( st : : notifySlowHide , st : : notifySlowHideFunc ) ; <nl> + } <nl> + <nl> + bool PsNotifyWindow : : animStep ( float64 ms ) { <nl> + float64 dtAlpha = ms / alphaDuration , dtPos = ms / posDuration ; <nl> + if ( dtAlpha > = 1 ) { <nl> + aOpacity . finish ( ) ; <nl> + if ( hiding ) { <nl> + deleteLater ( ) ; <nl> + } <nl> + } else { <nl> + aOpacity . update ( dtAlpha , aOpacityFunc ) ; <nl> + } <nl> + setWindowOpacity ( aOpacity . current ( ) ) ; <nl> + if ( dtPos > = 1 ) { <nl> + aY . finish ( ) ; <nl> + } else { <nl> + aY . update ( dtPos , anim : : linear ) ; <nl> + } <nl> + move ( x ( ) , aY . current ( ) ) ; <nl> + update ( ) ; <nl> + return ( dtAlpha < 1 | | ( ! hiding & & dtPos < 1 ) ) ; <nl> + } <nl> + <nl> + PsNotifyWindow : : ~ PsNotifyWindow ( ) { <nl> + if ( App : : wnd ( ) ) App : : wnd ( ) - > psShowNextNotify ( this ) ; <nl> + } <nl> + <nl> + PsApplication : : PsApplication ( int & argc , char * * argv ) : QApplication ( argc , argv ) { <nl> + } <nl> + <nl> + void PsApplication : : psInstallEventFilter ( ) { <nl> + delete _psEventFilter ; <nl> + _psEventFilter = new _PsEventFilter ( ) ; <nl> + installNativeEventFilter ( _psEventFilter ) ; <nl> + } <nl> + <nl> + PsApplication : : ~ PsApplication ( ) { <nl> + delete _psEventFilter ; <nl> + _psEventFilter = 0 ; <nl> + } <nl> + <nl> + PsUpdateDownloader : : PsUpdateDownloader ( QThread * thread , const MTPDhelp_appUpdate & update ) : reply ( 0 ) , already ( 0 ) , full ( 0 ) { <nl> + updateUrl = qs ( update . vurl ) ; <nl> + moveToThread ( thread ) ; <nl> + manager . moveToThread ( thread ) ; <nl> + App : : setProxySettings ( manager ) ; <nl> + <nl> + connect ( thread , SIGNAL ( started ( ) ) , this , SLOT ( start ( ) ) ) ; <nl> + initOutput ( ) ; <nl> + } <nl> + <nl> + PsUpdateDownloader : : PsUpdateDownloader ( QThread * thread , const QString & url ) : reply ( 0 ) , already ( 0 ) , full ( 0 ) { <nl> + updateUrl = url ; <nl> + moveToThread ( thread ) ; <nl> + manager . moveToThread ( thread ) ; <nl> + App : : setProxySettings ( manager ) ; <nl> + <nl> + connect ( thread , SIGNAL ( started ( ) ) , this , SLOT ( start ( ) ) ) ; <nl> + initOutput ( ) ; <nl> + } <nl> + <nl> + void PsUpdateDownloader : : initOutput ( ) { <nl> + QString fileName ; <nl> + QRegularExpressionMatch m = QRegularExpression ( qsl ( " / ( [ ^ / \ \ ? ] + ) ( \ \ ? | $ ) " ) ) . match ( updateUrl ) ; <nl> + if ( m . hasMatch ( ) ) { <nl> + fileName = m . captured ( 1 ) . replace ( QRegularExpression ( qsl ( " [ ^ a - zA - Z0 - 9_ \ \ - ] " ) ) , QString ( ) ) ; <nl> + } <nl> + if ( fileName . isEmpty ( ) ) { <nl> + fileName = qsl ( " tupdate - % 1 " ) . arg ( rand ( ) ) ; <nl> + } <nl> + QString dirStr = cWorkingDir ( ) + qsl ( " tupdates / " ) ; <nl> + fileName = dirStr + fileName ; <nl> + QFileInfo file ( fileName ) ; <nl> + <nl> + QDir dir ( dirStr ) ; <nl> + if ( dir . exists ( ) ) { <nl> + QFileInfoList all = dir . entryInfoList ( QDir : : Files ) ; <nl> + for ( QFileInfoList : : iterator i = all . begin ( ) , e = all . end ( ) ; i ! = e ; + + i ) { <nl> + if ( i - > absoluteFilePath ( ) ! = file . absoluteFilePath ( ) ) { <nl> + QFile : : remove ( i - > absoluteFilePath ( ) ) ; <nl> + } <nl> + } <nl> + } else { <nl> + dir . mkdir ( dir . absolutePath ( ) ) ; <nl> + } <nl> + outputFile . setFileName ( fileName ) ; <nl> + if ( file . exists ( ) ) { <nl> + uint64 fullSize = file . 
size ( ) ; <nl> + if ( fullSize < INT_MAX ) { <nl> + int32 goodSize = ( int32 ) fullSize ; <nl> + if ( goodSize % UpdateChunk ) { <nl> + goodSize = goodSize - ( goodSize % UpdateChunk ) ; <nl> + if ( goodSize ) { <nl> + if ( outputFile . open ( QIODevice : : ReadOnly ) ) { <nl> + QByteArray goodData = outputFile . readAll ( ) . mid ( 0 , goodSize ) ; <nl> + outputFile . close ( ) ; <nl> + if ( outputFile . open ( QIODevice : : WriteOnly ) ) { <nl> + outputFile . write ( goodData ) ; <nl> + outputFile . close ( ) ; <nl> + <nl> + QMutexLocker lock ( & mutex ) ; <nl> + already = goodSize ; <nl> + } <nl> + } <nl> + } <nl> + } else { <nl> + QMutexLocker lock ( & mutex ) ; <nl> + already = goodSize ; <nl> + } <nl> + } <nl> + if ( ! already ) { <nl> + QFile : : remove ( fileName ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void PsUpdateDownloader : : start ( ) { <nl> + sendRequest ( ) ; <nl> + } <nl> + <nl> + void PsUpdateDownloader : : sendRequest ( ) { <nl> + QNetworkRequest req ( updateUrl ) ; <nl> + QByteArray rangeHeaderValue = " bytes = " + QByteArray : : number ( already ) + " - " ; / / + QByteArray : : number ( already + cUpdateChunk ( ) - 1 ) ; <nl> + req . setRawHeader ( " Range " , rangeHeaderValue ) ; <nl> + req . setAttribute ( QNetworkRequest : : HttpPipeliningAllowedAttribute , true ) ; <nl> + if ( reply ) reply - > deleteLater ( ) ; <nl> + reply = manager . get ( req ) ; <nl> + connect ( reply , SIGNAL ( downloadProgress ( qint64 , qint64 ) ) , this , SLOT ( partFinished ( qint64 , qint64 ) ) ) ; <nl> + connect ( reply , SIGNAL ( error ( QNetworkReply : : NetworkError ) ) , this , SLOT ( partFailed ( QNetworkReply : : NetworkError ) ) ) ; <nl> + connect ( reply , SIGNAL ( metaDataChanged ( ) ) , this , SLOT ( partMetaGot ( ) ) ) ; <nl> + } <nl> + <nl> + void PsUpdateDownloader : : partMetaGot ( ) { <nl> + typedef QList < QNetworkReply : : RawHeaderPair > Pairs ; <nl> + Pairs pairs = reply - > rawHeaderPairs ( ) ; <nl> + for ( Pairs : : iterator i = pairs . begin ( ) , e = pairs . end ( ) ; i ! = e ; + + i ) { <nl> + if ( QString : : fromUtf8 ( i - > first ) . toLower ( ) = = " content - range " ) { <nl> + QRegularExpressionMatch m = QRegularExpression ( qsl ( " / ( \ \ d + ) ( [ ^ \ \ d ] | $ ) " ) ) . match ( QString : : fromUtf8 ( i - > second ) ) ; <nl> + if ( m . hasMatch ( ) ) { <nl> + { <nl> + QMutexLocker lock ( & mutex ) ; <nl> + full = m . captured ( 1 ) . toInt ( ) ; <nl> + } <nl> + emit App : : app ( ) - > updateDownloading ( already , full ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + int32 PsUpdateDownloader : : ready ( ) { <nl> + QMutexLocker lock ( & mutex ) ; <nl> + return already ; <nl> + } <nl> + <nl> + int32 PsUpdateDownloader : : size ( ) { <nl> + QMutexLocker lock ( & mutex ) ; <nl> + return full ; <nl> + } <nl> + <nl> + void PsUpdateDownloader : : partFinished ( qint64 got , qint64 total ) { <nl> + if ( ! reply ) return ; <nl> + <nl> + QVariant statusCode = reply - > attribute ( QNetworkRequest : : HttpStatusCodeAttribute ) ; <nl> + if ( statusCode . isValid ( ) ) { <nl> + int status = statusCode . toInt ( ) ; <nl> + if ( status ! = 200 & & status ! = 206 & & status ! = 416 ) { <nl> + LOG ( ( " Update Error : Bad HTTP status received in partFinished ( ) : % 1 " ) . arg ( status ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + } <nl> + <nl> + if ( ! already & & ! full ) { <nl> + QMutexLocker lock ( & mutex ) ; <nl> + full = total ; <nl> + } <nl> + DEBUG_LOG ( ( " Update Info : part % 1 of % 2 " ) . arg ( got ) . 
arg ( total ) ) ; <nl> + <nl> + if ( ! outputFile . isOpen ( ) ) { <nl> + if ( ! outputFile . open ( QIODevice : : Append ) ) { <nl> + LOG ( ( " Update Error : Could not open output file ' % 1 ' for appending " ) . arg ( outputFile . fileName ( ) ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + } <nl> + QByteArray r = reply - > readAll ( ) ; <nl> + if ( ! r . isEmpty ( ) ) { <nl> + outputFile . write ( r ) ; <nl> + <nl> + QMutexLocker lock ( & mutex ) ; <nl> + already + = r . size ( ) ; <nl> + } <nl> + if ( got > = total ) { <nl> + reply - > deleteLater ( ) ; <nl> + reply = 0 ; <nl> + outputFile . close ( ) ; <nl> + unpackUpdate ( ) ; <nl> + } else { <nl> + emit App : : app ( ) - > updateDownloading ( already , full ) ; <nl> + } <nl> + } <nl> + <nl> + void PsUpdateDownloader : : partFailed ( QNetworkReply : : NetworkError e ) { <nl> + if ( ! reply ) return ; <nl> + <nl> + QVariant statusCode = reply - > attribute ( QNetworkRequest : : HttpStatusCodeAttribute ) ; <nl> + reply - > deleteLater ( ) ; <nl> + reply = 0 ; <nl> + if ( statusCode . isValid ( ) ) { <nl> + int status = statusCode . toInt ( ) ; <nl> + if ( status = = 416 ) { / / Requested range not satisfiable <nl> + outputFile . close ( ) ; <nl> + unpackUpdate ( ) ; <nl> + return ; <nl> + } <nl> + } <nl> + LOG ( ( " Update Error : failed to download part starting from % 1 , error % 2 " ) . arg ( already ) . arg ( e ) ) ; <nl> + emit App : : app ( ) - > updateFailed ( ) ; <nl> + } <nl> + <nl> + void PsUpdateDownloader : : deleteDir ( const QString & dir ) { <nl> + / / objc_deleteDir ( dir ) ; <nl> + } <nl> + <nl> + void PsUpdateDownloader : : fatalFail ( ) { <nl> + clearAll ( ) ; <nl> + emit App : : app ( ) - > updateFailed ( ) ; <nl> + } <nl> + <nl> + void PsUpdateDownloader : : clearAll ( ) { <nl> + deleteDir ( cWorkingDir ( ) + qsl ( " tupdates " ) ) ; <nl> + } <nl> + <nl> + # ifdef Q_OS_WIN <nl> + typedef DWORD VerInt ; <nl> + typedef WCHAR VerChar ; <nl> + # else <nl> + typedef int VerInt ; <nl> + typedef wchar_t VerChar ; <nl> + # endif <nl> + <nl> + void PsUpdateDownloader : : unpackUpdate ( ) { <nl> + QByteArray packed ; <nl> + if ( ! outputFile . open ( QIODevice : : ReadOnly ) ) { <nl> + LOG ( ( " Update Error : cant read updates file ! " ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + # ifdef Q_OS_WIN / / use Lzma SDK for win <nl> + const int32 hSigLen = 128 , hShaLen = 20 , hPropsLen = LZMA_PROPS_SIZE , hOriginalSizeLen = sizeof ( int32 ) , hSize = hSigLen + hShaLen + hPropsLen + hOriginalSizeLen ; / / header <nl> + # else <nl> + const int32 hSigLen = 128 , hShaLen = 20 , hPropsLen = 0 , hOriginalSizeLen = sizeof ( int32 ) , hSize = hSigLen + hShaLen + hOriginalSizeLen ; / / header <nl> + # endif <nl> + QByteArray compressed = outputFile . readAll ( ) ; <nl> + int32 compressedLen = compressed . size ( ) - hSize ; <nl> + if ( compressedLen < = 0 ) { <nl> + LOG ( ( " Update Error : bad compressed size : % 1 " ) . arg ( compressed . size ( ) ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + outputFile . close ( ) ; <nl> + <nl> + QString tempDirPath = cWorkingDir ( ) + qsl ( " tupdates / temp " ) , readyDirPath = cWorkingDir ( ) + qsl ( " tupdates / ready " ) ; <nl> + deleteDir ( tempDirPath ) ; <nl> + deleteDir ( readyDirPath ) ; <nl> + <nl> + QDir tempDir ( tempDirPath ) , readyDir ( readyDirPath ) ; <nl> + if ( tempDir . exists ( ) | | readyDir . exists ( ) ) { <nl> + LOG ( ( " Update Error : cant clear tupdates / temp or tupdates / ready dir ! 
" ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + <nl> + uchar sha1Buffer [ 20 ] ; <nl> + bool goodSha1 = ! memcmp ( compressed . constData ( ) + hSigLen , hashSha1 ( compressed . constData ( ) + hSigLen + hShaLen , compressedLen + hPropsLen + hOriginalSizeLen , sha1Buffer ) , hShaLen ) ; <nl> + if ( ! goodSha1 ) { <nl> + LOG ( ( " Update Error : bad SHA1 hash of update file ! " ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + <nl> + RSA * pbKey = PEM_read_bio_RSAPublicKey ( BIO_new_mem_buf ( const_cast < char * > ( UpdatesPublicKey ) , - 1 ) , 0 , 0 , 0 ) ; <nl> + if ( ! pbKey ) { <nl> + LOG ( ( " Update Error : cant read public rsa key ! " ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + if ( RSA_verify ( NID_sha1 , ( const uchar * ) ( compressed . constData ( ) + hSigLen ) , hShaLen , ( const uchar * ) ( compressed . constData ( ) ) , hSigLen , pbKey ) ! = 1 ) { / / verify signature <nl> + RSA_free ( pbKey ) ; <nl> + LOG ( ( " Update Error : bad RSA signature of update file ! " ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + RSA_free ( pbKey ) ; <nl> + <nl> + QByteArray uncompressed ; <nl> + <nl> + int32 uncompressedLen ; <nl> + memcpy ( & uncompressedLen , compressed . constData ( ) + hSigLen + hShaLen + hPropsLen , hOriginalSizeLen ) ; <nl> + uncompressed . resize ( uncompressedLen ) ; <nl> + <nl> + size_t resultLen = uncompressed . size ( ) ; <nl> + # ifdef Q_OS_WIN / / use Lzma SDK for win <nl> + SizeT srcLen = compressedLen ; <nl> + int uncompressRes = LzmaUncompress ( ( uchar * ) uncompressed . data ( ) , & resultLen , ( const uchar * ) ( compressed . constData ( ) + hSize ) , & srcLen , ( const uchar * ) ( compressed . constData ( ) + hSigLen + hShaLen ) , LZMA_PROPS_SIZE ) ; <nl> + if ( uncompressRes ! = SZ_OK ) { <nl> + LOG ( ( " Update Error : could not uncompress lzma , code : % 1 " ) . arg ( uncompressRes ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + # else <nl> + lzma_stream stream = LZMA_STREAM_INIT ; <nl> + <nl> + lzma_ret ret = lzma_stream_decoder ( & stream , UINT64_MAX , LZMA_CONCATENATED ) ; <nl> + if ( ret ! = LZMA_OK ) { <nl> + const char * msg ; <nl> + switch ( ret ) { <nl> + case LZMA_MEM_ERROR : msg = " Memory allocation failed " ; break ; <nl> + case LZMA_OPTIONS_ERROR : msg = " Specified preset is not supported " ; break ; <nl> + case LZMA_UNSUPPORTED_CHECK : msg = " Specified integrity check is not supported " ; break ; <nl> + default : msg = " Unknown error , possibly a bug " ; break ; <nl> + } <nl> + LOG ( ( " Error initializing the decoder : % 1 ( error code % 2 ) " ) . arg ( msg ) . arg ( ret ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + <nl> + stream . avail_in = compressedLen ; <nl> + stream . next_in = ( uint8_t * ) ( compressed . constData ( ) + hSize ) ; <nl> + stream . avail_out = resultLen ; <nl> + stream . next_out = ( uint8_t * ) uncompressed . data ( ) ; <nl> + <nl> + lzma_ret res = lzma_code ( & stream , LZMA_FINISH ) ; <nl> + if ( stream . avail_in ) { <nl> + LOG ( ( " Error in decompression , % 1 bytes left in _in of % 2 whole . " ) . arg ( stream . avail_in ) . arg ( compressedLen ) ) ; <nl> + return fatalFail ( ) ; <nl> + } else if ( stream . avail_out ) { <nl> + LOG ( ( " Error in decompression , % 1 bytes free left in _out of % 2 whole . " ) . arg ( stream . avail_out ) . arg ( resultLen ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + lzma_end ( & stream ) ; <nl> + if ( res ! = LZMA_OK & & res ! 
= LZMA_STREAM_END ) { <nl> + const char * msg ; <nl> + switch ( res ) { <nl> + case LZMA_MEM_ERROR : msg = " Memory allocation failed " ; break ; <nl> + case LZMA_FORMAT_ERROR : msg = " The input data is not in the . xz format " ; break ; <nl> + case LZMA_OPTIONS_ERROR : msg = " Unsupported compression options " ; break ; <nl> + case LZMA_DATA_ERROR : msg = " Compressed file is corrupt " ; break ; <nl> + case LZMA_BUF_ERROR : msg = " Compressed data is truncated or otherwise corrupt " ; break ; <nl> + default : msg = " Unknown error , possibly a bug " ; break ; <nl> + } <nl> + LOG ( ( " Error in decompression : % 1 ( error code % 2 ) " ) . arg ( msg ) . arg ( res ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + # endif <nl> + <nl> + tempDir . mkdir ( tempDir . absolutePath ( ) ) ; <nl> + <nl> + quint32 version ; <nl> + { <nl> + QBuffer buffer ( & uncompressed ) ; <nl> + buffer . open ( QIODevice : : ReadOnly ) ; <nl> + QDataStream stream ( & buffer ) ; <nl> + stream . setVersion ( QDataStream : : Qt_5_1 ) ; <nl> + <nl> + stream > > version ; <nl> + if ( stream . status ( ) ! = QDataStream : : Ok ) { <nl> + LOG ( ( " Update Error : cant read version from downloaded stream , status : % 1 " ) . arg ( stream . status ( ) ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + if ( version < = AppVersion ) { <nl> + LOG ( ( " Update Error : downloaded version % 1 is not greater , than mine % 2 " ) . arg ( version ) . arg ( AppVersion ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + <nl> + quint32 filesCount ; <nl> + stream > > filesCount ; <nl> + if ( stream . status ( ) ! = QDataStream : : Ok ) { <nl> + LOG ( ( " Update Error : cant read files count from downloaded stream , status : % 1 " ) . arg ( stream . status ( ) ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + if ( ! filesCount ) { <nl> + LOG ( ( " Update Error : update is empty ! " ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + for ( uint32 i = 0 ; i < filesCount ; + + i ) { <nl> + QString relativeName ; <nl> + quint32 fileSize ; <nl> + QByteArray fileInnerData ; <nl> + bool executable = false ; <nl> + <nl> + stream > > relativeName > > fileSize > > fileInnerData ; <nl> + # if defined Q_OS_MAC | | defined Q_OS_LINUX <nl> + stream > > executable ; <nl> + # endif <nl> + if ( stream . status ( ) ! = QDataStream : : Ok ) { <nl> + LOG ( ( " Update Error : cant read file from downloaded stream , status : % 1 " ) . arg ( stream . status ( ) ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + if ( fileSize ! = quint32 ( fileInnerData . size ( ) ) ) { <nl> + LOG ( ( " Update Error : bad file size % 1 not matching data size % 2 " ) . arg ( fileSize ) . arg ( fileInnerData . size ( ) ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + <nl> + QFile f ( tempDirPath + ' / ' + relativeName ) ; <nl> + if ( ! QDir ( ) . mkpath ( QFileInfo ( f ) . absolutePath ( ) ) ) { <nl> + LOG ( ( " Update Error : cant mkpath for file ' % 1 ' " ) . arg ( tempDirPath + ' / ' + relativeName ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + if ( ! f . open ( QIODevice : : WriteOnly ) ) { <nl> + LOG ( ( " Update Error : cant open file ' % 1 ' for writing " ) . arg ( tempDirPath + ' / ' + relativeName ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + if ( f . write ( fileInnerData ) ! = fileSize ) { <nl> + f . close ( ) ; <nl> + LOG ( ( " Update Error : cant write file ' % 1 ' " ) . arg ( tempDirPath + ' / ' + relativeName ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + f . close ( ) ; <nl> + if ( executable ) { <nl> + QFileDevice : : Permissions p = f . 
permissions ( ) ; <nl> + p | = QFileDevice : : ExeOwner | QFileDevice : : ExeUser | QFileDevice : : ExeGroup | QFileDevice : : ExeOther ; <nl> + f . setPermissions ( p ) ; <nl> + } <nl> + } <nl> + <nl> + / / create tdata / version file <nl> + tempDir . mkdir ( QDir ( tempDirPath + qsl ( " / tdata " ) ) . absolutePath ( ) ) ; <nl> + std : : wstring versionString = ( ( version % 1000 ) ? QString ( " % 1 . % 2 . % 3 " ) . arg ( int ( version / 1000000 ) ) . arg ( int ( ( version % 1000000 ) / 1000 ) ) . arg ( int ( version % 1000 ) ) : QString ( " % 1 . % 2 " ) . arg ( int ( version / 1000000 ) ) . arg ( int ( ( version % 1000000 ) / 1000 ) ) ) . toStdWString ( ) ; <nl> + <nl> + VerInt versionNum = VerInt ( version ) , versionLen = VerInt ( versionString . size ( ) * sizeof ( VerChar ) ) ; <nl> + VerChar versionStr [ 32 ] ; <nl> + memcpy ( versionStr , versionString . c_str ( ) , versionLen ) ; <nl> + <nl> + QFile fVersion ( tempDirPath + qsl ( " / tdata / version " ) ) ; <nl> + if ( ! fVersion . open ( QIODevice : : WriteOnly ) ) { <nl> + LOG ( ( " Update Error : cant write version file ' % 1 ' " ) . arg ( tempDirPath + qsl ( " / version " ) ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + fVersion . write ( ( const char * ) & versionNum , sizeof ( VerInt ) ) ; <nl> + fVersion . write ( ( const char * ) & versionLen , sizeof ( VerInt ) ) ; <nl> + fVersion . write ( ( const char * ) & versionStr [ 0 ] , versionLen ) ; <nl> + fVersion . close ( ) ; <nl> + } <nl> + <nl> + if ( ! tempDir . rename ( tempDir . absolutePath ( ) , readyDir . absolutePath ( ) ) ) { <nl> + LOG ( ( " Update Error : cant rename temp dir ' % 1 ' to ready dir ' % 2 ' " ) . arg ( tempDir . absolutePath ( ) ) . arg ( readyDir . absolutePath ( ) ) ) ; <nl> + return fatalFail ( ) ; <nl> + } <nl> + deleteDir ( tempDirPath ) ; <nl> + outputFile . remove ( ) ; <nl> + <nl> + emit App : : app ( ) - > updateReady ( ) ; <nl> + } <nl> + <nl> + PsUpdateDownloader : : ~ PsUpdateDownloader ( ) { <nl> + delete reply ; <nl> + reply = 0 ; <nl> + } <nl> + <nl> + void psActivateProcess ( uint64 pid ) { <nl> + / / objc_activateProgram ( ) ; <nl> + } <nl> + <nl> + QString psCurrentCountry ( ) { <nl> + QString country ; / / = objc_currentCountry ( ) ; <nl> + return country . isEmpty ( ) ? QString : : fromLatin1 ( DefaultCountry ) : country ; <nl> + } <nl> + <nl> + QString psCurrentLanguage ( ) { <nl> + QString lng ; / / = objc_currentLang ( ) ; <nl> + return lng . isEmpty ( ) ? QString : : fromLatin1 ( DefaultLanguage ) : lng ; <nl> + } <nl> + <nl> + QString psAppDataPath ( ) { <nl> + return QString ( ) ; / / objc_appDataPath ( ) ; <nl> + } <nl> + <nl> + QString psCurrentExeDirectory ( int argc , char * argv [ ] ) { <nl> + QString first = argc ? QString : : fromLocal8Bit ( argv [ 0 ] ) : QString ( ) ; <nl> + if ( ! first . isEmpty ( ) ) { <nl> + QFileInfo info ( first ) ; <nl> + if ( info . exists ( ) ) { <nl> + QDir result ( info . absolutePath ( ) + qsl ( " / . . / . . / . . " ) ) ; <nl> + return result . absolutePath ( ) + ' / ' ; <nl> + } <nl> + } <nl> + return QString ( ) ; <nl> + } <nl> + <nl> + void psDoCleanup ( ) { <nl> + try { <nl> + psAutoStart ( false , true ) ; <nl> + } catch ( . . . 
) { <nl> + } <nl> + } <nl> + <nl> + int psCleanup ( ) { <nl> + psDoCleanup ( ) ; <nl> + return 0 ; <nl> + } <nl> + <nl> + void psDoFixPrevious ( ) { <nl> + } <nl> + <nl> + int psFixPrevious ( ) { <nl> + psDoFixPrevious ( ) ; <nl> + return 0 ; <nl> + } <nl> + <nl> + bool psCheckReadyUpdate ( ) { <nl> + QString readyPath = cWorkingDir ( ) + qsl ( " tupdates / ready " ) ; <nl> + if ( ! QDir ( readyPath ) . exists ( ) ) { <nl> + return false ; <nl> + } <nl> + <nl> + / / check ready version <nl> + QString versionPath = readyPath + qsl ( " / tdata / version " ) ; <nl> + { <nl> + QFile fVersion ( versionPath ) ; <nl> + if ( ! fVersion . open ( QIODevice : : ReadOnly ) ) { <nl> + LOG ( ( " Update Error : cant read version file ' % 1 ' " ) . arg ( versionPath ) ) ; <nl> + PsUpdateDownloader : : clearAll ( ) ; <nl> + return false ; <nl> + } <nl> + VerInt versionNum ; <nl> + if ( fVersion . read ( ( char * ) & versionNum , sizeof ( VerInt ) ) ! = sizeof ( VerInt ) ) { <nl> + LOG ( ( " Update Error : cant read version from file ' % 1 ' " ) . arg ( versionPath ) ) ; <nl> + PsUpdateDownloader : : clearAll ( ) ; <nl> + return false ; <nl> + } <nl> + fVersion . close ( ) ; <nl> + if ( versionNum < = AppVersion ) { <nl> + LOG ( ( " Update Error : cant install version % 1 having version % 2 " ) . arg ( versionNum ) . arg ( AppVersion ) ) ; <nl> + PsUpdateDownloader : : clearAll ( ) ; <nl> + return false ; <nl> + } <nl> + } <nl> + <nl> + # ifdef Q_OS_WIN <nl> + QString curUpdater = ( cExeDir ( ) + " Updater . exe " ) ; <nl> + QFileInfo updater ( cWorkingDir ( ) + " tupdates / ready / Updater . exe " ) ; <nl> + # elif defined Q_OS_MAC <nl> + QString curUpdater = ( cExeDir ( ) + " Telegram . app / Contents / Frameworks / Updater " ) ; <nl> + QFileInfo updater ( cWorkingDir ( ) + " tupdates / ready / Telegram . app / Contents / Frameworks / Updater " ) ; <nl> + # elif defined Q_OS_LINUX <nl> + QString curUpdater ; <nl> + QFileInfo updater ; <nl> + # endif <nl> + if ( ! updater . exists ( ) ) { <nl> + QFileInfo current ( curUpdater ) ; <nl> + if ( ! current . exists ( ) ) { <nl> + PsUpdateDownloader : : clearAll ( ) ; <nl> + return false ; <nl> + } <nl> + if ( ! QFile ( current . absoluteFilePath ( ) ) . copy ( updater . absoluteFilePath ( ) ) ) { <nl> + PsUpdateDownloader : : clearAll ( ) ; <nl> + return false ; <nl> + } <nl> + } <nl> + # ifdef Q_OS_WIN <nl> + if ( CopyFile ( updater . absoluteFilePath ( ) . toStdWString ( ) . c_str ( ) , curUpdater . toStdWString ( ) . c_str ( ) , FALSE ) = = FALSE ) { <nl> + PsUpdateDownloader : : clearAll ( ) ; <nl> + return false ; <nl> + } <nl> + if ( DeleteFile ( updater . absoluteFilePath ( ) . toStdWString ( ) . c_str ( ) ) = = FALSE ) { <nl> + PsUpdateDownloader : : clearAll ( ) ; <nl> + return false ; <nl> + } <nl> + # elif defined Q_OS_MAC <nl> + QFileInfo to ( curUpdater ) ; <nl> + QDir ( ) . mkpath ( to . absolutePath ( ) ) ; <nl> + if ( ! objc_moveFile ( updater . absoluteFilePath ( ) , curUpdater ) ) { <nl> + PsUpdateDownloader : : clearAll ( ) ; <nl> + return false ; <nl> + } <nl> + # endif <nl> + return true ; <nl> + } <nl> + <nl> + void psPostprocessFile ( const QString & name ) { <nl> + } <nl> + <nl> + void psOpenFile ( const QString & name , bool openWith ) { <nl> + / / objc_openFile ( name , openWith ) ; <nl> + } <nl> + <nl> + void psShowInFolder ( const QString & name ) { <nl> + / / objc_showInFinder ( name , QFileInfo ( name ) . 
absolutePath ( ) ) ; <nl> + } <nl> + <nl> + void psFinish ( ) { <nl> + / / objc_finish ( ) ; <nl> + } <nl> + <nl> + void psExecUpdater ( ) { <nl> + if ( true / * ! objc_execUpdater ( ) * / ) { <nl> + QString readyPath = cWorkingDir ( ) + qsl ( " tupdates / ready " ) ; <nl> + PsUpdateDownloader : : deleteDir ( readyPath ) ; <nl> + } <nl> + } <nl> + <nl> + void psExecTelegram ( ) { <nl> + / / objc_execTelegram ( ) ; <nl> + } <nl> + <nl> + void psAutoStart ( bool start , bool silent ) { <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . e23826df323 <nl> mmm / dev / null <nl> ppp b / Telegram / SourceFiles / pspecific_linux . h <nl> <nl> + / * <nl> + This file is part of Telegram Desktop , <nl> + an unofficial desktop messaging app , see https : / / telegram . org <nl> + <nl> + Telegram Desktop is free software : you can redistribute it and / or modify <nl> + it under the terms of the GNU General Public License as published by <nl> + the Free Software Foundation , either version 3 of the License , or <nl> + ( at your option ) any later version . <nl> + <nl> + It is distributed in the hope that it will be useful , <nl> + but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + GNU General Public License for more details . <nl> + <nl> + Full license : https : / / github . com / telegramdesktop / tdesktop / blob / master / LICENSE <nl> + Copyright ( c ) 2014 John Preston , https : / / tdesktop . com <nl> + * / <nl> + # pragma once <nl> + <nl> + inline QString psServerPrefix ( ) { <nl> + return qsl ( " / tmp / " ) ; <nl> + } <nl> + inline void psCheckLocalSocket ( const QString & serverName ) { <nl> + QFile address ( serverName ) ; <nl> + if ( address . exists ( ) ) { <nl> + address . remove ( ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + class PsNotifyWindow : public QWidget , public Animated { <nl> + Q_OBJECT <nl> + <nl> + public : <nl> + <nl> + PsNotifyWindow ( HistoryItem * item , int32 x , int32 y ) ; <nl> + <nl> + void enterEvent ( QEvent * e ) ; <nl> + void leaveEvent ( QEvent * e ) ; <nl> + void mousePressEvent ( QMouseEvent * e ) ; <nl> + void paintEvent ( QPaintEvent * e ) ; <nl> + <nl> + bool animStep ( float64 ms ) ; <nl> + void animHide ( float64 duration , anim : : transition func ) ; <nl> + void startHiding ( ) ; <nl> + void stopHiding ( ) ; <nl> + void moveTo ( int32 x , int32 y , int32 index = - 1 ) ; <nl> + <nl> + void updatePeerPhoto ( ) ; <nl> + <nl> + int32 index ( ) const { <nl> + return history ? 
_index : - 1 ; <nl> + } <nl> + <nl> + ~ PsNotifyWindow ( ) ; <nl> + <nl> + public slots : <nl> + <nl> + void hideByTimer ( ) ; <nl> + void checkLastInput ( ) ; <nl> + <nl> + void unlinkHistory ( History * hist = 0 ) ; <nl> + <nl> + private : <nl> + <nl> + / / DWORD started ; <nl> + <nl> + History * history ; <nl> + IconedButton close ; <nl> + QPixmap pm ; <nl> + float64 alphaDuration , posDuration ; <nl> + QTimer hideTimer , inputTimer ; <nl> + bool hiding ; <nl> + int32 _index ; <nl> + anim : : fvalue aOpacity ; <nl> + anim : : transition aOpacityFunc ; <nl> + anim : : ivalue aY ; <nl> + ImagePtr peerPhoto ; <nl> + <nl> + } ; <nl> + <nl> + typedef QList < PsNotifyWindow * > PsNotifyWindows ; <nl> + <nl> + class PsMainWindow : public QMainWindow { <nl> + Q_OBJECT <nl> + <nl> + public : <nl> + PsMainWindow ( QWidget * parent = 0 ) ; <nl> + <nl> + int32 psResizeRowWidth ( ) const { <nl> + return 0 ; / / st : : wndResizeAreaWidth ; <nl> + } <nl> + <nl> + void psInitFrameless ( ) ; <nl> + void psInitSize ( ) ; <nl> + <nl> + void psFirstShow ( ) ; <nl> + void psInitSysMenu ( ) ; <nl> + void psUpdateSysMenu ( Qt : : WindowState state ) ; <nl> + void psUpdateMargins ( ) ; <nl> + void psUpdatedPosition ( ) ; <nl> + <nl> + bool psHandleTitle ( ) ; <nl> + <nl> + void psFlash ( ) ; <nl> + void psNotifySettingGot ( ) ; <nl> + <nl> + bool psIsActive ( int state = - 1 ) const ; <nl> + bool psIsOnline ( int state ) const ; <nl> + <nl> + void psUpdateWorkmode ( ) ; <nl> + <nl> + void psRefreshTaskbarIcon ( ) ; <nl> + virtual bool minimizeToTray ( ) { <nl> + return false ; <nl> + } <nl> + <nl> + void psNotify ( History * history , MsgId msgId ) ; <nl> + void psClearNotify ( History * history = 0 ) ; <nl> + void psClearNotifyFast ( ) ; <nl> + void psShowNextNotify ( PsNotifyWindow * remove = 0 ) ; <nl> + void psActivateNotifies ( ) ; <nl> + void psStopHiding ( ) ; <nl> + void psStartHiding ( ) ; <nl> + void psUpdateNotifies ( ) ; <nl> + <nl> + bool psPosInited ( ) const { <nl> + return posInited ; <nl> + } <nl> + <nl> + ~ PsMainWindow ( ) ; <nl> + <nl> + public slots : <nl> + <nl> + void psStateChanged ( Qt : : WindowState state ) ; <nl> + void psUpdateCounter ( ) ; <nl> + void psSavePosition ( Qt : : WindowState state = Qt : : WindowActive ) ; <nl> + void psIdleTimeout ( ) ; <nl> + void psNotifyFire ( ) ; <nl> + <nl> + protected : <nl> + <nl> + void psNotIdle ( ) const ; <nl> + <nl> + bool posInited ; <nl> + QSystemTrayIcon * trayIcon ; <nl> + QMenu * trayIconMenu ; <nl> + QImage icon256 ; <nl> + virtual void setupTrayIcon ( ) { <nl> + } <nl> + <nl> + typedef QMap < MsgId , uint64 > NotifyWhenMap ; <nl> + typedef QMap < History * , NotifyWhenMap > NotifyWhenMaps ; <nl> + NotifyWhenMaps notifyWhenMaps ; <nl> + struct NotifyWaiter { <nl> + NotifyWaiter ( MsgId msg , uint64 when ) : msg ( msg ) , when ( when ) { <nl> + } <nl> + MsgId msg ; <nl> + uint64 when ; <nl> + } ; <nl> + typedef QMap < History * , NotifyWaiter > NotifyWaiters ; <nl> + NotifyWaiters notifyWaiters ; <nl> + NotifyWaiters notifySettingWaiters ; <nl> + QTimer notifyWaitTimer ; <nl> + <nl> + typedef QSet < uint64 > NotifyWhenAlert ; <nl> + typedef QMap < History * , NotifyWhenAlert > NotifyWhenAlerts ; <nl> + NotifyWhenAlerts notifyWhenAlerts ; <nl> + <nl> + PsNotifyWindows notifyWindows ; <nl> + <nl> + QTimer psUpdatedPositionTimer ; <nl> + <nl> + private : <nl> + mutable bool psIdle ; <nl> + mutable QTimer psIdleTimer ; <nl> + } ; <nl> + <nl> + <nl> + class PsApplication : public QApplication { <nl> + Q_OBJECT <nl> + <nl> + public : <nl> + 
<nl> + PsApplication ( int & argc , char * * argv ) ; <nl> + void psInstallEventFilter ( ) ; <nl> + ~ PsApplication ( ) ; <nl> + <nl> + signals : <nl> + <nl> + void updateChecking ( ) ; <nl> + void updateLatest ( ) ; <nl> + void updateDownloading ( qint64 ready , qint64 total ) ; <nl> + void updateReady ( ) ; <nl> + void updateFailed ( ) ; <nl> + <nl> + } ; <nl> + <nl> + class PsUpdateDownloader : public QObject { <nl> + Q_OBJECT <nl> + <nl> + public : <nl> + PsUpdateDownloader ( QThread * thread , const MTPDhelp_appUpdate & update ) ; <nl> + PsUpdateDownloader ( QThread * thread , const QString & url ) ; <nl> + <nl> + void unpackUpdate ( ) ; <nl> + <nl> + int32 ready ( ) ; <nl> + int32 size ( ) ; <nl> + <nl> + static void deleteDir ( const QString & dir ) ; <nl> + static void clearAll ( ) ; <nl> + <nl> + ~ PsUpdateDownloader ( ) ; <nl> + <nl> + public slots : <nl> + <nl> + void start ( ) ; <nl> + void partMetaGot ( ) ; <nl> + void partFinished ( qint64 got , qint64 total ) ; <nl> + void partFailed ( QNetworkReply : : NetworkError e ) ; <nl> + void sendRequest ( ) ; <nl> + <nl> + private : <nl> + void initOutput ( ) ; <nl> + <nl> + void fatalFail ( ) ; <nl> + <nl> + QString updateUrl ; <nl> + QNetworkAccessManager manager ; <nl> + QNetworkReply * reply ; <nl> + int32 already , full ; <nl> + QFile outputFile ; <nl> + <nl> + QMutex mutex ; <nl> + <nl> + } ; <nl> + <nl> + void psActivateProcess ( uint64 pid ) ; <nl> + QString psLocalServerPrefix ( ) ; <nl> + QString psCurrentCountry ( ) ; <nl> + QString psCurrentLanguage ( ) ; <nl> + QString psAppDataPath ( ) ; <nl> + QString psCurrentExeDirectory ( int argc , char * argv [ ] ) ; <nl> + void psAutoStart ( bool start , bool silent = false ) ; <nl> + <nl> + int psCleanup ( ) ; <nl> + int psFixPrevious ( ) ; <nl> + <nl> + bool psCheckReadyUpdate ( ) ; <nl> + void psExecUpdater ( ) ; <nl> + void psExecTelegram ( ) ; <nl> + <nl> + void psPostprocessFile ( const QString & name ) ; <nl> + void psOpenFile ( const QString & name , bool openWith = false ) ; <nl> + void psShowInFolder ( const QString & name ) ; <nl> + void psFinish ( ) ; <nl> mmm a / Telegram / SourceFiles / settings . cpp <nl> ppp b / Telegram / SourceFiles / settings . cpp <nl> QString gLangFile ; <nl> bool gRetina = false ; <nl> float64 gRetinaFactor = 1 . ; <nl> int32 gIntRetinaFactor = 1 ; <nl> + # ifdef Q_OS_MAC <nl> bool gCustomNotifies = false ; <nl> - <nl> + # else <nl> + bool gCustomNotifies = true ; <nl> + # endif <nl> uint64 gInstance = 0 . ; <nl> <nl> # ifdef Q_OS_WIN <nl> mmm a / Telegram / SourceFiles / stdafx . cpp <nl> ppp b / Telegram / SourceFiles / stdafx . cpp <nl> Q_IMPORT_PLUGIN ( QWindowsIntegrationPlugin ) <nl> Q_IMPORT_PLUGIN ( QWindowsAudioPlugin ) <nl> Q_IMPORT_PLUGIN ( AccessibleFactory ) <nl> # endif <nl> + # ifdef Q_OS_LINUX <nl> + Q_IMPORT_PLUGIN ( QPulseAudioPlugin ) <nl> + # endif <nl> mmm a / Telegram / SourceFiles / title . cpp <nl> ppp b / Telegram / SourceFiles / title . cpp <nl> TitleWidget : : TitleWidget ( Window * window ) <nl> connect ( wnd - > windowHandle ( ) , SIGNAL ( windowStateChanged ( Qt : : WindowState ) ) , this , SLOT ( stateChanged ( Qt : : WindowState ) ) ) ; <nl> connect ( App : : app ( ) , SIGNAL ( updateReady ( ) ) , this , SLOT ( showUpdateBtn ( ) ) ) ; <nl> <nl> - if ( cPlatform ( ) = = dbipMac ) { <nl> + if ( cPlatform ( ) ! = dbipWindows ) { <nl> _minimize . hide ( ) ; <nl> _maximize . hide ( ) ; <nl> _restore . 
hide ( ) ; <nl> TitleWidget : : ~ TitleWidget ( ) { <nl> void TitleWidget : : resizeEvent ( QResizeEvent * e ) { <nl> QPoint p ( width ( ) - ( ( cPlatform ( ) = = dbipWindows & & lastMaximized ) ? 0 : st : : sysBtnDelta ) , 0 ) ; <nl> <nl> - if ( cPlatform ( ) ! = dbipMac ) { <nl> + if ( cPlatform ( ) = = dbipWindows ) { <nl> p . setX ( p . x ( ) - _close . width ( ) ) ; <nl> _close . move ( p ) ; <nl> <nl> mmm a / Telegram / SourceFiles / types . cpp <nl> ppp b / Telegram / SourceFiles / types . cpp <nl> Copyright ( c ) 2014 John Preston , https : / / tdesktop . com <nl> * / <nl> # include " stdafx . h " <nl> <nl> - # ifdef Q_OS_MAC <nl> + # ifdef Q_OS_WIN <nl> + # elif defined Q_OS_MAC <nl> # include < mach / mach_time . h > <nl> - # endif <nl> - <nl> - # ifdef Q_OS_LINUX <nl> + # else <nl> # include < time . h > <nl> # endif <nl> <nl> namespace { <nl> _msStart = mach_absolute_time ( ) ; <nl> # else <nl> timespec ts ; <nl> - clock_gettime ( CLOCK_REALTIME , & ts ) ; <nl> - _msStart = 1000000000 * uint64 ( ts . tv_sec ) + uint64 ( ts . tv_nsec ) ; <nl> + clock_gettime ( CLOCK_MONOTONIC , & ts ) ; <nl> + / / _msFreq = 1 / 1000000 . ; <nl> + _msgIdCoef = float64 ( 0xFFFF0000L ) / 1000000000 . ; <nl> + _msStart = 1000 * uint64 ( ts . tv_sec ) + ( uint64 ( ts . tv_nsec ) / 1000000 ) ; <nl> # endif <nl> <nl> srand ( ( uint32 ) ( _msStart & 0xFFFFFFFFL ) ) ; <nl> uint64 getms ( ) { <nl> return ( uint64 ) ( ( msCount - _msStart ) * _msFreq ) ; <nl> # else <nl> timespec ts ; <nl> - int res = clock_gettime ( CLOCK_REALTIME , & ts ) ; <nl> + int res = clock_gettime ( CLOCK_MONOTONIC , & ts ) ; <nl> if ( res ! = 0 ) { <nl> LOG ( ( " Bad clock_gettime result : % 1 " ) . arg ( res ) ) ; <nl> return 0 ; <nl> } <nl> - uint64 msCount = 1000000000 * uint64 ( ts . tv_sec ) + uint64 ( ts . tv_nsec ) ; <nl> - return ( uint64 ) ( ( msCount - _msStart ) / 1000000 ) ; <nl> + uint64 msCount = 1000 * uint64 ( ts . tv_sec ) + ( uint64 ( ts . tv_nsec ) / 1000000 ) ; <nl> + return ( uint64 ) ( msCount - _msStart ) ; <nl> # endif <nl> } <nl> <nl> uint64 msgid ( ) { <nl> uint64 msCount = mach_absolute_time ( ) ; <nl> uint64 result = _msgIdStart + ( uint64 ) floor ( ( msCount - _msgIdMsStart ) * _msgIdCoef ) ; <nl> # else <nl> - uint64 result = 0 ; <nl> - / / TODO <nl> + timespec ts ; <nl> + clock_gettime ( CLOCK_MONOTONIC , & ts ) ; <nl> + uint64 msCount = 1000000000 * uint64 ( ts . tv_sec ) + uint64 ( ts . tv_nsec ) ; <nl> + uint64 result = _msgIdStart + ( uint64 ) floor ( ( msCount - _msgIdMsStart ) * _msgIdCoef ) ; <nl> # endif <nl> <nl> result & = ~ 0x03L ; <nl> mmm a / Telegram / Telegram . pro <nl> ppp b / Telegram / Telegram . pro <nl> <nl> - QT + = core gui widgets network multimedia <nl> + QT + = core gui network multimedia widgets <nl> + <nl> + CONFIG + = plugin static <nl> <nl> CONFIG ( debug , debug | release ) { <nl> DEFINES + = _DEBUG <nl> - OBJECTS_DIR = . / . . / Mac / DebugIntermediate <nl> + OBJECTS_DIR = . / . . / Linux / DebugIntermediate <nl> MOC_DIR = . / GeneratedFiles / Debug <nl> RCC_DIR = . / GeneratedFiles <nl> - DESTDIR = . / . . / Mac / Debug <nl> + DESTDIR = . / . . / Linux / Debug <nl> } <nl> CONFIG ( release , debug | release ) { <nl> - OBJECTS_DIR = . / . . / Mac / ReleaseIntermediate <nl> + OBJECTS_DIR = . / . . / Linux / ReleaseIntermediate <nl> MOC_DIR = . / GeneratedFiles / Release <nl> RCC_DIR = . / GeneratedFiles <nl> - DESTDIR = . / . . / Mac / Release <nl> + DESTDIR = . / . . 
/ Linux / Release <nl> } <nl> <nl> macx { <nl> macx { <nl> QMAKE_LFLAGS + = - framework Cocoa <nl> } <nl> <nl> + linux { <nl> + SOURCES + = . / SourceFiles / pspecific_linux . cpp <nl> + HEADERS + = . / SourceFiles / pspecific_linux . h <nl> + } <nl> + <nl> SOURCES + = \ <nl> . / SourceFiles / main . cpp \ <nl> . / SourceFiles / stdafx . cpp \ <nl> CONFIG + = precompile_header <nl> <nl> PRECOMPILED_HEADER = . / SourceFiles / stdafx . h <nl> <nl> - INCLUDEPATH + = . / . . / . . / Libraries / QtStatic / qtbase / include / QtGui / 5 . 3 . 0 / QtGui \ <nl> - . / . . / . . / Libraries / QtStatic / qtbase / include / QtCore / 5 . 3 . 0 / QtCore \ <nl> + QMAKE_CXXFLAGS + = - fno - strict - aliasing <nl> + QMAKE_CXXFLAGS_WARN_ON + = - Wno - unused - parameter - Wno - unused - variable - Wno - switch - Wno - comment - Wno - unused - but - set - variable <nl> + <nl> + INCLUDEPATH + = . / . . / . . / Libraries / QtStatic / qtbase / include / QtGui / 5 . 3 . 1 / QtGui \ <nl> + . / . . / . . / Libraries / QtStatic / qtbase / include / QtCore / 5 . 3 . 1 / QtCore \ <nl> . / . . / . . / Libraries / QtStatic / qtbase / include \ <nl> . / SourceFiles \ <nl> . / GeneratedFiles \ <nl> - . / . . / . . / Libraries / lzma / C \ <nl> - . / . . / . . / Libraries / libexif - 0 . 6 . 20 <nl> - <nl> - LIBS + = - lcrypto - lssl - lz <nl> + . / . . / . . / Libraries / libexif - 0 . 6 . 20 \ <nl> + / usr / local / ssl / include <nl> + LIBS + = - L / usr / local / ssl / lib - lcrypto - lssl - lz - ldl - llzma <nl> LIBS + = . / . . / . . / Libraries / libexif - 0 . 6 . 20 / libexif / . libs / libexif . a <nl> - <nl> + LIBS + = . / . . / . . / Libraries / QtStatic / qtmultimedia / plugins / audio / libqtmedia_pulse . a <nl> RESOURCES + = \ <nl> . / SourceFiles / telegram . qrc <nl> <nl> new file mode 100644 <nl> index 00000000000 . . 94c4c1d0394 <nl> mmm / dev / null <nl> ppp b / Telegram / _qt_5_3_1_patch / qtbase / src / platformsupport / fontdatabases / basic / qbasicfontdatabase . cpp <nl> <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * * <nl> + * * Copyright ( C ) 2013 Digia Plc and / or its subsidiary ( - ies ) . <nl> + * * Contact : http : / / www . qt - project . org / legal <nl> + * * <nl> + * * This file is part of the plugins of the Qt Toolkit . <nl> + * * <nl> + * * $ QT_BEGIN_LICENSE : LGPL $ <nl> + * * Commercial License Usage <nl> + * * Licensees holding valid commercial Qt licenses may use this file in <nl> + * * accordance with the commercial license agreement provided with the <nl> + * * Software or , alternatively , in accordance with the terms contained in <nl> + * * a written agreement between you and Digia . For licensing terms and <nl> + * * conditions see http : / / qt . digia . com / licensing . For further information <nl> + * * use the contact form at http : / / qt . digia . com / contact - us . <nl> + * * <nl> + * * GNU Lesser General Public License Usage <nl> + * * Alternatively , this file may be used under the terms of the GNU Lesser <nl> + * * General Public License version 2 . 1 as published by the Free Software <nl> + * * Foundation and appearing in the file LICENSE . LGPL included in the <nl> + * * packaging of this file . Please review the following information to <nl> + * * ensure the GNU Lesser General Public License version 2 . 1 requirements <nl> + * * will be met : http : / / www . gnu . org / licenses / old - licenses / lgpl - 2 . 1 . html . 
<nl> + * * <nl> + * * In addition , as a special exception , Digia gives you certain additional <nl> + * * rights . These rights are described in the Digia Qt LGPL Exception <nl> + * * version 1 . 1 , included in the file LGPL_EXCEPTION . txt in this package . <nl> + * * <nl> + * * GNU General Public License Usage <nl> + * * Alternatively , this file may be used under the terms of the GNU <nl> + * * General Public License version 3 . 0 as published by the Free Software <nl> + * * Foundation and appearing in the file LICENSE . GPL included in the <nl> + * * packaging of this file . Please review the following information to <nl> + * * ensure the GNU General Public License version 3 . 0 requirements will be <nl> + * * met : http : / / www . gnu . org / copyleft / gpl . html . <nl> + * * <nl> + * * <nl> + * * $ QT_END_LICENSE $ <nl> + * * <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + # include " qbasicfontdatabase_p . h " <nl> + <nl> + # include < QtGui / private / qguiapplication_p . h > <nl> + # include < qpa / qplatformscreen . h > <nl> + <nl> + # include < QtCore / QFile > <nl> + # include < QtCore / QLibraryInfo > <nl> + # include < QtCore / QDir > <nl> + # include < QtCore / QUuid > <nl> + # include < QtCore / QtEndian > <nl> + <nl> + # undef QT_NO_FREETYPE <nl> + # include < QtGui / private / qfontengine_ft_p . h > <nl> + # include < QtGui / private / qfontengine_p . h > <nl> + <nl> + # include < ft2build . h > <nl> + # include FT_TRUETYPE_TABLES_H <nl> + # include FT_ERRORS_H <nl> + <nl> + QT_BEGIN_NAMESPACE <nl> + <nl> + typedef struct { <nl> + quint16 majorVersion ; <nl> + quint16 minorVersion ; <nl> + quint16 numTables ; <nl> + quint16 searchRange ; <nl> + quint16 entrySelector ; <nl> + quint16 rangeShift ; <nl> + } OFFSET_TABLE ; <nl> + <nl> + typedef struct { <nl> + quint32 tag ; <nl> + quint32 checkSum ; <nl> + quint32 offset ; <nl> + quint32 length ; <nl> + } TABLE_DIRECTORY ; <nl> + <nl> + typedef struct { <nl> + quint16 fontSelector ; <nl> + quint16 nrCount ; <nl> + quint16 storageOffset ; <nl> + } NAME_TABLE_HEADER ; <nl> + <nl> + typedef struct { <nl> + quint16 platformID ; <nl> + quint16 encodingID ; <nl> + quint16 languageID ; <nl> + quint16 nameID ; <nl> + quint16 stringLength ; <nl> + quint16 stringOffset ; <nl> + } NAME_RECORD ; <nl> + <nl> + void QBasicFontDatabase : : populateFontDatabase ( ) <nl> + { <nl> + QString fontpath = fontDir ( ) ; <nl> + <nl> + if ( ! QFile : : exists ( fontpath ) ) { <nl> + qFatal ( " QFontDatabase : Cannot find font directory % s - is Qt installed correctly ? " , <nl> + qPrintable ( fontpath ) ) ; <nl> + } <nl> + <nl> + QDir dir ( fontpath ) ; <nl> + dir . setNameFilters ( QStringList ( ) < < QLatin1String ( " * . ttf " ) <nl> + < < QLatin1String ( " * . ttc " ) < < QLatin1String ( " * . pfa " ) <nl> + < < QLatin1String ( " * . pfb " ) <nl> + < < QLatin1String ( " * . otf " ) ) ; <nl> + dir . refresh ( ) ; <nl> + for ( int i = 0 ; i < int ( dir . count ( ) ) ; + + i ) { <nl> + const QByteArray file = QFile : : encodeName ( dir . absoluteFilePath ( dir [ i ] ) ) ; <nl> + / / qDebug ( ) < < " looking at " < < file ; <nl> + addTTFile ( QByteArray ( ) , file ) ; <nl> + } <nl> + } <nl> + <nl> + QFontEngine * QBasicFontDatabase : : fontEngine ( const QFontDef & fontDef , void * usrPtr ) <nl> + { <nl> + FontFile * fontfile = static_cast < FontFile * > ( usrPtr ) ; <nl> + QFontEngine : : FaceId fid ; <nl> + fid . 
filename = QFile : : encodeName ( fontfile - > fileName ) ; <nl> + fid . index = fontfile - > indexValue ; <nl> + <nl> + bool antialias = ! ( fontDef . styleStrategy & QFont : : NoAntialias ) ; <nl> + QFontEngineFT : : GlyphFormat format = antialias ? QFontEngineFT : : Format_A8 : QFontEngineFT : : Format_Mono ; <nl> + <nl> + QFontEngineFT * engine = new QFontEngineFT ( fontDef ) ; <nl> + if ( ! engine - > init ( fid , antialias , format ) | | engine - > invalid ( ) ) { <nl> + delete engine ; <nl> + engine = 0 ; <nl> + } <nl> + <nl> + return engine ; <nl> + } <nl> + <nl> + namespace { <nl> + <nl> + class QFontEngineFTRawData : public QFontEngineFT <nl> + { <nl> + public : <nl> + QFontEngineFTRawData ( const QFontDef & fontDef ) : QFontEngineFT ( fontDef ) <nl> + { <nl> + } <nl> + <nl> + void updateFamilyNameAndStyle ( ) <nl> + { <nl> + fontDef . family = QString : : fromLatin1 ( freetype - > face - > family_name ) ; <nl> + <nl> + if ( freetype - > face - > style_flags & FT_STYLE_FLAG_ITALIC ) <nl> + fontDef . style = QFont : : StyleItalic ; <nl> + <nl> + if ( freetype - > face - > style_flags & FT_STYLE_FLAG_BOLD ) <nl> + fontDef . weight = QFont : : Bold ; <nl> + } <nl> + <nl> + bool initFromData ( const QByteArray & fontData ) <nl> + { <nl> + FaceId faceId ; <nl> + faceId . filename = " " ; <nl> + faceId . index = 0 ; <nl> + faceId . uuid = QUuid : : createUuid ( ) . toByteArray ( ) ; <nl> + <nl> + return init ( faceId , true , Format_None , fontData ) ; <nl> + } <nl> + } ; <nl> + <nl> + } <nl> + <nl> + QFontEngine * QBasicFontDatabase : : fontEngine ( const QByteArray & fontData , qreal pixelSize , <nl> + QFont : : HintingPreference hintingPreference ) <nl> + { <nl> + QFontDef fontDef ; <nl> + fontDef . pixelSize = pixelSize ; <nl> + fontDef . hintingPreference = hintingPreference ; <nl> + <nl> + QFontEngineFTRawData * fe = new QFontEngineFTRawData ( fontDef ) ; <nl> + if ( ! fe - > initFromData ( fontData ) ) { <nl> + delete fe ; <nl> + return 0 ; <nl> + } <nl> + <nl> + fe - > updateFamilyNameAndStyle ( ) ; <nl> + <nl> + switch ( hintingPreference ) { <nl> + case QFont : : PreferNoHinting : <nl> + fe - > setDefaultHintStyle ( QFontEngineFT : : HintNone ) ; <nl> + break ; <nl> + case QFont : : PreferFullHinting : <nl> + fe - > setDefaultHintStyle ( QFontEngineFT : : HintFull ) ; <nl> + break ; <nl> + case QFont : : PreferVerticalHinting : <nl> + fe - > setDefaultHintStyle ( QFontEngineFT : : HintLight ) ; <nl> + break ; <nl> + default : <nl> + / / Leave it as it is <nl> + break ; <nl> + } <nl> + <nl> + return fe ; <nl> + } <nl> + <nl> + QStringList QBasicFontDatabase : : fallbacksForFamily ( const QString & family , QFont : : Style style , QFont : : StyleHint styleHint , QChar : : Script script ) const <nl> + { <nl> + Q_UNUSED ( family ) ; <nl> + Q_UNUSED ( style ) ; <nl> + Q_UNUSED ( script ) ; <nl> + Q_UNUSED ( styleHint ) ; <nl> + return QStringList ( ) ; <nl> + } <nl> + <nl> + QStringList QBasicFontDatabase : : addApplicationFont ( const QByteArray & fontData , const QString & fileName ) <nl> + { <nl> + return addTTFile ( fontData , fileName . 
toLocal8Bit ( ) ) ; <nl> + } <nl> + <nl> + void QBasicFontDatabase : : releaseHandle ( void * handle ) <nl> + { <nl> + FontFile * file = static_cast < FontFile * > ( handle ) ; <nl> + delete file ; <nl> + } <nl> + <nl> + extern FT_Library qt_getFreetype ( ) ; <nl> + <nl> + / / copied from freetype with some modifications <nl> + <nl> + # ifndef FT_PARAM_TAG_IGNORE_PREFERRED_FAMILY <nl> + # define FT_PARAM_TAG_IGNORE_PREFERRED_FAMILY FT_MAKE_TAG ( ' i ' , ' g ' , ' p ' , ' f ' ) <nl> + # endif <nl> + <nl> + # ifndef FT_PARAM_TAG_IGNORE_PREFERRED_SUBFAMILY <nl> + # define FT_PARAM_TAG_IGNORE_PREFERRED_SUBFAMILY FT_MAKE_TAG ( ' i ' , ' g ' , ' p ' , ' s ' ) <nl> + # endif <nl> + <nl> + / * there ' s a Mac - specific extended implementation of FT_New_Face ( ) * / <nl> + / * in src / base / ftmac . c * / <nl> + <nl> + # if ! defined ( FT_MACINTOSH ) | | defined ( DARWIN_NO_CARBON ) <nl> + <nl> + / * documentation is in freetype . h * / <nl> + <nl> + FT_Error __ft_New_Face ( FT_Library library , const char * pathname , FT_Long face_index , FT_Face * aface ) { <nl> + FT_Open_Args args ; <nl> + <nl> + / * test for valid ` library ' and ` aface ' delayed to FT_Open_Face ( ) * / <nl> + if ( ! pathname ) <nl> + return FT_Err_Invalid_Argument ; <nl> + <nl> + FT_Parameter params [ 2 ] ; <nl> + params [ 0 ] . tag = FT_PARAM_TAG_IGNORE_PREFERRED_FAMILY ; <nl> + params [ 0 ] . data = 0 ; <nl> + params [ 1 ] . tag = FT_PARAM_TAG_IGNORE_PREFERRED_SUBFAMILY ; <nl> + params [ 1 ] . data = 0 ; <nl> + args . flags = FT_OPEN_PATHNAME | FT_OPEN_PARAMS ; <nl> + args . pathname = ( char * ) pathname ; <nl> + args . stream = NULL ; <nl> + args . num_params = 2 ; <nl> + args . params = params ; <nl> + <nl> + return FT_Open_Face ( library , & args , face_index , aface ) ; <nl> + } <nl> + <nl> + # else <nl> + <nl> + FT_Error __ft_New_Face ( FT_Library library , const char * pathname , FT_Long face_index , FT_Face * aface ) { <nl> + return FT_New_Face ( library , pathname , face_index , aface ) ; <nl> + } <nl> + <nl> + # endif / * defined ( FT_MACINTOSH ) & & ! defined ( DARWIN_NO_CARBON ) * / <nl> + <nl> + / * documentation is in freetype . h * / <nl> + <nl> + FT_Error __ft_New_Memory_Face ( FT_Library library , const FT_Byte * file_base , FT_Long file_size , FT_Long face_index , FT_Face * aface ) { <nl> + FT_Open_Args args ; <nl> + <nl> + / * test for valid ` library ' and ` face ' delayed to FT_Open_Face ( ) * / <nl> + if ( ! file_base ) <nl> + return FT_Err_Invalid_Argument ; <nl> + <nl> + FT_Parameter params [ 2 ] ; <nl> + params [ 0 ] . tag = FT_PARAM_TAG_IGNORE_PREFERRED_FAMILY ; <nl> + params [ 0 ] . data = 0 ; <nl> + params [ 1 ] . tag = FT_PARAM_TAG_IGNORE_PREFERRED_SUBFAMILY ; <nl> + params [ 1 ] . data = 0 ; <nl> + args . flags = FT_OPEN_MEMORY | FT_OPEN_PARAMS ; <nl> + args . memory_base = file_base ; <nl> + args . memory_size = file_size ; <nl> + args . stream = NULL ; <nl> + args . num_params = 2 ; <nl> + args . params = params ; <nl> + <nl> + return FT_Open_Face ( library , & args , face_index , aface ) ; <nl> + } <nl> + <nl> + / / end <nl> + <nl> + QStringList QBasicFontDatabase : : addTTFile ( const QByteArray & fontData , const QByteArray & file , QSupportedWritingSystems * supportedWritingSystems ) <nl> + { <nl> + FT_Library library = qt_getFreetype ( ) ; <nl> + <nl> + int index = 0 ; <nl> + int numFaces = 0 ; <nl> + QStringList families ; <nl> + do { <nl> + FT_Face face ; <nl> + FT_Error error ; <nl> + if ( ! fontData . 
isEmpty ( ) ) { <nl> + error = __ft_New_Memory_Face ( library , ( const FT_Byte * ) fontData . constData ( ) , fontData . size ( ) , index , & face ) ; <nl> + } else { <nl> + error = __ft_New_Face ( library , file . constData ( ) , index , & face ) ; <nl> + } <nl> + if ( error ! = FT_Err_Ok ) { <nl> + qDebug ( ) < < " FT_New_Face failed with index " < < index < < " : " < < hex < < error ; <nl> + break ; <nl> + } <nl> + numFaces = face - > num_faces ; <nl> + <nl> + QFont : : Weight weight = QFont : : Normal ; <nl> + <nl> + QFont : : Style style = QFont : : StyleNormal ; <nl> + if ( face - > style_flags & FT_STYLE_FLAG_ITALIC ) <nl> + style = QFont : : StyleItalic ; <nl> + <nl> + if ( face - > style_flags & FT_STYLE_FLAG_BOLD ) <nl> + weight = QFont : : Bold ; <nl> + <nl> + bool fixedPitch = ( face - > face_flags & FT_FACE_FLAG_FIXED_WIDTH ) ; <nl> + <nl> + QSupportedWritingSystems writingSystems ; <nl> + / / detect symbol fonts <nl> + for ( int i = 0 ; i < face - > num_charmaps ; + + i ) { <nl> + FT_CharMap cm = face - > charmaps [ i ] ; <nl> + if ( cm - > encoding = = FT_ENCODING_ADOBE_CUSTOM <nl> + | | cm - > encoding = = FT_ENCODING_MS_SYMBOL ) { <nl> + writingSystems . setSupported ( QFontDatabase : : Symbol ) ; <nl> + if ( supportedWritingSystems ) <nl> + supportedWritingSystems - > setSupported ( QFontDatabase : : Symbol ) ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + TT_OS2 * os2 = ( TT_OS2 * ) FT_Get_Sfnt_Table ( face , ft_sfnt_os2 ) ; <nl> + if ( os2 ) { <nl> + quint32 unicodeRange [ 4 ] = { <nl> + quint32 ( os2 - > ulUnicodeRange1 ) , <nl> + quint32 ( os2 - > ulUnicodeRange2 ) , <nl> + quint32 ( os2 - > ulUnicodeRange3 ) , <nl> + quint32 ( os2 - > ulUnicodeRange4 ) <nl> + } ; <nl> + quint32 codePageRange [ 2 ] = { <nl> + quint32 ( os2 - > ulCodePageRange1 ) , <nl> + quint32 ( os2 - > ulCodePageRange2 ) <nl> + } ; <nl> + <nl> + writingSystems = QPlatformFontDatabase : : writingSystemsFromTrueTypeBits ( unicodeRange , codePageRange ) ; <nl> + if ( supportedWritingSystems ) <nl> + * supportedWritingSystems = writingSystems ; <nl> + <nl> + if ( os2 - > usWeightClass = = 0 ) <nl> + ; <nl> + else if ( os2 - > usWeightClass < 350 ) <nl> + weight = QFont : : Light ; <nl> + else if ( os2 - > usWeightClass < 450 ) <nl> + weight = QFont : : Normal ; <nl> + else if ( os2 - > usWeightClass < 650 ) <nl> + weight = QFont : : DemiBold ; <nl> + else if ( os2 - > usWeightClass < 750 ) <nl> + weight = QFont : : Bold ; <nl> + else if ( os2 - > usWeightClass < 1000 ) <nl> + weight = QFont : : Black ; <nl> + <nl> + if ( os2 - > panose [ 2 ] > = 2 ) { <nl> + int w = os2 - > panose [ 2 ] ; <nl> + if ( w < = 3 ) <nl> + weight = QFont : : Light ; <nl> + else if ( w < = 5 ) <nl> + weight = QFont : : Normal ; <nl> + else if ( w < = 7 ) <nl> + weight = QFont : : DemiBold ; <nl> + else if ( w < = 8 ) <nl> + weight = QFont : : Bold ; <nl> + else if ( w < = 10 ) <nl> + weight = QFont : : Black ; <nl> + } <nl> + } <nl> + <nl> + QString family = QString : : fromLatin1 ( face - > family_name ) ; <nl> + FontFile * fontFile = new FontFile ; <nl> + fontFile - > fileName = QFile : : decodeName ( file ) ; <nl> + fontFile - > indexValue = index ; <nl> + <nl> + QFont : : Stretch stretch = QFont : : Unstretched ; <nl> + <nl> + registerFont ( family , QString : : fromLatin1 ( face - > style_name ) , QString ( ) , weight , style , stretch , true , true , 0 , fixedPitch , writingSystems , fontFile ) ; <nl> + <nl> + families . 
append ( family ) ; <nl> + <nl> + FT_Done_Face ( face ) ; <nl> + + + index ; <nl> + } while ( index < numFaces ) ; <nl> + return families ; <nl> + } <nl> + <nl> + QString QBasicFontDatabase : : fontNameFromTTFile ( const QString & filename ) <nl> + { <nl> + QFile f ( filename ) ; <nl> + QString retVal ; <nl> + qint64 bytesRead ; <nl> + qint64 bytesToRead ; <nl> + <nl> + if ( f . open ( QIODevice : : ReadOnly ) ) { <nl> + OFFSET_TABLE ttOffsetTable ; <nl> + bytesToRead = sizeof ( OFFSET_TABLE ) ; <nl> + bytesRead = f . read ( ( char * ) & ttOffsetTable , bytesToRead ) ; <nl> + if ( bytesToRead ! = bytesRead ) <nl> + return retVal ; <nl> + ttOffsetTable . numTables = qFromBigEndian ( ttOffsetTable . numTables ) ; <nl> + ttOffsetTable . majorVersion = qFromBigEndian ( ttOffsetTable . majorVersion ) ; <nl> + ttOffsetTable . minorVersion = qFromBigEndian ( ttOffsetTable . minorVersion ) ; <nl> + <nl> + if ( ttOffsetTable . majorVersion ! = 1 | | ttOffsetTable . minorVersion ! = 0 ) <nl> + return retVal ; <nl> + <nl> + TABLE_DIRECTORY tblDir ; <nl> + bool found = false ; <nl> + <nl> + for ( int i = 0 ; i < ttOffsetTable . numTables ; i + + ) { <nl> + bytesToRead = sizeof ( TABLE_DIRECTORY ) ; <nl> + bytesRead = f . read ( ( char * ) & tblDir , bytesToRead ) ; <nl> + if ( bytesToRead ! = bytesRead ) <nl> + return retVal ; <nl> + if ( qFromBigEndian ( tblDir . tag ) = = MAKE_TAG ( ' n ' , ' a ' , ' m ' , ' e ' ) ) { <nl> + found = true ; <nl> + tblDir . length = qFromBigEndian ( tblDir . length ) ; <nl> + tblDir . offset = qFromBigEndian ( tblDir . offset ) ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + if ( found ) { <nl> + f . seek ( tblDir . offset ) ; <nl> + NAME_TABLE_HEADER ttNTHeader ; <nl> + bytesToRead = sizeof ( NAME_TABLE_HEADER ) ; <nl> + bytesRead = f . read ( ( char * ) & ttNTHeader , bytesToRead ) ; <nl> + if ( bytesToRead ! = bytesRead ) <nl> + return retVal ; <nl> + ttNTHeader . nrCount = qFromBigEndian ( ttNTHeader . nrCount ) ; <nl> + ttNTHeader . storageOffset = qFromBigEndian ( ttNTHeader . storageOffset ) ; <nl> + NAME_RECORD ttRecord ; <nl> + found = false ; <nl> + <nl> + for ( int i = 0 ; i < ttNTHeader . nrCount ; i + + ) { <nl> + bytesToRead = sizeof ( NAME_RECORD ) ; <nl> + bytesRead = f . read ( ( char * ) & ttRecord , bytesToRead ) ; <nl> + if ( bytesToRead ! = bytesRead ) <nl> + return retVal ; <nl> + ttRecord . nameID = qFromBigEndian ( ttRecord . nameID ) ; <nl> + if ( ttRecord . nameID = = 1 ) { <nl> + ttRecord . stringLength = qFromBigEndian ( ttRecord . stringLength ) ; <nl> + ttRecord . stringOffset = qFromBigEndian ( ttRecord . stringOffset ) ; <nl> + int nPos = f . pos ( ) ; <nl> + f . seek ( tblDir . offset + ttRecord . stringOffset + ttNTHeader . storageOffset ) ; <nl> + <nl> + QByteArray nameByteArray = f . read ( ttRecord . stringLength ) ; <nl> + if ( ! nameByteArray . isEmpty ( ) ) { <nl> + if ( ttRecord . encodingID = = 256 | | ttRecord . encodingID = = 768 ) { <nl> + / / This is UTF - 16 in big endian <nl> + int stringLength = ttRecord . stringLength / 2 ; <nl> + retVal . resize ( stringLength ) ; <nl> + QChar * data = retVal . data ( ) ; <nl> + const ushort * srcData = ( const ushort * ) nameByteArray . data ( ) ; <nl> + for ( int i = 0 ; i < stringLength ; + + i ) <nl> + data [ i ] = qFromBigEndian ( srcData [ i ] ) ; <nl> + return retVal ; <nl> + } else if ( ttRecord . 
encodingID = = 0 ) { <nl> + / / This is Latin1 <nl> + retVal = QString : : fromLatin1 ( nameByteArray ) ; <nl> + } else { <nl> + qWarning ( " Could not retrieve Font name from file : % s " , qPrintable ( QDir : : toNativeSeparators ( filename ) ) ) ; <nl> + } <nl> + break ; <nl> + } <nl> + f . seek ( nPos ) ; <nl> + } <nl> + } <nl> + } <nl> + f . close ( ) ; <nl> + } <nl> + return retVal ; <nl> + } <nl> + <nl> + QT_END_NAMESPACE <nl> mmm a / Telegram / telegram_plugin_import . cpp <nl> ppp b / Telegram / telegram_plugin_import . cpp <nl> <nl> / / This file is autogenerated by qmake . It imports static plugin classes for <nl> / / static plugins specified using QTPLUGIN and QT_PLUGIN_CLASS . < plugin > variables . <nl> # include < QtPlugin > <nl> - / / Q_IMPORT_PLUGIN ( AVFServicePlugin ) <nl> - Q_IMPORT_PLUGIN ( AVFMediaPlayerServicePlugin ) <nl> - Q_IMPORT_PLUGIN ( QT7ServicePlugin ) <nl> Q_IMPORT_PLUGIN ( AudioCaptureServicePlugin ) <nl> - Q_IMPORT_PLUGIN ( CoreAudioPlugin ) <nl> Q_IMPORT_PLUGIN ( QM3uPlaylistPlugin ) <nl> Q_IMPORT_PLUGIN ( AccessibleFactory ) <nl> - Q_IMPORT_PLUGIN ( QCoreWlanEnginePlugin ) <nl> Q_IMPORT_PLUGIN ( QGenericEnginePlugin ) <nl> - Q_IMPORT_PLUGIN ( QCocoaIntegrationPlugin ) <nl> + Q_IMPORT_PLUGIN ( QXcbIntegrationPlugin ) <nl> Q_IMPORT_PLUGIN ( QDDSPlugin ) <nl> Q_IMPORT_PLUGIN ( QICNSPlugin ) <nl> Q_IMPORT_PLUGIN ( QICOPlugin ) <nl>
|
first version ubuntu qt creator build
|
telegramdesktop/tdesktop
|
da0f087404d1d0b8b3ba2a5a71fb6d7aef442913
|
2014-07-08T10:24:21Z
|
mmm a / platform / javascript / javascript_main . cpp <nl> ppp b / platform / javascript / javascript_main . cpp <nl> static void _glut_mouse_button ( int button , int state , int x , int y ) { <nl> <nl> if ( ev . mouse_button . button_index < 4 ) { <nl> if ( ev . mouse_button . pressed ) { <nl> - _mouse_button_mask | = 1 < < ev . mouse_button . button_index ; <nl> + _mouse_button_mask | = 1 < < ( ev . mouse_button . button_index - 1 ) ; <nl> } else { <nl> - _mouse_button_mask & = ~ ( 1 < < ev . mouse_button . button_index ) ; <nl> + _mouse_button_mask & = ~ ( 1 < < ( ev . mouse_button . button_index - 1 ) ) ; <nl> } <nl> } <nl> + ev . mouse_button . button_mask = _mouse_button_mask ; <nl> <nl> uint32_t m = glutGetModifiers ( ) ; <nl> ev . mouse_button . mod . alt = ( m & GLUT_ACTIVE_ALT ) ! = 0 ; <nl> static void _glut_mouse_button ( int button , int state , int x , int y ) { <nl> <nl> os - > push_input ( ev ) ; <nl> <nl> + if ( ev . mouse_button . button_index = = BUTTON_WHEEL_UP | | ev . mouse_button . button_index = = BUTTON_WHEEL_DOWN ) { <nl> + / / GLUT doesn ' t send release events for mouse wheel , so send manually <nl> + ev . mouse_button . pressed = false ; <nl> + os - > push_input ( ev ) ; <nl> + } <nl> } <nl> <nl> <nl> int main ( int argc , char * argv [ ] ) { <nl> <nl> glutMouseFunc ( _glut_mouse_button ) ; <nl> glutMotionFunc ( _glut_mouse_motion ) ; <nl> - glutMotionFunc ( _glut_mouse_motion ) ; <nl> glutPassiveMotionFunc ( _glut_mouse_motion ) ; <nl> <nl> <nl> mmm a / platform / javascript / os_javascript . cpp <nl> ppp b / platform / javascript / os_javascript . cpp <nl> bool OS_JavaScript : : is_mouse_grab_enabled ( ) const { <nl> / / * sigh * technology has evolved so much since i was a kid . . <nl> return false ; <nl> } <nl> + <nl> Point2 OS_JavaScript : : get_mouse_pos ( ) const { <nl> <nl> - return Point2 ( ) ; <nl> + return input - > get_mouse_pos ( ) ; <nl> } <nl> + <nl> int OS_JavaScript : : get_mouse_button_state ( ) const { <nl> <nl> - return 0 ; <nl> + return last_button_mask ; <nl> } <nl> <nl> void OS_JavaScript : : set_window_title ( const String & p_title ) { <nl> void OS_JavaScript : : push_input ( const InputEvent & p_ev ) { <nl> if ( ev . type = = InputEvent : : MOUSE_MOTION ) { <nl> input - > set_mouse_pos ( Point2 ( ev . mouse_motion . x , ev . mouse_motion . y ) ) ; <nl> } <nl> + else if ( ev . type = = InputEvent : : MOUSE_BUTTON ) { <nl> + last_button_mask = ev . mouse_button . button_mask ; <nl> + } <nl> input - > parse_input_event ( p_ev ) ; <nl> } <nl> <nl> OS_JavaScript : : OS_JavaScript ( GFXInitFunc p_gfx_init_func , void * p_gfx_init_ud , Get <nl> <nl> gfx_init_func = p_gfx_init_func ; <nl> gfx_init_ud = p_gfx_init_ud ; <nl> + last_button_mask = 0 ; <nl> main_loop = NULL ; <nl> last_id = 1 ; <nl> gl_extensions = NULL ; <nl> mmm a / platform / javascript / os_javascript . h <nl> ppp b / platform / javascript / os_javascript . h <nl> class OS_JavaScript : public OS_Unix { <nl> <nl> Vector < TouchPos > touch ; <nl> Point2 last_mouse ; <nl> + int last_button_mask ; <nl> unsigned int last_id ; <nl> GFXInitFunc gfx_init_func ; <nl> void * gfx_init_ud ; <nl>
|
Fix some mouse bugs in WebAssembly / asm . js
|
godotengine/godot
|
30e9ea5a8203e0f059c533def90c42f7ca7dc7c5
|
2016-11-30T21:02:32Z
|
mmm a / drivers / gles2 / shaders / scene . glsl <nl> ppp b / drivers / gles2 / shaders / scene . glsl <nl> void light_compute ( <nl> # endif <nl> <nl> SRGB_APPROX ( specular_brdf_NL ) <nl> - specular_interp + = specular_brdf_NL * light_color * attenuation ; <nl> + specular_interp + = specular_brdf_NL * light_color * attenuation * ( 1 . 0 / M_PI ) ; <nl> } <nl> } <nl> <nl> FRAGMENT_SHADER_CODE <nl> <nl> # endif / / defined ( USE_REFLECTION_PROBE1 ) | | defined ( USE_REFLECTION_PROBE2 ) <nl> <nl> - / / scales the specular reflections , needs to be be computed before lighting happens , <nl> - / / but after environment and reflection probes are added <nl> - / / TODO : this curve is not really designed for gammaspace , should be adjusted <nl> - const vec4 c0 = vec4 ( - 1 . 0 , - 0 . 0275 , - 0 . 572 , 0 . 022 ) ; <nl> - const vec4 c1 = vec4 ( 1 . 0 , 0 . 0425 , 1 . 04 , - 0 . 04 ) ; <nl> - vec4 r = roughness * c0 + c1 ; <nl> - float ndotv = clamp ( dot ( normal , eye_position ) , 0 . 0 , 1 . 0 ) ; <nl> - float a004 = min ( r . x * r . x , exp2 ( - 9 . 28 * ndotv ) ) * r . x + r . y ; <nl> - vec2 env = vec2 ( - 1 . 04 , 1 . 04 ) * a004 + r . zw ; <nl> + / / environment BRDF approximation <nl> <nl> - vec3 f0 = F0 ( metallic , specular , albedo ) ; <nl> - specular_light * = env . x * f0 + env . y ; <nl> + { <nl> + <nl> + # if defined ( DIFFUSE_TOON ) <nl> + / / simplify for toon , as <nl> + specular_light * = specular * metallic * albedo * 2 . 0 ; <nl> + # else <nl> + <nl> + / / scales the specular reflections , needs to be be computed before lighting happens , <nl> + / / but after environment and reflection probes are added <nl> + / / TODO : this curve is not really designed for gammaspace , should be adjusted <nl> + const vec4 c0 = vec4 ( - 1 . 0 , - 0 . 0275 , - 0 . 572 , 0 . 022 ) ; <nl> + const vec4 c1 = vec4 ( 1 . 0 , 0 . 0425 , 1 . 04 , - 0 . 04 ) ; <nl> + vec4 r = roughness * c0 + c1 ; <nl> + float ndotv = clamp ( dot ( normal , eye_position ) , 0 . 0 , 1 . 0 ) ; <nl> + float a004 = min ( r . x * r . x , exp2 ( - 9 . 28 * ndotv ) ) * r . x + r . y ; <nl> + vec2 env = vec2 ( - 1 . 04 , 1 . 04 ) * a004 + r . zw ; <nl> + <nl> + vec3 f0 = F0 ( metallic , specular , albedo ) ; <nl> + specular_light * = env . x * f0 + env . y ; <nl> + <nl> + # endif <nl> + } <nl> <nl> # ifdef USE_LIGHTMAP <nl> / / ambient light will come entirely from lightmap is lightmap is used <nl> FRAGMENT_SHADER_CODE <nl> specular_light + = specular_interp * specular_blob_intensity * light_att ; <nl> diffuse_light + = diffuse_interp * albedo * light_att ; <nl> <nl> + / / Same as above , needed for VERTEX_LIGHTING or else lights are too bright <nl> + const vec4 c0 = vec4 ( - 1 . 0 , - 0 . 0275 , - 0 . 572 , 0 . 022 ) ; <nl> + const vec4 c1 = vec4 ( 1 . 0 , 0 . 0425 , 1 . 04 , - 0 . 04 ) ; <nl> + vec4 r = roughness * c0 + c1 ; <nl> + float ndotv = clamp ( dot ( normal , eye_position ) , 0 . 0 , 1 . 0 ) ; <nl> + float a004 = min ( r . x * r . x , exp2 ( - 9 . 28 * ndotv ) ) * r . x + r . y ; <nl> + vec2 env = vec2 ( - 1 . 04 , 1 . 04 ) * a004 + r . zw ; <nl> + <nl> + vec3 f0 = F0 ( metallic , specular , albedo ) ; <nl> + specular_light * = env . x * f0 + env . y ; <nl> + <nl> # else <nl> / / fragment lighting <nl> light_compute ( <nl> FRAGMENT_SHADER_CODE <nl> diffuse_light * = 1 . 0 - metallic ; <nl> ambient_light * = 1 . 
0 - metallic ; <nl> <nl> - / / environment BRDF approximation <nl> - <nl> - { <nl> - <nl> - # if defined ( DIFFUSE_TOON ) <nl> - / / simplify for toon , as <nl> - specular_light * = specular * metallic * albedo * 2 . 0 ; <nl> - # endif <nl> - } <nl> - <nl> gl_FragColor = vec4 ( ambient_light + diffuse_light + specular_light , alpha ) ; <nl> <nl> / / add emission if in base pass <nl>
|
Merge pull request from clayjohn / gles2 - light - scale
|
godotengine/godot
|
b0eeb1233580420bfcbbdd989e4a1beeb4b5fc7a
|
2019-07-01T14:28:17Z
|
mmm a / arangod / Agency / AgencyCommon . h <nl> ppp b / arangod / Agency / AgencyCommon . h <nl> typedef enum AGENCY_STATUS { <nl> OK = 0 , <nl> RETRACTED_CANDIDACY_FOR_HIGHER_TERM , / / Vote for higher term candidate <nl> / / while running . Strange ! <nl> - RESIGNED_LEADERSHIP_FOR_HIGHER_TERM / / Vote for higher term candidate <nl> + RESIGNED_LEADERSHIP_FOR_HIGHER_TERM , / / Vote for higher term candidate <nl> / / while leading . Very bad ! <nl> + LOWER_TERM_APPEND_ENTRIES_RPC , <nl> + NO_MATCHING_PREVLOG <nl> } status_t ; <nl> <nl> typedef uint64_t term_t ; / / Term type <nl> template < class T > struct Config { <nl> append_entries_retry_interval ( appent_i ) , end_points ( end_p ) { } <nl> / * void print ( arangodb : : LoggerStream & l ) const { <nl> l < < " Config : " <nl> - < < " min_ping ( " < < min_ping < < " ) " <nl> - < < " max_ping ( " < < max_ping < < " ) " <nl> - < < " size ( " < < end_points . size ( ) < < " ) " <nl> - < < end_points ; <nl> - } * / <nl> + < < " min_ping ( " < < min_ping < < " ) " <nl> + < < " max_ping ( " < < max_ping < < " ) " <nl> + < < " size ( " < < end_points . size ( ) < < " ) " <nl> + < < end_points ; <nl> + } * / <nl> inline size_t size ( ) const { return end_points . size ( ) ; } <nl> } ; <nl> <nl> struct write_ret_t { <nl> std : : vector < index_t > indices ; / / Indices of log entries ( if any ) to wait for <nl> write_ret_t ( bool a , id_t id , index_list_t const & idx = index_list_t ( ) ) : <nl> accepted ( a ) , redirect ( id ) , indices ( idx ) { } <nl> + write_ret_t ( bool a , id_t id , std : : vector < index_t > const & idx ) : <nl> + accepted ( a ) , redirect ( id ) , indices ( idx ) { } <nl> } ; <nl> <nl> using namespace std : : chrono ; <nl> struct log_t { <nl> index ( idx ) , term ( t ) , leaderId ( lid ) , entry ( e ) , timestamp ( <nl> duration_cast < milliseconds > ( system_clock : : now ( ) . time_since_epoch ( ) ) ) { } <nl> } ; <nl> - <nl> <nl> enum AGENCY_EXCEPTION { <nl> QUERY_NOT_APPLICABLE <nl> struct collect_ret_t { <nl> index_t prev_log_index ; <nl> term_t prev_log_term ; <nl> std : : vector < index_t > indices ; <nl> - collect_ret_t ( index_t pli , term_t plt , std : : vector < index_t > idx ) : <nl> + collect_ret_t ( ) : prev_log_index ( 0 ) , prev_log_term ( 0 ) { } <nl> + collect_ret_t ( index_t pli , term_t plt , std : : vector < index_t > const & idx ) : <nl> prev_log_index ( pli ) , prev_log_term ( plt ) , indices ( idx ) { } <nl> + size_t size ( ) const { return indices . size ( ) ; } <nl> } ; <nl> <nl> } } <nl> mmm a / arangod / Agency / Agent . cpp <nl> ppp b / arangod / Agency / Agent . cpp <nl> term_t Agent : : term ( ) const { <nl> return _constituent . term ( ) ; <nl> } <nl> <nl> + inline size_t Agent : : size ( ) const { <nl> + return _config . size ( ) ; <nl> + } <nl> + <nl> query_t Agent : : requestVote ( term_t t , id_t id , index_t lastLogIndex , <nl> index_t lastLogTerm ) { <nl> Builder builder ; <nl> arangodb : : LoggerStream & operator < < ( arangodb : : LoggerStream & l , Agent const & a ) { <nl> return l ; <nl> } <nl> <nl> + void Agent : : catchUpReadDB ( ) { } ; / / TODO <nl> <nl> bool Agent : : waitFor ( index_t index , duration_t timeout ) { <nl> <nl> - CONDITION_LOCKER ( guard , _cv_rest ) ; <nl> + CONDITION_LOCKER ( guard , _rest_cv ) ; <nl> auto start = std : : chrono : : system_clock : : now ( ) ; <nl> <nl> while ( true ) { <nl> <nl> - _cv . wait ( ) ; <nl> + _rest_cv . wait ( ) ; <nl> <nl> / / shutting down <nl> if ( this - > isStopping ( ) ) { <nl> return false ; <nl> } <nl> - / / timeout ? 
<nl> - if ( std : : chrono : : system_clock : : now ( ) - start > timeout ) <nl> + / / timeout ? <nl> + if ( std : : chrono : : system_clock : : now ( ) - start > timeout ) { <nl> return false ; <nl> - / / more than half have confirmed <nl> - if ( std : : count_if ( _confirmed . begin ( ) , _confirmed . end ( ) , <nl> - [ ] ( index_t i ) { return i > = index } ) > size ( ) / 2 ) { <nl> - return true ; <nl> } <nl> + if ( _last_commit_index > index ) <nl> + return true ; <nl> } <nl> / / We should never get here <nl> TRI_ASSERT ( false ) ; <nl> } <nl> <nl> - append_entries_t Agent : : appendEntries ( <nl> - term_t term , id_t leaderId , index_t prevLogIndex , term_t prevLogTerm , <nl> - index_t leadersLastCommitIndex , query_t const & query ) { <nl> - <nl> - if ( term < this - > term ( ) ) { / / Reply false if term < currentTerm ( § 5 . 1 ) <nl> - LOG ( WARN ) < < " Term of entry to be appended smaller than my own term ( § 5 . 1 ) " ; <nl> - return append_entries_t ( false , this - > term ( ) ) ; <nl> - } <nl> - <nl> - if ( ! _state . findit ( prevLogIndex , prevLogTerm ) ) { / / Find entry at pli with plt <nl> - LOG ( WARN ) < < " No entry in logs at index " < < prevLogIndex <nl> - < < " and term " prevLogTerm ; <nl> - return append_entries_t ( false , this - > term ( ) ) ; <nl> + void Agent : : reportIn ( id_t id , index_t index ) { <nl> + MUTEX_LOCKER ( mutexLocker , _confirmedLock ) ; <nl> + if ( index > _confirmed [ id ] ) <nl> + _confirmed [ id ] = index ; <nl> + / / last commit index smaller ? <nl> + / / check if we can move forward <nl> + if ( _last_commit_index < index ) { <nl> + size_t n = 0 ; <nl> + for ( size_t i = 0 ; i < size ( ) ; + + i ) { <nl> + n + = ( _confirmed [ i ] > index ) ; <nl> + } <nl> + if ( n > size ( ) / 2 ) { <nl> + _last_commit_index = index ; <nl> + } <nl> } <nl> + _rest_cv . broadcast ( ) ; <nl> + } <nl> + <nl> + bool Agent : : recvAppendEntriesRPC ( term_t term , id_t leaderId , index_t prevIndex , <nl> + term_t prevTerm , index_t leaderCommitIndex , query_t const & queries ) { <nl> + <nl> + / / Update commit index <nl> + _last_commit_index = leaderCommitIndex ; <nl> <nl> - _state . log ( query , index_t idx , term , leaderId , _config . size ( ) ) ; / / Append all new entries <nl> - _read_db . apply ( query ) ; / / once we become leader we create a new spear head <nl> - _last_commit_index = leadersLastCommitIndex ; <nl> + / / Sanity <nl> + if ( this - > term ( ) > term ) <nl> + throw LOWER_TERM_APPEND_ENTRIES_RPC ; / / ( § 5 . 1 ) <nl> + if ( ! _state . findit ( prevIndex , prevTerm ) ) <nl> + throw NO_MATCHING_PREVLOG ; / / ( § 5 . 3 ) <nl> + <nl> + / / Delete conflits and append ( § 5 . 3 ) <nl> + for ( size_t i = 0 ; i < queries - > slice ( ) . length ( ) / 2 ; i + = 2 ) { <nl> + _state . log ( queries - > slice ( ) [ i ] . toString ( ) , <nl> + queries - > slice ( ) [ i + 1 ] . getUInt ( ) , term , leaderId ) ; <nl> + } <nl> <nl> + return true ; <nl> } <nl> <nl> - append_entries_t Agent : : appendEntriesRPC ( <nl> + append_entries_t Agent : : sendAppendEntriesRPC ( <nl> id_t slave_id , collect_ret_t const & entries ) { <nl> - <nl> - std : : vector < ClusterCommResult > result ; <nl> - <nl> + <nl> / / RPC path <nl> std : : stringstream path ; <nl> - path < < " / _api / agency_priv / appendEntries ? term = " < < _term < < " & leaderId = " <nl> + path < < " / _api / agency_priv / appendEntries ? term = " < < term ( ) < < " & leaderId = " <nl> < < id ( ) < < " & prevLogIndex = " < < entries . prev_log_index < < " & prevLogTerm = " <nl> - < < entries . 
prev_log_term < < " & leaderCommitId = " < < commitId ; <nl> + < < entries . prev_log_term < < " & leaderCommit = " < < _last_commit_index ; <nl> <nl> / / Headers <nl> std : : unique_ptr < std : : map < std : : string , std : : string > > headerFields = <nl> append_entries_t Agent : : appendEntriesRPC ( <nl> <nl> / / Body <nl> Builder builder ; <nl> - builder . add ( " term " , Value ( term ( ) ) ) ; <nl> - builder . add ( " voteGranted " , Value ( <nl> - _constituent . vote ( id , t , lastLogIndex , lastLogTerm ) ) ) ; <nl> + for ( size_t i = 0 ; i < entries . size ( ) ; + + i ) { <nl> + builder . add ( " index " , Value ( std : : to_string ( entries . indices [ i ] ) ) ) ; <nl> + builder . add ( " query " , Value ( _state [ entries . indices [ i ] ] . entry ) ) ; <nl> + } <nl> builder . close ( ) ; <nl> - <nl> + <nl> + / / Send <nl> arangodb : : ClusterComm : : instance ( ) - > asyncRequest <nl> ( " 1 " , 1 , _config . end_points [ slave_id ] , <nl> rest : : HttpRequest : : HTTP_REQUEST_GET , <nl> - path . str ( ) , std : : make_shared < std : : string > ( body ) , headerFields , <nl> - std : : make_shared < arangodb : : ClusterCommCallback > ( _agent_callbacks ) , <nl> + path . str ( ) , std : : make_shared < std : : string > ( builder . toString ( ) ) , headerFields , <nl> + std : : make_shared < arangodb : : ClusterCommCallback > ( _agent_callback ) , <nl> 1 . 0 , true ) ; <nl> } <nl> <nl> / / query_ret_t <nl> write_ret_t Agent : : write ( query_t const & query ) { / / Signal auf die _cv <nl> if ( _constituent . leading ( ) ) { / / We are leading <nl> - if ( _spear_head . apply ( query ) ) { / / We could apply to spear head ? <nl> + if ( true / * _spear_head . apply ( query ) * / ) { / / We could apply to spear head ? <nl> std : : vector < index_t > indices = / / otherwise through <nl> - _state . log ( query , term ( ) , id ( ) , _config . size ( ) ) ; / / Append to my own log <nl> - _confirmed [ id ( ) ] + + ; <nl> - return <nl> + _state . log ( query , term ( ) , id ( ) ) ; / / Append to my own log <nl> + { <nl> + MUTEX_LOCKER ( mutexLocker , _confirmedLock ) ; <nl> + _confirmed [ id ( ) ] + + ; <nl> + } <nl> + return write_ret_t ( true , id ( ) , indices ) ; / / indices <nl> } else { <nl> throw QUERY_NOT_APPLICABLE ; <nl> } <nl> } else { / / We redirect <nl> - return query_ret_t ( false , _constituent . leaderID ( ) ) ; <nl> + return write_ret_t ( false , _constituent . leaderID ( ) ) ; <nl> } <nl> } <nl> <nl> read_ret_t Agent : : read ( query_t const & query ) const { <nl> if ( _constituent . leading ( ) ) { / / We are leading <nl> - return _state . read ( query ) ; <nl> + return read_ret_t ( true , _constituent . leaderID ( ) ) ; / / ( query ) ; / / TODO : <nl> } else { / / We redirect <nl> return read_ret_t ( false , _constituent . leaderID ( ) ) ; <nl> } <nl> } <nl> <nl> - void State : : run ( ) { <nl> + void Agent : : run ( ) { <nl> + <nl> + CONDITION_LOCKER ( guard , _cv ) ; <nl> + <nl> while ( ! this - > isStopping ( ) ) { <nl> + <nl> + _cv . wait ( ) ; <nl> auto dur = std : : chrono : : system_clock : : now ( ) ; <nl> - std : : vector < std : : vector < index_t > > work ( _config . size ( ) ) ; <nl> - <nl> + std : : vector < collect_ret_t > work ( size ( ) ) ; <nl> + <nl> / / Collect all unacknowledged <nl> - for ( size_t i = 0 ; i < _size ( ) + + i ) { <nl> + for ( size_t i = 0 ; i < size ( ) ; + + i ) { <nl> if ( i ! = id ( ) ) { <nl> - work [ i ] = _state . collectUnAcked ( i ) ; <nl> + work [ i ] = _state . 
collectFrom ( _confirmed [ i ] ) ; <nl> } <nl> } <nl> - <nl> + <nl> / / ( re - ) attempt RPCs <nl> - for ( size_t j = 0 ; j < _setup . size ( ) ; + + j ) { <nl> + for ( size_t j = 0 ; j < size ( ) ; + + j ) { <nl> if ( j ! = id ( ) & & work [ j ] . size ( ) ) { <nl> - appendEntriesRPC ( j , work [ j ] ) ; <nl> + sendAppendEntriesRPC ( j , work [ j ] ) ; <nl> } <nl> } <nl> - <nl> + <nl> / / catch up read db <nl> catchUpReadDB ( ) ; <nl> <nl> - / / We were too fast ? m wait _cvw <nl> - if ( dur = std : : chrono : : system_clock : : now ( ) - dur < _poll_interval ) { <nl> - std : : this_thread : : sleep_for ( _poll_interval - dur ) ; <nl> - } <nl> } <nl> } <nl> <nl> - inline size_t Agent : : size ( ) const { <nl> - return _config . size ( ) ; <nl> - } <nl> - <nl> - void Agent : : shutdown ( ) { <nl> - / / wake up all blocked rest handlers <nl> + void Agent : : beginShutdown ( ) { <nl> + Thread : : beginShutdown ( ) ; <nl> + / / Stop callbacks <nl> _agent_callback . shutdown ( ) ; <nl> + / / wake up all blocked rest handlers <nl> CONDITION_LOCKER ( guard , _cv ) ; <nl> guard . broadcast ( ) ; <nl> } <nl> mmm a / arangod / Agency / Agent . h <nl> ppp b / arangod / Agency / Agent . h <nl> namespace arangodb { <nl> namespace consensus { <nl> <nl> class Agent : public arangodb : : Thread { <nl> - / / We need to asynchroneously append entries <nl> <nl> public : <nl> <nl> class Agent : public arangodb : : Thread { <nl> read_ret_t read ( query_t const & ) const ; <nl> <nl> / * * <nl> - * @ brief Invoked by leader to replicate log entries ( § 5 . 3 ) ; <nl> + * @ brief Received by followers to replicate log entries ( § 5 . 3 ) ; <nl> * also used as heartbeat ( § 5 . 2 ) . <nl> * / <nl> - append_entries_t appendEntries ( term_t , id_t , index_t , term_t , index_t , <nl> - query_t const & ) ; <nl> + bool recvAppendEntriesRPC ( term_t term , id_t leaderId , index_t prevIndex , <nl> + term_t prevTerm , index_t lastCommitIndex , query_t const & queries ) ; <nl> <nl> + / * * <nl> + * @ brief Invoked by leader to replicate log entries ( § 5 . 3 ) ; <nl> + * also used as heartbeat ( § 5 . 2 ) . <nl> + * / <nl> + append_entries_t sendAppendEntriesRPC ( id_t slave_id , <nl> + collect_ret_t const & entries ) ; <nl> + <nl> / * * <nl> * @ brief 1 . Deal with appendEntries to slaves . <nl> * 2 . Report success of write processes . <nl> * / <nl> - void run ( ) ; <nl> + void run ( ) override final ; <nl> + void beginShutdown ( ) override ; <nl> <nl> / * * <nl> * @ brief Report appended entries from AgentCallback <nl> * / <nl> - void reportIn ( id_t id , std : : vector < index_t > idx ) ; <nl> + void reportIn ( id_t id , index_t idx ) ; <nl> <nl> / * * <nl> * @ brief Wait for slaves to confirm appended entries <nl> * / <nl> - bool waitFor ( std : : vector < index_t > entries , duration_t timeout = duration_t ( 2 . 0 ) ) ; <nl> + bool waitFor ( index_t last_entry , duration_t timeout = duration_t ( 2 . 
0 ) ) ; <nl> + <nl> + / * * <nl> + * @ brief Convencience size of agency <nl> + * / <nl> + size_t size ( ) const ; <nl> + <nl> + void catchUpReadDB ( ) ; <nl> <nl> private : <nl> <nl> class Agent : public arangodb : : Thread { <nl> config_t _config ; / * * < @ brief Command line arguments * / <nl> <nl> std : : atomic < index_t > _last_commit_index ; <nl> - index_t _last_commit_index_tmp ; <nl> <nl> arangodb : : Mutex _uncommitedLock ; <nl> <nl> class Agent : public arangodb : : Thread { <nl> AgentCallback _agent_callback ; <nl> <nl> arangodb : : basics : : ConditionVariable _cv ; / / agency callbacks <nl> - arangodb : : basics : : ConditionVariable _cv_rest ; / / rest handler <nl> + arangodb : : basics : : ConditionVariable _rest_cv ; / / rest handler <nl> + <nl> <nl> std : : atomic < bool > _stopping ; <nl> <nl> std : : vector < index_t > _confirmed ; <nl> + arangodb : : Mutex _confirmedLock ; / * * < @ brief Mutex for modifying _confirmed * / <nl> <nl> } ; <nl> <nl> mmm a / arangod / Agency / AgentCallback . cpp <nl> ppp b / arangod / Agency / AgentCallback . cpp <nl> AgentCallback : : AgentCallback ( ) : _agent ( 0 ) { } <nl> <nl> AgentCallback : : AgentCallback ( Agent * agent ) : _agent ( agent ) { } <nl> <nl> - void AgentCallback : : shutdown ( ) { <nl> + void AgentCallbacbk : : shutdown ( ) { <nl> _agent = 0 ; <nl> } <nl> <nl> - bool AgentCallback : : operator ( ) ( ClusterCommResult * res ) { <nl> + bool AgentCallback : : operator ( ) ( arangodb : : ClusterCommResult * res ) { <nl> <nl> if ( res - > status = = CL_COMM_RECEIVED ) { <nl> id_t agent_id ; <nl> mmm a / arangod / Agency / AgentCallback . h <nl> ppp b / arangod / Agency / AgentCallback . h <nl> <nl> / / / @ author Kaveh Vahedipour <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - # ifndef __ARANGODB_CONSENSUS_AGENT__ <nl> - # define __ARANGODB_CONSENSUS_AGENT__ <nl> + # ifndef __ARANGODB_CONSENSUS_AGENT_CALLBACK__ <nl> + # define __ARANGODB_CONSENSUS_AGENT_CALLBACK__ <nl> <nl> # include " Cluster / ClusterComm . h " <nl> <nl> - class Agent ; <nl> - <nl> namespace arangodb { <nl> namespace consensus { <nl> <nl> + class Agent ; <nl> + <nl> class AgentCallback : public arangodb : : ClusterCommCallback { <nl> <nl> public : <nl> <nl> AgentCallback ( ) ; <nl> - AgentCallback ( Agent * agent ) ; <nl> + explicit AgentCallback ( Agent * agent ) ; <nl> <nl> - bool operator ( ) ( ClusterCommResult * ) ; <nl> + virtual bool operator ( ) ( arangodb : : ClusterCommResult * ) ; <nl> <nl> void shutdown ( ) ; <nl> <nl> mmm a / arangod / Agency / State . cpp <nl> ppp b / arangod / Agency / State . cpp <nl> State : : State ( ) { <nl> State : : ~ State ( ) { } <nl> <nl> State : : configure ( size_t size ) { <nl> - _log . push_back ( log_t ( 0 , 0 , 0 , " " ) ; <nl> + _log . 
push_back ( log_t ( 0 , 0 , 0 , " " ) ) ; <nl> } <nl> <nl> / / Leader <nl> - std : : vector < index_t > State : : log ( query_t const & query , term_t term , id_t lid , size_t size ) { <nl> + std : : vector < index_t > State : : log ( query_t const & query , term_t term , id_t lid ) { <nl> MUTEX_LOCKER ( mutexLocker , _logLock ) ; <nl> std : : vector < index_t > idx ; <nl> Builder builder ; <nl> std : : vector < index_t > State : : log ( query_t const & query , term_t term , id_t lid , si <nl> } <nl> <nl> / / Follower <nl> - void State : : log ( query_t const & query , std : : vector < index_t > cont & idx , term_t term , id_t lid , size_t size ) { <nl> + void State : : log ( std : : string const & query , index_t index , term_t term , id_t lid ) { <nl> MUTEX_LOCKER ( mutexLocker , _logLock ) ; <nl> Builder builder ; <nl> for ( size_t i = 0 ; i < query - > slice ( ) . length ( ) ) { <nl> - _log . push_back ( idx [ i ] , term , lid , query . toString ( ) , std : : vector < bool > ( size ) ) ; <nl> + _log . push_back ( index , term , lid , query . toString ( ) ) ; <nl> builder . add ( " query " , qyery - > Slice ( ) ) ; / / query <nl> - builder . add ( " idx " , Value ( idx [ i ] ) ) ; / / log index <nl> + builder . add ( " idx " , Value ( index ) ) ; / / log index <nl> builder . add ( " term " , Value ( term ) ) ; / / term <nl> builder . add ( " leaderID " , Value ( lid ) ) ; / / leader id <nl> builder . close ( ) ; <nl> void State : : log ( query_t const & query , std : : vector < index_t > cont & idx , term_t te <nl> save ( builder . slice ( ) ) ; <nl> } <nl> <nl> - void State : : log ( query_t const & query , index_t idx , term_t term , id_t lid , size_t size ) { <nl> - MUTEX_LOCKER ( mutexLocker , _logLock ) ; <nl> - _log . push_back ( idx , term , lid , query . toString ( ) , std : : vector < bool > ( size ) ) ; <nl> - } <nl> - <nl> - void State : : confirm ( id_t id , index_t index ) { <nl> - MUTEX_LOCKER ( mutexLocker , _logLock ) ; <nl> - _log [ index ] . ack [ id ] = true ; <nl> - } <nl> - <nl> - bool findit ( index_t index , term_t term ) { <nl> + bool State : : findit ( index_t index , term_t term ) { <nl> MUTEX_LOCKER ( mutexLocker , _logLock ) ; <nl> auto i = std : : begin ( _log ) ; <nl> while ( i ! = std : : end ( _log ) ) { / / Find entry matching index and term <nl> bool findit ( index_t index , term_t term ) { <nl> return false ; <nl> } <nl> <nl> - collect_ret_t collectUnacked ( id_t id ) { <nl> - / / Collect all unacknowledged <nl> + log const & State : : operator [ ] ( index_t index ) const { <nl> + MUTEX_LOCKER ( mutexLocker , _logLock ) ; <nl> + return _log [ index ] ; <nl> + } <nl> + <nl> + collect_ret_t State : : collectFrom ( index_t index ) { <nl> + / / Collect all from index on <nl> MUTEX_LOCKER ( mutexLocker , _logLock ) ; <nl> std : : vector < index_t > work ; <nl> - bool found_first = false ; <nl> id_t prev_log_term ; <nl> index_t prev_log_index ; <nl> - for ( size_t i = 0 ; i < _log . end ( ) ; + + i ) { <nl> - if ( ! _log [ i ] . ack [ id ] ) { <nl> - work . push_back ( _log [ i ] . index ) ; <nl> - if ( ! found_first ) { <nl> - prev_log_term = _log [ i - 1 ] . term ; <nl> - prev_log_index = _log [ i - 1 ] . index ; <nl> - found_first = true ; <nl> - } <nl> - } <nl> + prev_log_term = _log [ index - 1 ] . term ; <nl> + prev_log_index = _log [ index - 1 ] . index ; <nl> + for ( size_t i = index ; i < _log . end ( ) ; + + i ) { <nl> + work . push_back ( _log [ i ] . 
index ) ; <nl> } <nl> return collect_ret_t ( prev_log_index , prev_log_term , work ) ; <nl> } <nl> <nl> - bool save ( std : : string const & ep ) { <nl> + bool State : : save ( std : : string const & ep ) { <nl> / / Persist to arango db <nl> / / AQL votedFor , lastCommit <nl> } ; <nl> <nl> - load_ret_t load ( std : : string const & ep ) { <nl> + load_ret_t State : : load ( std : : string const & ep ) { <nl> / / Read all from arango db <nl> return load_ret_t ( currentTerm , votedFor ) <nl> } ; <nl> mmm a / arangod / Agency / State . h <nl> ppp b / arangod / Agency / State . h <nl> class State { <nl> / * * <nl> * @ brief Log entries ( leader ) <nl> * / <nl> - std : : vector < index_t > log ( query_t const & query , term_t term , id_t lid , size_t size ) ; <nl> + std : : vector < index_t > log ( query_t const & query , term_t term , id_t lid ) ; <nl> <nl> / * * <nl> - * @ brief <nl> + * @ brief Log entry follower <nl> * / <nl> - void log ( query_t const & query , index_t , term_t term , id_t lid , size_t size ) ; <nl> - <nl> + void log ( std : : string const & query , index_t , term_t term , id_t lid ) ; <nl> + <nl> / * * <nl> * @ brief Save currentTerm , votedFor , log entries <nl> * / <nl> class State { <nl> bool findit ( index_t index , term_t term ) const ; <nl> <nl> / * * <nl> - * @ brief Confirm entry index for agent id <nl> + * @ brief Collect all from index on <nl> * / <nl> - void confirm ( id_t id , index_t idx ) ; <nl> + collect_ret_t collectFrom ( index_t index ) ; <nl> <nl> - / * * <nl> - * @ brief Collect all unacknowledged for agent id <nl> - * / <nl> - collect_ret_t collectUnacked ( id_t id ) ; <nl> + log_t const & operator [ ] ( index_t t ) { <nl> + <nl> + } <nl> <nl> private : <nl> <nl> mmm a / arangod / RestHandler / RestAgencyPrivHandler . cpp <nl> ppp b / arangod / RestHandler / RestAgencyPrivHandler . cpp <nl> HttpHandler : : status_t RestAgencyPrivHandler : : execute ( ) { <nl> generateError ( HttpResponse : : NOT_FOUND , 404 ) ; / / nothing <nl> return HttpHandler : : status_t ( HANDLER_DONE ) ; <nl> } <nl> - <nl> } <nl> - <nl> + <nl> result . close ( ) ; <nl> VPackSlice s = result . slice ( ) ; <nl> generateResult ( s ) ; <nl>
|
found typo in definition
|
arangodb/arangodb
|
316f0cabfa22bd9ef460954f37e4770ad24575cc
|
2016-03-01T11:23:34Z
|
mmm a / src / mongo / db / fts / stop_words_norwegian . txt <nl> ppp b / src / mongo / db / fts / stop_words_norwegian . txt <nl> <nl> som <nl> alle <nl> et <nl> - vÖre <nl> - gjÛre <nl> + være <nl> + gjøre <nl> slik <nl> ha <nl> - nÅ <nl> + nå <nl> fordi <nl> og <nl> skulle <nl> andre <nl> slutt <nl> mens <nl> siden <nl> - sÅ <nl> + så <nl> over <nl> lage <nl> da <nl> deres <nl> disse <nl> for <nl> hva <nl> - Å <nl> + å <nl> hennes <nl> kunne <nl> ny <nl> riktig <nl> bruke <nl> meget <nl> opp <nl> - mÅ <nl> + må <nl> mye <nl> sant <nl> samme <nl> - mÅte <nl> + måte <nl> hvordan <nl> der <nl> ville <nl> like <nl> tilstand <nl> arbeid <nl> hvilken <nl> - fÅ <nl> + få <nl> hvor <nl> folk <nl> det <nl> ut <nl> start <nl> - gÅ <nl> + gå <nl> hvorfor <nl> god <nl> tid <nl> meg <nl> han <nl> stille <nl> bra <nl> - fÛrst <nl> + først <nl> i <nl> ene <nl> - fÛr <nl> + før <nl> ogsÅ <nl> enn <nl> rett <nl> med <nl> av <nl> til <nl> inn <nl> - vÅr <nl> - pÅ <nl> + vår <nl> + på <nl> her <nl> - nÅr <nl> + når <nl> mange <nl> du <nl> - forsÛke <nl> + forsøke <nl> begge <nl> vi <nl> part <nl> en <nl> var <nl> enhver <nl> si <nl> - vÖrt <nl> + vært <nl> mest <nl> om <nl> gjorde <nl> bort <nl> under <nl> nei <nl> innen <nl> + jeg <nl> + at <nl> + den <nl> + de <nl> + ikkje <nl> + seg <nl> + ett <nl> + har <nl> + mitt <nl> + hun <nl> + sin <nl> + dem <nl> + man <nl> + skal <nl> + selv <nl> + sjøl <nl> + bli <nl> + ble <nl> + blei <nl> + blitt <nl> + kom <nl> + noen <nl> + noe <nl> + dere <nl> + kun <nl> + ja <nl> + etter <nl> + ned <nl> + deg <nl> + sine <nl> + sitt <nl> + mot <nl> + dette <nl> + ingen <nl> + ditt <nl> + blir <nl> + hvilke <nl> + sånn <nl> + inni <nl> + mellom <nl> + vors <nl> + både <nl> + bare <nl> + også <nl> + båe <nl> + dykk <nl> + dykkar <nl> + dei <nl> + deira <nl> + deires <nl> + deim <nl> + di <nl> + då <nl> + eg <nl> + ein <nl> + eit <nl> + eitt <nl> + elles <nl> + honom <nl> + hjå <nl> + ho <nl> + hoe <nl> + henne <nl> + hennar <nl> + hoss <nl> + hossen <nl> + ingi <nl> + inkje <nl> + korleis <nl> + korso <nl> + kva <nl> + kvar <nl> + kvarhelst <nl> + kven <nl> + kvi <nl> + kvifor <nl> + me <nl> + medan <nl> + mi <nl> + mine <nl> + mykje <nl> + no <nl> + nokon <nl> + noka <nl> + nokor <nl> + noko <nl> + nokre <nl> + sia <nl> + sidan <nl> + so <nl> + somt <nl> + somme <nl> + um <nl> + upp <nl> + vere <nl> + vore <nl> + verte <nl> + vort <nl> + varte <nl> + vart <nl> \ No newline at end of file <nl>
|
Updated all Norwgian stop words SERVER - 8404
|
mongodb/mongo
|
710c44d47e1fe414d40d0c1e6e7823034bcbcd54
|
2013-02-07T20:08:59Z
|
mmm a / doc / classes / Material . xml <nl> ppp b / doc / classes / Material . xml <nl> <nl> < / member > <nl> < member name = " render_priority " type = " int " setter = " set_render_priority " getter = " get_render_priority " default = " 0 " > <nl> Sets the render priority for transparent objects in 3D scenes . Higher priority objects will be sorted in front of lower priority objects . <nl> - [ b ] Note : [ / b ] this only applies to sorting of transparent objects . This will not impact how transparent objects are sorted relative to opaque objects . This is because opaque objects are sorted based on depth , while transparent objects are sorted from back to front ( subject to priority ) . <nl> + [ b ] Note : [ / b ] this only applies to sorting of transparent objects . This will not impact how transparent objects are sorted relative to opaque objects . This is because opaque objects are not sorted , while transparent objects are sorted from back to front ( subject to priority ) . <nl> < / member > <nl> < / members > <nl> < constants > <nl>
|
Merge pull request from 31 / doc - renderpriority - depth
|
godotengine/godot
|
10b678a8067547728b46c5602f88e31870174b8b
|
2020-06-14T08:12:36Z
|
mmm a / tensorflow / contrib / cmake / tf_tests . cmake <nl> ppp b / tensorflow / contrib / cmake / tf_tests . cmake <nl> if ( tensorflow_BUILD_PYTHON_TESTS ) <nl> " $ { tensorflow_source_dir } / tensorflow / contrib / estimator / python / estimator / replicate_model_fn_test . py " # b / 71901810 <nl> # Broken io_utils_test <nl> " $ { tensorflow_source_dir } / tensorflow / python / keras / _impl / keras / utils / io_utils_test . py " # b / 72894325 <nl> + # OOM <nl> + " $ { tensorflow_source_dir } / tensorflow / python / training / saver_large_variable_test . py " # b / 110210559 <nl> ) <nl> endif ( ) <nl> list ( REMOVE_ITEM tf_test_src_py $ { tf_test_src_py_exclude } ) <nl> mmm a / tensorflow / tools / ci_build / Dockerfile . cmake <nl> ppp b / tensorflow / tools / ci_build / Dockerfile . cmake <nl> RUN pip install - - upgrade astor <nl> RUN pip install - - upgrade gast <nl> RUN pip install - - upgrade numpy <nl> RUN pip install - - upgrade termcolor <nl> + RUN pip install keras_applications = = 1 . 0 . 2 <nl> + RUN pip install keras_preprocessing = = 1 . 0 . 1 <nl> <nl> # Install golang <nl> RUN apt - get install - t xenial - backports - y golang - 1 . 9 <nl> mmm a / tensorflow / tools / ci_build / install / install_pip_packages . sh <nl> ppp b / tensorflow / tools / ci_build / install / install_pip_packages . sh <nl> pip2 install - - upgrade gast <nl> pip3 install - - upgrade gast <nl> pip2 install - - upgrade termcolor <nl> pip3 install - - upgrade termcolor <nl> + <nl> + # Keras <nl> + pip2 install keras_applications = = 1 . 0 . 2 <nl> + pip3 install keras_applications = = 1 . 0 . 2 <nl> + pip2 install keras_preprocessing = = 1 . 0 . 1 <nl> + pip3 install keras_preprocessing = = 1 . 0 . 1 <nl> mmm a / tensorflow / tools / ci_build / install / install_python3 . 5_pip_packages . sh <nl> ppp b / tensorflow / tools / ci_build / install / install_python3 . 5_pip_packages . sh <nl> pip3 . 5 install - - upgrade astor <nl> pip3 . 5 install - - upgrade gast <nl> pip3 . 5 install - - upgrade termcolor <nl> <nl> + # Keras <nl> + pip3 . 5 install keras_applications = = 1 . 0 . 2 <nl> + pip3 . 5 install keras_preprocessing = = 1 . 0 . 1 <nl> + <nl> # LINT . ThenChange ( / / tensorflow / tools / ci_build / install / install_python3 . 6_pip_packages . sh ) <nl> mmm a / tensorflow / tools / ci_build / install / install_python3 . 6_pip_packages . sh <nl> ppp b / tensorflow / tools / ci_build / install / install_python3 . 6_pip_packages . sh <nl> pip3 install - - upgrade astor <nl> pip3 install - - upgrade gast <nl> pip3 install - - upgrade termcolor <nl> <nl> + # Keras <nl> + pip3 . 5 install keras_applications = = 1 . 0 . 2 <nl> + pip3 . 5 install keras_preprocessing = = 1 . 0 . 1 <nl> # LINT . ThenChange ( / / tensorflow / tools / ci_build / install / install_python3 . 5_pip_packages . sh ) <nl>
|
Install Keras dependencies .
|
tensorflow/tensorflow
|
c4eafb49612a694386bbda1f51dffb6951ec9cf1
|
2018-06-14T21:59:08Z
|
mmm a / imgui_internal . h <nl> ppp b / imgui_internal . h <nl> struct IMGUI_API ImGuiTextEditState <nl> void CursorClamp ( ) { StbState . cursor = ImMin ( StbState . cursor , CurLenW ) ; StbState . select_start = ImMin ( StbState . select_start , CurLenW ) ; StbState . select_end = ImMin ( StbState . select_end , CurLenW ) ; } <nl> bool HasSelection ( ) const { return StbState . select_start ! = StbState . select_end ; } <nl> void ClearSelection ( ) { StbState . select_start = StbState . select_end = StbState . cursor ; } <nl> - void SelectAll ( ) { StbState . select_start = 0 ; StbState . select_end = CurLenW ; StbState . cursor = StbState . select_end ; StbState . has_preferred_x = false ; } <nl> + void SelectAll ( ) { StbState . select_start = 0 ; StbState . cursor = StbState . select_end = CurLenW ; StbState . has_preferred_x = false ; } <nl> void OnKeyPressed ( int key ) ; <nl> } ; <nl> <nl>
|
InputText : Minor tweak .
|
ocornut/imgui
|
cc15512bfc2a3abb88d7c8969ecf6d67611e1b96
|
2018-01-17T11:15:24Z
|
mmm a / Code / CryEngine / CryCommon / CryEntitySystem / IEntity . h <nl> ppp b / Code / CryEngine / CryCommon / CryEntitySystem / IEntity . h <nl> struct IEntity <nl> virtual bool SetParentSlot ( int nParentIndex , int nChildIndex ) = 0 ; <nl> <nl> / / ! Prepare and update an entity slot to be used with component <nl> - virtual void UpdateSlotForComponent ( IEntityComponent * pComponent ) = 0 ; <nl> + virtual void UpdateSlotForComponent ( IEntityComponent * pComponent , bool callOnTransformChanged = true ) = 0 ; <nl> <nl> / / ! Assigns a custom material to the specified object slot . <nl> / / ! \ param nSlot Index of the slot , if - 1 assign this material to all existing slots . <nl> mmm a / Code / CryEngine / CryEntitySystem / Entity . cpp <nl> ppp b / Code / CryEngine / CryEntitySystem / Entity . cpp <nl> void CEntity : : AddComponentInternal ( std : : shared_ptr < IEntityComponent > pComponent , <nl> / / Automatically assign transformation if necessary <nl> if ( pComponent - > GetComponentFlags ( ) . Check ( EEntityComponentFlags : : Transform ) & & pComponent - > GetTransform ( ) = = nullptr ) <nl> { <nl> - pComponent - > SetTransformMatrix ( IDENTITY ) ; <nl> + pComponent - > m_pTransform = std : : make_shared < CryTransform : : CTransform > ( ) ; <nl> + <nl> + UpdateSlotForComponent ( pComponent . get ( ) , false ) ; <nl> } <nl> <nl> OnComponentMaskChanged ( componentRecord , 0 ) ; <nl> IRenderNode * CEntity : : GetSlotRenderNode ( int nSlot ) <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - void CEntity : : UpdateSlotForComponent ( IEntityComponent * pComponent ) <nl> + void CEntity : : UpdateSlotForComponent ( IEntityComponent * pComponent , bool callOnTransformChanged ) <nl> { <nl> int slotId = pComponent - > GetEntitySlotId ( ) ; <nl> if ( slotId = = IEntityComponent : : EmptySlotId ) <nl> void CEntity : : UpdateSlotForComponent ( IEntityComponent * pComponent ) <nl> pComponent - > SetTransformMatrix ( IDENTITY ) ; <nl> } <nl> <nl> - pComponent - > OnTransformChanged ( ) ; <nl> + if ( callOnTransformChanged ) <nl> + { <nl> + pComponent - > OnTransformChanged ( ) ; <nl> + } <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / Code / CryEngine / CryEntitySystem / Entity . h <nl> ppp b / Code / CryEngine / CryEntitySystem / Entity . h <nl> class CEntity : public IEntity <nl> virtual uint32 GetSlotFlags ( int nSlot ) const final ; <nl> virtual int SetSlotRenderNode ( int nSlot , IRenderNode * pRenderNode ) final ; <nl> virtual IRenderNode * GetSlotRenderNode ( int nSlot ) final ; <nl> - virtual void UpdateSlotForComponent ( IEntityComponent * pComponent ) final ; <nl> + virtual void UpdateSlotForComponent ( IEntityComponent * pComponent , bool callOnTransformChanged = true ) final ; <nl> virtual bool ShouldUpdateCharacter ( int nSlot ) const final ; <nl> virtual ICharacterInstance * GetCharacter ( int nSlot ) final ; <nl> virtual int SetCharacter ( ICharacterInstance * pCharacter , int nSlot , bool bUpdatePhysics ) final ; <nl>
|
! B ( CryEntitySystem ) IEntityComponent : : OnTransformChanged could be called before IEntityComponent : : Initialize
|
CRYTEK/CRYENGINE
|
439b708186abfd9811151550522b5e0aeb233548
|
2018-04-06T12:07:21Z
|
mmm a / Marlin / Conditionals_LCD . h <nl> ppp b / Marlin / Conditionals_LCD . h <nl> <nl> * / <nl> <nl> # if ENABLED ( LCD_I2C_SAINSMART_YWROBOT ) <nl> - / / This uses the LiquidCrystal_I2C library ( https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home ) <nl> - / / Make sure it is placed in the Arduino libraries directory . <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> # define LCD_I2C_TYPE_PCF8575 <nl> # define LCD_I2C_ADDRESS 0x27 / / I2C Address of the port expander <nl> # define ULTIPANEL <nl> mmm a / Marlin / Configuration . h <nl> ppp b / Marlin / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / CL - 260 / Configuration . h <nl> ppp b / Marlin / example_configurations / CL - 260 / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / Cartesio / Configuration . h <nl> ppp b / Marlin / example_configurations / Cartesio / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / Felix / Configuration . h <nl> ppp b / Marlin / example_configurations / Felix / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / Felix / DUAL / Configuration . h <nl> ppp b / Marlin / example_configurations / Felix / DUAL / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / FolgerTech - i3 - 2020 / Configuration . h <nl> ppp b / Marlin / example_configurations / FolgerTech - i3 - 2020 / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / Hephestos / Configuration . 
h <nl> ppp b / Marlin / example_configurations / Hephestos / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / Hephestos_2 / Configuration . h <nl> ppp b / Marlin / example_configurations / Hephestos_2 / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / K8200 / Configuration . h <nl> ppp b / Marlin / example_configurations / K8200 / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / K8400 / Configuration . h <nl> ppp b / Marlin / example_configurations / K8400 / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / K8400 / Dual - head / Configuration . h <nl> ppp b / Marlin / example_configurations / K8400 / Dual - head / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / M150 / Configuration . h <nl> ppp b / Marlin / example_configurations / M150 / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / RepRapWorld / Megatronics / Configuration . h <nl> ppp b / Marlin / example_configurations / RepRapWorld / Megatronics / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / RigidBot / Configuration . h <nl> ppp b / Marlin / example_configurations / RigidBot / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . 
Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / SCARA / Configuration . h <nl> ppp b / Marlin / example_configurations / SCARA / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / TAZ4 / Configuration . h <nl> ppp b / Marlin / example_configurations / TAZ4 / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / TinyBoy2 / Configuration . h <nl> ppp b / Marlin / example_configurations / TinyBoy2 / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / WITBOX / Configuration . h <nl> ppp b / Marlin / example_configurations / WITBOX / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / adafruit / ST7565 / Configuration . h <nl> ppp b / Marlin / example_configurations / adafruit / ST7565 / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / delta / FLSUN / auto_calibrate / Configuration . h <nl> ppp b / Marlin / example_configurations / delta / FLSUN / auto_calibrate / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / delta / FLSUN / kossel_mini / Configuration . h <nl> ppp b / Marlin / example_configurations / delta / FLSUN / kossel_mini / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . 
org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / delta / generic / Configuration . h <nl> ppp b / Marlin / example_configurations / delta / generic / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / delta / kossel_mini / Configuration . h <nl> ppp b / Marlin / example_configurations / delta / kossel_mini / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / delta / kossel_pro / Configuration . h <nl> ppp b / Marlin / example_configurations / delta / kossel_pro / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / delta / kossel_xl / Configuration . h <nl> ppp b / Marlin / example_configurations / delta / kossel_xl / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / gCreate_gMax1 . 5 + / Configuration . h <nl> ppp b / Marlin / example_configurations / gCreate_gMax1 . 5 + / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / makibox / Configuration . h <nl> ppp b / Marlin / example_configurations / makibox / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / tvrrug / Round2 / Configuration . h <nl> ppp b / Marlin / example_configurations / tvrrug / Round2 / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . 
org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl> mmm a / Marlin / example_configurations / wt150 / Configuration . h <nl> ppp b / Marlin / example_configurations / wt150 / Configuration . h <nl> <nl> / / <nl> / / Sainsmart YW Robot ( LCM1602 ) LCD Display <nl> / / <nl> + / / Note : This controller requires F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> / / # define LCD_I2C_SAINSMART_YWROBOT <nl> <nl> / / <nl>
|
Merge pull request from thinkyhead / bf_sainsmart_link
|
MarlinFirmware/Marlin
|
254a11b08330ea9e3e5fd51b376f1bacf2e39ad4
|
2017-06-22T19:04:23Z
|
mmm a / python / taichi / lang / transformer . py <nl> ppp b / python / taichi / lang / transformer . py <nl> def visit_Assign ( self , node ) : <nl> assert ( len ( node . targets ) = = 1 ) <nl> self . generic_visit ( node ) <nl> <nl> - import astpretty <nl> - astpretty . pprint ( node ) <nl> - <nl> if isinstance ( node . targets [ 0 ] , ast . Tuple ) : <nl> targets = node . targets [ 0 ] . elts <nl> <nl> def visit_Assign ( self , node ) : <nl> <nl> def tuple_indexed ( i ) : <nl> indexing = self . parse_stmt ( ' __tmp_tuple [ 0 ] ' ) <nl> - import astpretty <nl> - astpretty . pprint ( indexing ) <nl> indexing . value . slice . value = self . parse_expr ( " { } " . format ( i ) ) <nl> return indexing . value <nl> <nl> def tuple_indexed ( i ) : <nl> ast . copy_location ( stmt , node ) <nl> stmts . append ( self . parse_stmt ( ' del __tmp_tuple ' ) ) <nl> return self . make_single_statement ( stmts ) <nl> - <nl> - is_local = isinstance ( node . targets [ 0 ] , ast . Name ) <nl> - if is_local and self . is_creation ( node . targets [ 0 ] . id ) : <nl> - var_name = node . targets [ 0 ] . id <nl> - # Create <nl> - init = ast . Attribute ( <nl> - value = ast . Name ( id = ' ti ' , ctx = ast . Load ( ) ) , attr = ' expr_init ' , <nl> - ctx = ast . Load ( ) ) <nl> - rhs = ast . Call ( <nl> - func = init , <nl> - args = [ node . value ] , <nl> - keywords = [ ] , <nl> - ) <nl> - self . create_variable ( var_name ) <nl> - return ast . copy_location ( ast . Assign ( targets = node . targets , value = rhs ) , <nl> - node ) <nl> else : <nl> - # Assign <nl> - node . targets [ 0 ] . ctx = ast . Load ( ) <nl> - func = ast . Attribute ( value = node . targets [ 0 ] , attr = ' assign ' , ctx = ast . Load ( ) ) <nl> - call = ast . Call ( func = func , args = [ node . value ] , keywords = [ ] ) <nl> - return ast . copy_location ( ast . Expr ( value = call ) , node ) <nl> + is_local = isinstance ( node . targets [ 0 ] , ast . Name ) <nl> + if is_local and self . is_creation ( node . targets [ 0 ] . id ) : <nl> + var_name = node . targets [ 0 ] . id <nl> + # Create <nl> + init = ast . Attribute ( <nl> + value = ast . Name ( id = ' ti ' , ctx = ast . Load ( ) ) , attr = ' expr_init ' , <nl> + ctx = ast . Load ( ) ) <nl> + rhs = ast . Call ( <nl> + func = init , <nl> + args = [ node . value ] , <nl> + keywords = [ ] , <nl> + ) <nl> + self . create_variable ( var_name ) <nl> + return ast . copy_location ( ast . Assign ( targets = node . targets , value = rhs ) , <nl> + node ) <nl> + else : <nl> + # Assign <nl> + node . targets [ 0 ] . ctx = ast . Load ( ) <nl> + func = ast . Attribute ( value = node . targets [ 0 ] , attr = ' assign ' , ctx = ast . Load ( ) ) <nl> + call = ast . Call ( func = func , args = [ node . value ] , keywords = [ ] ) <nl> + return ast . copy_location ( ast . Expr ( value = call ) , node ) <nl> <nl> def visit_Try ( self , node ) : <nl> raise TaichiSyntaxError ( " Keyword ' try ' not supported in Taichi kernels " ) <nl>
|
clean up
|
taichi-dev/taichi
|
da64eb838c19298a05413df2b7f170e1ee31e819
|
2019-12-07T06:55:45Z
|
mmm a / hphp / runtime / server / fastcgi / fastcgi - server . cpp <nl> ppp b / hphp / runtime / server / fastcgi / fastcgi - server . cpp <nl> using apache : : thrift : : transport : : TTransportException ; <nl> <nl> const int FastCGIAcceptor : : k_maxConns = 50 ; <nl> const int FastCGIAcceptor : : k_maxRequests = 1000 ; <nl> - const TSocketAddress FastCGIAcceptor : : s_unknownSocketAddress ( " 0 . 0 . 0 . 0 " , 0 ) ; <nl> + const TSocketAddress FastCGIAcceptor : : s_unknownSocketAddress ( " 127 . 0 . 0 . 1 " , 0 ) ; <nl> <nl> bool FastCGIAcceptor : : canAccept ( const TSocketAddress & address ) { <nl> / / TODO : Support server IP whitelist . <nl>
|
default fastcgi to listen on 127 . 0 . 0 . 1
|
facebook/hhvm
|
24a43d4d3ec3336bcaf53b42c5fe80fbeba63325
|
2014-05-13T22:54:31Z
|
mmm a / db / flushtest . cpp <nl> ppp b / db / flushtest . cpp <nl> int main ( int argc , char * argv [ ] , char * envp [ ] ) { <nl> cout < < " fullsync with sleeps intermixed : " < < ms < < " ms , " < < ( ms - 5000 ) / 500 . 0 < < " ms / request " < < endl ; <nl> } <nl> } <nl> + <nl> + / / with noatime <nl> + { <nl> + fclose ( f ) ; <nl> + / * try from beginning of the file , where we aren ' t appending and changing the file length , <nl> + to see if this is faster as the directory entry then doesn ' t have to be flushed ( if noatime in effect ) . <nl> + * / <nl> + f = fopen ( " / data / db / temptest " , " r + " ) ; <nl> + Timer t ; <nl> + int n = 500 ; <nl> + for ( int i = 0 ; i < n ; i + + ) { <nl> + fwrite ( " xyz " , 3 , 1 , f ) ; <nl> + fflush ( f ) ; <nl> + fullsync ( fileno ( f ) ) ; <nl> + } <nl> + int ms = t . millis ( ) ; <nl> + cout < < " fullsync without growing : " < < ms < < " ms , " < < ms / ( ( double ) n ) < < " ms / request " < < endl ; <nl> + } <nl> <nl> return 0 ; <nl> } <nl>
|
more flushtest tinkering
|
mongodb/mongo
|
a4c7aae14c910a0c8785fce6d771d0d2475e81d7
|
2009-01-04T18:58:05Z
|
mmm a / SConstruct <nl> ppp b / SConstruct <nl> def get_variant_dir ( ) : <nl> a . append ( name ) <nl> else : <nl> x = get_option ( name ) <nl> - x = re . sub ( " [ , \ \ \ \ / ] " , " _ " , x ) <nl> + x = re . sub ( " [ : , \ \ \ \ / ] " , " _ " , x ) <nl> a . append ( name + " _ " + x ) <nl> <nl> s = " # build / $ { PYSYSPLATFORM } / " <nl>
|
add colon to characters that are transformed when generating variant dir
|
mongodb/mongo
|
50759820a8a08410ec72b746f387aad26c04d347
|
2012-12-14T22:55:06Z
|
mmm a / src / core / lib / surface / call . c <nl> ppp b / src / core / lib / surface / call . c <nl> typedef struct batch_control { <nl> grpc_transport_stream_op op ; <nl> } batch_control ; <nl> <nl> + typedef struct { <nl> + gpr_mu child_list_mu ; <nl> + grpc_call * first_child ; <nl> + } parent_call ; <nl> + <nl> + typedef struct { <nl> + grpc_call * parent ; <nl> + / * * siblings : children of the same parent form a list , and this list is <nl> + protected under <nl> + parent - > mu * / <nl> + grpc_call * sibling_next ; <nl> + grpc_call * sibling_prev ; <nl> + } child_call ; <nl> + <nl> struct grpc_call { <nl> gpr_arena * arena ; <nl> grpc_completion_queue * cq ; <nl> grpc_polling_entity pollent ; <nl> grpc_channel * channel ; <nl> - grpc_call * parent ; <nl> - grpc_call * first_child ; <nl> gpr_timespec start_time ; <nl> - / * protects first_child , and child next / prev links * / <nl> - gpr_mu child_list_mu ; <nl> + / * parent_call * * / gpr_atm parent_call_atm ; <nl> + child_call * child_call ; <nl> <nl> / * client or server call * / <nl> bool is_client ; <nl> struct grpc_call { <nl> int send_extra_metadata_count ; <nl> gpr_timespec send_deadline ; <nl> <nl> - / * * siblings : children of the same parent form a list , and this list is <nl> - protected under <nl> - parent - > mu * / <nl> - grpc_call * sibling_next ; <nl> - grpc_call * sibling_prev ; <nl> - <nl> grpc_slice_buffer_stream sending_stream ; <nl> <nl> grpc_byte_stream * receiving_stream ; <nl> static void add_init_error ( grpc_error * * composite , grpc_error * new ) { <nl> * composite = grpc_error_add_child ( * composite , new ) ; <nl> } <nl> <nl> + static parent_call * get_or_create_parent_call ( grpc_call * call ) { <nl> + parent_call * p = ( parent_call * ) gpr_atm_acq_load ( & call - > parent_call_atm ) ; <nl> + if ( p = = NULL ) { <nl> + p = gpr_arena_alloc ( call - > arena , sizeof ( * p ) ) ; <nl> + gpr_mu_init ( & p - > child_list_mu ) ; <nl> + if ( ! gpr_atm_rel_cas ( & call - > parent_call_atm , ( gpr_atm ) NULL , ( gpr_atm ) p ) ) { <nl> + gpr_mu_destroy ( & p - > child_list_mu ) ; <nl> + p = ( parent_call * ) gpr_atm_acq_load ( & call - > parent_call_atm ) ; <nl> + } <nl> + } <nl> + return p ; <nl> + } <nl> + <nl> + static parent_call * get_parent_call ( grpc_call * call ) { <nl> + return ( parent_call * ) gpr_atm_acq_load ( & call - > parent_call_atm ) ; <nl> + } <nl> + <nl> grpc_error * grpc_call_create ( grpc_exec_ctx * exec_ctx , <nl> const grpc_call_create_args * args , <nl> grpc_call * * out_call ) { <nl> grpc_error * grpc_call_create ( grpc_exec_ctx * exec_ctx , <nl> sizeof ( grpc_call ) + channel_stack - > call_stack_size ) ; <nl> call - > arena = arena ; <nl> * out_call = call ; <nl> - gpr_mu_init ( & call - > child_list_mu ) ; <nl> call - > channel = args - > channel ; <nl> call - > cq = args - > cq ; <nl> - call - > parent = args - > parent_call ; <nl> call - > start_time = gpr_now ( GPR_CLOCK_MONOTONIC ) ; <nl> / * Always support no compression * / <nl> GPR_BITSET ( & call - > encodings_accepted_by_peer , GRPC_COMPRESS_NONE ) ; <nl> grpc_error * grpc_call_create ( grpc_exec_ctx * exec_ctx , <nl> gpr_convert_clock_type ( args - > send_deadline , GPR_CLOCK_MONOTONIC ) ; <nl> <nl> if ( args - > parent_call ! 
= NULL ) { <nl> + child_call * cc = call - > child_call = <nl> + gpr_arena_alloc ( arena , sizeof ( child_call ) ) ; <nl> + call - > child_call - > parent = args - > parent_call ; <nl> + <nl> GRPC_CALL_INTERNAL_REF ( args - > parent_call , " child " ) ; <nl> GPR_ASSERT ( call - > is_client ) ; <nl> GPR_ASSERT ( ! args - > parent_call - > is_client ) ; <nl> <nl> - gpr_mu_lock ( & args - > parent_call - > child_list_mu ) ; <nl> + parent_call * pc = get_or_create_parent_call ( args - > parent_call ) ; <nl> + <nl> + gpr_mu_lock ( & pc - > child_list_mu ) ; <nl> <nl> if ( args - > propagation_mask & GRPC_PROPAGATE_DEADLINE ) { <nl> send_deadline = gpr_time_min ( <nl> grpc_error * grpc_call_create ( grpc_exec_ctx * exec_ctx , <nl> } <nl> } <nl> <nl> - if ( args - > parent_call - > first_child = = NULL ) { <nl> - args - > parent_call - > first_child = call ; <nl> - call - > sibling_next = call - > sibling_prev = call ; <nl> + if ( pc - > first_child = = NULL ) { <nl> + pc - > first_child = call ; <nl> + cc - > sibling_next = cc - > sibling_prev = call ; <nl> } else { <nl> - call - > sibling_next = args - > parent_call - > first_child ; <nl> - call - > sibling_prev = args - > parent_call - > first_child - > sibling_prev ; <nl> - call - > sibling_next - > sibling_prev = call - > sibling_prev - > sibling_next = <nl> - call ; <nl> + cc - > sibling_next = pc - > first_child ; <nl> + cc - > sibling_prev = pc - > first_child - > child_call - > sibling_prev ; <nl> + cc - > sibling_next - > child_call - > sibling_prev = <nl> + cc - > sibling_prev - > child_call - > sibling_next = call ; <nl> } <nl> <nl> - gpr_mu_unlock ( & args - > parent_call - > child_list_mu ) ; <nl> + gpr_mu_unlock ( & pc - > child_list_mu ) ; <nl> } <nl> <nl> call - > send_deadline = send_deadline ; <nl> static void destroy_call ( grpc_exec_ctx * exec_ctx , void * call , <nl> if ( c - > receiving_stream ! = NULL ) { <nl> grpc_byte_stream_destroy ( exec_ctx , c - > receiving_stream ) ; <nl> } <nl> - gpr_mu_destroy ( & c - > child_list_mu ) ; <nl> + parent_call * pc = get_parent_call ( c ) ; <nl> + if ( pc ! = NULL ) { <nl> + gpr_mu_destroy ( & pc - > child_list_mu ) ; <nl> + } <nl> for ( ii = 0 ; ii < c - > send_extra_metadata_count ; ii + + ) { <nl> GRPC_MDELEM_UNREF ( exec_ctx , c - > send_extra_metadata [ ii ] . 
md ) ; <nl> } <nl> static void destroy_call ( grpc_exec_ctx * exec_ctx , void * call , <nl> } <nl> <nl> void grpc_call_destroy ( grpc_call * c ) { <nl> - int cancel ; <nl> - grpc_call * parent = c - > parent ; <nl> + parent_call * pc = get_parent_call ( c ) ; <nl> + child_call * cc = c - > child_call ; <nl> grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT ; <nl> <nl> GPR_TIMER_BEGIN ( " grpc_call_destroy " , 0 ) ; <nl> GRPC_API_TRACE ( " grpc_call_destroy ( c = % p ) " , 1 , ( c ) ) ; <nl> <nl> - if ( parent ) { <nl> - gpr_mu_lock ( & parent - > child_list_mu ) ; <nl> - if ( c = = parent - > first_child ) { <nl> - parent - > first_child = c - > sibling_next ; <nl> - if ( c = = parent - > first_child ) { <nl> - parent - > first_child = NULL ; <nl> + if ( pc ) { <nl> + gpr_mu_lock ( & pc - > child_list_mu ) ; <nl> + if ( c = = pc - > first_child ) { <nl> + pc - > first_child = cc - > sibling_next ; <nl> + if ( c = = pc - > first_child ) { <nl> + pc - > first_child = NULL ; <nl> } <nl> - c - > sibling_prev - > sibling_next = c - > sibling_next ; <nl> - c - > sibling_next - > sibling_prev = c - > sibling_prev ; <nl> } <nl> - gpr_mu_unlock ( & parent - > child_list_mu ) ; <nl> - GRPC_CALL_INTERNAL_UNREF ( & exec_ctx , parent , " child " ) ; <nl> + cc - > sibling_prev - > child_call - > sibling_next = cc - > sibling_next ; <nl> + cc - > sibling_next - > child_call - > sibling_prev = cc - > sibling_prev ; <nl> + gpr_mu_unlock ( & pc - > child_list_mu ) ; <nl> + GRPC_CALL_INTERNAL_UNREF ( & exec_ctx , cc - > parent , " child " ) ; <nl> } <nl> <nl> GPR_ASSERT ( ! c - > destroy_called ) ; <nl> c - > destroy_called = 1 ; <nl> - cancel = gpr_atm_acq_load ( & c - > any_ops_sent_atm ) & & <nl> - ! gpr_atm_acq_load ( & c - > received_final_op_atm ) ; <nl> + bool cancel = gpr_atm_acq_load ( & c - > any_ops_sent_atm ) ! = 0 & & <nl> + gpr_atm_acq_load ( & c - > received_final_op_atm ) = = 0 ; <nl> if ( cancel ) { <nl> cancel_with_error ( & exec_ctx , c , STATUS_FROM_API_OVERRIDE , <nl> GRPC_ERROR_CANCELLED ) ; <nl> static grpc_error * consolidate_batch_errors ( batch_control * bctl ) { <nl> <nl> static void post_batch_completion ( grpc_exec_ctx * exec_ctx , <nl> batch_control * bctl ) { <nl> - grpc_call * child_call ; <nl> grpc_call * next_child_call ; <nl> grpc_call * call = bctl - > call ; <nl> grpc_error * error = consolidate_batch_errors ( bctl ) ; <nl> static void post_batch_completion ( grpc_exec_ctx * exec_ctx , <nl> <nl> / * propagate cancellation to any interested children * / <nl> gpr_atm_rel_store ( & call - > received_final_op_atm , 1 ) ; <nl> - gpr_mu_lock ( & call - > child_list_mu ) ; <nl> - child_call = call - > first_child ; <nl> - if ( child_call ! = NULL ) { <nl> - do { <nl> - next_child_call = child_call - > sibling_next ; <nl> - if ( child_call - > cancellation_is_inherited ) { <nl> - GRPC_CALL_INTERNAL_REF ( child_call , " propagate_cancel " ) ; <nl> - cancel_with_error ( exec_ctx , child_call , STATUS_FROM_API_OVERRIDE , <nl> - GRPC_ERROR_CANCELLED ) ; <nl> - GRPC_CALL_INTERNAL_UNREF ( exec_ctx , child_call , " propagate_cancel " ) ; <nl> - } <nl> - child_call = next_child_call ; <nl> - } while ( child_call ! = call - > first_child ) ; <nl> + parent_call * pc = get_parent_call ( call ) ; <nl> + if ( pc ! = NULL ) { <nl> + grpc_call * child ; <nl> + gpr_mu_lock ( & pc - > child_list_mu ) ; <nl> + child = pc - > first_child ; <nl> + if ( child ! 
= NULL ) { <nl> + do { <nl> + next_child_call = child - > child_call - > sibling_next ; <nl> + if ( child - > cancellation_is_inherited ) { <nl> + GRPC_CALL_INTERNAL_REF ( child , " propagate_cancel " ) ; <nl> + cancel_with_error ( exec_ctx , child , STATUS_FROM_API_OVERRIDE , <nl> + GRPC_ERROR_CANCELLED ) ; <nl> + GRPC_CALL_INTERNAL_UNREF ( exec_ctx , child , " propagate_cancel " ) ; <nl> + } <nl> + child = next_child_call ; <nl> + } while ( child ! = pc - > first_child ) ; <nl> + } <nl> + gpr_mu_unlock ( & pc - > child_list_mu ) ; <nl> } <nl> - gpr_mu_unlock ( & call - > child_list_mu ) ; <nl> <nl> if ( call - > is_client ) { <nl> get_final_status ( call , set_status_value_directly , <nl>
|
Lazily track parenting
|
grpc/grpc
|
1c10a7b3b4330244ab038ab6985247ffb4a62836
|
2017-03-29T21:35:16Z
|
mmm a / include / v8 . h <nl> ppp b / include / v8 . h <nl> template < typename T > class CustomArguments ; <nl> class PropertyCallbackArguments ; <nl> class FunctionCallbackArguments ; <nl> class GlobalHandles ; <nl> + class ScopedExternalStringLock ; <nl> <nl> namespace wasm { <nl> class NativeModule ; <nl> class V8_EXPORT String : public Name { <nl> public : <nl> virtual ~ ExternalStringResourceBase ( ) { } <nl> <nl> - virtual bool IsCompressible ( ) const { return false ; } <nl> + V8_DEPRECATE_SOON ( " Use IsCacheable ( ) . " , <nl> + virtual bool IsCompressible ( ) const ) { <nl> + return false ; <nl> + } <nl> + <nl> + / * * <nl> + * If a string is cacheable , the value returned by <nl> + * ExternalStringResource : : data ( ) may be cached , otherwise it is not <nl> + * expected to be stable beyond the current top - level task . <nl> + * / <nl> + virtual bool IsCacheable ( ) const { <nl> + # if __clang__ <nl> + # pragma clang diagnostic push <nl> + # pragma clang diagnostic ignored " - Wdeprecated - declarations " <nl> + # endif <nl> + return ! IsCompressible ( ) ; <nl> + # if __clang__ <nl> + # pragma clang diagnostic pop <nl> + # endif <nl> + } <nl> <nl> protected : <nl> ExternalStringResourceBase ( ) { } <nl> class V8_EXPORT String : public Name { <nl> * / <nl> virtual void Dispose ( ) { delete this ; } <nl> <nl> + / * * <nl> + * For a non - cacheable string , the value returned by <nl> + * | ExternalStringResource : : data ( ) | has to be stable between | Lock ( ) | and <nl> + * | Unlock ( ) | , that is the string must behave as is | IsCacheable ( ) | returned <nl> + * true . <nl> + * <nl> + * These two functions must be thread - safe , and can be called from anywhere . <nl> + * They also must handle lock depth , in the sense that each can be called <nl> + * several times , from different threads , and unlocking should only happen <nl> + * when the balance of Lock ( ) and Unlock ( ) calls is 0 . <nl> + * / <nl> + virtual void Lock ( ) const { } <nl> + <nl> + / * * <nl> + * Unlocks the string . <nl> + * / <nl> + virtual void Unlock ( ) const { } <nl> + <nl> / / Disallow copying and assigning . <nl> ExternalStringResourceBase ( const ExternalStringResourceBase & ) = delete ; <nl> void operator = ( const ExternalStringResourceBase & ) = delete ; <nl> class V8_EXPORT String : public Name { <nl> private : <nl> friend class internal : : Heap ; <nl> friend class v8 : : String ; <nl> + friend class internal : : ScopedExternalStringLock ; <nl> } ; <nl> <nl> / * * <nl> mmm a / src / heap / factory . cc <nl> ppp b / src / heap / factory . cc <nl> MaybeHandle < String > Factory : : NewExternalStringFromOneByte ( <nl> if ( length = = 0 ) return empty_string ( ) ; <nl> <nl> Handle < Map > map ; <nl> - if ( resource - > IsCompressible ( ) ) { <nl> + if ( ! resource - > IsCacheable ( ) ) { <nl> map = uncached_external_one_byte_string_map ( ) ; <nl> } else { <nl> map = external_one_byte_string_map ( ) ; <nl> MaybeHandle < String > Factory : : NewExternalStringFromTwoByte ( <nl> length < = kOneByteCheckLengthLimit & & <nl> String : : IsOneByte ( resource - > data ( ) , static_cast < int > ( length ) ) ; <nl> Handle < Map > map ; <nl> - if ( resource - > IsCompressible ( ) ) { <nl> + if ( ! resource - > IsCacheable ( ) ) { <nl> map = is_one_byte ? uncached_external_string_with_one_byte_data_map ( ) <nl> : uncached_external_string_map ( ) ; <nl> } else { <nl> mmm a / src / objects . cc <nl> ppp b / src / objects . 
cc <nl> bool String : : MakeExternal ( v8 : : String : : ExternalStringResource * resource ) { <nl> / / Externalizing twice leaks the external resource , so it ' s <nl> / / prohibited by the API . <nl> DCHECK ( this - > SupportsExternalization ( ) ) ; <nl> - DCHECK ( ! resource - > IsCompressible ( ) ) ; <nl> + DCHECK ( resource - > IsCacheable ( ) ) ; <nl> # ifdef ENABLE_SLOW_DCHECKS <nl> if ( FLAG_enable_slow_asserts ) { <nl> / / Assert that the resource and the string are equivalent . <nl> bool String : : MakeExternal ( v8 : : String : : ExternalOneByteStringResource * resource ) { <nl> / / Externalizing twice leaks the external resource , so it ' s <nl> / / prohibited by the API . <nl> DCHECK ( this - > SupportsExternalization ( ) ) ; <nl> - DCHECK ( ! resource - > IsCompressible ( ) ) ; <nl> + DCHECK ( resource - > IsCacheable ( ) ) ; <nl> # ifdef ENABLE_SLOW_DCHECKS <nl> if ( FLAG_enable_slow_asserts ) { <nl> / / Assert that the resource and the string are equivalent . <nl> mmm a / src / parsing / scanner - character - streams . cc <nl> ppp b / src / parsing / scanner - character - streams . cc <nl> <nl> <nl> # include " src / parsing / scanner - character - streams . h " <nl> <nl> + # include < memory > <nl> + # include < vector > <nl> + <nl> # include " include / v8 . h " <nl> # include " src / counters . h " <nl> # include " src / globals . h " <nl> <nl> namespace v8 { <nl> namespace internal { <nl> <nl> + class ScopedExternalStringLock { <nl> + public : <nl> + explicit ScopedExternalStringLock ( ExternalString * string ) { <nl> + DCHECK ( string ) ; <nl> + if ( string - > IsExternalOneByteString ( ) ) { <nl> + resource_ = ExternalOneByteString : : cast ( string ) - > resource ( ) ; <nl> + } else { <nl> + DCHECK ( string - > IsExternalTwoByteString ( ) ) ; <nl> + resource_ = ExternalTwoByteString : : cast ( string ) - > resource ( ) ; <nl> + } <nl> + DCHECK ( resource_ ) ; <nl> + resource_ - > Lock ( ) ; <nl> + } <nl> + <nl> + / / Copying a lock increases the locking depth . <nl> + ScopedExternalStringLock ( const ScopedExternalStringLock & other ) <nl> + : resource_ ( other . resource_ ) { <nl> + resource_ - > Lock ( ) ; <nl> + } <nl> + <nl> + ~ ScopedExternalStringLock ( ) { resource_ - > Unlock ( ) ; } <nl> + <nl> + private : <nl> + / / Not nullptr . <nl> + const v8 : : String : : ExternalStringResourceBase * resource_ ; <nl> + } ; <nl> + <nl> namespace { <nl> const unibrow : : uchar kUtf8Bom = 0xFEFF ; <nl> } / / namespace <nl> <nl> template < typename Char > <nl> - struct HeapStringType ; <nl> + struct CharTraits ; <nl> <nl> template < > <nl> - struct HeapStringType < uint8_t > { <nl> + struct CharTraits < uint8_t > { <nl> typedef SeqOneByteString String ; <nl> + typedef ExternalOneByteString ExternalString ; <nl> } ; <nl> <nl> template < > <nl> - struct HeapStringType < uint16_t > { <nl> + struct CharTraits < uint16_t > { <nl> typedef SeqTwoByteString String ; <nl> + typedef ExternalTwoByteString ExternalString ; <nl> } ; <nl> <nl> template < typename Char > <nl> struct Range { <nl> template < typename Char > <nl> class OnHeapStream { <nl> public : <nl> - typedef typename HeapStringType < Char > : : String String ; <nl> + typedef typename CharTraits < Char > : : String String ; <nl> <nl> OnHeapStream ( Handle < String > string , size_t start_offset , size_t end ) <nl> : string_ ( string ) , start_offset_ ( start_offset ) , length_ ( end ) { } <nl> class OnHeapStream { <nl> / / ExternalTwoByteString . 
<nl> template < typename Char > <nl> class ExternalStringStream { <nl> + typedef typename CharTraits < Char > : : ExternalString ExternalString ; <nl> + <nl> public : <nl> - ExternalStringStream ( const Char * data , size_t end ) <nl> - : data_ ( data ) , length_ ( end ) { } <nl> + ExternalStringStream ( ExternalString * string , size_t start_offset , <nl> + size_t length ) <nl> + : lock_ ( string ) , <nl> + data_ ( string - > GetChars ( ) + start_offset ) , <nl> + length_ ( length ) { } <nl> <nl> ExternalStringStream ( const ExternalStringStream & other ) <nl> - : data_ ( other . data_ ) , length_ ( other . length_ ) { } <nl> + : lock_ ( other . lock_ ) , data_ ( other . data_ ) , length_ ( other . length_ ) { } <nl> <nl> Range < Char > GetDataAt ( size_t pos ) { <nl> return { & data_ [ Min ( length_ , pos ) ] , & data_ [ length_ ] } ; <nl> class ExternalStringStream { <nl> static const bool kCanBeCloned = true ; <nl> static const bool kCanAccessHeap = false ; <nl> <nl> + private : <nl> + ScopedExternalStringLock lock_ ; <nl> + const Char * const data_ ; <nl> + const size_t length_ ; <nl> + } ; <nl> + <nl> + / / A Char stream backed by a C array . Testing only . <nl> + template < typename Char > <nl> + class TestingStream { <nl> + public : <nl> + TestingStream ( const Char * data , size_t length ) <nl> + : data_ ( data ) , length_ ( length ) { } <nl> + Range < Char > GetDataAt ( size_t pos ) { <nl> + return { & data_ [ Min ( length_ , pos ) ] , & data_ [ length_ ] } ; <nl> + } <nl> + <nl> + static const bool kCanBeCloned = true ; <nl> + static const bool kCanAccessHeap = false ; <nl> + <nl> private : <nl> const Char * const data_ ; <nl> const size_t length_ ; <nl> Utf16CharacterStream * ScannerStream : : For ( Isolate * isolate , Handle < String > data , <nl> } <nl> if ( data - > IsExternalOneByteString ( ) ) { <nl> return new BufferedCharacterStream < ExternalStringStream > ( <nl> - static_cast < size_t > ( start_pos ) , <nl> - ExternalOneByteString : : cast ( * data ) - > GetChars ( ) + start_offset , <nl> - static_cast < size_t > ( end_pos ) ) ; <nl> + static_cast < size_t > ( start_pos ) , ExternalOneByteString : : cast ( * data ) , <nl> + start_offset , static_cast < size_t > ( end_pos ) ) ; <nl> } else if ( data - > IsExternalTwoByteString ( ) ) { <nl> return new UnbufferedCharacterStream < ExternalStringStream > ( <nl> - static_cast < size_t > ( start_pos ) , <nl> - ExternalTwoByteString : : cast ( * data ) - > GetChars ( ) + start_offset , <nl> - static_cast < size_t > ( end_pos ) ) ; <nl> + static_cast < size_t > ( start_pos ) , ExternalTwoByteString : : cast ( * data ) , <nl> + start_offset , static_cast < size_t > ( end_pos ) ) ; <nl> } else if ( data - > IsSeqOneByteString ( ) ) { <nl> return new BufferedCharacterStream < OnHeapStream > ( <nl> static_cast < size_t > ( start_pos ) , Handle < SeqOneByteString > : : cast ( data ) , <nl> std : : unique_ptr < Utf16CharacterStream > ScannerStream : : ForTesting ( <nl> std : : unique_ptr < Utf16CharacterStream > ScannerStream : : ForTesting ( <nl> const char * data , size_t length ) { <nl> return std : : unique_ptr < Utf16CharacterStream > ( <nl> - new BufferedCharacterStream < ExternalStringStream > ( <nl> + new BufferedCharacterStream < TestingStream > ( <nl> static_cast < size_t > ( 0 ) , reinterpret_cast < const uint8_t * > ( data ) , <nl> static_cast < size_t > ( length ) ) ) ; <nl> } <nl> mmm a / test / cctest / parsing / test - scanner - streams . cc <nl> ppp b / test / cctest / parsing / test - scanner - streams . 
cc <nl> class ChunkSource : public v8 : : ScriptCompiler : : ExternalSourceStream { <nl> size_t current_ ; <nl> } ; <nl> <nl> - class TestExternalResource : public v8 : : String : : ExternalStringResource { <nl> + / / Checks that Lock ( ) / Unlock ( ) pairs are balanced . Not thread - safe . <nl> + class LockChecker { <nl> + public : <nl> + LockChecker ( ) : lock_depth_ ( 0 ) { } <nl> + ~ LockChecker ( ) { CHECK_EQ ( 0 , lock_depth_ ) ; } <nl> + <nl> + void Lock ( ) const { lock_depth_ + + ; } <nl> + <nl> + void Unlock ( ) const { <nl> + CHECK_GT ( lock_depth_ , 0 ) ; <nl> + lock_depth_ - - ; <nl> + } <nl> + <nl> + bool IsLocked ( ) const { return lock_depth_ > 0 ; } <nl> + <nl> + int LockDepth ( ) const { return lock_depth_ ; } <nl> + <nl> + protected : <nl> + mutable int lock_depth_ ; <nl> + } ; <nl> + <nl> + class TestExternalResource : public v8 : : String : : ExternalStringResource , <nl> + public LockChecker { <nl> public : <nl> explicit TestExternalResource ( uint16_t * data , int length ) <nl> - : data_ ( data ) , length_ ( static_cast < size_t > ( length ) ) { } <nl> + : LockChecker ( ) , data_ ( data ) , length_ ( static_cast < size_t > ( length ) ) { } <nl> <nl> - ~ TestExternalResource ( ) { } <nl> + const uint16_t * data ( ) const override { <nl> + CHECK ( IsLocked ( ) ) ; <nl> + return data_ ; <nl> + } <nl> + <nl> + size_t length ( ) const override { return length_ ; } <nl> <nl> - const uint16_t * data ( ) const { return data_ ; } <nl> - size_t length ( ) const { return length_ ; } <nl> + bool IsCacheable ( ) const override { return false ; } <nl> + void Lock ( ) const override { LockChecker : : Lock ( ) ; } <nl> + void Unlock ( ) const override { LockChecker : : Unlock ( ) ; } <nl> <nl> private : <nl> uint16_t * data_ ; <nl> class TestExternalResource : public v8 : : String : : ExternalStringResource { <nl> } ; <nl> <nl> class TestExternalOneByteResource <nl> - : public v8 : : String : : ExternalOneByteStringResource { <nl> + : public v8 : : String : : ExternalOneByteStringResource , <nl> + public LockChecker { <nl> public : <nl> TestExternalOneByteResource ( const char * data , size_t length ) <nl> : data_ ( data ) , length_ ( length ) { } <nl> <nl> - const char * data ( ) const { return data_ ; } <nl> - size_t length ( ) const { return length_ ; } <nl> + const char * data ( ) const override { <nl> + CHECK ( IsLocked ( ) ) ; <nl> + return data_ ; <nl> + } <nl> + size_t length ( ) const override { return length_ ; } <nl> + <nl> + bool IsCacheable ( ) const override { return false ; } <nl> + void Lock ( ) const override { LockChecker : : Lock ( ) ; } <nl> + void Unlock ( ) const override { LockChecker : : Unlock ( ) ; } <nl> <nl> private : <nl> const char * data_ ; <nl> const char unicode_utf8 [ ] = <nl> const uint16_t unicode_ucs2 [ ] = { 97 , 98 , 99 , 228 , 10784 , 55357 , <nl> 56489 , 100 , 101 , 102 , 0 } ; <nl> <nl> + i : : Handle < i : : String > NewExternalTwoByteStringFromResource ( <nl> + i : : Isolate * isolate , TestExternalResource * resource ) { <nl> + i : : Factory * factory = isolate - > factory ( ) ; <nl> + / / String creation accesses the resource . <nl> + resource - > Lock ( ) ; <nl> + i : : Handle < i : : String > uc16_string ( <nl> + factory - > NewExternalStringFromTwoByte ( resource ) . 
ToHandleChecked ( ) ) ; <nl> + resource - > Unlock ( ) ; <nl> + return uc16_string ; <nl> + } <nl> + <nl> } / / anonymous namespace <nl> <nl> TEST ( Utf8StreamAsciiOnly ) { <nl> void TestCloneCharacterStream ( const char * reference , <nl> CHECK_EQU ( reference [ i ] , stream - > Advance ( ) ) ; <nl> } <nl> <nl> - / / Test advancing oriignal stream didn ' t affect the clone . <nl> + / / Test advancing original stream didn ' t affect the clone . <nl> TestCharacterStream ( reference , clone . get ( ) , length , 0 , length ) ; <nl> <nl> / / Test advancing clone didn ' t affect original stream . <nl> void TestCharacterStreams ( const char * one_byte_source , unsigned length , <nl> } <nl> TestExternalResource resource ( uc16_buffer . get ( ) , length ) ; <nl> i : : Handle < i : : String > uc16_string ( <nl> - factory - > NewExternalStringFromTwoByte ( & resource ) . ToHandleChecked ( ) ) ; <nl> + NewExternalTwoByteStringFromResource ( isolate , & resource ) ) ; <nl> std : : unique_ptr < i : : Utf16CharacterStream > uc16_stream ( <nl> i : : ScannerStream : : For ( isolate , uc16_string , start , end ) ) ; <nl> TestCharacterStream ( one_byte_source , uc16_stream . get ( ) , length , start , end ) ; <nl> TEST ( CloneCharacterStreams ) { <nl> } <nl> TestExternalResource resource ( uc16_buffer . get ( ) , length ) ; <nl> i : : Handle < i : : String > uc16_string ( <nl> - factory - > NewExternalStringFromTwoByte ( & resource ) . ToHandleChecked ( ) ) ; <nl> + NewExternalTwoByteStringFromResource ( isolate , & resource ) ) ; <nl> std : : unique_ptr < i : : Utf16CharacterStream > uc16_stream ( <nl> i : : ScannerStream : : For ( isolate , uc16_string , 0 , length ) ) ; <nl> + <nl> + CHECK ( resource . IsLocked ( ) ) ; <nl> + CHECK_EQ ( 1 , resource . LockDepth ( ) ) ; <nl> + std : : unique_ptr < i : : Utf16CharacterStream > cloned = uc16_stream - > Clone ( ) ; <nl> + CHECK_EQ ( 2 , resource . LockDepth ( ) ) ; <nl> + uc16_stream = std : : move ( cloned ) ; <nl> + CHECK_EQ ( 1 , resource . LockDepth ( ) ) ; <nl> + <nl> TestCloneCharacterStream ( one_byte_source , uc16_stream . get ( ) , length ) ; <nl> <nl> / / This avoids the GC from trying to free a stack allocated resource . <nl> mmm a / test / cctest / test - regexp . cc <nl> ppp b / test / cctest / test - regexp . cc <nl> class UncachedExternalString <nl> public : <nl> const char * data ( ) const override { return " abcdefghijklmnopqrstuvwxyz " ; } <nl> size_t length ( ) const override { return 26 ; } <nl> - bool IsCompressible ( ) const override { return true ; } <nl> + bool IsCacheable ( ) const override { return false ; } <nl> } ; <nl> <nl> TEST ( UncachedExternalString ) { <nl>
|
parsing : Lock ExternalStrings in the ExternalStringStream .
|
v8/v8
|
a1da383fb3257e13912f4b10ea398d05bf221b56
|
2018-09-04T14:09:04Z
|
mmm a / autogen . sh <nl> ppp b / autogen . sh <nl> <nl> # are automatically generated . <nl> <nl> # Check that we ' re being run from the right directory . <nl> - if test ! - e src / google / protobuf / stubs / common . h ; then <nl> + if test ! - f src / google / protobuf / stubs / common . h ; then <nl> cat > & 2 < < __EOF__ <nl> Could not find source code . Make sure you are running this script from the <nl> root of the distribution tree . <nl> __EOF__ <nl> exit 1 <nl> fi <nl> <nl> - if test ! - e gtest ; then <nl> + if test ! - d gtest ; then <nl> echo " gtest bundle not present . Downloading gtest - 1 . 3 . 0 automatically . " > & 2 <nl> set - ex <nl> curl http : / / googletest . googlecode . com / files / gtest - 1 . 3 . 0 . tar . bz2 | tar jx <nl>
|
Hopefully make autogen . sh run on solaris .
|
protocolbuffers/protobuf
|
2fd1208b34c2022e8f7768d5187bdad12c98d032
|
2009-05-01T21:41:32Z
|
mmm a / doc / inconsistencies <nl> ppp b / doc / inconsistencies <nl> In PHP , a data member that is unset will not appear at all in vardump or <nl> serialize . <nl> In HipHop , it will still appear , as null . <nl> <nl> - ( 3 ) isset ( $ this ) <nl> - <nl> - This is not supported yet . <nl> - <nl> 3 . Eval Issues <nl> <nl> ( 1 ) eval <nl> mmm a / src / compiler / expression / object_property_expression . cpp <nl> ppp b / src / compiler / expression / object_property_expression . cpp <nl> void ObjectPropertyExpression : : outputCPPObjProperty ( CodeGenerator & cg , <nl> AnalysisResultPtr ar , <nl> bool directVariant ) { <nl> bool bThis = m_object - > isThis ( ) ; <nl> + if ( bThis ) { <nl> + FunctionScopePtr func = ar - > getFunctionScope ( ) ; <nl> + if ( func & & func - > isStatic ( ) ) { <nl> + cg . printf ( " GET_THIS ( ) - > " ) ; <nl> + } <nl> + } <nl> <nl> const char * op = " . " ; <nl> string func = Option : : ObjectPrefix ; <nl> void ObjectPropertyExpression : : outputCPPExistTest ( CodeGenerator & cg , <nl> int op ) { <nl> if ( op = = T_ISSET ) { <nl> bool bThis = m_object - > isThis ( ) ; <nl> - if ( ! bThis ) { <nl> + if ( bThis ) { <nl> + FunctionScopePtr func = ar - > getFunctionScope ( ) ; <nl> + if ( func & & func - > isStatic ( ) ) { <nl> + cg . printf ( " GET_THIS ( ) - > " ) ; <nl> + } <nl> + } else { <nl> m_object - > outputCPP ( cg , ar ) ; <nl> cg . printf ( " - > " ) ; <nl> } <nl> void ObjectPropertyExpression : : outputCPPExistTest ( CodeGenerator & cg , <nl> void ObjectPropertyExpression : : outputCPPUnset ( CodeGenerator & cg , <nl> AnalysisResultPtr ar ) { <nl> bool bThis = m_object - > isThis ( ) ; <nl> - if ( ! bThis ) { <nl> + if ( bThis ) { <nl> + FunctionScopePtr func = ar - > getFunctionScope ( ) ; <nl> + if ( func & & func - > isStatic ( ) ) { <nl> + cg . printf ( " GET_THIS ( ) - > " ) ; <nl> + } <nl> + } else { <nl> m_object - > outputCPP ( cg , ar ) ; <nl> cg . printf ( " - > " ) ; <nl> } <nl> mmm a / src / compiler / expression / simple_variable . cpp <nl> ppp b / src / compiler / expression / simple_variable . cpp <nl> void SimpleVariable : : analyzeProgram ( AnalysisResultPtr ar ) { <nl> FunctionScopePtr func = <nl> dynamic_pointer_cast < FunctionScope > ( ar - > getScope ( ) ) ; <nl> func - > setContainsThis ( ) ; <nl> - if ( ! func - > isStatic ( ) | | ( m_context & ObjectContext ) ) { <nl> - m_this = true ; <nl> - } <nl> + m_this = true ; <nl> } else if ( m_name = = " GLOBALS " ) { <nl> m_globals = true ; <nl> } <nl> void SimpleVariable : : preOutputStash ( CodeGenerator & cg , AnalysisResultPtr ar , <nl> <nl> void SimpleVariable : : outputCPPImpl ( CodeGenerator & cg , AnalysisResultPtr ar ) { <nl> if ( m_this ) { <nl> - ClassScopePtr cls = ar - > getClassScope ( ) ; <nl> - if ( cls - > isRedeclaring ( ) | | cls - > derivesFromRedeclaring ( ) ! = <nl> - ClassScope : : FromNormal ) { <nl> - cg . printf ( " root " ) ; <nl> - } else { <nl> - cg . printf ( " GET_THIS ( ) " ) ; <nl> - } <nl> + ASSERT ( ( getContext ( ) & ObjectContext ) = = 0 ) ; <nl> + cg . printf ( " GET_THIS ( ) " ) ; <nl> } else if ( m_superGlobal ) { <nl> VariableTablePtr variables = ar - > getScope ( ) - > getVariables ( ) ; <nl> cg . printf ( " g - > % s " , variables - > getGlobalVariableName ( ar , m_name ) . c_str ( ) ) ; <nl> mmm a / src / compiler / option . cpp <nl> ppp b / src / compiler / option . 
cpp <nl> bool Option : : FlattenInvoke = true ; <nl> bool Option : : UseFastInvoke = false ; <nl> int Option : : InlineFunctionThreshold = - 1 ; <nl> bool Option : : ControlEvalOrder = true ; <nl> - <nl> bool Option : : GlobalRefParamAnalysis = true ; <nl> - <nl> - bool Option : : AllDynamic = false ; <nl> + bool Option : : AllDynamic = true ; <nl> bool Option : : AllVolatile = false ; <nl> <nl> std : : string Option : : FlibDirectory ; <nl> void Option : : Load ( Hdf & config ) { <nl> EnableXHP = config [ " EnableXHP " ] . getBool ( ) ; <nl> RTTIOutputFile = config [ " RTTIOutputFile " ] . getString ( ) ; <nl> EnableEval = ( EvalLevel ) config [ " EnableEval " ] . getByte ( 0 ) ; <nl> - AllDynamic = config [ " AllDynamic " ] . getBool ( ) ; <nl> + AllDynamic = config [ " AllDynamic " ] . getBool ( true ) ; <nl> AllVolatile = config [ " AllVolatile " ] . getBool ( ) ; <nl> <nl> GenerateSourceInfo = config [ " GenerateSourceInfo " ] . getBool ( false ) ; <nl> mmm a / src / runtime / base / frame_injection . cpp <nl> ppp b / src / runtime / base / frame_injection . cpp <nl> Array FrameInjection : : getArgs ( ) { <nl> return Array ( ) ; <nl> } <nl> <nl> + CObjRef FrameInjection : : getObjectRef ( ) { <nl> + if ( m_object . get ( ) & & m_object - > o_getId ( ) ) { <nl> + return m_object ; <nl> + } <nl> + return null_object ; <nl> + } <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / static late binding <nl> <nl> mmm a / src / runtime / base / frame_injection . h <nl> ppp b / src / runtime / base / frame_injection . h <nl> class FrameInjection { <nl> int flags ; <nl> <nl> virtual Array getArgs ( ) ; <nl> - CObjRef getObjectRef ( ) { return m_object ; } <nl> + CObjRef getObjectRef ( ) ; <nl> <nl> public : <nl> / / what does " static : : " resolve to ? <nl> mmm a / src / runtime / base / object_data . cpp <nl> ppp b / src / runtime / base / object_data . cpp <nl> Variant ObjectData : : os_invoke ( const char * c , const char * s , <nl> CArrRef params , int64 hash , <nl> bool fatal / * = true * / ) { <nl> Object obj = create_object ( c , Array : : Create ( ) , false ) ; <nl> + obj . get ( ) - > o_id = 0 ; / / for isset ( $ this ) to tell whether this is a fake obj <nl> return obj - > o_invoke ( s , params , hash , fatal ) ; <nl> } <nl> <nl> mmm a / src / test / test_code_run . cpp <nl> ppp b / src / test / test_code_run . cpp <nl> bool TestCodeRun : : CompileFiles ( ) { <nl> return true ; <nl> } <nl> <nl> + static string escape ( const std : : string & s ) { <nl> + string ret ; <nl> + ret . reserve ( s . size ( ) + 20 ) ; <nl> + for ( unsigned int i = 0 ; i < s . length ( ) ; i + + ) { <nl> + char ch = s [ i ] ; <nl> + if ( isprint ( ch ) | | ch = = ' \ n ' ) { <nl> + ret + = ch ; <nl> + } else { <nl> + char buf [ 10 ] ; <nl> + snprintf ( buf , sizeof ( buf ) , " { \ \ x % 02X } " , ( unsigned char ) ch ) ; <nl> + ret + = buf ; <nl> + } <nl> + } <nl> + return ret ; <nl> + } <nl> + <nl> static bool verify_result ( const char * input , const char * output , bool perfMode , <nl> const char * file = " " , int line = 0 , <nl> bool nowarnings = false , const char * subdir = " " , <nl> static bool verify_result ( const char * input , const char * output , bool perfMode , <nl> " % s " <nl> " mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - \ n " <nl> " Err : [ % s ] \ n " , file , line , input , <nl> - ( int ) expected . length ( ) , expected . c_str ( ) , <nl> - ( int ) actual . 
length ( ) , actual . c_str ( ) , <nl> + ( int ) expected . length ( ) , escape ( expected ) . c_str ( ) , <nl> + ( int ) actual . length ( ) , escape ( actual ) . c_str ( ) , <nl> err . c_str ( ) ) ; <nl> return false ; <nl> } <nl> bool TestCodeRun : : TestObjectMethod ( ) { <nl> } <nl> <nl> bool TestCodeRun : : TestClassMethod ( ) { <nl> + MVCR ( <nl> + " < ? php \ n " <nl> + " class Foo { \ n " <nl> + " static function Bar ( ) { \ n " <nl> + " if ( isset ( $ this ) & & isset ( $ this - > bar ) ) { \ n " <nl> + " echo \ " isset \ \ n \ " ; \ n " <nl> + " } \ n " <nl> + " var_dump ( $ this ) ; \ n " <nl> + " } \ n " <nl> + " } Foo : : Bar ( ) ; $ obj = new Foo ( ) ; $ obj - > Bar ( ) ; \ n " <nl> + ) ; <nl> + <nl> + MVCR ( <nl> + " < ? php \ n " <nl> + " class Example { \ n " <nl> + " function whatever ( ) { \ n " <nl> + " if ( isset ( $ this ) ) { \ n " <nl> + " var_dump ( ' static method call ' ) ; \ n " <nl> + " } else { \ n " <nl> + " var_dump ( ' non - static method call ' ) ; \ n " <nl> + " } \ n " <nl> + " } \ n " <nl> + " } \ n " <nl> + " Example : : whatever ( ) ; \ n " <nl> + " $ inst = new Example ( ) ; \ n " <nl> + " $ inst - > whatever ( ) ; \ n " <nl> + ) ; <nl> + <nl> MVCR ( " < ? php \ n " <nl> " if ( true ) { \ n " <nl> " class c extends AppendIterator { } \ n " <nl>
| fixed isset($this) | facebook/hhvm | 8528e77e48536ae36f0e0ee227757cc5f30b53e1 | 2010-06-14T20:42:10Z |
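The isset($this) fix above hinges on one detail: the placeholder object created by os_invoke for a static call gets o_id = 0, and getObjectRef() only reports an object when its id is non-zero. A minimal standalone C++ sketch of that idea; the types and names below are illustrative stand-ins, not HPHP's real ObjectData/FrameInjection API:

```cpp
#include <iostream>
#include <memory>

// Illustrative stand-ins for the runtime's ObjectData / FrameInjection.
struct FakeObjectData {
    int o_id;                       // 0 marks a placeholder created for a static call
    explicit FakeObjectData(int id) : o_id(id) {}
};

struct FakeFrame {
    std::shared_ptr<FakeObjectData> m_object;

    // Mirrors the patched getObjectRef(): only report $this when the
    // object exists *and* has a non-zero id.
    std::shared_ptr<FakeObjectData> getObjectRef() const {
        if (m_object && m_object->o_id) return m_object;
        return nullptr;             // corresponds to returning null_object
    }
};

int main() {
    FakeFrame staticCall{std::make_shared<FakeObjectData>(0)};   // Foo::Bar()
    FakeFrame methodCall{std::make_shared<FakeObjectData>(42)};  // $obj->Bar()

    std::cout << "isset($this) in static call: "
              << (staticCall.getObjectRef() ? "true" : "false") << "\n";
    std::cout << "isset($this) in method call: "
              << (methodCall.getObjectRef() ? "true" : "false") << "\n";
}
```

Run as written, the first line prints false and the second true, matching the behaviour the new MVCR test cases expect from PHP.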
mmm a / modules / control / controller / lon_controller . cc <nl> ppp b / modules / control / controller / lon_controller . cc <nl> Status LonController : : ComputeControlCommand ( <nl> FLAGS_max_acceleration_when_stopped & & <nl> std : : fabs ( debug - > preview_speed_reference ( ) ) < = <nl> vehicle_param_ . max_abs_speed_when_stopped ( ) ) | | <nl> - ( std : : abs ( debug - > path_remain ( ) < 0 . 3 ) ) ) ) { <nl> + std : : abs ( debug - > path_remain ( ) ) < 0 . 3 ) ) { <nl> acceleration_cmd = lon_controller_conf . standstill_acceleration ( ) ; <nl> AINFO < < " Stop location reached " ; <nl> debug - > set_is_full_stop ( true ) ; <nl>
| Control: fix typo | ApolloAuto/apollo | 34fb062bc8bfe0357e90fbad05fb3e45f5842967 | 2018-12-21T10:00:19Z |
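The one-character nature of this fix hides a real behavioural change: in the buggy form std::abs() wraps the comparison, so the expression collapses to plain (path_remain < 0.3) and the full-stop branch can fire even when the stop point is metres behind the vehicle. A compilable sketch of the two forms, with a plain double standing in for the debug->path_remain() accessor:

```cpp
#include <cmath>
#include <cstdlib>
#include <iostream>

int main() {
    // Remaining path to the stop point, in metres. A large negative value
    // means the reference point is far behind the vehicle (illustrative number).
    double path_remain = -5.0;

    // Buggy form: abs() is applied to the *comparison result* (0 or 1), so
    // this reduces to (path_remain < 0.3) and is true 5 m past the stop point.
    bool buggy = std::abs(path_remain < 0.3);

    // Fixed form: take the absolute distance first, true only within 0.3 m.
    bool fixed = std::abs(path_remain) < 0.3;

    std::cout << std::boolalpha
              << "buggy triggers full stop: " << buggy << "\n"
              << "fixed triggers full stop: " << fixed << "\n";
}
```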
mmm a / src / compiler / analysis / analysis_result . cpp <nl> ppp b / src / compiler / analysis / analysis_result . cpp <nl> bool AnalysisResult : : addClassDependency ( FileScopePtr usingFile , <nl> m_classDecs . find ( className ) ; <nl> if ( iter = = m_classDecs . end ( ) | | iter - > second . size ( ) ! = 1 ) return false ; <nl> ClassScopePtr classScope = iter - > second [ 0 ] ; <nl> - FileScopePtr fileScope = classScope - > getFileScope ( ) ; <nl> + FileScopePtr fileScope = classScope - > getContainingFile ( ) ; <nl> link ( usingFile , fileScope ) ; <nl> return true ; <nl> } <nl> bool AnalysisResult : : addFunctionDependency ( FileScopePtr usingFile , <nl> if ( iter = = m_functionDecs . end ( ) | | <nl> iter - > second . size ( ) ! = 1 ) return false ; <nl> FunctionScopePtr functionScope = iter - > second [ 0 ] ; <nl> - FileScopePtr fileScope = functionScope - > getFileScope ( ) ; <nl> + FileScopePtr fileScope = functionScope - > getContainingFile ( ) ; <nl> link ( usingFile , fileScope ) ; <nl> return true ; <nl> } <nl> bool AnalysisResult : : isSystemConstant ( const std : : string & constName ) { <nl> return m_constants - > isSystem ( constName ) ; <nl> } <nl> <nl> - void AnalysisResult : : addCallee ( StatementPtr stmt ) { <nl> - if ( m_calleesAdded . find ( stmt ) = = m_calleesAdded . end ( ) ) { <nl> - m_callees . push_back ( stmt ) ; <nl> - m_calleesAdded . insert ( stmt ) ; <nl> - } <nl> - } <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / Program <nl> <nl> mmm a / src / compiler / analysis / analysis_result . h <nl> ppp b / src / compiler / analysis / analysis_result . h <nl> class AnalysisResult : public BlockScope , public FunctionContainer { <nl> StatementPtrVec m_stmts ; <nl> StatementPtr m_stmt ; <nl> <nl> - StatementPtrVec m_callees ; <nl> - StatementPtrSet m_calleesAdded ; <nl> std : : string m_outputPath ; <nl> int m_optCounter ; <nl> <nl> mmm a / src / compiler / analysis / class_scope . cpp <nl> ppp b / src / compiler / analysis / class_scope . cpp <nl> using namespace boost ; <nl> ClassScope : : ClassScope ( KindOf kindOf , const std : : string & name , <nl> const std : : string & parent , <nl> const vector < string > & bases , <nl> - const std : : string & docComment , StatementPtr stmt , <nl> - FileScopePtr file ) <nl> - : BlockScope ( name , docComment , stmt , BlockScope : : ClassScope ) , m_file ( file ) , <nl> + const std : : string & docComment , StatementPtr stmt ) <nl> + : BlockScope ( name , docComment , stmt , BlockScope : : ClassScope ) , <nl> m_kindOf ( kindOf ) , m_parent ( parent ) , m_bases ( bases ) , m_attribute ( 0 ) , <nl> m_redeclaring ( - 1 ) , m_volatile ( false ) , m_needStaticInitializer ( false ) , <nl> m_derivesFromRedeclaring ( FromNormal ) , m_derivedByDynamic ( false ) , <nl> void ClassScope : : getRootParents ( AnalysisResultPtr ar , <nl> } <nl> <nl> string ClassScope : : getHeaderFilename ( CodeGenerator & cg ) { <nl> - FileScopePtr file = getFileScope ( ) ; <nl> + FileScopePtr file = getContainingFile ( ) ; <nl> ASSERT ( file ) ; <nl> string fileBase = file - > outputFilebase ( ) ; <nl> string headerFile = Option : : ClassHeaderPrefix ; <nl> outputCPPMethodInvokeTable ( CodeGenerator & cg , AnalysisResultPtr ar , <nl> iterFuncs = funcScopes . find ( name ) ; <nl> ASSERT ( iterFuncs ! = funcScopes . 
end ( ) ) ; <nl> FunctionScopePtr func = iterFuncs - > second [ 0 ] ; <nl> - string id = func - > getClass ( ) - > getId ( cg ) ; <nl> + string id = func - > getContainingClass ( ) - > getId ( cg ) ; <nl> if ( fewArgs & & <nl> func - > getMinParamCount ( ) > Option : : InvokeFewArgsCount ) <nl> continue ; <nl> mmm a / src / compiler / analysis / class_scope . h <nl> ppp b / src / compiler / analysis / class_scope . h <nl> class ClassScope : public BlockScope , public FunctionContainer , <nl> ClassScope ( KindOf kindOf , const std : : string & name , <nl> const std : : string & parent , <nl> const std : : vector < std : : string > & bases , <nl> - const std : : string & docComment , StatementPtr stmt , <nl> - FileScopePtr file ) ; <nl> + const std : : string & docComment , StatementPtr stmt ) ; <nl> <nl> <nl> / * * <nl> class ClassScope : public BlockScope , public FunctionContainer , <nl> <nl> std : : vector < std : : string > & getBases ( ) { return m_bases ; } <nl> <nl> - FileScopePtr getFileScope ( ) { <nl> - FileScopePtr fs = m_file . lock ( ) ; <nl> - return fs ; <nl> - } <nl> - <nl> ClassScopePtr getParentScope ( AnalysisResultPtr ar ) ; <nl> <nl> / * * <nl> class ClassScope : public BlockScope , public FunctionContainer , <nl> / / need to maintain declaration order for ClassInfo map <nl> FunctionScopePtrVec m_functionsVec ; <nl> <nl> - FileScopeWeakPtr m_file ; <nl> KindOf m_kindOf ; <nl> std : : string m_parent ; <nl> mutable std : : vector < std : : string > m_bases ; <nl> mmm a / src / compiler / analysis / file_scope . cpp <nl> ppp b / src / compiler / analysis / file_scope . cpp <nl> void FileScope : : outputCPPForwardDeclarations ( CodeGenerator & cg , <nl> BOOST_FOREACH ( name , m_usedClasses ) { <nl> cls = ar - > findClass ( name , AnalysisResult : : ClassName ) ; <nl> if ( cls & & cls - > isUserClass ( ) ) { <nl> - FileScopePtr fs = cls - > getFileScope ( ) ; <nl> + FileScopePtr fs = cls - > getContainingFile ( ) ; <nl> if ( fs ) { <nl> extraIncs [ fs - > getName ( ) ] = fs ; <nl> } <nl> void FileScope : : outputCPPDeclarations ( CodeGenerator & cg , <nl> BOOST_FOREACH ( string name , m_usedClasses ) { <nl> ClassScopePtr cls = ar - > findClass ( name , AnalysisResult : : ClassName ) ; <nl> if ( cls & & cls - > isUserClass ( ) ) { <nl> - FileScopePtr fs = cls - > getFileScope ( ) ; <nl> + FileScopePtr fs = cls - > getContainingFile ( ) ; <nl> if ( fs & & done . find ( fs ) = = done . end ( ) ) { <nl> done . insert ( fs ) ; <nl> cg_printInclude ( fs - > outputFilebase ( ) ) ; <nl> void FileScope : : outputCPPDeclarations ( CodeGenerator & cg , <nl> BOOST_FOREACH ( string name , m_usedFuncsInline ) { <nl> FunctionScopePtr func = ar - > findFunction ( name ) ; <nl> if ( func ) { <nl> - FileScopePtr fs = func - > getFileScope ( ) ; <nl> + FileScopePtr fs = func - > getContainingFile ( ) ; <nl> if ( fs & & done . find ( fs ) = = done . end ( ) ) { <nl> done . insert ( fs ) ; <nl> cg_printInclude ( fs - > outputFilebase ( ) ) ; <nl> mmm a / src / compiler / analysis / function_scope . cpp <nl> ppp b / src / compiler / analysis / function_scope . 
cpp <nl> FunctionScope : : FunctionScope ( AnalysisResultPtr ar , bool method , <nl> FileScopePtr file , <nl> bool inPseudoMain / * = false * / ) <nl> : BlockScope ( name , docComment , stmt , BlockScope : : FunctionScope ) , <nl> - m_method ( method ) , m_file ( file ) , <nl> - m_minParam ( minParam ) , m_maxParam ( maxParam ) , <nl> + m_method ( method ) , m_minParam ( minParam ) , m_maxParam ( maxParam ) , <nl> m_attribute ( attribute ) , m_refReturn ( reference ) , m_modifiers ( modifiers ) , <nl> m_virtual ( false ) , m_perfectVirtual ( false ) , m_overriding ( false ) , <nl> m_redeclaring ( - 1 ) , m_volatile ( false ) , m_pseudoMain ( inPseudoMain ) , <nl> bool FunctionScope : : isMagic ( ) const { <nl> return m_name . size ( ) > = 2 & & m_name [ 0 ] = = ' _ ' & & m_name [ 1 ] = = ' _ ' ; <nl> } <nl> <nl> - void FunctionScope : : setClass ( ClassScopePtr cls ) { <nl> - m_class = cls ; <nl> - setOuterScope ( cls ) ; <nl> - } <nl> - <nl> - ClassScopePtr FunctionScope : : getClass ( ) { <nl> - return m_class . lock ( ) ; <nl> - } <nl> - <nl> static std : : string s_empty ; <nl> const string & FunctionScope : : getOriginalName ( ) const { <nl> if ( m_pseudoMain ) return s_empty ; <nl> void FunctionScope : : setReturnType ( AnalysisResultPtr ar , TypePtr type ) { <nl> } <nl> } <nl> if ( ! type - > getName ( ) . empty ( ) ) { <nl> - FileScopePtr fs = getFileScope ( ) ; <nl> + FileScopePtr fs = getContainingFile ( ) ; <nl> if ( fs ) fs - > addClassDependency ( ar , type - > getName ( ) ) ; <nl> } <nl> m_returnType = type ; <nl> mmm a / src / compiler / analysis / function_scope . h <nl> ppp b / src / compiler / analysis / function_scope . h <nl> class FunctionScope : public BlockScope , <nl> m_volatile = false ; <nl> } <nl> <nl> - void setClass ( ClassScopePtr cls ) ; <nl> - ClassScopePtr getClass ( ) ; <nl> - <nl> / * * <nl> * Get original name of the function , without case being lowered . <nl> * / <nl> class FunctionScope : public BlockScope , <nl> * / <nl> void outputCPPCallInfo ( CodeGenerator & cg , AnalysisResultPtr ar ) ; <nl> <nl> - FileScopePtr getFileScope ( ) { <nl> - FileScopePtr fs = m_file . lock ( ) ; <nl> - return fs ; <nl> - } <nl> - <nl> / * * <nl> * Serialize the iface , not everything . <nl> * / <nl> class FunctionScope : public BlockScope , <nl> bool inPseudoMain ( ) { <nl> return m_pseudoMain ; <nl> } <nl> - void setFileScope ( FileScopePtr fs ) { <nl> - m_file = fs ; <nl> - } <nl> <nl> void setMagicMethod ( ) { <nl> m_magicMethod = true ; <nl> class FunctionScope : public BlockScope , <nl> static StringToRefParamInfoPtrMap s_refParamInfo ; <nl> <nl> bool m_method ; <nl> - FileScopeWeakPtr m_file ; <nl> int m_minParam ; <nl> int m_maxParam ; <nl> int m_attribute ; <nl> class FunctionScope : public BlockScope , <nl> int m_inlineIndex ; <nl> bool m_directInvoke ; <nl> FunctionOptPtr m_optFunction ; <nl> - boost : : weak_ptr < HPHP : : ClassScope > m_class ; <nl> bool outputCPPInvokeArgCountCheck ( CodeGenerator & cg , AnalysisResultPtr ar , <nl> bool ret , bool constructor ) ; <nl> } ; <nl> mmm a / src / compiler / analysis / variable_table . cpp <nl> ppp b / src / compiler / analysis / variable_table . cpp <nl> void VariableTable : : outputCPPGlobalVariablesDtorIncludes ( CodeGenerator & cg , <nl> ClassScopePtr cls = ar - > findClass ( varType - > getName ( ) ) ; <nl> ASSERT ( cls & & ! 
cls - > isRedeclaring ( ) ) ; <nl> if ( cls - > isUserClass ( ) ) { <nl> - const string fileBase = cls - > getFileScope ( ) - > outputFilebase ( ) ; <nl> + const string fileBase = cls - > getContainingFile ( ) - > outputFilebase ( ) ; <nl> if ( dtorIncludes . find ( fileBase ) = = dtorIncludes . end ( ) ) { <nl> cg_printInclude ( fileBase + " . h " ) ; <nl> dtorIncludes . insert ( fileBase ) ; <nl> mmm a / src / compiler / builtin_symbols . cpp <nl> ppp b / src / compiler / builtin_symbols . cpp <nl> void BuiltinSymbols : : ParseExtClasses ( AnalysisResultPtr ar , const char * * p , <nl> } <nl> ClassScopePtr cl ( new ClassScope ( ar , cname , cparent , ifaces , methods ) ) ; <nl> for ( uint i = 0 ; i < methods . size ( ) ; + + i ) { <nl> - methods [ i ] - > setClass ( cl ) ; <nl> + methods [ i ] - > setOuterScope ( cl ) ; <nl> } <nl> p + + ; <nl> / / Parse properties <nl> mmm a / src / compiler / construct . cpp <nl> ppp b / src / compiler / construct . cpp <nl> void Construct : : addUserFunction ( AnalysisResultPtr ar , <nl> bool strong / * = true * / ) { <nl> if ( ! name . empty ( ) ) { <nl> FunctionScopePtr func = ar - > findFunction ( name ) ; <nl> - if ( func & & func - > isUserFunction ( ) ) { <nl> - ar - > addCallee ( func - > getStmt ( ) ) ; <nl> - } <nl> if ( strong & & ar - > getPhase ( ) = = AnalysisResult : : AnalyzeAll ) { <nl> FunctionScopePtr func = getFunctionScope ( ) ; <nl> getFileScope ( ) - > addFunctionDependency ( ar , name , func & & <nl> void Construct : : addUserClass ( AnalysisResultPtr ar , <nl> bool strong / * = true * / ) { <nl> if ( ! name . empty ( ) ) { <nl> ClassScopePtr cls = ar - > findClass ( name ) ; <nl> - if ( cls & & cls - > isUserClass ( ) ) { <nl> - ar - > addCallee ( cls - > getStmt ( ) ) ; <nl> - } <nl> if ( strong & & ! ar - > isFirstPass ( ) ) { <nl> getFileScope ( ) - > addClassDependency ( ar , name ) ; <nl> } <nl> mmm a / src / compiler / statement / class_statement . cpp <nl> ppp b / src / compiler / statement / class_statement . cpp <nl> void ClassStatement : : onParse ( AnalysisResultPtr ar , BlockScopePtr scope ) { <nl> StatementPtr stmt = dynamic_pointer_cast < Statement > ( shared_from_this ( ) ) ; <nl> ClassScopePtr classScope ( new ClassScope ( kindOf , m_originalName , m_parent , <nl> bases , m_docComment , <nl> - stmt , fs ) ) ; <nl> + stmt ) ) ; <nl> m_classScope = classScope ; <nl> if ( ! fs - > addClass ( ar , classScope ) ) { <nl> m_ignored = true ; <nl> mmm a / src / compiler / statement / function_statement . cpp <nl> ppp b / src / compiler / statement / function_statement . cpp <nl> void FunctionStatement : : outputCPPImpl ( CodeGenerator & cg , <nl> string fname = funcScope - > getId ( cg ) . c_str ( ) ; <nl> bool pseudoMain = funcScope - > inPseudoMain ( ) ; <nl> string origFuncName = ! pseudoMain ? funcScope - > getOriginalName ( ) : <nl> - ( " run_init : : " + funcScope - > getFileScope ( ) - > getName ( ) ) ; <nl> + ( " run_init : : " + funcScope - > getContainingFile ( ) - > getName ( ) ) ; <nl> string funcSection ; <nl> <nl> if ( outputFFI ( cg , ar ) ) return ; <nl> void FunctionStatement : : outputCPPImpl ( CodeGenerator & cg , <nl> <nl> if ( pseudoMain ) { <nl> cg_printf ( " % s % s ( " , Option : : PseudoMainPrefix , <nl> - funcScope - > getFileScope ( ) - > pseudoMainName ( ) . c_str ( ) ) ; <nl> + funcScope - > getContainingFile ( ) - > pseudoMainName ( ) . c_str ( ) ) ; <nl> } else { <nl> cg_printf ( " % s % s ( " , Option : : FunctionPrefix , fname . 
c_str ( ) ) ; <nl> } <nl> void FunctionStatement : : outputCPPImpl ( CodeGenerator & cg , <nl> if ( pseudoMain ) { <nl> cg_printf ( " PSEUDOMAIN_INJECTION % s ( % s , % s % s ) ; \ n " , <nl> sys , origFuncName . c_str ( ) , Option : : PseudoMainPrefix , <nl> - funcScope - > getFileScope ( ) - > pseudoMainName ( ) . c_str ( ) ) ; <nl> + funcScope - > getContainingFile ( ) - > pseudoMainName ( ) . c_str ( ) ) ; <nl> } else { <nl> if ( m_stmt - > hasBody ( ) ) { <nl> cg_printf ( " FUNCTION_INJECTION % s ( % s ) ; \ n " , sys , origFuncName . c_str ( ) ) ; <nl> mmm a / src / compiler / statement / interface_statement . cpp <nl> ppp b / src / compiler / statement / interface_statement . cpp <nl> void InterfaceStatement : : onParse ( AnalysisResultPtr ar , BlockScopePtr scope ) { <nl> StatementPtr stmt = dynamic_pointer_cast < Statement > ( shared_from_this ( ) ) ; <nl> ClassScopePtr classScope <nl> ( new ClassScope ( ClassScope : : KindOfInterface , m_name , " " , bases , <nl> - m_docComment , stmt , fs ) ) ; <nl> + m_docComment , stmt ) ) ; <nl> m_classScope = classScope ; <nl> fs - > addClass ( ar , classScope ) ; <nl> <nl> mmm a / src / compiler / statement / method_statement . cpp <nl> ppp b / src / compiler / statement / method_statement . cpp <nl> void MethodStatement : : onParse ( AnalysisResultPtr ar , BlockScopePtr scope ) { <nl> <nl> fs - > setParamCounts ( ar , - 1 , - 1 ) ; <nl> classScope - > addFunction ( ar , fs ) ; <nl> - fs - > setClass ( classScope ) ; <nl> + <nl> if ( m_name = = " __construct " ) { <nl> classScope - > setAttribute ( ClassScope : : HasConstructor ) ; <nl> } else if ( m_name = = " __destruct " ) { <nl>
| Remove some unused code | facebook/hhvm | 5a233eea114d1ff60450053202fe9bf333d72371 | 2010-10-29T04:59:48Z |
mmm a / xbmc / cores / VideoPlayer / VideoRenderers / HwDecRender / RendererMediaCodec . cpp <nl> ppp b / xbmc / cores / VideoPlayer / VideoRenderers / HwDecRender / RendererMediaCodec . cpp <nl> CRenderInfo CRendererMediaCodec : : GetRenderInfo ( ) <nl> { <nl> CRenderInfo info ; <nl> info . formats = m_formats ; <nl> - info . max_buffer_size = NUM_BUFFERS ; <nl> - info . optimal_buffer_size = 2 ; <nl> + info . max_buffer_size = 4 ; <nl> + info . optimal_buffer_size = 3 ; <nl> return info ; <nl> } <nl> <nl> mmm a / xbmc / cores / VideoPlayer / VideoRenderers / HwDecRender / RendererMediaCodecSurface . cpp <nl> ppp b / xbmc / cores / VideoPlayer / VideoRenderers / HwDecRender / RendererMediaCodecSurface . cpp <nl> CRenderInfo CRendererMediaCodecSurface : : GetRenderInfo ( ) <nl> { <nl> CRenderInfo info ; <nl> info . formats = m_formats ; <nl> - info . max_buffer_size = NUM_BUFFERS ; <nl> - info . optimal_buffer_size = 2 ; <nl> + info . max_buffer_size = 4 ; <nl> + info . optimal_buffer_size = 3 ; <nl> return info ; <nl> } <nl> <nl>
| FIX: [amc] freeze when refresh rate changed | xbmc/xbmc | c3d7e9a0bd02bd49430140c6361573292ba9f14d | 2016-07-23T18:54:12Z |
mmm a / imgui . h <nl> ppp b / imgui . h <nl> namespace ImGui <nl> IMGUI_API bool IsMouseDoubleClicked ( int button ) ; / / did mouse button double - clicked . a double - click returns false in IsMouseClicked ( ) . uses io . MouseDoubleClickTime . <nl> IMGUI_API bool IsMouseReleased ( int button ) ; / / did mouse button released ( went from Down to ! Down ) <nl> IMGUI_API bool IsMouseDragging ( int button = 0 , float lock_threshold = - 1 . 0f ) ; / / is mouse dragging . if lock_threshold < - 1 . 0f uses io . MouseDraggingThreshold <nl> - IMGUI_API bool IsMouseHoveringRect ( const ImVec2 & r_min , const ImVec2 & r_max , bool clip = true ) ; / / is mouse hovering given bounding rect ( in screen space ) . clipped by current clipping settings . disregarding of consideration of focus / window ordering / blocked by a popup . <nl> + IMGUI_API bool IsMouseHoveringRect ( const ImVec2 & r_min , const ImVec2 & r_max , bool clip = true ) ; / / is mouse hovering given bounding rect ( in screen space ) . clipped by current clipping settings , but disregarding of other consideration of focus / window ordering / popup - block . <nl> IMGUI_API bool IsMousePosValid ( const ImVec2 * mouse_pos = NULL ) ; / / <nl> IMGUI_API ImVec2 GetMousePos ( ) ; / / shortcut to ImGui : : GetIO ( ) . MousePos provided by user , to be consistent with other calls <nl> IMGUI_API ImVec2 GetMousePosOnOpeningCurrentPopup ( ) ; / / retrieve backup of mouse position at the time of opening popup we have BeginPopup ( ) into <nl> struct ImFontGlyph <nl> <nl> enum ImFontAtlasFlags_ <nl> { <nl> + ImFontAtlasFlags_None = 0 , <nl> ImFontAtlasFlags_NoPowerOfTwoHeight = 1 < < 0 , / / Don ' t round the height to next power of two <nl> ImFontAtlasFlags_NoMouseCursors = 1 < < 1 / / Don ' t build software mouse cursors into the atlas <nl> } ; <nl> <nl> - / / Load and rasterize multiple TTF / OTF fonts into a same texture . <nl> - / / Sharing a texture for multiple fonts allows us to reduce the number of draw calls during rendering . <nl> - / / We also add custom graphic data into the texture that serves for ImGui . <nl> - / / 1 . ( Optional ) Call AddFont * * * functions . If you don ' t call any , the default font will be loaded for you . <nl> - / / 2 . Call GetTexDataAsAlpha8 ( ) or GetTexDataAsRGBA32 ( ) to build and retrieve pixels data . <nl> - / / 3 . Upload the pixels data into a texture within your graphics system . <nl> - / / 4 . Call SetTexID ( my_tex_id ) ; and pass the pointer / identifier to your texture . This value will be passed back to you during rendering to identify the texture . <nl> - / / IMPORTANT : If you pass a ' glyph_ranges ' array to AddFont * * * functions , you need to make sure that your array persist up until the ImFont is build ( when calling GetTexData * * * or Build ( ) ) . We only copy the pointer , not the data . <nl> + / / Load and rasterize multiple TTF / OTF fonts into a same texture . The font atlas will build a single texture holding : <nl> + / / - One or more fonts . <nl> + / / - Custom graphics data needed to render the shapes needed by Dear ImGui . <nl> + / / - Mouse cursor shapes for software cursor rendering ( unless setting ' Flags | = ImFontAtlasFlags_NoMouseCursors ' in the font atlas ) . <nl> + / / It is the user - code responsibility to setup / build the atlas , then upload the pixel data into a texture accessible by your graphics api . <nl> + / / - Optionally , call any of the AddFont * * * functions . If you don ' t call any , the default font embedded in the code will be loaded for you . 
<nl> + / / - Call GetTexDataAsAlpha8 ( ) or GetTexDataAsRGBA32 ( ) to build and retrieve pixels data . <nl> + / / - Upload the pixels data into a texture within your graphics system ( see imgui_impl_xxxx . cpp examples ) <nl> + / / - Call SetTexID ( my_tex_id ) ; and pass the pointer / identifier to your texture in a format natural to your graphics API . <nl> + / / This value will be passed back to you during rendering to identify the texture . Read FAQ entry about ImTextureID for more details . <nl> + / / Common pitfalls : <nl> + / / - If you pass a ' glyph_ranges ' array to AddFont * * * functions , you need to make sure that your array persist up until the <nl> + / / atlas is build ( when calling GetTexData * * * or Build ( ) ) . We only copy the pointer , not the data . <nl> + / / - Important : By default , AddFontFromMemoryTTF ( ) takes ownership of the data . Even though we are not writing to it , we will free the pointer on destruction . <nl> + / / You can set font_cfg - > FontDataOwnedByAtlas = false to keep ownership of your data and it won ' t be freed , <nl> + / / - Even though many functions are suffixed with " TTF " , OTF data is supported just as well . <nl> + / / - This is an old API and it is currently awkward for those and and various other reasons ! We will address them in the future ! <nl> struct ImFontAtlas <nl> { <nl> IMGUI_API ImFontAtlas ( ) ; <nl> struct ImFontAtlas <nl> IMGUI_API ImFont * AddFont ( const ImFontConfig * font_cfg ) ; <nl> IMGUI_API ImFont * AddFontDefault ( const ImFontConfig * font_cfg = NULL ) ; <nl> IMGUI_API ImFont * AddFontFromFileTTF ( const char * filename , float size_pixels , const ImFontConfig * font_cfg = NULL , const ImWchar * glyph_ranges = NULL ) ; <nl> - IMGUI_API ImFont * AddFontFromMemoryTTF ( void * font_data , int font_size , float size_pixels , const ImFontConfig * font_cfg = NULL , const ImWchar * glyph_ranges = NULL ) ; / / Note : Transfer ownership of ' ttf_data ' to ImFontAtlas ! Will be deleted after Build ( ) . Set font_cfg - > FontDataOwnedByAtlas to false to keep ownership . <nl> + IMGUI_API ImFont * AddFontFromMemoryTTF ( void * font_data , int font_size , float size_pixels , const ImFontConfig * font_cfg = NULL , const ImWchar * glyph_ranges = NULL ) ; / / Note : Transfer ownership of ' ttf_data ' to ImFontAtlas ! Will be deleted after destruction of the atlas . Set font_cfg - > FontDataOwnedByAtlas = false to keep ownership of your data and it won ' t be freed . <nl> IMGUI_API ImFont * AddFontFromMemoryCompressedTTF ( const void * compressed_font_data , int compressed_font_size , float size_pixels , const ImFontConfig * font_cfg = NULL , const ImWchar * glyph_ranges = NULL ) ; / / ' compressed_font_data ' still owned by caller . Compress with binary_to_compressed_c . cpp . <nl> IMGUI_API ImFont * AddFontFromMemoryCompressedBase85TTF ( const char * compressed_font_data_base85 , float size_pixels , const ImFontConfig * font_cfg = NULL , const ImWchar * glyph_ranges = NULL ) ; / / ' compressed_font_data_base85 ' still owned by caller . Compress with binary_to_compressed_c . cpp with - base85 parameter . <nl> IMGUI_API void ClearInputData ( ) ; / / Clear input data ( all ImFontConfig structures including sizes , TTF data , glyph ranges , etc . ) = all the data used to build the texture and fonts . <nl> struct ImFontAtlas <nl> <nl> / / Build atlas , retrieve pixel data . <nl> / / User is in charge of copying the pixels into graphics memory ( e . g . create a texture with your engine ) . 
Then store your texture handle with SetTexID ( ) . <nl> - / / RGBA32 format is provided for convenience and compatibility , but note that unless you use CustomRect to draw color data , the RGB pixels emitted from Fonts will all be white ( ~ 75 % of waste ) . <nl> - / / Pitch = Width * BytesPerPixels <nl> + / / The pitch is always = Width * BytesPerPixels ( 1 or 4 ) <nl> + / / Building in RGBA32 format is provided for convenience and compatibility , but note that unless you manually manipulate or copy color data into <nl> + / / the texture ( e . g . when using the AddCustomRect * * * api ) , then the RGB pixels emitted will always be white ( ~ 75 % of memory / bandwidth waste . <nl> IMGUI_API bool Build ( ) ; / / Build pixels data . This is called automatically for you by the GetTexData * * * functions . <nl> IMGUI_API bool IsBuilt ( ) { return Fonts . Size > 0 & & ( TexPixelsAlpha8 ! = NULL | | TexPixelsRGBA32 ! = NULL ) ; } <nl> IMGUI_API void GetTexDataAsAlpha8 ( unsigned char * * out_pixels , int * out_width , int * out_height , int * out_bytes_per_pixel = NULL ) ; / / 1 byte per - pixel <nl> mmm a / imgui_draw . cpp <nl> ppp b / imgui_draw . cpp <nl> static const ImVec2 FONT_ATLAS_DEFAULT_TEX_CURSOR_DATA [ ImGuiMouseCursor_COUNT ] [ 3 <nl> ImFontAtlas : : ImFontAtlas ( ) <nl> { <nl> Locked = false ; <nl> - Flags = 0x00 ; <nl> + Flags = ImFontAtlasFlags_None ; <nl> TexID = NULL ; <nl> TexDesiredWidth = 0 ; <nl> TexGlyphPadding = 1 ; <nl>
| ImFontAtlas Comments() + added ImFontAtlasFlags_None for consistency. | ocornut/imgui | a082692b0a587b3e150bb7034b037d2f75b44891 | 2018-08-25T18:06:17Z |
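The rewritten ImFontAtlas comment block above enumerates the setup steps (add fonts, build and retrieve pixels, upload to a texture, store the texture id). A minimal sketch of that sequence against the public ImFontAtlas API; UploadTextureRGBA32 is a hypothetical backend hook standing in for whatever the rendering backend (imgui_impl_xxxx) actually provides:

```cpp
#include "imgui.h"

// Hypothetical backend hook: uploads RGBA32 pixels and returns a handle.
ImTextureID UploadTextureRGBA32(const unsigned char* pixels, int w, int h);

void BuildFontAtlas()
{
    ImGuiIO& io = ImGui::GetIO();

    // 1. Optionally add fonts; with no AddFont*** call, the embedded
    //    default font is loaded for you.
    io.Fonts->AddFontDefault();

    // 2. Build and retrieve the pixel data (RGBA32 for convenience; the
    //    RGB channels stay white unless custom rects are used).
    unsigned char* pixels = nullptr;
    int width = 0, height = 0;
    io.Fonts->GetTexDataAsRGBA32(&pixels, &width, &height);

    // 3. Upload to the graphics API (backend-specific, assumed here).
    ImTextureID tex_id = UploadTextureRGBA32(pixels, width, height);

    // 4. Hand the identifier back to the atlas; it is passed back to the
    //    application at render time to identify the texture.
    io.Fonts->SetTexID(tex_id);
}
```

At draw time the stored value comes back inside the draw commands, which is why step 4 matters even though the atlas itself never dereferences the id.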
mmm a / Marlin / servo . h <nl> ppp b / Marlin / servo . h <nl> <nl> <nl> / / Say which 16 bit timers can be used and in what order <nl> # if defined ( __AVR_ATmega1280__ ) | | defined ( __AVR_ATmega2560__ ) <nl> - # define _useTimer5 <nl> / / # define _useTimer1 <nl> # define _useTimer3 <nl> # define _useTimer4 <nl> - / / typedef enum { _timer5 , _timer1 , _timer3 , _timer4 , _Nbr_16timers } timer16_Sequence_t ; <nl> - typedef enum { _timer5 , _timer3 , _timer4 , _Nbr_16timers } timer16_Sequence_t ; <nl> + # ifndef MOTOR_CURRENT_PWM_XY_PIN <nl> + / / Timer 5 is used for motor current PWM and can ' t be used for servos . <nl> + # define _useTimer5 <nl> + / / typedef enum { _timer5 , _timer1 , _timer3 , _timer4 , _Nbr_16timers } timer16_Sequence_t ; <nl> + typedef enum { _timer5 , _timer3 , _timer4 , _Nbr_16timers } timer16_Sequence_t ; <nl> + # else <nl> + typedef enum { _timer3 , _timer4 , _Nbr_16timers } timer16_Sequence_t ; <nl> + # endif <nl> <nl> # elif defined ( __AVR_ATmega32U4__ ) <nl> / / # define _useTimer1 <nl>
| Enabling servo usage on boards with PWM current control | MarlinFirmware/Marlin | adb5375a091c0694d61520d68803bb670a119543 | 2016-03-21T23:56:31Z |
mmm a / Telegram / Resources / langs / lang . strings <nl> ppp b / Telegram / Resources / langs / lang . strings <nl> https : / / github . com / telegramdesktop / tdesktop / blob / master / LEGAL <nl> " lng_export_option_info_about " = " Your chosen screen name , username , phone number and profile pictures . " ; <nl> " lng_export_option_contacts " = " Contacts list " ; <nl> " lng_export_option_contacts_about " = " If you allow access , contacts are continuously synced with Telegram . You can adjust this in Settings > Privacy & Security on mobile devices . " ; <nl> - " lng_export_option_sessions " = " Sessions list " ; <nl> + " lng_export_option_sessions " = " Active sessions " ; <nl> " lng_export_option_sessions_about " = " We store this to display your connected devices in Settings > Privacy & Security > Active Sessions . Terminating a session removes this data from Telegram servers . " ; <nl> + " lng_export_header_other " = " Other " ; <nl> + " lng_export_option_other " = " Miscellaneous data " ; <nl> + " lng_export_option_other_about " = " Other types of data not mentioned above . ( beta ) " ; <nl> " lng_export_header_chats " = " Chat export settings " ; <nl> " lng_export_option_personal_chats " = " Personal chats " ; <nl> " lng_export_option_bot_chats " = " Bot chats " ; <nl> https : / / github . com / telegramdesktop / tdesktop / blob / master / LEGAL <nl> " lng_export_total_size " = " Total size : { size } . " ; <nl> " lng_export_folder " = " Choose export folder " ; <nl> " lng_export_invalid " = " Sorry , you have started a new data export , so this data export is now cancelled . " ; <nl> - " lng_export_delay " = " Sorry , for security reasons , you will be able to begin downloading your data in 24 hours . We have notified all your devices about the export request to make sure it ' s authorized and give you time to react if it ' s not . \ n \ nPlease come back on { date } and repeat the request using the same device . " ; <nl> + " lng_export_delay " = " Sorry , for security reasons , you will be able to begin downloading your data in { hours } . We have notified all your devices about the export request to make sure it ' s authorized and give you time to react if it ' s not . \ n \ nPlease come back on { date } and repeat the request using the same device . " ; <nl> + " lng_export_delay_less_than_hour " = " less than an hour " ; <nl> + " lng_export_delay_hours # one " = " { count } hour " ; <nl> + " lng_export_delay_hours # other " = " { count } hours " ; <nl> " lng_export_suggest_title " = " Data export ready " ; <nl> " lng_export_suggest_text " = " You can now download the data you requested . Start exporting data ? " ; <nl> " lng_export_suggest_cancel " = " Not now " ; <nl> mmm a / Telegram / Resources / scheme . tl <nl> ppp b / Telegram / Resources / scheme . tl <nl> inputFileLocation # 14637196 volume_id : long local_id : int secret : long = InputFileLo <nl> inputEncryptedFileLocation # f5235d55 id : long access_hash : long = InputFileLocation ; <nl> inputDocumentFileLocation # 430f0724 id : long access_hash : long version : int = InputFileLocation ; <nl> inputSecureFileLocation # cbc7ee28 id : long access_hash : long = InputFileLocation ; <nl> + inputTakeoutFileLocation # 29be5899 = InputFileLocation ; <nl> <nl> inputAppEvent # 770656a8 time : double type : string peer : long data : string = InputAppEvent ; <nl> <nl> mmm a / Telegram / SourceFiles / data / data_session . cpp <nl> ppp b / Telegram / SourceFiles / data / data_session . 
cpp <nl> Session : : Session ( not_null < AuthSession * > session ) <nl> } <nl> <nl> void Session : : startExport ( ) { <nl> + if ( _exportPanel ) { <nl> + _exportPanel - > activatePanel ( ) ; <nl> + return ; <nl> + } <nl> _export = std : : make_unique < Export : : ControllerWrap > ( ) ; <nl> _exportPanel = std : : make_unique < Export : : View : : PanelController > ( <nl> _export . get ( ) ) ; <nl> mmm a / Telegram / SourceFiles / export / export_api_wrap . cpp <nl> ppp b / Telegram / SourceFiles / export / export_api_wrap . cpp <nl> LocationKey ComputeLocationKey ( const Data : : FileLocation & value ) { <nl> } , [ & ] ( const MTPDinputEncryptedFileLocation & data ) { <nl> result . type | = ( 4ULL < < 24 ) ; <nl> result . id = data . vid . v ; <nl> + } , [ & ] ( const MTPDinputTakeoutFileLocation & data ) { <nl> + result . type | = ( 5ULL < < 24 ) ; <nl> } ) ; <nl> return result ; <nl> } <nl> struct ApiWrap : : UserpicsProcess { <nl> int fileIndex = - 1 ; <nl> } ; <nl> <nl> + struct ApiWrap : : OtherDataProcess { <nl> + Data : : File file ; <nl> + FnMut < void ( Data : : File & & ) > done ; <nl> + } ; <nl> + <nl> struct ApiWrap : : FileProcess { <nl> FileProcess ( const QString & path , Output : : Stats * stats ) ; <nl> <nl> auto ApiWrap : : splitRequest ( int index , Request & & request ) { <nl> } <nl> <nl> auto ApiWrap : : fileRequest ( const Data : : FileLocation & location , int offset ) { <nl> - Expects ( location . dcId ! = 0 ) ; <nl> + Expects ( location . dcId ! = 0 <nl> + | | location . data . type ( ) = = mtpc_inputTakeoutFileLocation ) ; <nl> Expects ( _takeoutId . has_value ( ) ) ; <nl> <nl> return std : : move ( _mtp . request ( MTPInvokeWithTakeout < MTPupload_GetFile > ( <nl> auto ApiWrap : : fileRequest ( const Data : : FileLocation & location , int offset ) { <nl> MTP_int ( offset ) , <nl> MTP_int ( kFileChunkSize ) ) <nl> ) ) . fail ( [ = ] ( RPCError & & result ) { <nl> - error ( std : : move ( result ) ) ; <nl> + if ( result . type ( ) = = qstr ( " TAKEOUT_FILE_EMPTY " ) <nl> + & & _otherDataProcess ! = nullptr ) { <nl> + filePartDone ( 0 , MTP_upload_file ( MTP_storage_filePartial ( ) , <nl> + MTP_int ( 0 ) , <nl> + MTP_bytes ( QByteArray ( ) ) ) ) ; <nl> + } else { <nl> + error ( std : : move ( result ) ) ; <nl> + } <nl> } ) . toDC ( MTP : : ShiftDcId ( location . dcId , MTP : : kExportMediaDcShift ) ) ) ; <nl> } <nl> <nl> void ApiWrap : : requestSplitRanges ( ) { <nl> void ApiWrap : : requestDialogsCount ( ) { <nl> Expects ( _startProcess ! = nullptr ) ; <nl> <nl> + validateSplits ( ) ; <nl> + <nl> splitRequest ( _startProcess - > splitIndex , MTPmessages_GetDialogs ( <nl> MTP_flags ( 0 ) , <nl> MTP_int ( 0 ) , / / offset_date <nl> void ApiWrap : : requestDialogsList ( <nl> void ApiWrap : : validateSplits ( ) { <nl> if ( _splits . empty ( ) ) { <nl> _splits . push_back ( MTP_messageRange ( <nl> - MTP_int ( 0 ) , <nl> + MTP_int ( 1 ) , <nl> MTP_int ( std : : numeric_limits < int > : : max ( ) ) ) ) ; <nl> } <nl> } <nl> void ApiWrap : : requestPersonalInfo ( FnMut < void ( Data : : PersonalInfo & & ) > done ) { <nl> } ) . send ( ) ; <nl> } <nl> <nl> + void ApiWrap : : requestOtherData ( <nl> + const QString & suggestedPath , <nl> + FnMut < void ( Data : : File & & ) > done ) { <nl> + Expects ( _otherDataProcess = = nullptr ) ; <nl> + <nl> + _otherDataProcess = std : : make_unique < OtherDataProcess > ( ) ; <nl> + _otherDataProcess - > done = std : : move ( done ) ; <nl> + _otherDataProcess - > file . location . 
data = MTP_inputTakeoutFileLocation ( ) ; <nl> + _otherDataProcess - > file . suggestedPath = suggestedPath ; <nl> + loadFile ( <nl> + _otherDataProcess - > file , <nl> + [ ] ( FileProgress progress ) { return true ; } , <nl> + [ = ] ( const QString & result ) { otherDataDone ( result ) ; } ) ; <nl> + } <nl> + <nl> + void ApiWrap : : otherDataDone ( const QString & relativePath ) { <nl> + Expects ( _otherDataProcess ! = nullptr ) ; <nl> + <nl> + _otherDataProcess - > file . relativePath = relativePath ; <nl> + const auto process = base : : take ( _otherDataProcess ) ; <nl> + process - > done ( std : : move ( process - > file ) ) ; <nl> + } <nl> + <nl> void ApiWrap : : requestUserpics ( <nl> FnMut < bool ( Data : : UserpicsInfo & & ) > start , <nl> Fn < bool ( DownloadProgress ) > progress , <nl> void ApiWrap : : loadFile ( <nl> Fn < bool ( FileProgress ) > progress , <nl> FnMut < void ( QString ) > done ) { <nl> Expects ( _fileProcess = = nullptr ) ; <nl> - Expects ( file . location . dcId ! = 0 ) ; <nl> + Expects ( file . location . dcId ! = 0 <nl> + | | file . location . data . type ( ) = = mtpc_inputTakeoutFileLocation ) ; <nl> <nl> _fileProcess = prepareFileProcess ( file ) ; <nl> _fileProcess - > progress = std : : move ( progress ) ; <nl> void ApiWrap : : filePartDone ( int offset , const MTPupload_File & result ) { <nl> error ( " Empty bytes received in file part . " ) ; <nl> return ; <nl> } <nl> + const auto result = _fileProcess - > file . writeBlock ( { } ) ; <nl> + if ( ! result ) { <nl> + ioError ( result ) ; <nl> + return ; <nl> + } <nl> } else { <nl> using Request = FileProcess : : Request ; <nl> auto & requests = _fileProcess - > requests ; <nl> mmm a / Telegram / SourceFiles / export / export_api_wrap . h <nl> ppp b / Telegram / SourceFiles / export / export_api_wrap . h <nl> class ApiWrap { <nl> <nl> void requestPersonalInfo ( FnMut < void ( Data : : PersonalInfo & & ) > done ) ; <nl> <nl> + void requestOtherData ( <nl> + const QString & suggestedPath , <nl> + FnMut < void ( Data : : File & & ) > done ) ; <nl> + <nl> struct DownloadProgress { <nl> QString path ; <nl> int itemIndex = 0 ; <nl> class ApiWrap { <nl> struct StartProcess ; <nl> struct ContactsProcess ; <nl> struct UserpicsProcess ; <nl> + struct OtherDataProcess ; <nl> struct FileProcess ; <nl> struct FileProgress ; <nl> struct ChatsProcess ; <nl> class ApiWrap { <nl> void finishUserpicsSlice ( ) ; <nl> void finishUserpics ( ) ; <nl> <nl> + void otherDataDone ( const QString & relativePath ) ; <nl> + <nl> void validateSplits ( ) ; <nl> <nl> void requestDialogsSlice ( ) ; <nl> class ApiWrap { <nl> std : : unique_ptr < LoadedFileCache > _fileCache ; <nl> std : : unique_ptr < ContactsProcess > _contactsProcess ; <nl> std : : unique_ptr < UserpicsProcess > _userpicsProcess ; <nl> + std : : unique_ptr < OtherDataProcess > _otherDataProcess ; <nl> std : : unique_ptr < FileProcess > _fileProcess ; <nl> std : : unique_ptr < LeftChannelsProcess > _leftChannelsProcess ; <nl> std : : unique_ptr < DialogsProcess > _dialogsProcess ; <nl> mmm a / Telegram / SourceFiles / export / export_controller . cpp <nl> ppp b / Telegram / SourceFiles / export / export_controller . 
cpp <nl> class Controller { <nl> void exportUserpics ( ) ; <nl> void exportContacts ( ) ; <nl> void exportSessions ( ) ; <nl> + void exportOtherData ( ) ; <nl> void exportDialogs ( ) ; <nl> void exportNextDialog ( ) ; <nl> void exportLeftChannels ( ) ; <nl> class Controller { <nl> ProcessingState stateUserpics ( const DownloadProgress & progress ) const ; <nl> ProcessingState stateContacts ( ) const ; <nl> ProcessingState stateSessions ( ) const ; <nl> + ProcessingState stateOtherData ( ) const ; <nl> ProcessingState stateLeftChannels ( <nl> const DownloadProgress & progress ) const ; <nl> ProcessingState stateDialogs ( const DownloadProgress & progress ) const ; <nl> void Controller : : fillExportSteps ( ) { <nl> if ( _settings . types & Type : : Sessions ) { <nl> _steps . push_back ( Step : : Sessions ) ; <nl> } <nl> + if ( _settings . types & Type : : OtherData ) { <nl> + _steps . push_back ( Step : : OtherData ) ; <nl> + } <nl> if ( _settings . types & Type : : AnyChatsMask ) { <nl> _steps . push_back ( Step : : Dialogs ) ; <nl> } <nl> void Controller : : fillSubstepsInSteps ( const ApiWrap : : StartInfo & info ) { <nl> if ( _settings . types & Settings : : Type : : Sessions ) { <nl> push ( Step : : Sessions , 1 ) ; <nl> } <nl> + if ( _settings . types & Settings : : Type : : OtherData ) { <nl> + push ( Step : : OtherData , 1 ) ; <nl> + } <nl> if ( _settings . types & Settings : : Type : : GroupsChannelsMask ) { <nl> push ( Step : : LeftChannels , info . leftChannelsCount ) ; <nl> } <nl> void Controller : : exportNext ( ) { <nl> case Step : : Userpics : return exportUserpics ( ) ; <nl> case Step : : Contacts : return exportContacts ( ) ; <nl> case Step : : Sessions : return exportSessions ( ) ; <nl> + case Step : : OtherData : return exportOtherData ( ) ; <nl> case Step : : LeftChannels : return exportLeftChannels ( ) ; <nl> case Step : : Dialogs : return exportDialogs ( ) ; <nl> } <nl> void Controller : : exportNext ( ) { <nl> <nl> void Controller : : initialize ( ) { <nl> setState ( stateInitializing ( ) ) ; <nl> - <nl> _api . startExport ( _settings , & _stats , [ = ] ( ApiWrap : : StartInfo info ) { <nl> if ( ioCatchError ( _writer - > start ( _settings , & _stats ) ) ) { <nl> return ; <nl> void Controller : : initialize ( ) { <nl> } <nl> <nl> void Controller : : collectLeftChannels ( ) { <nl> + setState ( stateLeftChannelsList ( 0 ) ) ; <nl> _api . requestLeftChannelsList ( [ = ] ( int count ) { <nl> setState ( stateLeftChannelsList ( count ) ) ; <nl> return true ; <nl> void Controller : : collectLeftChannels ( ) { <nl> } <nl> <nl> void Controller : : collectDialogsList ( ) { <nl> + setState ( stateDialogsList ( 0 ) ) ; <nl> _api . requestDialogsList ( [ = ] ( int count ) { <nl> setState ( stateDialogsList ( count ) ) ; <nl> return true ; <nl> void Controller : : collectDialogsList ( ) { <nl> } <nl> <nl> void Controller : : exportPersonalInfo ( ) { <nl> + setState ( statePersonalInfo ( ) ) ; <nl> _api . requestPersonalInfo ( [ = ] ( Data : : PersonalInfo & & result ) { <nl> if ( ioCatchError ( _writer - > writePersonal ( result ) ) ) { <nl> return ; <nl> void Controller : : exportUserpics ( ) { <nl> } <nl> <nl> void Controller : : exportContacts ( ) { <nl> + setState ( stateContacts ( ) ) ; <nl> _api . 
requestContacts ( [ = ] ( Data : : ContactsList & & result ) { <nl> if ( ioCatchError ( _writer - > writeContactsList ( result ) ) ) { <nl> return ; <nl> void Controller : : exportContacts ( ) { <nl> } <nl> <nl> void Controller : : exportSessions ( ) { <nl> + setState ( stateSessions ( ) ) ; <nl> _api . requestSessions ( [ = ] ( Data : : SessionsList & & result ) { <nl> if ( ioCatchError ( _writer - > writeSessionsList ( result ) ) ) { <nl> return ; <nl> void Controller : : exportSessions ( ) { <nl> } ) ; <nl> } <nl> <nl> + void Controller : : exportOtherData ( ) { <nl> + setState ( stateOtherData ( ) ) ; <nl> + const auto relativePath = " lists / other_data . json " ; <nl> + _api . requestOtherData ( relativePath , [ = ] ( Data : : File & & result ) { <nl> + if ( ioCatchError ( _writer - > writeOtherData ( result ) ) ) { <nl> + return ; <nl> + } <nl> + exportNext ( ) ; <nl> + } ) ; <nl> + } <nl> + <nl> void Controller : : exportDialogs ( ) { <nl> if ( ioCatchError ( _writer - > writeDialogsStart ( _dialogsInfo ) ) ) { <nl> return ; <nl> ProcessingState Controller : : stateSessions ( ) const { <nl> return prepareState ( Step : : Sessions ) ; <nl> } <nl> <nl> + ProcessingState Controller : : stateOtherData ( ) const { <nl> + return prepareState ( Step : : OtherData ) ; <nl> + } <nl> + <nl> ProcessingState Controller : : stateLeftChannels ( <nl> const DownloadProgress & progress ) const { <nl> const auto step = Step : : LeftChannels ; <nl> mmm a / Telegram / SourceFiles / export / export_controller . h <nl> ppp b / Telegram / SourceFiles / export / export_controller . h <nl> struct ProcessingState { <nl> Userpics , <nl> Contacts , <nl> Sessions , <nl> + OtherData , <nl> LeftChannels , <nl> Dialogs , <nl> } ; <nl> mmm a / Telegram / SourceFiles / export / export_settings . cpp <nl> ppp b / Telegram / SourceFiles / export / export_settings . cpp <nl> bool Settings : : validate ( ) const { <nl> return false ; <nl> } else if ( ( fullChats & MustNotBeFull ) ! = 0 ) { <nl> return false ; <nl> - } else if ( format ! = Format : : Text & & format ! = Format : : Json ) { <nl> + } else if ( format ! = Format : : Html & & format ! = Format : : Json ) { <nl> return false ; <nl> } else if ( ! media . validate ( ) ) { <nl> return false ; <nl> mmm a / Telegram / SourceFiles / export / export_settings . h <nl> ppp b / Telegram / SourceFiles / export / export_settings . h <nl> struct Settings { <nl> Userpics = 0x002 , <nl> Contacts = 0x004 , <nl> Sessions = 0x008 , <nl> - PersonalChats = 0x010 , <nl> - BotChats = 0x020 , <nl> - PrivateGroups = 0x040 , <nl> - PublicGroups = 0x080 , <nl> - PrivateChannels = 0x100 , <nl> - PublicChannels = 0x200 , <nl> + OtherData = 0x010 , <nl> + PersonalChats = 0x020 , <nl> + BotChats = 0x040 , <nl> + PrivateGroups = 0x080 , <nl> + PublicGroups = 0x100 , <nl> + PrivateChannels = 0x200 , <nl> + PublicChannels = 0x400 , <nl> <nl> GroupsMask = PrivateGroups | PublicGroups , <nl> ChannelsMask = PrivateChannels | PublicChannels , <nl> struct Settings { <nl> NonChannelChatsMask = PersonalChats | BotChats | PrivateGroups , <nl> AnyChatsMask = PersonalChats | BotChats | GroupsChannelsMask , <nl> NonChatsMask = PersonalInfo | Userpics | Contacts | Sessions , <nl> - AllMask = NonChatsMask | AnyChatsMask , <nl> + AllMask = NonChatsMask | OtherData | AnyChatsMask , <nl> } ; <nl> using Types = base : : flags < Type > ; <nl> friend inline constexpr auto is_flag_type ( Type ) { return true ; } ; <nl> mmm a / Telegram / SourceFiles / export / output / export_output_abstract . 
h <nl> ppp b / Telegram / SourceFiles / export / output / export_output_abstract . h <nl> struct SessionsList ; <nl> struct DialogsInfo ; <nl> struct DialogInfo ; <nl> struct MessagesSlice ; <nl> + struct File ; <nl> } / / namespace Data <nl> <nl> struct Settings ; <nl> class AbstractWriter { <nl> [ [ nodiscard ] ] virtual Result writeSessionsList ( <nl> const Data : : SessionsList & data ) = 0 ; <nl> <nl> + [ [ nodiscard ] ] virtual Result writeOtherData ( <nl> + const Data : : File & data ) = 0 ; <nl> + <nl> [ [ nodiscard ] ] virtual Result writeDialogsStart ( <nl> const Data : : DialogsInfo & data ) = 0 ; <nl> [ [ nodiscard ] ] virtual Result writeDialogStart ( <nl> mmm a / Telegram / SourceFiles / export / output / export_output_html . cpp <nl> ppp b / Telegram / SourceFiles / export / output / export_output_html . cpp <nl> QByteArray HtmlWriter : : Wrap : : end ( ) const { <nl> } <nl> <nl> HtmlWriter : : Wrap : : ~ Wrap ( ) { <nl> - Expects ( _file . empty ( ) | | _closed ) ; <nl> + ( void ) close ( ) ; <nl> } <nl> <nl> HtmlWriter : : HtmlWriter ( ) = default ; <nl> Result HtmlWriter : : writeWebSessions ( const Data : : SessionsList & data ) { <nl> return _summary - > writeBlock ( header ) ; <nl> } <nl> <nl> + Result HtmlWriter : : writeOtherData ( const Data : : File & data ) { <nl> + Expects ( _summary ! = nullptr ) ; <nl> + <nl> + const auto header = SerializeLink ( <nl> + " Other data " , <nl> + _summary - > relativePath ( data ) ) <nl> + + kLineBreak <nl> + + kLineBreak ; <nl> + return _summary - > writeBlock ( header ) ; <nl> + } <nl> + <nl> Result HtmlWriter : : writeDialogsStart ( const Data : : DialogsInfo & data ) { <nl> return writeChatsStart ( <nl> data , <nl> Result HtmlWriter : : writeChatSlice ( const Data : : MessagesSlice & data ) { <nl> data . peers , <nl> _settings . internalLinksDomain ) ) ; <nl> } <nl> - const auto full = kLineBreak + JoinList ( kLineBreak , list ) ; <nl> + const auto full = _chat - > empty ( ) <nl> + ? JoinList ( kLineBreak , list ) <nl> + : kLineBreak + JoinList ( kLineBreak , list ) ; <nl> return _chat - > writeBlock ( full ) ; <nl> } <nl> <nl> Result HtmlWriter : : writeChatEnd ( ) { <nl> } <nl> Unexpected ( " Dialog type in TypeString . " ) ; <nl> } ; <nl> - return _chats - > writeBlock ( SerializeKeyValue ( { <nl> + return _chats - > writeBlock ( kLineBreak + SerializeKeyValue ( { <nl> { " Name " , SerializeString ( NameString ( _dialog , _dialog . type ) ) } , <nl> { " Type " , SerializeString ( TypeString ( _dialog . type ) ) } , <nl> { <nl> Result HtmlWriter : : writeChatEnd ( ) { <nl> ( _dialog . relativePath + " messages . html " ) ) ) <nl> : QByteArray ( ) ) <nl> } <nl> - } ) + kLineBreak ) ; <nl> + } ) ) ; <nl> } <nl> <nl> Result HtmlWriter : : writeChatsEnd ( ) { <nl> mmm a / Telegram / SourceFiles / export / output / export_output_html . h <nl> ppp b / Telegram / SourceFiles / export / output / export_output_html . h <nl> class HtmlWriter : public AbstractWriter { <nl> <nl> Result writeSessionsList ( const Data : : SessionsList & data ) override ; <nl> <nl> + Result writeOtherData ( const Data : : File & data ) override ; <nl> + <nl> Result writeDialogsStart ( const Data : : DialogsInfo & data ) override ; <nl> Result writeDialogStart ( const Data : : DialogInfo & data ) override ; <nl> Result writeDialogSlice ( const Data : : MessagesSlice & data ) override ; <nl> mmm a / Telegram / SourceFiles / export / output / export_output_json . cpp <nl> ppp b / Telegram / SourceFiles / export / output / export_output_json . 
cpp <nl> For license and copyright information please follow this link : <nl> # include " core / utils . h " <nl> <nl> # include < QtCore / QDateTime > <nl> + # include < QtCore / QJsonDocument > <nl> + # include < QtCore / QJsonObject > <nl> + # include < QtCore / QJsonArray > <nl> + # include < QtCore / QJsonValue > <nl> <nl> namespace Export { <nl> namespace Output { <nl> Result JsonWriter : : writeSessionsList ( const Data : : SessionsList & data ) { <nl> return Result : : Success ( ) ; <nl> } <nl> <nl> + Result JsonWriter : : writeOtherData ( const Data : : File & data ) { <nl> + Expects ( _output ! = nullptr ) ; <nl> + Expects ( data . skipReason = = Data : : File : : SkipReason : : None ) ; <nl> + Expects ( ! data . relativePath . isEmpty ( ) ) ; <nl> + <nl> + QFile f ( pathWithRelativePath ( data . relativePath ) ) ; <nl> + if ( ! f . open ( QIODevice : : ReadOnly ) ) { <nl> + return Result ( Result : : Type : : FatalError , f . fileName ( ) ) ; <nl> + } <nl> + const auto content = f . readAll ( ) ; <nl> + if ( content . isEmpty ( ) ) { <nl> + return Result : : Success ( ) ; <nl> + } <nl> + auto error = QJsonParseError { 0 , QJsonParseError : : NoError } ; <nl> + const auto document = QJsonDocument : : fromJson ( content , & error ) ; <nl> + if ( error . error ! = QJsonParseError : : NoError ) { <nl> + return Result ( Result : : Type : : FatalError , f . fileName ( ) ) ; <nl> + } <nl> + auto block = prepareObjectItemStart ( " other_data " ) ; <nl> + Fn < void ( const QJsonObject & data ) > pushObject ; <nl> + Fn < void ( const QJsonArray & data ) > pushArray ; <nl> + Fn < void ( const QJsonValue & data ) > pushValue ; <nl> + pushObject = [ & ] ( const QJsonObject & data ) { <nl> + block . append ( pushNesting ( Context : : kObject ) ) ; <nl> + for ( auto i = data . begin ( ) ; i ! = data . end ( ) ; + + i ) { <nl> + if ( ( * i ) . type ( ) ! = QJsonValue : : Undefined ) { <nl> + block . append ( prepareObjectItemStart ( i . key ( ) . toUtf8 ( ) ) ) ; <nl> + pushValue ( * i ) ; <nl> + } <nl> + } <nl> + block . append ( popNesting ( ) ) ; <nl> + } ; <nl> + pushArray = [ & ] ( const QJsonArray & data ) { <nl> + block . append ( pushNesting ( Context : : kArray ) ) ; <nl> + for ( auto i = data . begin ( ) ; i ! = data . end ( ) ; + + i ) { <nl> + if ( ( * i ) . type ( ) ! = QJsonValue : : Undefined ) { <nl> + block . append ( prepareArrayItemStart ( ) ) ; <nl> + pushValue ( * i ) ; <nl> + } <nl> + } <nl> + block . append ( popNesting ( ) ) ; <nl> + } ; <nl> + pushValue = [ & ] ( const QJsonValue & data ) { <nl> + switch ( data . type ( ) ) { <nl> + case QJsonValue : : Null : <nl> + block . append ( " null " ) ; <nl> + return ; <nl> + case QJsonValue : : Bool : <nl> + block . append ( data . toBool ( ) ? " true " : " false " ) ; <nl> + return ; <nl> + case QJsonValue : : Double : <nl> + block . append ( Data : : NumberToString ( data . toDouble ( ) ) ) ; <nl> + return ; <nl> + case QJsonValue : : String : <nl> + block . append ( SerializeString ( data . toString ( ) . toUtf8 ( ) ) ) ; <nl> + return ; <nl> + case QJsonValue : : Array : <nl> + return pushArray ( data . toArray ( ) ) ; <nl> + case QJsonValue : : Object : <nl> + return pushObject ( data . toObject ( ) ) ; <nl> + } <nl> + Unexpected ( " Type of json valuein JsonWriter : : writeOtherData . " ) ; <nl> + } ; <nl> + if ( document . isObject ( ) ) { <nl> + pushObject ( document . object ( ) ) ; <nl> + } else { <nl> + pushArray ( document . 
array ( ) ) ; <nl> + } <nl> + return _output - > writeBlock ( block ) ; <nl> + } <nl> + <nl> Result JsonWriter : : writeSessions ( const Data : : SessionsList & data ) { <nl> Expects ( _output ! = nullptr ) ; <nl> <nl> mmm a / Telegram / SourceFiles / export / output / export_output_json . h <nl> ppp b / Telegram / SourceFiles / export / output / export_output_json . h <nl> class JsonWriter : public AbstractWriter { <nl> <nl> Result writeSessionsList ( const Data : : SessionsList & data ) override ; <nl> <nl> + Result writeOtherData ( const Data : : File & data ) override ; <nl> + <nl> Result writeDialogsStart ( const Data : : DialogsInfo & data ) override ; <nl> Result writeDialogStart ( const Data : : DialogInfo & data ) override ; <nl> Result writeDialogSlice ( const Data : : MessagesSlice & data ) override ; <nl> mmm a / Telegram / SourceFiles / export / output / export_output_text . cpp <nl> ppp b / Telegram / SourceFiles / export / output / export_output_text . cpp <nl> Result TextWriter : : writeWebSessions ( const Data : : SessionsList & data ) { <nl> return _summary - > writeBlock ( header ) ; <nl> } <nl> <nl> + Result TextWriter : : writeOtherData ( const Data : : File & data ) { <nl> + Expects ( _summary ! = nullptr ) ; <nl> + <nl> + const auto header = " Other data - " + data . relativePath . toUtf8 ( ) <nl> + + kLineBreak <nl> + + kLineBreak ; <nl> + return _summary - > writeBlock ( header ) ; <nl> + } <nl> + <nl> Result TextWriter : : writeDialogsStart ( const Data : : DialogsInfo & data ) { <nl> return writeChatsStart ( <nl> data , <nl> Result TextWriter : : writeChatSlice ( const Data : : MessagesSlice & data ) { <nl> data . peers , <nl> _settings . internalLinksDomain ) ) ; <nl> } <nl> - const auto full = kLineBreak + JoinList ( kLineBreak , list ) ; <nl> + const auto full = _chat - > empty ( ) <nl> + ? JoinList ( kLineBreak , list ) <nl> + : kLineBreak + JoinList ( kLineBreak , list ) ; <nl> return _chat - > writeBlock ( full ) ; <nl> } <nl> <nl> Result TextWriter : : writeChatEnd ( ) { <nl> } <nl> Unexpected ( " Dialog type in TypeString . " ) ; <nl> } ; <nl> - return _chats - > writeBlock ( SerializeKeyValue ( { <nl> + return _chats - > writeBlock ( kLineBreak + SerializeKeyValue ( { <nl> { " Name " , NameString ( _dialog , _dialog . type ) } , <nl> { " Type " , TypeString ( _dialog . type ) } , <nl> { <nl> Result TextWriter : : writeChatEnd ( ) { <nl> ? ( _dialog . relativePath + " messages . txt " ) . toUtf8 ( ) <nl> : QByteArray ( ) ) <nl> } <nl> - } ) + kLineBreak ) ; <nl> + } ) ) ; <nl> } <nl> <nl> Result TextWriter : : writeChatsEnd ( ) { <nl> mmm a / Telegram / SourceFiles / export / output / export_output_text . h <nl> ppp b / Telegram / SourceFiles / export / output / export_output_text . h <nl> class TextWriter : public AbstractWriter { <nl> <nl> Result writeSessionsList ( const Data : : SessionsList & data ) override ; <nl> <nl> + Result writeOtherData ( const Data : : File & data ) override ; <nl> + <nl> Result writeDialogsStart ( const Data : : DialogsInfo & data ) override ; <nl> Result writeDialogStart ( const Data : : DialogInfo & data ) override ; <nl> Result writeDialogSlice ( const Data : : MessagesSlice & data ) override ; <nl> mmm a / Telegram / SourceFiles / export / view / export_view_content . cpp <nl> ppp b / Telegram / SourceFiles / export / view / export_view_content . 
cpp <nl> Content ContentFromState ( const ProcessingState & state ) { <nl> case Step : : Sessions : <nl> pushMain ( lang ( lng_export_option_sessions ) ) ; <nl> break ; <nl> + case Step : : OtherData : <nl> + pushMain ( lang ( lng_export_option_other ) ) ; <nl> + break ; <nl> case Step : : LeftChannels : <nl> case Step : : Dialogs : <nl> pushMain ( lang ( lng_export_state_chats ) ) ; <nl> mmm a / Telegram / SourceFiles / export / view / export_view_panel_controller . cpp <nl> ppp b / Telegram / SourceFiles / export / view / export_view_panel_controller . cpp <nl> void SuggestStart ( ) { <nl> <nl> void ClearSuggestStart ( ) { <nl> auto settings = Local : : ReadExportSettings ( ) ; <nl> - settings . availableAt = 0 ; <nl> - Local : : WriteExportSettings ( settings ) ; <nl> + if ( settings . availableAt ) { <nl> + settings . availableAt = 0 ; <nl> + Local : : WriteExportSettings ( settings ) ; <nl> + } <nl> } <nl> <nl> PanelController : : PanelController ( not_null < ControllerWrap * > process ) <nl> void PanelController : : showError ( const ApiErrorState & error ) { <nl> qstr ( " TAKEOUT_INIT_DELAY_ " ) . size ( ) ) . toInt ( ) , 1 ) ; <nl> const auto now = QDateTime : : currentDateTime ( ) ; <nl> const auto when = now . addSecs ( seconds ) ; <nl> - showError ( lng_export_delay ( lt_date , langDateTimeFull ( when ) ) ) ; <nl> + const auto hours = seconds / 3600 ; <nl> + const auto hoursText = [ & ] { <nl> + if ( hours < = 0 ) { <nl> + return lang ( lng_export_delay_less_than_hour ) ; <nl> + } <nl> + return lng_export_delay_hours ( lt_count , hours ) ; <nl> + } ( ) ; <nl> + showError ( lng_export_delay ( <nl> + lt_hours , <nl> + hoursText , <nl> + lt_date , <nl> + langDateTimeFull ( when ) ) ) ; <nl> <nl> _settings - > availableAt = unixtime ( ) + seconds ; <nl> _saveSettingsTimer . callOnce ( kSaveSettingsTimeout ) ; <nl> void PanelController : : showError ( const QString & text ) { <nl> } <nl> <nl> void PanelController : : showProgress ( ) { <nl> + _settings - > availableAt = 0 ; <nl> + ClearSuggestStart ( ) ; <nl> + <nl> _panel - > setTitle ( Lang : : Viewer ( lng_export_progress_title ) ) ; <nl> <nl> auto progress = base : : make_unique_q < ProgressWidget > ( <nl> mmm a / Telegram / SourceFiles / export / view / export_view_settings . cpp <nl> ppp b / Telegram / SourceFiles / export / view / export_view_settings . 
cpp <nl> void SettingsWidget : : setupOptions ( not_null < Ui : : VerticalLayout * > container ) { <nl> Type : : PublicChannels ) ; <nl> <nl> setupMediaOptions ( container ) ; <nl> + <nl> + addHeader ( container , lng_export_header_other ) ; <nl> + addOptionWithAbout ( <nl> + container , <nl> + lng_export_option_other , <nl> + Type : : OtherData , <nl> + lng_export_option_other_about ) ; <nl> } <nl> <nl> void SettingsWidget : : setupMediaOptions ( <nl> not_null < Ui : : Checkbox * > SettingsWidget : : addOption ( <nl> return checkbox ; <nl> } <nl> <nl> - void SettingsWidget : : addOptionWithAbout ( <nl> + not_null < Ui : : Checkbox * > SettingsWidget : : addOptionWithAbout ( <nl> not_null < Ui : : VerticalLayout * > container , <nl> LangKey key , <nl> Types types , <nl> LangKey about ) { <nl> - addOption ( container , key , types ) ; <nl> + const auto result = addOption ( container , key , types ) ; <nl> const auto label = container - > add ( <nl> object_ptr < Ui : : FlatLabel > ( <nl> container , <nl> void SettingsWidget : : addOptionWithAbout ( <nl> Ui : : FlatLabel : : InitType : : Simple , <nl> st : : exportAboutOptionLabel ) , <nl> st : : exportAboutOptionPadding ) ; <nl> + return result ; <nl> } <nl> <nl> void SettingsWidget : : addChatOption ( <nl> mmm a / Telegram / SourceFiles / export / view / export_view_settings . h <nl> ppp b / Telegram / SourceFiles / export / view / export_view_settings . h <nl> class SettingsWidget : public Ui : : RpWidget { <nl> not_null < Ui : : VerticalLayout * > container , <nl> LangKey key , <nl> Types types ) ; <nl> - void addOptionWithAbout ( <nl> + not_null < Ui : : Checkbox * > addOptionWithAbout ( <nl> not_null < Ui : : VerticalLayout * > container , <nl> LangKey key , <nl> Types types , <nl> LangKey about ) ; <nl> - void addChatOption ( <nl> + void addChatOption ( <nl> not_null < Ui : : VerticalLayout * > container , <nl> LangKey key , <nl> Types types ) ; <nl>
| Add other additional data export. | telegramdesktop/tdesktop | 54cab2c5a502c9ea3171cfe029c2bcba5628e995 | 2018-06-24T14:44:53Z |
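
The tdesktop change above turns a TAKEOUT_INIT_DELAY_<seconds> error into an hour-based wait message. A rough standalone C++ sketch of that conversion follows; the names ParseDelaySeconds and DelayText are invented for illustration, plain std::string stands in for the Qt and lang() helpers the real code uses, and the clamp-to-one-second detail is inferred from the surrounding context lines.

#include <algorithm>
#include <string>

// Extract the wait time from an error type like "TAKEOUT_INIT_DELAY_172800".
int ParseDelaySeconds(const std::string &errorType) {
    const std::string prefix = "TAKEOUT_INIT_DELAY_";
    if (errorType.compare(0, prefix.size(), prefix) != 0) {
        return 1;
    }
    // Clamp to at least one second, as the patch appears to do.
    return std::max(std::stoi(errorType.substr(prefix.size())), 1);
}

// Build the human-readable delay, mirroring the hours / "less than an hour" split.
std::string DelayText(int seconds) {
    const int hours = seconds / 3600;
    if (hours <= 0) {
        return "less than an hour";
    }
    return std::to_string(hours) + " hour(s)";
}
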
mmm a / deploy / provision - vm . sh <nl> ppp b / deploy / provision - vm . sh <nl> if type yum > / dev / null 2 > & 1 ; then <nl> tar - xzvf git - 1 . 8 . 0 . 3 . tar . gz <nl> cd git - 1 . 8 . 0 . 3 <nl> . / configure - - prefix = $ HOME / git & & make - j2 & & make install <nl> + cd . . <nl> sleep 3 <nl> fi <nl> fi <nl>
| Vagrant setup: Don't clone PhantomJS into Git build directory. | ariya/phantomjs | aaca7774c9d43243bc0f7c9a2a2ae38ae2b8f251 | 2013-01-09T09:03:27Z |
mmm a / tools / whitespace . txt <nl> ppp b / tools / whitespace . txt <nl> A Smi balks into a war and says : <nl> The doubles heard this and started to unbox . <nl> The Smi looked at them when a crazy v8 - autoroll account showed up . . . <nl> The autoroller bought a round of Himbeerbrause . Suddenly . . . <nl> - The bartender starts to shake the bottles . . . . . . . . . . . . . . . . . . . . . . . <nl> + The bartender starts to shake the bottles . . . . . . . . . . . . . . . . . . . . . . . . <nl>
| Whitespace CL | v8/v8 | 54f1b2019b93f9fbc0a061e37cd5acac834f6341 | 2018-02-07T09:57:56Z |
mmm a / modules / python / src2 / cv2 . cpp <nl> ppp b / modules / python / src2 / cv2 . cpp <nl> static PyObject * pycvSetMouseCallback ( PyObject * , PyObject * args , PyObject * kw ) <nl> if ( param = = NULL ) { <nl> param = Py_None ; <nl> } <nl> - ERRWRAP2 ( setMouseCallback ( name , OnMouse , Py_BuildValue ( " OO " , on_mouse , param ) ) ) ; <nl> + static PyObject * last_param = NULL ; <nl> + if ( last_param ) { <nl> + Py_DECREF ( last_param ) ; <nl> + last_param = NULL ; <nl> + } <nl> + last_param = Py_BuildValue ( " OO " , on_mouse , param ) ; <nl> + ERRWRAP2 ( setMouseCallback ( name , OnMouse , last_param ) ) ; <nl> Py_RETURN_NONE ; <nl> } <nl> # endif <nl> static PyObject * pycvCreateTrackbar ( PyObject * , PyObject * args ) <nl> PyErr_SetString ( PyExc_TypeError , " on_change must be callable " ) ; <nl> return NULL ; <nl> } <nl> - ERRWRAP2 ( createTrackbar ( trackbar_name , window_name , value , count , OnChange , Py_BuildValue ( " OO " , on_change , Py_None ) ) ) ; <nl> + static PyObject * last_param = NULL ; <nl> + if ( last_param ) { <nl> + Py_DECREF ( last_param ) ; <nl> + last_param = NULL ; <nl> + } <nl> + last_param = Py_BuildValue ( " OO " , on_change , Py_None ) ; <nl> + ERRWRAP2 ( createTrackbar ( trackbar_name , window_name , value , count , OnChange , last_param ) ) ; <nl> Py_RETURN_NONE ; <nl> } <nl> <nl> static PyObject * pycvCreateButton ( PyObject * , PyObject * args , PyObject * kw ) <nl> userdata = Py_None ; <nl> } <nl> <nl> - ERRWRAP2 ( createButton ( button_name , OnButtonChange , Py_BuildValue ( " OO " , on_change , userdata ) , button_type , initial_button_state ! = 0 ) ) ; <nl> + static PyObject * last_param = NULL ; <nl> + if ( last_param ) { <nl> + Py_DECREF ( last_param ) ; <nl> + last_param = NULL ; <nl> + } <nl> + last_param = Py_BuildValue ( " OO " , on_change , userdata ) ; <nl> + ERRWRAP2 ( createButton ( button_name , OnButtonChange , last_param , button_type , initial_button_state ! = 0 ) ) ; <nl> Py_RETURN_NONE ; <nl> } <nl> # endif <nl>
| Fix | opencv/opencv | f6e299b58b1def1690f52d3612dd1ba8457226e3 | 2018-04-03T01:24:49Z |
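
The cv2.cpp fix above replaces a Py_BuildValue tuple that was built on every call and never released with a static pointer that drops the previous payload before building a new one. A minimal C++ sketch of that reference-counting pattern against the CPython C API; register_callback and py_set_callback are invented stand-ins for the HighGUI entry points, not OpenCV names.

#include <Python.h>

// Stand-in for an API such as cv::setMouseCallback that simply keeps the
// userdata pointer alive while the callback stays registered.
static void register_callback(void *userdata) { (void)userdata; }

static PyObject *py_set_callback(PyObject *, PyObject *args) {
    PyObject *on_event = nullptr;
    PyObject *param = Py_None;
    if (!PyArg_ParseTuple(args, "O|O", &on_event, &param)) {
        return nullptr;
    }

    // Keep exactly one owned reference to the payload handed to the C++ side.
    // Releasing the previous tuple before building the next one is what stops
    // repeated registrations from leaking.
    static PyObject *last_param = nullptr;
    Py_XDECREF(last_param);                              // no-op when nullptr
    last_param = Py_BuildValue("OO", on_event, param);   // returns a new reference
    if (!last_param) {
        return nullptr;
    }

    register_callback(last_param);
    Py_RETURN_NONE;
}
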
mmm a / dlib / svm / svm . h <nl> ppp b / dlib / svm / svm . h <nl> <nl> # include " . . / algs . h " <nl> # include " . . / serialize . h " <nl> # include " . . / rand . h " <nl> - # include " dlib / std_allocator . h " <nl> + # include " . . / std_allocator . h " <nl> # include " function . h " <nl> # include " kernel . h " <nl> <nl>
| Fixed incorrect #include | davisking/dlib | 5874bcd11659162f01b3e70877f6121fa6bc7b02 | 2008-05-13T01:41:25Z |
mmm a / include / mlir / IR / OpBase . td <nl> ppp b / include / mlir / IR / OpBase . td <nl> <nl> / / Common utilities for defining TableGen mechanisms <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> <nl> - / / Concatenates a list of strings with a separator ( default " , " ) <nl> - class StrJoin < list < string > strings , string sep = " , " > { <nl> - string result = <nl> - ! if ( ! empty ( strings ) , " " , <nl> - ! foldl ( ! head ( strings ) , ! tail ( strings ) , prev , cur , prev # sep # cur ) ) ; <nl> + / / A workaround for the inability to define functions in Tablegen . <nl> + / / <nl> + / / The template parameter defines a string that can be extracted from an <nl> + / / instance of this class by accessing the " result " member . Subclasses can take <nl> + / / their own template parameters as function " arguments " and use them to <nl> + / / populate result . <nl> + / / For example , if it didn ' t already exist , a concat function could be defined <nl> + / / like : <nl> + / / <nl> + / / class StrConcat < list < string > strings > : <nl> + / / StrFunc < ! foldl ( " " , strings , prev , cur , prev # cur ) > <nl> + / / <nl> + / / and then called like <nl> + / / <nl> + / / StrConcat < [ " a " , " b " , " c " ] > . result <nl> + / / <nl> + / / to get the string " abc " <nl> + class StrFunc < string r > { <nl> + string result = r ; <nl> } <nl> <nl> + / / Concatenates a list of strings with a separator ( default " , " ) <nl> + class StrJoin < list < string > strings , string sep = " , " > : <nl> + StrFunc < ! if ( ! empty ( strings ) , " " , <nl> + ! foldl ( ! head ( strings ) , ! tail ( strings ) , prev , cur , prev # sep # cur ) ) > ; <nl> + <nl> / / Concatenates a list of integers into a string with a separator ( default " , " ) <nl> class StrJoinInt < list < int > integers , string sep = " , " > : <nl> StrJoin < ! foreach ( i , integers , ! cast < string > ( i ) ) , sep > ; <nl> def HasNoUseOf : Constraint < <nl> <nl> / / TODO ( b / 135033717 ) : Improve the autogenerated error messages . <nl> <nl> + class Rank < string name > : <nl> + StrFunc < " $ " # name # " . getType ( ) . cast < ShapedType > ( ) . getRank ( ) " > ; <nl> + <nl> + class ElementCount < string name > : <nl> + StrFunc < " $ " # name # " . getType ( ) . cast < ShapedType > ( ) . getNumElements ( ) " > ; <nl> + <nl> + class ElementType < string name > : StrFunc < " getElementTypeOrSelf ( $ " # name # " ) " > ; <nl> + <nl> class AllMatchPred < list < string > values > : <nl> CPred < " llvm : : is_splat ( llvm : : makeArrayRef ( { " # StrJoin < values > . result # " } ) ) " > ; <nl> <nl> class AllMatchSameOperatorTrait < list < string > names , string operator , <nl> AllMatchSameOperatorPred < names , operator > > ; <nl> <nl> class AllElementCountsMatch < list < string > names > : <nl> - AllMatchSameOperatorTrait < <nl> - names , " $ _self . getType ( ) . cast < ShapedType > ( ) . getNumElements ( ) " , <nl> - " element count " > ; <nl> + AllMatchSameOperatorTrait < names , ElementCount < " _self " > . result , <nl> + " element count " > ; <nl> <nl> class AllElementTypesMatch < list < string > names > : <nl> - AllMatchSameOperatorTrait < names , <nl> - " getElementTypeOrSelf ( $ _self ) " , " element type " > ; <nl> + AllMatchSameOperatorTrait < names , ElementType < " _self " > . result , <nl> + " element type " > ; <nl> <nl> class AllRanksMatch < list < string > names > : <nl> - AllMatchSameOperatorTrait < <nl> - names , " $ _self . getType ( ) . 
cast < ShapedType > ( ) . getRank ( ) " , " rank " > ; <nl> + AllMatchSameOperatorTrait < names , Rank < " _self " > . result , " rank " > ; <nl> <nl> class AllTypesMatch < list < string > names > : <nl> AllMatchSameOperatorTrait < names , " $ _self . getType ( ) " , " type " > ; <nl> mmm a / test / lib / TestDialect / TestOps . td <nl> ppp b / test / lib / TestDialect / TestOps . td <nl> def FourEqualsFive : <nl> <nl> def OperandRankEqualsResultSize : <nl> TEST_Op < " operand_rank_equals_result_size " , <nl> - [ AllMatch < [ " $ operand . getType ( ) . cast < ShapedType > ( ) . getRank ( ) " , <nl> - " $ result . getType ( ) . cast < ShapedType > ( ) . getNumElements ( ) " <nl> - ] , " operand rank equals result size " > ] > { <nl> + [ AllMatch < [ Rank < " operand " > . result , ElementCount < " result " > . result ] , <nl> + " operand rank equals result size " > ] > { <nl> let arguments = ( ins AnyTensor : $ operand ) ; <nl> let results = ( outs AnyTensor : $ result ) ; <nl> } <nl>
| Tablegen helpers for accessing properties of shaped types | tensorflow/tensorflow | ef77ad99a621985aeca1df94168efc9489de95b6 | 2019-09-28T00:35:34Z |
mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> set ( CMAKE_CXX_STANDARD_REQUIRED ON ) <nl> add_library ( spdlog INTERFACE ) <nl> <nl> option ( SPDLOG_BUILD_EXAMPLES " Build examples " OFF ) <nl> + option ( SPDLOG_BUILD_TESTS " Build tests " OFF ) <nl> <nl> target_include_directories ( <nl> spdlog <nl> target_include_directories ( <nl> " $ < INSTALL_INTERFACE : include > " <nl> ) <nl> <nl> + set ( HEADER_BASE " $ { CMAKE_CURRENT_SOURCE_DIR } / include " ) <nl> + <nl> + include ( CTest ) <nl> if ( SPDLOG_BUILD_EXAMPLES ) <nl> - enable_testing ( ) <nl> - add_subdirectory ( example ) <nl> + add_subdirectory ( example ) <nl> + endif ( ) <nl> + <nl> + if ( SPDLOG_BUILD_TESTS ) <nl> + add_subdirectory ( tests ) <nl> endif ( ) <nl> <nl> # # # Install # # # <nl> new file mode 100644 <nl> index 00000000 . . 307ddeb6 <nl> mmm / dev / null <nl> ppp b / tests / CMakeLists . txt <nl> <nl> + # <nl> + # Tests <nl> + # <nl> + <nl> + enable_testing ( ) <nl> + <nl> + # Build Catch unit tests <nl> + add_library ( catch INTERFACE ) <nl> + target_include_directories ( catch INTERFACE $ { CMAKE_CURRENT_SOURCE_DIR } ) <nl> + <nl> + file ( GLOB catch_tests LIST_DIRECTORIES false RELATIVE $ { CMAKE_CURRENT_SOURCE_DIR } * . cpp ) <nl> + add_executable ( catch_tests $ { catch_tests } ) <nl> + target_link_libraries ( catch_tests spdlog ) <nl> + add_test ( NAME catch_tests COMMAND catch_tests ) <nl> + file ( MAKE_DIRECTORY " $ { CMAKE_CURRENT_BINARY_DIR } / logs " ) <nl> + <nl> + # Ensure headers include their own dependencies <nl> + add_subdirectory ( header_dependencies ) <nl> + <nl> new file mode 100644 <nl> index 00000000 . . 81779694 <nl> mmm / dev / null <nl> ppp b / tests / header_dependencies / CMakeLists . txt <nl> <nl> + # <nl> + # Ensure all headers include all dependencies <nl> + # <nl> + <nl> + set ( IGNORED_HEADERS " " ) <nl> + <nl> + set ( COMMON_TEST_LIBRARIES spdlog ) <nl> + <nl> + add_custom_target ( header_dependencies ) <nl> + <nl> + file ( GLOB_RECURSE headers RELATIVE " $ { HEADER_BASE } " $ { HEADER_BASE } / * . h ) <nl> + set ( test_index 0 ) <nl> + foreach ( HEADER $ { headers } ) <nl> + # Sample of relevant variables computed here <nl> + # HEADER : details / line_logger_impl . h <nl> + # symbolname : spdlog_details_line_logger_impl <nl> + <nl> + # Compute symbolname <nl> + string ( REPLACE " . h " " " symbolname " $ { HEADER } " ) <nl> + string ( MAKE_C_IDENTIFIER " $ { symbolname } " symbolname ) <nl> + <nl> + list ( FIND IGNORED_HEADERS " $ { HEADER } " _index ) <nl> + # If we didn ' t explicitly ignore this and if we built this target <nl> + if ( $ { _index } EQUAL - 1 ) <nl> + # message ( STATUS " $ { HEADER } : ' $ { symbolname } ' " ) <nl> + <nl> + set ( extension cpp ) <nl> + <nl> + # Name the test and output file with a number , to dodge Windows path length limits . <nl> + # Call it header , instead of test , to avoid polluting the ' executable namespace ' <nl> + set ( test_name " header_ $ { extension } _ $ { test_index } " ) <nl> + <nl> + set ( source_file " $ { CMAKE_CURRENT_SOURCE_DIR } / main . 
$ { extension } " ) <nl> + <nl> + add_executable ( $ { test_name } " $ { source_file } " ) <nl> + target_compile_definitions ( $ { test_name } PRIVATE HEADER_TO_TEST = " $ { HEADER } " ) <nl> + target_include_directories ( $ { test_name } <nl> + PRIVATE <nl> + $ { BUILDTREE_HEADER_BASE } <nl> + $ { HEADER_BASE } ) <nl> + <nl> + set_target_properties ( $ { test_name } PROPERTIES <nl> + FOLDER " Header dependency tests " ) <nl> + <nl> + target_link_libraries ( $ { test_name } <nl> + PRIVATE <nl> + $ { COMMON_TEST_LIBRARIES } <nl> + $ { LIBRARIES_ $ { symbolname } } <nl> + $ { LIBRARIES_ $ { libname } } ) <nl> + <nl> + add_test ( NAME $ { test_name } _builds COMMAND $ { test_name } ) <nl> + add_dependencies ( header_dependencies $ { test_name } ) <nl> + <nl> + math ( EXPR test_index " $ { test_index } + 1 " ) <nl> + endif ( ) <nl> + endforeach ( ) <nl> + <nl> + <nl> new file mode 100644 <nl> index 00000000 . . d2b5af77 <nl> mmm / dev / null <nl> ppp b / tests / header_dependencies / main . c <nl> <nl> + <nl> + # include HEADER_TO_TEST <nl> + <nl> + int main ( int argc , char * * argv ) <nl> + { <nl> + return 0 ; <nl> + } <nl> new file mode 100644 <nl> index 00000000 . . 7716c88b <nl> mmm / dev / null <nl> ppp b / tests / header_dependencies / main . cpp <nl> <nl> + <nl> + # include HEADER_TO_TEST <nl> + <nl> + int main ( int argc , char * argv [ ] ) { return 0 ; } <nl>
| Added header-dependency tests. | gabime/spdlog | 24e7b64b89f38f264424c1e178686a7918969993 | 2016-06-17T05:55:05Z |
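
The CMake loop in the spdlog commit above builds one tiny executable per header, passing the header's relative path in via -DHEADER_TO_TEST, so any header that forgets one of its own includes fails to compile. The translation unit below is essentially the main.cpp from that diff; the fallback define and the example compile line are illustrative additions, and "spdlog/spdlog.h" is only used as an example header name.

// Compiled once per header, e.g.:
//   g++ -Iinclude -DHEADER_TO_TEST='"spdlog/spdlog.h"' -c main.cpp
#ifndef HEADER_TO_TEST
#  define HEADER_TO_TEST "spdlog/spdlog.h"  // fallback so the sketch builds standalone
#endif

#include HEADER_TO_TEST

int main(int argc, char *argv[]) {
    (void)argc;
    (void)argv;
    return 0;  // success only means the header compiled in isolation
}
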
mmm a / build_detect_platform <nl> ppp b / build_detect_platform <nl> case ` uname - s ` in <nl> echo " PLATFORM_CFLAGS = - D_REENTRANT - DOS_FREEBSD " > > build_config . mk <nl> echo " PLATFORM_LDFLAGS = - lpthread " > > build_config . mk <nl> ; ; <nl> - GNU / kFreeBSD ) <nl> - PLATFORM = OS_FREEBSD <nl> - echo " PLATFORM_CFLAGS = - pthread - DOS_FREEBSD " > > build_config . mk <nl> - echo " PLATFORM_LDFLAGS = - lpthread - lrt " > > build_config . mk <nl> - ; ; <nl> * ) <nl> echo " Unknown platform ! " <nl> exit 1 <nl> mmm a / db / corruption_test . cc <nl> ppp b / db / corruption_test . cc <nl> TEST ( CorruptionTest , TableFile ) { <nl> Build ( 100 ) ; <nl> DBImpl * dbi = reinterpret_cast < DBImpl * > ( db_ ) ; <nl> dbi - > TEST_CompactMemTable ( ) ; <nl> - dbi - > TEST_CompactRange ( 0 , " " , " ~ " ) ; <nl> - dbi - > TEST_CompactRange ( 1 , " " , " ~ " ) ; <nl> + dbi - > TEST_CompactRange ( 0 , NULL , NULL ) ; <nl> + dbi - > TEST_CompactRange ( 1 , NULL , NULL ) ; <nl> <nl> Corrupt ( kTableFile , 100 , 1 ) ; <nl> Check ( 99 , 99 ) ; <nl> TEST ( CorruptionTest , CorruptedDescriptor ) { <nl> ASSERT_OK ( db_ - > Put ( WriteOptions ( ) , " foo " , " hello " ) ) ; <nl> DBImpl * dbi = reinterpret_cast < DBImpl * > ( db_ ) ; <nl> dbi - > TEST_CompactMemTable ( ) ; <nl> - dbi - > TEST_CompactRange ( 0 , " " , " ~ " ) ; <nl> + dbi - > TEST_CompactRange ( 0 , NULL , NULL ) ; <nl> <nl> Corrupt ( kDescriptorFile , 0 , 1000 ) ; <nl> Status s = TryReopen ( ) ; <nl> mmm a / db / db_bench . cc <nl> ppp b / db / db_bench . cc <nl> class Benchmark { <nl> } <nl> <nl> void Compact ( ThreadState * thread ) { <nl> - DBImpl * dbi = reinterpret_cast < DBImpl * > ( db_ ) ; <nl> - dbi - > TEST_CompactMemTable ( ) ; <nl> - int max_level_with_files = 1 ; <nl> - for ( int level = 1 ; level < config : : kNumLevels ; level + + ) { <nl> - std : : string property ; <nl> - char name [ 100 ] ; <nl> - snprintf ( name , sizeof ( name ) , " leveldb . num - files - at - level % d " , level ) ; <nl> - if ( db_ - > GetProperty ( name , & property ) & & atoi ( property . c_str ( ) ) > 0 ) { <nl> - max_level_with_files = level ; <nl> - } <nl> - } <nl> - for ( int level = 0 ; level < max_level_with_files ; level + + ) { <nl> - dbi - > TEST_CompactRange ( level , " " , " ~ " ) ; <nl> - } <nl> + db_ - > CompactRange ( NULL , NULL ) ; <nl> } <nl> <nl> void PrintStats ( ) { <nl> mmm a / db / db_impl . cc <nl> ppp b / db / db_impl . cc <nl> Status DBImpl : : WriteLevel0Table ( MemTable * mem , VersionEdit * edit , <nl> if ( s . ok ( ) & & meta . file_size > 0 ) { <nl> const Slice min_user_key = meta . smallest . user_key ( ) ; <nl> const Slice max_user_key = meta . largest . user_key ( ) ; <nl> - if ( base ! = NULL & & ! base - > OverlapInLevel ( 0 , min_user_key , max_user_key ) ) { <nl> - / / Push the new sstable to a higher level if possible to reduce <nl> - / / expensive manifest file ops . <nl> - while ( level < config : : kMaxMemCompactLevel & & <nl> - ! base - > OverlapInLevel ( level + 1 , min_user_key , max_user_key ) ) { <nl> - level + + ; <nl> - } <nl> + if ( base ! = NULL ) { <nl> + level = base - > PickLevelForMemTableOutput ( min_user_key , max_user_key ) ; <nl> } <nl> edit - > AddFile ( level , meta . number , meta . file_size , <nl> meta . smallest , meta . 
largest ) ; <nl> Status DBImpl : : CompactMemTable ( ) { <nl> return s ; <nl> } <nl> <nl> - void DBImpl : : TEST_CompactRange ( <nl> - int level , <nl> - const std : : string & begin , <nl> - const std : : string & end ) { <nl> + void DBImpl : : CompactRange ( const Slice * begin , const Slice * end ) { <nl> + int max_level_with_files = 1 ; <nl> + { <nl> + MutexLock l ( & mutex_ ) ; <nl> + Version * base = versions_ - > current ( ) ; <nl> + for ( int level = 1 ; level < config : : kNumLevels ; level + + ) { <nl> + if ( base - > OverlapInLevel ( level , begin , end ) ) { <nl> + max_level_with_files = level ; <nl> + } <nl> + } <nl> + } <nl> + TEST_CompactMemTable ( ) ; / / TODO ( sanjay ) : Skip if memtable does not overlap <nl> + for ( int level = 0 ; level < max_level_with_files ; level + + ) { <nl> + TEST_CompactRange ( level , begin , end ) ; <nl> + } <nl> + } <nl> + <nl> + void DBImpl : : TEST_CompactRange ( int level , const Slice * begin , const Slice * end ) { <nl> assert ( level > = 0 ) ; <nl> assert ( level + 1 < config : : kNumLevels ) ; <nl> <nl> - MutexLock l ( & mutex_ ) ; <nl> - while ( manual_compaction_ ! = NULL ) { <nl> - bg_cv_ . Wait ( ) ; <nl> - } <nl> + InternalKey begin_storage , end_storage ; <nl> + <nl> ManualCompaction manual ; <nl> manual . level = level ; <nl> - manual . begin = begin ; <nl> - manual . end = end ; <nl> - manual_compaction_ = & manual ; <nl> - MaybeScheduleCompaction ( ) ; <nl> - while ( manual_compaction_ = = & manual ) { <nl> - bg_cv_ . Wait ( ) ; <nl> + manual . done = false ; <nl> + if ( begin = = NULL ) { <nl> + manual . begin = NULL ; <nl> + } else { <nl> + begin_storage = InternalKey ( * begin , kMaxSequenceNumber , kValueTypeForSeek ) ; <nl> + manual . begin = & begin_storage ; <nl> + } <nl> + if ( end = = NULL ) { <nl> + manual . end = NULL ; <nl> + } else { <nl> + end_storage = InternalKey ( * end , 0 , static_cast < ValueType > ( 0 ) ) ; <nl> + manual . end = & end_storage ; <nl> + } <nl> + <nl> + MutexLock l ( & mutex_ ) ; <nl> + while ( ! manual . done ) { <nl> + while ( manual_compaction_ ! = NULL ) { <nl> + bg_cv_ . Wait ( ) ; <nl> + } <nl> + manual_compaction_ = & manual ; <nl> + MaybeScheduleCompaction ( ) ; <nl> + while ( manual_compaction_ = = & manual ) { <nl> + bg_cv_ . Wait ( ) ; <nl> + } <nl> } <nl> } <nl> <nl> void DBImpl : : BackgroundCompaction ( ) { <nl> <nl> Compaction * c ; <nl> bool is_manual = ( manual_compaction_ ! = NULL ) ; <nl> + InternalKey manual_end ; <nl> if ( is_manual ) { <nl> - const ManualCompaction * m = manual_compaction_ ; <nl> - c = versions_ - > CompactRange ( <nl> + ManualCompaction * m = manual_compaction_ ; <nl> + c = versions_ - > CompactRange ( m - > level , m - > begin , m - > end ) ; <nl> + m - > done = ( c = = NULL ) ; <nl> + if ( c ! = NULL ) { <nl> + manual_end = c - > input ( 0 , c - > num_input_files ( 0 ) - 1 ) - > largest ; <nl> + } <nl> + Log ( options_ . info_log , <nl> + " Manual compaction at level - % d from % s . . % s ; will stop at % s \ n " , <nl> m - > level , <nl> - InternalKey ( m - > begin , kMaxSequenceNumber , kValueTypeForSeek ) , <nl> - InternalKey ( m - > end , 0 , static_cast < ValueType > ( 0 ) ) ) ; <nl> + ( m - > begin ? m - > begin - > DebugString ( ) . c_str ( ) : " ( begin ) " ) , <nl> + ( m - > end ? m - > end - > DebugString ( ) . c_str ( ) : " ( end ) " ) , <nl> + ( m - > done ? " ( end ) " : manual_end . DebugString ( ) . 
c_str ( ) ) ) ; <nl> } else { <nl> c = versions_ - > PickCompaction ( ) ; <nl> } <nl> void DBImpl : : BackgroundCompaction ( ) { <nl> } <nl> <nl> if ( is_manual ) { <nl> - / / Mark it as done <nl> + ManualCompaction * m = manual_compaction_ ; <nl> + if ( ! m - > done ) { <nl> + / / We only compacted part of the requested range . Update * m <nl> + / / to the range that is left to be compacted . <nl> + m - > tmp_storage = manual_end ; <nl> + m - > begin = & m - > tmp_storage ; <nl> + } <nl> manual_compaction_ = NULL ; <nl> } <nl> } <nl> Status DBImpl : : Write ( const WriteOptions & options , WriteBatch * updates ) { <nl> <nl> versions_ - > SetLastSequence ( last_sequence ) ; <nl> } <nl> - if ( options . post_write_snapshot ! = NULL ) { <nl> - * options . post_write_snapshot = <nl> - status . ok ( ) ? snapshots_ . New ( last_sequence ) : NULL ; <nl> - } <nl> ReleaseLoggingResponsibility ( & self ) ; <nl> return status ; <nl> } <nl> bool DBImpl : : GetProperty ( const Slice & property , std : : string * value ) { <nl> } <nl> } <nl> return true ; <nl> + } else if ( in = = " sstables " ) { <nl> + * value = versions_ - > current ( ) - > DebugString ( ) ; <nl> + return true ; <nl> } <nl> <nl> return false ; <nl> mmm a / db / db_impl . h <nl> ppp b / db / db_impl . h <nl> class DBImpl : public DB { <nl> virtual void ReleaseSnapshot ( const Snapshot * snapshot ) ; <nl> virtual bool GetProperty ( const Slice & property , std : : string * value ) ; <nl> virtual void GetApproximateSizes ( const Range * range , int n , uint64_t * sizes ) ; <nl> + virtual void CompactRange ( const Slice * begin , const Slice * end ) ; <nl> <nl> / / Extra methods ( for testing ) that are not in the public DB interface <nl> <nl> - / / Compact any files in the named level that overlap [ begin , end ] <nl> - void TEST_CompactRange ( <nl> - int level , <nl> - const std : : string & begin , <nl> - const std : : string & end ) ; <nl> + / / Compact any files in the named level that overlap [ * begin , * end ] <nl> + void TEST_CompactRange ( int level , const Slice * begin , const Slice * end ) ; <nl> <nl> / / Force current memtable contents to be compacted . <nl> Status TEST_CompactMemTable ( ) ; <nl> class DBImpl : public DB { <nl> / / Information for a manual compaction <nl> struct ManualCompaction { <nl> int level ; <nl> - std : : string begin ; <nl> - std : : string end ; <nl> + bool done ; <nl> + const InternalKey * begin ; / / NULL means beginning of key range <nl> + const InternalKey * end ; / / NULL means end of key range <nl> + InternalKey tmp_storage ; / / Used to keep track of compaction progress <nl> } ; <nl> ManualCompaction * manual_compaction_ ; <nl> <nl> mmm a / db / db_test . cc <nl> ppp b / db / db_test . cc <nl> class DBTest { <nl> return result ; <nl> } <nl> <nl> + / / Return spread of files per level <nl> + std : : string FilesPerLevel ( ) { <nl> + std : : string result ; <nl> + int last_non_zero_offset = 0 ; <nl> + for ( int level = 0 ; level < config : : kNumLevels ; level + + ) { <nl> + int f = NumTableFilesAtLevel ( level ) ; <nl> + char buf [ 100 ] ; <nl> + snprintf ( buf , sizeof ( buf ) , " % s % d " , ( level ? " , " : " " ) , f ) ; <nl> + result + = buf ; <nl> + if ( f > 0 ) { <nl> + last_non_zero_offset = result . size ( ) ; <nl> + } <nl> + } <nl> + result . 
resize ( last_non_zero_offset ) ; <nl> + return result ; <nl> + } <nl> + <nl> uint64_t Size ( const Slice & start , const Slice & limit ) { <nl> Range r ( start , limit ) ; <nl> uint64_t size ; <nl> class DBTest { <nl> } <nl> <nl> void Compact ( const Slice & start , const Slice & limit ) { <nl> - dbfull ( ) - > TEST_CompactMemTable ( ) ; <nl> - int max_level_with_files = 1 ; <nl> - for ( int level = 1 ; level < config : : kNumLevels ; level + + ) { <nl> - if ( NumTableFilesAtLevel ( level ) > 0 ) { <nl> - max_level_with_files = level ; <nl> - } <nl> - } <nl> - for ( int level = 0 ; level < max_level_with_files ; level + + ) { <nl> - dbfull ( ) - > TEST_CompactRange ( level , " " , " ~ " ) ; <nl> + db_ - > CompactRange ( & start , & limit ) ; <nl> + } <nl> + <nl> + / / Do n memtable compactions , each of which produces an sstable <nl> + / / covering the range [ small , large ] . <nl> + void MakeTables ( int n , const std : : string & small , const std : : string & large ) { <nl> + for ( int i = 0 ; i < n ; i + + ) { <nl> + Put ( small , " begin " ) ; <nl> + Put ( large , " end " ) ; <nl> + dbfull ( ) - > TEST_CompactMemTable ( ) ; <nl> } <nl> } <nl> <nl> / / Prevent pushing of new sstables into deeper levels by adding <nl> / / tables that cover a specified range to all levels . <nl> void FillLevels ( const std : : string & smallest , const std : : string & largest ) { <nl> - for ( int level = 0 ; level < config : : kNumLevels ; level + + ) { <nl> - Put ( smallest , " begin " ) ; <nl> - Put ( largest , " end " ) ; <nl> - dbfull ( ) - > TEST_CompactMemTable ( ) ; <nl> - } <nl> + MakeTables ( config : : kNumLevels , smallest , largest ) ; <nl> } <nl> <nl> void DumpFileCounts ( const char * label ) { <nl> class DBTest { <nl> } <nl> } <nl> <nl> + std : : string DumpSSTableList ( ) { <nl> + std : : string property ; <nl> + db_ - > GetProperty ( " leveldb . sstables " , & property ) ; <nl> + return property ; <nl> + } <nl> + <nl> std : : string IterStatus ( Iterator * iter ) { <nl> std : : string result ; <nl> if ( iter - > Valid ( ) ) { <nl> TEST ( DBTest , GetEncountersEmptyLevel ) { <nl> } <nl> <nl> / / Step 2 : clear level 1 if necessary . <nl> - dbfull ( ) - > TEST_CompactRange ( 1 , " a " , " z " ) ; <nl> + dbfull ( ) - > TEST_CompactRange ( 1 , NULL , NULL ) ; <nl> ASSERT_EQ ( NumTableFilesAtLevel ( 0 ) , 1 ) ; <nl> ASSERT_EQ ( NumTableFilesAtLevel ( 1 ) , 0 ) ; <nl> ASSERT_EQ ( NumTableFilesAtLevel ( 2 ) , 1 ) ; <nl> TEST ( DBTest , CompactionsGenerateMultipleFiles ) { <nl> <nl> / / Reopening moves updates to level - 0 <nl> Reopen ( & options ) ; <nl> - dbfull ( ) - > TEST_CompactRange ( 0 , " " , Key ( 100000 ) ) ; <nl> + dbfull ( ) - > TEST_CompactRange ( 0 , NULL , NULL ) ; <nl> <nl> ASSERT_EQ ( NumTableFilesAtLevel ( 0 ) , 0 ) ; <nl> ASSERT_GT ( NumTableFilesAtLevel ( 1 ) , 1 ) ; <nl> TEST ( DBTest , SparseMerge ) { <nl> } <nl> Put ( " C " , " vc " ) ; <nl> dbfull ( ) - > TEST_CompactMemTable ( ) ; <nl> - dbfull ( ) - > TEST_CompactRange ( 0 , " A " , " Z " ) ; <nl> + dbfull ( ) - > TEST_CompactRange ( 0 , NULL , NULL ) ; <nl> <nl> / / Make sparse update <nl> Put ( " A " , " va2 " ) ; <nl> TEST ( DBTest , SparseMerge ) { <nl> / / Compactions should not cause us to create a situation where <nl> / / a file overlaps too much data at the next level . 
<nl> ASSERT_LE ( dbfull ( ) - > TEST_MaxNextLevelOverlappingBytes ( ) , 20 * 1048576 ) ; <nl> - dbfull ( ) - > TEST_CompactRange ( 0 , " " , " z " ) ; <nl> + dbfull ( ) - > TEST_CompactRange ( 0 , NULL , NULL ) ; <nl> ASSERT_LE ( dbfull ( ) - > TEST_MaxNextLevelOverlappingBytes ( ) , 20 * 1048576 ) ; <nl> - dbfull ( ) - > TEST_CompactRange ( 1 , " " , " z " ) ; <nl> + dbfull ( ) - > TEST_CompactRange ( 1 , NULL , NULL ) ; <nl> ASSERT_LE ( dbfull ( ) - > TEST_MaxNextLevelOverlappingBytes ( ) , 20 * 1048576 ) ; <nl> } <nl> <nl> TEST ( DBTest , ApproximateSizes ) { <nl> ASSERT_TRUE ( Between ( Size ( " " , Key ( 50 ) ) , 5000000 , 5010000 ) ) ; <nl> ASSERT_TRUE ( Between ( Size ( " " , Key ( 50 ) + " . suffix " ) , 5100000 , 5110000 ) ) ; <nl> <nl> - dbfull ( ) - > TEST_CompactRange ( 0 , <nl> - Key ( compact_start ) , <nl> - Key ( compact_start + 9 ) ) ; <nl> + std : : string cstart_str = Key ( compact_start ) ; <nl> + std : : string cend_str = Key ( compact_start + 9 ) ; <nl> + Slice cstart = cstart_str ; <nl> + Slice cend = cend_str ; <nl> + dbfull ( ) - > TEST_CompactRange ( 0 , & cstart , & cend ) ; <nl> } <nl> <nl> ASSERT_EQ ( NumTableFilesAtLevel ( 0 ) , 0 ) ; <nl> TEST ( DBTest , ApproximateSizes_MixOfSmallAndLarge ) { <nl> <nl> ASSERT_TRUE ( Between ( Size ( Key ( 3 ) , Key ( 5 ) ) , 110000 , 111000 ) ) ; <nl> <nl> - dbfull ( ) - > TEST_CompactRange ( 0 , Key ( 0 ) , Key ( 100 ) ) ; <nl> + dbfull ( ) - > TEST_CompactRange ( 0 , NULL , NULL ) ; <nl> } <nl> } <nl> <nl> TEST ( DBTest , HiddenValuesAreRemoved ) { <nl> ASSERT_TRUE ( Between ( Size ( " " , " pastfoo " ) , 50000 , 60000 ) ) ; <nl> db_ - > ReleaseSnapshot ( snapshot ) ; <nl> ASSERT_EQ ( AllEntriesFor ( " foo " ) , " [ tiny , " + big + " ] " ) ; <nl> - dbfull ( ) - > TEST_CompactRange ( 0 , " " , " x " ) ; <nl> + Slice x ( " x " ) ; <nl> + dbfull ( ) - > TEST_CompactRange ( 0 , NULL , & x ) ; <nl> ASSERT_EQ ( AllEntriesFor ( " foo " ) , " [ tiny ] " ) ; <nl> ASSERT_EQ ( NumTableFilesAtLevel ( 0 ) , 0 ) ; <nl> ASSERT_GE ( NumTableFilesAtLevel ( 1 ) , 1 ) ; <nl> - dbfull ( ) - > TEST_CompactRange ( 1 , " " , " x " ) ; <nl> + dbfull ( ) - > TEST_CompactRange ( 1 , NULL , & x ) ; <nl> ASSERT_EQ ( AllEntriesFor ( " foo " ) , " [ tiny ] " ) ; <nl> <nl> ASSERT_TRUE ( Between ( Size ( " " , " pastfoo " ) , 0 , 1000 ) ) ; <nl> TEST ( DBTest , DeletionMarkers1 ) { <nl> ASSERT_EQ ( AllEntriesFor ( " foo " ) , " [ v2 , DEL , v1 ] " ) ; <nl> ASSERT_OK ( dbfull ( ) - > TEST_CompactMemTable ( ) ) ; / / Moves to level last - 2 <nl> ASSERT_EQ ( AllEntriesFor ( " foo " ) , " [ v2 , DEL , v1 ] " ) ; <nl> - dbfull ( ) - > TEST_CompactRange ( last - 2 , " " , " z " ) ; <nl> + Slice z ( " z " ) ; <nl> + dbfull ( ) - > TEST_CompactRange ( last - 2 , NULL , & z ) ; <nl> / / DEL eliminated , but v1 remains because we aren ' t compacting that level <nl> / / ( DEL can be eliminated because v2 hides v1 ) . <nl> ASSERT_EQ ( AllEntriesFor ( " foo " ) , " [ v2 , v1 ] " ) ; <nl> - dbfull ( ) - > TEST_CompactRange ( last - 1 , " " , " z " ) ; <nl> + dbfull ( ) - > TEST_CompactRange ( last - 1 , NULL , NULL ) ; <nl> / / Merging last - 1 w / last , so we are the base level for " foo " , so <nl> / / DEL is removed . ( as is v1 ) . 
<nl> ASSERT_EQ ( AllEntriesFor ( " foo " ) , " [ v2 ] " ) ; <nl> TEST ( DBTest , DeletionMarkers2 ) { <nl> ASSERT_EQ ( AllEntriesFor ( " foo " ) , " [ DEL , v1 ] " ) ; <nl> ASSERT_OK ( dbfull ( ) - > TEST_CompactMemTable ( ) ) ; / / Moves to level last - 2 <nl> ASSERT_EQ ( AllEntriesFor ( " foo " ) , " [ DEL , v1 ] " ) ; <nl> - dbfull ( ) - > TEST_CompactRange ( last - 2 , " " , " z " ) ; <nl> + dbfull ( ) - > TEST_CompactRange ( last - 2 , NULL , NULL ) ; <nl> / / DEL kept : " last " file overlaps <nl> ASSERT_EQ ( AllEntriesFor ( " foo " ) , " [ DEL , v1 ] " ) ; <nl> - dbfull ( ) - > TEST_CompactRange ( last - 1 , " " , " z " ) ; <nl> + dbfull ( ) - > TEST_CompactRange ( last - 1 , NULL , NULL ) ; <nl> / / Merging last - 1 w / last , so we are the base level for " foo " , so <nl> / / DEL is removed . ( as is v1 ) . <nl> ASSERT_EQ ( AllEntriesFor ( " foo " ) , " [ ] " ) ; <nl> } <nl> <nl> + TEST ( DBTest , OverlapInLevel0 ) { <nl> + ASSERT_EQ ( config : : kMaxMemCompactLevel , 2 ) < < " Fix test to match config " ; <nl> + <nl> + / / Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0 . <nl> + ASSERT_OK ( Put ( " 100 " , " v100 " ) ) ; <nl> + ASSERT_OK ( Put ( " 999 " , " v999 " ) ) ; <nl> + dbfull ( ) - > TEST_CompactMemTable ( ) ; <nl> + ASSERT_OK ( Delete ( " 100 " ) ) ; <nl> + ASSERT_OK ( Delete ( " 999 " ) ) ; <nl> + dbfull ( ) - > TEST_CompactMemTable ( ) ; <nl> + ASSERT_EQ ( " 0 , 1 , 1 " , FilesPerLevel ( ) ) ; <nl> + <nl> + / / Make files spanning the following ranges in level - 0 : <nl> + / / files [ 0 ] 200 . . 900 <nl> + / / files [ 1 ] 300 . . 500 <nl> + / / Note that files are sorted by smallest key . <nl> + ASSERT_OK ( Put ( " 300 " , " v300 " ) ) ; <nl> + ASSERT_OK ( Put ( " 500 " , " v500 " ) ) ; <nl> + dbfull ( ) - > TEST_CompactMemTable ( ) ; <nl> + ASSERT_OK ( Put ( " 200 " , " v200 " ) ) ; <nl> + ASSERT_OK ( Put ( " 600 " , " v600 " ) ) ; <nl> + ASSERT_OK ( Put ( " 900 " , " v900 " ) ) ; <nl> + dbfull ( ) - > TEST_CompactMemTable ( ) ; <nl> + ASSERT_EQ ( " 2 , 1 , 1 " , FilesPerLevel ( ) ) ; <nl> + <nl> + / / Compact away the placeholder files we created initially <nl> + dbfull ( ) - > TEST_CompactRange ( 1 , NULL , NULL ) ; <nl> + dbfull ( ) - > TEST_CompactRange ( 2 , NULL , NULL ) ; <nl> + ASSERT_EQ ( " 2 " , FilesPerLevel ( ) ) ; <nl> + <nl> + / / Do a memtable compaction . Before bug - fix , the compaction would <nl> + / / not detect the overlap with level - 0 files and would incorrectly place <nl> + / / the deletion in a deeper level . <nl> + ASSERT_OK ( Delete ( " 600 " ) ) ; <nl> + dbfull ( ) - > TEST_CompactMemTable ( ) ; <nl> + ASSERT_EQ ( " 3 " , FilesPerLevel ( ) ) ; <nl> + ASSERT_EQ ( " NOT_FOUND " , Get ( " 600 " ) ) ; <nl> + } <nl> + <nl> TEST ( DBTest , ComparatorCheck ) { <nl> class NewComparator : public Comparator { <nl> public : <nl> TEST ( DBTest , ComparatorCheck ) { <nl> < < s . 
ToString ( ) ; <nl> } <nl> <nl> + TEST ( DBTest , ManualCompaction ) { <nl> + ASSERT_EQ ( config : : kMaxMemCompactLevel , 2 ) <nl> + < < " Need to update this test to match kMaxMemCompactLevel " ; <nl> + <nl> + MakeTables ( 3 , " p " , " q " ) ; <nl> + ASSERT_EQ ( " 1 , 1 , 1 " , FilesPerLevel ( ) ) ; <nl> + <nl> + / / Compaction range falls before files <nl> + Compact ( " " , " c " ) ; <nl> + ASSERT_EQ ( " 1 , 1 , 1 " , FilesPerLevel ( ) ) ; <nl> + <nl> + / / Compaction range falls after files <nl> + Compact ( " r " , " z " ) ; <nl> + ASSERT_EQ ( " 1 , 1 , 1 " , FilesPerLevel ( ) ) ; <nl> + <nl> + / / Compaction range overlaps files <nl> + Compact ( " p1 " , " p9 " ) ; <nl> + ASSERT_EQ ( " 0 , 0 , 1 " , FilesPerLevel ( ) ) ; <nl> + <nl> + / / Populate a different range <nl> + MakeTables ( 3 , " c " , " e " ) ; <nl> + ASSERT_EQ ( " 1 , 1 , 2 " , FilesPerLevel ( ) ) ; <nl> + <nl> + / / Compact just the new range <nl> + Compact ( " b " , " f " ) ; <nl> + ASSERT_EQ ( " 0 , 0 , 2 " , FilesPerLevel ( ) ) ; <nl> + <nl> + / / Compact all <nl> + MakeTables ( 1 , " a " , " z " ) ; <nl> + ASSERT_EQ ( " 0 , 1 , 2 " , FilesPerLevel ( ) ) ; <nl> + db_ - > CompactRange ( NULL , NULL ) ; <nl> + ASSERT_EQ ( " 0 , 0 , 1 " , FilesPerLevel ( ) ) ; <nl> + } <nl> + <nl> TEST ( DBTest , DBOpen_Options ) { <nl> std : : string dbname = test : : TmpDir ( ) + " / db_options_test " ; <nl> DestroyDB ( dbname , Options ( ) ) ; <nl> class ModelDB : public DB { <nl> delete reinterpret_cast < const ModelSnapshot * > ( snapshot ) ; <nl> } <nl> virtual Status Write ( const WriteOptions & options , WriteBatch * batch ) { <nl> - assert ( options . post_write_snapshot = = NULL ) ; / / Not supported <nl> class Handler : public WriteBatch : : Handler { <nl> public : <nl> KVMap * map_ ; <nl> class ModelDB : public DB { <nl> sizes [ i ] = 0 ; <nl> } <nl> } <nl> + virtual void CompactRange ( const Slice * start , const Slice * end ) { <nl> + } <nl> + <nl> private : <nl> class ModelIter : public Iterator { <nl> public : <nl> mmm a / db / dbformat . cc <nl> ppp b / db / dbformat . cc <nl> std : : string ParsedInternalKey : : DebugString ( ) const { <nl> return result ; <nl> } <nl> <nl> + std : : string InternalKey : : DebugString ( ) const { <nl> + std : : string result ; <nl> + ParsedInternalKey parsed ; <nl> + if ( ParseInternalKey ( rep_ , & parsed ) ) { <nl> + result = parsed . DebugString ( ) ; <nl> + } else { <nl> + result = " ( bad ) " ; <nl> + result . append ( EscapeString ( rep_ ) ) ; <nl> + } <nl> + return result ; <nl> + } <nl> + <nl> const char * InternalKeyComparator : : Name ( ) const { <nl> return " leveldb . InternalKeyComparator " ; <nl> } <nl> mmm a / db / dbformat . h <nl> ppp b / db / dbformat . h <nl> class InternalKey { <nl> } <nl> <nl> void Clear ( ) { rep_ . clear ( ) ; } <nl> + <nl> + std : : string DebugString ( ) const ; <nl> } ; <nl> <nl> inline int InternalKeyComparator : : Compare ( <nl> mmm a / db / version_edit . cc <nl> ppp b / db / version_edit . cc <nl> std : : string VersionEdit : : DebugString ( ) const { <nl> for ( size_t i = 0 ; i < compact_pointers_ . size ( ) ; i + + ) { <nl> r . append ( " \ n CompactPointer : " ) ; <nl> AppendNumberTo ( & r , compact_pointers_ [ i ] . first ) ; <nl> - r . append ( " ' " ) ; <nl> - AppendEscapedStringTo ( & r , compact_pointers_ [ i ] . second . Encode ( ) ) ; <nl> - r . append ( " ' " ) ; <nl> + r . append ( " " ) ; <nl> + r . append ( compact_pointers_ [ i ] . second . 
DebugString ( ) ) ; <nl> } <nl> for ( DeletedFileSet : : const_iterator iter = deleted_files_ . begin ( ) ; <nl> iter ! = deleted_files_ . end ( ) ; <nl> std : : string VersionEdit : : DebugString ( ) const { <nl> AppendNumberTo ( & r , f . number ) ; <nl> r . append ( " " ) ; <nl> AppendNumberTo ( & r , f . file_size ) ; <nl> - r . append ( " ' " ) ; <nl> - AppendEscapedStringTo ( & r , f . smallest . Encode ( ) ) ; <nl> - r . append ( " ' . . ' " ) ; <nl> - AppendEscapedStringTo ( & r , f . largest . Encode ( ) ) ; <nl> - r . append ( " ' " ) ; <nl> + r . append ( " " ) ; <nl> + r . append ( f . smallest . DebugString ( ) ) ; <nl> + r . append ( " . . " ) ; <nl> + r . append ( f . largest . DebugString ( ) ) ; <nl> } <nl> r . append ( " \ n } \ n " ) ; <nl> return r ; <nl> mmm a / db / version_set . cc <nl> ppp b / db / version_set . cc <nl> static uint64_t MaxFileSizeForLevel ( int level ) { <nl> return kTargetFileSize ; / / We could vary per level to reduce number of files ? <nl> } <nl> <nl> + static int64_t TotalFileSize ( const std : : vector < FileMetaData * > & files ) { <nl> + int64_t sum = 0 ; <nl> + for ( size_t i = 0 ; i < files . size ( ) ; i + + ) { <nl> + sum + = files [ i ] - > file_size ; <nl> + } <nl> + return sum ; <nl> + } <nl> + <nl> namespace { <nl> std : : string IntSetToString ( const std : : set < uint64_t > & s ) { <nl> std : : string result = " { " ; <nl> int FindFile ( const InternalKeyComparator & icmp , <nl> return right ; <nl> } <nl> <nl> + static bool AfterFile ( const Comparator * ucmp , <nl> + const Slice * user_key , const FileMetaData * f ) { <nl> + / / NULL user_key occurs before all keys and is therefore never after * f <nl> + return ( user_key ! = NULL & & <nl> + ucmp - > Compare ( * user_key , f - > largest . user_key ( ) ) > 0 ) ; <nl> + } <nl> + <nl> + static bool BeforeFile ( const Comparator * ucmp , <nl> + const Slice * user_key , const FileMetaData * f ) { <nl> + / / NULL user_key occurs after all keys and is therefore never before * f <nl> + return ( user_key ! = NULL & & <nl> + ucmp - > Compare ( * user_key , f - > smallest . user_key ( ) ) < 0 ) ; <nl> + } <nl> + <nl> bool SomeFileOverlapsRange ( <nl> const InternalKeyComparator & icmp , <nl> + bool disjoint_sorted_files , <nl> const std : : vector < FileMetaData * > & files , <nl> - const Slice & smallest_user_key , <nl> - const Slice & largest_user_key ) { <nl> - / / Find the earliest possible internal key for smallest_user_key <nl> - InternalKey small ( smallest_user_key , kMaxSequenceNumber , kValueTypeForSeek ) ; <nl> - const uint32_t index = FindFile ( icmp , files , small . Encode ( ) ) ; <nl> - return ( ( index < files . size ( ) ) & & <nl> - icmp . user_comparator ( ) - > Compare ( <nl> - largest_user_key , files [ index ] - > smallest . user_key ( ) ) > = 0 ) ; <nl> + const Slice * smallest_user_key , <nl> + const Slice * largest_user_key ) { <nl> + const Comparator * ucmp = icmp . user_comparator ( ) ; <nl> + if ( ! disjoint_sorted_files ) { <nl> + / / Need to check against all files <nl> + for ( int i = 0 ; i < files . size ( ) ; i + + ) { <nl> + const FileMetaData * f = files [ i ] ; <nl> + if ( AfterFile ( ucmp , smallest_user_key , f ) | | <nl> + BeforeFile ( ucmp , largest_user_key , f ) ) { <nl> + / / No overlap <nl> + } else { <nl> + return true ; / / Overlap <nl> + } <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> + / / Binary search over file list <nl> + uint32_t index = 0 ; <nl> + if ( smallest_user_key ! 
= NULL ) { <nl> + / / Find the earliest possible internal key for smallest_user_key <nl> + InternalKey small ( * smallest_user_key , kMaxSequenceNumber , kValueTypeForSeek ) ; <nl> + index = FindFile ( icmp , files , small . Encode ( ) ) ; <nl> + } <nl> + <nl> + if ( index > = files . size ( ) ) { <nl> + / / beginning of range is after all files , so no overlap . <nl> + return false ; <nl> + } <nl> + <nl> + return ! BeforeFile ( ucmp , largest_user_key , files [ index ] ) ; <nl> } <nl> <nl> / / An internal iterator . For a given version / level pair , yields <nl> void Version : : Unref ( ) { <nl> } <nl> <nl> bool Version : : OverlapInLevel ( int level , <nl> - const Slice & smallest_user_key , <nl> - const Slice & largest_user_key ) { <nl> - return SomeFileOverlapsRange ( vset_ - > icmp_ , files_ [ level ] , <nl> - smallest_user_key , <nl> - largest_user_key ) ; <nl> + const Slice * smallest_user_key , <nl> + const Slice * largest_user_key ) { <nl> + return SomeFileOverlapsRange ( vset_ - > icmp_ , ( level > 0 ) , files_ [ level ] , <nl> + smallest_user_key , largest_user_key ) ; <nl> + } <nl> + <nl> + int Version : : PickLevelForMemTableOutput ( <nl> + const Slice & smallest_user_key , <nl> + const Slice & largest_user_key ) { <nl> + int level = 0 ; <nl> + if ( ! OverlapInLevel ( 0 , & smallest_user_key , & largest_user_key ) ) { <nl> + / / Push to next level if there is no overlap in next level , <nl> + / / and the # bytes overlapping in the level after that are limited . <nl> + InternalKey start ( smallest_user_key , kMaxSequenceNumber , kValueTypeForSeek ) ; <nl> + InternalKey limit ( largest_user_key , 0 , static_cast < ValueType > ( 0 ) ) ; <nl> + std : : vector < FileMetaData * > overlaps ; <nl> + while ( level < config : : kMaxMemCompactLevel ) { <nl> + if ( OverlapInLevel ( level + 1 , & smallest_user_key , & largest_user_key ) ) { <nl> + break ; <nl> + } <nl> + GetOverlappingInputs ( level + 2 , & start , & limit , & overlaps ) ; <nl> + const int64_t sum = TotalFileSize ( overlaps ) ; <nl> + if ( sum > kMaxGrandParentOverlapBytes ) { <nl> + break ; <nl> + } <nl> + level + + ; <nl> + } <nl> + } <nl> + return level ; <nl> + } <nl> + <nl> + / / Store in " * inputs " all files in " level " that overlap [ begin , end ] <nl> + void Version : : GetOverlappingInputs ( <nl> + int level , <nl> + const InternalKey * begin , <nl> + const InternalKey * end , <nl> + std : : vector < FileMetaData * > * inputs ) { <nl> + inputs - > clear ( ) ; <nl> + Slice user_begin , user_end ; <nl> + if ( begin ! = NULL ) { <nl> + user_begin = begin - > user_key ( ) ; <nl> + } <nl> + if ( end ! = NULL ) { <nl> + user_end = end - > user_key ( ) ; <nl> + } <nl> + const Comparator * user_cmp = vset_ - > icmp_ . user_comparator ( ) ; <nl> + for ( size_t i = 0 ; i < files_ [ level ] . size ( ) ; i + + ) { <nl> + FileMetaData * f = files_ [ level ] [ i ] ; <nl> + if ( begin ! = NULL & & <nl> + user_cmp - > Compare ( f - > largest . user_key ( ) , user_begin ) < 0 ) { <nl> + / / " f " is completely before specified range ; skip it <nl> + } else if ( end ! = NULL & & <nl> + user_cmp - > Compare ( f - > smallest . user_key ( ) , user_end ) > 0 ) { <nl> + / / " f " is completely after specified range ; skip it <nl> + } else { <nl> + inputs - > push_back ( f ) ; <nl> + } <nl> + } <nl> } <nl> <nl> std : : string Version : : DebugString ( ) const { <nl> std : : string Version : : DebugString ( ) const { <nl> AppendNumberTo ( & r , files [ i ] - > number ) ; <nl> r . 
push_back ( ' : ' ) ; <nl> AppendNumberTo ( & r , files [ i ] - > file_size ) ; <nl> - r . append ( " [ ' " ) ; <nl> - AppendEscapedStringTo ( & r , files [ i ] - > smallest . Encode ( ) ) ; <nl> - r . append ( " ' . . ' " ) ; <nl> - AppendEscapedStringTo ( & r , files [ i ] - > largest . Encode ( ) ) ; <nl> - r . append ( " ' ] \ n " ) ; <nl> + r . append ( " [ " ) ; <nl> + r . append ( files [ i ] - > smallest . DebugString ( ) ) ; <nl> + r . append ( " . . " ) ; <nl> + r . append ( files [ i ] - > largest . DebugString ( ) ) ; <nl> + r . append ( " ] \ n " ) ; <nl> } <nl> } <nl> return r ; <nl> class VersionSet : : Builder { <nl> const InternalKey & this_begin = v - > files_ [ level ] [ i ] - > smallest ; <nl> if ( vset_ - > icmp_ . Compare ( prev_end , this_begin ) > = 0 ) { <nl> fprintf ( stderr , " overlapping ranges in same level % s vs . % s \ n " , <nl> - EscapeString ( prev_end . Encode ( ) ) . c_str ( ) , <nl> - EscapeString ( this_begin . Encode ( ) ) . c_str ( ) ) ; <nl> + prev_end . DebugString ( ) . c_str ( ) , <nl> + this_begin . DebugString ( ) . c_str ( ) ) ; <nl> abort ( ) ; <nl> } <nl> } <nl> void VersionSet : : MarkFileNumberUsed ( uint64_t number ) { <nl> } <nl> } <nl> <nl> - static int64_t TotalFileSize ( const std : : vector < FileMetaData * > & files ) { <nl> - int64_t sum = 0 ; <nl> - for ( size_t i = 0 ; i < files . size ( ) ; i + + ) { <nl> - sum + = files [ i ] - > file_size ; <nl> - } <nl> - return sum ; <nl> - } <nl> - <nl> void VersionSet : : Finalize ( Version * v ) { <nl> / / Precomputed best level for next compaction <nl> int best_level = - 1 ; <nl> int64_t VersionSet : : MaxNextLevelOverlappingBytes ( ) { <nl> for ( int level = 1 ; level < config : : kNumLevels - 1 ; level + + ) { <nl> for ( size_t i = 0 ; i < current_ - > files_ [ level ] . size ( ) ; i + + ) { <nl> const FileMetaData * f = current_ - > files_ [ level ] [ i ] ; <nl> - GetOverlappingInputs ( level + 1 , f - > smallest , f - > largest , & overlaps ) ; <nl> + current_ - > GetOverlappingInputs ( level + 1 , & f - > smallest , & f - > largest , <nl> + & overlaps ) ; <nl> const int64_t sum = TotalFileSize ( overlaps ) ; <nl> if ( sum > result ) { <nl> result = sum ; <nl> int64_t VersionSet : : MaxNextLevelOverlappingBytes ( ) { <nl> return result ; <nl> } <nl> <nl> - / / Store in " * inputs " all files in " level " that overlap [ begin , end ] <nl> - void VersionSet : : GetOverlappingInputs ( <nl> - int level , <nl> - const InternalKey & begin , <nl> - const InternalKey & end , <nl> - std : : vector < FileMetaData * > * inputs ) { <nl> - inputs - > clear ( ) ; <nl> - Slice user_begin = begin . user_key ( ) ; <nl> - Slice user_end = end . user_key ( ) ; <nl> - const Comparator * user_cmp = icmp_ . user_comparator ( ) ; <nl> - for ( size_t i = 0 ; i < current_ - > files_ [ level ] . size ( ) ; i + + ) { <nl> - FileMetaData * f = current_ - > files_ [ level ] [ i ] ; <nl> - if ( user_cmp - > Compare ( f - > largest . user_key ( ) , user_begin ) < 0 | | <nl> - user_cmp - > Compare ( f - > smallest . user_key ( ) , user_end ) > 0 ) { <nl> - / / Either completely before or after range ; skip it <nl> - } else { <nl> - inputs - > push_back ( f ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> / / Stores the minimal range that covers all entries in inputs in <nl> / / * smallest , * largest . 
<nl> / / REQUIRES : inputs is not empty <nl> Compaction * VersionSet : : PickCompaction ( ) { <nl> / / Note that the next call will discard the file we placed in <nl> / / c - > inputs_ [ 0 ] earlier and replace it with an overlapping set <nl> / / which will include the picked file . <nl> - GetOverlappingInputs ( 0 , smallest , largest , & c - > inputs_ [ 0 ] ) ; <nl> + current_ - > GetOverlappingInputs ( 0 , & smallest , & largest , & c - > inputs_ [ 0 ] ) ; <nl> assert ( ! c - > inputs_ [ 0 ] . empty ( ) ) ; <nl> } <nl> <nl> void VersionSet : : SetupOtherInputs ( Compaction * c ) { <nl> InternalKey smallest , largest ; <nl> GetRange ( c - > inputs_ [ 0 ] , & smallest , & largest ) ; <nl> <nl> - GetOverlappingInputs ( level + 1 , smallest , largest , & c - > inputs_ [ 1 ] ) ; <nl> + current_ - > GetOverlappingInputs ( level + 1 , & smallest , & largest , & c - > inputs_ [ 1 ] ) ; <nl> <nl> / / Get entire range covered by compaction <nl> InternalKey all_start , all_limit ; <nl> void VersionSet : : SetupOtherInputs ( Compaction * c ) { <nl> / / changing the number of " level + 1 " files we pick up . <nl> if ( ! c - > inputs_ [ 1 ] . empty ( ) ) { <nl> std : : vector < FileMetaData * > expanded0 ; <nl> - GetOverlappingInputs ( level , all_start , all_limit , & expanded0 ) ; <nl> + current_ - > GetOverlappingInputs ( level , & all_start , & all_limit , & expanded0 ) ; <nl> if ( expanded0 . size ( ) > c - > inputs_ [ 0 ] . size ( ) ) { <nl> InternalKey new_start , new_limit ; <nl> GetRange ( expanded0 , & new_start , & new_limit ) ; <nl> std : : vector < FileMetaData * > expanded1 ; <nl> - GetOverlappingInputs ( level + 1 , new_start , new_limit , & expanded1 ) ; <nl> + current_ - > GetOverlappingInputs ( level + 1 , & new_start , & new_limit , <nl> + & expanded1 ) ; <nl> if ( expanded1 . size ( ) = = c - > inputs_ [ 1 ] . size ( ) ) { <nl> Log ( options_ - > info_log , <nl> " Expanding @ % d % d + % d to % d + % d \ n " , <nl> void VersionSet : : SetupOtherInputs ( Compaction * c ) { <nl> / / Compute the set of grandparent files that overlap this compaction <nl> / / ( parent = = level + 1 ; grandparent = = level + 2 ) <nl> if ( level + 2 < config : : kNumLevels ) { <nl> - GetOverlappingInputs ( level + 2 , all_start , all_limit , & c - > grandparents_ ) ; <nl> + current_ - > GetOverlappingInputs ( level + 2 , & all_start , & all_limit , <nl> + & c - > grandparents_ ) ; <nl> } <nl> <nl> if ( false ) { <nl> Log ( options_ - > info_log , " Compacting % d ' % s ' . . ' % s ' " , <nl> level , <nl> - EscapeString ( smallest . Encode ( ) ) . c_str ( ) , <nl> - EscapeString ( largest . Encode ( ) ) . c_str ( ) ) ; <nl> + smallest . DebugString ( ) . c_str ( ) , <nl> + largest . DebugString ( ) . c_str ( ) ) ; <nl> } <nl> <nl> / / Update the place where we will do the next compaction for this level . <nl> void VersionSet : : SetupOtherInputs ( Compaction * c ) { <nl> <nl> Compaction * VersionSet : : CompactRange ( <nl> int level , <nl> - const InternalKey & begin , <nl> - const InternalKey & end ) { <nl> + const InternalKey * begin , <nl> + const InternalKey * end ) { <nl> std : : vector < FileMetaData * > inputs ; <nl> - GetOverlappingInputs ( level , begin , end , & inputs ) ; <nl> + current_ - > GetOverlappingInputs ( level , begin , end , & inputs ) ; <nl> if ( inputs . empty ( ) ) { <nl> return NULL ; <nl> } <nl> <nl> + / / Avoid compacting too much in one shot in case the range is large . 
<nl> + const uint64_t limit = MaxFileSizeForLevel ( level ) ; <nl> + uint64_t total = 0 ; <nl> + for ( int i = 0 ; i < inputs . size ( ) ; i + + ) { <nl> + uint64_t s = inputs [ i ] - > file_size ; <nl> + total + = s ; <nl> + if ( total > = limit ) { <nl> + inputs . resize ( i + 1 ) ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> Compaction * c = new Compaction ( level ) ; <nl> c - > input_version_ = current_ ; <nl> c - > input_version_ - > Ref ( ) ; <nl> mmm a / db / version_set . h <nl> ppp b / db / version_set . h <nl> extern int FindFile ( const InternalKeyComparator & icmp , <nl> const Slice & key ) ; <nl> <nl> / / Returns true iff some file in " files " overlaps the user key range <nl> - / / [ smallest , largest ] . <nl> + / / [ * smallest , * largest ] . <nl> + / / smallest = = NULL represents a key smaller than all keys in the DB . <nl> + / / largest = = NULL represents a key largest than all keys in the DB . <nl> + / / REQUIRES : If disjoint_sorted_files , files [ ] contains disjoint ranges <nl> + / / in sorted order . <nl> extern bool SomeFileOverlapsRange ( <nl> const InternalKeyComparator & icmp , <nl> + bool disjoint_sorted_files , <nl> const std : : vector < FileMetaData * > & files , <nl> - const Slice & smallest_user_key , <nl> - const Slice & largest_user_key ) ; <nl> + const Slice * smallest_user_key , <nl> + const Slice * largest_user_key ) ; <nl> <nl> class Version { <nl> public : <nl> class Version { <nl> void Ref ( ) ; <nl> void Unref ( ) ; <nl> <nl> + void GetOverlappingInputs ( <nl> + int level , <nl> + const InternalKey * begin , / / NULL means before all keys <nl> + const InternalKey * end , / / NULL means after all keys <nl> + std : : vector < FileMetaData * > * inputs ) ; <nl> + <nl> / / Returns true iff some file in the specified level overlaps <nl> - / / some part of [ smallest_user_key , largest_user_key ] . <nl> + / / some part of [ * smallest_user_key , * largest_user_key ] . <nl> + / / smallest_user_key = = NULL represents a key smaller than all keys in the DB . <nl> + / / largest_user_key = = NULL represents a key largest than all keys in the DB . <nl> bool OverlapInLevel ( int level , <nl> - const Slice & smallest_user_key , <nl> - const Slice & largest_user_key ) ; <nl> + const Slice * smallest_user_key , <nl> + const Slice * largest_user_key ) ; <nl> + <nl> + / / Return the level at which we should place a new memtable compaction <nl> + / / result that covers the range [ smallest_user_key , largest_user_key ] . <nl> + int PickLevelForMemTableOutput ( const Slice & smallest_user_key , <nl> + const Slice & largest_user_key ) ; <nl> <nl> int NumFiles ( int level ) const { return files_ [ level ] . size ( ) ; } <nl> <nl> class VersionSet { <nl> / / the result . <nl> Compaction * CompactRange ( <nl> int level , <nl> - const InternalKey & begin , <nl> - const InternalKey & end ) ; <nl> + const InternalKey * begin , <nl> + const InternalKey * end ) ; <nl> <nl> / / Return the maximum overlapping data ( in bytes ) at next level for any <nl> / / file at a level > = 1 . <nl> class VersionSet { <nl> <nl> void Finalize ( Version * v ) ; <nl> <nl> - void GetOverlappingInputs ( <nl> - int level , <nl> - const InternalKey & begin , <nl> - const InternalKey & end , <nl> - std : : vector < FileMetaData * > * inputs ) ; <nl> - <nl> void GetRange ( const std : : vector < FileMetaData * > & inputs , <nl> InternalKey * smallest , <nl> InternalKey * largest ) ; <nl> mmm a / db / version_set_test . cc <nl> ppp b / db / version_set_test . 
cc <nl> namespace leveldb { <nl> class FindFileTest { <nl> public : <nl> std : : vector < FileMetaData * > files_ ; <nl> + bool disjoint_sorted_files_ ; <nl> + <nl> + FindFileTest ( ) : disjoint_sorted_files_ ( true ) { } <nl> <nl> ~ FindFileTest ( ) { <nl> for ( int i = 0 ; i < files_ . size ( ) ; i + + ) { <nl> class FindFileTest { <nl> <nl> bool Overlaps ( const char * smallest , const char * largest ) { <nl> InternalKeyComparator cmp ( BytewiseComparator ( ) ) ; <nl> - return SomeFileOverlapsRange ( cmp , files_ , smallest , largest ) ; <nl> + Slice s ( smallest ! = NULL ? smallest : " " ) ; <nl> + Slice l ( largest ! = NULL ? largest : " " ) ; <nl> + return SomeFileOverlapsRange ( cmp , disjoint_sorted_files_ , files_ , <nl> + ( smallest ! = NULL ? & s : NULL ) , <nl> + ( largest ! = NULL ? & l : NULL ) ) ; <nl> } <nl> } ; <nl> <nl> TEST ( FindFileTest , Empty ) { <nl> ASSERT_EQ ( 0 , Find ( " foo " ) ) ; <nl> ASSERT_TRUE ( ! Overlaps ( " a " , " z " ) ) ; <nl> + ASSERT_TRUE ( ! Overlaps ( NULL , " z " ) ) ; <nl> + ASSERT_TRUE ( ! Overlaps ( " a " , NULL ) ) ; <nl> + ASSERT_TRUE ( ! Overlaps ( NULL , NULL ) ) ; <nl> } <nl> <nl> TEST ( FindFileTest , Single ) { <nl> TEST ( FindFileTest , Single ) { <nl> ASSERT_TRUE ( Overlaps ( " p1 " , " z " ) ) ; <nl> ASSERT_TRUE ( Overlaps ( " q " , " q " ) ) ; <nl> ASSERT_TRUE ( Overlaps ( " q " , " q1 " ) ) ; <nl> + <nl> + ASSERT_TRUE ( ! Overlaps ( NULL , " j " ) ) ; <nl> + ASSERT_TRUE ( ! Overlaps ( " r " , NULL ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( NULL , " p " ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( NULL , " p1 " ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( " q " , NULL ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( NULL , NULL ) ) ; <nl> } <nl> <nl> <nl> TEST ( FindFileTest , Multiple ) { <nl> ASSERT_TRUE ( Overlaps ( " 450 " , " 500 " ) ) ; <nl> } <nl> <nl> + TEST ( FindFileTest , MultipleNullBoundaries ) { <nl> + Add ( " 150 " , " 200 " ) ; <nl> + Add ( " 200 " , " 250 " ) ; <nl> + Add ( " 300 " , " 350 " ) ; <nl> + Add ( " 400 " , " 450 " ) ; <nl> + ASSERT_TRUE ( ! Overlaps ( NULL , " 149 " ) ) ; <nl> + ASSERT_TRUE ( ! Overlaps ( " 451 " , NULL ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( NULL , NULL ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( NULL , " 150 " ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( NULL , " 199 " ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( NULL , " 200 " ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( NULL , " 201 " ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( NULL , " 400 " ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( NULL , " 800 " ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( " 100 " , NULL ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( " 200 " , NULL ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( " 449 " , NULL ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( " 450 " , NULL ) ) ; <nl> + } <nl> + <nl> TEST ( FindFileTest , OverlapSequenceChecks ) { <nl> Add ( " 200 " , " 200 " , 5000 , 3000 ) ; <nl> ASSERT_TRUE ( ! Overlaps ( " 199 " , " 199 " ) ) ; <nl> TEST ( FindFileTest , OverlapSequenceChecks ) { <nl> ASSERT_TRUE ( Overlaps ( " 200 " , " 210 " ) ) ; <nl> } <nl> <nl> + TEST ( FindFileTest , OverlappingFiles ) { <nl> + Add ( " 150 " , " 600 " ) ; <nl> + Add ( " 400 " , " 500 " ) ; <nl> + disjoint_sorted_files_ = false ; <nl> + ASSERT_TRUE ( ! Overlaps ( " 100 " , " 149 " ) ) ; <nl> + ASSERT_TRUE ( ! 
Overlaps ( " 601 " , " 700 " ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( " 100 " , " 150 " ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( " 100 " , " 200 " ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( " 100 " , " 300 " ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( " 100 " , " 400 " ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( " 100 " , " 500 " ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( " 375 " , " 400 " ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( " 450 " , " 450 " ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( " 450 " , " 500 " ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( " 450 " , " 700 " ) ) ; <nl> + ASSERT_TRUE ( Overlaps ( " 600 " , " 700 " ) ) ; <nl> + } <nl> + <nl> } <nl> <nl> int main ( int argc , char * * argv ) { <nl> mmm a / doc / index . html <nl> ppp b / doc / index . html <nl> < h1 > Snapshots < / h1 > <nl> If < code > ReadOptions : : snapshot < / code > is NULL , the read will operate on an <nl> implicit snapshot of the current state . <nl> < p > <nl> - Snapshots typically are created by the DB : : GetSnapshot ( ) method : <nl> + Snapshots are created by the DB : : GetSnapshot ( ) method : <nl> < p > <nl> < pre > <nl> leveldb : : ReadOptions options ; <nl> < h1 > Snapshots < / h1 > <nl> using the DB : : ReleaseSnapshot interface . This allows the <nl> implementation to get rid of state that was being maintained just to <nl> support reading as of that snapshot . <nl> - < p > <nl> - A Write operation can also return a snapshot that <nl> - represents the state of the database just after applying a particular <nl> - set of updates : <nl> - < p > <nl> - < pre > <nl> - leveldb : : Snapshot * snapshot ; <nl> - leveldb : : WriteOptions write_options ; <nl> - write_options . post_write_snapshot = & amp ; snapshot ; <nl> - leveldb : : Status status = db - & gt ; Write ( write_options , . . . ) ; <nl> - . . . perform other mutations to db . . . <nl> - <nl> - leveldb : : ReadOptions read_options ; <nl> - read_options . snapshot = snapshot ; <nl> - leveldb : : Iterator * iter = db - & gt ; NewIterator ( read_options ) ; <nl> - . . . read as of the state just after the Write call returned . . . <nl> - delete iter ; <nl> - <nl> - db - & gt ; ReleaseSnapshot ( snapshot ) ; <nl> - < / pre > <nl> < h1 > Slice < / h1 > <nl> < p > <nl> The return value of the < code > it - > key ( ) < / code > and < code > it - > value ( ) < / code > calls above <nl> mmm a / include / leveldb / db . h <nl> ppp b / include / leveldb / db . h <nl> class DB { <nl> / / where < N > is an ASCII representation of a level number ( e . g . " 0 " ) . <nl> / / " leveldb . stats " - returns a multi - line string that describes statistics <nl> / / about the internal operation of the DB . <nl> + / / " leveldb . sstables " - returns a multi - line string that describes all <nl> + / / of the sstables that make up the db contents . <nl> virtual bool GetProperty ( const Slice & property , std : : string * value ) = 0 ; <nl> <nl> / / For each i in [ 0 , n - 1 ] , store in " sizes [ i ] " , the approximate <nl> class DB { <nl> virtual void GetApproximateSizes ( const Range * range , int n , <nl> uint64_t * sizes ) = 0 ; <nl> <nl> - / / Possible extensions : <nl> - / / ( 1 ) Add a method to compact a range of keys <nl> + / / Compact the underlying storage for the key range [ * begin , * end ] . <nl> + / / In particular , deleted and overwritten versions are discarded , <nl> + / / and the data is rearranged to reduce the cost of operations <nl> + / / needed to access the data . This operation should typically only <nl> + / / be invoked by users who understand the underlying implementation . 
<nl> + / / <nl> + / / begin = = NULL is treated as a key before all keys in the database . <nl> + / / end = = NULL is treated as a key after all keys in the database . <nl> + / / Therefore the following call will compact the entire database : <nl> + / / db - > CompactRange ( NULL , NULL ) ; <nl> + virtual void CompactRange ( const Slice * begin , const Slice * end ) = 0 ; <nl> <nl> private : <nl> / / No copying allowed <nl> mmm a / include / leveldb / env . h <nl> ppp b / include / leveldb / env . h <nl> class SequentialFile { <nl> / / Read up to " n " bytes from the file . " scratch [ 0 . . n - 1 ] " may be <nl> / / written by this routine . Sets " * result " to the data that was <nl> / / read ( including if fewer than " n " bytes were successfully read ) . <nl> + / / May set " * result " to point at data in " scratch [ 0 . . n - 1 ] " , so <nl> + / / " scratch [ 0 . . n - 1 ] " must be live when " * result " is used . <nl> / / If an error was encountered , returns a non - OK status . <nl> / / <nl> / / REQUIRES : External synchronization <nl> class RandomAccessFile { <nl> / / Read up to " n " bytes from the file starting at " offset " . <nl> / / " scratch [ 0 . . n - 1 ] " may be written by this routine . Sets " * result " <nl> / / to the data that was read ( including if fewer than " n " bytes were <nl> - / / successfully read ) . If an error was encountered , returns a <nl> - / / non - OK status . <nl> + / / successfully read ) . May set " * result " to point at data in <nl> + / / " scratch [ 0 . . n - 1 ] " , so " scratch [ 0 . . n - 1 ] " must be live when <nl> + / / " * result " is used . If an error was encountered , returns a non - OK <nl> + / / status . <nl> / / <nl> / / Safe for concurrent use by multiple threads . <nl> virtual Status Read ( uint64_t offset , size_t n , Slice * result , <nl> mmm a / include / leveldb / options . h <nl> ppp b / include / leveldb / options . h <nl> struct WriteOptions { <nl> / / Default : false <nl> bool sync ; <nl> <nl> - / / If " post_write_snapshot " is non - NULL , and the write succeeds , <nl> - / / * post_write_snapshot will be modified to point to a snapshot of <nl> - / / the DB state immediately after this write . The caller must call <nl> - / / DB : : ReleaseSnapshot ( * post_write_snapshotsnapshot ) when the <nl> - / / snapshot is no longer needed . <nl> - / / <nl> - / / If " post_write_snapshot " is non - NULL , and the write fails , <nl> - / / * post_write_snapshot will be set to NULL . <nl> - / / <nl> - / / Default : NULL <nl> - const Snapshot * * post_write_snapshot ; <nl> - <nl> WriteOptions ( ) <nl> - : sync ( false ) , <nl> - post_write_snapshot ( NULL ) { <nl> + : sync ( false ) { <nl> } <nl> } ; <nl> <nl> mmm a / util / coding . h <nl> ppp b / util / coding . 
h <nl> inline uint32_t DecodeFixed32 ( const char * ptr ) { <nl> memcpy ( & result , ptr , sizeof ( result ) ) ; / / gcc optimizes this to a plain load <nl> return result ; <nl> } else { <nl> - return ( ( static_cast < uint32_t > ( ptr [ 0 ] ) ) <nl> - | ( static_cast < uint32_t > ( ptr [ 1 ] ) < < 8 ) <nl> - | ( static_cast < uint32_t > ( ptr [ 2 ] ) < < 16 ) <nl> - | ( static_cast < uint32_t > ( ptr [ 3 ] ) < < 24 ) ) ; <nl> + return ( ( static_cast < uint32_t > ( static_cast < unsigned char > ( ptr [ 0 ] ) ) ) <nl> + | ( static_cast < uint32_t > ( static_cast < unsigned char > ( ptr [ 1 ] ) ) < < 8 ) <nl> + | ( static_cast < uint32_t > ( static_cast < unsigned char > ( ptr [ 2 ] ) ) < < 16 ) <nl> + | ( static_cast < uint32_t > ( static_cast < unsigned char > ( ptr [ 3 ] ) ) < < 24 ) ) ; <nl> } <nl> } <nl> <nl> mmm a / util / posix_logger . h <nl> ppp b / util / posix_logger . h <nl> <nl> - / / Copyright 2011 Google Inc . All Rights Reserved . <nl> - / / Author : sanjay @ google . com ( Sanjay Ghemawat ) <nl> + / / Copyright ( c ) 2011 The LevelDB Authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . See the AUTHORS file for names of contributors . <nl> / / <nl> / / Logger implementation that can be shared by all environments <nl> / / where enough posix functionality is available . <nl>
|
A number of bugfixes :
|
facebook/rocksdb
|
299ccedfeca1fb3497978c288e76008a5c08e899
|
2011-10-05T23:30:28Z
|
mmm a / dbms / src / Processors / Transforms / AggregatingTransform . cpp <nl> ppp b / dbms / src / Processors / Transforms / AggregatingTransform . cpp <nl> namespace <nl> { <nl> / / / Convert block to chunk . <nl> / / / Adds additional info about aggregation . <nl> - static Chunk convertToChunk ( const Block & block ) <nl> + Chunk convertToChunk ( const Block & block ) <nl> { <nl> auto info = std : : make_shared < AggregatedChunkInfo > ( ) ; <nl> info - > bucket_num = block . info . bucket_num ; <nl> namespace <nl> return chunk ; <nl> } <nl> <nl> - static const AggregatedChunkInfo * getInfoFromChunk ( const Chunk & chunk ) <nl> + const AggregatedChunkInfo * getInfoFromChunk ( const Chunk & chunk ) <nl> { <nl> auto & info = chunk . getChunkInfo ( ) ; <nl> if ( ! info ) <nl>
|
Update AggregatingTransform . cpp
|
ClickHouse/ClickHouse
|
aff87dbd82e9c4e82a66adf3ae0e678163a77607
|
2020-01-25T20:36:25Z
|
mmm a / tensorflow / core / framework / op_kernel . cc <nl> ppp b / tensorflow / core / framework / op_kernel . cc <nl> Status OpKernelConstruction : : allocate_persistent ( <nl> <nl> / / OpKernelContext mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> + const int OpKernelContext : : Params : : kNeverForward ; <nl> + const int OpKernelContext : : Params : : kNoReservation ; <nl> + <nl> OpKernelContext : : OpKernelContext ( Params * params ) <nl> : OpKernelContext ( <nl> params , static_cast < int > ( params - > op_kernel - > output_types ( ) . size ( ) ) ) { } <nl>
|
Add definitions for OpKernelContext : : Params constants .
|
tensorflow/tensorflow
|
dd783b82916fa9da90ed31b63f040151c7418d4f
|
2018-11-13T19:05:59Z
|
mmm a / xbmc / Application . cpp <nl> ppp b / xbmc / Application . cpp <nl> void CApplication : : OnPlayBackResumed ( ) <nl> CVariant param ; <nl> param [ " player " ] [ " speed " ] = 1 ; <nl> param [ " player " ] [ " playerid " ] = CServiceBroker : : GetPlaylistPlayer ( ) . GetCurrentPlaylist ( ) ; <nl> - CAnnouncementManager : : GetInstance ( ) . Announce ( Player , " xbmc " , " OnPlay " , m_itemCurrentFile , param ) ; <nl> + CAnnouncementManager : : GetInstance ( ) . Announce ( Player , " xbmc " , " OnResume " , m_itemCurrentFile , param ) ; <nl> } <nl> <nl> void CApplication : : OnPlayBackSpeedChanged ( int iSpeed ) <nl> void CApplication : : OnAVStarted ( const CFileItem & file ) <nl> <nl> CGUIMessage msg ( GUI_MSG_PLAYBACK_AVSTARTED , 0 , 0 ) ; <nl> CServiceBroker : : GetGUI ( ) - > GetWindowManager ( ) . SendThreadMessage ( msg ) ; <nl> + <nl> + CVariant param ; <nl> + param [ " player " ] [ " speed " ] = 1 ; <nl> + param [ " player " ] [ " playerid " ] = CServiceBroker : : GetPlaylistPlayer ( ) . GetCurrentPlaylist ( ) ; <nl> + CAnnouncementManager : : GetInstance ( ) . Announce ( Player , " xbmc " , " OnAVStart " , m_itemCurrentFile , param ) ; <nl> } <nl> <nl> void CApplication : : OnAVChange ( ) <nl> void CApplication : : OnAVChange ( ) <nl> <nl> CGUIMessage msg ( GUI_MSG_PLAYBACK_AVCHANGE , 0 , 0 ) ; <nl> CServiceBroker : : GetGUI ( ) - > GetWindowManager ( ) . SendThreadMessage ( msg ) ; <nl> + <nl> + CVariant param ; <nl> + param [ " player " ] [ " speed " ] = 1 ; <nl> + param [ " player " ] [ " playerid " ] = CServiceBroker : : GetPlaylistPlayer ( ) . GetCurrentPlaylist ( ) ; <nl> + CAnnouncementManager : : GetInstance ( ) . Announce ( Player , " xbmc " , " OnAVChange " , m_itemCurrentFile , param ) ; <nl> } <nl> <nl> void CApplication : : RequestVideoSettings ( const CFileItem & fileItem ) <nl> mmm a / xbmc / interfaces / json - rpc / schema / notifications . json <nl> ppp b / xbmc / interfaces / json - rpc / schema / notifications . json <nl> <nl> ] , <nl> " returns " : null <nl> } , <nl> + " Player . OnResume " : { <nl> + " type " : " notification " , <nl> + " description " : " Playback of a media item has been resumed . If there is no ID available extra information will be provided . " , <nl> + " params " : [ <nl> + { <nl> + " name " : " sender " , <nl> + " type " : " string " , <nl> + " required " : true <nl> + } , <nl> + { <nl> + " name " : " data " , <nl> + " $ ref " : " Player . Notifications . Data " , <nl> + " required " : true <nl> + } <nl> + ] , <nl> + " returns " : null <nl> + } , <nl> + " Player . OnAVStart " : { <nl> + " type " : " notification " , <nl> + " description " : " Playback of a media item has been started and first frame is available . If there is no ID available extra information will be provided . " , <nl> + " params " : [ <nl> + { <nl> + " name " : " sender " , <nl> + " type " : " string " , <nl> + " required " : true <nl> + } , <nl> + { <nl> + " name " : " data " , <nl> + " $ ref " : " Player . Notifications . Data " , <nl> + " required " : true <nl> + } <nl> + ] , <nl> + " returns " : null <nl> + } , <nl> + " Player . OnAVChange " : { <nl> + " type " : " notification " , <nl> + " description " : " Audio - or videostream has changed . If there is no ID available extra information will be provided . " , <nl> + " params " : [ <nl> + { <nl> + " name " : " sender " , <nl> + " type " : " string " , <nl> + " required " : true <nl> + } , <nl> + { <nl> + " name " : " data " , <nl> + " $ ref " : " Player . Notifications . 
Data " , <nl> + " required " : true <nl> + } <nl> + ] , <nl> + " returns " : null <nl> + } , <nl> " Player . OnPause " : { <nl> " type " : " notification " , <nl> " description " : " Playback of a media item has been paused . If there is no ID available extra information will be provided . " , <nl> mmm a / xbmc / interfaces / json - rpc / schema / version . txt <nl> ppp b / xbmc / interfaces / json - rpc / schema / version . txt <nl> @ @ - 1 + 1 @ @ <nl> - JSONRPC_VERSION 9 . 1 . 1 <nl> + JSONRPC_VERSION 9 . 2 . 0 <nl> mmm a / xbmc / listproviders / DirectoryProvider . cpp <nl> ppp b / xbmc / listproviders / DirectoryProvider . cpp <nl> void CDirectoryProvider : : Announce ( AnnouncementFlag flag , const char * sender , con <nl> if ( flag & Player ) <nl> { <nl> if ( strcmp ( message , " OnPlay " ) = = 0 | | <nl> + strcmp ( message , " OnResume " ) = = 0 | | <nl> strcmp ( message , " OnStop " ) = = 0 ) <nl> { <nl> if ( m_currentSort . sortBy = = SortByNone | | / / not nice , but many directories that need to be refreshed on start / stop have no special sort order ( e . g . in progress movies ) <nl> mmm a / xbmc / network / AirPlayServer . cpp <nl> ppp b / xbmc / network / AirPlayServer . cpp <nl> void CAirPlayServer : : Announce ( AnnouncementFlag flag , const char * sender , const c <nl> <nl> ServerInstance - > AnnounceToClients ( EVENT_STOPPED ) ; <nl> } <nl> - else if ( strcmp ( message , " OnPlay " ) = = 0 ) <nl> + else if ( strcmp ( message , " OnPlay " ) = = 0 | | strcmp ( message , " OnResume " ) = = 0 ) <nl> { <nl> ServerInstance - > AnnounceToClients ( EVENT_PLAYING ) ; <nl> } <nl> mmm a / xbmc / network / AirTunesServer . cpp <nl> ppp b / xbmc / network / AirTunesServer . cpp <nl> void CAirTunesServer : : Announce ( AnnouncementFlag flag , const char * sender , const <nl> { <nl> if ( ( flag & Player ) & & strcmp ( sender , " xbmc " ) = = 0 ) <nl> { <nl> - if ( strcmp ( message , " OnPlay " ) = = 0 & & m_streamStarted ) <nl> + if ( ( strcmp ( message , " OnPlay " ) = = 0 | | strcmp ( message , " OnResume " ) = = 0 ) & & m_streamStarted ) <nl> { <nl> RefreshMetadata ( ) ; <nl> RefreshCoverArt ( ) ; <nl> mmm a / xbmc / network / upnp / UPnPRenderer . cpp <nl> ppp b / xbmc / network / upnp / UPnPRenderer . cpp <nl> CUPnPRenderer : : Announce ( AnnouncementFlag flag , const char * sender , const char * m <nl> if ( flag = = Player ) { <nl> if ( NPT_FAILED ( FindServiceByType ( " urn : schemas - upnp - org : service : AVTransport : 1 " , avt ) ) ) <nl> return ; <nl> - if ( strcmp ( message , " OnPlay " ) = = 0 ) { <nl> + if ( strcmp ( message , " OnPlay " ) = = 0 | | strcmp ( message , " OnResume " ) = = 0 ) { <nl> avt - > SetStateVariable ( " AVTransportURI " , g_application . CurrentFile ( ) . c_str ( ) ) ; <nl> avt - > SetStateVariable ( " CurrentTrackURI " , g_application . CurrentFile ( ) . c_str ( ) ) ; <nl> <nl> mmm a / xbmc / peripherals / devices / PeripheralCecAdapter . cpp <nl> ppp b / xbmc / peripherals / devices / PeripheralCecAdapter . cpp <nl> void CPeripheralCecAdapter : : Announce ( AnnouncementFlag flag , const char * sender , <nl> m_preventActivateSourceOnPlay = CDateTime : : GetCurrentDateTime ( ) ; <nl> m_bOnPlayReceived = false ; <nl> } <nl> - else if ( flag = = Player & & ! strcmp ( sender , " xbmc " ) & & ! strcmp ( message , " OnPlay " ) ) <nl> + else if ( flag = = Player & & ! strcmp ( sender , " xbmc " ) & & ( ! strcmp ( message , " OnPlay " ) | | ! 
strcmp ( message , " OnResume " ) ) ) <nl> { <nl> / / activate the source when playback started , and the option is enabled <nl> bool bActivateSource ( false ) ; <nl> mmm a / xbmc / platform / android / activity / XBMCApp . cpp <nl> ppp b / xbmc / platform / android / activity / XBMCApp . cpp <nl> void CXBMCApp : : Announce ( ANNOUNCEMENT : : AnnouncementFlag flag , const char * sender , <nl> } <nl> else if ( flag & Player ) <nl> { <nl> - if ( strcmp ( message , " OnPlay " ) = = 0 ) <nl> + if ( strcmp ( message , " OnPlay " ) = = 0 | | strcmp ( message , " OnResume " ) = = 0 ) <nl> OnPlayBackStarted ( ) ; <nl> else if ( strcmp ( message , " OnPause " ) = = 0 ) <nl> OnPlayBackPaused ( ) ; <nl> else if ( strcmp ( message , " OnStop " ) = = 0 ) <nl> OnPlayBackStopped ( ) ; <nl> - else if ( strcmp ( message , " OnSeek " ) = = 0 ) <nl> - UpdateSessionState ( ) ; <nl> - else if ( strcmp ( message , " OnSpeedChanged " ) = = 0 ) <nl> - UpdateSessionState ( ) ; <nl> + else if ( strcmp ( message , " OnSeek " ) = = 0 ) <nl> + UpdateSessionState ( ) ; <nl> + else if ( strcmp ( message , " OnSpeedChanged " ) = = 0 ) <nl> + UpdateSessionState ( ) ; <nl> } <nl> else if ( flag & Info ) <nl> { <nl> mmm a / xbmc / platform / darwin / ios - common / AnnounceReceiver . mm <nl> ppp b / xbmc / platform / darwin / ios - common / AnnounceReceiver . mm <nl> void AnnounceBridge ( ANNOUNCEMENT : : AnnouncementFlag flag , const char * sender , con <nl> const std : : string msg ( message ) ; <nl> <nl> / / handle data which only has a database id and not the metadata inside <nl> - if ( msg = = " OnPlay " ) <nl> + if ( msg = = " OnPlay " | | msg = = " OnResume " ) <nl> { <nl> if ( ! nonConstData [ " item " ] . isNull ( ) ) <nl> { <nl> void AnnounceBridge ( ANNOUNCEMENT : : AnnouncementFlag flag , const char * sender , con <nl> / / LOG ( @ " AnnounceBridge : [ % s ] , [ % s ] , [ % s ] " , ANNOUNCEMENT : : AnnouncementFlagToString ( flag ) , sender , message ) ; <nl> NSDictionary * dict = dictionaryFromVariantMap ( nonConstData ) ; <nl> / / LOG ( @ " data : % @ " , dict . description ) ; <nl> - if ( msg = = " OnPlay " ) <nl> + if ( msg = = " OnPlay " | | msg = = " OnResume " ) <nl> { <nl> NSDictionary * item = [ dict valueForKey : @ " item " ] ; <nl> NSDictionary * player = [ dict valueForKey : @ " player " ] ; <nl> mmm a / xbmc / windowing / win10 / WinEventsWin10 . cpp <nl> ppp b / xbmc / windowing / win10 / WinEventsWin10 . cpp <nl> void CWinEventsWin10 : : Announce ( AnnouncementFlag flag , const char * sender , const <nl> bool changed = false ; <nl> MediaPlaybackStatus status = MediaPlaybackStatus : : Changing ; <nl> <nl> - if ( strcmp ( message , " OnPlay " ) = = 0 ) <nl> + if ( strcmp ( message , " OnPlay " ) = = 0 | | strcmp ( message , " OnResume " ) = = 0 ) <nl> { <nl> changed = true ; <nl> status = MediaPlaybackStatus : : Playing ; <nl>
|
[ Breaking change ] Add OnResume , OnAVChange and OnAVStart to jsonRPC notifications
|
xbmc/xbmc
|
5554edd9615ad0f01f2e87411627a2f5b08189ff
|
2018-04-05T22:03:33Z
|
mmm a / dbms / include / DB / Common / HashTable / HashMap . h <nl> ppp b / dbms / include / DB / Common / HashTable / HashMap . h <nl> class HashMapTable : public HashTable < Key , Cell , Hash , Grower , Allocator > <nl> <nl> using HashTable < Key , Cell , Hash , Grower , Allocator > : : HashTable ; <nl> <nl> - mapped_type & operator [ ] ( Key x ) <nl> + mapped_type & ALWAYS_INLINE operator [ ] ( Key x ) <nl> { <nl> typename HashMapTable : : iterator it ; <nl> bool inserted ; <nl> mmm a / dbms / include / DB / Common / HashTable / HashTable . h <nl> ppp b / dbms / include / DB / Common / HashTable / HashTable . h <nl> <nl> <nl> # include < stats / IntHash . h > <nl> <nl> + # include < DB / Core / Defines . h > <nl> # include < DB / Core / Types . h > <nl> # include < DB / Core / Exception . h > <nl> # include < DB / Core / ErrorCodes . h > <nl> class HashTable : <nl> # endif <nl> <nl> / / / Найти ячейку с тем же ключём или пустую ячейку , начиная с заданного места и далее по цепочке разрешения коллизий . <nl> - size_t findCell ( const Key & x , size_t hash_value , size_t place_value ) const <nl> + size_t ALWAYS_INLINE findCell ( const Key & x , size_t hash_value , size_t place_value ) const <nl> { <nl> while ( ! buf [ place_value ] . isZero ( * this ) & & ! buf [ place_value ] . keyEquals ( x , hash_value ) ) <nl> { <nl> class HashTable : <nl> return place_value ; <nl> } <nl> <nl> + / / / Найти пустую ячейку , начиная с заданного места и далее по цепочке разрешения коллизий . <nl> + size_t ALWAYS_INLINE findEmptyCell ( const Key & x , size_t hash_value , size_t place_value ) const <nl> + { <nl> + while ( ! buf [ place_value ] . isZero ( * this ) ) <nl> + { <nl> + place_value = grower . next ( place_value ) ; <nl> + # ifdef DBMS_HASH_MAP_COUNT_COLLISIONS <nl> + + + collisions ; <nl> + # endif <nl> + } <nl> + <nl> + return place_value ; <nl> + } <nl> + <nl> void alloc ( const Grower & new_grower ) <nl> { <nl> buf = reinterpret_cast < Cell * > ( Allocator : : alloc ( new_grower . bufSize ( ) * sizeof ( Cell ) ) ) ; <nl> class HashTable : <nl> <nl> <nl> / / / Если ключ нулевой - вставить его в специальное место и вернуть true . <nl> - bool emplaceIfZero ( Key x , iterator & it , bool & inserted ) <nl> + bool ALWAYS_INLINE emplaceIfZero ( Key x , iterator & it , bool & inserted ) <nl> { <nl> / / / Если утверждается , что нулевой ключ не могут вставить в таблицу . <nl> if ( ! Cell : : need_zero_value_storage ) <nl> class HashTable : <nl> <nl> <nl> / / / Только для ненулевых ключей . Найти нужное место , вставить туда ключ , если его ещё нет , вернуть итератор на ячейку . <nl> - void emplaceNonZero ( Key x , iterator & it , bool & inserted , size_t hash_value ) <nl> + void ALWAYS_INLINE emplaceNonZero ( Key x , iterator & it , bool & inserted , size_t hash_value ) <nl> { <nl> size_t place_value = findCell ( x , hash_value , grower . place ( hash_value ) ) ; <nl> <nl> class HashTable : <nl> <nl> public : <nl> / / / Вставить значение . В случае хоть сколько - нибудь сложных значений , лучше используйте функцию emplace . <nl> - std : : pair < iterator , bool > insert ( const value_type & x ) <nl> + std : : pair < iterator , bool > ALWAYS_INLINE insert ( const value_type & x ) <nl> { <nl> std : : pair < iterator , bool > res ; <nl> <nl> class HashTable : <nl> * if ( inserted ) <nl> * new ( & it - > second ) Mapped ( value ) ; <nl> * / <nl> - void emplace ( Key x , iterator & it , bool & inserted ) <nl> + void ALWAYS_INLINE emplace ( Key x , iterator & it , bool & inserted ) <nl> { <nl> if ( ! 
emplaceIfZero ( x , it , inserted ) ) <nl> emplaceNonZero ( x , it , inserted , hash ( x ) ) ; <nl> class HashTable : <nl> <nl> <nl> / / / То же самое , но с заранее вычисленным значением хэш - функции . <nl> - void emplace ( Key x , iterator & it , bool & inserted , size_t hash_value ) <nl> + void ALWAYS_INLINE emplace ( Key x , iterator & it , bool & inserted , size_t hash_value ) <nl> { <nl> if ( ! emplaceIfZero ( x , it , inserted ) ) <nl> emplaceNonZero ( x , it , inserted , hash_value ) ; <nl> } <nl> <nl> <nl> - iterator find ( Key x ) <nl> + / / / Скопировать ячейку из другой хэш - таблицы . Предполагается , что ячейка не нулевая , а также , что такого ключа в таблице ещё не было . <nl> + / * void ALWAYS_INLINE insertUniqueNonZero ( Cell * cell ) <nl> + { <nl> + size_t hash_value = cell - > getHash ( ) ; <nl> + size_t place_value = findEmptyCell ( cell - > getKey ( ) , hash_value , grower . place ( hash_value ) ) ; <nl> + <nl> + memcpy ( & buf [ place_value ] , cell , sizeof ( * cell ) ) ; <nl> + + + m_size ; <nl> + <nl> + if ( unlikely ( grower . overflow ( m_size ) ) ) <nl> + resize ( ) ; <nl> + } * / <nl> + <nl> + <nl> + iterator ALWAYS_INLINE find ( Key x ) <nl> { <nl> if ( Cell : : isZero ( x , * this ) ) <nl> return this - > hasZero ( ) ? iteratorToZero ( ) : end ( ) ; <nl> class HashTable : <nl> } <nl> <nl> <nl> - const_iterator find ( Key x ) const <nl> + const_iterator ALWAYS_INLINE find ( Key x ) const <nl> { <nl> if ( Cell : : isZero ( x , * this ) ) <nl> return this - > hasZero ( ) ? iteratorToZero ( ) : end ( ) ; <nl> class HashTable : <nl> } <nl> <nl> <nl> - iterator find ( Key x , size_t hash_value ) <nl> + iterator ALWAYS_INLINE find ( Key x , size_t hash_value ) <nl> { <nl> if ( Cell : : isZero ( x , * this ) ) <nl> return this - > hasZero ( ) ? iteratorToZero ( ) : end ( ) ; <nl> class HashTable : <nl> } <nl> <nl> <nl> - const_iterator find ( Key x , size_t hash_value ) const <nl> + const_iterator ALWAYS_INLINE find ( Key x , size_t hash_value ) const <nl> { <nl> if ( Cell : : isZero ( x , * this ) ) <nl> return this - > hasZero ( ) ? iteratorToZero ( ) : end ( ) ; <nl> mmm a / dbms / include / DB / Common / HashTable / TwoLevelHashMap . h <nl> ppp b / dbms / include / DB / Common / HashTable / TwoLevelHashMap . h <nl> class TwoLevelHashMapTable : public TwoLevelHashTable < Key , Cell , Hash , Grower , A <nl> typedef typename Cell : : Mapped mapped_type ; <nl> typedef typename Cell : : value_type value_type ; <nl> <nl> - mapped_type & operator [ ] ( Key x ) <nl> + mapped_type & ALWAYS_INLINE operator [ ] ( Key x ) <nl> { <nl> typename TwoLevelHashMapTable : : iterator it ; <nl> bool inserted ; <nl> mmm a / dbms / include / DB / Common / HashTable / TwoLevelHashTable . h <nl> ppp b / dbms / include / DB / Common / HashTable / TwoLevelHashTable . h <nl> class TwoLevelHashTable : <nl> Impl impls [ NUM_BUCKETS ] ; <nl> <nl> <nl> + TwoLevelHashTable ( ) { } <nl> + <nl> + / / / Скопировать данные из другой ( обычной ) хэш - таблицы . У неё должна быть такая же хэш - функция . <nl> + / * template < typename Source > <nl> + TwoLevelHashTable ( const Source & src ) <nl> + : impls ( src . size ( ) * 15 / NUM_BUCKETS / 10 ) / / / Размер берётся с некоторым запасом . <nl> + { <nl> + for ( typename Source : : const_iterator it = src . begin ( ) ; it ! = src . end ( ) ; + + it ) <nl> + { <nl> + Cell * cell = it . ptr ; <nl> + size_t buck = getBucketFromHash ( hash_value ) ; <nl> + typename Impl : : iterator impl_it ; <nl> + impls [ buck ] . 
emplace ( x , impl_it , inserted , hash_value ) ; <nl> + emplace ( ) ; <nl> + } <nl> + } * / <nl> + <nl> + <nl> class iterator <nl> { <nl> Self * container ; <nl> class TwoLevelHashTable : <nl> <nl> <nl> / / / Вставить значение . В случае хоть сколько - нибудь сложных значений , лучше используйте функцию emplace . <nl> - std : : pair < iterator , bool > insert ( const value_type & x ) <nl> + std : : pair < iterator , bool > ALWAYS_INLINE insert ( const value_type & x ) <nl> { <nl> size_t hash_value = hash ( Cell : : getKey ( x ) ) ; <nl> <nl> class TwoLevelHashTable : <nl> * if ( inserted ) <nl> * new ( & it - > second ) Mapped ( value ) ; <nl> * / <nl> - void emplace ( Key x , iterator & it , bool & inserted ) <nl> + void ALWAYS_INLINE emplace ( Key x , iterator & it , bool & inserted ) <nl> { <nl> size_t hash_value = hash ( x ) ; <nl> emplace ( x , it , inserted , hash_value ) ; <nl> class TwoLevelHashTable : <nl> <nl> <nl> / / / То же самое , но с заранее вычисленным значением хэш - функции . <nl> - void emplace ( Key x , iterator & it , bool & inserted , size_t hash_value ) <nl> + void ALWAYS_INLINE emplace ( Key x , iterator & it , bool & inserted , size_t hash_value ) <nl> { <nl> size_t buck = getBucketFromHash ( hash_value ) ; <nl> typename Impl : : iterator impl_it ; <nl> class TwoLevelHashTable : <nl> } <nl> <nl> <nl> - iterator find ( Key x ) <nl> + iterator ALWAYS_INLINE find ( Key x ) <nl> { <nl> size_t hash_value = hash ( x ) ; <nl> size_t buck = getBucketFromHash ( hash_value ) ; <nl> class TwoLevelHashTable : <nl> } <nl> <nl> <nl> - const_iterator find ( Key x ) const <nl> + const_iterator ALWAYS_INLINE find ( Key x ) const <nl> { <nl> size_t hash_value = hash ( x ) ; <nl> size_t buck = getBucketFromHash ( hash_value ) ; <nl> mmm a / dbms / include / DB / Interpreters / Aggregator . h <nl> ppp b / dbms / include / DB / Interpreters / Aggregator . h <nl> struct AggregatedDataVariants : private boost : : noncopyable <nl> switch ( type ) <nl> { <nl> case Type : : EMPTY : return " EMPTY " ; <nl> - case Type : : without_key : return " WITHOUT_KEY " ; <nl> + case Type : : without_key : return " without_key " ; <nl> <nl> # define M ( NAME , IS_TWO_LEVEL ) \ <nl> case Type : : NAME : return # NAME ; <nl> class Aggregator <nl> AggregateDataPtr overflow_row ) const ; <nl> <nl> <nl> - template < typename Method > <nl> - void convertToBlockImpl ( <nl> - Method & method , <nl> - ColumnPlainPtrs & key_columns , <nl> - AggregateColumnsData & aggregate_columns , <nl> - ColumnPlainPtrs & final_aggregate_columns , <nl> - const Sizes & key_sizes , <nl> - size_t start_row , bool final ) const ; <nl> + / / / Преобразовать из одного типа хэш - таблицы в другой . <nl> + template < typename SrcData , typename DstData > <nl> + static void convertImpl ( SrcData & src , DstData & dst ) ; <nl> <nl> / / / Слить данные из хэш - таблицы src в dst . <nl> template < typename Method , typename Table > <nl> class Aggregator <nl> const Sizes & key_sizes , <nl> StringRefs & keys ) const ; <nl> <nl> + template < typename Method > <nl> + void convertToBlockImpl ( <nl> + Method & method , <nl> + ColumnPlainPtrs & key_columns , <nl> + AggregateColumnsData & aggregate_columns , <nl> + ColumnPlainPtrs & final_aggregate_columns , <nl> + const Sizes & key_sizes , <nl> + size_t start_row , bool final ) const ; <nl> + <nl> template < typename Method > <nl> void destroyImpl ( <nl> Method & method ) const ; <nl> mmm a / dbms / src / Interpreters / Aggregator . cpp <nl> ppp b / dbms / src / Interpreters / Aggregator . 
cpp <nl> void NO_INLINE Aggregator : : executeImplCase ( <nl> } <nl> <nl> <nl> + template < typename SrcData , typename DstData > <nl> + static void Aggregator : : convertImpl ( SrcData & src , DstData & dst ) <nl> + { <nl> + for ( const auto & value : src ) <nl> + dst . insert ( src ) ; <nl> + } <nl> + <nl> + <nl> template < typename Method > <nl> void NO_INLINE Aggregator : : convertToBlockImpl ( <nl> Method & method , <nl>
|
dbms : more scalable aggregator : development [ # METR - 2944 ] .
|
ClickHouse/ClickHouse
|
01526513ae0015d24eae3050399e09b695db7bde
|
2014-12-30T11:27:58Z
|
mmm a / tensorflow / compiler / xla / BUILD <nl> ppp b / tensorflow / compiler / xla / BUILD <nl> package_group ( <nl> ] , <nl> ) <nl> <nl> + load ( " / / tensorflow : tensorflow . bzl " , " cc_header_only_library " ) <nl> load ( " / / tensorflow : tensorflow . bzl " , " tf_cc_test " ) <nl> load ( " / / tensorflow / compiler / xla : xla . bzl " , " xla_proto_library " ) <nl> load ( <nl> mmm a / tensorflow / compiler / xla / util . cc <nl> ppp b / tensorflow / compiler / xla / util . cc <nl> limitations under the License . <nl> # include " tensorflow / core / platform / mutex . h " <nl> # include " tensorflow / core / platform / stacktrace . h " <nl> <nl> + namespace xla { <nl> + <nl> namespace { <nl> tensorflow : : mutex timer_stats_lock ( tensorflow : : LINKER_INITIALIZED ) ; <nl> <nl> struct TimerStats { <nl> double cumulative_secs = 0 ; <nl> double max_secs = 0 ; <nl> - int64 times_called = 0 ; <nl> + uint64 times_called = 0 ; <nl> } ; <nl> <nl> / / Global mapping from timer IDs to timer statistics . <nl> auto & timers_stats GUARDED_BY ( timer_stats_lock ) = <nl> * new absl : : flat_hash_map < uint64 , TimerStats > ( ) ; <nl> } / / namespace <nl> <nl> - namespace xla { <nl> - <nl> Status WithLogBacktrace ( const Status & status ) { <nl> CHECK ( ! status . ok ( ) ) ; <nl> VLOG ( 1 ) < < status . ToString ( ) ; <nl>
|
Undo the removal of an import which was actually required for Ubuntu build .
|
tensorflow/tensorflow
|
35a14f9ea24ea4d83fb6e279b7a2e03ac1c386eb
|
2019-04-18T18:58:17Z
|
mmm a / tensorflow / lite / java / src / main / java / org / tensorflow / lite / Tensor . java <nl> ppp b / tensorflow / lite / java / src / main / java / org / tensorflow / lite / Tensor . java <nl> private void throwIfTypeIsIncompatible ( Object o ) { <nl> DataType oType = dataTypeOf ( o ) ; <nl> <nl> / / INT8 and UINT8 have the same string name , " byte " <nl> - if ( oType . toStringName ( ) = = dtype . toStringName ( ) ) { <nl> + if ( oType . toStringName ( ) . equals ( dtype . toStringName ( ) ) ) { <nl> return ; <nl> } <nl> if ( oType ! = dtype ) { <nl>
|
don ' t use = = for string comparison
|
tensorflow/tensorflow
|
937ffc157f16677357699569d604bb3c76b9dce1
|
2020-02-10T21:51:09Z
|
mmm a / tools / SourceKit / CMakeLists . txt <nl> ppp b / tools / SourceKit / CMakeLists . txt <nl> if ( NOT " $ { CMAKE_SYSTEM_NAME } " STREQUAL " Darwin " ) <nl> set ( SOURCEKIT_NEED_EXPLICIT_LIBDISPATCH TRUE ) <nl> <nl> if ( SWIFT_BUILD_SOURCEKIT ) <nl> - if ( CMAKE_C_COMPILER_ID STREQUAL clang AND <nl> + if ( CMAKE_C_COMPILER_ID STREQUAL Clang AND <nl> CMAKE_C_COMPILER_VERSION VERSION_GREATER 3 . 8 ) <nl> set ( SWIFT_SOURCEKIT_C_COMPILER $ { CMAKE_C_COMPILER } ) <nl> set ( SWIFT_SOURCEKIT_CXX_COMPILER $ { CMAKE_CXX_COMPILER } ) <nl>
|
Merge remote - tracking branch ' origin / master ' into master - next
|
apple/swift
|
9a44b3cccc2d4a6fdca0e72467476f8ec8bda224
|
2018-10-16T01:52:47Z
|
mmm a / addons / resource . language . en_gb / resources / strings . po <nl> ppp b / addons / resource . language . en_gb / resources / strings . po <nl> msgctxt " # 12022 " <nl> msgid " Resume from { 0 : s } " <nl> msgstr " " <nl> <nl> - # . unused ? <nl> - msgctxt " # 12023 " <nl> - msgid " Play from beginning " <nl> - msgstr " " <nl> - <nl> - # empty strings from id 12024 to 12309 <nl> + # empty strings from id 12023 to 12309 <nl> <nl> # : addons / skin . estuary / xml / DialogNumeric . xml <nl> # : addons / skin . estouchy / xml / DialogNumeric . xml <nl>
|
Merge pull request from DaVukovic / fix_strings
|
xbmc/xbmc
|
d17ff7105657228af9009c44ca74f733c268589f
|
2019-04-16T10:47:32Z
|
mmm a / js / map . js <nl> ppp b / js / map . js <nl> jspb . Map . prototype . serializeBinary = function ( <nl> * entries with unset keys is required for maps to be backwards compatible <nl> * with the repeated message representation described here : goo . gl / zuoLAC <nl> * <nl> + * @ param { V = } opt_defaultValue <nl> + * The default value for the type of map values . <nl> + * <nl> * / <nl> jspb . Map . deserializeBinary = function ( map , reader , keyReaderFn , valueReaderFn , <nl> - opt_valueReaderCallback , opt_defaultKey ) { <nl> + opt_valueReaderCallback , opt_defaultKey , <nl> + opt_defaultValue ) { <nl> var key = opt_defaultKey ; <nl> - var value = undefined ; <nl> + var value = opt_defaultValue ; <nl> <nl> while ( reader . nextField ( ) ) { <nl> if ( reader . isEndGroup ( ) ) { <nl> mmm a / src / google / protobuf / compiler / js / js_generator . cc <nl> ppp b / src / google / protobuf / compiler / js / js_generator . cc <nl> void Generator : : GenerateClassDeserializeBinaryField ( <nl> printer - > Print ( " , null " ) ; <nl> } <nl> printer - > Print ( " , $ defaultKey $ " , " defaultKey " , JSFieldDefault ( key_field ) ) ; <nl> + printer - > Print ( " , $ defaultValue $ " , " defaultValue " , JSFieldDefault ( value_field ) ) ; <nl> printer - > Print ( " ) ; \ n " ) ; <nl> printer - > Print ( " } ) ; \ n " ) ; <nl> } else { <nl>
|
Fixed JS parsing of default map values ( )
|
protocolbuffers/protobuf
|
dcc8ffd9c6fab3c82d3d6642a35a584aa0a3f64b
|
2019-07-24T23:32:21Z
|
mmm a / include / grpc + + / client_context . h <nl> ppp b / include / grpc + + / client_context . h <nl> class ClientContext { <nl> / / / There is no guarantee the call will be cancelled . <nl> void TryCancel ( ) ; <nl> <nl> + / / / Global Callbacks <nl> + / / / <nl> + / / / Can be set exactly once per application to install hooks whenever <nl> + / / / a client context is constructed and destructed . <nl> + class GlobalCallbacks { <nl> + public : <nl> + virtual void DefaultConstructor ( ClientContext * context ) = 0 ; <nl> + virtual void Destructor ( ClientContext * context ) = 0 ; <nl> + } ; <nl> + static void SetGlobalCallbacks ( GlobalCallbacks * callbacks ) ; <nl> + <nl> private : <nl> / / Disallow copy and assign . <nl> ClientContext ( const ClientContext & ) ; <nl> mmm a / src / cpp / client / client_context . cc <nl> ppp b / src / cpp / client / client_context . cc <nl> <nl> <nl> namespace grpc { <nl> <nl> + class DefaultGlobalClientCallbacks GRPC_FINAL <nl> + : public ClientContext : : GlobalCallbacks { <nl> + public : <nl> + void DefaultConstructor ( ClientContext * context ) GRPC_OVERRIDE { } <nl> + void Destructor ( ClientContext * context ) GRPC_OVERRIDE { } <nl> + } ; <nl> + <nl> + static DefaultGlobalClientCallbacks g_default_client_callbacks ; <nl> + static ClientContext : : GlobalCallbacks * g_client_callbacks = <nl> + & g_default_client_callbacks ; <nl> + <nl> ClientContext : : ClientContext ( ) <nl> : initial_metadata_received_ ( false ) , <nl> call_ ( nullptr ) , <nl> call_canceled_ ( false ) , <nl> deadline_ ( gpr_inf_future ( GPR_CLOCK_REALTIME ) ) , <nl> - propagate_from_call_ ( nullptr ) { } <nl> + propagate_from_call_ ( nullptr ) { <nl> + g_client_callbacks - > DefaultConstructor ( this ) ; <nl> + } <nl> <nl> ClientContext : : ~ ClientContext ( ) { <nl> if ( call_ ) { <nl> grpc_call_destroy ( call_ ) ; <nl> } <nl> + g_client_callbacks - > Destructor ( this ) ; <nl> } <nl> <nl> std : : unique_ptr < ClientContext > ClientContext : : FromServerContext ( <nl> grpc : : string ClientContext : : peer ( ) const { <nl> return peer ; <nl> } <nl> <nl> + void ClientContext : : SetGlobalCallbacks ( GlobalCallbacks * client_callbacks ) { <nl> + GPR_ASSERT ( g_client_callbacks = = & g_default_client_callbacks ) ; <nl> + GPR_ASSERT ( client_callbacks ! = NULL ) ; <nl> + GPR_ASSERT ( client_callbacks ! = & g_default_client_callbacks ) ; <nl> + g_client_callbacks = client_callbacks ; <nl> + } <nl> + <nl> } / / namespace grpc <nl>
|
Merge pull request from bogdandrutu / master
|
grpc/grpc
|
b113a2e4c7dec14dca471c4362f9f0aaa13beda4
|
2015-12-15T00:18:49Z
|
new file mode 100644 <nl> index 00000000000 . . 3ae46ca955e <nl> mmm / dev / null <nl> ppp b / . gitattributes <nl> @ @ - 0 , 0 + 1 @ @ <nl> + run - test text eol = lf <nl> mmm a / MachineLearning / CNTK / CNTK . cpp <nl> ppp b / MachineLearning / CNTK / CNTK . cpp <nl> int wmain ( int argc , wchar_t * argv [ ] ) <nl> fcloseOrDie ( fp ) ; <nl> } <nl> fprintf ( stderr , " COMPLETED \ n " ) ; <nl> - } <nl> + fflush ( stderr ) ; <nl> + } <nl> catch ( const std : : exception & err ) <nl> { <nl> fprintf ( stderr , " EXCEPTION occurred : % s \ n " , err . what ( ) ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 8a3bb6c6f32 <nl> mmm / dev / null <nl> ppp b / Tests / Speech / QuickE2E / baseline . windows . cpu . txt <nl> <nl> + = = = Running / cygdrive / c / Users / svcphil / workspace . vlivan / CNTK - Build - Windows / x64 / release / cntk . exe configFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ QuickE2E \ cntk . config RunDir = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_cpu DataDir = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data DeviceId = Auto <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + Build info : <nl> + <nl> + Built time : Aug 11 2015 16 : 18 : 17 <nl> + Last modified date : Tue Aug 11 16 : 16 : 08 2015 <nl> + Built by svcphil on dphaim - 26 - new <nl> + Build Path : C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ MachineLearning \ CNTK \ <nl> + CUDA_PATH : C : \ Program Files \ NVIDIA GPU Computing Toolkit \ CUDA \ v7 . 0 <nl> + Build Branch : master <nl> + Build SHA1 : 397cc7cc16c00b1c12864d331c0729fde7a1bde3 <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + running on dphaim - 26 - new at 2015 / 08 / 11 17 : 47 : 10 <nl> + command line options : <nl> + configFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ QuickE2E \ cntk . config RunDir = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_cpu DataDir = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data DeviceId = Auto <nl> + <nl> + > > > > > > > > > > > > > > > > > > > > RAW CONFIG ( VARIABLES NOT RESOLVED ) > > > > > > > > > > > > > > > > > > > > <nl> + precision = float <nl> + command = speechTrain <nl> + deviceId = $ DeviceId $ <nl> + parallelTrain = false <nl> + speechTrain = [ <nl> + action = train <nl> + modelPath = $ RunDir $ / models / cntkSpeech . dnn <nl> + deviceId = $ DeviceId $ <nl> + traceLevel = 1 <nl> + SimpleNetworkBuilder = [ <nl> + layerSizes = 363 : 512 : 512 : 132 <nl> + trainingCriterion = CrossEntropyWithSoftmax <nl> + evalCriterion = ErrorPrediction <nl> + layerTypes = Sigmoid <nl> + initValueScale = 1 . 0 <nl> + applyMeanVarNorm = true <nl> + uniformInit = true <nl> + needPrior = true <nl> + ] <nl> + SGD = [ <nl> + epochSize = 20480 <nl> + minibatchSize = 64 : 256 : 1024 : <nl> + learningRatesPerMB = 1 . 0 : 0 . 5 : 0 . 1 <nl> + numMBsToShowResult = 10 <nl> + momentumPerMB = 0 . 9 : 0 . 656119 <nl> + dropoutRate = 0 . 0 <nl> + maxEpochs = 3 <nl> + keepCheckPointFiles = true <nl> + AutoAdjust = [ <nl> + reduceLearnRateIfImproveLessThan = 0 <nl> + loadBestModel = true <nl> + increaseLearnRateIfImproveMoreThan = 1000000000 <nl> + learnRateDecreaseFactor = 0 . 
5 <nl> + learnRateIncreaseFactor = 1 . 382 <nl> + autoAdjustLR = AdjustAfterEpoch <nl> + ] <nl> + clippingThresholdPerSample = 1 # INF <nl> + ] <nl> + reader = [ <nl> + readerType = HTKMLFReader <nl> + readMethod = blockRandomize <nl> + miniBatchMode = Partial <nl> + randomize = Auto <nl> + verbosity = 0 <nl> + features = [ <nl> + dim = 363 <nl> + type = Real <nl> + scpFile = glob_0000 . scp <nl> + ] <nl> + labels = [ <nl> + mlfFile = $ DataDir $ / glob_0000 . mlf <nl> + labelMappingFile = $ DataDir $ / state . list <nl> + labelDim = 132 <nl> + labelType = Category <nl> + ] <nl> + ] <nl> + ] <nl> + RunDir = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_cpu <nl> + DataDir = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data <nl> + DeviceId = Auto <nl> + <nl> + < < < < < < < < < < < < < < < < < < < < RAW CONFIG ( VARIABLES NOT RESOLVED ) < < < < < < < < < < < < < < < < < < < < <nl> + <nl> + > > > > > > > > > > > > > > > > > > > > RAW CONFIG WITH ALL VARIABLES RESOLVED > > > > > > > > > > > > > > > > > > > > <nl> + precision = float <nl> + command = speechTrain <nl> + deviceId = Auto <nl> + parallelTrain = false <nl> + speechTrain = [ <nl> + action = train <nl> + modelPath = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_cpu / models / cntkSpeech . dnn <nl> + deviceId = Auto <nl> + traceLevel = 1 <nl> + SimpleNetworkBuilder = [ <nl> + layerSizes = 363 : 512 : 512 : 132 <nl> + trainingCriterion = CrossEntropyWithSoftmax <nl> + evalCriterion = ErrorPrediction <nl> + layerTypes = Sigmoid <nl> + initValueScale = 1 . 0 <nl> + applyMeanVarNorm = true <nl> + uniformInit = true <nl> + needPrior = true <nl> + ] <nl> + SGD = [ <nl> + epochSize = 20480 <nl> + minibatchSize = 64 : 256 : 1024 : <nl> + learningRatesPerMB = 1 . 0 : 0 . 5 : 0 . 1 <nl> + numMBsToShowResult = 10 <nl> + momentumPerMB = 0 . 9 : 0 . 656119 <nl> + dropoutRate = 0 . 0 <nl> + maxEpochs = 3 <nl> + keepCheckPointFiles = true <nl> + AutoAdjust = [ <nl> + reduceLearnRateIfImproveLessThan = 0 <nl> + loadBestModel = true <nl> + increaseLearnRateIfImproveMoreThan = 1000000000 <nl> + learnRateDecreaseFactor = 0 . 5 <nl> + learnRateIncreaseFactor = 1 . 382 <nl> + autoAdjustLR = AdjustAfterEpoch <nl> + ] <nl> + clippingThresholdPerSample = 1 # INF <nl> + ] <nl> + reader = [ <nl> + readerType = HTKMLFReader <nl> + readMethod = blockRandomize <nl> + miniBatchMode = Partial <nl> + randomize = Auto <nl> + verbosity = 0 <nl> + features = [ <nl> + dim = 363 <nl> + type = Real <nl> + scpFile = glob_0000 . scp <nl> + ] <nl> + labels = [ <nl> + mlfFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / glob_0000 . mlf <nl> + labelMappingFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / state . list <nl> + labelDim = 132 <nl> + labelType = Category <nl> + ] <nl> + ] <nl> + ] <nl> + RunDir = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_cpu <nl> + DataDir = C : \ Users \ svcphil \ workspace . 
vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data <nl> + DeviceId = Auto <nl> + <nl> + < < < < < < < < < < < < < < < < < < < < RAW CONFIG WITH ALL VARIABLES RESOLVED < < < < < < < < < < < < < < < < < < < < <nl> + <nl> + > > > > > > > > > > > > > > > > > > > > PROCESSED CONFIG WITH ALL VARIABLES RESOLVED > > > > > > > > > > > > > > > > > > > > <nl> + configparameters : cntk . config : command = speechTrain <nl> + configparameters : cntk . config : DataDir = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data <nl> + configparameters : cntk . config : deviceId = Auto <nl> + configparameters : cntk . config : parallelTrain = false <nl> + configparameters : cntk . config : precision = float <nl> + configparameters : cntk . config : RunDir = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_cpu <nl> + configparameters : cntk . config : speechTrain = [ <nl> + action = train <nl> + modelPath = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_cpu / models / cntkSpeech . dnn <nl> + deviceId = Auto <nl> + traceLevel = 1 <nl> + SimpleNetworkBuilder = [ <nl> + layerSizes = 363 : 512 : 512 : 132 <nl> + trainingCriterion = CrossEntropyWithSoftmax <nl> + evalCriterion = ErrorPrediction <nl> + layerTypes = Sigmoid <nl> + initValueScale = 1 . 0 <nl> + applyMeanVarNorm = true <nl> + uniformInit = true <nl> + needPrior = true <nl> + ] <nl> + SGD = [ <nl> + epochSize = 20480 <nl> + minibatchSize = 64 : 256 : 1024 : <nl> + learningRatesPerMB = 1 . 0 : 0 . 5 : 0 . 1 <nl> + numMBsToShowResult = 10 <nl> + momentumPerMB = 0 . 9 : 0 . 656119 <nl> + dropoutRate = 0 . 0 <nl> + maxEpochs = 3 <nl> + keepCheckPointFiles = true <nl> + AutoAdjust = [ <nl> + reduceLearnRateIfImproveLessThan = 0 <nl> + loadBestModel = true <nl> + increaseLearnRateIfImproveMoreThan = 1000000000 <nl> + learnRateDecreaseFactor = 0 . 5 <nl> + learnRateIncreaseFactor = 1 . 382 <nl> + autoAdjustLR = AdjustAfterEpoch <nl> + ] <nl> + clippingThresholdPerSample = 1 # INF <nl> + ] <nl> + reader = [ <nl> + readerType = HTKMLFReader <nl> + readMethod = blockRandomize <nl> + miniBatchMode = Partial <nl> + randomize = Auto <nl> + verbosity = 0 <nl> + features = [ <nl> + dim = 363 <nl> + type = Real <nl> + scpFile = glob_0000 . scp <nl> + ] <nl> + labels = [ <nl> + mlfFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / glob_0000 . mlf <nl> + labelMappingFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / state . list <nl> + labelDim = 132 <nl> + labelType = Category <nl> + ] <nl> + ] <nl> + ] <nl> + <nl> + < < < < < < < < < < < < < < < < < < < < PROCESSED CONFIG WITH ALL VARIABLES RESOLVED < < < < < < < < < < < < < < < < < < < < <nl> + command : speechTrain <nl> + precision = float <nl> + LockDevice : Capture device 1 and lock it for exclusive use <nl> + LockDevice : Capture device 2 and lock it for exclusive use <nl> + LockDevice : Capture device 3 and lock it for exclusive use <nl> + LockDevice : Capture device 0 and lock it for exclusive use <nl> + LockDevice : Capture device 1 and lock it for exclusive use <nl> + SimpleNetworkBuilder Using GPU 1 <nl> + reading script file glob_0000 . scp . . . 
948 entries <nl> + trainlayer : OOV - exclusion code enabled , but no unigram specified to derive the word set from , so you won ' t get OOV exclusion <nl> + total 132 state names in state list C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / state . list <nl> + htkmlfreader : reading MLF file C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / glob_0000 . mlf . . . total 948 entries <nl> + . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . feature set 0 : 252734 frames in 948 out of 948 utterances <nl> + label set 0 : 129 classes <nl> + minibatchutterancesource : 948 utterances grouped into 3 chunks , av . chunk size : 316 . 0 utterances , 84244 . 7 frames <nl> + GetTrainCriterionNodes . . . <nl> + GetEvalCriterionNodes . . . <nl> + <nl> + <nl> + Validating node CrossEntropyWithSoftmax <nl> + <nl> + Validating - - > labels = InputValue <nl> + Validating - - > W2 = LearnableParameter <nl> + Validating - - > W1 = LearnableParameter <nl> + Validating - - > W0 = LearnableParameter <nl> + Validating - - > features = InputValue <nl> + Validating - - > MeanOfFeatures = Mean ( features [ 363 , 3 ] ) <nl> + Validating - - > InvStdOfFeatures = InvStdDev ( features [ 363 , 3 ] ) <nl> + Validating - - > MVNormalizedFeatures = PerDimMeanVarNormalization ( features [ 363 , 3 ] , MeanOfFeatures [ 363 , 1 ] , InvStdOfFeatures [ 363 , 1 ] ) <nl> + Validating - - > W0 * features = Times ( W0 [ 512 , 363 ] , MVNormalizedFeatures [ 363 , 3 ] ) <nl> + Validating - - > B0 = LearnableParameter <nl> + Validating - - > W0 * features + B0 = Plus ( W0 * features [ 512 , 3 ] , B0 [ 512 , 1 ] ) <nl> + Validating - - > H1 = Sigmoid ( W0 * features + B0 [ 512 , 3 ] ) <nl> + Validating - - > W1 * H1 = Times ( W1 [ 512 , 512 ] , H1 [ 512 , 3 ] ) <nl> + Validating - - > B1 = LearnableParameter <nl> + Validating - - > W1 * H1 + B1 = Plus ( W1 * H1 [ 512 , 3 ] , B1 [ 512 , 1 ] ) <nl> + Validating - - > H2 = Sigmoid ( W1 * H1 + B1 [ 512 , 3 ] ) <nl> + Validating - - > W2 * H1 = Times ( W2 [ 132 , 512 ] , H2 [ 512 , 3 ] ) <nl> + Validating - - > B2 = LearnableParameter <nl> + Validating - - > HLast = Plus ( W2 * H1 [ 132 , 3 ] , B2 [ 132 , 1 ] ) <nl> + Validating - - > CrossEntropyWithSoftmax = CrossEntropyWithSoftmax ( labels [ 132 , 3 ] , HLast [ 132 , 3 ] ) <nl> + <nl> + Found 3 PreCompute nodes <nl> + NodeName : InvStdOfFeatures <nl> + NodeName : MeanOfFeatures <nl> + NodeName : Prior <nl> + minibatchiterator : epoch 0 : frames [ 0 . . 252734 ] ( first utterance at frame 0 ) with 1 datapasses <nl> + requiredata : determined feature kind as 33 - dimensional ' USER ' with frame shift 10 . 0 ms <nl> + <nl> + <nl> + Validating node InvStdOfFeatures <nl> + <nl> + Validating - - > features = InputValue <nl> + Validating - - > InvStdOfFeatures = InvStdDev ( features [ 363 , 64 ] ) <nl> + <nl> + <nl> + <nl> + Validating node MeanOfFeatures <nl> + <nl> + Validating - - > features = InputValue <nl> + Validating - - > MeanOfFeatures = Mean ( features [ 363 , 64 ] ) <nl> + <nl> + <nl> + <nl> + Validating node Prior <nl> + <nl> + Validating - - > labels = InputValue <nl> + Validating - - > Prior = Mean ( labels [ 132 , 64 ] ) <nl> + <nl> + Set Max Temp Mem Size For Convolution Nodes to 0 samples . <nl> + Starting Epoch 1 : learning rate per sample = 0 . 015625 momentum = 0 . 
900000 <nl> + minibatchiterator : epoch 0 : frames [ 0 . . 20480 ] ( first utterance at frame 0 ) with 1 datapasses <nl> + <nl> + <nl> + Validating node EvalErrorPrediction <nl> + <nl> + Validating - - > labels = InputValue <nl> + Validating - - > W2 = LearnableParameter <nl> + Validating - - > W1 = LearnableParameter <nl> + Validating - - > W0 = LearnableParameter <nl> + Validating - - > features = InputValue <nl> + Validating - - > MeanOfFeatures = Mean ( features [ 363 , 64 ] ) <nl> + Validating - - > InvStdOfFeatures = InvStdDev ( features [ 363 , 64 ] ) <nl> + Validating - - > MVNormalizedFeatures = PerDimMeanVarNormalization ( features [ 363 , 64 ] , MeanOfFeatures [ 363 , 1 ] , InvStdOfFeatures [ 363 , 1 ] ) <nl> + Validating - - > W0 * features = Times ( W0 [ 512 , 363 ] , MVNormalizedFeatures [ 363 , 64 ] ) <nl> + Validating - - > B0 = LearnableParameter <nl> + Validating - - > W0 * features + B0 = Plus ( W0 * features [ 512 , 64 ] , B0 [ 512 , 1 ] ) <nl> + Validating - - > H1 = Sigmoid ( W0 * features + B0 [ 512 , 64 ] ) <nl> + Validating - - > W1 * H1 = Times ( W1 [ 512 , 512 ] , H1 [ 512 , 64 ] ) <nl> + Validating - - > B1 = LearnableParameter <nl> + Validating - - > W1 * H1 + B1 = Plus ( W1 * H1 [ 512 , 64 ] , B1 [ 512 , 1 ] ) <nl> + Validating - - > H2 = Sigmoid ( W1 * H1 + B1 [ 512 , 64 ] ) <nl> + Validating - - > W2 * H1 = Times ( W2 [ 132 , 512 ] , H2 [ 512 , 64 ] ) <nl> + Validating - - > B2 = LearnableParameter <nl> + Validating - - > HLast = Plus ( W2 * H1 [ 132 , 64 ] , B2 [ 132 , 1 ] ) <nl> + Validating - - > EvalErrorPrediction = ErrorPrediction ( labels [ 132 , 64 ] , HLast [ 132 , 64 ] ) <nl> + <nl> + Epoch [ 1 of 3 ] - Minibatch [ 1 - 10 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 4 . 45646143 ; EvalErr [ 0 ] PerSample = 0 . 92500001 ; TotalTime = 0 . 01913s ; TotalTimePerSample = 0 . 02988ms ; SamplesPerSecond = 33462 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 11 - 20 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 4 . 22315693 ; EvalErr [ 0 ] PerSample = 0 . 90156251 ; TotalTime = 0 . 01453s ; TotalTimePerSample = 0 . 02270ms ; SamplesPerSecond = 44043 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 21 - 30 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 95180511 ; EvalErr [ 0 ] PerSample = 0 . 84687501 ; TotalTime = 0 . 01459s ; TotalTimePerSample = 0 . 02279ms ; SamplesPerSecond = 43874 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 31 - 40 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 94157934 ; EvalErr [ 0 ] PerSample = 0 . 89843750 ; TotalTime = 0 . 01459s ; TotalTimePerSample = 0 . 02280ms ; SamplesPerSecond = 43859 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 41 - 50 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 85668945 ; EvalErr [ 0 ] PerSample = 0 . 91093749 ; TotalTime = 0 . 01456s ; TotalTimePerSample = 0 . 02275ms ; SamplesPerSecond = 43953 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 51 - 60 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 72866368 ; EvalErr [ 0 ] PerSample = 0 . 89531249 ; TotalTime = 0 . 01450s ; TotalTimePerSample = 0 . 02265ms ; SamplesPerSecond = 44140 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 61 - 70 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 51809072 ; EvalErr [ 0 ] PerSample = 0 . 82968748 ; TotalTime = 0 . 01453s ; TotalTimePerSample = 0 . 02271ms ; SamplesPerSecond = 44034 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 71 - 80 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 48454905 ; EvalErr [ 0 ] PerSample = 0 . 80781251 ; TotalTime = 0 . 01452s ; TotalTimePerSample = 0 . 
02269ms ; SamplesPerSecond = 44074 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 81 - 90 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 33829641 ; EvalErr [ 0 ] PerSample = 0 . 76875001 ; TotalTime = 0 . 01453s ; TotalTimePerSample = 0 . 02271ms ; SamplesPerSecond = 44037 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 91 - 100 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 50167227 ; EvalErr [ 0 ] PerSample = 0 . 79843748 ; TotalTime = 0 . 01447s ; TotalTimePerSample = 0 . 02261ms ; SamplesPerSecond = 44229 <nl> + WARNING : The same matrix with dim [ 1 , 1 ] has been transferred between different devices for 20 times . <nl> + Epoch [ 1 of 3 ] - Minibatch [ 101 - 110 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 22861624 ; EvalErr [ 0 ] PerSample = 0 . 80000001 ; TotalTime = 0 . 01459s ; TotalTimePerSample = 0 . 02279ms ; SamplesPerSecond = 43874 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 111 - 120 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 32616878 ; EvalErr [ 0 ] PerSample = 0 . 79062498 ; TotalTime = 0 . 01449s ; TotalTimePerSample = 0 . 02264ms ; SamplesPerSecond = 44174 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 121 - 130 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 16897583 ; EvalErr [ 0 ] PerSample = 0 . 77968752 ; TotalTime = 0 . 01448s ; TotalTimePerSample = 0 . 02262ms ; SamplesPerSecond = 44201 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 131 - 140 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 08891916 ; EvalErr [ 0 ] PerSample = 0 . 77656251 ; TotalTime = 0 . 01442s ; TotalTimePerSample = 0 . 02253ms ; SamplesPerSecond = 44385 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 141 - 150 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 06004953 ; EvalErr [ 0 ] PerSample = 0 . 72968751 ; TotalTime = 0 . 01454s ; TotalTimePerSample = 0 . 02271ms ; SamplesPerSecond = 44031 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 151 - 160 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 91128540 ; EvalErr [ 0 ] PerSample = 0 . 69531250 ; TotalTime = 0 . 01446s ; TotalTimePerSample = 0 . 02259ms ; SamplesPerSecond = 44272 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 161 - 170 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 90172124 ; EvalErr [ 0 ] PerSample = 0 . 72968751 ; TotalTime = 0 . 01450s ; TotalTimePerSample = 0 . 02266ms ; SamplesPerSecond = 44128 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 171 - 180 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 73261714 ; EvalErr [ 0 ] PerSample = 0 . 65312499 ; TotalTime = 0 . 01447s ; TotalTimePerSample = 0 . 02261ms ; SamplesPerSecond = 44232 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 181 - 190 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 66515493 ; EvalErr [ 0 ] PerSample = 0 . 68437499 ; TotalTime = 0 . 01453s ; TotalTimePerSample = 0 . 02270ms ; SamplesPerSecond = 44061 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 191 - 200 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 67383432 ; EvalErr [ 0 ] PerSample = 0 . 66406250 ; TotalTime = 0 . 01449s ; TotalTimePerSample = 0 . 02264ms ; SamplesPerSecond = 44165 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 201 - 210 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 52869272 ; EvalErr [ 0 ] PerSample = 0 . 63593751 ; TotalTime = 0 . 01450s ; TotalTimePerSample = 0 . 02266ms ; SamplesPerSecond = 44134 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 211 - 220 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 60032344 ; EvalErr [ 0 ] PerSample = 0 . 66718751 ; TotalTime = 0 . 01450s ; TotalTimePerSample = 0 . 
02266ms ; SamplesPerSecond = 44128 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 221 - 230 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 51134038 ; EvalErr [ 0 ] PerSample = 0 . 64843750 ; TotalTime = 0 . 01452s ; TotalTimePerSample = 0 . 02268ms ; SamplesPerSecond = 44086 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 231 - 240 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 45362544 ; EvalErr [ 0 ] PerSample = 0 . 63749999 ; TotalTime = 0 . 01452s ; TotalTimePerSample = 0 . 02269ms ; SamplesPerSecond = 44068 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 241 - 250 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 41640615 ; EvalErr [ 0 ] PerSample = 0 . 61562502 ; TotalTime = 0 . 01445s ; TotalTimePerSample = 0 . 02258ms ; SamplesPerSecond = 44287 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 251 - 260 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 39745474 ; EvalErr [ 0 ] PerSample = 0 . 62812501 ; TotalTime = 0 . 01447s ; TotalTimePerSample = 0 . 02261ms ; SamplesPerSecond = 44229 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 261 - 270 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 16415405 ; EvalErr [ 0 ] PerSample = 0 . 56718749 ; TotalTime = 0 . 01454s ; TotalTimePerSample = 0 . 02272ms ; SamplesPerSecond = 44013 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 271 - 280 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 30347300 ; EvalErr [ 0 ] PerSample = 0 . 63593751 ; TotalTime = 0 . 01454s ; TotalTimePerSample = 0 . 02272ms ; SamplesPerSecond = 44016 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 281 - 290 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 24398804 ; EvalErr [ 0 ] PerSample = 0 . 60937500 ; TotalTime = 0 . 01446s ; TotalTimePerSample = 0 . 02260ms ; SamplesPerSecond = 44253 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 291 - 300 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 15322256 ; EvalErr [ 0 ] PerSample = 0 . 57968748 ; TotalTime = 0 . 01447s ; TotalTimePerSample = 0 . 02262ms ; SamplesPerSecond = 44214 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 301 - 310 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 21664429 ; EvalErr [ 0 ] PerSample = 0 . 59531248 ; TotalTime = 0 . 01448s ; TotalTimePerSample = 0 . 02262ms ; SamplesPerSecond = 44208 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 311 - 320 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 25246572 ; EvalErr [ 0 ] PerSample = 0 . 60156250 ; TotalTime = 0 . 01442s ; TotalTimePerSample = 0 . 02253ms ; SamplesPerSecond = 44392 <nl> + Finished Epoch [ 1 ] : [ Training Set ] TrainLossPerSample = 3 . 0000031 ; EvalErrPerSample = 0 . 72836918 ; Ave LearnRatePerSample = 0 . 015625 ; EpochTime = 0 . 4851 <nl> + Starting Epoch 2 : learning rate per sample = 0 . 001953 momentum = 0 . 656119 <nl> + minibatchiterator : epoch 1 : frames [ 20480 . . 40960 ] ( first utterance at frame 20480 ) with 1 datapasses <nl> + Epoch [ 2 of 3 ] - Minibatch [ 1 - 10 of 80 ] : SamplesSeen = 2560 ; TrainLossPerSample = 2 . 08151960 ; EvalErr [ 0 ] PerSample = 0 . 55859375 ; TotalTime = 0 . 03149s ; TotalTimePerSample = 0 . 01230ms ; SamplesPerSecond = 81290 <nl> + Epoch [ 2 of 3 ] - Minibatch [ 11 - 20 of 80 ] : SamplesSeen = 2560 ; TrainLossPerSample = 1 . 98395634 ; EvalErr [ 0 ] PerSample = 0 . 54257810 ; TotalTime = 0 . 02336s ; TotalTimePerSample = 0 . 00913ms ; SamplesPerSecond = 109570 <nl> + Epoch [ 2 of 3 ] - Minibatch [ 21 - 30 of 80 ] : SamplesSeen = 2560 ; TrainLossPerSample = 1 . 98575521 ; EvalErr [ 0 ] PerSample = 0 . 54492188 ; TotalTime = 0 . 02325s ; TotalTimePerSample = 0 . 
00908ms ; SamplesPerSecond = 110116 <nl> + Epoch [ 2 of 3 ] - Minibatch [ 31 - 40 of 80 ] : SamplesSeen = 2560 ; TrainLossPerSample = 1 . 90484965 ; EvalErr [ 0 ] PerSample = 0 . 53164065 ; TotalTime = 0 . 02321s ; TotalTimePerSample = 0 . 00906ms ; SamplesPerSecond = 110316 <nl> + Epoch [ 2 of 3 ] - Minibatch [ 41 - 50 of 80 ] : SamplesSeen = 2560 ; TrainLossPerSample = 1 . 88324130 ; EvalErr [ 0 ] PerSample = 0 . 52539063 ; TotalTime = 0 . 02328s ; TotalTimePerSample = 0 . 00909ms ; SamplesPerSecond = 109975 <nl> + Epoch [ 2 of 3 ] - Minibatch [ 51 - 60 of 80 ] : SamplesSeen = 2560 ; TrainLossPerSample = 1 . 89109266 ; EvalErr [ 0 ] PerSample = 0 . 53359377 ; TotalTime = 0 . 02325s ; TotalTimePerSample = 0 . 00908ms ; SamplesPerSecond = 110093 <nl> + Epoch [ 2 of 3 ] - Minibatch [ 61 - 70 of 80 ] : SamplesSeen = 2560 ; TrainLossPerSample = 1 . 89496076 ; EvalErr [ 0 ] PerSample = 0 . 52890623 ; TotalTime = 0 . 02326s ; TotalTimePerSample = 0 . 00909ms ; SamplesPerSecond = 110055 <nl> + Epoch [ 2 of 3 ] - Minibatch [ 71 - 80 of 80 ] : SamplesSeen = 2560 ; TrainLossPerSample = 1 . 85944366 ; EvalErr [ 0 ] PerSample = 0 . 52265626 ; TotalTime = 0 . 02296s ; TotalTimePerSample = 0 . 00897ms ; SamplesPerSecond = 111473 <nl> + Finished Epoch [ 2 ] : [ Training Set ] TrainLossPerSample = 1 . 9356024 ; EvalErrPerSample = 0 . 53603518 ; Ave LearnRatePerSample = 0 . 001953125 ; EpochTime = 0 . 195263 <nl> + Starting Epoch 3 : learning rate per sample = 0 . 000098 momentum = 0 . 656119 <nl> + minibatchiterator : epoch 2 : frames [ 40960 . . 61440 ] ( first utterance at frame 40960 ) with 1 datapasses <nl> + Epoch [ 3 of 3 ] - Minibatch [ 1 - 10 of 20 ] : SamplesSeen = 10240 ; TrainLossPerSample = 1 . 86752820 ; EvalErr [ 0 ] PerSample = 0 . 52177733 ; TotalTime = 0 . 08160s ; TotalTimePerSample = 0 . 00797ms ; SamplesPerSecond = 125485 <nl> + Epoch [ 3 of 3 ] - Minibatch [ 11 - 20 of 20 ] : SamplesSeen = 10240 ; TrainLossPerSample = 1 . 87358737 ; EvalErr [ 0 ] PerSample = 0 . 51542968 ; TotalTime = 0 . 05742s ; TotalTimePerSample = 0 . 00561ms ; SamplesPerSecond = 178319 <nl> + Finished Epoch [ 3 ] : [ Training Set ] TrainLossPerSample = 1 . 8705578 ; EvalErrPerSample = 0 . 5186035 ; Ave LearnRatePerSample = 9 . 765625146e - 005 ; EpochTime = 0 . 142001 <nl> + COMPLETED <nl> + = = = Deleting last epoch data <nl> + = = = = Re - running from checkpoint <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + Build info : <nl> + <nl> + Built time : Aug 11 2015 16 : 18 : 17 <nl> + Last modified date : Tue Aug 11 16 : 16 : 08 2015 <nl> + Built by svcphil on dphaim - 26 - new <nl> + Build Path : C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ MachineLearning \ CNTK \ <nl> + CUDA_PATH : C : \ Program Files \ NVIDIA GPU Computing Toolkit \ CUDA \ v7 . 0 <nl> + Build Branch : master <nl> + Build SHA1 : 397cc7cc16c00b1c12864d331c0729fde7a1bde3 <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + running on dphaim - 26 - new at 2015 / 08 / 11 17 : 47 : 19 <nl> + command line options : <nl> + configFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ QuickE2E \ cntk . config RunDir = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_cpu DataDir = C : \ Users \ svcphil \ workspace . 
vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data DeviceId = Auto <nl> + <nl> + > > > > > > > > > > > > > > > > > > > > RAW CONFIG ( VARIABLES NOT RESOLVED ) > > > > > > > > > > > > > > > > > > > > <nl> + precision = float <nl> + command = speechTrain <nl> + deviceId = $ DeviceId $ <nl> + parallelTrain = false <nl> + speechTrain = [ <nl> + action = train <nl> + modelPath = $ RunDir $ / models / cntkSpeech . dnn <nl> + deviceId = $ DeviceId $ <nl> + traceLevel = 1 <nl> + SimpleNetworkBuilder = [ <nl> + layerSizes = 363 : 512 : 512 : 132 <nl> + trainingCriterion = CrossEntropyWithSoftmax <nl> + evalCriterion = ErrorPrediction <nl> + layerTypes = Sigmoid <nl> + initValueScale = 1 . 0 <nl> + applyMeanVarNorm = true <nl> + uniformInit = true <nl> + needPrior = true <nl> + ] <nl> + SGD = [ <nl> + epochSize = 20480 <nl> + minibatchSize = 64 : 256 : 1024 : <nl> + learningRatesPerMB = 1 . 0 : 0 . 5 : 0 . 1 <nl> + numMBsToShowResult = 10 <nl> + momentumPerMB = 0 . 9 : 0 . 656119 <nl> + dropoutRate = 0 . 0 <nl> + maxEpochs = 3 <nl> + keepCheckPointFiles = true <nl> + AutoAdjust = [ <nl> + reduceLearnRateIfImproveLessThan = 0 <nl> + loadBestModel = true <nl> + increaseLearnRateIfImproveMoreThan = 1000000000 <nl> + learnRateDecreaseFactor = 0 . 5 <nl> + learnRateIncreaseFactor = 1 . 382 <nl> + autoAdjustLR = AdjustAfterEpoch <nl> + ] <nl> + clippingThresholdPerSample = 1 # INF <nl> + ] <nl> + reader = [ <nl> + readerType = HTKMLFReader <nl> + readMethod = blockRandomize <nl> + miniBatchMode = Partial <nl> + randomize = Auto <nl> + verbosity = 0 <nl> + features = [ <nl> + dim = 363 <nl> + type = Real <nl> + scpFile = glob_0000 . scp <nl> + ] <nl> + labels = [ <nl> + mlfFile = $ DataDir $ / glob_0000 . mlf <nl> + labelMappingFile = $ DataDir $ / state . list <nl> + labelDim = 132 <nl> + labelType = Category <nl> + ] <nl> + ] <nl> + ] <nl> + RunDir = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_cpu <nl> + DataDir = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data <nl> + DeviceId = Auto <nl> + <nl> + < < < < < < < < < < < < < < < < < < < < RAW CONFIG ( VARIABLES NOT RESOLVED ) < < < < < < < < < < < < < < < < < < < < <nl> + <nl> + > > > > > > > > > > > > > > > > > > > > RAW CONFIG WITH ALL VARIABLES RESOLVED > > > > > > > > > > > > > > > > > > > > <nl> + precision = float <nl> + command = speechTrain <nl> + deviceId = Auto <nl> + parallelTrain = false <nl> + speechTrain = [ <nl> + action = train <nl> + modelPath = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_cpu / models / cntkSpeech . dnn <nl> + deviceId = Auto <nl> + traceLevel = 1 <nl> + SimpleNetworkBuilder = [ <nl> + layerSizes = 363 : 512 : 512 : 132 <nl> + trainingCriterion = CrossEntropyWithSoftmax <nl> + evalCriterion = ErrorPrediction <nl> + layerTypes = Sigmoid <nl> + initValueScale = 1 . 0 <nl> + applyMeanVarNorm = true <nl> + uniformInit = true <nl> + needPrior = true <nl> + ] <nl> + SGD = [ <nl> + epochSize = 20480 <nl> + minibatchSize = 64 : 256 : 1024 : <nl> + learningRatesPerMB = 1 . 0 : 0 . 5 : 0 . 1 <nl> + numMBsToShowResult = 10 <nl> + momentumPerMB = 0 . 9 : 0 . 656119 <nl> + dropoutRate = 0 . 0 <nl> + maxEpochs = 3 <nl> + keepCheckPointFiles = true <nl> + AutoAdjust = [ <nl> + reduceLearnRateIfImproveLessThan = 0 <nl> + loadBestModel = true <nl> + increaseLearnRateIfImproveMoreThan = 1000000000 <nl> + learnRateDecreaseFactor = 0 . 
5 <nl> + learnRateIncreaseFactor = 1 . 382 <nl> + autoAdjustLR = AdjustAfterEpoch <nl> + ] <nl> + clippingThresholdPerSample = 1 # INF <nl> + ] <nl> + reader = [ <nl> + readerType = HTKMLFReader <nl> + readMethod = blockRandomize <nl> + miniBatchMode = Partial <nl> + randomize = Auto <nl> + verbosity = 0 <nl> + features = [ <nl> + dim = 363 <nl> + type = Real <nl> + scpFile = glob_0000 . scp <nl> + ] <nl> + labels = [ <nl> + mlfFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / glob_0000 . mlf <nl> + labelMappingFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / state . list <nl> + labelDim = 132 <nl> + labelType = Category <nl> + ] <nl> + ] <nl> + ] <nl> + RunDir = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_cpu <nl> + DataDir = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data <nl> + DeviceId = Auto <nl> + <nl> + < < < < < < < < < < < < < < < < < < < < RAW CONFIG WITH ALL VARIABLES RESOLVED < < < < < < < < < < < < < < < < < < < < <nl> + <nl> + > > > > > > > > > > > > > > > > > > > > PROCESSED CONFIG WITH ALL VARIABLES RESOLVED > > > > > > > > > > > > > > > > > > > > <nl> + configparameters : cntk . config : command = speechTrain <nl> + configparameters : cntk . config : DataDir = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data <nl> + configparameters : cntk . config : deviceId = Auto <nl> + configparameters : cntk . config : parallelTrain = false <nl> + configparameters : cntk . config : precision = float <nl> + configparameters : cntk . config : RunDir = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_cpu <nl> + configparameters : cntk . config : speechTrain = [ <nl> + action = train <nl> + modelPath = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_cpu / models / cntkSpeech . dnn <nl> + deviceId = Auto <nl> + traceLevel = 1 <nl> + SimpleNetworkBuilder = [ <nl> + layerSizes = 363 : 512 : 512 : 132 <nl> + trainingCriterion = CrossEntropyWithSoftmax <nl> + evalCriterion = ErrorPrediction <nl> + layerTypes = Sigmoid <nl> + initValueScale = 1 . 0 <nl> + applyMeanVarNorm = true <nl> + uniformInit = true <nl> + needPrior = true <nl> + ] <nl> + SGD = [ <nl> + epochSize = 20480 <nl> + minibatchSize = 64 : 256 : 1024 : <nl> + learningRatesPerMB = 1 . 0 : 0 . 5 : 0 . 1 <nl> + numMBsToShowResult = 10 <nl> + momentumPerMB = 0 . 9 : 0 . 656119 <nl> + dropoutRate = 0 . 0 <nl> + maxEpochs = 3 <nl> + keepCheckPointFiles = true <nl> + AutoAdjust = [ <nl> + reduceLearnRateIfImproveLessThan = 0 <nl> + loadBestModel = true <nl> + increaseLearnRateIfImproveMoreThan = 1000000000 <nl> + learnRateDecreaseFactor = 0 . 5 <nl> + learnRateIncreaseFactor = 1 . 382 <nl> + autoAdjustLR = AdjustAfterEpoch <nl> + ] <nl> + clippingThresholdPerSample = 1 # INF <nl> + ] <nl> + reader = [ <nl> + readerType = HTKMLFReader <nl> + readMethod = blockRandomize <nl> + miniBatchMode = Partial <nl> + randomize = Auto <nl> + verbosity = 0 <nl> + features = [ <nl> + dim = 363 <nl> + type = Real <nl> + scpFile = glob_0000 . scp <nl> + ] <nl> + labels = [ <nl> + mlfFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / glob_0000 . 
mlf <nl> + labelMappingFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / state . list <nl> + labelDim = 132 <nl> + labelType = Category <nl> + ] <nl> + ] <nl> + ] <nl> + <nl> + < < < < < < < < < < < < < < < < < < < < PROCESSED CONFIG WITH ALL VARIABLES RESOLVED < < < < < < < < < < < < < < < < < < < < <nl> + command : speechTrain <nl> + precision = float <nl> + LockDevice : Capture device 1 and lock it for exclusive use <nl> + LockDevice : Capture device 2 and lock it for exclusive use <nl> + LockDevice : Capture device 3 and lock it for exclusive use <nl> + LockDevice : Capture device 0 and lock it for exclusive use <nl> + LockDevice : Capture device 1 and lock it for exclusive use <nl> + SimpleNetworkBuilder Using GPU 1 <nl> + reading script file glob_0000 . scp . . . 948 entries <nl> + trainlayer : OOV - exclusion code enabled , but no unigram specified to derive the word set from , so you won ' t get OOV exclusion <nl> + total 132 state names in state list C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / state . list <nl> + htkmlfreader : reading MLF file C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / glob_0000 . mlf . . . total 948 entries <nl> + . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . feature set 0 : 252734 frames in 948 out of 948 utterances <nl> + label set 0 : 129 classes <nl> + minibatchutterancesource : 948 utterances grouped into 3 chunks , av . chunk size : 316 . 0 utterances , 84244 . 7 frames <nl> + Starting from checkpoint . Load Network From File C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_cpu / models / cntkSpeech . dnn . 2 . <nl> + <nl> + <nl> + Printing Gradient Computation Node Order . . . 
<nl> + <nl> + CrossEntropyWithSoftmax [ 0 , 0 ] = CrossEntropyWithSoftmax ( labels [ 132 , 256 ] , HLast [ 0 , 0 ] ) <nl> + HLast [ 0 , 0 ] = Plus ( W2 * H1 [ 0 , 0 ] , B2 [ 132 , 1 ] ) <nl> + B2 [ 132 , 1 ] = LearnableParameter <nl> + W2 * H1 [ 0 , 0 ] = Times ( W2 [ 132 , 512 ] , H2 [ 0 , 0 ] ) <nl> + H2 [ 0 , 0 ] = Sigmoid ( W1 * H1 + B1 [ 0 , 0 ] ) <nl> + W1 * H1 + B1 [ 0 , 0 ] = Plus ( W1 * H1 [ 0 , 0 ] , B1 [ 512 , 1 ] ) <nl> + B1 [ 512 , 1 ] = LearnableParameter <nl> + W1 * H1 [ 0 , 0 ] = Times ( W1 [ 512 , 512 ] , H1 [ 0 , 0 ] ) <nl> + H1 [ 0 , 0 ] = Sigmoid ( W0 * features + B0 [ 0 , 0 ] ) <nl> + W0 * features + B0 [ 0 , 0 ] = Plus ( W0 * features [ 0 , 0 ] , B0 [ 512 , 1 ] ) <nl> + B0 [ 512 , 1 ] = LearnableParameter <nl> + W0 * features [ 0 , 0 ] = Times ( W0 [ 512 , 363 ] , MVNormalizedFeatures [ 0 , 0 ] ) <nl> + MVNormalizedFeatures [ 0 , 0 ] = PerDimMeanVarNormalization ( features [ 363 , 256 ] , MeanOfFeatures [ 363 , 1 ] , InvStdOfFeatures [ 363 , 1 ] ) <nl> + InvStdOfFeatures [ 363 , 1 ] = InvStdDev ( features [ 363 , 256 ] ) <nl> + MeanOfFeatures [ 363 , 1 ] = Mean ( features [ 363 , 256 ] ) <nl> + features [ 363 , 256 ] = InputValue <nl> + W0 [ 512 , 363 ] = LearnableParameter <nl> + W1 [ 512 , 512 ] = LearnableParameter <nl> + W2 [ 132 , 512 ] = LearnableParameter <nl> + labels [ 132 , 256 ] = InputValue <nl> + <nl> + Validating node CrossEntropyWithSoftmax <nl> + <nl> + Validating - - > labels = InputValue <nl> + Validating - - > W2 = LearnableParameter <nl> + Validating - - > W1 = LearnableParameter <nl> + Validating - - > W0 = LearnableParameter <nl> + Validating - - > features = InputValue <nl> + Validating - - > MeanOfFeatures = Mean ( features [ 363 , 256 ] ) <nl> + Validating - - > InvStdOfFeatures = InvStdDev ( features [ 363 , 256 ] ) <nl> + Validating - - > MVNormalizedFeatures = PerDimMeanVarNormalization ( features [ 363 , 256 ] , MeanOfFeatures [ 363 , 1 ] , InvStdOfFeatures [ 363 , 1 ] ) <nl> + Validating - - > W0 * features = Times ( W0 [ 512 , 363 ] , MVNormalizedFeatures [ 363 , 256 ] ) <nl> + Validating - - > B0 = LearnableParameter <nl> + Validating - - > W0 * features + B0 = Plus ( W0 * features [ 512 , 256 ] , B0 [ 512 , 1 ] ) <nl> + Validating - - > H1 = Sigmoid ( W0 * features + B0 [ 512 , 256 ] ) <nl> + Validating - - > W1 * H1 = Times ( W1 [ 512 , 512 ] , H1 [ 512 , 256 ] ) <nl> + Validating - - > B1 = LearnableParameter <nl> + Validating - - > W1 * H1 + B1 = Plus ( W1 * H1 [ 512 , 256 ] , B1 [ 512 , 1 ] ) <nl> + Validating - - > H2 = Sigmoid ( W1 * H1 + B1 [ 512 , 256 ] ) <nl> + Validating - - > W2 * H1 = Times ( W2 [ 132 , 512 ] , H2 [ 512 , 256 ] ) <nl> + Validating - - > B2 = LearnableParameter <nl> + Validating - - > HLast = Plus ( W2 * H1 [ 132 , 256 ] , B2 [ 132 , 1 ] ) <nl> + Validating - - > CrossEntropyWithSoftmax = CrossEntropyWithSoftmax ( labels [ 132 , 256 ] , HLast [ 132 , 256 ] ) <nl> + <nl> + <nl> + <nl> + Validating node ScaledLogLikelihood <nl> + <nl> + Validating - - > W2 = LearnableParameter <nl> + Validating - - > W1 = LearnableParameter <nl> + Validating - - > W0 = LearnableParameter <nl> + Validating - - > features = InputValue <nl> + Validating - - > MeanOfFeatures = Mean ( features [ 363 , 256 ] ) <nl> + Validating - - > InvStdOfFeatures = InvStdDev ( features [ 363 , 256 ] ) <nl> + Validating - - > MVNormalizedFeatures = PerDimMeanVarNormalization ( features [ 363 , 256 ] , MeanOfFeatures [ 363 , 1 ] , InvStdOfFeatures [ 363 , 1 ] ) <nl> + Validating - - > W0 * features = Times ( W0 [ 512 , 363 ] , MVNormalizedFeatures 
[ 363 , 256 ] ) <nl> + Validating - - > B0 = LearnableParameter <nl> + Validating - - > W0 * features + B0 = Plus ( W0 * features [ 512 , 256 ] , B0 [ 512 , 1 ] ) <nl> + Validating - - > H1 = Sigmoid ( W0 * features + B0 [ 512 , 256 ] ) <nl> + Validating - - > W1 * H1 = Times ( W1 [ 512 , 512 ] , H1 [ 512 , 256 ] ) <nl> + Validating - - > B1 = LearnableParameter <nl> + Validating - - > W1 * H1 + B1 = Plus ( W1 * H1 [ 512 , 256 ] , B1 [ 512 , 1 ] ) <nl> + Validating - - > H2 = Sigmoid ( W1 * H1 + B1 [ 512 , 256 ] ) <nl> + Validating - - > W2 * H1 = Times ( W2 [ 132 , 512 ] , H2 [ 512 , 256 ] ) <nl> + Validating - - > B2 = LearnableParameter <nl> + Validating - - > HLast = Plus ( W2 * H1 [ 132 , 256 ] , B2 [ 132 , 1 ] ) <nl> + Validating - - > labels = InputValue <nl> + Validating - - > Prior = Mean ( labels [ 132 , 256 ] ) <nl> + Validating - - > LogOfPrior = Log ( Prior [ 132 , 1 ] ) <nl> + Validating - - > ScaledLogLikelihood = Minus ( HLast [ 132 , 256 ] , LogOfPrior [ 132 , 1 ] ) <nl> + <nl> + <nl> + <nl> + Validating node EvalErrorPrediction <nl> + <nl> + Validating - - > labels = InputValue <nl> + Validating - - > W2 = LearnableParameter <nl> + Validating - - > W1 = LearnableParameter <nl> + Validating - - > W0 = LearnableParameter <nl> + Validating - - > features = InputValue <nl> + Validating - - > MeanOfFeatures = Mean ( features [ 363 , 256 ] ) <nl> + Validating - - > InvStdOfFeatures = InvStdDev ( features [ 363 , 256 ] ) <nl> + Validating - - > MVNormalizedFeatures = PerDimMeanVarNormalization ( features [ 363 , 256 ] , MeanOfFeatures [ 363 , 1 ] , InvStdOfFeatures [ 363 , 1 ] ) <nl> + Validating - - > W0 * features = Times ( W0 [ 512 , 363 ] , MVNormalizedFeatures [ 363 , 256 ] ) <nl> + Validating - - > B0 = LearnableParameter <nl> + Validating - - > W0 * features + B0 = Plus ( W0 * features [ 512 , 256 ] , B0 [ 512 , 1 ] ) <nl> + Validating - - > H1 = Sigmoid ( W0 * features + B0 [ 512 , 256 ] ) <nl> + Validating - - > W1 * H1 = Times ( W1 [ 512 , 512 ] , H1 [ 512 , 256 ] ) <nl> + Validating - - > B1 = LearnableParameter <nl> + Validating - - > W1 * H1 + B1 = Plus ( W1 * H1 [ 512 , 256 ] , B1 [ 512 , 1 ] ) <nl> + Validating - - > H2 = Sigmoid ( W1 * H1 + B1 [ 512 , 256 ] ) <nl> + Validating - - > W2 * H1 = Times ( W2 [ 132 , 512 ] , H2 [ 512 , 256 ] ) <nl> + Validating - - > B2 = LearnableParameter <nl> + Validating - - > HLast = Plus ( W2 * H1 [ 132 , 256 ] , B2 [ 132 , 1 ] ) <nl> + Validating - - > EvalErrorPrediction = ErrorPrediction ( labels [ 132 , 256 ] , HLast [ 132 , 256 ] ) <nl> + <nl> + GetTrainCriterionNodes . . . <nl> + GetEvalCriterionNodes . . . 
<nl> + <nl> + <nl> + Validating node CrossEntropyWithSoftmax <nl> + <nl> + Validating - - > labels = InputValue <nl> + Validating - - > W2 = LearnableParameter <nl> + Validating - - > W1 = LearnableParameter <nl> + Validating - - > W0 = LearnableParameter <nl> + Validating - - > features = InputValue <nl> + Validating - - > MeanOfFeatures = Mean ( features [ 363 , 256 ] ) <nl> + Validating - - > InvStdOfFeatures = InvStdDev ( features [ 363 , 256 ] ) <nl> + Validating - - > MVNormalizedFeatures = PerDimMeanVarNormalization ( features [ 363 , 256 ] , MeanOfFeatures [ 363 , 1 ] , InvStdOfFeatures [ 363 , 1 ] ) <nl> + Validating - - > W0 * features = Times ( W0 [ 512 , 363 ] , MVNormalizedFeatures [ 363 , 256 ] ) <nl> + Validating - - > B0 = LearnableParameter <nl> + Validating - - > W0 * features + B0 = Plus ( W0 * features [ 512 , 256 ] , B0 [ 512 , 1 ] ) <nl> + Validating - - > H1 = Sigmoid ( W0 * features + B0 [ 512 , 256 ] ) <nl> + Validating - - > W1 * H1 = Times ( W1 [ 512 , 512 ] , H1 [ 512 , 256 ] ) <nl> + Validating - - > B1 = LearnableParameter <nl> + Validating - - > W1 * H1 + B1 = Plus ( W1 * H1 [ 512 , 256 ] , B1 [ 512 , 1 ] ) <nl> + Validating - - > H2 = Sigmoid ( W1 * H1 + B1 [ 512 , 256 ] ) <nl> + Validating - - > W2 * H1 = Times ( W2 [ 132 , 512 ] , H2 [ 512 , 256 ] ) <nl> + Validating - - > B2 = LearnableParameter <nl> + Validating - - > HLast = Plus ( W2 * H1 [ 132 , 256 ] , B2 [ 132 , 1 ] ) <nl> + Validating - - > CrossEntropyWithSoftmax = CrossEntropyWithSoftmax ( labels [ 132 , 256 ] , HLast [ 132 , 256 ] ) <nl> + <nl> + No PreCompute nodes found , skipping PreCompute step <nl> + Set Max Temp Mem Size For Convolution Nodes to 0 samples . <nl> + Starting Epoch 3 : learning rate per sample = 0 . 000098 momentum = 0 . 656119 <nl> + minibatchiterator : epoch 2 : frames [ 40960 . . 61440 ] ( first utterance at frame 40960 ) with 1 datapasses <nl> + requiredata : determined feature kind as 33 - dimensional ' USER ' with frame shift 10 . 
0 ms <nl> + <nl> + <nl> + Validating node EvalErrorPrediction <nl> + <nl> + Validating - - > labels = InputValue <nl> + Validating - - > W2 = LearnableParameter <nl> + Validating - - > W1 = LearnableParameter <nl> + Validating - - > W0 = LearnableParameter <nl> + Validating - - > features = InputValue <nl> + Validating - - > MeanOfFeatures = Mean ( features [ 363 , 1024 ] ) <nl> + Validating - - > InvStdOfFeatures = InvStdDev ( features [ 363 , 1024 ] ) <nl> + Validating - - > MVNormalizedFeatures = PerDimMeanVarNormalization ( features [ 363 , 1024 ] , MeanOfFeatures [ 363 , 1 ] , InvStdOfFeatures [ 363 , 1 ] ) <nl> + Validating - - > W0 * features = Times ( W0 [ 512 , 363 ] , MVNormalizedFeatures [ 363 , 1024 ] ) <nl> + Validating - - > B0 = LearnableParameter <nl> + Validating - - > W0 * features + B0 = Plus ( W0 * features [ 512 , 1024 ] , B0 [ 512 , 1 ] ) <nl> + Validating - - > H1 = Sigmoid ( W0 * features + B0 [ 512 , 1024 ] ) <nl> + Validating - - > W1 * H1 = Times ( W1 [ 512 , 512 ] , H1 [ 512 , 1024 ] ) <nl> + Validating - - > B1 = LearnableParameter <nl> + Validating - - > W1 * H1 + B1 = Plus ( W1 * H1 [ 512 , 1024 ] , B1 [ 512 , 1 ] ) <nl> + Validating - - > H2 = Sigmoid ( W1 * H1 + B1 [ 512 , 1024 ] ) <nl> + Validating - - > W2 * H1 = Times ( W2 [ 132 , 512 ] , H2 [ 512 , 1024 ] ) <nl> + Validating - - > B2 = LearnableParameter <nl> + Validating - - > HLast = Plus ( W2 * H1 [ 132 , 1024 ] , B2 [ 132 , 1 ] ) <nl> + Validating - - > EvalErrorPrediction = ErrorPrediction ( labels [ 132 , 1024 ] , HLast [ 132 , 1024 ] ) <nl> + <nl> + Epoch [ 3 of 3 ] - Minibatch [ 1 - 10 of 20 ] : SamplesSeen = 10240 ; TrainLossPerSample = 1 . 86752820 ; EvalErr [ 0 ] PerSample = 0 . 52177733 ; TotalTime = 0 . 40600s ; TotalTimePerSample = 0 . 03965ms ; SamplesPerSecond = 25221 <nl> + Epoch [ 3 of 3 ] - Minibatch [ 11 - 20 of 20 ] : SamplesSeen = 10240 ; TrainLossPerSample = 1 . 87358737 ; EvalErr [ 0 ] PerSample = 0 . 51542968 ; TotalTime = 0 . 05538s ; TotalTimePerSample = 0 . 00541ms ; SamplesPerSecond = 184900 <nl> + Finished Epoch [ 3 ] : [ Training Set ] TrainLossPerSample = 1 . 8705578 ; EvalErrPerSample = 0 . 5186035 ; Ave LearnRatePerSample = 9 . 765625146e - 005 ; EpochTime = 0 . 692077 <nl> + COMPLETED <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . 2ebe3a524d3 <nl> mmm / dev / null <nl> ppp b / Tests / Speech / QuickE2E / baseline . windows . gpu . txt <nl> <nl> + = = = Running / cygdrive / c / Users / svcphil / workspace . vlivan / CNTK - Build - Windows / x64 / release / cntk . exe configFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ QuickE2E \ cntk . config RunDir = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_gpu DataDir = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data DeviceId = Auto <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + Build info : <nl> + <nl> + Built time : Aug 11 2015 16 : 18 : 17 <nl> + Last modified date : Tue Aug 11 16 : 16 : 08 2015 <nl> + Built by svcphil on dphaim - 26 - new <nl> + Build Path : C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ MachineLearning \ CNTK \ <nl> + CUDA_PATH : C : \ Program Files \ NVIDIA GPU Computing Toolkit \ CUDA \ v7 . 
0 <nl> + Build Branch : master <nl> + Build SHA1 : 397cc7cc16c00b1c12864d331c0729fde7a1bde3 <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + running on dphaim - 26 - new at 2015 / 08 / 11 17 : 47 : 26 <nl> + command line options : <nl> + configFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ QuickE2E \ cntk . config RunDir = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_gpu DataDir = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data DeviceId = Auto <nl> + <nl> + > > > > > > > > > > > > > > > > > > > > RAW CONFIG ( VARIABLES NOT RESOLVED ) > > > > > > > > > > > > > > > > > > > > <nl> + precision = float <nl> + command = speechTrain <nl> + deviceId = $ DeviceId $ <nl> + parallelTrain = false <nl> + speechTrain = [ <nl> + action = train <nl> + modelPath = $ RunDir $ / models / cntkSpeech . dnn <nl> + deviceId = $ DeviceId $ <nl> + traceLevel = 1 <nl> + SimpleNetworkBuilder = [ <nl> + layerSizes = 363 : 512 : 512 : 132 <nl> + trainingCriterion = CrossEntropyWithSoftmax <nl> + evalCriterion = ErrorPrediction <nl> + layerTypes = Sigmoid <nl> + initValueScale = 1 . 0 <nl> + applyMeanVarNorm = true <nl> + uniformInit = true <nl> + needPrior = true <nl> + ] <nl> + SGD = [ <nl> + epochSize = 20480 <nl> + minibatchSize = 64 : 256 : 1024 : <nl> + learningRatesPerMB = 1 . 0 : 0 . 5 : 0 . 1 <nl> + numMBsToShowResult = 10 <nl> + momentumPerMB = 0 . 9 : 0 . 656119 <nl> + dropoutRate = 0 . 0 <nl> + maxEpochs = 3 <nl> + keepCheckPointFiles = true <nl> + AutoAdjust = [ <nl> + reduceLearnRateIfImproveLessThan = 0 <nl> + loadBestModel = true <nl> + increaseLearnRateIfImproveMoreThan = 1000000000 <nl> + learnRateDecreaseFactor = 0 . 5 <nl> + learnRateIncreaseFactor = 1 . 382 <nl> + autoAdjustLR = AdjustAfterEpoch <nl> + ] <nl> + clippingThresholdPerSample = 1 # INF <nl> + ] <nl> + reader = [ <nl> + readerType = HTKMLFReader <nl> + readMethod = blockRandomize <nl> + miniBatchMode = Partial <nl> + randomize = Auto <nl> + verbosity = 0 <nl> + features = [ <nl> + dim = 363 <nl> + type = Real <nl> + scpFile = glob_0000 . scp <nl> + ] <nl> + labels = [ <nl> + mlfFile = $ DataDir $ / glob_0000 . mlf <nl> + labelMappingFile = $ DataDir $ / state . list <nl> + labelDim = 132 <nl> + labelType = Category <nl> + ] <nl> + ] <nl> + ] <nl> + RunDir = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_gpu <nl> + DataDir = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data <nl> + DeviceId = Auto <nl> + <nl> + < < < < < < < < < < < < < < < < < < < < RAW CONFIG ( VARIABLES NOT RESOLVED ) < < < < < < < < < < < < < < < < < < < < <nl> + <nl> + > > > > > > > > > > > > > > > > > > > > RAW CONFIG WITH ALL VARIABLES RESOLVED > > > > > > > > > > > > > > > > > > > > <nl> + precision = float <nl> + command = speechTrain <nl> + deviceId = Auto <nl> + parallelTrain = false <nl> + speechTrain = [ <nl> + action = train <nl> + modelPath = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_gpu / models / cntkSpeech . dnn <nl> + deviceId = Auto <nl> + traceLevel = 1 <nl> + SimpleNetworkBuilder = [ <nl> + layerSizes = 363 : 512 : 512 : 132 <nl> + trainingCriterion = CrossEntropyWithSoftmax <nl> + evalCriterion = ErrorPrediction <nl> + layerTypes = Sigmoid <nl> + initValueScale = 1 . 
0 <nl> + applyMeanVarNorm = true <nl> + uniformInit = true <nl> + needPrior = true <nl> + ] <nl> + SGD = [ <nl> + epochSize = 20480 <nl> + minibatchSize = 64 : 256 : 1024 : <nl> + learningRatesPerMB = 1 . 0 : 0 . 5 : 0 . 1 <nl> + numMBsToShowResult = 10 <nl> + momentumPerMB = 0 . 9 : 0 . 656119 <nl> + dropoutRate = 0 . 0 <nl> + maxEpochs = 3 <nl> + keepCheckPointFiles = true <nl> + AutoAdjust = [ <nl> + reduceLearnRateIfImproveLessThan = 0 <nl> + loadBestModel = true <nl> + increaseLearnRateIfImproveMoreThan = 1000000000 <nl> + learnRateDecreaseFactor = 0 . 5 <nl> + learnRateIncreaseFactor = 1 . 382 <nl> + autoAdjustLR = AdjustAfterEpoch <nl> + ] <nl> + clippingThresholdPerSample = 1 # INF <nl> + ] <nl> + reader = [ <nl> + readerType = HTKMLFReader <nl> + readMethod = blockRandomize <nl> + miniBatchMode = Partial <nl> + randomize = Auto <nl> + verbosity = 0 <nl> + features = [ <nl> + dim = 363 <nl> + type = Real <nl> + scpFile = glob_0000 . scp <nl> + ] <nl> + labels = [ <nl> + mlfFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / glob_0000 . mlf <nl> + labelMappingFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / state . list <nl> + labelDim = 132 <nl> + labelType = Category <nl> + ] <nl> + ] <nl> + ] <nl> + RunDir = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_gpu <nl> + DataDir = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data <nl> + DeviceId = Auto <nl> + <nl> + < < < < < < < < < < < < < < < < < < < < RAW CONFIG WITH ALL VARIABLES RESOLVED < < < < < < < < < < < < < < < < < < < < <nl> + <nl> + > > > > > > > > > > > > > > > > > > > > PROCESSED CONFIG WITH ALL VARIABLES RESOLVED > > > > > > > > > > > > > > > > > > > > <nl> + configparameters : cntk . config : command = speechTrain <nl> + configparameters : cntk . config : DataDir = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data <nl> + configparameters : cntk . config : deviceId = Auto <nl> + configparameters : cntk . config : parallelTrain = false <nl> + configparameters : cntk . config : precision = float <nl> + configparameters : cntk . config : RunDir = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_gpu <nl> + configparameters : cntk . config : speechTrain = [ <nl> + action = train <nl> + modelPath = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_gpu / models / cntkSpeech . dnn <nl> + deviceId = Auto <nl> + traceLevel = 1 <nl> + SimpleNetworkBuilder = [ <nl> + layerSizes = 363 : 512 : 512 : 132 <nl> + trainingCriterion = CrossEntropyWithSoftmax <nl> + evalCriterion = ErrorPrediction <nl> + layerTypes = Sigmoid <nl> + initValueScale = 1 . 0 <nl> + applyMeanVarNorm = true <nl> + uniformInit = true <nl> + needPrior = true <nl> + ] <nl> + SGD = [ <nl> + epochSize = 20480 <nl> + minibatchSize = 64 : 256 : 1024 : <nl> + learningRatesPerMB = 1 . 0 : 0 . 5 : 0 . 1 <nl> + numMBsToShowResult = 10 <nl> + momentumPerMB = 0 . 9 : 0 . 656119 <nl> + dropoutRate = 0 . 0 <nl> + maxEpochs = 3 <nl> + keepCheckPointFiles = true <nl> + AutoAdjust = [ <nl> + reduceLearnRateIfImproveLessThan = 0 <nl> + loadBestModel = true <nl> + increaseLearnRateIfImproveMoreThan = 1000000000 <nl> + learnRateDecreaseFactor = 0 . 5 <nl> + learnRateIncreaseFactor = 1 . 
382 <nl> + autoAdjustLR = AdjustAfterEpoch <nl> + ] <nl> + clippingThresholdPerSample = 1 # INF <nl> + ] <nl> + reader = [ <nl> + readerType = HTKMLFReader <nl> + readMethod = blockRandomize <nl> + miniBatchMode = Partial <nl> + randomize = Auto <nl> + verbosity = 0 <nl> + features = [ <nl> + dim = 363 <nl> + type = Real <nl> + scpFile = glob_0000 . scp <nl> + ] <nl> + labels = [ <nl> + mlfFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / glob_0000 . mlf <nl> + labelMappingFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / state . list <nl> + labelDim = 132 <nl> + labelType = Category <nl> + ] <nl> + ] <nl> + ] <nl> + <nl> + < < < < < < < < < < < < < < < < < < < < PROCESSED CONFIG WITH ALL VARIABLES RESOLVED < < < < < < < < < < < < < < < < < < < < <nl> + command : speechTrain <nl> + precision = float <nl> + LockDevice : Capture device 1 and lock it for exclusive use <nl> + LockDevice : Capture device 2 and lock it for exclusive use <nl> + LockDevice : Capture device 3 and lock it for exclusive use <nl> + LockDevice : Capture device 0 and lock it for exclusive use <nl> + LockDevice : Capture device 1 and lock it for exclusive use <nl> + SimpleNetworkBuilder Using GPU 1 <nl> + reading script file glob_0000 . scp . . . 948 entries <nl> + trainlayer : OOV - exclusion code enabled , but no unigram specified to derive the word set from , so you won ' t get OOV exclusion <nl> + total 132 state names in state list C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / state . list <nl> + htkmlfreader : reading MLF file C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / glob_0000 . mlf . . . total 948 entries <nl> + . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . feature set 0 : 252734 frames in 948 out of 948 utterances <nl> + label set 0 : 129 classes <nl> + minibatchutterancesource : 948 utterances grouped into 3 chunks , av . chunk size : 316 . 0 utterances , 84244 . 7 frames <nl> + GetTrainCriterionNodes . . . <nl> + GetEvalCriterionNodes . . . 
<nl> + <nl> + <nl> + Validating node CrossEntropyWithSoftmax <nl> + <nl> + Validating - - > labels = InputValue <nl> + Validating - - > W2 = LearnableParameter <nl> + Validating - - > W1 = LearnableParameter <nl> + Validating - - > W0 = LearnableParameter <nl> + Validating - - > features = InputValue <nl> + Validating - - > MeanOfFeatures = Mean ( features [ 363 , 3 ] ) <nl> + Validating - - > InvStdOfFeatures = InvStdDev ( features [ 363 , 3 ] ) <nl> + Validating - - > MVNormalizedFeatures = PerDimMeanVarNormalization ( features [ 363 , 3 ] , MeanOfFeatures [ 363 , 1 ] , InvStdOfFeatures [ 363 , 1 ] ) <nl> + Validating - - > W0 * features = Times ( W0 [ 512 , 363 ] , MVNormalizedFeatures [ 363 , 3 ] ) <nl> + Validating - - > B0 = LearnableParameter <nl> + Validating - - > W0 * features + B0 = Plus ( W0 * features [ 512 , 3 ] , B0 [ 512 , 1 ] ) <nl> + Validating - - > H1 = Sigmoid ( W0 * features + B0 [ 512 , 3 ] ) <nl> + Validating - - > W1 * H1 = Times ( W1 [ 512 , 512 ] , H1 [ 512 , 3 ] ) <nl> + Validating - - > B1 = LearnableParameter <nl> + Validating - - > W1 * H1 + B1 = Plus ( W1 * H1 [ 512 , 3 ] , B1 [ 512 , 1 ] ) <nl> + Validating - - > H2 = Sigmoid ( W1 * H1 + B1 [ 512 , 3 ] ) <nl> + Validating - - > W2 * H1 = Times ( W2 [ 132 , 512 ] , H2 [ 512 , 3 ] ) <nl> + Validating - - > B2 = LearnableParameter <nl> + Validating - - > HLast = Plus ( W2 * H1 [ 132 , 3 ] , B2 [ 132 , 1 ] ) <nl> + Validating - - > CrossEntropyWithSoftmax = CrossEntropyWithSoftmax ( labels [ 132 , 3 ] , HLast [ 132 , 3 ] ) <nl> + <nl> + Found 3 PreCompute nodes <nl> + NodeName : InvStdOfFeatures <nl> + NodeName : MeanOfFeatures <nl> + NodeName : Prior <nl> + minibatchiterator : epoch 0 : frames [ 0 . . 252734 ] ( first utterance at frame 0 ) with 1 datapasses <nl> + requiredata : determined feature kind as 33 - dimensional ' USER ' with frame shift 10 . 0 ms <nl> + <nl> + <nl> + Validating node InvStdOfFeatures <nl> + <nl> + Validating - - > features = InputValue <nl> + Validating - - > InvStdOfFeatures = InvStdDev ( features [ 363 , 64 ] ) <nl> + <nl> + <nl> + <nl> + Validating node MeanOfFeatures <nl> + <nl> + Validating - - > features = InputValue <nl> + Validating - - > MeanOfFeatures = Mean ( features [ 363 , 64 ] ) <nl> + <nl> + <nl> + <nl> + Validating node Prior <nl> + <nl> + Validating - - > labels = InputValue <nl> + Validating - - > Prior = Mean ( labels [ 132 , 64 ] ) <nl> + <nl> + Set Max Temp Mem Size For Convolution Nodes to 0 samples . <nl> + Starting Epoch 1 : learning rate per sample = 0 . 015625 momentum = 0 . 900000 <nl> + minibatchiterator : epoch 0 : frames [ 0 . . 
20480 ] ( first utterance at frame 0 ) with 1 datapasses <nl> + <nl> + <nl> + Validating node EvalErrorPrediction <nl> + <nl> + Validating - - > labels = InputValue <nl> + Validating - - > W2 = LearnableParameter <nl> + Validating - - > W1 = LearnableParameter <nl> + Validating - - > W0 = LearnableParameter <nl> + Validating - - > features = InputValue <nl> + Validating - - > MeanOfFeatures = Mean ( features [ 363 , 64 ] ) <nl> + Validating - - > InvStdOfFeatures = InvStdDev ( features [ 363 , 64 ] ) <nl> + Validating - - > MVNormalizedFeatures = PerDimMeanVarNormalization ( features [ 363 , 64 ] , MeanOfFeatures [ 363 , 1 ] , InvStdOfFeatures [ 363 , 1 ] ) <nl> + Validating - - > W0 * features = Times ( W0 [ 512 , 363 ] , MVNormalizedFeatures [ 363 , 64 ] ) <nl> + Validating - - > B0 = LearnableParameter <nl> + Validating - - > W0 * features + B0 = Plus ( W0 * features [ 512 , 64 ] , B0 [ 512 , 1 ] ) <nl> + Validating - - > H1 = Sigmoid ( W0 * features + B0 [ 512 , 64 ] ) <nl> + Validating - - > W1 * H1 = Times ( W1 [ 512 , 512 ] , H1 [ 512 , 64 ] ) <nl> + Validating - - > B1 = LearnableParameter <nl> + Validating - - > W1 * H1 + B1 = Plus ( W1 * H1 [ 512 , 64 ] , B1 [ 512 , 1 ] ) <nl> + Validating - - > H2 = Sigmoid ( W1 * H1 + B1 [ 512 , 64 ] ) <nl> + Validating - - > W2 * H1 = Times ( W2 [ 132 , 512 ] , H2 [ 512 , 64 ] ) <nl> + Validating - - > B2 = LearnableParameter <nl> + Validating - - > HLast = Plus ( W2 * H1 [ 132 , 64 ] , B2 [ 132 , 1 ] ) <nl> + Validating - - > EvalErrorPrediction = ErrorPrediction ( labels [ 132 , 64 ] , HLast [ 132 , 64 ] ) <nl> + <nl> + Epoch [ 1 of 3 ] - Minibatch [ 1 - 10 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 4 . 45646143 ; EvalErr [ 0 ] PerSample = 0 . 92500001 ; TotalTime = 0 . 03190s ; TotalTimePerSample = 0 . 04985ms ; SamplesPerSecond = 20061 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 11 - 20 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 4 . 22315693 ; EvalErr [ 0 ] PerSample = 0 . 90156251 ; TotalTime = 0 . 02454s ; TotalTimePerSample = 0 . 03835ms ; SamplesPerSecond = 26075 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 21 - 30 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 95180511 ; EvalErr [ 0 ] PerSample = 0 . 84687501 ; TotalTime = 0 . 02438s ; TotalTimePerSample = 0 . 03809ms ; SamplesPerSecond = 26254 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 31 - 40 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 94157934 ; EvalErr [ 0 ] PerSample = 0 . 89843750 ; TotalTime = 0 . 02445s ; TotalTimePerSample = 0 . 03820ms ; SamplesPerSecond = 26181 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 41 - 50 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 85668945 ; EvalErr [ 0 ] PerSample = 0 . 91093749 ; TotalTime = 0 . 02429s ; TotalTimePerSample = 0 . 03795ms ; SamplesPerSecond = 26352 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 51 - 60 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 72866368 ; EvalErr [ 0 ] PerSample = 0 . 89531249 ; TotalTime = 0 . 02445s ; TotalTimePerSample = 0 . 03820ms ; SamplesPerSecond = 26178 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 61 - 70 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 51809072 ; EvalErr [ 0 ] PerSample = 0 . 82968748 ; TotalTime = 0 . 02423s ; TotalTimePerSample = 0 . 03786ms ; SamplesPerSecond = 26415 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 71 - 80 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 48454905 ; EvalErr [ 0 ] PerSample = 0 . 80781251 ; TotalTime = 0 . 02249s ; TotalTimePerSample = 0 . 
03514ms ; SamplesPerSecond = 28457 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 81 - 90 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 33829641 ; EvalErr [ 0 ] PerSample = 0 . 76875001 ; TotalTime = 0 . 02169s ; TotalTimePerSample = 0 . 03390ms ; SamplesPerSecond = 29501 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 91 - 100 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 50167227 ; EvalErr [ 0 ] PerSample = 0 . 79843748 ; TotalTime = 0 . 02178s ; TotalTimePerSample = 0 . 03403ms ; SamplesPerSecond = 29386 <nl> + WARNING : The same matrix with dim [ 1 , 1 ] has been transferred between different devices for 20 times . <nl> + Epoch [ 1 of 3 ] - Minibatch [ 101 - 110 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 22861624 ; EvalErr [ 0 ] PerSample = 0 . 80000001 ; TotalTime = 0 . 02166s ; TotalTimePerSample = 0 . 03385ms ; SamplesPerSecond = 29546 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 111 - 120 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 32616878 ; EvalErr [ 0 ] PerSample = 0 . 79062498 ; TotalTime = 0 . 02063s ; TotalTimePerSample = 0 . 03224ms ; SamplesPerSecond = 31018 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 121 - 130 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 16897583 ; EvalErr [ 0 ] PerSample = 0 . 77968752 ; TotalTime = 0 . 01950s ; TotalTimePerSample = 0 . 03048ms ; SamplesPerSecond = 32813 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 131 - 140 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 08891916 ; EvalErr [ 0 ] PerSample = 0 . 77656251 ; TotalTime = 0 . 01961s ; TotalTimePerSample = 0 . 03063ms ; SamplesPerSecond = 32644 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 141 - 150 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 3 . 06004953 ; EvalErr [ 0 ] PerSample = 0 . 72968751 ; TotalTime = 0 . 01950s ; TotalTimePerSample = 0 . 03046ms ; SamplesPerSecond = 32825 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 151 - 160 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 91128540 ; EvalErr [ 0 ] PerSample = 0 . 69531250 ; TotalTime = 0 . 01965s ; TotalTimePerSample = 0 . 03070ms ; SamplesPerSecond = 32571 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 161 - 170 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 90172124 ; EvalErr [ 0 ] PerSample = 0 . 72968751 ; TotalTime = 0 . 01828s ; TotalTimePerSample = 0 . 02857ms ; SamplesPerSecond = 35003 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 171 - 180 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 73261714 ; EvalErr [ 0 ] PerSample = 0 . 65312499 ; TotalTime = 0 . 01799s ; TotalTimePerSample = 0 . 02811ms ; SamplesPerSecond = 35569 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 181 - 190 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 66515493 ; EvalErr [ 0 ] PerSample = 0 . 68437499 ; TotalTime = 0 . 01789s ; TotalTimePerSample = 0 . 02796ms ; SamplesPerSecond = 35766 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 191 - 200 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 67383432 ; EvalErr [ 0 ] PerSample = 0 . 66406250 ; TotalTime = 0 . 01792s ; TotalTimePerSample = 0 . 02800ms ; SamplesPerSecond = 35708 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 201 - 210 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 52869272 ; EvalErr [ 0 ] PerSample = 0 . 63593751 ; TotalTime = 0 . 01805s ; TotalTimePerSample = 0 . 02821ms ; SamplesPerSecond = 35451 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 211 - 220 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 60032344 ; EvalErr [ 0 ] PerSample = 0 . 66718751 ; TotalTime = 0 . 01696s ; TotalTimePerSample = 0 . 
02650ms ; SamplesPerSecond = 37738 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 221 - 230 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 51134038 ; EvalErr [ 0 ] PerSample = 0 . 64843750 ; TotalTime = 0 . 01658s ; TotalTimePerSample = 0 . 02591ms ; SamplesPerSecond = 38598 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 231 - 240 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 45362544 ; EvalErr [ 0 ] PerSample = 0 . 63749999 ; TotalTime = 0 . 01663s ; TotalTimePerSample = 0 . 02598ms ; SamplesPerSecond = 38491 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 241 - 250 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 41640615 ; EvalErr [ 0 ] PerSample = 0 . 61562502 ; TotalTime = 0 . 01670s ; TotalTimePerSample = 0 . 02610ms ; SamplesPerSecond = 38321 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 251 - 260 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 39745474 ; EvalErr [ 0 ] PerSample = 0 . 62812501 ; TotalTime = 0 . 01672s ; TotalTimePerSample = 0 . 02612ms ; SamplesPerSecond = 38279 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 261 - 270 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 16415405 ; EvalErr [ 0 ] PerSample = 0 . 56718749 ; TotalTime = 0 . 01621s ; TotalTimePerSample = 0 . 02533ms ; SamplesPerSecond = 39481 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 271 - 280 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 30347300 ; EvalErr [ 0 ] PerSample = 0 . 63593751 ; TotalTime = 0 . 01583s ; TotalTimePerSample = 0 . 02474ms ; SamplesPerSecond = 40427 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 281 - 290 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 24398804 ; EvalErr [ 0 ] PerSample = 0 . 60937500 ; TotalTime = 0 . 01579s ; TotalTimePerSample = 0 . 02467ms ; SamplesPerSecond = 40542 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 291 - 300 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 15322256 ; EvalErr [ 0 ] PerSample = 0 . 57968748 ; TotalTime = 0 . 01582s ; TotalTimePerSample = 0 . 02472ms ; SamplesPerSecond = 40447 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 301 - 310 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 21664429 ; EvalErr [ 0 ] PerSample = 0 . 59531248 ; TotalTime = 0 . 01570s ; TotalTimePerSample = 0 . 02453ms ; SamplesPerSecond = 40761 <nl> + Epoch [ 1 of 3 ] - Minibatch [ 311 - 320 of 320 ] : SamplesSeen = 640 ; TrainLossPerSample = 2 . 25246572 ; EvalErr [ 0 ] PerSample = 0 . 60156250 ; TotalTime = 0 . 01556s ; TotalTimePerSample = 0 . 02431ms ; SamplesPerSecond = 41139 <nl> + Finished Epoch [ 1 ] : [ Training Set ] TrainLossPerSample = 3 . 0000031 ; EvalErrPerSample = 0 . 72836918 ; Ave LearnRatePerSample = 0 . 015625 ; EpochTime = 0 . 657568 <nl> + Starting Epoch 2 : learning rate per sample = 0 . 001953 momentum = 0 . 656119 <nl> + minibatchiterator : epoch 1 : frames [ 20480 . . 40960 ] ( first utterance at frame 20480 ) with 1 datapasses <nl> + Epoch [ 2 of 3 ] - Minibatch [ 1 - 10 of 80 ] : SamplesSeen = 2560 ; TrainLossPerSample = 2 . 08151960 ; EvalErr [ 0 ] PerSample = 0 . 55859375 ; TotalTime = 0 . 03143s ; TotalTimePerSample = 0 . 01228ms ; SamplesPerSecond = 81456 <nl> + Epoch [ 2 of 3 ] - Minibatch [ 11 - 20 of 80 ] : SamplesSeen = 2560 ; TrainLossPerSample = 1 . 98395634 ; EvalErr [ 0 ] PerSample = 0 . 54257810 ; TotalTime = 0 . 02295s ; TotalTimePerSample = 0 . 00896ms ; SamplesPerSecond = 111561 <nl> + Epoch [ 2 of 3 ] - Minibatch [ 21 - 30 of 80 ] : SamplesSeen = 2560 ; TrainLossPerSample = 1 . 98575521 ; EvalErr [ 0 ] PerSample = 0 . 54492188 ; TotalTime = 0 . 02287s ; TotalTimePerSample = 0 . 
00893ms ; SamplesPerSecond = 111951 <nl> + Epoch [ 2 of 3 ] - Minibatch [ 31 - 40 of 80 ] : SamplesSeen = 2560 ; TrainLossPerSample = 1 . 90484965 ; EvalErr [ 0 ] PerSample = 0 . 53164065 ; TotalTime = 0 . 02284s ; TotalTimePerSample = 0 . 00892ms ; SamplesPerSecond = 112069 <nl> + Epoch [ 2 of 3 ] - Minibatch [ 41 - 50 of 80 ] : SamplesSeen = 2560 ; TrainLossPerSample = 1 . 88324130 ; EvalErr [ 0 ] PerSample = 0 . 52539063 ; TotalTime = 0 . 02277s ; TotalTimePerSample = 0 . 00889ms ; SamplesPerSecond = 112448 <nl> + Epoch [ 2 of 3 ] - Minibatch [ 51 - 60 of 80 ] : SamplesSeen = 2560 ; TrainLossPerSample = 1 . 89109266 ; EvalErr [ 0 ] PerSample = 0 . 53359377 ; TotalTime = 0 . 02287s ; TotalTimePerSample = 0 . 00894ms ; SamplesPerSecond = 111917 <nl> + Epoch [ 2 of 3 ] - Minibatch [ 61 - 70 of 80 ] : SamplesSeen = 2560 ; TrainLossPerSample = 1 . 89496076 ; EvalErr [ 0 ] PerSample = 0 . 52890623 ; TotalTime = 0 . 02279s ; TotalTimePerSample = 0 . 00890ms ; SamplesPerSecond = 112325 <nl> + Epoch [ 2 of 3 ] - Minibatch [ 71 - 80 of 80 ] : SamplesSeen = 2560 ; TrainLossPerSample = 1 . 85944366 ; EvalErr [ 0 ] PerSample = 0 . 52265626 ; TotalTime = 0 . 02265s ; TotalTimePerSample = 0 . 00885ms ; SamplesPerSecond = 113044 <nl> + Finished Epoch [ 2 ] : [ Training Set ] TrainLossPerSample = 1 . 9356024 ; EvalErrPerSample = 0 . 53603518 ; Ave LearnRatePerSample = 0 . 001953125 ; EpochTime = 0 . 192318 <nl> + Starting Epoch 3 : learning rate per sample = 0 . 000098 momentum = 0 . 656119 <nl> + minibatchiterator : epoch 2 : frames [ 40960 . . 61440 ] ( first utterance at frame 40960 ) with 1 datapasses <nl> + Epoch [ 3 of 3 ] - Minibatch [ 1 - 10 of 20 ] : SamplesSeen = 10240 ; TrainLossPerSample = 1 . 86752820 ; EvalErr [ 0 ] PerSample = 0 . 52177733 ; TotalTime = 0 . 08080s ; TotalTimePerSample = 0 . 00789ms ; SamplesPerSecond = 126735 <nl> + Epoch [ 3 of 3 ] - Minibatch [ 11 - 20 of 20 ] : SamplesSeen = 10240 ; TrainLossPerSample = 1 . 87358737 ; EvalErr [ 0 ] PerSample = 0 . 51542968 ; TotalTime = 0 . 05544s ; TotalTimePerSample = 0 . 00541ms ; SamplesPerSecond = 184694 <nl> + Finished Epoch [ 3 ] : [ Training Set ] TrainLossPerSample = 1 . 8705578 ; EvalErrPerSample = 0 . 5186035 ; Ave LearnRatePerSample = 9 . 765625146e - 005 ; EpochTime = 0 . 139063 <nl> + COMPLETED <nl> + = = = Deleting last epoch data <nl> + = = = = Re - running from checkpoint <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + Build info : <nl> + <nl> + Built time : Aug 11 2015 16 : 18 : 17 <nl> + Last modified date : Tue Aug 11 16 : 16 : 08 2015 <nl> + Built by svcphil on dphaim - 26 - new <nl> + Build Path : C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ MachineLearning \ CNTK \ <nl> + CUDA_PATH : C : \ Program Files \ NVIDIA GPU Computing Toolkit \ CUDA \ v7 . 0 <nl> + Build Branch : master <nl> + Build SHA1 : 397cc7cc16c00b1c12864d331c0729fde7a1bde3 <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + running on dphaim - 26 - new at 2015 / 08 / 11 17 : 47 : 34 <nl> + command line options : <nl> + configFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ QuickE2E \ cntk . config RunDir = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_gpu DataDir = C : \ Users \ svcphil \ workspace . 
vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data DeviceId = Auto <nl> + <nl> + > > > > > > > > > > > > > > > > > > > > RAW CONFIG ( VARIABLES NOT RESOLVED ) > > > > > > > > > > > > > > > > > > > > <nl> + precision = float <nl> + command = speechTrain <nl> + deviceId = $ DeviceId $ <nl> + parallelTrain = false <nl> + speechTrain = [ <nl> + action = train <nl> + modelPath = $ RunDir $ / models / cntkSpeech . dnn <nl> + deviceId = $ DeviceId $ <nl> + traceLevel = 1 <nl> + SimpleNetworkBuilder = [ <nl> + layerSizes = 363 : 512 : 512 : 132 <nl> + trainingCriterion = CrossEntropyWithSoftmax <nl> + evalCriterion = ErrorPrediction <nl> + layerTypes = Sigmoid <nl> + initValueScale = 1 . 0 <nl> + applyMeanVarNorm = true <nl> + uniformInit = true <nl> + needPrior = true <nl> + ] <nl> + SGD = [ <nl> + epochSize = 20480 <nl> + minibatchSize = 64 : 256 : 1024 : <nl> + learningRatesPerMB = 1 . 0 : 0 . 5 : 0 . 1 <nl> + numMBsToShowResult = 10 <nl> + momentumPerMB = 0 . 9 : 0 . 656119 <nl> + dropoutRate = 0 . 0 <nl> + maxEpochs = 3 <nl> + keepCheckPointFiles = true <nl> + AutoAdjust = [ <nl> + reduceLearnRateIfImproveLessThan = 0 <nl> + loadBestModel = true <nl> + increaseLearnRateIfImproveMoreThan = 1000000000 <nl> + learnRateDecreaseFactor = 0 . 5 <nl> + learnRateIncreaseFactor = 1 . 382 <nl> + autoAdjustLR = AdjustAfterEpoch <nl> + ] <nl> + clippingThresholdPerSample = 1 # INF <nl> + ] <nl> + reader = [ <nl> + readerType = HTKMLFReader <nl> + readMethod = blockRandomize <nl> + miniBatchMode = Partial <nl> + randomize = Auto <nl> + verbosity = 0 <nl> + features = [ <nl> + dim = 363 <nl> + type = Real <nl> + scpFile = glob_0000 . scp <nl> + ] <nl> + labels = [ <nl> + mlfFile = $ DataDir $ / glob_0000 . mlf <nl> + labelMappingFile = $ DataDir $ / state . list <nl> + labelDim = 132 <nl> + labelType = Category <nl> + ] <nl> + ] <nl> + ] <nl> + RunDir = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_gpu <nl> + DataDir = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data <nl> + DeviceId = Auto <nl> + <nl> + < < < < < < < < < < < < < < < < < < < < RAW CONFIG ( VARIABLES NOT RESOLVED ) < < < < < < < < < < < < < < < < < < < < <nl> + <nl> + > > > > > > > > > > > > > > > > > > > > RAW CONFIG WITH ALL VARIABLES RESOLVED > > > > > > > > > > > > > > > > > > > > <nl> + precision = float <nl> + command = speechTrain <nl> + deviceId = Auto <nl> + parallelTrain = false <nl> + speechTrain = [ <nl> + action = train <nl> + modelPath = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_gpu / models / cntkSpeech . dnn <nl> + deviceId = Auto <nl> + traceLevel = 1 <nl> + SimpleNetworkBuilder = [ <nl> + layerSizes = 363 : 512 : 512 : 132 <nl> + trainingCriterion = CrossEntropyWithSoftmax <nl> + evalCriterion = ErrorPrediction <nl> + layerTypes = Sigmoid <nl> + initValueScale = 1 . 0 <nl> + applyMeanVarNorm = true <nl> + uniformInit = true <nl> + needPrior = true <nl> + ] <nl> + SGD = [ <nl> + epochSize = 20480 <nl> + minibatchSize = 64 : 256 : 1024 : <nl> + learningRatesPerMB = 1 . 0 : 0 . 5 : 0 . 1 <nl> + numMBsToShowResult = 10 <nl> + momentumPerMB = 0 . 9 : 0 . 656119 <nl> + dropoutRate = 0 . 0 <nl> + maxEpochs = 3 <nl> + keepCheckPointFiles = true <nl> + AutoAdjust = [ <nl> + reduceLearnRateIfImproveLessThan = 0 <nl> + loadBestModel = true <nl> + increaseLearnRateIfImproveMoreThan = 1000000000 <nl> + learnRateDecreaseFactor = 0 . 
5 <nl> + learnRateIncreaseFactor = 1 . 382 <nl> + autoAdjustLR = AdjustAfterEpoch <nl> + ] <nl> + clippingThresholdPerSample = 1 # INF <nl> + ] <nl> + reader = [ <nl> + readerType = HTKMLFReader <nl> + readMethod = blockRandomize <nl> + miniBatchMode = Partial <nl> + randomize = Auto <nl> + verbosity = 0 <nl> + features = [ <nl> + dim = 363 <nl> + type = Real <nl> + scpFile = glob_0000 . scp <nl> + ] <nl> + labels = [ <nl> + mlfFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / glob_0000 . mlf <nl> + labelMappingFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / state . list <nl> + labelDim = 132 <nl> + labelType = Category <nl> + ] <nl> + ] <nl> + ] <nl> + RunDir = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_gpu <nl> + DataDir = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data <nl> + DeviceId = Auto <nl> + <nl> + < < < < < < < < < < < < < < < < < < < < RAW CONFIG WITH ALL VARIABLES RESOLVED < < < < < < < < < < < < < < < < < < < < <nl> + <nl> + > > > > > > > > > > > > > > > > > > > > PROCESSED CONFIG WITH ALL VARIABLES RESOLVED > > > > > > > > > > > > > > > > > > > > <nl> + configparameters : cntk . config : command = speechTrain <nl> + configparameters : cntk . config : DataDir = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data <nl> + configparameters : cntk . config : deviceId = Auto <nl> + configparameters : cntk . config : parallelTrain = false <nl> + configparameters : cntk . config : precision = float <nl> + configparameters : cntk . config : RunDir = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_gpu <nl> + configparameters : cntk . config : speechTrain = [ <nl> + action = train <nl> + modelPath = C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_gpu / models / cntkSpeech . dnn <nl> + deviceId = Auto <nl> + traceLevel = 1 <nl> + SimpleNetworkBuilder = [ <nl> + layerSizes = 363 : 512 : 512 : 132 <nl> + trainingCriterion = CrossEntropyWithSoftmax <nl> + evalCriterion = ErrorPrediction <nl> + layerTypes = Sigmoid <nl> + initValueScale = 1 . 0 <nl> + applyMeanVarNorm = true <nl> + uniformInit = true <nl> + needPrior = true <nl> + ] <nl> + SGD = [ <nl> + epochSize = 20480 <nl> + minibatchSize = 64 : 256 : 1024 : <nl> + learningRatesPerMB = 1 . 0 : 0 . 5 : 0 . 1 <nl> + numMBsToShowResult = 10 <nl> + momentumPerMB = 0 . 9 : 0 . 656119 <nl> + dropoutRate = 0 . 0 <nl> + maxEpochs = 3 <nl> + keepCheckPointFiles = true <nl> + AutoAdjust = [ <nl> + reduceLearnRateIfImproveLessThan = 0 <nl> + loadBestModel = true <nl> + increaseLearnRateIfImproveMoreThan = 1000000000 <nl> + learnRateDecreaseFactor = 0 . 5 <nl> + learnRateIncreaseFactor = 1 . 382 <nl> + autoAdjustLR = AdjustAfterEpoch <nl> + ] <nl> + clippingThresholdPerSample = 1 # INF <nl> + ] <nl> + reader = [ <nl> + readerType = HTKMLFReader <nl> + readMethod = blockRandomize <nl> + miniBatchMode = Partial <nl> + randomize = Auto <nl> + verbosity = 0 <nl> + features = [ <nl> + dim = 363 <nl> + type = Real <nl> + scpFile = glob_0000 . scp <nl> + ] <nl> + labels = [ <nl> + mlfFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / glob_0000 . 
mlf <nl> + labelMappingFile = C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / state . list <nl> + labelDim = 132 <nl> + labelType = Category <nl> + ] <nl> + ] <nl> + ] <nl> + <nl> + < < < < < < < < < < < < < < < < < < < < PROCESSED CONFIG WITH ALL VARIABLES RESOLVED < < < < < < < < < < < < < < < < < < < < <nl> + command : speechTrain <nl> + precision = float <nl> + LockDevice : Capture device 1 and lock it for exclusive use <nl> + LockDevice : Capture device 2 and lock it for exclusive use <nl> + LockDevice : Capture device 3 and lock it for exclusive use <nl> + LockDevice : Capture device 0 and lock it for exclusive use <nl> + LockDevice : Capture device 1 and lock it for exclusive use <nl> + SimpleNetworkBuilder Using GPU 1 <nl> + reading script file glob_0000 . scp . . . 948 entries <nl> + trainlayer : OOV - exclusion code enabled , but no unigram specified to derive the word set from , so you won ' t get OOV exclusion <nl> + total 132 state names in state list C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / state . list <nl> + htkmlfreader : reading MLF file C : \ Users \ svcphil \ workspace . vlivan \ CNTK - Build - Windows \ Tests \ Speech \ Data / glob_0000 . mlf . . . total 948 entries <nl> + . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . feature set 0 : 252734 frames in 948 out of 948 utterances <nl> + label set 0 : 129 classes <nl> + minibatchutterancesource : 948 utterances grouped into 3 chunks , av . chunk size : 316 . 0 utterances , 84244 . 7 frames <nl> + Starting from checkpoint . Load Network From File C : \ Users \ svcphil \ AppData \ Local \ Temp \ 2 \ cntk - test - 20150811174551 . 851046 \ Speech_QuickE2E @ release_gpu / models / cntkSpeech . dnn . 2 . <nl> + <nl> + <nl> + Printing Gradient Computation Node Order . . . 
<nl> + <nl> + CrossEntropyWithSoftmax [ 0 , 0 ] = CrossEntropyWithSoftmax ( labels [ 132 , 256 ] , HLast [ 0 , 0 ] ) <nl> + HLast [ 0 , 0 ] = Plus ( W2 * H1 [ 0 , 0 ] , B2 [ 132 , 1 ] ) <nl> + B2 [ 132 , 1 ] = LearnableParameter <nl> + W2 * H1 [ 0 , 0 ] = Times ( W2 [ 132 , 512 ] , H2 [ 0 , 0 ] ) <nl> + H2 [ 0 , 0 ] = Sigmoid ( W1 * H1 + B1 [ 0 , 0 ] ) <nl> + W1 * H1 + B1 [ 0 , 0 ] = Plus ( W1 * H1 [ 0 , 0 ] , B1 [ 512 , 1 ] ) <nl> + B1 [ 512 , 1 ] = LearnableParameter <nl> + W1 * H1 [ 0 , 0 ] = Times ( W1 [ 512 , 512 ] , H1 [ 0 , 0 ] ) <nl> + H1 [ 0 , 0 ] = Sigmoid ( W0 * features + B0 [ 0 , 0 ] ) <nl> + W0 * features + B0 [ 0 , 0 ] = Plus ( W0 * features [ 0 , 0 ] , B0 [ 512 , 1 ] ) <nl> + B0 [ 512 , 1 ] = LearnableParameter <nl> + W0 * features [ 0 , 0 ] = Times ( W0 [ 512 , 363 ] , MVNormalizedFeatures [ 0 , 0 ] ) <nl> + MVNormalizedFeatures [ 0 , 0 ] = PerDimMeanVarNormalization ( features [ 363 , 256 ] , MeanOfFeatures [ 363 , 1 ] , InvStdOfFeatures [ 363 , 1 ] ) <nl> + InvStdOfFeatures [ 363 , 1 ] = InvStdDev ( features [ 363 , 256 ] ) <nl> + MeanOfFeatures [ 363 , 1 ] = Mean ( features [ 363 , 256 ] ) <nl> + features [ 363 , 256 ] = InputValue <nl> + W0 [ 512 , 363 ] = LearnableParameter <nl> + W1 [ 512 , 512 ] = LearnableParameter <nl> + W2 [ 132 , 512 ] = LearnableParameter <nl> + labels [ 132 , 256 ] = InputValue <nl> + <nl> + Validating node CrossEntropyWithSoftmax <nl> + <nl> + Validating - - > labels = InputValue <nl> + Validating - - > W2 = LearnableParameter <nl> + Validating - - > W1 = LearnableParameter <nl> + Validating - - > W0 = LearnableParameter <nl> + Validating - - > features = InputValue <nl> + Validating - - > MeanOfFeatures = Mean ( features [ 363 , 256 ] ) <nl> + Validating - - > InvStdOfFeatures = InvStdDev ( features [ 363 , 256 ] ) <nl> + Validating - - > MVNormalizedFeatures = PerDimMeanVarNormalization ( features [ 363 , 256 ] , MeanOfFeatures [ 363 , 1 ] , InvStdOfFeatures [ 363 , 1 ] ) <nl> + Validating - - > W0 * features = Times ( W0 [ 512 , 363 ] , MVNormalizedFeatures [ 363 , 256 ] ) <nl> + Validating - - > B0 = LearnableParameter <nl> + Validating - - > W0 * features + B0 = Plus ( W0 * features [ 512 , 256 ] , B0 [ 512 , 1 ] ) <nl> + Validating - - > H1 = Sigmoid ( W0 * features + B0 [ 512 , 256 ] ) <nl> + Validating - - > W1 * H1 = Times ( W1 [ 512 , 512 ] , H1 [ 512 , 256 ] ) <nl> + Validating - - > B1 = LearnableParameter <nl> + Validating - - > W1 * H1 + B1 = Plus ( W1 * H1 [ 512 , 256 ] , B1 [ 512 , 1 ] ) <nl> + Validating - - > H2 = Sigmoid ( W1 * H1 + B1 [ 512 , 256 ] ) <nl> + Validating - - > W2 * H1 = Times ( W2 [ 132 , 512 ] , H2 [ 512 , 256 ] ) <nl> + Validating - - > B2 = LearnableParameter <nl> + Validating - - > HLast = Plus ( W2 * H1 [ 132 , 256 ] , B2 [ 132 , 1 ] ) <nl> + Validating - - > CrossEntropyWithSoftmax = CrossEntropyWithSoftmax ( labels [ 132 , 256 ] , HLast [ 132 , 256 ] ) <nl> + <nl> + <nl> + <nl> + Validating node ScaledLogLikelihood <nl> + <nl> + Validating - - > W2 = LearnableParameter <nl> + Validating - - > W1 = LearnableParameter <nl> + Validating - - > W0 = LearnableParameter <nl> + Validating - - > features = InputValue <nl> + Validating - - > MeanOfFeatures = Mean ( features [ 363 , 256 ] ) <nl> + Validating - - > InvStdOfFeatures = InvStdDev ( features [ 363 , 256 ] ) <nl> + Validating - - > MVNormalizedFeatures = PerDimMeanVarNormalization ( features [ 363 , 256 ] , MeanOfFeatures [ 363 , 1 ] , InvStdOfFeatures [ 363 , 1 ] ) <nl> + Validating - - > W0 * features = Times ( W0 [ 512 , 363 ] , MVNormalizedFeatures 
[ 363 , 256 ] ) <nl> + Validating - - > B0 = LearnableParameter <nl> + Validating - - > W0 * features + B0 = Plus ( W0 * features [ 512 , 256 ] , B0 [ 512 , 1 ] ) <nl> + Validating - - > H1 = Sigmoid ( W0 * features + B0 [ 512 , 256 ] ) <nl> + Validating - - > W1 * H1 = Times ( W1 [ 512 , 512 ] , H1 [ 512 , 256 ] ) <nl> + Validating - - > B1 = LearnableParameter <nl> + Validating - - > W1 * H1 + B1 = Plus ( W1 * H1 [ 512 , 256 ] , B1 [ 512 , 1 ] ) <nl> + Validating - - > H2 = Sigmoid ( W1 * H1 + B1 [ 512 , 256 ] ) <nl> + Validating - - > W2 * H1 = Times ( W2 [ 132 , 512 ] , H2 [ 512 , 256 ] ) <nl> + Validating - - > B2 = LearnableParameter <nl> + Validating - - > HLast = Plus ( W2 * H1 [ 132 , 256 ] , B2 [ 132 , 1 ] ) <nl> + Validating - - > labels = InputValue <nl> + Validating - - > Prior = Mean ( labels [ 132 , 256 ] ) <nl> + Validating - - > LogOfPrior = Log ( Prior [ 132 , 1 ] ) <nl> + Validating - - > ScaledLogLikelihood = Minus ( HLast [ 132 , 256 ] , LogOfPrior [ 132 , 1 ] ) <nl> + <nl> + <nl> + <nl> + Validating node EvalErrorPrediction <nl> + <nl> + Validating - - > labels = InputValue <nl> + Validating - - > W2 = LearnableParameter <nl> + Validating - - > W1 = LearnableParameter <nl> + Validating - - > W0 = LearnableParameter <nl> + Validating - - > features = InputValue <nl> + Validating - - > MeanOfFeatures = Mean ( features [ 363 , 256 ] ) <nl> + Validating - - > InvStdOfFeatures = InvStdDev ( features [ 363 , 256 ] ) <nl> + Validating - - > MVNormalizedFeatures = PerDimMeanVarNormalization ( features [ 363 , 256 ] , MeanOfFeatures [ 363 , 1 ] , InvStdOfFeatures [ 363 , 1 ] ) <nl> + Validating - - > W0 * features = Times ( W0 [ 512 , 363 ] , MVNormalizedFeatures [ 363 , 256 ] ) <nl> + Validating - - > B0 = LearnableParameter <nl> + Validating - - > W0 * features + B0 = Plus ( W0 * features [ 512 , 256 ] , B0 [ 512 , 1 ] ) <nl> + Validating - - > H1 = Sigmoid ( W0 * features + B0 [ 512 , 256 ] ) <nl> + Validating - - > W1 * H1 = Times ( W1 [ 512 , 512 ] , H1 [ 512 , 256 ] ) <nl> + Validating - - > B1 = LearnableParameter <nl> + Validating - - > W1 * H1 + B1 = Plus ( W1 * H1 [ 512 , 256 ] , B1 [ 512 , 1 ] ) <nl> + Validating - - > H2 = Sigmoid ( W1 * H1 + B1 [ 512 , 256 ] ) <nl> + Validating - - > W2 * H1 = Times ( W2 [ 132 , 512 ] , H2 [ 512 , 256 ] ) <nl> + Validating - - > B2 = LearnableParameter <nl> + Validating - - > HLast = Plus ( W2 * H1 [ 132 , 256 ] , B2 [ 132 , 1 ] ) <nl> + Validating - - > EvalErrorPrediction = ErrorPrediction ( labels [ 132 , 256 ] , HLast [ 132 , 256 ] ) <nl> + <nl> + GetTrainCriterionNodes . . . <nl> + GetEvalCriterionNodes . . . 
<nl> + <nl> + <nl> + Validating node CrossEntropyWithSoftmax <nl> + <nl> + Validating - - > labels = InputValue <nl> + Validating - - > W2 = LearnableParameter <nl> + Validating - - > W1 = LearnableParameter <nl> + Validating - - > W0 = LearnableParameter <nl> + Validating - - > features = InputValue <nl> + Validating - - > MeanOfFeatures = Mean ( features [ 363 , 256 ] ) <nl> + Validating - - > InvStdOfFeatures = InvStdDev ( features [ 363 , 256 ] ) <nl> + Validating - - > MVNormalizedFeatures = PerDimMeanVarNormalization ( features [ 363 , 256 ] , MeanOfFeatures [ 363 , 1 ] , InvStdOfFeatures [ 363 , 1 ] ) <nl> + Validating - - > W0 * features = Times ( W0 [ 512 , 363 ] , MVNormalizedFeatures [ 363 , 256 ] ) <nl> + Validating - - > B0 = LearnableParameter <nl> + Validating - - > W0 * features + B0 = Plus ( W0 * features [ 512 , 256 ] , B0 [ 512 , 1 ] ) <nl> + Validating - - > H1 = Sigmoid ( W0 * features + B0 [ 512 , 256 ] ) <nl> + Validating - - > W1 * H1 = Times ( W1 [ 512 , 512 ] , H1 [ 512 , 256 ] ) <nl> + Validating - - > B1 = LearnableParameter <nl> + Validating - - > W1 * H1 + B1 = Plus ( W1 * H1 [ 512 , 256 ] , B1 [ 512 , 1 ] ) <nl> + Validating - - > H2 = Sigmoid ( W1 * H1 + B1 [ 512 , 256 ] ) <nl> + Validating - - > W2 * H1 = Times ( W2 [ 132 , 512 ] , H2 [ 512 , 256 ] ) <nl> + Validating - - > B2 = LearnableParameter <nl> + Validating - - > HLast = Plus ( W2 * H1 [ 132 , 256 ] , B2 [ 132 , 1 ] ) <nl> + Validating - - > CrossEntropyWithSoftmax = CrossEntropyWithSoftmax ( labels [ 132 , 256 ] , HLast [ 132 , 256 ] ) <nl> + <nl> + No PreCompute nodes found , skipping PreCompute step <nl> + Set Max Temp Mem Size For Convolution Nodes to 0 samples . <nl> + Starting Epoch 3 : learning rate per sample = 0 . 000098 momentum = 0 . 656119 <nl> + minibatchiterator : epoch 2 : frames [ 40960 . . 61440 ] ( first utterance at frame 40960 ) with 1 datapasses <nl> + requiredata : determined feature kind as 33 - dimensional ' USER ' with frame shift 10 . 
0 ms <nl> + <nl> + <nl> + Validating node EvalErrorPrediction <nl> + <nl> + Validating - - > labels = InputValue <nl> + Validating - - > W2 = LearnableParameter <nl> + Validating - - > W1 = LearnableParameter <nl> + Validating - - > W0 = LearnableParameter <nl> + Validating - - > features = InputValue <nl> + Validating - - > MeanOfFeatures = Mean ( features [ 363 , 1024 ] ) <nl> + Validating - - > InvStdOfFeatures = InvStdDev ( features [ 363 , 1024 ] ) <nl> + Validating - - > MVNormalizedFeatures = PerDimMeanVarNormalization ( features [ 363 , 1024 ] , MeanOfFeatures [ 363 , 1 ] , InvStdOfFeatures [ 363 , 1 ] ) <nl> + Validating - - > W0 * features = Times ( W0 [ 512 , 363 ] , MVNormalizedFeatures [ 363 , 1024 ] ) <nl> + Validating - - > B0 = LearnableParameter <nl> + Validating - - > W0 * features + B0 = Plus ( W0 * features [ 512 , 1024 ] , B0 [ 512 , 1 ] ) <nl> + Validating - - > H1 = Sigmoid ( W0 * features + B0 [ 512 , 1024 ] ) <nl> + Validating - - > W1 * H1 = Times ( W1 [ 512 , 512 ] , H1 [ 512 , 1024 ] ) <nl> + Validating - - > B1 = LearnableParameter <nl> + Validating - - > W1 * H1 + B1 = Plus ( W1 * H1 [ 512 , 1024 ] , B1 [ 512 , 1 ] ) <nl> + Validating - - > H2 = Sigmoid ( W1 * H1 + B1 [ 512 , 1024 ] ) <nl> + Validating - - > W2 * H1 = Times ( W2 [ 132 , 512 ] , H2 [ 512 , 1024 ] ) <nl> + Validating - - > B2 = LearnableParameter <nl> + Validating - - > HLast = Plus ( W2 * H1 [ 132 , 1024 ] , B2 [ 132 , 1 ] ) <nl> + Validating - - > EvalErrorPrediction = ErrorPrediction ( labels [ 132 , 1024 ] , HLast [ 132 , 1024 ] ) <nl> + <nl> + Epoch [ 3 of 3 ] - Minibatch [ 1 - 10 of 20 ] : SamplesSeen = 10240 ; TrainLossPerSample = 1 . 86752820 ; EvalErr [ 0 ] PerSample = 0 . 52177733 ; TotalTime = 0 . 42093s ; TotalTimePerSample = 0 . 04111ms ; SamplesPerSecond = 24327 <nl> + Epoch [ 3 of 3 ] - Minibatch [ 11 - 20 of 20 ] : SamplesSeen = 10240 ; TrainLossPerSample = 1 . 87358737 ; EvalErr [ 0 ] PerSample = 0 . 51542968 ; TotalTime = 0 . 05521s ; TotalTimePerSample = 0 . 00539ms ; SamplesPerSecond = 185480 <nl> + Finished Epoch [ 3 ] : [ Training Set ] TrainLossPerSample = 1 . 8705578 ; EvalErrPerSample = 0 . 5186035 ; Ave LearnRatePerSample = 9 . 765625146e - 005 ; EpochTime = 0 . 690137 <nl> + COMPLETED <nl> \ No newline at end of file <nl> mmm a / Tests / Speech / QuickE2E / run - test <nl> ppp b / Tests / Speech / QuickE2E / run - test <nl> <nl> # ! / bin / bash <nl> - CNTK_BINARY = $ TEST_BUILD_LOCATION / $ TEST_FLAVOR / bin / cntk <nl> if [ " $ TEST_DEVICE " = = " CPU " ] ; then <nl> CNTK_DEVICE_ID = - 1 <nl> else <nl> CNTK_DEVICE_ID = Auto <nl> fi <nl> - CNTK_ARGS = " configFile = $ TEST_DIR / cntk . config RunDir = $ TEST_RUN_DIR DataDir = $ TEST_DATA_DIR DeviceId = $ CNTK_DEVICE_ID " <nl> + <nl> + configFile = $ TEST_DIR / cntk . config <nl> + RunDir = $ TEST_RUN_DIR <nl> + DataDir = $ TEST_DATA_DIR <nl> + <nl> + if [ " $ OS " = = " Windows_NT " ] ; then <nl> + # When running on cygwin translating / cygdrive / xxx paths to proper windows paths : <nl> + configFile = $ ( cygpath - aw $ configFile ) <nl> + RunDir = $ ( cygpath - aw $ RunDir ) <nl> + DataDir = $ ( cygpath - aw $ DataDir ) <nl> + fi <nl> + <nl> + CNTK_ARGS = " configFile = $ configFile RunDir = $ RunDir DataDir = $ DataDir DeviceId = $ CNTK_DEVICE_ID " <nl> MODELS_DIR = $ TEST_RUN_DIR / models <nl> [ - d $ MODELS_DIR ] & & rm - rf $ MODELS_DIR <nl> mkdir - p $ MODELS_DIR | | exit $ ? <nl> - echo = = = Running $ CNTK_BINARY $ CNTK_ARGS <nl> - $ CNTK_BINARY $ CNTK_ARGS | | exit $ ? 
<nl> + echo = = = Running $ TEST_CNTK_BINARY $ CNTK_ARGS <nl> + $ TEST_CNTK_BINARY $ CNTK_ARGS | | exit $ ? <nl> echo = = = Deleting last epoch data <nl> rm $ TEST_RUN_DIR / models / * . dnn <nl> echo = = = = Re - running from checkpoint <nl> - $ CNTK_BINARY $ CNTK_ARGS | | exit $ ? <nl> + $ TEST_CNTK_BINARY $ CNTK_ARGS | | exit $ ? <nl> mmm a / Tests / TestDriver . py <nl> ppp b / Tests / TestDriver . py <nl> <nl> # <nl> # Each test directory has a following components : <nl> # - testcases . yml - main test confuguration file , whcih defines all test cases <nl> - # - run - test - ( run - test ) script <nl> + # - run - test - ( run - test ) script <nl> # - baseline * . txt - baseline files whith a captured expected output of run - test script <nl> # <nl> # mmm - - testcases . yml format mmmmmm - <nl> <nl> # mmm - Baseline files mmm - <nl> # Order of searching baseline files , depends on the current mode for a given test : <nl> # <nl> - # 1 . baseline . < flavor > . < device > . txt <nl> - # 2 . baseline . < flavor > . txt <nl> - # 3 . baseline . < device > . txt <nl> - # 4 . baseline . txt <nl> + # 1 . baseline . < os > . < flavor > . < device > . txt <nl> + # 2 . baseline . < os > . < flavor > . txt <nl> + # 3 . baseline . < os > . < device > . txt <nl> + # 4 . baseline . < os > . txt <nl> + # 5 . baseline . < flavor > . < device > . txt <nl> + # 6 . baseline . < flavor > . txt <nl> + # 7 . baseline . < device > . txt <nl> + # 8 . baseline . txt <nl> # where < flavor > = { debug | release } <nl> # < device > = { cpu | gpu } <nl> # <nl> <nl> import sys , os , argparse , traceback , yaml , subprocess , random , re , time <nl> <nl> thisDir = os . path . dirname ( os . path . realpath ( __file__ ) ) <nl> + windows = os . getenv ( " OS " ) = = " Windows_NT " <nl> <nl> # This class encapsulates an instance of the test <nl> class Test : <nl> def run ( self , flavor , device , args ) : <nl> os . environ [ " TEST_FLAVOR " ] = flavor <nl> os . environ [ " TEST_DEVICE " ] = device <nl> os . environ [ " TEST_BUILD_LOCATION " ] = args . build_location <nl> + if windows : <nl> + os . environ [ " TEST_CNTK_BINARY " ] = os . path . join ( args . build_location , flavor , " cntk . exe " ) <nl> + else : <nl> + os . environ [ " TEST_CNTK_BINARY " ] = os . path . join ( args . build_location , flavor , " bin " , " cntk " ) <nl> os . environ [ " TEST_DIR " ] = self . testDir <nl> os . environ [ " TEST_DATA_DIR " ] = self . dataDir <nl> os . environ [ " TEST_RUN_DIR " ] = runDir <nl> def run ( self , flavor , device , args ) : <nl> return result <nl> <nl> # Finds a location of a baseline file by probing different names in the following order : <nl> + # baseline . $ os . $ flavor . $ device . txt <nl> + # baseline . $ os . $ flavor . txt <nl> + # baseline . $ os . $ device . txt <nl> + # baseline . $ os . txt <nl> # baseline . $ flavor . $ device . txt <nl> # baseline . $ flavor . txt <nl> # baseline . $ device . txt <nl> # baseline . txt <nl> def findBaselineFile ( self , flavor , device ) : <nl> - for f in [ " . " + flavor . lower ( ) , " " ] : <nl> - for d in [ " . " + device . lower ( ) , " " ] : <nl> - candidateName = " baseline " + f + d + " . txt " ; <nl> - fullPath = os . path . join ( self . testDir , candidateName ) <nl> - if os . path . isfile ( fullPath ) : <nl> - return fullPath <nl> + for o in [ " . " + ( " windows " if windows else " linux " ) , " " ] : <nl> + for f in [ " . " + flavor . lower ( ) , " " ] : <nl> + for d in [ " . " + device . 
lower ( ) , " " ] : <nl> + candidateName = " baseline " + o + f + d + " . txt " <nl> + fullPath = os . path . join ( self . testDir , candidateName ) <nl> + if os . path . isfile ( fullPath ) : <nl> + return fullPath <nl> return None <nl> <nl> # This class encapsulates one testcase ( in testcases . yml file ) <nl> def runCommand ( args ) : <nl> help = " optional test name ( s ) to run , specified as Suite / TestName . " <nl> " Use list command to list available tests . " <nl> " If not specified then all tests will be run . " ) <nl> - # TODO : port paths to Windows <nl> - defaultBuildLocation = os . path . realpath ( os . path . join ( thisDir , " . . " , " build " ) ) <nl> + defaultBuildLocation = os . path . realpath ( os . path . join ( thisDir , " . . " , " x64 " if windows else " build " ) ) <nl> + <nl> runSubparser . add_argument ( " - b " , " - - build - location " , default = defaultBuildLocation , help = " location of the CNTK build to run " ) <nl> runSubparser . add_argument ( " - d " , " - - device " , help = " cpu | gpu - run on a specific device " ) <nl> runSubparser . add_argument ( " - f " , " - - flavor " , help = " release | debug - run only a specific flavor " ) <nl> - # TODO : port paths to Windows <nl> - defaultRunDir = os . path . join ( " / tmp " , " cntk - test - { 0 } . { 1 } " . format ( time . strftime ( " % Y % m % d % H % M % S " ) , random . randint ( 0 , 1000000 ) ) ) <nl> + tmpDir = os . getenv ( " TEMP " ) if windows else " / tmp " <nl> + defaultRunDir = os . path . join ( tmpDir , " cntk - test - { 0 } . { 1 } " . format ( time . strftime ( " % Y % m % d % H % M % S " ) , random . randint ( 0 , 1000000 ) ) ) <nl> runSubparser . add_argument ( " - r " , " - - run - dir " , default = defaultRunDir , help = " directory where to store test output , default : a random dir within / tmp " ) <nl> runSubparser . add_argument ( " - - update - baseline " , action = ' store_true ' , help = " update baseline file ( s ) instead of matching them " ) <nl> runSubparser . add_argument ( " - v " , " - - verbose " , action = ' store_true ' , help = " verbose output - dump all output of test script " ) <nl>
|
Added ability to run speech e2e tests on Windows ( cygwin )
|
microsoft/CNTK
|
ca189d8e351d8e66edf63b95a3f323358bf15f4c
|
2015-08-12T01:17:37Z
|
mmm a / setup . py <nl> ppp b / setup . py <nl> def set_variable ( self , var_name , value ) : <nl> else : <nl> return SetEnvVar . RESULT_DO_NOTHING <nl> <nl> - def set_environment_variables ( self , ndk_root , android_sdk_root , ant_root ) : <nl> + def set_environment_variables ( self , ndk_root , android_sdk_root , ant_root , quiet ) : <nl> <nl> print ( ' \ nSetting up cocos2d - x . . . ' ) <nl> <nl> def set_environment_variables ( self , ndk_root , android_sdk_root , ant_root ) : <nl> else : <nl> print ( ' - > Configuration for Android platform only , you can also skip and manually edit " % s " \ n ' % <nl> self . file_used_for_setup ) <nl> - <nl> - ndk_ret = self . set_variable ( NDK_ROOT , ndk_root ) <nl> - sdk_ret = self . set_variable ( ANDROID_SDK_ROOT , android_sdk_root ) <nl> - ant_ret = self . set_variable ( ANT_ROOT , ant_root ) <nl> + if ( quiet ) : <nl> + ndk_ret = self . set_variable ( NDK_ROOT , ndk_root ) <nl> + sdk_ret = self . set_variable ( ANDROID_SDK_ROOT , android_sdk_root ) <nl> + ant_ret = self . set_variable ( ANT_ROOT , ant_root ) <nl> <nl> # tip the backup file <nl> if ( self . backup_file is not None ) and ( os . path . exists ( self . backup_file ) ) : <nl> def set_environment_variables ( self , ndk_root , android_sdk_root , ant_root ) : <nl> dest = ' android_sdk_root ' , help = ' directory of android sdk root ' ) <nl> parser . add_option ( <nl> ' - t ' , ' - - antroot ' , dest = ' ant_root ' , help = ' directory that contains ant / ant . bat ' ) <nl> + <nl> + parser . add_option ( <nl> + ' - q ' , ' - - quiet ' , dest = ' quiet ' , action = " store_false " , default = True , help = ' setup without setting NDK , SDK , ANT ' ) <nl> opts , args = parser . parse_args ( ) <nl> <nl> # set environment variables <nl> env = SetEnvVar ( ) <nl> env . set_environment_variables ( <nl> - opts . ndk_root , opts . android_sdk_root , opts . ant_root ) <nl> + opts . ndk_root , opts . android_sdk_root , opts . ant_root , opts . quiet ) <nl> <nl> if env . _isWindows ( ) : <nl> import ctypes <nl>
|
Merge pull request from GuoLunHao / v3 . 10_setup
|
cocos2d/cocos2d-x
|
ee75603e74f1b7a29966062cbfdf009d73deef46
|
2015-12-12T15:45:21Z
|