Dataset columns (string-length ranges per field):

  diff  41 to 2.03M characters
  msg   1 to 1.5k characters
  repo  5 to 40 characters
  sha   40 characters (fixed)
  time  20 characters (fixed)
mmm a / tensorflow / contrib / quantize / python / quant_ops . py <nl> ppp b / tensorflow / contrib / quantize / python / quant_ops . py <nl> def LastValueQuantize ( inputs , <nl> a tensor containing quantized values . <nl> " " " <nl> with variable_scope . variable_scope ( <nl> - None , default_name = name_prefix , values = [ inputs ] , reuse = reuse ) : <nl> + None , default_name = name_prefix , values = [ inputs ] , reuse = reuse ) as scope : <nl> + scope . set_partitioner ( None ) <nl> input_shape = inputs . get_shape ( ) <nl> input_dim = len ( input_shape ) <nl> if per_channel : <nl> def MovingAvgQuantize ( inputs , <nl> a tensor containing quantized values . <nl> " " " <nl> with variable_scope . variable_scope ( <nl> - None , default_name = name_prefix , values = [ inputs ] , reuse = reuse ) : <nl> + None , default_name = name_prefix , values = [ inputs ] , reuse = reuse ) as scope : <nl> + scope . set_partitioner ( None ) <nl> input_shape = inputs . get_shape ( ) <nl> input_dim = len ( input_shape ) <nl> if per_channel : <nl> mmm a / tensorflow / contrib / quantize / python / quant_ops_test . py <nl> ppp b / tensorflow / contrib / quantize / python / quant_ops_test . py <nl> <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . ops import array_ops <nl> + from tensorflow . python . ops import partitioned_variables <nl> + from tensorflow . python . ops import variable_scope <nl> from tensorflow . python . ops import variables <nl> from tensorflow . python . platform import googletest <nl> <nl> def testMovingAvgQuantizeTrainingAssign ( self ) : <nl> self . assertGreater ( max_value , 0 . 0 ) <nl> self . assertLess ( max_value , 1 . 0 ) <nl> <nl> + def testVariablesNotParitioned_LastValue ( self ) : <nl> + # Variables added should not use a default partiioner since they are <nl> + # scalar . There would be a tensorflow error thrown if the partitioner was <nl> + # respected by the rewrite . <nl> + with ops . Graph ( ) . as_default ( ) : <nl> + with variable_scope . variable_scope ( <nl> + ' part ' , partitioner = partitioned_variables . fixed_size_partitioner ( 2 ) ) : <nl> + x = array_ops . placeholder ( dtypes . float32 , shape = [ 2 ] ) <nl> + _ = quant_ops . LastValueQuantize ( <nl> + x , <nl> + init_min = 0 . 0 , <nl> + init_max = 0 . 0 , <nl> + is_training = True , <nl> + vars_collection = _MIN_MAX_VARS ) <nl> + <nl> + def testVariablesNotParitioned_MovingAvg ( self ) : <nl> + # Variables added should not use a default partiioner since they are <nl> + # scalar . There would be a tensorflow error thrown if the partitioner was <nl> + # respected by the rewrite . <nl> + with ops . Graph ( ) . as_default ( ) : <nl> + with variable_scope . variable_scope ( <nl> + ' part ' , partitioner = partitioned_variables . fixed_size_partitioner ( 2 ) ) : <nl> + x = array_ops . placeholder ( dtypes . float32 , shape = [ 2 ] ) <nl> + _ = quant_ops . MovingAvgQuantize ( <nl> + x , <nl> + init_min = 0 . 0 , <nl> + init_max = 0 . 0 , <nl> + is_training = True , <nl> + vars_collection = _MIN_MAX_VARS ) <nl> + <nl> def _GetMinMaxValues ( self , sess ) : <nl> min_max_vars = ops . get_collection ( _MIN_MAX_VARS ) <nl> self . assertEqual ( len ( min_max_vars ) , 2 ) <nl>
Make sure that variables aren't created as partition variables since only non-scalar partition variables are supported.
tensorflow/tensorflow
5334631d7650d2212926fae661c2d0f8b9e7b358
2018-05-14T23:20:31Z
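A minimal Python sketch of the pattern applied in the quant_ops hunks above, assuming TensorFlow 1.x variable-scope APIs; the variable names, shapes, and initial values are illustrative, not taken from the commit. Clearing the scope's partitioner keeps the scalar min/max variables from being handed to a partitioner installed by an enclosing scope, which only supports non-scalar variables.

```python
import tensorflow as tf  # assumes TF 1.x-style variable scopes

def create_min_max_vars(name_prefix, reuse=None):
    # An outer scope may install a default partitioner; scalar variables
    # cannot be partitioned, so the quantization scope clears it before
    # creating its min/max bookkeeping variables.
    with tf.variable_scope(
        None, default_name=name_prefix, reuse=reuse) as scope:
        scope.set_partitioner(None)
        min_var = tf.get_variable(
            "min", shape=[], trainable=False,
            initializer=tf.constant_initializer(0.0))
        max_var = tf.get_variable(
            "max", shape=[], trainable=False,
            initializer=tf.constant_initializer(6.0))
    return min_var, max_var
```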
mmm a / arangod / Views / LoggerView . cpp <nl> ppp b / arangod / Views / LoggerView . cpp <nl> static std : : string LevelEnumToString ( LogLevel level ) { <nl> } <nl> } <nl> <nl> + # define VIEW_LOG_TOPIC ( a ) \ <nl> + ! arangodb : : Logger : : isEnabled ( static_cast < arangodb : : LogLevel > ( a ) , \ <nl> + Logger : : VIEWS ) \ <nl> + ? ( void ) 0 \ <nl> + : arangodb : : LogVoidify ( ) & ( arangodb : : LoggerStream ( ) \ <nl> + < < static_cast < arangodb : : LogLevel > ( a ) \ <nl> + < < Logger : : VIEWS \ <nl> + < < arangodb : : Logger : : LINE ( __LINE__ ) \ <nl> + < < arangodb : : Logger : : FILE ( __FILE__ ) \ <nl> + < < arangodb : : Logger : : FUNCTION ( __FUNCTION__ ) ) <nl> + <nl> std : : string LoggerView : : type ( " logger " ) ; <nl> <nl> std : : unique_ptr < ViewImplementation > LoggerView : : creator ( <nl> LogicalView * view , arangodb : : velocypack : : Slice const & info , bool isNew ) { <nl> - LOG_TOPIC ( INFO , Logger : : FIXME ) <nl> + LOG_TOPIC ( TRACE , Logger : : VIEWS ) <nl> < < " called LoggerView : : creator with data : " < < info . toJson ( ) <nl> < < " , isNew : " < < isNew ; <nl> <nl> LoggerView : : LoggerView ( ConstructionGuard const & , LogicalView * logical , <nl> <nl> arangodb : : Result LoggerView : : updateProperties ( <nl> arangodb : : velocypack : : Slice const & slice , bool partialUpdate , bool doSync ) { <nl> - LOG_TOPIC ( INFO , Logger : : FIXME ) <nl> + VIEW_LOG_TOPIC ( _level ) <nl> < < " called LoggerView : : updateProperties with data " < < slice . toJson ( ) <nl> < < " . view data : " <nl> < < _logicalView - > toVelocyPack ( true , false ) . slice ( ) . toJson ( ) ; <nl> arangodb : : Result LoggerView : : updateProperties ( <nl> <nl> / / / @ brief export properties <nl> void LoggerView : : getPropertiesVPack ( velocypack : : Builder & builder ) const { <nl> - LOG_TOPIC ( INFO , Logger : : FIXME ) < < " called LoggerView : : getPropertiesVPack " ; <nl> + VIEW_LOG_TOPIC ( _level ) < < " called LoggerView : : getPropertiesVPack " ; <nl> <nl> TRI_ASSERT ( builder . isOpenObject ( ) ) ; <nl> builder . add ( " level " , VPackValue ( LevelEnumToString ( _level ) ) ) ; <nl> void LoggerView : : getPropertiesVPack ( velocypack : : Builder & builder ) const { <nl> <nl> / / / @ brief opens an existing view <nl> void LoggerView : : open ( ) { <nl> - LOG_TOPIC ( INFO , Logger : : FIXME ) <nl> + VIEW_LOG_TOPIC ( _level ) <nl> < < " called LoggerView : : open . view data : " <nl> < < _logicalView - > toVelocyPack ( true , false ) . slice ( ) . toJson ( ) ; <nl> } <nl> <nl> void LoggerView : : drop ( ) { <nl> - LOG_TOPIC ( INFO , Logger : : FIXME ) <nl> + VIEW_LOG_TOPIC ( _level ) <nl> < < " called LoggerView : : drop . view data : " <nl> < < _logicalView - > toVelocyPack ( true , false ) . slice ( ) . toJson ( ) ; <nl> } <nl> mmm a / lib / Logger / LogTopic . cpp <nl> ppp b / lib / Logger / LogTopic . 
cpp <nl> LogTopic Logger : : SYSCALL ( " syscall " , LogLevel : : INFO ) ; <nl> LogTopic Logger : : THREADS ( " threads " , LogLevel : : WARN ) ; <nl> LogTopic Logger : : TRANSACTIONS ( " trx " , LogLevel : : WARN ) ; <nl> LogTopic Logger : : V8 ( " v8 " , LogLevel : : WARN ) ; <nl> + LogTopic Logger : : VIEWS ( " views " , LogLevel : : FATAL ) ; <nl> <nl> # ifdef USE_ENTERPRISE <nl> LogTopic AuditFeature : : AUDIT_AUTHENTICATION ( " audit - authentication " , LogLevel : : INFO ) ; <nl> LogTopic * LogTopic : : lookup ( std : : string const & name ) { <nl> <nl> return it - > second ; <nl> } <nl> - <nl> + <nl> std : : string LogTopic : : lookup ( size_t topicId ) { <nl> MUTEX_LOCKER ( guard , _namesLock ) ; <nl> <nl> mmm a / lib / Logger / Logger . h <nl> ppp b / lib / Logger / Logger . h <nl> class Logger { <nl> static LogTopic THREADS ; <nl> static LogTopic TRANSACTIONS ; <nl> static LogTopic V8 ; <nl> + static LogTopic VIEWS ; <nl> <nl> public : <nl> struct FIXED { <nl> class Logger { <nl> double _value ; <nl> int _precision ; <nl> } ; <nl> - <nl> + <nl> struct BINARY { <nl> BINARY ( void const * baseAddress , size_t size ) <nl> : baseAddress ( baseAddress ) , size ( size ) { } <nl> - explicit BINARY ( std : : string const & data ) : BINARY ( data . c_str ( ) , data . size ( ) ) { } <nl> + explicit BINARY ( std : : string const & data ) : BINARY ( data . c_str ( ) , data . size ( ) ) { } <nl> void const * baseAddress ; <nl> size_t size ; <nl> } ; <nl>
Made LoggerView log to VIEWS topic and fixed level bug.
arangodb/arangodb
5f87674b6fce1c0acb3b28c58ece5203687788c3
2017-05-28T22:24:51Z
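The VIEW_LOG_TOPIC macro above gates the whole stream expression on Logger::isEnabled, so a log line costs nothing when the views topic is below the requested level. Below is a rough Python analogue of that pattern, not ArangoDB's API; the logger name, levels, and helper are assumptions for illustration only.

```python
import logging

views_log = logging.getLogger("views")
views_log.setLevel(logging.CRITICAL)  # analogous to the VIEWS topic defaulting to FATAL

def log_view(level, make_message):
    # Like VIEW_LOG_TOPIC: check the level first, and only then pay the cost
    # of building the message.
    if views_log.isEnabledFor(level):
        views_log.log(level, make_message())

# The lambda is never evaluated while INFO is below the configured threshold.
log_view(logging.INFO, lambda: "called LoggerView::open. view data: {...}")
```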
mmm a / lib / Sema / TypeCheckConstraints . cpp <nl> ppp b / lib / Sema / TypeCheckConstraints . cpp <nl> bool ConstraintSystem : : simplify ( ) { <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - = = = / / <nl> # pragma mark Constraint solving <nl> <nl> + / / / \ brief Resolve an overload set in the given constraint system by <nl> + / / / producing a set of child constraint systems , each of which picks a specific <nl> + / / / overload from that set . Those child constraint systems that do not fail <nl> + / / / during simplification will be added to the stack of constraint systems <nl> + / / / being considered . <nl> + static void resolveOverloadSet ( ConstraintSystem & cs , <nl> + OverloadSet * ovl , <nl> + SmallVectorImpl < ConstraintSystem * > & stack ) { <nl> + auto choices = ovl - > getChoices ( ) ; <nl> + for ( unsigned i = 0 , n = choices . size ( ) ; i ! = n ; + + i ) { <nl> + auto idx = n - i - 1 ; <nl> + auto & choice = ovl - > getChoices ( ) [ idx ] ; <nl> + auto childCS = cs . createDerivedConstraintSystem ( idx ) ; <nl> + <nl> + / / Bind the overload set ' s type to the type of a reference to the <nl> + / / specific declaration choice . <nl> + Type refType ; <nl> + if ( choice . getBaseType ( ) ) <nl> + refType = childCS - > getTypeOfMemberReference ( choice . getBaseType ( ) , <nl> + choice . getDecl ( ) , <nl> + / * FIXME : * / false ) ; <nl> + else <nl> + refType = childCS - > getTypeOfReference ( choice . getDecl ( ) ) ; <nl> + childCS - > addConstraint ( ConstraintKind : : Equal , ovl - > getBoundType ( ) , <nl> + refType ) ; <nl> + <nl> + / / Simplify the child system . Assuming it ' s still valid , add it to <nl> + / / the stack to be dealt with later . <nl> + / / FIXME : If it ' s not still valid , keep it around for diagnostics . <nl> + if ( ! childCS - > simplify ( ) ) <nl> + stack . push_back ( childCS ) ; <nl> + } <nl> + } <nl> + <nl> bool ConstraintSystem : : solve ( SmallVectorImpl < ConstraintSystem * > & viable ) { <nl> assert ( & getTopConstraintSystem ( ) = = this & & " Can only solve at the top level " ) ; <nl> <nl> bool ConstraintSystem : : solve ( SmallVectorImpl < ConstraintSystem * > & viable ) { <nl> / / If there are any unresolved overload sets , create child systems in <nl> / / which we resolve the overload set to each option . <nl> if ( ! cs - > UnresolvedOverloadSets . empty ( ) ) { <nl> - auto ovl = cs - > UnresolvedOverloadSets . front ( ) ; <nl> - auto choices = ovl - > getChoices ( ) ; <nl> - for ( unsigned i = 0 , n = choices . size ( ) ; i ! = n ; + + i ) { <nl> - auto idx = n - i - 1 ; <nl> - auto & choice = ovl - > getChoices ( ) [ idx ] ; <nl> - auto childCS = cs - > createDerivedConstraintSystem ( idx ) ; <nl> - <nl> - / / Bind the overload set ' s type to the type of a reference to the <nl> - / / specific declaration choice . <nl> - Type refType ; <nl> - if ( choice . getBaseType ( ) ) <nl> - refType = childCS - > getTypeOfMemberReference ( choice . getBaseType ( ) , <nl> - choice . getDecl ( ) , <nl> - / * FIXME : * / false ) ; <nl> - else <nl> - refType = childCS - > getTypeOfReference ( choice . getDecl ( ) ) ; <nl> - childCS - > addConstraint ( ConstraintKind : : Equal , ovl - > getBoundType ( ) , <nl> - refType ) ; <nl> - <nl> - / / Simplify the child system . Assuming it ' s still valid , add it to <nl> - / / the stack to be dealt with later . <nl> - / / FIXME : If it ' s not still valid , keep it around for diagnostics . <nl> - if ( ! childCS - > simplify ( ) ) <nl> - stack . 
push_back ( childCS ) ; <nl> - } <nl> + resolveOverloadSet ( * cs , cs - > UnresolvedOverloadSets . front ( ) , stack ) ; <nl> continue ; <nl> } <nl> <nl>
Factor out the resolution of an overload set (which builds child
apple/swift
a14e8ca992ed5115e159a704b4f2b1d589ceae6e
2012-08-21T17:36:02Z
mmm a / utils / release / push_packages <nl> ppp b / utils / release / push_packages <nl> def clear_old_incoming_packages ( ssh_connection , user ) : <nl> <nl> def _get_incoming_path ( repo_url , user = None , pkg_type = None , release_type = None ) : <nl> if repo_url = = ' repo . mirror . yandex . net ' : <nl> - if pkg_type ! = ' tgz ' : <nl> - return " / home / { user } / incoming / clickhouse / { pkg } / { release_type } " . format ( <nl> - user = user , pkg = pkg_type , release_type = release_type ) <nl> - else : <nl> - return " / home / { user } / incoming / clickhouse / { pkg } " . format ( <nl> - user = user , pkg = pkg_type ) <nl> - <nl> + return " / home / { user } / incoming / clickhouse / { pkg } / { release_type } " . format ( <nl> + user = user , pkg = pkg_type , release_type = release_type ) <nl> else : <nl> return " / repo / { 0 } / mini - dinstall / incoming / " . format ( repo_url . split ( ' . ' ) [ 0 ] ) <nl> <nl>
Remove another if
ClickHouse/ClickHouse
12e70d08f65834fab6b7bc72314707b192d5c8aa
2020-01-28T08:58:11Z
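For readability, here is the simplified _get_incoming_path as it reads after the commit above, plus a call with invented values (the user, package type, and release type are placeholders, not real deployment settings); the point of the change is that tgz packages now also get the release_type component in their incoming path.

```python
def _get_incoming_path(repo_url, user=None, pkg_type=None, release_type=None):
    # After the commit, every package type (tgz included) shares one format.
    if repo_url == 'repo.mirror.yandex.net':
        return "/home/{user}/incoming/clickhouse/{pkg}/{release_type}".format(
            user=user, pkg=pkg_type, release_type=release_type)
    else:
        return "/repo/{0}/mini-dinstall/incoming/".format(repo_url.split('.')[0])

# Illustrative values only:
print(_get_incoming_path('repo.mirror.yandex.net',
                         user='robot', pkg_type='tgz', release_type='stable'))
# -> /home/robot/incoming/clickhouse/tgz/stable
```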
mmm a / tensorflow / compiler / tf2xla / kernels / mirror_pad_op . cc <nl> ppp b / tensorflow / compiler / tf2xla / kernels / mirror_pad_op . cc <nl> class MirrorPadOp : public XlaOpKernel { <nl> <nl> xla : : StatusOr < xla : : XlaOp > DoMirrorPad ( const xla : : XlaOp & t , <nl> const xla : : Shape & original_shape , <nl> - const xla : : LiteralSlice & pad_literal , <nl> + const xla : : Literal & pad_literal , <nl> xla : : XlaBuilder * b ) { <nl> xla : : XlaOp accum = t ; <nl> for ( int64 dimno = xla : : ShapeUtil : : Rank ( original_shape ) - 1 ; dimno > = 0 ; <nl> mmm a / tensorflow / compiler / tf2xla / kernels / pad_op . cc <nl> ppp b / tensorflow / compiler / tf2xla / kernels / pad_op . cc <nl> class PadOp : public XlaOpKernel { <nl> int before = pad_literal . Get < int32 > ( { i , 0 } ) ; <nl> int after = pad_literal . Get < int32 > ( { i , 1 } ) ; <nl> OP_REQUIRES ( ctx , before > = 0 & & after > = 0 , <nl> - errors : : InvalidArgument ( <nl> - " Paddings must be non - negative : " , before , " " , after ) ) ; <nl> + errors : : InvalidArgument ( " Paddings must be non - negative : " , <nl> + before , " " , after ) ) ; <nl> dim - > set_edge_padding_low ( before ) ; <nl> dim - > set_edge_padding_high ( after ) ; <nl> } <nl> mmm a / tensorflow / compiler / tf2xla / kernels / reduction_ops_common . cc <nl> ppp b / tensorflow / compiler / tf2xla / kernels / reduction_ops_common . cc <nl> void XlaReductionOp : : Compile ( XlaOpKernelContext * ctx ) { <nl> <nl> / / Evaluate the constant , reshaping to a 1 - vector if it is a scalar . <nl> xla : : Literal axes_literal ; <nl> - OP_REQUIRES_OK ( <nl> - ctx , ctx - > ConstantInputReshaped ( 1 , { axes_tensor_shape . num_elements ( ) } , <nl> - & axes_literal ) ) ; <nl> + OP_REQUIRES_OK ( ctx , <nl> + ctx - > ConstantInputReshaped ( <nl> + 1 , { axes_tensor_shape . num_elements ( ) } , & axes_literal ) ) ; <nl> <nl> VLOG ( 1 ) < < " data shape : " < < data_shape . DebugString ( ) ; <nl> VLOG ( 1 ) < < " axes : " < < axes_literal . ToString ( ) ; <nl> mmm a / tensorflow / compiler / tf2xla / kernels / sequence_ops . cc <nl> ppp b / tensorflow / compiler / tf2xla / kernels / sequence_ops . cc <nl> Status GetIntValue ( int index , XlaOpKernelContext * ctx , int64 * value ) { <nl> <nl> / / The type - specific part of the implementation of Range . <nl> template < typename T > <nl> - Status CreateRangeTensor ( const xla : : LiteralSlice & start_literal , <nl> - const xla : : LiteralSlice & limit_literal , <nl> - const xla : : LiteralSlice & delta_literal , <nl> - Tensor * output ) { <nl> + Status CreateRangeTensor ( const xla : : Literal & start_literal , <nl> + const xla : : Literal & limit_literal , <nl> + const xla : : Literal & delta_literal , Tensor * output ) { <nl> T start = start_literal . Get < T > ( { } ) ; <nl> T limit = limit_literal . Get < T > ( { } ) ; <nl> T delta = delta_literal . 
Get < T > ( { } ) ; <nl> Status CreateRangeTensor ( const xla : : LiteralSlice & start_literal , <nl> } <nl> if ( delta > 0 ) { <nl> if ( start > limit ) { <nl> - return errors : : InvalidArgument ( <nl> - " Requires start < = limit when delta > 0 : " , start , " / " , limit ) ; <nl> + return errors : : InvalidArgument ( " Requires start < = limit when delta > 0 : " , <nl> + start , " / " , limit ) ; <nl> } <nl> } else { <nl> if ( start < limit ) { <nl> - return errors : : InvalidArgument ( <nl> - " Requires start > = limit when delta < 0 : " , start , " / " , limit ) ; <nl> + return errors : : InvalidArgument ( " Requires start > = limit when delta < 0 : " , <nl> + start , " / " , limit ) ; <nl> } <nl> } <nl> int64 size = <nl> mmm a / tensorflow / compiler / tf2xla / kernels / split_op . cc <nl> ppp b / tensorflow / compiler / tf2xla / kernels / split_op . cc <nl> class SplitVOp : public XlaOpKernel { <nl> errors : : InvalidArgument ( <nl> " Number of ways to split should be > 0 , but got " , num_split ) ) ; <nl> <nl> - / / Check that sizes are correct . <nl> + / / check that sizes are correct <nl> int total_split_size = 0 ; <nl> int neg_one_dim = - 1 ; <nl> std : : vector < int64 > split_sizes_vec ( num_split , - 1 ) ; <nl> class SplitVOp : public XlaOpKernel { <nl> " number of elements as the output . Got " , <nl> split_size_shape . dims ( ) , " - D and " , <nl> split_size_shape . num_elements ( ) , " elements " ) ) ; <nl> - / / Get the dimension of this split . <nl> + / / get the dimension of this split <nl> xla : : Literal split_size_literal ; <nl> OP_REQUIRES_OK ( ctx , ctx - > ConstantInput ( 1 , & split_size_literal ) ) ; <nl> <nl> mmm a / tensorflow / compiler / tf2xla / literal_util . cc <nl> ppp b / tensorflow / compiler / tf2xla / literal_util . cc <nl> limitations under the License . <nl> <nl> namespace tensorflow { <nl> <nl> + Status HostTensorToLiteral ( const Tensor & host_tensor , xla : : Literal * literal ) { <nl> + xla : : Shape literal_shape ; <nl> + TF_RETURN_IF_ERROR ( TensorShapeToXLAShape ( <nl> + host_tensor . dtype ( ) , host_tensor . shape ( ) , & literal_shape ) ) ; <nl> + <nl> + * literal = xla : : Literal ( literal_shape ) ; <nl> + <nl> + / / memcpy over the payload . . . <nl> + / / TODO ( phawkins ) : handle string types . <nl> + size_t total_bytes = host_tensor . TotalBytes ( ) ; <nl> + if ( total_bytes > 0 ) { <nl> + void * dst_ptr = literal - > untyped_data ( ) ; <nl> + const void * src_ptr = DMAHelper : : base ( & host_tensor ) ; <nl> + memcpy ( dst_ptr , src_ptr , total_bytes ) ; <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> Status HostTensorToBorrowingLiteral ( const Tensor & host_tensor , <nl> xla : : BorrowingLiteral * literal ) { <nl> xla : : Shape xla_shape ; <nl> mmm a / tensorflow / compiler / tf2xla / literal_util . h <nl> ppp b / tensorflow / compiler / tf2xla / literal_util . h <nl> limitations under the License . <nl> <nl> namespace tensorflow { <nl> <nl> + / / Copies ' host_tensor ' to an XLA Literal . Fails if host_tensor is of an <nl> + / / unsupported type . <nl> + Status HostTensorToLiteral ( const Tensor & host_tensor , xla : : Literal * literal ) ; <nl> + <nl> / / Returns a BorrowingLiteral that utilizes the same underlying buffer owned by <nl> / / ' host_tensor ' . <nl> Status HostTensorToBorrowingLiteral ( const Tensor & host_tensor , <nl> mmm a / tensorflow / compiler / tf2xla / xla_context . cc <nl> ppp b / tensorflow / compiler / tf2xla / xla_context . 
cc <nl> void XlaContext : : AddRetval ( int retval_index , DataType type , <nl> } <nl> <nl> Status XlaContext : : AddConstRetval ( int retval_index , DataType dtype , <nl> - const xla : : LiteralSlice & literal ) { <nl> + const xla : : Literal & literal ) { <nl> VLOG ( 1 ) < < " Adding retval index " < < retval_index <nl> < < " with non - data - dependent tensor to XLA computation " ; <nl> if ( retvals_ . size ( ) < = retval_index ) { <nl> mmm a / tensorflow / compiler / tf2xla / xla_context . h <nl> ppp b / tensorflow / compiler / tf2xla / xla_context . h <nl> class XlaContext : public ResourceBase { <nl> <nl> / / As for Retval , but for return values that are compile - time constants . <nl> Status AddConstRetval ( int retval_index , DataType dtype , <nl> - const xla : : LiteralSlice & literal ) ; <nl> + const xla : : Literal & literal ) ; <nl> <nl> / / Creates a resource with resource ` kind ` and initial value ` handle ` . ` name ` <nl> / / is a descriptive name for use in error messages . See the ` XlaResource ` <nl> mmm a / tensorflow / compiler / tf2xla / xla_helpers . cc <nl> ppp b / tensorflow / compiler / tf2xla / xla_helpers . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / tf2xla / xla_op_kernel . h " <nl> # include " tensorflow / compiler / xla / client / xla_client / xla_builder . h " <nl> # include " tensorflow / compiler / xla / types . h " <nl> + # include " tensorflow / core / common_runtime / dma_helper . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> # include " tensorflow / core / lib / core / status . h " <nl> # include " tensorflow / core / lib / gtl / array_slice . h " <nl> Status XlaHelpers : : OneHot ( xla : : XlaBuilder * builder , int64 depth , int axis , <nl> return errors : : InvalidArgument ( " Invalid argument type " , <nl> DataTypeString ( index_type ) ) ; <nl> } <nl> - <nl> xla : : BorrowingLiteral linspace_literal ; <nl> TF_RETURN_IF_ERROR ( HostTensorToBorrowingLiteral ( linspace , & linspace_literal ) ) ; <nl> <nl> mmm a / tensorflow / compiler / tf2xla / xla_op_kernel . cc <nl> ppp b / tensorflow / compiler / tf2xla / xla_op_kernel . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / tf2xla / literal_util . h " <nl> # include " tensorflow / compiler / tf2xla / shape_util . h " <nl> # include " tensorflow / compiler / tf2xla / xla_context . h " <nl> - # include " tensorflow / core / common_runtime / dma_helper . h " <nl> <nl> namespace tensorflow { <nl> <nl> Status XlaOpKernelContext : : ConstantInputReshaped ( <nl> } <nl> const XlaExpression * expression = CastExpressionFromTensor ( tensor ) ; <nl> <nl> - auto copy_tensor_to_literal = [ ] ( const Tensor & tensor , <nl> - xla : : Literal * literal ) { <nl> - xla : : Shape literal_shape ; <nl> - TF_RETURN_IF_ERROR ( <nl> - TensorShapeToXLAShape ( tensor . dtype ( ) , tensor . shape ( ) , & literal_shape ) ) ; <nl> - <nl> - * literal = xla : : Literal ( literal_shape ) ; <nl> - <nl> - / / memcpy over the payload . . . <nl> - / / TODO ( phawkins ) : handle string types . <nl> - size_t total_bytes = tensor . TotalBytes ( ) ; <nl> - if ( total_bytes > 0 ) { <nl> - void * dst_ptr = literal - > untyped_data ( ) ; <nl> - const void * src_ptr = DMAHelper : : base ( & tensor ) ; <nl> - memcpy ( dst_ptr , src_ptr , total_bytes ) ; <nl> - } <nl> - return Status : : OK ( ) ; <nl> - } ; <nl> - <nl> / / If the tensor has a known constant value , there is no need to invoke XLA . 
<nl> if ( expression - > has_constant_value ( ) ) { <nl> Tensor temp ( tensor . dtype ( ) ) ; <nl> Status XlaOpKernelContext : : ConstantInputReshaped ( <nl> / / with the enclosing Tensor . <nl> return errors : : Internal ( " Incompatible shapes in ConstantInputReshaped . " ) ; <nl> } <nl> - <nl> - return copy_tensor_to_literal ( temp , constant_literal ) ; <nl> + return HostTensorToLiteral ( temp , constant_literal ) ; <nl> } <nl> <nl> / / Make sure we treat zero - element tensors as constant . <nl> if ( new_shape . num_elements ( ) = = 0 ) { <nl> Tensor temp ( tensor . dtype ( ) , new_shape ) ; <nl> - <nl> - return copy_tensor_to_literal ( temp , constant_literal ) ; <nl> + return HostTensorToLiteral ( temp , constant_literal ) ; <nl> } <nl> <nl> xla : : XlaOp handle = expression - > handle ( ) ; <nl> Status XlaOpKernelContext : : ConstantInputReshaped ( <nl> } <nl> <nl> / / Converts an int32 or int64 scalar literal to an int64 . <nl> - static Status LiteralToInt64Scalar ( const xla : : LiteralSlice & literal , <nl> - int64 * out ) { <nl> + static Status LiteralToInt64Scalar ( const xla : : Literal & literal , int64 * out ) { <nl> if ( xla : : ShapeUtil : : Rank ( literal . shape ( ) ) ! = 0 ) { <nl> return errors : : InvalidArgument ( " value is not a scalar " ) ; <nl> } <nl> static Status LiteralToInt64Scalar ( const xla : : LiteralSlice & literal , <nl> } <nl> <nl> / / Converts an float32 or float64 scalar literal to a float64 . <nl> - static Status LiteralToFloat64Scalar ( const xla : : LiteralSlice & literal , <nl> - double * out ) { <nl> + static Status LiteralToFloat64Scalar ( const xla : : Literal & literal , double * out ) { <nl> if ( xla : : ShapeUtil : : Rank ( literal . shape ( ) ) ! = 0 ) { <nl> return errors : : InvalidArgument ( " value is not a scalar " ) ; <nl> } <nl> Status XlaOpKernelContext : : ConstantInputAsFloatScalar ( int index , double * out ) { <nl> } <nl> <nl> / / Converts an int32 or int64 1D literal to an int64 vector . <nl> - static Status LiteralToInt64Vector ( const xla : : LiteralSlice & literal , <nl> + static Status LiteralToInt64Vector ( const xla : : Literal & literal , <nl> std : : vector < int64 > * out ) { <nl> if ( xla : : ShapeUtil : : Rank ( literal . shape ( ) ) ! = 1 ) { <nl> return errors : : InvalidArgument ( " value is not 1D " ) ; <nl> void XlaOpKernelContext : : SetOutput ( int index , const xla : : XlaOp & handle ) { <nl> void XlaOpKernelContext : : SetConstantOutput ( int index , const Tensor & constant ) { <nl> const TensorShape & shape = constant . shape ( ) ; <nl> <nl> - xla : : BorrowingLiteral literal ; <nl> - OP_REQUIRES_OK ( context_ , HostTensorToBorrowingLiteral ( constant , & literal ) ) ; <nl> - <nl> + xla : : Literal literal ; <nl> + OP_REQUIRES_OK ( context_ , HostTensorToLiteral ( constant , & literal ) ) ; <nl> xla : : XlaOp handle = builder ( ) - > ConstantLiteral ( literal ) ; <nl> CHECK_NE ( handle . builder ( ) , nullptr ) ; <nl> <nl> mmm a / tensorflow / compiler / xla / literal_util . cc <nl> ppp b / tensorflow / compiler / xla / literal_util . cc <nl> LiteralSlice : : LiteralSlice ( const LiteralBase & literal , <nl> BorrowingLiteral : : BorrowingLiteral ( const char * src_buf_ptr , const Shape & shape ) <nl> : LiteralBase ( ) , shape_ ( MakeUnique < Shape > ( shape ) ) { <nl> CHECK ( ShapeUtil : : IsArray ( * shape_ ) ) ; <nl> + CHECK_NE ( src_buf_ptr , nullptr ) ; <nl> CHECK ( LayoutUtil : : HasLayout ( * shape_ ) ) ; <nl> <nl> root_piece_ = Piece ( ) ; <nl>
Automated g4 rollback of changelist 200750664
tensorflow/tensorflow
33f8f7e1843c750186c8fbcfbf94f286bb7ca505
2018-06-15T21:52:33Z
mmm a / src / Functions / CMakeLists . txt <nl> ppp b / src / Functions / CMakeLists . txt <nl> option ( STRIP_DEBUG_SYMBOLS_FUNCTIONS <nl> Provides faster linking and lower binary size . <nl> Tradeoff is the inability to debug some source files with e . g . gdb <nl> ( empty stack frames and no local variables ) . " <nl> - STRIP_DSF_DEFAULT ) <nl> + $ { STRIP_DSF_DEFAULT } ) <nl> <nl> if ( STRIP_DEBUG_SYMBOLS_FUNCTIONS ) <nl> message ( WARNING " Not generating debugger info for ClickHouse functions " ) <nl>
fix: default value
ClickHouse/ClickHouse
e0081980230e636545a43a67238dc25b44f7777c
2020-09-10T12:44:49Z
mmm a / xbmc / utils / test / TestCharsetConverter . cpp <nl> ppp b / xbmc / utils / test / TestCharsetConverter . cpp <nl> TEST_F ( TestCharsetConverter , isValidUtf8_4 ) <nl> TEST_F ( TestCharsetConverter , wToUTF8 ) <nl> { <nl> refstrw1 = L " test _ wToUTF8 " ; <nl> - refstra1 = " test _ wToUTF8 " ; <nl> + refstra1 = u8 " test _ wToUTF8 " ; <nl> varstra1 . clear ( ) ; <nl> g_charsetConverter . wToUTF8 ( refstrw1 , varstra1 ) ; <nl> EXPECT_STREQ ( refstra1 . c_str ( ) , varstra1 . c_str ( ) ) ; <nl>
Merge pull request from Paxxi/fix_tests
xbmc/xbmc
13c805e5bd361ce63efe97b2d15fbad0f2a874ce
2016-07-16T18:16:27Z
mmm a / tensorflow / compiler / xla / pjrt / pjrt_client . cc <nl> ppp b / tensorflow / compiler / xla / pjrt / pjrt_client . cc <nl> PjRtBuffer : : GetBufferForHoldLocked ( ScopedHold : : Type type ) { <nl> / / acquiring any other kind of hold . <nl> WaitForOutstandingDonationHold ( ) ; <nl> if ( device_buffer_ = = nullptr ) { <nl> - return InvalidArgument ( " Hold requested on invalid buffer " ) ; <nl> + return InvalidArgument ( " Hold requested on deleted or donated buffer " ) ; <nl> } else { <nl> + + holds_ [ type ] ; <nl> } <nl> PjRtBuffer : : CopyToHostAsyncInternal ( bool discard_cached_copy , <nl> / / We can ' t perform any other action while a donation hold is in progress . <nl> WaitForOutstandingDonationHold ( ) ; <nl> if ( device_buffer_ = = nullptr ) { <nl> - return InvalidArgument ( " CopyToHostAsync ( ) called on invalid buffer . " ) ; <nl> + return InvalidArgument ( <nl> + " CopyToHostAsync ( ) called on deleted or donated buffer " ) ; <nl> } <nl> if ( discard_cached_copy ) { <nl> auto it = host_values_ . find ( host_layout ) ; <nl> StatusOr < std : : shared_ptr < Literal > > PjRtBuffer : : ToLiteral ( <nl> TF_ASSIGN_OR_RETURN ( std : : shared_ptr < HostValue > host_value , <nl> CopyToHostAsyncInternal ( discard_cached_copy , layout ) ) ; <nl> if ( host_value = = nullptr ) { <nl> - return InvalidArgument ( " ToLiteral called on invalid buffer " ) ; <nl> + return InvalidArgument ( " ToLiteral called on deleted or donated buffer " ) ; <nl> } <nl> host_value - > ready . WaitForNotification ( ) ; <nl> TF_RETURN_IF_ERROR ( host_value - > status ) ; <nl> StatusOr < std : : unique_ptr < PjRtBuffer > > PjRtBuffer : : CopyToDevice ( <nl> / / We can ' t perform any other action while a donation hold is in progress . <nl> WaitForOutstandingDonationHold ( ) ; <nl> if ( device_buffer_ = = nullptr ) { <nl> - return InvalidArgument ( " CopyToDevice called on invalid buffer " ) ; <nl> + return InvalidArgument ( <nl> + " CopyToDevice called on deleted or donated buffer " ) ; <nl> } <nl> AcquireHoldLocked ( & src_device_buffer ) ; <nl> } <nl> Status PjRtBuffer : : BlockHostUntilReady ( ) { <nl> { <nl> absl : : MutexLock lock ( & mu_ ) ; <nl> if ( device_buffer_ = = nullptr ) { <nl> - return InvalidArgument ( " BlockHostUntilReady ( ) called on invalid buffer . " ) ; <nl> + return InvalidArgument ( <nl> + " BlockHostUntilReady ( ) called on deleted or donated buffer " ) ; <nl> } <nl> device_buffer = device_buffer_ ; <nl> } <nl>
Minor improvement, saying why the object is invalid.
tensorflow/tensorflow
d201be6284693c9ac5b93bdccfeeac2524d05239
2020-08-03T08:00:57Z
mmm a / samples / opengl / CMakeLists . txt <nl> ppp b / samples / opengl / CMakeLists . txt <nl> <nl> + if ( APPLE ) <nl> + return ( ) <nl> + endif ( ) <nl> + <nl> if ( UNIX ) <nl> find_package ( X11 QUIET ) <nl> if ( NOT X11_FOUND ) <nl> if ( UNIX ) <nl> set ( SAMPLE_LINKER_DEPS " $ { X11_LIBRARIES } " ) <nl> endif ( ) <nl> <nl> + <nl> + <nl> SET ( OPENCV_OPENGL_SAMPLES_REQUIRED_DEPS opencv_core opencv_imgproc opencv_imgcodecs opencv_videoio opencv_highgui ) <nl> <nl> ocv_check_dependencies ( $ { OPENCV_OPENGL_SAMPLES_REQUIRED_DEPS } ) <nl>
Merge pull request from eruffaldi:macos_build_opengl_qt5_interop2
opencv/opencv
b67c64e2c87ff0f2c0dbb1d267a6eaa3c2e75152
2017-08-23T04:13:50Z
mmm a / tensorflow / python / framework / importer . py <nl> ppp b / tensorflow / python / framework / importer . py <nl> def _PopulateTFImportGraphDefOptions ( options , prefix , input_map , <nl> <nl> def _ProcessNewOps ( graph ) : <nl> " " " Processes the newly - added TF_Operations in ` graph ` . " " " <nl> - for c_op in c_api_util . new_tf_operations ( graph ) : <nl> - graph . _create_op_from_tf_operation ( c_op ) # pylint : disable = protected - access <nl> + # Maps from a node to the names of the ops it ' s colocated with , if colocation <nl> + # is specified in the attributes . <nl> + colocation_pairs = { } <nl> <nl> - # TODO ( skyewm ) : colocation logic <nl> + for c_op in c_api_util . new_tf_operations ( graph ) : <nl> + # pylint : disable = protected - access <nl> + new_op = graph . _create_op_from_tf_operation ( c_op , compute_device = False ) <nl> + # pylint : enable = protected - access <nl> + <nl> + colocation_names = _GetColocationNames ( new_op ) <nl> + if colocation_names : <nl> + colocation_pairs [ new_op ] = colocation_names <nl> + # Don ' t apply this op ' s device function , since colocation constraints <nl> + # override device functions . Note that this op ' s device may still be set <nl> + # by the loop below . <nl> + else : <nl> + with _MaybeDevice ( new_op . device ) : <nl> + graph . _apply_device_functions ( new_op ) # pylint : disable = protected - access <nl> + <nl> + # The following loop populates the device field of ops that are colocated <nl> + # with another op . This is implied by the colocation attribute , but we <nl> + # propagate the device field for completeness . <nl> + for op , coloc_op_list in colocation_pairs . items ( ) : <nl> + coloc_device = None <nl> + # Find any device in the list of colocated ops that have a device , if it <nl> + # exists . We assume that if multiple ops have devices , they refer to the <nl> + # same device . Otherwise , a runtime error will occur since the colocation <nl> + # property cannot be guaranteed . <nl> + # <nl> + # One possible improvement is to try to check for compatibility of all <nl> + # devices in this list at import time here , which would require <nl> + # implementing a compatibility function for device specs in python . <nl> + for coloc_op_name in coloc_op_list : <nl> + try : <nl> + coloc_op = graph . _get_operation_by_name_unsafe ( coloc_op_name ) # pylint : disable = protected - access <nl> + except KeyError : <nl> + raise ValueError ( ' Specified colocation to an op that ' <nl> + ' does not exist during import : % s in % s ' % ( <nl> + coloc_op_name , op . name ) ) <nl> + if coloc_op . device : <nl> + coloc_device = pydev . DeviceSpec . from_string ( coloc_op . device ) <nl> + break <nl> + if coloc_device : <nl> + op . _set_device ( coloc_device ) # pylint : disable = protected - access <nl> + <nl> + <nl> + def _GetColocationNames ( op ) : <nl> + " " " Returns names of the ops that ` op ` should be colocated with . " " " <nl> + colocation_names = [ ] <nl> + try : <nl> + class_values = op . get_attr ( ' _class ' ) <nl> + except ValueError : <nl> + # No _class attr <nl> + return <nl> + for val in class_values : <nl> + val = compat . as_str ( val ) <nl> + if val . startswith ( ' loc : @ ' ) : <nl> + colocation_node_name = val [ len ( ' loc : @ ' ) : ] <nl> + if colocation_node_name ! = op . name : <nl> + colocation_names . 
append ( colocation_node_name ) <nl> + return colocation_names <nl> <nl> <nl> def _GatherReturnElements ( requested_return_elements , graph , results ) : <nl> mmm a / tensorflow / python / framework / importer_test . py <nl> ppp b / tensorflow / python / framework / importer_test . py <nl> def testNamePrefixColocationAttrs ( self ) : <nl> b . node_def . attr [ " _class " ] ) <nl> <nl> def testColocationWithDeviceFn ( self ) : <nl> - if ops . _USE_C_API : return # TODO ( skyewm ) : make this work with C API <nl> - <nl> original_graph_def = self . _MakeGraphDef ( " " " <nl> node { name : ' A ' op : ' None ' attr { <nl> key : ' _class ' <nl> def CustomDeviceFn ( op ) : <nl> <nl> with ops . Graph ( ) . as_default ( ) : <nl> with ops . device ( CustomDeviceFn ) : <nl> - b , = importer . import_graph_def ( <nl> - original_graph_def , return_elements = [ " B " ] , name = " imported_graph " ) <nl> - <nl> - self . assertProtoEqualsVersion ( " " " <nl> - node { name : ' imported_graph / A ' op : ' None ' device : " / device : A : 0 " <nl> - attr { <nl> - key : ' _class ' value { list { s : ' loc : @ imported_graph / A ' } } <nl> - } <nl> - } <nl> - node { name : ' imported_graph / B ' op : ' None ' device : " / device : A : 0 " <nl> - attr { <nl> - key : ' _class ' value { list { s : ' loc : @ imported_graph / A ' } } <nl> - } } " " " , b . graph . as_graph_def ( ) ) <nl> - <nl> - # Test a scenario where ' A ' doesn ' t get a device ; ' A ' should <nl> - # not have a device , but during runtime will get colocated with <nl> - # ' B ' because of the colocation attribute . <nl> + a , b = importer . import_graph_def ( original_graph_def , <nl> + return_elements = [ " A " , " B " ] , <nl> + name = " imported_graph " ) <nl> + self . assertEqual ( a . device , " / device : A : 0 " ) <nl> + self . assertEqual ( b . device , " / device : A : 0 " ) <nl> + self . assertEqual ( a . colocation_groups ( ) , [ b " loc : @ imported_graph / A " ] ) <nl> + self . assertEqual ( b . colocation_groups ( ) , [ b " loc : @ imported_graph / A " ] ) <nl> + <nl> + # Test a scenario where ' A ' doesn ' t get a device ; ' A ' should not have a <nl> + # device , but during runtime will get colocated with ' B ' because of the <nl> + # colocation attribute . B ' s device function is still overridden by A . <nl> def BDeviceFn ( op ) : <nl> if " B " in op . name : <nl> return " / device : B : 0 " <nl> def BDeviceFn ( op ) : <nl> <nl> with ops . Graph ( ) . as_default ( ) : <nl> with ops . device ( BDeviceFn ) : <nl> - b , = importer . import_graph_def ( <nl> - original_graph_def , return_elements = [ " B " ] , name = " imported_graph " ) <nl> - <nl> - self . assertProtoEqualsVersion ( " " " <nl> - node { name : ' imported_graph / A ' op : ' None ' <nl> - attr { <nl> - key : ' _class ' value { list { s : ' loc : @ imported_graph / A ' } } <nl> - } <nl> - } <nl> - node { name : ' imported_graph / B ' op : ' None ' <nl> - attr { <nl> - key : ' _class ' value { list { s : ' loc : @ imported_graph / A ' } } <nl> - } } " " " , b . graph . as_graph_def ( ) ) <nl> + a , b = importer . import_graph_def ( original_graph_def , <nl> + return_elements = [ " A " , " B " ] , <nl> + name = " imported_graph " ) <nl> + self . assertEqual ( a . device , " " ) <nl> + self . assertEqual ( b . device , " " ) <nl> + self . assertEqual ( a . colocation_groups ( ) , [ b " loc : @ imported_graph / A " ] ) <nl> + self . assertEqual ( b . 
colocation_groups ( ) , [ b " loc : @ imported_graph / A " ] ) <nl> <nl> # Only A gets a device , so B inherits it implicitly . <nl> def ADeviceFn ( op ) : <nl> def ADeviceFn ( op ) : <nl> <nl> with ops . Graph ( ) . as_default ( ) : <nl> with ops . device ( ADeviceFn ) : <nl> - b , = importer . import_graph_def ( <nl> - original_graph_def , return_elements = [ " B " ] , name = " imported_graph " ) <nl> - <nl> - self . assertProtoEqualsVersion ( " " " <nl> - node { name : ' imported_graph / A ' op : ' None ' device : " / device : A : 0 " <nl> - attr { <nl> - key : ' _class ' value { list { s : ' loc : @ imported_graph / A ' } } <nl> - } <nl> - } <nl> - node { name : ' imported_graph / B ' op : ' None ' device : " / device : A : 0 " <nl> - attr { <nl> - key : ' _class ' value { list { s : ' loc : @ imported_graph / A ' } } <nl> - } } " " " , b . graph . as_graph_def ( ) ) <nl> + a , b = importer . import_graph_def ( original_graph_def , <nl> + return_elements = [ " A " , " B " ] , <nl> + name = " imported_graph " ) <nl> + self . assertEqual ( a . device , " / device : A : 0 " ) <nl> + self . assertEqual ( b . device , " / device : A : 0 " ) <nl> + self . assertEqual ( a . colocation_groups ( ) , [ b " loc : @ imported_graph / A " ] ) <nl> + self . assertEqual ( b . colocation_groups ( ) , [ b " loc : @ imported_graph / A " ] ) <nl> <nl> def testMultipleColocationWithDeviceFn ( self ) : <nl> - if ops . _USE_C_API : return # TODO ( skyewm ) : make this work with C API <nl> - <nl> original_graph_def = self . _MakeGraphDef ( " " " <nl> node { name : ' A ' op : ' None ' } <nl> node { name : ' B ' op : ' None ' } <nl> def CustomDeviceFn ( op ) : <nl> <nl> with ops . Graph ( ) . as_default ( ) : <nl> with ops . device ( CustomDeviceFn ) : <nl> - c , = importer . import_graph_def ( <nl> - original_graph_def , return_elements = [ " C " ] , name = " imported_graph " ) <nl> - <nl> - self . assertProtoEqualsVersion ( " " " <nl> - node { name : ' imported_graph / A ' op : ' None ' } <nl> - node { name : ' imported_graph / B ' op : ' None ' device : " / device : B : 0 " } <nl> - node { name : ' imported_graph / C ' op : ' None ' device : " / device : B : 0 " <nl> - attr { <nl> - key : ' _class ' value { <nl> - list { s : ' loc : @ imported_graph / A ' <nl> - s : ' loc : @ imported_graph / B ' } <nl> - } <nl> - } <nl> - } " " " , c . graph . as_graph_def ( ) ) <nl> + a , b , c = importer . import_graph_def ( original_graph_def , <nl> + return_elements = [ " A " , " B " , " C " ] , <nl> + name = " imported_graph " ) <nl> + self . assertEqual ( a . device , " " ) <nl> + self . assertEqual ( b . device , " / device : B : 0 " ) <nl> + self . assertEqual ( c . device , " / device : B : 0 " ) <nl> + self . assertEqual ( a . colocation_groups ( ) , [ b " loc : @ imported_graph / A " ] ) <nl> + self . assertEqual ( b . colocation_groups ( ) , [ b " loc : @ imported_graph / B " ] ) <nl> + self . assertEqual ( c . colocation_groups ( ) , <nl> + [ b " loc : @ imported_graph / A " , b " loc : @ imported_graph / B " ] ) <nl> <nl> def testNamePrefixColocationAttrsMultipleImport ( self ) : <nl> - if ops . _USE_C_API : return # TODO ( skyewm ) : make this work with C API <nl> + if ops . _USE_C_API : return # TODO ( skyewm ) : set uniquify_names <nl> <nl> original_graph_def = self . _MakeGraphDef ( " " " <nl> node { name : ' A ' op : ' None ' } <nl> def testNamePrefixColocationAttrsMultipleImport ( self ) : <nl> } } " " " , b . graph . 
as_graph_def ( ) ) <nl> <nl> def testNamePrefixColocationAttrsNotFound ( self ) : <nl> - if ops . _USE_C_API : return # TODO ( skyewm ) : make this work with C API <nl> - <nl> original_graph_def = self . _MakeGraphDef ( " " " <nl> node { name : ' B ' op : ' None ' attr { <nl> key : ' _class ' <nl> value { list { s : ' loc : @ A ' } } <nl> } } " " " ) <nl> + <nl> + if ops . _USE_C_API : <nl> + error_msg = " Node ' B ' expects to be colocated with unknown node ' A ' " <nl> + else : <nl> + error_msg = " does not exist during import " <nl> + <nl> with ops . Graph ( ) . as_default ( ) : <nl> - with self . assertRaisesRegexp ( ValueError , " does not exist during import " ) : <nl> + with self . assertRaisesRegexp ( ValueError , error_msg ) : <nl> importer . import_graph_def ( <nl> original_graph_def , return_elements = [ " B " ] , name = " imported_graph " ) <nl> <nl> mmm a / tensorflow / python / framework / ops . py <nl> ppp b / tensorflow / python / framework / ops . py <nl> def create_op ( <nl> compute_device = compute_device ) <nl> return ret <nl> <nl> - def _create_op_from_tf_operation ( self , c_op ) : <nl> + def _create_op_from_tf_operation ( self , c_op , compute_device = True ) : <nl> " " " Creates an ` Operation ` in this graph from the supplied TF_Operation . <nl> <nl> This method is like create_op ( ) except the new Operation is constructed <nl> def _create_op_from_tf_operation ( self , c_op ) : <nl> <nl> Args : <nl> c_op : a wrapped TF_Operation <nl> + compute_device : ( Optional . ) If True , device functions will be executed <nl> + to compute the device property of the Operation . <nl> <nl> Returns : <nl> An ` Operation ` object . <nl> def _create_op_from_tf_operation ( self , c_op ) : <nl> for output in tf_outputs ) <nl> control_inputs = self . _control_dependencies_for_inputs ( input_ops ) <nl> ret = Operation ( c_op , self , control_inputs = control_inputs ) <nl> - self . _create_op_helper ( ret ) <nl> + self . _create_op_helper ( ret , compute_device = compute_device ) <nl> return ret <nl> <nl> def _create_op_helper ( self , op , compute_shapes = True , compute_device = True ) : <nl>
Implement Python-specific device and colocation logic in import_graph_def with C API enabled.
tensorflow/tensorflow
976049bb0bcdebe10d0a67f6c843f2b51eb1348c
2017-11-30T17:13:05Z
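A self-contained sketch of the colocation-name parsing that the commit above adds to importer.py: entries of the '_class' attribute shaped like loc:@other_op name the ops a node must be colocated with, and self-references are skipped. The helper below mirrors _GetColocationNames but is standalone for illustration; the op names in the assertion are the ones used in the tests above.

```python
def get_colocation_names(op_name, class_attr_values):
    # '_class' attr entries of the form b'loc:@other_op' name the ops that
    # `op_name` must be colocated with; a reference to itself is ignored.
    names = []
    for val in class_attr_values:
        val = val.decode("utf-8") if isinstance(val, bytes) else val
        if val.startswith("loc:@"):
            target = val[len("loc:@"):]
            if target != op_name:
                names.append(target)
    return names

# Mirrors the test expectations: B is colocated with imported_graph/A.
assert get_colocation_names("imported_graph/B",
                            [b"loc:@imported_graph/A"]) == ["imported_graph/A"]
```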
mmm a / tools / depends / target / Makefile <nl> ppp b / tools / depends / target / Makefile <nl> ifeq ( $ ( OS ) , android ) <nl> EXCLUDED_DEPENDS = libcec libusb <nl> DEPENDS + = dummy - libxbmc libuuid libandroidjni <nl> PYMODULE_DEPS = dummy - libxbmc <nl> - CROSSGUID_DEPS = libuuid <nl> + LIBUUID = libuuid <nl> endif <nl> <nl> DEPENDS : = $ ( filter - out $ ( EXCLUDED_DEPENDS ) , $ ( DEPENDS ) ) <nl> ifeq ( $ ( OS ) , linux ) <nl> endif <nl> DEPENDS + = alsa - lib <nl> ALSA_LIB = alsa - lib <nl> - CROSSGUID_DEPS = libuuid <nl> + LIBUUID = libuuid <nl> ifeq ( $ ( TARGET_PLATFORM ) , $ ( filter $ ( TARGET_PLATFORM ) , raspberry - pi gbm ) ) <nl> DEPENDS + = libxkbcommon libinput libudev libevdev mtdev <nl> endif <nl> all : . installed - $ ( PLATFORM ) <nl> <nl> gettext : $ ( ICONV ) <nl> libgcrypt : libgpg - error <nl> - fontconfig : freetype2 expat $ ( ICONV ) <nl> + fontconfig : freetype2 expat $ ( ICONV ) $ ( LIBUUID ) <nl> curl : openssl nghttp2 <nl> libass : fontconfig fribidi libpng freetype2 expat $ ( ICONV ) <nl> libmicrohttpd : gnutls libgcrypt libgpg - error <nl> pythonmodule - setuptools : $ ( PYMODULE_DEPS ) python27 <nl> libxslt : libgcrypt libxml2 <nl> ffmpeg : $ ( ICONV ) $ ( ZLIB ) bzip2 $ ( FFMPEG_DEPENDS ) <nl> libcec : p8 - platform <nl> - crossguid : $ ( CROSSGUID_DEPS ) <nl> + crossguid : $ ( LIBUUID ) <nl> libdvdnav : libdvdread <nl> libdvdread : libdvdcss <nl> wayland : expat libffi <nl> new file mode 100644 <nl> index 000000000000 . . f3c6cfe3c186 <nl> mmm / dev / null <nl> ppp b / tools / depends / target / fontconfig / 01 - disable - test . patch <nl> <nl> + mmm a / Makefile . am <nl> ppp + b / Makefile . am <nl> + <nl> + SUBDIRS = fontconfig fc - case fc - lang src \ <nl> + fc - cache fc - cat fc - conflist fc - list fc - match \ <nl> + fc - pattern fc - query fc - scan fc - validate conf . d \ <nl> + - its po po - conf test <nl> + + its po po - conf <nl> + if ENABLE_DOCS <nl> + SUBDIRS + = doc <nl> + endif <nl> mmm a / tools / depends / target / fontconfig / Makefile <nl> ppp b / tools / depends / target / fontconfig / Makefile <nl> <nl> include . . / . . / Makefile . include <nl> - DEPS = . . / . . / Makefile . include lconv . patch fix - aarch64_atomics . patch Makefile <nl> + DEPS = . . / . . / Makefile . include 01 - disable - test . patch lconv . patch fix - aarch64_atomics . patch Makefile <nl> <nl> # lib name , version <nl> LIBNAME = fontconfig <nl> - VERSION = 2 . 12 . 4 <nl> + VERSION = 2 . 13 . 1 <nl> SOURCE = $ ( LIBNAME ) - $ ( VERSION ) <nl> ARCHIVE = $ ( SOURCE ) . tar . bz2 <nl> <nl> # configuration settings <nl> CONFIGURE = . / configure - - prefix = $ ( PREFIX ) \ <nl> - - - with - freetype - config = $ ( PREFIX ) / bin / freetype - config \ <nl> - - disable - libxml2 - - disable - docs - - with - arch = $ ( PLATFORM ) - - disable - shared <nl> <nl> LIBDYLIB = $ ( PLATFORM ) / src / . libs / lib $ ( LIBNAME ) . a <nl> $ ( TARBALLS_LOCATION ) / $ ( ARCHIVE ) : <nl> $ ( PLATFORM ) : $ ( TARBALLS_LOCATION ) / $ ( ARCHIVE ) $ ( DEPS ) <nl> rm - rf $ ( PLATFORM ) / * ; mkdir - p $ ( PLATFORM ) <nl> cd $ ( PLATFORM ) ; $ ( ARCHIVE_TOOL ) $ ( ARCHIVE_TOOL_FLAGS ) $ ( TARBALLS_LOCATION ) / $ ( ARCHIVE ) <nl> + cd $ ( PLATFORM ) ; patch - p1 - i . . / 01 - disable - test . patch <nl> cd $ ( PLATFORM ) ; patch - p1 - i . . / lconv . patch <nl> cd $ ( PLATFORM ) ; patch - p1 - i . . / fix - aarch64_atomics . patch <nl> cd $ ( PLATFORM ) ; $ ( AUTORECONF ) - vif <nl> clean : <nl> <nl> distclean : : <nl> rm - rf $ ( PLATFORM ) . 
installed - $ ( PLATFORM ) <nl> - <nl> mmm a / tools / depends / target / fontconfig / lconv . patch <nl> ppp b / tools / depends / target / fontconfig / lconv . patch <nl> <nl> mmm a / src / fcxml . c <nl> ppp b / src / fcxml . c <nl> - <nl> + <nl> static double <nl> FcStrtod ( char * s , char * * end ) <nl> { <nl> <nl> # ifndef __BIONIC__ <nl> struct lconv * locale_data ; <nl> # endif <nl> - <nl> + <nl> else <nl> v = strtod ( s , end ) ; <nl> return v ; <nl>
Merge pull request from Rechi/depends/fontconfig
xbmc/xbmc
2e0f01e1485798934503557b591a758831d98b1d
2019-09-11T09:33:03Z
mmm a / xbmc / music / MusicThumbLoader . cpp <nl> ppp b / xbmc / music / MusicThumbLoader . cpp <nl> bool CMusicThumbLoader : : LoadItemCached ( CFileItem * pItem ) <nl> pItem - > SetArt ( " artist . fanart " , fanart ) ; <nl> pItem - > SetArtFallback ( " fanart " , " artist . fanart " ) ; <nl> } <nl> + else if ( ! pItem - > GetMusicInfoTag ( ) - > GetAlbumArtist ( ) . empty ( ) & & <nl> + pItem - > GetMusicInfoTag ( ) - > GetAlbumArtist ( ) [ 0 ] ! = artist ) <nl> + { <nl> + / / If no artist fanart and the album artist is different to the artist , <nl> + / / try to get fanart from the album artist <nl> + artist = pItem - > GetMusicInfoTag ( ) - > GetAlbumArtist ( ) [ 0 ] ; <nl> + idArtist = m_musicDatabase - > GetArtistByName ( artist ) ; <nl> + if ( idArtist > = 0 ) <nl> + { <nl> + fanart = m_musicDatabase - > GetArtForItem ( idArtist , " artist " , " fanart " ) ; <nl> + if ( ! fanart . empty ( ) ) <nl> + { <nl> + pItem - > SetArt ( " albumartist . fanart " , fanart ) ; <nl> + pItem - > SetArtFallback ( " fanart " , " albumartist . fanart " ) ; <nl> + } <nl> + } <nl> + } <nl> } <nl> m_musicDatabase - > Close ( ) ; <nl> } <nl> bool CMusicThumbLoader : : FillLibraryArt ( CFileItem & item ) <nl> item . SetArt ( " artist . fanart " , fanart ) ; <nl> item . SetArtFallback ( " fanart " , " artist . fanart " ) ; <nl> } <nl> + else if ( tag . GetType ( ) = = " song " ) <nl> + { <nl> + / / If no artist fanart , try for album artist fanart <nl> + fanart = m_musicDatabase - > GetArtistArtForItem ( tag . GetAlbumId ( ) , " album " , " fanart " ) ; <nl> + if ( ! fanart . empty ( ) ) <nl> + { <nl> + item . SetArt ( " albumartist . fanart " , fanart ) ; <nl> + item . SetArtFallback ( " fanart " , " albumartist . fanart " ) ; <nl> + } <nl> + } <nl> } <nl> m_musicDatabase - > Close ( ) ; <nl> } <nl>
Merge pull request from adamreeve/album_artist_fanart
xbmc/xbmc
b4c0464003a525f7d30d110b230a38e250e9b286
2013-10-12T15:56:07Z
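A small Python sketch of the fallback order the MusicThumbLoader change above implements: use the song artist's fanart when it exists, otherwise fall back to the album artist's fanart (registered under "albumartist.fanart") when the album artist differs. Everything here is illustrative; lookup_fanart stands in for the music database lookup, and setdefault stands in for SetArtFallback.

```python
def resolve_song_fanart(artist, album_artist, lookup_fanart, art):
    # Prefer the song artist's fanart; otherwise try the album artist.
    fanart = lookup_fanart(artist)
    if fanart:
        art["artist.fanart"] = fanart
        art.setdefault("fanart", fanart)
    elif album_artist and album_artist != artist:
        fanart = lookup_fanart(album_artist)
        if fanart:
            art["albumartist.fanart"] = fanart
            art.setdefault("fanart", fanart)
    return art

# Illustrative lookup: only the album artist has fanart stored.
art = resolve_song_fanart(
    "Guest Singer", "Main Band",
    lookup_fanart=lambda name: "fanart.jpg" if name == "Main Band" else "",
    art={})
assert art["fanart"] == "fanart.jpg"
```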
mmm a / tensorflow / core / framework / resource_mgr . cc <nl> ppp b / tensorflow / core / framework / resource_mgr . cc <nl> limitations under the License . <nl> <nl> namespace tensorflow { <nl> <nl> + namespace internal { <nl> + <nl> + Status ValidateDevice ( OpKernelContext * ctx , const ResourceHandle & p ) { <nl> + if ( ctx - > device ( ) - > attributes ( ) . name ( ) ! = p . device ( ) ) { <nl> + return errors : : InvalidArgument ( <nl> + " Trying to access resource located in device " , p . device ( ) , <nl> + " from device " , ctx - > device ( ) - > attributes ( ) . name ( ) ) ; <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + } / / end namespace internal <nl> + <nl> + Status ResourceMgr : : InsertDebugTypeName ( uint64 hash_code , <nl> + const string & type_name ) { <nl> + auto iter = debug_type_names_ . emplace ( hash_code , type_name ) ; <nl> + if ( iter . first - > second ! = type_name ) { <nl> + return errors : : AlreadyExists ( " Duplicate hash code found for type " , <nl> + type_name ) ; <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + const char * ResourceMgr : : DebugTypeName ( uint64 hash_code ) const { <nl> + auto type_name_iter = debug_type_names_ . find ( hash_code ) ; <nl> + if ( type_name_iter = = debug_type_names_ . end ( ) ) { <nl> + return " < unknown > " ; <nl> + } else { <nl> + return type_name_iter - > second . c_str ( ) ; <nl> + } <nl> + } <nl> + <nl> ResourceMgr : : ResourceMgr ( ) : default_container_ ( " localhost " ) { } <nl> <nl> ResourceMgr : : ResourceMgr ( const string & default_container ) <nl> string ResourceMgr : : DebugString ( ) const { <nl> const string & container = p . first ; <nl> for ( const auto & q : * p . second ) { <nl> const Key & key = q . first ; <nl> - const char * type = key . first . name ( ) ; <nl> + const char * type = DebugTypeName ( key . first ) ; <nl> const string & resource = key . second ; <nl> Line l { & container , port : : Demangle ( type ) , & resource , <nl> q . second - > DebugString ( ) } ; <nl> Status ResourceMgr : : DoCreate ( const string & container , TypeIndex type , <nl> if ( * b = = nullptr ) { <nl> * b = new Container ; <nl> } <nl> - if ( ( * b ) - > insert ( { { type , name } , resource } ) . second ) { <nl> + if ( ( * b ) - > insert ( { { type . hash_code ( ) , name } , resource } ) . second ) { <nl> + TF_RETURN_IF_ERROR ( InsertDebugTypeName ( type . hash_code ( ) , type . name ( ) ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> } <nl> Status ResourceMgr : : DoLookup ( const string & container , TypeIndex type , <nl> if ( b = = nullptr ) { <nl> return errors : : NotFound ( " Container " , container , " does not exist . " ) ; <nl> } <nl> - auto r = gtl : : FindPtrOrNull ( * b , { type , name } ) ; <nl> + auto r = gtl : : FindPtrOrNull ( * b , { type . hash_code ( ) , name } ) ; <nl> if ( r = = nullptr ) { <nl> return errors : : NotFound ( " Resource " , container , " / " , name , " / " , type . name ( ) , <nl> " does not exist . 
" ) ; <nl> Status ResourceMgr : : DoLookup ( const string & container , TypeIndex type , <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - Status ResourceMgr : : DoDelete ( const string & container , TypeIndex type , <nl> - const string & name ) { <nl> + Status ResourceMgr : : DoDelete ( const string & container , uint64 type_hash_code , <nl> + const string & resource_name , <nl> + const string & type_name ) { <nl> ResourceBase * base = nullptr ; <nl> { <nl> mutex_lock l ( mu_ ) ; <nl> Status ResourceMgr : : DoDelete ( const string & container , TypeIndex type , <nl> if ( b = = nullptr ) { <nl> return errors : : NotFound ( " Container " , container , " does not exist . " ) ; <nl> } <nl> - auto iter = b - > find ( { type , name } ) ; <nl> + auto iter = b - > find ( { type_hash_code , resource_name } ) ; <nl> if ( iter = = b - > end ( ) ) { <nl> - return errors : : NotFound ( " Resource " , container , " / " , name , " / " , <nl> - type . name ( ) , " does not exist . " ) ; <nl> + return errors : : NotFound ( " Resource " , container , " / " , resource_name , " / " , <nl> + type_name , " does not exist . " ) ; <nl> } <nl> base = iter - > second ; <nl> b - > erase ( iter ) ; <nl> Status ResourceMgr : : DoDelete ( const string & container , TypeIndex type , <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> + Status ResourceMgr : : DoDelete ( const string & container , TypeIndex type , <nl> + const string & resource_name ) { <nl> + return DoDelete ( container , type . hash_code ( ) , resource_name , type . name ( ) ) ; <nl> + } <nl> + <nl> + Status ResourceMgr : : Delete ( const ResourceHandle & handle ) { <nl> + return DoDelete ( handle . container ( ) , handle . hash_code ( ) , handle . name ( ) , <nl> + " < unknown > " ) ; <nl> + } <nl> + <nl> Status ResourceMgr : : Cleanup ( const string & container ) { <nl> Container * b = nullptr ; <nl> { <nl> ResourceHandle HandleFromInput ( OpKernelContext * ctx , int input ) { <nl> return ctx - > input ( input ) . flat < ResourceHandle > ( ) ( 0 ) ; <nl> } <nl> <nl> + Status DeleteResource ( OpKernelContext * ctx , const ResourceHandle & p ) { <nl> + TF_RETURN_IF_ERROR ( internal : : ValidateDevice ( ctx , p ) ) ; <nl> + return ctx - > resource_manager ( ) - > Delete ( p ) ; <nl> + } <nl> + <nl> } / / end namespace tensorflow <nl> mmm a / tensorflow / core / framework / resource_mgr . h <nl> ppp b / tensorflow / core / framework / resource_mgr . h <nl> class ResourceMgr { <nl> template < typename T > <nl> Status Delete ( const string & container , const string & name ) TF_MUST_USE_RESULT ; <nl> <nl> + / / Deletes the resource pointed by " handle " . <nl> + Status Delete ( const ResourceHandle & handle ) TF_MUST_USE_RESULT ; <nl> + <nl> / / Deletes all resources from the " container " and removes the container . <nl> Status Cleanup ( const string & container ) TF_MUST_USE_RESULT ; <nl> <nl> class ResourceMgr { <nl> string DebugString ( ) const ; <nl> <nl> private : <nl> - typedef std : : pair < TypeIndex , string > Key ; <nl> + typedef std : : pair < uint64 , string > Key ; <nl> struct KeyHash { <nl> std : : size_t operator ( ) ( const Key & k ) const { <nl> - return Hash64 ( k . second . data ( ) , k . second . size ( ) , k . first . hash_code ( ) ) ; <nl> + return Hash64 ( k . second . data ( ) , k . second . size ( ) , k . 
first ) ; <nl> } <nl> } ; <nl> struct KeyEqual { <nl> class ResourceMgr { <nl> ResourceBase * resource ) TF_MUST_USE_RESULT ; <nl> Status DoLookup ( const string & container , TypeIndex type , const string & name , <nl> ResourceBase * * resource ) const TF_MUST_USE_RESULT ; <nl> + Status DoDelete ( const string & container , uint64 type_hash_code , <nl> + const string & resource_name , <nl> + const string & type_name ) TF_MUST_USE_RESULT ; <nl> Status DoDelete ( const string & container , TypeIndex type , <nl> - const string & name ) TF_MUST_USE_RESULT ; <nl> + const string & resource_name ) TF_MUST_USE_RESULT ; <nl> + <nl> + / / Inserts the type name for ' hash_code ' into the hash_code to type name map . <nl> + Status InsertDebugTypeName ( uint64 hash_code , const string & type_name ) <nl> + EXCLUSIVE_LOCKS_REQUIRED ( mu_ ) TF_MUST_USE_RESULT ; <nl> + <nl> + / / Returns the type name for the ' hash_code ' . <nl> + / / Returns " < unknown > " if a resource with such a type was never inserted into <nl> + / / the container . <nl> + const char * DebugTypeName ( uint64 hash_code ) const <nl> + EXCLUSIVE_LOCKS_REQUIRED ( mu_ ) ; <nl> + <nl> + / / Map from type hash_code to type name . <nl> + std : : unordered_map < uint64 , string > debug_type_names_ GUARDED_BY ( mu_ ) ; <nl> <nl> TF_DISALLOW_COPY_AND_ASSIGN ( ResourceMgr ) ; <nl> } ; <nl> Status LookupOrCreateResource ( OpKernelContext * ctx , const ResourceHandle & p , <nl> template < typename T > <nl> Status DeleteResource ( OpKernelContext * ctx , const ResourceHandle & p ) ; <nl> <nl> + / / Same as above , but uses the hash code of the type directly . <nl> + / / The type name information will be missing in the debug output when the <nl> + / / resource is not present in the container . <nl> + Status DeleteResource ( OpKernelContext * ctx , const ResourceHandle & p ) ; <nl> + <nl> / / Policy helper to decide which container / shared_name to use for a <nl> / / stateful kernel that accesses shared resource . <nl> class ContainerInfo { <nl> ResourceHandle MakePerStepResourceHandle ( OpKernelContext * ctx , <nl> <nl> namespace internal { <nl> <nl> + Status ValidateDevice ( OpKernelContext * ctx , const ResourceHandle & p ) ; <nl> + <nl> template < typename T > <nl> Status ValidateDeviceAndType ( OpKernelContext * ctx , const ResourceHandle & p ) { <nl> - if ( ctx - > device ( ) - > attributes ( ) . name ( ) ! = p . device ( ) ) { <nl> - return errors : : InvalidArgument ( <nl> - " Trying to access resource located in device " , p . device ( ) , <nl> - " from device " , ctx - > device ( ) - > attributes ( ) . name ( ) ) ; <nl> - } <nl> + TF_RETURN_IF_ERROR ( internal : : ValidateDevice ( ctx , p ) ) ; <nl> auto type_index = MakeTypeIndex < T > ( ) ; <nl> if ( type_index . hash_code ( ) ! = p . hash_code ( ) ) { <nl> return errors : : InvalidArgument ( <nl> Status DeleteResource ( OpKernelContext * ctx , const ResourceHandle & p ) { <nl> return ctx - > resource_manager ( ) - > Delete < T > ( p . container ( ) , p . name ( ) ) ; <nl> } <nl> <nl> + Status DeleteResource ( OpKernelContext * ctx , const ResourceHandle & p ) ; <nl> + <nl> template < typename T > <nl> void IsResourceInitialized < T > : : Compute ( OpKernelContext * ctx ) { <nl> Tensor * output ; <nl> mmm a / tensorflow / core / framework / resource_mgr_test . cc <nl> ppp b / tensorflow / core / framework / resource_mgr_test . 
cc <nl> TEST ( ResourceHandleTest , DifferentType ) { <nl> r - > Unref ( ) ; <nl> } <nl> <nl> + TEST ( ResourceHandleTest , DeleteUsingResourceHandle ) { <nl> + ResourceMgr resource_mgr ( " " ) ; <nl> + OpKernelContext : : Params params ; <nl> + params . resource_manager = & resource_mgr ; <nl> + StubDevice device ( " device_name " ) ; <nl> + params . device = & device ; <nl> + OpKernelContext ctx ( & params , 0 ) ; <nl> + <nl> + ResourceHandle p = <nl> + MakeResourceHandle < StubResource > ( & ctx , " container " , " name " ) ; <nl> + <nl> + StubResource * r = new StubResource ; <nl> + TF_EXPECT_OK ( CreateResource ( & ctx , p , r ) ) ; <nl> + <nl> + StubResource * lookup_r = nullptr ; <nl> + TF_EXPECT_OK ( LookupResource < StubResource > ( & ctx , p , & lookup_r ) ) ; <nl> + EXPECT_EQ ( lookup_r , r ) ; <nl> + <nl> + TF_EXPECT_OK ( DeleteResource ( & ctx , p ) ) ; <nl> + EXPECT_NE ( LookupResource < StubResource > ( & ctx , p , & lookup_r ) . ok ( ) , true ) ; <nl> + r - > Unref ( ) ; <nl> + } <nl> + <nl> } / / end namespace tensorflow <nl> mmm a / tensorflow / core / kernels / resource_variable_ops . cc <nl> ppp b / tensorflow / core / kernels / resource_variable_ops . cc <nl> class ReadVariableOp : public OpKernel { <nl> } <nl> } ; <nl> <nl> + class DestroyResourceOp : public OpKernel { <nl> + public : <nl> + explicit DestroyResourceOp ( OpKernelConstruction * ctx ) : OpKernel ( ctx ) { } <nl> + <nl> + void Compute ( OpKernelContext * ctx ) override { <nl> + OP_REQUIRES_OK ( ctx , DeleteResource ( ctx , HandleFromInput ( ctx , 0 ) ) ) ; <nl> + } <nl> + } ; <nl> + <nl> + REGISTER_KERNEL_BUILDER ( Name ( " DestroyResourceOp " ) . Device ( DEVICE_CPU ) , <nl> + DestroyResourceOp ) ; <nl> + <nl> / / TODO ( apassos ) register for the GPU as well . <nl> # define REGISTER_KERNELS ( type ) \ <nl> REGISTER_KERNEL_BUILDER ( \ <nl> mmm a / tensorflow / core / ops / resource_variable_ops . cc <nl> ppp b / tensorflow / core / ops / resource_variable_ops . cc <nl> <nl> / / limitations under the License . <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> <nl> + # include " tensorflow / core / framework / common_shape_fns . h " <nl> # include " tensorflow / core / framework / node_def_util . h " <nl> # include " tensorflow / core / framework / op . h " <nl> # include " tensorflow / core / framework / resource_mgr . h " <nl> resource : handle to the resource in which to store the variable . <nl> dtype : the dtype of the value . <nl> ) " ) ; <nl> <nl> + REGISTER_OP ( " DestroyResourceOp " ) <nl> + . Input ( " resource : resource " ) <nl> + . SetIsStateful ( ) <nl> + . SetShapeFn ( shape_inference : : NoOutputs ) <nl> + . Doc ( R " ( <nl> + Deletes the resource specified by the handle . <nl> + <nl> + All subsequent operations using the resource will result in a NotFound <nl> + error status . <nl> + ) " ) ; <nl> + <nl> Status CreateAssignShapeFn ( InferenceContext * c ) { <nl> DataType handle_dtype = c - > input_handle_dtype ( 0 ) ; <nl> DataType value_dtype ; <nl> mmm a / tensorflow / python / kernel_tests / resource_variable_ops_test . py <nl> ppp b / tensorflow / python / kernel_tests / resource_variable_ops_test . py <nl> def testAssignSubMethod ( self ) : <nl> v . assign_sub ( 1 . 0 ) . eval ( ) <nl> self . assertEqual ( 2 . 0 , v . value ( ) . eval ( ) ) <nl> <nl> + def testDestroyResource ( self ) : <nl> + with self . test_session ( ) as sess : <nl> + v = resource_variable_ops . 
ResourceVariable ( 3 . 0 ) <nl> + variables . global_variables_initializer ( ) . run ( ) <nl> + self . assertEqual ( 3 . 0 , v . value ( ) . eval ( ) ) <nl> + sess . run ( resource_variable_ops . destroy_resource_op ( v . handle ) ) <nl> + with self . assertRaises ( errors . NotFoundError ) : <nl> + v . value ( ) . eval ( ) <nl> + <nl> + <nl> if __name__ = = " __main__ " : <nl> test . main ( ) <nl>
Added an op to delete a resource from a container .
tensorflow/tensorflow
d0897c6eacb70bd676b637be977b221a133b932f
2017-01-30T23:11:50Z
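A minimal usage sketch of the behavior this commit adds, following the Python test included in the diff (it assumes the TensorFlow 1.x graph/session workflow; destroy_resource_op is the generated wrapper for the new DestroyResourceOp kernel, exercised the same way in resource_variable_ops_test.py):

    import tensorflow as tf
    from tensorflow.python.ops import resource_variable_ops

    v = resource_variable_ops.ResourceVariable(3.0)
    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      assert sess.run(v.value()) == 3.0
      # DestroyResourceOp deletes the variable's resource from its container
      # via the resource handle, without needing the resource's static type.
      sess.run(resource_variable_ops.destroy_resource_op(v.handle))
      try:
        sess.run(v.value())        # any later use of the handle now fails
      except tf.errors.NotFoundError:
        pass                       # expected: the resource no longer exists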
mmm a / client / parallel . cpp <nl> ppp b / client / parallel . cpp <nl> namespace mongo { <nl> / / TODO : should do some simplification here if possibl ideally <nl> } <nl> <nl> - BSONObj ClusteredCursor : : explain ( ) { <nl> + void ClusteredCursor : : explain ( BSONObjBuilder & b ) { <nl> / / Note : by default we filter out allPlans and oldPlan in the shell ' s <nl> / / explain ( ) function . If you add any recursive structures , make sure to <nl> / / edit the JS to make sure everything gets filtered . <nl> <nl> - BSONObjBuilder b ; <nl> b . append ( " clusteredType " , type ( ) ) ; <nl> <nl> long long millis = 0 ; <nl> namespace mongo { <nl> for ( map < string , long long > : : iterator i = counters . begin ( ) ; i ! = counters . end ( ) ; + + i ) <nl> b . appendNumber ( i - > first , i - > second ) ; <nl> <nl> - b . appendNumber ( " millisTotal " , millis ) ; <nl> - b . append ( " millisAvg " , ( int ) ( ( double ) millis / numExplains ) ) ; <nl> + b . appendNumber ( " millisShardTotal " , millis ) ; <nl> + b . append ( " millisShardAvg " , ( int ) ( ( double ) millis / numExplains ) ) ; <nl> b . append ( " numQueries " , ( int ) numExplains ) ; <nl> b . append ( " numShards " , ( int ) out . size ( ) ) ; <nl> - <nl> - return b . obj ( ) ; <nl> } <nl> <nl> / / mmmmmm - - FilteringClientCursor mmmmmmmmm - - <nl> mmm a / client / parallel . h <nl> ppp b / client / parallel . h <nl> namespace mongo { <nl> <nl> virtual string type ( ) const = 0 ; <nl> <nl> - virtual BSONObj explain ( ) ; <nl> + virtual void explain ( BSONObjBuilder & b ) ; <nl> <nl> protected : <nl> <nl> mmm a / s / strategy_shard . cpp <nl> ppp b / s / strategy_shard . cpp <nl> namespace mongo { <nl> assert ( cursor ) ; <nl> <nl> try { <nl> + long long start_millis ; <nl> + if ( query . isExplain ( ) ) start_millis = curTimeMillis64 ( ) ; <nl> cursor - > init ( ) ; <nl> <nl> LOG ( 5 ) < < " cursor type : " < < cursor - > type ( ) < < endl ; <nl> shardedCursorTypes . hit ( cursor - > type ( ) ) ; <nl> <nl> if ( query . isExplain ( ) ) { <nl> - BSONObj explain = cursor - > explain ( ) ; <nl> - replyToQuery ( 0 , r . p ( ) , r . m ( ) , explain ) ; <nl> + / / fetch elapsed time for the query <nl> + long long elapsed_millis = curTimeMillis64 ( ) - start_millis ; <nl> + BSONObjBuilder explain_builder ; <nl> + cursor - > explain ( explain_builder ) ; <nl> + explain_builder . appendNumber ( " millis " , elapsed_millis ) ; <nl> + BSONObj b = explain_builder . obj ( ) ; <nl> + <nl> + replyToQuery ( 0 , r . p ( ) , r . m ( ) , b ) ; <nl> delete ( cursor ) ; <nl> return ; <nl> } <nl>
SERVER - 4398 report the actual elapsed time for a sharded query , rather than all the shards ' time added together
mongodb/mongo
d9c699c39246d99454dc16fbb4f24ccae04acce8
2011-12-05T22:17:41Z
mmm a / include / swift / AST / DiagnosticsSema . def <nl> ppp b / include / swift / AST / DiagnosticsSema . def <nl> ERROR ( serialization_missing_single_dependency , Fatal , <nl> " missing required module ' % 0 ' " , ( StringRef ) ) <nl> ERROR ( serialization_missing_dependencies , Fatal , <nl> " missing required modules : % 0 " , ( StringRef ) ) <nl> + ERROR ( serialization_circular_dependency , Fatal , <nl> + " circular dependency between modules ' % 0 ' and % 1 " , <nl> + ( StringRef , Identifier ) ) <nl> ERROR ( serialization_missing_shadowed_module , Fatal , <nl> " cannot load underlying module for % 0 " , ( Identifier ) ) <nl> ERROR ( serialization_name_mismatch , Fatal , <nl> mmm a / include / swift / AST / Module . h <nl> ppp b / include / swift / AST / Module . h <nl> class ModuleDecl : public DeclContext , public TypeDecl { <nl> unsigned TestingEnabled : 1 ; <nl> unsigned FailedToLoad : 1 ; <nl> unsigned ResilienceStrategy : 1 ; <nl> + unsigned HasResolvedImports : 1 ; <nl> } Flags ; <nl> <nl> ModuleDecl ( Identifier name , ASTContext & ctx ) ; <nl> class ModuleDecl : public DeclContext , public TypeDecl { <nl> Flags . FailedToLoad = failed ; <nl> } <nl> <nl> + bool hasResolvedImports ( ) const { <nl> + return Flags . HasResolvedImports ; <nl> + } <nl> + void setHasResolvedImports ( ) { <nl> + Flags . HasResolvedImports = true ; <nl> + } <nl> + <nl> ResilienceStrategy getResilienceStrategy ( ) const { <nl> return ResilienceStrategy ( Flags . ResilienceStrategy ) ; <nl> } <nl> mmm a / include / swift / Serialization / Validation . h <nl> ppp b / include / swift / Serialization / Validation . h <nl> enum class Status { <nl> / / / The module file is an overlay for a Clang module , which can ' t be found . <nl> MissingShadowedModule , <nl> <nl> + / / / The module file depends on a module that is still being loaded , i . e . <nl> + / / / there is a circular dependency . <nl> + CircularDependency , <nl> + <nl> / / / The module file depends on a bridging header that can ' t be loaded . <nl> FailedToLoadBridgingHeader , <nl> <nl> mmm a / lib / AST / ASTContext . cpp <nl> ppp b / lib / AST / ASTContext . cpp <nl> ConstraintCheckerArenaRAII : : ~ ConstraintCheckerArenaRAII ( ) { <nl> static ModuleDecl * createBuiltinModule ( ASTContext & ctx ) { <nl> auto M = ModuleDecl : : create ( ctx . getIdentifier ( BUILTIN_NAME ) , ctx ) ; <nl> M - > addFile ( * new ( ctx ) BuiltinUnit ( * M ) ) ; <nl> + M - > setHasResolvedImports ( ) ; <nl> return M ; <nl> } <nl> <nl> mmm a / lib / AST / Module . cpp <nl> ppp b / lib / AST / Module . cpp <nl> void SourceLookupCache : : invalidate ( ) { <nl> ModuleDecl : : ModuleDecl ( Identifier name , ASTContext & ctx ) <nl> : DeclContext ( DeclContextKind : : Module , nullptr ) , <nl> TypeDecl ( DeclKind : : Module , & ctx , name , SourceLoc ( ) , { } ) , <nl> - Flags ( { 0 , 0 , 0 } ) { <nl> + Flags ( ) { <nl> ctx . addDestructorCleanup ( * this ) ; <nl> setImplicit ( ) ; <nl> setInterfaceType ( ModuleType : : get ( this ) ) ; <nl> mmm a / lib / ClangImporter / ClangImporter . cpp <nl> ppp b / lib / ClangImporter / ClangImporter . cpp <nl> ClangImporter : : create ( ASTContext & ctx , <nl> importer - > Impl . ImportedHeaderUnit = <nl> new ( ctx ) ClangModuleUnit ( * importedHeaderModule , importer - > Impl , nullptr ) ; <nl> importedHeaderModule - > addFile ( * importer - > Impl . ImportedHeaderUnit ) ; <nl> + importedHeaderModule - > setHasResolvedImports ( ) ; <nl> <nl> importer - > Impl . 
IsReadingBridgingPCH = false ; <nl> <nl> ModuleDecl * ClangImporter : : Implementation : : finishLoadingClangModule ( <nl> result = ModuleDecl : : create ( name , SwiftContext ) ; <nl> / / Silence error messages about testably importing a Clang module . <nl> result - > setTestingEnabled ( ) ; <nl> + result - > setHasResolvedImports ( ) ; <nl> <nl> wrapperUnit = <nl> new ( SwiftContext ) ClangModuleUnit ( * result , * this , clangModule ) ; <nl> ClangModuleUnit * ClangImporter : : Implementation : : getWrapperForModule ( <nl> auto wrapper = ModuleDecl : : create ( name , SwiftContext ) ; <nl> / / Silence error messages about testably importing a Clang module . <nl> wrapper - > setTestingEnabled ( ) ; <nl> + wrapper - > setHasResolvedImports ( ) ; <nl> <nl> auto file = new ( SwiftContext ) ClangModuleUnit ( * wrapper , * this , <nl> underlying ) ; <nl> mmm a / lib / Frontend / Frontend . cpp <nl> ppp b / lib / Frontend / Frontend . cpp <nl> void CompilerInstance : : parseAndCheckTypes ( <nl> TypeCheckOptions ) ; <nl> } <nl> <nl> + assert ( llvm : : all_of ( MainModule - > getFiles ( ) , [ ] ( const FileUnit * File ) - > bool { <nl> + auto * SF = dyn_cast < SourceFile > ( File ) ; <nl> + if ( ! SF ) <nl> + return true ; <nl> + return SF - > ASTStage > = SourceFile : : NameBound ; <nl> + } ) & & " some files have not yet had their imports resolved " ) ; <nl> + MainModule - > setHasResolvedImports ( ) ; <nl> + <nl> const auto & options = Invocation . getFrontendOptions ( ) ; <nl> forEachFileToTypeCheck ( [ & ] ( SourceFile & SF ) { <nl> performTypeChecking ( SF , PersistentState . getTopLevelContext ( ) , <nl> mmm a / lib / Sema / SourceLoader . cpp <nl> ppp b / lib / Sema / SourceLoader . cpp <nl> ModuleDecl * SourceLoader : : loadModule ( SourceLoc importLoc , <nl> else <nl> performTypeChecking ( * importFile , persistentState . getTopLevelContext ( ) , <nl> None ) ; <nl> + importMod - > setHasResolvedImports ( ) ; <nl> return importMod ; <nl> } <nl> <nl> mmm a / lib / Serialization / ModuleFile . cpp <nl> ppp b / lib / Serialization / ModuleFile . cpp <nl> Status ModuleFile : : associateWithFileContext ( FileUnit * file , <nl> dependency . Import = { ctx . AllocateCopy ( llvm : : makeArrayRef ( accessPathElem ) ) , <nl> module } ; <nl> } <nl> + <nl> + if ( ! module - > hasResolvedImports ( ) ) { <nl> + / / Notice that we check this condition / after / recording the module that <nl> + / / caused the problem . Clients need to be able to track down what the <nl> + / / cycle was . <nl> + return error ( Status : : CircularDependency ) ; <nl> + } <nl> } <nl> <nl> if ( missingDependency ) { <nl> mmm a / lib / Serialization / SerializedModuleLoader . cpp <nl> ppp b / lib / Serialization / SerializedModuleLoader . cpp <nl> <nl> # include " swift / Strings . h " <nl> # include " swift / AST / ASTContext . h " <nl> # include " swift / AST / DiagnosticsSema . h " <nl> + # include " swift / Basic / Defer . h " <nl> # include " swift / Basic / STLExtras . h " <nl> # include " swift / Basic / SourceManager . h " <nl> # include " swift / Basic / Version . h " <nl> FileUnit * SerializedModuleLoader : : loadAST ( <nl> break ; <nl> } <nl> <nl> + case serialization : : Status : : CircularDependency : { <nl> + auto circularDependencyIter = <nl> + llvm : : find_if ( loadedModuleFile - > getDependencies ( ) , <nl> + [ ] ( const ModuleFile : : Dependency & next ) { <nl> + return ! next . Import . second - > hasResolvedImports ( ) ; <nl> + } ) ; <nl> + assert ( circularDependencyIter ! 
= loadedModuleFile - > getDependencies ( ) . end ( ) <nl> + & & " circular dependency reported , but no module with unresolved " <nl> + " imports found " ) ; <nl> + <nl> + / / FIXME : We should include the path of the circularity as well , but that ' s <nl> + / / hard because we ' re discovering this / while / resolving imports , which <nl> + / / means the problematic modules haven ' t been recorded yet . <nl> + Ctx . Diags . diagnose ( * diagLoc , diag : : serialization_circular_dependency , <nl> + circularDependencyIter - > getPrettyPrintedPath ( ) , <nl> + M . getName ( ) ) ; <nl> + break ; <nl> + } <nl> + <nl> case serialization : : Status : : MissingShadowedModule : { <nl> Ctx . Diags . diagnose ( * diagLoc , diag : : serialization_missing_shadowed_module , <nl> M . getName ( ) ) ; <nl> ModuleDecl * SerializedModuleLoader : : loadModule ( SourceLoc importLoc , <nl> <nl> auto M = ModuleDecl : : create ( moduleID . first , Ctx ) ; <nl> Ctx . LoadedModules [ moduleID . first ] = M ; <nl> + SWIFT_DEFER { M - > setHasResolvedImports ( ) ; } ; <nl> <nl> if ( ! loadAST ( * M , moduleID . second , std : : move ( moduleInputBuffer ) , <nl> std : : move ( moduleDocInputBuffer ) , isFramework ) ) { <nl> new file mode 100644 <nl> index 000000000000 . . 1fbaa5ddf99d <nl> mmm / dev / null <nl> ppp b / test / Serialization / circular - import . swift <nl> <nl> + / / Circularities involving the module currently being type - checked . <nl> + <nl> + / / RUN : % empty - directory ( % t ) <nl> + / / RUN : % target - swift - frontend - emit - module % s - module - name A - o % t <nl> + / / RUN : % target - swift - frontend - emit - module % s - module - name B - D IMPORT_A - I % t - o % t <nl> + / / RUN : not % target - swift - frontend - typecheck % s - module - name A - D IMPORT_B - I % t 2 > & 1 | % FileCheck - check - prefix CHECK - ABA % s <nl> + <nl> + / / RUN : % target - swift - frontend - emit - module % s - module - name C - D IMPORT_B - I % t - o % t <nl> + / / RUN : not % target - swift - frontend - typecheck % s - module - name A - D IMPORT_C - I % t 2 > & 1 | % FileCheck - check - prefix CHECK - ABCA % s <nl> + <nl> + / / Circularities not involving the module currently being type - checked . <nl> + / / RUN : % empty - directory ( % t / plain ) <nl> + / / RUN : % target - swift - frontend - emit - module % s - module - name A - o % t / plain <nl> + / / RUN : % target - swift - frontend - emit - module % s - module - name B - o % t / plain <nl> + / / RUN : % empty - directory ( % t / cycle ) <nl> + / / RUN : % target - swift - frontend - emit - module % s - module - name A - D IMPORT_B - o % t / cycle - I % t / plain <nl> + / / RUN : % target - swift - frontend - emit - module % s - module - name B - D IMPORT_A - o % t / cycle - I % t / plain <nl> + / / RUN : not % target - swift - frontend - typecheck % s - module - name C - D IMPORT_A - I % t / cycle 2 > & 1 | % FileCheck - check - prefix CHECK - ABA - BUILT % s <nl> + <nl> + <nl> + # if IMPORT_A <nl> + import A <nl> + / / CHECK - ABA - BUILT : < unknown > : 0 : error : circular dependency between modules ' A ' and ' B ' <nl> + # endif <nl> + <nl> + # if IMPORT_B <nl> + import B <nl> + / / CHECK - ABA : : [ [ @ LINE - 1 ] ] : 8 : error : circular dependency between modules ' A ' and ' B ' <nl> + # endif <nl> + <nl> + # if IMPORT_C <nl> + import C <nl> + / / CHECK - ABCA : < unknown > : 0 : error : circular dependency between modules ' A ' and ' B ' <nl> + # endif <nl> deleted file mode 100644 <nl> index 09d54b3e2dba . . 
000000000000 <nl> mmm a / test / SourceKit / Indexing / Inputs / cycle - depend / A . response <nl> ppp / dev / null <nl> <nl> - { <nl> - key . hash : < hash > , <nl> - key . dependencies : [ <nl> - { <nl> - key . kind : source . lang . swift . import . module . swift , <nl> - key . name : " B " , <nl> - key . filepath : B . swiftmodule , <nl> - key . hash : < hash > , <nl> - key . dependencies : [ <nl> - { <nl> - key . kind : source . lang . swift . import . module . swift , <nl> - key . name : " A " , <nl> - key . filepath : A . swiftmodule , <nl> - key . hash : < hash > , <nl> - key . dependencies : [ <nl> - { <nl> - key . kind : source . lang . swift . import . module . swift , <nl> - key . name : " B " , <nl> - key . filepath : B . swiftmodule , <nl> - key . hash : < hash > <nl> - } , <nl> - { <nl> - key . kind : source . lang . swift . import . module . swift , <nl> - key . name : " Swift " , <nl> - key . filepath : Swift . swiftmodule , <nl> - key . hash : < hash > , <nl> - key . is_system : 1 <nl> - } , <nl> - { <nl> - key . kind : source . lang . swift . import . module . swift , <nl> - key . name : " SwiftOnoneSupport " , <nl> - key . filepath : SwiftOnoneSupport . swiftmodule , <nl> - key . hash : < hash > , <nl> - key . dependencies : [ <nl> - { <nl> - key . kind : source . lang . swift . import . module . swift , <nl> - key . name : " Swift " , <nl> - key . filepath : Swift . swiftmodule , <nl> - key . hash : < hash > , <nl> - key . is_system : 1 <nl> - } <nl> - ] <nl> - } <nl> - ] <nl> - } , <nl> - { <nl> - key . kind : source . lang . swift . import . module . swift , <nl> - key . name : " Swift " , <nl> - key . filepath : Swift . swiftmodule , <nl> - key . hash : < hash > , <nl> - key . is_system : 1 <nl> - } , <nl> - { <nl> - key . kind : source . lang . swift . import . module . swift , <nl> - key . name : " SwiftOnoneSupport " , <nl> - key . filepath : SwiftOnoneSupport . swiftmodule , <nl> - key . hash : < hash > <nl> - } <nl> - ] <nl> - } , <nl> - { <nl> - key . kind : source . lang . swift . import . module . swift , <nl> - key . name : " Swift " , <nl> - key . filepath : Swift . swiftmodule , <nl> - key . hash : < hash > , <nl> - key . is_system : 1 <nl> - } , <nl> - { <nl> - key . kind : source . lang . swift . import . module . swift , <nl> - key . name : " SwiftOnoneSupport " , <nl> - key . filepath : SwiftOnoneSupport . swiftmodule , <nl> - key . hash : < hash > <nl> - } <nl> - ] , <nl> - key . entities : [ <nl> - { <nl> - key . kind : source . lang . swift . decl . class , <nl> - key . name : " A " , <nl> - key . usr : " s : 1AAAC " , <nl> - key . entities : [ <nl> - { <nl> - key . kind : source . lang . swift . decl . var . instance , <nl> - key . name : " x " , <nl> - key . usr : " s : 1AAAC1x1BADCvp " , <nl> - key . entities : [ <nl> - { <nl> - key . kind : source . lang . swift . decl . function . accessor . getter , <nl> - key . usr : " s : 1AAAC1x1BADCvg " , <nl> - key . is_dynamic : 1 <nl> - } , <nl> - { <nl> - key . kind : source . lang . swift . decl . function . accessor . setter , <nl> - key . usr : " s : 1AAAC1x1BADCvs " , <nl> - key . is_dynamic : 1 <nl> - } <nl> - ] <nl> - } , <nl> - { <nl> - key . kind : source . lang . swift . decl . function . constructor , <nl> - key . usr : " s : 1AAACABycfc " <nl> - } <nl> - ] <nl> - } <nl> - ] <nl> - } <nl> deleted file mode 100644 <nl> index b15d3a3da981 . . 000000000000 <nl> mmm a / test / SourceKit / Indexing / Inputs / cycle - depend / A . 
swift <nl> ppp / dev / null <nl> <nl> - import B <nl> - <nl> - public class A { <nl> - var x : B = B ( ) <nl> - } <nl> deleted file mode 100644 <nl> index 96ff9e05e86c . . 000000000000 <nl> mmm a / test / SourceKit / Indexing / Inputs / cycle - depend / B . swift <nl> ppp / dev / null <nl> <nl> - import A <nl> - <nl> - public class B { <nl> - var x : A ? <nl> - public init ( ) { } <nl> - } <nl> deleted file mode 100644 <nl> index 71f69805336d . . 000000000000 <nl> mmm a / test / SourceKit / Indexing / index_module_cycle . swift <nl> ppp / dev / null <nl> <nl> - / / RUN : % empty - directory ( % t ) <nl> - / / RUN : % swift - emit - module - o % t % S / Inputs / cycle - depend / A . swift - I % S / Inputs / cycle - depend - enable - source - import <nl> - / / RUN : % swift - emit - module - o % t % S / Inputs / cycle - depend / B . swift - I % S / Inputs / cycle - depend - enable - source - import <nl> - <nl> - / / RUN : % sourcekitd - test - req = index % t / A . swiftmodule - - % t / A . swiftmodule - I % t | % sed_clean > % t . response <nl> - / / RUN : diff - u % S / Inputs / cycle - depend / A . response % t . response <nl> mmm a / test / SourceKit / Indexing / index_module_missing_depend . swift <nl> ppp b / test / SourceKit / Indexing / index_module_missing_depend . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> - / / RUN : % swift - emit - module - o % t % S / Inputs / cycle - depend / A . swift - I % S / Inputs / cycle - depend - enable - source - import <nl> + / / RUN : % target - swift - frontend - emit - module - o % t % S / . . / . . / Inputs / empty . swift <nl> + / / RUN : % target - swift - frontend - emit - module - o % t % s - I % t - module - name A <nl> + / / RUN : rm % t / empty . swiftmodule <nl> <nl> / / RUN : not % sourcekitd - test - req = index % t / A . swiftmodule - - % t / A . swiftmodule 2 > & 1 | % FileCheck % s <nl> <nl> + import empty <nl> + <nl> / / FIXME : Report the reason we couldn ' t load a module . <nl> / / CHECK - DISABLED : error response ( Request Failed ) : missing module dependency <nl> / / CHECK : error response ( Request Failed ) : failed to load module <nl> mmm a / tools / SourceKit / lib / SwiftLang / SwiftIndexing . cpp <nl> ppp b / tools / SourceKit / lib / SwiftLang / SwiftIndexing . cpp <nl> static void indexModule ( llvm : : MemoryBuffer * Input , <nl> IdxConsumer . failed ( " failed to load module " ) ; <nl> return ; <nl> } <nl> + <nl> + Mod - > setHasResolvedImports ( ) ; <nl> } <nl> <nl> / / Setup a typechecker for protocol conformance resolving . <nl>
Diagnose modules with circular dependencies ( )
apple/swift
df2e63d07da1bc0bb95f89a52685c147662daa40
2018-05-02T22:01:09Z
mmm a / PowerEditor / installer / nativeLang / swedish . xml <nl> ppp b / PowerEditor / installer / nativeLang / swedish . xml <nl> <nl> < Item id = " 1 " name = " Sök nästa " / > <nl> < Item id = " 1722 " name = " Sök föregående " / > <nl> < Item id = " 2 " name = " Stäng " / > <nl> - < Item id = " 1620 " name = " Sök & amp ; efter : " / > <nl> + < Item id = " 1620 " name = " & amp ; Sök efter : " / > <nl> < Item id = " 1603 " name = " Matcha & amp ; hela ord " / > <nl> < Item id = " 1604 " name = " & amp ; Matcha små / STORA bokstäver " / > <nl> < Item id = " 1605 " name = " & amp ; Reguljärt uttryck " / > <nl> - < Item id = " 1606 " name = " & amp ; Loopa " / > <nl> - < Item id = " 1614 " name = " & amp ; Antal sökträffar " / > <nl> + < Item id = " 1606 " name = " L & amp ; oopa " / > <nl> + < Item id = " 1614 " name = " Anta & amp ; l sökträffar " / > <nl> < Item id = " 1615 " name = " Hitta alla " / > <nl> < Item id = " 1616 " name = " & amp ; Bokmärk rad " / > <nl> < Item id = " 1618 " name = " Rensa tidigare markering vid varje sökning " / > <nl> <nl> < Item id = " 1624 " name = " Sökläge " / > <nl> < Item id = " 1625 " name = " & amp ; Normal " / > <nl> < Item id = " 1626 " name = " & amp ; Utökat ( \ n , \ r , \ t , \ 0 , \ x . . . ) " / > <nl> - < Item id = " 1660 " name = " Ersätt i & amp ; filer " / > <nl> + < Item id = " 1660 " name = " Ersätt i f & amp ; iler " / > <nl> < Item id = " 1661 " name = " Följ aktuellt dok . " / > <nl> < Item id = " 1641 " name = " Sök alla i aktuellt dokument " / > <nl> < Item id = " 1686 " name = " & amp ; Transparens " / > <nl>
Adjusted a few Swedish shortcuts in the Find dialog to avoid clashes .
notepad-plus-plus/notepad-plus-plus
3e993ff4c71f03ae85becca9c239671714c62993
2019-05-30T14:42:04Z
mmm a / atom / browser / ui / tray_icon_gtk . cc <nl> ppp b / atom / browser / ui / tray_icon_gtk . cc <nl> void TrayIconGtk : : SetContextMenu ( ui : : SimpleMenuModel * menu_model ) { <nl> } <nl> <nl> void TrayIconGtk : : OnClick ( ) { <nl> + NotifyClicked ( ) ; <nl> } <nl> <nl> bool TrayIconGtk : : HasClickAction ( ) { <nl>
gtk : Notify the click event
electron/electron
f31bfab127c47b3419b6ea787658e819d1e8be93
2015-05-21T07:22:52Z
mmm a / xbmc / video / tags / VideoTagLoaderNFO . cpp <nl> ppp b / xbmc / video / tags / VideoTagLoaderNFO . cpp <nl> std : : string CVideoTagLoaderNFO : : FindNFO ( const CFileItem & item , <nl> if ( URIUtils : : IsInRAR ( item . GetPath ( ) ) ) / / we have a rarred item - we want to check outside the rars <nl> { <nl> CFileItem item2 ( item ) ; <nl> - CURL url ( m_item . GetPath ( ) ) ; <nl> + CURL url ( item . GetPath ( ) ) ; <nl> std : : string strPath = URIUtils : : GetDirectory ( url . GetHostName ( ) ) ; <nl> item2 . SetPath ( URIUtils : : AddFileToFolder ( strPath , <nl> URIUtils : : GetFileName ( item . GetPath ( ) ) ) ) ; <nl>
Merge pull request from Paxxi / stackoverflow
xbmc/xbmc
4354cc1e97053a248e623c37465db405e3aeb8ac
2020-03-04T07:27:50Z
mmm a / tensorflow / contrib / linear_optimizer / python / ops / sdca_ops . py <nl> ppp b / tensorflow / contrib / linear_optimizer / python / ops / sdca_ops . py <nl> <nl> from tensorflow . python . framework . load_library import load_op_library <nl> from tensorflow . python . framework . ops import convert_to_tensor <nl> from tensorflow . python . framework . ops import name_scope <nl> + from tensorflow . python . framework . ops import op_scope <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import control_flow_ops <nl> from tensorflow . python . ops import math_ops <nl> + from tensorflow . python . ops import state_ops <nl> from tensorflow . python . ops import variables as var_ops <nl> from tensorflow . python . ops . nn import sigmoid_cross_entropy_with_logits <nl> from tensorflow . python . platform import resource_loader <nl> def _maybe_load_sdca_ops ( ) : <nl> assert _sdca_ops , ' Could not load _sdca_ops . so ' <nl> <nl> <nl> + # TODO ( rohananil ) : add op_scope to appropriate methods . <nl> class SdcaModel ( object ) : <nl> " " " Stochastic dual coordinate ascent solver for linear models . <nl> <nl> def predictions ( self , examples ) : <nl> predictions = math_ops . sigmoid ( predictions ) <nl> return predictions <nl> <nl> - def minimize ( self ) : <nl> + def minimize ( self , global_step = None , name = None ) : <nl> " " " Add operations to train a linear model by minimizing the loss function . <nl> <nl> + Args : <nl> + global_step : Optional ` Variable ` to increment by one after the <nl> + variables have been updated . <nl> + name : Optional name for the returned operation . <nl> + <nl> Returns : <nl> An Operation that updates the variables passed in the constructor . <nl> " " " <nl> - with name_scope ( ' sdca / minimize ' ) : <nl> + # Technically , the op depends on a lot more than the variables , <nl> + # but we ' ll keep the list short . <nl> + with op_scope ( [ ] , name , ' sdca / minimize ' ) : <nl> sparse_features_indices = [ ] <nl> sparse_features_values = [ ] <nl> for sf in self . _examples [ ' sparse_features ' ] : <nl> def minimize ( self ) : <nl> assign_ops . append ( var . assign ( slot_var ) ) <nl> assign_group = control_flow_ops . group ( * assign_ops ) <nl> with ops . control_dependencies ( [ assign_group ] ) : <nl> - return _sdca_ops . sdca_shrink_l1 ( <nl> + shrink_l1 = _sdca_ops . sdca_shrink_l1 ( <nl> self . _convert_n_to_tensor ( <nl> self . _variables [ ' sparse_features_weights ' ] , <nl> as_ref = True ) , <nl> def minimize ( self ) : <nl> as_ref = True ) , <nl> l1 = self . _options [ ' symmetric_l1_regularization ' ] , <nl> l2 = self . _symmetric_l2_regularization ( ) ) <nl> + if not global_step : <nl> + return shrink_l1 <nl> + with ops . control_dependencies ( [ shrink_l1 ] ) : <nl> + with ops . colocate_with ( global_step ) : <nl> + return state_ops . assign_add ( global_step , 1 , name = name ) . op <nl> <nl> def approximate_duality_gap ( self ) : <nl> " " " Add operations to compute the approximate duality gap . <nl>
Allowing users to optionally provide a global step tensor and name to SDCA ' s minimize method .
tensorflow/tensorflow
7b4359c41a2f5d099525ae043ba4746ba691e02a
2016-03-18T15:44:28Z
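A short sketch of the call pattern this change enables. It assumes an SdcaModel instance named model has already been constructed with its examples, variables, and options (constructor arguments omitted here), and shows only the new optional global_step and name parameters:

    from tensorflow.python.ops import variables as var_ops

    global_step = var_ops.Variable(0, trainable=False, name='global_step')
    # minimize() still runs the SDCA update and the l1 shrink step; when a
    # global_step variable is supplied, it is incremented by one after those
    # steps complete (via an assign_add colocated with global_step).
    train_op = model.minimize(global_step=global_step, name='sdca_minimize')
    # train_op is then run once per iteration, e.g. sess.run(train_op).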
mmm a / docs / root / intro / deprecated . rst <nl> ppp b / docs / root / intro / deprecated . rst <nl> Deprecated items below are listed in chronological order . <nl> been deprecated in favor of ` compressor ` . <nl> * The statistics counter ` header_gzip ` in : ref : ` HTTP Gzip filter < config_http_filters_gzip > ` <nl> has been deprecated in favor of ` header_compressor_used ` . <nl> + * Support for the undocumented HTTP / 1 . 1 ` : no - chunks ` pseudo - header has been removed . If an extension <nl> + was using this it can achieve the same behavior via the new ` http1StreamEncoderOptions ( ) ` API . <nl> <nl> 1 . 13 . 0 ( January 20 , 2020 ) <nl> = = = = = = = = = = = = = = = = = = = = = = = = = <nl> mmm a / include / envoy / http / codec . h <nl> ppp b / include / envoy / http / codec . h <nl> const char MaxResponseHeadersCountOverrideKey [ ] = <nl> <nl> class Stream ; <nl> <nl> + / * * <nl> + * Stream encoder options specific to HTTP / 1 . <nl> + * / <nl> + class Http1StreamEncoderOptions { <nl> + public : <nl> + virtual ~ Http1StreamEncoderOptions ( ) = default ; <nl> + <nl> + / * * <nl> + * Force disable chunk encoding , even if there is no known content length . This effectively forces <nl> + * HTTP / 1 . 0 behavior in which the connection will need to be closed to indicate end of stream . <nl> + * / <nl> + virtual void disableChunkEncoding ( ) PURE ; <nl> + } ; <nl> + <nl> + using Http1StreamEncoderOptionsOptRef = <nl> + absl : : optional < std : : reference_wrapper < Http1StreamEncoderOptions > > ; <nl> + <nl> / * * <nl> * Encodes an HTTP stream . This interface contains methods common to both the request and response <nl> * path . <nl> class StreamEncoder { <nl> * @ param metadata_map_vector is the vector of metadata maps to encode . <nl> * / <nl> virtual void encodeMetadata ( const MetadataMapVector & metadata_map_vector ) PURE ; <nl> + <nl> + / * * <nl> + * Return the HTTP / 1 stream encoder options if applicable . If the stream is not HTTP / 1 returns <nl> + * absl : : nullopt . <nl> + * / <nl> + virtual Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions ( ) PURE ; <nl> } ; <nl> <nl> / * * <nl> mmm a / include / envoy / http / filter . h <nl> ppp b / include / envoy / http / filter . h <nl> class StreamEncoderFilterCallbacks : public virtual StreamFilterCallbacks { <nl> * @ return the buffer limit the filter should apply . <nl> * / <nl> virtual uint32_t encoderBufferLimit ( ) PURE ; <nl> + <nl> + / * * <nl> + * Return the HTTP / 1 stream encoder options if applicable . If the stream is not HTTP / 1 returns <nl> + * absl : : nullopt . <nl> + * / <nl> + virtual Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions ( ) PURE ; <nl> } ; <nl> <nl> / * * <nl> mmm a / include / envoy / http / header_map . h <nl> ppp b / include / envoy / http / header_map . h <nl> class HeaderEntry { <nl> HEADER_FUNC ( EnvoyAttemptCount ) \ <nl> HEADER_FUNC ( EnvoyDecoratorOperation ) \ <nl> HEADER_FUNC ( KeepAlive ) \ <nl> - HEADER_FUNC ( NoChunks ) \ <nl> HEADER_FUNC ( ProxyConnection ) \ <nl> HEADER_FUNC ( RequestId ) \ <nl> HEADER_FUNC ( TransferEncoding ) \ <nl> mmm a / include / envoy / server / admin . h <nl> ppp b / include / envoy / server / admin . h <nl> class AdminStream { <nl> * request . <nl> * / <nl> virtual const Http : : RequestHeaderMap & getRequestHeaders ( ) const PURE ; <nl> + <nl> + / * * <nl> + * Return the HTTP / 1 stream encoder options if applicable . If the stream is not HTTP / 1 returns <nl> + * absl : : nullopt . 
<nl> + * / <nl> + virtual Http : : Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions ( ) PURE ; <nl> } ; <nl> <nl> / * * <nl> mmm a / source / common / http / codec_wrappers . h <nl> ppp b / source / common / http / codec_wrappers . h <nl> class RequestEncoderWrapper : public RequestEncoder { <nl> <nl> Stream & getStream ( ) override { return inner_ . getStream ( ) ; } <nl> <nl> + Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions ( ) override { <nl> + return inner_ . http1StreamEncoderOptions ( ) ; <nl> + } <nl> + <nl> protected : <nl> RequestEncoderWrapper ( RequestEncoder & inner ) : inner_ ( inner ) { } <nl> <nl> mmm a / source / common / http / conn_manager_impl . h <nl> ppp b / source / common / http / conn_manager_impl . h <nl> class ConnectionManagerImpl : Logger : : Loggable < Logger : : Id : : http > , <nl> ASSERT ( parent_ . state_ . latest_data_encoding_filter_ = = this ) ; <nl> callback ( * parent_ . buffered_response_data_ . get ( ) ) ; <nl> } <nl> + Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions ( ) override { <nl> + / / TODO ( mattklein123 ) : At some point we might want to actually wrap this interface but for now <nl> + / / we give the filter direct access to the encoder options . <nl> + return parent_ . response_encoder_ - > http1StreamEncoderOptions ( ) ; <nl> + } <nl> <nl> void responseDataTooLarge ( ) ; <nl> void responseDataDrained ( ) ; <nl> mmm a / source / common / http / headers . h <nl> ppp b / source / common / http / headers . h <nl> class HeaderValues { <nl> const LowerCaseString KeepAlive { " keep - alive " } ; <nl> const LowerCaseString Location { " location " } ; <nl> const LowerCaseString Method { " : method " } ; <nl> - const LowerCaseString NoChunks { " : no - chunks " } ; / / Illegal pseudo - header used internally . <nl> const LowerCaseString Origin { " origin " } ; <nl> const LowerCaseString OtSpanContext { " x - ot - span - context " } ; <nl> const LowerCaseString Path { " : path " } ; <nl> mmm a / source / common / http / http1 / codec_impl . cc <nl> ppp b / source / common / http / http1 / codec_impl . cc <nl> const std : : string StreamEncoderImpl : : LAST_CHUNK = " 0 \ r \ n " ; <nl> <nl> StreamEncoderImpl : : StreamEncoderImpl ( ConnectionImpl & connection , <nl> HeaderKeyFormatter * header_key_formatter ) <nl> - : connection_ ( connection ) , chunk_encoding_ ( true ) , processing_100_continue_ ( false ) , <nl> - is_response_to_head_request_ ( false ) , is_content_length_allowed_ ( true ) , <nl> - header_key_formatter_ ( header_key_formatter ) { <nl> + : connection_ ( connection ) , disable_chunk_encoding_ ( false ) , chunk_encoding_ ( true ) , <nl> + processing_100_continue_ ( false ) , is_response_to_head_request_ ( false ) , <nl> + is_content_length_allowed_ ( true ) , header_key_formatter_ ( header_key_formatter ) { <nl> if ( connection_ . connection ( ) . aboveHighWatermark ( ) ) { <nl> runHighWatermarkCallbacks ( ) ; <nl> } <nl> void StreamEncoderImpl : : encodeHeadersBase ( const RequestOrResponseHeaderMap & head <nl> / / response . Upper layers generally should strip transfer - encoding since it only applies to <nl> / / HTTP / 1 . 1 . The codec will infer it based on the type of response . <nl> / / for streaming ( e . g . 
SSE stream sent to hystrix dashboard ) , we do not want <nl> - / / chunk transfer encoding but we don ' t have a content - length so we pass " envoy only " <nl> - / / header to avoid adding chunks <nl> + / / chunk transfer encoding but we don ' t have a content - length so disable_chunk_encoding_ is <nl> + / / consulted before enabling chunk encoding . <nl> / / <nl> / / Note that for HEAD requests Envoy does best - effort guessing when there is no <nl> / / content - length . If a client makes a HEAD request for an upstream resource <nl> / / with no bytes but the upstream response doesn ' t include " Content - length : 0 " , <nl> / / Envoy will incorrectly assume a subsequent response to GET will be chunk encoded . <nl> - if ( saw_content_length | | headers . NoChunks ( ) ) { <nl> + if ( saw_content_length | | disable_chunk_encoding_ ) { <nl> chunk_encoding_ = false ; <nl> } else { <nl> if ( processing_100_continue_ ) { <nl> mmm a / source / common / http / http1 / codec_impl . h <nl> ppp b / source / common / http / http1 / codec_impl . h <nl> class ConnectionImpl ; <nl> class StreamEncoderImpl : public virtual StreamEncoder , <nl> public Stream , <nl> Logger : : Loggable < Logger : : Id : : http > , <nl> - public StreamCallbackHelper { <nl> + public StreamCallbackHelper , <nl> + public Http1StreamEncoderOptions { <nl> public : <nl> / / Http : : StreamEncoder <nl> void encodeData ( Buffer : : Instance & data , bool end_stream ) override ; <nl> void encodeMetadata ( const MetadataMapVector & ) override ; <nl> Stream & getStream ( ) override { return * this ; } <nl> + Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions ( ) override { return * this ; } <nl> + <nl> + / / Http : : Http1StreamEncoderOptions <nl> + void disableChunkEncoding ( ) override { disable_chunk_encoding_ = true ; } <nl> <nl> / / Http : : Stream <nl> void addCallbacks ( StreamCallbacks & callbacks ) override { addCallbacks_ ( callbacks ) ; } <nl> class StreamEncoderImpl : public virtual StreamEncoder , <nl> static const std : : string LAST_CHUNK ; <nl> <nl> ConnectionImpl & connection_ ; <nl> + bool disable_chunk_encoding_ : 1 ; <nl> bool chunk_encoding_ : 1 ; <nl> bool processing_100_continue_ : 1 ; <nl> bool is_response_to_head_request_ : 1 ; <nl> mmm a / source / common / http / http2 / codec_impl . h <nl> ppp b / source / common / http / http2 / codec_impl . h <nl> class ConnectionImpl : public virtual Connection , protected Logger : : Loggable < Log <nl> void encodeData ( Buffer : : Instance & data , bool end_stream ) override ; <nl> Stream & getStream ( ) override { return * this ; } <nl> void encodeMetadata ( const MetadataMapVector & metadata_map_vector ) override ; <nl> + Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions ( ) override { return absl : : nullopt ; } <nl> <nl> / / Http : : Stream <nl> void addCallbacks ( StreamCallbacks & callbacks ) override { addCallbacks_ ( callbacks ) ; } <nl> mmm a / source / extensions / quic_listeners / quiche / envoy_quic_client_stream . h <nl> ppp b / source / extensions / quic_listeners / quiche / envoy_quic_client_stream . 
h <nl> class EnvoyQuicClientStream : public quic : : QuicSpdyClientStream , <nl> / / Http : : StreamEncoder <nl> void encodeData ( Buffer : : Instance & data , bool end_stream ) override ; <nl> void encodeMetadata ( const Http : : MetadataMapVector & metadata_map_vector ) override ; <nl> + Http : : Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions ( ) override { <nl> + return absl : : nullopt ; <nl> + } <nl> <nl> / / Http : : RequestEncoder <nl> void encodeHeaders ( const Http : : RequestHeaderMap & headers , bool end_stream ) override ; <nl> mmm a / source / extensions / quic_listeners / quiche / envoy_quic_server_stream . h <nl> ppp b / source / extensions / quic_listeners / quiche / envoy_quic_server_stream . h <nl> class EnvoyQuicServerStream : public quic : : QuicSpdyServerStreamBase , <nl> void encodeData ( Buffer : : Instance & data , bool end_stream ) override ; <nl> void encodeTrailers ( const Http : : ResponseTrailerMap & trailers ) override ; <nl> void encodeMetadata ( const Http : : MetadataMapVector & metadata_map_vector ) override ; <nl> + Http : : Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions ( ) override { <nl> + return absl : : nullopt ; <nl> + } <nl> <nl> / / Http : : Stream <nl> void resetStream ( Http : : StreamResetReason reason ) override ; <nl> mmm a / source / extensions / stat_sinks / hystrix / hystrix . cc <nl> ppp b / source / extensions / stat_sinks / hystrix / hystrix . cc <nl> Http : : Code HystrixSink : : handlerHystrixEventStream ( absl : : string_view , <nl> admin_stream . getDecoderFilterCallbacks ( ) ; <nl> <nl> / / Disable chunk - encoding in HTTP / 1 . x . <nl> - / / TODO : This request should be propagated to codecs via API , instead of using a pseudo - header . <nl> - / / See : https : / / github . com / envoyproxy / envoy / issues / 9749 <nl> if ( stream_decoder_filter_callbacks . streamInfo ( ) . protocol ( ) < Http : : Protocol : : Http2 ) { <nl> - response_headers . setNoChunks ( 0 ) ; <nl> + admin_stream . http1StreamEncoderOptions ( ) . value ( ) . get ( ) . disableChunkEncoding ( ) ; <nl> } <nl> <nl> registerConnection ( & stream_decoder_filter_callbacks ) ; <nl> void HystrixSink : : flush ( Stats : : MetricSnapshot & snapshot ) { <nl> * cluster_stats_cache_ptr , cluster_info - > name ( ) , <nl> cluster_info - > resourceManager ( Upstream : : ResourcePriority : : Default ) . pendingRequests ( ) . max ( ) , <nl> cluster_info - > statsScope ( ) <nl> - . gaugeFromStatName ( membership_total_ , Stats : : Gauge : : ImportMode : : Accumulate ) <nl> + . gaugeFromStatName ( membership_total_ , Stats : : Gauge : : ImportMode : : NeverImport ) <nl> . value ( ) , <nl> server_ . statsFlushInterval ( ) , time_histograms [ cluster_info - > name ( ) ] , ss ) ; <nl> } <nl> mmm a / source / extensions / stat_sinks / hystrix / hystrix . h <nl> ppp b / source / extensions / stat_sinks / hystrix / hystrix . h <nl> class HystrixSink : public Stats : : Sink , public Logger : : Loggable < Logger : : Id : : hyst <nl> std : : unordered_map < std : : string , ClusterStatsCachePtr > cluster_stats_cache_map_ ; <nl> <nl> / / Saved StatNames for fast comparisons in loop . <nl> + / / TODO ( mattklein123 ) : Many / all of these stats should just be pulled directly from the cluster <nl> + / / stats directly . This needs some cleanup . 
<nl> Stats : : StatNamePool stat_name_pool_ ; <nl> const Stats : : StatName cluster_name_ ; <nl> const Stats : : StatName cluster_upstream_rq_time_ ; <nl> mmm a / source / server / http / BUILD <nl> ppp b / source / server / http / BUILD <nl> envoy_cc_library ( <nl> " / / source / common / stats : stats_lib " , <nl> " / / source / common / upstream : host_utility_lib " , <nl> " / / source / extensions / access_loggers / file : file_access_log_lib " , <nl> + " / / source / extensions / filters / http / common : pass_through_filter_lib " , <nl> " @ envoy_api / / envoy / admin / v3 : pkg_cc_proto " , <nl> " @ envoy_api / / envoy / config / core / v3 : pkg_cc_proto " , <nl> " @ envoy_api / / envoy / config / route / v3 : pkg_cc_proto " , <nl> mmm a / source / server / http / admin . cc <nl> ppp b / source / server / http / admin . cc <nl> Http : : FilterDataStatus AdminFilter : : decodeData ( Buffer : : Instance & data , bool end_ <nl> / / If we ever support streaming admin requests we may need to revisit this . Note , we must use <nl> / / addDecodedData ( ) here since we might need to perform onComplete ( ) processing if end_stream is <nl> / / true . <nl> - callbacks_ - > addDecodedData ( data , false ) ; <nl> + decoder_callbacks_ - > addDecodedData ( data , false ) ; <nl> <nl> if ( end_stream ) { <nl> onComplete ( ) ; <nl> void AdminFilter : : addOnDestroyCallback ( std : : function < void ( ) > cb ) { <nl> } <nl> <nl> Http : : StreamDecoderFilterCallbacks & AdminFilter : : getDecoderFilterCallbacks ( ) const { <nl> - ASSERT ( callbacks_ ! = nullptr ) ; <nl> - return * callbacks_ ; <nl> + ASSERT ( decoder_callbacks_ ! = nullptr ) ; <nl> + return * decoder_callbacks_ ; <nl> } <nl> <nl> - const Buffer : : Instance * AdminFilter : : getRequestBody ( ) const { return callbacks_ - > decodingBuffer ( ) ; } <nl> + const Buffer : : Instance * AdminFilter : : getRequestBody ( ) const { <nl> + return decoder_callbacks_ - > decodingBuffer ( ) ; <nl> + } <nl> <nl> const Http : : RequestHeaderMap & AdminFilter : : getRequestHeaders ( ) const { <nl> ASSERT ( request_headers_ ! = nullptr ) ; <nl> ConfigTracker & AdminImpl : : getConfigTracker ( ) { return config_tracker_ ; } <nl> <nl> void AdminFilter : : onComplete ( ) { <nl> absl : : string_view path = request_headers_ - > Path ( ) - > value ( ) . getStringView ( ) ; <nl> - ENVOY_STREAM_LOG ( debug , " request complete : path : { } " , * callbacks_ , path ) ; <nl> + ENVOY_STREAM_LOG ( debug , " request complete : path : { } " , * decoder_callbacks_ , path ) ; <nl> <nl> Buffer : : OwnedImpl response ; <nl> Http : : ResponseHeaderMapPtr header_map { new Http : : ResponseHeaderMapImpl } ; <nl> RELEASE_ASSERT ( request_headers_ , " " ) ; <nl> Http : : Code code = parent_ . runCallback ( path , * header_map , response , * this ) ; <nl> populateFallbackResponseHeaders ( code , * header_map ) ; <nl> - callbacks_ - > encodeHeaders ( std : : move ( header_map ) , <nl> - end_stream_on_complete_ & & response . length ( ) = = 0 ) ; <nl> + decoder_callbacks_ - > encodeHeaders ( std : : move ( header_map ) , <nl> + end_stream_on_complete_ & & response . length ( ) = = 0 ) ; <nl> <nl> if ( response . 
length ( ) > 0 ) { <nl> - callbacks_ - > encodeData ( response , end_stream_on_complete_ ) ; <nl> + decoder_callbacks_ - > encodeData ( response , end_stream_on_complete_ ) ; <nl> } <nl> } <nl> <nl> bool AdminImpl : : createNetworkFilterChain ( Network : : Connection & connection , <nl> } <nl> <nl> void AdminImpl : : createFilterChain ( Http : : FilterChainFactoryCallbacks & callbacks ) { <nl> - callbacks . addStreamDecoderFilter ( Http : : StreamDecoderFilterSharedPtr { new AdminFilter ( * this ) } ) ; <nl> + callbacks . addStreamFilter ( std : : make_shared < AdminFilter > ( * this ) ) ; <nl> } <nl> <nl> Http : : Code AdminImpl : : runCallback ( absl : : string_view path_and_query , <nl> mmm a / source / server / http / admin . h <nl> ppp b / source / server / http / admin . h <nl> <nl> <nl> # include " server / http / config_tracker_impl . h " <nl> <nl> + # include " extensions / filters / http / common / pass_through_filter . h " <nl> + <nl> # include " absl / strings / string_view . h " <nl> <nl> namespace Envoy { <nl> class AdminImpl : public Admin , <nl> / * * <nl> * A terminal HTTP filter that implements server admin functionality . <nl> * / <nl> - class AdminFilter : public Http : : StreamDecoderFilter , <nl> + class AdminFilter : public Http : : PassThroughFilter , <nl> public AdminStream , <nl> Logger : : Loggable < Logger : : Id : : admin > { <nl> public : <nl> AdminFilter ( AdminImpl & parent ) ; <nl> <nl> / / Http : : StreamFilterBase <nl> + / / Handlers relying on the reference should use addOnDestroyCallback ( ) <nl> + / / to add a callback that will notify them when the reference is no <nl> + / / longer valid . <nl> void onDestroy ( ) override ; <nl> <nl> / / Http : : StreamDecoderFilter <nl> class AdminFilter : public Http : : StreamDecoderFilter , <nl> bool end_stream ) override ; <nl> Http : : FilterDataStatus decodeData ( Buffer : : Instance & data , bool end_stream ) override ; <nl> Http : : FilterTrailersStatus decodeTrailers ( Http : : RequestTrailerMap & trailers ) override ; <nl> - void setDecoderFilterCallbacks ( Http : : StreamDecoderFilterCallbacks & callbacks ) override { <nl> - callbacks_ = & callbacks ; <nl> - } <nl> <nl> / / AdminStream <nl> void setEndStreamOnComplete ( bool end_stream ) override { end_stream_on_complete_ = end_stream ; } <nl> class AdminFilter : public Http : : StreamDecoderFilter , <nl> Http : : StreamDecoderFilterCallbacks & getDecoderFilterCallbacks ( ) const override ; <nl> const Buffer : : Instance * getRequestBody ( ) const override ; <nl> const Http : : RequestHeaderMap & getRequestHeaders ( ) const override ; <nl> + Http : : Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions ( ) override { <nl> + return encoder_callbacks_ - > http1StreamEncoderOptions ( ) ; <nl> + } <nl> <nl> private : <nl> / * * <nl> class AdminFilter : public Http : : StreamDecoderFilter , <nl> void onComplete ( ) ; <nl> <nl> AdminImpl & parent_ ; <nl> - / / Handlers relying on the reference should use addOnDestroyCallback ( ) <nl> - / / to add a callback that will notify them when the reference is no <nl> - / / longer valid . <nl> - Http : : StreamDecoderFilterCallbacks * callbacks_ { } ; <nl> Http : : RequestHeaderMap * request_headers_ { } ; <nl> std : : list < std : : function < void ( ) > > on_destroy_callbacks_ ; <nl> bool end_stream_on_complete_ = true ; <nl> mmm a / test / common / http / http2 / codec_impl_test . cc <nl> ppp b / test / common / http / http2 / codec_impl_test . 
cc <nl> class Http2CodecImplTest : public : : testing : : TestWithParam < Http2SettingsTestPara <nl> <nl> TEST_P ( Http2CodecImplTest , ShutdownNotice ) { <nl> initialize ( ) ; <nl> + EXPECT_EQ ( absl : : nullopt , request_encoder_ - > http1StreamEncoderOptions ( ) ) ; <nl> <nl> TestRequestHeaderMapImpl request_headers ; <nl> HttpTestUtility : : addDefaultHeaders ( request_headers ) ; <nl> mmm a / test / extensions / quic_listeners / quiche / envoy_quic_client_stream_test . cc <nl> ppp b / test / extensions / quic_listeners / quiche / envoy_quic_client_stream_test . cc <nl> INSTANTIATE_TEST_SUITE_P ( EnvoyQuicClientStreamTests , EnvoyQuicClientStreamTest , <nl> testing : : ValuesIn ( { true , false } ) ) ; <nl> <nl> TEST_P ( EnvoyQuicClientStreamTest , PostRequestAndResponse ) { <nl> + EXPECT_EQ ( absl : : nullopt , quic_stream_ - > http1StreamEncoderOptions ( ) ) ; <nl> quic_stream_ - > encodeHeaders ( request_headers_ , false ) ; <nl> quic_stream_ - > encodeData ( request_body_ , true ) ; <nl> <nl> mmm a / test / extensions / quic_listeners / quiche / envoy_quic_server_stream_test . cc <nl> ppp b / test / extensions / quic_listeners / quiche / envoy_quic_server_stream_test . cc <nl> TEST_P ( EnvoyQuicServerStreamTest , GetRequestAndResponse ) { <nl> } <nl> <nl> TEST_P ( EnvoyQuicServerStreamTest , PostRequestAndResponse ) { <nl> + EXPECT_EQ ( absl : : nullopt , quic_stream_ - > http1StreamEncoderOptions ( ) ) ; <nl> sendRequest ( request_body_ , true , request_body_ . size ( ) * 2 ) ; <nl> quic_stream_ - > encodeHeaders ( response_headers_ , / * end_stream = * / true ) ; <nl> } <nl> mmm a / test / extensions / stats_sinks / hystrix / BUILD <nl> ppp b / test / extensions / stats_sinks / hystrix / BUILD <nl> envoy_extension_cc_test ( <nl> " / / test / mocks / upstream : upstream_mocks " , <nl> ] , <nl> ) <nl> + <nl> + envoy_extension_cc_test ( <nl> + name = " hystrix_integration_test " , <nl> + srcs = [ " hystrix_integration_test . cc " ] , <nl> + extension_name = " envoy . stat_sinks . hystrix " , <nl> + deps = [ <nl> + " / / source / extensions / stat_sinks / hystrix : config " , <nl> + " / / test / integration : http_protocol_integration_lib " , <nl> + ] , <nl> + ) <nl> new file mode 100644 <nl> index 00000000000 . . 9a5667e6d58 <nl> mmm / dev / null <nl> ppp b / test / extensions / stats_sinks / hystrix / hystrix_integration_test . cc <nl> <nl> + # include " test / integration / http_protocol_integration . h " <nl> + <nl> + using testing : : HasSubstr ; <nl> + using testing : : Not ; <nl> + using testing : : StartsWith ; <nl> + <nl> + namespace Envoy { <nl> + <nl> + class HystrixIntegrationTest : public HttpProtocolIntegrationTest { } ; <nl> + <nl> + INSTANTIATE_TEST_SUITE_P ( Protocols , HystrixIntegrationTest , <nl> + testing : : ValuesIn ( HttpProtocolIntegrationTest : : getProtocolTestParams ( <nl> + { Http : : CodecClient : : Type : : HTTP1 , Http : : CodecClient : : Type : : HTTP2 } , <nl> + { FakeHttpConnection : : Type : : HTTP1 } ) ) , <nl> + HttpProtocolIntegrationTest : : protocolTestParamsToString ) ; <nl> + <nl> + TEST_P ( HystrixIntegrationTest , NoChunkEncoding ) { <nl> + config_helper_ . addConfigModifier ( [ ] ( envoy : : config : : bootstrap : : v3 : : Bootstrap & bootstrap ) { <nl> + auto * metrics_sink = bootstrap . add_stats_sinks ( ) ; <nl> + metrics_sink - > set_name ( " envoy . stat_sinks . hystrix " ) ; <nl> + bootstrap . 
mutable_stats_flush_interval ( ) - > CopyFrom ( <nl> + Protobuf : : util : : TimeUtil : : MillisecondsToDuration ( 100 ) ) ; <nl> + } ) ; <nl> + initialize ( ) ; <nl> + <nl> + if ( downstreamProtocol ( ) = = Http : : CodecClient : : Type : : HTTP1 ) { <nl> + / / For HTTP / 1 . 1 we use a raw client to make absolutely sure there is no chunk encoding . <nl> + Buffer : : OwnedImpl buffer ( " GET / hystrix_event_stream HTTP / 1 . 1 \ r \ nHost : admin \ r \ n \ r \ n " ) ; <nl> + std : : string response ; <nl> + RawConnectionDriver connection ( <nl> + lookupPort ( " admin " ) , buffer , <nl> + [ & ] ( Network : : ClientConnection & client , const Buffer : : Instance & data ) - > void { <nl> + response . append ( data . toString ( ) ) ; <nl> + / / Wait until there is a flush . <nl> + if ( response . find ( " rollingCountCollapsedRequests " ) ! = std : : string : : npos ) { <nl> + client . close ( Network : : ConnectionCloseType : : NoFlush ) ; <nl> + } <nl> + } , <nl> + version_ ) ; <nl> + connection . run ( ) ; <nl> + EXPECT_THAT ( response , StartsWith ( " HTTP / 1 . 1 200 OK \ r \ n " ) ) ; <nl> + / / Make sure that the response is not actually chunk encoded , but it does have the hystrix flush <nl> + / / trailer . <nl> + EXPECT_THAT ( response , Not ( HasSubstr ( " chunked " ) ) ) ; <nl> + EXPECT_THAT ( response , Not ( HasSubstr ( " 3 \ r \ n : \ n \ n " ) ) ) ; <nl> + EXPECT_THAT ( response , HasSubstr ( " : \ n \ n " ) ) ; <nl> + connection . close ( ) ; <nl> + } else { <nl> + codec_client_ = makeHttpConnection ( lookupPort ( " admin " ) ) ; <nl> + auto response = codec_client_ - > makeHeaderOnlyRequest ( <nl> + Http : : TestRequestHeaderMapImpl { { " : method " , " GET " } , <nl> + { " : path " , " / hystrix_event_stream " } , <nl> + { " : scheme " , " http " } , <nl> + { " : authority " , " admin " } } ) ; <nl> + response - > waitForBodyData ( 1 ) ; <nl> + EXPECT_THAT ( response - > body ( ) , HasSubstr ( " rollingCountCollapsedRequests " ) ) ; <nl> + codec_client_ - > close ( ) ; <nl> + } <nl> + } <nl> + <nl> + } / / namespace Envoy <nl> mmm a / test / extensions / stats_sinks / hystrix / hystrix_test . cc <nl> ppp b / test / extensions / stats_sinks / hystrix / hystrix_test . cc <nl> class ClusterTestInfo { <nl> <nl> / / Set gauge value . <nl> membership_total_gauge_ . name_ = " membership_total " ; <nl> - ON_CALL ( cluster_stats_scope_ , gauge ( " membership_total " , Stats : : Gauge : : ImportMode : : Accumulate ) ) <nl> + ON_CALL ( cluster_stats_scope_ , gauge ( " membership_total " , Stats : : Gauge : : ImportMode : : NeverImport ) ) <nl> . WillByDefault ( ReturnRef ( membership_total_gauge_ ) ) ; <nl> ON_CALL ( membership_total_gauge_ , value ( ) ) . WillByDefault ( Return ( 5 ) ) ; <nl> <nl> TEST_F ( HystrixSinkTest , HystrixEventStreamHandler ) { <nl> <nl> auto addr_instance_ = Envoy : : Network : : Utility : : parseInternetAddress ( " 2 . 3 . 4 . 5 " , 123 , false ) ; <nl> <nl> + Http : : MockHttp1StreamEncoderOptions stream_encoder_options ; <nl> ON_CALL ( admin_stream_mock , getDecoderFilterCallbacks ( ) ) . WillByDefault ( ReturnRef ( callbacks_ ) ) ; <nl> + ON_CALL ( admin_stream_mock , http1StreamEncoderOptions ( ) ) <nl> + . WillByDefault ( Return ( Http : : Http1StreamEncoderOptionsOptRef ( stream_encoder_options ) ) ) ; <nl> ON_CALL ( callbacks_ , connection ( ) ) . WillByDefault ( Return ( & connection_mock ) ) ; <nl> ON_CALL ( connection_mock , remoteAddress ( ) ) . 
WillByDefault ( ReturnRef ( addr_instance_ ) ) ; <nl> <nl> + EXPECT_CALL ( stream_encoder_options , disableChunkEncoding ( ) ) ; <nl> ASSERT_EQ ( <nl> sink_ - > handlerHystrixEventStream ( path_and_query , response_headers , buffer , admin_stream_mock ) , <nl> Http : : Code : : OK ) ; <nl> mmm a / test / integration / fake_upstream . h <nl> ppp b / test / integration / fake_upstream . h <nl> class FakeStream : public Http : : RequestDecoder , <nl> void setAddServedByHeader ( bool add_header ) { add_served_by_header_ = add_header ; } <nl> const Http : : RequestTrailerMapPtr & trailers ( ) { return trailers_ ; } <nl> bool receivedData ( ) { return received_data_ ; } <nl> + Http : : Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions ( ) { <nl> + return encoder_ . http1StreamEncoderOptions ( ) ; <nl> + } <nl> <nl> ABSL_MUST_USE_RESULT <nl> testing : : AssertionResult <nl> mmm a / test / integration / integration_test . cc <nl> ppp b / test / integration / integration_test . cc <nl> TEST_P ( IntegrationTest , ResponseFramedByConnectionCloseWithReadLimits ) { <nl> auto response = codec_client_ - > makeHeaderOnlyRequest ( default_request_headers_ ) ; <nl> waitForNextUpstreamRequest ( ) ; <nl> / / Disable chunk encoding to trigger framing by connection close . <nl> - / / TODO : This request should be propagated to codecs via API , instead of using a pseudo - header . <nl> - / / See : https : / / github . com / envoyproxy / envoy / issues / 9749 <nl> - upstream_request_ - > encodeHeaders ( <nl> - Http : : TestResponseHeaderMapImpl { { " : status " , " 200 " } , { " : no - chunks " , " 1 " } } , false ) ; <nl> + upstream_request_ - > http1StreamEncoderOptions ( ) . value ( ) . get ( ) . disableChunkEncoding ( ) ; <nl> + upstream_request_ - > encodeHeaders ( Http : : TestResponseHeaderMapImpl { { " : status " , " 200 " } } , false ) ; <nl> upstream_request_ - > encodeData ( 512 , true ) ; <nl> ASSERT_TRUE ( fake_upstream_connection_ - > close ( ) ) ; <nl> <nl> mmm a / test / mocks / http / mocks . h <nl> ppp b / test / mocks / http / mocks . h <nl> class MockStreamEncoderFilterCallbacks : public StreamEncoderFilterCallbacks , <nl> MOCK_METHOD ( void , continueEncoding , ( ) ) ; <nl> MOCK_METHOD ( const Buffer : : Instance * , encodingBuffer , ( ) ) ; <nl> MOCK_METHOD ( void , modifyEncodingBuffer , ( std : : function < void ( Buffer : : Instance & ) > ) ) ; <nl> + MOCK_METHOD ( Http1StreamEncoderOptionsOptRef , http1StreamEncoderOptions , ( ) ) ; <nl> <nl> Buffer : : InstancePtr buffer_ ; <nl> testing : : NiceMock < Tracing : : MockSpan > active_span_ ; <nl> mmm a / test / mocks / http / stream_encoder . cc <nl> ppp b / test / mocks / http / stream_encoder . cc <nl> using testing : : Invoke ; <nl> namespace Envoy { <nl> namespace Http { <nl> <nl> + MockHttp1StreamEncoderOptions : : MockHttp1StreamEncoderOptions ( ) = default ; <nl> + MockHttp1StreamEncoderOptions : : ~ MockHttp1StreamEncoderOptions ( ) = default ; <nl> + <nl> MockStreamEncoder : : MockStreamEncoder ( ) { <nl> ON_CALL ( * this , getStream ( ) ) . WillByDefault ( ReturnRef ( stream_ ) ) ; <nl> } <nl> mmm a / test / mocks / http / stream_encoder . h <nl> ppp b / test / mocks / http / stream_encoder . 
h <nl> <nl> namespace Envoy { <nl> namespace Http { <nl> <nl> + class MockHttp1StreamEncoderOptions : public Http1StreamEncoderOptions { <nl> + public : <nl> + MockHttp1StreamEncoderOptions ( ) ; <nl> + ~ MockHttp1StreamEncoderOptions ( ) ; <nl> + <nl> + MOCK_METHOD ( void , disableChunkEncoding , ( ) ) ; <nl> + } ; <nl> + <nl> class MockStreamEncoder : public virtual StreamEncoder { <nl> public : <nl> MockStreamEncoder ( ) ; <nl> class MockStreamEncoder : public virtual StreamEncoder { <nl> MOCK_METHOD ( void , encodeData , ( Buffer : : Instance & data , bool end_stream ) ) ; <nl> MOCK_METHOD ( void , encodeMetadata , ( const MetadataMapVector & metadata_map_vector ) ) ; <nl> MOCK_METHOD ( Stream & , getStream , ( ) ) ; <nl> + MOCK_METHOD ( Http1StreamEncoderOptionsOptRef , http1StreamEncoderOptions , ( ) ) ; <nl> <nl> testing : : NiceMock < MockStream > stream_ ; <nl> } ; <nl> mmm a / test / mocks / server / mocks . h <nl> ppp b / test / mocks / server / mocks . h <nl> class MockAdminStream : public AdminStream { <nl> MOCK_METHOD ( Http : : RequestHeaderMap & , getRequestHeaders , ( ) , ( const ) ) ; <nl> MOCK_METHOD ( NiceMock < Http : : MockStreamDecoderFilterCallbacks > & , getDecoderFilterCallbacks , ( ) , <nl> ( const ) ) ; <nl> + MOCK_METHOD ( Http : : Http1StreamEncoderOptionsOptRef , http1StreamEncoderOptions , ( ) ) ; <nl> } ; <nl> <nl> class MockDrainManager : public DrainManager { <nl>
http: remove magic :no-chunks header ()
envoyproxy/envoy
63448a48cdd6e32db7dd1b43bfe18e3319061e20
2020-03-25T20:09:11Z
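A minimal sketch of the API this commit switches to, instead of the removed `:no-chunks` pseudo-header. The Envoy type and method names (`Http1StreamEncoderOptionsOptRef`, `disableChunkEncoding`) are taken from the diff above; the surrounding function, the `Envoy::Server::AdminStream` parameter, and its namespace are assumed context, not part of the commit.

```cpp
// Illustrative fragment (assumes Envoy headers are available): a handler such
// as the hystrix event stream turns off chunked transfer-encoding through the
// stream-encoder options object rather than emitting a magic response header.
void disableChunkedFraming(Envoy::Server::AdminStream& admin_stream) {
  Envoy::Http::Http1StreamEncoderOptionsOptRef options =
      admin_stream.http1StreamEncoderOptions();
  if (options.has_value()) {  // only present for HTTP/1 codecs
    options.value().get().disableChunkEncoding();
  }
}
```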
mmm a / lib / Sema / TypeCheckAccess . cpp <nl> ppp b / lib / Sema / TypeCheckAccess . cpp <nl> class UsableFromInlineChecker : public AccessControlCheckerBase , <nl> } ; <nl> <nl> class ExportabilityChecker : public DeclVisitor < ExportabilityChecker > { <nl> - using CheckExportabilityTypeCallback = <nl> - llvm : : function_ref < void ( const TypeDecl * , const TypeRepr * ) > ; <nl> - using CheckExportabilityConformanceCallback = <nl> - llvm : : function_ref < void ( const ProtocolConformance * ) > ; <nl> + class Diagnoser ; <nl> <nl> void checkTypeImpl ( <nl> Type type , const TypeRepr * typeRepr , const SourceFile & SF , <nl> - CheckExportabilityTypeCallback diagnoseType , <nl> - CheckExportabilityConformanceCallback diagnoseConformance ) { <nl> + const Diagnoser & diagnoser ) { <nl> / / Don ' t bother checking errors . <nl> if ( type & & type - > hasError ( ) ) <nl> return ; <nl> class ExportabilityChecker : public DeclVisitor < ExportabilityChecker > { <nl> if ( ! SF . isImportedImplementationOnly ( M ) ) <nl> return true ; <nl> <nl> - diagnoseType ( component - > getBoundDecl ( ) , component ) ; <nl> + diagnoser . diagnoseType ( component - > getBoundDecl ( ) , component ) ; <nl> foundAnyIssues = true ; <nl> / / We still continue even in the diagnostic case to report multiple <nl> / / violations . <nl> class ExportabilityChecker : public DeclVisitor < ExportabilityChecker > { <nl> <nl> class ProblematicTypeFinder : public TypeDeclFinder { <nl> const SourceFile & SF ; <nl> - CheckExportabilityTypeCallback diagnoseType ; <nl> - CheckExportabilityConformanceCallback diagnoseConformance ; <nl> + const Diagnoser & diagnoser ; <nl> public : <nl> - ProblematicTypeFinder ( <nl> - const SourceFile & SF , <nl> - CheckExportabilityTypeCallback diagnoseType , <nl> - CheckExportabilityConformanceCallback diagnoseConformance ) <nl> - : SF ( SF ) , diagnoseType ( diagnoseType ) , <nl> - diagnoseConformance ( diagnoseConformance ) { } <nl> + ProblematicTypeFinder ( const SourceFile & SF , const Diagnoser & diagnoser ) <nl> + : SF ( SF ) , diagnoser ( diagnoser ) { } <nl> <nl> void visitTypeDecl ( const TypeDecl * typeDecl ) { <nl> ModuleDecl * M = typeDecl - > getModuleContext ( ) ; <nl> if ( ! SF . isImportedImplementationOnly ( M ) ) <nl> return ; <nl> <nl> - diagnoseType ( typeDecl , / * typeRepr * / nullptr ) ; <nl> + diagnoser . diagnoseType ( typeDecl , / * typeRepr * / nullptr ) ; <nl> } <nl> <nl> void visitSubstitutionMap ( SubstitutionMap subs ) { <nl> class ExportabilityChecker : public DeclVisitor < ExportabilityChecker > { <nl> ModuleDecl * M = rootConf - > getDeclContext ( ) - > getParentModule ( ) ; <nl> if ( ! SF . isImportedImplementationOnly ( M ) ) <nl> continue ; <nl> - diagnoseConformance ( rootConf ) ; <nl> + diagnoser . diagnoseConformance ( rootConf ) ; <nl> } <nl> } <nl> <nl> class ExportabilityChecker : public DeclVisitor < ExportabilityChecker > { <nl> } <nl> } ; <nl> <nl> - type . walk ( ProblematicTypeFinder ( SF , diagnoseType , diagnoseConformance ) ) ; <nl> + type . walk ( ProblematicTypeFinder ( SF , diagnoser ) ) ; <nl> } <nl> <nl> void checkType ( <nl> Type type , const TypeRepr * typeRepr , const Decl * context , <nl> - CheckExportabilityTypeCallback diagnoseType , <nl> - CheckExportabilityConformanceCallback diagnoseConformance ) { <nl> + const Diagnoser & diagnoser ) { <nl> auto * SF = context - > getDeclContext ( ) - > getParentSourceFile ( ) ; <nl> assert ( SF & & " checking a non - source declaration ? 
" ) ; <nl> - return checkTypeImpl ( type , typeRepr , * SF , diagnoseType , <nl> - diagnoseConformance ) ; <nl> + return checkTypeImpl ( type , typeRepr , * SF , diagnoser ) ; <nl> } <nl> <nl> void checkType ( <nl> - const TypeLoc & TL , const Decl * context , <nl> - CheckExportabilityTypeCallback diagnoseType , <nl> - CheckExportabilityConformanceCallback diagnoseConformance ) { <nl> - checkType ( TL . getType ( ) , TL . getTypeRepr ( ) , context , diagnoseType , <nl> - diagnoseConformance ) ; <nl> + const TypeLoc & TL , const Decl * context , const Diagnoser & diagnoser ) { <nl> + checkType ( TL . getType ( ) , TL . getTypeRepr ( ) , context , diagnoser ) ; <nl> } <nl> <nl> void checkGenericParams ( const GenericContext * ownerCtx , <nl> class ExportabilityChecker : public DeclVisitor < ExportabilityChecker > { <nl> continue ; <nl> assert ( param - > getInherited ( ) . size ( ) = = 1 ) ; <nl> checkType ( param - > getInherited ( ) . front ( ) , ownerDecl , <nl> - getDiagnoseCallback ( ownerDecl ) , <nl> - getDiagnoseCallback ( ownerDecl ) ) ; <nl> + getDiagnoser ( ownerDecl ) ) ; <nl> } <nl> <nl> forAllRequirementTypes ( WhereClauseOwner ( <nl> const_cast < GenericContext * > ( ownerCtx ) ) , <nl> [ & ] ( Type type , TypeRepr * typeRepr ) { <nl> - checkType ( type , typeRepr , ownerDecl , getDiagnoseCallback ( ownerDecl ) , <nl> - getDiagnoseCallback ( ownerDecl ) ) ; <nl> + checkType ( type , typeRepr , ownerDecl , getDiagnoser ( ownerDecl ) ) ; <nl> } ) ; <nl> } <nl> <nl> class ExportabilityChecker : public DeclVisitor < ExportabilityChecker > { <nl> ExtensionWithConditionalConformances <nl> } ; <nl> <nl> - class DiagnoseGenerically { <nl> + class Diagnoser { <nl> const Decl * D ; <nl> Reason reason ; <nl> public : <nl> - DiagnoseGenerically ( const Decl * D , Reason reason ) : D ( D ) , reason ( reason ) { } <nl> + Diagnoser ( const Decl * D , Reason reason ) : D ( D ) , reason ( reason ) { } <nl> <nl> - void operator ( ) ( const TypeDecl * offendingType , <nl> - const TypeRepr * complainRepr ) { <nl> + void diagnoseType ( const TypeDecl * offendingType , <nl> + const TypeRepr * complainRepr ) const { <nl> ModuleDecl * M = offendingType - > getModuleContext ( ) ; <nl> auto diag = D - > diagnose ( diag : : decl_from_implementation_only_module , <nl> offendingType - > getDescriptiveKind ( ) , <nl> class ExportabilityChecker : public DeclVisitor < ExportabilityChecker > { <nl> highlightOffendingType ( diag , complainRepr ) ; <nl> } <nl> <nl> - void operator ( ) ( const ProtocolConformance * offendingConformance ) { <nl> + void diagnoseConformance ( const ProtocolConformance * offendingConformance ) const { <nl> ModuleDecl * M = offendingConformance - > getDeclContext ( ) - > getParentModule ( ) ; <nl> D - > diagnose ( diag : : conformance_from_implementation_only_module , <nl> offendingConformance - > getType ( ) , <nl> class ExportabilityChecker : public DeclVisitor < ExportabilityChecker > { <nl> } <nl> } ; <nl> <nl> - static_assert ( <nl> - std : : is_convertible < DiagnoseGenerically , <nl> - CheckExportabilityTypeCallback > : : value , <nl> - " DiagnoseGenerically has wrong call signature " ) ; <nl> - static_assert ( <nl> - std : : is_convertible < DiagnoseGenerically , <nl> - CheckExportabilityConformanceCallback > : : value , <nl> - " DiagnoseGenerically has wrong call signature for conformance diags " ) ; <nl> - <nl> - DiagnoseGenerically getDiagnoseCallback ( const Decl * D , <nl> - Reason reason = Reason : : General ) { <nl> - return DiagnoseGenerically ( D , reason ) ; <nl> + 
Diagnoser getDiagnoser ( const Decl * D , Reason reason = Reason : : General ) { <nl> + return Diagnoser ( D , reason ) ; <nl> } <nl> <nl> public : <nl> class ExportabilityChecker : public DeclVisitor < ExportabilityChecker > { <nl> return ; <nl> <nl> checkType ( theVar - > getInterfaceType ( ) , / * typeRepr * / nullptr , theVar , <nl> - getDiagnoseCallback ( theVar ) , getDiagnoseCallback ( theVar ) ) ; <nl> + getDiagnoser ( theVar ) ) ; <nl> } <nl> <nl> / / / \ see visitPatternBindingDecl <nl> class ExportabilityChecker : public DeclVisitor < ExportabilityChecker > { <nl> if ( shouldSkipChecking ( anyVar ) ) <nl> return ; <nl> <nl> - checkType ( TP - > getTypeLoc ( ) , anyVar , getDiagnoseCallback ( anyVar ) , <nl> - getDiagnoseCallback ( anyVar ) ) ; <nl> + checkType ( TP - > getTypeLoc ( ) , anyVar , getDiagnoser ( anyVar ) ) ; <nl> } <nl> <nl> void visitPatternBindingDecl ( PatternBindingDecl * PBD ) { <nl> class ExportabilityChecker : public DeclVisitor < ExportabilityChecker > { <nl> void visitTypeAliasDecl ( TypeAliasDecl * TAD ) { <nl> checkGenericParams ( TAD , TAD ) ; <nl> checkType ( TAD - > getUnderlyingType ( ) , <nl> - TAD - > getUnderlyingTypeRepr ( ) , TAD , getDiagnoseCallback ( TAD ) , <nl> - getDiagnoseCallback ( TAD ) ) ; <nl> + TAD - > getUnderlyingTypeRepr ( ) , TAD , getDiagnoser ( TAD ) ) ; <nl> } <nl> <nl> void visitAssociatedTypeDecl ( AssociatedTypeDecl * assocType ) { <nl> llvm : : for_each ( assocType - > getInherited ( ) , <nl> [ & ] ( TypeLoc requirement ) { <nl> - checkType ( requirement , assocType , getDiagnoseCallback ( assocType ) , <nl> - getDiagnoseCallback ( assocType ) ) ; <nl> + checkType ( requirement , assocType , getDiagnoser ( assocType ) ) ; <nl> } ) ; <nl> checkType ( assocType - > getDefaultDefinitionType ( ) , <nl> assocType - > getDefaultDefinitionTypeRepr ( ) , assocType , <nl> - getDiagnoseCallback ( assocType ) , getDiagnoseCallback ( assocType ) ) ; <nl> + getDiagnoser ( assocType ) ) ; <nl> <nl> if ( assocType - > getTrailingWhereClause ( ) ) { <nl> forAllRequirementTypes ( assocType , <nl> [ & ] ( Type type , TypeRepr * typeRepr ) { <nl> - checkType ( type , typeRepr , assocType , getDiagnoseCallback ( assocType ) , <nl> - getDiagnoseCallback ( assocType ) ) ; <nl> + checkType ( type , typeRepr , assocType , getDiagnoser ( assocType ) ) ; <nl> } ) ; <nl> } <nl> } <nl> class ExportabilityChecker : public DeclVisitor < ExportabilityChecker > { <nl> <nl> llvm : : for_each ( nominal - > getInherited ( ) , <nl> [ & ] ( TypeLoc nextInherited ) { <nl> - checkType ( nextInherited , nominal , getDiagnoseCallback ( nominal ) , <nl> - getDiagnoseCallback ( nominal ) ) ; <nl> + checkType ( nextInherited , nominal , getDiagnoser ( nominal ) ) ; <nl> } ) ; <nl> } <nl> <nl> void visitProtocolDecl ( ProtocolDecl * proto ) { <nl> llvm : : for_each ( proto - > getInherited ( ) , <nl> [ & ] ( TypeLoc requirement ) { <nl> - checkType ( requirement , proto , getDiagnoseCallback ( proto ) , <nl> - getDiagnoseCallback ( proto ) ) ; <nl> + checkType ( requirement , proto , getDiagnoser ( proto ) ) ; <nl> } ) ; <nl> <nl> if ( proto - > getTrailingWhereClause ( ) ) { <nl> forAllRequirementTypes ( proto , [ & ] ( Type type , TypeRepr * typeRepr ) { <nl> - checkType ( type , typeRepr , proto , getDiagnoseCallback ( proto ) , <nl> - getDiagnoseCallback ( proto ) ) ; <nl> + checkType ( type , typeRepr , proto , getDiagnoser ( proto ) ) ; <nl> } ) ; <nl> } <nl> } <nl> class ExportabilityChecker : public DeclVisitor < ExportabilityChecker > { <nl> <nl> for ( auto 
& P : * SD - > getIndices ( ) ) { <nl> checkType ( P - > getInterfaceType ( ) , P - > getTypeRepr ( ) , SD , <nl> - getDiagnoseCallback ( SD ) , getDiagnoseCallback ( SD ) ) ; <nl> + getDiagnoser ( SD ) ) ; <nl> } <nl> - checkType ( SD - > getElementTypeLoc ( ) , SD , getDiagnoseCallback ( SD ) , <nl> - getDiagnoseCallback ( SD ) ) ; <nl> + checkType ( SD - > getElementTypeLoc ( ) , SD , getDiagnoser ( SD ) ) ; <nl> } <nl> <nl> void visitAbstractFunctionDecl ( AbstractFunctionDecl * fn ) { <nl> class ExportabilityChecker : public DeclVisitor < ExportabilityChecker > { <nl> <nl> for ( auto * P : * fn - > getParameters ( ) ) <nl> checkType ( P - > getInterfaceType ( ) , P - > getTypeRepr ( ) , fn , <nl> - getDiagnoseCallback ( fn ) , getDiagnoseCallback ( fn ) ) ; <nl> + getDiagnoser ( fn ) ) ; <nl> } <nl> <nl> void visitFuncDecl ( FuncDecl * FD ) { <nl> visitAbstractFunctionDecl ( FD ) ; <nl> - checkType ( FD - > getBodyResultTypeLoc ( ) , FD , getDiagnoseCallback ( FD ) , <nl> - getDiagnoseCallback ( FD ) ) ; <nl> + checkType ( FD - > getBodyResultTypeLoc ( ) , FD , getDiagnoser ( FD ) ) ; <nl> } <nl> <nl> void visitEnumElementDecl ( EnumElementDecl * EED ) { <nl> class ExportabilityChecker : public DeclVisitor < ExportabilityChecker > { <nl> return ; <nl> for ( auto & P : * EED - > getParameterList ( ) ) <nl> checkType ( P - > getInterfaceType ( ) , P - > getTypeRepr ( ) , EED , <nl> - getDiagnoseCallback ( EED ) , getDiagnoseCallback ( EED ) ) ; <nl> + getDiagnoser ( EED ) ) ; <nl> } <nl> <nl> void checkConstrainedExtensionRequirements ( ExtensionDecl * ED , <nl> class ExportabilityChecker : public DeclVisitor < ExportabilityChecker > { <nl> if ( ! ED - > getTrailingWhereClause ( ) ) <nl> return ; <nl> forAllRequirementTypes ( ED , [ & ] ( Type type , TypeRepr * typeRepr ) { <nl> - checkType ( type , typeRepr , ED , getDiagnoseCallback ( ED , reason ) , <nl> - getDiagnoseCallback ( ED , reason ) ) ; <nl> + checkType ( type , typeRepr , ED , getDiagnoser ( ED , reason ) ) ; <nl> } ) ; <nl> } <nl> <nl> class ExportabilityChecker : public DeclVisitor < ExportabilityChecker > { <nl> / / but just hide that from interfaces . <nl> llvm : : for_each ( ED - > getInherited ( ) , <nl> [ & ] ( TypeLoc nextInherited ) { <nl> - checkType ( nextInherited , ED , getDiagnoseCallback ( ED ) , <nl> - getDiagnoseCallback ( ED ) ) ; <nl> + checkType ( nextInherited , ED , getDiagnoser ( ED ) ) ; <nl> } ) ; <nl> <nl> bool hasPublicMembers = llvm : : any_of ( ED - > getMembers ( ) , <nl> class ExportabilityChecker : public DeclVisitor < ExportabilityChecker > { <nl> <nl> if ( hasPublicMembers ) { <nl> checkType ( ED - > getExtendedType ( ) , ED - > getExtendedTypeRepr ( ) , ED , <nl> - getDiagnoseCallback ( ED , Reason : : ExtensionWithPublicMembers ) , <nl> - getDiagnoseCallback ( ED , Reason : : ExtensionWithPublicMembers ) ) ; <nl> + getDiagnoser ( ED , Reason : : ExtensionWithPublicMembers ) ) ; <nl> } <nl> <nl> if ( hasPublicMembers | | ! ED - > getInherited ( ) . empty ( ) ) { <nl>
Remove some unnecessary generalization in exportability checking .
apple/swift
47cd59b0cc6bdb94638bf18628a864efa5c4c269
2020-02-01T01:43:10Z
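The diff above replaces two `llvm::function_ref` callback parameters with a single `Diagnoser` object carrying const member functions. The self-contained C++ sketch below shows the same pattern in isolation; all names and the printed messages are illustrative, not the Swift compiler's actual code.

```cpp
#include <functional>
#include <iostream>
#include <string>

// Before: every helper threads two separate callbacks through its signature.
void checkTypeOld(const std::string& offender,
                  const std::function<void(const std::string&)>& diagnoseType,
                  const std::function<void(const std::string&)>& diagnoseConformance) {
  diagnoseType(offender);
  diagnoseConformance(offender);
}

// After: one object carries the shared context and both diagnostic behaviors.
class Diagnoser {
  std::string context_;
public:
  explicit Diagnoser(std::string context) : context_(std::move(context)) {}
  void diagnoseType(const std::string& offender) const {
    std::cout << context_ << ": type from implementation-only module: " << offender << "\n";
  }
  void diagnoseConformance(const std::string& offender) const {
    std::cout << context_ << ": conformance from implementation-only module: " << offender << "\n";
  }
};

void checkTypeNew(const std::string& offender, const Diagnoser& diagnoser) {
  diagnoser.diagnoseType(offender);
  diagnoser.diagnoseConformance(offender);
}

int main() {
  Diagnoser diagnoser("func foo()");
  checkTypeNew("HiddenType", diagnoser);
}
```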
deleted file mode 100644 <nl> index e69de29bb2 . . 0000000000 <nl> deleted file mode 100644 <nl> index e69de29bb2 . . 0000000000 <nl> deleted file mode 100644 <nl> index e69de29bb2 . . 0000000000 <nl>
move upgrade guides from troubleshooting to its own folder
EOSIO/eos
4f71e4cfe41fc97006ce205c889cd1fc0f3359dc
2019-12-23T14:11:54Z
mmm a / src / base / preferences . cpp <nl> ppp b / src / base / preferences . cpp <nl> void Preferences : : setRssHSplitterState ( const QByteArray & state ) <nl> QByteArray Preferences : : getRssVSplitterState ( ) const <nl> { <nl> # ifdef QBT_USES_QT5 <nl> - return value ( " Rss / qt5 / splitter_v " ) . toByteArray ( ) ; <nl> + return value ( " Rss / qt5 / splitterV " ) . toByteArray ( ) ; <nl> # else <nl> - return value ( " Rss / splitter_v " ) . toByteArray ( ) ; <nl> + return value ( " Rss / splitterV " ) . toByteArray ( ) ; <nl> # endif <nl> } <nl> <nl> void Preferences : : setRssVSplitterState ( const QByteArray & state ) <nl> { <nl> # ifdef QBT_USES_QT5 <nl> - setValue ( " Rss / qt5 / splitter_v " , state ) ; <nl> + setValue ( " Rss / qt5 / splitterV " , state ) ; <nl> # else <nl> - setValue ( " Rss / splitter_v " , state ) ; <nl> + setValue ( " Rss / splitterV " , state ) ; <nl> # endif <nl> } <nl> <nl>
Use new key for storing RSS splitter_v value .
qbittorrent/qBittorrent
6b835f53ce9ee4c4eded4e057289a0c9bace5e30
2016-11-10T16:35:40Z
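The change above only renames the settings key, so any state previously saved under `Rss/splitter_v` is ignored from then on. The standalone Qt sketch below (plain `QSettings`, not qBittorrent's `Preferences` wrapper) illustrates what that implies; the fallback branch is a hypothetical migration step, not something the commit performs.

```cpp
#include <QByteArray>
#include <QSettings>
#include <QString>

// Reads the RSS vertical-splitter state under the new key, falling back once
// to the pre-change key so an existing layout would not be lost. qBittorrent's
// commit does no such migration; this is only an illustration of the effect
// of switching keys.
QByteArray loadRssVSplitterState(QSettings& settings)
{
    const QString newKey = QStringLiteral("Rss/splitterV");
    const QString oldKey = QStringLiteral("Rss/splitter_v");
    if (settings.contains(newKey))
        return settings.value(newKey).toByteArray();
    return settings.value(oldKey).toByteArray();  // hypothetical fallback
}
```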
mmm a / torch / nn / modules / rnn . py <nl> ppp b / torch / nn / modules / rnn . py <nl> def forward ( self , input , hx = None ) : <nl> <nl> if hx is None : <nl> num_directions = 2 if self . bidirectional else 1 <nl> - hx = input . new_zeros ( self . num_layers * num_directions , <nl> - max_batch_size , self . hidden_size , <nl> - requires_grad = False ) <nl> + hx = torch . zeros ( self . num_layers * num_directions , <nl> + max_batch_size , self . hidden_size , <nl> + dtype = input . dtype , device = input . device ) <nl> if self . mode = = ' LSTM ' : <nl> hx = ( hx , hx ) <nl> else : <nl>
Use torch.zeros for nn.LSTM
pytorch/pytorch
7bf7a4162df1c5318804138a26bc58cf2cae1cf5
2019-02-06T01:57:51Z
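The Python change above builds the default hidden state with `torch.zeros(..., dtype=input.dtype, device=input.device)` instead of `input.new_zeros(...)`. For consistency with the other sketches here, the same construction is shown below in the LibTorch C++ API; the function name and argument list are illustrative.

```cpp
#include <torch/torch.h>

// Zero-initialized initial hidden state, with dtype and device taken
// explicitly from the input tensor rather than via a factory method on it.
// Shape follows the diff: (num_layers * num_directions, batch, hidden_size).
torch::Tensor defaultHiddenState(const torch::Tensor& input,
                                 int64_t num_layers, int64_t num_directions,
                                 int64_t max_batch_size, int64_t hidden_size) {
  return torch::zeros({num_layers * num_directions, max_batch_size, hidden_size},
                      torch::TensorOptions()
                          .dtype(input.scalar_type())
                          .device(input.device()));
}
```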
mmm a / hphp / runtime / ext / filter / ext_filter . cpp <nl> ppp b / hphp / runtime / ext / filter / ext_filter . cpp <nl> static bool filter_var ( Variant & ret , const Variant & variable , int64_t filter , <nl> <nl> static bool filter_recursive ( Variant & ret , const Variant & variable , int64_t filter , <nl> const Variant & options ) { <nl> - Array arr ; <nl> + Array arr = Array : : Create ( ) ; <nl> for ( ArrayIter iter ( variable . toArray ( ) ) ; iter ; + + iter ) { <nl> Variant v ; <nl> if ( iter . second ( ) . isArray ( ) ) { <nl> new file mode 100644 <nl> index 00000000000 . . f82e8a90d79 <nl> mmm / dev / null <nl> ppp b / hphp / test / slow / ext_filter / array . expect <nl> <nl> + array ( 1 ) { <nl> + [ 0 ] = > <nl> + array ( 0 ) { <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 0b89e1e4b01 <nl> mmm / dev / null <nl> ppp b / hphp / test / slow / ext_filter / array . php <nl> <nl> + < ? php <nl> + / / based on zend / ext / filter / 010 . php <nl> + <nl> + var_dump ( <nl> + filter_var ( array ( array ( ) ) , FILTER_VALIDATE_FLOAT , FILTER_REQUIRE_ARRAY ) <nl> + ) ; <nl>
Fix filter_var(Array, FILTER_*, FILTER_REQUIRE_ARRAY)
facebook/hhvm
c0b6b3b4dee6eb9fe7f14e62c1588a7899b60d34
2015-10-06T20:30:32Z
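The one-line fix above (default-constructed `Array` replaced by `Array::Create()`) matters because, as far as HHVM's runtime types go, a default-constructed `Array` is presumably a null handle rather than a real empty array, so filtering an empty nested array did not come back as `array(0) { }`. A hedged sketch of the distinction, assuming HHVM's `Array` type as used in the diff; the helper name is illustrative.

```cpp
// Illustrative only. Array::Create() yields an actual empty array, matching
// the Zend-derived test expectation array(0) { } added alongside this change;
// the previous default-constructed Array did not.
Array filterEmptyNestedArray() {
  Array arr = Array::Create();   // real empty array, not a null handle
  // Per-element filtering would append into `arr` here; for an empty input
  // nothing is appended and the empty array is returned as-is.
  return arr;
}
```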
mmm a / dbms / programs / performance - test / PerformanceTestInfo . cpp <nl> ppp b / dbms / programs / performance - test / PerformanceTestInfo . cpp <nl> void PerformanceTestInfo : : applySettings ( XMLConfigurationPtr config ) <nl> } <nl> <nl> extractSettings ( config , " settings " , config_settings , settings_to_apply ) ; <nl> - settings . applyChanges ( settings_to_apply ) ; <nl> + settings . loadFromChanges ( settings_to_apply ) ; <nl> <nl> if ( settings_contain ( " average_rows_speed_precision " ) ) <nl> TestStats : : avg_rows_speed_precision = <nl> mmm a / dbms / programs / server / TCPHandler . cpp <nl> ppp b / dbms / programs / server / TCPHandler . cpp <nl> void TCPHandler : : runImpl ( ) <nl> / / / Should we send internal logs to client ? <nl> const auto client_logs_level = query_context - > getSettingsRef ( ) . send_logs_level ; <nl> if ( client_revision > = DBMS_MIN_REVISION_WITH_SERVER_LOGS <nl> - & & client_logs_level . value ! = LogsLevel : : none ) <nl> + & & client_logs_level ! = LogsLevel : : none ) <nl> { <nl> state . logs_queue = std : : make_shared < InternalTextLogsQueue > ( ) ; <nl> state . logs_queue - > max_priority = Poco : : Logger : : parseLevel ( client_logs_level . toString ( ) ) ; <nl> - CurrentThread : : attachInternalTextLogsQueue ( state . logs_queue , client_logs_level . value ) ; <nl> + CurrentThread : : attachInternalTextLogsQueue ( state . logs_queue , client_logs_level ) ; <nl> } <nl> <nl> query_context - > setExternalTablesInitializer ( [ & connection_settings , this ] ( Context & context ) <nl> void TCPHandler : : readData ( const Settings & connection_settings ) <nl> const auto receive_timeout = query_context - > getSettingsRef ( ) . receive_timeout . value ; <nl> <nl> / / / Poll interval should not be greater than receive_timeout <nl> - const size_t default_poll_interval = connection_settings . poll_interval . value * 1000000 ; <nl> + const size_t default_poll_interval = connection_settings . poll_interval * 1000000 ; <nl> size_t current_poll_interval = static_cast < size_t > ( receive_timeout . totalMicroseconds ( ) ) ; <nl> constexpr size_t min_poll_interval = 5000 ; / / 5 ms <nl> size_t poll_interval = std : : max ( min_poll_interval , std : : min ( default_poll_interval , current_poll_interval ) ) ; <nl> mmm a / dbms / src / Common / ErrorCodes . cpp <nl> ppp b / dbms / src / Common / ErrorCodes . cpp <nl> namespace ErrorCodes <nl> extern const int CANNOT_PTHREAD_ATTR = 468 ; <nl> extern const int VIOLATED_CONSTRAINT = 469 ; <nl> extern const int QUERY_IS_NOT_SUPPORTED_IN_LIVE_VIEW = 470 ; <nl> + extern const int SETTINGS_ARE_NOT_SUPPORTED = 471 ; <nl> + extern const int IMMUTABLE_SETTING = 472 ; <nl> <nl> extern const int KEEPER_EXCEPTION = 999 ; <nl> extern const int POCO_EXCEPTION = 1000 ; <nl> mmm a / dbms / src / Core / Settings . h <nl> ppp b / dbms / src / Core / Settings . h <nl> struct Settings : public SettingsCollection < Settings > <nl> * but we are not going to do it , because settings is used everywhere as static struct fields . <nl> * / <nl> <nl> - # define LIST_OF_SETTINGS ( M ) \ <nl> + / / / M ( mutable ) for normal settings , IM ( immutable ) for not updateable settings . <nl> + # define LIST_OF_SETTINGS ( M , IM ) \ <nl> M ( SettingUInt64 , min_compress_block_size , 65536 , " The actual size of the block to compress , if the uncompressed data less than max_compress_block_size is no less than this value and no less than the volume of data for one mark . 
" ) \ <nl> M ( SettingUInt64 , max_compress_block_size , 1048576 , " The maximum size of blocks of uncompressed data before compressing for writing to a table . " ) \ <nl> M ( SettingUInt64 , max_block_size , DEFAULT_BLOCK_SIZE , " Maximum block size for reading " ) \ <nl> mmm a / dbms / src / Core / SettingsCommon . h <nl> ppp b / dbms / src / Core / SettingsCommon . h <nl> class Field ; <nl> class ReadBuffer ; <nl> class WriteBuffer ; <nl> <nl> + namespace ErrorCodes <nl> + { <nl> + extern const int IMMUTABLE_SETTING ; <nl> + } <nl> <nl> / * * One setting for any type . <nl> * Stores a value within itself , as well as a flag - whether the value was changed . <nl> class SettingsCollection <nl> Derived & castToDerived ( ) { return * static_cast < Derived * > ( this ) ; } <nl> const Derived & castToDerived ( ) const { return * static_cast < const Derived * > ( this ) ; } <nl> <nl> + using IsChangedFunction = bool ( * ) ( const Derived & ) ; <nl> using GetStringFunction = String ( * ) ( const Derived & ) ; <nl> using GetFieldFunction = Field ( * ) ( const Derived & ) ; <nl> using SetStringFunction = void ( * ) ( Derived & , const String & ) ; <nl> class SettingsCollection <nl> <nl> struct MemberInfo <nl> { <nl> - size_t offset_of_changed ; <nl> + IsChangedFunction is_changed ; <nl> StringRef name ; <nl> StringRef description ; <nl> + / / / Can be updated after first load for config / definition . <nl> + / / / Non updatable settings can be ` changed ` , <nl> + / / / if they were overwritten in config / definition . <nl> + const bool updateable ; <nl> GetStringFunction get_string ; <nl> GetFieldFunction get_field ; <nl> SetStringFunction set_string ; <nl> class SettingsCollection <nl> DeserializeFunction deserialize ; <nl> CastValueWithoutApplyingFunction cast_value_without_applying ; <nl> <nl> - bool isChanged ( const Derived & collection ) const { return * reinterpret_cast < const bool * > ( reinterpret_cast < const UInt8 * > ( & collection ) + offset_of_changed ) ; } <nl> + bool isChanged ( const Derived & collection ) const { return is_changed ( collection ) ; } <nl> } ; <nl> <nl> class MemberInfos <nl> class SettingsCollection <nl> const_reference ( const const_reference & src ) = default ; <nl> const StringRef & getName ( ) const { return member - > name ; } <nl> const StringRef & getDescription ( ) const { return member - > description ; } <nl> + bool isUpdateable ( ) const { return member - > updateable ; } <nl> bool isChanged ( ) const { return member - > isChanged ( * collection ) ; } <nl> Field getValue ( ) const { return member - > get_field ( * collection ) ; } <nl> String getValueAsString ( ) const { return member - > get_string ( * collection ) ; } <nl> class SettingsCollection <nl> reference ( const const_reference & src ) : const_reference ( src ) { } <nl> void setValue ( const Field & value ) { this - > member - > set_field ( * const_cast < Derived * > ( this - > collection ) , value ) ; } <nl> void setValue ( const String & value ) { this - > member - > set_string ( * const_cast < Derived * > ( this - > collection ) , value ) ; } <nl> + void updateValue ( const Field & value ) <nl> + { <nl> + if ( ! this - > member - > updateable ) <nl> + throw Exception ( " Setting ' " + this - > member - > name . toString ( ) + " ' is restricted for updates . " , ErrorCodes : : IMMUTABLE_SETTING ) ; <nl> + setValue ( value ) ; <nl> + } <nl> + void updateValue ( const String & value ) <nl> + { <nl> + if ( ! 
this - > member - > updateable ) <nl> + throw Exception ( " Setting ' " + this - > member - > name . toString ( ) + " ' is restricted for updates . " , ErrorCodes : : IMMUTABLE_SETTING ) ; <nl> + setValue ( value ) ; <nl> + } <nl> } ; <nl> <nl> / / / Iterator to iterating through all the settings . <nl> class SettingsCollection <nl> void set ( size_t index , const String & value ) { ( * this ) [ index ] . setValue ( value ) ; } <nl> void set ( const String & name , const String & value ) { ( * this ) [ name ] . setValue ( value ) ; } <nl> <nl> + / / / Updates setting ' s value . Checks it ' mutability . <nl> + void update ( size_t index , const Field & value ) { ( * this ) [ index ] . updateValue ( value ) ; } <nl> + <nl> + void update ( const String & name , const Field & value ) { ( * this ) [ name ] . updateValue ( value ) ; } <nl> + <nl> + void update ( size_t index , const String & value ) { ( * this ) [ index ] . updateValue ( value ) ; } <nl> + <nl> + void update ( const String & name , const String & value ) { ( * this ) [ name ] . updateValue ( value ) ; } <nl> + <nl> / / / Returns value of a setting . <nl> Field get ( size_t index ) const { return ( * this ) [ index ] . getValue ( ) ; } <nl> Field get ( const String & name ) const { return ( * this ) [ name ] . getValue ( ) ; } <nl> class SettingsCollection <nl> return found_changes ; <nl> } <nl> <nl> - / / / Applies changes to the settings . <nl> - void applyChange ( const SettingChange & change ) <nl> + / / / Applies change to the settings . Doesn ' t check settings mutability . <nl> + void loadFromChange ( const SettingChange & change ) <nl> { <nl> set ( change . name , change . value ) ; <nl> } <nl> <nl> - void applyChanges ( const SettingsChanges & changes ) <nl> + / / / Applies changes to the settings . Should be used in initial settings loading . <nl> + / / / ( on table creation or loading from config ) <nl> + void loadFromChanges ( const SettingsChanges & changes ) <nl> { <nl> for ( const SettingChange & change : changes ) <nl> - applyChange ( change ) ; <nl> + loadFromChange ( change ) ; <nl> } <nl> <nl> + / / / Applies change to the settings , checks settings mutability . <nl> + void updateFromChange ( const SettingChange & change ) <nl> + { <nl> + update ( change . name , change . value ) ; <nl> + } <nl> + <nl> + / / / Applies changes to the settings . Should be used for settigns update . 
<nl> + / / / ( ALTER MODIFY SETTINGS ) <nl> + void updateFromChanges ( const SettingsChanges & changes ) <nl> + { <nl> + for ( const SettingChange & change : changes ) <nl> + updateFromChange ( change ) ; <nl> + } <nl> + <nl> + <nl> void copyChangesFrom ( const Derived & src ) <nl> { <nl> for ( const auto & member : members ( ) ) <nl> class SettingsCollection <nl> } ; <nl> <nl> # define DECLARE_SETTINGS_COLLECTION ( LIST_OF_SETTINGS_MACRO ) \ <nl> - LIST_OF_SETTINGS_MACRO ( DECLARE_SETTINGS_COLLECTION_DECLARE_VARIABLES_HELPER_ ) <nl> + LIST_OF_SETTINGS_MACRO ( DECLARE_SETTINGS_COLLECTION_DECLARE_VARIABLES_HELPER_ , DECLARE_SETTINGS_COLLECTION_DECLARE_VARIABLES_HELPER_ ) <nl> <nl> <nl> # define IMPLEMENT_SETTINGS_COLLECTION ( DERIVED_CLASS_NAME , LIST_OF_SETTINGS_MACRO ) \ <nl> class SettingsCollection <nl> using Derived = DERIVED_CLASS_NAME ; \ <nl> struct Functions \ <nl> { \ <nl> - LIST_OF_SETTINGS_MACRO ( IMPLEMENT_SETTINGS_COLLECTION_DEFINE_FUNCTIONS_HELPER_ ) \ <nl> + LIST_OF_SETTINGS_MACRO ( IMPLEMENT_SETTINGS_COLLECTION_DEFINE_FUNCTIONS_HELPER_ , IMPLEMENT_SETTINGS_COLLECTION_DEFINE_FUNCTIONS_HELPER_ ) \ <nl> } ; \ <nl> - LIST_OF_SETTINGS_MACRO ( IMPLEMENT_SETTINGS_COLLECTION_ADD_MEMBER_INFO_HELPER_ ) \ <nl> + LIST_OF_SETTINGS_MACRO ( IMPLEMENT_SETTINGS_COLLECTION_ADD_MUTABLE_MEMBER_INFO_HELPER_ , IMPLEMENT_SETTINGS_COLLECTION_ADD_IMMUTABLE_MEMBER_INFO_HELPER_ ) \ <nl> } <nl> <nl> <nl> class SettingsCollection <nl> static Field NAME # # _castValueWithoutApplying ( const Field & value ) { TYPE temp { DEFAULT } ; temp . set ( value ) ; return temp . toField ( ) ; } <nl> <nl> <nl> - # define IMPLEMENT_SETTINGS_COLLECTION_ADD_MEMBER_INFO_HELPER_ ( TYPE , NAME , DEFAULT , DESCRIPTION ) \ <nl> - static_assert ( std : : is_same_v < decltype ( std : : declval < Derived > ( ) . NAME . changed ) , bool > ) ; \ <nl> - add ( { offsetof ( Derived , NAME . changed ) , \ <nl> - StringRef ( # NAME , strlen ( # NAME ) ) , StringRef ( # DESCRIPTION , strlen ( # DESCRIPTION ) ) , \ <nl> + # define IMPLEMENT_SETTINGS_COLLECTION_ADD_MUTABLE_MEMBER_INFO_HELPER_ ( TYPE , NAME , DEFAULT , DESCRIPTION ) \ <nl> + add ( { [ ] ( const Derived & d ) { return d . NAME . changed ; } , \ <nl> + StringRef ( # NAME , strlen ( # NAME ) ) , StringRef ( # DESCRIPTION , strlen ( # DESCRIPTION ) ) , true , \ <nl> & Functions : : NAME # # _getString , & Functions : : NAME # # _getField , \ <nl> & Functions : : NAME # # _setString , & Functions : : NAME # # _setField , \ <nl> & Functions : : NAME # # _serialize , & Functions : : NAME # # _deserialize , \ <nl> & Functions : : NAME # # _castValueWithoutApplying } ) ; <nl> <nl> + # define IMPLEMENT_SETTINGS_COLLECTION_ADD_IMMUTABLE_MEMBER_INFO_HELPER_ ( TYPE , NAME , DEFAULT , DESCRIPTION ) \ <nl> + add ( { [ ] ( const Derived & d ) { return d . NAME . changed ; } , \ <nl> + StringRef ( # NAME , strlen ( # NAME ) ) , StringRef ( # DESCRIPTION , strlen ( # DESCRIPTION ) ) , false , \ <nl> + & Functions : : NAME # # _getString , & Functions : : NAME # # _getField , \ <nl> + & Functions : : NAME # # _setString , & Functions : : NAME # # _setField , \ <nl> + & Functions : : NAME # # _serialize , & Functions : : NAME # # _deserialize , \ <nl> + & Functions : : NAME # # _castValueWithoutApplying } ) ; <nl> } <nl> mmm a / dbms / src / Interpreters / ClusterProxy / executeQuery . cpp <nl> ppp b / dbms / src / Interpreters / ClusterProxy / executeQuery . 
cpp <nl> Context removeUserRestrictionsFromSettings ( const Context & context , const Settin <nl> / / / Set as unchanged to avoid sending to remote server . <nl> new_settings . max_concurrent_queries_for_user . changed = false ; <nl> new_settings . max_memory_usage_for_user . changed = false ; <nl> - new_settings . max_memory_usage_for_all_queries . changed = false ; <nl> + new_settings . max_memory_usage_for_all_queries = false ; <nl> <nl> Context new_context ( context ) ; <nl> new_context . setSettings ( new_settings ) ; <nl> mmm a / dbms / src / Interpreters / Context . cpp <nl> ppp b / dbms / src / Interpreters / Context . cpp <nl> struct ContextShared <nl> std : : unique_ptr < DDLWorker > ddl_worker ; / / / Process ddl commands from zk . <nl> / / / Rules for selecting the compression settings , depending on the size of the part . <nl> mutable std : : unique_ptr < CompressionCodecSelector > compression_codec_selector ; <nl> - std : : optional < MergeTreeSettings > merge_tree_settings ; / / / Settings of MergeTree * engines . <nl> + MergeTreeSettingsPtr merge_tree_settings ; / / / Settings of MergeTree * engines . <nl> size_t max_table_size_to_drop = 50000000000lu ; / / / Protects MergeTree tables from accidental DROP ( 50GB by default ) <nl> size_t max_partition_size_to_drop = 50000000000lu ; / / / Protects MergeTree partitions from accidental DROP ( 50GB by default ) <nl> String format_schema_path ; / / / Path to a directory that contains schema files used by input formats . <nl> void Context : : applySettingsChanges ( const SettingsChanges & changes ) <nl> applySettingChange ( change ) ; <nl> } <nl> <nl> + void Context : : updateSettingsChanges ( const SettingsChanges & changes ) <nl> + { <nl> + auto lock = getLock ( ) ; <nl> + for ( const SettingChange & change : changes ) <nl> + { <nl> + if ( change . name = = " profile " ) <nl> + setProfile ( change . value . safeGet < String > ( ) ) ; <nl> + else <nl> + settings . updateFromChange ( change ) ; <nl> + } <nl> + } <nl> <nl> void Context : : checkSettingsConstraints ( const SettingChange & change ) <nl> { <nl> const MergeTreeSettings & Context : : getMergeTreeSettings ( ) const <nl> if ( ! shared - > merge_tree_settings ) <nl> { <nl> auto & config = getConfigRef ( ) ; <nl> - shared - > merge_tree_settings . emplace ( ) ; <nl> - shared - > merge_tree_settings - > loadFromConfig ( " merge_tree " , config ) ; <nl> + MutableMergeTreeSettingsPtr settings_ptr = MergeTreeSettings : : create ( ) ; <nl> + settings_ptr - > loadFromConfig ( " merge_tree " , config ) ; <nl> + shared - > merge_tree_settings = std : : move ( settings_ptr ) ; <nl> } <nl> <nl> return * shared - > merge_tree_settings ; <nl> mmm a / dbms / src / Interpreters / Context . h <nl> ppp b / dbms / src / Interpreters / Context . h <nl> class Context <nl> void applySettingChange ( const SettingChange & change ) ; <nl> void applySettingsChanges ( const SettingsChanges & changes ) ; <nl> <nl> + / / / Update checking that each setting is updatable <nl> + void updateSettingsChanges ( const SettingsChanges & changes ) ; <nl> + <nl> / / / Checks the constraints . <nl> void checkSettingsConstraints ( const SettingChange & change ) ; <nl> void checkSettingsConstraints ( const SettingsChanges & changes ) ; <nl> mmm a / dbms / src / Interpreters / InterpreterSetQuery . cpp <nl> ppp b / dbms / src / Interpreters / InterpreterSetQuery . cpp <nl> BlockIO InterpreterSetQuery : : execute ( ) <nl> { <nl> const auto & ast = query_ptr - > as < ASTSetQuery & > ( ) ; <nl> context . 
checkSettingsConstraints ( ast . changes ) ; <nl> - context . getSessionContext ( ) . applySettingsChanges ( ast . changes ) ; <nl> + context . getSessionContext ( ) . updateSettingsChanges ( ast . changes ) ; <nl> return { } ; <nl> } <nl> <nl> void InterpreterSetQuery : : executeForCurrentContext ( ) <nl> { <nl> const auto & ast = query_ptr - > as < ASTSetQuery & > ( ) ; <nl> context . checkSettingsConstraints ( ast . changes ) ; <nl> - context . applySettingsChanges ( ast . changes ) ; <nl> + context . updateSettingsChanges ( ast . changes ) ; <nl> } <nl> <nl> } <nl> mmm a / dbms / src / Interpreters / LogicalExpressionsOptimizer . cpp <nl> ppp b / dbms / src / Interpreters / LogicalExpressionsOptimizer . cpp <nl> bool LogicalExpressionsOptimizer : : OrWithExpression : : operator < ( const OrWithExpres <nl> return std : : tie ( this - > or_function , this - > expression ) < std : : tie ( rhs . or_function , rhs . expression ) ; <nl> } <nl> <nl> - LogicalExpressionsOptimizer : : LogicalExpressionsOptimizer ( ASTSelectQuery * select_query_ , ExtractedSettings & & settings_ ) <nl> - : select_query ( select_query_ ) , settings ( settings_ ) <nl> + LogicalExpressionsOptimizer : : LogicalExpressionsOptimizer ( ASTSelectQuery * select_query_ , UInt64 optimize_min_equality_disjunction_chain_length ) <nl> + : select_query ( select_query_ ) , settings ( optimize_min_equality_disjunction_chain_length ) <nl> { <nl> } <nl> <nl> mmm a / dbms / src / Interpreters / LogicalExpressionsOptimizer . h <nl> ppp b / dbms / src / Interpreters / LogicalExpressionsOptimizer . h <nl> class LogicalExpressionsOptimizer final <nl> <nl> public : <nl> / / / Constructor . Accepts the root of the query DAG . <nl> - LogicalExpressionsOptimizer ( ASTSelectQuery * select_query_ , ExtractedSettings & & settings_ ) ; <nl> + LogicalExpressionsOptimizer ( ASTSelectQuery * select_query_ , UInt64 optimize_min_equality_disjunction_chain_length ) ; <nl> <nl> / * * Replace all rather long homogeneous OR - chains expr = x1 OR . . . OR expr = xN <nl> * on the expressions ` expr ` IN ( x1 , . . . , xN ) . <nl> mmm a / dbms / src / Interpreters / ThreadStatusExt . cpp <nl> ppp b / dbms / src / Interpreters / ThreadStatusExt . cpp <nl> void ThreadStatus : : logToQueryThreadLog ( QueryThreadLog & thread_log ) <nl> { <nl> elem . client_info = query_context - > getClientInfo ( ) ; <nl> <nl> - if ( query_context - > getSettingsRef ( ) . log_profile_events . value ! = 0 ) <nl> + if ( query_context - > getSettingsRef ( ) . log_profile_events ! = 0 ) <nl> { <nl> / / / NOTE : Here we are in the same thread , so we can make memcpy ( ) <nl> elem . profile_counters = std : : make_shared < ProfileEvents : : Counters > ( performance_counters . getPartiallyAtomicSnapshot ( ) ) ; <nl> mmm a / dbms / src / Parsers / ASTAlterQuery . cpp <nl> ppp b / dbms / src / Parsers / ASTAlterQuery . cpp <nl> ASTPtr ASTAlterCommand : : clone ( ) const <nl> res - > ttl = ttl - > clone ( ) ; <nl> res - > children . push_back ( res - > ttl ) ; <nl> } <nl> + if ( settings_changes ) <nl> + { <nl> + res - > settings_changes = settings_changes - > clone ( ) ; <nl> + res - > children . push_back ( res - > settings_changes ) ; <nl> + } <nl> if ( values ) <nl> { <nl> res - > values = values - > clone ( ) ; <nl> void ASTAlterCommand : : formatImpl ( <nl> settings . ostr < < ( settings . hilite ? hilite_keyword : " " ) < < indent_str < < " MODIFY TTL " < < ( settings . hilite ? 
hilite_none : " " ) ; <nl> ttl - > formatImpl ( settings , state , frame ) ; <nl> } <nl> + else if ( type = = ASTAlterCommand : : MODIFY_SETTING ) <nl> + { <nl> + settings . ostr < < ( settings . hilite ? hilite_keyword : " " ) < < indent_str < < " MODIFY SETTING " < < ( settings . hilite ? hilite_none : " " ) ; <nl> + settings_changes - > formatImpl ( settings , state , frame ) ; <nl> + } <nl> else if ( type = = ASTAlterCommand : : LIVE_VIEW_REFRESH ) <nl> { <nl> settings . ostr < < ( settings . hilite ? hilite_keyword : " " ) < < indent_str < < " REFRESH " < < ( settings . hilite ? hilite_none : " " ) ; <nl> mmm a / dbms / src / Parsers / ASTAlterQuery . h <nl> ppp b / dbms / src / Parsers / ASTAlterQuery . h <nl> class ASTAlterCommand : public IAST <nl> COMMENT_COLUMN , <nl> MODIFY_ORDER_BY , <nl> MODIFY_TTL , <nl> + MODIFY_SETTING , <nl> <nl> ADD_INDEX , <nl> DROP_INDEX , <nl> class ASTAlterCommand : public IAST <nl> / / / For MODIFY TTL query <nl> ASTPtr ttl ; <nl> <nl> + / / / FOR MODIFY_SETTING <nl> + ASTPtr settings_changes ; <nl> + <nl> / * * In ALTER CHANNEL , ADD , DROP , SUSPEND , RESUME , REFRESH , MODIFY queries , the list of live views is stored here <nl> * / <nl> ASTPtr values ; <nl> mmm a / dbms / src / Parsers / ParserAlterQuery . cpp <nl> ppp b / dbms / src / Parsers / ParserAlterQuery . cpp <nl> <nl> # include < Parsers / ExpressionListParsers . h > <nl> # include < Parsers / ParserCreateQuery . h > <nl> # include < Parsers / ParserPartition . h > <nl> + # include < Parsers / ParserSetQuery . h > <nl> # include < Parsers / ASTIdentifier . h > <nl> # include < Parsers / ASTIndexDeclaration . h > <nl> # include < Parsers / ASTAlterQuery . h > <nl> bool ParserAlterCommand : : parseImpl ( Pos & pos , ASTPtr & node , Expected & expected <nl> ParserKeyword s_comment_column ( " COMMENT COLUMN " ) ; <nl> ParserKeyword s_modify_order_by ( " MODIFY ORDER BY " ) ; <nl> ParserKeyword s_modify_ttl ( " MODIFY TTL " ) ; <nl> + ParserKeyword s_modify_setting ( " MODIFY SETTING " ) ; <nl> <nl> ParserKeyword s_add_index ( " ADD INDEX " ) ; <nl> ParserKeyword s_drop_index ( " DROP INDEX " ) ; <nl> bool ParserAlterCommand : : parseImpl ( Pos & pos , ASTPtr & node , Expected & expected <nl> ParserList parser_assignment_list ( <nl> std : : make_unique < ParserAssignment > ( ) , std : : make_unique < ParserToken > ( TokenType : : Comma ) , <nl> / * allow_empty = * / false ) ; <nl> + ParserSetQuery parser_settings ( true ) ; <nl> ParserNameList values_p ; <nl> <nl> if ( is_live_view ) <nl> bool ParserAlterCommand : : parseImpl ( Pos & pos , ASTPtr & node , Expected & expected <nl> return false ; <nl> command - > type = ASTAlterCommand : : MODIFY_TTL ; <nl> } <nl> + else if ( s_modify_setting . ignore ( pos , expected ) ) <nl> + { <nl> + if ( ! parser_settings . parse ( pos , command - > settings_changes , expected ) ) <nl> + return false ; <nl> + command - > type = ASTAlterCommand : : MODIFY_SETTING ; <nl> + } <nl> else <nl> return false ; <nl> + <nl> } <nl> <nl> if ( command - > col_decl ) <nl> bool ParserAlterCommand : : parseImpl ( Pos & pos , ASTPtr & node , Expected & expected <nl> command - > children . push_back ( command - > comment ) ; <nl> if ( command - > ttl ) <nl> command - > children . push_back ( command - > ttl ) ; <nl> + if ( command - > settings_changes ) <nl> + command - > children . push_back ( command - > settings_changes ) ; <nl> <nl> return true ; <nl> } <nl> mmm a / dbms / src / Parsers / ParserAlterQuery . h <nl> ppp b / dbms / src / Parsers / ParserAlterQuery . 
h <nl> namespace DB <nl> * [ CLEAR COLUMN [ IF EXISTS ] col_to_clear [ IN PARTITION partition ] , ] <nl> * [ MODIFY COLUMN [ IF EXISTS ] col_to_modify type , . . . ] <nl> * [ MODIFY PRIMARY KEY ( a , b , c . . . ) ] <nl> + * [ MODIFY SETTING setting_name = setting_value , . . . ] <nl> * [ COMMENT COLUMN [ IF EXISTS ] col_name string ] <nl> * [ DROP | DETACH | ATTACH PARTITION | PART partition , . . . ] <nl> * [ FETCH PARTITION partition FROM . . . ] <nl> mmm a / dbms / src / Storages / AlterCommands . cpp <nl> ppp b / dbms / src / Storages / AlterCommands . cpp <nl> <nl> # include < Parsers / ASTFunction . h > <nl> # include < Parsers / ASTAlterQuery . h > <nl> # include < Parsers / ASTColumnDeclaration . h > <nl> + # include < Parsers / ASTSetQuery . h > <nl> # include < Common / typeid_cast . h > <nl> # include < Compression / CompressionFactory . h > <nl> <nl> namespace ErrorCodes <nl> extern const int ILLEGAL_COLUMN ; <nl> extern const int BAD_ARGUMENTS ; <nl> extern const int LOGICAL_ERROR ; <nl> + extern const int UNKNOWN_SETTING ; <nl> } <nl> <nl> <nl> std : : optional < AlterCommand > AlterCommand : : parse ( const ASTAlterCommand * command_ <nl> command . ttl = command_ast - > ttl ; <nl> return command ; <nl> } <nl> + else if ( command_ast - > type = = ASTAlterCommand : : MODIFY_SETTING ) <nl> + { <nl> + AlterCommand command ; <nl> + command . type = AlterCommand : : MODIFY_SETTING ; <nl> + command . settings_changes = command_ast - > settings_changes - > as < ASTSetQuery & > ( ) . changes ; <nl> + return command ; <nl> + } <nl> else <nl> return { } ; <nl> } <nl> <nl> <nl> void AlterCommand : : apply ( ColumnsDescription & columns_description , IndicesDescription & indices_description , <nl> - ConstraintsDescription & constraints_description , <nl> - ASTPtr & order_by_ast , ASTPtr & primary_key_ast , ASTPtr & ttl_table_ast ) const <nl> + ConstraintsDescription & constraints_description , ASTPtr & order_by_ast , ASTPtr & primary_key_ast , <nl> + ASTPtr & ttl_table_ast , SettingsChanges & changes ) const <nl> { <nl> if ( type = = ADD_COLUMN ) <nl> { <nl> void AlterCommand : : apply ( ColumnsDescription & columns_description , IndicesDescri <nl> { <nl> ttl_table_ast = ttl ; <nl> } <nl> + else if ( type = = MODIFY_SETTING ) <nl> + { <nl> + changes . insert ( changes . end ( ) , settings_changes . begin ( ) , settings_changes . end ( ) ) ; <nl> + } <nl> else <nl> throw Exception ( " Wrong parameter type in ALTER query " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> } <nl> <nl> bool AlterCommand : : isMutable ( ) const <nl> { <nl> - if ( type = = COMMENT_COLUMN ) <nl> + if ( type = = COMMENT_COLUMN | | type = = MODIFY_SETTING ) <nl> return false ; <nl> if ( type = = MODIFY_COLUMN ) <nl> return data_type . 
get ( ) | | default_expression ; <nl> - / / TODO : возможно , здесь нужно дополнить <nl> return true ; <nl> } <nl> <nl> + bool AlterCommand : : isSettingsAlter ( ) const <nl> + { <nl> + return type = = MODIFY_SETTING ; <nl> + } <nl> + <nl> void AlterCommands : : apply ( ColumnsDescription & columns_description , IndicesDescription & indices_description , <nl> - ConstraintsDescription & constraints_description , <nl> - ASTPtr & order_by_ast , ASTPtr & primary_key_ast , ASTPtr & ttl_table_ast ) const <nl> + ConstraintsDescription & constraints_description , ASTPtr & order_by_ast , ASTPtr & primary_key_ast , <nl> + ASTPtr & ttl_table_ast , SettingsChanges & changes ) const <nl> { <nl> auto new_columns_description = columns_description ; <nl> auto new_indices_description = indices_description ; <nl> void AlterCommands : : apply ( ColumnsDescription & columns_description , IndicesDescr <nl> auto new_order_by_ast = order_by_ast ; <nl> auto new_primary_key_ast = primary_key_ast ; <nl> auto new_ttl_table_ast = ttl_table_ast ; <nl> + auto new_changes = changes ; <nl> <nl> for ( const AlterCommand & command : * this ) <nl> if ( ! command . ignore ) <nl> - command . apply ( new_columns_description , new_indices_description , new_constraints_description , new_order_by_ast , new_primary_key_ast , new_ttl_table_ast ) ; <nl> + command . apply ( new_columns_description , new_indices_description , new_constraints_description , new_order_by_ast , new_primary_key_ast , new_ttl_table_ast , new_changes ) ; <nl> <nl> columns_description = std : : move ( new_columns_description ) ; <nl> indices_description = std : : move ( new_indices_description ) ; <nl> void AlterCommands : : apply ( ColumnsDescription & columns_description , IndicesDescr <nl> order_by_ast = std : : move ( new_order_by_ast ) ; <nl> primary_key_ast = std : : move ( new_primary_key_ast ) ; <nl> ttl_table_ast = std : : move ( new_ttl_table_ast ) ; <nl> + changes = std : : move ( new_changes ) ; <nl> } <nl> <nl> void AlterCommands : : validate ( const IStorage & table , const Context & context ) <nl> void AlterCommands : : validate ( const IStorage & table , const Context & context ) <nl> throw Exception { " Wrong column name . Cannot find column " + command . column_name + " to comment " , ErrorCodes : : ILLEGAL_COLUMN } ; <nl> } <nl> } <nl> + else if ( command . type = = AlterCommand : : MODIFY_SETTING ) <nl> + { <nl> + for ( const auto & change : command . settings_changes ) <nl> + { <nl> + if ( ! table . hasSetting ( change . name ) ) <nl> + { <nl> + throw Exception { " Storage ' " + table . getName ( ) + " ' doesn ' t have setting ' " + change . 
name + " ' " , ErrorCodes : : UNKNOWN_SETTING } ; <nl> + } <nl> + } <nl> + } <nl> } <nl> <nl> / * * Existing defaulted columns may require default expression extensions with a type conversion , <nl> void AlterCommands : : validate ( const IStorage & table , const Context & context ) <nl> } <nl> } <nl> <nl> - void AlterCommands : : apply ( ColumnsDescription & columns_description ) const <nl> + void AlterCommands : : applyForColumnsOnly ( ColumnsDescription & columns_description ) const <nl> { <nl> auto out_columns_description = columns_description ; <nl> IndicesDescription indices_description ; <nl> void AlterCommands : : apply ( ColumnsDescription & columns_description ) const <nl> ASTPtr out_order_by ; <nl> ASTPtr out_primary_key ; <nl> ASTPtr out_ttl_table ; <nl> - apply ( out_columns_description , indices_description , constraints_description , out_order_by , out_primary_key , out_ttl_table ) ; <nl> + SettingsChanges out_changes ; <nl> + apply ( out_columns_description , indices_description , constraints_description , <nl> + out_order_by , out_primary_key , out_ttl_table , out_changes ) ; <nl> <nl> if ( out_order_by ) <nl> throw Exception ( " Storage doesn ' t support modifying ORDER BY expression " , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> void AlterCommands : : apply ( ColumnsDescription & columns_description ) const <nl> throw Exception ( " Storage doesn ' t support modifying constraints " , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> if ( out_ttl_table ) <nl> throw Exception ( " Storage doesn ' t support modifying TTL expression " , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> + if ( ! out_changes . empty ( ) ) <nl> + throw Exception ( " Storage doesn ' t support modifying settings " , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> + <nl> <nl> columns_description = std : : move ( out_columns_description ) ; <nl> } <nl> <nl> + <nl> + void AlterCommands : : applyForSettingsOnly ( SettingsChanges & changes ) const <nl> + { <nl> + ColumnsDescription out_columns_description ; <nl> + IndicesDescription indices_description ; <nl> + ConstraintsDescription constraints_description ; <nl> + ASTPtr out_order_by ; <nl> + ASTPtr out_primary_key ; <nl> + ASTPtr out_ttl_table ; <nl> + SettingsChanges out_changes ; <nl> + apply ( out_columns_description , indices_description , constraints_description , out_order_by , <nl> + out_primary_key , out_ttl_table , out_changes ) ; <nl> + <nl> + if ( out_columns_description . begin ( ) ! = out_columns_description . end ( ) ) <nl> + throw Exception ( " Alter modifying columns , but only settings change applied . " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> + if ( out_order_by ) <nl> + throw Exception ( " Alter modifying ORDER BY expression , but only settings change applied . " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> + if ( out_primary_key ) <nl> + throw Exception ( " Alter modifying PRIMARY KEY expression , but only settings change applied . " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> + if ( ! indices_description . indices . empty ( ) ) <nl> + throw Exception ( " Alter modifying indices , but only settings change applied . " , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> + if ( out_ttl_table ) <nl> + throw Exception ( " Alter modifying TTL , but only settings change applied . 
" , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> + <nl> + changes = std : : move ( out_changes ) ; <nl> + } <nl> + <nl> bool AlterCommands : : isMutable ( ) const <nl> { <nl> for ( const auto & param : * this ) <nl> bool AlterCommands : : isMutable ( ) const <nl> return false ; <nl> } <nl> <nl> + bool AlterCommands : : isSettingsAlter ( ) const <nl> + { <nl> + return std : : all_of ( begin ( ) , end ( ) , [ ] ( const AlterCommand & c ) { return c . isSettingsAlter ( ) ; } ) ; <nl> + } <nl> } <nl> mmm a / dbms / src / Storages / AlterCommands . h <nl> ppp b / dbms / src / Storages / AlterCommands . h <nl> <nl> # include < Storages / IndicesDescription . h > <nl> # include < Storages / ConstraintsDescription . h > <nl> <nl> + # include < Common / SettingsChanges . h > <nl> + <nl> <nl> namespace DB <nl> { <nl> struct AlterCommand <nl> DROP_CONSTRAINT , <nl> MODIFY_TTL , <nl> UKNOWN_TYPE , <nl> + MODIFY_SETTING , <nl> } ; <nl> <nl> Type type = UKNOWN_TYPE ; <nl> struct AlterCommand <nl> / / / For ADD and MODIFY <nl> CompressionCodecPtr codec ; <nl> <nl> + / / / For MODIFY SETTING <nl> + SettingsChanges settings_changes ; <nl> + <nl> AlterCommand ( ) = default ; <nl> AlterCommand ( const Type type_ , const String & column_name_ , const DataTypePtr & data_type_ , <nl> const ColumnDefaultKind default_kind_ , const ASTPtr & default_expression_ , <nl> struct AlterCommand <nl> static std : : optional < AlterCommand > parse ( const ASTAlterCommand * command ) ; <nl> <nl> void apply ( ColumnsDescription & columns_description , IndicesDescription & indices_description , <nl> - ConstraintsDescription & constraints_description , <nl> - ASTPtr & order_by_ast , ASTPtr & primary_key_ast , ASTPtr & ttl_table_ast ) const ; <nl> + ConstraintsDescription & constraints_description , ASTPtr & order_by_ast , <nl> + ASTPtr & primary_key_ast , ASTPtr & ttl_table_ast , SettingsChanges & changes ) const ; <nl> <nl> / / / Checks that not only metadata touched by that command <nl> bool isMutable ( ) const ; <nl> + <nl> + / / / checks that only settings changed by alter <nl> + bool isSettingsAlter ( ) const ; <nl> } ; <nl> <nl> class Context ; <nl> class Context ; <nl> class AlterCommands : public std : : vector < AlterCommand > <nl> { <nl> public : <nl> + / / / Used for primitive table engines , where only columns metadata can be changed <nl> + void applyForColumnsOnly ( ColumnsDescription & columns_description ) const ; <nl> void apply ( ColumnsDescription & columns_description , IndicesDescription & indices_description , <nl> - ConstraintsDescription & constraints_description , <nl> - ASTPtr & order_by_ast , ASTPtr & primary_key_ast , ASTPtr & ttl_table_ast ) const ; <nl> + ConstraintsDescription & constraints_description , ASTPtr & order_by_ast , ASTPtr & primary_key_ast , <nl> + ASTPtr & ttl_table_ast , SettingsChanges & changes ) const ; <nl> <nl> - / / / For storages that don ' t support MODIFY_ORDER_BY . <nl> - void apply ( ColumnsDescription & columns_description ) const ; <nl> + / / / Apply alter commands only for settings . Exception will be thrown if any other part of table structure will be modified . <nl> + void applyForSettingsOnly ( SettingsChanges & changes ) const ; <nl> <nl> void validate ( const IStorage & table , const Context & context ) ; <nl> bool isMutable ( ) const ; <nl> + bool isSettingsAlter ( ) const ; <nl> } ; <nl> <nl> } <nl> mmm a / dbms / src / Storages / IStorage . cpp <nl> ppp b / dbms / src / Storages / IStorage . cpp <nl> <nl> # include < Storages / IStorage . 
h > <nl> <nl> # include < Storages / AlterCommands . h > <nl> + # include < Parsers / ASTCreateQuery . h > <nl> + # include < Parsers / ASTSetQuery . h > <nl> <nl> # include < sparsehash / dense_hash_map > <nl> # include < sparsehash / dense_hash_set > <nl> namespace ErrorCodes <nl> extern const int NO_SUCH_COLUMN_IN_TABLE ; <nl> extern const int NOT_FOUND_COLUMN_IN_BLOCK ; <nl> extern const int TYPE_MISMATCH ; <nl> + extern const int SETTINGS_ARE_NOT_SUPPORTED ; <nl> + extern const int UNKNOWN_SETTING ; <nl> } <nl> <nl> IStorage : : IStorage ( ColumnsDescription columns_ ) <nl> bool IStorage : : isVirtualColumn ( const String & column_name ) const <nl> return getColumns ( ) . get ( column_name ) . is_virtual ; <nl> } <nl> <nl> + bool IStorage : : hasSetting ( const String & / * setting_name * / ) const <nl> + { <nl> + if ( ! supportsSettings ( ) ) <nl> + throw Exception ( " Storage ' " + getName ( ) + " ' doesn ' t support settings . " , ErrorCodes : : SETTINGS_ARE_NOT_SUPPORTED ) ; <nl> + return false ; <nl> + } <nl> + <nl> TableStructureReadLockHolder IStorage : : lockStructureForShare ( bool will_add_new_data , const String & query_id ) <nl> { <nl> TableStructureReadLockHolder result ; <nl> TableStructureWriteLockHolder IStorage : : lockExclusively ( const String & query_id ) <nl> return result ; <nl> } <nl> <nl> + <nl> + void IStorage : : alterSettings ( <nl> + const SettingsChanges & new_changes , <nl> + const String & current_database_name , <nl> + const String & current_table_name , <nl> + const Context & context , <nl> + TableStructureWriteLockHolder & / * table_lock_holder * / ) <nl> + { <nl> + IDatabase : : ASTModifier storage_modifier = [ & ] ( IAST & ast ) <nl> + { <nl> + if ( ! new_changes . empty ( ) ) <nl> + { <nl> + auto & storage_changes = ast . as < ASTStorage & > ( ) . settings - > changes ; <nl> + / / / Make storage settings unique <nl> + for ( const auto & change : new_changes ) <nl> + { <nl> + if ( hasSetting ( change . name ) ) <nl> + { <nl> + auto finder = [ & change ] ( const SettingChange & c ) { return c . name = = change . name ; } ; <nl> + if ( auto it = std : : find_if ( storage_changes . begin ( ) , storage_changes . end ( ) , finder ) ; it ! = storage_changes . end ( ) ) <nl> + it - > value = change . value ; <nl> + else <nl> + storage_changes . push_back ( change ) ; <nl> + } <nl> + else <nl> + throw Exception { " Storage ' " + getName ( ) + " ' doesn ' t have setting ' " + change . name + " ' " , ErrorCodes : : UNKNOWN_SETTING } ; <nl> + } <nl> + } <nl> + } ; <nl> + context . getDatabase ( current_database_name ) - > alterTable ( context , current_table_name , getColumns ( ) , getIndices ( ) , getConstraints ( ) , storage_modifier ) ; <nl> + } <nl> + <nl> + <nl> void IStorage : : alter ( <nl> const AlterCommands & params , <nl> const String & database_name , <nl> void IStorage : : alter ( <nl> const Context & context , <nl> TableStructureWriteLockHolder & table_lock_holder ) <nl> { <nl> - for ( const auto & param : params ) <nl> + if ( params . isSettingsAlter ( ) ) <nl> { <nl> - if ( param . isMutable ( ) ) <nl> - throw Exception ( " Method alter supports only change comment of column for storage " + getName ( ) , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> + SettingsChanges new_changes ; <nl> + params . applyForSettingsOnly ( new_changes ) ; <nl> + alterSettings ( new_changes , database_name , table_name , context , table_lock_holder ) ; <nl> + return ; <nl> } <nl> <nl> + if ( params . 
isMutable ( ) ) <nl> + throw Exception ( " Method alter supports only change comment of column for storage " + getName ( ) , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> + <nl> lockStructureExclusively ( table_lock_holder , context . getCurrentQueryId ( ) ) ; <nl> auto new_columns = getColumns ( ) ; <nl> auto new_indices = getIndices ( ) ; <nl> auto new_constraints = getConstraints ( ) ; <nl> - params . apply ( new_columns ) ; <nl> + params . applyForColumnsOnly ( new_columns ) ; <nl> context . getDatabase ( database_name ) - > alterTable ( context , table_name , new_columns , new_indices , new_constraints , { } ) ; <nl> setColumns ( std : : move ( new_columns ) ) ; <nl> } <nl> mmm a / dbms / src / Storages / IStorage . h <nl> ppp b / dbms / src / Storages / IStorage . h <nl> <nl> # include < Common / ActionLock . h > <nl> # include < Common / Exception . h > <nl> # include < Common / RWLock . h > <nl> + # include < Common / SettingsChanges . h > <nl> # include < Storages / ConstraintsDescription . h > <nl> <nl> # include < optional > <nl> class IStorage : public std : : enable_shared_from_this < IStorage > <nl> / / / Returns true if the storage supports deduplication of inserted data blocks . <nl> virtual bool supportsDeduplication ( ) const { return false ; } <nl> <nl> + / / / Returns true if the storage supports settings . <nl> + virtual bool supportsSettings ( ) const { return false ; } <nl> + <nl> / / / Optional size information of each physical column . <nl> / / / Currently it ' s only used by the MergeTree family for query optimizations . <nl> using ColumnSizeByName = std : : unordered_map < std : : string , ColumnSize > ; <nl> class IStorage : public std : : enable_shared_from_this < IStorage > <nl> / / / If | need_all | is set , then checks that all the columns of the table are in the block . <nl> void check ( const Block & block , bool need_all = false ) const ; <nl> <nl> + / / / Check storage has setting . Exception will be thrown if it doesn ' t support settings at all . <nl> + virtual bool hasSetting ( const String & setting_name ) const ; <nl> + <nl> protected : / / / still thread - unsafe part . <nl> void setIndices ( IndicesDescription indices_ ) ; <nl> <nl> / / / Returns whether the column is virtual - by default all columns are real . <nl> / / / Initially reserved virtual column name may be shadowed by real column . <nl> virtual bool isVirtualColumn ( const String & column_name ) const ; <nl> - <nl> private : <nl> ColumnsDescription columns ; / / / combined real and virtual columns <nl> const ColumnsDescription virtuals = { } ; <nl> class IStorage : public std : : enable_shared_from_this < IStorage > <nl> throw Exception ( " Partition operations are not supported by storage " + getName ( ) , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> } <nl> <nl> + / * * ALTER table settings if possible . Otherwise throws exception . <nl> + * / <nl> + virtual void alterSettings ( <nl> + const SettingsChanges & new_changes , <nl> + const String & current_database_name , <nl> + const String & current_table_name , <nl> + const Context & context , <nl> + TableStructureWriteLockHolder & table_lock_holder ) ; <nl> + <nl> / * * Perform any background work . For example , combining parts in a MergeTree type table . <nl> * Returns whether any work has been done . <nl> * / <nl> mmm a / dbms / src / Storages / Kafka / KafkaSettings . cpp <nl> ppp b / dbms / src / Storages / Kafka / KafkaSettings . 
cpp <nl> void KafkaSettings : : loadFromQuery ( ASTStorage & storage_def ) <nl> { <nl> try <nl> { <nl> - applyChanges ( storage_def . settings - > changes ) ; <nl> + loadFromChanges ( storage_def . settings - > changes ) ; <nl> } <nl> catch ( Exception & e ) <nl> { <nl> mmm a / dbms / src / Storages / Kafka / KafkaSettings . h <nl> ppp b / dbms / src / Storages / Kafka / KafkaSettings . h <nl> class ASTStorage ; <nl> struct KafkaSettings : public SettingsCollection < KafkaSettings > <nl> { <nl> <nl> - # define LIST_OF_KAFKA_SETTINGS ( M ) \ <nl> + <nl> + / / / M ( mutable ) for normal settings , IM ( immutable ) for not updateable settings . <nl> + # define LIST_OF_KAFKA_SETTINGS ( M , IM ) \ <nl> M ( SettingString , kafka_broker_list , " " , " A comma - separated list of brokers for Kafka engine . " ) \ <nl> M ( SettingString , kafka_topic_list , " " , " A list of Kafka topics . " ) \ <nl> M ( SettingString , kafka_group_name , " " , " A group of Kafka consumers . " ) \ <nl> mmm a / dbms / src / Storages / Kafka / StorageKafka . cpp <nl> ppp b / dbms / src / Storages / Kafka / StorageKafka . cpp <nl> namespace ErrorCodes <nl> extern const int LOGICAL_ERROR ; <nl> extern const int BAD_ARGUMENTS ; <nl> extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH ; <nl> + extern const int UNSUPPORTED_METHOD ; <nl> } <nl> <nl> namespace <nl> bool StorageKafka : : streamToViews ( ) <nl> const Settings & settings = global_context . getSettingsRef ( ) ; <nl> size_t block_size = max_block_size ; <nl> if ( block_size = = 0 ) <nl> - block_size = settings . max_block_size . value ; <nl> + block_size = settings . max_block_size ; <nl> <nl> / / Create a stream for each consumer and join them in a union stream <nl> InterpreterInsertQuery interpreter { insert , global_context } ; <nl> bool StorageKafka : : streamToViews ( ) <nl> } <nl> <nl> <nl> + bool StorageKafka : : hasSetting ( const String & setting_name ) const <nl> + { <nl> + return KafkaSettings : : findIndex ( setting_name ) ! = KafkaSettings : : npos ; <nl> + } <nl> + <nl> + void StorageKafka : : alterSettings ( <nl> + const SettingsChanges & / * new_changes * / , <nl> + const String & / * current_database_name * / , <nl> + const String & / * current_table_name * / , <nl> + const Context & / * context * / , <nl> + TableStructureWriteLockHolder & / * table_lock_holder * / ) <nl> + { <nl> + throw Exception ( " Storage ' " + getName ( ) + " ' doesn ' t support settings alter " , ErrorCodes : : UNSUPPORTED_METHOD ) ; <nl> + } <nl> + <nl> + <nl> void registerStorageKafka ( StorageFactory & factory ) <nl> { <nl> factory . registerStorage ( " Kafka " , [ ] ( const StorageFactory : : Arguments & args ) <nl> void registerStorageKafka ( StorageFactory & factory ) <nl> # undef CHECK_KAFKA_STORAGE_ARGUMENT <nl> <nl> / / Get and check broker list <nl> - String brokers = kafka_settings . kafka_broker_list . value ; <nl> + String brokers = kafka_settings . kafka_broker_list ; <nl> if ( args_count > = 1 ) <nl> { <nl> const auto * ast = engine_args [ 0 ] - > as < ASTLiteral > ( ) ; <nl> void registerStorageKafka ( StorageFactory & factory ) <nl> } <nl> <nl> / / Parse row delimiter ( optional ) <nl> - char row_delimiter = kafka_settings . kafka_row_delimiter . value ; <nl> + char row_delimiter = kafka_settings . kafka_row_delimiter ; <nl> if ( args_count > = 5 ) <nl> { <nl> engine_args [ 4 ] = evaluateConstantExpressionOrIdentifierAsLiteral ( engine_args [ 4 ] , args . 
local_context ) ; <nl> void registerStorageKafka ( StorageFactory & factory ) <nl> } <nl> <nl> / / Parse number of consumers ( optional ) <nl> - UInt64 num_consumers = kafka_settings . kafka_num_consumers . value ; <nl> + UInt64 num_consumers = kafka_settings . kafka_num_consumers ; <nl> if ( args_count > = 7 ) <nl> { <nl> const auto * ast = engine_args [ 6 ] - > as < ASTLiteral > ( ) ; <nl> void registerStorageKafka ( StorageFactory & factory ) <nl> } <nl> <nl> / / Parse max block size ( optional ) <nl> - UInt64 max_block_size = static_cast < size_t > ( kafka_settings . kafka_max_block_size . value ) ; <nl> + UInt64 max_block_size = static_cast < size_t > ( kafka_settings . kafka_max_block_size ) ; <nl> if ( args_count > = 8 ) <nl> { <nl> const auto * ast = engine_args [ 7 ] - > as < ASTLiteral > ( ) ; <nl> void registerStorageKafka ( StorageFactory & factory ) <nl> } <nl> } <nl> <nl> - size_t skip_broken = static_cast < size_t > ( kafka_settings . kafka_skip_broken_messages . value ) ; <nl> + size_t skip_broken = static_cast < size_t > ( kafka_settings . kafka_skip_broken_messages ) ; <nl> if ( args_count > = 9 ) <nl> { <nl> const auto * ast = engine_args [ 8 ] - > as < ASTLiteral > ( ) ; <nl> mmm a / dbms / src / Storages / Kafka / StorageKafka . h <nl> ppp b / dbms / src / Storages / Kafka / StorageKafka . h <nl> class StorageKafka : public ext : : shared_ptr_helper < StorageKafka > , public IStorag <nl> std : : string getName ( ) const override { return " Kafka " ; } <nl> std : : string getTableName ( ) const override { return table_name ; } <nl> std : : string getDatabaseName ( ) const override { return database_name ; } <nl> + bool supportsSettings ( ) const override { return true ; } <nl> <nl> void startup ( ) override ; <nl> void shutdown ( ) override ; <nl> class StorageKafka : public ext : : shared_ptr_helper < StorageKafka > , public IStorag <nl> const auto & getSchemaName ( ) const { return schema_name ; } <nl> const auto & skipBroken ( ) const { return skip_broken ; } <nl> <nl> + bool hasSetting ( const String & setting_name ) const override ; <nl> + <nl> + void alterSettings ( <nl> + const SettingsChanges & new_changes , <nl> + const String & current_database_name , <nl> + const String & current_table_name , <nl> + const Context & context , <nl> + TableStructureWriteLockHolder & table_lock_holder ) override ; <nl> + <nl> protected : <nl> StorageKafka ( <nl> const std : : string & table_name_ , <nl> mmm a / dbms / src / Storages / MergeTree / DataPartsExchange . cpp <nl> ppp b / dbms / src / Storages / MergeTree / DataPartsExchange . cpp <nl> void Service : : processQuery ( const Poco : : Net : : HTMLForm & params , ReadBuffer & / * bo <nl> throw Exception ( " Transferring part to replica was cancelled " , ErrorCodes : : ABORTED ) ; <nl> <nl> String part_name = params . get ( " part " ) ; <nl> + const auto data_settings = data . getCOWSettings ( ) ; <nl> <nl> / / / Validation of the input that may come from malicious replica . <nl> MergeTreePartInfo : : fromPartName ( part_name , data . format_version ) ; <nl> <nl> static std : : atomic_uint total_sends { 0 } ; <nl> <nl> - if ( ( data . settings . replicated_max_parallel_sends & & total_sends > = data . settings . replicated_max_parallel_sends ) <nl> - | | ( data . settings . replicated_max_parallel_sends_for_table & & data . current_table_sends > = data . settings . 
replicated_max_parallel_sends_for_table ) ) <nl> + if ( ( data_settings - > replicated_max_parallel_sends & & total_sends > = data_settings - > replicated_max_parallel_sends ) <nl> + | | ( data_settings - > replicated_max_parallel_sends_for_table & & data . current_table_sends > = data_settings - > replicated_max_parallel_sends_for_table ) ) <nl> { <nl> response . setStatus ( std : : to_string ( HTTP_TOO_MANY_REQUESTS ) ) ; <nl> response . setReason ( " Too many concurrent fetches , try again later " ) ; <nl> MergeTreeData : : MutableDataPartPtr Fetcher : : fetchPart ( <nl> { <nl> / / / Validation of the input that may come from malicious replica . <nl> MergeTreePartInfo : : fromPartName ( part_name , data . format_version ) ; <nl> + const auto data_settings = data . getCOWSettings ( ) ; <nl> <nl> Poco : : URI uri ; <nl> uri . setScheme ( interserver_scheme ) ; <nl> MergeTreeData : : MutableDataPartPtr Fetcher : : fetchPart ( <nl> timeouts , <nl> creds , <nl> DBMS_DEFAULT_BUFFER_SIZE , <nl> - data . settings . replicated_max_parallel_fetches_for_host <nl> + data_settings - > replicated_max_parallel_fetches_for_host <nl> } ; <nl> <nl> static const String TMP_PREFIX = " tmp_fetch_ " ; <nl> mmm a / dbms / src / Storages / MergeTree / IMergedBlockOutputStream . cpp <nl> ppp b / dbms / src / Storages / MergeTree / IMergedBlockOutputStream . cpp <nl> IMergedBlockOutputStream : : IMergedBlockOutputStream ( <nl> , compute_granularity ( index_granularity . empty ( ) ) <nl> , codec ( std : : move ( codec_ ) ) <nl> , skip_indices ( indices_to_recalc ) <nl> - , with_final_mark ( storage . settings . write_final_mark & & can_use_adaptive_granularity ) <nl> + , with_final_mark ( storage . getCOWSettings ( ) - > write_final_mark & & can_use_adaptive_granularity ) <nl> { <nl> if ( blocks_are_granules_size & & ! index_granularity . empty ( ) ) <nl> throw Exception ( " Can ' t take information about index granularity from blocks , when non empty index_granularity array specified " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> void fillIndexGranularityImpl ( <nl> <nl> void IMergedBlockOutputStream : : fillIndexGranularity ( const Block & block ) <nl> { <nl> + const auto storage_settings = storage . getCOWSettings ( ) ; <nl> fillIndexGranularityImpl ( <nl> block , <nl> - storage . settings . index_granularity_bytes , <nl> - storage . settings . index_granularity , <nl> + storage_settings - > index_granularity_bytes , <nl> + storage_settings - > index_granularity , <nl> blocks_are_granules_size , <nl> index_offset , <nl> index_granularity , <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeData . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeData . cpp <nl> <nl> # include < Parsers / ASTLiteral . h > <nl> # include < Parsers / ASTFunction . h > <nl> # include < Parsers / ASTPartition . h > <nl> + # include < Parsers / ASTSetQuery . h > <nl> # include < Parsers / ExpressionListParsers . h > <nl> # include < Parsers / parseQuery . h > <nl> # include < Parsers / queryToString . 
h > <nl> namespace ErrorCodes <nl> extern const int BAD_TTL_EXPRESSION ; <nl> extern const int INCORRECT_FILE_NAME ; <nl> extern const int BAD_DATA_PART_NAME ; <nl> + extern const int UNKNOWN_SETTING ; <nl> } <nl> <nl> <nl> MergeTreeData : : MergeTreeData ( <nl> const ASTPtr & sample_by_ast_ , <nl> const ASTPtr & ttl_table_ast_ , <nl> const MergingParams & merging_params_ , <nl> - const MergeTreeSettings & settings_ , <nl> + MergeTreeSettingsPtr settings_ , <nl> bool require_part_metadata_ , <nl> bool attach , <nl> BrokenPartCallback broken_part_callback_ ) <nl> : global_context ( context_ ) , <nl> merging_params ( merging_params_ ) , <nl> - settings ( settings_ ) , <nl> partition_by_ast ( partition_by_ast_ ) , <nl> sample_by_ast ( sample_by_ast_ ) , <nl> ttl_table_ast ( ttl_table_ast_ ) , <nl> MergeTreeData : : MergeTreeData ( <nl> full_path ( full_path_ ) , <nl> broken_part_callback ( broken_part_callback_ ) , <nl> log_name ( database_name + " . " + table_name ) , log ( & Logger : : get ( log_name ) ) , <nl> + guarded_settings ( settings_ ) , <nl> data_parts_by_info ( data_parts_indexes . get < TagByInfo > ( ) ) , <nl> data_parts_by_state_and_info ( data_parts_indexes . get < TagByStateAndInfo > ( ) ) <nl> { <nl> + const auto settings = getCOWSettings ( ) ; <nl> setProperties ( order_by_ast_ , primary_key_ast_ , columns_ , indices_ , constraints_ ) ; <nl> setConstraints ( constraints_ ) ; <nl> <nl> MergeTreeData : : MergeTreeData ( <nl> sampling_expr_column_name = sample_by_ast - > getColumnName ( ) ; <nl> <nl> if ( ! primary_key_sample . has ( sampling_expr_column_name ) <nl> - & & ! attach & & ! settings . compatibility_allow_sampling_expression_not_in_primary_key ) / / / This is for backward compatibility . <nl> + & & ! attach & & ! settings - > compatibility_allow_sampling_expression_not_in_primary_key ) / / / This is for backward compatibility . <nl> throw Exception ( " Sampling expression must be present in the primary key " , ErrorCodes : : BAD_ARGUMENTS ) ; <nl> <nl> auto syntax = SyntaxAnalyzer ( global_context ) . analyze ( sample_by_ast , getColumns ( ) . getAllPhysical ( ) ) ; <nl> void MergeTreeData : : loadDataParts ( bool skip_sanity_checks ) <nl> { <nl> LOG_DEBUG ( log , " Loading data parts " ) ; <nl> <nl> + const auto settings = getCOWSettings ( ) ; <nl> Strings part_file_names ; <nl> Poco : : DirectoryIterator end ; <nl> for ( Poco : : DirectoryIterator it ( full_path ) ; it ! = end ; + + it ) <nl> void MergeTreeData : : loadDataParts ( bool skip_sanity_checks ) <nl> } <nl> <nl> / / / Parallel loading of data parts . <nl> - size_t num_threads = std : : min ( size_t ( settings . max_part_loading_threads ) , part_file_names . size ( ) ) ; <nl> + size_t num_threads = std : : min ( size_t ( settings - > max_part_loading_threads ) , part_file_names . size ( ) ) ; <nl> <nl> std : : mutex mutex ; <nl> <nl> void MergeTreeData : : loadDataParts ( bool skip_sanity_checks ) <nl> <nl> pool . wait ( ) ; <nl> <nl> - if ( has_non_adaptive_parts & & has_adaptive_parts & & ! settings . enable_mixed_granularity_parts ) <nl> + if ( has_non_adaptive_parts & & has_adaptive_parts & & ! settings - > enable_mixed_granularity_parts ) <nl> throw Exception ( " Table contains parts with adaptive and non adaptive marks , but ` setting enable_mixed_granularity_parts ` is disabled " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> <nl> has_non_adaptive_index_granularity_parts = has_non_adaptive_parts ; <nl> <nl> - if ( suspicious_broken_parts > settings . max_suspicious_broken_parts & & ! 
skip_sanity_checks ) <nl> + if ( suspicious_broken_parts > settings - > max_suspicious_broken_parts & & ! skip_sanity_checks ) <nl> throw Exception ( " Suspiciously many ( " + toString ( suspicious_broken_parts ) + " ) broken parts to remove . " , <nl> ErrorCodes : : TOO_MANY_UNEXPECTED_DATA_PARTS ) ; <nl> <nl> void MergeTreeData : : clearOldTemporaryDirectories ( ssize_t custom_directories_life <nl> if ( ! lock . try_lock ( ) ) <nl> return ; <nl> <nl> + const auto settings = getCOWSettings ( ) ; <nl> time_t current_time = time ( nullptr ) ; <nl> ssize_t deadline = ( custom_directories_lifetime_seconds > = 0 ) <nl> ? current_time - custom_directories_lifetime_seconds <nl> - : current_time - settings . temporary_directories_lifetime . totalSeconds ( ) ; <nl> + : current_time - settings - > temporary_directories_lifetime . totalSeconds ( ) ; <nl> <nl> / / / Delete temporary directories older than a day . <nl> Poco : : DirectoryIterator end ; <nl> MergeTreeData : : DataPartsVector MergeTreeData : : grabOldParts ( ) <nl> <nl> if ( part . unique ( ) & & / / / Grab only parts that are not used by anyone ( SELECTs for example ) . <nl> part_remove_time < now & & <nl> - now - part_remove_time > settings . old_parts_lifetime . totalSeconds ( ) ) <nl> + now - part_remove_time > getCOWSettings ( ) - > old_parts_lifetime . totalSeconds ( ) ) <nl> { <nl> parts_to_delete . emplace_back ( it ) ; <nl> } <nl> void MergeTreeData : : clearOldPartsFromFilesystem ( ) <nl> <nl> void MergeTreeData : : clearPartsFromFilesystem ( const DataPartsVector & parts_to_remove ) <nl> { <nl> - if ( parts_to_remove . size ( ) > 1 & & settings . max_part_removal_threads > 1 & & parts_to_remove . size ( ) > settings . concurrent_part_removal_threshold ) <nl> + const auto settings = getCOWSettings ( ) ; <nl> + if ( parts_to_remove . size ( ) > 1 & & settings - > max_part_removal_threads > 1 & & parts_to_remove . size ( ) > settings - > concurrent_part_removal_threshold ) <nl> { <nl> / / / Parallel parts removal . <nl> <nl> - size_t num_threads = std : : min ( size_t ( settings . max_part_removal_threads ) , parts_to_remove . size ( ) ) ; <nl> + size_t num_threads = std : : min ( size_t ( settings - > max_part_removal_threads ) , parts_to_remove . size ( ) ) ; <nl> ThreadPool pool ( num_threads ) ; <nl> <nl> / / / NOTE : Under heavy system load you may get " Cannot schedule a task " from ThreadPool . <nl> void MergeTreeData : : checkAlter ( const AlterCommands & commands , const Context & c <nl> ASTPtr new_order_by_ast = order_by_ast ; <nl> ASTPtr new_primary_key_ast = primary_key_ast ; <nl> ASTPtr new_ttl_table_ast = ttl_table_ast ; <nl> - commands . apply ( new_columns , new_indices , new_constraints , new_order_by_ast , new_primary_key_ast , new_ttl_table_ast ) ; <nl> + SettingsChanges new_changes ; <nl> + commands . apply ( new_columns , new_indices , new_constraints , new_order_by_ast , new_primary_key_ast , new_ttl_table_ast , new_changes ) ; <nl> if ( getIndices ( ) . empty ( ) & & ! new_indices . empty ( ) & & <nl> ! context . getSettingsRef ( ) . allow_experimental_data_skipping_indices ) <nl> throw Exception ( " You must set the setting ` allow_experimental_data_skipping_indices ` to 1 " \ <nl> void MergeTreeData : : checkAlter ( const AlterCommands & commands , const Context & c <nl> <nl> setTTLExpressions ( new_columns . getColumnTTLs ( ) , new_ttl_table_ast , / * only_check = * / true ) ; <nl> <nl> + for ( const auto & setting : new_changes ) <nl> + { <nl> + if ( ! hasSetting ( setting . 
name ) ) <nl> + throw Exception { " Storage ' " + getName ( ) + " ' doesn ' t have setting ' " + setting . name + " ' " , ErrorCodes : : UNKNOWN_SETTING } ; <nl> + } <nl> + <nl> / / / Check that type conversions are possible . <nl> ExpressionActionsPtr unused_expression ; <nl> NameToNameMap unused_map ; <nl> void MergeTreeData : : createConvertExpression ( const DataPartPtr & part , const Name <nl> const IndicesASTs & old_indices , const IndicesASTs & new_indices , ExpressionActionsPtr & out_expression , <nl> NameToNameMap & out_rename_map , bool & out_force_update_metadata ) const <nl> { <nl> + const auto settings = getCOWSettings ( ) ; <nl> out_expression = nullptr ; <nl> out_rename_map = { } ; <nl> out_force_update_metadata = false ; <nl> void MergeTreeData : : createConvertExpression ( const DataPartPtr & part , const Name <nl> if ( part ) <nl> part_mrk_file_extension = part - > index_granularity_info . marks_file_extension ; <nl> else <nl> - part_mrk_file_extension = settings . index_granularity_bytes = = 0 ? getNonAdaptiveMrkExtension ( ) : getAdaptiveMrkExtension ( ) ; <nl> + part_mrk_file_extension = settings - > index_granularity_bytes = = 0 ? getNonAdaptiveMrkExtension ( ) : getAdaptiveMrkExtension ( ) ; <nl> <nl> using NameToType = std : : map < String , const IDataType * > ; <nl> NameToType new_types ; <nl> void MergeTreeData : : alterDataPart ( <nl> bool skip_sanity_checks , <nl> AlterDataPartTransactionPtr & transaction ) <nl> { <nl> + const auto settings = getCOWSettings ( ) ; <nl> ExpressionActionsPtr expression ; <nl> const auto & part = transaction - > getDataPart ( ) ; <nl> bool force_update_metadata ; <nl> void MergeTreeData : : alterDataPart ( <nl> + + num_files_to_remove ; <nl> <nl> if ( ! skip_sanity_checks <nl> - & & ( num_files_to_modify > settings . max_files_to_modify_in_alter_columns <nl> - | | num_files_to_remove > settings . max_files_to_remove_in_alter_columns ) ) <nl> + & & ( num_files_to_modify > settings - > max_files_to_modify_in_alter_columns <nl> + | | num_files_to_remove > settings - > max_files_to_remove_in_alter_columns ) ) <nl> { <nl> transaction - > clear ( ) ; <nl> <nl> - const bool forbidden_because_of_modify = num_files_to_modify > settings . max_files_to_modify_in_alter_columns ; <nl> + const bool forbidden_because_of_modify = num_files_to_modify > settings - > max_files_to_modify_in_alter_columns ; <nl> <nl> std : : stringstream exception_message ; <nl> exception_message <nl> void MergeTreeData : : alterDataPart ( <nl> < < " If it is not an error , you could increase merge_tree / " <nl> < < ( forbidden_because_of_modify ? " max_files_to_modify_in_alter_columns " : " max_files_to_remove_in_alter_columns " ) <nl> < < " parameter in configuration file ( current value : " <nl> - < < ( forbidden_because_of_modify ? settings . max_files_to_modify_in_alter_columns : settings . max_files_to_remove_in_alter_columns ) <nl> + < < ( forbidden_because_of_modify ? settings - > max_files_to_modify_in_alter_columns : settings - > max_files_to_remove_in_alter_columns ) <nl> < < " ) " ; <nl> <nl> throw Exception ( exception_message . 
str ( ) , ErrorCodes : : TABLE_DIFFERS_TOO_MUCH ) ; <nl> void MergeTreeData : : alterDataPart ( <nl> return ; <nl> } <nl> <nl> + void MergeTreeData : : alterSettings ( <nl> + const SettingsChanges & new_changes , <nl> + const String & current_database_name , <nl> + const String & current_table_name , <nl> + const Context & context , <nl> + TableStructureWriteLockHolder & table_lock_holder ) <nl> + { <nl> + std : : lock_guard lock ( settings_mutex ) ; <nl> + MutableMergeTreeSettingsPtr settings = std : : move ( * guarded_settings . getPtr ( ) ) . mutate ( ) ; <nl> + settings - > updateFromChanges ( new_changes ) ; <nl> + IStorage : : alterSettings ( new_changes , current_database_name , current_table_name , context , table_lock_holder ) ; <nl> + guarded_settings . setPtr ( std : : move ( settings ) ) ; <nl> + } <nl> + <nl> + bool MergeTreeData : : hasSetting ( const String & setting_name ) const <nl> + { <nl> + return MergeTreeSettings : : findIndex ( setting_name ) ! = MergeTreeSettings : : npos ; <nl> + } <nl> + <nl> void MergeTreeData : : removeEmptyColumnsFromPart ( MergeTreeData : : MutableDataPartPtr & data_part ) <nl> { <nl> auto & empty_columns = data_part - > empty_columns ; <nl> std : : optional < Int64 > MergeTreeData : : getMinPartDataVersion ( ) const <nl> } <nl> <nl> <nl> - void MergeTreeData : : delayInsertOrThrowIfNeeded ( Poco : : Event * until ) const <nl> + void MergeTreeData : : delayInsertOrThrowIfNeeded ( Poco : : Event * until ) const <nl> { <nl> + const auto settings = getCOWSettings ( ) ; <nl> const size_t parts_count_in_total = getPartsCount ( ) ; <nl> - if ( parts_count_in_total > = settings . max_parts_in_total ) <nl> + if ( parts_count_in_total > = settings - > max_parts_in_total ) <nl> { <nl> ProfileEvents : : increment ( ProfileEvents : : RejectedInserts ) ; <nl> throw Exception ( " Too many parts ( " + toString ( parts_count_in_total ) + " ) in all partitions in total . This indicates wrong choice of partition key . The threshold can be modified with ' max_parts_in_total ' setting in < merge_tree > element in config . xml or with per - table setting . " , ErrorCodes : : TOO_MANY_PARTS ) ; <nl> } <nl> <nl> const size_t parts_count_in_partition = getMaxPartsCountForPartition ( ) ; <nl> - if ( parts_count_in_partition < settings . parts_to_delay_insert ) <nl> + if ( parts_count_in_partition < settings - > parts_to_delay_insert ) <nl> return ; <nl> <nl> - if ( parts_count_in_partition > = settings . parts_to_throw_insert ) <nl> + if ( parts_count_in_partition > = settings - > parts_to_throw_insert ) <nl> { <nl> ProfileEvents : : increment ( ProfileEvents : : RejectedInserts ) ; <nl> throw Exception ( " Too many parts ( " + toString ( parts_count_in_partition ) + " ) . Merges are processing significantly slower than inserts . " , ErrorCodes : : TOO_MANY_PARTS ) ; <nl> } <nl> <nl> - const size_t max_k = settings . parts_to_throw_insert - settings . parts_to_delay_insert ; / / / always > 0 <nl> - const size_t k = 1 + parts_count_in_partition - settings . parts_to_delay_insert ; / / / from 1 to max_k <nl> - const double delay_milliseconds = : : pow ( settings . 
max_delay_to_insert * 1000 , static_cast < double > ( k ) / max_k ) ; <nl> + const size_t max_k = settings - > parts_to_throw_insert - settings - > parts_to_delay_insert ; / / / always > 0 <nl> + const size_t k = 1 + parts_count_in_partition - settings - > parts_to_delay_insert ; / / / from 1 to max_k <nl> + const double delay_milliseconds = : : pow ( settings - > max_delay_to_insert * 1000 , static_cast < double > ( k ) / max_k ) ; <nl> <nl> ProfileEvents : : increment ( ProfileEvents : : DelayedInserts ) ; <nl> ProfileEvents : : increment ( ProfileEvents : : DelayedInsertsMilliseconds , delay_milliseconds ) ; <nl> void MergeTreeData : : delayInsertOrThrowIfNeeded ( Poco : : Event * until ) const <nl> <nl> void MergeTreeData : : throwInsertIfNeeded ( ) const <nl> { <nl> + const auto settings = getCOWSettings ( ) ; <nl> const size_t parts_count_in_total = getPartsCount ( ) ; <nl> - if ( parts_count_in_total > = settings . max_parts_in_total ) <nl> + if ( parts_count_in_total > = settings - > max_parts_in_total ) <nl> { <nl> ProfileEvents : : increment ( ProfileEvents : : RejectedInserts ) ; <nl> throw Exception ( " Too many parts ( " + toString ( parts_count_in_total ) + " ) in all partitions in total . This indicates wrong choice of partition key . The threshold can be modified with ' max_parts_in_total ' setting in < merge_tree > element in config . xml or with per - table setting . " , ErrorCodes : : TOO_MANY_PARTS ) ; <nl> void MergeTreeData : : throwInsertIfNeeded ( ) const <nl> <nl> const size_t parts_count_in_partition = getMaxPartsCountForPartition ( ) ; <nl> <nl> - if ( parts_count_in_partition > = settings . parts_to_throw_insert ) <nl> + if ( parts_count_in_partition > = settings - > parts_to_throw_insert ) <nl> { <nl> ProfileEvents : : increment ( ProfileEvents : : RejectedInserts ) ; <nl> throw Exception ( " Too many parts ( " + toString ( parts_count_in_partition ) + " ) . Merges are processing significantly slower than inserts . " , ErrorCodes : : TOO_MANY_PARTS ) ; <nl> void MergeTreeData : : freezePartitionsByMatcher ( MatcherFn matcher , const String & <nl> <nl> bool MergeTreeData : : canReplacePartition ( const DataPartPtr & src_part ) const <nl> { <nl> - if ( ! settings . enable_mixed_granularity_parts | | settings . index_granularity_bytes = = 0 ) <nl> + const auto settings = getCOWSettings ( ) ; <nl> + <nl> + if ( ! settings - > enable_mixed_granularity_parts | | settings - > index_granularity_bytes = = 0 ) <nl> { <nl> if ( ! canUseAdaptiveGranularity ( ) & & src_part - > index_granularity_info . is_adaptive ) <nl> return false ; <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeData . h <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeData . h <nl> class MergeTreeData : public IStorage <nl> const ASTPtr & sample_by_ast_ , / / / nullptr , if sampling is not supported . <nl> const ASTPtr & ttl_table_ast_ , <nl> const MergingParams & merging_params_ , <nl> - const MergeTreeSettings & settings_ , <nl> + MergeTreeSettingsPtr settings_ , <nl> bool require_part_metadata_ , <nl> bool attach , <nl> BrokenPartCallback broken_part_callback_ = [ ] ( const String & ) { } ) ; <nl> class MergeTreeData : public IStorage <nl> | | merging_params . 
mode = = MergingParams : : VersionedCollapsing ; <nl> } <nl> <nl> + bool supportsSettings ( ) const override { return true ; } <nl> + <nl> bool mayBenefitFromIndexForIn ( const ASTPtr & left_in_operand , const Context & ) const override ; <nl> <nl> NameAndTypePair getColumn ( const String & column_name ) const override <nl> class MergeTreeData : public IStorage <nl> bool skip_sanity_checks , <nl> AlterDataPartTransactionPtr & transaction ) ; <nl> <nl> + / / / Performs ALTER of table settings ( MergeTreeSettings ) . Lightweight operation , affects metadata only . <nl> + / / / Not atomic , have to be done with alter intention lock . <nl> + void alterSettings ( <nl> + const SettingsChanges & new_changes , <nl> + const String & current_database_name , <nl> + const String & current_table_name , <nl> + const Context & context , <nl> + TableStructureWriteLockHolder & table_lock_holder ) override ; <nl> + <nl> + / / / All MergeTreeData children have settings . <nl> + bool hasSetting ( const String & setting_name ) const override ; <nl> + <nl> / / / Remove columns that have been marked as empty after zeroing values with expired ttl <nl> void removeEmptyColumnsFromPart ( MergeTreeData : : MutableDataPartPtr & data_part ) ; <nl> <nl> class MergeTreeData : public IStorage <nl> / / / Has additional constraint in replicated version <nl> virtual bool canUseAdaptiveGranularity ( ) const <nl> { <nl> - return settings . index_granularity_bytes ! = 0 & & <nl> - ( settings . enable_mixed_granularity_parts | | ! has_non_adaptive_index_granularity_parts ) ; <nl> + const auto settings = getCOWSettings ( ) ; <nl> + return settings - > index_granularity_bytes ! = 0 & & <nl> + ( settings - > enable_mixed_granularity_parts | | ! has_non_adaptive_index_granularity_parts ) ; <nl> } <nl> <nl> <nl> class MergeTreeData : public IStorage <nl> String sampling_expr_column_name ; <nl> Names columns_required_for_sampling ; <nl> <nl> - MergeTreeSettings settings ; <nl> - <nl> / / / Limiting parallel sends per one table , used in DataPartsExchange <nl> std : : atomic_uint current_table_sends { 0 } ; <nl> <nl> class MergeTreeData : public IStorage <nl> <nl> bool has_non_adaptive_index_granularity_parts = false ; <nl> <nl> + / / / Get copy - on - write pointer to storage settings . <nl> + / / / Copy this pointer into your scope and you will <nl> + / / / get consistent settings . <nl> + const MergeTreeSettingsPtr getCOWSettings ( ) const <nl> + { <nl> + std : : shared_lock lock ( settings_mutex ) ; <nl> + return guarded_settings . copyPtr ( ) ; <nl> + } <nl> + <nl> protected : <nl> + <nl> friend struct MergeTreeDataPart ; <nl> friend class MergeTreeDataMergerMutator ; <nl> friend class ReplicatedMergeTreeAlterThread ; <nl> class MergeTreeData : public IStorage <nl> String log_name ; <nl> Logger * log ; <nl> <nl> + / / / Just hides settings pointer from direct usage <nl> + class MergeTreeSettingsGuard <nl> + { <nl> + private : <nl> + / / / Settings COW pointer . Data may be changed at any point in time . <nl> + / / / If you need consistent settings , just copy pointer to your scope . <nl> + MergeTreeSettingsPtr settings_ptr ; <nl> + public : <nl> + MergeTreeSettingsGuard ( MergeTreeSettingsPtr settings_ptr_ ) <nl> + : settings_ptr ( settings_ptr_ ) <nl> + { } <nl> + <nl> + const MergeTreeSettingsPtr copyPtr ( ) const { return settings_ptr ; } <nl> + MergeTreeSettingsPtr getPtr ( ) { return settings_ptr ; } <nl> + void setPtr ( MergeTreeSettingsPtr ptr ) { settings_ptr = ptr ; } <nl> + } ; <nl> + <nl> + / / / Storage settings . 
Don ' t use this field directly , if you <nl> + / / / want readonly settings . Prefer getCOWSettings ( ) method . <nl> + MergeTreeSettingsGuard guarded_settings ; <nl> <nl> / / / Work with data parts <nl> <nl> class MergeTreeData : public IStorage <nl> std : : mutex grab_old_parts_mutex ; <nl> / / / The same for clearOldTemporaryDirectories . <nl> std : : mutex clear_old_temporary_directories_mutex ; <nl> + / / / Mutex for settings usage <nl> + mutable std : : shared_mutex settings_mutex ; <nl> <nl> void setProperties ( const ASTPtr & new_order_by_ast , const ASTPtr & new_primary_key_ast , <nl> const ColumnsDescription & new_columns , <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeDataMergerMutator . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeDataMergerMutator . cpp <nl> UInt64 MergeTreeDataMergerMutator : : getMaxSourcePartsSizeForMerge ( size_t pool_siz <nl> throw Exception ( " Logical error : invalid arguments passed to getMaxSourcePartsSize : pool_used > pool_size " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> <nl> size_t free_entries = pool_size - pool_used ; <nl> + const auto data_settings = data . getCOWSettings ( ) ; <nl> <nl> UInt64 max_size = 0 ; <nl> - if ( free_entries > = data . settings . number_of_free_entries_in_pool_to_lower_max_size_of_merge ) <nl> - max_size = data . settings . max_bytes_to_merge_at_max_space_in_pool ; <nl> + if ( free_entries > = data_settings - > number_of_free_entries_in_pool_to_lower_max_size_of_merge ) <nl> + max_size = data_settings - > max_bytes_to_merge_at_max_space_in_pool ; <nl> else <nl> max_size = interpolateExponential ( <nl> - data . settings . max_bytes_to_merge_at_min_space_in_pool , <nl> - data . settings . max_bytes_to_merge_at_max_space_in_pool , <nl> - static_cast < double > ( free_entries ) / data . settings . number_of_free_entries_in_pool_to_lower_max_size_of_merge ) ; <nl> + data_settings - > max_bytes_to_merge_at_min_space_in_pool , <nl> + data_settings - > max_bytes_to_merge_at_max_space_in_pool , <nl> + static_cast < double > ( free_entries ) / data_settings - > number_of_free_entries_in_pool_to_lower_max_size_of_merge ) ; <nl> <nl> return std : : min ( max_size , static_cast < UInt64 > ( DiskSpaceMonitor : : getUnreservedFreeSpace ( data . full_path ) / DISK_USAGE_COEFFICIENT_TO_SELECT ) ) ; <nl> } <nl> UInt64 MergeTreeDataMergerMutator : : getMaxSourcePartsSizeForMerge ( size_t pool_siz <nl> <nl> UInt64 MergeTreeDataMergerMutator : : getMaxSourcePartSizeForMutation ( ) <nl> { <nl> + <nl> + const auto data_settings = data . getCOWSettings ( ) ; <nl> size_t total_threads_in_pool = pool . getNumberOfThreads ( ) ; <nl> size_t busy_threads_in_pool = CurrentMetrics : : values [ CurrentMetrics : : BackgroundPoolTask ] . load ( std : : memory_order_relaxed ) ; <nl> <nl> / / / Allow mutations only if there are enough threads , leave free threads for merges else <nl> - if ( total_threads_in_pool - busy_threads_in_pool > = data . settings . number_of_free_entries_in_pool_to_execute_mutation ) <nl> + if ( total_threads_in_pool - busy_threads_in_pool > = data_settings - > number_of_free_entries_in_pool_to_execute_mutation ) <nl> return static_cast < UInt64 > ( DiskSpaceMonitor : : getUnreservedFreeSpace ( data . full_path ) / DISK_USAGE_COEFFICIENT_TO_RESERVE ) ; <nl> <nl> return 0 ; <nl> bool MergeTreeDataMergerMutator : : selectPartsToMerge ( <nl> String * out_disable_reason ) <nl> { <nl> MergeTreeData : : DataPartsVector data_parts = data . getDataPartsVector ( ) ; <nl> + const auto data_settings = data . 
getCOWSettings ( ) ; <nl> <nl> if ( data_parts . empty ( ) ) <nl> { <nl> bool MergeTreeDataMergerMutator : : selectPartsToMerge ( <nl> merge_settings . base = 1 ; <nl> <nl> bool can_merge_with_ttl = <nl> - ( current_time - last_merge_with_ttl > data . settings . merge_with_ttl_timeout ) ; <nl> + ( current_time - last_merge_with_ttl > data_settings - > merge_with_ttl_timeout ) ; <nl> <nl> / / / NOTE Could allow selection of different merge strategy . <nl> if ( can_merge_with_ttl & & has_part_with_expired_ttl & & ! ttl_merges_blocker . isCancelled ( ) ) <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mergePartsToTempor <nl> <nl> Names all_column_names = data . getColumns ( ) . getNamesOfPhysical ( ) ; <nl> NamesAndTypesList all_columns = data . getColumns ( ) . getAllPhysical ( ) ; <nl> + const auto data_settings = data . getCOWSettings ( ) ; <nl> <nl> NamesAndTypesList gathering_columns , merging_columns ; <nl> Names gathering_column_names , merging_column_names ; <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mergePartsToTempor <nl> / / / We count total amount of bytes in parts <nl> / / / and use direct_io + aio if there is more than min_merge_bytes_to_use_direct_io <nl> bool read_with_direct_io = false ; <nl> - if ( data . settings . min_merge_bytes_to_use_direct_io ! = 0 ) <nl> + if ( data_settings - > min_merge_bytes_to_use_direct_io ! = 0 ) <nl> { <nl> size_t total_size = 0 ; <nl> for ( const auto & part : parts ) <nl> { <nl> total_size + = part - > bytes_on_disk ; <nl> - if ( total_size > = data . settings . min_merge_bytes_to_use_direct_io ) <nl> + if ( total_size > = data_settings - > min_merge_bytes_to_use_direct_io ) <nl> { <nl> LOG_DEBUG ( log , " Will merge parts reading files in O_DIRECT " ) ; <nl> read_with_direct_io = true ; <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mergePartsToTempor <nl> merging_columns , <nl> compression_codec , <nl> merged_column_to_size , <nl> - data . settings . min_merge_bytes_to_use_direct_io , <nl> + data_settings - > min_merge_bytes_to_use_direct_io , <nl> blocks_are_granules_size } ; <nl> <nl> merged_stream - > readPrefix ( ) ; <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mutatePartToTempor <nl> const auto & updated_header = mutations_interpreter . getUpdatedHeader ( ) ; <nl> <nl> NamesAndTypesList all_columns = data . getColumns ( ) . getAllPhysical ( ) ; <nl> + const auto data_settings = data . getCOWSettings ( ) ; <nl> <nl> Block in_header = in - > getHeader ( ) ; <nl> <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mutatePartToTempor <nl> } <nl> <nl> NameSet files_to_skip = { " checksums . txt " , " columns . txt " } ; <nl> - auto mrk_extension = data . settings . index_granularity_bytes ? getAdaptiveMrkExtension ( ) : getNonAdaptiveMrkExtension ( ) ; <nl> + <nl> + auto mrk_extension = data_settings - > index_granularity_bytes ? getAdaptiveMrkExtension ( ) : getNonAdaptiveMrkExtension ( ) ; <nl> for ( const auto & entry : updated_header ) <nl> { <nl> IDataType : : StreamCallback callback = [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> MergeTreeDataMergerMutator : : MergeAlgorithm MergeTreeDataMergerMutator : : chooseMer <nl> const MergeTreeData : : DataPartsVector & parts , size_t sum_rows_upper_bound , <nl> const NamesAndTypesList & gathering_columns , bool deduplicate , bool need_remove_expired_values ) const <nl> { <nl> + const auto data_settings = data . 
getCOWSettings ( ) ; <nl> + <nl> if ( deduplicate ) <nl> return MergeAlgorithm : : Horizontal ; <nl> - if ( data . settings . enable_vertical_merge_algorithm = = 0 ) <nl> + if ( data_settings - > enable_vertical_merge_algorithm = = 0 ) <nl> return MergeAlgorithm : : Horizontal ; <nl> if ( need_remove_expired_values ) <nl> return MergeAlgorithm : : Horizontal ; <nl> MergeTreeDataMergerMutator : : MergeAlgorithm MergeTreeDataMergerMutator : : chooseMer <nl> data . merging_params . mode = = MergeTreeData : : MergingParams : : Replacing | | <nl> data . merging_params . mode = = MergeTreeData : : MergingParams : : VersionedCollapsing ; <nl> <nl> - bool enough_ordinary_cols = gathering_columns . size ( ) > = data . settings . vertical_merge_algorithm_min_columns_to_activate ; <nl> + bool enough_ordinary_cols = gathering_columns . size ( ) > = data_settings - > vertical_merge_algorithm_min_columns_to_activate ; <nl> <nl> - bool enough_total_rows = sum_rows_upper_bound > = data . settings . vertical_merge_algorithm_min_rows_to_activate ; <nl> + bool enough_total_rows = sum_rows_upper_bound > = data_settings - > vertical_merge_algorithm_min_rows_to_activate ; <nl> <nl> bool no_parts_overflow = parts . size ( ) < = RowSourcePart : : MAX_PARTS ; <nl> <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeDataSelectExecutor . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeDataSelectExecutor . cpp <nl> BlockInputStreams MergeTreeDataSelectExecutor : : spreadMarkRangesAmongStreams ( <nl> size_t sum_marks = 0 ; <nl> size_t total_rows = 0 ; <nl> <nl> + const auto data_settings = data . getCOWSettings ( ) ; <nl> size_t adaptive_parts = 0 ; <nl> for ( size_t i = 0 ; i < parts . size ( ) ; + + i ) <nl> { <nl> BlockInputStreams MergeTreeDataSelectExecutor : : spreadMarkRangesAmongStreams ( <nl> <nl> size_t index_granularity_bytes = 0 ; <nl> if ( adaptive_parts > parts . size ( ) / 2 ) <nl> - index_granularity_bytes = data . settings . index_granularity_bytes ; <nl> + index_granularity_bytes = data_settings - > index_granularity_bytes ; <nl> <nl> const size_t max_marks_to_use_cache = roundRowsOrBytesToMarks ( <nl> settings . merge_tree_max_rows_to_use_cache , <nl> settings . merge_tree_max_bytes_to_use_cache , <nl> - data . settings . index_granularity , <nl> + data_settings - > index_granularity , <nl> index_granularity_bytes ) ; <nl> <nl> const size_t min_marks_for_concurrent_read = roundRowsOrBytesToMarks ( <nl> settings . merge_tree_min_rows_for_concurrent_read , <nl> settings . merge_tree_min_bytes_for_concurrent_read , <nl> - data . settings . index_granularity , <nl> + data_settings - > index_granularity , <nl> index_granularity_bytes ) ; <nl> <nl> if ( sum_marks > max_marks_to_use_cache ) <nl> BlockInputStreams MergeTreeDataSelectExecutor : : spreadMarkRangesAmongStreamsWithO <nl> SortingInfoPtr sorting_info = query_info . sorting_info ; <nl> size_t adaptive_parts = 0 ; <nl> std : : vector < size_t > sum_marks_in_parts ( parts . size ( ) ) ; <nl> + const auto data_settings = data . getCOWSettings ( ) ; <nl> <nl> for ( size_t i = 0 ; i < parts . size ( ) ; + + i ) <nl> { <nl> BlockInputStreams MergeTreeDataSelectExecutor : : spreadMarkRangesAmongStreamsWithO <nl> <nl> size_t index_granularity_bytes = 0 ; <nl> if ( adaptive_parts > parts . size ( ) / 2 ) <nl> - index_granularity_bytes = data . settings . 
index_granularity_bytes ; <nl> + index_granularity_bytes = data_settings - > index_granularity_bytes ; <nl> <nl> const size_t max_marks_to_use_cache = roundRowsOrBytesToMarks ( <nl> settings . merge_tree_max_rows_to_use_cache , <nl> settings . merge_tree_max_bytes_to_use_cache , <nl> - data . settings . index_granularity , <nl> + data_settings - > index_granularity , <nl> index_granularity_bytes ) ; <nl> <nl> const size_t min_marks_for_concurrent_read = roundRowsOrBytesToMarks ( <nl> settings . merge_tree_min_rows_for_concurrent_read , <nl> settings . merge_tree_min_bytes_for_concurrent_read , <nl> - data . settings . index_granularity , <nl> + data_settings - > index_granularity , <nl> index_granularity_bytes ) ; <nl> <nl> if ( sum_marks > max_marks_to_use_cache ) <nl> BlockInputStreams MergeTreeDataSelectExecutor : : spreadMarkRangesAmongStreamsWithO <nl> return streams ; <nl> <nl> / / / Let ' s split ranges to avoid reading much data . <nl> - auto split_ranges = [ rows_granularity = data . settings . index_granularity , max_block_size ] ( const auto & ranges , int direction ) <nl> + auto split_ranges = [ rows_granularity = data_settings - > index_granularity , max_block_size ] ( const auto & ranges , int direction ) <nl> { <nl> MarkRanges new_ranges ; <nl> const size_t max_marks_in_range = ( max_block_size + rows_granularity - 1 ) / rows_granularity ; <nl> BlockInputStreams MergeTreeDataSelectExecutor : : spreadMarkRangesAmongStreamsFinal <nl> const Names & virt_columns , <nl> const Settings & settings ) const <nl> { <nl> + const auto data_settings = data . getCOWSettings ( ) ; <nl> size_t sum_marks = 0 ; <nl> size_t adaptive_parts = 0 ; <nl> for ( size_t i = 0 ; i < parts . size ( ) ; + + i ) <nl> BlockInputStreams MergeTreeDataSelectExecutor : : spreadMarkRangesAmongStreamsFinal <nl> <nl> size_t index_granularity_bytes = 0 ; <nl> if ( adaptive_parts > = parts . size ( ) / 2 ) <nl> - index_granularity_bytes = data . settings . index_granularity_bytes ; <nl> + index_granularity_bytes = data_settings - > index_granularity_bytes ; <nl> <nl> const size_t max_marks_to_use_cache = roundRowsOrBytesToMarks ( <nl> settings . merge_tree_max_rows_to_use_cache , <nl> settings . merge_tree_max_bytes_to_use_cache , <nl> - data . settings . index_granularity , <nl> + data_settings - > index_granularity , <nl> index_granularity_bytes ) ; <nl> <nl> if ( sum_marks > max_marks_to_use_cache ) <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeIndexGranularityInfo . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeIndexGranularityInfo . cpp <nl> std : : optional < std : : string > MergeTreeIndexGranularityInfo : : getMrkExtensionFromFS ( <nl> MergeTreeIndexGranularityInfo : : MergeTreeIndexGranularityInfo ( <nl> const MergeTreeData & storage ) <nl> { <nl> - fixed_index_granularity = storage . settings . index_granularity ; <nl> + const auto storage_settings = storage . getCOWSettings ( ) ; <nl> + fixed_index_granularity = storage_settings - > index_granularity ; <nl> / / / Granularity is fixed <nl> if ( ! storage . canUseAdaptiveGranularity ( ) ) <nl> setNonAdaptive ( ) ; <nl> else <nl> - setAdaptive ( storage . settings . index_granularity_bytes ) ; <nl> + setAdaptive ( storage_settings - > index_granularity_bytes ) ; <nl> } <nl> <nl> <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeSettings . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeSettings . 
cpp <nl> void MergeTreeSettings : : loadFromQuery ( ASTStorage & storage_def ) <nl> { <nl> try <nl> { <nl> - applyChanges ( storage_def . settings - > changes ) ; <nl> + loadFromChanges ( storage_def . settings - > changes ) ; <nl> } <nl> catch ( Exception & e ) <nl> { <nl> void MergeTreeSettings : : loadFromQuery ( ASTStorage & storage_def ) <nl> <nl> # define ADD_IF_ABSENT ( NAME ) \ <nl> if ( std : : find_if ( changes . begin ( ) , changes . end ( ) , \ <nl> - [ ] ( const SettingChange & c ) { return c . name = = # NAME ; } ) \ <nl> + [ ] ( const SettingChange & c ) { return c . name = = # NAME ; } ) \ <nl> = = changes . end ( ) ) \ <nl> changes . push_back ( SettingChange { # NAME , NAME . value } ) ; <nl> <nl> void MergeTreeSettings : : loadFromQuery ( ASTStorage & storage_def ) <nl> # undef ADD_IF_ABSENT <nl> } <nl> <nl> + <nl> + MergeTreeSettings : : MutablePtr MergeTreeSettings : : clone ( ) const <nl> + { <nl> + return COW : : create ( * this ) ; <nl> + } <nl> } <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeSettings . h <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeSettings . h <nl> <nl> <nl> # include < Core / Defines . h > <nl> # include < Core / SettingsCommon . h > <nl> + # include < Common / COW . h > <nl> <nl> <nl> namespace Poco <nl> class ASTStorage ; <nl> / * * Settings for the MergeTree family of engines . <nl> * Could be loaded from config or from a CREATE TABLE query ( SETTINGS clause ) . <nl> * / <nl> - struct MergeTreeSettings : public SettingsCollection < MergeTreeSettings > <nl> + struct MergeTreeSettings : public SettingsCollection < MergeTreeSettings > , public COW < MergeTreeSettings > <nl> { <nl> <nl> - # define LIST_OF_MERGE_TREE_SETTINGS ( M ) \ <nl> - M ( SettingUInt64 , index_granularity , 8192 , " How many rows correspond to one primary key value . " ) \ <nl> + friend class COW < MergeTreeSettings > ; <nl> + <nl> + / / / M ( mutable ) for normal settings , IM ( immutable ) for not updateable settings . <nl> + # define LIST_OF_MERGE_TREE_SETTINGS ( M , IM ) \ <nl> + IM ( SettingUInt64 , index_granularity , 8192 , " How many rows correspond to one primary key value . " ) \ <nl> \ <nl> / * * Merge settings . * / \ <nl> M ( SettingUInt64 , max_bytes_to_merge_at_max_space_in_pool , 150ULL * 1024 * 1024 * 1024 , " Maximum in total size of parts to merge , when there are maximum free threads in background pool ( or entries in replication queue ) . " ) \ <nl> struct MergeTreeSettings : public SettingsCollection < MergeTreeSettings > <nl> M ( SettingBool , use_minimalistic_part_header_in_zookeeper , false , " Store part header ( checksums and columns ) in a compact format and a single part znode instead of separate znodes ( < part > / columns and < part > / checksums ) . This can dramatically reduce snapshot size in ZooKeeper . Before enabling check that all replicas support new format . " ) \ <nl> M ( SettingUInt64 , finished_mutations_to_keep , 100 , " How many records about mutations that are done to keep . If zero , then keep all of them . " ) \ <nl> M ( SettingUInt64 , min_merge_bytes_to_use_direct_io , 10ULL * 1024 * 1024 * 1024 , " Minimal amount of bytes to enable O_DIRECT in merge ( 0 - disabled ) . " ) \ <nl> - M ( SettingUInt64 , index_granularity_bytes , 10 * 1024 * 1024 , " Approximate amount of bytes in single granule ( 0 - disabled ) . " ) \ <nl> + IM ( SettingUInt64 , index_granularity_bytes , 10 * 1024 * 1024 , " Approximate amount of bytes in single granule ( 0 - disabled ) . 
" ) \ <nl> M ( SettingInt64 , merge_with_ttl_timeout , 3600 * 24 , " Minimal time in seconds , when merge with TTL can be repeated . " ) \ <nl> M ( SettingBool , write_final_mark , 1 , " Write final mark after end of column ( 0 - disabled , do nothing if index_granularity_bytes = 0 ) " ) \ <nl> M ( SettingBool , enable_mixed_granularity_parts , 0 , " Enable parts with adaptive and non adaptive granularity " ) \ <nl> struct MergeTreeSettings : public SettingsCollection < MergeTreeSettings > <nl> <nl> / / / NOTE : will rewrite the AST to add immutable settings . <nl> void loadFromQuery ( ASTStorage & storage_def ) ; <nl> + <nl> + MutablePtr clone ( ) const ; <nl> + private : <nl> + MergeTreeSettings ( ) = default ; <nl> + MergeTreeSettings ( const MergeTreeSettings & o ) = default ; <nl> } ; <nl> <nl> + using MergeTreeSettingsPtr = MergeTreeSettings : : Ptr ; <nl> + using MutableMergeTreeSettingsPtr = MergeTreeSettings : : MutablePtr ; <nl> + <nl> } <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeThreadSelectBlockInputStream . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeThreadSelectBlockInputStream . cpp <nl> MergeTreeThreadSelectBlockInputStream : : MergeTreeThreadSelectBlockInputStream ( <nl> / / / Maybe it will make sence to add settings ` max_block_size_bytes ` <nl> if ( max_block_size_rows & & ! storage . canUseAdaptiveGranularity ( ) ) <nl> { <nl> - size_t fixed_index_granularity = storage . settings . index_granularity ; <nl> + size_t fixed_index_granularity = storage . getCOWSettings ( ) - > index_granularity ; <nl> min_marks_to_read = ( min_marks_to_read_ * fixed_index_granularity + max_block_size_rows - 1 ) <nl> / max_block_size_rows * max_block_size_rows / fixed_index_granularity ; <nl> } <nl> mmm a / dbms / src / Storages / MergeTree / ReplicatedMergeTreeCleanupThread . cpp <nl> ppp b / dbms / src / Storages / MergeTree / ReplicatedMergeTreeCleanupThread . cpp <nl> ReplicatedMergeTreeCleanupThread : : ReplicatedMergeTreeCleanupThread ( StorageReplic <nl> <nl> void ReplicatedMergeTreeCleanupThread : : run ( ) <nl> { <nl> - const auto CLEANUP_SLEEP_MS = storage . settings . cleanup_delay_period * 1000 <nl> - + std : : uniform_int_distribution < UInt64 > ( 0 , storage . settings . cleanup_delay_period_random_add * 1000 ) ( rng ) ; <nl> + auto storage_settings = storage . getCOWSettings ( ) ; <nl> + const auto CLEANUP_SLEEP_MS = storage_settings - > cleanup_delay_period * 1000 <nl> + + std : : uniform_int_distribution < UInt64 > ( 0 , storage_settings - > cleanup_delay_period_random_add * 1000 ) ( rng ) ; <nl> <nl> try <nl> { <nl> void ReplicatedMergeTreeCleanupThread : : iterate ( ) <nl> void ReplicatedMergeTreeCleanupThread : : clearOldLogs ( ) <nl> { <nl> auto zookeeper = storage . getZooKeeper ( ) ; <nl> + auto storage_settings = storage . getCOWSettings ( ) ; <nl> <nl> Coordination : : Stat stat ; <nl> if ( ! zookeeper - > exists ( storage . zookeeper_path + " / log " , & stat ) ) <nl> void ReplicatedMergeTreeCleanupThread : : clearOldLogs ( ) <nl> int children_count = stat . numChildren ; <nl> <nl> / / / We will wait for 1 . 1 times more records to accumulate than necessary . <nl> - if ( static_cast < double > ( children_count ) < storage . settings . min_replicated_logs_to_keep * 1 . 1 ) <nl> + if ( static_cast < double > ( children_count ) < storage_settings - > min_replicated_logs_to_keep * 1 . 1 ) <nl> return ; <nl> <nl> Strings replicas = zookeeper - > getChildren ( storage . 
zookeeper_path + " / replicas " , & stat ) ; <nl> void ReplicatedMergeTreeCleanupThread : : clearOldLogs ( ) <nl> std : : sort ( entries . begin ( ) , entries . end ( ) ) ; <nl> <nl> String min_saved_record_log_str = entries [ <nl> - entries . size ( ) > storage . settings . max_replicated_logs_to_keep . value <nl> - ? entries . size ( ) - storage . settings . max_replicated_logs_to_keep . value <nl> + entries . size ( ) > storage_settings - > max_replicated_logs_to_keep <nl> + ? entries . size ( ) - storage_settings - > max_replicated_logs_to_keep <nl> : 0 ] ; <nl> <nl> / / / Replicas that were marked is_lost but are active . <nl> void ReplicatedMergeTreeCleanupThread : : clearOldLogs ( ) <nl> min_saved_log_pointer = std : : min ( min_saved_log_pointer , min_log_pointer_lost_candidate ) ; <nl> <nl> / / / We will not touch the last ` min_replicated_logs_to_keep ` records . <nl> - entries . erase ( entries . end ( ) - std : : min < UInt64 > ( entries . size ( ) , storage . settings . min_replicated_logs_to_keep . value ) , entries . end ( ) ) ; <nl> + entries . erase ( entries . end ( ) - std : : min < UInt64 > ( entries . size ( ) , storage_settings - > min_replicated_logs_to_keep ) , entries . end ( ) ) ; <nl> / / / We will not touch records that are no less than ` min_saved_log_pointer ` . <nl> entries . erase ( std : : lower_bound ( entries . begin ( ) , entries . end ( ) , " log - " + padIndex ( min_saved_log_pointer ) ) , entries . end ( ) ) ; <nl> <nl> struct ReplicatedMergeTreeCleanupThread : : NodeWithStat <nl> void ReplicatedMergeTreeCleanupThread : : clearOldBlocks ( ) <nl> { <nl> auto zookeeper = storage . getZooKeeper ( ) ; <nl> + auto storage_settings = storage . getCOWSettings ( ) ; <nl> <nl> std : : vector < NodeWithStat > timed_blocks ; <nl> getBlocksSortedByTime ( * zookeeper , timed_blocks ) ; <nl> void ReplicatedMergeTreeCleanupThread : : clearOldBlocks ( ) <nl> <nl> / / / Use ZooKeeper ' s first node ( last according to time ) timestamp as " current " time . <nl> Int64 current_time = timed_blocks . front ( ) . ctime ; <nl> - Int64 time_threshold = std : : max ( static_cast < Int64 > ( 0 ) , current_time - static_cast < Int64 > ( 1000 * storage . settings . replicated_deduplication_window_seconds ) ) ; <nl> + Int64 time_threshold = std : : max ( static_cast < Int64 > ( 0 ) , current_time - static_cast < Int64 > ( 1000 * storage_settings - > replicated_deduplication_window_seconds ) ) ; <nl> <nl> / / / Virtual node , all nodes that are " greater " than this one will be deleted <nl> NodeWithStat block_threshold { { } , time_threshold } ; <nl> <nl> - size_t current_deduplication_window = std : : min < size_t > ( timed_blocks . size ( ) , storage . settings . replicated_deduplication_window . value ) ; <nl> + size_t current_deduplication_window = std : : min < size_t > ( timed_blocks . size ( ) , storage_settings - > replicated_deduplication_window ) ; <nl> auto first_outdated_block_fixed_threshold = timed_blocks . begin ( ) + current_deduplication_window ; <nl> auto first_outdated_block_time_threshold = std : : upper_bound ( timed_blocks . begin ( ) , timed_blocks . end ( ) , block_threshold , NodeWithStat : : greaterByTime ) ; <nl> auto first_outdated_block = std : : min ( first_outdated_block_fixed_threshold , first_outdated_block_time_threshold ) ; <nl> void ReplicatedMergeTreeCleanupThread : : getBlocksSortedByTime ( zkutil : : ZooKeeper & <nl> <nl> void ReplicatedMergeTreeCleanupThread : : clearOldMutations ( ) <nl> { <nl> - if ( ! storage . settings . 
finished_mutations_to_keep ) <nl> + auto storage_settings = storage . getCOWSettings ( ) ; <nl> + if ( ! storage_settings - > finished_mutations_to_keep ) <nl> return ; <nl> <nl> - if ( storage . queue . countFinishedMutations ( ) < = storage . settings . finished_mutations_to_keep ) <nl> + if ( storage . queue . countFinishedMutations ( ) < = storage_settings - > finished_mutations_to_keep ) <nl> { <nl> / / / Not strictly necessary , but helps to avoid unnecessary ZooKeeper requests . <nl> / / / If even this replica hasn ' t finished enough mutations yet , then we don ' t need to clean anything . <nl> void ReplicatedMergeTreeCleanupThread : : clearOldMutations ( ) <nl> <nl> / / / Do not remove entries that are greater than ` min_pointer ` ( they are not done yet ) . <nl> entries . erase ( std : : upper_bound ( entries . begin ( ) , entries . end ( ) , padIndex ( min_pointer ) ) , entries . end ( ) ) ; <nl> - / / / Do not remove last ` storage . settings . finished_mutations_to_keep ` entries . <nl> - if ( entries . size ( ) < = storage . settings . finished_mutations_to_keep ) <nl> + / / / Do not remove last ` storage_settings - > finished_mutations_to_keep ` entries . <nl> + if ( entries . size ( ) < = storage_settings - > finished_mutations_to_keep ) <nl> return ; <nl> - entries . erase ( entries . end ( ) - storage . settings . finished_mutations_to_keep , entries . end ( ) ) ; <nl> + entries . erase ( entries . end ( ) - storage_settings - > finished_mutations_to_keep , entries . end ( ) ) ; <nl> <nl> if ( entries . empty ( ) ) <nl> return ; <nl> mmm a / dbms / src / Storages / MergeTree / ReplicatedMergeTreeQueue . cpp <nl> ppp b / dbms / src / Storages / MergeTree / ReplicatedMergeTreeQueue . cpp <nl> bool ReplicatedMergeTreeQueue : : shouldExecuteLogEntry ( <nl> * Setting max_bytes_to_merge_at_max_space_in_pool still working for regular merges , <nl> * because the leader replica does not assign merges of greater size ( except OPTIMIZE PARTITION and OPTIMIZE FINAL ) . <nl> * / <nl> - bool ignore_max_size = ( entry . type = = LogEntry : : MERGE_PARTS ) & & ( max_source_parts_size = = data . settings . max_bytes_to_merge_at_max_space_in_pool ) ; <nl> + const auto data_settings = data . getCOWSettings ( ) ; <nl> + bool ignore_max_size = ( entry . type = = LogEntry : : MERGE_PARTS ) & & ( max_source_parts_size = = data_settings - > max_bytes_to_merge_at_max_space_in_pool ) ; <nl> <nl> if ( ! ignore_max_size & & sum_parts_size_in_bytes > max_source_parts_size ) <nl> { <nl> mmm a / dbms / src / Storages / MergeTree / ReplicatedMergeTreeRestartingThread . cpp <nl> ppp b / dbms / src / Storages / MergeTree / ReplicatedMergeTreeRestartingThread . cpp <nl> ReplicatedMergeTreeRestartingThread : : ReplicatedMergeTreeRestartingThread ( Storage <nl> , log ( & Logger : : get ( log_name ) ) <nl> , active_node_identifier ( generateActiveNodeIdentifier ( ) ) <nl> { <nl> - check_period_ms = storage . settings . zookeeper_session_expiration_check_period . totalSeconds ( ) * 1000 ; <nl> + const auto storage_settings = storage . getCOWSettings ( ) ; <nl> + check_period_ms = storage_settings - > zookeeper_session_expiration_check_period . totalSeconds ( ) * 1000 ; <nl> <nl> / / / Periodicity of checking lag of replica . <nl> - if ( check_period_ms > static_cast < Int64 > ( storage . settings . check_delay_period ) * 1000 ) <nl> - check_period_ms = storage . settings . 
check_delay_period * 1000 ; <nl> + if ( check_period_ms > static_cast < Int64 > ( storage_settings - > check_delay_period ) * 1000 ) <nl> + check_period_ms = storage_settings - > check_delay_period * 1000 ; <nl> <nl> task = storage . global_context . getSchedulePool ( ) . createTask ( log_name , [ this ] { run ( ) ; } ) ; <nl> } <nl> void ReplicatedMergeTreeRestartingThread : : run ( ) <nl> } <nl> <nl> time_t current_time = time ( nullptr ) ; <nl> - if ( current_time > = prev_time_of_check_delay + static_cast < time_t > ( storage . settings . check_delay_period ) ) <nl> + const auto storage_settings = storage . getCOWSettings ( ) ; <nl> + if ( current_time > = prev_time_of_check_delay + static_cast < time_t > ( storage_settings - > check_delay_period ) ) <nl> { <nl> / / / Find out lag of replicas . <nl> time_t absolute_delay = 0 ; <nl> void ReplicatedMergeTreeRestartingThread : : run ( ) <nl> <nl> / / / We give up leadership if the relative lag is greater than threshold . <nl> if ( storage . is_leader <nl> - & & relative_delay > static_cast < time_t > ( storage . settings . min_relative_delay_to_yield_leadership ) ) <nl> + & & relative_delay > static_cast < time_t > ( storage_settings - > min_relative_delay_to_yield_leadership ) ) <nl> { <nl> LOG_INFO ( log , " Relative replica delay ( " < < relative_delay < < " seconds ) is bigger than threshold ( " <nl> - < < storage . settings . min_relative_delay_to_yield_leadership < < " ) . Will yield leadership . " ) ; <nl> + < < storage_settings - > min_relative_delay_to_yield_leadership < < " ) . Will yield leadership . " ) ; <nl> <nl> ProfileEvents : : increment ( ProfileEvents : : ReplicaYieldLeadership ) ; <nl> <nl> bool ReplicatedMergeTreeRestartingThread : : tryStartup ( ) <nl> activateReplica ( ) ; <nl> <nl> const auto & zookeeper = storage . getZooKeeper ( ) ; <nl> + const auto storage_settings = storage . getCOWSettings ( ) ; <nl> <nl> storage . cloneReplicaIfNeeded ( zookeeper ) ; <nl> <nl> bool ReplicatedMergeTreeRestartingThread : : tryStartup ( ) <nl> <nl> updateQuorumIfWeHavePart ( ) ; <nl> <nl> - if ( storage . settings . replicated_can_become_leader ) <nl> + if ( storage_settings - > replicated_can_become_leader ) <nl> storage . enterLeaderElection ( ) ; <nl> else <nl> LOG_INFO ( log , " Will not enter leader election because replicated_can_become_leader = 0 " ) ; <nl> mmm a / dbms / src / Storages / MergeTree / ReplicatedMergeTreeTableMetadata . cpp <nl> ppp b / dbms / src / Storages / MergeTree / ReplicatedMergeTreeTableMetadata . cpp <nl> ReplicatedMergeTreeTableMetadata : : ReplicatedMergeTreeTableMetadata ( const MergeTr <nl> if ( data . format_version < MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING ) <nl> date_column = data . minmax_idx_columns [ data . minmax_idx_date_column_pos ] ; <nl> <nl> + const auto data_settings = data . getCOWSettings ( ) ; <nl> sampling_expression = formattedAST ( data . sample_by_ast ) ; <nl> - index_granularity = data . settings . index_granularity ; <nl> + index_granularity = data_settings - > index_granularity ; <nl> merging_params_mode = static_cast < int > ( data . merging_params . mode ) ; <nl> sign_column = data . merging_params . sign_column ; <nl> <nl> ReplicatedMergeTreeTableMetadata : : ReplicatedMergeTreeTableMetadata ( const MergeTr <nl> ttl_table = formattedAST ( data . ttl_table_ast ) ; <nl> skip_indices = data . getIndices ( ) . toString ( ) ; <nl> if ( data . canUseAdaptiveGranularity ( ) ) <nl> - index_granularity_bytes = data . settings . 
index_granularity_bytes ; <nl> + index_granularity_bytes = data_settings - > index_granularity_bytes ; <nl> else <nl> index_granularity_bytes = 0 ; <nl> <nl> mmm a / dbms / src / Storages / MergeTree / registerStorageMergeTree . cpp <nl> ppp b / dbms / src / Storages / MergeTree / registerStorageMergeTree . cpp <nl> static StoragePtr create ( const StorageFactory : : Arguments & args ) <nl> ASTPtr ttl_table_ast ; <nl> IndicesDescription indices_description ; <nl> ConstraintsDescription constraints_description ; <nl> - MergeTreeSettings storage_settings = args . context . getMergeTreeSettings ( ) ; <nl> + MutableMergeTreeSettingsPtr storage_settings = MergeTreeSettings : : create ( args . context . getMergeTreeSettings ( ) ) ; <nl> <nl> if ( is_extended_storage_def ) <nl> { <nl> static StoragePtr create ( const StorageFactory : : Arguments & args ) <nl> if ( args . query . columns_list & & args . query . columns_list - > constraints ) <nl> for ( const auto & constraint : args . query . columns_list - > constraints - > children ) <nl> constraints_description . constraints . push_back ( <nl> - std : : dynamic_pointer_cast < ASTConstraintDeclaration > ( constraint - > clone ( ) ) ) ; <nl> - storage_settings . loadFromQuery ( * args . storage_def ) ; <nl> + std : : dynamic_pointer_cast < ASTConstraintDeclaration > ( constraint - > clone ( ) ) ) ; <nl> + <nl> + storage_settings - > loadFromQuery ( * args . storage_def ) ; <nl> } <nl> else <nl> { <nl> static StoragePtr create ( const StorageFactory : : Arguments & args ) <nl> <nl> const auto * ast = engine_args . back ( ) - > as < ASTLiteral > ( ) ; <nl> if ( ast & & ast - > value . getType ( ) = = Field : : Types : : UInt64 ) <nl> - storage_settings . index_granularity = safeGet < UInt64 > ( ast - > value ) ; <nl> + storage_settings - > index_granularity = safeGet < UInt64 > ( ast - > value ) ; <nl> else <nl> throw Exception ( <nl> " Index granularity must be a positive integer " + getMergeTreeVerboseHelp ( is_extended_storage_def ) , <nl> static StoragePtr create ( const StorageFactory : : Arguments & args ) <nl> zookeeper_path , replica_name , args . attach , args . data_path , args . database_name , args . table_name , <nl> args . columns , indices_description , constraints_description , <nl> args . context , date_column_name , partition_by_ast , order_by_ast , primary_key_ast , <nl> - sample_by_ast , ttl_table_ast , merging_params , storage_settings , <nl> + sample_by_ast , ttl_table_ast , merging_params , std : : move ( storage_settings ) , <nl> args . has_force_restore_data_flag ) ; <nl> else <nl> return StorageMergeTree : : create ( <nl> args . data_path , args . database_name , args . table_name , args . columns , indices_description , <nl> constraints_description , args . attach , args . context , date_column_name , partition_by_ast , order_by_ast , <nl> - primary_key_ast , sample_by_ast , ttl_table_ast , merging_params , storage_settings , <nl> + primary_key_ast , sample_by_ast , ttl_table_ast , merging_params , std : : move ( storage_settings ) , <nl> args . has_force_restore_data_flag ) ; <nl> } <nl> <nl> mmm a / dbms / src / Storages / StorageBuffer . cpp <nl> ppp b / dbms / src / Storages / StorageBuffer . cpp <nl> void StorageBuffer : : alter ( const AlterCommands & params , const String & database_ <nl> auto new_columns = getColumns ( ) ; <nl> auto new_indices = getIndices ( ) ; <nl> auto new_constraints = getConstraints ( ) ; <nl> - params . apply ( new_columns ) ; <nl> + params . 
applyForColumnsOnly ( new_columns ) ; <nl> context . getDatabase ( database_name_ ) - > alterTable ( context , table_name_ , new_columns , new_indices , new_constraints , { } ) ; <nl> setColumns ( std : : move ( new_columns ) ) ; <nl> } <nl> mmm a / dbms / src / Storages / StorageDistributed . cpp <nl> ppp b / dbms / src / Storages / StorageDistributed . cpp <nl> BlockOutputStreamPtr StorageDistributed : : write ( const ASTPtr & , const Context & c <nl> const auto & settings = context . getSettingsRef ( ) ; <nl> <nl> / / / Ban an attempt to make async insert into the table belonging to DatabaseMemory <nl> - if ( path . empty ( ) & & ! owned_cluster & & ! settings . insert_distributed_sync . value ) <nl> + if ( path . empty ( ) & & ! owned_cluster & & ! settings . insert_distributed_sync ) <nl> { <nl> throw Exception ( " Storage " + getName ( ) + " must has own data directory to enable asynchronous inserts " , <nl> ErrorCodes : : BAD_ARGUMENTS ) ; <nl> void StorageDistributed : : alter ( <nl> auto new_columns = getColumns ( ) ; <nl> auto new_indices = getIndices ( ) ; <nl> auto new_constraints = getConstraints ( ) ; <nl> - params . apply ( new_columns ) ; <nl> + params . applyForColumnsOnly ( new_columns ) ; <nl> context . getDatabase ( current_database_name ) - > alterTable ( context , current_table_name , new_columns , new_indices , new_constraints , { } ) ; <nl> setColumns ( std : : move ( new_columns ) ) ; <nl> } <nl> mmm a / dbms / src / Storages / StorageJoin . cpp <nl> ppp b / dbms / src / Storages / StorageJoin . cpp <nl> void registerStorageJoin ( StorageFactory & factory ) <nl> args . database_name , <nl> args . table_name , <nl> key_names , <nl> - join_use_nulls . value , <nl> - SizeLimits { max_rows_in_join . value , max_bytes_in_join . value , join_overflow_mode . value } , <nl> + join_use_nulls , <nl> + SizeLimits { max_rows_in_join , max_bytes_in_join , join_overflow_mode } , <nl> kind , <nl> strictness , <nl> args . columns , <nl> mmm a / dbms / src / Storages / StorageMerge . cpp <nl> ppp b / dbms / src / Storages / StorageMerge . cpp <nl> void StorageMerge : : alter ( <nl> auto new_columns = getColumns ( ) ; <nl> auto new_indices = getIndices ( ) ; <nl> auto new_constraints = getConstraints ( ) ; <nl> - params . apply ( new_columns ) ; <nl> + params . applyForColumnsOnly ( new_columns ) ; <nl> context . getDatabase ( database_name_ ) - > alterTable ( context , table_name_ , new_columns , new_indices , new_constraints , { } ) ; <nl> setColumns ( new_columns ) ; <nl> } <nl> mmm a / dbms / src / Storages / StorageMergeTree . cpp <nl> ppp b / dbms / src / Storages / StorageMergeTree . cpp <nl> <nl> # include < Parsers / ASTFunction . h > <nl> # include < Parsers / ASTLiteral . h > <nl> # include < Parsers / ASTPartition . h > <nl> + # include < Parsers / ASTSetQuery . h > <nl> # include < Parsers / queryToString . h > <nl> # include < Storages / MergeTree / MergeTreeData . h > <nl> # include < Storages / MergeTree / ActiveDataPartSet . h > <nl> namespace ErrorCodes <nl> extern const int INCORRECT_FILE_NAME ; <nl> extern const int CANNOT_ASSIGN_OPTIMIZE ; <nl> extern const int INCOMPATIBLE_COLUMNS ; <nl> + extern const int UNKNOWN_SETTING ; <nl> } <nl> <nl> namespace ActionLocks <nl> StorageMergeTree : : StorageMergeTree ( <nl> const ASTPtr & sample_by_ast_ , / / / nullptr , if sampling is not supported . 
<nl> const ASTPtr & ttl_table_ast_ , <nl> const MergingParams & merging_params_ , <nl> - const MergeTreeSettings & settings_ , <nl> + MergeTreeSettingsPtr settings_ , <nl> bool has_force_restore_data_flag ) <nl> : MergeTreeData ( database_name_ , table_name_ , <nl> path_ + escapeForFileName ( table_name_ ) + ' / ' , <nl> void StorageMergeTree : : alter ( <nl> { <nl> if ( ! params . isMutable ( ) ) <nl> { <nl> + SettingsChanges new_changes ; <nl> + / / / We don ' t need to lock table structure exclusively to ALTER settings . <nl> + if ( params . isSettingsAlter ( ) ) <nl> + { <nl> + params . applyForSettingsOnly ( new_changes ) ; <nl> + alterSettings ( new_changes , current_database_name , current_table_name , context , table_lock_holder ) ; <nl> + return ; <nl> + } <nl> + <nl> lockStructureExclusively ( table_lock_holder , context . getCurrentQueryId ( ) ) ; <nl> auto new_columns = getColumns ( ) ; <nl> auto new_indices = getIndices ( ) ; <nl> auto new_constraints = getConstraints ( ) ; <nl> - params . apply ( new_columns ) ; <nl> + ASTPtr new_order_by_ast = order_by_ast ; <nl> + ASTPtr new_primary_key_ast = primary_key_ast ; <nl> + ASTPtr new_ttl_table_ast = ttl_table_ast ; <nl> + params . apply ( new_columns , new_indices , new_constraints , new_order_by_ast , new_primary_key_ast , new_ttl_table_ast , new_changes ) ; <nl> context . getDatabase ( current_database_name ) - > alterTable ( context , current_table_name , new_columns , new_indices , new_constraints , { } ) ; <nl> setColumns ( std : : move ( new_columns ) ) ; <nl> return ; <nl> void StorageMergeTree : : alter ( <nl> ASTPtr new_order_by_ast = order_by_ast ; <nl> ASTPtr new_primary_key_ast = primary_key_ast ; <nl> ASTPtr new_ttl_table_ast = ttl_table_ast ; <nl> - params . apply ( new_columns , new_indices , new_constraints , new_order_by_ast , new_primary_key_ast , new_ttl_table_ast ) ; <nl> + SettingsChanges new_changes ; <nl> + params . apply ( new_columns , new_indices , new_constraints , new_order_by_ast , new_primary_key_ast , new_ttl_table_ast , new_changes ) ; <nl> <nl> auto transactions = prepareAlterTransactions ( new_columns , new_indices , context ) ; <nl> <nl> Int64 StorageMergeTree : : getCurrentMutationVersion ( <nl> <nl> void StorageMergeTree : : clearOldMutations ( ) <nl> { <nl> - if ( ! settings . finished_mutations_to_keep ) <nl> + const auto settings = getCOWSettings ( ) ; <nl> + if ( ! settings - > finished_mutations_to_keep ) <nl> return ; <nl> <nl> std : : vector < MergeTreeMutationEntry > mutations_to_delete ; <nl> { <nl> std : : lock_guard lock ( currently_merging_mutex ) ; <nl> <nl> - if ( current_mutations_by_version . size ( ) < = settings . finished_mutations_to_keep ) <nl> + if ( current_mutations_by_version . size ( ) < = settings - > finished_mutations_to_keep ) <nl> return ; <nl> <nl> auto begin_it = current_mutations_by_version . begin ( ) ; <nl> void StorageMergeTree : : clearOldMutations ( ) <nl> end_it = current_mutations_by_version . upper_bound ( * min_version ) ; <nl> <nl> size_t done_count = std : : distance ( begin_it , end_it ) ; <nl> - if ( done_count < = settings . finished_mutations_to_keep ) <nl> + if ( done_count < = settings - > finished_mutations_to_keep ) <nl> return ; <nl> <nl> - size_t to_delete_count = done_count - settings . 
finished_mutations_to_keep ; <nl> + size_t to_delete_count = done_count - settings - > finished_mutations_to_keep ; <nl> <nl> auto it = begin_it ; <nl> for ( size_t i = 0 ; i < to_delete_count ; + + i ) <nl> void StorageMergeTree : : clearColumnOrIndexInPartition ( const ASTPtr & partition , c <nl> ASTPtr ignored_order_by_ast ; <nl> ASTPtr ignored_primary_key_ast ; <nl> ASTPtr ignored_ttl_table_ast ; <nl> - alter_command . apply ( new_columns , new_indices , new_constraints , ignored_order_by_ast , ignored_primary_key_ast , ignored_ttl_table_ast ) ; <nl> + SettingsChanges ignored_settings_changes ; <nl> + <nl> + alter_command . apply ( new_columns , new_indices , new_constraints , ignored_order_by_ast , <nl> + ignored_primary_key_ast , ignored_ttl_table_ast , ignored_settings_changes ) ; <nl> <nl> auto columns_for_parts = new_columns . getAllPhysical ( ) ; <nl> for ( const auto & part : parts ) <nl> mmm a / dbms / src / Storages / StorageMergeTree . h <nl> ppp b / dbms / src / Storages / StorageMergeTree . h <nl> class StorageMergeTree : public ext : : shared_ptr_helper < StorageMergeTree > , public <nl> friend struct CurrentlyMergingPartsTagger ; <nl> <nl> protected : <nl> + <nl> / * * Attach the table with the appropriate name , along the appropriate path ( with / at the end ) , <nl> * ( correctness of names and paths are not checked ) <nl> * consisting of the specified columns . <nl> class StorageMergeTree : public ext : : shared_ptr_helper < StorageMergeTree > , public <nl> const ASTPtr & sample_by_ast_ , / / / nullptr , if sampling is not supported . <nl> const ASTPtr & ttl_table_ast_ , <nl> const MergingParams & merging_params_ , <nl> - const MergeTreeSettings & settings_ , <nl> + MergeTreeSettingsPtr settings_ , <nl> bool has_force_restore_data_flag ) ; <nl> } ; <nl> <nl> mmm a / dbms / src / Storages / StorageNull . cpp <nl> ppp b / dbms / src / Storages / StorageNull . cpp <nl> void StorageNull : : alter ( <nl> ColumnsDescription new_columns = getColumns ( ) ; <nl> IndicesDescription new_indices = getIndices ( ) ; <nl> ConstraintsDescription new_constraints = getConstraints ( ) ; <nl> - params . apply ( new_columns ) ; <nl> + params . applyForColumnsOnly ( new_columns ) ; <nl> context . getDatabase ( current_database_name ) - > alterTable ( context , current_table_name , new_columns , new_indices , new_constraints , { } ) ; <nl> setColumns ( std : : move ( new_columns ) ) ; <nl> } <nl> mmm a / dbms / src / Storages / StorageReplicatedMergeTree . cpp <nl> ppp b / dbms / src / Storages / StorageReplicatedMergeTree . cpp <nl> StorageReplicatedMergeTree : : StorageReplicatedMergeTree ( <nl> const ASTPtr & sample_by_ast_ , <nl> const ASTPtr & ttl_table_ast_ , <nl> const MergingParams & merging_params_ , <nl> - const MergeTreeSettings & settings_ , <nl> + MergeTreeSettingsPtr settings_ , <nl> bool has_force_restore_data_flag ) <nl> : MergeTreeData ( database_name_ , table_name_ , <nl> path_ + escapeForFileName ( table_name_ ) + ' / ' , <nl> void StorageReplicatedMergeTree : : createTableIfNotExists ( ) <nl> } <nl> <nl> <nl> - / * * Verify that list of columns and table settings match those specified in ZK ( / metadata ) . <nl> + / * * Verify that list of columns and table storage_settings match those specified in ZK ( / metadata ) . <nl> * If not , throw an exception . 
<nl> * / <nl> void StorageReplicatedMergeTree : : checkTableStructure ( bool skip_sanity_checks , bool allow_alter ) <nl> void StorageReplicatedMergeTree : : checkParts ( bool skip_sanity_checks ) <nl> for ( const auto & part : parts ) <nl> total_rows_on_filesystem + = part - > rows_count ; <nl> <nl> - bool insane = unexpected_parts_rows > total_rows_on_filesystem * settings . replicated_max_ratio_of_wrong_parts ; <nl> + const auto storage_settings = getCOWSettings ( ) ; <nl> + bool insane = unexpected_parts_rows > total_rows_on_filesystem * storage_settings - > replicated_max_ratio_of_wrong_parts ; <nl> <nl> if ( insane & & ! skip_sanity_checks ) <nl> { <nl> void StorageReplicatedMergeTree : : checkPartChecksumsAndAddCommitOps ( const zkutil : <nl> <nl> if ( ! has_been_already_added ) <nl> { <nl> + const auto storage_settings = getCOWSettings ( ) ; <nl> String part_path = replica_path + " / parts / " + part_name ; <nl> <nl> ops . emplace_back ( zkutil : : makeCheckRequest ( <nl> zookeeper_path + " / columns " , expected_columns_version ) ) ; <nl> <nl> - if ( settings . use_minimalistic_part_header_in_zookeeper ) <nl> + if ( storage_settings - > use_minimalistic_part_header_in_zookeeper ) <nl> { <nl> ops . emplace_back ( zkutil : : makeCreateRequest ( <nl> part_path , local_part_header . toString ( ) , zkutil : : CreateMode : : Persistent ) ) ; <nl> MergeTreeData : : DataPartsVector StorageReplicatedMergeTree : : checkPartChecksumsAnd <nl> String StorageReplicatedMergeTree : : getChecksumsForZooKeeper ( const MergeTreeDataPartChecksums & checksums ) const <nl> { <nl> return MinimalisticDataPartChecksums : : getSerializedString ( checksums , <nl> - static_cast < bool > ( settings . use_minimalistic_checksums_in_zookeeper ) ) ; <nl> + getCOWSettings ( ) - > use_minimalistic_checksums_in_zookeeper ) ; <nl> } <nl> <nl> <nl> bool StorageReplicatedMergeTree : : tryExecuteMerge ( const LogEntry & entry ) <nl> parts . push_back ( part ) ; <nl> } <nl> <nl> + const auto storage_settings = getCOWSettings ( ) ; <nl> if ( ! have_all_parts ) <nl> { <nl> / / / If you do not have all the necessary parts , try to take some already merged part from someone . <nl> LOG_DEBUG ( log , " Don ' t have all parts for merge " < < entry . new_part_name < < " ; will try to fetch it instead " ) ; <nl> return false ; <nl> } <nl> - else if ( entry . create_time + settings . prefer_fetch_merged_part_time_threshold . totalSeconds ( ) < = time ( nullptr ) ) <nl> + else if ( entry . create_time + storage_settings - > prefer_fetch_merged_part_time_threshold . totalSeconds ( ) < = time ( nullptr ) ) <nl> { <nl> / / / If entry is old enough , and have enough size , and part are exists in any replica , <nl> / / / then prefer fetching of merged part from replica . <nl> bool StorageReplicatedMergeTree : : tryExecuteMerge ( const LogEntry & entry ) <nl> for ( const auto & part : parts ) <nl> sum_parts_bytes_on_disk + = part - > bytes_on_disk ; <nl> <nl> - if ( sum_parts_bytes_on_disk > = settings . prefer_fetch_merged_part_size_threshold ) <nl> + if ( sum_parts_bytes_on_disk > = storage_settings - > prefer_fetch_merged_part_size_threshold ) <nl> { <nl> String replica = findReplicaHavingPart ( entry . new_part_name , true ) ; / / / NOTE excessive ZK requests for same data later , may remove . <nl> if ( ! replica . 
empty ( ) ) <nl> bool StorageReplicatedMergeTree : : tryExecuteMerge ( const LogEntry & entry ) <nl> bool StorageReplicatedMergeTree : : tryExecutePartMutation ( const StorageReplicatedMergeTree : : LogEntry & entry ) <nl> { <nl> const String & source_part_name = entry . source_parts . at ( 0 ) ; <nl> + const auto storage_settings = getCOWSettings ( ) ; <nl> LOG_TRACE ( log , " Executing log entry to mutate part " < < source_part_name < < " to " < < entry . new_part_name ) ; <nl> <nl> DataPartPtr source_part = getActiveContainingPart ( source_part_name ) ; <nl> bool StorageReplicatedMergeTree : : tryExecutePartMutation ( const StorageReplicatedM <nl> / / / TODO - some better heuristic ? <nl> size_t estimated_space_for_result = MergeTreeDataMergerMutator : : estimateNeededDiskSpace ( { source_part } ) ; <nl> <nl> - if ( entry . create_time + settings . prefer_fetch_merged_part_time_threshold . totalSeconds ( ) < = time ( nullptr ) <nl> - & & estimated_space_for_result > = settings . prefer_fetch_merged_part_size_threshold ) <nl> + if ( entry . create_time + storage_settings - > prefer_fetch_merged_part_time_threshold . totalSeconds ( ) < = time ( nullptr ) <nl> + & & estimated_space_for_result > = storage_settings - > prefer_fetch_merged_part_size_threshold ) <nl> { <nl> / / / If entry is old enough , and have enough size , and some replica has the desired part , <nl> / / / then prefer fetching from replica . <nl> bool StorageReplicatedMergeTree : : tryExecutePartMutation ( const StorageReplicatedM <nl> bool StorageReplicatedMergeTree : : executeFetch ( LogEntry & entry ) <nl> { <nl> String replica = findReplicaHavingCoveringPart ( entry , true ) ; <nl> + const auto storage_settings = getCOWSettings ( ) ; <nl> <nl> static std : : atomic_uint total_fetches { 0 } ; <nl> - if ( settings . replicated_max_parallel_fetches & & total_fetches > = settings . replicated_max_parallel_fetches ) <nl> + if ( storage_settings - > replicated_max_parallel_fetches & & total_fetches > = storage_settings - > replicated_max_parallel_fetches ) <nl> { <nl> - throw Exception ( " Too many total fetches from replicas , maximum : " + settings . replicated_max_parallel_fetches . toString ( ) , <nl> + throw Exception ( " Too many total fetches from replicas , maximum : " + storage_settings - > replicated_max_parallel_fetches . toString ( ) , <nl> ErrorCodes : : TOO_MANY_FETCHES ) ; <nl> } <nl> <nl> + + total_fetches ; <nl> SCOPE_EXIT ( { - - total_fetches ; } ) ; <nl> <nl> - if ( settings . replicated_max_parallel_fetches_for_table & & current_table_fetches > = settings . replicated_max_parallel_fetches_for_table ) <nl> + if ( storage_settings - > replicated_max_parallel_fetches_for_table & & current_table_fetches > = storage_settings - > replicated_max_parallel_fetches_for_table ) <nl> { <nl> - throw Exception ( " Too many fetches from replicas for table , maximum : " + settings . replicated_max_parallel_fetches_for_table . toString ( ) , <nl> + throw Exception ( " Too many fetches from replicas for table , maximum : " + storage_settings - > replicated_max_parallel_fetches_for_table . toString ( ) , <nl> ErrorCodes : : TOO_MANY_FETCHES ) ; <nl> } <nl> <nl> void StorageReplicatedMergeTree : : executeClearColumnOrIndexInPartition ( const LogE <nl> ASTPtr ignored_order_by_ast ; <nl> ASTPtr ignored_primary_key_ast ; <nl> ASTPtr ignored_ttl_table_ast ; <nl> - alter_command . 
apply ( new_columns , new_indices , new_constraints , ignored_order_by_ast , ignored_primary_key_ast , ignored_ttl_table_ast ) ; <nl> + SettingsChanges ignored_changes ; <nl> + alter_command . apply ( new_columns , new_indices , new_constraints , ignored_order_by_ast , ignored_primary_key_ast , ignored_ttl_table_ast , ignored_changes ) ; <nl> <nl> size_t modified_parts = 0 ; <nl> auto parts = getDataParts ( ) ; <nl> void StorageReplicatedMergeTree : : mergeSelectingTask ( ) <nl> if ( ! is_leader ) <nl> return ; <nl> <nl> + const auto storage_settings = getCOWSettings ( ) ; <nl> const bool deduplicate = false ; / / / TODO : read deduplicate option from table config <nl> const bool force_ttl = false ; <nl> <nl> void StorageReplicatedMergeTree : : mergeSelectingTask ( ) <nl> / / / If many merges is already queued , then will queue only small enough merges . <nl> / / / Otherwise merge queue could be filled with only large merges , <nl> / / / and in the same time , many small parts could be created and won ' t be merged . <nl> + <nl> auto merges_and_mutations_queued = queue . countMergesAndPartMutations ( ) ; <nl> size_t merges_and_mutations_sum = merges_and_mutations_queued . first + merges_and_mutations_queued . second ; <nl> - if ( merges_and_mutations_sum > = settings . max_replicated_merges_in_queue ) <nl> + if ( merges_and_mutations_sum > = storage_settings - > max_replicated_merges_in_queue ) <nl> { <nl> LOG_TRACE ( log , " Number of queued merges ( " < < merges_and_mutations_queued . first < < " ) and part mutations ( " <nl> < < merges_and_mutations_queued . second < < " ) is greater than max_replicated_merges_in_queue ( " <nl> - < < settings . max_replicated_merges_in_queue < < " ) , so won ' t select new parts to merge or mutate . " ) ; <nl> + < < storage_settings - > max_replicated_merges_in_queue < < " ) , so won ' t select new parts to merge or mutate . " ) ; <nl> } <nl> else <nl> { <nl> UInt64 max_source_parts_size_for_merge = merger_mutator . getMaxSourcePartsSizeForMerge ( <nl> - settings . max_replicated_merges_in_queue , merges_and_mutations_sum ) ; <nl> + storage_settings - > max_replicated_merges_in_queue , merges_and_mutations_sum ) ; <nl> UInt64 max_source_part_size_for_mutation = merger_mutator . getMaxSourcePartSizeForMutation ( ) ; <nl> <nl> FutureMergedMutatedPart future_merged_part ; <nl> void StorageReplicatedMergeTree : : mergeSelectingTask ( ) <nl> } <nl> / / / If there are many mutations in queue it may happen , that we cannot enqueue enough merges to merge all new parts <nl> else if ( max_source_part_size_for_mutation > 0 & & queue . countMutations ( ) > 0 <nl> - & & merges_and_mutations_queued . second < settings . max_replicated_mutations_in_queue ) <nl> + & & merges_and_mutations_queued . second < storage_settings - > max_replicated_mutations_in_queue ) <nl> { <nl> / / / Choose a part to mutate . <nl> DataPartsVector data_parts = getDataPartsVector ( ) ; <nl> void StorageReplicatedMergeTree : : assertNotReadonly ( ) const <nl> <nl> BlockOutputStreamPtr StorageReplicatedMergeTree : : write ( const ASTPtr & / * query * / , const Context & context ) <nl> { <nl> + const auto storage_settings = getCOWSettings ( ) ; <nl> assertNotReadonly ( ) ; <nl> <nl> const Settings & query_settings = context . getSettingsRef ( ) ; <nl> - bool deduplicate = settings . replicated_deduplication_window ! = 0 & & query_settings . insert_deduplicate ; <nl> + bool deduplicate = storage_settings - > replicated_deduplication_window ! = 0 & & query_settings . 
insert_deduplicate ; <nl> <nl> return std : : make_shared < ReplicatedMergeTreeBlockOutputStream > ( * this , <nl> query_settings . insert_quorum , query_settings . insert_quorum_timeout . totalMilliseconds ( ) , query_settings . max_partitions_per_insert_block , deduplicate ) ; <nl> bool StorageReplicatedMergeTree : : optimize ( const ASTPtr & query , const ASTPtr & p <nl> } ; <nl> <nl> bool force_ttl = ( final & & ( hasTableTTL ( ) | | hasAnyColumnTTL ( ) ) ) ; <nl> + const auto storage_settings = getCOWSettings ( ) ; <nl> <nl> if ( ! partition & & final ) <nl> { <nl> bool StorageReplicatedMergeTree : : optimize ( const ASTPtr & query , const ASTPtr & p <nl> if ( ! partition ) <nl> { <nl> selected = merger_mutator . selectPartsToMerge ( <nl> - future_merged_part , true , settings . max_bytes_to_merge_at_max_space_in_pool , can_merge , & disable_reason ) ; <nl> + future_merged_part , true , storage_settings - > max_bytes_to_merge_at_max_space_in_pool , can_merge , & disable_reason ) ; <nl> } <nl> else <nl> { <nl> bool StorageReplicatedMergeTree : : optimize ( const ASTPtr & query , const ASTPtr & p <nl> <nl> <nl> void StorageReplicatedMergeTree : : alter ( <nl> - const AlterCommands & params , const String & / * database_name * / , const String & / * table_name * / , <nl> + const AlterCommands & params , const String & current_database_name , const String & current_table_name , <nl> const Context & query_context , TableStructureWriteLockHolder & table_lock_holder ) <nl> { <nl> assertNotReadonly ( ) ; <nl> <nl> LOG_DEBUG ( log , " Doing ALTER " ) ; <nl> <nl> + if ( params . isSettingsAlter ( ) ) <nl> + { <nl> + / / / We don ' t replicate storage_settings ALTER . It ' s local operation . <nl> + / / / Also we don ' t upgrade alter lock to table structure lock . <nl> + LOG_DEBUG ( log , " ALTER storage_settings only " ) ; <nl> + SettingsChanges new_changes ; <nl> + params . applyForSettingsOnly ( new_changes ) ; <nl> + alterSettings ( new_changes , current_database_name , current_table_name , query_context , table_lock_holder ) ; <nl> + return ; <nl> + } <nl> + <nl> / / / Alter is done by modifying the metadata nodes in ZK that are shared between all replicas <nl> / / / ( / columns , / metadata ) . We set contents of the shared nodes to the new values and wait while <nl> / / / replicas asynchronously apply changes ( see ReplicatedMergeTreeAlterThread . cpp ) and modify <nl> void StorageReplicatedMergeTree : : alter ( <nl> ASTPtr new_order_by_ast = order_by_ast ; <nl> ASTPtr new_primary_key_ast = primary_key_ast ; <nl> ASTPtr new_ttl_table_ast = ttl_table_ast ; <nl> - params . apply ( new_columns , new_indices , new_constraints , new_order_by_ast , new_primary_key_ast , new_ttl_table_ast ) ; <nl> + SettingsChanges new_changes ; <nl> + params . apply ( new_columns , new_indices , new_constraints , new_order_by_ast , new_primary_key_ast , new_ttl_table_ast , new_changes ) ; <nl> <nl> String new_columns_str = new_columns . toString ( ) ; <nl> if ( new_columns_str ! = getColumns ( ) . toString ( ) ) <nl> void StorageReplicatedMergeTree : : waitForReplicaToProcessLogEntry ( const String & <nl> void StorageReplicatedMergeTree : : getStatus ( Status & res , bool with_zk_fields ) <nl> { <nl> auto zookeeper = tryGetZooKeeper ( ) ; <nl> + const auto storage_settings = getCOWSettings ( ) ; <nl> <nl> res . is_leader = is_leader ; <nl> - res . can_become_leader = settings . replicated_can_become_leader ; <nl> + res . can_become_leader = storage_settings - > replicated_can_become_leader ; <nl> res . 
is_readonly = is_readonly ; <nl> res . is_session_expired = ! zookeeper | | zookeeper - > expired ( ) ; <nl> <nl> void StorageReplicatedMergeTree : : getReplicaDelays ( time_t & out_absolute_delay , t <nl> <nl> out_absolute_delay = getAbsoluteDelay ( ) ; <nl> out_relative_delay = 0 ; <nl> + const auto storage_settings = getCOWSettings ( ) ; <nl> <nl> / * * Relative delay is the maximum difference of absolute delay from any other replica , <nl> * ( if this replica lags behind any other live replica , or zero , otherwise ) . <nl> * Calculated only if the absolute delay is large enough . <nl> * / <nl> <nl> - if ( out_absolute_delay < static_cast < time_t > ( settings . min_relative_delay_to_yield_leadership ) ) <nl> + if ( out_absolute_delay < static_cast < time_t > ( storage_settings - > min_relative_delay_to_yield_leadership ) ) <nl> return ; <nl> <nl> auto zookeeper = getZooKeeper ( ) ; <nl> void StorageReplicatedMergeTree : : mutate ( const MutationCommands & commands , const <nl> / / / instead . <nl> / / / <nl> / / / Mutations of individual parts are in fact pretty similar to merges , e . g . their assignment and execution <nl> - / / / is governed by the same settings . TODO : support a single " merge - mutation " operation when the data <nl> + / / / is governed by the same storage_settings . TODO : support a single " merge - mutation " operation when the data <nl> / / / read from the the source parts is first mutated on the fly to some uniform mutation version and then <nl> / / / merged to a resulting part . <nl> / / / <nl> void StorageReplicatedMergeTree : : getCommitPartOps ( <nl> const String & block_id_path ) const <nl> { <nl> const String & part_name = part - > name ; <nl> + const auto storage_settings = getCOWSettings ( ) ; <nl> <nl> if ( ! block_id_path . empty ( ) ) <nl> { <nl> void StorageReplicatedMergeTree : : getCommitPartOps ( <nl> zookeeper_path + " / columns " , <nl> columns_version ) ) ; <nl> <nl> - if ( settings . use_minimalistic_part_header_in_zookeeper ) <nl> + if ( storage_settings - > use_minimalistic_part_header_in_zookeeper ) <nl> { <nl> ops . emplace_back ( zkutil : : makeCreateRequest ( <nl> replica_path + " / parts / " + part - > name , <nl> void StorageReplicatedMergeTree : : updatePartHeaderInZooKeeperAndCommit ( <nl> AlterDataPartTransaction & transaction ) <nl> { <nl> String part_path = replica_path + " / parts / " + transaction . getPartName ( ) ; <nl> + const auto storage_settings = getCOWSettings ( ) ; <nl> <nl> bool need_delete_columns_and_checksums_nodes = false ; <nl> try <nl> { <nl> - if ( settings . use_minimalistic_part_header_in_zookeeper ) <nl> + if ( storage_settings - > use_minimalistic_part_header_in_zookeeper ) <nl> { <nl> auto part_header = ReplicatedMergeTreePartHeader : : fromColumnsAndChecksums ( <nl> transaction . getNewColumns ( ) , transaction . getNewChecksums ( ) ) ; <nl> CheckResults StorageReplicatedMergeTree : : checkData ( const ASTPtr & query , const C <nl> <nl> bool StorageReplicatedMergeTree : : canUseAdaptiveGranularity ( ) const <nl> { <nl> - return settings . index_granularity_bytes ! = 0 & & <nl> - ( settings . enable_mixed_granularity_parts | | <nl> + const auto storage_settings = getCOWSettings ( ) ; <nl> + return storage_settings - > index_granularity_bytes ! = 0 & & <nl> + ( storage_settings - > enable_mixed_granularity_parts | | <nl> ( ! has_non_adaptive_index_granularity_parts & & ! other_replicas_fixed_granularity ) ) ; <nl> } <nl> <nl> mmm a / dbms / src / Storages / StorageReplicatedMergeTree . 
h <nl> ppp b / dbms / src / Storages / StorageReplicatedMergeTree . h <nl> class StorageReplicatedMergeTree : public ext : : shared_ptr_helper < StorageReplicat <nl> const ASTPtr & sample_by_ast_ , <nl> const ASTPtr & table_ttl_ast_ , <nl> const MergingParams & merging_params_ , <nl> - const MergeTreeSettings & settings_ , <nl> + MergeTreeSettingsPtr settings_ , <nl> bool has_force_restore_data_flag ) ; <nl> } ; <nl> <nl> new file mode 100644 <nl> index 00000000000 . . d00491fd7e5 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00980_alter_settings_race . reference <nl> @ @ - 0 , 0 + 1 @ @ <nl> + 1 <nl> new file mode 100755 <nl> index 00000000000 . . 4a948841ed7 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00980_alter_settings_race . sh <nl> <nl> + # ! / usr / bin / env bash <nl> + <nl> + CURDIR = $ ( cd " $ ( dirname " $ { BASH_SOURCE [ 0 ] } " ) " & & pwd ) <nl> + . $ CURDIR / . . / shell_config . sh <nl> + <nl> + $ CLICKHOUSE_CLIENT - - query = " DROP TABLE IF EXISTS table_for_concurrent_alter " <nl> + <nl> + $ CLICKHOUSE_CLIENT - - query = " CREATE TABLE table_for_concurrent_alter ( id UInt64 , Data String ) ENGINE = MergeTree ( ) ORDER BY id SETTINGS index_granularity = 4096 ; " ; <nl> + <nl> + n = 0 <nl> + while [ " $ n " - lt 100 ] ; <nl> + do <nl> + n = $ ( ( n + 1 ) ) <nl> + $ CLICKHOUSE_CLIENT - - query = " INSERT INTO table_for_concurrent_alter VALUES ( 1 , ' Hello ' ) " > / dev / null 2 > / dev / null & <nl> + $ CLICKHOUSE_CLIENT - - query = " OPTIMIZE TABLE table_for_concurrent_alter FINAL " > / dev / null 2 > / dev / null & <nl> + done & <nl> + <nl> + <nl> + q = 0 <nl> + while [ " $ q " - lt 100 ] ; <nl> + do <nl> + q = $ ( ( q + 1 ) ) <nl> + counter = $ ( ( 100 + q ) ) <nl> + $ CLICKHOUSE_CLIENT - - query = " ALTER TABLE table_for_concurrent_alter MODIFY SETTING parts_to_throw_insert = $ counter , parts_to_delay_insert = $ counter , min_merge_bytes_to_use_direct_io = $ counter " > / dev / null 2 > / dev / null & <nl> + done & <nl> + <nl> + sleep 4 <nl> + <nl> + # we just test race conditions , not logic <nl> + $ CLICKHOUSE_CLIENT - - query " SELECT 1 " <nl> + <nl> + $ CLICKHOUSE_CLIENT - - query = " DROP TABLE IF EXISTS table_for_concurrent_alter " <nl> new file mode 100644 <nl> index 00000000000 . . c7f912ddc79 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00980_merge_alter_settings . reference <nl> <nl> + CREATE TABLE default . table_for_alter ( ` id ` UInt64 , ` Data ` String ) ENGINE = MergeTree ( ) ORDER BY id SETTINGS index_granularity = 4096 <nl> + CREATE TABLE default . table_for_alter ( ` id ` UInt64 , ` Data ` String ) ENGINE = MergeTree ( ) ORDER BY id SETTINGS index_granularity = 4096 , parts_to_throw_insert = 1 , parts_to_delay_insert = 1 <nl> + CREATE TABLE default . table_for_alter ( ` id ` UInt64 , ` Data ` String ) ENGINE = MergeTree ( ) ORDER BY id SETTINGS index_granularity = 4096 , parts_to_throw_insert = 100 , parts_to_delay_insert = 100 <nl> + 2 <nl> + CREATE TABLE default . table_for_alter ( ` id ` UInt64 , ` Data ` String ) ENGINE = MergeTree ( ) ORDER BY id SETTINGS index_granularity = 4096 , parts_to_throw_insert = 100 , parts_to_delay_insert = 100 , check_delay_period = 30 <nl> new file mode 100644 <nl> index 00000000000 . . 43838b8a727 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00980_merge_alter_settings . 
sql <nl> <nl> + DROP TABLE IF EXISTS log_for_alter ; <nl> + <nl> + CREATE TABLE log_for_alter ( <nl> + id UInt64 , <nl> + Data String <nl> + ) ENGINE = Log ( ) ; <nl> + <nl> + ALTER TABLE log_for_alter MODIFY SETTING aaa = 123 ; - - { serverError 471 } <nl> + <nl> + DROP TABLE IF EXISTS log_for_alter ; <nl> + <nl> + DROP TABLE IF EXISTS table_for_alter ; <nl> + <nl> + CREATE TABLE table_for_alter ( <nl> + id UInt64 , <nl> + Data String <nl> + ) ENGINE = MergeTree ( ) ORDER BY id SETTINGS index_granularity = 4096 ; <nl> + <nl> + ALTER TABLE table_for_alter MODIFY SETTING index_granularity = 555 ; - - { serverError 472 } <nl> + <nl> + SHOW CREATE TABLE table_for_alter ; <nl> + <nl> + ALTER TABLE table_for_alter MODIFY SETTING parts_to_throw_insert = 1 , parts_to_delay_insert = 1 ; <nl> + <nl> + SHOW CREATE TABLE table_for_alter ; <nl> + <nl> + INSERT INTO table_for_alter VALUES ( 1 , ' 1 ' ) ; <nl> + INSERT INTO table_for_alter VALUES ( 2 , ' 2 ' ) ; - - { serverError 252 } <nl> + <nl> + DETACH TABLE table_for_alter ; <nl> + <nl> + ATTACH TABLE table_for_alter ; <nl> + <nl> + INSERT INTO table_for_alter VALUES ( 2 , ' 2 ' ) ; - - { serverError 252 } <nl> + <nl> + ALTER TABLE table_for_alter MODIFY SETTING xxx_yyy = 124 ; - - { serverError 115 } <nl> + <nl> + ALTER TABLE table_for_alter MODIFY SETTING parts_to_throw_insert = 100 , parts_to_delay_insert = 100 ; <nl> + <nl> + INSERT INTO table_for_alter VALUES ( 2 , ' 2 ' ) ; <nl> + <nl> + SHOW CREATE TABLE table_for_alter ; <nl> + <nl> + SELECT COUNT ( ) FROM table_for_alter ; <nl> + <nl> + ALTER TABLE table_for_alter MODIFY SETTING check_delay_period = 10 , check_delay_period = 20 , check_delay_period = 30 ; <nl> + <nl> + SHOW CREATE TABLE table_for_alter ; <nl> + <nl> + DROP TABLE IF EXISTS table_for_alter ; <nl> + <nl> new file mode 100644 <nl> index 00000000000 . . e55bfadd538 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00980_zookeeper_merge_tree_alter_settings . reference <nl> <nl> + CREATE TABLE default . replicated_table_for_alter1 ( ` id ` UInt64 , ` Data ` String ) ENGINE = ReplicatedMergeTree ( \ ' / clickhouse / tables / replicated_table_for_alter \ ' , \ ' 1 \ ' ) ORDER BY id SETTINGS index_granularity = 8192 <nl> + CREATE TABLE default . replicated_table_for_alter1 ( ` id ` UInt64 , ` Data ` String ) ENGINE = ReplicatedMergeTree ( \ ' / clickhouse / tables / replicated_table_for_alter \ ' , \ ' 1 \ ' ) ORDER BY id SETTINGS index_granularity = 8192 <nl> + 4 <nl> + 4 <nl> + 4 <nl> + 4 <nl> + 6 <nl> + 6 <nl> + CREATE TABLE default . replicated_table_for_alter1 ( ` id ` UInt64 , ` Data ` String ) ENGINE = ReplicatedMergeTree ( \ ' / clickhouse / tables / replicated_table_for_alter \ ' , \ ' 1 \ ' ) ORDER BY id SETTINGS index_granularity = 8192 , use_minimalistic_part_header_in_zookeeper = 1 <nl> + CREATE TABLE default . replicated_table_for_alter2 ( ` id ` UInt64 , ` Data ` String ) ENGINE = ReplicatedMergeTree ( \ ' / clickhouse / tables / replicated_table_for_alter \ ' , \ ' 2 \ ' ) ORDER BY id SETTINGS index_granularity = 8192 , parts_to_throw_insert = 1 , parts_to_delay_insert = 1 <nl> new file mode 100644 <nl> index 00000000000 . . 792a704b6a1 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00980_zookeeper_merge_tree_alter_settings . 
sql <nl> <nl> + DROP TABLE IF EXISTS replicated_table_for_alter1 ; <nl> + DROP TABLE IF EXISTS replicated_table_for_alter2 ; <nl> + <nl> + CREATE TABLE replicated_table_for_alter1 ( <nl> + id UInt64 , <nl> + Data String <nl> + ) ENGINE = ReplicatedMergeTree ( ' / clickhouse / tables / replicated_table_for_alter ' , ' 1 ' ) ORDER BY id ; <nl> + <nl> + CREATE TABLE replicated_table_for_alter2 ( <nl> + id UInt64 , <nl> + Data String <nl> + ) ENGINE = ReplicatedMergeTree ( ' / clickhouse / tables / replicated_table_for_alter ' , ' 2 ' ) ORDER BY id ; <nl> + <nl> + SHOW CREATE TABLE replicated_table_for_alter1 ; <nl> + <nl> + ALTER TABLE replicated_table_for_alter1 MODIFY SETTING index_granularity = 4096 ; - - { serverError 472 } <nl> + <nl> + SHOW CREATE TABLE replicated_table_for_alter1 ; <nl> + <nl> + INSERT INTO replicated_table_for_alter2 VALUES ( 1 , ' 1 ' ) , ( 2 , ' 2 ' ) ; <nl> + <nl> + SYSTEM SYNC REPLICA replicated_table_for_alter1 ; <nl> + <nl> + ALTER TABLE replicated_table_for_alter1 MODIFY SETTING use_minimalistic_part_header_in_zookeeper = 1 ; <nl> + <nl> + INSERT INTO replicated_table_for_alter1 VALUES ( 3 , ' 3 ' ) , ( 4 , ' 4 ' ) ; <nl> + <nl> + SYSTEM SYNC REPLICA replicated_table_for_alter2 ; <nl> + <nl> + SELECT COUNT ( ) FROM replicated_table_for_alter1 ; <nl> + SELECT COUNT ( ) FROM replicated_table_for_alter2 ; <nl> + <nl> + DETACH TABLE replicated_table_for_alter2 ; <nl> + ATTACH TABLE replicated_table_for_alter2 ; <nl> + <nl> + DETACH TABLE replicated_table_for_alter1 ; <nl> + ATTACH TABLE replicated_table_for_alter1 ; <nl> + <nl> + SELECT COUNT ( ) FROM replicated_table_for_alter1 ; <nl> + SELECT COUNT ( ) FROM replicated_table_for_alter2 ; <nl> + <nl> + ALTER TABLE replicated_table_for_alter2 MODIFY SETTING parts_to_throw_insert = 1 , parts_to_delay_insert = 1 ; <nl> + INSERT INTO replicated_table_for_alter2 VALUES ( 3 , ' 1 ' ) , ( 4 , ' 2 ' ) ; - - { serverError 252 } <nl> + <nl> + INSERT INTO replicated_table_for_alter1 VALUES ( 5 , ' 5 ' ) , ( 6 , ' 6 ' ) ; <nl> + <nl> + SYSTEM SYNC REPLICA replicated_table_for_alter2 ; <nl> + <nl> + SELECT COUNT ( ) FROM replicated_table_for_alter1 ; <nl> + SELECT COUNT ( ) FROM replicated_table_for_alter2 ; <nl> + <nl> + DETACH TABLE replicated_table_for_alter2 ; <nl> + ATTACH TABLE replicated_table_for_alter2 ; <nl> + <nl> + DETACH TABLE replicated_table_for_alter1 ; <nl> + ATTACH TABLE replicated_table_for_alter1 ; <nl> + <nl> + SHOW CREATE TABLE replicated_table_for_alter1 ; <nl> + SHOW CREATE TABLE replicated_table_for_alter2 ; <nl> + <nl> + DROP TABLE IF EXISTS replicated_table_for_alter2 ; <nl> + DROP TABLE IF EXISTS replicated_table_for_alter1 ; <nl>
Merge pull request from yandex / merge_tree_settings_alter
ClickHouse/ClickHouse
19c3cf4aa8c35ddf8ce80e6a738b6115f0acf3cc
2019-08-26T13:05:59Z
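The ClickHouse change above threads an immutable settings snapshot (getCOWSettings()) through every reader so that ALTER TABLE ... MODIFY SETTING can swap the settings object without taking the table structure lock. Below is a minimal standalone sketch of that copy-on-write access pattern, assuming a simplified Settings struct published through std::shared_ptr; it is not ClickHouse code and the names (Storage, getSettingsSnapshot, alterPartsToThrowInsert) are illustrative only.

#include <cstdint>
#include <iostream>
#include <memory>

struct Settings
{
    uint64_t index_granularity = 8192;
    uint64_t parts_to_throw_insert = 300;
};

class Storage
{
public:
    // Readers take one snapshot per operation; a concurrent ALTER cannot
    // change values underneath them, mirroring how the diff captures
    // storage_settings once and then uses it consistently.
    std::shared_ptr<const Settings> getSettingsSnapshot() const
    {
        return std::atomic_load(&settings);
    }

    // Writer path for ALTER ... MODIFY SETTING: clone the current settings,
    // mutate the clone, publish it atomically. Existing snapshots keep the
    // old values until their operation finishes.
    void alterPartsToThrowInsert(uint64_t new_value)
    {
        auto copy = std::make_shared<Settings>(*std::atomic_load(&settings));
        copy->parts_to_throw_insert = new_value;
        std::atomic_store(&settings, std::shared_ptr<const Settings>(std::move(copy)));
    }

private:
    std::shared_ptr<const Settings> settings = std::make_shared<Settings>();
};

int main()
{
    Storage storage;
    auto snapshot = storage.getSettingsSnapshot();
    storage.alterPartsToThrowInsert(100);                                  // concurrent ALTER
    std::cout << snapshot->parts_to_throw_insert << " "                    // old snapshot still sees 300
              << storage.getSettingsSnapshot()->parts_to_throw_insert      // new snapshot sees 100
              << std::endl;
    return 0;
}

The real patch uses a dedicated COW pointer type (MergeTreeSettingsPtr with clone()); the shared_ptr variant here only illustrates why readers such as ReplicatedMergeTreeCleanupThread::run() first grab storage_settings and then read every field from that one snapshot.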
mmm a / test / integration / BUILD <nl> ppp b / test / integration / BUILD <nl> envoy_cc_test ( <nl> ] , <nl> ) <nl> <nl> + envoy_cc_test ( <nl> + name = " http_timeout_integration_test " , <nl> + srcs = [ <nl> + " http_timeout_integration_test . cc " , <nl> + " http_timeout_integration_test . h " , <nl> + ] , <nl> + deps = [ <nl> + " : http_integration_lib " , <nl> + ] , <nl> + ) <nl> + <nl> envoy_cc_test ( <nl> name = " protocol_integration_test " , <nl> srcs = [ <nl> new file mode 100644 <nl> index 00000000000 . . 00b0f4c65ea <nl> mmm / dev / null <nl> ppp b / test / integration / http_timeout_integration_test . cc <nl> <nl> + # include " test / integration / http_timeout_integration_test . h " <nl> + <nl> + # include " gtest / gtest . h " <nl> + <nl> + namespace Envoy { <nl> + <nl> + INSTANTIATE_TEST_SUITE_P ( IpVersions , HttpTimeoutIntegrationTest , <nl> + testing : : ValuesIn ( TestEnvironment : : getIpVersionsForTest ( ) ) , <nl> + TestUtility : : ipTestParamsToString ) ; <nl> + <nl> + / / Sends a request with a global timeout specified , sleeps for longer than the <nl> + / / timeout , and ensures that a timeout is received . <nl> + TEST_P ( HttpTimeoutIntegrationTest , GlobalTimeout ) { <nl> + initialize ( ) ; <nl> + <nl> + codec_client_ = makeHttpConnection ( makeClientConnection ( lookupPort ( " http " ) ) ) ; <nl> + auto encoder_decoder = codec_client_ - > startRequest ( <nl> + Http : : TestHeaderMapImpl { { " : method " , " POST " } , <nl> + { " : path " , " / test / long / url " } , <nl> + { " : scheme " , " http " } , <nl> + { " : authority " , " host " } , <nl> + { " x - forwarded - for " , " 10 . 0 . 0 . 1 " } , <nl> + { " x - envoy - upstream - rq - timeout - ms " , " 500 " } } ) ; <nl> + auto response = std : : move ( encoder_decoder . second ) ; <nl> + request_encoder_ = & encoder_decoder . first ; <nl> + <nl> + ASSERT_TRUE ( fake_upstreams_ [ 0 ] - > waitForHttpConnection ( * dispatcher_ , fake_upstream_connection_ ) ) ; <nl> + ASSERT_TRUE ( fake_upstream_connection_ - > waitForNewStream ( * dispatcher_ , upstream_request_ ) ) ; <nl> + ASSERT_TRUE ( upstream_request_ - > waitForHeadersComplete ( ) ) ; <nl> + codec_client_ - > sendData ( * request_encoder_ , 0 , true ) ; <nl> + <nl> + ASSERT_TRUE ( upstream_request_ - > waitForEndStream ( * dispatcher_ ) ) ; <nl> + <nl> + / / Trigger global timeout . <nl> + timeSystem ( ) . sleep ( std : : chrono : : milliseconds ( 501 ) ) ; <nl> + <nl> + / / Ensure we got a timeout downstream and canceled the upstream request . <nl> + response - > waitForHeaders ( ) ; <nl> + ASSERT_TRUE ( upstream_request_ - > waitForReset ( std : : chrono : : milliseconds ( 0 ) ) ) ; <nl> + <nl> + codec_client_ - > close ( ) ; <nl> + <nl> + EXPECT_TRUE ( upstream_request_ - > complete ( ) ) ; <nl> + EXPECT_EQ ( 0U , upstream_request_ - > bodyLength ( ) ) ; <nl> + <nl> + EXPECT_TRUE ( response - > complete ( ) ) ; <nl> + EXPECT_EQ ( " 504 " , response - > headers ( ) . Status ( ) - > value ( ) . getStringView ( ) ) ; <nl> + } <nl> + <nl> + / / Sends a request with a global timeout and per try timeout specified , sleeps <nl> + / / for longer than the per try but slightly less than the global timeout . <nl> + / / Ensures that two requests are attempted and a timeout is returned <nl> + / / downstream . 
<nl> + TEST_P ( HttpTimeoutIntegrationTest , PerTryTimeout ) { <nl> + initialize ( ) ; <nl> + <nl> + codec_client_ = makeHttpConnection ( makeClientConnection ( lookupPort ( " http " ) ) ) ; <nl> + auto encoder_decoder = codec_client_ - > startRequest ( <nl> + Http : : TestHeaderMapImpl { { " : method " , " POST " } , <nl> + { " : path " , " / test / long / url " } , <nl> + { " : scheme " , " http " } , <nl> + { " : authority " , " host " } , <nl> + { " x - forwarded - for " , " 10 . 0 . 0 . 1 " } , <nl> + { " x - envoy - retry - on " , " 5xx " } , <nl> + { " x - envoy - upstream - rq - timeout - ms " , " 500 " } , <nl> + { " x - envoy - upstream - rq - per - try - timeout - ms " , " 400 " } } ) ; <nl> + auto response = std : : move ( encoder_decoder . second ) ; <nl> + request_encoder_ = & encoder_decoder . first ; <nl> + <nl> + ASSERT_TRUE ( fake_upstreams_ [ 0 ] - > waitForHttpConnection ( * dispatcher_ , fake_upstream_connection_ ) ) ; <nl> + ASSERT_TRUE ( fake_upstream_connection_ - > waitForNewStream ( * dispatcher_ , upstream_request_ ) ) ; <nl> + ASSERT_TRUE ( upstream_request_ - > waitForHeadersComplete ( ) ) ; <nl> + codec_client_ - > sendData ( * request_encoder_ , 0 , true ) ; <nl> + <nl> + ASSERT_TRUE ( upstream_request_ - > waitForEndStream ( * dispatcher_ ) ) ; <nl> + <nl> + / / Trigger per try timeout ( but not global timeout ) . <nl> + timeSystem ( ) . sleep ( std : : chrono : : milliseconds ( 400 ) ) ; <nl> + <nl> + / / Wait for a second request to be sent upstream <nl> + ASSERT_TRUE ( fake_upstream_connection_ - > waitForNewStream ( * dispatcher_ , upstream_request_ ) ) ; <nl> + ASSERT_TRUE ( upstream_request_ - > waitForHeadersComplete ( ) ) ; <nl> + ASSERT_TRUE ( upstream_request_ - > waitForEndStream ( * dispatcher_ ) ) ; <nl> + <nl> + / / Trigger global timeout . <nl> + timeSystem ( ) . sleep ( std : : chrono : : milliseconds ( 100 ) ) ; <nl> + response - > waitForHeaders ( ) ; <nl> + <nl> + codec_client_ - > close ( ) ; <nl> + <nl> + EXPECT_TRUE ( upstream_request_ - > complete ( ) ) ; <nl> + EXPECT_EQ ( 0U , upstream_request_ - > bodyLength ( ) ) ; <nl> + <nl> + EXPECT_TRUE ( response - > complete ( ) ) ; <nl> + EXPECT_EQ ( " 504 " , response - > headers ( ) . Status ( ) - > value ( ) . getStringView ( ) ) ; <nl> + } <nl> + <nl> + } / / namespace Envoy <nl> new file mode 100644 <nl> index 00000000000 . . fd378f4ce7f <nl> mmm / dev / null <nl> ppp b / test / integration / http_timeout_integration_test . h <nl> <nl> + # pragma once <nl> + <nl> + # include " test / integration / http_integration . h " <nl> + <nl> + # include " gtest / gtest . h " <nl> + <nl> + namespace Envoy { <nl> + class HttpTimeoutIntegrationTest : public testing : : TestWithParam < Network : : Address : : IpVersion > , <nl> + public Event : : TestUsingSimulatedTime , <nl> + public HttpIntegrationTest { <nl> + public : <nl> + / / Arbitrarily choose HTTP2 here , the tests for this class are around <nl> + / / timeouts which don ' t have version specific behavior . <nl> + HttpTimeoutIntegrationTest ( ) : HttpIntegrationTest ( Http : : CodecClient : : Type : : HTTP2 , GetParam ( ) ) { } <nl> + <nl> + void SetUp ( ) override { <nl> + setDownstreamProtocol ( Http : : CodecClient : : Type : : HTTP2 ) ; <nl> + setUpstreamProtocol ( FakeHttpConnection : : Type : : HTTP2 ) ; <nl> + } <nl> + } ; <nl> + <nl> + } / / namespace Envoy <nl>
add HTTP integration tests exercising timeouts
envoyproxy/envoy
504e15f1017466eaf4822875a3eb35b17e78005f
2019-04-17T19:14:13Z
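The test above relies on Envoy's simulated time system (Event::TestUsingSimulatedTime together with timeSystem().sleep(...)) so the 500 ms global and 400 ms per-try timeouts fire deterministically without real waiting. As a rough standalone illustration of that idea, not Envoy's actual test framework, here is a hypothetical injectable clock driving a timeout check; SimulatedClock and RequestTimeout are made-up names.

#include <cassert>
#include <chrono>

class SimulatedClock
{
public:
    std::chrono::milliseconds now() const { return now_; }
    void sleep(std::chrono::milliseconds d) { now_ += d; }   // advances instantly, no real wait

private:
    std::chrono::milliseconds now_{0};
};

class RequestTimeout
{
public:
    RequestTimeout(const SimulatedClock& clock, std::chrono::milliseconds timeout)
        : clock_(clock), deadline_(clock.now() + timeout) {}

    bool expired() const { return clock_.now() >= deadline_; }

private:
    const SimulatedClock& clock_;
    std::chrono::milliseconds deadline_;
};

int main()
{
    SimulatedClock clock;
    RequestTimeout global_timeout(clock, std::chrono::milliseconds(500));

    clock.sleep(std::chrono::milliseconds(400));
    assert(!global_timeout.expired());   // only a 400 ms per-try window would have fired by now

    clock.sleep(std::chrono::milliseconds(101));
    assert(global_timeout.expired());    // 501 ms total, like the GlobalTimeout test above
    return 0;
}

Because every timer reads the same injectable clock, the test can advance past 400 ms to provoke a retry and then past 500 ms to provoke the 504, with no flakiness from wall-clock scheduling.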
mmm a / android / sdk / src / main / java / com / taobao / weex / dom / WXTextDomObject . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / dom / WXTextDomObject . java <nl> public void measure ( CSSNode node , float width , MeasureOutput measureOutput ) { <nl> . isBoring ( text , textPaint ) ; <nl> float desiredWidth = boring = = null ? Layout . getDesiredWidth ( text , <nl> textPaint ) : Float . NaN ; <nl> - <nl> + if ( CSSConstants . isUndefined ( width ) ) { <nl> + width = node . cssstyle . maxWidth ; <nl> + } <nl> if ( boring = = null <nl> & & ( CSSConstants . isUndefined ( width ) | | ( ! CSSConstants <nl> . isUndefined ( desiredWidth ) & & desiredWidth < = width ) ) ) { <nl>
Fetch max - width from CSSStyle if width is undefined .
apache/incubator-weex
b9d1d3b6ee4f52085b13ddddc2797fe3cab6261a
2016-04-14T10:06:43Z
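The one-line fix above falls back to the node's max-width when the incoming width constraint is undefined, so single-line text measurement still respects max-width. A small hedged sketch of that fallback logic, written in C++ for consistency with the other examples (the original is Java; CssStyle, isUndefined and the concrete values are illustrative):

#include <cmath>
#include <iostream>

struct CssStyle { float max_width = 250.0f; };

bool isUndefined(float v) { return std::isnan(v); }

float effectiveWidthConstraint(float width, const CssStyle& style)
{
    // Before the fix, an undefined width made the "fits on one line" branch
    // win even when the desired text width exceeded max-width.
    if (isUndefined(width))
        return style.max_width;
    return width;
}

int main()
{
    CssStyle style;
    float desired_width = 400.0f;
    float width = std::nanf("");                        // width not supplied by the parent layout
    float constraint = effectiveWidthConstraint(width, style);
    std::cout << (desired_width <= constraint ? "single line" : "wrap") << std::endl;  // prints "wrap"
    return 0;
}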
mmm a / README . md <nl> ppp b / README . md <nl> <nl> <nl> This work is maintained by a community of hundreds of people and is a _massive collaborative effort_ to bring the readily available coding knowledge * * offline * * . <nl> <nl> - > * * Many coders ask me how to improve their own performances . I cannot say anything except " solve and review and prepare your library * * - _Uwi Tenpen_ <nl> + > * * Many coders ask me how to improve their own performances . I cannot say anything except " solve and review and prepare your library " * * - _Uwi Tenpen_ <nl> <nl> # Cosmic Structure <nl> <nl>
Merge pull request from JSewell - Git / readme - update
OpenGenus/cosmos
c4aac08862c33cac2eae031602e708ca7d52a680
2017-10-20T18:45:50Z
mmm a / toolsrc / src / vcpkg / base / cofffilereader . cpp <nl> ppp b / toolsrc / src / vcpkg / base / cofffilereader . cpp <nl> namespace vcpkg : : CoffFileReader <nl> { <nl> void set_to_offset ( const fpos_t position ) { this - > m_absolute_position = position ; } <nl> <nl> - void set_to_current_pos ( fstream & fs ) { this - > m_absolute_position = fs . tellg ( ) . seekpos ( ) ; } <nl> + void set_to_current_pos ( fstream & fs ) { this - > m_absolute_position = fs . tellg ( ) ; } <nl> <nl> void seek_to_marker ( fstream & fs ) const { fs . seekg ( this - > m_absolute_position , ios_base : : beg ) ; } <nl> <nl>
Merge pull request from BillyONeal / master
microsoft/vcpkg
7a1003f2ce11de88e261dcfa84d2f177a0d8d8ca
2018-04-12T22:34:08Z
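The vcpkg change above replaces fs.tellg().seekpos(), which depended on a non-standard member of the stream position type, with plain fs.tellg(). A minimal, generic sketch of saving and restoring a stream position with tellg()/seekg(), using a string stream for self-containment rather than the actual CoffFileReader code:

#include <cassert>
#include <sstream>
#include <string>

int main()
{
    std::istringstream fs("0123456789");

    std::string first4(4, '\0');
    fs.read(&first4[0], 4);

    std::streampos marker = fs.tellg();      // remember the current offset (4), no seekpos() needed
    std::string next2(2, '\0');
    fs.read(&next2[0], 2);

    fs.seekg(marker);                        // rewind to the saved marker
    std::string again(2, '\0');
    fs.read(&again[0], 2);

    assert(next2 == again && next2 == "45"); // both reads start from the same position
    return 0;
}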
mmm a / DEPS <nl> ppp b / DEPS <nl> vars = { <nl> <nl> deps = { <nl> ' v8 / build ' : <nl> - Var ( ' chromium_url ' ) + ' / chromium / src / build . git ' + ' @ ' + ' 0a96846217ec16e8c83ab609457a0da6ac2b10af ' , <nl> + Var ( ' chromium_url ' ) + ' / chromium / src / build . git ' + ' @ ' + ' 69e327091ad13d761bfd91ccab6d06920308eaf8 ' , <nl> ' v8 / third_party / depot_tools ' : <nl> - Var ( ' chromium_url ' ) + ' / chromium / tools / depot_tools . git ' + ' @ ' + ' 0081c0ff173c3af29933cbc6965d4a2d83b09576 ' , <nl> + Var ( ' chromium_url ' ) + ' / chromium / tools / depot_tools . git ' + ' @ ' + ' a19d35307bbed0ad41cad4e5b06ff052c43fc631 ' , <nl> ' v8 / third_party / icu ' : <nl> Var ( ' chromium_url ' ) + ' / chromium / deps / icu . git ' + ' @ ' + ' 2ecd66c696c46e255cd634d3175b1072f4499949 ' , <nl> ' v8 / third_party / instrumented_libraries ' : <nl>
Update V8 DEPS.
v8/v8
d69f4a2746f3b83b1c29db6a4a8104eace19fd08
2019-10-01T03:34:09Z
mmm a / tensorflow / lite / experimental / support / codegen / android_java_generator . cc <nl> ppp b / tensorflow / lite / experimental / support / codegen / android_java_generator . cc <nl> TensorInfo CreateTensorInfo ( const TensorMetadata * metadata , <nl> tensor_info . upper_camel_name [ 0 ] = toupper ( tensor_info . upper_camel_name [ 0 ] ) ; <nl> tensor_info . normalization_unit = <nl> FindNormalizationUnit ( metadata , tensor_identifier , err ) ; <nl> - if ( metadata - > content ( ) - > content_properties_type ( ) = = <nl> - ContentProperties_ImageProperties ) { <nl> - if ( metadata - > content ( ) <nl> - - > content_properties_as_ImageProperties ( ) <nl> - - > color_space ( ) = = ColorSpaceType_RGB ) { <nl> - tensor_info . content_type = " image " ; <nl> - tensor_info . wrapper_type = " TensorImage " ; <nl> - tensor_info . processor_type = " ImageProcessor " ; <nl> - return tensor_info ; <nl> - } else { <nl> - err - > Warning ( <nl> - " Found Non - RGB image on tensor ( % s ) . Codegen currently does not " <nl> - " support it , and regard it as a plain numeric tensor . " , <nl> - tensor_identifier . c_str ( ) ) ; <nl> + if ( metadata - > content ( ) ! = nullptr & & <nl> + metadata - > content ( ) - > content_properties ( ) ! = nullptr ) { <nl> + / / Enter tensor wrapper type inferring <nl> + if ( metadata - > content ( ) - > content_properties_type ( ) = = <nl> + ContentProperties_ImageProperties ) { <nl> + if ( metadata - > content ( ) <nl> + - > content_properties_as_ImageProperties ( ) <nl> + - > color_space ( ) = = ColorSpaceType_RGB ) { <nl> + tensor_info . content_type = " image " ; <nl> + tensor_info . wrapper_type = " TensorImage " ; <nl> + tensor_info . processor_type = " ImageProcessor " ; <nl> + return tensor_info ; <nl> + } else { <nl> + err - > Warning ( <nl> + " Found Non - RGB image on tensor ( % s ) . Codegen currently does not " <nl> + " support it , and regard it as a plain numeric tensor . " , <nl> + tensor_identifier . c_str ( ) ) ; <nl> + } <nl> } <nl> } <nl> tensor_info . content_type = " tensor " ; <nl> ModelInfo CreateModelInfo ( const ModelMetadata * metadata , <nl> graph - > input_tensor_metadata ( ) , graph - > output_tensor_metadata ( ) ) ; <nl> std : : vector < std : : string > input_tensor_names = std : : move ( names . first ) ; <nl> std : : vector < std : : string > output_tensor_names = std : : move ( names . second ) ; <nl> - for ( int i = 0 ; i < graph - > input_tensor_metadata ( ) - > size ( ) ; i + + ) { <nl> + for ( int i = 0 ; i < input_tensor_names . size ( ) ; i + + ) { <nl> model_info . inputs . push_back ( <nl> CreateTensorInfo ( graph - > input_tensor_metadata ( ) - > Get ( i ) , <nl> input_tensor_names [ i ] , true , i , err ) ) ; <nl> } <nl> - for ( int i = 0 ; i < graph - > output_tensor_metadata ( ) - > size ( ) ; i + + ) { <nl> + for ( int i = 0 ; i < output_tensor_names . size ( ) ; i + + ) { <nl> model_info . outputs . push_back ( <nl> CreateTensorInfo ( graph - > output_tensor_metadata ( ) - > Get ( i ) , <nl> output_tensor_names [ i ] , false , i , err ) ) ; <nl> GenerationResult AndroidJavaGenerator : : Generate ( <nl> const Model * model , const std : : string & package_name , <nl> const std : : string & model_class_name , const std : : string & model_asset_path ) { <nl> GenerationResult result ; <nl> + if ( model = = nullptr ) { <nl> + err_ . Error ( <nl> + " Cannot read model from the buffer . Codegen will generate nothing . 
" ) ; <nl> + return result ; <nl> + } <nl> const ModelMetadata * metadata = GetMetadataFromModel ( model ) ; <nl> if ( metadata = = nullptr ) { <nl> err_ . Error ( <nl> mmm a / tensorflow / lite / experimental / support / codegen / metadata_helper . cc <nl> ppp b / tensorflow / lite / experimental / support / codegen / metadata_helper . cc <nl> namespace codegen { <nl> <nl> constexpr char BUFFER_KEY [ ] = " TFLITE_METADATA " ; <nl> const ModelMetadata * GetMetadataFromModel ( const Model * model ) { <nl> - if ( model - > metadata ( ) = = nullptr ) { <nl> + if ( model = = nullptr | | model - > metadata ( ) = = nullptr ) { <nl> return nullptr ; <nl> } <nl> for ( auto i = 0 ; i < model - > metadata ( ) - > size ( ) ; i + + ) { <nl> - if ( model - > metadata ( ) - > Get ( i ) - > name ( ) - > str ( ) = = BUFFER_KEY ) { <nl> + const auto * name = model - > metadata ( ) - > Get ( i ) - > name ( ) ; <nl> + if ( name ! = nullptr & & name - > str ( ) = = BUFFER_KEY ) { <nl> const auto buffer_index = model - > metadata ( ) - > Get ( i ) - > buffer ( ) ; <nl> - const auto * buffer = model - > buffers ( ) - > Get ( buffer_index ) - > data ( ) - > data ( ) ; <nl> - return GetModelMetadata ( buffer ) ; <nl> + if ( model - > buffers ( ) = = nullptr | | <nl> + model - > buffers ( ) - > size ( ) < = buffer_index ) { <nl> + continue ; <nl> + } <nl> + const auto * buffer_vec = model - > buffers ( ) - > Get ( buffer_index ) - > data ( ) ; <nl> + if ( buffer_vec = = nullptr | | buffer_vec - > data ( ) = = nullptr ) { <nl> + continue ; <nl> + } <nl> + return GetModelMetadata ( buffer_vec - > data ( ) ) ; <nl> } <nl> } <nl> return nullptr ; <nl>
[tfls.codegen] Fix potential nullptr seg fault.
tensorflow/tensorflow
d3c76ae3af38fdcaa88bedb9c73c1ab9bf3bd4f5
2020-04-15T00:13:31Z
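The TensorFlow Lite codegen fix checks every link in a chain of possibly-null pointers (model, metadata entry name, buffer index, buffer data) before dereferencing, and sizes its loops by the tensor-name vectors it actually indexes. A generic sketch of the same defensive pattern over plain structs, not the FlatBuffers API:

```cpp
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Simplified stand-ins for nested, possibly-null metadata objects.
struct Buffer { const std::vector<unsigned char>* data = nullptr; };
struct MetadataEntry { const std::string* name = nullptr; std::size_t buffer_index = 0; };
struct Model {
    const std::vector<MetadataEntry>* metadata = nullptr;
    const std::vector<Buffer>* buffers = nullptr;
};

// Returns the payload of the entry named `key`, or nullptr if any link in the
// chain is missing, mirroring the "check before dereference" style of the fix.
const std::vector<unsigned char>* FindMetadata(const Model* model, const std::string& key) {
    if (model == nullptr || model->metadata == nullptr) return nullptr;
    for (const MetadataEntry& entry : *model->metadata) {
        if (entry.name == nullptr || *entry.name != key) continue;
        if (model->buffers == nullptr || entry.buffer_index >= model->buffers->size()) continue;
        const Buffer& buf = (*model->buffers)[entry.buffer_index];
        if (buf.data == nullptr) continue;
        return buf.data;
    }
    return nullptr;
}

int main() {
    Model empty;  // everything null: the lookup degrades gracefully instead of crashing
    std::cout << (FindMetadata(&empty, "TFLITE_METADATA") == nullptr) << "\n";
}
```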
mmm a / Marlin / Marlin_main . cpp <nl> ppp b / Marlin / Marlin_main . cpp <nl> static void clean_up_after_endstop_or_probe_move ( ) { <nl> <nl> float z_dest = LOGICAL_Z_POSITION ( z_raise ) ; <nl> if ( zprobe_zoffset < 0 ) z_dest - = zprobe_zoffset ; <nl> + # if ENABLED ( DELTA ) <nl> + z_dest - = home_offset [ Z_AXIS ] ; <nl> + # endif <nl> <nl> if ( z_dest > current_position [ Z_AXIS ] ) <nl> do_blocking_move_to_z ( z_dest ) ; <nl> static void clean_up_after_endstop_or_probe_move ( ) { <nl> / / move down quickly before doing the slow probe <nl> float z = LOGICAL_Z_POSITION ( Z_CLEARANCE_BETWEEN_PROBES ) ; <nl> if ( zprobe_zoffset < 0 ) z - = zprobe_zoffset ; <nl> + # if ENABLED ( DELTA ) <nl> + z - = home_offset [ Z_AXIS ] ; <nl> + # endif <nl> if ( z < current_position [ Z_AXIS ] ) <nl> do_blocking_move_to_z ( z , MMM_TO_MMS ( Z_PROBE_SPEED_FAST ) ) ; <nl> <nl>
Delta probe height bug solved
MarlinFirmware/Marlin
244f67590ee9ebf6e6a7e6abaafaa2cd233e0c1d
2017-04-21T23:23:56Z
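The Marlin patch subtracts `home_offset[Z_AXIS]` from the probe raise/clearance target on delta machines so the destination is compared against `current_position` in the same frame. A compressed sketch that mirrors just that arithmetic; the constants and the delta flag below are made up for illustration and are not the firmware's configuration:

```cpp
#include <iostream>

// Illustrative stand-ins for firmware state.
constexpr bool  kIsDelta      = true;
constexpr float kZProbeOffset = -0.5f;  // as in the original code, a negative offset raises the target
constexpr float kHomeOffsetZ  = 1.2f;

// Compute the Z the carriage should move to before probing.
float probe_raise_destination(float requested_raise) {
    float z_dest = requested_raise;
    if (kZProbeOffset < 0) z_dest -= kZProbeOffset;  // subtracting a negative value raises z_dest
    if (kIsDelta) z_dest -= kHomeOffsetZ;            // the fix: account for the delta home offset
    return z_dest;
}

int main() {
    float current_z = 3.0f;
    float dest = probe_raise_destination(5.0f);
    if (dest > current_z) {
        std::cout << "raise to " << dest << "\n";
    }
}
```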
mmm a / . circleci / config . yml <nl> ppp b / . circleci / config . yml <nl> jobs : <nl> - run : rm - rf / home / circleci / project / . git # CircleCI git caching is likely broken <nl> - checkout <nl> - run : ci / do_circle_ci . sh check_format <nl> + - run : ci / do_circle_ci . sh check_repositories <nl> - run : ci / do_circle_ci . sh check_spelling <nl> build_image : <nl> docker : <nl> mmm a / api / bazel / repositories . bzl <nl> ppp b / api / bazel / repositories . bzl <nl> BAZEL_SKYLIB_SHA256 = " b5f6abe419da897b7901f90cbab08af958b97a8f3575b0d3dd062ac7c <nl> GOGOPROTO_RELEASE = " 1 . 1 . 1 " <nl> GOGOPROTO_SHA256 = " 9f8c2ad49849ab063cd9fef67e77d49606640044227ecf7f3617ea2c92ef147c " <nl> <nl> - GOOGLEAPIS_SHA = " d642131a6e6582fc226caf9893cb7fe7885b3411 " # May 23 , 2018 <nl> - PROMETHEUS_SHA = " 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c " # Nov 17 , 2017 <nl> - OPENCENSUS_SHA = " ab82e5fdec8267dc2a726544b10af97675970847 " # May 23 , 2018 <nl> + GOOGLEAPIS_GIT_SHA = " d642131a6e6582fc226caf9893cb7fe7885b3411 " # May 23 , 2018 <nl> + GOOGLEAPIS_SHA = " 16f5b2e8bf1e747a32f9a62e211f8f33c94645492e9bbd72458061d9a9de1f63 " <nl> + <nl> + PROMETHEUS_GIT_SHA = " 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c " # Nov 17 , 2017 <nl> + PROMETHEUS_SHA = " 783bdaf8ee0464b35ec0c8704871e1e72afa0005c3f3587f65d9d6694bf3911b " <nl> + <nl> + OPENCENSUS_GIT_SHA = " ab82e5fdec8267dc2a726544b10af97675970847 " # May 23 , 2018 <nl> + OPENCENSUS_SHA = " 1950f844d9f338ba731897a9bb526f9074c0487b3f274ce2ec3b4feaf0bef7e2 " <nl> <nl> PGV_GIT_SHA = " 30da78c4bcdd477b3c24d13e43cf39361ae3859f " # Sep 27 , 2018 <nl> + PGV_SHA = " 2bc9a34b1c485e73540dc5093d6715ef437294020fa6580f21306aa5e884f511 " <nl> <nl> - load ( " @ bazel_tools / / tools / build_defs / repo : git . bzl " , " git_repository " ) <nl> + load ( " @ bazel_tools / / tools / build_defs / repo : http . bzl " , " http_archive " ) <nl> <nl> def api_dependencies ( ) : <nl> - native . http_archive ( <nl> + http_archive ( <nl> name = " bazel_skylib " , <nl> sha256 = BAZEL_SKYLIB_SHA256 , <nl> strip_prefix = " bazel - skylib - " + BAZEL_SKYLIB_RELEASE , <nl> url = " https : / / github . com / bazelbuild / bazel - skylib / archive / " + BAZEL_SKYLIB_RELEASE + " . tar . gz " , <nl> ) <nl> - git_repository ( <nl> + http_archive ( <nl> name = " com_lyft_protoc_gen_validate " , <nl> - remote = " https : / / github . com / lyft / protoc - gen - validate . git " , <nl> - commit = PGV_GIT_SHA , <nl> + url = " https : / / github . com / lyft / protoc - gen - validate / archive / " + PGV_GIT_SHA + " . tar . gz " , <nl> + sha256 = PGV_SHA , <nl> + strip_prefix = " protoc - gen - validate - " + PGV_GIT_SHA , <nl> ) <nl> - native . new_http_archive ( <nl> + http_archive ( <nl> name = " googleapis " , <nl> - strip_prefix = " googleapis - " + GOOGLEAPIS_SHA , <nl> - url = " https : / / github . com / googleapis / googleapis / archive / " + GOOGLEAPIS_SHA + " . tar . gz " , <nl> + strip_prefix = " googleapis - " + GOOGLEAPIS_GIT_SHA , <nl> + url = " https : / / github . com / googleapis / googleapis / archive / " + GOOGLEAPIS_GIT_SHA + " . tar . gz " , <nl> # TODO ( dio ) : Consider writing a Skylark macro for importing Google API proto . <nl> + sha256 = GOOGLEAPIS_SHA , <nl> build_file_content = " " " <nl> load ( " @ com_google_protobuf / / : protobuf . bzl " , " cc_proto_library " , " py_proto_library " ) <nl> load ( " @ io_bazel_rules_go / / proto : def . bzl " , " go_proto_library " ) <nl> py_proto_library ( <nl> " " " , <nl> ) <nl> <nl> - native . 
new_http_archive ( <nl> + http_archive ( <nl> name = " com_github_gogo_protobuf " , <nl> sha256 = GOGOPROTO_SHA256 , <nl> strip_prefix = " protobuf - " + GOGOPROTO_RELEASE , <nl> py_proto_library ( <nl> " " " , <nl> ) <nl> <nl> - native . new_http_archive ( <nl> + http_archive ( <nl> name = " prometheus_metrics_model " , <nl> - strip_prefix = " client_model - " + PROMETHEUS_SHA , <nl> - url = " https : / / github . com / prometheus / client_model / archive / " + PROMETHEUS_SHA + " . tar . gz " , <nl> + strip_prefix = " client_model - " + PROMETHEUS_GIT_SHA , <nl> + url = " https : / / github . com / prometheus / client_model / archive / " + PROMETHEUS_GIT_SHA + " . tar . gz " , <nl> + sha256 = PROMETHEUS_SHA , <nl> build_file_content = " " " <nl> load ( " @ envoy_api / / bazel : api_build_system . bzl " , " api_proto_library " ) <nl> load ( " @ io_bazel_rules_go / / proto : def . bzl " , " go_proto_library " ) <nl> go_proto_library ( <nl> " " " , <nl> ) <nl> <nl> - native . new_http_archive ( <nl> + http_archive ( <nl> name = " io_opencensus_trace " , <nl> - strip_prefix = " opencensus - proto - " + OPENCENSUS_SHA + " / opencensus / proto / trace " , <nl> - url = " https : / / github . com / census - instrumentation / opencensus - proto / archive / " + OPENCENSUS_SHA + " . tar . gz " , <nl> + strip_prefix = " opencensus - proto - " + OPENCENSUS_GIT_SHA + " / opencensus / proto / trace " , <nl> + url = " https : / / github . com / census - instrumentation / opencensus - proto / archive / " + OPENCENSUS_GIT_SHA + " . tar . gz " , <nl> + sha256 = OPENCENSUS_SHA , <nl> build_file_content = " " " <nl> load ( " @ envoy_api / / bazel : api_build_system . bzl " , " api_proto_library " ) <nl> load ( " @ io_bazel_rules_go / / proto : def . bzl " , " go_proto_library " ) <nl> mmm a / bazel / EXTERNAL_DEPS . md <nl> ppp b / bazel / EXTERNAL_DEPS . md <nl> to point to a local copy . The option can used multiple times to override multipl <nl> The name of the dependency can be found in <nl> [ the repository locations file . ] ( https : / / github . com / envoyproxy / envoy / blob / master / bazel / repository_locations . bzl ) <nl> The path of the local copy has to be absolute path . <nl> + <nl> + # Distdir - prefetching dependencies <nl> + <nl> + Usually Bazel downloads all dependencies during build time . But there is a <nl> + possibility to prefetch dependencies and point Bazel to them by using ` - - distdir ` <nl> + option and providing a path to directory which contains tarballs with exactly <nl> + the same name and the same SHA256 sum that are defined in repositories <nl> + definitions . <nl> + <nl> + For example , let ' s assume that your distdir location is ` $ HOME / envoy_distdir ` . <nl> + To prefetch ` boringssl ` which is defined in ` bazel / repository_locations . bzl ` as : <nl> + <nl> + ` ` ` <nl> + boringssl = dict ( <nl> + # Use commits from branch " chromium - stable - with - bazel " <nl> + sha256 = " d1700e0455f5f918f8a85ff3ce6cd684d05c766200ba6bdb18c77d5dcadc05a1 " , <nl> + strip_prefix = " boringssl - 060e9a583976e73d1ea8b2bfe8b9cab33c62fa17 " , <nl> + # chromium - 70 . 0 . 3538 . 67 <nl> + urls = [ " https : / / github . com / google / boringssl / archive / 060e9a583976e73d1ea8b2bfe8b9cab33c62fa17 . tar . gz " ] , <nl> + ) , <nl> + ` ` ` <nl> + <nl> + ` $ HOME / envoy_distdir ` needs to contain ` 060e9a583976e73d1ea8b2bfe8b9cab33c62fa17 . tar . gz ` <nl> + file . 
<nl> + <nl> + Then Envoy needs to be built with the following command : <nl> + <nl> + ` ` ` <nl> + bazel build - - distdir = $ HOME / envoy_distdir / / source / exe : envoy <nl> + ` ` ` <nl> deleted file mode 100644 <nl> index 8a6d54881cd . . 00000000000 <nl> mmm a / bazel / patched_http_archive . bzl <nl> ppp / dev / null <nl> <nl> - def _patched_http_archive ( ctx ) : <nl> - ctx . download_and_extract ( <nl> - ctx . attr . urls , <nl> - " " , # output <nl> - ctx . attr . sha256 , <nl> - " " , # type <nl> - ctx . attr . strip_prefix , <nl> - ) <nl> - for ii , patch in enumerate ( ctx . attr . patches ) : <nl> - patch_input = " patch - input - % d . patch " % ( ii , ) <nl> - ctx . symlink ( patch , patch_input ) <nl> - patch_result = ctx . execute ( [ " patch " , " - p0 " , " - - input " , patch_input ] ) <nl> - if patch_result . return_code ! = 0 : <nl> - fail ( " Failed to apply patch % r : % s " % ( patch , patch_result . stderr ) ) <nl> - <nl> - patched_http_archive = repository_rule ( <nl> - attrs = { <nl> - " urls " : attr . string_list ( <nl> - mandatory = True , <nl> - allow_empty = False , <nl> - ) , <nl> - " sha256 " : attr . string ( ) , <nl> - " strip_prefix " : attr . string ( ) , <nl> - " patches " : attr . label_list ( <nl> - allow_files = [ " . patch " ] , <nl> - allow_empty = True , <nl> - ) , <nl> - } , <nl> - implementation = _patched_http_archive , <nl> - ) <nl> mmm a / bazel / repositories . bzl <nl> ppp b / bazel / repositories . bzl <nl> <nl> - load ( <nl> - " @ bazel_tools / / tools / build_defs / repo : git . bzl " , <nl> - " git_repository " , <nl> - " new_git_repository " , <nl> - ) <nl> + load ( " @ bazel_tools / / tools / build_defs / repo : http . bzl " , " http_archive " ) <nl> load ( " : genrule_repository . bzl " , " genrule_repository " ) <nl> - load ( " : patched_http_archive . bzl " , " patched_http_archive " ) <nl> load ( " : repository_locations . bzl " , " REPOSITORY_LOCATIONS " ) <nl> load ( " : target_recipes . bzl " , " TARGET_RECIPES " ) <nl> load ( <nl> def _repository_impl ( name , * * kwargs ) : <nl> ( location [ " tag " ] , name ) , <nl> ) <nl> <nl> - if " commit " in location : <nl> - # Git repository at given commit ID . Add a BUILD file if requested . <nl> - if " build_file " in kwargs : <nl> - new_git_repository ( <nl> - name = name , <nl> - remote = location [ " remote " ] , <nl> - commit = location [ " commit " ] , <nl> - * * kwargs <nl> - ) <nl> - else : <nl> - git_repository ( <nl> - name = name , <nl> - remote = location [ " remote " ] , <nl> - commit = location [ " commit " ] , <nl> - * * kwargs <nl> - ) <nl> - else : # HTTP <nl> - # HTTP tarball at a given URL . Add a BUILD file if requested . <nl> - if " build_file " in kwargs : <nl> - native . new_http_archive ( <nl> - name = name , <nl> - urls = location [ " urls " ] , <nl> - sha256 = location [ " sha256 " ] , <nl> - strip_prefix = location [ " strip_prefix " ] , <nl> - * * kwargs <nl> - ) <nl> - else : <nl> - native . http_archive ( <nl> - name = name , <nl> - urls = location [ " urls " ] , <nl> - sha256 = location [ " sha256 " ] , <nl> - strip_prefix = location [ " strip_prefix " ] , <nl> - * * kwargs <nl> - ) <nl> + # HTTP tarball at a given URL . Add a BUILD file if requested . 
<nl> + http_archive ( <nl> + name = name , <nl> + urls = location [ " urls " ] , <nl> + sha256 = location [ " sha256 " ] , <nl> + strip_prefix = location [ " strip_prefix " ] , <nl> + * * kwargs <nl> + ) <nl> <nl> def _build_recipe_repository_impl ( ctxt ) : <nl> # modify the recipes list based on the build context <nl> mmm a / bazel / repository_locations . bzl <nl> ppp b / bazel / repository_locations . bzl <nl> REPOSITORY_LOCATIONS = dict ( <nl> ) , <nl> boringssl = dict ( <nl> # Use commits from branch " chromium - stable - with - bazel " <nl> - commit = " 060e9a583976e73d1ea8b2bfe8b9cab33c62fa17 " , # chromium - 70 . 0 . 3538 . 67 <nl> - remote = " https : / / github . com / google / boringssl " , <nl> + sha256 = " d1700e0455f5f918f8a85ff3ce6cd684d05c766200ba6bdb18c77d5dcadc05a1 " , <nl> + strip_prefix = " boringssl - 060e9a583976e73d1ea8b2bfe8b9cab33c62fa17 " , <nl> + # chromium - 70 . 0 . 3538 . 67 <nl> + urls = [ " https : / / github . com / google / boringssl / archive / 060e9a583976e73d1ea8b2bfe8b9cab33c62fa17 . tar . gz " ] , <nl> ) , <nl> com_google_absl = dict ( <nl> - commit = " 92e07e5590752d6b8e67f7f2f86c6286561e8cea " , # 2018 - 08 - 01 <nl> - remote = " https : / / github . com / abseil / abseil - cpp " , <nl> + sha256 = " 3f24d99cfa1b719ed51e12dae7741e4500a703e2c34bede452b457492d1fe36e " , <nl> + strip_prefix = " abseil - cpp - 92e07e5590752d6b8e67f7f2f86c6286561e8cea " , <nl> + # 2018 - 08 - 01 <nl> + urls = [ " https : / / github . com / abseil / abseil - cpp / archive / 92e07e5590752d6b8e67f7f2f86c6286561e8cea . tar . gz " ] , <nl> ) , <nl> com_github_apache_thrift = dict ( <nl> sha256 = " 7d59ac4fdcb2c58037ebd4a9da5f9a49e3e034bf75b3f26d9fe48ba3d8806e6b " , <nl> REPOSITORY_LOCATIONS = dict ( <nl> urls = [ " https : / / github . com / bombela / backward - cpp / archive / v1 . 4 . tar . gz " ] , <nl> ) , <nl> com_github_circonus_labs_libcircllhist = dict ( <nl> - commit = " 050da53a44dede7bda136b93a9aeef47bd91fa12 " , # 2018 - 07 - 02 <nl> - remote = " https : / / github . com / circonus - labs / libcircllhist " , <nl> + sha256 = " 9949e2864b8ad00ee5c3e9c1c3c01e51b6b68bb442a919652fc66b9776477987 " , <nl> + strip_prefix = " libcircllhist - fd8a14463739d247b414825cc56ca3946792a3b9 " , <nl> + # 2018 - 07 - 02 <nl> + urls = [ " https : / / github . com / circonus - labs / libcircllhist / archive / fd8a14463739d247b414825cc56ca3946792a3b9 . tar . gz " ] , <nl> ) , <nl> com_github_cyan4973_xxhash = dict ( <nl> sha256 = " 19030315f4fc1b4b2cdb9d7a317069a109f90e39d1fe4c9159b7aaa39030eb95 " , <nl> REPOSITORY_LOCATIONS = dict ( <nl> urls = [ " https : / / github . com / gcovr / gcovr / archive / 3 . 3 . tar . gz " ] , <nl> ) , <nl> com_github_google_libprotobuf_mutator = dict ( <nl> - commit = " c3d2faf04a1070b0b852b0efdef81e1a81ba925e " , # 2018 - 03 - 06 <nl> - remote = " https : / / github . com / google / libprotobuf - mutator " , <nl> + sha256 = " 97b3639630040f41c45f45838ab00b78909e6b4cb69c8028e01302bea5b79495 " , <nl> + strip_prefix = " libprotobuf - mutator - c3d2faf04a1070b0b852b0efdef81e1a81ba925e " , <nl> + # 2018 - 03 - 06 <nl> + urls = [ " https : / / github . com / google / libprotobuf - mutator / archive / c3d2faf04a1070b0b852b0efdef81e1a81ba925e . tar . gz " ] , <nl> ) , <nl> com_github_grpc_grpc = dict ( <nl> sha256 = " 013cc34f3c51c0f87e059a12ea203087a7a15dca2e453295345e1d02e2b9634b " , <nl> REPOSITORY_LOCATIONS = dict ( <nl> urls = [ " https : / / github . com / lightstep / lightstep - tracer - cpp / archive / v0 . 8 . 0 . tar . 
gz " ] , <nl> ) , <nl> lightstep_vendored_googleapis = dict ( <nl> + sha256 = " d1ef4f790eeaa805e7b364de05b91f9eed66bd6ae46f1483bbf49c33d86998e5 " , <nl> + strip_prefix = " googleapis - d6f78d948c53f3b400bb46996eb3084359914f9b " , <nl> # From : https : / / github . com / lightstep / lightstep - tracer - cpp / blob / v0 . 8 . 0 / lightstep - tracer - common / third_party / googleapis / README . lightstep - tracer - common # L6 <nl> - commit = " d6f78d948c53f3b400bb46996eb3084359914f9b " , <nl> - remote = " https : / / github . com / google / googleapis " , <nl> + urls = [ " https : / / github . com / googleapis / googleapis / archive / d6f78d948c53f3b400bb46996eb3084359914f9b . tar . gz " ] , <nl> ) , <nl> com_github_google_jwt_verify = dict ( <nl> - commit = " 66792a057ec54e4b75c6a2eeda4e98220bd12a9a " , # 2018 - 08 - 17 <nl> - remote = " https : / / github . com / google / jwt_verify_lib " , <nl> + sha256 = " 499f1e145c19f33031eb8fc6452d5d391b4cecfdeda23e2055386a3b33be4d41 " , <nl> + strip_prefix = " jwt_verify_lib - 66792a057ec54e4b75c6a2eeda4e98220bd12a9a " , <nl> + # 2018 - 08 - 17 <nl> + urls = [ " https : / / github . com / google / jwt_verify_lib / archive / 66792a057ec54e4b75c6a2eeda4e98220bd12a9a . tar . gz " ] , <nl> ) , <nl> com_github_nodejs_http_parser = dict ( <nl> + sha256 = " f742dc5a206958c4d0a6b2c35e3e102afb5683f55f7a7cb1eae024a03f081347 " , <nl> + strip_prefix = " http - parser - 77310eeb839c4251c07184a5db8885a572a08352 " , <nl> # 2018 - 07 - 20 snapshot to pick up : <nl> # A performance fix , nodejs / http - parser PR 422 . <nl> # A bug fix , nodejs / http - parser PR 432 . <nl> # TODO ( brian - pane ) : Upgrade to the next http - parser release once it ' s available <nl> - commit = " 77310eeb839c4251c07184a5db8885a572a08352 " , <nl> - remote = " https : / / github . com / nodejs / http - parser " , <nl> + urls = [ " https : / / github . com / nodejs / http - parser / archive / 77310eeb839c4251c07184a5db8885a572a08352 . tar . gz " ] , <nl> ) , <nl> com_github_pallets_jinja = dict ( <nl> sha256 = " 0d31d3466c313a9ca014a2d904fed18cdac873a5ba1f7b70b8fd8b206cd860d6 " , <nl> REPOSITORY_LOCATIONS = dict ( <nl> urls = [ " https : / / github . com / google / googletest / archive / release - 1 . 8 . 1 . tar . gz " ] , <nl> ) , <nl> com_google_protobuf = dict ( <nl> + sha256 = " 3d610ac90f8fa16e12490088605c248b85fdaf23114ce4b3605cdf81f7823604 " , <nl> + strip_prefix = " protobuf - fa252ec2a54acb24ddc87d48fed1ecfd458445fd " , <nl> # TODO ( htuch ) : Switch back to released versions for protobuf when a release > 3 . 6 . 0 happens <nl> # that includes : <nl> # - https : / / github . com / google / protobuf / commit / f35669b8d3f46f7f1236bd21f14d744bba251e60 <nl> # - https : / / github . com / google / protobuf / commit / 6a4fec616ec4b20f54d5fb530808b855cb664390 <nl> # - https : / / github . com / google / protobuf / commit / fa252ec2a54acb24ddc87d48fed1ecfd458445fd <nl> - commit = " fa252ec2a54acb24ddc87d48fed1ecfd458445fd " , <nl> - remote = " https : / / github . com / google / protobuf " , <nl> + urls = [ " https : / / github . com / protocolbuffers / protobuf / archive / fa252ec2a54acb24ddc87d48fed1ecfd458445fd . tar . gz " ] , <nl> ) , <nl> grpc_httpjson_transcoding = dict ( <nl> - commit = " 05a15e4ecd0244a981fdf0348a76658def62fa9c " , # 2018 - 05 - 30 <nl> - remote = " https : / / github . 
com / grpc - ecosystem / grpc - httpjson - transcoding " , <nl> + sha256 = " 9765764644d74af9a9654f7fb90cf2bc7228014664668719a589a4677967ca09 " , <nl> + strip_prefix = " grpc - httpjson - transcoding - 05a15e4ecd0244a981fdf0348a76658def62fa9c " , <nl> + # 2018 - 05 - 30 <nl> + urls = [ " https : / / github . com / grpc - ecosystem / grpc - httpjson - transcoding / archive / 05a15e4ecd0244a981fdf0348a76658def62fa9c . tar . gz " ] , <nl> ) , <nl> com_github_golang_protobuf = dict ( <nl> # TODO ( sesmith177 ) : Remove this dependency when both : <nl> # 1 . There ' s a release of golang / protobuf that includes <nl> # https : / / github . com / golang / protobuf / commit / 31e0d063dd98c052257e5b69eeb006818133f45c <nl> # 2 . That release is included in rules_go <nl> - commit = " 31e0d063dd98c052257e5b69eeb006818133f45c " , # 2018 - 10 - 03 <nl> - remote = " https : / / github . com / golang / protobuf " , <nl> + sha256 = " 4cbd5303a5cf85791b3c310a50a479027c035d75091bb90c482ba67b0a2cf5b4 " , <nl> + strip_prefix = " protobuf - 31e0d063dd98c052257e5b69eeb006818133f45c " , <nl> + urls = [ " https : / / github . com / golang / protobuf / archive / 31e0d063dd98c052257e5b69eeb006818133f45c . tar . gz " ] , <nl> ) , <nl> io_bazel_rules_go = dict ( <nl> - commit = " 3d966375ff7971d43b863f785f495c7dcd6923da " , # 2018 - 10 - 02 <nl> - remote = " https : / / github . com / bazelbuild / rules_go " , <nl> + sha256 = " d1ad521fbd0997df53161e29df0964468157fc9c6ee16265db37cc6daaf334ef " , <nl> + strip_prefix = " rules_go - 3d966375ff7971d43b863f785f495c7dcd6923da " , <nl> + # 2018 - 10 - 02 <nl> + urls = [ " https : / / github . com / bazelbuild / rules_go / archive / 3d966375ff7971d43b863f785f495c7dcd6923da . tar . gz " ] , <nl> ) , <nl> six_archive = dict ( <nl> sha256 = " 105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a " , <nl> mmm a / ci / do_ci . sh <nl> ppp b / ci / do_ci . sh <nl> <nl> set - e <nl> <nl> build_setup_args = " " <nl> - if [ [ " $ 1 " = = " fix_format " | | " $ 1 " = = " check_format " | | " $ 1 " = = " check_spelling " | | " $ 1 " = = " fix_spelling " ] ] ; then <nl> + if [ [ " $ 1 " = = " fix_format " | | " $ 1 " = = " check_format " | | " $ 1 " = = " check_repositories " | | " $ 1 " = = " check_spelling " | | " $ 1 " = = " fix_spelling " ] ] ; then <nl> build_setup_args = " - nofetch " <nl> fi <nl> <nl> elif [ [ " $ 1 " = = " check_format " ] ] ; then <nl> echo " check_format . . . " <nl> . / tools / check_format . py check <nl> exit 0 <nl> + elif [ [ " $ 1 " = = " check_repositories " ] ] ; then <nl> + cd " $ { ENVOY_SRCDIR } " <nl> + echo " check_repositories . . . " <nl> + . / tools / check_repositories . sh <nl> + exit 0 <nl> elif [ [ " $ 1 " = = " check_spelling " ] ] ; then <nl> cd " $ { ENVOY_SRCDIR } " <nl> echo " check_spelling . . . " <nl> mmm a / support / hooks / pre - push <nl> ppp b / support / hooks / pre - push <nl> do <nl> exit 1 <nl> fi <nl> done <nl> + <nl> + # Check correctness of repositories definitions . <nl> + echo " Checking repositories definitions " <nl> + if ! " $ SCRIPT_DIR " / check_repositories . sh ; then <nl> + exit 1 <nl> + fi <nl> fi <nl> done <nl> <nl> mmm a / tools / BUILD <nl> ppp b / tools / BUILD <nl> exports_files ( [ <nl> " check_format . py " , <nl> " header_order . py " , <nl> " envoy_build_fixer . py " , <nl> + " check_repositories . sh " , <nl> ] ) <nl> <nl> envoy_py_test_binary ( <nl> new file mode 100755 <nl> index 00000000000 . . 2811a598ce5 <nl> mmm / dev / null <nl> ppp b / tools / check_repositories . sh <nl> <nl> + # ! 
/ bin / bash <nl> + <nl> + set - eu <nl> + <nl> + # Check whether any git repositories are defined . <nl> + # Git repository definition contains ` commit ` and ` remote ` fields . <nl> + if grep - nr " commit = \ | remote = " - - include = * . bzl . ; then <nl> + echo " Using git repositories is not allowed . " <nl> + echo " To ensure that all dependencies can be stored offline in distdir , only HTTP repositories are allowed . " <nl> + exit 1 <nl> + fi <nl> + <nl> + # Check whether number of defined ` url = ` or ` urls = ` and ` sha256 = ` kwargs in <nl> + # repository definitions is equal . <nl> + urls_count = $ ( grep - IPnrs " url ( s ) ? = " - - include = * . bzl . | wc - l ) <nl> + sha256sums_count = $ ( grep - nr " sha256 = " - - include = * . bzl . | wc - l ) <nl> + <nl> + if [ [ $ urls_count ! = $ sha256sums_count ] ] ; then <nl> + echo " Found more defined repository URLs than SHA256 sums , which means that there are some repositories without sums . " <nl> + echo " Dependencies without SHA256 sums cannot be stored in distdir . " <nl> + echo " Please ensure that every repository has a SHA256 sum . " <nl> + echo " Repositories are defined in the following files : " <nl> + echo " " <nl> + echo " bazel / repository_locations . bzl " <nl> + echo " api / bazel / repositories . bzl " <nl> + exit 1 <nl> + fi <nl>
bazel: Allow to distdir all dependencies ( )
envoyproxy/envoy
dc0d35b9ed93b4f5d2ad83f6a7e78832504aeeea
2018-10-20T00:35:41Z
mmm a / lib / Sema / CSApply . cpp <nl> ppp b / lib / Sema / CSApply . cpp <nl> namespace { <nl> / / / Diagnose an optional injection that is probably not what the <nl> / / / user wanted , because it comes from a forced downcast . <nl> void diagnoseOptionalInjection ( InjectIntoOptionalExpr * injection ) { <nl> - / / Don ' t diagnose when we ' re injecting into <nl> - auto toOptionalType = cs . getType ( injection ) ; <nl> - <nl> / / Check whether we have a forced downcast . <nl> auto & tc = cs . getTypeChecker ( ) ; <nl> auto * cast = findForcedDowncast ( tc . Context , injection - > getSubExpr ( ) ) ; <nl>
Merge remote-tracking branch 'origin/master' into master-llvm-swift5-transition
apple/swift
df71ec2a1a568c1422b39f8173e33f06c51055da
2018-02-08T02:19:03Z
mmm a / README . md <nl> ppp b / README . md <nl> Karabiner - Elements works fine . <nl> <nl> You can download the latest Karabiner - Elements from https : / / pqrs . org / latest / karabiner - elements - latest . dmg <nl> <nl> - # [ Usage ] ( usage / README . md ) <nl> + # Usage <nl> + <nl> + Detailed usage instructions are available [ here ] ( usage / README . md ) . <nl> <nl> # # Features <nl> <nl>
Explicitly call out usage instructions
pqrs-org/Karabiner-Elements
241d763cf73c6a45bc4b882c0d8dff30c530efe4
2016-11-01T10:19:18Z
mmm a / hphp / hack / src / typing / typing . ml <nl> ppp b / hphp / hack / src / typing / typing . ml <nl> and expr_ <nl> Errors . re_prefixed_non_string p " Non - strings " ; <nl> expr_error env p ( Reason . Rregex p ) ) <nl> | Fun_id x - > <nl> - let env , fty = fun_type_of_id env x [ ] in <nl> + let env , fty , _tyvars = fun_type_of_id env x [ ] in <nl> begin match fty with <nl> | _ , Tfun fty - > check_deprecated ( fst x ) fty ; <nl> | _ - > ( ) <nl> and expr_ <nl> match ty with <nl> | ( r , Tfun ft ) - > <nl> begin <nl> - let env , ft = Phase . localize_ft ~ use_pos : p ~ ety_env env ft in <nl> + let env , ft , _tyvars = Phase . localize_ft ~ use_pos : p ~ ety_env env ft in <nl> let ty = r , Tfun ft in <nl> check_deprecated p ft ; <nl> match ce_visibility with <nl> and expr_ <nl> * ) <nl> let ety_env = <nl> { ( Phase . env_with_self env ) with from_class = Some CIstatic } in <nl> - let env , declared_ft = Phase . localize_ft ~ use_pos : p ~ ety_env env declared_ft in <nl> + let env , declared_ft , _tyvars = Phase . localize_ft ~ use_pos : p ~ ety_env env declared_ft in <nl> List . iter idl ( check_escaping_var env ) ; <nl> ( * Ensure lambda arity is not Fellipsis in strict mode * ) <nl> begin match declared_ft . ft_arity with <nl> and is_abstract_ft fty = match fty with <nl> make_call env ( T . make_typed_expr fpos ( Reason . Rnone , TUtils . tany env ) ( T . Id id ) ) [ ] tel [ ] ty in <nl> ( * For special functions and pseudofunctions with a definition in hhi . * ) <nl> let make_call_special_from_def env id tel ty_ = <nl> - let env , fty = fun_type_of_id env id hl in <nl> + let env , fty , _tyvars = fun_type_of_id env id hl in <nl> let ty = match fty with <nl> | _ , Tfun ft - > ft . ft_ret <nl> | _ - > ( Reason . Rwitness p , ty_ ) in <nl> and is_abstract_ft fty = match fty with <nl> when array_filter = SN . StdlibFunctions . array_filter & & el < > [ ] & & uel = [ ] - > <nl> check_function_in_suspend SN . StdlibFunctions . array_filter ; <nl> ( * dispatch the call to typecheck the arguments * ) <nl> - let env , fty = fun_type_of_id env id hl in <nl> - let env , tel , tuel , res = call ~ expected p env fty el uel in <nl> + let env , fty , tyvars = fun_type_of_id env id hl in <nl> + let env , tel , tuel , res = call ~ tyvars ~ expected p env fty el uel in <nl> ( * but ignore the result and overwrite it with custom return type * ) <nl> let x = List . hd_exn el in <nl> let env , _tx , ty = expr env x in <nl> and is_abstract_ft fty = match fty with <nl> | Id ( ( _ , array_map ) as x ) <nl> when array_map = SN . StdlibFunctions . array_map & & el < > [ ] & & uel = [ ] - > <nl> check_function_in_suspend SN . StdlibFunctions . array_map ; <nl> - let env , fty = fun_type_of_id env x [ ] in <nl> + let env , fty , tyvars = fun_type_of_id env x [ ] in <nl> let env , fty = Env . expand_type env fty in <nl> let env , fty = match fty , el with <nl> | ( ( r_fty , Tfun fty ) , _ : : args ) when args < > [ ] - > <nl> and is_abstract_ft fty = match fty with <nl> build_function env ( fun tr - > <nl> ( r_fty , Tarraykind ( AKvec ( tr ) ) ) ) ) <nl> | _ - > env , fty in <nl> - let env , tel , tuel , ty = call ~ expected p env fty el [ ] in <nl> + let env , tel , tuel , ty = call ~ tyvars ~ expected p env fty el [ ] in <nl> make_call env ( T . make_typed_expr fpos fty ( T . Id x ) ) hl tel tuel ty <nl> ( * Special function ` idx ` * ) <nl> | Id ( ( _ , idx ) as id ) when idx = SN . FB . idx - > <nl> and is_abstract_ft fty = match fty with <nl> | _ - > fty . ft_params , fty . 
ft_ret in <nl> let fty = { fty with ft_params = params ; ft_ret = ret } in <nl> let ety_env = Phase . env_with_self env in <nl> - let env , fty = Phase . localize_ft ~ use_pos : p ~ ety_env env fty in <nl> + let env , fty , tyvars = Phase . localize_ft ~ use_pos : p ~ ety_env env fty in <nl> let tfun = Reason . Rwitness fty . ft_pos , Tfun fty in <nl> - let env , tel , _tuel , ty = call ~ expected p env tfun el [ ] in <nl> + let env , tel , _tuel , ty = call ~ tyvars ~ expected p env tfun el [ ] in <nl> let env , ty = match ty with <nl> | r , Toption ty - > <nl> let env , ty = TUtils . non_null env ty in <nl> and is_abstract_ft fty = match fty with <nl> let fty = check_abstract_parent_meth ( snd m ) p fty in <nl> check_coroutine_call env fty ; <nl> let env , tel , tuel , ty = <nl> - call ~ expected ~ is_expr_statement <nl> + call ~ tyvars : ISet . empty ~ expected ~ is_expr_statement <nl> ~ method_call_info : ( TR . make_call_info ~ receiver_is_self : false <nl> ~ is_static : true ( Reason . Rwitness fpos , TUtils . this_of ( Env . get_self env ) ) ( snd m ) ) <nl> p env fty el uel in <nl> and is_abstract_ft fty = match fty with <nl> begin fun ( env , fty , _ ) - > <nl> let fty = check_abstract_parent_meth ( snd m ) p fty in <nl> check_coroutine_call env fty ; <nl> - let env , _tel , _tuel , method_ = call ~ expected <nl> + let env , _tel , _tuel , method_ = call ~ tyvars : ISet . empty ~ expected <nl> ~ method_call_info : ( TR . make_call_info ~ receiver_is_self : false <nl> ~ is_static : false this_ty ( snd m ) ) <nl> p env fty el uel in <nl> and is_abstract_ft fty = match fty with <nl> let fty = check_abstract_parent_meth ( snd m ) p fty in <nl> check_coroutine_call env fty ; <nl> let env , tel , tuel , ty = <nl> - call ~ expected <nl> + call ~ tyvars : ISet . empty ~ expected <nl> ~ method_call_info : ( TR . make_call_info ~ receiver_is_self : false <nl> ~ is_static : true ( Reason . Rwitness fpos , TUtils . this_of ( Env . get_self env ) ) ( snd m ) ) <nl> p env fty el uel in <nl> and is_abstract_ft fty = match fty with <nl> | _ - > ( ) in <nl> check_coroutine_call env fty ; <nl> let env , tel , tuel , ty = <nl> - call ~ expected <nl> + call ~ tyvars : ISet . empty ~ expected <nl> ~ method_call_info : ( TR . make_call_info ~ receiver_is_self : ( e1 = CIself ) <nl> ~ is_static : true ty1 ( snd m ) ) <nl> ~ is_expr_statement p env fty el uel in <nl> and is_abstract_ft fty = match fty with <nl> let tel = ref [ ] and tuel = ref [ ] and tftyl = ref [ ] in <nl> let fn = ( fun ( env , fty , _ ) - > <nl> let env , tel_ , tuel_ , method_ = <nl> - call <nl> + call ~ tyvars : ISet . empty <nl> ~ expected <nl> ~ method_call_info : ( TR . make_call_info ~ receiver_is_self : false <nl> ~ is_static : false ty1 ( snd m ) ) <nl> and is_abstract_ft fty = match fty with <nl> let k = ( fun ( env , fty , _ ) - > <nl> check_coroutine_call env fty ; <nl> let env , tel_ , tuel_ , method_ = <nl> - call ~ expected <nl> + call ~ tyvars : ISet . empty ~ expected <nl> ~ method_call_info : ( TR . 
make_call_info ~ receiver_is_self : false <nl> ~ is_static : false ty1 ( snd m ) ) <nl> ~ is_expr_statement p env fty el uel in <nl> and is_abstract_ft fty = match fty with <nl> <nl> ( * Function invocation * ) <nl> | Fun_id x - > <nl> - let env , fty = fun_type_of_id env x hl in <nl> + let env , fty , tyvars = fun_type_of_id env x hl in <nl> check_coroutine_call env fty ; <nl> let env , tel , tuel , ty = <nl> - call ~ expected ~ is_expr_statement p env fty el uel in <nl> + call ~ tyvars ~ expected ~ is_expr_statement p env fty el uel in <nl> make_call env ( T . make_typed_expr fpos fty ( T . Fun_id x ) ) hl tel tuel ty <nl> | Id ( _ , id as x ) - > <nl> - let env , fty = fun_type_of_id env x hl in <nl> + let env , fty , tyvars = fun_type_of_id env x hl in <nl> check_coroutine_call env fty ; <nl> let env , tel , tuel , ty = <nl> - call ~ expected ~ is_expr_statement p env fty el uel in <nl> + call ~ tyvars ~ expected ~ is_expr_statement p env fty el uel in <nl> let is_mutable = id = SN . Rx . mutable_ in <nl> let is_move = id = SN . Rx . move in <nl> let is_freeze = id = SN . Rx . freeze in <nl> and is_abstract_ft fty = match fty with <nl> let env , te , fty = expr env e in <nl> check_coroutine_call env fty ; <nl> let env , tel , tuel , ty = <nl> - call ~ expected ~ is_expr_statement p env fty el uel in <nl> + call ~ tyvars : ISet . empty ~ expected ~ is_expr_statement p env fty el uel in <nl> make_call env te hl tel tuel ty <nl> <nl> and fun_type_of_id env x hl = <nl> - let env , fty = <nl> - match Env . get_fun env ( snd x ) with <nl> - | None - > let env , _ , ty = unbound_name env x in env , ty <nl> - | Some fty - > <nl> - let ety_env = Phase . env_with_self env in <nl> - let env , fty = Phase . localize_ft ~ use_pos : ( fst x ) ~ explicit_tparams : hl ~ ety_env env fty in <nl> - env , ( Reason . Rwitness fty . ft_pos , Tfun fty ) <nl> - in <nl> - env , fty <nl> + match Env . get_fun env ( snd x ) with <nl> + | None - > let env , _ , ty = unbound_name env x in env , ty , ISet . empty <nl> + | Some fty - > <nl> + let ety_env = Phase . env_with_self env in <nl> + let env , fty , tyvars = <nl> + Phase . localize_ft ~ use_pos : ( fst x ) ~ explicit_tparams : hl ~ ety_env env fty in <nl> + env , ( Reason . Rwitness fty . ft_pos , Tfun fty ) , tyvars <nl> <nl> ( * * <nl> * Checks if a class ( given by cty ) contains a given static method . <nl> and class_get_ ~ is_method ~ is_const ~ ety_env ? ( explicit_tparams = [ ] ) <nl> | Some { ce_visibility = vis ; ce_lsb = lsb ; ce_type = lazy ( r , Tfun ft ) ; _ } - > <nl> let p_vis = Reason . to_pos r in <nl> TVis . check_class_access p env ( p_vis , vis , lsb ) cid class_ ; <nl> - let env , ft = <nl> + let env , ft , _tyvars = <nl> Phase . localize_ft ~ use_pos : p ~ ety_env ~ explicit_tparams : explicit_tparams env ft in <nl> let arity_pos = match ft . ft_params with <nl> | [ _ ; { fp_pos ; fp_kind = FPnormal ; _ } ] - > fp_pos <nl> and class_get_ ~ is_method ~ is_const ~ ety_env ? ( explicit_tparams = [ ] ) <nl> begin match method_ with <nl> ( * We special case Tfun here to allow passing in explicit tparams to localize_ft . * ) <nl> | r , Tfun ft - > <nl> - let env , ft = <nl> + let env , ft , _tyvars = <nl> Phase . localize_ft ~ use_pos : p ~ ety_env ~ explicit_tparams : explicit_tparams env ft <nl> in env , ( r , Tfun ft ) <nl> | _ - > Phase . localize ~ ety_env env method_ <nl> and obj_get_concrete_ty ~ is_method ~ valkind ? 
( explicit_tparams = [ ] ) <nl> <nl> ( * the return type of __call can depend on the class params or be this * ) <nl> let ety_env = mk_ety_env r class_info x paraml in <nl> - let env , ft = Phase . localize_ft ~ use_pos : id_pos ~ ety_env env ft in <nl> + let env , ft , _tyvars = Phase . localize_ft ~ use_pos : id_pos ~ ety_env env ft in <nl> <nl> let arity_pos = match ft . ft_params with <nl> | [ _ ; { fp_pos ; fp_kind = FPnormal ; _ } ] - > fp_pos <nl> and obj_get_concrete_ty ~ is_method ~ valkind ? ( explicit_tparams = [ ] ) <nl> | ( r , Tfun ft ) - > <nl> ( * We special case function types here to be able to pass explicit type <nl> * parameters . * ) <nl> - let ( env , ft ) = <nl> + let env , ft , _tyvars = <nl> Phase . localize_ft ~ use_pos : id_pos ~ explicit_tparams ~ ety_env env ft in <nl> ( env , ( r , Tfun ft ) ) <nl> | _ - > Phase . localize ~ ety_env env member_ty <nl> and static_class_id ~ check_constraints p env = <nl> let env , te , ty = expr env e in <nl> let rec resolve_ety ty = <nl> let env , ty = TUtils . fold_unresolved env ty in <nl> - let _ , ty = Env . expand_type env ty in <nl> + let env , ty = Env . expand_type env ty in <nl> match TUtils . get_base_type env ty with <nl> | _ , Tabstract ( AKnewtype ( classname , [ the_cls ] ) , _ ) when <nl> classname = SN . Classes . cClassname - > resolve_ety the_cls <nl> and call_construct p env class_ params el uel cid = <nl> | Some { ce_visibility = vis ; ce_type = lazy m ; _ } - > <nl> TVis . check_obj_access p env ( Reason . to_pos ( fst m ) , vis ) ; <nl> let env , m = Phase . localize ~ ety_env env m in <nl> - let env , tel , tuel , _ty = call ~ expected : None p env m el uel in <nl> + let env , tel , tuel , _ty = call ~ tyvars : ISet . empty ~ expected : None p env m el uel in <nl> env , tcid , tel , tuel , m <nl> <nl> and check_arity ? ( did_unpack = false ) pos pos_def ( arity : int ) exp_arity = <nl> and inout_write_back env { fp_type ; _ } ( _ , e ) = <nl> env <nl> | _ - > env <nl> <nl> - and call ~ expected ? ( is_expr_statement = false ) ? method_call_info pos env fty el uel = <nl> + and call ~ tyvars ~ expected ? ( is_expr_statement = false ) ? method_call_info pos env fty el uel = <nl> let env , tel , tuel , ty = <nl> - call_ ~ expected ~ is_expr_statement ~ method_call_info pos env fty el uel in <nl> + call_ ~ tyvars ~ expected ~ is_expr_statement ~ method_call_info pos env fty el uel in <nl> ( * We need to solve the constraints after every single function call . <nl> * The type - checker is control - flow sensitive , the same value could <nl> * have different type depending on the branch that we are in . <nl> and call ~ expected ? ( is_expr_statement = false ) ? method_call_info pos env fty el u <nl> let env = Env . check_todo env in <nl> env , tel , tuel , ty <nl> <nl> - and call_ ~ expected ~ method_call_info ~ is_expr_statement pos env fty el uel = <nl> + and call_ ~ tyvars ~ expected ~ method_call_info ~ is_expr_statement pos env fty el uel = <nl> let make_unpacked_traversable_ty pos ty = <nl> let unpack_r = Reason . Runpack_param pos in <nl> unpack_r , Tclass ( ( pos , SN . Collections . cTraversable ) , [ ty ] ) <nl> and call_ ~ expected ~ method_call_info ~ is_expr_statement pos env fty el uel = <nl> in <nl> env , tel , [ ] , ty <nl> | _ , Tunresolved [ ty ] - > <nl> - call ~ expected pos env ty el uel <nl> + call ~ tyvars ~ expected pos env ty el uel <nl> | r , Tunresolved tyl - > <nl> let env , retl = List . 
map_env env tyl begin fun env ty - > <nl> - let env , _ , _ , ty = call ~ expected pos env ty el uel in env , ty <nl> + let env , _ , _ , ty = call ~ tyvars ~ expected pos env ty el uel in env , ty <nl> end in <nl> let env , ty = TUtils . in_var env ( r , Tunresolved retl ) in <nl> env , [ ] , [ ] , ty <nl> and call_ ~ expected ~ method_call_info ~ is_expr_statement pos env fty el uel = <nl> check_deprecated pos ft ; <nl> let env , var_param = variadic_param env ft in <nl> <nl> + ( * Set variance of type variables appearing in the return type * ) <nl> + let env = SubType . set_tyvar_variance ~ tyvars env ft . ft_ret in <nl> ( * Force subtype with expected result * ) <nl> let env = check_expected_ty " Call result " env ft . ft_ret expected in <nl> <nl> and overload_function make_call fpos p env ( cpos , class_id ) method_id el uel f = <nl> but ignore the result and overwrite with custom one * ) <nl> let ( env , tel , tuel , res ) , has_error = Errors . try_with_error <nl> ( * TODO : Should we be passing hints here * ) <nl> - ( fun ( ) - > ( call ~ expected : None p env fty el uel ) , false ) <nl> + ( fun ( ) - > ( call ~ tyvars : ISet . empty ~ expected : None p env fty el uel ) , false ) <nl> ( fun ( ) - > ( env , [ ] , [ ] , ( Reason . Rwitness p , Typing_utils . tany env ) ) , true ) in <nl> ( * if there are errors already stop here - going forward would <nl> * report them twice * ) <nl> mmm a / hphp / hack / src / typing / typing_log . ml <nl> ppp b / hphp / hack / src / typing / typing_log . ml <nl> let log_tpenv env = <nl> <nl> let log_tvenv env = <nl> indentEnv " tvenv " ( fun ( ) - > <nl> - IMap . iter begin fun var _ - > <nl> - let lower = Typing_set . elements ( Env . get_tyvar_lower_bounds env var ) in <nl> - let upper = Typing_set . elements ( Env . get_tyvar_upper_bounds env var ) in <nl> + IMap . iter begin fun var <nl> + Env . { lower_bounds ; upper_bounds ; <nl> + appears_covariantly ; appears_contravariantly ; _ } - > <nl> + let lower = Typing_set . elements lower_bounds in <nl> + let upper = Typing_set . elements upper_bounds in <nl> lnewline ( ) ; <nl> ( if not ( List . is_empty lower ) <nl> then ( log_type_list env lower ; lprintf ( Normal Green ) " < : " ) ) ; <nl> - lprintf ( Bold Green ) " # % d " var ; <nl> + lprintf ( Bold Green ) " % s % s # % d " <nl> + ( if appears_covariantly then " + " else " " ) <nl> + ( if appears_contravariantly then " - " else " " ) <nl> + var ; <nl> ( if not ( List . is_empty upper ) <nl> then ( lprintf ( Normal Green ) " < : " ; log_type_list env upper ) ) <nl> end env . Env . tvenv ) <nl> mmm a / hphp / hack / src / typing / typing_phase . ml <nl> ppp b / hphp / hack / src / typing / typing_phase . ml <nl> let rec localize_with_env ~ ety_env env ( dty : decl ty ) = <nl> Toption ty in <nl> env , ( ety_env , ( r , ty_ ) ) <nl> | r , Tfun ft - > <nl> - let env , ft = localize_ft ~ use_pos : ft . ft_pos ~ ety_env env ft in <nl> + let env , ft , _ = localize_ft ~ use_pos : ft . ft_pos ~ ety_env env ft in <nl> env , ( ety_env , ( r , Tfun ft ) ) <nl> | r , Tapply ( ( _ , x ) , argl ) when Env . is_typedef x - > <nl> let env , argl = List . map_env env argl ( localize ~ ety_env ) in <nl> and localize_ft ~ use_pos ? ( instantiate_tparams = true ) ? ( explicit_tparams = [ ] ) ~ ety <nl> * something like " mixed " . <nl> * If explicit type parameters are provided , just instantiate tvarl to them . <nl> * ) <nl> - let env , substs = <nl> + let env , substs , tvarl = <nl> if instantiate_tparams <nl> then <nl> let default ( ) = List . 
map_env env ft . ft_tparams ( TUtils . unresolved_tparam ~ use_pos ) in <nl> and localize_ft ~ use_pos ? ( instantiate_tparams = true ) ? ( explicit_tparams = [ ] ) ~ ety <nl> List . map_env env explicit_tparams type_argument <nl> in <nl> let ft_subst = Subst . make ft . ft_tparams tvarl in <nl> - env , SMap . union ft_subst ety_env . substs <nl> + env , SMap . union ft_subst ety_env . substs , tvarl <nl> else <nl> env , List . fold_left ft . ft_tparams ~ f : begin fun subst ( _ , ( _ , x ) , _ , _ ) - > <nl> SMap . remove x subst <nl> - end ~ init : ety_env . substs <nl> + end ~ init : ety_env . substs , [ ] <nl> in <nl> let ety_env = { ety_env with substs = substs } in <nl> let env , params = List . map_env env ft . ft_params begin fun env param - > <nl> and localize_ft ~ use_pos ? ( instantiate_tparams = true ) ? ( explicit_tparams = [ ] ) ~ ety <nl> env , Fvariadic ( min , { param with fp_type = var_ty } ) <nl> | Fellipsis _ | Fstandard ( _ , _ ) as x - > env , x in <nl> let env , ret = localize ~ ety_env env ft . ft_ret in <nl> + let vars = List . fold_left <nl> + ~ f : ( fun vars ty - > match ty with ( _ , Tvar i ) - > ISet . add i vars | _ - > vars ) <nl> + ~ init : ISet . empty tvarl in <nl> env , { ft with ft_arity = arity ; ft_params = params ; <nl> ft_ret = ret ; ft_tparams = tparams ; <nl> - ft_where_constraints = where_constraints } <nl> + ft_where_constraints = where_constraints } , vars <nl> <nl> ( * Given a list of generic parameters [ tparams ] and a substitution <nl> * in [ ety_env . substs ] whose domain is at least these generic parameters , <nl> mmm a / hphp / hack / src / typing / typing_subtype . ml <nl> ppp b / hphp / hack / src / typing / typing_subtype . ml <nl> let subtype_method <nl> Errors . abstract_concrete_override ft_sub . ft_pos ft_super . ft_pos ` method_ ; <nl> let ety_env = <nl> Phase . env_with_self env in <nl> - let env , ft_super_no_tvars = <nl> + let env , ft_super_no_tvars , _ = <nl> Phase . localize_ft ~ use_pos : ft_super . ft_pos ~ ety_env ~ instantiate_tparams : false env ft_super in <nl> - let env , ft_sub_no_tvars = <nl> + let env , ft_sub_no_tvars , _ = <nl> Phase . localize_ft ~ use_pos : ft_sub . ft_pos ~ ety_env ~ instantiate_tparams : false env ft_sub in <nl> let old_tpenv = env . Env . lenv . Env . tpenv in <nl> <nl> let add_constraint <nl> in <nl> iter 0 env ' <nl> <nl> + <nl> + let flip_variance v = <nl> + match v with <nl> + | Ast . Covariant - > Ast . Contravariant <nl> + | Ast . Contravariant - > Ast . Covariant <nl> + | Ast . Invariant - > Ast . Invariant <nl> + <nl> + let combine_variance v1 v2 = <nl> + match v1 , v2 with <nl> + | Ast . Contravariant , v | v , Ast . Contravariant - > flip_variance v <nl> + | Ast . Invariant , _ | _ , Ast . Invariant - > Ast . Invariant <nl> + | _ - > Ast . Covariant <nl> + <nl> + ( * For type variables in vars , set ` appears_covariantly ` and <nl> + * ` appears_contravariantly ` in the type variable environment according to their <nl> + * position in ` ty ` . The current position of ` ty ` is indicated by ` variance ` . <nl> + * ) <nl> + let rec set_tyvar_variance ~ variance ~ tyvars env ty = <nl> + match snd ty with <nl> + | Tvar v - > <nl> + if ISet . mem v tyvars <nl> + then <nl> + match variance with <nl> + | Ast . Covariant - > Env . set_tyvar_appears_covariantly env v <nl> + | Ast . Contravariant - > Env . set_tyvar_appears_contravariantly env v <nl> + | Ast . Invariant - > <nl> + let env = Env . set_tyvar_appears_covariantly env v in <nl> + Env . 
set_tyvar_appears_contravariantly env v <nl> + else env <nl> + | Tany | Tnonnull | Terr | Tdynamic | Tobject | Tprim _ | Tanon _ | Tabstract ( _ , None ) - > <nl> + env <nl> + ( * Nullable is covariant * ) <nl> + | Toption ty - > <nl> + set_tyvar_variance ~ variance ~ tyvars env ty <nl> + ( * Tuples and unions are covariant * ) <nl> + | Ttuple tyl | Tunresolved tyl - > <nl> + List . fold_left ~ f : ( set_tyvar_variance ~ variance ~ tyvars ) ~ init : env tyl <nl> + ( * Shape data is covariant * ) <nl> + | Tshape ( _ , m ) - > <nl> + Nast . ShapeMap . fold begin fun _ { sft_ty ; _ } env - > <nl> + set_tyvar_variance ~ variance ~ tyvars env sft_ty end m env <nl> + ( * Functions are covariant in return type , contravariant in parameter types * ) <nl> + | Tfun ft - > <nl> + let flipped = flip_variance variance in <nl> + let env = List . fold_left ~ f : begin fun env { fp_type ; _ } - > <nl> + set_tyvar_variance ~ variance : flipped ~ tyvars env fp_type end ~ init : env ft . ft_params in <nl> + set_tyvar_variance ~ variance ~ tyvars env ft . ft_ret <nl> + | Tabstract ( AKnewtype ( name , tyl ) , _ ) - > <nl> + begin match Env . get_typedef env name with <nl> + | Some { td_tparams ; _ } - > <nl> + let variancel = List . map td_tparams ( fun ( v , _ , _ , _ ) - > combine_variance variance v ) in <nl> + set_tyvar_variance_list ~ variancel ~ tyvars env tyl <nl> + | None - > <nl> + env <nl> + end <nl> + | Tabstract ( _ , Some ty ) - > <nl> + set_tyvar_variance ~ variance ~ tyvars env ty <nl> + ( * Classes carry their own variance declarations * ) <nl> + | Tclass ( ( _ , cid ) , tyl ) - > <nl> + begin match Env . get_class env cid with <nl> + | None - > env <nl> + | Some { tc_tparams ; _ } - > <nl> + let variancel = List . map tc_tparams ( fun ( v , _ , _ , _ ) - > combine_variance variance v ) in <nl> + set_tyvar_variance_list ~ variancel ~ tyvars env tyl <nl> + end <nl> + ( * Arrays are covariant in key and data types * ) <nl> + | Tarraykind ak - > <nl> + begin match ak with <nl> + | AKany | AKempty - > env <nl> + | AKvarray ty | AKvec ty | AKvarray_or_darray ty - > <nl> + set_tyvar_variance ~ variance ~ tyvars env ty <nl> + | AKdarray ( ty1 , ty2 ) | AKmap ( ty1 , ty2 ) - > <nl> + let env = set_tyvar_variance ~ variance ~ tyvars env ty1 in <nl> + set_tyvar_variance ~ variance ~ tyvars env ty2 <nl> + | AKshape m - > <nl> + Nast . ShapeMap . fold begin fun _ ( ty1 , ty2 ) env - > <nl> + let env = set_tyvar_variance ~ variance ~ tyvars env ty1 in <nl> + set_tyvar_variance ~ variance ~ tyvars env ty2 end m env <nl> + | AKtuple m - > <nl> + IMap . fold ( fun _ ty env - > set_tyvar_variance ~ variance ~ tyvars env ty ) m env <nl> + end <nl> + <nl> + and set_tyvar_variance_list ~ variancel ~ tyvars env tyl = <nl> + match variancel , tyl with <nl> + | [ ] , [ ] - > env <nl> + | variance : : variancel , ty : : tyl - > <nl> + let env = set_tyvar_variance ~ variance ~ tyvars env ty in <nl> + set_tyvar_variance_list ~ variancel ~ tyvars env tyl <nl> + | _ - > env <nl> + <nl> + let set_tyvar_variance ~ tyvars env ty = <nl> + set_tyvar_variance ~ variance : Ast . Covariant ~ tyvars env ty <nl> + <nl> ( * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * ) <nl> ( * Exporting * ) <nl> ( * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * ) <nl> mmm a / hphp / hack / src / typing / typing_subtype . 
mli <nl> ppp b / hphp / hack / src / typing / typing_subtype . mli <nl> val add_constraint : <nl> locl ty - > <nl> locl ty - > <nl> Env . env <nl> + <nl> + val set_tyvar_variance : <nl> + tyvars : ISet . t - > <nl> + Env . env - > <nl> + locl ty - > <nl> + Env . env <nl>
Add variance of type variables based on expression type
facebook/hhvm
9dbe19943dd2ba05180f09f6c6311c77627c3545
2018-11-15T08:26:32Z
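The HHVM commit records, for each fresh type variable, whether it appears covariantly or contravariantly in the localized return type, flipping variance under function parameter positions and combining it with the declared variance of class and typedef parameters. A tiny C++ transcription of the `flip_variance` and `combine_variance` rules from typing_subtype.ml, for illustration only:

```cpp
#include <cassert>

enum class Variance { Covariant, Contravariant, Invariant };

// Entering a contravariant position (e.g. a function parameter) flips variance.
Variance flip(Variance v) {
    switch (v) {
        case Variance::Covariant:     return Variance::Contravariant;
        case Variance::Contravariant: return Variance::Covariant;
        default:                      return Variance::Invariant;
    }
}

// Combining the current position's variance with a parameter's declared variance:
// a contravariant side flips the other side, an invariant side absorbs everything,
// and two covariant sides stay covariant.
Variance combine(Variance v1, Variance v2) {
    if (v1 == Variance::Contravariant) return flip(v2);
    if (v2 == Variance::Contravariant) return flip(v1);
    if (v1 == Variance::Invariant || v2 == Variance::Invariant) return Variance::Invariant;
    return Variance::Covariant;
}

int main() {
    // A covariant class parameter inside a function parameter position is contravariant.
    assert(combine(flip(Variance::Covariant), Variance::Covariant) == Variance::Contravariant);
    // Anything combined with an invariant parameter is invariant.
    assert(combine(Variance::Covariant, Variance::Invariant) == Variance::Invariant);
    return 0;
}
```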
mmm a / src / mongo / s / catalog / replset / replset_dist_lock_manager . cpp <nl> ppp b / src / mongo / s / catalog / replset / replset_dist_lock_manager . cpp <nl> StatusWith < bool > ReplSetDistLockManager : : canOvertakeLock ( LocksType lockDoc ) { <nl> pingValue = pingDoc . getPing ( ) ; <nl> } else if ( pingStatus . getStatus ( ) ! = ErrorCodes : : NoMatchingDocument ) { <nl> return pingStatus . getStatus ( ) ; <nl> - } / / else use default pingValue if ping document does not exist . <nl> + } / / else use default pingValue if ping document does not exist . <nl> <nl> Timer timer ( _serviceContext - > getTickSource ( ) ) ; <nl> auto serverInfoStatus = _catalog - > getServerInfo ( ) ; <nl>
SERVER-19897 Fix clang format
mongodb/mongo
9237325ded582696d28e54a2c702b56b6e10f4dd
2015-09-01T21:08:03Z
mmm a / cocos2dx / cocoa / CCArray . cpp <nl> ppp b / cocos2dx / cocoa / CCArray . cpp <nl> CCArray : : ~ CCArray ( ) <nl> ccArrayFree ( data ) ; <nl> } <nl> <nl> - <nl> - <nl> CCObject * CCArray : : copyWithZone ( CCZone * pZone ) <nl> { <nl> CCAssert ( pZone = = NULL , " CCArray should not be inherited . " ) ; <nl> CCObject * CCArray : : copyWithZone ( CCZone * pZone ) <nl> return pArray ; <nl> } <nl> <nl> - / / # pragma mark CCArray - sorting <nl> - <nl> - / * * @ since 1 . 1 * / <nl> - / / # pragma mark - <nl> - / / # pragma mark CCArray insertionSortUsingCFuncComparator <nl> - <nl> - void CCArray : : insertionSortUsingCFuncComparator ( cc_comparator comparator ) <nl> - { <nl> - / / TODO : cc_insertionSort ( data , comparator ) ; <nl> - } <nl> - <nl> - / / # pragma mark CCArray qsortUsingCFuncComparator <nl> - <nl> - void CCArray : : qsortUsingCFuncComparator ( cc_comparator comparator ) <nl> - { <nl> - / / stable c qsort is used - cost of sorting : best n * log ( n ) , average n * log ( n ) <nl> - / / qsort ( void * , size_t , size_t , int ( * ) ( const void * arg1 , const void * arg2 ) ) ; <nl> - <nl> - qsort ( data - > arr , data - > num , sizeof ( CCObject * ) , comparator ) ; <nl> - } <nl> - <nl> - / / # pragma mark CCArray mergesortLUsingCFuncComparator <nl> - <nl> - void CCArray : : mergesortLUsingCFuncComparator ( cc_comparator comparator ) <nl> - { <nl> - / / TODO : cc_mergesortL ( data , sizeof ( CCObject * ) , comparator ) ; <nl> - } <nl> - <nl> - / / # pragma mark CCArray insertionSort with ( SEL ) selector <nl> - <nl> - void CCArray : : insertionSort ( SEL_Compare selector ) / / It sorts source array in ascending order <nl> - { <nl> - int i , j , length = data - > num ; <nl> - <nl> - CCObject * * x = data - > arr ; <nl> - CCObject * temp ; <nl> - <nl> - / / insertion sort <nl> - for ( i = 1 ; i < length ; i + + ) <nl> - { <nl> - j = i ; <nl> - / / continue moving element downwards while order is descending <nl> - while ( j > 0 & & ( x [ j - 1 ] - > * selector ) ( x [ j ] ) = = CCOrderedDescending ) <nl> - { <nl> - temp = x [ j ] ; <nl> - x [ j ] = x [ j - 1 ] ; <nl> - x [ j - 1 ] = temp ; <nl> - j - - ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - static inline int selectorCompare ( CCObject * object1 , CCObject * object2 , SEL_Compare selector ) <nl> - { <nl> - return ( int ) ( object1 - > * selector ) ( object2 ) ; <nl> - } <nl> - <nl> - void CCArray : : sortUsingSelector ( SEL_Compare selector ) <nl> - { <nl> - this - > sortUsingFunction ( selectorCompare , selector ) ; <nl> - } <nl> - <nl> - / / # pragma mark CCArray sortUsingFunction <nl> - <nl> - / / using a comparison function <nl> - void CCArray : : sortUsingFunction ( int ( * compare ) ( CCObject * , CCObject * , SEL_Compare ) , SEL_Compare context ) <nl> - { <nl> - int h , i , j , k , l , m , n = this - > count ( ) ; <nl> - CCObject * A , * * B = ( CCObject * * ) malloc ( ( n / 2 + 1 ) * sizeof ( CCObject * ) ) ; <nl> - <nl> - / / to prevent retain counts from temporarily hitting zero . 
<nl> - for ( i = 0 ; i < n ; i + + ) <nl> - { <nl> - / / [ [ self objectAtIndex : i ] retain ] ; / / prevents compiler warning <nl> - data - > arr [ i ] - > retain ( ) ; <nl> - } <nl> - <nl> - for ( h = 1 ; h < n ; h + = h ) <nl> - { <nl> - for ( m = n - 1 - h ; m > = 0 ; m - = h + h ) <nl> - { <nl> - l = m - h + 1 ; <nl> - if ( l < 0 ) <nl> - { <nl> - l = 0 ; <nl> - } <nl> - <nl> - for ( i = 0 , j = l ; j < = m ; i + + , j + + ) <nl> - { <nl> - B [ i ] = this - > objectAtIndex ( j ) ; <nl> - } <nl> - <nl> - for ( i = 0 , k = l ; k < j & & j < = m + h ; k + + ) <nl> - { <nl> - A = this - > objectAtIndex ( j ) ; <nl> - if ( compare ( A , B [ i ] , context ) = = CCOrderedDescending ) <nl> - { <nl> - this - > replaceObjectAtIndex ( k , B [ i + + ] ) ; <nl> - } <nl> - else <nl> - { <nl> - this - > replaceObjectAtIndex ( k , A ) ; <nl> - j + + ; <nl> - } <nl> - } <nl> - <nl> - while ( k < j ) <nl> - { <nl> - this - > replaceObjectAtIndex ( k + + , B [ i + + ] ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - for ( i = 0 ; i < n ; i + + ) <nl> - { <nl> - / / [ [ self objectAtIndex : i ] release ] ; / / prevents compiler warning <nl> - data - > arr [ i ] - > release ( ) ; <nl> - } <nl> - <nl> - free ( B ) ; <nl> - } <nl> - <nl> NS_CC_END <nl> mmm a / cocos2dx / cocoa / CCArray . h <nl> ppp b / cocos2dx / cocoa / CCArray . h <nl> class CC_DLL CCArray : public CCObject <nl> void reverseObjects ( ) ; <nl> / * Shrinks the array so the memory footprint corresponds with the number of items * / <nl> void reduceMemoryFootprint ( ) ; <nl> - <nl> - / / Sorting Array <nl> - / * * all since @ 1 . 1 * / <nl> - void qsortUsingCFuncComparator ( cc_comparator comparator ) ; / / c qsort is used for sorting <nl> - void insertionSortUsingCFuncComparator ( cc_comparator comparator ) ; / / insertion sort <nl> - void mergesortLUsingCFuncComparator ( cc_comparator comparator ) ; / / mergesort <nl> - void insertionSort ( SEL_Compare selector ) ; / / It sorts source array in ascending order <nl> - void sortUsingFunction ( int ( * compare ) ( CCObject * , CCObject * , SEL_Compare ) , SEL_Compare context ) ; <nl> - void sortUsingSelector ( SEL_Compare selector ) ; <nl> <nl> / * override functions * / <nl> virtual CCObject * copyWithZone ( CCZone * pZone ) ; <nl> mmm a / cocos2dx / extensions / CCBReader / CCBReader_v2 . cpp <nl> ppp b / cocos2dx / extensions / CCBReader / CCBReader_v2 . cpp <nl> void CCBReader : : setPropsForNode ( CCNode * node , CCDictionary * props , CCDictionary * <nl> node - > setScaleY ( floatValFromDict ( props , " scaleY " ) ) ; <nl> node - > setAnchorPoint ( pointValFromDict ( props , " anchorPoint " ) ) ; <nl> node - > setRotation ( floatValFromDict ( props , " rotation " ) ) ; <nl> - node - > setIsRelativeAnchorPoint ( boolValFromDict ( props , " isRelativeAnchorPoint " ) ) ; <nl> + node - > setIgnoreAnchorPointForPosition ( ! boolValFromDict ( props , " isRelativeAnchorPoint " ) ) ; <nl> node - > setIsVisible ( boolValFromDict ( props , " visible " ) ) ; <nl> <nl> if ( extraProps ) <nl> mmm a / cocos2dx / extensions / CCTextureWatcher / CCTextureWatcher . cpp <nl> ppp b / cocos2dx / extensions / CCTextureWatcher / CCTextureWatcher . 
cpp <nl> CCTextureWatcher : : CCTextureWatcher ( ) <nl> <nl> / / the menu of disabling touch event <nl> / / * <nl> - CCLabelTTF * label = CCLabelTTF : : labelWithString ( " " , size , kCCTextAlignmentLeft , kCCVerticalTextAlignmentTop , " Arial " , 12 ) ; <nl> + CCLabelTTF * label = CCLabelTTF : : labelWithString ( " " , size , kCCTextAlignmentLeft , " Arial " , 12 ) ; <nl> CCMenuItemLabel * menuItem = CCMenuItemLabel : : itemWithLabel ( label ) ; <nl> menuItem - > setAnchorPoint ( ccp ( 0 , 0 ) ) ; <nl> menuItem - > setPosition ( ccp ( 0 , 0 ) ) ; <nl> CCTextureWatcher : : CCTextureWatcher ( ) <nl> m_pLayer - > addChild ( menu1 ) ; <nl> <nl> / / label page <nl> - m_labelPage = CCLabelTTF : : labelWithString ( " " , CCSizeMake ( size . width * 0 . 1 , labelFresh - > getContentSize ( ) . height ) , kCCTextAlignmentCenter , kCCVerticalTextAlignmentTop , " Arial " , 16 ) ; <nl> + m_labelPage = CCLabelTTF : : labelWithString ( " " , CCSizeMake ( size . width * 0 . 1 , labelFresh - > getContentSize ( ) . height ) , kCCTextAlignmentCenter , " Arial " , 16 ) ; <nl> m_labelPage - > setAnchorPoint ( ccp ( 0 . 5 , 0 ) ) ; <nl> m_labelPage - > setPosition ( ccp ( size . width / 2 . 0 , 0 ) ) ; <nl> m_pLayer - > addChild ( m_labelPage , 0 ) ; <nl> void CCTextureWatcher : : CCListView_cellForRow ( CCListView * listView , CCListViewPro <nl> string name = key . substr ( pos , len - pos ) ; <nl> sprintf ( m_pszString , " % s " , name . c_str ( ) ) ; <nl> CCSize dimensions = CCSizeMake ( listItemSize . width * 0 . 9f , labelSize - > getContentSize ( ) . height ) ; <nl> - CCLabelTTF * labelName = CCLabelTTF : : labelWithString ( m_pszString , dimensions , kCCTextAlignmentCenter , kCCVerticalTextAlignmentTop , " Arial " , 16 ) ; <nl> + CCLabelTTF * labelName = CCLabelTTF : : labelWithString ( m_pszString , dimensions , kCCTextAlignmentCenter , " Arial " , 16 ) ; <nl> offX = offsetX + listItemSize . width * 0 . 5f ; <nl> offY = offY + labelName - > getContentSize ( ) . height ; <nl> labelName - > setPosition ( ccp ( offX , offY ) ) ; <nl> mmm a / cocos2dx / misc_nodes / CCRenderTexture . cpp <nl> ppp b / cocos2dx / misc_nodes / CCRenderTexture . cpp <nl> CCRenderTexture * CCRenderTexture : : renderTextureWithWidthAndHeight ( int w , int h ) <nl> return NULL ; <nl> } <nl> <nl> + bool CCRenderTexture : : initWithWidthAndHeight ( int w , int h , CCTexture2DPixelFormat eFormat ) <nl> + { <nl> + return initWithWidthAndHeight ( w , h , eFormat , 0 ) ; <nl> + } <nl> + <nl> bool CCRenderTexture : : initWithWidthAndHeight ( int w , int h , CCTexture2DPixelFormat eFormat , GLuint uDepthStencilFormat ) <nl> { <nl> CCAssert ( m_ePixelFormat ! = kCCTexture2DPixelFormat_A8 , " only RGB and RGBA formats are valid for a render texture " ) ; <nl> mmm a / cocos2dx / particle_nodes / CCParticleSystem . cpp <nl> ppp b / cocos2dx / particle_nodes / CCParticleSystem . cpp <nl> void CCParticleSystem : : setBlendFunc ( ccBlendFunc blendFunc ) <nl> } <nl> } <nl> <nl> + bool CCParticleSystem : : getOpacityModifyRGB ( ) <nl> + { <nl> + return m_bOpacityModifyRGB ; <nl> + } <nl> + <nl> + void CCParticleSystem : : setOpacityModifyRGB ( bool bOpacityModifyRGB ) <nl> + { <nl> + m_bOpacityModifyRGB = bOpacityModifyRGB ; <nl> + } <nl> + <nl> tCCPositionType CCParticleSystem : : getPositionType ( ) <nl> { <nl> return m_ePositionType ; <nl> mmm a / cocos2dx / proj . win32 / cocos2d - win32 . vcproj <nl> ppp b / cocos2dx / proj . win32 / cocos2d - win32 . vcproj <nl> <nl> RelativePath = " . . \ actions \ CCActionCamera . 
h " <nl> > <nl> < / File > <nl> + < File <nl> + RelativePath = " . . \ actions \ CCActionCatmullRom . cpp " <nl> + > <nl> + < / File > <nl> + < File <nl> + RelativePath = " . . \ actions \ CCActionCatmullRom . h " <nl> + > <nl> + < / File > <nl> < File <nl> RelativePath = " . . \ actions \ CCActionEase . cpp " <nl> > <nl> mmm a / cocos2dx / support / data_support / ccCArray . cpp <nl> ppp b / cocos2dx / support / data_support / ccCArray . cpp <nl> THE SOFTWARE . <nl> NS_CC_BEGIN <nl> <nl> / * * Allocates and initializes a new array with specified capacity * / <nl> - ccArray * ccArrayNew ( unsigned int capacity ) { <nl> + ccArray * ccArrayNew ( unsigned int capacity ) <nl> + { <nl> if ( capacity = = 0 ) <nl> capacity = 1 ; <nl> <nl> ccArray * arr = ( ccArray * ) malloc ( sizeof ( ccArray ) ) ; <nl> arr - > num = 0 ; <nl> - arr - > arr = ( CCARRAY_ID * ) calloc ( capacity , sizeof ( CCObject * ) ) ; <nl> + arr - > arr = ( CCObject * * ) calloc ( capacity , sizeof ( CCObject * ) ) ; <nl> arr - > max = capacity ; <nl> <nl> return arr ; <nl> ccArray * ccArrayNew ( unsigned int capacity ) { <nl> / * * Frees array after removing all remaining objects . Silently ignores NULL arr . * / <nl> void ccArrayFree ( ccArray * & arr ) <nl> { <nl> - if ( arr = = NULL ) return ; <nl> - <nl> + if ( arr = = NULL ) <nl> + { <nl> + return ; <nl> + } <nl> ccArrayRemoveAllObjects ( arr ) ; <nl> <nl> free ( arr - > arr ) ; <nl> void ccArrayFree ( ccArray * & arr ) <nl> void ccArrayDoubleCapacity ( ccArray * arr ) <nl> { <nl> arr - > max * = 2 ; <nl> - CCARRAY_ID * newArr = ( CCARRAY_ID * ) realloc ( arr - > arr , arr - > max * sizeof ( CCObject * ) ) ; <nl> + CCObject * * newArr = ( CCObject * * ) realloc ( arr - > arr , arr - > max * sizeof ( CCObject * ) ) ; <nl> / / will fail when there ' s not enough memory <nl> - CCAssert ( newArr ! = NULL , " ccArrayDoubleCapacity failed . Not enough memory " ) ; <nl> + CCAssert ( newArr ! = 0 , " ccArrayDoubleCapacity failed . Not enough memory " ) ; <nl> arr - > arr = newArr ; <nl> } <nl> <nl> void ccArrayEnsureExtraCapacity ( ccArray * arr , unsigned int extra ) <nl> { <nl> while ( arr - > max < arr - > num + extra ) <nl> + { <nl> ccArrayDoubleCapacity ( arr ) ; <nl> + } <nl> } <nl> <nl> void ccArrayShrink ( ccArray * arr ) <nl> { <nl> - unsigned int newSize ; <nl> + unsigned int newSize = 0 ; <nl> <nl> / / only resize when necessary <nl> if ( arr - > max > arr - > num & & ! ( arr - > num = = 0 & & arr - > max = = 1 ) ) <nl> void ccArrayShrink ( ccArray * arr ) <nl> arr - > max = 1 ; <nl> } <nl> <nl> - arr - > arr = ( CCARRAY_ID * ) realloc ( arr - > arr , newSize * sizeof ( CCObject * ) ) ; <nl> + arr - > arr = ( CCObject * * ) realloc ( arr - > arr , newSize * sizeof ( CCObject * ) ) ; <nl> CCAssert ( arr - > arr ! = NULL , " could not reallocate the memory " ) ; <nl> } <nl> } <nl> void ccArrayShrink ( ccArray * arr ) <nl> / * * Returns index of first occurence of object , CC_INVALID_INDEX if object not found . * / <nl> unsigned int ccArrayGetIndexOfObject ( ccArray * arr , CCObject * object ) <nl> { <nl> - unsigned int i ; <nl> - <nl> - for ( i = 0 ; i < arr - > num ; i + + ) <nl> + for ( unsigned int i = 0 ; i < arr - > num ; i + + ) <nl> + { <nl> if ( arr - > arr [ i ] = = object ) return i ; <nl> + } <nl> <nl> return CC_INVALID_INDEX ; <nl> } <nl> void ccArrayAppendObjectWithResize ( ccArray * arr , CCObject * object ) <nl> enough capacity . 
* / <nl> void ccArrayAppendArray ( ccArray * arr , ccArray * plusArr ) <nl> { <nl> - unsigned int i ; <nl> - <nl> - for ( i = 0 ; i < plusArr - > num ; i + + ) <nl> + for ( unsigned int i = 0 ; i < plusArr - > num ; i + + ) <nl> + { <nl> ccArrayAppendObject ( arr , plusArr - > arr [ i ] ) ; <nl> + } <nl> } <nl> <nl> / * * Appends objects from plusArr to arr . Capacity of arr is increased if needed . * / <nl> void ccArrayRemoveAllObjects ( ccArray * arr ) <nl> Behaviour undefined if index outside [ 0 , num - 1 ] . * / <nl> void ccArrayRemoveObjectAtIndex ( ccArray * arr , unsigned int index , bool bReleaseObj / * = true * / ) <nl> { <nl> + CCAssert ( arr & & arr - > num > 0 & & index < arr - > num , " Invalid index . Out of bounds " ) ; <nl> if ( bReleaseObj ) <nl> { <nl> CC_SAFE_RELEASE ( arr - > arr [ index ] ) ; <nl> void ccArrayFastRemoveObject ( ccArray * arr , CCObject * object ) <nl> { <nl> unsigned int index = ccArrayGetIndexOfObject ( arr , object ) ; <nl> if ( index ! = CC_INVALID_INDEX ) <nl> + { <nl> ccArrayFastRemoveObjectAtIndex ( arr , index ) ; <nl> + } <nl> } <nl> <nl> / * * Searches for the first occurance of object and removes it . If object is not <nl> void ccArrayRemoveObject ( ccArray * arr , CCObject * object , bool bReleaseObj / * = tr <nl> first matching instance in arr will be removed . * / <nl> void ccArrayRemoveArray ( ccArray * arr , ccArray * minusArr ) <nl> { <nl> - unsigned int i ; <nl> - <nl> - for ( i = 0 ; i < minusArr - > num ; i + + ) <nl> + for ( unsigned int i = 0 ; i < minusArr - > num ; i + + ) <nl> + { <nl> ccArrayRemoveObject ( arr , minusArr - > arr [ i ] ) ; <nl> + } <nl> } <nl> <nl> / * * Removes from arr all objects in minusArr . For each object in minusArr , all <nl> void ccArrayRemoveArray ( ccArray * arr , ccArray * minusArr ) <nl> void ccArrayFullRemoveArray ( ccArray * arr , ccArray * minusArr ) <nl> { <nl> unsigned int back = 0 ; <nl> - unsigned int i ; <nl> + unsigned int i = 0 ; <nl> <nl> - for ( i = 0 ; i < arr - > num ; i + + ) { <nl> - if ( ccArrayContainsObject ( minusArr , arr - > arr [ i ] ) ) { <nl> + for ( i = 0 ; i < arr - > num ; i + + ) <nl> + { <nl> + if ( ccArrayContainsObject ( minusArr , arr - > arr [ i ] ) ) <nl> + { <nl> CC_SAFE_RELEASE ( arr - > arr [ i ] ) ; <nl> back + + ; <nl> - } else <nl> + } <nl> + else <nl> + { <nl> arr - > arr [ i - back ] = arr - > arr [ i ] ; <nl> + } <nl> } <nl> <nl> arr - > num - = back ; <nl> void ccArrayFullRemoveArray ( ccArray * arr , ccArray * minusArr ) <nl> ccCArray * ccCArrayNew ( unsigned int capacity ) <nl> { <nl> if ( capacity = = 0 ) <nl> + { <nl> capacity = 1 ; <nl> - <nl> + } <nl> + <nl> ccCArray * arr = ( ccCArray * ) malloc ( sizeof ( ccCArray ) ) ; <nl> arr - > num = 0 ; <nl> arr - > arr = ( void * * ) malloc ( capacity * sizeof ( void * ) ) ; <nl> ccCArray * ccCArrayNew ( unsigned int capacity ) <nl> / * * Frees C array after removing all remaining values . Silently ignores NULL arr . * / <nl> void ccCArrayFree ( ccCArray * arr ) <nl> { <nl> - if ( arr = = NULL ) return ; <nl> - <nl> + if ( arr = = NULL ) <nl> + { <nl> + return ; <nl> + } <nl> ccCArrayRemoveAllValues ( arr ) ; <nl> <nl> free ( arr - > arr ) ; <nl> void ccCArrayFree ( ccCArray * arr ) <nl> / * * Doubles C array capacity * / <nl> void ccCArrayDoubleCapacity ( ccCArray * arr ) <nl> { <nl> - arr - > max * = 2 ; <nl> - void * * newArr = ( void * * ) realloc ( arr - > arr , arr - > max * sizeof ( void * ) ) ; <nl> - / / will fail when there ' s not enough memory <nl> - CCAssert ( newArr ! 
= NULL , " ccCArrayDoubleCapacity failed . Not enough memory " ) ; <nl> - arr - > arr = newArr ; <nl> + ccArrayDoubleCapacity ( ( ccArray * ) arr ) ; <nl> } <nl> <nl> / * * Increases array capacity such that max > = num + extra . * / <nl> void ccCArrayEnsureExtraCapacity ( ccCArray * arr , unsigned int extra ) <nl> { <nl> - while ( arr - > max < arr - > num + extra ) <nl> - ccCArrayDoubleCapacity ( arr ) ; <nl> + ccArrayEnsureExtraCapacity ( ( ccArray * ) arr , extra ) ; <nl> } <nl> <nl> / * * Returns index of first occurence of value , CC_INVALID_INDEX if value not found . * / <nl> void ccCArrayInsertValueAtIndex ( ccCArray * arr , void * value , unsigned int index ) <nl> CCAssert ( index < arr - > max , " ccCArrayInsertValueAtIndex : invalid index " ) ; <nl> <nl> unsigned int remaining = arr - > num - index ; <nl> - <nl> + / / make sure it has enough capacity <nl> + if ( arr - > num + 1 = = arr - > max ) <nl> + { <nl> + ccCArrayDoubleCapacity ( arr ) ; <nl> + } <nl> / / last Value doesn ' t need to be moved <nl> if ( remaining > 0 ) { <nl> / / tex coordinates <nl> void ccCArrayInsertValueAtIndex ( ccCArray * arr , void * value , unsigned int index ) <nl> / * * Appends an value . Bahaviour undefined if array doesn ' t have enough capacity . * / <nl> void ccCArrayAppendValue ( ccCArray * arr , void * value ) <nl> { <nl> - arr - > arr [ arr - > num ] = ( CCObject * ) value ; <nl> + arr - > arr [ arr - > num ] = value ; <nl> arr - > num + + ; <nl> + / / double the capacity for the next append action <nl> + / / if the num > = max <nl> + if ( arr - > num > = arr - > max ) <nl> + { <nl> + ccCArrayDoubleCapacity ( arr ) ; <nl> + } <nl> } <nl> <nl> / * * Appends an value . Capacity of arr is increased if needed . * / <nl> void ccCArrayRemoveValueAtIndex ( ccCArray * arr , unsigned int index ) <nl> unsigned int last ; <nl> <nl> for ( last = - - arr - > num ; index < last ; index + + ) <nl> + { <nl> arr - > arr [ index ] = arr - > arr [ index + 1 ] ; <nl> + } <nl> } <nl> <nl> / * * Removes value at specified index and fills the gap with the last value , <nl> void ccCArrayRemoveValue ( ccCArray * arr , void * value ) <nl> { <nl> unsigned int index = ccCArrayGetIndexOfValue ( arr , value ) ; <nl> if ( index ! = CC_INVALID_INDEX ) <nl> + { <nl> ccCArrayRemoveValueAtIndex ( arr , index ) ; <nl> + } <nl> } <nl> <nl> / * * Removes from arr all values in minusArr . For each Value in minusArr , the first matching instance in arr will be removed . <nl> void ccCArrayRemoveValue ( ccCArray * arr , void * value ) <nl> * / <nl> void ccCArrayRemoveArray ( ccCArray * arr , ccCArray * minusArr ) <nl> { <nl> - unsigned int i ; <nl> - <nl> - for ( i = 0 ; i < minusArr - > num ; i + + ) <nl> + for ( unsigned int i = 0 ; i < minusArr - > num ; i + + ) <nl> + { <nl> ccCArrayRemoveValue ( arr , minusArr - > arr [ i ] ) ; <nl> + } <nl> } <nl> <nl> / * * Removes from arr all values in minusArr . For each value in minusArr , all matching instances in arr will be removed . 
<nl> void ccCArrayRemoveArray ( ccCArray * arr , ccCArray * minusArr ) <nl> * / <nl> void ccCArrayFullRemoveArray ( ccCArray * arr , ccCArray * minusArr ) <nl> { <nl> - unsigned int i ; <nl> unsigned int back = 0 ; <nl> <nl> - for ( i = 0 ; i < arr - > num ; i + + ) { <nl> - if ( ccCArrayContainsValue ( minusArr , arr - > arr [ i ] ) ) { <nl> + for ( unsigned int i = 0 ; i < arr - > num ; i + + ) <nl> + { <nl> + if ( ccCArrayContainsValue ( minusArr , arr - > arr [ i ] ) ) <nl> + { <nl> back + + ; <nl> - } else <nl> + } <nl> + else <nl> + { <nl> arr - > arr [ i - back ] = arr - > arr [ i ] ; <nl> + } <nl> } <nl> <nl> arr - > num - = back ; <nl> } <nl> <nl> - / / used by mergesortL <nl> - void cc_pointerswap ( void * a , void * b , size_t width ) <nl> - { <nl> - void * tmp ; <nl> - tmp = * ( void * * ) a ; <nl> - * ( void * * ) a = * ( void * * ) b ; <nl> - * ( void * * ) b = tmp ; <nl> - } <nl> - <nl> - / / iterative mergesort arrd on <nl> - / / http : / / www . inf . fh - flensburg . de / lang / algorithmen / sortieren / merge / mergiter . htm <nl> - int cc_mergesortL ( ccCArray * array , size_t width , cc_comparator comparator ) <nl> - { <nl> - / / void * * arr = array - > arr ; <nl> - / / int i , j , k , s , m , n = array - > num ; <nl> - / / <nl> - / / void * B = malloc ( ( n / 2 + 1 ) * width ) ; <nl> - / / for ( s = 1 ; s < n ; s + = s ) <nl> - / / { <nl> - / / for ( m = n - 1 - s ; m > = 0 ; m - = s + s ) <nl> - / / { <nl> - / / int lo = MAX ( m - ( s + 1 ) , 0 ) ; <nl> - / / int hi = m + s ; <nl> - / / <nl> - / / j = lo ; <nl> - / / <nl> - / / if ( m - j > 0 ) <nl> - / / { <nl> - / / / / triggers a warning when compiled with ARC , B needs to be strong typed , for compiling for obj - c + + <nl> - / / / / memcpy aritmetics aren ' t allowed on void * types <nl> - / / / / explicitely casting didn ' t work <nl> - / / / / # pragma clang diagnostic push <nl> - / / / / # if defined ( __has_feature ) & & __has_feature ( objc_arc ) <nl> - / / / / # pragma clang diagnostic ignored " - Warc - non - pod - memaccess " <nl> - / / / / # endif <nl> - / / <nl> - / / memcpy ( B , & arr [ j ] , ( m - j ) * width ) ; <nl> - / / / / # pragma clang diagnostic pop <nl> - / / } <nl> - / / <nl> - / / i = 0 ; <nl> - / / j = m ; <nl> - / / k = lo ; <nl> - / / <nl> - / / while ( k < j & & j < = hi ) <nl> - / / { <nl> - / / if ( comparator ( & B [ i ] , & arr [ j ] ) < = 0 ) <nl> - / / { <nl> - / / cc_pointerswap ( & arr [ k + + ] , & B [ i + + ] , width ) ; <nl> - / / } <nl> - / / <nl> - / / else <nl> - / / { <nl> - / / cc_pointerswap ( & arr [ k + + ] , & arr [ j + + ] , width ) ; <nl> - / / } <nl> - / / } <nl> - / / <nl> - / / while ( k < j ) <nl> - / / cc_pointerswap ( & arr [ k + + ] , & B [ i + + ] , width ) ; <nl> - / / } <nl> - / / } <nl> - / / free ( B ) ; <nl> - return 0 ; <nl> - } <nl> - <nl> - void cc_insertionSort ( ccCArray * arr , cc_comparator comparator ) <nl> - { <nl> - / / It sorts source array in ascending order <nl> - <nl> - / / adaptive - performance adapts to the initial order of elements <nl> - / / stable - insertion sort retains relative order of the same elements <nl> - / / in - place - requires constant amount of additional space <nl> - / / online - new elements can be added during the sort <nl> - <nl> - int i , j , length = arr - > num ; <nl> - <nl> - void * * x = arr - > arr ; <nl> - void * temp ; <nl> - <nl> - / / insertion sort <nl> - for ( i = 1 ; i < length ; i + + ) <nl> - { <nl> - j = i ; <nl> - / / continue moving element downwards while order is descending <nl> - while ( j > 
0 & & ( comparator ( & x [ j - 1 ] , & x [ j ] ) = = CCOrderedDescending ) ) <nl> - { <nl> - temp = x [ j ] ; <nl> - x [ j ] = x [ j - 1 ] ; <nl> - x [ j - 1 ] = temp ; <nl> - j - - ; <nl> - } <nl> - } <nl> - } <nl> - <nl> NS_CC_END <nl> mmm a / cocos2dx / support / data_support / ccCArray . h <nl> ppp b / cocos2dx / support / data_support / ccCArray . h <nl> NS_CC_BEGIN <nl> <nl> # define CC_INVALID_INDEX 0xffffffff <nl> <nl> - <nl> - / / # pragma mark - <nl> - / / # pragma mark ccArray for Objects <nl> - <nl> / / Easy integration <nl> # define CCARRAYDATA_FOREACH ( __array__ , __object__ ) \ <nl> __object__ = __array__ - > arr [ 0 ] ; for ( unsigned int i = 0 , num = __array__ - > num ; i < num ; i + + , __object__ = __array__ - > arr [ i ] ) \ <nl> <nl> - / / # if defined ( __has_feature ) & & __has_feature ( objc_arc ) <nl> - / / typedef __strong CCObject * CCARRAY_ID ; <nl> - / / # else <nl> - typedef CCObject * CCARRAY_ID ; <nl> - / / # endif <nl> <nl> typedef struct _ccArray { <nl> unsigned int num , max ; <nl> - CCARRAY_ID * arr ; <nl> + CCObject * * arr ; <nl> } ccArray ; <nl> <nl> - enum CCComparisonResult <nl> - { <nl> - CCOrderedDescending = 1 , <nl> - CCOrderedAscending = - 1 , <nl> - CCOrderedSame = 0 <nl> - } ; <nl> - <nl> - typedef int ( * cc_comparator ) ( const void * , const void * ) ; <nl> - <nl> / * * Allocates and initializes a new array with specified capacity * / <nl> ccArray * ccArrayNew ( unsigned int capacity ) ; <nl> <nl> void ccArrayFullRemoveArray ( ccArray * arr , ccArray * minusArr ) ; <nl> <nl> typedef struct _ccCArray { <nl> unsigned int num , max ; <nl> - void * * arr ; <nl> + void * * arr ; <nl> } ccCArray ; <nl> <nl> / * * Allocates and initializes a new C array with specified capacity * / <nl> void ccCArrayRemoveArray ( ccCArray * arr , ccCArray * minusArr ) ; <nl> * / <nl> void ccCArrayFullRemoveArray ( ccCArray * arr , ccCArray * minusArr ) ; <nl> <nl> - / / iterative mergesort arrd on <nl> - / / http : / / www . inf . fh - flensburg . de / lang / algorithmen / sortieren / merge / mergiter . htm <nl> - int cc_mergesortL ( ccCArray * array , size_t width , cc_comparator comparator ) ; <nl> - <nl> - void cc_insertionSort ( ccCArray * arr , cc_comparator comparator ) ; <nl> - <nl> - void cc_pointerswap ( void * a , void * b , size_t width ) ; <nl> - <nl> NS_CC_END <nl> <nl> # endif / / CC_ARRAY_H <nl> mmm a / cocos2dx / textures / CCTexture2D . h <nl> ppp b / cocos2dx / textures / CCTexture2D . h <nl> class CC_DLL CCTexture2D : public CCObject <nl> / * * whether or not the texture has their Alpha premultiplied * / <nl> CC_PROPERTY_READONLY ( bool , m_bHasPremultipliedAlpha , HasPremultipliedAlpha ) ; <nl> <nl> - CC_PROPERTY_READONLY ( bool , m_bHasMipmaps , HasMipmaps ) ; <nl> + CC_SYNTHESIZE_READONLY ( bool , m_bHasMipmaps , HasMipmaps ) ; <nl> <nl> / * * shader program used by drawAtPoint and drawInRect * / <nl> CC_PROPERTY ( CCGLProgram * , m_pShaderProgram , ShaderProgram ) ; <nl> mmm a / cocos2dx / textures / CCTexturePVR . h <nl> ppp b / cocos2dx / textures / CCTexturePVR . h <nl> class CCTexturePVR : public CCObject <nl> How many mipmaps do we have . It must be at least one <nl> when proper initialization finishes <nl> * / <nl> - CC_PROPERTY_READONLY ( unsigned int , m_uNumberOfMipmaps , NumberOfMipmaps ) ; <nl> + CC_SYNTHESIZE_READONLY ( unsigned int , m_uNumberOfMipmaps , NumberOfMipmaps ) ; <nl> <nl> / * <nl> Makrs for mipmaps . Each entry contains position in file <nl>
Merge pull request from dumganhar / gles20
cocos2d/cocos2d-x
36e393a5f4fbf1d6dd515cd506cb93bf80cb5573
2012-06-08T09:20:27Z
mmm a / db / merge_test . cc <nl> ppp b / db / merge_test . cc <nl> class Counters { <nl> void assert_add ( const string & key , uint64_t value ) { <nl> int result = add ( key , value ) ; <nl> assert ( result ) ; <nl> - if ( result = = 0 ) exit ( 1 ) ; / / Disable unused variable warning . <nl> + if ( result = = 0 ) exit ( 1 ) ; / / Disable unused variable warning . <nl> } <nl> } ; <nl> <nl> void runTest ( int argc , const string & dbname , const bool use_ttl = false ) { <nl> DestroyDB ( dbname , Options ( ) ) ; <nl> } <nl> <nl> + / * Temporary remove this test <nl> { <nl> cout < < " Test merge - operator not set after reopen ( recovery case ) \ n " ; <nl> { <nl> void runTest ( int argc , const string & dbname , const bool use_ttl = false ) { <nl> DB * reopen_db ; <nl> ASSERT_TRUE ( DB : : Open ( Options ( ) , dbname , & reopen_db ) . IsInvalidArgument ( ) ) ; <nl> } <nl> + * / <nl> } <nl> } / / namespace <nl> <nl>
Temporarily remove the last test in merge_test
facebook/rocksdb
2105ecac4d8f74fdf2e0b7514ed04269c9f4c176
2014-07-31T18:20:49Z
mmm a / Doxygen / avocado . doxy . in <nl> ppp b / Doxygen / avocado . doxy . in <nl> INPUT = \ <nl> @ srcdir @ / RestServer \ <nl> @ srcdir @ / ShapedJson \ <nl> @ srcdir @ / V8 \ <nl> - @ srcdir @ / VocBase <nl> + @ srcdir @ / VocBase \ <nl> + @ srcdir @ / QL <nl> <nl> # This tag can be used to specify the character encoding of the source files <nl> # that doxygen parses . Internally doxygen uses the UTF - 8 encoding , which is <nl> mmm a / RestServer / user - manual - server . dox <nl> ppp b / RestServer / user - manual - server . dox <nl> <nl> / / / < / li > <nl> / / / < li > @ ref AvocadoScript <nl> / / / < ol > <nl> - / / / < li > @ ref GeoCoordinates <nl> + / / / < li > GeoCoordinates <nl> / / / < / li > <nl> / / / < / ol > <nl> / / / < / li > <nl> mmm a / configure <nl> ppp b / configure <nl> fi <nl> if test " $ { enable_relative + set } " = set ; then : <nl> enableval = $ enable_relative ; tr_RELATIVE = " $ { enableval : - devel } " <nl> else <nl> - tr_RELATIVE = devel <nl> + tr_RELATIVE = no <nl> <nl> fi <nl> <nl>
fixed documentation
arangodb/arangodb
cc2ea602850204edd0c9be93864139e1542e5381
2012-03-21T10:29:25Z
mmm a / arangod / Aql / ExecutionNode . h <nl> ppp b / arangod / Aql / ExecutionNode . h <nl> namespace triagens { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> VarOverview const * getVarOverview ( ) const { <nl> + TRI_ASSERT ( _varOverview ! = nullptr ) ; <nl> return _varOverview . get ( ) ; <nl> } <nl> <nl>
Assert whether we have a varoverview .
arangodb/arangodb
8f74eeff9709be21302860aa58ac627f16cbae79
2014-10-06T11:20:31Z
mmm a / docs / tutorial / interfaces . md <nl> ppp b / docs / tutorial / interfaces . md <nl> Use CPU : <nl> <nl> Use GPU and specify its gpu_id : <nl> <nl> - caffe . set_mode_gpu ( ) ; <nl> caffe . set_device ( gpu_id ) ; <nl> + caffe . set_mode_gpu ( ) ; <nl> <nl> # # # # Create a network and access its layers and blobs <nl> <nl> mmm a / python / train . py <nl> ppp b / python / train . py <nl> def show_time ( ) : <nl> <nl> <nl> def solve ( proto , snapshot , gpus , timing , uid , rank ) : <nl> - caffe . set_mode_gpu ( ) <nl> caffe . set_device ( gpus [ rank ] ) <nl> + caffe . set_mode_gpu ( ) <nl> caffe . set_solver_count ( len ( gpus ) ) <nl> caffe . set_solver_rank ( rank ) <nl> caffe . set_multiprocess ( True ) <nl>
Merge pull request from xerus / python_gpu
BVLC/caffe
99bd99795dcdf0b1d3086a8d67ab1782a8a08383
2018-08-21T12:53:16Z
mmm a / test / DebugInfo / DumpDeclFromMangledName . swift <nl> ppp b / test / DebugInfo / DumpDeclFromMangledName . swift <nl> <nl> / / RUN : sed - ne ' / mmm > / s / ^ . * mmm > * / / p ' < % S / Inputs / decl - reconstr - names . txt > % t . check <nl> <nl> / / RUN : % target - build - swift - emit - executable % s - g - o % t / DeclReconstr - emit - module <nl> - / / RUN : % lldb - moduleimport - test % t / DeclReconstr \ <nl> + / / RUN : % lldb - moduleimport - test % t / DeclReconstr - target - triple % target - triple \ <nl> / / RUN : - decl - from - mangled = % t . input > % t . output 2 > & 1 <nl> / / RUN : diff % t . check % t . output <nl> <nl> mmm a / tools / lldb - moduleimport - test / lldb - moduleimport - test . cpp <nl> ppp b / tools / lldb - moduleimport - test / lldb - moduleimport - test . cpp <nl> int main ( int argc , char * * argv ) { <nl> llvm : : cl : : opt < std : : string > DumpTypeFromMangled ( <nl> " type - from - mangled " , llvm : : cl : : desc ( " dump type from mangled names list " ) ) ; <nl> <nl> + / / FIXME : we should infer this from the module . <nl> + llvm : : cl : : opt < std : : string > TargetTriple ( <nl> + " target - triple " , llvm : : cl : : desc ( " specify target triple " ) ) ; <nl> + <nl> llvm : : cl : : ParseCommandLineOptions ( argc , argv ) ; <nl> / / Unregister our options so they don ' t interfere with the command line <nl> / / parsing in CodeGen / BackendUtil . cpp . <nl> int main ( int argc , char * * argv ) { <nl> DumpTypeFromMangled . removeArgument ( ) ; <nl> SDK . removeArgument ( ) ; <nl> InputNames . removeArgument ( ) ; <nl> + TargetTriple . removeArgument ( ) ; <nl> <nl> / / If no SDK was specified via - sdk , check environment variable SDKROOT . <nl> if ( SDK . getNumOccurrences ( ) = = 0 ) { <nl> int main ( int argc , char * * argv ) { <nl> reinterpret_cast < void * > ( & anchorForGetMainExecutable ) ) ) ; <nl> <nl> Invocation . setSDKPath ( SDK ) ; <nl> - Invocation . setTargetTriple ( llvm : : sys : : getDefaultTargetTriple ( ) ) ; <nl> + <nl> + / / FIXME : we should infer this from the module . <nl> + if ( ! TargetTriple . empty ( ) ) <nl> + Invocation . setTargetTriple ( TargetTriple ) ; <nl> + else <nl> + Invocation . setTargetTriple ( llvm : : sys : : getDefaultTargetTriple ( ) ) ; <nl> + <nl> Invocation . setModuleName ( " lldbtest " ) ; <nl> Invocation . getClangImporterOptions ( ) . ModuleCachePath = ModuleCachePath ; <nl> Invocation . setImportSearchPaths ( ImportPaths ) ; <nl>
[ lldb - moduleimport - test ] Pass a target triple as a stopgap solution .
apple/swift
5e463b1bc040c766193779a85403bfce8d34d2df
2018-03-23T20:13:26Z
mmm a / lib / Serialization / Serialization . cpp <nl> ppp b / lib / Serialization / Serialization . cpp <nl> <nl> # include " swift / AST / ProtocolConformance . h " <nl> # include " swift / AST / RawComment . h " <nl> # include " swift / AST / TypeCheckRequests . h " <nl> + # include " swift / AST / TypeVisitor . h " <nl> # include " swift / Basic / Dwarf . h " <nl> # include " swift / Basic / FileSystem . h " <nl> # include " swift / Basic / STLExtras . h " <nl> static uint8_t getRawStableDefaultArgumentKind ( swift : : DefaultArgumentKind kind ) <nl> llvm_unreachable ( " Unhandled DefaultArgumentKind in switch . " ) ; <nl> } <nl> <nl> - static uint8_t getRawStableMetatypeRepresentation ( AnyMetatypeType * metatype ) { <nl> + static uint8_t <nl> + getRawStableMetatypeRepresentation ( const AnyMetatypeType * metatype ) { <nl> if ( ! metatype - > hasRepresentation ( ) ) { <nl> return serialization : : MetatypeRepresentation : : MR_None ; <nl> } <nl> static TypeAliasDecl * findTypeAliasForBuiltin ( ASTContext & Ctx , Type T ) { <nl> return cast < TypeAliasDecl > ( CurModuleResults [ 0 ] ) ; <nl> } <nl> <nl> - void Serializer : : writeType ( Type ty ) { <nl> - using namespace decls_block ; <nl> - PrettyStackTraceType traceRAII ( ty - > getASTContext ( ) , " serializing " , ty ) ; <nl> + class Serializer : : TypeSerializer : public TypeVisitor < TypeSerializer > { <nl> + Serializer & S ; <nl> <nl> - auto id = DeclAndTypeIDs [ ty ] ; <nl> - assert ( id ! = 0 & & " type not referenced properly " ) ; <nl> - ( void ) id ; <nl> + public : <nl> + explicit TypeSerializer ( Serializer & S ) : S ( S ) { } <nl> <nl> - assert ( ( id - 1 ) = = TypeOffsets . size ( ) ) ; <nl> - assert ( ( DeclOffsets . empty ( ) | | DeclOffsets . back ( ) ! = Out . GetCurrentBitNo ( ) ) & & <nl> - " encoding Decl and Type to the same offset " ) ; <nl> - TypeOffsets . push_back ( Out . GetCurrentBitNo ( ) ) ; <nl> - SWIFT_DEFER { <nl> - / / This is important enough to leave on in Release builds . <nl> - if ( TypeOffsets . back ( ) = = Out . GetCurrentBitNo ( ) ) { <nl> - llvm : : PrettyStackTraceString message ( " failed to serialize anything " ) ; <nl> - abort ( ) ; <nl> - } <nl> - } ; <nl> + / / / If this gets referenced , we forgot to handle a type . 
<nl> + void visitType ( const TypeBase * ) = delete ; <nl> <nl> - switch ( ty - > getKind ( ) ) { <nl> - case TypeKind : : Error : <nl> - case TypeKind : : Unresolved : <nl> + void visitErrorType ( const ErrorType * ) { <nl> llvm_unreachable ( " should not serialize an invalid type " ) ; <nl> + } <nl> + <nl> + void visitUnresolvedType ( const UnresolvedType * ) { <nl> + llvm_unreachable ( " should not serialize an invalid type " ) ; <nl> + } <nl> <nl> - case TypeKind : : BuiltinInteger : <nl> - case TypeKind : : BuiltinIntegerLiteral : <nl> - case TypeKind : : BuiltinFloat : <nl> - case TypeKind : : BuiltinRawPointer : <nl> - case TypeKind : : BuiltinNativeObject : <nl> - case TypeKind : : BuiltinBridgeObject : <nl> - case TypeKind : : BuiltinUnknownObject : <nl> - case TypeKind : : BuiltinUnsafeValueBuffer : <nl> - case TypeKind : : BuiltinVector : <nl> - case TypeKind : : SILToken : { <nl> + void visitModuleType ( const ModuleType * ) { <nl> + llvm_unreachable ( " modules are currently not first - class values " ) ; <nl> + } <nl> + <nl> + void visitInOutType ( const InOutType * ) { <nl> + llvm_unreachable ( " inout types are only used in function type parameters " ) ; <nl> + } <nl> + <nl> + void visitLValueType ( const LValueType * ) { <nl> + llvm_unreachable ( " lvalue types are only used in function bodies " ) ; <nl> + } <nl> + <nl> + void visitTypeVariableType ( const TypeVariableType * ) { <nl> + llvm_unreachable ( " type variables should not escape the type checker " ) ; <nl> + } <nl> + <nl> + void visitBuiltinTypeImpl ( Type ty ) { <nl> + using namespace decls_block ; <nl> TypeAliasDecl * typeAlias = <nl> - findTypeAliasForBuiltin ( M - > getASTContext ( ) , ty ) ; <nl> + findTypeAliasForBuiltin ( S . M - > getASTContext ( ) , ty ) ; <nl> <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ BuiltinAliasTypeLayout : : Code ] ; <nl> - BuiltinAliasTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode , <nl> - addDeclRef ( typeAlias , <nl> - / * allowTypeAliasXRef * / true ) , <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ BuiltinAliasTypeLayout : : Code ] ; <nl> + BuiltinAliasTypeLayout : : emitRecord ( S . Out , S . ScratchRecord , abbrCode , <nl> + S . addDeclRef ( typeAlias , <nl> + / * allowTypeAliasXRef * / true ) , <nl> TypeID ( ) ) ; <nl> - break ; <nl> } <nl> - case TypeKind : : TypeAlias : { <nl> - auto alias = cast < TypeAliasType > ( ty . getPointer ( ) ) ; <nl> + <nl> + void visitBuiltinType ( BuiltinType * ty ) { <nl> + visitBuiltinTypeImpl ( ty ) ; <nl> + } <nl> + <nl> + void visitSILTokenType ( SILTokenType * ty ) { <nl> + / / This is serialized like a BuiltinType , even though it isn ' t one . <nl> + visitBuiltinTypeImpl ( ty ) ; <nl> + } <nl> + <nl> + void visitTypeAliasType ( const TypeAliasType * alias ) { <nl> + using namespace decls_block ; <nl> const TypeAliasDecl * typeAlias = alias - > getDecl ( ) ; <nl> auto underlyingType = typeAlias - > getUnderlyingTypeLoc ( ) . getType ( ) ; <nl> <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ TypeAliasTypeLayout : : Code ] ; <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ TypeAliasTypeLayout : : Code ] ; <nl> TypeAliasTypeLayout : : emitRecord ( <nl> - Out , ScratchRecord , abbrCode , <nl> - addDeclRef ( typeAlias , / * allowTypeAliasXRef * / true ) , <nl> - addTypeRef ( alias - > getParent ( ) ) , <nl> - addTypeRef ( underlyingType ) , <nl> - addTypeRef ( alias - > getSinglyDesugaredType ( ) ) , <nl> - addSubstitutionMapRef ( alias - > getSubstitutionMap ( ) ) ) ; <nl> - break ; <nl> + S . Out , S . 
ScratchRecord , abbrCode , <nl> + S . addDeclRef ( typeAlias , / * allowTypeAliasXRef * / true ) , <nl> + S . addTypeRef ( alias - > getParent ( ) ) , <nl> + S . addTypeRef ( underlyingType ) , <nl> + S . addTypeRef ( alias - > getSinglyDesugaredType ( ) ) , <nl> + S . addSubstitutionMapRef ( alias - > getSubstitutionMap ( ) ) ) ; <nl> } <nl> <nl> - case TypeKind : : Paren : { <nl> - auto parenTy = cast < ParenType > ( ty . getPointer ( ) ) ; <nl> + void visitParenType ( const ParenType * parenTy ) { <nl> + using namespace decls_block ; <nl> assert ( parenTy - > getParameterFlags ( ) . isNone ( ) ) ; <nl> <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ ParenTypeLayout : : Code ] ; <nl> - ParenTypeLayout : : emitRecord ( <nl> - Out , ScratchRecord , abbrCode , addTypeRef ( parenTy - > getUnderlyingType ( ) ) ) ; <nl> - break ; <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ ParenTypeLayout : : Code ] ; <nl> + ParenTypeLayout : : emitRecord ( S . Out , S . ScratchRecord , abbrCode , <nl> + S . addTypeRef ( parenTy - > getUnderlyingType ( ) ) ) ; <nl> } <nl> <nl> - case TypeKind : : Tuple : { <nl> - auto tupleTy = cast < TupleType > ( ty . getPointer ( ) ) ; <nl> - <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ TupleTypeLayout : : Code ] ; <nl> - TupleTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode ) ; <nl> + void visitTupleType ( const TupleType * tupleTy ) { <nl> + using namespace decls_block ; <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ TupleTypeLayout : : Code ] ; <nl> + TupleTypeLayout : : emitRecord ( S . Out , S . ScratchRecord , abbrCode ) ; <nl> <nl> - abbrCode = DeclTypeAbbrCodes [ TupleTypeEltLayout : : Code ] ; <nl> + abbrCode = S . DeclTypeAbbrCodes [ TupleTypeEltLayout : : Code ] ; <nl> for ( auto & elt : tupleTy - > getElements ( ) ) { <nl> assert ( elt . getParameterFlags ( ) . isNone ( ) ) ; <nl> TupleTypeEltLayout : : emitRecord ( <nl> - Out , ScratchRecord , abbrCode , <nl> - addDeclBaseNameRef ( elt . getName ( ) ) , <nl> - addTypeRef ( elt . getType ( ) ) ) ; <nl> + S . Out , S . ScratchRecord , abbrCode , <nl> + S . addDeclBaseNameRef ( elt . getName ( ) ) , <nl> + S . addTypeRef ( elt . getType ( ) ) ) ; <nl> } <nl> - <nl> - break ; <nl> } <nl> <nl> - case TypeKind : : Struct : <nl> - case TypeKind : : Enum : <nl> - case TypeKind : : Class : <nl> - case TypeKind : : Protocol : { <nl> - auto nominalTy = cast < NominalType > ( ty . getPointer ( ) ) ; <nl> - <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ NominalTypeLayout : : Code ] ; <nl> - NominalTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode , <nl> - addDeclRef ( nominalTy - > getDecl ( ) ) , <nl> - addTypeRef ( nominalTy - > getParent ( ) ) ) ; <nl> - break ; <nl> + void visitNominalType ( const NominalType * nominalTy ) { <nl> + using namespace decls_block ; <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ NominalTypeLayout : : Code ] ; <nl> + NominalTypeLayout : : emitRecord ( S . Out , S . ScratchRecord , abbrCode , <nl> + S . addDeclRef ( nominalTy - > getDecl ( ) ) , <nl> + S . addTypeRef ( nominalTy - > getParent ( ) ) ) ; <nl> } <nl> <nl> - case TypeKind : : ExistentialMetatype : { <nl> - auto metatypeTy = cast < ExistentialMetatypeType > ( ty . getPointer ( ) ) ; <nl> - <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ ExistentialMetatypeTypeLayout : : Code ] ; <nl> + void visitExistentialMetatypeType ( const ExistentialMetatypeType * metatypeTy ) { <nl> + using namespace decls_block ; <nl> + unsigned abbrCode = S . 
DeclTypeAbbrCodes [ ExistentialMetatypeTypeLayout : : Code ] ; <nl> <nl> / / Map the metatype representation . <nl> auto repr = getRawStableMetatypeRepresentation ( metatypeTy ) ; <nl> - ExistentialMetatypeTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode , <nl> - addTypeRef ( metatypeTy - > getInstanceType ( ) ) , <nl> - static_cast < uint8_t > ( repr ) ) ; <nl> - break ; <nl> + ExistentialMetatypeTypeLayout : : emitRecord ( <nl> + S . Out , S . ScratchRecord , abbrCode , <nl> + S . addTypeRef ( metatypeTy - > getInstanceType ( ) ) , <nl> + static_cast < uint8_t > ( repr ) ) ; <nl> } <nl> <nl> - case TypeKind : : Metatype : { <nl> - auto metatypeTy = cast < MetatypeType > ( ty . getPointer ( ) ) ; <nl> - <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ MetatypeTypeLayout : : Code ] ; <nl> + void visitMetatypeType ( const MetatypeType * metatypeTy ) { <nl> + using namespace decls_block ; <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ MetatypeTypeLayout : : Code ] ; <nl> <nl> / / Map the metatype representation . <nl> auto repr = getRawStableMetatypeRepresentation ( metatypeTy ) ; <nl> - MetatypeTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode , <nl> - addTypeRef ( metatypeTy - > getInstanceType ( ) ) , <nl> + MetatypeTypeLayout : : emitRecord ( S . Out , S . ScratchRecord , abbrCode , <nl> + S . addTypeRef ( metatypeTy - > getInstanceType ( ) ) , <nl> static_cast < uint8_t > ( repr ) ) ; <nl> - break ; <nl> } <nl> <nl> - case TypeKind : : Module : <nl> - llvm_unreachable ( " modules are currently not first - class values " ) ; <nl> - <nl> - case TypeKind : : DynamicSelf : { <nl> - auto dynamicSelfTy = cast < DynamicSelfType > ( ty . getPointer ( ) ) ; <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ DynamicSelfTypeLayout : : Code ] ; <nl> - DynamicSelfTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode , <nl> - addTypeRef ( dynamicSelfTy - > getSelfType ( ) ) ) ; <nl> - break ; <nl> + void visitDynamicSelfType ( const DynamicSelfType * dynamicSelfTy ) { <nl> + using namespace decls_block ; <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ DynamicSelfTypeLayout : : Code ] ; <nl> + DynamicSelfTypeLayout : : emitRecord ( <nl> + S . Out , S . ScratchRecord , abbrCode , <nl> + S . addTypeRef ( dynamicSelfTy - > getSelfType ( ) ) ) ; <nl> } <nl> - <nl> - case TypeKind : : PrimaryArchetype : { <nl> - auto archetypeTy = cast < PrimaryArchetypeType > ( ty . getPointer ( ) ) ; <nl> + <nl> + void visitPrimaryArchetypeType ( const PrimaryArchetypeType * archetypeTy ) { <nl> + using namespace decls_block ; <nl> auto env = archetypeTy - > getGenericEnvironment ( ) ; <nl> <nl> - GenericEnvironmentID envID = addGenericEnvironmentRef ( env ) ; <nl> + GenericEnvironmentID envID = S . addGenericEnvironmentRef ( env ) ; <nl> auto interfaceType = archetypeTy - > getInterfaceType ( ) <nl> - > castTo < GenericTypeParamType > ( ) ; <nl> <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ PrimaryArchetypeTypeLayout : : Code ] ; <nl> - PrimaryArchetypeTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode , <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ PrimaryArchetypeTypeLayout : : Code ] ; <nl> + PrimaryArchetypeTypeLayout : : emitRecord ( S . Out , S . ScratchRecord , abbrCode , <nl> envID , <nl> interfaceType - > getDepth ( ) , <nl> interfaceType - > getIndex ( ) ) ; <nl> - break ; <nl> } <nl> <nl> - case TypeKind : : OpenedArchetype : { <nl> - auto archetypeTy = cast < OpenedArchetypeType > ( ty . 
getPointer ( ) ) ; <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ OpenedArchetypeTypeLayout : : Code ] ; <nl> - OpenedArchetypeTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode , <nl> - addTypeRef ( archetypeTy - > getOpenedExistentialType ( ) ) ) ; <nl> - break ; <nl> + void visitOpenedArchetypeType ( const OpenedArchetypeType * archetypeTy ) { <nl> + using namespace decls_block ; <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ OpenedArchetypeTypeLayout : : Code ] ; <nl> + OpenedArchetypeTypeLayout : : emitRecord ( <nl> + S . Out , S . ScratchRecord , abbrCode , <nl> + S . addTypeRef ( archetypeTy - > getOpenedExistentialType ( ) ) ) ; <nl> } <nl> <nl> - case TypeKind : : OpaqueTypeArchetype : { <nl> - auto archetypeTy = cast < OpaqueTypeArchetypeType > ( ty . getPointer ( ) ) ; <nl> - auto declID = addDeclRef ( archetypeTy - > getDecl ( ) ) ; <nl> - auto substMapID = addSubstitutionMapRef ( archetypeTy - > getSubstitutions ( ) ) ; <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ OpaqueArchetypeTypeLayout : : Code ] ; <nl> - OpaqueArchetypeTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode , <nl> + void <nl> + visitOpaqueTypeArchetypeType ( const OpaqueTypeArchetypeType * archetypeTy ) { <nl> + using namespace decls_block ; <nl> + auto declID = S . addDeclRef ( archetypeTy - > getDecl ( ) ) ; <nl> + auto substMapID = S . addSubstitutionMapRef ( archetypeTy - > getSubstitutions ( ) ) ; <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ OpaqueArchetypeTypeLayout : : Code ] ; <nl> + OpaqueArchetypeTypeLayout : : emitRecord ( S . Out , S . ScratchRecord , abbrCode , <nl> declID , substMapID ) ; <nl> - break ; <nl> } <nl> - case TypeKind : : NestedArchetype : { <nl> - auto archetypeTy = cast < NestedArchetypeType > ( ty . getPointer ( ) ) ; <nl> - auto rootTypeID = addTypeRef ( archetypeTy - > getRoot ( ) ) ; <nl> - auto interfaceTypeID = addTypeRef ( archetypeTy - > getInterfaceType ( ) ) ; <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ NestedArchetypeTypeLayout : : Code ] ; <nl> - NestedArchetypeTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode , <nl> + <nl> + void visitNestedArchetypeType ( const NestedArchetypeType * archetypeTy ) { <nl> + using namespace decls_block ; <nl> + auto rootTypeID = S . addTypeRef ( archetypeTy - > getRoot ( ) ) ; <nl> + auto interfaceTypeID = S . addTypeRef ( archetypeTy - > getInterfaceType ( ) ) ; <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ NestedArchetypeTypeLayout : : Code ] ; <nl> + NestedArchetypeTypeLayout : : emitRecord ( S . Out , S . ScratchRecord , abbrCode , <nl> rootTypeID , interfaceTypeID ) ; <nl> - break ; <nl> } <nl> <nl> - case TypeKind : : GenericTypeParam : { <nl> - auto genericParam = cast < GenericTypeParamType > ( ty . getPointer ( ) ) ; <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ GenericTypeParamTypeLayout : : Code ] ; <nl> + void visitGenericTypeParamType ( const GenericTypeParamType * genericParam ) { <nl> + using namespace decls_block ; <nl> + <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ GenericTypeParamTypeLayout : : Code ] ; <nl> DeclID declIDOrDepth ; <nl> unsigned indexPlusOne ; <nl> if ( genericParam - > getDecl ( ) & & <nl> ! ( genericParam - > getDecl ( ) - > getDeclContext ( ) - > isModuleScopeContext ( ) & & <nl> - isDeclXRef ( genericParam - > getDecl ( ) ) ) ) { <nl> - declIDOrDepth = addDeclRef ( genericParam - > getDecl ( ) ) ; <nl> + S . isDeclXRef ( genericParam - > getDecl ( ) ) ) ) { <nl> + declIDOrDepth = S . 
addDeclRef ( genericParam - > getDecl ( ) ) ; <nl> indexPlusOne = 0 ; <nl> } else { <nl> declIDOrDepth = genericParam - > getDepth ( ) ; <nl> indexPlusOne = genericParam - > getIndex ( ) + 1 ; <nl> } <nl> - GenericTypeParamTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode , <nl> + GenericTypeParamTypeLayout : : emitRecord ( S . Out , S . ScratchRecord , abbrCode , <nl> declIDOrDepth , indexPlusOne ) ; <nl> - break ; <nl> } <nl> <nl> - case TypeKind : : DependentMember : { <nl> - auto dependent = cast < DependentMemberType > ( ty . getPointer ( ) ) ; <nl> - <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ DependentMemberTypeLayout : : Code ] ; <nl> + void visitDependentMemberType ( const DependentMemberType * dependent ) { <nl> + using namespace decls_block ; <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ DependentMemberTypeLayout : : Code ] ; <nl> assert ( dependent - > getAssocType ( ) & & " Unchecked dependent member type " ) ; <nl> DependentMemberTypeLayout : : emitRecord ( <nl> - Out , ScratchRecord , abbrCode , <nl> - addTypeRef ( dependent - > getBase ( ) ) , <nl> - addDeclRef ( dependent - > getAssocType ( ) ) ) ; <nl> - break ; <nl> + S . Out , S . ScratchRecord , abbrCode , <nl> + S . addTypeRef ( dependent - > getBase ( ) ) , <nl> + S . addDeclRef ( dependent - > getAssocType ( ) ) ) ; <nl> } <nl> <nl> - case TypeKind : : Function : <nl> - case TypeKind : : GenericFunction : { <nl> - auto * fnTy = cast < AnyFunctionType > ( ty . getPointer ( ) ) ; <nl> + void visitAnyFunctionType ( const AnyFunctionType * fnTy ) { <nl> + using namespace decls_block ; <nl> <nl> if ( isa < FunctionType > ( fnTy ) ) { <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ FunctionTypeLayout : : Code ] ; <nl> - FunctionTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode , <nl> - addTypeRef ( fnTy - > getResult ( ) ) , <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ FunctionTypeLayout : : Code ] ; <nl> + FunctionTypeLayout : : emitRecord ( S . Out , S . ScratchRecord , abbrCode , <nl> + S . addTypeRef ( fnTy - > getResult ( ) ) , <nl> getRawStableFunctionTypeRepresentation ( fnTy - > getRepresentation ( ) ) , <nl> fnTy - > isNoEscape ( ) , <nl> fnTy - > throws ( ) ) ; <nl> void Serializer : : writeType ( Type ty ) { <nl> assert ( ! fnTy - > isNoEscape ( ) ) ; <nl> <nl> auto * genericSig = cast < GenericFunctionType > ( fnTy ) - > getGenericSignature ( ) ; <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ GenericFunctionTypeLayout : : Code ] ; <nl> - GenericFunctionTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode , <nl> - addTypeRef ( fnTy - > getResult ( ) ) , <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ GenericFunctionTypeLayout : : Code ] ; <nl> + GenericFunctionTypeLayout : : emitRecord ( S . Out , S . ScratchRecord , abbrCode , <nl> + S . addTypeRef ( fnTy - > getResult ( ) ) , <nl> getRawStableFunctionTypeRepresentation ( fnTy - > getRepresentation ( ) ) , <nl> fnTy - > throws ( ) , <nl> - addGenericSignatureRef ( genericSig ) ) ; <nl> + S . addGenericSignatureRef ( genericSig ) ) ; <nl> } <nl> <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ FunctionParamLayout : : Code ] ; <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ FunctionParamLayout : : Code ] ; <nl> for ( auto & param : fnTy - > getParams ( ) ) { <nl> auto paramFlags = param . getParameterFlags ( ) ; <nl> auto rawOwnership = <nl> getRawStableValueOwnership ( paramFlags . getValueOwnership ( ) ) ; <nl> FunctionParamLayout : : emitRecord ( <nl> - Out , ScratchRecord , abbrCode , addDeclBaseNameRef ( param . 
getLabel ( ) ) , <nl> - addTypeRef ( param . getPlainType ( ) ) , paramFlags . isVariadic ( ) , <nl> + S . Out , S . ScratchRecord , abbrCode , <nl> + S . addDeclBaseNameRef ( param . getLabel ( ) ) , <nl> + S . addTypeRef ( param . getPlainType ( ) ) , paramFlags . isVariadic ( ) , <nl> paramFlags . isAutoClosure ( ) , rawOwnership ) ; <nl> } <nl> - <nl> - break ; <nl> } <nl> - <nl> - case TypeKind : : SILBlockStorage : { <nl> - auto storageTy = cast < SILBlockStorageType > ( ty . getPointer ( ) ) ; <nl> - <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ SILBlockStorageTypeLayout : : Code ] ; <nl> - SILBlockStorageTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode , <nl> - addTypeRef ( storageTy - > getCaptureType ( ) ) ) ; <nl> - break ; <nl> + <nl> + void visitSILBlockStorageType ( const SILBlockStorageType * storageTy ) { <nl> + using namespace decls_block ; <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ SILBlockStorageTypeLayout : : Code ] ; <nl> + SILBlockStorageTypeLayout : : emitRecord ( <nl> + S . Out , S . ScratchRecord , abbrCode , <nl> + S . addTypeRef ( storageTy - > getCaptureType ( ) ) ) ; <nl> } <nl> - <nl> - case TypeKind : : SILBox : { <nl> - auto boxTy = cast < SILBoxType > ( ty . getPointer ( ) ) ; <nl> <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ SILBoxTypeLayout : : Code ] ; <nl> - SILLayoutID layoutRef = addSILLayoutRef ( boxTy - > getLayout ( ) ) ; <nl> + void visitSILBoxType ( const SILBoxType * boxTy ) { <nl> + using namespace decls_block ; <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ SILBoxTypeLayout : : Code ] ; <nl> + SILLayoutID layoutRef = S . addSILLayoutRef ( boxTy - > getLayout ( ) ) ; <nl> <nl> - SILBoxTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode , layoutRef , <nl> - addSubstitutionMapRef ( boxTy - > getSubstitutions ( ) ) ) ; <nl> - break ; <nl> + SILBoxTypeLayout : : emitRecord ( <nl> + S . Out , S . ScratchRecord , abbrCode , layoutRef , <nl> + S . addSubstitutionMapRef ( boxTy - > getSubstitutions ( ) ) ) ; <nl> } <nl> - <nl> - case TypeKind : : SILFunction : { <nl> - auto fnTy = cast < SILFunctionType > ( ty . getPointer ( ) ) ; <nl> + <nl> + void visitSILFunctionType ( const SILFunctionType * fnTy ) { <nl> + using namespace decls_block ; <nl> <nl> auto representation = fnTy - > getRepresentation ( ) ; <nl> auto stableRepresentation = <nl> getRawStableSILFunctionTypeRepresentation ( representation ) ; <nl> - <nl> + <nl> SmallVector < TypeID , 8 > variableData ; <nl> for ( auto param : fnTy - > getParameters ( ) ) { <nl> - variableData . push_back ( addTypeRef ( param . getType ( ) ) ) ; <nl> + variableData . push_back ( S . addTypeRef ( param . getType ( ) ) ) ; <nl> unsigned conv = getRawStableParameterConvention ( param . getConvention ( ) ) ; <nl> variableData . push_back ( TypeID ( conv ) ) ; <nl> } <nl> for ( auto yield : fnTy - > getYields ( ) ) { <nl> - variableData . push_back ( addTypeRef ( yield . getType ( ) ) ) ; <nl> + variableData . push_back ( S . addTypeRef ( yield . getType ( ) ) ) ; <nl> unsigned conv = getRawStableParameterConvention ( yield . getConvention ( ) ) ; <nl> variableData . push_back ( TypeID ( conv ) ) ; <nl> } <nl> for ( auto result : fnTy - > getResults ( ) ) { <nl> - variableData . push_back ( addTypeRef ( result . getType ( ) ) ) ; <nl> + variableData . push_back ( S . addTypeRef ( result . getType ( ) ) ) ; <nl> unsigned conv = getRawStableResultConvention ( result . getConvention ( ) ) ; <nl> variableData . 
push_back ( TypeID ( conv ) ) ; <nl> } <nl> if ( fnTy - > hasErrorResult ( ) ) { <nl> auto abResult = fnTy - > getErrorResult ( ) ; <nl> - variableData . push_back ( addTypeRef ( abResult . getType ( ) ) ) ; <nl> + variableData . push_back ( S . addTypeRef ( abResult . getType ( ) ) ) ; <nl> unsigned conv = getRawStableResultConvention ( abResult . getConvention ( ) ) ; <nl> variableData . push_back ( TypeID ( conv ) ) ; <nl> } <nl> <nl> auto sig = fnTy - > getGenericSignature ( ) ; <nl> <nl> - auto stableCoroutineKind = <nl> + auto stableCoroutineKind = <nl> getRawStableSILCoroutineKind ( fnTy - > getCoroutineKind ( ) ) ; <nl> <nl> auto stableCalleeConvention = <nl> getRawStableParameterConvention ( fnTy - > getCalleeConvention ( ) ) ; <nl> <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ SILFunctionTypeLayout : : Code ] ; <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ SILFunctionTypeLayout : : Code ] ; <nl> SILFunctionTypeLayout : : emitRecord ( <nl> - Out , ScratchRecord , abbrCode , <nl> + S . Out , S . ScratchRecord , abbrCode , <nl> stableCoroutineKind , stableCalleeConvention , <nl> stableRepresentation , fnTy - > isPseudogeneric ( ) , fnTy - > isNoEscape ( ) , <nl> fnTy - > hasErrorResult ( ) , fnTy - > getParameters ( ) . size ( ) , <nl> fnTy - > getNumYields ( ) , fnTy - > getNumResults ( ) , <nl> - addGenericSignatureRef ( sig ) , variableData ) ; <nl> + S . addGenericSignatureRef ( sig ) , variableData ) ; <nl> <nl> if ( auto conformance = fnTy - > getWitnessMethodConformanceOrNone ( ) ) <nl> - writeConformance ( * conformance , DeclTypeAbbrCodes ) ; <nl> - <nl> - break ; <nl> + S . writeConformance ( * conformance , S . DeclTypeAbbrCodes ) ; <nl> } <nl> - <nl> - case TypeKind : : ArraySlice : { <nl> - auto sliceTy = cast < ArraySliceType > ( ty . getPointer ( ) ) ; <nl> <nl> + void visitArraySliceType ( const ArraySliceType * sliceTy ) { <nl> + using namespace decls_block ; <nl> Type base = sliceTy - > getBaseType ( ) ; <nl> - <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ ArraySliceTypeLayout : : Code ] ; <nl> - ArraySliceTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode , <nl> - addTypeRef ( base ) ) ; <nl> - break ; <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ ArraySliceTypeLayout : : Code ] ; <nl> + ArraySliceTypeLayout : : emitRecord ( S . Out , S . ScratchRecord , abbrCode , <nl> + S . addTypeRef ( base ) ) ; <nl> } <nl> <nl> - case TypeKind : : Dictionary : { <nl> - auto dictTy = cast < DictionaryType > ( ty . getPointer ( ) ) ; <nl> - <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ DictionaryTypeLayout : : Code ] ; <nl> - DictionaryTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode , <nl> - addTypeRef ( dictTy - > getKeyType ( ) ) , <nl> - addTypeRef ( dictTy - > getValueType ( ) ) ) ; <nl> - break ; <nl> + void visitDictionaryType ( const DictionaryType * dictTy ) { <nl> + using namespace decls_block ; <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ DictionaryTypeLayout : : Code ] ; <nl> + DictionaryTypeLayout : : emitRecord ( S . Out , S . ScratchRecord , abbrCode , <nl> + S . addTypeRef ( dictTy - > getKeyType ( ) ) , <nl> + S . addTypeRef ( dictTy - > getValueType ( ) ) ) ; <nl> } <nl> <nl> - case TypeKind : : Optional : { <nl> - auto optionalTy = cast < OptionalType > ( ty . 
getPointer ( ) ) ; <nl> - <nl> + void visitOptionalType ( const OptionalType * optionalTy ) { <nl> + using namespace decls_block ; <nl> Type base = optionalTy - > getBaseType ( ) ; <nl> - <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ OptionalTypeLayout : : Code ] ; <nl> - OptionalTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode , <nl> - addTypeRef ( base ) ) ; <nl> - break ; <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ OptionalTypeLayout : : Code ] ; <nl> + OptionalTypeLayout : : emitRecord ( S . Out , S . ScratchRecord , abbrCode , <nl> + S . addTypeRef ( base ) ) ; <nl> } <nl> <nl> - case TypeKind : : ProtocolComposition : { <nl> - auto composition = cast < ProtocolCompositionType > ( ty . getPointer ( ) ) ; <nl> + void <nl> + visitProtocolCompositionType ( const ProtocolCompositionType * composition ) { <nl> + using namespace decls_block ; <nl> <nl> SmallVector < TypeID , 4 > protocols ; <nl> for ( auto proto : composition - > getMembers ( ) ) <nl> - protocols . push_back ( addTypeRef ( proto ) ) ; <nl> + protocols . push_back ( S . addTypeRef ( proto ) ) ; <nl> <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ ProtocolCompositionTypeLayout : : Code ] ; <nl> - ProtocolCompositionTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode , <nl> - composition - > hasExplicitAnyObject ( ) , <nl> - protocols ) ; <nl> - break ; <nl> + unsigned abbrCode = <nl> + S . DeclTypeAbbrCodes [ ProtocolCompositionTypeLayout : : Code ] ; <nl> + ProtocolCompositionTypeLayout : : emitRecord ( <nl> + S . Out , S . ScratchRecord , abbrCode , <nl> + composition - > hasExplicitAnyObject ( ) , <nl> + protocols ) ; <nl> } <nl> <nl> - # define REF_STORAGE ( Name , . . . ) \ <nl> - case TypeKind : : Name # # Storage : <nl> - # include " swift / AST / ReferenceStorage . def " <nl> - { <nl> - auto refTy = cast < ReferenceStorageType > ( ty . getPointer ( ) ) ; <nl> - <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ ReferenceStorageTypeLayout : : Code ] ; <nl> + void visitReferenceStorageType ( const ReferenceStorageType * refTy ) { <nl> + using namespace decls_block ; <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ ReferenceStorageTypeLayout : : Code ] ; <nl> auto stableOwnership = <nl> getRawStableReferenceOwnership ( refTy - > getOwnership ( ) ) ; <nl> - ReferenceStorageTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode , <nl> - stableOwnership , <nl> - addTypeRef ( refTy - > getReferentType ( ) ) ) ; <nl> - break ; <nl> + ReferenceStorageTypeLayout : : emitRecord ( <nl> + S . Out , S . ScratchRecord , abbrCode , <nl> + stableOwnership , <nl> + S . addTypeRef ( refTy - > getReferentType ( ) ) ) ; <nl> } <nl> <nl> - case TypeKind : : UnboundGeneric : { <nl> - auto generic = cast < UnboundGenericType > ( ty . getPointer ( ) ) ; <nl> - <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ UnboundGenericTypeLayout : : Code ] ; <nl> - UnboundGenericTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode , <nl> - addDeclRef ( generic - > getDecl ( ) , <nl> - / * allowTypeAliasXRef * / true ) , <nl> - addTypeRef ( generic - > getParent ( ) ) ) ; <nl> - break ; <nl> + void visitUnboundGenericType ( const UnboundGenericType * generic ) { <nl> + using namespace decls_block ; <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ UnboundGenericTypeLayout : : Code ] ; <nl> + UnboundGenericTypeLayout : : emitRecord ( <nl> + S . Out , S . ScratchRecord , abbrCode , <nl> + S . addDeclRef ( generic - > getDecl ( ) , / * allowTypeAliasXRef * / true ) , <nl> + S . 
addTypeRef ( generic - > getParent ( ) ) ) ; <nl> } <nl> <nl> - case TypeKind : : BoundGenericClass : <nl> - case TypeKind : : BoundGenericEnum : <nl> - case TypeKind : : BoundGenericStruct : { <nl> - auto generic = cast < BoundGenericType > ( ty . getPointer ( ) ) ; <nl> + void visitBoundGenericType ( const BoundGenericType * generic ) { <nl> + using namespace decls_block ; <nl> SmallVector < TypeID , 8 > genericArgIDs ; <nl> <nl> for ( auto next : generic - > getGenericArgs ( ) ) <nl> - genericArgIDs . push_back ( addTypeRef ( next ) ) ; <nl> + genericArgIDs . push_back ( S . addTypeRef ( next ) ) ; <nl> <nl> - unsigned abbrCode = DeclTypeAbbrCodes [ BoundGenericTypeLayout : : Code ] ; <nl> - BoundGenericTypeLayout : : emitRecord ( Out , ScratchRecord , abbrCode , <nl> - addDeclRef ( generic - > getDecl ( ) ) , <nl> - addTypeRef ( generic - > getParent ( ) ) , <nl> + unsigned abbrCode = S . DeclTypeAbbrCodes [ BoundGenericTypeLayout : : Code ] ; <nl> + BoundGenericTypeLayout : : emitRecord ( S . Out , S . ScratchRecord , abbrCode , <nl> + S . addDeclRef ( generic - > getDecl ( ) ) , <nl> + S . addTypeRef ( generic - > getParent ( ) ) , <nl> genericArgIDs ) ; <nl> - break ; <nl> } <nl> + } ; <nl> <nl> - case TypeKind : : InOut : <nl> - llvm_unreachable ( " inout types are only used in function type parameters " ) ; <nl> - case TypeKind : : LValue : <nl> - llvm_unreachable ( " lvalue types are only used in function bodies " ) ; <nl> - case TypeKind : : TypeVariable : <nl> - llvm_unreachable ( " type variables should not escape the type checker " ) ; <nl> - } <nl> + void Serializer : : writeType ( Type ty ) { <nl> + using namespace decls_block ; <nl> + PrettyStackTraceType traceRAII ( ty - > getASTContext ( ) , " serializing " , ty ) ; <nl> + <nl> + auto id = DeclAndTypeIDs [ ty ] ; <nl> + assert ( id ! = 0 & & " type not referenced properly " ) ; <nl> + ( void ) id ; <nl> + <nl> + assert ( ( id - 1 ) = = TypeOffsets . size ( ) ) ; <nl> + assert ( ( DeclOffsets . empty ( ) | | DeclOffsets . back ( ) ! = Out . GetCurrentBitNo ( ) ) & & <nl> + " encoding Decl and Type to the same offset " ) ; <nl> + TypeOffsets . push_back ( Out . GetCurrentBitNo ( ) ) ; <nl> + SWIFT_DEFER { <nl> + / / This is important enough to leave on in Release builds . <nl> + if ( TypeOffsets . back ( ) = = Out . GetCurrentBitNo ( ) ) { <nl> + llvm : : PrettyStackTraceString message ( " failed to serialize anything " ) ; <nl> + abort ( ) ; <nl> + } <nl> + } ; <nl> + <nl> + TypeSerializer ( * this ) . visit ( ty ) ; <nl> } <nl> <nl> void Serializer : : writeAllDeclsAndTypes ( ) { <nl> mmm a / lib / Serialization / Serialization . h <nl> ppp b / lib / Serialization / Serialization . h <nl> class SerializerBase { <nl> class Serializer : public SerializerBase { <nl> class DeclSerializer ; <nl> friend class DeclSerializer ; <nl> + class TypeSerializer ; <nl> + friend class TypeSerializer ; <nl> public : <nl> / / / Stores a declaration or a type to be written to the AST file . <nl> / / / <nl>
[ Serialization ] Factor TypeSerializer out of writeType
apple/swift
f89358fc5c14d64581df7e779b2a745775fabbb5
2019-05-24T04:06:14Z
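The commit above replaces the per-TypeKind switch cases in Serializer::writeType with visit* methods on a TypeSerializer visitor that stores a reference to the owning Serializer and routes every helper call through it (S.addTypeRef, S.Out, S.DeclTypeAbbrCodes, and so on). As a rough illustration of that shape only, the following self-contained C++ sketch uses hypothetical stand-in classes, not the actual Swift compiler types:

```cpp
#include <cstdint>
#include <functional>
#include <iostream>
#include <string>

// Hypothetical stand-in for the real serializer; only the call pattern matters here.
struct Serializer {
    uint64_t addTypeRef(const std::string & type) { return std::hash<std::string>{}(type); }
    void emitRecord(const std::string & layout, uint64_t ref) {
        std::cout << layout << " <- " << ref << "\n";
    }
};

// Each former `case TypeKind::...` becomes a visit* method that reaches the
// owning serializer explicitly through the stored reference `S`.
class TypeSerializer {
    Serializer & S;
public:
    explicit TypeSerializer(Serializer & serializer) : S(serializer) {}

    void visitOptionalType(const std::string & baseType) {
        S.emitRecord("OptionalTypeLayout", S.addTypeRef(baseType));
    }

    void visitDictionaryType(const std::string & keyType, const std::string & valueType) {
        S.emitRecord("DictionaryTypeLayout.key", S.addTypeRef(keyType));
        S.emitRecord("DictionaryTypeLayout.value", S.addTypeRef(valueType));
    }
};

int main() {
    Serializer serializer;
    // Mirrors the new call site shape: TypeSerializer(*this).visit(ty);
    TypeSerializer(serializer).visitOptionalType("Int?");
}
```

The benefit of this shape is that each type kind gets its own method, so supporting a new kind means adding a method rather than growing a single switch statement.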
mmm a / docs / StandardLibraryProgrammersManual . md <nl> ppp b / docs / StandardLibraryProgrammersManual . md <nl> These three functions are assertions that will trigger a run time trap if violat <nl> * ` _debugPrecondition ` will execute when * * user code * * is built with assertions enabled . Use this for invariant enforcement that ' s useful while debugging , but might be prohibitively expensive when user code is configured without assertions . <nl> * ` _sanityCheck ` will execute when * * standard library code * * is built with assertions enabled . Use this for internal only invariant checks that are useful for debugging the standard library itself . <nl> <nl> + # # # # ` _fixLifetime ` <nl> + <nl> + A call to ` _fixLifetime ` is considered a use of its argument , meaning that the argument is guaranteed live at least up until the call . It is otherwise a nop . This is useful for guaranteeing the lifetime of a value while inspecting its physical layout . Without a call to ` _fixLifetime ` , the last formal use may occur while the value ' s bits are still being munged . <nl> + <nl> + * Example : * <nl> + <nl> + ` ` ` swift <nl> + var x = . . . <nl> + defer { _fixLifetime ( x ) } / / Guarantee at least lexical lifetime for x <nl> + let theBits = unsafeBitCast ( & x , . . . ) <nl> + . . . / / use of theBits in ways that may outlive x if it weren ' t for the _fixLifetime call <nl> + ` ` ` <nl> + <nl> <nl> # # # Annotations <nl> <nl>
[ docs ] document _fixLifetime
apple/swift
461f66da01c3a4e7aa1dff0fd6758a2fed11b166
2017-08-04T18:55:21Z
mmm a / dbms / src / tests / primary_key_none . cpp <nl> ppp b / dbms / src / tests / primary_key_none . cpp <nl> int main ( int argc , char * * argv ) <nl> } <nl> <nl> / / / reading the table <nl> - DB : : AggregatedRowSet data_read ; <nl> { <nl> DB : : Row key ; <nl> Poco : : SharedPtr < DB : : ITablePartReader > reader ( column_group0 . primary_key - > read ( key ) ) ; <nl>
dbms : development .
ClickHouse/ClickHouse
37be72412b443e977e543797feb4d37b322980ac
2009-07-24T16:56:06Z
mmm a / dbms / src / Common / ErrorCodes . cpp <nl> ppp b / dbms / src / Common / ErrorCodes . cpp <nl> namespace ErrorCodes <nl> extern const int CACHE_DICTIONARY_UPDATE_FAIL = 510 ; <nl> extern const int UNKNOWN_ROLE = 511 ; <nl> extern const int SET_NON_GRANTED_ROLE = 512 ; <nl> + extern const int UNKNOWN_PART_TYPE = 513 ; <nl> <nl> extern const int KEEPER_EXCEPTION = 999 ; <nl> extern const int POCO_EXCEPTION = 1000 ; <nl> mmm a / dbms / src / DataStreams / MarkInCompressedFile . h <nl> ppp b / dbms / src / DataStreams / MarkInCompressedFile . h <nl> struct MarkInCompressedFile <nl> <nl> } ; <nl> <nl> - using MarksInCompressedFile = PODArray < MarkInCompressedFile > ; <nl> + class MarksInCompressedFile : public PODArray < MarkInCompressedFile > <nl> + { <nl> + public : <nl> + MarksInCompressedFile ( size_t n ) : PODArray ( n ) { } <nl> + <nl> + void read ( ReadBuffer & buffer , size_t from , size_t count ) <nl> + { <nl> + buffer . readStrict ( reinterpret_cast < char * > ( data ( ) + from ) , count * sizeof ( MarkInCompressedFile ) ) ; <nl> + } <nl> + } ; <nl> <nl> } <nl> mmm a / dbms / src / DataStreams / TTLBlockInputStream . cpp <nl> ppp b / dbms / src / DataStreams / TTLBlockInputStream . cpp <nl> TTLBlockInputStream : : TTLBlockInputStream ( <nl> { <nl> if ( force | | isTTLExpired ( ttl_info . min ) ) <nl> { <nl> - new_ttl_infos . columns_ttl . emplace ( name , MergeTreeDataPart : : TTLInfo { } ) ; <nl> + new_ttl_infos . columns_ttl . emplace ( name , IMergeTreeDataPart : : TTLInfo { } ) ; <nl> empty_columns . emplace ( name ) ; <nl> <nl> auto it = column_defaults . find ( name ) ; <nl> void TTLBlockInputStream : : readSuffixImpl ( ) <nl> new_ttl_infos . updatePartMinMaxTTL ( new_ttl_infos . table_ttl . min , new_ttl_infos . table_ttl . max ) ; <nl> <nl> data_part - > ttl_infos = std : : move ( new_ttl_infos ) ; <nl> - data_part - > empty_columns = std : : move ( empty_columns ) ; <nl> + data_part - > expired_columns = std : : move ( empty_columns ) ; <nl> <nl> if ( rows_removed ) <nl> LOG_INFO ( log , " Removed " < < rows_removed < < " rows with expired TTL from part " < < data_part - > name ) ; <nl> mmm a / dbms / src / DataStreams / TTLBlockInputStream . h <nl> ppp b / dbms / src / DataStreams / TTLBlockInputStream . h <nl> <nl> # pragma once <nl> # include < DataStreams / IBlockInputStream . h > <nl> # include < Storages / MergeTree / MergeTreeData . h > <nl> - # include < Storages / MergeTree / MergeTreeDataPart . h > <nl> + # include < Storages / MergeTree / IMergeTreeDataPart . h > <nl> # include < Core / Block . h > <nl> <nl> # include < common / DateLUT . h > <nl> class TTLBlockInputStream : public IBlockInputStream <nl> time_t current_time ; <nl> bool force ; <nl> <nl> - MergeTreeDataPart : : TTLInfos old_ttl_infos ; <nl> - MergeTreeDataPart : : TTLInfos new_ttl_infos ; <nl> + IMergeTreeDataPart : : TTLInfos old_ttl_infos ; <nl> + IMergeTreeDataPart : : TTLInfos new_ttl_infos ; <nl> NameSet empty_columns ; <nl> <nl> size_t rows_removed = 0 ; <nl> mmm a / dbms / src / Interpreters / MutationsInterpreter . cpp <nl> ppp b / dbms / src / Interpreters / MutationsInterpreter . 
cpp <nl> bool isStorageTouchedByMutations ( <nl> <nl> MutationsInterpreter : : MutationsInterpreter ( <nl> StoragePtr storage_ , <nl> - std : : vector < MutationCommand > commands_ , <nl> + MutationCommands commands_ , <nl> const Context & context_ , <nl> bool can_execute_ ) <nl> : storage ( std : : move ( storage_ ) ) <nl> ASTPtr MutationsInterpreter : : prepareInterpreterSelectQuery ( std : : vector < Stage > & <nl> <nl> if ( i > 0 ) <nl> prepared_stages [ i ] . output_columns = prepared_stages [ i - 1 ] . output_columns ; <nl> + else if ( ! commands . additional_columns . empty ( ) ) <nl> + prepared_stages [ i ] . output_columns . insert ( commands . additional_columns . begin ( ) , commands . additional_columns . end ( ) ) ; <nl> <nl> if ( prepared_stages [ i ] . output_columns . size ( ) < all_columns . size ( ) ) <nl> { <nl> mmm a / dbms / src / Interpreters / MutationsInterpreter . h <nl> ppp b / dbms / src / Interpreters / MutationsInterpreter . h <nl> class MutationsInterpreter <nl> public : <nl> / / / Storage to mutate , array of mutations commands and context . If you really want to execute mutation <nl> / / / use can_execute = true , in other cases ( validation , amount of commands ) it can be false <nl> - MutationsInterpreter ( StoragePtr storage_ , std : : vector < MutationCommand > commands_ , const Context & context_ , bool can_execute_ ) ; <nl> + MutationsInterpreter ( StoragePtr storage_ , MutationCommands commands_ , const Context & context_ , bool can_execute_ ) ; <nl> <nl> void validate ( TableStructureReadLockHolder & table_lock_holder ) ; <nl> <nl> class MutationsInterpreter <nl> BlockInputStreamPtr addStreamsForLaterStages ( const std : : vector < Stage > & prepared_stages , BlockInputStreamPtr in ) const ; <nl> <nl> StoragePtr storage ; <nl> - std : : vector < MutationCommand > commands ; <nl> + MutationCommands commands ; <nl> const Context & context ; <nl> bool can_execute ; <nl> <nl> mmm a / dbms / src / Interpreters / PartLog . cpp <nl> ppp b / dbms / src / Interpreters / PartLog . cpp <nl> <nl> # include < DataTypes / DataTypeDate . h > <nl> # include < DataTypes / DataTypeString . h > <nl> # include < DataTypes / DataTypeEnum . h > <nl> - # include < Storages / MergeTree / MergeTreeDataPart . h > <nl> + # include < Storages / MergeTree / IMergeTreeDataPart . h > <nl> # include < Storages / MergeTree / MergeTreeData . h > <nl> # include < Interpreters / PartLog . h > <nl> <nl> mmm a / dbms / src / Interpreters / PartLog . h <nl> ppp b / dbms / src / Interpreters / PartLog . h <nl> struct PartLogElement <nl> void appendToBlock ( Block & block ) const ; <nl> } ; <nl> <nl> - struct MergeTreeDataPart ; <nl> + class IMergeTreeDataPart ; <nl> <nl> <nl> / / / Instead of typedef - to allow forward declaration . <nl> class PartLog : public SystemLog < PartLogElement > <nl> { <nl> using SystemLog < PartLogElement > : : SystemLog ; <nl> <nl> - using MutableDataPartPtr = std : : shared_ptr < MergeTreeDataPart > ; <nl> + using MutableDataPartPtr = std : : shared_ptr < IMergeTreeDataPart > ; <nl> using MutableDataPartsVector = std : : vector < MutableDataPartPtr > ; <nl> <nl> public : <nl> new file mode 100644 <nl> index 00000000000 . . 96f86623ef9 <nl> mmm / dev / null <nl> ppp b / dbms / src / Storages / MergeTree / AlterAnalysisResult . h <nl> <nl> + # pragma once <nl> + # include < Interpreters / ExpressionActions . h > <nl> + <nl> + namespace DB <nl> + { <nl> + struct AlterAnalysisResult <nl> + { <nl> + / / / Expression for column type conversion . 
<nl> + / / / If no conversions are needed , expression = nullptr . <nl> + ExpressionActionsPtr expression = nullptr ; <nl> + <nl> + / / / Denotes if metadata must be changed even if no file should be overwritten <nl> + / / / ( used for transformation - free changing of Enum values list ) . <nl> + bool force_update_metadata = false ; <nl> + <nl> + std : : map < String , const IDataType * > new_types ; <nl> + <nl> + / / / For every column that need to be converted : source column name , <nl> + / / / column name of calculated expression for conversion . <nl> + std : : vector < std : : pair < String , String > > conversions ; <nl> + NamesAndTypesList removed_columns ; <nl> + Names removed_indices ; <nl> + } ; <nl> + } <nl> mmm a / dbms / src / Storages / MergeTree / DataPartsExchange . cpp <nl> ppp b / dbms / src / Storages / MergeTree / DataPartsExchange . cpp <nl> MergeTreeData : : MutableDataPartPtr Fetcher : : fetchPart ( <nl> readBinary ( sum_files_size , in ) ; <nl> if ( server_protocol_version = = REPLICATION_PROTOCOL_VERSION_WITH_PARTS_SIZE_AND_TTL_INFOS ) <nl> { <nl> - MergeTreeDataPart : : TTLInfos ttl_infos ; <nl> + IMergeTreeDataPart : : TTLInfos ttl_infos ; <nl> String ttl_infos_string ; <nl> readBinary ( ttl_infos_string , in ) ; <nl> ReadBufferFromString ttl_infos_buffer ( ttl_infos_string ) ; <nl> MergeTreeData : : MutableDataPartPtr Fetcher : : downloadPart ( <nl> <nl> part_file . createDirectory ( ) ; <nl> <nl> - MergeTreeData : : MutableDataPartPtr new_data_part = std : : make_shared < MergeTreeData : : DataPart > ( data , reservation - > getDisk ( ) , part_name ) ; <nl> - new_data_part - > relative_path = relative_part_path ; <nl> - new_data_part - > is_temp = true ; <nl> - <nl> - <nl> MergeTreeData : : DataPart : : Checksums checksums ; <nl> for ( size_t i = 0 ; i < files ; + + i ) <nl> { <nl> MergeTreeData : : MutableDataPartPtr Fetcher : : downloadPart ( <nl> <nl> assertEOF ( in ) ; <nl> <nl> + MergeTreeData : : MutableDataPartPtr new_data_part = data . createPart ( part_name , reservation - > getDisk ( ) , relative_part_path ) ; <nl> + new_data_part - > is_temp = true ; <nl> new_data_part - > modification_time = time ( nullptr ) ; <nl> new_data_part - > loadColumnsChecksumsIndexes ( true , false ) ; <nl> new_data_part - > checksums . checkEqual ( checksums , false ) ; <nl> similarity index 69 % <nl> rename from dbms / src / Storages / MergeTree / MergeTreeDataPart . cpp <nl> rename to dbms / src / Storages / MergeTree / IMergeTreeDataPart . cpp <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeDataPart . cpp <nl> ppp b / dbms / src / Storages / MergeTree / IMergeTreeDataPart . cpp <nl> <nl> - # include " MergeTreeDataPart . h " <nl> + # include " IMergeTreeDataPart . h " <nl> <nl> # include < optional > <nl> # include < IO / ReadHelpers . h > <nl> namespace ErrorCodes <nl> extern const int BAD_SIZE_OF_FILE_IN_DATA_PART ; <nl> extern const int BAD_TTL_FILE ; <nl> extern const int CANNOT_UNLINK ; <nl> + extern const int NOT_IMPLEMENTED ; <nl> } <nl> <nl> <nl> static ReadBufferFromFile openForReading ( const String & path ) <nl> return ReadBufferFromFile ( path , std : : min ( static_cast < Poco : : File : : FileSize > ( DBMS_DEFAULT_BUFFER_SIZE ) , Poco : : File ( path ) . getSize ( ) ) ) ; <nl> } <nl> <nl> - static String getFileNameForColumn ( const NameAndTypePair & column ) <nl> - { <nl> - String filename ; <nl> - column . type - > enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> - { <nl> - if ( filename . 
empty ( ) ) <nl> - filename = IDataType : : getFileNameForStream ( column . name , substream_path ) ; <nl> - } ) ; <nl> - return filename ; <nl> - } <nl> - <nl> - void MergeTreeDataPart : : MinMaxIndex : : load ( const MergeTreeData & data , const String & part_path ) <nl> + void IMergeTreeDataPart : : MinMaxIndex : : load ( const MergeTreeData & data , const String & part_path ) <nl> { <nl> size_t minmax_idx_size = data . minmax_idx_column_types . size ( ) ; <nl> parallelogram . reserve ( minmax_idx_size ) ; <nl> void MergeTreeDataPart : : MinMaxIndex : : load ( const MergeTreeData & data , const Stri <nl> { <nl> String file_name = part_path + " minmax_ " + escapeForFileName ( data . minmax_idx_columns [ i ] ) + " . idx " ; <nl> ReadBufferFromFile file = openForReading ( file_name ) ; <nl> - const DataTypePtr & type = data . minmax_idx_column_types [ i ] ; <nl> + const DataTypePtr & data_type = data . minmax_idx_column_types [ i ] ; <nl> <nl> Field min_val ; <nl> - type - > deserializeBinary ( min_val , file ) ; <nl> + data_type - > deserializeBinary ( min_val , file ) ; <nl> Field max_val ; <nl> - type - > deserializeBinary ( max_val , file ) ; <nl> + data_type - > deserializeBinary ( max_val , file ) ; <nl> <nl> parallelogram . emplace_back ( min_val , true , max_val , true ) ; <nl> } <nl> initialized = true ; <nl> } <nl> <nl> - void MergeTreeDataPart : : MinMaxIndex : : store ( const MergeTreeData & data , const String & part_path , Checksums & out_checksums ) const <nl> + void IMergeTreeDataPart : : MinMaxIndex : : store ( const MergeTreeData & data , const String & part_path , Checksums & out_checksums ) const <nl> { <nl> store ( data . minmax_idx_columns , data . minmax_idx_column_types , part_path , out_checksums ) ; <nl> } <nl> <nl> - void MergeTreeDataPart : : MinMaxIndex : : store ( const Names & column_names , const DataTypes & data_types , const String & part_path , Checksums & out_checksums ) const <nl> + void IMergeTreeDataPart : : MinMaxIndex : : store ( const Names & column_names , const DataTypes & data_types , const String & part_path , Checksums & out_checksums ) const <nl> { <nl> if ( ! initialized ) <nl> throw Exception ( " Attempt to store uninitialized MinMax index for part " + part_path + " . This is a bug . " , <nl> void MergeTreeDataPart : : MinMaxIndex : : store ( const Names & column_names , const Dat <nl> for ( size_t i = 0 ; i < column_names . size ( ) ; + + i ) <nl> { <nl> String file_name = " minmax_ " + escapeForFileName ( column_names [ i ] ) + " . idx " ; <nl> - const DataTypePtr & type = data_types . at ( i ) ; <nl> + const DataTypePtr & data_type = data_types . at ( i ) ; <nl> <nl> WriteBufferFromFile out ( part_path + file_name ) ; <nl> HashingWriteBuffer out_hashing ( out ) ; <nl> - type - > serializeBinary ( parallelogram [ i ] . left , out_hashing ) ; <nl> - type - > serializeBinary ( parallelogram [ i ] . right , out_hashing ) ; <nl> + data_type - > serializeBinary ( parallelogram [ i ] . left , out_hashing ) ; <nl> + data_type - > serializeBinary ( parallelogram [ i ] . right , out_hashing ) ; <nl> out_hashing . next ( ) ; <nl> out_checksums . files [ file_name ] . file_size = out_hashing . count ( ) ; <nl> out_checksums . files [ file_name ] . file_hash = out_hashing . getHash ( ) ; <nl> } <nl> } <nl> <nl> - void MergeTreeDataPart : : MinMaxIndex : : update ( const Block & block , const Names & column_names ) <nl> + void IMergeTreeDataPart : : MinMaxIndex : : update ( const Block & block , const Names & column_names ) <nl> { <nl> if ( ! 
initialized ) <nl> parallelogram . reserve ( column_names . size ( ) ) ; <nl> void MergeTreeDataPart : : MinMaxIndex : : update ( const Block & block , const Names & c <nl> initialized = true ; <nl> } <nl> <nl> - void MergeTreeDataPart : : MinMaxIndex : : merge ( const MinMaxIndex & other ) <nl> + void IMergeTreeDataPart : : MinMaxIndex : : merge ( const MinMaxIndex & other ) <nl> { <nl> if ( ! other . initialized ) <nl> return ; <nl> void MergeTreeDataPart : : MinMaxIndex : : merge ( const MinMaxIndex & other ) <nl> } <nl> <nl> <nl> - MergeTreeDataPart : : MergeTreeDataPart ( MergeTreeData & storage_ , const DiskPtr & disk_ , const String & name_ ) <nl> + IMergeTreeDataPart : : IMergeTreeDataPart ( <nl> + MergeTreeData & storage_ , <nl> + const String & name_ , <nl> + const DiskPtr & disk_ , <nl> + const std : : optional < String > & relative_path_ , <nl> + Type part_type_ ) <nl> : storage ( storage_ ) <nl> - , disk ( disk_ ) <nl> , name ( name_ ) <nl> , info ( MergeTreePartInfo : : fromPartName ( name_ , storage . format_version ) ) <nl> - , index_granularity_info ( storage ) <nl> + , disk ( disk_ ) <nl> + , relative_path ( relative_path_ . value_or ( name_ ) ) <nl> + , index_granularity_info ( storage_ , part_type_ ) <nl> + , part_type ( part_type_ ) <nl> { <nl> } <nl> <nl> - MergeTreeDataPart : : MergeTreeDataPart ( <nl> - const MergeTreeData & storage_ , <nl> - const DiskPtr & disk_ , <nl> - const String & name_ , <nl> - const MergeTreePartInfo & info_ ) <nl> + IMergeTreeDataPart : : IMergeTreeDataPart ( <nl> + const MergeTreeData & storage_ , <nl> + const String & name_ , <nl> + const MergeTreePartInfo & info_ , <nl> + const DiskPtr & disk_ , <nl> + const std : : optional < String > & relative_path_ , <nl> + Type part_type_ ) <nl> : storage ( storage_ ) <nl> - , disk ( disk_ ) <nl> , name ( name_ ) <nl> , info ( info_ ) <nl> - , index_granularity_info ( storage ) <nl> - { <nl> - } <nl> - <nl> - <nl> - / / / Takes into account the fact that several columns can e . g . share their . size substreams . <nl> - / / / When calculating totals these should be counted only once . <nl> - ColumnSize MergeTreeDataPart : : getColumnSizeImpl ( <nl> - const String & column_name , const IDataType & type , std : : unordered_set < String > * processed_substreams ) const <nl> - { <nl> - ColumnSize size ; <nl> - if ( checksums . empty ( ) ) <nl> - return size ; <nl> - <nl> - type . enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> - { <nl> - String file_name = IDataType : : getFileNameForStream ( column_name , substream_path ) ; <nl> - <nl> - if ( processed_substreams & & ! processed_substreams - > insert ( file_name ) . second ) <nl> - return ; <nl> - <nl> - auto bin_checksum = checksums . files . find ( file_name + " . bin " ) ; <nl> - if ( bin_checksum ! = checksums . files . end ( ) ) <nl> - { <nl> - size . data_compressed + = bin_checksum - > second . file_size ; <nl> - size . data_uncompressed + = bin_checksum - > second . uncompressed_size ; <nl> - } <nl> - <nl> - auto mrk_checksum = checksums . files . find ( file_name + index_granularity_info . marks_file_extension ) ; <nl> - if ( mrk_checksum ! = checksums . files . end ( ) ) <nl> - size . marks + = mrk_checksum - > second . 
file_size ; <nl> - } , { } ) ; <nl> - <nl> - return size ; <nl> - } <nl> - <nl> - ColumnSize MergeTreeDataPart : : getColumnSize ( const String & column_name , const IDataType & type ) const <nl> - { <nl> - return getColumnSizeImpl ( column_name , type , nullptr ) ; <nl> - } <nl> - <nl> - ColumnSize MergeTreeDataPart : : getTotalColumnsSize ( ) const <nl> - { <nl> - ColumnSize totals ; <nl> - std : : unordered_set < String > processed_substreams ; <nl> - for ( const NameAndTypePair & column : columns ) <nl> - { <nl> - ColumnSize size = getColumnSizeImpl ( column . name , * column . type , & processed_substreams ) ; <nl> - totals . add ( size ) ; <nl> - } <nl> - return totals ; <nl> - } <nl> - <nl> - <nl> - size_t MergeTreeDataPart : : getFileSizeOrZero ( const String & file_name ) const <nl> - { <nl> - auto checksum = checksums . files . find ( file_name ) ; <nl> - if ( checksum = = checksums . files . end ( ) ) <nl> - return 0 ; <nl> - return checksum - > second . file_size ; <nl> - } <nl> - <nl> - / * * Returns the name of a column with minimum compressed size ( as returned by getColumnSize ( ) ) . <nl> - * If no checksums are present returns the name of the first physically existing column . <nl> - * / <nl> - String MergeTreeDataPart : : getColumnNameWithMinumumCompressedSize ( ) const <nl> - { <nl> - const auto & storage_columns = storage . getColumns ( ) . getAllPhysical ( ) ; <nl> - const std : : string * minimum_size_column = nullptr ; <nl> - UInt64 minimum_size = std : : numeric_limits < UInt64 > : : max ( ) ; <nl> - <nl> - for ( const auto & column : storage_columns ) <nl> - { <nl> - if ( ! hasColumnFiles ( column . name , * column . type ) ) <nl> - continue ; <nl> - <nl> - const auto size = getColumnSize ( column . name , * column . type ) . data_compressed ; <nl> - if ( size < minimum_size ) <nl> - { <nl> - minimum_size = size ; <nl> - minimum_size_column = & column . name ; <nl> - } <nl> - } <nl> - <nl> - if ( ! minimum_size_column ) <nl> - throw Exception ( " Could not find a column of minimum size in MergeTree , part " + getFullPath ( ) , ErrorCodes : : LOGICAL_ERROR ) ; <nl> - <nl> - return * minimum_size_column ; <nl> - } <nl> - <nl> - <nl> - String MergeTreeDataPart : : getFullPath ( ) const <nl> + , disk ( disk_ ) <nl> + , relative_path ( relative_path_ . value_or ( name_ ) ) <nl> + , index_granularity_info ( storage_ , part_type_ ) <nl> + , part_type ( part_type_ ) <nl> { <nl> - if ( relative_path . empty ( ) ) <nl> - throw Exception ( " Part relative_path cannot be empty . It ' s bug . " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> - <nl> - return storage . getFullPathOnDisk ( disk ) + relative_path + " / " ; <nl> } <nl> <nl> - String MergeTreeDataPart : : getNameWithPrefix ( ) const <nl> - { <nl> - String res = Poco : : Path ( relative_path ) . getFileName ( ) ; <nl> - <nl> - if ( res . empty ( ) ) <nl> - throw Exception ( " relative_path " + relative_path + " of part " + name + " is invalid or not set " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> - <nl> - return res ; <nl> - } <nl> <nl> - String MergeTreeDataPart : : getNewName ( const MergeTreePartInfo & new_part_info ) const <nl> + String IMergeTreeDataPart : : getNewName ( const MergeTreePartInfo & new_part_info ) const <nl> { <nl> if ( storage . format_version < MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING ) <nl> { <nl> String MergeTreeDataPart : : getNewName ( const MergeTreePartInfo & new_part_info ) co <nl> return new_part_info . 
getPartName ( ) ; <nl> } <nl> <nl> - DayNum MergeTreeDataPart : : getMinDate ( ) const <nl> + std : : optional < size_t > IMergeTreeDataPart : : getColumnPosition ( const String & column_name ) const <nl> + { <nl> + auto it = column_name_to_position . find ( column_name ) ; <nl> + if ( it = = column_name_to_position . end ( ) ) <nl> + return { } ; <nl> + return it - > second ; <nl> + } <nl> + <nl> + DayNum IMergeTreeDataPart : : getMinDate ( ) const <nl> { <nl> if ( storage . minmax_idx_date_column_pos ! = - 1 & & minmax_idx . initialized ) <nl> return DayNum ( minmax_idx . parallelogram [ storage . minmax_idx_date_column_pos ] . left . get < UInt64 > ( ) ) ; <nl> DayNum MergeTreeDataPart : : getMinDate ( ) const <nl> } <nl> <nl> <nl> - DayNum MergeTreeDataPart : : getMaxDate ( ) const <nl> + DayNum IMergeTreeDataPart : : getMaxDate ( ) const <nl> { <nl> if ( storage . minmax_idx_date_column_pos ! = - 1 & & minmax_idx . initialized ) <nl> return DayNum ( minmax_idx . parallelogram [ storage . minmax_idx_date_column_pos ] . right . get < UInt64 > ( ) ) ; <nl> DayNum MergeTreeDataPart : : getMaxDate ( ) const <nl> return DayNum ( ) ; <nl> } <nl> <nl> - time_t MergeTreeDataPart : : getMinTime ( ) const <nl> + time_t IMergeTreeDataPart : : getMinTime ( ) const <nl> { <nl> if ( storage . minmax_idx_time_column_pos ! = - 1 & & minmax_idx . initialized ) <nl> return minmax_idx . parallelogram [ storage . minmax_idx_time_column_pos ] . left . get < UInt64 > ( ) ; <nl> time_t MergeTreeDataPart : : getMinTime ( ) const <nl> } <nl> <nl> <nl> - time_t MergeTreeDataPart : : getMaxTime ( ) const <nl> + time_t IMergeTreeDataPart : : getMaxTime ( ) const <nl> { <nl> if ( storage . minmax_idx_time_column_pos ! = - 1 & & minmax_idx . initialized ) <nl> return minmax_idx . parallelogram [ storage . minmax_idx_time_column_pos ] . right . get < UInt64 > ( ) ; <nl> time_t MergeTreeDataPart : : getMaxTime ( ) const <nl> return 0 ; <nl> } <nl> <nl> - MergeTreeDataPart : : ~ MergeTreeDataPart ( ) <nl> + void IMergeTreeDataPart : : setColumns ( const NamesAndTypesList & new_columns ) <nl> + { <nl> + columns = new_columns ; <nl> + column_name_to_position . clear ( ) ; <nl> + column_name_to_position . reserve ( new_columns . size ( ) ) ; <nl> + size_t pos = 0 ; <nl> + for ( const auto & column : columns ) <nl> + column_name_to_position . emplace ( column . name , pos + + ) ; <nl> + } <nl> + <nl> + IMergeTreeDataPart : : ~ IMergeTreeDataPart ( ) = default ; <nl> + <nl> + void IMergeTreeDataPart : : removeIfNeeded ( ) <nl> { <nl> if ( state = = State : : DeleteOnDestroy | | is_temp ) <nl> { <nl> MergeTreeDataPart : : ~ MergeTreeDataPart ( ) <nl> <nl> if ( is_temp ) <nl> { <nl> - if ( ! startsWith ( getNameWithPrefix ( ) , " tmp " ) ) <nl> + String file_name = Poco : : Path ( relative_path ) . getFileName ( ) ; <nl> + <nl> + if ( file_name . empty ( ) ) <nl> + throw Exception ( " relative_path " + relative_path + " of part " + name + " is invalid or not set " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> + <nl> + if ( ! startsWith ( file_name , " tmp " ) ) <nl> { <nl> LOG_ERROR ( storage . log , " ~ DataPart ( ) should remove part " < < path <nl> < < " but its name doesn ' t start with tmp . Too suspicious , keeping the part . " ) ; <nl> MergeTreeDataPart : : ~ MergeTreeDataPart ( ) <nl> } <nl> } <nl> <nl> - UInt64 MergeTreeDataPart : : calculateTotalSizeOnDisk ( const String & from ) <nl> + <nl> + UInt64 IMergeTreeDataPart : : getIndexSizeInBytes ( ) const <nl> { <nl> - Poco : : File cur ( from ) ; <nl> - if ( cur . 
isFile ( ) ) <nl> - return cur . getSize ( ) ; <nl> - std : : vector < std : : string > files ; <nl> - cur . list ( files ) ; <nl> UInt64 res = 0 ; <nl> - for ( const auto & file : files ) <nl> - res + = calculateTotalSizeOnDisk ( from + file ) ; <nl> + for ( const ColumnPtr & column : index ) <nl> + res + = column - > byteSize ( ) ; <nl> return res ; <nl> } <nl> <nl> - void MergeTreeDataPart : : remove ( ) const <nl> + UInt64 IMergeTreeDataPart : : getIndexSizeInAllocatedBytes ( ) const <nl> { <nl> - if ( relative_path . empty ( ) ) <nl> - throw Exception ( " Part relative_path cannot be empty . This is bug . " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> - <nl> - / * * Atomic directory removal : <nl> - * - rename directory to temporary name ; <nl> - * - remove it recursive . <nl> - * <nl> - * For temporary name we use " delete_tmp_ " prefix . <nl> - * <nl> - * NOTE : We cannot use " tmp_delete_ " prefix , because there is a second thread , <nl> - * that calls " clearOldTemporaryDirectories " and removes all directories , that begin with " tmp_ " and are old enough . <nl> - * But when we removing data part , it can be old enough . And rename doesn ' t change mtime . <nl> - * And a race condition can happen that will lead to " File not found " error here . <nl> - * / <nl> - <nl> - String full_path = storage . getFullPathOnDisk ( disk ) ; <nl> - String from = full_path + relative_path ; <nl> - String to = full_path + " delete_tmp_ " + name ; <nl> - / / TODO directory delete_tmp_ < name > is never removed if server crashes before returning from this function <nl> - <nl> - Poco : : File from_dir { from } ; <nl> - Poco : : File to_dir { to } ; <nl> + UInt64 res = 0 ; <nl> + for ( const ColumnPtr & column : index ) <nl> + res + = column - > allocatedBytes ( ) ; <nl> + return res ; <nl> + } <nl> <nl> - if ( to_dir . exists ( ) ) <nl> + String IMergeTreeDataPart : : stateToString ( IMergeTreeDataPart : : State state ) <nl> + { <nl> + switch ( state ) <nl> { <nl> - LOG_WARNING ( storage . log , " Directory " < < to < < " ( to which part must be renamed before removing ) already exists . " <nl> - " Most likely this is due to unclean restart . Removing it . " ) ; <nl> - <nl> - try <nl> - { <nl> - to_dir . remove ( true ) ; <nl> - } <nl> - catch ( . . . ) <nl> - { <nl> - LOG_ERROR ( storage . log , " Cannot remove directory " < < to < < " . Check owner and access rights . " ) ; <nl> - throw ; <nl> - } <nl> + case State : : Temporary : <nl> + return " Temporary " ; <nl> + case State : : PreCommitted : <nl> + return " PreCommitted " ; <nl> + case State : : Committed : <nl> + return " Committed " ; <nl> + case State : : Outdated : <nl> + return " Outdated " ; <nl> + case State : : Deleting : <nl> + return " Deleting " ; <nl> + case State : : DeleteOnDestroy : <nl> + return " DeleteOnDestroy " ; <nl> } <nl> <nl> - try <nl> - { <nl> - from_dir . renameTo ( to ) ; <nl> - } <nl> - catch ( const Poco : : FileNotFoundException & ) <nl> - { <nl> - LOG_ERROR ( storage . log , " Directory " < < from < < " ( part to remove ) doesn ' t exist or one of nested files has gone . " <nl> - " Most likely this is due to manual removing . This should be discouraged . Ignoring . 
" ) ; <nl> + __builtin_unreachable ( ) ; <nl> + } <nl> <nl> - return ; <nl> - } <nl> + String IMergeTreeDataPart : : stateString ( ) const <nl> + { <nl> + return stateToString ( state ) ; <nl> + } <nl> <nl> - try <nl> + void IMergeTreeDataPart : : assertState ( const std : : initializer_list < IMergeTreeDataPart : : State > & affordable_states ) const <nl> + { <nl> + if ( ! checkState ( affordable_states ) ) <nl> { <nl> - / / / Remove each expected file in directory , then remove directory itself . <nl> - <nl> - # if ! __clang__ <nl> - # pragma GCC diagnostic push <nl> - # pragma GCC diagnostic ignored " - Wunused - variable " <nl> - # endif <nl> - std : : shared_lock < std : : shared_mutex > lock ( columns_lock ) ; <nl> - <nl> - for ( const auto & [ file , _ ] : checksums . files ) <nl> - { <nl> - String path_to_remove = to + " / " + file ; <nl> - if ( 0 ! = unlink ( path_to_remove . c_str ( ) ) ) <nl> - throwFromErrnoWithPath ( " Cannot unlink file " + path_to_remove , path_to_remove , <nl> - ErrorCodes : : CANNOT_UNLINK ) ; <nl> - } <nl> - # if ! __clang__ <nl> - # pragma GCC diagnostic pop <nl> - # endif <nl> - <nl> - for ( const auto & file : { " checksums . txt " , " columns . txt " } ) <nl> - { <nl> - String path_to_remove = to + " / " + file ; <nl> - if ( 0 ! = unlink ( path_to_remove . c_str ( ) ) ) <nl> - throwFromErrnoWithPath ( " Cannot unlink file " + path_to_remove , path_to_remove , <nl> - ErrorCodes : : CANNOT_UNLINK ) ; <nl> - } <nl> + String states_str ; <nl> + for ( auto affordable_state : affordable_states ) <nl> + states_str + = stateToString ( affordable_state ) + " " ; <nl> <nl> - if ( 0 ! = rmdir ( to . c_str ( ) ) ) <nl> - throwFromErrnoWithPath ( " Cannot rmdir file " + to , to , ErrorCodes : : CANNOT_UNLINK ) ; <nl> + throw Exception ( " Unexpected state of part " + getNameWithState ( ) + " . Expected : " + states_str , ErrorCodes : : NOT_FOUND_EXPECTED_DATA_PART ) ; <nl> } <nl> - catch ( . . . ) <nl> - { <nl> - / / / Recursive directory removal does many excessive " stat " syscalls under the hood . <nl> - <nl> - LOG_ERROR ( storage . log , " Cannot quickly remove directory " < < to < < " by removing files ; fallback to recursive removal . Reason : " <nl> - < < getCurrentExceptionMessage ( false ) ) ; <nl> + } <nl> <nl> - to_dir . remove ( true ) ; <nl> - } <nl> + void IMergeTreeDataPart : : assertOnDisk ( ) const <nl> + { <nl> + if ( ! isStoredOnDisk ( ) ) <nl> + throw Exception ( " Data part ' " + name + " ' with type ' " <nl> + + getType ( ) . toString ( ) + " ' is not stored on disk " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> } <nl> <nl> <nl> - void MergeTreeDataPart : : renameTo ( const String & new_relative_path , bool remove_new_dir_if_exists ) const <nl> + UInt64 IMergeTreeDataPart : : getMarksCount ( ) const <nl> { <nl> - String from = getFullPath ( ) ; <nl> - String to = storage . getFullPathOnDisk ( disk ) + new_relative_path + " / " ; <nl> + return index_granularity . getMarksCount ( ) ; <nl> + } <nl> <nl> - Poco : : File from_file ( from ) ; <nl> - if ( ! from_file . exists ( ) ) <nl> - throw Exception ( " Part directory " + from + " doesn ' t exist . Most likely it is logical error . " , ErrorCodes : : FILE_DOESNT_EXIST ) ; <nl> + size_t IMergeTreeDataPart : : getFileSizeOrZero ( const String & file_name ) const <nl> + { <nl> + auto checksum = checksums . files . find ( file_name ) ; <nl> + if ( checksum = = checksums . files . end ( ) ) <nl> + return 0 ; <nl> + return checksum - > second . 
file_size ; <nl> + } <nl> <nl> - Poco : : File to_file ( to ) ; <nl> - if ( to_file . exists ( ) ) <nl> - { <nl> - if ( remove_new_dir_if_exists ) <nl> - { <nl> - Names files ; <nl> - Poco : : File ( from ) . list ( files ) ; <nl> + String IMergeTreeDataPart : : getColumnNameWithMinumumCompressedSize ( ) const <nl> + { <nl> + const auto & storage_columns = storage . getColumns ( ) . getAllPhysical ( ) ; <nl> + const std : : string * minimum_size_column = nullptr ; <nl> + UInt64 minimum_size = std : : numeric_limits < UInt64 > : : max ( ) ; <nl> <nl> - LOG_WARNING ( storage . log , " Part directory " < < to < < " already exists " <nl> - < < " and contains " < < files . size ( ) < < " files . Removing it . " ) ; <nl> + for ( const auto & column : storage_columns ) <nl> + { <nl> + if ( ! hasColumnFiles ( column . name , * column . type ) ) <nl> + continue ; <nl> <nl> - to_file . remove ( true ) ; <nl> - } <nl> - else <nl> + const auto size = getColumnSize ( column . name , * column . type ) . data_compressed ; <nl> + if ( size < minimum_size ) <nl> { <nl> - throw Exception ( " Part directory " + to + " already exists " , ErrorCodes : : DIRECTORY_ALREADY_EXISTS ) ; <nl> + minimum_size = size ; <nl> + minimum_size_column = & column . name ; <nl> } <nl> } <nl> <nl> - from_file . setLastModified ( Poco : : Timestamp : : fromEpochTime ( time ( nullptr ) ) ) ; <nl> - from_file . renameTo ( to ) ; <nl> - relative_path = new_relative_path ; <nl> + if ( ! minimum_size_column ) <nl> + throw Exception ( " Could not find a column of minimum size in MergeTree , part " + getFullPath ( ) , ErrorCodes : : LOGICAL_ERROR ) ; <nl> + <nl> + return * minimum_size_column ; <nl> } <nl> <nl> <nl> - String MergeTreeDataPart : : getRelativePathForDetachedPart ( const String & prefix ) const <nl> + String IMergeTreeDataPart : : getFullPath ( ) const <nl> { <nl> - / / / Do not allow underscores in the prefix because they are used as separators . <nl> - assert ( prefix . find_first_of ( ' _ ' ) = = String : : npos ) ; <nl> - <nl> - String res ; <nl> + assertOnDisk ( ) ; <nl> <nl> - / * * If you need to detach a part , and directory into which we want to rename it already exists , <nl> - * we will rename to the directory with the name to which the suffix is added in the form of " _tryN " . <nl> - * This is done only in the case of ` to_detached ` , because it is assumed that in this case the exact name does not matter . <nl> - * No more than 10 attempts are made so that there are not too many junk directories left . <nl> - * / <nl> - for ( int try_no = 0 ; try_no < 10 ; try_no + + ) <nl> - { <nl> - res = " detached / " + ( prefix . empty ( ) ? " " : prefix + " _ " ) <nl> - + name + ( try_no ? " _try " + DB : : toString ( try_no ) : " " ) ; <nl> - <nl> - if ( ! Poco : : File ( storage . getFullPathOnDisk ( disk ) + res ) . exists ( ) ) <nl> - return res ; <nl> - <nl> - LOG_WARNING ( storage . log , " Directory " < < res < < " ( to detach to ) already exists . " <nl> - " Will detach to directory with ' _tryN ' suffix . " ) ; <nl> - } <nl> - <nl> - return res ; <nl> - } <nl> - <nl> - void MergeTreeDataPart : : renameToDetached ( const String & prefix ) const <nl> - { <nl> - renameTo ( getRelativePathForDetachedPart ( prefix ) ) ; <nl> - } <nl> - <nl> - <nl> - UInt64 MergeTreeDataPart : : getMarksCount ( ) const <nl> - { <nl> - return index_granularity . getMarksCount ( ) ; <nl> - } <nl> + if ( relative_path . empty ( ) ) <nl> + throw Exception ( " Part relative_path cannot be empty . It ' s bug . 
" , ErrorCodes : : LOGICAL_ERROR ) ; <nl> <nl> - void MergeTreeDataPart : : makeCloneInDetached ( const String & prefix ) const <nl> - { <nl> - Poco : : Path src ( getFullPath ( ) ) ; <nl> - Poco : : Path dst ( storage . getFullPathOnDisk ( disk ) + getRelativePathForDetachedPart ( prefix ) ) ; <nl> - / / / Backup is not recursive ( max_level is 0 ) , so do not copy inner directories <nl> - localBackup ( src , dst , 0 ) ; <nl> + return storage . getFullPathOnDisk ( disk ) + relative_path + " / " ; <nl> } <nl> <nl> - void MergeTreeDataPart : : makeCloneOnDiskDetached ( const ReservationPtr & reservation ) const <nl> + void IMergeTreeDataPart : : loadColumnsChecksumsIndexes ( bool require_columns_checksums , bool check_consistency ) <nl> { <nl> - auto reserved_disk = reservation - > getDisk ( ) ; <nl> - if ( reserved_disk - > getName ( ) = = disk - > getName ( ) ) <nl> - throw Exception ( " Can not clone data part " + name + " to same disk " + disk - > getName ( ) , ErrorCodes : : LOGICAL_ERROR ) ; <nl> - <nl> - String path_to_clone = storage . getFullPathOnDisk ( reserved_disk ) + " detached / " ; <nl> + assertOnDisk ( ) ; <nl> <nl> - if ( Poco : : File ( path_to_clone + relative_path ) . exists ( ) ) <nl> - throw Exception ( " Path " + path_to_clone + relative_path + " already exists . Can not clone " , ErrorCodes : : DIRECTORY_ALREADY_EXISTS ) ; <nl> - Poco : : File ( path_to_clone ) . createDirectory ( ) ; <nl> - <nl> - Poco : : File cloning_directory ( getFullPath ( ) ) ; <nl> - cloning_directory . copyTo ( path_to_clone ) ; <nl> - } <nl> - <nl> - void MergeTreeDataPart : : loadColumnsChecksumsIndexes ( bool require_columns_checksums , bool check_consistency ) <nl> - { <nl> / / / Memory should not be limited during ATTACH TABLE query . <nl> / / / This is already true at the server startup but must be also ensured for manual table ATTACH . <nl> / / / Motivation : memory for index is shared between queries - not belong to the query itself . <nl> void MergeTreeDataPart : : loadColumnsChecksumsIndexes ( bool require_columns_checksu <nl> loadRowsCount ( ) ; / / / Must be called after loadIndex ( ) as it uses the value of ` index_granularity ` . <nl> loadPartitionAndMinMaxIndex ( ) ; <nl> loadTTLInfos ( ) ; <nl> + <nl> if ( check_consistency ) <nl> checkConsistency ( require_columns_checksums ) ; <nl> } <nl> <nl> - void MergeTreeDataPart : : loadIndexGranularity ( ) <nl> + void IMergeTreeDataPart : : loadIndexGranularity ( ) <nl> { <nl> - String full_path = getFullPath ( ) ; <nl> - index_granularity_info . changeGranularityIfRequired ( full_path ) ; <nl> - <nl> - if ( columns . empty ( ) ) <nl> - throw Exception ( " No columns in part " + name , ErrorCodes : : NO_FILE_IN_DATA_PART ) ; <nl> - <nl> - <nl> - / / / We can use any column , it doesn ' t matter <nl> - std : : string marks_file_path = index_granularity_info . getMarksFilePath ( full_path + getFileNameForColumn ( columns . front ( ) ) ) ; <nl> - if ( ! Poco : : File ( marks_file_path ) . exists ( ) ) <nl> - throw Exception ( " Marks file ' " + marks_file_path + " ' doesn ' t exist " , ErrorCodes : : NO_FILE_IN_DATA_PART ) ; <nl> - <nl> - size_t marks_file_size = Poco : : File ( marks_file_path ) . getSize ( ) ; <nl> - <nl> - / / / old version of marks with static index granularity <nl> - if ( ! index_granularity_info . is_adaptive ) <nl> - { <nl> - size_t marks_count = marks_file_size / index_granularity_info . mark_size_in_bytes ; <nl> - index_granularity . resizeWithFixedGranularity ( marks_count , index_granularity_info . 
fixed_index_granularity ) ; / / / all the same <nl> - } <nl> - else <nl> - { <nl> - ReadBufferFromFile buffer ( marks_file_path , marks_file_size , - 1 ) ; <nl> - while ( ! buffer . eof ( ) ) <nl> - { <nl> - buffer . seek ( sizeof ( size_t ) * 2 , SEEK_CUR ) ; / / / skip offset_in_compressed file and offset_in_decompressed_block <nl> - size_t granularity ; <nl> - readIntBinary ( granularity , buffer ) ; <nl> - index_granularity . appendMark ( granularity ) ; <nl> - } <nl> - if ( index_granularity . getMarksCount ( ) * index_granularity_info . mark_size_in_bytes ! = marks_file_size ) <nl> - throw Exception ( " Cannot read all marks from file " + marks_file_path , ErrorCodes : : CANNOT_READ_ALL_DATA ) ; <nl> - } <nl> - index_granularity . setInitialized ( ) ; <nl> + throw Exception ( " Method ' loadIndexGranularity ' is not implemented for part with type " + getType ( ) . toString ( ) , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> } <nl> <nl> - void MergeTreeDataPart : : loadIndex ( ) <nl> + void IMergeTreeDataPart : : loadIndex ( ) <nl> { <nl> / / / It can be empty in case of mutations <nl> if ( ! index_granularity . isInitialized ( ) ) <nl> void MergeTreeDataPart : : loadIndex ( ) <nl> } <nl> } <nl> <nl> - void MergeTreeDataPart : : loadPartitionAndMinMaxIndex ( ) <nl> + void IMergeTreeDataPart : : loadPartitionAndMinMaxIndex ( ) <nl> { <nl> if ( storage . format_version < MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING ) <nl> { <nl> void MergeTreeDataPart : : loadPartitionAndMinMaxIndex ( ) <nl> ErrorCodes : : CORRUPTED_DATA ) ; <nl> } <nl> <nl> - void MergeTreeDataPart : : loadChecksums ( bool require ) <nl> + void IMergeTreeDataPart : : loadChecksums ( bool require ) <nl> { <nl> String path = getFullPath ( ) + " checksums . txt " ; <nl> Poco : : File checksums_file ( path ) ; <nl> void MergeTreeDataPart : : loadChecksums ( bool require ) <nl> } <nl> } <nl> <nl> - void MergeTreeDataPart : : loadRowsCount ( ) <nl> + void IMergeTreeDataPart : : loadRowsCount ( ) <nl> { <nl> + String path = getFullPath ( ) + " count . txt " ; <nl> if ( index_granularity . empty ( ) ) <nl> { <nl> rows_count = 0 ; <nl> } <nl> - else if ( storage . format_version > = MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING ) <nl> + else if ( storage . format_version > = MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING | | part_type = = Type : : COMPACT ) <nl> { <nl> - String path = getFullPath ( ) + " count . txt " ; <nl> if ( ! Poco : : File ( path ) . exists ( ) ) <nl> throw Exception ( " No count . txt in part " + name , ErrorCodes : : NO_FILE_IN_DATA_PART ) ; <nl> <nl> void MergeTreeDataPart : : loadRowsCount ( ) <nl> } <nl> } <nl> <nl> - void MergeTreeDataPart : : loadTTLInfos ( ) <nl> + void IMergeTreeDataPart : : loadTTLInfos ( ) <nl> { <nl> String path = getFullPath ( ) + " ttl . txt " ; <nl> if ( Poco : : File ( path ) . exists ( ) ) <nl> void MergeTreeDataPart : : loadTTLInfos ( ) <nl> } <nl> } <nl> <nl> - void MergeTreeDataPart : : accumulateColumnSizes ( ColumnToSize & column_to_size ) const <nl> - { <nl> - std : : shared_lock < std : : shared_mutex > part_lock ( columns_lock ) ; <nl> - <nl> - for ( const NameAndTypePair & name_type : storage . getColumns ( ) . getAllPhysical ( ) ) <nl> - { <nl> - IDataType : : SubstreamPath path ; <nl> - name_type . type - > enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> - { <nl> - Poco : : File bin_file ( getFullPath ( ) + IDataType : : getFileNameForStream ( name_type . name , substream_path ) + " . 
bin " ) ; <nl> - if ( bin_file . exists ( ) ) <nl> - column_to_size [ name_type . name ] + = bin_file . getSize ( ) ; <nl> - } , path ) ; <nl> - } <nl> - } <nl> - <nl> - void MergeTreeDataPart : : loadColumns ( bool require ) <nl> + void IMergeTreeDataPart : : loadColumns ( bool require ) <nl> { <nl> String path = getFullPath ( ) + " columns . txt " ; <nl> Poco : : File poco_file_path { path } ; <nl> if ( ! poco_file_path . exists ( ) ) <nl> { <nl> - if ( require ) <nl> + / / / We can get list of columns only from columns . txt in compact parts . <nl> + if ( require | | part_type = = Type : : COMPACT ) <nl> throw Exception ( " No columns . txt in part " + name , ErrorCodes : : NO_FILE_IN_DATA_PART ) ; <nl> <nl> / / / If there is no file with a list of columns , write it down . <nl> void MergeTreeDataPart : : loadColumns ( bool require ) <nl> columns . writeText ( out ) ; <nl> } <nl> Poco : : File ( path + " . tmp " ) . renameTo ( path ) ; <nl> + } <nl> + else <nl> + { <nl> + is_frozen = ! poco_file_path . canWrite ( ) ; <nl> + ReadBufferFromFile file = openForReading ( path ) ; <nl> + columns . readText ( file ) ; <nl> + } <nl> + <nl> + size_t pos = 0 ; <nl> + for ( const auto & column : columns ) <nl> + column_name_to_position . emplace ( column . name , pos + + ) ; <nl> + } <nl> + <nl> + UInt64 IMergeTreeDataPart : : calculateTotalSizeOnDisk ( const String & from ) <nl> + { <nl> + Poco : : File cur ( from ) ; <nl> + if ( cur . isFile ( ) ) <nl> + return cur . getSize ( ) ; <nl> + std : : vector < std : : string > files ; <nl> + cur . list ( files ) ; <nl> + UInt64 res = 0 ; <nl> + for ( const auto & file : files ) <nl> + res + = calculateTotalSizeOnDisk ( from + file ) ; <nl> + return res ; <nl> + } <nl> + <nl> + <nl> + void IMergeTreeDataPart : : renameTo ( const String & new_relative_path , bool remove_new_dir_if_exists ) const <nl> + { <nl> + assertOnDisk ( ) ; <nl> + <nl> + String from = getFullPath ( ) ; <nl> + String to = storage . getFullPathOnDisk ( disk ) + new_relative_path + " / " ; <nl> + <nl> + Poco : : File from_file ( from ) ; <nl> + if ( ! from_file . exists ( ) ) <nl> + throw Exception ( " Part directory " + from + " doesn ' t exist . Most likely it is logical error . " , ErrorCodes : : FILE_DOESNT_EXIST ) ; <nl> + <nl> + Poco : : File to_file ( to ) ; <nl> + if ( to_file . exists ( ) ) <nl> + { <nl> + if ( remove_new_dir_if_exists ) <nl> + { <nl> + Names files ; <nl> + Poco : : File ( from ) . list ( files ) ; <nl> + <nl> + LOG_WARNING ( storage . log , " Part directory " < < to < < " already exists " <nl> + < < " and contains " < < files . size ( ) < < " files . Removing it . " ) ; <nl> + <nl> + to_file . remove ( true ) ; <nl> + } <nl> + else <nl> + { <nl> + throw Exception ( " Part directory " + to + " already exists " , ErrorCodes : : DIRECTORY_ALREADY_EXISTS ) ; <nl> + } <nl> + } <nl> + <nl> + from_file . setLastModified ( Poco : : Timestamp : : fromEpochTime ( time ( nullptr ) ) ) ; <nl> + from_file . renameTo ( to ) ; <nl> + relative_path = new_relative_path ; <nl> + } <nl> + <nl> + <nl> + void IMergeTreeDataPart : : remove ( ) const <nl> + { <nl> + if ( ! isStoredOnDisk ( ) ) <nl> + return ; <nl> + <nl> + if ( relative_path . empty ( ) ) <nl> + throw Exception ( " Part relative_path cannot be empty . This is bug . " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> + <nl> + / * * Atomic directory removal : <nl> + * - rename directory to temporary name ; <nl> + * - remove it recursive . <nl> + * <nl> + * For temporary name we use " delete_tmp_ " prefix . 
<nl> + * <nl> + * NOTE : We cannot use " tmp_delete_ " prefix , because there is a second thread , <nl> + * that calls " clearOldTemporaryDirectories " and removes all directories , that begin with " tmp_ " and are old enough . <nl> + * But when we removing data part , it can be old enough . And rename doesn ' t change mtime . <nl> + * And a race condition can happen that will lead to " File not found " error here . <nl> + * / <nl> + <nl> + String full_path = storage . getFullPathOnDisk ( disk ) ; <nl> + String from = full_path + relative_path ; <nl> + String to = full_path + " delete_tmp_ " + name ; <nl> + / / TODO directory delete_tmp_ < name > is never removed if server crashes before returning from this function <nl> + <nl> + Poco : : File from_dir { from } ; <nl> + Poco : : File to_dir { to } ; <nl> + <nl> + if ( to_dir . exists ( ) ) <nl> + { <nl> + LOG_WARNING ( storage . log , " Directory " < < to < < " ( to which part must be renamed before removing ) already exists . " <nl> + " Most likely this is due to unclean restart . Removing it . " ) ; <nl> + <nl> + try <nl> + { <nl> + to_dir . remove ( true ) ; <nl> + } <nl> + catch ( . . . ) <nl> + { <nl> + LOG_ERROR ( storage . log , " Cannot remove directory " < < to < < " . Check owner and access rights . " ) ; <nl> + throw ; <nl> + } <nl> + } <nl> + <nl> + try <nl> + { <nl> + from_dir . renameTo ( to ) ; <nl> + } <nl> + catch ( const Poco : : FileNotFoundException & ) <nl> + { <nl> + LOG_ERROR ( storage . log , " Directory " < < from < < " ( part to remove ) doesn ' t exist or one of nested files has gone . " <nl> + " Most likely this is due to manual removing . This should be discouraged . Ignoring . " ) ; <nl> <nl> return ; <nl> } <nl> <nl> - is_frozen = ! poco_file_path . canWrite ( ) ; <nl> + try <nl> + { <nl> + / / / Remove each expected file in directory , then remove directory itself . <nl> + <nl> + # if ! __clang__ <nl> + # pragma GCC diagnostic push <nl> + # pragma GCC diagnostic ignored " - Wunused - variable " <nl> + # endif <nl> + std : : shared_lock < std : : shared_mutex > lock ( columns_lock ) ; <nl> <nl> - ReadBufferFromFile file = openForReading ( path ) ; <nl> - columns . readText ( file ) ; <nl> + for ( const auto & [ file , _ ] : checksums . files ) <nl> + { <nl> + String path_to_remove = to + " / " + file ; <nl> + if ( 0 ! = unlink ( path_to_remove . c_str ( ) ) ) <nl> + throwFromErrnoWithPath ( " Cannot unlink file " + path_to_remove , path_to_remove , <nl> + ErrorCodes : : CANNOT_UNLINK ) ; <nl> + } <nl> + # if ! __clang__ <nl> + # pragma GCC diagnostic pop <nl> + # endif <nl> + <nl> + for ( const auto & file : { " checksums . txt " , " columns . txt " } ) <nl> + { <nl> + String path_to_remove = to + " / " + file ; <nl> + if ( 0 ! = unlink ( path_to_remove . c_str ( ) ) ) <nl> + throwFromErrnoWithPath ( " Cannot unlink file " + path_to_remove , path_to_remove , <nl> + ErrorCodes : : CANNOT_UNLINK ) ; <nl> + } <nl> + <nl> + if ( 0 ! = rmdir ( to . c_str ( ) ) ) <nl> + throwFromErrnoWithPath ( " Cannot rmdir file " + to , to , ErrorCodes : : CANNOT_UNLINK ) ; <nl> + } <nl> + catch ( . . . ) <nl> + { <nl> + / / / Recursive directory removal does many excessive " stat " syscalls under the hood . <nl> + <nl> + LOG_ERROR ( storage . log , " Cannot quickly remove directory " < < to < < " by removing files ; fallback to recursive removal . Reason : " <nl> + < < getCurrentExceptionMessage ( false ) ) ; <nl> + <nl> + to_dir . 
remove ( true ) ; <nl> + } <nl> + } <nl> + <nl> + String IMergeTreeDataPart : : getRelativePathForDetachedPart ( const String & prefix ) const <nl> + { <nl> + / / / Do not allow underscores in the prefix because they are used as separators . <nl> + <nl> + assert ( prefix . find_first_of ( ' _ ' ) = = String : : npos ) ; <nl> + String res ; <nl> + <nl> + / * * If you need to detach a part , and directory into which we want to rename it already exists , <nl> + * we will rename to the directory with the name to which the suffix is added in the form of " _tryN " . <nl> + * This is done only in the case of ` to_detached ` , because it is assumed that in this case the exact name does not matter . <nl> + * No more than 10 attempts are made so that there are not too many junk directories left . <nl> + * / <nl> + for ( int try_no = 0 ; try_no < 10 ; try_no + + ) <nl> + { <nl> + res = " detached / " + ( prefix . empty ( ) ? " " : prefix + " _ " ) <nl> + + name + ( try_no ? " _try " + DB : : toString ( try_no ) : " " ) ; <nl> + <nl> + if ( ! Poco : : File ( storage . getFullPathOnDisk ( disk ) + res ) . exists ( ) ) <nl> + return res ; <nl> + <nl> + LOG_WARNING ( storage . log , " Directory " < < res < < " ( to detach to ) already exists . " <nl> + " Will detach to directory with ' _tryN ' suffix . " ) ; <nl> + } <nl> + <nl> + return res ; <nl> + } <nl> + <nl> + void IMergeTreeDataPart : : renameToDetached ( const String & prefix ) const <nl> + { <nl> + assertOnDisk ( ) ; <nl> + renameTo ( getRelativePathForDetachedPart ( prefix ) ) ; <nl> + } <nl> + <nl> + void IMergeTreeDataPart : : makeCloneInDetached ( const String & prefix ) const <nl> + { <nl> + assertOnDisk ( ) ; <nl> + LOG_INFO ( storage . log , " Detaching " < < relative_path ) ; <nl> + <nl> + Poco : : Path src ( getFullPath ( ) ) ; <nl> + Poco : : Path dst ( storage . getFullPathOnDisk ( disk ) + getRelativePathForDetachedPart ( prefix ) ) ; <nl> + / / / Backup is not recursive ( max_level is 0 ) , so do not copy inner directories <nl> + localBackup ( src , dst , 0 ) ; <nl> } <nl> <nl> - void MergeTreeDataPart : : checkConsistency ( bool require_part_metadata ) <nl> + void IMergeTreeDataPart : : makeCloneOnDiskDetached ( const ReservationPtr & reservation ) const <nl> + { <nl> + assertOnDisk ( ) ; <nl> + auto reserved_disk = reservation - > getDisk ( ) ; <nl> + if ( reserved_disk - > getName ( ) = = disk - > getName ( ) ) <nl> + throw Exception ( " Can not clone data part " + name + " to same disk " + disk - > getName ( ) , ErrorCodes : : LOGICAL_ERROR ) ; <nl> + <nl> + String path_to_clone = storage . getFullPathOnDisk ( reserved_disk ) + " detached / " ; <nl> + <nl> + if ( Poco : : File ( path_to_clone + relative_path ) . exists ( ) ) <nl> + throw Exception ( " Path " + path_to_clone + relative_path + " already exists . Can not clone " , ErrorCodes : : DIRECTORY_ALREADY_EXISTS ) ; <nl> + Poco : : File ( path_to_clone ) . createDirectory ( ) ; <nl> + <nl> + Poco : : File cloning_directory ( getFullPath ( ) ) ; <nl> + cloning_directory . copyTo ( path_to_clone ) ; <nl> + } <nl> + <nl> + void IMergeTreeDataPart : : checkConsistencyBase ( ) const <nl> { <nl> String path = getFullPath ( ) ; <nl> <nl> void MergeTreeDataPart : : checkConsistency ( bool require_part_metadata ) <nl> if ( ! storage . primary_key_columns . empty ( ) & & ! checksums . files . count ( " primary . idx " ) ) <nl> throw Exception ( " No checksum for primary . 
idx " , ErrorCodes : : NO_FILE_IN_DATA_PART ) ; <nl> <nl> - if ( require_part_metadata ) <nl> - { <nl> - for ( const NameAndTypePair & name_type : columns ) <nl> - { <nl> - IDataType : : SubstreamPath stream_path ; <nl> - name_type . type - > enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> - { <nl> - String file_name = IDataType : : getFileNameForStream ( name_type . name , substream_path ) ; <nl> - String mrk_file_name = file_name + index_granularity_info . marks_file_extension ; <nl> - String bin_file_name = file_name + " . bin " ; <nl> - if ( ! checksums . files . count ( mrk_file_name ) ) <nl> - throw Exception ( " No " + mrk_file_name + " file checksum for column " + name_type . name + " in part " + path , <nl> - ErrorCodes : : NO_FILE_IN_DATA_PART ) ; <nl> - if ( ! checksums . files . count ( bin_file_name ) ) <nl> - throw Exception ( " No " + bin_file_name + " file checksum for column " + name_type . name + " in part " + path , <nl> - ErrorCodes : : NO_FILE_IN_DATA_PART ) ; <nl> - } , stream_path ) ; <nl> - } <nl> - } <nl> - <nl> if ( storage . format_version > = MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING ) <nl> { <nl> if ( ! checksums . files . count ( " count . txt " ) ) <nl> void MergeTreeDataPart : : checkConsistency ( bool require_part_metadata ) <nl> for ( const String & col_name : storage . minmax_idx_columns ) <nl> check_file_not_empty ( path + " minmax_ " + escapeForFileName ( col_name ) + " . idx " ) ; <nl> } <nl> - <nl> - / / / Check that all marks are nonempty and have the same size . <nl> - <nl> - std : : optional < UInt64 > marks_size ; <nl> - for ( const NameAndTypePair & name_type : columns ) <nl> - { <nl> - name_type . type - > enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> - { <nl> - Poco : : File file ( IDataType : : getFileNameForStream ( name_type . name , substream_path ) + index_granularity_info . marks_file_extension ) ; <nl> - <nl> - / / / Missing file is Ok for case when new column was added . <nl> - if ( file . exists ( ) ) <nl> - { <nl> - UInt64 file_size = file . getSize ( ) ; <nl> - <nl> - if ( ! file_size ) <nl> - throw Exception ( " Part " + path + " is broken : " + file . path ( ) + " is empty . " , <nl> - ErrorCodes : : BAD_SIZE_OF_FILE_IN_DATA_PART ) ; <nl> - <nl> - if ( ! marks_size ) <nl> - marks_size = file_size ; <nl> - else if ( file_size ! = * marks_size ) <nl> - throw Exception ( " Part " + path + " is broken : marks have different sizes . " , <nl> - ErrorCodes : : BAD_SIZE_OF_FILE_IN_DATA_PART ) ; <nl> - } <nl> - } ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - bool MergeTreeDataPart : : hasColumnFiles ( const String & column_name , const IDataType & type ) const <nl> - { <nl> - bool res = true ; <nl> - <nl> - type . enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> - { <nl> - String file_name = IDataType : : getFileNameForStream ( column_name , substream_path ) ; <nl> - <nl> - auto bin_checksum = checksums . files . find ( file_name + " . bin " ) ; <nl> - auto mrk_checksum = checksums . files . find ( file_name + index_granularity_info . marks_file_extension ) ; <nl> - <nl> - if ( bin_checksum = = checksums . files . end ( ) | | mrk_checksum = = checksums . files . 
end ( ) ) <nl> - res = false ; <nl> - } , { } ) ; <nl> - <nl> - return res ; <nl> - } <nl> - <nl> - <nl> - UInt64 MergeTreeDataPart : : getIndexSizeInBytes ( ) const <nl> - { <nl> - UInt64 res = 0 ; <nl> - for ( const ColumnPtr & column : index ) <nl> - res + = column - > byteSize ( ) ; <nl> - return res ; <nl> - } <nl> - <nl> - UInt64 MergeTreeDataPart : : getIndexSizeInAllocatedBytes ( ) const <nl> - { <nl> - UInt64 res = 0 ; <nl> - for ( const ColumnPtr & column : index ) <nl> - res + = column - > allocatedBytes ( ) ; <nl> - return res ; <nl> - } <nl> - <nl> - String MergeTreeDataPart : : stateToString ( MergeTreeDataPart : : State state ) <nl> - { <nl> - switch ( state ) <nl> - { <nl> - case State : : Temporary : <nl> - return " Temporary " ; <nl> - case State : : PreCommitted : <nl> - return " PreCommitted " ; <nl> - case State : : Committed : <nl> - return " Committed " ; <nl> - case State : : Outdated : <nl> - return " Outdated " ; <nl> - case State : : Deleting : <nl> - return " Deleting " ; <nl> - case State : : DeleteOnDestroy : <nl> - return " DeleteOnDestroy " ; <nl> } <nl> - <nl> - __builtin_unreachable ( ) ; <nl> } <nl> <nl> - String MergeTreeDataPart : : stateString ( ) const <nl> + bool isCompactPart ( const MergeTreeDataPartPtr & data_part ) <nl> { <nl> - return stateToString ( state ) ; <nl> + return ( data_part & & data_part - > getType ( ) = = MergeTreeDataPartType : : COMPACT ) ; <nl> } <nl> <nl> - void MergeTreeDataPart : : assertState ( const std : : initializer_list < MergeTreeDataPart : : State > & affordable_states ) const <nl> + bool isWidePart ( const MergeTreeDataPartPtr & data_part ) <nl> { <nl> - if ( ! checkState ( affordable_states ) ) <nl> - { <nl> - String states_str ; <nl> - for ( auto affordable_state : affordable_states ) <nl> - states_str + = stateToString ( affordable_state ) + " " ; <nl> - <nl> - throw Exception ( " Unexpected state of part " + getNameWithState ( ) + " . Expected : " + states_str , ErrorCodes : : NOT_FOUND_EXPECTED_DATA_PART ) ; <nl> - } <nl> + return ( data_part & & data_part - > getType ( ) = = MergeTreeDataPartType : : WIDE ) ; <nl> } <nl> <nl> } <nl> similarity index 69 % <nl> rename from dbms / src / Storages / MergeTree / MergeTreeDataPart . h <nl> rename to dbms / src / Storages / MergeTree / IMergeTreeDataPart . h <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeDataPart . h <nl> ppp b / dbms / src / Storages / MergeTree / IMergeTreeDataPart . h <nl> <nl> # pragma once <nl> <nl> + # include < DataStreams / IBlockInputStream . h > <nl> + <nl> + # include < Core / Row . h > <nl> # include < Core / Block . h > <nl> # include < Core / Types . h > <nl> # include < Core / NamesAndTypes . h > <nl> + # include < Storages / IStorage . h > <nl> # include < Storages / MergeTree / MergeTreeIndexGranularity . h > <nl> # include < Storages / MergeTree / MergeTreeIndexGranularityInfo . h > <nl> # include < Storages / MergeTree / MergeTreeIndices . h > <nl> <nl> # include < Storages / MergeTree / MergeTreePartition . h > <nl> # include < Storages / MergeTree / MergeTreeDataPartChecksum . h > <nl> # include < Storages / MergeTree / MergeTreeDataPartTTLInfo . h > <nl> + # include < Storages / MergeTree / MergeTreeIOSettings . h > <nl> + # include < Storages / MergeTree / AlterAnalysisResult . h > <nl> # include < Storages / MergeTree / KeyCondition . h > <nl> # include < Columns / IColumn . 
h > <nl> <nl> <nl> <nl> # include < shared_mutex > <nl> <nl> - <nl> namespace DB <nl> { <nl> <nl> struct FutureMergedMutatedPart ; <nl> class IReservation ; <nl> using ReservationPtr = std : : unique_ptr < IReservation > ; <nl> <nl> + class IMergeTreeReader ; <nl> + class IMergeTreeDataPartWriter ; <nl> + <nl> + namespace ErrorCodes <nl> + { <nl> + extern const int NOT_IMPLEMETED ; <nl> + } <nl> <nl> / / / Description of the data part . <nl> - struct MergeTreeDataPart <nl> + class IMergeTreeDataPart : public std : : enable_shared_from_this < IMergeTreeDataPart > <nl> { <nl> + public : <nl> + <nl> using Checksums = MergeTreeDataPartChecksums ; <nl> using Checksum = MergeTreeDataPartChecksums : : Checksum ; <nl> + using ValueSizeMap = std : : map < std : : string , double > ; <nl> <nl> - MergeTreeDataPart ( const MergeTreeData & storage_ , const DiskPtr & disk_ , const String & name_ , const MergeTreePartInfo & info_ ) ; <nl> + using MergeTreeReaderPtr = std : : unique_ptr < IMergeTreeReader > ; <nl> + using MergeTreeWriterPtr = std : : unique_ptr < IMergeTreeDataPartWriter > ; <nl> <nl> - MergeTreeDataPart ( MergeTreeData & storage_ , const DiskPtr & disk_ , const String & name_ ) ; <nl> + using ColumnSizeByName = std : : unordered_map < std : : string , ColumnSize > ; <nl> + using NameToPosition = std : : unordered_map < std : : string , size_t > ; <nl> <nl> - / / / Returns the name of a column with minimum compressed size ( as returned by getColumnSize ( ) ) . <nl> - / / / If no checksums are present returns the name of the first physically existing column . <nl> - String getColumnNameWithMinumumCompressedSize ( ) const ; <nl> + using Type = MergeTreeDataPartType ; <nl> + <nl> + <nl> + IMergeTreeDataPart ( <nl> + const MergeTreeData & storage_ , <nl> + const String & name_ , <nl> + const MergeTreePartInfo & info_ , <nl> + const DiskPtr & disk , <nl> + const std : : optional < String > & relative_path , <nl> + Type part_type_ ) ; <nl> + <nl> + IMergeTreeDataPart ( <nl> + MergeTreeData & storage_ , <nl> + const String & name_ , <nl> + const DiskPtr & disk , <nl> + const std : : optional < String > & relative_path , <nl> + Type part_type_ ) ; <nl> + <nl> + virtual MergeTreeReaderPtr getReader ( <nl> + const NamesAndTypesList & columns_ , <nl> + const MarkRanges & mark_ranges , <nl> + UncompressedCache * uncompressed_cache , <nl> + MarkCache * mark_cache , <nl> + const MergeTreeReaderSettings & reader_settings_ , <nl> + const ValueSizeMap & avg_value_size_hints_ = ValueSizeMap { } , <nl> + const ReadBufferFromFileBase : : ProfileCallback & profile_callback_ = ReadBufferFromFileBase : : ProfileCallback { } ) const = 0 ; <nl> + <nl> + virtual MergeTreeWriterPtr getWriter ( <nl> + const NamesAndTypesList & columns_list , <nl> + const std : : vector < MergeTreeIndexPtr > & indices_to_recalc , <nl> + const CompressionCodecPtr & default_codec_ , <nl> + const MergeTreeWriterSettings & writer_settings , <nl> + const MergeTreeIndexGranularity & computed_index_granularity = { } ) const = 0 ; <nl> + <nl> + virtual bool isStoredOnDisk ( ) const = 0 ; <nl> + <nl> + virtual bool supportsVerticalMerge ( ) const { return false ; } <nl> <nl> / / / NOTE : Returns zeros if column files are not found in checksums . <nl> / / / NOTE : You must ensure that no ALTERs are in progress when calculating ColumnSizes . <nl> / / / ( either by locking columns_lock , or by locking table structure ) . 
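<nl> / / / Illustrative usage ( not part of this change ) : a caller can take a shared lock on columns_lock
<nl> / / / and query the compressed footprint of a single column , e . g .
<nl> / / / std : : shared_lock < std : : shared_mutex > lock ( part - > columns_lock ) ;
<nl> / / / ColumnSize size = part - > getColumnSize ( column . name , * column . type ) ;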
<nl> - ColumnSize getColumnSize ( const String & name , const IDataType & type ) const ; <nl> + virtual ColumnSize getColumnSize ( const String & / * name * / , const IDataType & / * type * / ) const { return { } ; } <nl> <nl> - ColumnSize getTotalColumnsSize ( ) const ; <nl> + virtual ColumnSize getTotalColumnsSize ( ) const { return { } ; } <nl> <nl> - size_t getFileSizeOrZero ( const String & file_name ) const ; <nl> + virtual String getFileNameForColumn ( const NameAndTypePair & column ) const = 0 ; <nl> <nl> - / / / Returns full path to part dir <nl> - String getFullPath ( ) const ; <nl> + / / / Returns rename map of column files for the alter converting expression onto new table files . <nl> + / / / Files to be deleted are mapped to an empty string in rename map . <nl> + virtual NameToNameMap createRenameMapForAlter ( <nl> + AlterAnalysisResult & / * analysis_result * / , <nl> + const NamesAndTypesList & / * old_columns * / ) const { return { } ; } <nl> + <nl> + virtual ~ IMergeTreeDataPart ( ) ; <nl> + <nl> + using ColumnToSize = std : : map < std : : string , UInt64 > ; <nl> + virtual void accumulateColumnSizes ( ColumnToSize & / * column_to_size * / ) const { } <nl> + <nl> + Type getType ( ) const { return part_type ; } <nl> + <nl> + String getTypeName ( ) const { return getType ( ) . toString ( ) ; } <nl> <nl> - / / / Returns part - > name with prefixes like ' tmp_ < name > ' <nl> - String getNameWithPrefix ( ) const ; <nl> + void setColumns ( const NamesAndTypesList & new_columns ) ; <nl> + <nl> + const NamesAndTypesList & getColumns ( ) const { return columns ; } <nl> + <nl> + void assertOnDisk ( ) const ; <nl> + <nl> + void remove ( ) const ; <nl> + <nl> + / / / Initialize columns ( from columns . txt if exists , or create from column files if not ) . <nl> + / / / Load checksums from checksums . txt if exists . Load index if required . <nl> + void loadColumnsChecksumsIndexes ( bool require_columns_checksums , bool check_consistency ) ; <nl> + <nl> + String getMarksFileExtension ( ) const { return index_granularity_info . marks_file_extension ; } <nl> <nl> / / / Generate the new name for this part according to ` new_part_info ` and min / max dates from the old name . <nl> / / / This is useful when you want to change e . g . block numbers or the mutation version of the part . <nl> String getNewName ( const MergeTreePartInfo & new_part_info ) const ; <nl> <nl> - bool contains ( const MergeTreeDataPart & other ) const { return info . contains ( other . info ) ; } <nl> + / / / Returns column position in part structure or std : : nullopt if it ' s missing in part . <nl> + std : : optional < size_t > getColumnPosition ( const String & column_name ) const ; <nl> + <nl> + / / / Returns the name of a column with minimum compressed size ( as returned by getColumnSize ( ) ) . <nl> + / / / If no checksums are present returns the name of the first physically existing column . <nl> + String getColumnNameWithMinumumCompressedSize ( ) const ; <nl> + <nl> + bool contains ( const IMergeTreeDataPart & other ) const { return info . contains ( other . info ) ; } <nl> <nl> / / / If the partition key includes date column ( a common case ) , these functions will return min and max values for this column . 
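<nl> / / / For example ( illustrative ) : for a part named 20190901_20190930_1_5_0 whose partition key is a Date column ,
<nl> / / / getMinDate ( ) returns 2019-09-01 and getMaxDate ( ) returns 2019-09-30 .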
<nl> DayNum getMinDate ( ) const ; <nl> struct MergeTreeDataPart <nl> <nl> const MergeTreeData & storage ; <nl> <nl> - DiskPtr disk ; <nl> String name ; <nl> MergeTreePartInfo info ; <nl> <nl> - / / / A directory path ( relative to storage ' s path ) where part data is actually stored <nl> - / / / Examples : ' detached / tmp_fetch_ < name > ' , ' tmp_ < name > ' , ' < name > ' <nl> + DiskPtr disk ; <nl> + <nl> mutable String relative_path ; <nl> + MergeTreeIndexGranularityInfo index_granularity_info ; <nl> <nl> size_t rows_count = 0 ; <nl> + <nl> std : : atomic < UInt64 > bytes_on_disk { 0 } ; / / / 0 - if not counted ; <nl> / / / Is used from several threads without locks ( it is changed with ALTER ) . <nl> / / / May not contain size of checksums . txt and columns . txt <nl> + <nl> time_t modification_time = 0 ; <nl> / / / When the part is removed from the working set . Changes once . <nl> mutable std : : atomic < time_t > remove_time { std : : numeric_limits < time_t > : : max ( ) } ; <nl> struct MergeTreeDataPart <nl> / / / Throws an exception if state of the part is not in affordable_states <nl> void assertState ( const std : : initializer_list < State > & affordable_states ) const ; <nl> <nl> - / / / In comparison with lambdas , it is move assignable and could has several overloaded operator ( ) <nl> - struct StatesFilter <nl> - { <nl> - std : : initializer_list < State > affordable_states ; <nl> - StatesFilter ( const std : : initializer_list < State > & affordable_states_ ) : affordable_states ( affordable_states_ ) { } <nl> - <nl> - bool operator ( ) ( const std : : shared_ptr < const MergeTreeDataPart > & part ) const <nl> - { <nl> - return part - > checkState ( affordable_states ) ; <nl> - } <nl> - } ; <nl> - <nl> - / / / Returns a lambda that returns true only for part with states from specified list <nl> - static inline StatesFilter getStatesFilter ( const std : : initializer_list < State > & affordable_states ) <nl> - { <nl> - return StatesFilter ( affordable_states ) ; <nl> - } <nl> - <nl> / / / Primary key ( correspond to primary . idx file ) . <nl> / / / Always loaded in RAM . Contains each index_granularity - th value of primary key tuple . <nl> / / / Note that marks ( also correspond to primary key ) is not always in RAM , but cached . See MarkCache . h . <nl> struct MergeTreeDataPart <nl> <nl> Checksums checksums ; <nl> <nl> - / / / Columns description . <nl> - NamesAndTypesList columns ; <nl> - <nl> / / / Columns with values , that all have been zeroed by expired ttl <nl> - NameSet empty_columns ; <nl> - <nl> - using ColumnToSize = std : : map < std : : string , UInt64 > ; <nl> + NameSet expired_columns ; <nl> <nl> / * * It is blocked for writing when changing columns , checksums or any part files . <nl> * Locked to read when reading columns , checksums or any part files . 
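<nl> * For example , remove ( ) above takes a std : : shared_lock on columns_lock while it unlinks the files
<nl> * listed in checksums , so the set of part files cannot change under it .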
<nl> * / <nl> mutable std : : shared_mutex columns_lock ; <nl> <nl> - MergeTreeIndexGranularityInfo index_granularity_info ; <nl> - <nl> - ~ MergeTreeDataPart ( ) ; <nl> - <nl> - / / / Calculate the total size of the entire directory with all the files <nl> - static UInt64 calculateTotalSizeOnDisk ( const String & from ) ; <nl> - <nl> - void remove ( ) const ; <nl> - <nl> - / / / Makes checks and move part to new directory <nl> - / / / Changes only relative_dir_name , you need to update other metadata ( name , is_temp ) explicitly <nl> - void renameTo ( const String & new_relative_path , bool remove_new_dir_if_exists = true ) const ; <nl> - <nl> - / / / Generate unique path to detach part <nl> - String getRelativePathForDetachedPart ( const String & prefix ) const ; <nl> + / / / For data in RAM ( ' index ' ) <nl> + UInt64 getIndexSizeInBytes ( ) const ; <nl> + UInt64 getIndexSizeInAllocatedBytes ( ) const ; <nl> + UInt64 getMarksCount ( ) const ; <nl> <nl> - / / / Moves a part to detached / directory and adds prefix to its name <nl> + size_t getFileSizeOrZero ( const String & file_name ) const ; <nl> + String getFullPath ( ) const ; <nl> + void renameTo ( const String & new_relative_path , bool remove_new_dir_if_exists = false ) const ; <nl> void renameToDetached ( const String & prefix ) const ; <nl> - <nl> - / / / Makes clone of a part in detached / directory via hard links <nl> void makeCloneInDetached ( const String & prefix ) const ; <nl> <nl> / / / Makes full clone of part in detached / on another disk <nl> void makeCloneOnDiskDetached ( const ReservationPtr & reservation ) const ; <nl> <nl> - / / / Populates columns_to_size map ( compressed size ) . <nl> - void accumulateColumnSizes ( ColumnToSize & column_to_size ) const ; <nl> + / / / Checks that . bin and . mrk files exist <nl> + virtual bool hasColumnFiles ( const String & / * column * / , const IDataType & / * type * / ) const { return false ; } <nl> <nl> - / / / Initialize columns ( from columns . txt if exists , or create from column files if not ) . <nl> - / / / Load checksums from checksums . txt if exists . Load index if required . <nl> - void loadColumnsChecksumsIndexes ( bool require_columns_checksums , bool check_consistency ) ; <nl> + static UInt64 calculateTotalSizeOnDisk ( const String & from ) ; <nl> <nl> - / / / Checks that . bin and . mrk files exist <nl> - bool hasColumnFiles ( const String & column , const IDataType & type ) const ; <nl> + protected : <nl> + / / / Columns description . <nl> + NamesAndTypesList columns ; <nl> + const Type part_type ; <nl> <nl> - / / / For data in RAM ( ' index ' ) <nl> - UInt64 getIndexSizeInBytes ( ) const ; <nl> - UInt64 getIndexSizeInAllocatedBytes ( ) const ; <nl> - UInt64 getMarksCount ( ) const ; <nl> + void removeIfNeeded ( ) ; <nl> + <nl> + virtual void checkConsistency ( bool require_part_metadata ) const = 0 ; <nl> + void checkConsistencyBase ( ) const ; <nl> <nl> private : <nl> + / / / In compact parts order of columns is necessary <nl> + NameToPosition column_name_to_position ; <nl> + <nl> / / / Reads columns names and types from columns . txt <nl> void loadColumns ( bool require ) ; <nl> <nl> struct MergeTreeDataPart <nl> void loadChecksums ( bool require ) ; <nl> <nl> / / / Loads marks index granularity into memory <nl> - void loadIndexGranularity ( ) ; <nl> + virtual void loadIndexGranularity ( ) ; <nl> <nl> / / / Loads index file . 
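<nl> / / / The write - side counterpart is IMergeTreeDataPartWriter : : calculateAndSerializePrimaryIndex and
<nl> / / / finishPrimaryIndexSerialization ( see below ) , which produce primary . idx with one entry per granule .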
<nl> void loadIndex ( ) ; <nl> struct MergeTreeDataPart <nl> <nl> void loadPartitionAndMinMaxIndex ( ) ; <nl> <nl> - void checkConsistency ( bool require_part_metadata ) ; <nl> - <nl> - ColumnSize getColumnSizeImpl ( const String & name , const IDataType & type , std : : unordered_set < String > * processed_substreams ) const ; <nl> + String getRelativePathForDetachedPart ( const String & prefix ) const ; <nl> } ; <nl> <nl> + using MergeTreeDataPartState = IMergeTreeDataPart : : State ; <nl> + using MergeTreeDataPartPtr = std : : shared_ptr < const IMergeTreeDataPart > ; <nl> <nl> - using MergeTreeDataPartState = MergeTreeDataPart : : State ; <nl> + bool isCompactPart ( const MergeTreeDataPartPtr & data_part ) ; <nl> + bool isWidePart ( const MergeTreeDataPartPtr & data_part ) ; <nl> <nl> } <nl> new file mode 100644 <nl> index 00000000000 . . 627b857ca0d <nl> mmm / dev / null <nl> ppp b / dbms / src / Storages / MergeTree / IMergeTreeDataPartWriter . cpp <nl> <nl> + # include < Storages / MergeTree / IMergeTreeDataPartWriter . h > <nl> + # include < IO / createWriteBufferFromFileBase . h > <nl> + # include < Poco / File . h > <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + namespace <nl> + { <nl> + constexpr auto INDEX_FILE_EXTENSION = " . idx " ; <nl> + } <nl> + <nl> + void IMergeTreeDataPartWriter : : Stream : : finalize ( ) <nl> + { <nl> + compressed . next ( ) ; <nl> + plain_file - > next ( ) ; <nl> + marks . next ( ) ; <nl> + } <nl> + <nl> + void IMergeTreeDataPartWriter : : Stream : : sync ( ) <nl> + { <nl> + plain_file - > sync ( ) ; <nl> + marks_file . sync ( ) ; <nl> + } <nl> + <nl> + IMergeTreeDataPartWriter : : Stream : : Stream ( <nl> + const String & escaped_column_name_ , <nl> + const String & data_path_ , <nl> + const std : : string & data_file_extension_ , <nl> + const std : : string & marks_path_ , <nl> + const std : : string & marks_file_extension_ , <nl> + const CompressionCodecPtr & compression_codec_ , <nl> + size_t max_compress_block_size_ , <nl> + size_t estimated_size_ , <nl> + size_t aio_threshold_ ) : <nl> + escaped_column_name ( escaped_column_name_ ) , <nl> + data_file_extension { data_file_extension_ } , <nl> + marks_file_extension { marks_file_extension_ } , <nl> + plain_file ( createWriteBufferFromFileBase ( data_path_ + data_file_extension , estimated_size_ , aio_threshold_ , max_compress_block_size_ ) ) , <nl> + plain_hashing ( * plain_file ) , compressed_buf ( plain_hashing , compression_codec_ ) , compressed ( compressed_buf ) , <nl> + marks_file ( marks_path_ + marks_file_extension , 4096 , O_TRUNC | O_CREAT | O_WRONLY ) , marks ( marks_file ) <nl> + { <nl> + } <nl> + <nl> + void IMergeTreeDataPartWriter : : Stream : : addToChecksums ( MergeTreeData : : DataPart : : Checksums & checksums ) <nl> + { <nl> + String name = escaped_column_name ; <nl> + <nl> + checksums . files [ name + data_file_extension ] . is_compressed = true ; <nl> + checksums . files [ name + data_file_extension ] . uncompressed_size = compressed . count ( ) ; <nl> + checksums . files [ name + data_file_extension ] . uncompressed_hash = compressed . getHash ( ) ; <nl> + checksums . files [ name + data_file_extension ] . file_size = plain_hashing . count ( ) ; <nl> + checksums . files [ name + data_file_extension ] . file_hash = plain_hashing . getHash ( ) ; <nl> + <nl> + checksums . files [ name + marks_file_extension ] . file_size = marks . count ( ) ; <nl> + checksums . files [ name + marks_file_extension ] . file_hash = marks . 
getHash ( ) ; <nl> + } <nl> + <nl> + <nl> + IMergeTreeDataPartWriter : : IMergeTreeDataPartWriter ( <nl> + const String & part_path_ , <nl> + const MergeTreeData & storage_ , <nl> + const NamesAndTypesList & columns_list_ , <nl> + const std : : vector < MergeTreeIndexPtr > & indices_to_recalc_ , <nl> + const String & marks_file_extension_ , <nl> + const CompressionCodecPtr & default_codec_ , <nl> + const MergeTreeWriterSettings & settings_ , <nl> + const MergeTreeIndexGranularity & index_granularity_ , <nl> + bool need_finish_last_granule_ ) <nl> + : part_path ( part_path_ ) <nl> + , storage ( storage_ ) <nl> + , columns_list ( columns_list_ ) <nl> + , marks_file_extension ( marks_file_extension_ ) <nl> + , index_granularity ( index_granularity_ ) <nl> + , default_codec ( default_codec_ ) <nl> + , skip_indices ( indices_to_recalc_ ) <nl> + , settings ( settings_ ) <nl> + , compute_granularity ( index_granularity . empty ( ) ) <nl> + , with_final_mark ( storage . getSettings ( ) - > write_final_mark & & settings . can_use_adaptive_granularity ) <nl> + , need_finish_last_granule ( need_finish_last_granule_ ) <nl> + { <nl> + if ( settings . blocks_are_granules_size & & ! index_granularity . empty ( ) ) <nl> + throw Exception ( " Can ' t take information about index granularity from blocks , when non empty index_granularity array specified " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> + <nl> + Poco : : File part_dir ( part_path ) ; <nl> + if ( ! part_dir . exists ( ) ) <nl> + part_dir . createDirectories ( ) ; <nl> + <nl> + } <nl> + <nl> + IMergeTreeDataPartWriter : : ~ IMergeTreeDataPartWriter ( ) = default ; <nl> + <nl> + static void fillIndexGranularityImpl ( <nl> + const Block & block , <nl> + size_t index_granularity_bytes , <nl> + size_t fixed_index_granularity_rows , <nl> + bool blocks_are_granules , <nl> + size_t index_offset , <nl> + MergeTreeIndexGranularity & index_granularity , <nl> + bool can_use_adaptive_index_granularity , <nl> + bool need_finish_last_granule = false ) <nl> + { <nl> + size_t rows_in_block = block . rows ( ) ; <nl> + size_t index_granularity_for_block ; <nl> + if ( ! can_use_adaptive_index_granularity ) <nl> + index_granularity_for_block = fixed_index_granularity_rows ; <nl> + else <nl> + { <nl> + size_t block_size_in_memory = block . 
bytes ( ) ; <nl> + if ( blocks_are_granules ) <nl> + index_granularity_for_block = rows_in_block ; <nl> + else if ( block_size_in_memory > = index_granularity_bytes ) <nl> + { <nl> + size_t granules_in_block = block_size_in_memory / index_granularity_bytes ; <nl> + index_granularity_for_block = rows_in_block / granules_in_block ; <nl> + } <nl> + else <nl> + { <nl> + size_t size_of_row_in_bytes = block_size_in_memory / rows_in_block ; <nl> + index_granularity_for_block = index_granularity_bytes / size_of_row_in_bytes ; <nl> + } <nl> + } <nl> + if ( index_granularity_for_block = = 0 ) / / / very rare case when index granularity bytes less then single row <nl> + index_granularity_for_block = 1 ; <nl> + <nl> + / / / We should be less or equal than fixed index granularity <nl> + index_granularity_for_block = std : : min ( fixed_index_granularity_rows , index_granularity_for_block ) ; <nl> + <nl> + size_t current_row ; <nl> + for ( current_row = index_offset ; current_row < rows_in_block ; current_row + = index_granularity_for_block ) <nl> + { <nl> + size_t rows_left_in_block = rows_in_block - current_row ; <nl> + <nl> + / / / Try to extend last granule if it ' s needed and block is large enough <nl> + / / / or it shouldn ' t be first in granule ( index_offset ! = 0 ) . <nl> + if ( need_finish_last_granule & & rows_left_in_block < index_granularity_for_block <nl> + & & ( rows_in_block > = index_granularity_for_block | | index_offset ! = 0 ) ) <nl> + { <nl> + / / If enough rows are left , create a new granule . Otherwise , extend previous granule . <nl> + / / So , real size of granule differs from index_granularity_for_block not more than 50 % . <nl> + if ( rows_left_in_block * 2 > = index_granularity_for_block ) <nl> + index_granularity . appendMark ( rows_left_in_block ) ; <nl> + else <nl> + index_granularity . addRowsToLastMark ( rows_left_in_block ) ; <nl> + } <nl> + else <nl> + { <nl> + index_granularity . appendMark ( index_granularity_for_block ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void IMergeTreeDataPartWriter : : fillIndexGranularity ( const Block & block ) <nl> + { <nl> + const auto storage_settings = storage . getSettings ( ) ; <nl> + fillIndexGranularityImpl ( <nl> + block , <nl> + storage_settings - > index_granularity_bytes , <nl> + storage_settings - > index_granularity , <nl> + settings . blocks_are_granules_size , <nl> + index_offset , <nl> + index_granularity , <nl> + settings . can_use_adaptive_granularity , <nl> + need_finish_last_granule ) ; <nl> + } <nl> + <nl> + void IMergeTreeDataPartWriter : : initPrimaryIndex ( ) <nl> + { <nl> + if ( storage . hasPrimaryKey ( ) ) <nl> + { <nl> + index_file_stream = std : : make_unique < WriteBufferFromFile > ( <nl> + part_path + " primary . idx " , DBMS_DEFAULT_BUFFER_SIZE , O_TRUNC | O_CREAT | O_WRONLY ) ; <nl> + index_stream = std : : make_unique < HashingWriteBuffer > ( * index_file_stream ) ; <nl> + } <nl> + <nl> + primary_index_initialized = true ; <nl> + } <nl> + <nl> + void IMergeTreeDataPartWriter : : initSkipIndices ( ) <nl> + { <nl> + for ( const auto & index : skip_indices ) <nl> + { <nl> + String stream_name = index - > getFileName ( ) ; <nl> + skip_indices_streams . emplace_back ( <nl> + std : : make_unique < IMergeTreeDataPartWriter : : Stream > ( <nl> + stream_name , <nl> + part_path + stream_name , INDEX_FILE_EXTENSION , <nl> + part_path + stream_name , marks_file_extension , <nl> + default_codec , settings . max_compress_block_size , <nl> + 0 , settings . aio_threshold ) ) ; <nl> + skip_indices_aggregators . 
push_back ( index - > createIndexAggregator ( ) ) ; <nl> + skip_index_filling . push_back ( 0 ) ; <nl> + } <nl> + <nl> + skip_indices_initialized = true ; <nl> + } <nl> + <nl> + void IMergeTreeDataPartWriter : : calculateAndSerializePrimaryIndex ( const Block & primary_keys_block , size_t rows ) <nl> + { <nl> + if ( ! primary_index_initialized ) <nl> + throw Exception ( " Primary index is not initialized " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> + <nl> + size_t primary_columns_num = primary_keys_block . columns ( ) ; <nl> + if ( index_columns . empty ( ) ) <nl> + { <nl> + index_types = primary_keys_block . getDataTypes ( ) ; <nl> + index_columns . resize ( primary_columns_num ) ; <nl> + last_index_row . resize ( primary_columns_num ) ; <nl> + for ( size_t i = 0 ; i < primary_columns_num ; + + i ) <nl> + index_columns [ i ] = primary_keys_block . getByPosition ( i ) . column - > cloneEmpty ( ) ; <nl> + } <nl> + <nl> + / * * While filling index ( index_columns ) , disable memory tracker . <nl> + * Because memory is allocated here ( maybe in context of INSERT query ) , <nl> + * but then freed in completely different place ( while merging parts ) , where query memory_tracker is not available . <nl> + * And otherwise it will look like excessively growing memory consumption in context of query . <nl> + * ( observed in long INSERT SELECTs ) <nl> + * / <nl> + auto temporarily_disable_memory_tracker = getCurrentMemoryTrackerActionLock ( ) ; <nl> + <nl> + / / / Write index . The index contains Primary Key value for each ` index_granularity ` row . <nl> + <nl> + for ( size_t i = index_offset ; i < rows ; ) <nl> + { <nl> + if ( storage . hasPrimaryKey ( ) ) <nl> + { <nl> + for ( size_t j = 0 ; j < primary_columns_num ; + + j ) <nl> + { <nl> + const auto & primary_column = primary_keys_block . getByPosition ( j ) ; <nl> + index_columns [ j ] - > insertFrom ( * primary_column . column , i ) ; <nl> + primary_column . type - > serializeBinary ( * primary_column . column , i , * index_stream ) ; <nl> + } <nl> + } <nl> + <nl> + i + = index_granularity . getMarkRows ( current_mark + + ) ; <nl> + if ( current_mark > = index_granularity . getMarksCount ( ) ) <nl> + break ; <nl> + } <nl> + <nl> + / / / store last index row to write final mark at the end of column <nl> + for ( size_t j = 0 ; j < primary_columns_num ; + + j ) <nl> + { <nl> + const IColumn & primary_column = * primary_keys_block . getByPosition ( j ) . column . get ( ) ; <nl> + primary_column . get ( rows - 1 , last_index_row [ j ] ) ; <nl> + } <nl> + } <nl> + <nl> + void IMergeTreeDataPartWriter : : calculateAndSerializeSkipIndices ( <nl> + const Block & skip_indexes_block , size_t rows ) <nl> + { <nl> + if ( ! skip_indices_initialized ) <nl> + throw Exception ( " Skip indices are not initialized " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> + <nl> + size_t skip_index_current_data_mark = 0 ; <nl> + <nl> + / / / Filling and writing skip indices like in MergeTreeDataPartWriterWide : : writeColumn <nl> + for ( size_t i = 0 ; i < skip_indices . size ( ) ; + + i ) <nl> + { <nl> + const auto index = skip_indices [ i ] ; <nl> + auto & stream = * skip_indices_streams [ i ] ; <nl> + size_t prev_pos = 0 ; <nl> + skip_index_current_data_mark = skip_index_data_mark ; <nl> + while ( prev_pos < rows ) <nl> + { <nl> + UInt64 limit = 0 ; <nl> + if ( prev_pos = = 0 & & index_offset ! = 0 ) <nl> + { <nl> + limit = index_offset ; <nl> + } <nl> + else <nl> + { <nl> + limit = index_granularity . 
getMarkRows ( skip_index_current_data_mark ) ; <nl> + if ( skip_indices_aggregators [ i ] - > empty ( ) ) <nl> + { <nl> + skip_indices_aggregators [ i ] = index - > createIndexAggregator ( ) ; <nl> + skip_index_filling [ i ] = 0 ; <nl> + <nl> + if ( stream . compressed . offset ( ) > = settings . min_compress_block_size ) <nl> + stream . compressed . next ( ) ; <nl> + <nl> + writeIntBinary ( stream . plain_hashing . count ( ) , stream . marks ) ; <nl> + writeIntBinary ( stream . compressed . offset ( ) , stream . marks ) ; <nl> + / / / Actually this numbers is redundant , but we have to store them <nl> + / / / to be compatible with normal . mrk2 file format <nl> + if ( settings . can_use_adaptive_granularity ) <nl> + writeIntBinary ( 1UL , stream . marks ) ; <nl> + } <nl> + / / / this mark is aggregated , go to the next one <nl> + skip_index_current_data_mark + + ; <nl> + } <nl> + <nl> + size_t pos = prev_pos ; <nl> + skip_indices_aggregators [ i ] - > update ( skip_indexes_block , & pos , limit ) ; <nl> + <nl> + if ( pos = = prev_pos + limit ) <nl> + { <nl> + + + skip_index_filling [ i ] ; <nl> + <nl> + / / / write index if it is filled <nl> + if ( skip_index_filling [ i ] = = index - > granularity ) <nl> + { <nl> + skip_indices_aggregators [ i ] - > getGranuleAndReset ( ) - > serializeBinary ( stream . compressed ) ; <nl> + skip_index_filling [ i ] = 0 ; <nl> + } <nl> + } <nl> + prev_pos = pos ; <nl> + } <nl> + } <nl> + skip_index_data_mark = skip_index_current_data_mark ; <nl> + } <nl> + <nl> + void IMergeTreeDataPartWriter : : finishPrimaryIndexSerialization ( MergeTreeData : : DataPart : : Checksums & checksums ) <nl> + { <nl> + bool write_final_mark = ( with_final_mark & & data_written ) ; <nl> + if ( write_final_mark & & compute_granularity ) <nl> + index_granularity . appendMark ( 0 ) ; <nl> + <nl> + if ( index_stream ) <nl> + { <nl> + if ( write_final_mark ) <nl> + { <nl> + for ( size_t j = 0 ; j < index_columns . size ( ) ; + + j ) <nl> + { <nl> + index_columns [ j ] - > insert ( last_index_row [ j ] ) ; <nl> + index_types [ j ] - > serializeBinary ( last_index_row [ j ] , * index_stream ) ; <nl> + } <nl> + <nl> + last_index_row . clear ( ) ; <nl> + } <nl> + <nl> + index_stream - > next ( ) ; <nl> + checksums . files [ " primary . idx " ] . file_size = index_stream - > count ( ) ; <nl> + checksums . files [ " primary . idx " ] . file_hash = index_stream - > getHash ( ) ; <nl> + index_stream = nullptr ; <nl> + } <nl> + } <nl> + <nl> + void IMergeTreeDataPartWriter : : finishSkipIndicesSerialization ( <nl> + MergeTreeData : : DataPart : : Checksums & checksums ) <nl> + { <nl> + for ( size_t i = 0 ; i < skip_indices . size ( ) ; + + i ) <nl> + { <nl> + auto & stream = * skip_indices_streams [ i ] ; <nl> + if ( ! skip_indices_aggregators [ i ] - > empty ( ) ) <nl> + skip_indices_aggregators [ i ] - > getGranuleAndReset ( ) - > serializeBinary ( stream . compressed ) ; <nl> + } <nl> + <nl> + for ( auto & stream : skip_indices_streams ) <nl> + { <nl> + stream - > finalize ( ) ; <nl> + stream - > addToChecksums ( checksums ) ; <nl> + } <nl> + <nl> + skip_indices_streams . clear ( ) ; <nl> + skip_indices_aggregators . clear ( ) ; <nl> + skip_index_filling . clear ( ) ; <nl> + } <nl> + <nl> + void IMergeTreeDataPartWriter : : next ( ) <nl> + { <nl> + current_mark = next_mark ; <nl> + index_offset = next_index_offset ; <nl> + } <nl> + <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 
d2a6c5d4994 <nl> mmm / dev / null <nl> ppp b / dbms / src / Storages / MergeTree / IMergeTreeDataPartWriter . h <nl> <nl> + # pragma once <nl> + <nl> + # include < IO / WriteBufferFromFile . h > <nl> + # include < IO / WriteBufferFromFileBase . h > <nl> + # include < Compression / CompressedWriteBuffer . h > <nl> + # include < IO / HashingWriteBuffer . h > <nl> + # include < Storages / MergeTree / MergeTreeData . h > <nl> + # include < DataStreams / IBlockOutputStream . h > <nl> + # include < Storages / MergeTree / IMergeTreeDataPart . h > <nl> + <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + <nl> + / / / Writes data part to disk in different formats . <nl> + / / / Calculates and serializes primary and skip indices if needed . <nl> + class IMergeTreeDataPartWriter : private boost : : noncopyable <nl> + { <nl> + public : <nl> + using WrittenOffsetColumns = std : : set < std : : string > ; <nl> + <nl> + / / / Helper class , which holds chain of buffers to write data file with marks . <nl> + / / / It is used to write : one column , skip index or all columns ( in compact format ) . <nl> + struct Stream <nl> + { <nl> + Stream ( <nl> + const String & escaped_column_name_ , <nl> + const String & data_path_ , <nl> + const std : : string & data_file_extension_ , <nl> + const std : : string & marks_path_ , <nl> + const std : : string & marks_file_extension_ , <nl> + const CompressionCodecPtr & compression_codec_ , <nl> + size_t max_compress_block_size_ , <nl> + size_t estimated_size_ , <nl> + size_t aio_threshold_ ) ; <nl> + <nl> + String escaped_column_name ; <nl> + std : : string data_file_extension ; <nl> + std : : string marks_file_extension ; <nl> + <nl> + / / / compressed - > compressed_buf - > plain_hashing - > plain_file <nl> + std : : unique_ptr < WriteBufferFromFileBase > plain_file ; <nl> + HashingWriteBuffer plain_hashing ; <nl> + CompressedWriteBuffer compressed_buf ; <nl> + HashingWriteBuffer compressed ; <nl> + <nl> + / / / marks - > marks_file <nl> + WriteBufferFromFile marks_file ; <nl> + HashingWriteBuffer marks ; <nl> + <nl> + void finalize ( ) ; <nl> + <nl> + void sync ( ) ; <nl> + <nl> + void addToChecksums ( IMergeTreeDataPart : : Checksums & checksums ) ; <nl> + } ; <nl> + <nl> + using StreamPtr = std : : unique_ptr < Stream > ; <nl> + <nl> + IMergeTreeDataPartWriter ( <nl> + const String & part_path , <nl> + const MergeTreeData & storage , <nl> + const NamesAndTypesList & columns_list , <nl> + const std : : vector < MergeTreeIndexPtr > & indices_to_recalc , <nl> + const String & marks_file_extension , <nl> + const CompressionCodecPtr & default_codec , <nl> + const MergeTreeWriterSettings & settings , <nl> + const MergeTreeIndexGranularity & index_granularity , <nl> + bool need_finish_last_granule ) ; <nl> + <nl> + virtual ~ IMergeTreeDataPartWriter ( ) ; <nl> + <nl> + virtual void write ( <nl> + const Block & block , const IColumn : : Permutation * permutation = nullptr , <nl> + / * Blocks with already sorted index columns * / <nl> + const Block & primary_key_block = { } , const Block & skip_indexes_block = { } ) = 0 ; <nl> + <nl> + void calculateAndSerializePrimaryIndex ( const Block & primary_index_block , size_t rows ) ; <nl> + void calculateAndSerializeSkipIndices ( const Block & skip_indexes_block , size_t rows ) ; <nl> + <nl> + / / / Shift mark and offset to prepare read next mark . <nl> + / / / You must call it after calling write method and optionally <nl> + / / / calling calculations of primary and skip indices . 
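<nl> + / / / Illustrative call sequence for one block ( assuming a concrete writer instance ` writer `
<nl> + / / / and that initPrimaryIndex ( ) / initSkipIndices ( ) have already been called ) :
<nl> + / / / writer - > write ( block , permutation , primary_key_block , skip_indexes_block ) ;
<nl> + / / / writer - > calculateAndSerializePrimaryIndex ( primary_key_block , rows ) ;
<nl> + / / / writer - > calculateAndSerializeSkipIndices ( skip_indexes_block , rows ) ;
<nl> + / / / writer - > next ( ) ;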
<nl> + void next ( ) ; <nl> + <nl> + / / / Count index_granularity for block and store in ` index_granularity ` <nl> + void fillIndexGranularity ( const Block & block ) ; <nl> + <nl> + const MergeTreeIndexGranularity & getIndexGranularity ( ) const { return index_granularity ; } <nl> + <nl> + Columns releaseIndexColumns ( ) <nl> + { <nl> + return Columns ( std : : make_move_iterator ( index_columns . begin ( ) ) , std : : make_move_iterator ( index_columns . end ( ) ) ) ; <nl> + } <nl> + <nl> + void setWrittenOffsetColumns ( WrittenOffsetColumns * written_offset_columns_ ) <nl> + { <nl> + written_offset_columns = written_offset_columns_ ; <nl> + } <nl> + <nl> + using SkipIndices = std : : vector < MergeTreeIndexPtr > ; <nl> + const SkipIndices & getSkipIndices ( ) { return skip_indices ; } <nl> + <nl> + void initSkipIndices ( ) ; <nl> + void initPrimaryIndex ( ) ; <nl> + <nl> + virtual void finishDataSerialization ( IMergeTreeDataPart : : Checksums & checksums , bool sync = false ) = 0 ; <nl> + void finishPrimaryIndexSerialization ( MergeTreeData : : DataPart : : Checksums & checksums ) ; <nl> + void finishSkipIndicesSerialization ( MergeTreeData : : DataPart : : Checksums & checksums ) ; <nl> + <nl> + protected : <nl> + using SerializationState = IDataType : : SerializeBinaryBulkStatePtr ; <nl> + using SerializationStates = std : : unordered_map < String , SerializationState > ; <nl> + <nl> + String part_path ; <nl> + const MergeTreeData & storage ; <nl> + NamesAndTypesList columns_list ; <nl> + const String marks_file_extension ; <nl> + <nl> + MergeTreeIndexGranularity index_granularity ; <nl> + <nl> + CompressionCodecPtr default_codec ; <nl> + <nl> + std : : vector < MergeTreeIndexPtr > skip_indices ; <nl> + <nl> + MergeTreeWriterSettings settings ; <nl> + <nl> + bool compute_granularity ; <nl> + bool with_final_mark ; <nl> + bool need_finish_last_granule ; <nl> + <nl> + size_t current_mark = 0 ; <nl> + <nl> + / / / The offset to the first row of the block for which you want to write the index . <nl> + size_t index_offset = 0 ; <nl> + <nl> + size_t next_mark = 0 ; <nl> + size_t next_index_offset = 0 ; <nl> + <nl> + / / / Number of marsk in data from which skip indices have to start <nl> + / / / aggregation . I . e . it ' s data mark number , not skip indices mark . <nl> + size_t skip_index_data_mark = 0 ; <nl> + <nl> + std : : vector < StreamPtr > skip_indices_streams ; <nl> + MergeTreeIndexAggregators skip_indices_aggregators ; <nl> + std : : vector < size_t > skip_index_filling ; <nl> + <nl> + std : : unique_ptr < WriteBufferFromFile > index_file_stream ; <nl> + std : : unique_ptr < HashingWriteBuffer > index_stream ; <nl> + MutableColumns index_columns ; <nl> + DataTypes index_types ; <nl> + / / / Index columns values from the last row from the last block <nl> + / / / It ' s written to index file in the ` writeSuffixAndFinalizePart ` method <nl> + Row last_index_row ; <nl> + <nl> + bool data_written = false ; <nl> + bool primary_index_initialized = false ; <nl> + bool skip_indices_initialized = false ; <nl> + <nl> + / / / To correctly write Nested elements column - by - column . <nl> + WrittenOffsetColumns * written_offset_columns = nullptr ; <nl> + } ; <nl> + <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . d1a366ee809 <nl> mmm / dev / null <nl> ppp b / dbms / src / Storages / MergeTree / IMergeTreeReader . cpp <nl> <nl> + # include < DataTypes / NestedUtils . h > <nl> + # include < DataTypes / DataTypeArray . h > <nl> + # include < Common / escapeForFileName . 
h > <nl> + # include < Compression / CachedCompressedReadBuffer . h > <nl> + # include < Columns / ColumnArray . h > <nl> + # include < Interpreters / evaluateMissingDefaults . h > <nl> + # include < Storages / MergeTree / IMergeTreeReader . h > <nl> + # include < Common / typeid_cast . h > <nl> + # include < Poco / File . h > <nl> + <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + namespace <nl> + { <nl> + using OffsetColumns = std : : map < std : : string , ColumnPtr > ; <nl> + } <nl> + namespace ErrorCodes <nl> + { <nl> + extern const int LOGICAL_ERROR ; <nl> + extern const int NOT_FOUND_EXPECTED_DATA_PART ; <nl> + extern const int MEMORY_LIMIT_EXCEEDED ; <nl> + extern const int ARGUMENT_OUT_OF_BOUND ; <nl> + } <nl> + <nl> + <nl> + IMergeTreeReader : : IMergeTreeReader ( const MergeTreeData : : DataPartPtr & data_part_ , <nl> + const NamesAndTypesList & columns_ , UncompressedCache * uncompressed_cache_ , MarkCache * mark_cache_ , <nl> + const MarkRanges & all_mark_ranges_ , const MergeTreeReaderSettings & settings_ , <nl> + const ValueSizeMap & avg_value_size_hints_ ) <nl> + : data_part ( data_part_ ) , avg_value_size_hints ( avg_value_size_hints_ ) , path ( data_part_ - > getFullPath ( ) ) <nl> + , columns ( columns_ ) , uncompressed_cache ( uncompressed_cache_ ) , mark_cache ( mark_cache_ ) <nl> + , settings ( settings_ ) , storage ( data_part_ - > storage ) <nl> + , all_mark_ranges ( all_mark_ranges_ ) <nl> + { <nl> + } <nl> + <nl> + IMergeTreeReader : : ~ IMergeTreeReader ( ) = default ; <nl> + <nl> + <nl> + const IMergeTreeReader : : ValueSizeMap & IMergeTreeReader : : getAvgValueSizeHints ( ) const <nl> + { <nl> + return avg_value_size_hints ; <nl> + } <nl> + <nl> + <nl> + static bool arrayHasNoElementsRead ( const IColumn & column ) <nl> + { <nl> + const auto * column_array = typeid_cast < const ColumnArray * > ( & column ) ; <nl> + <nl> + if ( ! column_array ) <nl> + return false ; <nl> + <nl> + size_t size = column_array - > size ( ) ; <nl> + if ( ! size ) <nl> + return false ; <nl> + <nl> + size_t data_size = column_array - > getData ( ) . size ( ) ; <nl> + if ( data_size ) <nl> + return false ; <nl> + <nl> + size_t last_offset = column_array - > getOffsets ( ) [ size - 1 ] ; <nl> + return last_offset ! = 0 ; <nl> + } <nl> + <nl> + <nl> + void IMergeTreeReader : : fillMissingColumns ( Columns & res_columns , bool & should_evaluate_missing_defaults , size_t num_rows ) <nl> + { <nl> + try <nl> + { <nl> + size_t num_columns = columns . size ( ) ; <nl> + <nl> + if ( res_columns . size ( ) ! = num_columns ) <nl> + throw Exception ( " invalid number of columns passed to MergeTreeReader : : fillMissingColumns . " <nl> + " Expected " + toString ( num_columns ) + " , " <nl> + " got " + toString ( res_columns . size ( ) ) , ErrorCodes : : LOGICAL_ERROR ) ; <nl> + <nl> + / / / For a missing column of a nested data structure we must create not a column of empty <nl> + / / / arrays , but a column of arrays of correct length . <nl> + <nl> + / / / First , collect offset columns for all arrays in the block . <nl> + OffsetColumns offset_columns ; <nl> + auto requested_column = columns . begin ( ) ; <nl> + for ( size_t i = 0 ; i < num_columns ; + + i , + + requested_column ) <nl> + { <nl> + if ( res_columns [ i ] = = nullptr ) <nl> + continue ; <nl> + <nl> + if ( const auto * array = typeid_cast < const ColumnArray * > ( res_columns [ i ] . 
get ( ) ) ) <nl> + { <nl> + String offsets_name = Nested : : extractTableName ( requested_column - > name ) ; <nl> + auto & offsets_column = offset_columns [ offsets_name ] ; <nl> + <nl> + / / / If for some reason multiple offsets columns are present for the same nested data structure , <nl> + / / / choose the one that is not empty . <nl> + if ( ! offsets_column | | offsets_column - > empty ( ) ) <nl> + offsets_column = array - > getOffsetsPtr ( ) ; <nl> + } <nl> + } <nl> + <nl> + should_evaluate_missing_defaults = false ; <nl> + <nl> + / / / insert default values only for columns without default expressions <nl> + requested_column = columns . begin ( ) ; <nl> + for ( size_t i = 0 ; i < num_columns ; + + i , + + requested_column ) <nl> + { <nl> + auto & [ name , type ] = * requested_column ; <nl> + <nl> + if ( res_columns [ i ] & & arrayHasNoElementsRead ( * res_columns [ i ] ) ) <nl> + res_columns [ i ] = nullptr ; <nl> + <nl> + if ( res_columns [ i ] = = nullptr ) <nl> + { <nl> + if ( storage . getColumns ( ) . hasDefault ( name ) ) <nl> + { <nl> + should_evaluate_missing_defaults = true ; <nl> + continue ; <nl> + } <nl> + <nl> + String offsets_name = Nested : : extractTableName ( name ) ; <nl> + auto offset_it = offset_columns . find ( offsets_name ) ; <nl> + if ( offset_it ! = offset_columns . end ( ) ) <nl> + { <nl> + ColumnPtr offsets_column = offset_it - > second ; <nl> + DataTypePtr nested_type = typeid_cast < const DataTypeArray & > ( * type ) . getNestedType ( ) ; <nl> + size_t nested_rows = typeid_cast < const ColumnUInt64 & > ( * offsets_column ) . getData ( ) . back ( ) ; <nl> + <nl> + ColumnPtr nested_column = <nl> + nested_type - > createColumnConstWithDefaultValue ( nested_rows ) - > convertToFullColumnIfConst ( ) ; <nl> + <nl> + res_columns [ i ] = ColumnArray : : create ( nested_column , offsets_column ) ; <nl> + } <nl> + else <nl> + { <nl> + / / / We must turn a constant column into a full column because the interpreter could infer <nl> + / / / that it is constant everywhere but in some blocks ( from other parts ) it can be a full column . <nl> + res_columns [ i ] = type - > createColumnConstWithDefaultValue ( num_rows ) - > convertToFullColumnIfConst ( ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + catch ( Exception & e ) <nl> + { <nl> + / / / Better diagnostics . <nl> + e . addMessage ( " ( while reading from part " + path + " ) " ) ; <nl> + throw ; <nl> + } <nl> + } <nl> + <nl> + void IMergeTreeReader : : evaluateMissingDefaults ( Block additional_columns , Columns & res_columns ) <nl> + { <nl> + try <nl> + { <nl> + size_t num_columns = columns . size ( ) ; <nl> + <nl> + if ( res_columns . size ( ) ! = num_columns ) <nl> + throw Exception ( " invalid number of columns passed to MergeTreeReader : : fillMissingColumns . " <nl> + " Expected " + toString ( num_columns ) + " , " <nl> + " got " + toString ( res_columns . size ( ) ) , ErrorCodes : : LOGICAL_ERROR ) ; <nl> + <nl> + / / / Convert columns list to block . <nl> + / / / TODO : rewrite with columns interface . It wll be possible after changes in ExpressionActions . <nl> + auto name_and_type = columns . begin ( ) ; <nl> + for ( size_t pos = 0 ; pos < num_columns ; + + pos , + + name_and_type ) <nl> + { <nl> + if ( res_columns [ pos ] = = nullptr ) <nl> + continue ; <nl> + <nl> + additional_columns . insert ( { res_columns [ pos ] , name_and_type - > type , name_and_type - > name } ) ; <nl> + } <nl> + <nl> + DB : : evaluateMissingDefaults ( additional_columns , columns , storage . getColumns ( ) . 
getDefaults ( ) , storage . global_context ) ; <nl> + <nl> + / / / Move columns from block . <nl> + name_and_type = columns . begin ( ) ; <nl> + for ( size_t pos = 0 ; pos < num_columns ; + + pos , + + name_and_type ) <nl> + res_columns [ pos ] = std : : move ( additional_columns . getByName ( name_and_type - > name ) . column ) ; <nl> + } <nl> + catch ( Exception & e ) <nl> + { <nl> + / / / Better diagnostics . <nl> + e . addMessage ( " ( while reading from part " + path + " ) " ) ; <nl> + throw ; <nl> + } <nl> + } <nl> + <nl> + } <nl> similarity index 57 % <nl> rename from dbms / src / Storages / MergeTree / MergeTreeReader . h <nl> rename to dbms / src / Storages / MergeTree / IMergeTreeReader . h <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeReader . h <nl> ppp b / dbms / src / Storages / MergeTree / IMergeTreeReader . h <nl> <nl> <nl> # include < Core / NamesAndTypes . h > <nl> # include < Storages / MergeTree / MergeTreeReaderStream . h > <nl> + # include < Storages / MergeTree / MergeTreeBlockReadUtils . h > <nl> <nl> <nl> namespace DB <nl> class IDataType ; <nl> / / / Reads the data between pairs of marks in the same part . When reading consecutive ranges , avoids unnecessary seeks . <nl> / / / When ranges are almost consecutive , seeks are fast because they are performed inside the buffer . <nl> / / / Avoids loading the marks file if it is not needed ( e . g . when reading the whole part ) . <nl> - class MergeTreeReader : private boost : : noncopyable <nl> + class IMergeTreeReader : private boost : : noncopyable <nl> { <nl> public : <nl> using ValueSizeMap = std : : map < std : : string , double > ; <nl> using DeserializeBinaryBulkStateMap = std : : map < std : : string , IDataType : : DeserializeBinaryBulkStatePtr > ; <nl> <nl> - MergeTreeReader ( String path_ , / / / Path to the directory containing the part <nl> - MergeTreeData : : DataPartPtr data_part_ , <nl> - NamesAndTypesList columns_ , <nl> + IMergeTreeReader ( const MergeTreeData : : DataPartPtr & data_part_ , <nl> + const NamesAndTypesList & columns_ , <nl> UncompressedCache * uncompressed_cache_ , <nl> MarkCache * mark_cache_ , <nl> - bool save_marks_in_cache_ , <nl> - const MergeTreeData & storage_ , <nl> - MarkRanges all_mark_ranges_ , <nl> - size_t aio_threshold_ , <nl> - size_t mmap_threshold_ , <nl> - size_t max_read_buffer_size_ , <nl> - ValueSizeMap avg_value_size_hints_ = ValueSizeMap { } , <nl> - const ReadBufferFromFileBase : : ProfileCallback & profile_callback_ = ReadBufferFromFileBase : : ProfileCallback { } , <nl> - clockid_t clock_type_ = CLOCK_MONOTONIC_COARSE ) ; <nl> - <nl> - ~ MergeTreeReader ( ) ; <nl> + const MarkRanges & all_mark_ranges_ , <nl> + const MergeTreeReaderSettings & settings_ , <nl> + const ValueSizeMap & avg_value_size_hints_ = ValueSizeMap { } ) ; <nl> + <nl> + / / / Return the number of rows has been read or zero if there is no columns to read . <nl> + / / / If continue_reading is true , continue reading from last state , otherwise seek to from_mark <nl> + virtual size_t readRows ( size_t from_mark , bool continue_reading , size_t max_rows_to_read , Columns & res_columns ) = 0 ; <nl> + <nl> + virtual bool canReadIncompleteGranules ( ) const = 0 ; <nl> + <nl> + virtual ~ IMergeTreeReader ( ) ; <nl> <nl> const ValueSizeMap & getAvgValueSizeHints ( ) const ; <nl> <nl> class MergeTreeReader : private boost : : noncopyable <nl> const NamesAndTypesList & getColumns ( ) const { return columns ; } <nl> size_t numColumnsInResult ( ) const { return columns . 
size ( ) ; } <nl> <nl> - / / / Return the number of rows has been read or zero if there is no columns to read . <nl> - / / / If continue_reading is true , continue reading from last state , otherwise seek to from_mark . <nl> - / / / Fills res_columns in order specified in getColumns ( ) list . If column was not read it will be nullptr . <nl> - size_t readRows ( size_t from_mark , bool continue_reading , size_t max_rows_to_read , Columns & res_columns ) ; <nl> - <nl> - MergeTreeData : : DataPartPtr data_part ; <nl> - <nl> size_t getFirstMarkToRead ( ) const <nl> { <nl> return all_mark_ranges . front ( ) . begin ; <nl> } <nl> - private : <nl> - using FileStreams = std : : map < std : : string , std : : unique_ptr < MergeTreeReaderStream > > ; <nl> <nl> + MergeTreeData : : DataPartPtr data_part ; <nl> + <nl> + protected : <nl> / / / avg_value_size_hints are used to reduce the number of reallocations when creating columns of variable size . <nl> ValueSizeMap avg_value_size_hints ; <nl> / / / Stores states for IDataType : : deserializeBinaryBulk <nl> class MergeTreeReader : private boost : : noncopyable <nl> / / / Path to the directory containing the part <nl> String path ; <nl> <nl> - FileStreams streams ; <nl> - <nl> / / / Columns that are read . <nl> NamesAndTypesList columns ; <nl> <nl> UncompressedCache * uncompressed_cache ; <nl> MarkCache * mark_cache ; <nl> - / / / If save_marks_in_cache is false , then , if marks are not in cache , we will load them but won ' t save in the cache , to avoid evicting other data . <nl> - bool save_marks_in_cache ; <nl> + <nl> + MergeTreeReaderSettings settings ; <nl> <nl> const MergeTreeData & storage ; <nl> MarkRanges all_mark_ranges ; <nl> - size_t aio_threshold ; <nl> - size_t mmap_threshold ; <nl> - size_t max_read_buffer_size ; <nl> - <nl> - void addStreams ( const String & name , const IDataType & type , <nl> - const ReadBufferFromFileBase : : ProfileCallback & profile_callback , clockid_t clock_type ) ; <nl> - <nl> - void readData ( <nl> - const String & name , const IDataType & type , IColumn & column , <nl> - size_t from_mark , bool continue_reading , size_t max_rows_to_read , <nl> - bool read_offsets = true ) ; <nl> - <nl> <nl> friend class MergeTreeRangeReader : : DelayedStream ; <nl> } ; <nl> mmm a / dbms / src / Storages / MergeTree / IMergedBlockOutputStream . cpp <nl> ppp b / dbms / src / Storages / MergeTree / IMergedBlockOutputStream . cpp <nl> <nl> # include < Storages / MergeTree / IMergedBlockOutputStream . h > <nl> # include < IO / createWriteBufferFromFileBase . h > <nl> + # include < Storages / MergeTree / MergeTreeIOSettings . h > <nl> + # include < Storages / MergeTree / IMergeTreeDataPartWriter . h > <nl> <nl> namespace DB <nl> { <nl> <nl> - namespace ErrorCodes <nl> - { <nl> - extern const int LOGICAL_ERROR ; <nl> - } <nl> - <nl> - namespace <nl> - { <nl> - constexpr auto DATA_FILE_EXTENSION = " . bin " ; <nl> - constexpr auto INDEX_FILE_EXTENSION = " . 
idx " ; <nl> - } <nl> - <nl> - <nl> IMergedBlockOutputStream : : IMergedBlockOutputStream ( <nl> - MergeTreeData & storage_ , <nl> - const String & part_path_ , <nl> - size_t min_compress_block_size_ , <nl> - size_t max_compress_block_size_ , <nl> - CompressionCodecPtr codec_ , <nl> - size_t aio_threshold_ , <nl> - bool blocks_are_granules_size_ , <nl> - const std : : vector < MergeTreeIndexPtr > & indices_to_recalc , <nl> - const MergeTreeIndexGranularity & index_granularity_ , <nl> - const MergeTreeIndexGranularityInfo * index_granularity_info_ ) <nl> - : storage ( storage_ ) <nl> - , part_path ( part_path_ ) <nl> - , min_compress_block_size ( min_compress_block_size_ ) <nl> - , max_compress_block_size ( max_compress_block_size_ ) <nl> - , aio_threshold ( aio_threshold_ ) <nl> - , can_use_adaptive_granularity ( index_granularity_info_ ? index_granularity_info_ - > is_adaptive : storage . canUseAdaptiveGranularity ( ) ) <nl> - , marks_file_extension ( can_use_adaptive_granularity ? getAdaptiveMrkExtension ( ) : getNonAdaptiveMrkExtension ( ) ) <nl> - , blocks_are_granules_size ( blocks_are_granules_size_ ) <nl> - , index_granularity ( index_granularity_ ) <nl> - , compute_granularity ( index_granularity . empty ( ) ) <nl> - , codec ( std : : move ( codec_ ) ) <nl> - , skip_indices ( indices_to_recalc ) <nl> - , with_final_mark ( storage . getSettings ( ) - > write_final_mark & & can_use_adaptive_granularity ) <nl> - { <nl> - if ( blocks_are_granules_size & & ! index_granularity . empty ( ) ) <nl> - throw Exception ( " Can ' t take information about index granularity from blocks , when non empty index_granularity array specified " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> - } <nl> - <nl> - void IMergedBlockOutputStream : : addStreams ( <nl> - const String & path , <nl> - const String & name , <nl> - const IDataType & type , <nl> - const CompressionCodecPtr & effective_codec , <nl> - size_t estimated_size , <nl> - bool skip_offsets ) <nl> - { <nl> - IDataType : : StreamCallback callback = [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> - { <nl> - if ( skip_offsets & & ! substream_path . empty ( ) & & substream_path . back ( ) . type = = IDataType : : Substream : : ArraySizes ) <nl> - return ; <nl> - <nl> - String stream_name = IDataType : : getFileNameForStream ( name , substream_path ) ; <nl> - <nl> - / / / Shared offsets for Nested type . <nl> - if ( column_streams . count ( stream_name ) ) <nl> - return ; <nl> - <nl> - column_streams [ stream_name ] = std : : make_unique < ColumnStream > ( <nl> - stream_name , <nl> - path + stream_name , DATA_FILE_EXTENSION , <nl> - path + stream_name , marks_file_extension , <nl> - effective_codec , <nl> - max_compress_block_size , <nl> - estimated_size , <nl> - aio_threshold ) ; <nl> - } ; <nl> - <nl> - IDataType : : SubstreamPath stream_path ; <nl> - type . enumerateStreams ( callback , stream_path ) ; <nl> - } <nl> - <nl> - <nl> - IDataType : : OutputStreamGetter IMergedBlockOutputStream : : createStreamGetter ( <nl> - const String & name , WrittenOffsetColumns & offset_columns , bool skip_offsets ) <nl> - { <nl> - return [ & , skip_offsets ] ( const IDataType : : SubstreamPath & substream_path ) - > WriteBuffer * <nl> - { <nl> - bool is_offsets = ! substream_path . empty ( ) & & substream_path . back ( ) . 
type = = IDataType : : Substream : : ArraySizes ; <nl> - if ( is_offsets & & skip_offsets ) <nl> - return nullptr ; <nl> - <nl> - String stream_name = IDataType : : getFileNameForStream ( name , substream_path ) ; <nl> - <nl> - / / / Don ' t write offsets more than one time for Nested type . <nl> - if ( is_offsets & & offset_columns . count ( stream_name ) ) <nl> - return nullptr ; <nl> - <nl> - return & column_streams [ stream_name ] - > compressed ; <nl> - } ; <nl> - } <nl> - <nl> - static void fillIndexGranularityImpl ( <nl> - const Block & block , <nl> - size_t index_granularity_bytes , <nl> - size_t fixed_index_granularity_rows , <nl> - bool blocks_are_granules , <nl> - size_t index_offset , <nl> - MergeTreeIndexGranularity & index_granularity , <nl> - bool can_use_adaptive_index_granularity ) <nl> - { <nl> - size_t rows_in_block = block . rows ( ) ; <nl> - size_t index_granularity_for_block ; <nl> - if ( ! can_use_adaptive_index_granularity ) <nl> - index_granularity_for_block = fixed_index_granularity_rows ; <nl> - else <nl> - { <nl> - size_t block_size_in_memory = block . bytes ( ) ; <nl> - if ( blocks_are_granules ) <nl> - index_granularity_for_block = rows_in_block ; <nl> - else if ( block_size_in_memory > = index_granularity_bytes ) <nl> - { <nl> - size_t granules_in_block = block_size_in_memory / index_granularity_bytes ; <nl> - index_granularity_for_block = rows_in_block / granules_in_block ; <nl> - } <nl> - else <nl> - { <nl> - size_t size_of_row_in_bytes = block_size_in_memory / rows_in_block ; <nl> - index_granularity_for_block = index_granularity_bytes / size_of_row_in_bytes ; <nl> - } <nl> - } <nl> - if ( index_granularity_for_block = = 0 ) / / / very rare case when index granularity bytes less then single row <nl> - index_granularity_for_block = 1 ; <nl> - <nl> - / / / We should be less or equal than fixed index granularity <nl> - index_granularity_for_block = std : : min ( fixed_index_granularity_rows , index_granularity_for_block ) ; <nl> - <nl> - for ( size_t current_row = index_offset ; current_row < rows_in_block ; current_row + = index_granularity_for_block ) <nl> - index_granularity . appendMark ( index_granularity_for_block ) ; <nl> - } <nl> - <nl> - void IMergedBlockOutputStream : : fillIndexGranularity ( const Block & block ) <nl> - { <nl> - const auto storage_settings = storage . getSettings ( ) ; <nl> - fillIndexGranularityImpl ( <nl> - block , <nl> - storage_settings - > index_granularity_bytes , <nl> - storage_settings - > index_granularity , <nl> - blocks_are_granules_size , <nl> - index_offset , <nl> - index_granularity , <nl> - can_use_adaptive_granularity ) ; <nl> - } <nl> - <nl> - void IMergedBlockOutputStream : : writeSingleMark ( <nl> - const String & name , <nl> - const IDataType & type , <nl> - WrittenOffsetColumns & offset_columns , <nl> - bool skip_offsets , <nl> - size_t number_of_rows , <nl> - DB : : IDataType : : SubstreamPath & path ) <nl> - { <nl> - type . enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> - { <nl> - bool is_offsets = ! substream_path . empty ( ) & & substream_path . back ( ) . type = = IDataType : : Substream : : ArraySizes ; <nl> - if ( is_offsets & & skip_offsets ) <nl> - return ; <nl> - <nl> - String stream_name = IDataType : : getFileNameForStream ( name , substream_path ) ; <nl> - <nl> - / / / Don ' t write offsets more than one time for Nested type . <nl> - if ( is_offsets & & offset_columns . 
count ( stream_name ) ) <nl> - return ; <nl> - <nl> - ColumnStream & stream = * column_streams [ stream_name ] ; <nl> - <nl> - / / / There could already be enough data to compress into the new block . <nl> - if ( stream . compressed . offset ( ) > = min_compress_block_size ) <nl> - stream . compressed . next ( ) ; <nl> - <nl> - writeIntBinary ( stream . plain_hashing . count ( ) , stream . marks ) ; <nl> - writeIntBinary ( stream . compressed . offset ( ) , stream . marks ) ; <nl> - if ( can_use_adaptive_granularity ) <nl> - writeIntBinary ( number_of_rows , stream . marks ) ; <nl> - } , path ) ; <nl> - } <nl> - <nl> - size_t IMergedBlockOutputStream : : writeSingleGranule ( <nl> - const String & name , <nl> - const IDataType & type , <nl> - const IColumn & column , <nl> - WrittenOffsetColumns & offset_columns , <nl> - bool skip_offsets , <nl> - IDataType : : SerializeBinaryBulkStatePtr & serialization_state , <nl> - IDataType : : SerializeBinaryBulkSettings & serialize_settings , <nl> - size_t from_row , <nl> - size_t number_of_rows , <nl> - bool write_marks ) <nl> - { <nl> - if ( write_marks ) <nl> - writeSingleMark ( name , type , offset_columns , skip_offsets , number_of_rows , serialize_settings . path ) ; <nl> - <nl> - type . serializeBinaryBulkWithMultipleStreams ( column , from_row , number_of_rows , serialize_settings , serialization_state ) ; <nl> - <nl> - / / / So that instead of the marks pointing to the end of the compressed block , there were marks pointing to the beginning of the next one . <nl> - type . enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> - { <nl> - bool is_offsets = ! substream_path . empty ( ) & & substream_path . back ( ) . type = = IDataType : : Substream : : ArraySizes ; <nl> - if ( is_offsets & & skip_offsets ) <nl> - return ; <nl> - <nl> - String stream_name = IDataType : : getFileNameForStream ( name , substream_path ) ; <nl> - <nl> - / / / Don ' t write offsets more than one time for Nested type . <nl> - if ( is_offsets & & offset_columns . count ( stream_name ) ) <nl> - return ; <nl> - <nl> - column_streams [ stream_name ] - > compressed . nextIfAtEnd ( ) ; <nl> - } , serialize_settings . path ) ; <nl> - <nl> - return from_row + number_of_rows ; <nl> - } <nl> - <nl> - / / / column must not be empty . ( column . size ( ) ! = = 0 ) <nl> - <nl> - std : : pair < size_t , size_t > IMergedBlockOutputStream : : writeColumn ( <nl> - const String & name , <nl> - const IDataType & type , <nl> - const IColumn & column , <nl> - WrittenOffsetColumns & offset_columns , <nl> - bool skip_offsets , <nl> - IDataType : : SerializeBinaryBulkStatePtr & serialization_state , <nl> - size_t from_mark ) <nl> + const MergeTreeDataPartPtr & data_part ) <nl> + : storage ( data_part - > storage ) <nl> + , part_path ( data_part - > getFullPath ( ) ) <nl> { <nl> - auto & settings = storage . global_context . getSettingsRef ( ) ; <nl> - IDataType : : SerializeBinaryBulkSettings serialize_settings ; <nl> - serialize_settings . getter = createStreamGetter ( name , offset_columns , skip_offsets ) ; <nl> - serialize_settings . low_cardinality_max_dictionary_size = settings . low_cardinality_max_dictionary_size ; <nl> - serialize_settings . low_cardinality_use_single_dictionary_for_part = settings . low_cardinality_use_single_dictionary_for_part ! = 0 ; <nl> - <nl> - size_t total_rows = column . 
size ( ) ; <nl> - size_t current_row = 0 ; <nl> - size_t current_column_mark = from_mark ; <nl> - while ( current_row < total_rows ) <nl> - { <nl> - size_t rows_to_write ; <nl> - bool write_marks = true ; <nl> - <nl> - / / / If there is ` index_offset ` , then the first mark goes not immediately , but after this number of rows . <nl> - if ( current_row = = 0 & & index_offset ! = 0 ) <nl> - { <nl> - write_marks = false ; <nl> - rows_to_write = index_offset ; <nl> - } <nl> - else <nl> - { <nl> - if ( index_granularity . getMarksCount ( ) < = current_column_mark ) <nl> - throw Exception ( <nl> - " Incorrect size of index granularity expect mark " + toString ( current_column_mark ) + " totally have marks " + toString ( index_granularity . getMarksCount ( ) ) , <nl> - ErrorCodes : : LOGICAL_ERROR ) ; <nl> - <nl> - rows_to_write = index_granularity . getMarkRows ( current_column_mark ) ; <nl> - } <nl> - <nl> - current_row = writeSingleGranule ( <nl> - name , <nl> - type , <nl> - column , <nl> - offset_columns , <nl> - skip_offsets , <nl> - serialization_state , <nl> - serialize_settings , <nl> - current_row , <nl> - rows_to_write , <nl> - write_marks <nl> - ) ; <nl> - <nl> - if ( write_marks ) <nl> - current_column_mark + + ; <nl> - } <nl> - <nl> - / / / Memoize offsets for Nested types , that are already written . They will not be written again for next columns of Nested structure . <nl> - type . enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> - { <nl> - bool is_offsets = ! substream_path . empty ( ) & & substream_path . back ( ) . type = = IDataType : : Substream : : ArraySizes ; <nl> - if ( is_offsets ) <nl> - { <nl> - String stream_name = IDataType : : getFileNameForStream ( name , substream_path ) ; <nl> - offset_columns . insert ( stream_name ) ; <nl> - } <nl> - } , serialize_settings . path ) ; <nl> - <nl> - return std : : make_pair ( current_column_mark , current_row - total_rows ) ; <nl> - } <nl> - <nl> - void IMergedBlockOutputStream : : writeFinalMark ( <nl> - const std : : string & column_name , <nl> - const DataTypePtr column_type , <nl> - WrittenOffsetColumns & offset_columns , <nl> - bool skip_offsets , <nl> - DB : : IDataType : : SubstreamPath & path ) <nl> - { <nl> - writeSingleMark ( column_name , * column_type , offset_columns , skip_offsets , 0 , path ) ; <nl> - / / / Memoize information about offsets <nl> - column_type - > enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> - { <nl> - bool is_offsets = ! substream_path . empty ( ) & & substream_path . back ( ) . type = = IDataType : : Substream : : ArraySizes ; <nl> - if ( is_offsets ) <nl> - { <nl> - String stream_name = IDataType : : getFileNameForStream ( column_name , substream_path ) ; <nl> - offset_columns . insert ( stream_name ) ; <nl> - } <nl> - } , path ) ; <nl> } <nl> <nl> - void IMergedBlockOutputStream : : initSkipIndices ( ) <nl> + Block IMergedBlockOutputStream : : getBlockAndPermute ( const Block & block , const Names & names , const IColumn : : Permutation * permutation ) <nl> { <nl> - for ( const auto & index : skip_indices ) <nl> + Block result ; <nl> + for ( size_t i = 0 , size = names . size ( ) ; i < size ; + + i ) <nl> { <nl> - String stream_name = index - > getFileName ( ) ; <nl> - skip_indices_streams . 
emplace_back ( <nl> - std : : make_unique < ColumnStream > ( <nl> - stream_name , <nl> - part_path + stream_name , INDEX_FILE_EXTENSION , <nl> - part_path + stream_name , marks_file_extension , <nl> - codec , max_compress_block_size , <nl> - 0 , aio_threshold ) ) ; <nl> - skip_indices_aggregators . push_back ( index - > createIndexAggregator ( ) ) ; <nl> - skip_index_filling . push_back ( 0 ) ; <nl> - } <nl> - } <nl> + const auto & name = names [ i ] ; <nl> + result . insert ( i , block . getByName ( name ) ) ; <nl> <nl> - void IMergedBlockOutputStream : : calculateAndSerializeSkipIndices ( <nl> - const ColumnsWithTypeAndName & skip_indexes_columns , size_t rows ) <nl> - { <nl> - / / / Creating block for update <nl> - Block indices_update_block ( skip_indexes_columns ) ; <nl> - size_t skip_index_current_data_mark = 0 ; <nl> - <nl> - / / / Filling and writing skip indices like in IMergedBlockOutputStream : : writeColumn <nl> - for ( size_t i = 0 ; i < skip_indices . size ( ) ; + + i ) <nl> - { <nl> - const auto index = skip_indices [ i ] ; <nl> - auto & stream = * skip_indices_streams [ i ] ; <nl> - size_t prev_pos = 0 ; <nl> - skip_index_current_data_mark = skip_index_data_mark ; <nl> - while ( prev_pos < rows ) <nl> + / / / Reorder primary key columns in advance and add them to ` primary_key_columns ` . <nl> + if ( permutation ) <nl> { <nl> - UInt64 limit = 0 ; <nl> - if ( prev_pos = = 0 & & index_offset ! = 0 ) <nl> - { <nl> - limit = index_offset ; <nl> - } <nl> - else <nl> - { <nl> - limit = index_granularity . getMarkRows ( skip_index_current_data_mark ) ; <nl> - if ( skip_indices_aggregators [ i ] - > empty ( ) ) <nl> - { <nl> - skip_indices_aggregators [ i ] = index - > createIndexAggregator ( ) ; <nl> - skip_index_filling [ i ] = 0 ; <nl> - <nl> - if ( stream . compressed . offset ( ) > = min_compress_block_size ) <nl> - stream . compressed . next ( ) ; <nl> - <nl> - writeIntBinary ( stream . plain_hashing . count ( ) , stream . marks ) ; <nl> - writeIntBinary ( stream . compressed . offset ( ) , stream . marks ) ; <nl> - / / / Actually this numbers is redundant , but we have to store them <nl> - / / / to be compatible with normal . mrk2 file format <nl> - if ( can_use_adaptive_granularity ) <nl> - writeIntBinary ( 1UL , stream . marks ) ; <nl> - } <nl> - / / / this mark is aggregated , go to the next one <nl> - skip_index_current_data_mark + + ; <nl> - } <nl> - <nl> - size_t pos = prev_pos ; <nl> - skip_indices_aggregators [ i ] - > update ( indices_update_block , & pos , limit ) ; <nl> - <nl> - if ( pos = = prev_pos + limit ) <nl> - { <nl> - + + skip_index_filling [ i ] ; <nl> - <nl> - / / / write index if it is filled <nl> - if ( skip_index_filling [ i ] = = index - > granularity ) <nl> - { <nl> - skip_indices_aggregators [ i ] - > getGranuleAndReset ( ) - > serializeBinary ( stream . compressed ) ; <nl> - skip_index_filling [ i ] = 0 ; <nl> - } <nl> - } <nl> - prev_pos = pos ; <nl> + auto & column = result . getByPosition ( i ) ; <nl> + column . column = column . column - > permute ( * permutation , 0 ) ; <nl> } <nl> } <nl> - skip_index_data_mark = skip_index_current_data_mark ; <nl> - } <nl> - <nl> - void IMergedBlockOutputStream : : finishSkipIndicesSerialization ( <nl> - MergeTreeData : : DataPart : : Checksums & checksums ) <nl> - { <nl> - for ( size_t i = 0 ; i < skip_indices . size ( ) ; + + i ) <nl> - { <nl> - auto & stream = * skip_indices_streams [ i ] ; <nl> - if ( ! 
skip_indices_aggregators [ i ] - > empty ( ) ) <nl> - skip_indices_aggregators [ i ] - > getGranuleAndReset ( ) - > serializeBinary ( stream . compressed ) ; <nl> - } <nl> - <nl> - for ( auto & stream : skip_indices_streams ) <nl> - { <nl> - stream - > finalize ( ) ; <nl> - stream - > addToChecksums ( checksums ) ; <nl> - } <nl> - <nl> - skip_indices_streams . clear ( ) ; <nl> - skip_indices_aggregators . clear ( ) ; <nl> - skip_index_filling . clear ( ) ; <nl> - } <nl> - <nl> - / / / Implementation of IMergedBlockOutputStream : : ColumnStream . <nl> - <nl> - IMergedBlockOutputStream : : ColumnStream : : ColumnStream ( <nl> - const String & escaped_column_name_ , <nl> - const String & data_path_ , <nl> - const std : : string & data_file_extension_ , <nl> - const std : : string & marks_path_ , <nl> - const std : : string & marks_file_extension_ , <nl> - const CompressionCodecPtr & compression_codec_ , <nl> - size_t max_compress_block_size_ , <nl> - size_t estimated_size_ , <nl> - size_t aio_threshold_ ) : <nl> - escaped_column_name ( escaped_column_name_ ) , <nl> - data_file_extension { data_file_extension_ } , <nl> - marks_file_extension { marks_file_extension_ } , <nl> - plain_file ( createWriteBufferFromFileBase ( data_path_ + data_file_extension , estimated_size_ , aio_threshold_ , max_compress_block_size_ ) ) , <nl> - plain_hashing ( * plain_file ) , compressed_buf ( plain_hashing , compression_codec_ ) , compressed ( compressed_buf ) , <nl> - marks_file ( marks_path_ + marks_file_extension , 4096 , O_TRUNC | O_CREAT | O_WRONLY ) , marks ( marks_file ) <nl> - { <nl> - } <nl> - <nl> - void IMergedBlockOutputStream : : ColumnStream : : finalize ( ) <nl> - { <nl> - compressed . next ( ) ; <nl> - plain_file - > next ( ) ; <nl> - marks . next ( ) ; <nl> - } <nl> - <nl> - void IMergedBlockOutputStream : : ColumnStream : : sync ( ) <nl> - { <nl> - plain_file - > sync ( ) ; <nl> - marks_file . sync ( ) ; <nl> - } <nl> - <nl> - void IMergedBlockOutputStream : : ColumnStream : : addToChecksums ( MergeTreeData : : DataPart : : Checksums & checksums ) <nl> - { <nl> - String name = escaped_column_name ; <nl> - <nl> - checksums . files [ name + data_file_extension ] . is_compressed = true ; <nl> - checksums . files [ name + data_file_extension ] . uncompressed_size = compressed . count ( ) ; <nl> - checksums . files [ name + data_file_extension ] . uncompressed_hash = compressed . getHash ( ) ; <nl> - checksums . files [ name + data_file_extension ] . file_size = plain_hashing . count ( ) ; <nl> - checksums . files [ name + data_file_extension ] . file_hash = plain_hashing . getHash ( ) ; <nl> <nl> - checksums . files [ name + marks_file_extension ] . file_size = marks . count ( ) ; <nl> - checksums . files [ name + marks_file_extension ] . file_hash = marks . getHash ( ) ; <nl> + return result ; <nl> } <nl> <nl> } <nl> mmm a / dbms / src / Storages / MergeTree / IMergedBlockOutputStream . h <nl> ppp b / dbms / src / Storages / MergeTree / IMergedBlockOutputStream . h <nl> <nl> # pragma once <nl> <nl> # include < Storages / MergeTree / MergeTreeIndexGranularity . h > <nl> - # include < Storages / MergeTree / MergeTreeIndexGranularityInfo . h > <nl> - # include < IO / WriteBufferFromFile . h > <nl> - # include < Compression / CompressedWriteBuffer . h > <nl> - # include < IO / HashingWriteBuffer . h > <nl> # include < Storages / MergeTree / MergeTreeData . h > <nl> # include < DataStreams / IBlockOutputStream . h > <nl> - <nl> + # include < Storages / MergeTree / IMergeTreeDataPart . 
h > <nl> + # include < Storages / MergeTree / IMergeTreeDataPartWriter . h > <nl> <nl> namespace DB <nl> { <nl> class IMergedBlockOutputStream : public IBlockOutputStream <nl> { <nl> public : <nl> IMergedBlockOutputStream ( <nl> - MergeTreeData & storage_ , <nl> - const String & part_path_ , <nl> - size_t min_compress_block_size_ , <nl> - size_t max_compress_block_size_ , <nl> - CompressionCodecPtr default_codec_ , <nl> - size_t aio_threshold_ , <nl> - bool blocks_are_granules_size_ , <nl> - const std : : vector < MergeTreeIndexPtr > & indices_to_recalc , <nl> - const MergeTreeIndexGranularity & index_granularity_ , <nl> - const MergeTreeIndexGranularityInfo * index_granularity_info_ = nullptr ) ; <nl> + const MergeTreeDataPartPtr & data_part ) ; <nl> <nl> using WrittenOffsetColumns = std : : set < std : : string > ; <nl> <nl> - protected : <nl> - using SerializationState = IDataType : : SerializeBinaryBulkStatePtr ; <nl> - using SerializationStates = std : : vector < SerializationState > ; <nl> - <nl> - struct ColumnStream <nl> + const MergeTreeIndexGranularity & getIndexGranularity ( ) <nl> { <nl> - ColumnStream ( <nl> - const String & escaped_column_name_ , <nl> - const String & data_path_ , <nl> - const std : : string & data_file_extension_ , <nl> - const std : : string & marks_path_ , <nl> - const std : : string & marks_file_extension_ , <nl> - const CompressionCodecPtr & compression_codec_ , <nl> - size_t max_compress_block_size_ , <nl> - size_t estimated_size_ , <nl> - size_t aio_threshold_ ) ; <nl> - <nl> - String escaped_column_name ; <nl> - std : : string data_file_extension ; <nl> - std : : string marks_file_extension ; <nl> - <nl> - / / / compressed - > compressed_buf - > plain_hashing - > plain_file <nl> - std : : unique_ptr < WriteBufferFromFileBase > plain_file ; <nl> - HashingWriteBuffer plain_hashing ; <nl> - CompressedWriteBuffer compressed_buf ; <nl> - HashingWriteBuffer compressed ; <nl> - <nl> - / / / marks - > marks_file <nl> - WriteBufferFromFile marks_file ; <nl> - HashingWriteBuffer marks ; <nl> - <nl> - void finalize ( ) ; <nl> - <nl> - void sync ( ) ; <nl> - <nl> - void addToChecksums ( MergeTreeData : : DataPart : : Checksums & checksums ) ; <nl> - } ; <nl> - <nl> - using ColumnStreams = std : : map < String , std : : unique_ptr < ColumnStream > > ; <nl> - <nl> - void addStreams ( const String & path , const String & name , const IDataType & type , <nl> - const CompressionCodecPtr & codec , size_t estimated_size , bool skip_offsets ) ; <nl> + return writer - > getIndexGranularity ( ) ; <nl> + } <nl> <nl> + protected : <nl> + using SerializationState = IDataType : : SerializeBinaryBulkStatePtr ; <nl> <nl> IDataType : : OutputStreamGetter createStreamGetter ( const String & name , WrittenOffsetColumns & offset_columns , bool skip_offsets ) ; <nl> <nl> - / / / Write data of one column . 
<nl> - / / / Return how many marks were written and <nl> - / / / how many rows were written for last mark <nl> - std : : pair < size_t , size_t > writeColumn ( <nl> - const String & name , <nl> - const IDataType & type , <nl> - const IColumn & column , <nl> - WrittenOffsetColumns & offset_columns , <nl> - bool skip_offsets , <nl> - IDataType : : SerializeBinaryBulkStatePtr & serialization_state , <nl> - size_t from_mark <nl> - ) ; <nl> - <nl> - / / / Write single granule of one column ( rows between 2 marks ) <nl> - size_t writeSingleGranule ( <nl> - const String & name , <nl> - const IDataType & type , <nl> - const IColumn & column , <nl> - WrittenOffsetColumns & offset_columns , <nl> - bool skip_offsets , <nl> - IDataType : : SerializeBinaryBulkStatePtr & serialization_state , <nl> - IDataType : : SerializeBinaryBulkSettings & serialize_settings , <nl> - size_t from_row , <nl> - size_t number_of_rows , <nl> - bool write_marks ) ; <nl> - <nl> - / / / Write mark for column <nl> - void writeSingleMark ( <nl> - const String & name , <nl> - const IDataType & type , <nl> - WrittenOffsetColumns & offset_columns , <nl> - bool skip_offsets , <nl> - size_t number_of_rows , <nl> - DB : : IDataType : : SubstreamPath & path ) ; <nl> - <nl> - / / / Count index_granularity for block and store in ` index_granularity ` <nl> - void fillIndexGranularity ( const Block & block ) ; <nl> - <nl> - / / / Write final mark to the end of column <nl> - void writeFinalMark ( <nl> - const std : : string & column_name , <nl> - const DataTypePtr column_type , <nl> - WrittenOffsetColumns & offset_columns , <nl> - bool skip_offsets , <nl> - DB : : IDataType : : SubstreamPath & path ) ; <nl> - <nl> - void initSkipIndices ( ) ; <nl> - void calculateAndSerializeSkipIndices ( const ColumnsWithTypeAndName & skip_indexes_columns , size_t rows ) ; <nl> - void finishSkipIndicesSerialization ( MergeTreeData : : DataPart : : Checksums & checksums ) ; <nl> protected : <nl> - MergeTreeData & storage ; <nl> + const MergeTreeData & storage ; <nl> <nl> - SerializationStates serialization_states ; <nl> String part_path ; <nl> <nl> - ColumnStreams column_streams ; <nl> - <nl> - / / / The offset to the first row of the block for which you want to write the index . <nl> - size_t index_offset = 0 ; <nl> - <nl> - size_t min_compress_block_size ; <nl> - size_t max_compress_block_size ; <nl> - <nl> - size_t aio_threshold ; <nl> - <nl> - size_t current_mark = 0 ; <nl> - <nl> - / / / Number of mark in data from which skip indices have to start <nl> - / / / aggregation . I . e . it ' s data mark number , not skip indices mark . <nl> - size_t skip_index_data_mark = 0 ; <nl> - <nl> - const bool can_use_adaptive_granularity ; <nl> - const std : : string marks_file_extension ; <nl> - const bool blocks_are_granules_size ; <nl> - <nl> - MergeTreeIndexGranularity index_granularity ; <nl> - <nl> - const bool compute_granularity ; <nl> - CompressionCodecPtr codec ; <nl> - <nl> - std : : vector < MergeTreeIndexPtr > skip_indices ; <nl> - std : : vector < std : : unique_ptr < ColumnStream > > skip_indices_streams ; <nl> - MergeTreeIndexAggregators skip_indices_aggregators ; <nl> - std : : vector < size_t > skip_index_filling ; <nl> + static Block getBlockAndPermute ( const Block & block , const Names & names , const IColumn : : Permutation * permutation ) ; <nl> <nl> - const bool with_final_mark ; <nl> + IMergeTreeDataPart : : MergeTreeWriterPtr writer ; <nl> } ; <nl> <nl> } <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeBaseSelectProcessor . 
cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeBaseSelectProcessor . cpp <nl> <nl> # include < Storages / MergeTree / MergeTreeBaseSelectProcessor . h > <nl> # include < Storages / MergeTree / MergeTreeRangeReader . h > <nl> - # include < Storages / MergeTree / MergeTreeReader . h > <nl> + # include < Storages / MergeTree / IMergeTreeDataPart . h > <nl> + # include < Storages / MergeTree / IMergeTreeReader . h > <nl> # include < Storages / MergeTree / MergeTreeBlockReadUtils . h > <nl> # include < Columns / FilterDescription . h > <nl> # include < Common / typeid_cast . h > <nl> MergeTreeBaseSelectProcessor : : MergeTreeBaseSelectProcessor ( <nl> UInt64 max_block_size_rows_ , <nl> UInt64 preferred_block_size_bytes_ , <nl> UInt64 preferred_max_column_in_block_size_bytes_ , <nl> - UInt64 min_bytes_to_use_direct_io_ , <nl> - UInt64 min_bytes_to_use_mmap_io_ , <nl> - UInt64 max_read_buffer_size_ , <nl> + const MergeTreeReaderSettings & reader_settings_ , <nl> bool use_uncompressed_cache_ , <nl> - bool save_marks_in_cache_ , <nl> const Names & virt_column_names_ ) <nl> : <nl> SourceWithProgress ( getHeader ( std : : move ( header ) , prewhere_info_ , virt_column_names_ ) ) , <nl> MergeTreeBaseSelectProcessor : : MergeTreeBaseSelectProcessor ( <nl> max_block_size_rows ( max_block_size_rows_ ) , <nl> preferred_block_size_bytes ( preferred_block_size_bytes_ ) , <nl> preferred_max_column_in_block_size_bytes ( preferred_max_column_in_block_size_bytes_ ) , <nl> - min_bytes_to_use_direct_io ( min_bytes_to_use_direct_io_ ) , <nl> - min_bytes_to_use_mmap_io ( min_bytes_to_use_mmap_io_ ) , <nl> - max_read_buffer_size ( max_read_buffer_size_ ) , <nl> + reader_settings ( reader_settings_ ) , <nl> use_uncompressed_cache ( use_uncompressed_cache_ ) , <nl> - save_marks_in_cache ( save_marks_in_cache_ ) , <nl> virt_column_names ( virt_column_names_ ) <nl> { <nl> header_without_virtual_columns = getPort ( ) . getHeader ( ) ; <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeBaseSelectProcessor . h <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeBaseSelectProcessor . h <nl> <nl> namespace DB <nl> { <nl> <nl> - class MergeTreeReader ; <nl> + class IMergeTreeReader ; <nl> class UncompressedCache ; <nl> class MarkCache ; <nl> <nl> class MergeTreeBaseSelectProcessor : public SourceWithProgress <nl> UInt64 max_block_size_rows_ , <nl> UInt64 preferred_block_size_bytes_ , <nl> UInt64 preferred_max_column_in_block_size_bytes_ , <nl> - UInt64 min_bytes_to_use_direct_io_ , <nl> - UInt64 min_bytes_to_use_mmap_io_ , <nl> - UInt64 max_read_buffer_size_ , <nl> + const MergeTreeReaderSettings & reader_settings_ , <nl> bool use_uncompressed_cache_ , <nl> - bool save_marks_in_cache_ = true , <nl> const Names & virt_column_names_ = { } ) ; <nl> <nl> ~ MergeTreeBaseSelectProcessor ( ) override ; <nl> class MergeTreeBaseSelectProcessor : public SourceWithProgress <nl> UInt64 preferred_block_size_bytes ; <nl> UInt64 preferred_max_column_in_block_size_bytes ; <nl> <nl> - UInt64 min_bytes_to_use_direct_io ; <nl> - UInt64 min_bytes_to_use_mmap_io ; <nl> - UInt64 max_read_buffer_size ; <nl> + MergeTreeReaderSettings reader_settings ; <nl> <nl> bool use_uncompressed_cache ; <nl> - bool save_marks_in_cache ; <nl> <nl> Names virt_column_names ; <nl> / / / This header is used for chunks from readFromPart ( ) . 
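The hunks above replace the reader's individually passed I/O knobs (min_bytes_to_use_direct_io, min_bytes_to_use_mmap_io, max_read_buffer_size, save_marks_in_cache) with a single MergeTreeReaderSettings object that the select processor and reader carry around. A minimal sketch of such a grouping follows; the field names mirror the removed constructor parameters, but the exact layout and defaults of the struct declared in MergeTreeIOSettings.h may differ.

#include <cstddef>

/// Sketch only: one settings object in place of four separate constructor parameters.
struct MergeTreeReaderSettings
{
    /// Reads larger than this threshold may bypass the page cache (0 keeps buffered reads).
    size_t min_bytes_to_use_direct_io = 0;
    /// Reads larger than this threshold may use mmap instead of read() (0 disables mmap).
    size_t min_bytes_to_use_mmap_io = 0;
    /// Upper bound for the read buffer allocated per stream (illustrative default).
    size_t max_read_buffer_size = 1048576;
    /// If false, marks loaded on a cache miss are not stored back into the mark cache,
    /// so other cached data is not evicted.
    bool save_marks_in_cache = true;
};

/// Usage sketch: build the settings once and pass a single argument through the new
/// reader_settings_ parameter shown in the MergeTreeBaseSelectProcessor constructor above.
inline MergeTreeReaderSettings makeBackgroundReadSettings()
{
    MergeTreeReaderSettings reader_settings;
    reader_settings.save_marks_in_cache = false;   /// e.g. for one-off background reads
    return reader_settings;
}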
<nl> class MergeTreeBaseSelectProcessor : public SourceWithProgress <nl> std : : shared_ptr < UncompressedCache > owned_uncompressed_cache ; <nl> std : : shared_ptr < MarkCache > owned_mark_cache ; <nl> <nl> - using MergeTreeReaderPtr = std : : unique_ptr < MergeTreeReader > ; <nl> + using MergeTreeReaderPtr = std : : unique_ptr < IMergeTreeReader > ; <nl> MergeTreeReaderPtr reader ; <nl> MergeTreeReaderPtr pre_reader ; <nl> } ; <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeBlockReadUtils . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeBlockReadUtils . cpp <nl> <nl> namespace DB <nl> { <nl> <nl> + <nl> NameSet injectRequiredColumns ( const MergeTreeData & storage , const MergeTreeData : : DataPartPtr & part , Names & columns ) <nl> { <nl> NameSet required_columns { std : : begin ( columns ) , std : : end ( columns ) } ; <nl> MergeTreeReadTaskColumns getReadTaskColumns ( const MergeTreeData & storage , const <nl> / / / Under owned_data_part - > columns_lock we check that all requested columns are of the same type as in the table . <nl> / / / This may be not true in case of ALTER MODIFY . <nl> if ( ! pre_column_names . empty ( ) ) <nl> - storage . check ( data_part - > columns , pre_column_names ) ; <nl> + storage . check ( data_part - > getColumns ( ) , pre_column_names ) ; <nl> if ( ! column_names . empty ( ) ) <nl> - storage . check ( data_part - > columns , column_names ) ; <nl> + storage . check ( data_part - > getColumns ( ) , column_names ) ; <nl> <nl> const NamesAndTypesList & physical_columns = storage . getColumns ( ) . getAllPhysical ( ) ; <nl> result . pre_columns = physical_columns . addTypes ( pre_column_names ) ; <nl> MergeTreeReadTaskColumns getReadTaskColumns ( const MergeTreeData & storage , const <nl> } <nl> else <nl> { <nl> - result . pre_columns = data_part - > columns . addTypes ( pre_column_names ) ; <nl> - result . columns = data_part - > columns . addTypes ( column_names ) ; <nl> + result . pre_columns = data_part - > getColumns ( ) . addTypes ( pre_column_names ) ; <nl> + result . columns = data_part - > getColumns ( ) . addTypes ( column_names ) ; <nl> } <nl> <nl> result . should_reorder = should_reorder ; <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeData . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeData . cpp <nl> <nl> # include < Storages / MergeTree / MergeTreeSequentialBlockInputStream . h > <nl> # include < Storages / MergeTree / MergedBlockOutputStream . h > <nl> # include < Storages / MergeTree / MergedColumnOnlyOutputStream . h > <nl> + # include < Storages / MergeTree / MergeTreeDataPartCompact . h > <nl> + # include < Storages / MergeTree / MergeTreeDataPartWide . h > <nl> # include < Storages / MergeTree / checkDataPart . h > <nl> # include < Storages / StorageMergeTree . h > <nl> # include < Storages / StorageReplicatedMergeTree . h > <nl> namespace ErrorCodes <nl> extern const int UNKNOWN_SETTING ; <nl> extern const int READONLY_SETTING ; <nl> extern const int ABORTED ; <nl> + extern const int UNKNOWN_PART_TYPE ; <nl> extern const int UNEXPECTED_AST_STRUCTURE ; <nl> } <nl> <nl> MergeTreeData : : MergeTreeData ( <nl> " MergeTree data format version on disk doesn ' t support custom partitioning " , <nl> ErrorCodes : : METADATA_MISMATCH ) ; <nl> } <nl> + <nl> + String reason ; <nl> + if ( ! canUsePolymorphicParts ( * settings , & reason ) & & ! reason . empty ( ) ) <nl> + LOG_WARNING ( log , reason + " Settings ' min_bytes_for_wide_part ' and ' min_bytes_for_wide_part ' will be ignored . 
" ) ; <nl> } <nl> <nl> <nl> void MergeTreeData : : setProperties ( const StorageInMemoryMetadata & metadata , bool <nl> <nl> Names new_primary_key_columns ; <nl> Names new_sorting_key_columns ; <nl> + NameSet primary_key_columns_set ; <nl> <nl> for ( size_t i = 0 ; i < sorting_key_size ; + + i ) <nl> { <nl> void MergeTreeData : : setProperties ( const StorageInMemoryMetadata & metadata , bool <nl> + toString ( i ) + " its column is " + pk_column + " , not " + sorting_key_column , <nl> ErrorCodes : : BAD_ARGUMENTS ) ; <nl> <nl> + if ( ! primary_key_columns_set . emplace ( pk_column ) . second ) <nl> + throw Exception ( " Primary key contains duplicate columns " , ErrorCodes : : BAD_ARGUMENTS ) ; <nl> + <nl> new_primary_key_columns . push_back ( pk_column ) ; <nl> } <nl> } <nl> void MergeTreeData : : loadDataParts ( bool skip_sanity_checks ) <nl> { <nl> const auto & part_name = part_names_with_disks [ i ] . first ; <nl> const auto part_disk_ptr = part_names_with_disks [ i ] . second ; <nl> + <nl> MergeTreePartInfo part_info ; <nl> if ( ! MergeTreePartInfo : : tryParsePartName ( part_name , & part_info , format_version ) ) <nl> return ; <nl> <nl> - MutableDataPartPtr part = std : : make_shared < DataPart > ( * this , part_disk_ptr , part_name , part_info ) ; <nl> - part - > relative_path = part_name ; <nl> + auto part = createPart ( part_name , part_info , part_disk_ptr , part_name ) ; <nl> bool broken = false ; <nl> <nl> Poco : : Path part_path ( getFullPathOnDisk ( part_disk_ptr ) , part_name ) ; <nl> void MergeTreeData : : checkAlterIsPossible ( const AlterCommands & commands , const S <nl> if ( settings_ast ) <nl> { <nl> const auto & current_changes = settings_ast - > as < const ASTSetQuery & > ( ) . changes ; <nl> - for ( const auto & changed_setting : metadata . settings_ast - > as < const ASTSetQuery & > ( ) . changes ) <nl> + const auto & new_changes = metadata . settings_ast - > as < const ASTSetQuery & > ( ) . changes ; <nl> + for ( const auto & changed_setting : new_changes ) <nl> { <nl> if ( MergeTreeSettings : : findIndex ( changed_setting . name ) = = MergeTreeSettings : : npos ) <nl> throw Exception { " Storage ' " + getName ( ) + " ' doesn ' t have setting ' " + changed_setting . name + " ' " , <nl> void MergeTreeData : : checkAlterIsPossible ( const AlterCommands & commands , const S <nl> ErrorCodes : : READONLY_SETTING } ; <nl> } <nl> <nl> + if ( current_setting_it = = current_changes . end ( ) <nl> + & & MergeTreeSettings : : isPartFormatSetting ( changed_setting . name ) ) <nl> + { <nl> + MergeTreeSettings copy = * getSettings ( ) ; <nl> + copy . applyChange ( changed_setting ) ; <nl> + String reason ; <nl> + if ( ! canUsePolymorphicParts ( copy , & reason ) & & ! reason . empty ( ) ) <nl> + throw Exception ( " Can ' t change settings . Reason : " + reason , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> + } <nl> + <nl> if ( changed_setting . name = = " storage_policy " ) <nl> setStoragePolicy ( changed_setting . value . safeGet < String > ( ) , / * only_check = * / true ) ; <nl> } <nl> } <nl> <nl> if ( commands . isModifyingData ( ) ) <nl> - { <nl> - / / / Check that type conversions are possible . <nl> - ExpressionActionsPtr unused_expression ; <nl> - NameToNameMap unused_map ; <nl> - bool unused_bool ; <nl> - createConvertExpression ( nullptr , getColumns ( ) . getAllPhysical ( ) , metadata . columns . getAllPhysical ( ) , <nl> - getIndices ( ) . indices , metadata . indices . 
indices , unused_expression , unused_map , unused_bool ) ; <nl> - } <nl> + analyzeAlterConversions ( getColumns ( ) . getAllPhysical ( ) , metadata . columns . getAllPhysical ( ) , getIndices ( ) . indices , metadata . indices . indices ) ; <nl> } <nl> <nl> - void MergeTreeData : : createConvertExpression ( const DataPartPtr & part , const NamesAndTypesList & old_columns , <nl> - const NamesAndTypesList & new_columns , const IndicesASTs & old_indices , const IndicesASTs & new_indices , <nl> - ExpressionActionsPtr & out_expression , NameToNameMap & out_rename_map , bool & out_force_update_metadata ) const <nl> - { <nl> - const auto settings = getSettings ( ) ; <nl> - out_expression = nullptr ; <nl> - out_rename_map = { } ; <nl> - out_force_update_metadata = false ; <nl> - String part_mrk_file_extension ; <nl> - if ( part ) <nl> - part_mrk_file_extension = part - > index_granularity_info . marks_file_extension ; <nl> - else <nl> - part_mrk_file_extension = settings - > index_granularity_bytes = = 0 ? getNonAdaptiveMrkExtension ( ) : getAdaptiveMrkExtension ( ) ; <nl> - <nl> - using NameToType = std : : map < String , const IDataType * > ; <nl> - NameToType new_types ; <nl> - for ( const NameAndTypePair & column : new_columns ) <nl> - new_types . emplace ( column . name , column . type . get ( ) ) ; <nl> - <nl> - / / / For every column that need to be converted : source column name , column name of calculated expression for conversion . <nl> - std : : vector < std : : pair < String , String > > conversions ; <nl> <nl> + AlterAnalysisResult MergeTreeData : : analyzeAlterConversions ( <nl> + const NamesAndTypesList & old_columns , <nl> + const NamesAndTypesList & new_columns , <nl> + const IndicesASTs & old_indices , <nl> + const IndicesASTs & new_indices ) const <nl> + { <nl> + AlterAnalysisResult res ; <nl> <nl> / / / Remove old indices <nl> std : : unordered_set < String > new_indices_set ; <nl> void MergeTreeData : : createConvertExpression ( const DataPartPtr & part , const Name <nl> { <nl> const auto & index = index_decl - > as < ASTIndexDeclaration & > ( ) ; <nl> if ( ! new_indices_set . count ( index . name ) ) <nl> - { <nl> - out_rename_map [ " skp_idx_ " + index . name + " . idx " ] = " " ; / / / drop this file <nl> - out_rename_map [ " skp_idx_ " + index . name + part_mrk_file_extension ] = " " ; / / / and this one <nl> - } <nl> + res . removed_indices . push_back ( index . name ) ; <nl> } <nl> <nl> - / / / Collect counts for shared streams of different columns . As an example , Nested columns have shared stream with array sizes . <nl> - std : : map < String , size_t > stream_counts ; <nl> - for ( const NameAndTypePair & column : old_columns ) <nl> - { <nl> - column . type - > enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> - { <nl> - + + stream_counts [ IDataType : : getFileNameForStream ( column . name , substream_path ) ] ; <nl> - } , { } ) ; <nl> - } <nl> + for ( const NameAndTypePair & column : new_columns ) <nl> + res . new_types . emplace ( column . name , column . type . get ( ) ) ; <nl> <nl> for ( const NameAndTypePair & column : old_columns ) <nl> { <nl> - if ( ! new_types . count ( column . name ) ) <nl> + if ( ! res . new_types . count ( column . name ) ) <nl> { <nl> - / / / The column was deleted . <nl> - if ( ! part | | part - > hasColumnFiles ( column . name , * column . type ) ) <nl> - { <nl> - column . 
type - > enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> - { <nl> - String file_name = IDataType : : getFileNameForStream ( column . name , substream_path ) ; <nl> - <nl> - / / / Delete files if they are no longer shared with another column . <nl> - if ( - - stream_counts [ file_name ] = = 0 ) <nl> - { <nl> - out_rename_map [ file_name + " . bin " ] = " " ; / / / drop this file <nl> - out_rename_map [ file_name + part_mrk_file_extension ] = " " ; / / / and this one <nl> - } <nl> - } , { } ) ; <nl> - } <nl> + res . removed_columns . push_back ( column ) ; <nl> } <nl> else <nl> { <nl> / / / The column was converted . Collect conversions . <nl> - const auto * new_type = new_types [ column . name ] ; <nl> + const auto * new_type = res . new_types [ column . name ] ; <nl> const String new_type_name = new_type - > getName ( ) ; <nl> const auto * old_type = column . type . get ( ) ; <nl> <nl> - if ( ! new_type - > equals ( * old_type ) & & ( ! part | | part - > hasColumnFiles ( column . name , * column . type ) ) ) <nl> + if ( ! new_type - > equals ( * old_type ) ) <nl> { <nl> if ( isMetadataOnlyConversion ( old_type , new_type ) ) <nl> { <nl> - out_force_update_metadata = true ; <nl> + res . force_update_metadata = true ; <nl> continue ; <nl> } <nl> <nl> / / / Need to modify column type . <nl> - if ( ! out_expression ) <nl> - out_expression = std : : make_shared < ExpressionActions > ( NamesAndTypesList ( ) , global_context ) ; <nl> + if ( ! res . expression ) <nl> + res . expression = std : : make_shared < ExpressionActions > ( NamesAndTypesList ( ) , global_context ) ; <nl> <nl> - out_expression - > addInput ( ColumnWithTypeAndName ( nullptr , column . type , column . name ) ) ; <nl> + res . expression - > addInput ( ColumnWithTypeAndName ( nullptr , column . type , column . name ) ) ; <nl> <nl> Names out_names ; <nl> <nl> / / / This is temporary name for expression . TODO Invent the name more safely . <nl> const String new_type_name_column = ' # ' + new_type_name + " _column " ; <nl> - out_expression - > add ( ExpressionAction : : addColumn ( <nl> + res . expression - > add ( ExpressionAction : : addColumn ( <nl> { DataTypeString ( ) . createColumnConst ( 1 , new_type_name ) , std : : make_shared < DataTypeString > ( ) , new_type_name_column } ) ) ; <nl> <nl> const auto & function = FunctionFactory : : instance ( ) . get ( " CAST " , global_context ) ; <nl> - out_expression - > add ( ExpressionAction : : applyFunction ( <nl> + res . expression - > add ( ExpressionAction : : applyFunction ( <nl> function , Names { column . name , new_type_name_column } ) , out_names ) ; <nl> <nl> - out_expression - > add ( ExpressionAction : : removeColumn ( new_type_name_column ) ) ; <nl> - out_expression - > add ( ExpressionAction : : removeColumn ( column . name ) ) ; <nl> - <nl> - conversions . emplace_back ( column . name , out_names . at ( 0 ) ) ; <nl> + res . expression - > add ( ExpressionAction : : removeColumn ( new_type_name_column ) ) ; <nl> + res . expression - > add ( ExpressionAction : : removeColumn ( column . name ) ) ; <nl> <nl> + res . conversions . emplace_back ( column . name , out_names . at ( 0 ) ) ; <nl> } <nl> } <nl> } <nl> <nl> - if ( ! conversions . empty ( ) ) <nl> - { <nl> - / / / Give proper names for temporary columns with conversion results . <nl> + return res ; <nl> + } <nl> <nl> - NamesWithAliases projection ; <nl> - projection . reserve ( conversions . 
size ( ) ) ; <nl> <nl> - for ( const auto & source_and_expression : conversions ) <nl> - { <nl> - / / / Column name for temporary filenames before renaming . NOTE The is unnecessarily tricky . <nl> + MergeTreeDataPartType MergeTreeData : : choosePartType ( size_t bytes_uncompressed , size_t rows_count ) const <nl> + { <nl> + if ( ! canUseAdaptiveGranularity ( ) ) <nl> + return MergeTreeDataPartType : : WIDE ; <nl> <nl> - String original_column_name = source_and_expression . first ; <nl> - String temporary_column_name = original_column_name + " converting " ; <nl> + const auto settings = getSettings ( ) ; <nl> + if ( bytes_uncompressed < settings - > min_bytes_for_wide_part | | rows_count < settings - > min_rows_for_wide_part ) <nl> + return MergeTreeDataPartType : : COMPACT ; <nl> <nl> - projection . emplace_back ( source_and_expression . second , temporary_column_name ) ; <nl> + return MergeTreeDataPartType : : WIDE ; <nl> + } <nl> <nl> - / / / After conversion , we need to rename temporary files into original . <nl> <nl> - new_types [ source_and_expression . first ] - > enumerateStreams ( <nl> - [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> - { <nl> - / / / Skip array sizes , because they cannot be modified in ALTER . <nl> - if ( ! substream_path . empty ( ) & & substream_path . back ( ) . type = = IDataType : : Substream : : ArraySizes ) <nl> - return ; <nl> + MergeTreeData : : MutableDataPartPtr MergeTreeData : : createPart ( const String & name , <nl> + MergeTreeDataPartType type , const MergeTreePartInfo & part_info , <nl> + const DiskPtr & disk , const String & relative_path ) const <nl> + { <nl> + if ( type = = MergeTreeDataPartType : : COMPACT ) <nl> + return std : : make_shared < MergeTreeDataPartCompact > ( * this , name , part_info , disk , relative_path ) ; <nl> + else if ( type = = MergeTreeDataPartType : : WIDE ) <nl> + return std : : make_shared < MergeTreeDataPartWide > ( * this , name , part_info , disk , relative_path ) ; <nl> + else <nl> + throw Exception ( " Unknown type in part " + relative_path , ErrorCodes : : UNKNOWN_PART_TYPE ) ; <nl> + } <nl> <nl> - String original_file_name = IDataType : : getFileNameForStream ( original_column_name , substream_path ) ; <nl> - String temporary_file_name = IDataType : : getFileNameForStream ( temporary_column_name , substream_path ) ; <nl> + static MergeTreeDataPartType getPartTypeFromMarkExtension ( const String & mrk_ext ) <nl> + { <nl> + if ( mrk_ext = = getNonAdaptiveMrkExtension ( ) ) <nl> + return MergeTreeDataPartType : : WIDE ; <nl> + if ( mrk_ext = = getAdaptiveMrkExtension ( MergeTreeDataPartType : : WIDE ) ) <nl> + return MergeTreeDataPartType : : WIDE ; <nl> + if ( mrk_ext = = getAdaptiveMrkExtension ( MergeTreeDataPartType : : COMPACT ) ) <nl> + return MergeTreeDataPartType : : COMPACT ; <nl> <nl> - out_rename_map [ temporary_file_name + " . bin " ] = original_file_name + " . 
bin " ; <nl> - out_rename_map [ temporary_file_name + part_mrk_file_extension ] = original_file_name + part_mrk_file_extension ; <nl> - } , { } ) ; <nl> - } <nl> + throw Exception ( " Can ' t determine part type , because of unknown mark extension " + mrk_ext , ErrorCodes : : UNKNOWN_PART_TYPE ) ; <nl> + } <nl> + <nl> + MergeTreeData : : MutableDataPartPtr MergeTreeData : : createPart ( <nl> + const String & name , const DiskPtr & disk , const String & relative_path ) const <nl> + { <nl> + return createPart ( name , MergeTreePartInfo : : fromPartName ( name , format_version ) , disk , relative_path ) ; <nl> + } <nl> + <nl> + MergeTreeData : : MutableDataPartPtr MergeTreeData : : createPart ( <nl> + const String & name , const MergeTreePartInfo & part_info , <nl> + const DiskPtr & disk , const String & relative_path ) const <nl> + { <nl> + MergeTreeDataPartType type ; <nl> + auto full_path = getFullPathOnDisk ( disk ) + relative_path + " / " ; <nl> + auto mrk_ext = MergeTreeIndexGranularityInfo : : getMrkExtensionFromFS ( full_path ) ; <nl> <nl> - out_expression - > add ( ExpressionAction : : project ( projection ) ) ; <nl> + if ( mrk_ext ) <nl> + type = getPartTypeFromMarkExtension ( * mrk_ext ) ; <nl> + else <nl> + { <nl> + / / / Didn ' t find any mark file , suppose that part is empty . <nl> + type = choosePartType ( 0 , 0 ) ; <nl> } <nl> <nl> - if ( part & & ! out_rename_map . empty ( ) ) <nl> + return createPart ( name , type , part_info , disk , relative_path ) ; <nl> + } <nl> + <nl> + void MergeTreeData : : alterDataPart ( <nl> + const NamesAndTypesList & new_columns , <nl> + const IndicesASTs & new_indices , <nl> + bool skip_sanity_checks , <nl> + AlterDataPartTransactionPtr & transaction ) <nl> + { <nl> + const auto settings = getSettings ( ) ; <nl> + const auto & part = transaction - > getDataPart ( ) ; <nl> + <nl> + auto res = analyzeAlterConversions ( part - > getColumns ( ) , new_columns , getIndices ( ) . indices , new_indices ) ; <nl> + <nl> + NamesAndTypesList additional_columns ; <nl> + transaction - > rename_map = part - > createRenameMapForAlter ( res , part - > getColumns ( ) ) ; <nl> + <nl> + if ( ! transaction - > rename_map . empty ( ) ) <nl> { <nl> WriteBufferFromOwnString out ; <nl> out < < " Will " ; <nl> bool first = true ; <nl> - for ( const auto & from_to : out_rename_map ) <nl> + for ( const auto & [ from , to ] : transaction - > rename_map ) <nl> { <nl> if ( ! first ) <nl> out < < " , " ; <nl> first = false ; <nl> - if ( from_to . second . empty ( ) ) <nl> - out < < " remove " < < from_to . first ; <nl> + if ( to . empty ( ) ) <nl> + out < < " remove " < < from ; <nl> else <nl> - out < < " rename " < < from_to . first < < " to " < < from_to . second ; <nl> + out < < " rename " < < from < < " to " < < to ; <nl> } <nl> out < < " in part " < < part - > name ; <nl> LOG_DEBUG ( log , out . str ( ) ) ; <nl> } <nl> - } <nl> - <nl> - void MergeTreeData : : alterDataPart ( <nl> - const NamesAndTypesList & new_columns , <nl> - const IndicesASTs & new_indices , <nl> - bool skip_sanity_checks , <nl> - AlterDataPartTransactionPtr & transaction ) <nl> - { <nl> - const auto settings = getSettings ( ) ; <nl> - ExpressionActionsPtr expression ; <nl> - const auto & part = transaction - > getDataPart ( ) ; <nl> - bool force_update_metadata ; <nl> - createConvertExpression ( part , part - > columns , new_columns , <nl> - getIndices ( ) . 
indices , new_indices , <nl> - expression , transaction - > rename_map , force_update_metadata ) ; <nl> <nl> size_t num_files_to_modify = transaction - > rename_map . size ( ) ; <nl> size_t num_files_to_remove = 0 ; <nl> void MergeTreeData : : alterDataPart ( <nl> <nl> DataPart : : Checksums add_checksums ; <nl> <nl> - if ( transaction - > rename_map . empty ( ) & & ! force_update_metadata ) <nl> + if ( transaction - > rename_map . empty ( ) & & ! res . force_update_metadata ) <nl> { <nl> transaction - > clear ( ) ; <nl> return ; <nl> } <nl> <nl> / / / Apply the expression and write the result to temporary files . <nl> - if ( expression ) <nl> + if ( res . expression ) <nl> { <nl> BlockInputStreamPtr part_in = std : : make_shared < MergeTreeSequentialBlockInputStream > ( <nl> - * this , part , expression - > getRequiredColumns ( ) , false , / * take_column_types_from_storage = * / false ) ; <nl> - <nl> + * this , part , res . expression - > getRequiredColumns ( ) , false , / * take_column_types_from_storage = * / false ) ; <nl> <nl> auto compression_codec = global_context . chooseCompressionCodec ( <nl> part - > bytes_on_disk , <nl> static_cast < double > ( part - > bytes_on_disk ) / this - > getTotalActiveSizeInBytes ( ) ) ; <nl> - ExpressionBlockInputStream in ( part_in , expression ) ; <nl> + ExpressionBlockInputStream in ( part_in , res . expression ) ; <nl> <nl> / * * Don ' t write offsets for arrays , because ALTER never change them <nl> * ( MODIFY COLUMN could only change types of elements but never modify array sizes ) . <nl> void MergeTreeData : : alterDataPart ( <nl> * temporary column name ( ' converting_column_name ' ) created in ' createConvertExpression ' method <nl> * will have old name of shared offsets for arrays . <nl> * / <nl> - IMergedBlockOutputStream : : WrittenOffsetColumns unused_written_offsets ; <nl> <nl> MergedColumnOnlyOutputStream out ( <nl> - * this , <nl> + part , <nl> in . getHeader ( ) , <nl> - part - > getFullPath ( ) , <nl> true / * sync * / , <nl> compression_codec , <nl> true / * skip_offsets * / , <nl> / / / Don ' t recalc indices because indices alter is restricted <nl> std : : vector < MergeTreeIndexPtr > { } , <nl> - unused_written_offsets , <nl> + nullptr / * offset_columns * / , <nl> part - > index_granularity , <nl> - & part - > index_granularity_info ) ; <nl> + & part - > index_granularity_info , <nl> + true / * is_writing_temp_files * / ) ; <nl> <nl> in . readPrefix ( ) ; <nl> out . writePrefix ( ) ; <nl> void MergeTreeData : : alterDataPart ( <nl> <nl> / / / Write the new column list to the temporary file . <nl> { <nl> - transaction - > new_columns = new_columns . filter ( part - > columns . getNames ( ) ) ; <nl> + transaction - > new_columns = new_columns . filter ( part - > getColumns ( ) . getNames ( ) ) ; <nl> WriteBufferFromFile columns_file ( part - > getFullPath ( ) + " columns . txt . tmp " , 4096 ) ; <nl> transaction - > new_columns . writeText ( columns_file ) ; <nl> transaction - > rename_map [ " columns . txt . tmp " ] = " columns . txt " ; <nl> void MergeTreeData : : changeSettings ( <nl> <nl> void MergeTreeData : : removeEmptyColumnsFromPart ( MergeTreeData : : MutableDataPartPtr & data_part ) <nl> { <nl> - auto & empty_columns = data_part - > empty_columns ; <nl> + auto & empty_columns = data_part - > expired_columns ; <nl> if ( empty_columns . 
empty ( ) ) <nl> return ; <nl> <nl> NamesAndTypesList new_columns ; <nl> - for ( const auto & [ name , type ] : data_part - > columns ) <nl> + for ( const auto & [ name , type ] : data_part - > getColumns ( ) ) <nl> if ( ! empty_columns . count ( name ) ) <nl> new_columns . emplace_back ( name , type ) ; <nl> <nl> void MergeTreeData : : AlterDataPartTransaction : : commit ( ) <nl> <nl> auto & mutable_part = const_cast < DataPart & > ( * data_part ) ; <nl> mutable_part . checksums = new_checksums ; <nl> - mutable_part . columns = new_columns ; <nl> + mutable_part . setColumns ( new_columns ) ; <nl> <nl> / / / 3 ) Delete the old files and drop required columns ( DROP COLUMN ) <nl> for ( const auto & from_to : rename_map ) <nl> void MergeTreeData : : removePartsFromWorkingSet ( const MergeTreeData : : DataPartsVect <nl> <nl> for ( const DataPartPtr & part : remove ) <nl> { <nl> - if ( part - > state = = MergeTreeDataPart : : State : : Committed ) <nl> + if ( part - > state = = IMergeTreeDataPart : : State : : Committed ) <nl> removePartContributionToColumnSizes ( part ) ; <nl> <nl> - if ( part - > state = = MergeTreeDataPart : : State : : Committed | | clear_without_timeout ) <nl> + if ( part - > state = = IMergeTreeDataPart : : State : : Committed | | clear_without_timeout ) <nl> part - > remove_time . store ( remove_time , std : : memory_order_relaxed ) ; <nl> <nl> - if ( part - > state ! = MergeTreeDataPart : : State : : Outdated ) <nl> - modifyPartState ( part , MergeTreeDataPart : : State : : Outdated ) ; <nl> + if ( part - > state ! = IMergeTreeDataPart : : State : : Outdated ) <nl> + modifyPartState ( part , IMergeTreeDataPart : : State : : Outdated ) ; <nl> } <nl> } <nl> <nl> MergeTreeData : : DataPartPtr MergeTreeData : : getPartIfExists ( const String & part_na <nl> <nl> MergeTreeData : : MutableDataPartPtr MergeTreeData : : loadPartAndFixMetadata ( const DiskPtr & disk , const String & relative_path ) <nl> { <nl> - MutableDataPartPtr part = std : : make_shared < DataPart > ( * this , disk , Poco : : Path ( relative_path ) . getFileName ( ) ) ; <nl> - part - > relative_path = relative_path ; <nl> + MutableDataPartPtr part = createPart ( Poco : : Path ( relative_path ) . getFileName ( ) , disk , relative_path ) ; <nl> loadPartAndFixMetadata ( part ) ; <nl> return part ; <nl> } <nl> void MergeTreeData : : loadPartAndFixMetadata ( MutableDataPartPtr part ) <nl> { <nl> String full_part_path = part - > getFullPath ( ) ; <nl> <nl> - / / / Earlier the list of columns was written incorrectly . Delete it and re - create . <nl> - if ( Poco : : File ( full_part_path + " columns . txt " ) . exists ( ) ) <nl> + / / / Earlier the list of columns was written incorrectly . Delete it and re - create . <nl> + / / / But in compact parts we can ' t get list of columns without this file . <nl> + if ( isWidePart ( part ) & & Poco : : File ( full_part_path + " columns . txt " ) . exists ( ) ) <nl> Poco : : File ( full_part_path + " columns . txt " ) . remove ( ) ; <nl> <nl> part - > loadColumnsChecksumsIndexes ( false , true ) ; <nl> void MergeTreeData : : loadPartAndFixMetadata ( MutableDataPartPtr part ) <nl> / / / Check the data while we are at it . <nl> if ( part - > checksums . empty ( ) ) <nl> { <nl> - part - > checksums = checkDataPart ( part , false , primary_key_data_types , skip_indices ) ; <nl> + part - > checksums = checkDataPart ( part , false ) ; <nl> { <nl> WriteBufferFromFile out ( full_part_path + " checksums . txt . tmp " , 4096 ) ; <nl> part - > checksums . 
write ( out ) ; <nl> void MergeTreeData : : addPartContributionToColumnSizes ( const DataPartPtr & part ) <nl> { <nl> std : : shared_lock < std : : shared_mutex > lock ( part - > columns_lock ) ; <nl> <nl> - for ( const auto & column : part - > columns ) <nl> + for ( const auto & column : part - > getColumns ( ) ) <nl> { <nl> ColumnSize & total_column_size = column_sizes [ column . name ] ; <nl> ColumnSize part_column_size = part - > getColumnSize ( column . name , * column . type ) ; <nl> void MergeTreeData : : removePartContributionToColumnSizes ( const DataPartPtr & part <nl> { <nl> std : : shared_lock < std : : shared_mutex > lock ( part - > columns_lock ) ; <nl> <nl> - for ( const auto & column : part - > columns ) <nl> + for ( const auto & column : part - > getColumns ( ) ) <nl> { <nl> ColumnSize & total_column_size = column_sizes [ column . name ] ; <nl> ColumnSize part_column_size = part - > getColumnSize ( column . name , * column . type ) ; <nl> MergeTreeData : : MutableDataPartsVector MergeTreeData : : tryLoadPartsToAttach ( const <nl> for ( const auto & part_names : renamed_parts . old_and_new_names ) <nl> { <nl> LOG_DEBUG ( log , " Checking part " < < part_names . second ) ; <nl> - MutableDataPartPtr part = std : : make_shared < DataPart > ( * this , name_to_disk [ part_names . first ] , part_names . first ) ; <nl> - part - > relative_path = source_dir + part_names . second ; <nl> + MutableDataPartPtr part = createPart ( part_names . first , name_to_disk [ part_names . first ] , source_dir + part_names . second ) ; <nl> loadPartAndFixMetadata ( part ) ; <nl> loaded_parts . push_back ( part ) ; <nl> } <nl> ReservationPtr MergeTreeData : : tryReserveSpace ( UInt64 expected_size , SpacePtr spa <nl> } <nl> <nl> ReservationPtr MergeTreeData : : reserveSpacePreferringTTLRules ( UInt64 expected_size , <nl> - const MergeTreeDataPart : : TTLInfos & ttl_infos , <nl> + const IMergeTreeDataPart : : TTLInfos & ttl_infos , <nl> time_t time_of_move , <nl> size_t min_volume_index ) const <nl> { <nl> ReservationPtr MergeTreeData : : reserveSpacePreferringTTLRules ( UInt64 expected_siz <nl> } <nl> <nl> ReservationPtr MergeTreeData : : tryReserveSpacePreferringTTLRules ( UInt64 expected_size , <nl> - const MergeTreeDataPart : : TTLInfos & ttl_infos , <nl> + const IMergeTreeDataPart : : TTLInfos & ttl_infos , <nl> time_t time_of_move , <nl> size_t min_volume_index ) const <nl> { <nl> SpacePtr MergeTreeData : : TTLEntry : : getDestination ( const StoragePolicyPtr & policy <nl> return { } ; <nl> } <nl> <nl> - bool MergeTreeData : : TTLEntry : : isPartInDestination ( const StoragePolicyPtr & policy , const MergeTreeDataPart & part ) const <nl> + bool MergeTreeData : : TTLEntry : : isPartInDestination ( const StoragePolicyPtr & policy , const IMergeTreeDataPart & part ) const <nl> { <nl> if ( destination_type = = PartDestinationType : : VOLUME ) <nl> { <nl> bool MergeTreeData : : TTLEntry : : isPartInDestination ( const StoragePolicyPtr & polic <nl> } <nl> <nl> std : : optional < MergeTreeData : : TTLEntry > MergeTreeData : : selectTTLEntryForTTLInfos ( <nl> - const MergeTreeDataPart : : TTLInfos & ttl_infos , <nl> + const IMergeTreeDataPart : : TTLInfos & ttl_infos , <nl> time_t time_of_move ) const <nl> { <nl> time_t max_max_ttl = 0 ; <nl> MergeTreeData : : MutableDataPartPtr MergeTreeData : : cloneAndLoadDataPartOnSameDisk ( <nl> LOG_DEBUG ( log , " Cloning part " < < src_part_absolute_path . toString ( ) < < " to " < < dst_part_absolute_path . 
toString ( ) ) ; <nl> localBackup ( src_part_absolute_path , dst_part_absolute_path ) ; <nl> <nl> - MergeTreeData : : MutableDataPartPtr dst_data_part = std : : make_shared < MergeTreeData : : DataPart > ( <nl> - * this , reservation - > getDisk ( ) , dst_part_name , dst_part_info ) ; <nl> + auto dst_data_part = createPart ( dst_part_name , dst_part_info , reservation - > getDisk ( ) , tmp_dst_part_name ) ; <nl> <nl> - dst_data_part - > relative_path = tmp_dst_part_name ; <nl> dst_data_part - > is_temp = true ; <nl> <nl> dst_data_part - > loadColumnsChecksumsIndexes ( require_part_metadata , true ) ; <nl> bool MergeTreeData : : moveParts ( CurrentlyMovingPartsTagger & & moving_tagger ) <nl> return true ; <nl> } <nl> <nl> + bool MergeTreeData : : canUsePolymorphicParts ( const MergeTreeSettings & settings , String * out_reason ) <nl> + { <nl> + if ( ! canUseAdaptiveGranularity ( ) ) <nl> + { <nl> + if ( ( settings . min_rows_for_wide_part ! = 0 | | settings . min_bytes_for_wide_part ! = 0 ) & & out_reason ) <nl> + { <nl> + std : : ostringstream message ; <nl> + message < < " Table can ' t create parts with adaptive granularity , but settings min_rows_for_wide_part = " <nl> + < < settings . min_rows_for_wide_part < < " , min_bytes_for_wide_part = " < < settings . min_bytes_for_wide_part <nl> + < < " . Parts with non - adaptive granularity can be stored only in Wide ( default ) format . " ; <nl> + * out_reason = message . str ( ) ; <nl> + } <nl> + <nl> + return false ; <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> + <nl> } <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeData . h <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeData . h <nl> <nl> # include < Storages / MergeTree / MergeTreeSettings . h > <nl> # include < Storages / MergeTree / MergeTreeMutationStatus . h > <nl> # include < Storages / MergeTree / MergeList . h > <nl> + # include < Storages / MergeTree / AlterAnalysisResult . h > <nl> # include < Storages / MergeTree / PartDestinationType . h > <nl> # include < IO / ReadBufferFromString . h > <nl> # include < IO / WriteBufferFromFile . h > <nl> <nl> # include < DataTypes / DataTypeString . h > <nl> # include < DataTypes / DataTypesNumber . h > <nl> # include < DataStreams / GraphiteRollupSortedBlockInputStream . h > <nl> - # include < Storages / MergeTree / MergeTreeDataPart . h > <nl> + # include < Storages / MergeTree / IMergeTreeDataPart . h > <nl> # include < Storages / IndicesDescription . h > <nl> # include < Storages / MergeTree / MergeTreePartsMover . h > <nl> # include < Interpreters / PartLog . h > <nl> namespace ErrorCodes <nl> / / / The same files as for month - partitioned tables , plus <nl> / / / count . txt - contains total number of rows in this part . <nl> / / / partition . dat - contains the value of the partitioning expression . <nl> - / / / minmax_ [ Column ] . idx - MinMax indexes ( see MergeTreeDataPart : : MinMaxIndex class ) for the columns required by the partitioning expression . <nl> + / / / minmax_ [ Column ] . idx - MinMax indexes ( see IMergeTreeDataPart : : MinMaxIndex class ) for the columns required by the partitioning expression . <nl> / / / <nl> / / / Several modes are implemented . Modes determine additional actions during merge : <nl> / / / - Ordinary - don ' t do anything special <nl> class MergeTreeData : public IStorage <nl> public : <nl> / / / Function to call if the part is suspected to contain corrupt data . 
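<nl> + / / / A minimal sketch ( an assumption , not the actual implementation ) of how choosePartType ( ) below may pick a format <nl> + / / / from the min_rows_for_wide_part / min_bytes_for_wide_part settings ( ' settings ' is a placeholder for the table settings ) : <nl> + / / / if ( ! canUsePolymorphicParts ( settings , nullptr ) ) <nl> + / / / return MergeTreeDataPartType : : WIDE ; <nl> + / / / if ( bytes_uncompressed > = settings . min_bytes_for_wide_part | | rows_count > = settings . min_rows_for_wide_part ) <nl> + / / / return MergeTreeDataPartType : : WIDE ; <nl> + / / / return MergeTreeDataPartType : : COMPACT ; <nl> + / / / How the two thresholds are combined ( and / or , strict or not ) is also an assumption .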
<nl> using BrokenPartCallback = std : : function < void ( const String & ) > ; <nl> - using DataPart = MergeTreeDataPart ; <nl> + using DataPart = IMergeTreeDataPart ; <nl> <nl> using MutableDataPartPtr = std : : shared_ptr < DataPart > ; <nl> using MutableDataPartsVector = std : : vector < MutableDataPartPtr > ; <nl> / / / After the DataPart is added to the working set , it cannot be changed . <nl> using DataPartPtr = std : : shared_ptr < const DataPart > ; <nl> <nl> - using DataPartState = MergeTreeDataPart : : State ; <nl> + using DataPartState = IMergeTreeDataPart : : State ; <nl> using DataPartStates = std : : initializer_list < DataPartState > ; <nl> using DataPartStateVector = std : : vector < DataPartState > ; <nl> <nl> class MergeTreeData : public IStorage <nl> using DataPartsLock = std : : unique_lock < std : : mutex > ; <nl> DataPartsLock lockParts ( ) const { return DataPartsLock ( data_parts_mutex ) ; } <nl> <nl> + MergeTreeDataPartType choosePartType ( size_t bytes_uncompressed , size_t rows_count ) const ; <nl> + <nl> + / / / After this method setColumns must be called <nl> + MutableDataPartPtr createPart ( const String & name , <nl> + MergeTreeDataPartType type , const MergeTreePartInfo & part_info , <nl> + const DiskPtr & disk , const String & relative_path ) const ; <nl> + <nl> + / / / After this methods ' loadColumnsChecksumsIndexes ' must be called <nl> + MutableDataPartPtr createPart ( const String & name , <nl> + const DiskPtr & disk , const String & relative_path ) const ; <nl> + <nl> + MutableDataPartPtr createPart ( const String & name , const MergeTreePartInfo & part_info , <nl> + const DiskPtr & disk , const String & relative_path ) const ; <nl> + <nl> / / / Auxiliary object to add a set of parts into the working set in two steps : <nl> / / / * First , as PreCommitted parts ( the parts are ready , but not yet in the active set ) . <nl> / / / * Next , if commit ( ) is called , the parts are added to the active set and the parts that are <nl> class MergeTreeData : public IStorage <nl> DataPartPtr getPartIfExists ( const String & part_name , const DataPartStates & valid_states ) ; <nl> DataPartPtr getPartIfExists ( const MergeTreePartInfo & part_info , const DataPartStates & valid_states ) ; <nl> <nl> + std : : vector < MergeTreeIndexPtr > getSkipIndices ( ) const <nl> + { <nl> + return std : : vector < MergeTreeIndexPtr > ( std : : begin ( skip_indices ) , std : : end ( skip_indices ) ) ; <nl> + } <nl> + <nl> / / / Total size of active parts in bytes . <nl> size_t getTotalActiveSizeInBytes ( ) const ; <nl> <nl> class MergeTreeData : public IStorage <nl> <nl> / / / Reserves space at least 1MB preferring best destination according to ` ttl_infos ` . <nl> ReservationPtr reserveSpacePreferringTTLRules ( UInt64 expected_size , <nl> - const MergeTreeDataPart : : TTLInfos & ttl_infos , <nl> + const IMergeTreeDataPart : : TTLInfos & ttl_infos , <nl> time_t time_of_move , <nl> size_t min_volume_index = 0 ) const ; <nl> ReservationPtr tryReserveSpacePreferringTTLRules ( UInt64 expected_size , <nl> - const MergeTreeDataPart : : TTLInfos & ttl_infos , <nl> + const IMergeTreeDataPart : : TTLInfos & ttl_infos , <nl> time_t time_of_move , <nl> size_t min_volume_index = 0 ) const ; <nl> / / / Choose disk with max available free space <nl> class MergeTreeData : public IStorage <nl> SpacePtr getDestination ( const StoragePolicyPtr & policy ) const ; <nl> <nl> / / / Checks if given part already belongs destination disk or volume for this rule . 
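<nl> + / / / Illustrative usage of the part factory declared above ( ' data ' , ' codec ' and the other names are placeholders ) : <nl> + / / / auto part = data . createPart ( name , type , part_info , disk , relative_path ) ; <nl> + / / / part - > setColumns ( columns ) ; <nl> + / / / MergedBlockOutputStream out { part , columns , codec } ; <nl> + / / / setColumns ( ) must be called before writing , and the output streams now take the part itself instead of a path .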
<nl> - bool isPartInDestination ( const StoragePolicyPtr & policy , const MergeTreeDataPart & part ) const ; <nl> + bool isPartInDestination ( const StoragePolicyPtr & policy , const IMergeTreeDataPart & part ) const ; <nl> <nl> bool isEmpty ( ) const { return expression = = nullptr ; } <nl> } ; <nl> <nl> - std : : optional < TTLEntry > selectTTLEntryForTTLInfos ( const MergeTreeDataPart : : TTLInfos & ttl_infos , time_t time_of_move ) const ; <nl> + std : : optional < TTLEntry > selectTTLEntryForTTLInfos ( const IMergeTreeDataPart : : TTLInfos & ttl_infos , time_t time_of_move ) const ; <nl> <nl> using TTLEntriesByName = std : : unordered_map < String , TTLEntry > ; <nl> TTLEntriesByName column_ttl_entries_by_name ; <nl> class MergeTreeData : public IStorage <nl> <nl> protected : <nl> <nl> - friend struct MergeTreeDataPart ; <nl> + friend class IMergeTreeDataPart ; <nl> friend class MergeTreeDataMergerMutator ; <nl> friend class ReplicatedMergeTreeAlterThread ; <nl> friend struct ReplicatedMergeTreeTableMetadata ; <nl> class MergeTreeData : public IStorage <nl> void setTTLExpressions ( const ColumnsDescription : : ColumnTTLs & new_column_ttls , <nl> const ASTPtr & new_ttl_table_ast , bool only_check = false ) ; <nl> <nl> - void setStoragePolicy ( const String & new_storage_policy_name , bool only_check = false ) ; <nl> + AlterAnalysisResult analyzeAlterConversions ( <nl> + const NamesAndTypesList & old_columns , <nl> + const NamesAndTypesList & new_columns , <nl> + const IndicesASTs & old_indices , <nl> + const IndicesASTs & new_indices ) const ; <nl> <nl> - / / / Expression for column type conversion . <nl> - / / / If no conversions are needed , out_expression = nullptr . <nl> - / / / out_rename_map maps column files for the out_expression onto new table files . <nl> - / / / out_force_update_metadata denotes if metadata must be changed even if out_rename_map is empty ( used <nl> - / / / for transformation - free changing of Enum values list ) . <nl> - / / / Files to be deleted are mapped to an empty string in out_rename_map . <nl> - / / / If part = = nullptr , just checks that all type conversions are possible . <nl> - void createConvertExpression ( const DataPartPtr & part , const NamesAndTypesList & old_columns , const NamesAndTypesList & new_columns , <nl> - const IndicesASTs & old_indices , const IndicesASTs & new_indices , <nl> - ExpressionActionsPtr & out_expression , NameToNameMap & out_rename_map , bool & out_force_update_metadata ) const ; <nl> + void setStoragePolicy ( const String & new_storage_policy_name , bool only_check = false ) ; <nl> <nl> / / / Calculates column sizes in compressed form for the current state of data_parts . Call with data_parts mutex locked . <nl> void calculateColumnSizesImpl ( ) ; <nl> class MergeTreeData : public IStorage <nl> <nl> / / / Check selected parts for movements . Used by ALTER . . . MOVE queries . <nl> CurrentlyMovingPartsTagger checkPartsForMove ( const DataPartsVector & parts , SpacePtr space ) ; <nl> + <nl> + bool canUsePolymorphicParts ( const MergeTreeSettings & settings , String * out_reason ) ; <nl> } ; <nl> <nl> } <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeDataMergerMutator . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeDataMergerMutator . cpp <nl> static const double DISK_USAGE_COEFFICIENT_TO_SELECT = 2 ; <nl> / / / because between selecting parts to merge and doing merge , amount of free space could have decreased . <nl> static const double DISK_USAGE_COEFFICIENT_TO_RESERVE = 1 . 
1 ; <nl> <nl> - <nl> void FutureMergedMutatedPart : : assign ( MergeTreeData : : DataPartsVector parts_ ) <nl> + { <nl> + if ( parts_ . empty ( ) ) <nl> + return ; <nl> + <nl> + size_t sum_rows = 0 ; <nl> + size_t sum_bytes_uncompressed = 0 ; <nl> + for ( const auto & part : parts_ ) <nl> + { <nl> + sum_rows + = part - > rows_count ; <nl> + sum_bytes_uncompressed + = part - > getTotalColumnsSize ( ) . data_uncompressed ; <nl> + } <nl> + <nl> + auto future_part_type = parts_ . front ( ) - > storage . choosePartType ( sum_bytes_uncompressed , sum_rows ) ; <nl> + assign ( std : : move ( parts_ ) , future_part_type ) ; <nl> + } <nl> + <nl> + void FutureMergedMutatedPart : : assign ( MergeTreeData : : DataPartsVector parts_ , MergeTreeDataPartType future_part_type ) <nl> { <nl> if ( parts_ . empty ( ) ) <nl> return ; <nl> void FutureMergedMutatedPart : : assign ( MergeTreeData : : DataPartsVector parts_ ) <nl> max_mutation = std : : max ( max_mutation , part - > info . mutation ) ; <nl> } <nl> <nl> + type = future_part_type ; <nl> part_info . partition_id = parts . front ( ) - > info . partition_id ; <nl> part_info . min_block = parts . front ( ) - > info . min_block ; <nl> part_info . max_block = parts . back ( ) - > info . max_block ; <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mergePartsToTempor <nl> <nl> LOG_DEBUG ( log , " Merging " < < parts . size ( ) < < " parts : from " <nl> < < parts . front ( ) - > name < < " to " < < parts . back ( ) - > name <nl> - < < " into " < < TMP_PREFIX + future_part . name ) ; <nl> + < < " into " < < TMP_PREFIX + future_part . name + " with type " + future_part . type . toString ( ) ) ; <nl> <nl> String part_path = data . getFullPathOnDisk ( space_reservation - > getDisk ( ) ) ; <nl> String new_part_tmp_path = part_path + TMP_PREFIX + future_part . name + " / " ; <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mergePartsToTempor <nl> throw Exception ( " Directory " + new_part_tmp_path + " already exists " , ErrorCodes : : DIRECTORY_ALREADY_EXISTS ) ; <nl> <nl> MergeTreeData : : DataPart : : ColumnToSize merged_column_to_size ; <nl> - for ( const MergeTreeData : : DataPartPtr & part : parts ) <nl> - part - > accumulateColumnSizes ( merged_column_to_size ) ; <nl> <nl> Names all_column_names = data . getColumns ( ) . getNamesOfPhysical ( ) ; <nl> NamesAndTypesList all_columns = data . getColumns ( ) . getAllPhysical ( ) ; <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mergePartsToTempor <nl> all_columns , data . sorting_key_expr , data . skip_indices , <nl> data . merging_params , gathering_columns , gathering_column_names , merging_columns , merging_column_names ) ; <nl> <nl> - MergeTreeData : : MutableDataPartPtr new_data_part = std : : make_shared < MergeTreeData : : DataPart > ( <nl> - data , space_reservation - > getDisk ( ) , future_part . name , future_part . part_info ) ; <nl> + MergeTreeData : : MutableDataPartPtr new_data_part = data . createPart ( <nl> + future_part . name , <nl> + future_part . type , <nl> + future_part . part_info , <nl> + space_reservation - > getDisk ( ) , <nl> + TMP_PREFIX + future_part . name ) ; <nl> + <nl> + new_data_part - > setColumns ( all_columns ) ; <nl> new_data_part - > partition . assign ( future_part . getPartition ( ) ) ; <nl> - new_data_part - > relative_path = TMP_PREFIX + future_part . 
name ; <nl> new_data_part - > is_temp = true ; <nl> <nl> - size_t sum_input_rows_upper_bound = merge_entry - > total_rows_count ; <nl> - <nl> bool need_remove_expired_values = force_ttl ; <nl> - for ( const MergeTreeData : : DataPartPtr & part : parts ) <nl> + for ( const auto & part : parts ) <nl> new_data_part - > ttl_infos . update ( part - > ttl_infos ) ; <nl> <nl> const auto & part_min_ttl = new_data_part - > ttl_infos . part_min_ttl ; <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mergePartsToTempor <nl> need_remove_expired_values = false ; <nl> } <nl> <nl> + size_t sum_input_rows_upper_bound = merge_entry - > total_rows_count ; <nl> MergeAlgorithm merge_alg = chooseMergeAlgorithm ( parts , sum_input_rows_upper_bound , gathering_columns , deduplicate , need_remove_expired_values ) ; <nl> <nl> LOG_DEBUG ( log , " Selected MergeAlgorithm : " < < ( ( merge_alg = = MergeAlgorithm : : Vertical ) ? " Vertical " : " Horizontal " ) ) ; <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mergePartsToTempor <nl> String rows_sources_file_path ; <nl> std : : unique_ptr < WriteBuffer > rows_sources_uncompressed_write_buf ; <nl> std : : unique_ptr < WriteBuffer > rows_sources_write_buf ; <nl> + std : : optional < ColumnSizeEstimator > column_sizes ; <nl> <nl> if ( merge_alg = = MergeAlgorithm : : Vertical ) <nl> { <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mergePartsToTempor <nl> rows_sources_file_path = new_part_tmp_path + " rows_sources " ; <nl> rows_sources_uncompressed_write_buf = std : : make_unique < WriteBufferFromFile > ( rows_sources_file_path ) ; <nl> rows_sources_write_buf = std : : make_unique < CompressedWriteBuffer > ( * rows_sources_uncompressed_write_buf ) ; <nl> + <nl> + for ( const MergeTreeData : : DataPartPtr & part : parts ) <nl> + part - > accumulateColumnSizes ( merged_column_to_size ) ; <nl> + <nl> + column_sizes = ColumnSizeEstimator ( merged_column_to_size , merging_column_names , gathering_column_names ) ; <nl> } <nl> else <nl> { <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mergePartsToTempor <nl> gathering_column_names . clear ( ) ; <nl> } <nl> <nl> - ColumnSizeEstimator column_sizes ( merged_column_to_size , merging_column_names , gathering_column_names ) ; <nl> - <nl> / * * Read from all parts , merge and write into a new one . <nl> * In passing , we calculate expression for sorting . <nl> * / <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mergePartsToTempor <nl> } <nl> <nl> MergeStageProgress horizontal_stage_progress ( <nl> - merge_alg = = MergeAlgorithm : : Horizontal ? 1 . 0 : column_sizes . keyColumnsWeight ( ) ) ; <nl> + column_sizes ? column_sizes - > keyColumnsWeight ( ) : 1 . 0 ) ; <nl> <nl> for ( const auto & part : parts ) <nl> { <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mergePartsToTempor <nl> merged_stream = std : : make_shared < TTLBlockInputStream > ( merged_stream , data , new_data_part , time_of_merge , force_ttl ) ; <nl> <nl> MergedBlockOutputStream to { <nl> - data , <nl> - new_part_tmp_path , <nl> + new_data_part , <nl> merging_columns , <nl> compression_codec , <nl> merged_column_to_size , <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mergePartsToTempor <nl> { <nl> size_t sum_input_rows_exact = merge_entry - > rows_read ; <nl> merge_entry - > columns_written = merging_column_names . size ( ) ; <nl> - merge_entry - > progress . store ( column_sizes . 
keyColumnsWeight ( ) , std : : memory_order_relaxed ) ; <nl> + merge_entry - > progress . store ( column_sizes - > keyColumnsWeight ( ) , std : : memory_order_relaxed ) ; <nl> <nl> BlockInputStreams column_part_streams ( parts . size ( ) ) ; <nl> <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mergePartsToTempor <nl> Names column_names { column_name } ; <nl> Float64 progress_before = merge_entry - > progress . load ( std : : memory_order_relaxed ) ; <nl> <nl> - MergeStageProgress column_progress ( progress_before , column_sizes . columnWeight ( column_name ) ) ; <nl> + MergeStageProgress column_progress ( progress_before , column_sizes - > columnWeight ( column_name ) ) ; <nl> for ( size_t part_num = 0 ; part_num < parts . size ( ) ; + + part_num ) <nl> { <nl> auto column_part_stream = std : : make_shared < MergeTreeSequentialBlockInputStream > ( <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mergePartsToTempor <nl> ColumnGathererStream column_gathered_stream ( column_name , column_part_streams , rows_sources_read_buf ) ; <nl> <nl> MergedColumnOnlyOutputStream column_to ( <nl> - data , <nl> + new_data_part , <nl> column_gathered_stream . getHeader ( ) , <nl> - new_part_tmp_path , <nl> false , <nl> compression_codec , <nl> false , <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mergePartsToTempor <nl> / / / because all of them were already recalculated and written <nl> / / / as key part of vertical merge <nl> std : : vector < MergeTreeIndexPtr > { } , <nl> - written_offset_columns , <nl> + & written_offset_columns , <nl> to . getIndexGranularity ( ) ) ; <nl> <nl> size_t column_elems_written = 0 ; <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mergePartsToTempor <nl> <nl> merge_entry - > columns_written + = 1 ; <nl> merge_entry - > bytes_written_uncompressed + = column_gathered_stream . getProfileInfo ( ) . bytes ; <nl> - merge_entry - > progress . store ( progress_before + column_sizes . columnWeight ( column_name ) , std : : memory_order_relaxed ) ; <nl> + merge_entry - > progress . store ( progress_before + column_sizes - > columnWeight ( column_name ) , std : : memory_order_relaxed ) ; <nl> } <nl> <nl> Poco : : File ( rows_sources_file_path ) . remove ( ) ; <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mergePartsToTempor <nl> <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mutatePartToTemporaryPart ( <nl> const FutureMergedMutatedPart & future_part , <nl> - const std : : vector < MutationCommand > & commands , <nl> + const MutationCommands & commands , <nl> MergeListEntry & merge_entry , <nl> const Context & context , <nl> const ReservationPtr & space_reservation , <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mutatePartToTempor <nl> context_for_reading . getSettingsRef ( ) . max_streams_to_max_threads_ratio = 1 ; <nl> context_for_reading . getSettingsRef ( ) . max_threads = 1 ; <nl> <nl> - std : : vector < MutationCommand > commands_for_part ; <nl> + MutationCommands commands_for_part ; <nl> std : : copy_if ( <nl> std : : cbegin ( commands ) , std : : cend ( commands ) , <nl> std : : back_inserter ( commands_for_part ) , <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mutatePartToTempor <nl> command . partition , context_for_reading ) ; <nl> } ) ; <nl> <nl> + if ( isCompactPart ( source_part ) ) <nl> + commands_for_part . additional_columns = source_part - > getColumns ( ) . 
getNames ( ) ; <nl> <nl> if ( ! isStorageTouchedByMutations ( storage_from_source_part , commands_for_part , context_for_reading ) ) <nl> { <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mutatePartToTempor <nl> else <nl> LOG_TRACE ( log , " Mutating part " < < source_part - > name < < " to mutation version " < < future_part . part_info . mutation ) ; <nl> <nl> - MergeTreeData : : MutableDataPartPtr new_data_part = std : : make_shared < MergeTreeData : : DataPart > ( <nl> - data , space_reservation - > getDisk ( ) , future_part . name , future_part . part_info ) ; <nl> - new_data_part - > relative_path = " tmp_mut_ " + future_part . name ; <nl> + MutationsInterpreter mutations_interpreter ( storage_from_source_part , commands_for_part , context_for_reading , true ) ; <nl> + auto in = mutations_interpreter . execute ( table_lock_holder ) ; <nl> + const auto & updated_header = mutations_interpreter . getUpdatedHeader ( ) ; <nl> + <nl> + NamesAndTypesList all_columns = data . getColumns ( ) . getAllPhysical ( ) ; <nl> + <nl> + const auto & source_column_names = source_part - > getColumns ( ) . getNames ( ) ; <nl> + const auto & updated_column_names = updated_header . getNames ( ) ; <nl> + <nl> + NameSet new_columns_set ( source_column_names . begin ( ) , source_column_names . end ( ) ) ; <nl> + new_columns_set . insert ( updated_column_names . begin ( ) , updated_column_names . end ( ) ) ; <nl> + auto new_columns = all_columns . filter ( new_columns_set ) ; <nl> + <nl> + auto new_data_part = data . createPart ( <nl> + future_part . name , <nl> + future_part . type , <nl> + future_part . part_info , <nl> + space_reservation - > getDisk ( ) , <nl> + " tmp_mut_ " + future_part . name ) ; <nl> + <nl> new_data_part - > is_temp = true ; <nl> new_data_part - > ttl_infos = source_part - > ttl_infos ; <nl> + <nl> + / / / It shouldn ' t be changed by mutation . <nl> new_data_part - > index_granularity_info = source_part - > index_granularity_info ; <nl> + new_data_part - > setColumns ( new_columns ) ; <nl> <nl> String new_part_tmp_path = new_data_part - > getFullPath ( ) ; <nl> <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mutatePartToTempor <nl> <nl> Poco : : File ( new_part_tmp_path ) . createDirectories ( ) ; <nl> <nl> - MutationsInterpreter mutations_interpreter ( storage_from_source_part , commands_for_part , context_for_reading , true ) ; <nl> - auto in = mutations_interpreter . execute ( table_lock_holder ) ; <nl> - const auto & updated_header = mutations_interpreter . getUpdatedHeader ( ) ; <nl> - <nl> - NamesAndTypesList all_columns = data . getColumns ( ) . getAllPhysical ( ) ; <nl> const auto data_settings = data . getSettings ( ) ; <nl> <nl> Block in_header = in - > getHeader ( ) ; <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mutatePartToTempor <nl> MergeStageProgress stage_progress ( 1 . 0 ) ; <nl> in - > setProgressCallback ( MergeProgressCallback ( merge_entry , watch_prev_elapsed , stage_progress ) ) ; <nl> <nl> - if ( updated_header . columns ( ) = = all_columns . size ( ) ) <nl> + / / / All columns from part are changed and may be some more that were missing before in part <nl> + if ( source_part - > getColumns ( ) . isSubsetOf ( updated_header . getNamesAndTypesList ( ) ) ) <nl> { <nl> / / / All columns are modified , proceed to write a new part from scratch . <nl> if ( data . hasPrimaryKey ( ) | | data . 
hasSkipIndices ( ) ) <nl> in = std : : make_shared < MaterializingBlockInputStream > ( <nl> std : : make_shared < ExpressionBlockInputStream > ( in , data . primary_key_and_skip_indices_expr ) ) ; <nl> <nl> - MergeTreeDataPart : : MinMaxIndex minmax_idx ; <nl> + IMergeTreeDataPart : : MinMaxIndex minmax_idx ; <nl> <nl> - MergedBlockOutputStream out ( data , new_part_tmp_path , all_columns , compression_codec ) ; <nl> + MergedBlockOutputStream out { <nl> + new_data_part , <nl> + all_columns , <nl> + compression_codec } ; <nl> <nl> in - > readPrefix ( ) ; <nl> out . writePrefix ( ) ; <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mutatePartToTempor <nl> NameSet files_to_skip = { " checksums . txt " , " columns . txt " } ; <nl> <nl> / / / Don ' t change granularity type while mutating subset of columns <nl> - auto mrk_extension = source_part - > index_granularity_info . is_adaptive ? getAdaptiveMrkExtension ( ) : getNonAdaptiveMrkExtension ( ) ; <nl> + auto mrk_extension = source_part - > index_granularity_info . is_adaptive ? getAdaptiveMrkExtension ( new_data_part - > getType ( ) ) : getNonAdaptiveMrkExtension ( ) ; <nl> for ( const auto & entry : updated_header ) <nl> { <nl> IDataType : : StreamCallback callback = [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mutatePartToTempor <nl> IDataType : : SubstreamPath stream_path ; <nl> entry . type - > enumerateStreams ( callback , stream_path ) ; <nl> } <nl> + <nl> for ( const auto & index : indices_to_recalc ) <nl> { <nl> files_to_skip . insert ( index - > getFileName ( ) + " . idx " ) ; <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mutatePartToTempor <nl> <nl> merge_entry - > columns_written = all_columns . size ( ) - updated_header . columns ( ) ; <nl> <nl> - IMergedBlockOutputStream : : WrittenOffsetColumns unused_written_offsets ; <nl> MergedColumnOnlyOutputStream out ( <nl> - data , <nl> + new_data_part , <nl> updated_header , <nl> - new_part_tmp_path , <nl> / * sync = * / false , <nl> compression_codec , <nl> / * skip_offsets = * / false , <nl> std : : vector < MergeTreeIndexPtr > ( indices_to_recalc . begin ( ) , indices_to_recalc . end ( ) ) , <nl> - unused_written_offsets , <nl> + nullptr , <nl> source_part - > index_granularity , <nl> & source_part - > index_granularity_info <nl> ) ; <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataMergerMutator : : mutatePartToTempor <nl> WriteBufferFromFile out_checksums ( new_part_tmp_path + " checksums . txt " , 4096 ) ; <nl> new_data_part - > checksums . write ( out_checksums ) ; <nl> } <nl> - <nl> - / / / Write the columns list of the resulting part in the same order as all_columns . <nl> - new_data_part - > columns = all_columns ; <nl> - Names source_column_names = source_part - > columns . getNames ( ) ; <nl> - NameSet source_columns_name_set ( source_column_names . begin ( ) , source_column_names . end ( ) ) ; <nl> - for ( auto it = new_data_part - > columns . begin ( ) ; it ! = new_data_part - > columns . end ( ) ; ) <nl> - { <nl> - if ( source_columns_name_set . count ( it - > name ) | | updated_header . has ( it - > name ) ) <nl> - + + it ; <nl> - else <nl> - it = new_data_part - > columns . erase ( it ) ; <nl> - } <nl> { <nl> / / / Write a file with a description of columns . <nl> WriteBufferFromFile out_columns ( new_part_tmp_path + " columns . txt " , 4096 ) ; <nl> - new_data_part - > columns . 
writeText ( out_columns ) ; <nl> + new_data_part - > getColumns ( ) . writeText ( out_columns ) ; <nl> } <nl> <nl> new_data_part - > rows_count = source_part - > rows_count ; <nl> MergeTreeDataMergerMutator : : MergeAlgorithm MergeTreeDataMergerMutator : : chooseMer <nl> if ( need_remove_expired_values ) <nl> return MergeAlgorithm : : Horizontal ; <nl> <nl> + for ( const auto & part : parts ) <nl> + if ( ! part - > supportsVerticalMerge ( ) ) <nl> + return MergeAlgorithm : : Horizontal ; <nl> + <nl> bool is_supported_storage = <nl> data . merging_params . mode = = MergeTreeData : : MergingParams : : Ordinary | | <nl> data . merging_params . mode = = MergeTreeData : : MergingParams : : Collapsing | | <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeDataMergerMutator . h <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeDataMergerMutator . h <nl> struct FutureMergedMutatedPart <nl> { <nl> String name ; <nl> String path ; <nl> + MergeTreeDataPartType type ; <nl> MergeTreePartInfo part_info ; <nl> MergeTreeData : : DataPartsVector parts ; <nl> <nl> const MergeTreePartition & getPartition ( ) const { return parts . front ( ) - > partition ; } <nl> <nl> FutureMergedMutatedPart ( ) = default ; <nl> + <nl> explicit FutureMergedMutatedPart ( MergeTreeData : : DataPartsVector parts_ ) <nl> { <nl> assign ( std : : move ( parts_ ) ) ; <nl> } <nl> <nl> + FutureMergedMutatedPart ( MergeTreeData : : DataPartsVector parts_ , MergeTreeDataPartType future_part_type ) <nl> + { <nl> + assign ( std : : move ( parts_ ) , future_part_type ) ; <nl> + } <nl> + <nl> void assign ( MergeTreeData : : DataPartsVector parts_ ) ; <nl> + void assign ( MergeTreeData : : DataPartsVector parts_ , MergeTreeDataPartType future_part_type ) ; <nl> + <nl> void updatePath ( const MergeTreeData & storage , const ReservationPtr & reservation ) ; <nl> } ; <nl> <nl> class MergeTreeDataMergerMutator <nl> / / / Mutate a single data part with the specified commands . Will create and return a temporary part . <nl> MergeTreeData : : MutableDataPartPtr mutatePartToTemporaryPart ( <nl> const FutureMergedMutatedPart & future_part , <nl> - const std : : vector < MutationCommand > & commands , <nl> + const MutationCommands & commands , <nl> MergeListEntry & merge_entry , const Context & context , <nl> const ReservationPtr & disk_reservation , <nl> TableStructureReadLockHolder & table_lock_holder ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 47aa013fd9c <nl> mmm / dev / null <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeDataPartCompact . cpp <nl> <nl> + # include " MergeTreeDataPartCompact . h " <nl> + # include < Storages / MergeTree / MergeTreeReaderCompact . h > <nl> + # include < Storages / MergeTree / MergeTreeDataPartWriterCompact . h > <nl> + # include < Storages / MergeTree / IMergeTreeReader . h > <nl> + # include < Poco / File . 
h > <nl> + <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + namespace ErrorCodes <nl> + { <nl> + extern const int NO_FILE_IN_DATA_PART ; <nl> + extern const int BAD_SIZE_OF_FILE_IN_DATA_PART ; <nl> + } <nl> + <nl> + <nl> + MergeTreeDataPartCompact : : MergeTreeDataPartCompact ( <nl> + MergeTreeData & storage_ , <nl> + const String & name_ , <nl> + const DiskPtr & disk_ , <nl> + const std : : optional < String > & relative_path_ ) <nl> + : IMergeTreeDataPart ( storage_ , name_ , disk_ , relative_path_ , Type : : COMPACT ) <nl> + { <nl> + } <nl> + <nl> + MergeTreeDataPartCompact : : MergeTreeDataPartCompact ( <nl> + const MergeTreeData & storage_ , <nl> + const String & name_ , <nl> + const MergeTreePartInfo & info_ , <nl> + const DiskPtr & disk_ , <nl> + const std : : optional < String > & relative_path_ ) <nl> + : IMergeTreeDataPart ( storage_ , name_ , info_ , disk_ , relative_path_ , Type : : COMPACT ) <nl> + { <nl> + } <nl> + <nl> + IMergeTreeDataPart : : MergeTreeReaderPtr MergeTreeDataPartCompact : : getReader ( <nl> + const NamesAndTypesList & columns_to_read , <nl> + const MarkRanges & mark_ranges , <nl> + UncompressedCache * uncompressed_cache , <nl> + MarkCache * mark_cache , <nl> + const MergeTreeReaderSettings & reader_settings , <nl> + const ValueSizeMap & avg_value_size_hints , <nl> + const ReadBufferFromFileBase : : ProfileCallback & profile_callback ) const <nl> + { <nl> + auto ptr = std : : static_pointer_cast < const MergeTreeDataPartCompact > ( shared_from_this ( ) ) ; <nl> + return std : : make_unique < MergeTreeReaderCompact > ( <nl> + ptr , columns_to_read , uncompressed_cache , <nl> + mark_cache , mark_ranges , reader_settings , <nl> + avg_value_size_hints , profile_callback ) ; <nl> + } <nl> + <nl> + IMergeTreeDataPart : : MergeTreeWriterPtr MergeTreeDataPartCompact : : getWriter ( <nl> + const NamesAndTypesList & columns_list , <nl> + const std : : vector < MergeTreeIndexPtr > & indices_to_recalc , <nl> + const CompressionCodecPtr & default_codec , <nl> + const MergeTreeWriterSettings & writer_settings , <nl> + const MergeTreeIndexGranularity & computed_index_granularity ) const <nl> + { <nl> + NamesAndTypesList ordered_columns_list ; <nl> + std : : copy_if ( columns_list . begin ( ) , columns_list . end ( ) , std : : back_inserter ( ordered_columns_list ) , <nl> + [ this ] ( const auto & column ) { return getColumnPosition ( column . name ) ! = std : : nullopt ; } ) ; <nl> + <nl> + / / / Order of writing is important in compact format <nl> + ordered_columns_list . sort ( [ this ] ( const auto & lhs , const auto & rhs ) <nl> + { return * getColumnPosition ( lhs . name ) < * getColumnPosition ( rhs . name ) ; } ) ; <nl> + <nl> + return std : : make_unique < MergeTreeDataPartWriterCompact > ( <nl> + getFullPath ( ) , storage , ordered_columns_list , indices_to_recalc , <nl> + index_granularity_info . marks_file_extension , <nl> + default_codec , writer_settings , computed_index_granularity ) ; <nl> + } <nl> + <nl> + ColumnSize MergeTreeDataPartCompact : : getTotalColumnsSize ( ) const <nl> + { <nl> + ColumnSize total_size ; <nl> + auto bin_checksum = checksums . files . find ( DATA_FILE_NAME_WITH_EXTENSION ) ; <nl> + if ( bin_checksum ! = checksums . files . end ( ) ) <nl> + { <nl> + total_size . data_compressed + = bin_checksum - > second . file_size ; <nl> + total_size . data_uncompressed + = bin_checksum - > second . uncompressed_size ; <nl> + } <nl> + <nl> + auto mrk_checksum = checksums . files . find ( DATA_FILE_NAME + index_granularity_info . 
marks_file_extension ) ; <nl> + if ( mrk_checksum ! = checksums . files . end ( ) ) <nl> + total_size . marks + = mrk_checksum - > second . file_size ; <nl> + <nl> + return total_size ; <nl> + } <nl> + <nl> + void MergeTreeDataPartCompact : : loadIndexGranularity ( ) <nl> + { <nl> + String full_path = getFullPath ( ) ; <nl> + <nl> + if ( columns . empty ( ) ) <nl> + throw Exception ( " No columns in part " + name , ErrorCodes : : NO_FILE_IN_DATA_PART ) ; <nl> + <nl> + if ( ! index_granularity_info . is_adaptive ) <nl> + throw Exception ( " MergeTreeDataPartCompact cannot be created with non - adaptive granulary . " , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> + <nl> + std : : string marks_file_path = index_granularity_info . getMarksFilePath ( full_path + " data " ) ; <nl> + if ( ! Poco : : File ( marks_file_path ) . exists ( ) ) <nl> + throw Exception ( " Marks file ' " + marks_file_path + " ' doesn ' t exist " , ErrorCodes : : NO_FILE_IN_DATA_PART ) ; <nl> + <nl> + size_t marks_file_size = Poco : : File ( marks_file_path ) . getSize ( ) ; <nl> + <nl> + ReadBufferFromFile buffer ( marks_file_path , marks_file_size ) ; <nl> + while ( ! buffer . eof ( ) ) <nl> + { <nl> + / / / Skip offsets for columns <nl> + buffer . seek ( columns . size ( ) * sizeof ( MarkInCompressedFile ) , SEEK_CUR ) ; <nl> + size_t granularity ; <nl> + readIntBinary ( granularity , buffer ) ; <nl> + index_granularity . appendMark ( granularity ) ; <nl> + } <nl> + <nl> + if ( index_granularity . getMarksCount ( ) * index_granularity_info . getMarkSizeInBytes ( columns . size ( ) ) ! = marks_file_size ) <nl> + throw Exception ( " Cannot read all marks from file " + marks_file_path , ErrorCodes : : CANNOT_READ_ALL_DATA ) ; <nl> + <nl> + index_granularity . setInitialized ( ) ; <nl> + } <nl> + <nl> + bool MergeTreeDataPartCompact : : hasColumnFiles ( const String & column_name , const IDataType & ) const <nl> + { <nl> + if ( ! getColumnPosition ( column_name ) ) <nl> + return false ; <nl> + <nl> + auto bin_checksum = checksums . files . find ( DATA_FILE_NAME_WITH_EXTENSION ) ; <nl> + auto mrk_checksum = checksums . files . find ( DATA_FILE_NAME + index_granularity_info . marks_file_extension ) ; <nl> + <nl> + return ( bin_checksum ! = checksums . files . end ( ) & & mrk_checksum ! = checksums . files . end ( ) ) ; <nl> + } <nl> + <nl> + NameToNameMap MergeTreeDataPartCompact : : createRenameMapForAlter ( <nl> + AlterAnalysisResult & analysis_result , <nl> + const NamesAndTypesList & / * old_columns * / ) const <nl> + { <nl> + const auto & part_mrk_file_extension = index_granularity_info . marks_file_extension ; <nl> + NameToNameMap rename_map ; <nl> + <nl> + for ( const auto & index_name : analysis_result . removed_indices ) <nl> + { <nl> + rename_map [ " skp_idx_ " + index_name + " . idx " ] = " " ; <nl> + rename_map [ " skp_idx_ " + index_name + part_mrk_file_extension ] = " " ; <nl> + } <nl> + <nl> + / / / We have to rewrite all data if any column has been changed . <nl> + if ( ! analysis_result . removed_columns . empty ( ) | | ! analysis_result . conversions . empty ( ) ) <nl> + { <nl> + if ( ! analysis_result . expression ) <nl> + analysis_result . expression = std : : make_shared < ExpressionActions > ( NamesAndTypesList ( ) , storage . global_context ) ; <nl> + <nl> + NameSet altered_columns ; <nl> + NamesWithAliases projection ; <nl> + <nl> + for ( const auto & column : analysis_result . removed_columns ) <nl> + altered_columns . insert ( column . 
name ) ; <nl> + <nl> + for ( const auto & [ source_name , result_name ] : analysis_result . conversions ) <nl> + { <nl> + altered_columns . insert ( source_name ) ; <nl> + projection . emplace_back ( result_name , source_name ) ; <nl> + } <nl> + <nl> + / / / Add other part columns to read <nl> + for ( const auto & column : columns ) <nl> + { <nl> + if ( ! altered_columns . count ( column . name ) ) <nl> + { <nl> + analysis_result . expression - > addInput ( column ) ; <nl> + projection . emplace_back ( column . name , " " ) ; <nl> + } <nl> + } <nl> + <nl> + analysis_result . expression - > add ( ExpressionAction : : project ( projection ) ) ; <nl> + <nl> + String data_temp_name = String ( DATA_FILE_NAME ) + TEMP_FILE_SUFFIX ; <nl> + rename_map [ data_temp_name + DATA_FILE_EXTENSION ] = DATA_FILE_NAME_WITH_EXTENSION ; <nl> + rename_map [ data_temp_name + part_mrk_file_extension ] = DATA_FILE_NAME + part_mrk_file_extension ; <nl> + } <nl> + <nl> + return rename_map ; <nl> + } <nl> + <nl> + void MergeTreeDataPartCompact : : checkConsistency ( bool require_part_metadata ) const <nl> + { <nl> + checkConsistencyBase ( ) ; <nl> + String path = getFullPath ( ) ; <nl> + String mrk_file_name = DATA_FILE_NAME + index_granularity_info . marks_file_extension ; <nl> + <nl> + if ( ! checksums . empty ( ) ) <nl> + { <nl> + / / / count . txt should be present even in non custom - partitioned parts <nl> + if ( ! checksums . files . count ( " count . txt " ) ) <nl> + throw Exception ( " No checksum for count . txt " , ErrorCodes : : NO_FILE_IN_DATA_PART ) ; <nl> + <nl> + if ( require_part_metadata ) <nl> + { <nl> + if ( ! checksums . files . count ( mrk_file_name ) ) <nl> + throw Exception ( " No marks file checksum for column in part " + path , ErrorCodes : : NO_FILE_IN_DATA_PART ) ; <nl> + if ( ! checksums . files . count ( DATA_FILE_NAME_WITH_EXTENSION ) ) <nl> + throw Exception ( " No data file checksum for in part " + path , ErrorCodes : : NO_FILE_IN_DATA_PART ) ; <nl> + } <nl> + } <nl> + else <nl> + { <nl> + { <nl> + / / / count . txt should be present even in non custom - partitioned parts <nl> + Poco : : File file ( path + " count . txt " ) ; <nl> + if ( ! file . exists ( ) | | file . getSize ( ) = = 0 ) <nl> + throw Exception ( " Part " + path + " is broken : " + file . path ( ) + " is empty " , ErrorCodes : : BAD_SIZE_OF_FILE_IN_DATA_PART ) ; <nl> + } <nl> + <nl> + / / / Check that marks are nonempty and have the consistent size with columns number . <nl> + Poco : : File file ( path + mrk_file_name ) ; <nl> + <nl> + if ( file . exists ( ) ) <nl> + { <nl> + UInt64 file_size = file . getSize ( ) ; <nl> + if ( ! file_size ) <nl> + throw Exception ( " Part " + path + " is broken : " + file . path ( ) + " is empty . " , <nl> + ErrorCodes : : BAD_SIZE_OF_FILE_IN_DATA_PART ) ; <nl> + <nl> + UInt64 expected_file_size = index_granularity_info . getMarkSizeInBytes ( columns . size ( ) ) * index_granularity . getMarksCount ( ) ; <nl> + if ( expected_file_size ! = file_size ) <nl> + throw Exception ( <nl> + " Part " + path + " is broken : bad size of marks file ' " + file . path ( ) + " ' : " + std : : to_string ( file_size ) + " , must be : " + std : : to_string ( expected_file_size ) , <nl> + ErrorCodes : : BAD_SIZE_OF_FILE_IN_DATA_PART ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + MergeTreeDataPartCompact : : ~ MergeTreeDataPartCompact ( ) <nl> + { <nl> + removeIfNeeded ( ) ; <nl> + } <nl> + <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 
d75e5befe36 <nl> mmm / dev / null <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeDataPartCompact . h <nl> <nl> + # pragma once <nl> + <nl> + # include < Storages / MergeTree / IMergeTreeDataPart . h > <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + / * * In compact format all columns are stored in one file ( ` data . bin ` ) . <nl> + * Data is split into granules and columns are serialized sequentially within one granule . <nl> + * Granules are written one by one to the data file . <nl> + * Marks are also stored in a single file ( ` data . mrk3 ` ) . <nl> + * In compact format one mark is an array of marks for every column plus the number of rows in the granule . <nl> + * The format of the other data part files is not changed . <nl> + * The compact format is intended only for small parts ( up to 10M ) . <nl> + * NOTE : Compact parts aren ' t supported for tables with non - adaptive granularity . <nl> + * NOTE : In a compact part the compressed and uncompressed sizes of a single column are unknown . <nl> + * / <nl> + class MergeTreeDataPartCompact : public IMergeTreeDataPart <nl> + { <nl> + public : <nl> + static constexpr auto DATA_FILE_NAME = " data " ; <nl> + static constexpr auto DATA_FILE_EXTENSION = " . bin " ; <nl> + static constexpr auto TEMP_FILE_SUFFIX = " _temp " ; <nl> + static constexpr auto DATA_FILE_NAME_WITH_EXTENSION = " data . bin " ; <nl> + <nl> + MergeTreeDataPartCompact ( <nl> + const MergeTreeData & storage_ , <nl> + const String & name_ , <nl> + const MergeTreePartInfo & info_ , <nl> + const DiskPtr & disk_ , <nl> + const std : : optional < String > & relative_path_ = { } ) ; <nl> + <nl> + MergeTreeDataPartCompact ( <nl> + MergeTreeData & storage_ , <nl> + const String & name_ , <nl> + const DiskPtr & disk_ , <nl> + const std : : optional < String > & relative_path_ = { } ) ; <nl> + <nl> + MergeTreeReaderPtr getReader ( <nl> + const NamesAndTypesList & columns , <nl> + const MarkRanges & mark_ranges , <nl> + UncompressedCache * uncompressed_cache , <nl> + MarkCache * mark_cache , <nl> + const MergeTreeReaderSettings & reader_settings_ , <nl> + const ValueSizeMap & avg_value_size_hints = ValueSizeMap { } , <nl> + const ReadBufferFromFileBase : : ProfileCallback & profile_callback = ReadBufferFromFileBase : : ProfileCallback { } ) const override ; <nl> + <nl> + MergeTreeWriterPtr getWriter ( <nl> + const NamesAndTypesList & columns_list , <nl> + const std : : vector < MergeTreeIndexPtr > & indices_to_recalc , <nl> + const CompressionCodecPtr & default_codec_ , <nl> + const MergeTreeWriterSettings & writer_settings , <nl> + const MergeTreeIndexGranularity & computed_index_granularity = { } ) const override ; <nl> + <nl> + bool isStoredOnDisk ( ) const override { return true ; } <nl> + <nl> + ColumnSize getTotalColumnsSize ( ) const override ; <nl> + <nl> + bool hasColumnFiles ( const String & column_name , const IDataType & type ) const override ; <nl> + <nl> + String getFileNameForColumn ( const NameAndTypePair & / * column * / ) const override { return DATA_FILE_NAME ; } <nl> + <nl> + NameToNameMap createRenameMapForAlter ( <nl> + AlterAnalysisResult & analysis_result , <nl> + const NamesAndTypesList & old_columns ) const override ; <nl> + <nl> + ~ MergeTreeDataPartCompact ( ) override ; <nl> + <nl> + private : <nl> + void checkConsistency ( bool require_part_metadata ) const override ; <nl> + <nl> + / / / Loads marks index granularity into memory <nl> + void loadIndexGranularity ( ) override ; <nl> + } ; <nl> + <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . .
d08f485d214 <nl> mmm / dev / null <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeDataPartType . cpp <nl> <nl> + # include < Storages / MergeTree / MergeTreeDataPartType . h > <nl> + # include < Common / Exception . h > <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + namespace ErrorCodes <nl> + { <nl> + extern const int UNKNOWN_PART_TYPE ; <nl> + } <nl> + <nl> + void MergeTreeDataPartType : : fromString ( const String & str ) <nl> + { <nl> + if ( str = = " Wide " ) <nl> + value = WIDE ; <nl> + else if ( str = = " Compact " ) <nl> + value = COMPACT ; <nl> + else if ( str = = " InMemory " ) <nl> + value = IN_MEMORY ; <nl> + else <nl> + throw DB : : Exception ( " Unexpected string for part type : " + str , ErrorCodes : : UNKNOWN_PART_TYPE ) ; <nl> + } <nl> + <nl> + String MergeTreeDataPartType : : toString ( ) const <nl> + { <nl> + switch ( value ) <nl> + { <nl> + case WIDE : <nl> + return " Wide " ; <nl> + case COMPACT : <nl> + return " Compact " ; <nl> + case IN_MEMORY : <nl> + return " InMemory " ; <nl> + default : <nl> + return " Unknown " ; <nl> + } <nl> + } <nl> + <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . bb87918d3a5 <nl> mmm / dev / null <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeDataPartType . h <nl> <nl> + # pragma once <nl> + <nl> + # include < Core / Types . h > <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + / / / Types of data part format . <nl> + class MergeTreeDataPartType <nl> + { <nl> + public : <nl> + enum Value <nl> + { <nl> + / / / Data of each column is stored in one or several ( for complex types ) files . <nl> + / / / Every data file is followed by marks file . <nl> + WIDE , <nl> + <nl> + / / / Data of all columns is stored in one file . Marks are also stored in single file . <nl> + COMPACT , <nl> + <nl> + / / / Format with buffering data in RAM . Not implemented yet . <nl> + IN_MEMORY , <nl> + <nl> + UNKNOWN , <nl> + } ; <nl> + <nl> + MergeTreeDataPartType ( ) : value ( UNKNOWN ) { } <nl> + MergeTreeDataPartType ( Value value_ ) : value ( value_ ) { } <nl> + <nl> + bool operator = = ( const MergeTreeDataPartType & other ) const <nl> + { <nl> + return value = = other . value ; <nl> + } <nl> + <nl> + bool operator ! = ( const MergeTreeDataPartType & other ) const <nl> + { <nl> + return ! ( * this = = other ) ; <nl> + } <nl> + <nl> + void fromString ( const String & str ) ; <nl> + String toString ( ) const ; <nl> + <nl> + private : <nl> + Value value ; <nl> + } ; <nl> + <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 740071d17a2 <nl> mmm / dev / null <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeDataPartWide . cpp <nl> <nl> + # include " MergeTreeDataPartWide . h " <nl> + # include < Poco / File . h > <nl> + # include < Storages / MergeTree / MergeTreeReaderWide . h > <nl> + # include < Storages / MergeTree / MergeTreeDataPartWriterWide . h > <nl> + # include < Storages / MergeTree / IMergeTreeDataPartWriter . 
h > <nl> + <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + namespace ErrorCodes <nl> + { <nl> + extern const int NO_FILE_IN_DATA_PART ; <nl> + extern const int BAD_SIZE_OF_FILE_IN_DATA_PART ; <nl> + } <nl> + <nl> + <nl> + MergeTreeDataPartWide : : MergeTreeDataPartWide ( <nl> + MergeTreeData & storage_ , <nl> + const String & name_ , <nl> + const DiskPtr & disk_ , <nl> + const std : : optional < String > & relative_path_ ) <nl> + : IMergeTreeDataPart ( storage_ , name_ , disk_ , relative_path_ , Type : : WIDE ) <nl> + { <nl> + } <nl> + <nl> + MergeTreeDataPartWide : : MergeTreeDataPartWide ( <nl> + const MergeTreeData & storage_ , <nl> + const String & name_ , <nl> + const MergeTreePartInfo & info_ , <nl> + const DiskPtr & disk_ , <nl> + const std : : optional < String > & relative_path_ ) <nl> + : IMergeTreeDataPart ( storage_ , name_ , info_ , disk_ , relative_path_ , Type : : WIDE ) <nl> + { <nl> + } <nl> + <nl> + IMergeTreeDataPart : : MergeTreeReaderPtr MergeTreeDataPartWide : : getReader ( <nl> + const NamesAndTypesList & columns_to_read , <nl> + const MarkRanges & mark_ranges , <nl> + UncompressedCache * uncompressed_cache , <nl> + MarkCache * mark_cache , <nl> + const MergeTreeReaderSettings & reader_settings , <nl> + const ValueSizeMap & avg_value_size_hints , <nl> + const ReadBufferFromFileBase : : ProfileCallback & profile_callback ) const <nl> + { <nl> + auto ptr = std : : static_pointer_cast < const MergeTreeDataPartWide > ( shared_from_this ( ) ) ; <nl> + return std : : make_unique < MergeTreeReaderWide > ( <nl> + ptr , columns_to_read , uncompressed_cache , <nl> + mark_cache , mark_ranges , reader_settings , <nl> + avg_value_size_hints , profile_callback ) ; <nl> + } <nl> + <nl> + IMergeTreeDataPart : : MergeTreeWriterPtr MergeTreeDataPartWide : : getWriter ( <nl> + const NamesAndTypesList & columns_list , <nl> + const std : : vector < MergeTreeIndexPtr > & indices_to_recalc , <nl> + const CompressionCodecPtr & default_codec , <nl> + const MergeTreeWriterSettings & writer_settings , <nl> + const MergeTreeIndexGranularity & computed_index_granularity ) const <nl> + { <nl> + return std : : make_unique < MergeTreeDataPartWriterWide > ( <nl> + getFullPath ( ) , storage , columns_list , indices_to_recalc , <nl> + index_granularity_info . marks_file_extension , <nl> + default_codec , writer_settings , computed_index_granularity ) ; <nl> + } <nl> + <nl> + <nl> + / / / Takes into account the fact that several columns can e . g . share their . size substreams . <nl> + / / / When calculating totals these should be counted only once . <nl> + ColumnSize MergeTreeDataPartWide : : getColumnSizeImpl ( <nl> + const String & column_name , const IDataType & type , std : : unordered_set < String > * processed_substreams ) const <nl> + { <nl> + ColumnSize size ; <nl> + if ( checksums . empty ( ) ) <nl> + return size ; <nl> + <nl> + type . enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> + { <nl> + String file_name = IDataType : : getFileNameForStream ( column_name , substream_path ) ; <nl> + <nl> + if ( processed_substreams & & ! processed_substreams - > insert ( file_name ) . second ) <nl> + return ; <nl> + <nl> + auto bin_checksum = checksums . files . find ( file_name + " . bin " ) ; <nl> + if ( bin_checksum ! = checksums . files . end ( ) ) <nl> + { <nl> + size . data_compressed + = bin_checksum - > second . file_size ; <nl> + size . data_uncompressed + = bin_checksum - > second . 
uncompressed_size ; <nl> + } <nl> + <nl> + auto mrk_checksum = checksums . files . find ( file_name + index_granularity_info . marks_file_extension ) ; <nl> + if ( mrk_checksum ! = checksums . files . end ( ) ) <nl> + size . marks + = mrk_checksum - > second . file_size ; <nl> + } , { } ) ; <nl> + <nl> + return size ; <nl> + } <nl> + <nl> + ColumnSize MergeTreeDataPartWide : : getTotalColumnsSize ( ) const <nl> + { <nl> + ColumnSize totals ; <nl> + std : : unordered_set < String > processed_substreams ; <nl> + for ( const NameAndTypePair & column : columns ) <nl> + { <nl> + ColumnSize size = getColumnSizeImpl ( column . name , * column . type , & processed_substreams ) ; <nl> + totals . add ( size ) ; <nl> + } <nl> + return totals ; <nl> + } <nl> + <nl> + ColumnSize MergeTreeDataPartWide : : getColumnSize ( const String & column_name , const IDataType & type ) const <nl> + { <nl> + return getColumnSizeImpl ( column_name , type , nullptr ) ; <nl> + } <nl> + <nl> + void MergeTreeDataPartWide : : loadIndexGranularity ( ) <nl> + { <nl> + String full_path = getFullPath ( ) ; <nl> + index_granularity_info . changeGranularityIfRequired ( full_path ) ; <nl> + <nl> + <nl> + if ( columns . empty ( ) ) <nl> + throw Exception ( " No columns in part " + name , ErrorCodes : : NO_FILE_IN_DATA_PART ) ; <nl> + <nl> + / / / We can use any column , it doesn ' t matter <nl> + std : : string marks_file_path = index_granularity_info . getMarksFilePath ( full_path + getFileNameForColumn ( columns . front ( ) ) ) ; <nl> + if ( ! Poco : : File ( marks_file_path ) . exists ( ) ) <nl> + throw Exception ( " Marks file ' " + marks_file_path + " ' doesn ' t exist " , ErrorCodes : : NO_FILE_IN_DATA_PART ) ; <nl> + <nl> + size_t marks_file_size = Poco : : File ( marks_file_path ) . getSize ( ) ; <nl> + <nl> + if ( ! index_granularity_info . is_adaptive ) <nl> + { <nl> + size_t marks_count = marks_file_size / index_granularity_info . getMarkSizeInBytes ( ) ; <nl> + index_granularity . resizeWithFixedGranularity ( marks_count , index_granularity_info . fixed_index_granularity ) ; / / / all the same <nl> + } <nl> + else <nl> + { <nl> + ReadBufferFromFile buffer ( marks_file_path , marks_file_size , - 1 ) ; <nl> + while ( ! buffer . eof ( ) ) <nl> + { <nl> + buffer . seek ( sizeof ( size_t ) * 2 , SEEK_CUR ) ; / / / skip offset_in_compressed file and offset_in_decompressed_block <nl> + size_t granularity ; <nl> + readIntBinary ( granularity , buffer ) ; <nl> + index_granularity . appendMark ( granularity ) ; <nl> + } <nl> + <nl> + if ( index_granularity . getMarksCount ( ) * index_granularity_info . getMarkSizeInBytes ( ) ! = marks_file_size ) <nl> + throw Exception ( " Cannot read all marks from file " + marks_file_path , ErrorCodes : : CANNOT_READ_ALL_DATA ) ; <nl> + } <nl> + <nl> + index_granularity . setInitialized ( ) ; <nl> + } <nl> + <nl> + MergeTreeDataPartWide : : ~ MergeTreeDataPartWide ( ) <nl> + { <nl> + removeIfNeeded ( ) ; <nl> + } <nl> + <nl> + void MergeTreeDataPartWide : : accumulateColumnSizes ( ColumnToSize & column_to_size ) const <nl> + { <nl> + std : : shared_lock < std : : shared_mutex > part_lock ( columns_lock ) ; <nl> + <nl> + for ( const NameAndTypePair & name_type : storage . getColumns ( ) . getAllPhysical ( ) ) <nl> + { <nl> + IDataType : : SubstreamPath path ; <nl> + name_type . type - > enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> + { <nl> + Poco : : File bin_file ( getFullPath ( ) + IDataType : : getFileNameForStream ( name_type . 
name , substream_path ) + " . bin " ) ; <nl> + if ( bin_file . exists ( ) ) <nl> + column_to_size [ name_type . name ] + = bin_file . getSize ( ) ; <nl> + } , path ) ; <nl> + } <nl> + } <nl> + <nl> + void MergeTreeDataPartWide : : checkConsistency ( bool require_part_metadata ) const <nl> + { <nl> + checkConsistencyBase ( ) ; <nl> + String path = getFullPath ( ) ; <nl> + <nl> + if ( ! checksums . empty ( ) ) <nl> + { <nl> + if ( require_part_metadata ) <nl> + { <nl> + for ( const NameAndTypePair & name_type : columns ) <nl> + { <nl> + IDataType : : SubstreamPath stream_path ; <nl> + name_type . type - > enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> + { <nl> + String file_name = IDataType : : getFileNameForStream ( name_type . name , substream_path ) ; <nl> + String mrk_file_name = file_name + index_granularity_info . marks_file_extension ; <nl> + String bin_file_name = file_name + " . bin " ; <nl> + if ( ! checksums . files . count ( mrk_file_name ) ) <nl> + throw Exception ( " No " + mrk_file_name + " file checksum for column " + name_type . name + " in part " + path , <nl> + ErrorCodes : : NO_FILE_IN_DATA_PART ) ; <nl> + if ( ! checksums . files . count ( bin_file_name ) ) <nl> + throw Exception ( " No " + bin_file_name + " file checksum for column " + name_type . name + " in part " + path , <nl> + ErrorCodes : : NO_FILE_IN_DATA_PART ) ; <nl> + } , stream_path ) ; <nl> + } <nl> + } <nl> + <nl> + } <nl> + else <nl> + { <nl> + / / / Check that all marks are nonempty and have the same size . <nl> + std : : optional < UInt64 > marks_size ; <nl> + for ( const NameAndTypePair & name_type : columns ) <nl> + { <nl> + name_type . type - > enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> + { <nl> + Poco : : File file ( IDataType : : getFileNameForStream ( name_type . name , substream_path ) + index_granularity_info . marks_file_extension ) ; <nl> + <nl> + / / / Missing file is Ok for case when new column was added . <nl> + if ( file . exists ( ) ) <nl> + { <nl> + UInt64 file_size = file . getSize ( ) ; <nl> + <nl> + if ( ! file_size ) <nl> + throw Exception ( " Part " + path + " is broken : " + file . path ( ) + " is empty . " , <nl> + ErrorCodes : : BAD_SIZE_OF_FILE_IN_DATA_PART ) ; <nl> + <nl> + if ( ! marks_size ) <nl> + marks_size = file_size ; <nl> + else if ( file_size ! = * marks_size ) <nl> + throw Exception ( " Part " + path + " is broken : marks have different sizes . " , <nl> + ErrorCodes : : BAD_SIZE_OF_FILE_IN_DATA_PART ) ; <nl> + } <nl> + } ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + bool MergeTreeDataPartWide : : hasColumnFiles ( const String & column_name , const IDataType & type ) const <nl> + { <nl> + bool res = true ; <nl> + <nl> + type . enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> + { <nl> + String file_name = IDataType : : getFileNameForStream ( column_name , substream_path ) ; <nl> + <nl> + auto bin_checksum = checksums . files . find ( file_name + " . bin " ) ; <nl> + auto mrk_checksum = checksums . files . find ( file_name + index_granularity_info . marks_file_extension ) ; <nl> + <nl> + if ( bin_checksum = = checksums . files . end ( ) | | mrk_checksum = = checksums . files . 
end ( ) ) <nl> + res = false ; <nl> + } , { } ) ; <nl> + <nl> + return res ; <nl> + } <nl> + <nl> + NameToNameMap MergeTreeDataPartWide : : createRenameMapForAlter ( <nl> + AlterAnalysisResult & analysis_result , <nl> + const NamesAndTypesList & old_columns ) const <nl> + { <nl> + const auto & part_mrk_file_extension = index_granularity_info . marks_file_extension ; <nl> + NameToNameMap rename_map ; <nl> + <nl> + for ( const auto & index_name : analysis_result . removed_indices ) <nl> + { <nl> + rename_map [ " skp_idx_ " + index_name + " . idx " ] = " " ; <nl> + rename_map [ " skp_idx_ " + index_name + part_mrk_file_extension ] = " " ; <nl> + } <nl> + <nl> + / / / Collect counts for shared streams of different columns . As an example , Nested columns have shared stream with array sizes . <nl> + std : : map < String , size_t > stream_counts ; <nl> + for ( const NameAndTypePair & column : old_columns ) <nl> + { <nl> + column . type - > enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> + { <nl> + + + stream_counts [ IDataType : : getFileNameForStream ( column . name , substream_path ) ] ; <nl> + } , { } ) ; <nl> + } <nl> + <nl> + for ( const auto & column : analysis_result . removed_columns ) <nl> + { <nl> + if ( hasColumnFiles ( column . name , * column . type ) ) <nl> + { <nl> + column . type - > enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> + { <nl> + String file_name = IDataType : : getFileNameForStream ( column . name , substream_path ) ; <nl> + <nl> + / / / Delete files if they are no longer shared with another column . <nl> + if ( - - stream_counts [ file_name ] = = 0 ) <nl> + { <nl> + rename_map [ file_name + " . bin " ] = " " ; <nl> + rename_map [ file_name + part_mrk_file_extension ] = " " ; <nl> + } <nl> + } , { } ) ; <nl> + } <nl> + } <nl> + <nl> + if ( ! analysis_result . conversions . empty ( ) ) <nl> + { <nl> + / / / Give proper names for temporary columns with conversion results . <nl> + NamesWithAliases projection ; <nl> + projection . reserve ( analysis_result . conversions . size ( ) ) ; <nl> + for ( const auto & source_and_expression : analysis_result . conversions ) <nl> + { <nl> + / / / Column name for temporary filenames before renaming . NOTE The is unnecessarily tricky . <nl> + const auto & source_name = source_and_expression . first ; <nl> + String temporary_column_name = source_name + " converting " ; <nl> + <nl> + projection . emplace_back ( source_and_expression . second , temporary_column_name ) ; <nl> + <nl> + / / / After conversion , we need to rename temporary files into original . <nl> + analysis_result . new_types . at ( source_name ) - > enumerateStreams ( <nl> + [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> + { <nl> + / / / Skip array sizes , because they cannot be modified in ALTER . <nl> + if ( ! substream_path . empty ( ) & & substream_path . back ( ) . type = = IDataType : : Substream : : ArraySizes ) <nl> + return ; <nl> + <nl> + String original_file_name = IDataType : : getFileNameForStream ( source_name , substream_path ) ; <nl> + String temporary_file_name = IDataType : : getFileNameForStream ( temporary_column_name , substream_path ) ; <nl> + <nl> + rename_map [ temporary_file_name + " . bin " ] = original_file_name + " . bin " ; <nl> + rename_map [ temporary_file_name + part_mrk_file_extension ] = original_file_name + part_mrk_file_extension ; <nl> + } , { } ) ; <nl> + } <nl> + <nl> + analysis_result . 
expression - > add ( ExpressionAction : : project ( projection ) ) ; <nl> + } <nl> + <nl> + return rename_map ; <nl> + } <nl> + <nl> + String MergeTreeDataPartWide : : getFileNameForColumn ( const NameAndTypePair & column ) const <nl> + { <nl> + String filename ; <nl> + column . type - > enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> + { <nl> + if ( filename . empty ( ) ) <nl> + filename = IDataType : : getFileNameForStream ( column . name , substream_path ) ; <nl> + } ) ; <nl> + return filename ; <nl> + } <nl> + <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . c0c7e45b7ef <nl> mmm / dev / null <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeDataPartWide . h <nl> <nl> + # pragma once <nl> + <nl> + # include < Storages / MergeTree / IMergeTreeDataPart . h > <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + / * * In the wide format the data of each column is stored in one or several ( for complex types ) files . <nl> + * Every data file is accompanied by a marks file . <nl> + * Can be used in tables with both adaptive and non - adaptive granularity . <nl> + * This is the regular format of parts for MergeTree and is suitable for big parts , as it is the most efficient . <nl> + * A data part is created in the wide format if its uncompressed size in bytes exceeds ` min_bytes_for_wide_part ` <nl> + * or its number of rows exceeds ` min_rows_for_wide_part ` . <nl> + * / <nl> + class MergeTreeDataPartWide : public IMergeTreeDataPart <nl> + { <nl> + public : <nl> + MergeTreeDataPartWide ( <nl> + const MergeTreeData & storage_ , <nl> + const String & name_ , <nl> + const MergeTreePartInfo & info_ , <nl> + const DiskPtr & disk , <nl> + const std : : optional < String > & relative_path = { } ) ; <nl> + <nl> + MergeTreeDataPartWide ( <nl> + MergeTreeData & storage_ , <nl> + const String & name_ , <nl> + const DiskPtr & disk , <nl> + const std : : optional < String > & relative_path = { } ) ; <nl> + <nl> + MergeTreeReaderPtr getReader ( <nl> + const NamesAndTypesList & columns , <nl> + const MarkRanges & mark_ranges , <nl> + UncompressedCache * uncompressed_cache , <nl> + MarkCache * mark_cache , <nl> + const MergeTreeReaderSettings & reader_settings_ , <nl> + const ValueSizeMap & avg_value_size_hints = ValueSizeMap { } , <nl> + const ReadBufferFromFileBase : : ProfileCallback & profile_callback = ReadBufferFromFileBase : : ProfileCallback { } ) const override ; <nl> + <nl> + MergeTreeWriterPtr getWriter ( <nl> + const NamesAndTypesList & columns_list , <nl> + const std : : vector < MergeTreeIndexPtr > & indices_to_recalc , <nl> + const CompressionCodecPtr & default_codec_ , <nl> + const MergeTreeWriterSettings & writer_settings , <nl> + const MergeTreeIndexGranularity & computed_index_granularity = { } ) const override ; <nl> + <nl> + bool isStoredOnDisk ( ) const override { return true ; } <nl> + <nl> + bool supportsVerticalMerge ( ) const override { return true ; } <nl> + <nl> + void accumulateColumnSizes ( ColumnToSize & column_to_size ) const override ; <nl> + <nl> + String getFileNameForColumn ( const NameAndTypePair & column ) const override ; <nl> + <nl> + ColumnSize getTotalColumnsSize ( ) const override ; <nl> + <nl> + ColumnSize getColumnSize ( const String & column_name , const IDataType & type ) const override ; <nl> + <nl> + NameToNameMap createRenameMapForAlter ( <nl> + AlterAnalysisResult & analysis_result , <nl> + const NamesAndTypesList & old_columns ) const override ; <nl> + <nl> + ~ MergeTreeDataPartWide ( ) override ; <nl> + 
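The header comment above describes when a part is written in the wide format: as soon as its uncompressed size in bytes or its number of rows crosses the ` min_bytes_for_wide_part ` / ` min_rows_for_wide_part ` thresholds. A minimal standalone sketch of that rule follows; it is not the actual MergeTreeData implementation, the `>=` comparison and the values used in main are assumptions made for illustration only.

#include <cstddef>
#include <iostream>

enum class PartType { Compact, Wide };

// Mirrors the rule from the header comment above; thresholds are passed in explicitly.
PartType choosePartTypeSketch(size_t bytes_uncompressed, size_t rows,
                              size_t min_bytes_for_wide_part, size_t min_rows_for_wide_part)
{
    // Wide as soon as either threshold is reached, otherwise the part can stay compact.
    if (bytes_uncompressed >= min_bytes_for_wide_part || rows >= min_rows_for_wide_part)
        return PartType::Wide;
    return PartType::Compact;
}

int main()
{
    // Hypothetical thresholds: 10 MiB or 8192 rows.
    const size_t min_bytes = 10 << 20;
    const size_t min_rows = 8192;
    std::cout << (choosePartTypeSketch(64 << 20, 100000, min_bytes, min_rows) == PartType::Wide) << '\n';   // 1
    std::cout << (choosePartTypeSketch(1 << 20, 1000, min_bytes, min_rows) == PartType::Compact) << '\n';   // 1
}

In the patch itself this decision is made by data . choosePartType ( expected_size , block . rows ( ) ) in MergeTreeDataWriter : : writeTempPart further down in this diff.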
<nl> + bool hasColumnFiles ( const String & column , const IDataType & type ) const override ; <nl> + <nl> + private : <nl> + void checkConsistency ( bool require_part_metadata ) const override ; <nl> + <nl> + / / / Loads marks index granularity into memory <nl> + void loadIndexGranularity ( ) override ; <nl> + <nl> + ColumnSize getColumnSizeImpl ( const String & name , const IDataType & type , std : : unordered_set < String > * processed_substreams ) const ; <nl> + } ; <nl> + <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 7d0c16f2729 <nl> mmm / dev / null <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeDataPartWriterCompact . cpp <nl> <nl> + # include < Storages / MergeTree / MergeTreeDataPartWriterCompact . h > <nl> + # include < Storages / MergeTree / MergeTreeDataPartCompact . h > <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + <nl> + MergeTreeDataPartWriterCompact : : MergeTreeDataPartWriterCompact ( <nl> + const String & part_path_ , <nl> + const MergeTreeData & storage_ , <nl> + const NamesAndTypesList & columns_list_ , <nl> + const std : : vector < MergeTreeIndexPtr > & indices_to_recalc_ , <nl> + const String & marks_file_extension_ , <nl> + const CompressionCodecPtr & default_codec_ , <nl> + const MergeTreeWriterSettings & settings_ , <nl> + const MergeTreeIndexGranularity & index_granularity_ ) <nl> + : IMergeTreeDataPartWriter ( part_path_ , <nl> + storage_ , columns_list_ , <nl> + indices_to_recalc_ , marks_file_extension_ , <nl> + default_codec_ , settings_ , index_granularity_ , true ) <nl> + { <nl> + using DataPart = MergeTreeDataPartCompact ; <nl> + String data_file_name = DataPart : : DATA_FILE_NAME ; <nl> + if ( settings . is_writing_temp_files ) <nl> + data_file_name + = DataPart : : TEMP_FILE_SUFFIX ; <nl> + <nl> + stream = std : : make_unique < Stream > ( <nl> + data_file_name , <nl> + part_path + data_file_name , DataPart : : DATA_FILE_EXTENSION , <nl> + part_path + data_file_name , marks_file_extension , <nl> + default_codec , <nl> + settings . max_compress_block_size , <nl> + settings . estimated_size , <nl> + settings . aio_threshold ) ; <nl> + } <nl> + <nl> + void MergeTreeDataPartWriterCompact : : write ( <nl> + const Block & block , const IColumn : : Permutation * permutation , <nl> + const Block & primary_key_block , const Block & skip_indexes_block ) <nl> + { <nl> + / / / Fill index granularity for this block <nl> + / / / if it ' s unknown ( in case of insert data or horizontal merge , <nl> + / / / but not in case of vertical merge ) <nl> + if ( compute_granularity ) <nl> + fillIndexGranularity ( block ) ; <nl> + <nl> + Block result_block ; <nl> + <nl> + if ( permutation ) <nl> + { <nl> + for ( const auto & it : columns_list ) <nl> + { <nl> + if ( primary_key_block . has ( it . name ) ) <nl> + result_block . insert ( primary_key_block . getByName ( it . name ) ) ; <nl> + else if ( skip_indexes_block . has ( it . name ) ) <nl> + result_block . insert ( skip_indexes_block . getByName ( it . name ) ) ; <nl> + else <nl> + { <nl> + auto column = block . getByName ( it . name ) ; <nl> + column . column = column . column - > permute ( * permutation , 0 ) ; <nl> + result_block . insert ( column ) ; <nl> + } <nl> + } <nl> + } <nl> + else <nl> + { <nl> + result_block = block ; <nl> + } <nl> + <nl> + if ( ! header ) <nl> + header = result_block . cloneEmpty ( ) ; <nl> + <nl> + columns_buffer . add ( result_block . mutateColumns ( ) ) ; <nl> + size_t last_mark_rows = index_granularity . 
getLastMarkRows ( ) ; <nl> + size_t rows_in_buffer = columns_buffer . size ( ) ; <nl> + <nl> + if ( rows_in_buffer < last_mark_rows ) <nl> + { <nl> + / / / If it ' s not enough rows for granule , accumulate blocks <nl> + / / / and save how much rows we already have . <nl> + next_index_offset = last_mark_rows - rows_in_buffer ; <nl> + return ; <nl> + } <nl> + <nl> + writeBlock ( header . cloneWithColumns ( columns_buffer . releaseColumns ( ) ) ) ; <nl> + } <nl> + <nl> + void MergeTreeDataPartWriterCompact : : writeBlock ( const Block & block ) <nl> + { <nl> + size_t total_rows = block . rows ( ) ; <nl> + size_t from_mark = current_mark ; <nl> + size_t current_row = 0 ; <nl> + <nl> + while ( current_row < total_rows ) <nl> + { <nl> + size_t rows_to_write = index_granularity . getMarkRows ( from_mark ) ; <nl> + <nl> + if ( rows_to_write ) <nl> + data_written = true ; <nl> + <nl> + for ( const auto & column : columns_list ) <nl> + { <nl> + / / / There could already be enough data to compress into the new block . <nl> + if ( stream - > compressed . offset ( ) > = settings . min_compress_block_size ) <nl> + stream - > compressed . next ( ) ; <nl> + <nl> + writeIntBinary ( stream - > plain_hashing . count ( ) , stream - > marks ) ; <nl> + writeIntBinary ( stream - > compressed . offset ( ) , stream - > marks ) ; <nl> + <nl> + writeColumnSingleGranule ( block . getByName ( column . name ) , current_row , rows_to_write ) ; <nl> + } <nl> + <nl> + + + from_mark ; <nl> + size_t rows_written = total_rows - current_row ; <nl> + current_row + = rows_to_write ; <nl> + <nl> + / / / Correct last mark as it should contain exact amount of rows . <nl> + if ( current_row > = total_rows & & rows_written ! = rows_to_write ) <nl> + { <nl> + rows_to_write = rows_written ; <nl> + index_granularity . popMark ( ) ; <nl> + index_granularity . appendMark ( rows_written ) ; <nl> + } <nl> + <nl> + writeIntBinary ( rows_to_write , stream - > marks ) ; <nl> + } <nl> + <nl> + next_index_offset = 0 ; <nl> + next_mark = from_mark ; <nl> + } <nl> + <nl> + void MergeTreeDataPartWriterCompact : : writeColumnSingleGranule ( const ColumnWithTypeAndName & column , size_t from_row , size_t number_of_rows ) const <nl> + { <nl> + IDataType : : SerializeBinaryBulkStatePtr state ; <nl> + IDataType : : SerializeBinaryBulkSettings serialize_settings ; <nl> + <nl> + serialize_settings . getter = [ this ] ( IDataType : : SubstreamPath ) - > WriteBuffer * { return & stream - > compressed ; } ; <nl> + serialize_settings . position_independent_encoding = true ; <nl> + serialize_settings . low_cardinality_max_dictionary_size = 0 ; <nl> + <nl> + column . type - > serializeBinaryBulkStatePrefix ( serialize_settings , state ) ; <nl> + column . type - > serializeBinaryBulkWithMultipleStreams ( * column . column , from_row , number_of_rows , serialize_settings , state ) ; <nl> + column . type - > serializeBinaryBulkStateSuffix ( serialize_settings , state ) ; <nl> + } <nl> + <nl> + void MergeTreeDataPartWriterCompact : : finishDataSerialization ( IMergeTreeDataPart : : Checksums & checksums , bool sync ) <nl> + { <nl> + if ( columns_buffer . size ( ) ! = 0 ) <nl> + writeBlock ( header . cloneWithColumns ( columns_buffer . releaseColumns ( ) ) ) ; <nl> + <nl> + if ( with_final_mark & & data_written ) <nl> + { <nl> + for ( size_t i = 0 ; i < columns_list . size ( ) ; + + i ) <nl> + { <nl> + writeIntBinary ( stream - > plain_hashing . count ( ) , stream - > marks ) ; <nl> + writeIntBinary ( stream - > compressed . 
offset ( ) , stream - > marks ) ; <nl> + } <nl> + writeIntBinary ( 0ULL , stream - > marks ) ; <nl> + } <nl> + <nl> + stream - > finalize ( ) ; <nl> + if ( sync ) <nl> + stream - > sync ( ) ; <nl> + stream - > addToChecksums ( checksums ) ; <nl> + stream . reset ( ) ; <nl> + } <nl> + <nl> + void MergeTreeDataPartWriterCompact : : ColumnsBuffer : : add ( MutableColumns & & columns ) <nl> + { <nl> + if ( accumulated_columns . empty ( ) ) <nl> + accumulated_columns = std : : move ( columns ) ; <nl> + else <nl> + { <nl> + for ( size_t i = 0 ; i < columns . size ( ) ; + + i ) <nl> + accumulated_columns [ i ] - > insertRangeFrom ( * columns [ i ] , 0 , columns [ i ] - > size ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + Columns MergeTreeDataPartWriterCompact : : ColumnsBuffer : : releaseColumns ( ) <nl> + { <nl> + Columns res ( std : : make_move_iterator ( accumulated_columns . begin ( ) ) , <nl> + std : : make_move_iterator ( accumulated_columns . end ( ) ) ) ; <nl> + accumulated_columns . clear ( ) ; <nl> + return res ; <nl> + } <nl> + <nl> + size_t MergeTreeDataPartWriterCompact : : ColumnsBuffer : : size ( ) const <nl> + { <nl> + if ( accumulated_columns . empty ( ) ) <nl> + return 0 ; <nl> + return accumulated_columns . at ( 0 ) - > size ( ) ; <nl> + } <nl> + <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 2d9c67bd5df <nl> mmm / dev / null <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeDataPartWriterCompact . h <nl> <nl> + # include < Storages / MergeTree / IMergeTreeDataPartWriter . h > <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + / / / Writes data part in compact format . <nl> + class MergeTreeDataPartWriterCompact : public IMergeTreeDataPartWriter <nl> + { <nl> + public : <nl> + MergeTreeDataPartWriterCompact ( <nl> + const String & part_path , <nl> + const MergeTreeData & storage , <nl> + const NamesAndTypesList & columns_list , <nl> + const std : : vector < MergeTreeIndexPtr > & indices_to_recalc , <nl> + const String & marks_file_extension , <nl> + const CompressionCodecPtr & default_codec , <nl> + const MergeTreeWriterSettings & settings , <nl> + const MergeTreeIndexGranularity & index_granularity ) ; <nl> + <nl> + void write ( const Block & block , const IColumn : : Permutation * permutation = nullptr , <nl> + const Block & primary_key_block = { } , const Block & skip_indexes_block = { } ) override ; <nl> + <nl> + void finishDataSerialization ( IMergeTreeDataPart : : Checksums & checksums , bool sync = false ) override ; <nl> + <nl> + private : <nl> + / / / Write single granule of one column ( rows between 2 marks ) <nl> + void writeColumnSingleGranule ( <nl> + const ColumnWithTypeAndName & column , <nl> + size_t from_row , <nl> + size_t number_of_rows ) const ; <nl> + <nl> + void writeBlock ( const Block & block ) ; <nl> + <nl> + StreamPtr stream ; <nl> + <nl> + Block header ; <nl> + <nl> + / * * Simplified SquashingTransform . The original one is not suitable in this case , <nl> + * as it can return a smaller block from the buffer without merging it with a larger one if the last block is already big enough . <nl> + * But for compact parts we have to guarantee that the written block is larger than or equal to index_granularity . 
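To make the guarantee above concrete: rows are buffered across write ( ) calls and only flushed once at least one full granule has been accumulated, so a written block is never smaller than the granule size. Below is a self-contained sketch of that accumulate-then-flush behaviour using plain integers instead of Columns; GranuleBuffer and its members are purely illustrative names, not part of the patch.

#include <cstddef>
#include <iostream>
#include <vector>

// Scalar stand-in for ColumnsBuffer: rows are kept until a full granule can be written.
struct GranuleBuffer
{
    size_t granule_rows;
    std::vector<int> buffered;

    explicit GranuleBuffer(size_t granule_rows_) : granule_rows(granule_rows_) {}

    // Returns the rows that may be written now; an empty result means "keep accumulating".
    std::vector<int> add(const std::vector<int> & rows)
    {
        buffered.insert(buffered.end(), rows.begin(), rows.end());
        if (buffered.size() < granule_rows)
            return {};
        std::vector<int> ready;
        ready.swap(buffered);   // like releaseColumns(): hand everything over at once
        return ready;
    }
};

int main()
{
    GranuleBuffer buffer(4);                            // granule of 4 rows
    std::cout << buffer.add({1, 2}).size() << '\n';     // 0 -> not enough rows yet
    std::cout << buffer.add({3, 4, 5}).size() << '\n';  // 5 -> at least one full granule, flush everything
}

In the writer above, columns_buffer . add ( ) , index_granularity . getLastMarkRows ( ) and writeBlock ( ) play exactly these roles.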
<nl> + * / <nl> + class ColumnsBuffer <nl> + { <nl> + public : <nl> + void add ( MutableColumns & & columns ) ; <nl> + size_t size ( ) const ; <nl> + Columns releaseColumns ( ) ; <nl> + private : <nl> + MutableColumns accumulated_columns ; <nl> + } ; <nl> + <nl> + ColumnsBuffer columns_buffer ; <nl> + } ; <nl> + <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . f76f2860b7f <nl> mmm / dev / null <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeDataPartWriterWide . cpp <nl> <nl> + # include < Storages / MergeTree / MergeTreeDataPartWriterWide . h > <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + namespace <nl> + { <nl> + constexpr auto DATA_FILE_EXTENSION = " . bin " ; <nl> + } <nl> + <nl> + MergeTreeDataPartWriterWide : : MergeTreeDataPartWriterWide ( <nl> + const String & part_path_ , <nl> + const MergeTreeData & storage_ , <nl> + const NamesAndTypesList & columns_list_ , <nl> + const std : : vector < MergeTreeIndexPtr > & indices_to_recalc_ , <nl> + const String & marks_file_extension_ , <nl> + const CompressionCodecPtr & default_codec_ , <nl> + const MergeTreeWriterSettings & settings_ , <nl> + const MergeTreeIndexGranularity & index_granularity_ ) <nl> + : IMergeTreeDataPartWriter ( part_path_ , <nl> + storage_ , columns_list_ , indices_to_recalc_ , <nl> + marks_file_extension_ , default_codec_ , settings_ , index_granularity_ , false ) <nl> + { <nl> + const auto & columns = storage . getColumns ( ) ; <nl> + for ( const auto & it : columns_list ) <nl> + addStreams ( it . name , * it . type , columns . getCodecOrDefault ( it . name , default_codec ) , settings . estimated_size ) ; <nl> + } <nl> + <nl> + void MergeTreeDataPartWriterWide : : addStreams ( <nl> + const String & name , <nl> + const IDataType & type , <nl> + const CompressionCodecPtr & effective_codec , <nl> + size_t estimated_size ) <nl> + { <nl> + IDataType : : StreamCallback callback = [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> + { <nl> + if ( settings . skip_offsets & & ! substream_path . empty ( ) & & substream_path . back ( ) . type = = IDataType : : Substream : : ArraySizes ) <nl> + return ; <nl> + <nl> + String stream_name = IDataType : : getFileNameForStream ( name , substream_path ) ; <nl> + / / / Shared offsets for Nested type . <nl> + if ( column_streams . count ( stream_name ) ) <nl> + return ; <nl> + <nl> + column_streams [ stream_name ] = std : : make_unique < Stream > ( <nl> + stream_name , <nl> + part_path + stream_name , DATA_FILE_EXTENSION , <nl> + part_path + stream_name , marks_file_extension , <nl> + effective_codec , <nl> + settings . max_compress_block_size , <nl> + estimated_size , <nl> + settings . aio_threshold ) ; <nl> + } ; <nl> + <nl> + IDataType : : SubstreamPath stream_path ; <nl> + type . enumerateStreams ( callback , stream_path ) ; <nl> + } <nl> + <nl> + <nl> + IDataType : : OutputStreamGetter MergeTreeDataPartWriterWide : : createStreamGetter ( <nl> + const String & name , WrittenOffsetColumns & offset_columns ) <nl> + { <nl> + return [ & , this ] ( const IDataType : : SubstreamPath & substream_path ) - > WriteBuffer * <nl> + { <nl> + bool is_offsets = ! substream_path . empty ( ) & & substream_path . back ( ) . type = = IDataType : : Substream : : ArraySizes ; <nl> + if ( is_offsets & & settings . skip_offsets ) <nl> + return nullptr ; <nl> + <nl> + String stream_name = IDataType : : getFileNameForStream ( name , substream_path ) ; <nl> + <nl> + / / / Don ' t write offsets more than one time for Nested type . 
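The check that follows this comment relies on the fact that all subcolumns of a Nested column resolve to the same array-sizes substream, so only the first writer should emit it. A small standalone illustration of that bookkeeping is sketched below; the stream name " n.size0 " is an assumption made for the example, the real name is produced by IDataType : : getFileNameForStream.

#include <iostream>
#include <set>
#include <string>
#include <utility>
#include <vector>

// Why offsets of Nested subcolumns are written only once: both "n.a" and "n.b"
// resolve to the same array-sizes stream (here assumed to be "n.size0").
int main()
{
    std::vector<std::pair<std::string, std::string>> column_to_offsets_stream = {
        {"n.a", "n.size0"},
        {"n.b", "n.size0"},
    };

    std::set<std::string> offset_columns;   // plays the role of the set checked above
    for (const auto & [column, offsets_stream] : column_to_offsets_stream)
    {
        if (offset_columns.count(offsets_stream))
        {
            std::cout << column << ": offsets stream " << offsets_stream << " already written, skipping\n";
            continue;
        }
        offset_columns.insert(offsets_stream);
        std::cout << column << ": writing offsets stream " << offsets_stream << '\n';
    }
}

In createStreamGetter above and in writeSingleMark below, the offset_columns set performs this check.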
<nl> + if ( is_offsets & & offset_columns . count ( stream_name ) ) <nl> + return nullptr ; <nl> + <nl> + return & column_streams [ stream_name ] - > compressed ; <nl> + } ; <nl> + } <nl> + <nl> + void MergeTreeDataPartWriterWide : : write ( const Block & block , <nl> + const IColumn : : Permutation * permutation , <nl> + const Block & primary_key_block , const Block & skip_indexes_block ) <nl> + { <nl> + / / / Fill index granularity for this block <nl> + / / / if it ' s unknown ( in case of insert data or horizontal merge , <nl> + / / / but not in case of vertical merge ) <nl> + if ( compute_granularity ) <nl> + fillIndexGranularity ( block ) ; <nl> + <nl> + auto offset_columns = written_offset_columns ? * written_offset_columns : WrittenOffsetColumns { } ; <nl> + <nl> + auto it = columns_list . begin ( ) ; <nl> + for ( size_t i = 0 ; i < columns_list . size ( ) ; + + i , + + it ) <nl> + { <nl> + const ColumnWithTypeAndName & column = block . getByName ( it - > name ) ; <nl> + <nl> + if ( permutation ) <nl> + { <nl> + if ( primary_key_block . has ( it - > name ) ) <nl> + { <nl> + const auto & primary_column = * primary_key_block . getByName ( it - > name ) . column ; <nl> + writeColumn ( column . name , * column . type , primary_column , offset_columns ) ; <nl> + } <nl> + else if ( skip_indexes_block . has ( it - > name ) ) <nl> + { <nl> + const auto & index_column = * skip_indexes_block . getByName ( it - > name ) . column ; <nl> + writeColumn ( column . name , * column . type , index_column , offset_columns ) ; <nl> + } <nl> + else <nl> + { <nl> + / / / We rearrange the columns that are not included in the primary key here ; Then the result is released - to save RAM . <nl> + ColumnPtr permuted_column = column . column - > permute ( * permutation , 0 ) ; <nl> + writeColumn ( column . name , * column . type , * permuted_column , offset_columns ) ; <nl> + } <nl> + } <nl> + else <nl> + { <nl> + writeColumn ( column . name , * column . type , * column . column , offset_columns ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void MergeTreeDataPartWriterWide : : writeSingleMark ( <nl> + const String & name , <nl> + const IDataType & type , <nl> + WrittenOffsetColumns & offset_columns , <nl> + size_t number_of_rows , <nl> + DB : : IDataType : : SubstreamPath & path ) <nl> + { <nl> + type . enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> + { <nl> + bool is_offsets = ! substream_path . empty ( ) & & substream_path . back ( ) . type = = IDataType : : Substream : : ArraySizes ; <nl> + if ( is_offsets & & settings . skip_offsets ) <nl> + return ; <nl> + <nl> + String stream_name = IDataType : : getFileNameForStream ( name , substream_path ) ; <nl> + <nl> + / / / Don ' t write offsets more than one time for Nested type . <nl> + if ( is_offsets & & offset_columns . count ( stream_name ) ) <nl> + return ; <nl> + <nl> + Stream & stream = * column_streams [ stream_name ] ; <nl> + <nl> + / / / There could already be enough data to compress into the new block . <nl> + if ( stream . compressed . offset ( ) > = settings . min_compress_block_size ) <nl> + stream . compressed . next ( ) ; <nl> + <nl> + writeIntBinary ( stream . plain_hashing . count ( ) , stream . marks ) ; <nl> + writeIntBinary ( stream . compressed . offset ( ) , stream . marks ) ; <nl> + if ( settings . can_use_adaptive_granularity ) <nl> + writeIntBinary ( number_of_rows , stream . 
marks ) ; <nl> + } , path ) ; <nl> + } <nl> + <nl> + size_t MergeTreeDataPartWriterWide : : writeSingleGranule ( <nl> + const String & name , <nl> + const IDataType & type , <nl> + const IColumn & column , <nl> + WrittenOffsetColumns & offset_columns , <nl> + IDataType : : SerializeBinaryBulkStatePtr & serialization_state , <nl> + IDataType : : SerializeBinaryBulkSettings & serialize_settings , <nl> + size_t from_row , <nl> + size_t number_of_rows , <nl> + bool write_marks ) <nl> + { <nl> + if ( write_marks ) <nl> + writeSingleMark ( name , type , offset_columns , number_of_rows , serialize_settings . path ) ; <nl> + <nl> + type . serializeBinaryBulkWithMultipleStreams ( column , from_row , number_of_rows , serialize_settings , serialization_state ) ; <nl> + <nl> + / / / So that instead of the marks pointing to the end of the compressed block , there were marks pointing to the beginning of the next one . <nl> + type . enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> + { <nl> + bool is_offsets = ! substream_path . empty ( ) & & substream_path . back ( ) . type = = IDataType : : Substream : : ArraySizes ; <nl> + if ( is_offsets & & settings . skip_offsets ) <nl> + return ; <nl> + <nl> + String stream_name = IDataType : : getFileNameForStream ( name , substream_path ) ; <nl> + <nl> + / / / Don ' t write offsets more than one time for Nested type . <nl> + if ( is_offsets & & offset_columns . count ( stream_name ) ) <nl> + return ; <nl> + <nl> + column_streams [ stream_name ] - > compressed . nextIfAtEnd ( ) ; <nl> + } , serialize_settings . path ) ; <nl> + <nl> + return from_row + number_of_rows ; <nl> + } <nl> + <nl> + / / / Column must not be empty . ( column . size ( ) ! = = 0 ) <nl> + void MergeTreeDataPartWriterWide : : writeColumn ( <nl> + const String & name , <nl> + const IDataType & type , <nl> + const IColumn & column , <nl> + WrittenOffsetColumns & offset_columns ) <nl> + { <nl> + auto [ it , inserted ] = serialization_states . emplace ( name , nullptr ) ; <nl> + if ( inserted ) <nl> + { <nl> + IDataType : : SerializeBinaryBulkSettings serialize_settings ; <nl> + serialize_settings . getter = createStreamGetter ( name , offset_columns ) ; <nl> + type . serializeBinaryBulkStatePrefix ( serialize_settings , it - > second ) ; <nl> + } <nl> + <nl> + const auto & global_settings = storage . global_context . getSettingsRef ( ) ; <nl> + IDataType : : SerializeBinaryBulkSettings serialize_settings ; <nl> + serialize_settings . getter = createStreamGetter ( name , offset_columns ) ; <nl> + serialize_settings . low_cardinality_max_dictionary_size = global_settings . low_cardinality_max_dictionary_size ; <nl> + serialize_settings . low_cardinality_use_single_dictionary_for_part = global_settings . low_cardinality_use_single_dictionary_for_part ! = 0 ; <nl> + <nl> + size_t total_rows = column . size ( ) ; <nl> + size_t current_row = 0 ; <nl> + size_t current_column_mark = current_mark ; <nl> + while ( current_row < total_rows ) <nl> + { <nl> + size_t rows_to_write ; <nl> + bool write_marks = true ; <nl> + <nl> + / / / If there is ` index_offset ` , then the first mark goes not immediately , but after this number of rows . <nl> + if ( current_row = = 0 & & index_offset ! = 0 ) <nl> + { <nl> + write_marks = false ; <nl> + rows_to_write = index_offset ; <nl> + } <nl> + else <nl> + { <nl> + if ( index_granularity . 
getMarksCount ( ) < = current_column_mark ) <nl> + throw Exception ( <nl> + " Incorrect size of index granularity expect mark " + toString ( current_column_mark ) + " totally have marks " + toString ( index_granularity . getMarksCount ( ) ) , <nl> + ErrorCodes : : LOGICAL_ERROR ) ; <nl> + <nl> + rows_to_write = index_granularity . getMarkRows ( current_column_mark ) ; <nl> + } <nl> + <nl> + if ( rows_to_write ! = 0 ) <nl> + data_written = true ; <nl> + <nl> + current_row = writeSingleGranule ( <nl> + name , <nl> + type , <nl> + column , <nl> + offset_columns , <nl> + it - > second , <nl> + serialize_settings , <nl> + current_row , <nl> + rows_to_write , <nl> + write_marks <nl> + ) ; <nl> + <nl> + if ( write_marks ) <nl> + current_column_mark + + ; <nl> + } <nl> + <nl> + type . enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> + { <nl> + bool is_offsets = ! substream_path . empty ( ) & & substream_path . back ( ) . type = = IDataType : : Substream : : ArraySizes ; <nl> + if ( is_offsets ) <nl> + { <nl> + String stream_name = IDataType : : getFileNameForStream ( name , substream_path ) ; <nl> + offset_columns . insert ( stream_name ) ; <nl> + } <nl> + } , serialize_settings . path ) ; <nl> + <nl> + next_mark = current_column_mark ; <nl> + next_index_offset = current_row - total_rows ; <nl> + } <nl> + <nl> + void MergeTreeDataPartWriterWide : : finishDataSerialization ( IMergeTreeDataPart : : Checksums & checksums , bool sync ) <nl> + { <nl> + const auto & global_settings = storage . global_context . getSettingsRef ( ) ; <nl> + IDataType : : SerializeBinaryBulkSettings serialize_settings ; <nl> + serialize_settings . low_cardinality_max_dictionary_size = global_settings . low_cardinality_max_dictionary_size ; <nl> + serialize_settings . low_cardinality_use_single_dictionary_for_part = global_settings . low_cardinality_use_single_dictionary_for_part ! = 0 ; <nl> + WrittenOffsetColumns offset_columns ; <nl> + <nl> + bool write_final_mark = ( with_final_mark & & data_written ) ; <nl> + <nl> + { <nl> + auto it = columns_list . begin ( ) ; <nl> + for ( size_t i = 0 ; i < columns_list . size ( ) ; + + i , + + it ) <nl> + { <nl> + if ( ! serialization_states . empty ( ) ) <nl> + { <nl> + serialize_settings . getter = createStreamGetter ( it - > name , written_offset_columns ? * written_offset_columns : offset_columns ) ; <nl> + it - > type - > serializeBinaryBulkStateSuffix ( serialize_settings , serialization_states [ it - > name ] ) ; <nl> + } <nl> + <nl> + if ( write_final_mark ) <nl> + { <nl> + writeFinalMark ( it - > name , it - > type , offset_columns , serialize_settings . path ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + for ( auto it = column_streams . begin ( ) ; it ! = column_streams . end ( ) ; + + it ) <nl> + { <nl> + it - > second - > finalize ( ) ; <nl> + if ( sync ) <nl> + it - > second - > sync ( ) ; <nl> + it - > second - > addToChecksums ( checksums ) ; <nl> + } <nl> + <nl> + column_streams . clear ( ) ; <nl> + serialization_states . 
clear ( ) ; <nl> + } <nl> + <nl> + void MergeTreeDataPartWriterWide : : writeFinalMark ( <nl> + const std : : string & column_name , <nl> + const DataTypePtr column_type , <nl> + WrittenOffsetColumns & offset_columns , <nl> + DB : : IDataType : : SubstreamPath & path ) <nl> + { <nl> + writeSingleMark ( column_name , * column_type , offset_columns , 0 , path ) ; <nl> + / / / Memoize information about offsets <nl> + column_type - > enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> + { <nl> + bool is_offsets = ! substream_path . empty ( ) & & substream_path . back ( ) . type = = IDataType : : Substream : : ArraySizes ; <nl> + if ( is_offsets ) <nl> + { <nl> + String stream_name = IDataType : : getFileNameForStream ( column_name , substream_path ) ; <nl> + offset_columns . insert ( stream_name ) ; <nl> + } <nl> + } , path ) ; <nl> + } <nl> + <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 1240626745a <nl> mmm / dev / null <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeDataPartWriterWide . h <nl> <nl> + # include < Storages / MergeTree / IMergeTreeDataPartWriter . h > <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + / / / Writes data part in wide format . <nl> + class MergeTreeDataPartWriterWide : public IMergeTreeDataPartWriter <nl> + { <nl> + public : <nl> + <nl> + using ColumnToSize = std : : map < std : : string , UInt64 > ; <nl> + <nl> + MergeTreeDataPartWriterWide ( <nl> + const String & part_path , <nl> + const MergeTreeData & storage , <nl> + const NamesAndTypesList & columns_list , <nl> + const std : : vector < MergeTreeIndexPtr > & indices_to_recalc , <nl> + const String & marks_file_extension , <nl> + const CompressionCodecPtr & default_codec , <nl> + const MergeTreeWriterSettings & settings , <nl> + const MergeTreeIndexGranularity & index_granularity ) ; <nl> + <nl> + void write ( const Block & block , const IColumn : : Permutation * permutation = nullptr , <nl> + const Block & primary_key_block = { } , const Block & skip_indexes_block = { } ) override ; <nl> + <nl> + void finishDataSerialization ( IMergeTreeDataPart : : Checksums & checksums , bool sync = false ) override ; <nl> + <nl> + IDataType : : OutputStreamGetter createStreamGetter ( const String & name , WrittenOffsetColumns & offset_columns ) ; <nl> + <nl> + private : <nl> + / / / Write data of one column . 
<nl> + / / / Return how many marks were written and <nl> + / / / how many rows were written for last mark <nl> + void writeColumn ( <nl> + const String & name , <nl> + const IDataType & type , <nl> + const IColumn & column , <nl> + WrittenOffsetColumns & offset_columns ) ; <nl> + <nl> + / / / Write single granule of one column ( rows between 2 marks ) <nl> + size_t writeSingleGranule ( <nl> + const String & name , <nl> + const IDataType & type , <nl> + const IColumn & column , <nl> + WrittenOffsetColumns & offset_columns , <nl> + IDataType : : SerializeBinaryBulkStatePtr & serialization_state , <nl> + IDataType : : SerializeBinaryBulkSettings & serialize_settings , <nl> + size_t from_row , <nl> + size_t number_of_rows , <nl> + bool write_marks ) ; <nl> + <nl> + / / / Write mark for column <nl> + void writeSingleMark ( <nl> + const String & name , <nl> + const IDataType & type , <nl> + WrittenOffsetColumns & offset_columns , <nl> + size_t number_of_rows , <nl> + DB : : IDataType : : SubstreamPath & path ) ; <nl> + <nl> + void writeFinalMark ( <nl> + const std : : string & column_name , <nl> + const DataTypePtr column_type , <nl> + WrittenOffsetColumns & offset_columns , <nl> + DB : : IDataType : : SubstreamPath & path ) ; <nl> + <nl> + void addStreams ( <nl> + const String & name , <nl> + const IDataType & type , <nl> + const CompressionCodecPtr & effective_codec , <nl> + size_t estimated_size ) ; <nl> + <nl> + SerializationStates serialization_states ; <nl> + <nl> + using ColumnStreams = std : : map < String , StreamPtr > ; <nl> + ColumnStreams column_streams ; <nl> + } ; <nl> + <nl> + } <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeDataSelectExecutor . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeDataSelectExecutor . cpp <nl> Pipes MergeTreeDataSelectExecutor : : readFromParts ( <nl> <nl> Pipes res ; <nl> <nl> + MergeTreeReaderSettings reader_settings = <nl> + { <nl> + . min_bytes_to_use_direct_io = settings . min_bytes_to_use_direct_io , <nl> + . max_read_buffer_size = settings . max_read_buffer_size , <nl> + . save_marks_in_cache = true <nl> + } ; <nl> + <nl> if ( select . final ( ) ) <nl> { <nl> / / / Add columns needed to calculate the sorting expression and the sign . <nl> Pipes MergeTreeDataSelectExecutor : : readFromParts ( <nl> settings . use_uncompressed_cache , <nl> query_info , <nl> virt_column_names , <nl> - settings ) ; <nl> + settings , <nl> + reader_settings ) ; <nl> } <nl> else if ( settings . optimize_read_in_order & & query_info . input_sorting_info ) <nl> { <nl> Pipes MergeTreeDataSelectExecutor : : readFromParts ( <nl> query_info , <nl> sorting_key_prefix_expr , <nl> virt_column_names , <nl> - settings ) ; <nl> + settings , <nl> + reader_settings ) ; <nl> } <nl> else <nl> { <nl> Pipes MergeTreeDataSelectExecutor : : readFromParts ( <nl> settings . use_uncompressed_cache , <nl> query_info , <nl> virt_column_names , <nl> - settings ) ; <nl> + settings , <nl> + reader_settings ) ; <nl> } <nl> <nl> if ( use_sampling ) <nl> Pipes MergeTreeDataSelectExecutor : : spreadMarkRangesAmongStreams ( <nl> bool use_uncompressed_cache , <nl> const SelectQueryInfo & query_info , <nl> const Names & virt_columns , <nl> - const Settings & settings ) const <nl> + const Settings & settings , <nl> + const MergeTreeReaderSettings & reader_settings ) const <nl> { <nl> / / / Count marks for each part . <nl> std : : vector < size_t > sum_marks_in_parts ( parts . 
size ( ) ) ; <nl> Pipes MergeTreeDataSelectExecutor : : spreadMarkRangesAmongStreams ( <nl> auto source = std : : make_shared < MergeTreeThreadSelectBlockInputProcessor > ( <nl> i , pool , min_marks_for_concurrent_read , max_block_size , settings . preferred_block_size_bytes , <nl> settings . preferred_max_column_in_block_size_bytes , data , use_uncompressed_cache , <nl> - query_info . prewhere_info , settings , virt_columns ) ; <nl> + query_info . prewhere_info , reader_settings , virt_columns ) ; <nl> <nl> if ( i = = 0 ) <nl> { <nl> Pipes MergeTreeDataSelectExecutor : : spreadMarkRangesAmongStreams ( <nl> auto source = std : : make_shared < MergeTreeSelectProcessor > ( <nl> data , part . data_part , max_block_size , settings . preferred_block_size_bytes , <nl> settings . preferred_max_column_in_block_size_bytes , column_names , part . ranges , use_uncompressed_cache , <nl> - query_info . prewhere_info , true , settings . min_bytes_to_use_direct_io , settings . min_bytes_to_use_mmap_io , <nl> - settings . max_read_buffer_size , true , <nl> - virt_columns , part . part_index_in_query ) ; <nl> + query_info . prewhere_info , true , reader_settings , virt_columns , part . part_index_in_query ) ; <nl> <nl> res . emplace_back ( std : : move ( source ) ) ; <nl> } <nl> Pipes MergeTreeDataSelectExecutor : : spreadMarkRangesAmongStreamsWithOrder ( <nl> const SelectQueryInfo & query_info , <nl> const ExpressionActionsPtr & sorting_key_prefix_expr , <nl> const Names & virt_columns , <nl> - const Settings & settings ) const <nl> + const Settings & settings , <nl> + const MergeTreeReaderSettings & reader_settings ) const <nl> { <nl> size_t sum_marks = 0 ; <nl> const InputSortingInfoPtr & input_sorting_info = query_info . input_sorting_info ; <nl> Pipes MergeTreeDataSelectExecutor : : spreadMarkRangesAmongStreamsWithOrder ( <nl> pipes . emplace_back ( std : : make_shared < MergeTreeSelectProcessor > ( <nl> data , part . data_part , max_block_size , settings . preferred_block_size_bytes , <nl> settings . preferred_max_column_in_block_size_bytes , column_names , ranges_to_get_from_part , <nl> - use_uncompressed_cache , query_info . prewhere_info , true , settings . min_bytes_to_use_direct_io , settings . min_bytes_to_use_mmap_io , <nl> - settings . max_read_buffer_size , true , virt_columns , part . part_index_in_query ) ) ; <nl> + use_uncompressed_cache , query_info . prewhere_info , true , reader_settings , <nl> + virt_columns , part . part_index_in_query ) ) ; <nl> } <nl> else <nl> { <nl> pipes . emplace_back ( std : : make_shared < MergeTreeReverseSelectProcessor > ( <nl> data , part . data_part , max_block_size , settings . preferred_block_size_bytes , <nl> settings . preferred_max_column_in_block_size_bytes , column_names , ranges_to_get_from_part , <nl> - use_uncompressed_cache , query_info . prewhere_info , true , settings . min_bytes_to_use_direct_io , settings . min_bytes_to_use_mmap_io , <nl> - settings . max_read_buffer_size , true , virt_columns , part . part_index_in_query ) ) ; <nl> + use_uncompressed_cache , query_info . prewhere_info , true , reader_settings , <nl> + virt_columns , part . part_index_in_query ) ) ; <nl> <nl> pipes . back ( ) . addSimpleTransform ( std : : make_shared < ReverseTransform > ( pipes . back ( ) . 
getHeader ( ) ) ) ; <nl> } <nl> Pipes MergeTreeDataSelectExecutor : : spreadMarkRangesAmongStreamsFinal ( <nl> bool use_uncompressed_cache , <nl> const SelectQueryInfo & query_info , <nl> const Names & virt_columns , <nl> - const Settings & settings ) const <nl> + const Settings & settings , <nl> + const MergeTreeReaderSettings & reader_settings ) const <nl> { <nl> const auto data_settings = data . getSettings ( ) ; <nl> size_t sum_marks = 0 ; <nl> Pipes MergeTreeDataSelectExecutor : : spreadMarkRangesAmongStreamsFinal ( <nl> auto source_processor = std : : make_shared < MergeTreeSelectProcessor > ( <nl> data , part . data_part , max_block_size , settings . preferred_block_size_bytes , <nl> settings . preferred_max_column_in_block_size_bytes , column_names , part . ranges , use_uncompressed_cache , <nl> - query_info . prewhere_info , true , settings . min_bytes_to_use_direct_io , settings . min_bytes_to_use_mmap_io , <nl> - settings . max_read_buffer_size , true , <nl> + query_info . prewhere_info , true , reader_settings , <nl> virt_columns , part . part_index_in_query ) ; <nl> <nl> Pipe pipe ( std : : move ( source_processor ) ) ; <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeDataSelectExecutor . h <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeDataSelectExecutor . h <nl> class MergeTreeDataSelectExecutor <nl> bool use_uncompressed_cache , <nl> const SelectQueryInfo & query_info , <nl> const Names & virt_columns , <nl> - const Settings & settings ) const ; <nl> + const Settings & settings , <nl> + const MergeTreeReaderSettings & reader_settings ) const ; <nl> <nl> Pipes spreadMarkRangesAmongStreamsWithOrder ( <nl> RangesInDataParts & & parts , <nl> class MergeTreeDataSelectExecutor <nl> const SelectQueryInfo & query_info , <nl> const ExpressionActionsPtr & sorting_key_prefix_expr , <nl> const Names & virt_columns , <nl> - const Settings & settings ) const ; <nl> + const Settings & settings , <nl> + const MergeTreeReaderSettings & reader_settings ) const ; <nl> <nl> Pipes spreadMarkRangesAmongStreamsFinal ( <nl> RangesInDataParts & & parts , <nl> class MergeTreeDataSelectExecutor <nl> bool use_uncompressed_cache , <nl> const SelectQueryInfo & query_info , <nl> const Names & virt_columns , <nl> - const Settings & settings ) const ; <nl> + const Settings & settings , <nl> + const MergeTreeReaderSettings & reader_settings ) const ; <nl> <nl> / / / Get the approximate value ( bottom estimate - only by full marks ) of the number of rows falling under the index . <nl> size_t getApproximateTotalRowsToRead ( <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeDataWriter . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeDataWriter . cpp <nl> void buildScatterSelector ( <nl> <nl> / / / Computes ttls and updates ttl infos <nl> void updateTTL ( const MergeTreeData : : TTLEntry & ttl_entry , <nl> - MergeTreeDataPart : : TTLInfos & ttl_infos , <nl> + IMergeTreeDataPart : : TTLInfos & ttl_infos , <nl> DB : : MergeTreeDataPartTTLInfo & ttl_info , <nl> Block & block , bool update_part_min_max_ttls ) <nl> { <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataWriter : : writeTempPart ( BlockWithPa <nl> / / / This will generate unique name in scope of current server process . <nl> Int64 temp_index = data . insert_increment . get ( ) ; <nl> <nl> - MergeTreeDataPart : : MinMaxIndex minmax_idx ; <nl> + IMergeTreeDataPart : : MinMaxIndex minmax_idx ; <nl> minmax_idx . update ( block , data . 
minmax_idx_columns ) ; <nl> <nl> MergeTreePartition partition ( std : : move ( block_with_partition . partition ) ) ; <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataWriter : : writeTempPart ( BlockWithPa <nl> / / / Size of part would not be greater than block . bytes ( ) + epsilon <nl> size_t expected_size = block . bytes ( ) ; <nl> <nl> - DB : : MergeTreeDataPart : : TTLInfos move_ttl_infos ; <nl> + DB : : IMergeTreeDataPart : : TTLInfos move_ttl_infos ; <nl> for ( const auto & ttl_entry : data . move_ttl_entries ) <nl> updateTTL ( ttl_entry , move_ttl_infos , move_ttl_infos . moves_ttl [ ttl_entry . result_column ] , block , false ) ; <nl> <nl> + NamesAndTypesList columns = data . getColumns ( ) . getAllPhysical ( ) . filter ( block . getNames ( ) ) ; <nl> ReservationPtr reservation = data . reserveSpacePreferringTTLRules ( expected_size , move_ttl_infos , time ( nullptr ) ) ; <nl> <nl> - MergeTreeData : : MutableDataPartPtr new_data_part = <nl> - std : : make_shared < MergeTreeData : : DataPart > ( data , reservation - > getDisk ( ) , part_name , new_part_info ) ; <nl> + auto new_data_part = data . createPart ( <nl> + part_name , <nl> + data . choosePartType ( expected_size , block . rows ( ) ) , <nl> + new_part_info , <nl> + reservation - > getDisk ( ) , <nl> + TMP_PREFIX + part_name ) ; <nl> <nl> + new_data_part - > setColumns ( columns ) ; <nl> new_data_part - > partition = std : : move ( partition ) ; <nl> new_data_part - > minmax_idx = std : : move ( minmax_idx ) ; <nl> - new_data_part - > relative_path = TMP_PREFIX + part_name ; <nl> new_data_part - > is_temp = true ; <nl> <nl> / / / The name could be non - unique in case of stale files from previous runs . <nl> MergeTreeData : : MutableDataPartPtr MergeTreeDataWriter : : writeTempPart ( BlockWithPa <nl> / / / either default lz4 or compression method with zero thresholds on absolute and relative part size . <nl> auto compression_codec = data . global_context . chooseCompressionCodec ( 0 , 0 ) ; <nl> <nl> - NamesAndTypesList columns = data . getColumns ( ) . getAllPhysical ( ) . filter ( block . getNames ( ) ) ; <nl> - MergedBlockOutputStream out ( data , new_data_part - > getFullPath ( ) , columns , compression_codec ) ; <nl> + MergedBlockOutputStream out ( new_data_part , columns , compression_codec ) ; <nl> <nl> out . writePrefix ( ) ; <nl> out . writeWithPermutation ( block , perm_ptr ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 5d3b2945d47 <nl> mmm / dev / null <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeIOSettings . h <nl> <nl> + # pragma once <nl> + # include < cstddef > <nl> + # include < Core / Settings . h > <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + struct MergeTreeReaderSettings <nl> + { <nl> + size_t min_bytes_to_use_direct_io = 0 ; <nl> + size_t min_bytes_to_use_mmap_io = 0 ; <nl> + size_t max_read_buffer_size = DBMS_DEFAULT_BUFFER_SIZE ; <nl> + / / / If save_marks_in_cache is false , then , if marks are not in cache , <nl> + / / / we will load them but won ' t save in the cache , to avoid evicting other data . <nl> + bool save_marks_in_cache = false ; <nl> + } ; <nl> + <nl> + struct MergeTreeWriterSettings <nl> + { <nl> + MergeTreeWriterSettings ( const Settings & global_settings , bool can_use_adaptive_granularity_ , <nl> + size_t aio_threshold_ , bool blocks_are_granules_size_ = false ) <nl> + : min_compress_block_size ( global_settings . min_compress_block_size ) <nl> + , max_compress_block_size ( global_settings . 
min_compress_block_size ) <nl> + , aio_threshold ( aio_threshold_ ) <nl> + , can_use_adaptive_granularity ( can_use_adaptive_granularity_ ) <nl> + , blocks_are_granules_size ( blocks_are_granules_size_ ) { } <nl> + <nl> + size_t min_compress_block_size ; <nl> + size_t max_compress_block_size ; <nl> + size_t aio_threshold ; <nl> + bool can_use_adaptive_granularity ; <nl> + bool blocks_are_granules_size ; <nl> + / / / true if we write temporary files during alter . <nl> + bool is_writing_temp_files = false ; <nl> + size_t estimated_size = 0 ; <nl> + / / / used when ALTERing columns if we know that array offsets are not altered . <nl> + bool skip_offsets = false ; <nl> + } ; <nl> + } <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeIndexGranularity . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeIndexGranularity . cpp <nl> void MergeTreeIndexGranularity : : appendMark ( size_t rows_count ) <nl> marks_rows_partial_sums . push_back ( marks_rows_partial_sums . back ( ) + rows_count ) ; <nl> } <nl> <nl> + void MergeTreeIndexGranularity : : addRowsToLastMark ( size_t rows_count ) <nl> + { <nl> + if ( marks_rows_partial_sums . empty ( ) ) <nl> + marks_rows_partial_sums . push_back ( rows_count ) ; <nl> + else <nl> + marks_rows_partial_sums . back ( ) + = rows_count ; <nl> + } <nl> + <nl> + void MergeTreeIndexGranularity : : popMark ( ) <nl> + { <nl> + if ( ! marks_rows_partial_sums . empty ( ) ) <nl> + marks_rows_partial_sums . pop_back ( ) ; <nl> + } <nl> + <nl> size_t MergeTreeIndexGranularity : : getRowsCountInRange ( size_t begin , size_t end ) const <nl> { <nl> size_t subtrahend = 0 ; <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeIndexGranularity . h <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeIndexGranularity . h <nl> <nl> namespace DB <nl> { <nl> <nl> - / / / Class contains information about index granularity in rows of MergeTreeDataPart <nl> + / / / Class contains information about index granularity in rows of IMergeTreeDataPart <nl> / / / Inside it contains vector of partial sums of rows after mark : <nl> / / / | mmm - - | mmm | mmm - | mmm - | <nl> / / / | 5 | 8 | 12 | 16 | <nl> class MergeTreeIndexGranularity <nl> / / / Add new mark with rows_count <nl> void appendMark ( size_t rows_count ) ; <nl> <nl> + / / / Extends last mark by rows_count . <nl> + void addRowsToLastMark ( size_t rows_count ) ; <nl> + <nl> + / / / Drops last mark if any exists . <nl> + void popMark ( ) ; <nl> + <nl> / / / Add ` size ` of marks with ` fixed_granularity ` rows <nl> void resizeWithFixedGranularity ( size_t size , size_t fixed_granularity ) ; <nl> } ; <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeIndexGranularityInfo . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeIndexGranularityInfo . cpp <nl> <nl> <nl> namespace DB <nl> { <nl> - std : : optional < std : : string > MergeTreeIndexGranularityInfo : : getMrkExtensionFromFS ( const std : : string & path_to_part ) const <nl> + <nl> + namespace ErrorCodes <nl> + { <nl> + extern const int NOT_IMPLEMENTED ; <nl> + extern const int UNKNOWN_PART_TYPE ; <nl> + } <nl> + <nl> + std : : optional < std : : string > MergeTreeIndexGranularityInfo : : getMrkExtensionFromFS ( const std : : string & path_to_part ) <nl> { <nl> if ( Poco : : File ( path_to_part ) . exists ( ) ) <nl> { <nl> std : : optional < std : : string > MergeTreeIndexGranularityInfo : : getMrkExtensionFromFS ( <nl> for ( Poco : : DirectoryIterator part_it ( path_to_part ) ; part_it ! 
= end ; + + part_it ) <nl> { <nl> const auto & ext = " . " + part_it . path ( ) . getExtension ( ) ; <nl> - if ( ext = = getNonAdaptiveMrkExtension ( ) | | ext = = getAdaptiveMrkExtension ( ) ) <nl> + if ( ext = = getNonAdaptiveMrkExtension ( ) <nl> + | | ext = = getAdaptiveMrkExtension ( MergeTreeDataPartType : : WIDE ) <nl> + | | ext = = getAdaptiveMrkExtension ( MergeTreeDataPartType : : COMPACT ) ) <nl> return ext ; <nl> } <nl> } <nl> return { } ; <nl> } <nl> <nl> - MergeTreeIndexGranularityInfo : : MergeTreeIndexGranularityInfo ( <nl> - const MergeTreeData & storage ) <nl> + MergeTreeIndexGranularityInfo : : MergeTreeIndexGranularityInfo ( const MergeTreeData & storage , MergeTreeDataPartType type_ ) <nl> + : type ( type_ ) <nl> { <nl> const auto storage_settings = storage . getSettings ( ) ; <nl> fixed_index_granularity = storage_settings - > index_granularity ; <nl> + <nl> / / / Granularity is fixed <nl> if ( ! storage . canUseAdaptiveGranularity ( ) ) <nl> + { <nl> + if ( type ! = MergeTreeDataPartType : : WIDE ) <nl> + throw Exception ( " Only Wide parts can be used with non - adaptive granularity . " , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> setNonAdaptive ( ) ; <nl> + } <nl> else <nl> setAdaptive ( storage_settings - > index_granularity_bytes ) ; <nl> } <nl> <nl> - <nl> void MergeTreeIndexGranularityInfo : : changeGranularityIfRequired ( const std : : string & path_to_part ) <nl> { <nl> auto mrk_ext = getMrkExtensionFromFS ( path_to_part ) ; <nl> void MergeTreeIndexGranularityInfo : : changeGranularityIfRequired ( const std : : strin <nl> void MergeTreeIndexGranularityInfo : : setAdaptive ( size_t index_granularity_bytes_ ) <nl> { <nl> is_adaptive = true ; <nl> - mark_size_in_bytes = getAdaptiveMrkSize ( ) ; <nl> - marks_file_extension = getAdaptiveMrkExtension ( ) ; <nl> + marks_file_extension = getAdaptiveMrkExtension ( type ) ; <nl> index_granularity_bytes = index_granularity_bytes_ ; <nl> } <nl> <nl> void MergeTreeIndexGranularityInfo : : setNonAdaptive ( ) <nl> { <nl> is_adaptive = false ; <nl> - mark_size_in_bytes = getNonAdaptiveMrkSize ( ) ; <nl> marks_file_extension = getNonAdaptiveMrkExtension ( ) ; <nl> index_granularity_bytes = 0 ; <nl> } <nl> <nl> + size_t MergeTreeIndexGranularityInfo : : getMarkSizeInBytes ( size_t columns_num ) const <nl> + { <nl> + if ( type = = MergeTreeDataPartType : : WIDE ) <nl> + return is_adaptive ? getAdaptiveMrkSizeWide ( ) : getNonAdaptiveMrkSizeWide ( ) ; <nl> + else if ( type = = MergeTreeDataPartType : : COMPACT ) <nl> + return getAdaptiveMrkSizeCompact ( columns_num ) ; <nl> + else <nl> + throw Exception ( " Unknown part type " , ErrorCodes : : UNKNOWN_PART_TYPE ) ; <nl> + } <nl> + <nl> + size_t getAdaptiveMrkSizeCompact ( size_t columns_num ) <nl> + { <nl> + / / / Each mark contains number of rows in granule and two offsets for every column . <nl> + return sizeof ( UInt64 ) * ( columns_num * 2 + 1 ) ; <nl> + } <nl> + <nl> + std : : string getAdaptiveMrkExtension ( MergeTreeDataPartType part_type ) <nl> + { <nl> + if ( part_type = = MergeTreeDataPartType : : WIDE ) <nl> + return " . mrk2 " ; <nl> + else if ( part_type = = MergeTreeDataPartType : : COMPACT ) <nl> + return " . mrk3 " ; <nl> + else <nl> + throw Exception ( " Unknown part type " , ErrorCodes : : UNKNOWN_PART_TYPE ) ; <nl> + } <nl> + <nl> } <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeIndexGranularityInfo . h <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeIndexGranularityInfo . 
h <nl> <nl> <nl> # include < optional > <nl> # include < Core / Types . h > <nl> + # include < Storages / MergeTree / MergeTreeDataPartType . h > <nl> + # include < DataStreams / MarkInCompressedFile . h > <nl> <nl> namespace DB <nl> { <nl> <nl> class MergeTreeData ; <nl> + <nl> / / / Meta information about index granularity <nl> struct MergeTreeIndexGranularityInfo <nl> { <nl> struct MergeTreeIndexGranularityInfo <nl> / / / Marks file extension ' . mrk ' or ' . mrk2 ' <nl> String marks_file_extension ; <nl> <nl> - / / / Size of one mark in file two or three size_t numbers <nl> - UInt8 mark_size_in_bytes ; <nl> - <nl> / / / Is stride in rows between marks non fixed ? <nl> - bool is_adaptive ; <nl> + bool is_adaptive = false ; <nl> <nl> / / / Fixed size in rows of one granule if index_granularity_bytes is zero <nl> - size_t fixed_index_granularity ; <nl> + size_t fixed_index_granularity = 0 ; <nl> <nl> / / / Approximate bytes size of one granule <nl> - size_t index_granularity_bytes ; <nl> + size_t index_granularity_bytes = 0 ; <nl> <nl> - MergeTreeIndexGranularityInfo ( <nl> - const MergeTreeData & storage ) ; <nl> + MergeTreeIndexGranularityInfo ( const MergeTreeData & storage , MergeTreeDataPartType type_ ) ; <nl> <nl> void changeGranularityIfRequired ( const std : : string & path_to_part ) ; <nl> <nl> - String getMarksFilePath ( const String & column_path ) const <nl> + String getMarksFilePath ( const String & path_prefix ) const <nl> { <nl> - return column_path + marks_file_extension ; <nl> + return path_prefix + marks_file_extension ; <nl> } <nl> - private : <nl> <nl> + size_t getMarkSizeInBytes ( size_t columns_num = 1 ) const ; <nl> + <nl> + static std : : optional < std : : string > getMrkExtensionFromFS ( const std : : string & path_to_table ) ; <nl> + <nl> + private : <nl> + MergeTreeDataPartType type ; <nl> void setAdaptive ( size_t index_granularity_bytes_ ) ; <nl> void setNonAdaptive ( ) ; <nl> - std : : optional < std : : string > getMrkExtensionFromFS ( const std : : string & path_to_table ) const ; <nl> } ; <nl> <nl> constexpr inline auto getNonAdaptiveMrkExtension ( ) { return " . mrk " ; } <nl> - constexpr inline auto getAdaptiveMrkExtension ( ) { return " . mrk2 " ; } <nl> - constexpr inline auto getNonAdaptiveMrkSize ( ) { return sizeof ( UInt64 ) * 2 ; } <nl> - constexpr inline auto getAdaptiveMrkSize ( ) { return sizeof ( UInt64 ) * 3 ; } <nl> + constexpr inline auto getNonAdaptiveMrkSizeWide ( ) { return sizeof ( UInt64 ) * 2 ; } <nl> + constexpr inline auto getAdaptiveMrkSizeWide ( ) { return sizeof ( UInt64 ) * 3 ; } <nl> + inline size_t getAdaptiveMrkSizeCompact ( size_t columns_num ) ; <nl> + std : : string getAdaptiveMrkExtension ( MergeTreeDataPartType part_type ) ; <nl> <nl> } <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeIndexReader . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeIndexReader . cpp <nl> MergeTreeIndexReader : : MergeTreeIndexReader ( <nl> MergeTreeIndexPtr index_ , MergeTreeData : : DataPartPtr part_ , size_t marks_count_ , const MarkRanges & all_mark_ranges_ ) <nl> : index ( index_ ) , stream ( <nl> part_ - > getFullPath ( ) + index - > getFileName ( ) , " . idx " , marks_count_ , <nl> - all_mark_ranges_ , nullptr , false , nullptr , <nl> - part_ - > getFileSizeOrZero ( index - > getFileName ( ) + " . idx " ) , 0 , 0 , DBMS_DEFAULT_BUFFER_SIZE , <nl> + all_mark_ranges_ , <nl> + MergeTreeReaderSettings { } , nullptr , nullptr , <nl> + part_ - > getFileSizeOrZero ( index - > getFileName ( ) + " . 
idx " ) , <nl> & part_ - > index_granularity_info , <nl> ReadBufferFromFileBase : : ProfileCallback { } , CLOCK_MONOTONIC_COARSE ) <nl> { <nl> new file mode 100644 <nl> index 00000000000 . . 452d46a4751 <nl> mmm / dev / null <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeMarksLoader . cpp <nl> <nl> + # include < Storages / MergeTree / MergeTreeMarksLoader . h > <nl> + # include < Storages / MergeTree / MergeTreeData . h > <nl> + # include < IO / ReadBufferFromFile . h > <nl> + # include < Poco / File . h > <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + namespace ErrorCodes <nl> + { <nl> + extern const int CORRUPTED_DATA ; <nl> + extern const int LOGICAL_ERROR ; <nl> + } <nl> + <nl> + MergeTreeMarksLoader : : MergeTreeMarksLoader ( <nl> + MarkCache * mark_cache_ , <nl> + const String & mrk_path_ , <nl> + size_t marks_count_ , <nl> + const MergeTreeIndexGranularityInfo & index_granularity_info_ , <nl> + bool save_marks_in_cache_ , <nl> + size_t columns_in_mark_ ) <nl> + : mark_cache ( mark_cache_ ) <nl> + , mrk_path ( mrk_path_ ) <nl> + , marks_count ( marks_count_ ) <nl> + , index_granularity_info ( index_granularity_info_ ) <nl> + , save_marks_in_cache ( save_marks_in_cache_ ) <nl> + , columns_in_mark ( columns_in_mark_ ) { } <nl> + <nl> + const MarkInCompressedFile & MergeTreeMarksLoader : : getMark ( size_t row_index , size_t column_index ) <nl> + { <nl> + if ( ! marks ) <nl> + loadMarks ( ) ; <nl> + <nl> + # ifndef NDEBUG <nl> + if ( column_index > = columns_in_mark ) <nl> + throw Exception ( " Column index : " + toString ( column_index ) <nl> + + " is out of range [ 0 , " + toString ( columns_in_mark ) + " ) " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> + # endif <nl> + <nl> + return ( * marks ) [ row_index * columns_in_mark + column_index ] ; <nl> + } <nl> + <nl> + MarkCache : : MappedPtr MergeTreeMarksLoader : : loadMarksImpl ( ) <nl> + { <nl> + / / / Memory for marks must not be accounted as memory usage for query , because they are stored in shared cache . <nl> + auto temporarily_disable_memory_tracker = getCurrentMemoryTrackerActionLock ( ) ; <nl> + <nl> + size_t file_size = Poco : : File ( mrk_path ) . getSize ( ) ; <nl> + size_t mark_size = index_granularity_info . getMarkSizeInBytes ( columns_in_mark ) ; <nl> + size_t expected_file_size = mark_size * marks_count ; <nl> + <nl> + if ( expected_file_size ! = file_size ) <nl> + throw Exception ( <nl> + " Bad size of marks file ' " + mrk_path + " ' : " + std : : to_string ( file_size ) + " , must be : " + std : : to_string ( expected_file_size ) , <nl> + ErrorCodes : : CORRUPTED_DATA ) ; <nl> + <nl> + auto res = std : : make_shared < MarksInCompressedFile > ( marks_count * columns_in_mark ) ; <nl> + <nl> + if ( ! index_granularity_info . is_adaptive ) <nl> + { <nl> + / / / Read directly to marks . <nl> + ReadBufferFromFile buffer ( mrk_path , file_size , - 1 , reinterpret_cast < char * > ( res - > data ( ) ) ) ; <nl> + <nl> + if ( buffer . eof ( ) | | buffer . buffer ( ) . size ( ) ! = file_size ) <nl> + throw Exception ( " Cannot read all marks from file " + mrk_path , ErrorCodes : : CANNOT_READ_ALL_DATA ) ; <nl> + } <nl> + else <nl> + { <nl> + ReadBufferFromFile buffer ( mrk_path , file_size , - 1 ) ; <nl> + size_t i = 0 ; <nl> + while ( ! buffer . eof ( ) ) <nl> + { <nl> + res - > read ( buffer , i * columns_in_mark , columns_in_mark ) ; <nl> + buffer . seek ( sizeof ( size_t ) , SEEK_CUR ) ; <nl> + + + i ; <nl> + } <nl> + <nl> + if ( i * mark_size ! 
= file_size ) <nl> + throw Exception ( " Cannot read all marks from file " + mrk_path , ErrorCodes : : CANNOT_READ_ALL_DATA ) ; <nl> + } <nl> + res - > protect ( ) ; <nl> + return res ; <nl> + } <nl> + <nl> + void MergeTreeMarksLoader : : loadMarks ( ) <nl> + { <nl> + if ( mark_cache ) <nl> + { <nl> + auto key = mark_cache - > hash ( mrk_path ) ; <nl> + if ( save_marks_in_cache ) <nl> + { <nl> + auto callback = std : : bind ( & MergeTreeMarksLoader : : loadMarksImpl , this ) ; <nl> + marks = mark_cache - > getOrSet ( key , callback ) ; <nl> + } <nl> + else <nl> + { <nl> + marks = mark_cache - > get ( key ) ; <nl> + if ( ! marks ) <nl> + marks = loadMarksImpl ( ) ; <nl> + } <nl> + } <nl> + else <nl> + marks = loadMarksImpl ( ) ; <nl> + <nl> + if ( ! marks ) <nl> + throw Exception ( " Failed to load marks : " + mrk_path , ErrorCodes : : LOGICAL_ERROR ) ; <nl> + } <nl> + <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 927e78ed8b9 <nl> mmm / dev / null <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeMarksLoader . h <nl> <nl> + # include < Storages / MarkCache . h > <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + struct MergeTreeIndexGranularityInfo ; <nl> + <nl> + class MergeTreeMarksLoader <nl> + { <nl> + public : <nl> + using MarksPtr = MarkCache : : MappedPtr ; <nl> + <nl> + MergeTreeMarksLoader ( <nl> + MarkCache * mark_cache_ , <nl> + const String & mrk_path , <nl> + size_t marks_count_ , <nl> + const MergeTreeIndexGranularityInfo & index_granularity_info_ , <nl> + bool save_marks_in_cache_ , <nl> + size_t columns_num_in_mark_ = 1 ) ; <nl> + <nl> + const MarkInCompressedFile & getMark ( size_t row_index , size_t column_index = 0 ) ; <nl> + <nl> + bool initialized ( ) const { return marks ! = nullptr ; } <nl> + <nl> + private : <nl> + MarkCache * mark_cache = nullptr ; <nl> + String mrk_path ; <nl> + size_t marks_count ; <nl> + const MergeTreeIndexGranularityInfo & index_granularity_info ; <nl> + bool save_marks_in_cache = false ; <nl> + size_t columns_in_mark ; <nl> + MarkCache : : MappedPtr marks ; <nl> + <nl> + void loadMarks ( ) ; <nl> + MarkCache : : MappedPtr loadMarksImpl ( ) ; <nl> + } ; <nl> + <nl> + } <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreePartition . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreePartition . cpp <nl> <nl> # include < Storages / MergeTree / MergeTreePartition . h > <nl> # include < Storages / MergeTree / MergeTreeData . h > <nl> - # include < Storages / MergeTree / MergeTreeDataPart . h > <nl> + # include < Storages / MergeTree / IMergeTreeDataPart . h > <nl> # include < IO / ReadBufferFromFile . h > <nl> # include < IO / HashingWriteBuffer . h > <nl> # include < Common / FieldVisitors . h > <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreePartsMover . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreePartsMover . cpp <nl> <nl> # include < Storages / MergeTree / MergeTreePartsMover . h > <nl> # include < Storages / MergeTree / MergeTreeData . h > <nl> + <nl> # include < set > <nl> # include < boost / algorithm / string / join . hpp > <nl> <nl> MergeTreeData : : DataPartPtr MergeTreePartsMover : : clonePart ( const MergeTreeMoveEnt <nl> moving_part . part - > makeCloneOnDiskDetached ( moving_part . reserved_space ) ; <nl> <nl> MergeTreeData : : MutableDataPartPtr cloned_part = <nl> - std : : make_shared < MergeTreeData : : DataPart > ( * data , moving_part . reserved_space - > getDisk ( ) , moving_part . 
part - > name ) ; <nl> - cloned_part - > relative_path = " detached / " + moving_part . part - > name ; <nl> + data - > createPart ( moving_part . part - > name , moving_part . reserved_space - > getDisk ( ) , " detached / " + moving_part . part - > name ) ; <nl> LOG_TRACE ( log , " Part " < < moving_part . part - > name < < " was cloned to " < < cloned_part - > getFullPath ( ) ) ; <nl> <nl> cloned_part - > loadColumnsChecksumsIndexes ( true , true ) ; <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreePartsMover . h <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreePartsMover . h <nl> <nl> # include < optional > <nl> # include < vector > <nl> # include < Disks / DiskSpaceMonitor . h > <nl> - # include < Storages / MergeTree / MergeTreeDataPart . h > <nl> + # include < Storages / MergeTree / IMergeTreeDataPart . h > <nl> # include < Common / ActionBlocker . h > <nl> <nl> namespace DB <nl> namespace DB <nl> / / / it have to be moved . <nl> struct MergeTreeMoveEntry <nl> { <nl> - std : : shared_ptr < const MergeTreeDataPart > part ; <nl> + std : : shared_ptr < const IMergeTreeDataPart > part ; <nl> ReservationPtr reserved_space ; <nl> <nl> - MergeTreeMoveEntry ( const std : : shared_ptr < const MergeTreeDataPart > & part_ , ReservationPtr reservation_ ) <nl> + MergeTreeMoveEntry ( const std : : shared_ptr < const IMergeTreeDataPart > & part_ , ReservationPtr reservation_ ) <nl> : part ( part_ ) , reserved_space ( std : : move ( reservation_ ) ) <nl> { <nl> } <nl> class MergeTreePartsMover <nl> { <nl> private : <nl> / / / Callback tells that part is not participating in background process <nl> - using AllowedMovingPredicate = std : : function < bool ( const std : : shared_ptr < const MergeTreeDataPart > & , String * reason ) > ; <nl> + using AllowedMovingPredicate = std : : function < bool ( const std : : shared_ptr < const IMergeTreeDataPart > & , String * reason ) > ; <nl> <nl> public : <nl> MergeTreePartsMover ( MergeTreeData * data_ ) <nl> class MergeTreePartsMover <nl> const std : : lock_guard < std : : mutex > & moving_parts_lock ) ; <nl> <nl> / / / Copies part to selected reservation in detached folder . Throws exception if part already exists . <nl> - std : : shared_ptr < const MergeTreeDataPart > clonePart ( const MergeTreeMoveEntry & moving_part ) const ; <nl> + std : : shared_ptr < const IMergeTreeDataPart > clonePart ( const MergeTreeMoveEntry & moving_part ) const ; <nl> <nl> / / / Replaces cloned part from detached directory into active data parts set . <nl> / / / Replacing part changes state to DeleteOnDestroy and will be removed from disk after destructor of <nl> - / / / MergeTreeDataPart called . If replacing part doesn ' t exists or not active ( commited ) than <nl> + / / / IMergeTreeDataPart called . If replacing part doesn ' t exists or not active ( commited ) than <nl> / / / cloned part will be removed and loge message will be reported . It may happen in case of concurrent <nl> / / / merge or mutation . <nl> - void swapClonedPart ( const std : : shared_ptr < const MergeTreeDataPart > & cloned_parts ) const ; <nl> + void swapClonedPart ( const std : : shared_ptr < const IMergeTreeDataPart > & cloned_parts ) const ; <nl> <nl> public : <nl> / / / Can stop background moves and moves from queries <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeRangeReader . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeRangeReader . cpp <nl> <nl> - # include < Storages / MergeTree / MergeTreeReader . 
h > <nl> + # include < Storages / MergeTree / IMergeTreeReader . h > <nl> # include < Columns / FilterDescription . h > <nl> # include < Columns / ColumnsCommon . h > <nl> # include < ext / range . h > <nl> namespace DB <nl> { <nl> <nl> MergeTreeRangeReader : : DelayedStream : : DelayedStream ( <nl> - size_t from_mark , MergeTreeReader * merge_tree_reader_ ) <nl> + size_t from_mark , IMergeTreeReader * merge_tree_reader_ ) <nl> : current_mark ( from_mark ) , current_offset ( 0 ) , num_delayed_rows ( 0 ) <nl> , merge_tree_reader ( merge_tree_reader_ ) <nl> , index_granularity ( & ( merge_tree_reader - > data_part - > index_granularity ) ) <nl> size_t MergeTreeRangeReader : : DelayedStream : : finalize ( Columns & columns ) <nl> <nl> <nl> MergeTreeRangeReader : : Stream : : Stream ( <nl> - size_t from_mark , size_t to_mark , MergeTreeReader * merge_tree_reader_ ) <nl> + size_t from_mark , size_t to_mark , IMergeTreeReader * merge_tree_reader_ ) <nl> : current_mark ( from_mark ) , offset_after_current_mark ( 0 ) <nl> , last_mark ( to_mark ) <nl> , merge_tree_reader ( merge_tree_reader_ ) <nl> void MergeTreeRangeReader : : ReadResult : : setFilterConstFalse ( ) <nl> num_rows = 0 ; <nl> } <nl> <nl> - void MergeTreeRangeReader : : ReadResult : : optimize ( ) <nl> + void MergeTreeRangeReader : : ReadResult : : optimize ( bool can_read_incomplete_granules ) <nl> { <nl> if ( total_rows_per_granule = = 0 | | filter = = nullptr ) <nl> return ; <nl> <nl> NumRows zero_tails ; <nl> - auto total_zero_rows_in_tails = countZeroTails ( filter - > getData ( ) , zero_tails ) ; <nl> + auto total_zero_rows_in_tails = countZeroTails ( filter - > getData ( ) , zero_tails , can_read_incomplete_granules ) ; <nl> <nl> if ( total_zero_rows_in_tails = = filter - > size ( ) ) <nl> { <nl> void MergeTreeRangeReader : : ReadResult : : optimize ( ) <nl> need_filter = true ; <nl> } <nl> <nl> - size_t MergeTreeRangeReader : : ReadResult : : countZeroTails ( const IColumn : : Filter & filter_vec , NumRows & zero_tails ) const <nl> + size_t MergeTreeRangeReader : : ReadResult : : countZeroTails ( const IColumn : : Filter & filter_vec , NumRows & zero_tails , bool can_read_incomplete_granules ) const <nl> { <nl> zero_tails . resize ( 0 ) ; <nl> zero_tails . reserve ( rows_per_granule . size ( ) ) ; <nl> size_t MergeTreeRangeReader : : ReadResult : : countZeroTails ( const IColumn : : Filter & <nl> for ( auto rows_to_read : rows_per_granule ) <nl> { <nl> / / / Count the number of zeros at the end of filter for rows were read from current granule . <nl> - zero_tails . push_back ( numZerosInTail ( filter_data , filter_data + rows_to_read ) ) ; <nl> + size_t zero_tail = numZerosInTail ( filter_data , filter_data + rows_to_read ) ; <nl> + if ( ! can_read_incomplete_granules & & zero_tail ! = rows_to_read ) <nl> + zero_tail = 0 ; <nl> + zero_tails . push_back ( zero_tail ) ; <nl> total_zero_rows_in_tails + = zero_tails . 
back ( ) ; <nl> filter_data + = rows_to_read ; <nl> } <nl> size_t MergeTreeRangeReader : : ReadResult : : countBytesInResultFilter ( const IColumn : <nl> } <nl> <nl> MergeTreeRangeReader : : MergeTreeRangeReader ( <nl> - MergeTreeReader * merge_tree_reader_ , <nl> + IMergeTreeReader * merge_tree_reader_ , <nl> MergeTreeRangeReader * prev_reader_ , <nl> const PrewhereInfoPtr & prewhere_ , <nl> bool last_reader_in_chain_ ) <nl> size_t MergeTreeRangeReader : : Stream : : numPendingRows ( ) const <nl> return rows_between_marks - offset_after_current_mark ; <nl> } <nl> <nl> + <nl> + size_t MergeTreeRangeReader : : Stream : : ceilRowsToCompleteGranules ( size_t rows_num ) const <nl> + { <nl> + / / / FIXME suboptimal <nl> + size_t result = 0 ; <nl> + size_t from_mark = current_mark ; <nl> + while ( result < rows_num & & from_mark < last_mark ) <nl> + result + = index_granularity - > getMarkRows ( from_mark + + ) ; <nl> + <nl> + return result ; <nl> + } <nl> + <nl> + <nl> bool MergeTreeRangeReader : : isCurrentRangeFinished ( ) const <nl> { <nl> return prev_reader ? prev_reader - > isCurrentRangeFinished ( ) : stream . isFinished ( ) ; <nl> MergeTreeRangeReader : : ReadResult MergeTreeRangeReader : : startReadingChain ( size_t <nl> ranges . pop_front ( ) ; <nl> } <nl> <nl> - auto rows_to_read = std : : min ( space_left , stream . numPendingRowsInCurrentGranule ( ) ) ; <nl> + size_t current_space = space_left ; <nl> + <nl> + / / / If reader can ' t read part of granule , we have to increase number of reading rows <nl> + / / / to read complete granules and exceed max_rows a bit . <nl> + if ( ! merge_tree_reader - > canReadIncompleteGranules ( ) ) <nl> + current_space = stream . ceilRowsToCompleteGranules ( space_left ) ; <nl> + <nl> + auto rows_to_read = std : : min ( current_space , stream . numPendingRowsInCurrentGranule ( ) ) ; <nl> + <nl> bool last = rows_to_read = = space_left ; <nl> result . addRows ( stream . read ( result . columns , rows_to_read , ! last ) ) ; <nl> result . addGranule ( rows_to_read ) ; <nl> - space_left - = rows_to_read ; <nl> + space_left = ( rows_to_read > space_left ? 0 : space_left - rows_to_read ) ; <nl> } <nl> } <nl> <nl> void MergeTreeRangeReader : : executePrewhereActionsAndFilterColumns ( ReadResult & r <nl> <nl> / / / If there is a WHERE , we filter in there , and only optimize IO and shrink columns here <nl> if ( ! last_reader_in_chain ) <nl> - result . optimize ( ) ; <nl> + result . optimize ( merge_tree_reader - > canReadIncompleteGranules ( ) ) ; <nl> <nl> / / / If we read nothing or filter gets optimized to nothing <nl> if ( result . totalRowsPerGranule ( ) = = 0 ) <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeRangeReader . h <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeRangeReader . 
h <nl> template < typename T > <nl> class ColumnVector ; <nl> using ColumnUInt8 = ColumnVector < UInt8 > ; <nl> <nl> - class MergeTreeReader ; <nl> + class IMergeTreeReader ; <nl> class MergeTreeIndexGranularity ; <nl> struct PrewhereInfo ; <nl> using PrewhereInfoPtr = std : : shared_ptr < PrewhereInfo > ; <nl> class MergeTreeRangeReader <nl> { <nl> public : <nl> MergeTreeRangeReader ( <nl> - MergeTreeReader * merge_tree_reader_ , <nl> + IMergeTreeReader * merge_tree_reader_ , <nl> MergeTreeRangeReader * prev_reader_ , <nl> const PrewhereInfoPtr & prewhere_ , <nl> bool last_reader_in_chain_ ) ; <nl> class MergeTreeRangeReader <nl> { <nl> public : <nl> DelayedStream ( ) = default ; <nl> - DelayedStream ( size_t from_mark , MergeTreeReader * merge_tree_reader ) ; <nl> + DelayedStream ( size_t from_mark , IMergeTreeReader * merge_tree_reader ) ; <nl> <nl> / / / Read @ num_rows rows from @ from_mark starting from @ offset row <nl> / / / Returns the number of rows added to block . <nl> class MergeTreeRangeReader <nl> size_t num_delayed_rows = 0 ; <nl> <nl> / / / Actual reader of data from disk <nl> - MergeTreeReader * merge_tree_reader = nullptr ; <nl> + IMergeTreeReader * merge_tree_reader = nullptr ; <nl> const MergeTreeIndexGranularity * index_granularity = nullptr ; <nl> bool continue_reading = false ; <nl> bool is_finished = true ; <nl> class MergeTreeRangeReader <nl> { <nl> public : <nl> Stream ( ) = default ; <nl> - Stream ( size_t from_mark , size_t to_mark , MergeTreeReader * merge_tree_reader ) ; <nl> + Stream ( size_t from_mark , size_t to_mark , IMergeTreeReader * merge_tree_reader ) ; <nl> <nl> / / / Returns the number of rows added to block . <nl> size_t read ( Columns & columns , size_t num_rows , bool skip_remaining_rows_in_current_granule ) ; <nl> class MergeTreeRangeReader <nl> <nl> size_t last_mark = 0 ; <nl> <nl> - MergeTreeReader * merge_tree_reader = nullptr ; <nl> + IMergeTreeReader * merge_tree_reader = nullptr ; <nl> const MergeTreeIndexGranularity * index_granularity = nullptr ; <nl> <nl> size_t current_mark_index_granularity = 0 ; <nl> class MergeTreeRangeReader <nl> void checkEnoughSpaceInCurrentGranule ( size_t num_rows ) const ; <nl> size_t readRows ( Columns & columns , size_t num_rows ) ; <nl> void toNextMark ( ) ; <nl> + size_t ceilRowsToCompleteGranules ( size_t rows_num ) const ; <nl> } ; <nl> <nl> / / / Statistics after next reading step . <nl> class MergeTreeRangeReader <nl> / / / Set filter or replace old one . Filter must have more zeroes than previous . <nl> void setFilter ( const ColumnPtr & new_filter ) ; <nl> / / / For each granule calculate the number of filtered rows at the end . Remove them and update filter . <nl> - void optimize ( ) ; <nl> + void optimize ( bool can_read_incomplete_granules ) ; <nl> / / / Remove all rows from granules . 
<nl> void clear ( ) ; <nl> <nl> class MergeTreeRangeReader <nl> const ColumnUInt8 * filter_original = nullptr ; <nl> <nl> void collapseZeroTails ( const IColumn : : Filter & filter , IColumn : : Filter & new_filter ) ; <nl> - size_t countZeroTails ( const IColumn : : Filter & filter , NumRows & zero_tails ) const ; <nl> + size_t countZeroTails ( const IColumn : : Filter & filter , NumRows & zero_tails , bool can_read_incomplete_granules ) const ; <nl> static size_t numZerosInTail ( const UInt8 * begin , const UInt8 * end ) ; <nl> <nl> std : : map < const IColumn : : Filter * , size_t > filter_bytes_map ; <nl> class MergeTreeRangeReader <nl> void executePrewhereActionsAndFilterColumns ( ReadResult & result ) ; <nl> void filterColumns ( Columns & columns , const IColumn : : Filter & filter ) const ; <nl> <nl> - MergeTreeReader * merge_tree_reader = nullptr ; <nl> + IMergeTreeReader * merge_tree_reader = nullptr ; <nl> const MergeTreeIndexGranularity * index_granularity = nullptr ; <nl> MergeTreeRangeReader * prev_reader = nullptr ; / / / If not nullptr , read from prev_reader firstly . <nl> PrewhereInfoPtr prewhere ; <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeReadPool . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeReadPool . cpp <nl> MergeTreeReadTaskPtr MergeTreeReadPool : : getTask ( const size_t min_marks_to_read , <nl> prewhere_info & & prewhere_info - > remove_prewhere_column , per_part_should_reorder [ part_idx ] , std : : move ( curr_task_size_predictor ) ) ; <nl> } <nl> <nl> - MarkRanges MergeTreeReadPool : : getRestMarks ( const MergeTreeDataPart & part , const MarkRange & from ) const <nl> + MarkRanges MergeTreeReadPool : : getRestMarks ( const IMergeTreeDataPart & part , const MarkRange & from ) const <nl> { <nl> MarkRanges all_part_ranges ; <nl> <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeReadPool . h <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeReadPool . h <nl> class MergeTreeReadPool : private boost : : noncopyable <nl> void profileFeedback ( const ReadBufferFromFileBase : : ProfileInfo info ) ; <nl> <nl> / / / This method tells which mark ranges we have to read if we start from @ from mark range <nl> - MarkRanges getRestMarks ( const MergeTreeDataPart & part , const MarkRange & from ) const ; <nl> + MarkRanges getRestMarks ( const IMergeTreeDataPart & part , const MarkRange & from ) const ; <nl> <nl> Block getHeader ( ) const ; <nl> <nl> new file mode 100644 <nl> index 00000000000 . . 533a66c0beb <nl> mmm / dev / null <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeReaderCompact . cpp <nl> <nl> + # include < Storages / MergeTree / MergeTreeReaderCompact . h > <nl> + # include < Storages / MergeTree / MergeTreeDataPartCompact . h > <nl> + # include < DataTypes / DataTypeArray . h > <nl> + # include < DataTypes / NestedUtils . h > <nl> + # include < Poco / File . 
h > <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + namespace ErrorCodes <nl> + { <nl> + extern const int LOGICAL_ERROR ; <nl> + extern const int NOT_FOUND_EXPECTED_DATA_PART ; <nl> + extern const int MEMORY_LIMIT_EXCEEDED ; <nl> + extern const int ARGUMENT_OUT_OF_BOUND ; <nl> + } <nl> + <nl> + <nl> + MergeTreeReaderCompact : : MergeTreeReaderCompact ( <nl> + const DataPartCompactPtr & data_part_ , <nl> + const NamesAndTypesList & columns_ , <nl> + UncompressedCache * uncompressed_cache_ , <nl> + MarkCache * mark_cache_ , <nl> + const MarkRanges & mark_ranges_ , <nl> + const MergeTreeReaderSettings & settings_ , <nl> + const ValueSizeMap & avg_value_size_hints_ , <nl> + const ReadBufferFromFileBase : : ProfileCallback & profile_callback_ , <nl> + clockid_t clock_type_ ) <nl> + : IMergeTreeReader ( data_part_ , columns_ , <nl> + uncompressed_cache_ , mark_cache_ , mark_ranges_ , <nl> + settings_ , avg_value_size_hints_ ) <nl> + , marks_loader ( mark_cache , <nl> + data_part - > index_granularity_info . getMarksFilePath ( path + MergeTreeDataPartCompact : : DATA_FILE_NAME ) , <nl> + data_part - > getMarksCount ( ) , data_part - > index_granularity_info , <nl> + settings . save_marks_in_cache , data_part - > getColumns ( ) . size ( ) ) <nl> + { <nl> + size_t buffer_size = settings . max_read_buffer_size ; <nl> + const String full_data_path = path + MergeTreeDataPartCompact : : DATA_FILE_NAME_WITH_EXTENSION ; <nl> + <nl> + if ( uncompressed_cache ) <nl> + { <nl> + auto buffer = std : : make_unique < CachedCompressedReadBuffer > ( <nl> + full_data_path , uncompressed_cache , 0 , settings . min_bytes_to_use_direct_io , buffer_size ) ; <nl> + <nl> + if ( profile_callback_ ) <nl> + buffer - > setProfileCallback ( profile_callback_ , clock_type_ ) ; <nl> + <nl> + cached_buffer = std : : move ( buffer ) ; <nl> + data_buffer = cached_buffer . get ( ) ; <nl> + } <nl> + else <nl> + { <nl> + auto buffer = std : : make_unique < CompressedReadBufferFromFile > ( <nl> + full_data_path , 0 , settings . min_bytes_to_use_direct_io , buffer_size ) ; <nl> + <nl> + if ( profile_callback_ ) <nl> + buffer - > setProfileCallback ( profile_callback_ , clock_type_ ) ; <nl> + <nl> + non_cached_buffer = std : : move ( buffer ) ; <nl> + data_buffer = non_cached_buffer . get ( ) ; <nl> + } <nl> + <nl> + size_t columns_num = columns . size ( ) ; <nl> + <nl> + column_positions . resize ( columns_num ) ; <nl> + read_only_offsets . resize ( columns_num ) ; <nl> + auto name_and_type = columns . begin ( ) ; <nl> + for ( size_t i = 0 ; i < columns_num ; + + i , + + name_and_type ) <nl> + { <nl> + const auto & [ name , type ] = * name_and_type ; <nl> + auto position = data_part - > getColumnPosition ( name ) ; <nl> + <nl> + / / / If array of Nested column is missing in part , <nl> + / / / we have to read it ' s offsets if they exists . <nl> + if ( ! position & & typeid_cast < const DataTypeArray * > ( type . get ( ) ) ) <nl> + { <nl> + position = findColumnForOffsets ( name ) ; <nl> + read_only_offsets [ i ] = ( position ! = std : : nullopt ) ; <nl> + } <nl> + <nl> + column_positions [ i ] = std : : move ( position ) ; <nl> + } <nl> + <nl> + } <nl> + <nl> + size_t MergeTreeReaderCompact : : readRows ( size_t from_mark , bool continue_reading , size_t max_rows_to_read , Columns & res_columns ) <nl> + { <nl> + if ( continue_reading ) <nl> + from_mark = next_mark ; <nl> + <nl> + size_t read_rows = 0 ; <nl> + size_t num_columns = columns . 
size ( ) ; <nl> + <nl> + MutableColumns mutable_columns ( num_columns ) ; <nl> + auto column_it = columns . begin ( ) ; <nl> + for ( size_t i = 0 ; i < num_columns ; + + i , + + column_it ) <nl> + { <nl> + if ( ! column_positions [ i ] ) <nl> + continue ; <nl> + <nl> + bool append = res_columns [ i ] ! = nullptr ; <nl> + if ( ! append ) <nl> + res_columns [ i ] = column_it - > type - > createColumn ( ) ; <nl> + mutable_columns [ i ] = res_columns [ i ] - > assumeMutable ( ) ; <nl> + } <nl> + <nl> + while ( read_rows < max_rows_to_read ) <nl> + { <nl> + size_t rows_to_read = data_part - > index_granularity . getMarkRows ( from_mark ) ; <nl> + <nl> + auto name_and_type = columns . begin ( ) ; <nl> + for ( size_t pos = 0 ; pos < num_columns ; + + pos , + + name_and_type ) <nl> + { <nl> + if ( ! res_columns [ pos ] ) <nl> + continue ; <nl> + <nl> + const auto & [ name , type ] = * name_and_type ; <nl> + auto & column = mutable_columns [ pos ] ; <nl> + <nl> + try <nl> + { <nl> + size_t column_size_before_reading = column - > size ( ) ; <nl> + <nl> + readData ( name , * column , * type , from_mark , * column_positions [ pos ] , rows_to_read , read_only_offsets [ pos ] ) ; <nl> + <nl> + size_t read_rows_in_column = column - > size ( ) - column_size_before_reading ; <nl> + <nl> + if ( read_rows_in_column < rows_to_read ) <nl> + throw Exception ( " Cannot read all data in MergeTreeReaderCompact . Rows read : " + toString ( read_rows_in_column ) + <nl> + " . Rows expected : " + toString ( rows_to_read ) + " . " , ErrorCodes : : CANNOT_READ_ALL_DATA ) ; <nl> + } <nl> + catch ( Exception & e ) <nl> + { <nl> + / / / Better diagnostics . <nl> + e . addMessage ( " ( while reading column " + name + " ) " ) ; <nl> + throw ; <nl> + } <nl> + } <nl> + <nl> + + + from_mark ; <nl> + read_rows + = rows_to_read ; <nl> + } <nl> + <nl> + for ( size_t i = 0 ; i < num_columns ; + + i ) <nl> + { <nl> + auto & column = mutable_columns [ i ] ; <nl> + if ( column & & column - > size ( ) ) <nl> + res_columns [ i ] = std : : move ( column ) ; <nl> + else <nl> + res_columns [ i ] = nullptr ; <nl> + } <nl> + <nl> + next_mark = from_mark ; <nl> + <nl> + return read_rows ; <nl> + } <nl> + <nl> + MergeTreeReaderCompact : : ColumnPosition MergeTreeReaderCompact : : findColumnForOffsets ( const String & column_name ) <nl> + { <nl> + String table_name = Nested : : extractTableName ( column_name ) ; <nl> + for ( const auto & part_column : data_part - > getColumns ( ) ) <nl> + { <nl> + if ( typeid_cast < const DataTypeArray * > ( part_column . type . get ( ) ) ) <nl> + { <nl> + auto position = data_part - > getColumnPosition ( part_column . name ) ; <nl> + if ( position & & Nested : : extractTableName ( part_column . name ) = = table_name ) <nl> + return position ; <nl> + } <nl> + } <nl> + <nl> + return { } ; <nl> + } <nl> + <nl> + <nl> + void MergeTreeReaderCompact : : readData ( <nl> + const String & name , IColumn & column , const IDataType & type , <nl> + size_t from_mark , size_t column_position , size_t rows_to_read , bool only_offsets ) <nl> + { <nl> + if ( ! isContinuousReading ( from_mark , column_position ) ) <nl> + seekToMark ( from_mark , column_position ) ; <nl> + <nl> + auto buffer_getter = [ & ] ( const IDataType : : SubstreamPath & substream_path ) - > ReadBuffer * <nl> + { <nl> + if ( only_offsets & & ( substream_path . size ( ) ! = 1 | | substream_path [ 0 ] . type ! 
= IDataType : : Substream : : ArraySizes ) ) <nl> + return nullptr ; <nl> + <nl> + return data_buffer ; <nl> + } ; <nl> + <nl> + IDataType : : DeserializeBinaryBulkSettings deserialize_settings ; <nl> + deserialize_settings . getter = buffer_getter ; <nl> + deserialize_settings . avg_value_size_hint = avg_value_size_hints [ name ] ; <nl> + deserialize_settings . position_independent_encoding = true ; <nl> + <nl> + IDataType : : DeserializeBinaryBulkStatePtr state ; <nl> + type . deserializeBinaryBulkStatePrefix ( deserialize_settings , state ) ; <nl> + type . deserializeBinaryBulkWithMultipleStreams ( column , rows_to_read , deserialize_settings , state ) ; <nl> + <nl> + / / / The buffer is left in inconsistent state after reading single offsets <nl> + if ( only_offsets ) <nl> + last_read_granule . reset ( ) ; <nl> + else <nl> + last_read_granule . emplace ( from_mark , column_position ) ; <nl> + } <nl> + <nl> + <nl> + void MergeTreeReaderCompact : : seekToMark ( size_t row_index , size_t column_index ) <nl> + { <nl> + MarkInCompressedFile mark = marks_loader . getMark ( row_index , column_index ) ; <nl> + try <nl> + { <nl> + if ( cached_buffer ) <nl> + cached_buffer - > seek ( mark . offset_in_compressed_file , mark . offset_in_decompressed_block ) ; <nl> + if ( non_cached_buffer ) <nl> + non_cached_buffer - > seek ( mark . offset_in_compressed_file , mark . offset_in_decompressed_block ) ; <nl> + } <nl> + catch ( Exception & e ) <nl> + { <nl> + / / / Better diagnostics . <nl> + if ( e . code ( ) = = ErrorCodes : : ARGUMENT_OUT_OF_BOUND ) <nl> + e . addMessage ( " ( while seeking to mark ( " + toString ( row_index ) + " , " + toString ( column_index ) + " ) " ) ; <nl> + <nl> + throw ; <nl> + } <nl> + } <nl> + <nl> + <nl> + bool MergeTreeReaderCompact : : isContinuousReading ( size_t mark , size_t column_position ) <nl> + { <nl> + if ( ! last_read_granule ) <nl> + return false ; <nl> + const auto & [ last_mark , last_column ] = * last_read_granule ; <nl> + return ( mark = = last_mark & & column_position = = last_column + 1 ) <nl> + | | ( mark = = last_mark + 1 & & column_position = = 0 & & last_column = = data_part - > getColumns ( ) . size ( ) - 1 ) ; <nl> + } <nl> + <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . efaf23d2988 <nl> mmm / dev / null <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeReaderCompact . h <nl> <nl> + # pragma once <nl> + <nl> + # include < Core / NamesAndTypes . h > <nl> + # include < Storages / MergeTree / IMergeTreeReader . h > <nl> + <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + class MergeTreeDataPartCompact ; <nl> + using DataPartCompactPtr = std : : shared_ptr < const MergeTreeDataPartCompact > ; <nl> + <nl> + / / / Reader for compact parts <nl> + class MergeTreeReaderCompact : public IMergeTreeReader <nl> + { <nl> + public : <nl> + MergeTreeReaderCompact ( <nl> + const DataPartCompactPtr & data_part_ , <nl> + const NamesAndTypesList & columns_ , <nl> + UncompressedCache * uncompressed_cache_ , <nl> + MarkCache * mark_cache_ , <nl> + const MarkRanges & mark_ranges_ , <nl> + const MergeTreeReaderSettings & settings_ , <nl> + const ValueSizeMap & avg_value_size_hints_ = ValueSizeMap { } , <nl> + const ReadBufferFromFileBase : : ProfileCallback & profile_callback_ = ReadBufferFromFileBase : : ProfileCallback { } , <nl> + clockid_t clock_type_ = CLOCK_MONOTONIC_COARSE ) ; <nl> + <nl> + / / / Return the number of rows has been read or zero if there is no columns to read . 
<nl> + / / / If continue_reading is true , continue reading from last state , otherwise seek to from_mark <nl> + size_t readRows ( size_t from_mark , bool continue_reading , size_t max_rows_to_read , Columns & res_columns ) override ; <nl> + <nl> + bool canReadIncompleteGranules ( ) const override { return false ; } <nl> + <nl> + private : <nl> + bool isContinuousReading ( size_t mark , size_t column_position ) ; <nl> + <nl> + ReadBuffer * data_buffer ; <nl> + std : : unique_ptr < CachedCompressedReadBuffer > cached_buffer ; <nl> + std : : unique_ptr < CompressedReadBufferFromFile > non_cached_buffer ; <nl> + <nl> + MergeTreeMarksLoader marks_loader ; <nl> + <nl> + using ColumnPosition = std : : optional < size_t > ; <nl> + / / / Positions of columns in part structure . <nl> + std : : vector < ColumnPosition > column_positions ; <nl> + / / / Should we read full column or only it ' s offsets <nl> + std : : vector < bool > read_only_offsets ; <nl> + <nl> + size_t next_mark = 0 ; <nl> + std : : optional < std : : pair < size_t , size_t > > last_read_granule ; <nl> + <nl> + void seekToMark ( size_t row_index , size_t column_index ) ; <nl> + <nl> + void readData ( const String & name , IColumn & column , const IDataType & type , <nl> + size_t from_mark , size_t column_position , size_t rows_to_read , bool only_offsets = false ) ; <nl> + <nl> + ColumnPosition findColumnForOffsets ( const String & column_name ) ; <nl> + } ; <nl> + <nl> + } <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeReaderStream . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeReaderStream . cpp <nl> namespace ErrorCodes <nl> MergeTreeReaderStream : : MergeTreeReaderStream ( <nl> const String & path_prefix_ , const String & data_file_extension_ , size_t marks_count_ , <nl> const MarkRanges & all_mark_ranges , <nl> - MarkCache * mark_cache_ , bool save_marks_in_cache_ , <nl> - UncompressedCache * uncompressed_cache , <nl> - size_t file_size , size_t aio_threshold , size_t mmap_threshold , size_t max_read_buffer_size , <nl> + const MergeTreeReaderSettings & settings , <nl> + MarkCache * mark_cache_ , <nl> + UncompressedCache * uncompressed_cache , size_t file_size , <nl> const MergeTreeIndexGranularityInfo * index_granularity_info_ , <nl> const ReadBufferFromFileBase : : ProfileCallback & profile_callback , clockid_t clock_type ) <nl> : path_prefix ( path_prefix_ ) , data_file_extension ( data_file_extension_ ) , marks_count ( marks_count_ ) <nl> - , mark_cache ( mark_cache_ ) , save_marks_in_cache ( save_marks_in_cache_ ) <nl> + , mark_cache ( mark_cache_ ) , save_marks_in_cache ( settings . save_marks_in_cache ) <nl> , index_granularity_info ( index_granularity_info_ ) <nl> + , marks_loader ( mark_cache , index_granularity_info - > getMarksFilePath ( path_prefix ) , <nl> + marks_count , * index_granularity_info , save_marks_in_cache ) <nl> { <nl> / / / Compute the size of the buffer . <nl> size_t max_mark_range_bytes = 0 ; <nl> size_t sum_mark_range_bytes = 0 ; <nl> <nl> - / / / Care should be taken to not load marks when the part is empty ( marks_count = = 0 ) . <nl> - <nl> for ( const auto & mark_range : all_mark_ranges ) <nl> { <nl> size_t left_mark = mark_range . begin ; <nl> MergeTreeReaderStream : : MergeTreeReaderStream ( <nl> / / / and we will use max_read_buffer_size for buffer size , thus avoiding the need to load marks . <nl> <nl> / / / If the end of range is inside the block , we will need to read it too . <nl> - if ( right_mark < marks_count & & getMark ( right_mark ) . 
offset_in_decompressed_block > 0 ) <nl> + if ( right_mark < marks_count & & marks_loader . getMark ( right_mark ) . offset_in_decompressed_block > 0 ) <nl> { <nl> while ( right_mark < marks_count <nl> - & & getMark ( right_mark ) . offset_in_compressed_file = = getMark ( mark_range . end ) . offset_in_compressed_file ) <nl> + & & marks_loader . getMark ( right_mark ) . offset_in_compressed_file = = marks_loader . getMark ( mark_range . end ) . offset_in_compressed_file ) <nl> { <nl> + + right_mark ; <nl> } <nl> MergeTreeReaderStream : : MergeTreeReaderStream ( <nl> / / / If there are no marks after the end of range , just use file size <nl> if ( right_mark > = marks_count <nl> | | ( right_mark + 1 = = marks_count <nl> - & & getMark ( right_mark ) . offset_in_compressed_file = = getMark ( mark_range . end ) . offset_in_compressed_file ) ) <nl> + & & marks_loader . getMark ( right_mark ) . offset_in_compressed_file = = marks_loader . getMark ( mark_range . end ) . offset_in_compressed_file ) ) <nl> { <nl> - mark_range_bytes = file_size - ( left_mark < marks_count ? getMark ( left_mark ) . offset_in_compressed_file : 0 ) ; <nl> + mark_range_bytes = file_size - ( left_mark < marks_count ? marks_loader . getMark ( left_mark ) . offset_in_compressed_file : 0 ) ; <nl> } <nl> else <nl> { <nl> - mark_range_bytes = getMark ( right_mark ) . offset_in_compressed_file - getMark ( left_mark ) . offset_in_compressed_file ; <nl> + mark_range_bytes = marks_loader . getMark ( right_mark ) . offset_in_compressed_file - marks_loader . getMark ( left_mark ) . offset_in_compressed_file ; <nl> } <nl> <nl> max_mark_range_bytes = std : : max ( max_mark_range_bytes , mark_range_bytes ) ; <nl> MergeTreeReaderStream : : MergeTreeReaderStream ( <nl> / / / Avoid empty buffer . May happen while reading dictionary for DataTypeLowCardinality . <nl> / / / For example : part has single dictionary and all marks point to the same position . <nl> if ( max_mark_range_bytes = = 0 ) <nl> - max_mark_range_bytes = max_read_buffer_size ; <nl> + max_mark_range_bytes = settings . max_read_buffer_size ; <nl> <nl> - size_t buffer_size = std : : min ( max_read_buffer_size , max_mark_range_bytes ) ; <nl> + size_t buffer_size = std : : min ( settings . max_read_buffer_size , max_mark_range_bytes ) ; <nl> <nl> / / / Initialize the objects that shall be used to perform read operations . <nl> if ( uncompressed_cache ) <nl> { <nl> auto buffer = std : : make_unique < CachedCompressedReadBuffer > ( <nl> - path_prefix + data_file_extension , uncompressed_cache , sum_mark_range_bytes , aio_threshold , mmap_threshold , buffer_size ) ; <nl> + path_prefix + data_file_extension , uncompressed_cache , sum_mark_range_bytes , <nl> + settings . min_bytes_to_use_direct_io , settings . min_bytes_to_use_mmap_io , buffer_size ) ; <nl> <nl> if ( profile_callback ) <nl> buffer - > setProfileCallback ( profile_callback , clock_type ) ; <nl> MergeTreeReaderStream : : MergeTreeReaderStream ( <nl> else <nl> { <nl> auto buffer = std : : make_unique < CompressedReadBufferFromFile > ( <nl> - path_prefix + data_file_extension , sum_mark_range_bytes , aio_threshold , mmap_threshold , buffer_size ) ; <nl> + path_prefix + data_file_extension , sum_mark_range_bytes , <nl> + settings . min_bytes_to_use_direct_io , settings . 
min_bytes_to_use_mmap_io , buffer_size ) ; <nl> <nl> if ( profile_callback ) <nl> buffer - > setProfileCallback ( profile_callback , clock_type ) ; <nl> MergeTreeReaderStream : : MergeTreeReaderStream ( <nl> } <nl> <nl> <nl> - const MarkInCompressedFile & MergeTreeReaderStream : : getMark ( size_t index ) <nl> - { <nl> - if ( ! marks ) <nl> - loadMarks ( ) ; <nl> - return ( * marks ) [ index ] ; <nl> - } <nl> - <nl> - <nl> - void MergeTreeReaderStream : : loadMarks ( ) <nl> - { <nl> - std : : string mrk_path = index_granularity_info - > getMarksFilePath ( path_prefix ) ; <nl> - <nl> - auto load = [ & ] ( ) - > MarkCache : : MappedPtr <nl> - { <nl> - / / / Memory for marks must not be accounted as memory usage for query , because they are stored in shared cache . <nl> - auto temporarily_disable_memory_tracker = getCurrentMemoryTrackerActionLock ( ) ; <nl> - <nl> - size_t file_size = Poco : : File ( mrk_path ) . getSize ( ) ; <nl> - size_t expected_file_size = index_granularity_info - > mark_size_in_bytes * marks_count ; <nl> - if ( expected_file_size ! = file_size ) <nl> - throw Exception ( <nl> - " Bad size of marks file ' " + mrk_path + " ' : " + std : : to_string ( file_size ) + " , must be : " + std : : to_string ( expected_file_size ) , <nl> - ErrorCodes : : CORRUPTED_DATA ) ; <nl> - <nl> - auto res = std : : make_shared < MarksInCompressedFile > ( marks_count ) ; <nl> - <nl> - if ( ! index_granularity_info - > is_adaptive ) <nl> - { <nl> - / / / Read directly to marks . <nl> - ReadBufferFromFile buffer ( mrk_path , file_size , - 1 , reinterpret_cast < char * > ( res - > data ( ) ) ) ; <nl> - <nl> - if ( buffer . eof ( ) | | buffer . buffer ( ) . size ( ) ! = file_size ) <nl> - throw Exception ( " Cannot read all marks from file " + mrk_path , ErrorCodes : : CANNOT_READ_ALL_DATA ) ; <nl> - } <nl> - else <nl> - { <nl> - ReadBufferFromFile buffer ( mrk_path , file_size , - 1 ) ; <nl> - size_t i = 0 ; <nl> - while ( ! buffer . eof ( ) ) <nl> - { <nl> - readIntBinary ( ( * res ) [ i ] . offset_in_compressed_file , buffer ) ; <nl> - readIntBinary ( ( * res ) [ i ] . offset_in_decompressed_block , buffer ) ; <nl> - buffer . seek ( sizeof ( size_t ) , SEEK_CUR ) ; <nl> - + + i ; <nl> - } <nl> - if ( i * index_granularity_info - > mark_size_in_bytes ! = file_size ) <nl> - throw Exception ( " Cannot read all marks from file " + mrk_path , ErrorCodes : : CANNOT_READ_ALL_DATA ) ; <nl> - } <nl> - res - > protect ( ) ; <nl> - return res ; <nl> - } ; <nl> - <nl> - if ( mark_cache ) <nl> - { <nl> - auto key = mark_cache - > hash ( mrk_path ) ; <nl> - if ( save_marks_in_cache ) <nl> - { <nl> - marks = mark_cache - > getOrSet ( key , load ) ; <nl> - } <nl> - else <nl> - { <nl> - marks = mark_cache - > get ( key ) ; <nl> - if ( ! marks ) <nl> - marks = load ( ) ; <nl> - } <nl> - } <nl> - else <nl> - marks = load ( ) ; <nl> - <nl> - if ( ! marks ) <nl> - throw Exception ( " Failed to load marks : " + mrk_path , ErrorCodes : : LOGICAL_ERROR ) ; <nl> - } <nl> - <nl> - <nl> void MergeTreeReaderStream : : seekToMark ( size_t index ) <nl> { <nl> - MarkInCompressedFile mark = getMark ( index ) ; <nl> + MarkInCompressedFile mark = marks_loader . getMark ( index ) ; <nl> <nl> try <nl> { <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeReaderStream . h <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeReaderStream . h <nl> <nl> # include < Storages / MergeTree / MergeTreeIndexGranularityInfo . h > <nl> # include < Compression / CachedCompressedReadBuffer . 
h > <nl> # include < Compression / CompressedReadBufferFromFile . h > <nl> + # include < Storages / MergeTree / MergeTreeIOSettings . h > <nl> + # include < Storages / MergeTree / MergeTreeMarksLoader . h > <nl> <nl> <nl> namespace DB <nl> class MergeTreeReaderStream <nl> MergeTreeReaderStream ( <nl> const String & path_prefix_ , const String & data_file_extension_ , size_t marks_count_ , <nl> const MarkRanges & all_mark_ranges , <nl> - MarkCache * mark_cache , bool save_marks_in_cache , <nl> - UncompressedCache * uncompressed_cache , <nl> - size_t file_size , size_t aio_threshold , size_t mmap_threshold , size_t max_read_buffer_size , <nl> - const MergeTreeIndexGranularityInfo * index_granularity_info_ , <nl> + const MergeTreeReaderSettings & settings_ , <nl> + MarkCache * mark_cache , UncompressedCache * uncompressed_cache , <nl> + size_t file_size , const MergeTreeIndexGranularityInfo * index_granularity_info_ , <nl> const ReadBufferFromFileBase : : ProfileCallback & profile_callback , clockid_t clock_type ) ; <nl> <nl> void seekToMark ( size_t index ) ; <nl> class MergeTreeReaderStream <nl> ReadBuffer * data_buffer ; <nl> <nl> private : <nl> - / / / NOTE : lazily loads marks from the marks cache . <nl> - const MarkInCompressedFile & getMark ( size_t index ) ; <nl> - <nl> - void loadMarks ( ) ; <nl> - <nl> std : : string path_prefix ; <nl> std : : string data_file_extension ; <nl> <nl> class MergeTreeReaderStream <nl> <nl> MarkCache * mark_cache ; <nl> bool save_marks_in_cache ; <nl> - MarkCache : : MappedPtr marks ; <nl> <nl> const MergeTreeIndexGranularityInfo * index_granularity_info ; <nl> <nl> std : : unique_ptr < CachedCompressedReadBuffer > cached_buffer ; <nl> std : : unique_ptr < CompressedReadBufferFromFile > non_cached_buffer ; <nl> + <nl> + MergeTreeMarksLoader marks_loader ; <nl> } ; <nl> } <nl> similarity index 51 % <nl> rename from dbms / src / Storages / MergeTree / MergeTreeReader . cpp <nl> rename to dbms / src / Storages / MergeTree / MergeTreeReaderWide . cpp <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeReader . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeReaderWide . cpp <nl> <nl> # include < Common / escapeForFileName . h > <nl> # include < Columns / ColumnArray . h > <nl> # include < Interpreters / evaluateMissingDefaults . h > <nl> - # include < Storages / MergeTree / MergeTreeReader . h > <nl> + # include < Storages / MergeTree / MergeTreeReaderWide . h > <nl> + # include < Storages / MergeTree / MergeTreeDataPartWide . h > <nl> # include < Common / typeid_cast . h > <nl> <nl> <nl> namespace DB <nl> namespace <nl> { <nl> using OffsetColumns = std : : map < std : : string , ColumnPtr > ; <nl> - <nl> constexpr auto DATA_FILE_EXTENSION = " . 
bin " ; <nl> } <nl> <nl> namespace ErrorCodes <nl> extern const int ARGUMENT_OUT_OF_BOUND ; <nl> } <nl> <nl> - <nl> - MergeTreeReader : : ~ MergeTreeReader ( ) = default ; <nl> - <nl> - <nl> - MergeTreeReader : : MergeTreeReader ( <nl> - String path_ , <nl> - MergeTreeData : : DataPartPtr data_part_ , <nl> - NamesAndTypesList columns_ , <nl> + MergeTreeReaderWide : : MergeTreeReaderWide ( <nl> + const DataPartWidePtr & data_part_ , <nl> + const NamesAndTypesList & columns_ , <nl> UncompressedCache * uncompressed_cache_ , <nl> MarkCache * mark_cache_ , <nl> - bool save_marks_in_cache_ , <nl> - const MergeTreeData & storage_ , <nl> - MarkRanges all_mark_ranges_ , <nl> - size_t aio_threshold_ , <nl> - size_t mmap_threshold_ , <nl> - size_t max_read_buffer_size_ , <nl> - ValueSizeMap avg_value_size_hints_ , <nl> + const MarkRanges & mark_ranges_ , <nl> + const MergeTreeReaderSettings & settings_ , <nl> + const ValueSizeMap & avg_value_size_hints_ , <nl> const ReadBufferFromFileBase : : ProfileCallback & profile_callback_ , <nl> clockid_t clock_type_ ) <nl> - : data_part ( std : : move ( data_part_ ) ) <nl> - , avg_value_size_hints ( std : : move ( avg_value_size_hints_ ) ) <nl> - , path ( std : : move ( path_ ) ) , columns ( std : : move ( columns_ ) ) <nl> - , uncompressed_cache ( uncompressed_cache_ ) <nl> - , mark_cache ( mark_cache_ ) <nl> - , save_marks_in_cache ( save_marks_in_cache_ ) <nl> - , storage ( storage_ ) <nl> - , all_mark_ranges ( std : : move ( all_mark_ranges_ ) ) <nl> - , aio_threshold ( aio_threshold_ ) <nl> - , mmap_threshold ( mmap_threshold_ ) <nl> - , max_read_buffer_size ( max_read_buffer_size_ ) <nl> + : IMergeTreeReader ( data_part_ , columns_ <nl> + , uncompressed_cache_ , mark_cache_ , mark_ranges_ <nl> + , settings_ , avg_value_size_hints_ ) <nl> { <nl> try <nl> { <nl> MergeTreeReader : : MergeTreeReader ( <nl> } <nl> <nl> <nl> - const MergeTreeReader : : ValueSizeMap & MergeTreeReader : : getAvgValueSizeHints ( ) const <nl> - { <nl> - return avg_value_size_hints ; <nl> - } <nl> - <nl> - <nl> - size_t MergeTreeReader : : readRows ( size_t from_mark , bool continue_reading , size_t max_rows_to_read , Columns & res_columns ) <nl> + size_t MergeTreeReaderWide : : readRows ( size_t from_mark , bool continue_reading , size_t max_rows_to_read , Columns & res_columns ) <nl> { <nl> size_t read_rows = 0 ; <nl> try <nl> size_t MergeTreeReader : : readRows ( size_t from_mark , bool continue_reading , size_t <nl> return read_rows ; <nl> } <nl> <nl> - void MergeTreeReader : : addStreams ( const String & name , const IDataType & type , <nl> + void MergeTreeReaderWide : : addStreams ( const String & name , const IDataType & type , <nl> const ReadBufferFromFileBase : : ProfileCallback & profile_callback , clockid_t clock_type ) <nl> { <nl> IDataType : : StreamCallback callback = [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> void MergeTreeReader : : addStreams ( const String & name , const IDataType & type , <nl> <nl> streams . 
emplace ( stream_name , std : : make_unique < MergeTreeReaderStream > ( <nl> path + stream_name , DATA_FILE_EXTENSION , data_part - > getMarksCount ( ) , <nl> - all_mark_ranges , mark_cache , save_marks_in_cache , <nl> + all_mark_ranges , settings , mark_cache , <nl> uncompressed_cache , data_part - > getFileSizeOrZero ( stream_name + DATA_FILE_EXTENSION ) , <nl> - aio_threshold , mmap_threshold , max_read_buffer_size , <nl> & data_part - > index_granularity_info , <nl> profile_callback , clock_type ) ) ; <nl> } ; <nl> void MergeTreeReader : : addStreams ( const String & name , const IDataType & type , <nl> } <nl> <nl> <nl> - void MergeTreeReader : : readData ( <nl> + void MergeTreeReaderWide : : readData ( <nl> const String & name , const IDataType & type , IColumn & column , <nl> size_t from_mark , bool continue_reading , size_t max_rows_to_read , <nl> bool with_offsets ) <nl> void MergeTreeReader : : readData ( <nl> } ; <nl> <nl> double & avg_value_size_hint = avg_value_size_hints [ name ] ; <nl> - IDataType : : DeserializeBinaryBulkSettings settings ; <nl> - settings . avg_value_size_hint = avg_value_size_hint ; <nl> + IDataType : : DeserializeBinaryBulkSettings deserialize_settings ; <nl> + deserialize_settings . avg_value_size_hint = avg_value_size_hint ; <nl> <nl> if ( deserialize_binary_bulk_state_map . count ( name ) = = 0 ) <nl> { <nl> - settings . getter = get_stream_getter ( true ) ; <nl> - type . deserializeBinaryBulkStatePrefix ( settings , deserialize_binary_bulk_state_map [ name ] ) ; <nl> + deserialize_settings . getter = get_stream_getter ( true ) ; <nl> + type . deserializeBinaryBulkStatePrefix ( deserialize_settings , deserialize_binary_bulk_state_map [ name ] ) ; <nl> } <nl> <nl> - settings . getter = get_stream_getter ( false ) ; <nl> - settings . continuous_reading = continue_reading ; <nl> + deserialize_settings . getter = get_stream_getter ( false ) ; <nl> + deserialize_settings . continuous_reading = continue_reading ; <nl> auto & deserialize_state = deserialize_binary_bulk_state_map [ name ] ; <nl> - type . deserializeBinaryBulkWithMultipleStreams ( column , max_rows_to_read , settings , deserialize_state ) ; <nl> + type . deserializeBinaryBulkWithMultipleStreams ( column , max_rows_to_read , deserialize_settings , deserialize_state ) ; <nl> IDataType : : updateAvgValueSizeHint ( column , avg_value_size_hint ) ; <nl> } <nl> <nl> - <nl> - static bool arrayHasNoElementsRead ( const IColumn & column ) <nl> - { <nl> - const auto * column_array = typeid_cast < const ColumnArray * > ( & column ) ; <nl> - <nl> - if ( ! column_array ) <nl> - return false ; <nl> - <nl> - size_t size = column_array - > size ( ) ; <nl> - if ( ! size ) <nl> - return false ; <nl> - <nl> - size_t data_size = column_array - > getData ( ) . size ( ) ; <nl> - if ( data_size ) <nl> - return false ; <nl> - <nl> - size_t last_offset = column_array - > getOffsets ( ) [ size - 1 ] ; <nl> - return last_offset ! = 0 ; <nl> - } <nl> - <nl> - <nl> - void MergeTreeReader : : fillMissingColumns ( Columns & res_columns , bool & should_evaluate_missing_defaults , size_t num_rows ) <nl> - { <nl> - try <nl> - { <nl> - size_t num_columns = columns . size ( ) ; <nl> - <nl> - if ( res_columns . size ( ) ! = num_columns ) <nl> - throw Exception ( " invalid number of columns passed to MergeTreeReader : : fillMissingColumns . " <nl> - " Expected " + toString ( num_columns ) + " , " <nl> - " got " + toString ( res_columns . 
size ( ) ) , ErrorCodes : : LOGICAL_ERROR ) ; <nl> - <nl> - / / / For a missing column of a nested data structure we must create not a column of empty <nl> - / / / arrays , but a column of arrays of correct length . <nl> - <nl> - / / / First , collect offset columns for all arrays in the block . <nl> - OffsetColumns offset_columns ; <nl> - auto requested_column = columns . begin ( ) ; <nl> - for ( size_t i = 0 ; i < num_columns ; + + i , + + requested_column ) <nl> - { <nl> - if ( res_columns [ i ] = = nullptr ) <nl> - continue ; <nl> - <nl> - if ( const auto * array = typeid_cast < const ColumnArray * > ( res_columns [ i ] . get ( ) ) ) <nl> - { <nl> - String offsets_name = Nested : : extractTableName ( requested_column - > name ) ; <nl> - auto & offsets_column = offset_columns [ offsets_name ] ; <nl> - <nl> - / / / If for some reason multiple offsets columns are present for the same nested data structure , <nl> - / / / choose the one that is not empty . <nl> - if ( ! offsets_column | | offsets_column - > empty ( ) ) <nl> - offsets_column = array - > getOffsetsPtr ( ) ; <nl> - } <nl> - } <nl> - <nl> - should_evaluate_missing_defaults = false ; <nl> - <nl> - / / / insert default values only for columns without default expressions <nl> - requested_column = columns . begin ( ) ; <nl> - for ( size_t i = 0 ; i < num_columns ; + + i , + + requested_column ) <nl> - { <nl> - auto & [ name , type ] = * requested_column ; <nl> - <nl> - if ( res_columns [ i ] & & arrayHasNoElementsRead ( * res_columns [ i ] ) ) <nl> - res_columns [ i ] = nullptr ; <nl> - <nl> - if ( res_columns [ i ] = = nullptr ) <nl> - { <nl> - if ( storage . getColumns ( ) . hasDefault ( name ) ) <nl> - { <nl> - should_evaluate_missing_defaults = true ; <nl> - continue ; <nl> - } <nl> - <nl> - String offsets_name = Nested : : extractTableName ( name ) ; <nl> - auto offset_it = offset_columns . find ( offsets_name ) ; <nl> - if ( offset_it ! = offset_columns . end ( ) ) <nl> - { <nl> - ColumnPtr offsets_column = offset_it - > second ; <nl> - DataTypePtr nested_type = typeid_cast < const DataTypeArray & > ( * type ) . getNestedType ( ) ; <nl> - size_t nested_rows = typeid_cast < const ColumnUInt64 & > ( * offsets_column ) . getData ( ) . back ( ) ; <nl> - <nl> - ColumnPtr nested_column = <nl> - nested_type - > createColumnConstWithDefaultValue ( nested_rows ) - > convertToFullColumnIfConst ( ) ; <nl> - <nl> - res_columns [ i ] = ColumnArray : : create ( nested_column , offsets_column ) ; <nl> - } <nl> - else <nl> - { <nl> - / / / We must turn a constant column into a full column because the interpreter could infer <nl> - / / / that it is constant everywhere but in some blocks ( from other parts ) it can be a full column . <nl> - res_columns [ i ] = type - > createColumnConstWithDefaultValue ( num_rows ) - > convertToFullColumnIfConst ( ) ; <nl> - } <nl> - } <nl> - } <nl> - } <nl> - catch ( Exception & e ) <nl> - { <nl> - / / / Better diagnostics . <nl> - e . addMessage ( " ( while reading from part " + path + " ) " ) ; <nl> - throw ; <nl> - } <nl> - } <nl> - <nl> - void MergeTreeReader : : evaluateMissingDefaults ( Block additional_columns , Columns & res_columns ) <nl> - { <nl> - try <nl> - { <nl> - size_t num_columns = columns . size ( ) ; <nl> - <nl> - if ( res_columns . size ( ) ! = num_columns ) <nl> - throw Exception ( " invalid number of columns passed to MergeTreeReader : : fillMissingColumns . " <nl> - " Expected " + toString ( num_columns ) + " , " <nl> - " got " + toString ( res_columns . 
size ( ) ) , ErrorCodes : : LOGICAL_ERROR ) ; <nl> - <nl> - / / / Convert columns list to block . <nl> - / / / TODO : rewrite with columns interface . It wll be possible after changes in ExpressionActions . <nl> - auto name_and_type = columns . begin ( ) ; <nl> - for ( size_t pos = 0 ; pos < num_columns ; + + pos , + + name_and_type ) <nl> - { <nl> - if ( res_columns [ pos ] = = nullptr ) <nl> - continue ; <nl> - <nl> - additional_columns . insert ( { res_columns [ pos ] , name_and_type - > type , name_and_type - > name } ) ; <nl> - } <nl> - <nl> - DB : : evaluateMissingDefaults ( additional_columns , columns , storage . getColumns ( ) . getDefaults ( ) , storage . global_context ) ; <nl> - <nl> - / / / Move columns from block . <nl> - name_and_type = columns . begin ( ) ; <nl> - for ( size_t pos = 0 ; pos < num_columns ; + + pos , + + name_and_type ) <nl> - res_columns [ pos ] = std : : move ( additional_columns . getByName ( name_and_type - > name ) . column ) ; <nl> - } <nl> - catch ( Exception & e ) <nl> - { <nl> - / / / Better diagnostics . <nl> - e . addMessage ( " ( while reading from part " + path + " ) " ) ; <nl> - throw ; <nl> - } <nl> - } <nl> - <nl> } <nl> new file mode 100644 <nl> index 00000000000 . . ca4822f932d <nl> mmm / dev / null <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeReaderWide . h <nl> <nl> + # pragma once <nl> + <nl> + # include < Core / NamesAndTypes . h > <nl> + # include < Storages / MergeTree / IMergeTreeReader . h > <nl> + <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + class MergeTreeDataPartWide ; <nl> + using DataPartWidePtr = std : : shared_ptr < const MergeTreeDataPartWide > ; <nl> + <nl> + / / / Reader for Wide parts . <nl> + class MergeTreeReaderWide : public IMergeTreeReader <nl> + { <nl> + public : <nl> + MergeTreeReaderWide ( <nl> + const DataPartWidePtr & data_part_ , <nl> + const NamesAndTypesList & columns_ , <nl> + UncompressedCache * uncompressed_cache_ , <nl> + MarkCache * mark_cache_ , <nl> + const MarkRanges & mark_ranges_ , <nl> + const MergeTreeReaderSettings & settings_ , <nl> + const ValueSizeMap & avg_value_size_hints_ = ValueSizeMap { } , <nl> + const ReadBufferFromFileBase : : ProfileCallback & profile_callback_ = ReadBufferFromFileBase : : ProfileCallback { } , <nl> + clockid_t clock_type_ = CLOCK_MONOTONIC_COARSE ) ; <nl> + <nl> + / / / Return the number of rows has been read or zero if there is no columns to read . <nl> + / / / If continue_reading is true , continue reading from last state , otherwise seek to from_mark <nl> + size_t readRows ( size_t from_mark , bool continue_reading , size_t max_rows_to_read , Columns & res_columns ) override ; <nl> + <nl> + bool canReadIncompleteGranules ( ) const override { return true ; } <nl> + <nl> + private : <nl> + using FileStreams = std : : map < std : : string , std : : unique_ptr < MergeTreeReaderStream > > ; <nl> + <nl> + FileStreams streams ; <nl> + <nl> + void addStreams ( const String & name , const IDataType & type , <nl> + const ReadBufferFromFileBase : : ProfileCallback & profile_callback , clockid_t clock_type ) ; <nl> + <nl> + void readData ( <nl> + const String & name , const IDataType & type , IColumn & column , <nl> + size_t from_mark , bool continue_reading , size_t max_rows_to_read , <nl> + bool read_offsets = true ) ; <nl> + } ; <nl> + <nl> + } <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeReverseSelectProcessor . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeReverseSelectProcessor . 
cpp <nl> <nl> # include < Storages / MergeTree / MergeTreeReverseSelectProcessor . h > <nl> # include < Storages / MergeTree / MergeTreeBaseSelectProcessor . h > <nl> - # include < Storages / MergeTree / MergeTreeReader . h > <nl> + # include < Storages / MergeTree / IMergeTreeReader . h > <nl> <nl> <nl> namespace DB <nl> static Block replaceTypes ( Block & & header , const MergeTreeData : : DataPartPtr & da <nl> { <nl> / / / Types may be different during ALTER ( when this stream is used to perform an ALTER ) . <nl> / / / NOTE : We may use similar code to implement non blocking ALTERs . <nl> - for ( const auto & name_type : data_part - > columns ) <nl> + for ( const auto & name_type : data_part - > getColumns ( ) ) <nl> { <nl> if ( header . has ( name_type . name ) ) <nl> { <nl> MergeTreeReverseSelectProcessor : : MergeTreeReverseSelectProcessor ( <nl> bool use_uncompressed_cache_ , <nl> const PrewhereInfoPtr & prewhere_info_ , <nl> bool check_columns , <nl> - size_t min_bytes_to_use_direct_io_ , <nl> - size_t min_bytes_to_use_mmap_io_ , <nl> - size_t max_read_buffer_size_ , <nl> - bool save_marks_in_cache_ , <nl> + const MergeTreeReaderSettings & reader_settings_ , <nl> const Names & virt_column_names_ , <nl> size_t part_index_in_query_ , <nl> bool quiet ) <nl> MergeTreeReverseSelectProcessor : : MergeTreeReverseSelectProcessor ( <nl> MergeTreeBaseSelectProcessor { <nl> replaceTypes ( storage_ . getSampleBlockForColumns ( required_columns_ ) , owned_data_part_ ) , <nl> storage_ , prewhere_info_ , max_block_size_rows_ , <nl> - preferred_block_size_bytes_ , preferred_max_column_in_block_size_bytes_ , min_bytes_to_use_direct_io_ , min_bytes_to_use_mmap_io_ , <nl> - max_read_buffer_size_ , use_uncompressed_cache_ , save_marks_in_cache_ , virt_column_names_ } , <nl> + preferred_block_size_bytes_ , preferred_max_column_in_block_size_bytes_ , <nl> + reader_settings_ , use_uncompressed_cache_ , virt_column_names_ } , <nl> required_columns { std : : move ( required_columns_ ) } , <nl> data_part { owned_data_part_ } , <nl> part_columns_lock ( data_part - > columns_lock ) , <nl> MergeTreeReverseSelectProcessor : : MergeTreeReverseSelectProcessor ( <nl> <nl> owned_mark_cache = storage . global_context . getMarkCache ( ) ; <nl> <nl> - reader = std : : make_unique < MergeTreeReader > ( <nl> - path , data_part , task_columns . columns , owned_uncompressed_cache . get ( ) , <nl> - owned_mark_cache . get ( ) , save_marks_in_cache , storage , <nl> - all_mark_ranges , min_bytes_to_use_direct_io , min_bytes_to_use_mmap_io , max_read_buffer_size ) ; <nl> + reader = data_part - > getReader ( task_columns . columns , all_mark_ranges , <nl> + owned_uncompressed_cache . get ( ) , owned_mark_cache . get ( ) , reader_settings ) ; <nl> <nl> if ( prewhere_info ) <nl> - pre_reader = std : : make_unique < MergeTreeReader > ( <nl> - path , data_part , task_columns . pre_columns , owned_uncompressed_cache . get ( ) , <nl> - owned_mark_cache . get ( ) , save_marks_in_cache , storage , <nl> - all_mark_ranges , min_bytes_to_use_direct_io , min_bytes_to_use_mmap_io , max_read_buffer_size ) ; <nl> + pre_reader = data_part - > getReader ( task_columns . pre_columns , all_mark_ranges , <nl> + owned_uncompressed_cache . get ( ) , owned_mark_cache . get ( ) , reader_settings ) ; <nl> } <nl> <nl> bool MergeTreeReverseSelectProcessor : : getNewTask ( ) <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeReverseSelectProcessor . h <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeReverseSelectProcessor . 
h <nl> class MergeTreeReverseSelectProcessor : public MergeTreeBaseSelectProcessor <nl> bool use_uncompressed_cache , <nl> const PrewhereInfoPtr & prewhere_info , <nl> bool check_columns , <nl> - size_t min_bytes_to_use_direct_io , <nl> - size_t min_bytes_to_use_mmap_io , <nl> - size_t max_read_buffer_size , <nl> - bool save_marks_in_cache , <nl> + const MergeTreeReaderSettings & reader_settings , <nl> const Names & virt_column_names = { } , <nl> size_t part_index_in_query = 0 , <nl> bool quiet = false ) ; <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeSelectProcessor . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeSelectProcessor . cpp <nl> <nl> # include < Storages / MergeTree / MergeTreeSelectProcessor . h > <nl> # include < Storages / MergeTree / MergeTreeBaseSelectProcessor . h > <nl> - # include < Storages / MergeTree / MergeTreeReader . h > <nl> + # include < Storages / MergeTree / IMergeTreeReader . h > <nl> <nl> <nl> namespace DB <nl> static Block replaceTypes ( Block & & header , const MergeTreeData : : DataPartPtr & da <nl> { <nl> / / / Types may be different during ALTER ( when this stream is used to perform an ALTER ) . <nl> / / / NOTE : We may use similar code to implement non blocking ALTERs . <nl> - for ( const auto & name_type : data_part - > columns ) <nl> + for ( const auto & name_type : data_part - > getColumns ( ) ) <nl> { <nl> if ( header . has ( name_type . name ) ) <nl> { <nl> MergeTreeSelectProcessor : : MergeTreeSelectProcessor ( <nl> bool use_uncompressed_cache_ , <nl> const PrewhereInfoPtr & prewhere_info_ , <nl> bool check_columns_ , <nl> - size_t min_bytes_to_use_direct_io_ , <nl> - size_t min_bytes_to_use_mmap_io_ , <nl> - size_t max_read_buffer_size_ , <nl> - bool save_marks_in_cache_ , <nl> + const MergeTreeReaderSettings & reader_settings_ , <nl> const Names & virt_column_names_ , <nl> size_t part_index_in_query_ , <nl> bool quiet ) <nl> MergeTreeSelectProcessor : : MergeTreeSelectProcessor ( <nl> MergeTreeBaseSelectProcessor { <nl> replaceTypes ( storage_ . getSampleBlockForColumns ( required_columns_ ) , owned_data_part_ ) , <nl> storage_ , prewhere_info_ , max_block_size_rows_ , <nl> - preferred_block_size_bytes_ , preferred_max_column_in_block_size_bytes_ , min_bytes_to_use_direct_io_ , min_bytes_to_use_mmap_io_ , <nl> - max_read_buffer_size_ , use_uncompressed_cache_ , save_marks_in_cache_ , virt_column_names_ } , <nl> + preferred_block_size_bytes_ , preferred_max_column_in_block_size_bytes_ , <nl> + reader_settings_ , use_uncompressed_cache_ , virt_column_names_ } , <nl> required_columns { std : : move ( required_columns_ ) } , <nl> data_part { owned_data_part_ } , <nl> part_columns_lock ( data_part - > columns_lock ) , <nl> try <nl> <nl> owned_mark_cache = storage . global_context . getMarkCache ( ) ; <nl> <nl> - reader = std : : make_unique < MergeTreeReader > ( <nl> - path , data_part , task_columns . columns , owned_uncompressed_cache . get ( ) , <nl> - owned_mark_cache . get ( ) , save_marks_in_cache , storage , <nl> - all_mark_ranges , min_bytes_to_use_direct_io , min_bytes_to_use_mmap_io , max_read_buffer_size ) ; <nl> + reader = data_part - > getReader ( task_columns . columns , all_mark_ranges , <nl> + owned_uncompressed_cache . get ( ) , owned_mark_cache . get ( ) , reader_settings ) ; <nl> <nl> if ( prewhere_info ) <nl> - pre_reader = std : : make_unique < MergeTreeReader > ( <nl> - path , data_part , task_columns . pre_columns , owned_uncompressed_cache . get ( ) , <nl> - owned_mark_cache . 
get ( ) , save_marks_in_cache , storage , <nl> - all_mark_ranges , min_bytes_to_use_direct_io , min_bytes_to_use_mmap_io , max_read_buffer_size ) ; <nl> + pre_reader = data_part - > getReader ( task_columns . pre_columns , all_mark_ranges , <nl> + owned_uncompressed_cache . get ( ) , owned_mark_cache . get ( ) , reader_settings ) ; <nl> } <nl> <nl> return true ; <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeSelectProcessor . h <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeSelectProcessor . h <nl> class MergeTreeSelectProcessor : public MergeTreeBaseSelectProcessor <nl> bool use_uncompressed_cache , <nl> const PrewhereInfoPtr & prewhere_info , <nl> bool check_columns , <nl> - size_t min_bytes_to_use_direct_io , <nl> - size_t min_bytes_to_use_mmap_io , <nl> - size_t max_read_buffer_size , <nl> - bool save_marks_in_cache , <nl> + const MergeTreeReaderSettings & reader_settings , <nl> const Names & virt_column_names = { } , <nl> size_t part_index_in_query = 0 , <nl> bool quiet = false ) ; <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeSequentialBlockInputStream . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeSequentialBlockInputStream . cpp <nl> MergeTreeSequentialBlockInputStream : : MergeTreeSequentialBlockInputStream ( <nl> else <nl> { <nl> / / / take columns from data_part <nl> - columns_for_reader = data_part - > columns . addTypes ( columns_to_read ) ; <nl> + columns_for_reader = data_part - > getColumns ( ) . addTypes ( columns_to_read ) ; <nl> } <nl> <nl> - reader = std : : make_unique < MergeTreeReader > ( <nl> - data_part - > getFullPath ( ) , data_part , columns_for_reader , / * uncompressed_cache = * / nullptr , <nl> - mark_cache . get ( ) , / * save_marks_in_cache = * / false , storage , <nl> + MergeTreeReaderSettings reader_settings = <nl> + { <nl> + / / / bytes to use AIO ( this is hack ) <nl> + . min_bytes_to_use_direct_io = read_with_direct_io ? 1UL : std : : numeric_limits < size_t > : : max ( ) , <nl> + . max_read_buffer_size = DBMS_DEFAULT_BUFFER_SIZE , <nl> + . save_marks_in_cache = false <nl> + } ; <nl> + <nl> + reader = data_part - > getReader ( columns_for_reader , <nl> MarkRanges { MarkRange ( 0 , data_part - > getMarksCount ( ) ) } , <nl> - / * bytes to use AIO ( this is hack ) * / <nl> - read_with_direct_io ? 1UL : std : : numeric_limits < size_t > : : max ( ) , <nl> - 0 , DBMS_DEFAULT_BUFFER_SIZE ) ; <nl> + / * uncompressed_cache = * / nullptr , mark_cache . get ( ) , reader_settings ) ; <nl> } <nl> <nl> <nl> void MergeTreeSequentialBlockInputStream : : fixHeader ( Block & header_block ) const <nl> { <nl> / / / Types may be different during ALTER ( when this stream is used to perform an ALTER ) . <nl> - for ( const auto & name_type : data_part - > columns ) <nl> + for ( const auto & name_type : data_part - > getColumns ( ) ) <nl> { <nl> if ( header_block . has ( name_type . name ) ) <nl> { <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeSequentialBlockInputStream . h <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeSequentialBlockInputStream . h <nl> <nl> # pragma once <nl> # include < DataStreams / IBlockInputStream . h > <nl> # include < Storages / MergeTree / MergeTreeData . h > <nl> - # include < Storages / MergeTree / MergeTreeReader . h > <nl> + # include < Storages / MergeTree / IMergeTreeReader . h > <nl> # include < Storages / MergeTree / MarkRange . 
h > <nl> # include < memory > <nl> <nl> class MergeTreeSequentialBlockInputStream : public IBlockInputStream <nl> Logger * log = & Logger : : get ( " MergeTreeSequentialBlockInputStream " ) ; <nl> <nl> std : : shared_ptr < MarkCache > mark_cache ; <nl> - using MergeTreeReaderPtr = std : : unique_ptr < MergeTreeReader > ; <nl> + using MergeTreeReaderPtr = std : : unique_ptr < IMergeTreeReader > ; <nl> MergeTreeReaderPtr reader ; <nl> <nl> / / / current mark at which we stop reading <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeSettings . h <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeSettings . h <nl> struct MergeTreeSettings : public SettingsCollection < MergeTreeSettings > <nl> # define LIST_OF_MERGE_TREE_SETTINGS ( M ) \ <nl> M ( SettingUInt64 , index_granularity , 8192 , " How many rows correspond to one primary key value . " , 0 ) \ <nl> \ <nl> + / * * Data storing format settings . * / \ <nl> + M ( SettingUInt64 , min_bytes_for_wide_part , 0 , " Minimal uncompressed size in bytes to create part in wide format instead of compact " , 0 ) \ <nl> + M ( SettingUInt64 , min_rows_for_wide_part , 0 , " Minimal number of rows to create part in wide format instead of compact " , 0 ) \ <nl> + \ <nl> / * * Merge settings . * / \ <nl> M ( SettingUInt64 , merge_max_block_size , DEFAULT_MERGE_BLOCK_SIZE , " How many rows in blocks should be formed for merge operations . " , 0 ) \ <nl> M ( SettingUInt64 , max_bytes_to_merge_at_max_space_in_pool , 150ULL * 1024 * 1024 * 1024 , " Maximum in total size of parts to merge , when there are maximum free threads in background pool ( or entries in replication queue ) . " , 0 ) \ <nl> struct MergeTreeSettings : public SettingsCollection < MergeTreeSettings > <nl> { <nl> return name = = " index_granularity " | | name = = " index_granularity_bytes " ; <nl> } <nl> + <nl> + static bool isPartFormatSetting ( const String & name ) <nl> + { <nl> + return name = = " min_bytes_for_wide_part " | | name = = " min_rows_for_wide_part " ; <nl> + } <nl> } ; <nl> <nl> using MergeTreeSettingsPtr = std : : shared_ptr < const MergeTreeSettings > ; <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeThreadSelectBlockInputProcessor . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeThreadSelectBlockInputProcessor . cpp <nl> <nl> - # include < Storages / MergeTree / MergeTreeReader . h > <nl> + # include < Storages / MergeTree / IMergeTreeReader . h > <nl> # include < Storages / MergeTree / MergeTreeReadPool . h > <nl> # include < Storages / MergeTree / MergeTreeThreadSelectBlockInputProcessor . h > <nl> <nl> MergeTreeThreadSelectBlockInputProcessor : : MergeTreeThreadSelectBlockInputProcess <nl> const MergeTreeData & storage_ , <nl> const bool use_uncompressed_cache_ , <nl> const PrewhereInfoPtr & prewhere_info_ , <nl> - const Settings & settings , <nl> + const MergeTreeReaderSettings & reader_settings_ , <nl> const Names & virt_column_names_ ) <nl> : <nl> - MergeTreeBaseSelectProcessor { <nl> - pool_ - > getHeader ( ) , storage_ , prewhere_info_ , max_block_size_rows_ , <nl> + MergeTreeBaseSelectProcessor { pool_ - > getHeader ( ) , storage_ , prewhere_info_ , max_block_size_rows_ , <nl> preferred_block_size_bytes_ , preferred_max_column_in_block_size_bytes_ , <nl> - settings . min_bytes_to_use_direct_io , settings . min_bytes_to_use_mmap_io , settings . 
max_read_buffer_size , <nl> - use_uncompressed_cache_ , true , virt_column_names_ } , <nl> + reader_settings_ , use_uncompressed_cache_ , virt_column_names_ } , <nl> thread { thread_ } , <nl> pool { pool_ } <nl> { <nl> bool MergeTreeThreadSelectBlockInputProcessor : : getNewTask ( ) <nl> owned_uncompressed_cache = storage . global_context . getUncompressedCache ( ) ; <nl> owned_mark_cache = storage . global_context . getMarkCache ( ) ; <nl> <nl> - reader = std : : make_unique < MergeTreeReader > ( <nl> - path , task - > data_part , task - > columns , owned_uncompressed_cache . get ( ) , owned_mark_cache . get ( ) , save_marks_in_cache , <nl> - storage , rest_mark_ranges , min_bytes_to_use_direct_io , min_bytes_to_use_mmap_io , max_read_buffer_size , MergeTreeReader : : ValueSizeMap { } , profile_callback ) ; <nl> + reader = task - > data_part - > getReader ( task - > columns , rest_mark_ranges , <nl> + owned_uncompressed_cache . get ( ) , owned_mark_cache . get ( ) , reader_settings , <nl> + IMergeTreeReader : : ValueSizeMap { } , profile_callback ) ; <nl> <nl> if ( prewhere_info ) <nl> - pre_reader = std : : make_unique < MergeTreeReader > ( <nl> - path , task - > data_part , task - > pre_columns , owned_uncompressed_cache . get ( ) , owned_mark_cache . get ( ) , save_marks_in_cache , <nl> - storage , rest_mark_ranges , min_bytes_to_use_direct_io , min_bytes_to_use_mmap_io , <nl> - max_read_buffer_size , MergeTreeReader : : ValueSizeMap { } , profile_callback ) ; <nl> + pre_reader = task - > data_part - > getReader ( task - > pre_columns , rest_mark_ranges , <nl> + owned_uncompressed_cache . get ( ) , owned_mark_cache . get ( ) , reader_settings , <nl> + IMergeTreeReader : : ValueSizeMap { } , profile_callback ) ; <nl> } <nl> else <nl> { <nl> bool MergeTreeThreadSelectBlockInputProcessor : : getNewTask ( ) <nl> { <nl> auto rest_mark_ranges = pool - > getRestMarks ( * task - > data_part , task - > mark_ranges [ 0 ] ) ; <nl> / / / retain avg_value_size_hints <nl> - reader = std : : make_unique < MergeTreeReader > ( <nl> - path , task - > data_part , task - > columns , owned_uncompressed_cache . get ( ) , owned_mark_cache . get ( ) , save_marks_in_cache , <nl> - storage , rest_mark_ranges , min_bytes_to_use_direct_io , min_bytes_to_use_mmap_io , max_read_buffer_size , <nl> + reader = task - > data_part - > getReader ( task - > columns , rest_mark_ranges , <nl> + owned_uncompressed_cache . get ( ) , owned_mark_cache . get ( ) , reader_settings , <nl> reader - > getAvgValueSizeHints ( ) , profile_callback ) ; <nl> <nl> if ( prewhere_info ) <nl> - pre_reader = std : : make_unique < MergeTreeReader > ( <nl> - path , task - > data_part , task - > pre_columns , owned_uncompressed_cache . get ( ) , owned_mark_cache . get ( ) , save_marks_in_cache , <nl> - storage , rest_mark_ranges , min_bytes_to_use_direct_io , min_bytes_to_use_mmap_io , <nl> - max_read_buffer_size , pre_reader - > getAvgValueSizeHints ( ) , profile_callback ) ; <nl> + pre_reader = task - > data_part - > getReader ( task - > pre_columns , rest_mark_ranges , <nl> + owned_uncompressed_cache . get ( ) , owned_mark_cache . get ( ) , reader_settings , <nl> + reader - > getAvgValueSizeHints ( ) , profile_callback ) ; <nl> } <nl> } <nl> + <nl> last_readed_part_path = path ; <nl> <nl> return true ; <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeThreadSelectBlockInputProcessor . h <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeThreadSelectBlockInputProcessor . 
h <nl> class MergeTreeThreadSelectBlockInputProcessor : public MergeTreeBaseSelectProce <nl> const MergeTreeData & storage_ , <nl> const bool use_uncompressed_cache_ , <nl> const PrewhereInfoPtr & prewhere_info_ , <nl> - const Settings & settings_ , <nl> + const MergeTreeReaderSettings & reader_settings_ , <nl> const Names & virt_column_names_ ) ; <nl> <nl> String getName ( ) const override { return " MergeTreeThread " ; } <nl> mmm a / dbms / src / Storages / MergeTree / MergeTreeWhereOptimizer . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeWhereOptimizer . cpp <nl> void MergeTreeWhereOptimizer : : analyzeImpl ( Conditions & res , const ASTPtr & node ) <nl> <nl> collectIdentifiersNoSubqueries ( node , cond . identifiers ) ; <nl> <nl> + cond . columns_size = getIdentifiersColumnSize ( cond . identifiers ) ; <nl> + <nl> cond . viable = <nl> / / / Condition depend on some column . Constant expressions are not moved . <nl> ! cond . identifiers . empty ( ) <nl> void MergeTreeWhereOptimizer : : analyzeImpl ( Conditions & res , const ASTPtr & node ) <nl> / / / Only table columns are considered . Not array joined columns . NOTE We ' re assuming that aliases was expanded . <nl> & & isSubsetOfTableColumns ( cond . identifiers ) <nl> / / / Do not move conditions involving all queried columns . <nl> - & & cond . identifiers . size ( ) < queried_columns . size ( ) ; <nl> + & & cond . identifiers . size ( ) < queried_columns . size ( ) <nl> + / / / Columns size of compact parts can ' t be counted . If all parts are compact do not move any condition . <nl> + & & cond . columns_size > 0 ; <nl> <nl> if ( cond . viable ) <nl> - { <nl> - cond . columns_size = getIdentifiersColumnSize ( cond . identifiers ) ; <nl> cond . good = isConditionGood ( node ) ; <nl> - } <nl> <nl> res . emplace_back ( std : : move ( cond ) ) ; <nl> } <nl> mmm a / dbms / src / Storages / MergeTree / MergedBlockOutputStream . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergedBlockOutputStream . cpp <nl> <nl> # include < Storages / MergeTree / MergedBlockOutputStream . h > <nl> - # include < Storages / MergeTree / MergeTreeIndexGranularityInfo . h > <nl> - # include < IO / createWriteBufferFromFileBase . h > <nl> - # include < Common / escapeForFileName . h > <nl> - # include < DataTypes / NestedUtils . h > <nl> - # include < DataStreams / MarkInCompressedFile . h > <nl> - # include < Common / StringUtils / StringUtils . h > <nl> - # include < Common / typeid_cast . h > <nl> - # include < Common / MemoryTracker . h > <nl> # include < Poco / File . h > <nl> <nl> <nl> namespace ErrorCodes <nl> <nl> <nl> MergedBlockOutputStream : : MergedBlockOutputStream ( <nl> - MergeTreeData & storage_ , <nl> - const String & part_path_ , <nl> + const MergeTreeDataPartPtr & data_part , <nl> const NamesAndTypesList & columns_list_ , <nl> - CompressionCodecPtr default_codec_ , <nl> - bool blocks_are_granules_size_ ) <nl> - : IMergedBlockOutputStream ( <nl> - storage_ , part_path_ , storage_ . global_context . getSettings ( ) . min_compress_block_size , <nl> - storage_ . global_context . getSettings ( ) . max_compress_block_size , default_codec_ , <nl> - storage_ . global_context . getSettings ( ) . min_bytes_to_use_direct_io , <nl> - blocks_are_granules_size_ , <nl> - std : : vector < MergeTreeIndexPtr > ( std : : begin ( storage_ . skip_indices ) , std : : end ( storage_ . 
skip_indices ) ) , <nl> - { } ) <nl> - , columns_list ( columns_list_ ) <nl> + CompressionCodecPtr default_codec , <nl> + bool blocks_are_granules_size ) <nl> + : MergedBlockOutputStream ( <nl> + data_part , columns_list_ , default_codec , { } , <nl> + data_part - > storage . global_context . getSettings ( ) . min_bytes_to_use_direct_io , <nl> + blocks_are_granules_size ) <nl> { <nl> - init ( ) ; <nl> - for ( const auto & it : columns_list ) <nl> - { <nl> - const auto columns = storage . getColumns ( ) ; <nl> - addStreams ( part_path , it . name , * it . type , columns . getCodecOrDefault ( it . name , default_codec_ ) , 0 , false ) ; <nl> - } <nl> } <nl> <nl> MergedBlockOutputStream : : MergedBlockOutputStream ( <nl> - MergeTreeData & storage_ , <nl> - const String & part_path_ , <nl> + const MergeTreeDataPartPtr & data_part , <nl> const NamesAndTypesList & columns_list_ , <nl> - CompressionCodecPtr default_codec_ , <nl> - const MergeTreeData : : DataPart : : ColumnToSize & merged_column_to_size_ , <nl> - size_t aio_threshold_ , <nl> - bool blocks_are_granules_size_ ) <nl> - : IMergedBlockOutputStream ( <nl> - storage_ , part_path_ , storage_ . global_context . getSettings ( ) . min_compress_block_size , <nl> - storage_ . global_context . getSettings ( ) . max_compress_block_size , default_codec_ , <nl> - aio_threshold_ , blocks_are_granules_size_ , <nl> - std : : vector < MergeTreeIndexPtr > ( std : : begin ( storage_ . skip_indices ) , std : : end ( storage_ . skip_indices ) ) , { } ) <nl> + CompressionCodecPtr default_codec , <nl> + const MergeTreeData : : DataPart : : ColumnToSize & merged_column_to_size , <nl> + size_t aio_threshold , <nl> + bool blocks_are_granules_size ) <nl> + : IMergedBlockOutputStream ( data_part ) <nl> , columns_list ( columns_list_ ) <nl> { <nl> - init ( ) ; <nl> + MergeTreeWriterSettings writer_settings ( data_part - > storage . global_context . getSettings ( ) , <nl> + data_part - > storage . canUseAdaptiveGranularity ( ) , aio_threshold , blocks_are_granules_size ) ; <nl> <nl> - / / / If summary size is more than threshold than we will use AIO <nl> - size_t total_size = 0 ; <nl> - if ( aio_threshold > 0 ) <nl> + if ( aio_threshold > 0 & & ! merged_column_to_size . empty ( ) ) <nl> { <nl> - for ( const auto & it : columns_list ) <nl> + for ( const auto & column : columns_list ) <nl> { <nl> - auto it2 = merged_column_to_size_ . find ( it . name ) ; <nl> - if ( it2 ! = merged_column_to_size_ . end ( ) ) <nl> - total_size + = it2 - > second ; <nl> + auto size_it = merged_column_to_size . find ( column . name ) ; <nl> + if ( size_it ! = merged_column_to_size . end ( ) ) <nl> + writer_settings . estimated_size + = size_it - > second ; <nl> } <nl> } <nl> <nl> - for ( const auto & it : columns_list ) <nl> - { <nl> - const auto columns = storage . getColumns ( ) ; <nl> - addStreams ( part_path , it . name , * it . type , columns . getCodecOrDefault ( it . name , default_codec_ ) , total_size , false ) ; <nl> - } <nl> + Poco : : File ( part_path ) . createDirectories ( ) ; <nl> + <nl> + writer = data_part - > getWriter ( columns_list , data_part - > storage . 
getSkipIndices ( ) , default_codec , writer_settings ) ; <nl> + writer - > initPrimaryIndex ( ) ; <nl> + writer - > initSkipIndices ( ) ; <nl> } <nl> <nl> std : : string MergedBlockOutputStream : : getPartPath ( ) const <nl> void MergedBlockOutputStream : : writeSuffixAndFinalizePart ( <nl> const NamesAndTypesList * total_column_list , <nl> MergeTreeData : : DataPart : : Checksums * additional_column_checksums ) <nl> { <nl> - / / / Finish columns serialization . <nl> - { <nl> - auto & settings = storage . global_context . getSettingsRef ( ) ; <nl> - IDataType : : SerializeBinaryBulkSettings serialize_settings ; <nl> - serialize_settings . low_cardinality_max_dictionary_size = settings . low_cardinality_max_dictionary_size ; <nl> - serialize_settings . low_cardinality_use_single_dictionary_for_part = settings . low_cardinality_use_single_dictionary_for_part ! = 0 ; <nl> - WrittenOffsetColumns offset_columns ; <nl> - auto it = columns_list . begin ( ) ; <nl> - for ( size_t i = 0 ; i < columns_list . size ( ) ; + + i , + + it ) <nl> - { <nl> - if ( ! serialization_states . empty ( ) ) <nl> - { <nl> - serialize_settings . getter = createStreamGetter ( it - > name , offset_columns , false ) ; <nl> - it - > type - > serializeBinaryBulkStateSuffix ( serialize_settings , serialization_states [ i ] ) ; <nl> - } <nl> - <nl> - if ( with_final_mark & & rows_count ! = 0 ) <nl> - writeFinalMark ( it - > name , it - > type , offset_columns , false , serialize_settings . path ) ; <nl> - } <nl> - } <nl> - <nl> - if ( with_final_mark & & rows_count ! = 0 ) <nl> - index_granularity . appendMark ( 0 ) ; / / / last mark <nl> - <nl> - if ( ! total_column_list ) <nl> - total_column_list = & columns_list ; <nl> - <nl> / / / Finish write and get checksums . <nl> MergeTreeData : : DataPart : : Checksums checksums ; <nl> <nl> if ( additional_column_checksums ) <nl> checksums = std : : move ( * additional_column_checksums ) ; <nl> <nl> - if ( index_stream ) <nl> - { <nl> - if ( with_final_mark & & rows_count ! = 0 ) <nl> - { <nl> - for ( size_t j = 0 ; j < index_columns . size ( ) ; + + j ) <nl> - { <nl> - auto & column = * last_index_row [ j ] . column ; <nl> - index_columns [ j ] - > insertFrom ( column , 0 ) ; / / / it has only one element <nl> - last_index_row [ j ] . type - > serializeBinary ( column , 0 , * index_stream ) ; <nl> - } <nl> - last_index_row . clear ( ) ; <nl> - } <nl> - <nl> - index_stream - > next ( ) ; <nl> - checksums . files [ " primary . idx " ] . file_size = index_stream - > count ( ) ; <nl> - checksums . files [ " primary . idx " ] . file_hash = index_stream - > getHash ( ) ; <nl> - index_stream = nullptr ; <nl> - } <nl> - <nl> - for ( ColumnStreams : : iterator it = column_streams . begin ( ) ; it ! = column_streams . end ( ) ; + + it ) <nl> - { <nl> - it - > second - > finalize ( ) ; <nl> - it - > second - > addToChecksums ( checksums ) ; <nl> - } <nl> - <nl> - finishSkipIndicesSerialization ( checksums ) ; <nl> + / / / Finish columns serialization . <nl> + writer - > finishDataSerialization ( checksums ) ; <nl> + writer - > finishPrimaryIndexSerialization ( checksums ) ; <nl> + writer - > finishSkipIndicesSerialization ( checksums ) ; <nl> <nl> - column_streams . clear ( ) ; <nl> + if ( ! total_column_list ) <nl> + total_column_list = & columns_list ; <nl> <nl> - if ( storage . format_version > = MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING ) <nl> + if ( storage . 
format_version > = MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING | | isCompactPart ( new_part ) ) <nl> { <nl> new_part - > partition . store ( storage , part_path , checksums ) ; <nl> if ( new_part - > minmax_idx . initialized ) <nl> void MergedBlockOutputStream : : writeSuffixAndFinalizePart ( <nl> <nl> new_part - > rows_count = rows_count ; <nl> new_part - > modification_time = time ( nullptr ) ; <nl> - new_part - > columns = * total_column_list ; <nl> - new_part - > index . assign ( std : : make_move_iterator ( index_columns . begin ( ) ) , std : : make_move_iterator ( index_columns . end ( ) ) ) ; <nl> + new_part - > index = writer - > releaseIndexColumns ( ) ; <nl> new_part - > checksums = checksums ; <nl> new_part - > bytes_on_disk = checksums . getTotalSizeOnDisk ( ) ; <nl> - new_part - > index_granularity = index_granularity ; <nl> - } <nl> - <nl> - void MergedBlockOutputStream : : init ( ) <nl> - { <nl> - Poco : : File ( part_path ) . createDirectories ( ) ; <nl> - <nl> - if ( storage . hasPrimaryKey ( ) ) <nl> - { <nl> - index_file_stream = std : : make_unique < WriteBufferFromFile > ( <nl> - part_path + " primary . idx " , DBMS_DEFAULT_BUFFER_SIZE , O_TRUNC | O_CREAT | O_WRONLY ) ; <nl> - index_stream = std : : make_unique < HashingWriteBuffer > ( * index_file_stream ) ; <nl> - } <nl> - <nl> - initSkipIndices ( ) ; <nl> + new_part - > index_granularity = writer - > getIndexGranularity ( ) ; <nl> } <nl> <nl> - <nl> void MergedBlockOutputStream : : writeImpl ( const Block & block , const IColumn : : Permutation * permutation ) <nl> { <nl> block . checkNumberOfRows ( ) ; <nl> void MergedBlockOutputStream : : writeImpl ( const Block & block , const IColumn : : Perm <nl> if ( ! rows ) <nl> return ; <nl> <nl> - / / / Fill index granularity for this block <nl> - / / / if it ' s unknown ( in case of insert data or horizontal merge , <nl> - / / / but not in case of vertical merge ) <nl> - if ( compute_granularity ) <nl> - fillIndexGranularity ( block ) ; <nl> - <nl> - / / / The set of written offset columns so that you do not write shared offsets of nested structures columns several times <nl> - WrittenOffsetColumns offset_columns ; <nl> - <nl> - auto primary_key_column_names = storage . primary_key_columns ; <nl> - std : : set < String > skip_indexes_column_names_set ; <nl> + std : : unordered_set < String > skip_indexes_column_names_set ; <nl> for ( const auto & index : storage . skip_indices ) <nl> std : : copy ( index - > columns . cbegin ( ) , index - > columns . cend ( ) , <nl> std : : inserter ( skip_indexes_column_names_set , skip_indexes_column_names_set . end ( ) ) ) ; <nl> Names skip_indexes_column_names ( skip_indexes_column_names_set . begin ( ) , skip_indexes_column_names_set . end ( ) ) ; <nl> <nl> - / / / Here we will add the columns related to the Primary Key , then write the index . <nl> - std : : vector < ColumnWithTypeAndName > primary_key_columns ( primary_key_column_names . size ( ) ) ; <nl> - std : : map < String , size_t > primary_key_column_name_to_position ; <nl> + Block primary_key_block = getBlockAndPermute ( block , storage . primary_key_columns , permutation ) ; <nl> + Block skip_indexes_block = getBlockAndPermute ( block , skip_indexes_column_names , permutation ) ; <nl> <nl> - for ( size_t i = 0 , size = primary_key_column_names . size ( ) ; i < size ; + + i ) <nl> - { <nl> - const auto & name = primary_key_column_names [ i ] ; <nl> - <nl> - if ( ! primary_key_column_name_to_position . emplace ( name , i ) . 
second ) <nl> - throw Exception ( " Primary key contains duplicate columns " , ErrorCodes : : BAD_ARGUMENTS ) ; <nl> - <nl> - primary_key_columns [ i ] = block . getByName ( name ) ; <nl> - <nl> - / / / Reorder primary key columns in advance and add them to ` primary_key_columns ` . <nl> - if ( permutation ) <nl> - primary_key_columns [ i ] . column = primary_key_columns [ i ] . column - > permute ( * permutation , 0 ) ; <nl> - } <nl> - <nl> - / / / The same for skip indexes columns <nl> - std : : vector < ColumnWithTypeAndName > skip_indexes_columns ( skip_indexes_column_names . size ( ) ) ; <nl> - std : : map < String , size_t > skip_indexes_column_name_to_position ; <nl> - <nl> - for ( size_t i = 0 , size = skip_indexes_column_names . size ( ) ; i < size ; + + i ) <nl> - { <nl> - const auto & name = skip_indexes_column_names [ i ] ; <nl> - skip_indexes_column_name_to_position . emplace ( name , i ) ; <nl> - skip_indexes_columns [ i ] = block . getByName ( name ) ; <nl> - <nl> - / / / Reorder index columns in advance . <nl> - if ( permutation ) <nl> - skip_indexes_columns [ i ] . column = skip_indexes_columns [ i ] . column - > permute ( * permutation , 0 ) ; <nl> - } <nl> - <nl> - if ( index_columns . empty ( ) ) <nl> - { <nl> - index_columns . resize ( primary_key_column_names . size ( ) ) ; <nl> - last_index_row . resize ( primary_key_column_names . size ( ) ) ; <nl> - for ( size_t i = 0 , size = primary_key_column_names . size ( ) ; i < size ; + + i ) <nl> - { <nl> - index_columns [ i ] = primary_key_columns [ i ] . column - > cloneEmpty ( ) ; <nl> - last_index_row [ i ] = primary_key_columns [ i ] . cloneEmpty ( ) ; <nl> - } <nl> - } <nl> - <nl> - if ( serialization_states . empty ( ) ) <nl> - { <nl> - serialization_states . reserve ( columns_list . size ( ) ) ; <nl> - WrittenOffsetColumns tmp_offset_columns ; <nl> - IDataType : : SerializeBinaryBulkSettings settings ; <nl> - <nl> - for ( const auto & col : columns_list ) <nl> - { <nl> - settings . getter = createStreamGetter ( col . name , tmp_offset_columns , false ) ; <nl> - serialization_states . emplace_back ( nullptr ) ; <nl> - col . type - > serializeBinaryBulkStatePrefix ( settings , serialization_states . back ( ) ) ; <nl> - } <nl> - } <nl> - <nl> - size_t new_index_offset = 0 ; <nl> - / / / Now write the data . <nl> - auto it = columns_list . begin ( ) ; <nl> - for ( size_t i = 0 ; i < columns_list . size ( ) ; + + i , + + it ) <nl> - { <nl> - const ColumnWithTypeAndName & column = block . getByName ( it - > name ) ; <nl> - <nl> - if ( permutation ) <nl> - { <nl> - auto primary_column_it = primary_key_column_name_to_position . find ( it - > name ) ; <nl> - auto skip_index_column_it = skip_indexes_column_name_to_position . find ( it - > name ) ; <nl> - if ( primary_key_column_name_to_position . end ( ) ! = primary_column_it ) <nl> - { <nl> - const auto & primary_column = * primary_key_columns [ primary_column_it - > second ] . column ; <nl> - std : : tie ( std : : ignore , new_index_offset ) = writeColumn ( column . name , * column . type , primary_column , offset_columns , false , serialization_states [ i ] , current_mark ) ; <nl> - } <nl> - else if ( skip_indexes_column_name_to_position . end ( ) ! = skip_index_column_it ) <nl> - { <nl> - const auto & index_column = * skip_indexes_columns [ skip_index_column_it - > second ] . column ; <nl> - std : : tie ( std : : ignore , new_index_offset ) = writeColumn ( column . name , * column . 
type , index_column , offset_columns , false , serialization_states [ i ] , current_mark ) ; <nl> - } <nl> - else <nl> - { <nl> - / / / We rearrange the columns that are not included in the primary key here ; Then the result is released - to save RAM . <nl> - ColumnPtr permuted_column = column . column - > permute ( * permutation , 0 ) ; <nl> - std : : tie ( std : : ignore , new_index_offset ) = writeColumn ( column . name , * column . type , * permuted_column , offset_columns , false , serialization_states [ i ] , current_mark ) ; <nl> - } <nl> - } <nl> - else <nl> - { <nl> - std : : tie ( std : : ignore , new_index_offset ) = writeColumn ( column . name , * column . type , * column . column , offset_columns , false , serialization_states [ i ] , current_mark ) ; <nl> - } <nl> - } <nl> + writer - > write ( block , permutation , primary_key_block , skip_indexes_block ) ; <nl> + writer - > calculateAndSerializeSkipIndices ( skip_indexes_block , rows ) ; <nl> + writer - > calculateAndSerializePrimaryIndex ( primary_key_block , rows ) ; <nl> + writer - > next ( ) ; <nl> <nl> rows_count + = rows ; <nl> - <nl> - / / / Should be written before index offset update , because we calculate , <nl> - / / / indices of currently written granules <nl> - calculateAndSerializeSkipIndices ( skip_indexes_columns , rows ) ; <nl> - <nl> - { <nl> - / * * While filling index ( index_columns ) , disable memory tracker . <nl> - * Because memory is allocated here ( maybe in context of INSERT query ) , <nl> - * but then freed in completely different place ( while merging parts ) , where query memory_tracker is not available . <nl> - * And otherwise it will look like excessively growing memory consumption in context of query . <nl> - * ( observed in long INSERT SELECTs ) <nl> - * / <nl> - auto temporarily_disable_memory_tracker = getCurrentMemoryTrackerActionLock ( ) ; <nl> - <nl> - / / / Write index . The index contains Primary Key value for each ` index_granularity ` row . <nl> - for ( size_t i = index_offset ; i < rows ; ) <nl> - { <nl> - if ( storage . hasPrimaryKey ( ) ) <nl> - { <nl> - for ( size_t j = 0 , size = primary_key_columns . size ( ) ; j < size ; + + j ) <nl> - { <nl> - const IColumn & primary_column = * primary_key_columns [ j ] . column . get ( ) ; <nl> - index_columns [ j ] - > insertFrom ( primary_column , i ) ; <nl> - primary_key_columns [ j ] . type - > serializeBinary ( primary_column , i , * index_stream ) ; <nl> - } <nl> - } <nl> - <nl> - + + current_mark ; <nl> - if ( current_mark < index_granularity . getMarksCount ( ) ) <nl> - i + = index_granularity . getMarkRows ( current_mark ) ; <nl> - else <nl> - break ; <nl> - } <nl> - } <nl> - <nl> - / / / store last index row to write final mark at the end of column <nl> - for ( size_t j = 0 , size = primary_key_columns . size ( ) ; j < size ; + + j ) <nl> - { <nl> - const IColumn & primary_column = * primary_key_columns [ j ] . column . get ( ) ; <nl> - auto mutable_column = std : : move ( * last_index_row [ j ] . column ) . mutate ( ) ; <nl> - if ( ! mutable_column - > empty ( ) ) <nl> - mutable_column - > popBack ( 1 ) ; <nl> - mutable_column - > insertFrom ( primary_column , rows - 1 ) ; <nl> - last_index_row [ j ] . column = std : : move ( mutable_column ) ; <nl> - } <nl> - <nl> - index_offset = new_index_offset ; <nl> } <nl> <nl> } <nl> mmm a / dbms / src / Storages / MergeTree / MergedBlockOutputStream . h <nl> ppp b / dbms / src / Storages / MergeTree / MergedBlockOutputStream . 
h <nl> class MergedBlockOutputStream final : public IMergedBlockOutputStream <nl> { <nl> public : <nl> MergedBlockOutputStream ( <nl> - MergeTreeData & storage_ , <nl> - const String & part_path_ , <nl> + const MergeTreeDataPartPtr & data_part , <nl> const NamesAndTypesList & columns_list_ , <nl> - CompressionCodecPtr default_codec_ , <nl> - bool blocks_are_granules_size_ = false ) ; <nl> + CompressionCodecPtr default_codec , <nl> + bool blocks_are_granules_size = false ) ; <nl> <nl> MergedBlockOutputStream ( <nl> - MergeTreeData & storage_ , <nl> - const String & part_path_ , <nl> + const MergeTreeDataPartPtr & data_part , <nl> const NamesAndTypesList & columns_list_ , <nl> - CompressionCodecPtr default_codec_ , <nl> - const MergeTreeData : : DataPart : : ColumnToSize & merged_column_to_size_ , <nl> - size_t aio_threshold_ , <nl> - bool blocks_are_granules_size_ = false ) ; <nl> + CompressionCodecPtr default_codec , <nl> + const MergeTreeData : : DataPart : : ColumnToSize & merged_column_to_size , <nl> + size_t aio_threshold , <nl> + bool blocks_are_granules_size = false ) ; <nl> <nl> std : : string getPartPath ( ) const ; <nl> <nl> class MergedBlockOutputStream final : public IMergedBlockOutputStream <nl> const NamesAndTypesList * total_columns_list = nullptr , <nl> MergeTreeData : : DataPart : : Checksums * additional_column_checksums = nullptr ) ; <nl> <nl> - const MergeTreeIndexGranularity & getIndexGranularity ( ) const <nl> - { <nl> - return index_granularity ; <nl> - } <nl> - <nl> private : <nl> - void init ( ) ; <nl> - <nl> / * * If ` permutation ` is given , it rearranges the values in the columns when writing . <nl> * This is necessary to not keep the whole block in the RAM to sort it . <nl> * / <nl> class MergedBlockOutputStream final : public IMergedBlockOutputStream <nl> NamesAndTypesList columns_list ; <nl> <nl> size_t rows_count = 0 ; <nl> - <nl> - std : : unique_ptr < WriteBufferFromFile > index_file_stream ; <nl> - std : : unique_ptr < HashingWriteBuffer > index_stream ; <nl> - MutableColumns index_columns ; <nl> - / / / Index columns values from the last row from the last block <nl> - / / / It ' s written to index file in the ` writeSuffixAndFinalizePart ` method <nl> - ColumnsWithTypeAndName last_index_row ; <nl> } ; <nl> <nl> } <nl> mmm a / dbms / src / Storages / MergeTree / MergedColumnOnlyOutputStream . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergedColumnOnlyOutputStream . cpp <nl> namespace DB <nl> { <nl> <nl> MergedColumnOnlyOutputStream : : MergedColumnOnlyOutputStream ( <nl> - MergeTreeData & storage_ , const Block & header_ , const String & part_path_ , bool sync_ , <nl> - CompressionCodecPtr default_codec_ , bool skip_offsets_ , <nl> - const std : : vector < MergeTreeIndexPtr > & indices_to_recalc_ , <nl> - WrittenOffsetColumns & already_written_offset_columns_ , <nl> - const MergeTreeIndexGranularity & index_granularity_ , <nl> - const MergeTreeIndexGranularityInfo * index_granularity_info_ ) <nl> - : IMergedBlockOutputStream ( <nl> - storage_ , part_path_ , storage_ . global_context . getSettings ( ) . min_compress_block_size , <nl> - storage_ . global_context . getSettings ( ) . max_compress_block_size , default_codec_ , <nl> - storage_ . global_context . getSettings ( ) . 
min_bytes_to_use_direct_io , <nl> - false , <nl> - indices_to_recalc_ , <nl> - index_granularity_ , <nl> - index_granularity_info_ ) , <nl> - header ( header_ ) , sync ( sync_ ) , skip_offsets ( skip_offsets_ ) , <nl> - already_written_offset_columns ( already_written_offset_columns_ ) <nl> + const MergeTreeDataPartPtr & data_part , const Block & header_ , bool sync_ , <nl> + CompressionCodecPtr default_codec , bool skip_offsets_ , <nl> + const std : : vector < MergeTreeIndexPtr > & indices_to_recalc , <nl> + WrittenOffsetColumns * offset_columns_ , <nl> + const MergeTreeIndexGranularity & index_granularity , <nl> + const MergeTreeIndexGranularityInfo * index_granularity_info , <nl> + bool is_writing_temp_files ) <nl> + : IMergedBlockOutputStream ( data_part ) , <nl> + header ( header_ ) , sync ( sync_ ) <nl> { <nl> - serialization_states . reserve ( header . columns ( ) ) ; <nl> - WrittenOffsetColumns tmp_offset_columns ; <nl> - IDataType : : SerializeBinaryBulkSettings settings ; <nl> - <nl> - for ( const auto & column_name : header . getNames ( ) ) <nl> - { <nl> - const auto & col = header . getByName ( column_name ) ; <nl> - <nl> - const auto columns = storage . getColumns ( ) ; <nl> - addStreams ( part_path , col . name , * col . type , columns . getCodecOrDefault ( col . name , codec ) , 0 , skip_offsets ) ; <nl> - serialization_states . emplace_back ( nullptr ) ; <nl> - settings . getter = createStreamGetter ( col . name , tmp_offset_columns , false ) ; <nl> - col . type - > serializeBinaryBulkStatePrefix ( settings , serialization_states . back ( ) ) ; <nl> - } <nl> - <nl> - initSkipIndices ( ) ; <nl> + const auto & global_settings = data_part - > storage . global_context . getSettings ( ) ; <nl> + MergeTreeWriterSettings writer_settings ( <nl> + global_settings , <nl> + index_granularity_info ? index_granularity_info - > is_adaptive : data_part - > storage . canUseAdaptiveGranularity ( ) , <nl> + global_settings . min_bytes_to_use_direct_io ) ; <nl> + <nl> + writer_settings . is_writing_temp_files = is_writing_temp_files ; <nl> + writer_settings . skip_offsets = skip_offsets_ ; <nl> + <nl> + writer = data_part - > getWriter ( header . getNamesAndTypesList ( ) , indices_to_recalc , <nl> + default_codec , std : : move ( writer_settings ) , index_granularity ) ; <nl> + writer - > setWrittenOffsetColumns ( offset_columns_ ) ; <nl> + writer - > initSkipIndices ( ) ; <nl> } <nl> <nl> void MergedColumnOnlyOutputStream : : write ( const Block & block ) <nl> { <nl> - std : : set < String > skip_indexes_column_names_set ; <nl> - for ( const auto & index : skip_indices ) <nl> + std : : unordered_set < String > skip_indexes_column_names_set ; <nl> + for ( const auto & index : writer - > getSkipIndices ( ) ) <nl> std : : copy ( index - > columns . cbegin ( ) , index - > columns . cend ( ) , <nl> std : : inserter ( skip_indexes_column_names_set , skip_indexes_column_names_set . end ( ) ) ) ; <nl> Names skip_indexes_column_names ( skip_indexes_column_names_set . begin ( ) , skip_indexes_column_names_set . end ( ) ) ; <nl> <nl> - std : : vector < ColumnWithTypeAndName > skip_indexes_columns ( skip_indexes_column_names . size ( ) ) ; <nl> - std : : map < String , size_t > skip_indexes_column_name_to_position ; <nl> - for ( size_t i = 0 , size = skip_indexes_column_names . size ( ) ; i < size ; + + i ) <nl> - { <nl> - const auto & name = skip_indexes_column_names [ i ] ; <nl> - skip_indexes_column_name_to_position . emplace ( name , i ) ; <nl> - skip_indexes_columns [ i ] = block . 
getByName ( name ) ; <nl> - } <nl> + Block skip_indexes_block = getBlockAndPermute ( block , skip_indexes_column_names , nullptr ) ; <nl> <nl> size_t rows = block . rows ( ) ; <nl> if ( ! rows ) <nl> return ; <nl> <nl> - size_t new_index_offset = 0 ; <nl> - size_t new_current_mark = 0 ; <nl> - WrittenOffsetColumns offset_columns = already_written_offset_columns ; <nl> - for ( size_t i = 0 ; i < header . columns ( ) ; + + i ) <nl> - { <nl> - const ColumnWithTypeAndName & column = block . getByName ( header . getByPosition ( i ) . name ) ; <nl> - std : : tie ( new_current_mark , new_index_offset ) = writeColumn ( column . name , * column . type , * column . column , offset_columns , skip_offsets , serialization_states [ i ] , current_mark ) ; <nl> - } <nl> - <nl> - / / / Should be written before index offset update , because we calculate , <nl> - / / / indices of currently written granules <nl> - calculateAndSerializeSkipIndices ( skip_indexes_columns , rows ) ; <nl> - <nl> - index_offset = new_index_offset ; <nl> - current_mark = new_current_mark ; <nl> + writer - > write ( block ) ; <nl> + writer - > calculateAndSerializeSkipIndices ( skip_indexes_block , rows ) ; <nl> + writer - > next ( ) ; <nl> } <nl> <nl> void MergedColumnOnlyOutputStream : : writeSuffix ( ) <nl> void MergedColumnOnlyOutputStream : : writeSuffix ( ) <nl> MergeTreeData : : DataPart : : Checksums MergedColumnOnlyOutputStream : : writeSuffixAndGetChecksums ( ) <nl> { <nl> / / / Finish columns serialization . <nl> - auto & settings = storage . global_context . getSettingsRef ( ) ; <nl> - IDataType : : SerializeBinaryBulkSettings serialize_settings ; <nl> - serialize_settings . low_cardinality_max_dictionary_size = settings . low_cardinality_max_dictionary_size ; <nl> - serialize_settings . low_cardinality_use_single_dictionary_for_part = settings . low_cardinality_use_single_dictionary_for_part ! = 0 ; <nl> - <nl> - WrittenOffsetColumns offset_columns ; <nl> - for ( size_t i = 0 , size = header . columns ( ) ; i < size ; + + i ) <nl> - { <nl> - auto & column = header . getByPosition ( i ) ; <nl> - serialize_settings . getter = createStreamGetter ( column . name , already_written_offset_columns , skip_offsets ) ; <nl> - column . type - > serializeBinaryBulkStateSuffix ( serialize_settings , serialization_states [ i ] ) ; <nl> - <nl> - / / / We wrote at least one row <nl> - if ( with_final_mark & & ( index_offset ! = 0 | | current_mark ! = 0 ) ) <nl> - writeFinalMark ( column . name , column . type , offset_columns , skip_offsets , serialize_settings . path ) ; <nl> - } <nl> - <nl> MergeTreeData : : DataPart : : Checksums checksums ; <nl> - <nl> - for ( auto & column_stream : column_streams ) <nl> - { <nl> - column_stream . second - > finalize ( ) ; <nl> - if ( sync ) <nl> - column_stream . second - > sync ( ) ; <nl> - <nl> - column_stream . second - > addToChecksums ( checksums ) ; <nl> - } <nl> - <nl> - finishSkipIndicesSerialization ( checksums ) ; <nl> - <nl> - column_streams . clear ( ) ; <nl> - serialization_states . clear ( ) ; <nl> + writer - > finishDataSerialization ( checksums , sync ) ; <nl> + writer - > finishSkipIndicesSerialization ( checksums ) ; <nl> <nl> return checksums ; <nl> } <nl> mmm a / dbms / src / Storages / MergeTree / MergedColumnOnlyOutputStream . h <nl> ppp b / dbms / src / Storages / MergeTree / MergedColumnOnlyOutputStream . 
h <nl> <nl> namespace DB <nl> { <nl> <nl> + class MergeTreeDataPartWriterWide ; <nl> + <nl> / / / Writes only those columns that are in ` header ` <nl> class MergedColumnOnlyOutputStream final : public IMergedBlockOutputStream <nl> { <nl> class MergedColumnOnlyOutputStream final : public IMergedBlockOutputStream <nl> / / / Pass empty ' already_written_offset_columns ' first time then and pass the same object to subsequent instances of MergedColumnOnlyOutputStream <nl> / / / if you want to serialize elements of Nested data structure in different instances of MergedColumnOnlyOutputStream . <nl> MergedColumnOnlyOutputStream ( <nl> - MergeTreeData & storage_ , const Block & header_ , const String & part_path_ , bool sync_ , <nl> + const MergeTreeDataPartPtr & data_part , const Block & header_ , bool sync_ , <nl> CompressionCodecPtr default_codec_ , bool skip_offsets_ , <nl> const std : : vector < MergeTreeIndexPtr > & indices_to_recalc_ , <nl> - WrittenOffsetColumns & already_written_offset_columns_ , <nl> - const MergeTreeIndexGranularity & index_granularity_ , <nl> - const MergeTreeIndexGranularityInfo * index_granularity_info_ = nullptr ) ; <nl> + WrittenOffsetColumns * offset_columns_ = nullptr , <nl> + const MergeTreeIndexGranularity & index_granularity = { } , <nl> + const MergeTreeIndexGranularityInfo * index_granularity_info_ = nullptr , <nl> + bool is_writing_temp_files = false ) ; <nl> <nl> Block getHeader ( ) const override { return header ; } <nl> void write ( const Block & block ) override ; <nl> class MergedColumnOnlyOutputStream final : public IMergedBlockOutputStream <nl> <nl> private : <nl> Block header ; <nl> - <nl> bool sync ; <nl> - bool skip_offsets ; <nl> - <nl> - / / / To correctly write Nested elements column - by - column . <nl> - WrittenOffsetColumns & already_written_offset_columns ; <nl> } ; <nl> <nl> <nl> mmm a / dbms / src / Storages / MergeTree / ReplicatedMergeTreeBlockOutputStream . cpp <nl> ppp b / dbms / src / Storages / MergeTree / ReplicatedMergeTreeBlockOutputStream . cpp <nl> void ReplicatedMergeTreeBlockOutputStream : : writeExistingPart ( MergeTreeData : : Muta <nl> <nl> void ReplicatedMergeTreeBlockOutputStream : : commitPart ( zkutil : : ZooKeeperPtr & zookeeper , MergeTreeData : : MutableDataPartPtr & part , const String & block_id ) <nl> { <nl> - storage . check ( part - > columns ) ; <nl> + storage . check ( part - > getColumns ( ) ) ; <nl> assertSessionIsNotExpired ( zookeeper ) ; <nl> <nl> / / / Obtain incremental block number and lock it . The lock holds our intention to add the block to the filesystem . <nl> mmm a / dbms / src / Storages / MergeTree / ReplicatedMergeTreeLogEntry . cpp <nl> ppp b / dbms / src / Storages / MergeTree / ReplicatedMergeTreeLogEntry . cpp <nl> <nl> # include < IO / Operators . h > <nl> # include < IO / ReadBufferFromString . h > <nl> # include < IO / WriteBufferFromString . h > <nl> + # include < IO / ReadHelpers . h > <nl> <nl> <nl> namespace DB <nl> void ReplicatedMergeTreeLogEntryData : : writeText ( WriteBuffer & out ) const <nl> <nl> out < < ' \ n ' ; <nl> <nl> + if ( new_part_type ! = MergeTreeDataPartType : : WIDE & & new_part_type ! = MergeTreeDataPartType : : UNKNOWN ) <nl> + out < < " part_type : " < < new_part_type . 
toString ( ) < < " \ n " ; <nl> + <nl> if ( quorum ) <nl> out < < " quorum : " < < quorum < < ' \ n ' ; <nl> } <nl> void ReplicatedMergeTreeLogEntryData : : readText ( ReadBuffer & in ) <nl> } <nl> <nl> in > > " \ n " ; <nl> + if ( checkString ( " part_type : " , in ) ) <nl> + { <nl> + String part_type_str ; <nl> + in > > part_type_str ; <nl> + new_part_type . fromString ( part_type_str ) ; <nl> + in > > " \ n " ; <nl> + } <nl> + else <nl> + new_part_type = MergeTreeDataPartType : : WIDE ; <nl> <nl> / / / Optional field . <nl> if ( ! in . eof ( ) ) <nl> mmm a / dbms / src / Storages / MergeTree / ReplicatedMergeTreeLogEntry . h <nl> ppp b / dbms / src / Storages / MergeTree / ReplicatedMergeTreeLogEntry . h <nl> <nl> # include < Common / ZooKeeper / Types . h > <nl> # include < Core / Types . h > <nl> # include < IO / WriteHelpers . h > <nl> + # include < Storages / MergeTree / MergeTreeDataPartType . h > <nl> <nl> # include < mutex > <nl> # include < condition_variable > <nl> struct ReplicatedMergeTreeLogEntryData <nl> / / / The name of resulting part for GET_PART and MERGE_PARTS <nl> / / / Part range for DROP_RANGE and CLEAR_COLUMN <nl> String new_part_name ; <nl> + MergeTreeDataPartType new_part_type ; <nl> String block_id ; / / / For parts of level zero , the block identifier for deduplication ( node name in / blocks / ) . <nl> mutable String actual_new_part_name ; / / / GET_PART could actually fetch a part covering ' new_part_name ' . <nl> <nl> mmm a / dbms / src / Storages / MergeTree / ReplicatedMergeTreePartCheckThread . cpp <nl> ppp b / dbms / src / Storages / MergeTree / ReplicatedMergeTreePartCheckThread . cpp <nl> CheckResult ReplicatedMergeTreePartCheckThread : : checkPart ( const String & part_na <nl> auto table_lock = storage . lockStructureForShare ( false , RWLockImpl : : NO_QUERY ) ; <nl> <nl> auto local_part_header = ReplicatedMergeTreePartHeader : : fromColumnsAndChecksums ( <nl> - part - > columns , part - > checksums ) ; <nl> + part - > getColumns ( ) , part - > checksums ) ; <nl> <nl> String part_path = storage . replica_path + " / parts / " + part_name ; <nl> String part_znode ; <nl> CheckResult ReplicatedMergeTreePartCheckThread : : checkPart ( const String & part_na <nl> checkDataPart ( <nl> part , <nl> true , <nl> - storage . primary_key_data_types , <nl> - storage . skip_indices , <nl> [ this ] { return need_stop . load ( ) ; } ) ; <nl> <nl> if ( need_stop ) <nl> mmm a / dbms / src / Storages / MergeTree / ReplicatedMergeTreeQueue . cpp <nl> ppp b / dbms / src / Storages / MergeTree / ReplicatedMergeTreeQueue . cpp <nl> <nl> # include < Storages / StorageReplicatedMergeTree . h > <nl> # include < IO / ReadHelpers . h > <nl> # include < IO / WriteHelpers . h > <nl> - # include < Storages / MergeTree / MergeTreeDataPart . h > <nl> + # include < Storages / MergeTree / IMergeTreeDataPart . h > <nl> # include < Storages / MergeTree / MergeTreeDataMergerMutator . h > <nl> # include < Storages / MergeTree / ReplicatedMergeTreeQuorumEntry . h > <nl> # include < Common / StringUtils / StringUtils . h > <nl> mmm a / dbms / src / Storages / MergeTree / ReplicatedMergeTreeQuorumAddedParts . h <nl> ppp b / dbms / src / Storages / MergeTree / ReplicatedMergeTreeQuorumAddedParts . h <nl> <nl> # include < IO / ReadHelpers . h > <nl> # include < IO / Operators . h > <nl> <nl> - # include < Storages / MergeTree / MergeTreeDataPart . 
h > <nl> <nl> namespace DB <nl> { <nl> mmm a / dbms / src / Storages / MergeTree / StorageFromMergeTreeDataPart . h <nl> ppp b / dbms / src / Storages / MergeTree / StorageFromMergeTreeDataPart . h <nl> <nl> # pragma once <nl> <nl> # include < Storages / IStorage . h > <nl> - # include < Storages / MergeTree / MergeTreeDataPart . h > <nl> + # include < Storages / MergeTree / IMergeTreeDataPart . h > <nl> # include < Storages / MergeTree / MergeTreeDataSelectExecutor . h > <nl> # include < Core / Defines . h > <nl> <nl> mmm a / dbms / src / Storages / MergeTree / checkDataPart . cpp <nl> ppp b / dbms / src / Storages / MergeTree / checkDataPart . cpp <nl> <nl> <nl> # include < Storages / MergeTree / MergeTreeIndexGranularity . h > <nl> # include < Storages / MergeTree / checkDataPart . h > <nl> + # include < Storages / MergeTree / MergeTreeDataPartCompact . h > <nl> # include < DataStreams / MarkInCompressedFile . h > <nl> # include < Compression / CompressedReadBuffer . h > <nl> # include < IO / HashingReadBuffer . h > <nl> namespace DB <nl> namespace ErrorCodes <nl> { <nl> extern const int CORRUPTED_DATA ; <nl> - extern const int LOGICAL_ERROR ; <nl> - extern const int INCORRECT_MARK ; <nl> - extern const int EMPTY_LIST_OF_COLUMNS_PASSED ; <nl> + extern const int UNKNOWN_PART_TYPE ; <nl> } <nl> <nl> <nl> - namespace <nl> - { <nl> - <nl> - / * * To read and checksum single stream ( a pair of . bin , . mrk files ) for a single column or secondary index . <nl> - * / <nl> - class Stream <nl> - { <nl> - public : <nl> - String base_name ; <nl> - String bin_file_extension ; <nl> - String mrk_file_extension ; <nl> - String bin_file_path ; <nl> - String mrk_file_path ; <nl> - private : <nl> - const MergeTreeIndexGranularity & index_granularity ; <nl> - ReadBufferFromFile file_buf ; <nl> - HashingReadBuffer compressed_hashing_buf ; <nl> - CompressedReadBuffer uncompressing_buf ; <nl> - size_t mark_position = 0 ; <nl> - public : <nl> - HashingReadBuffer uncompressed_hashing_buf ; <nl> - <nl> - private : <nl> - ReadBufferFromFile mrk_file_buf ; <nl> - <nl> - std : : pair < MarkInCompressedFile , size_t > readMarkFromFile ( ) <nl> - { <nl> - size_t mrk_rows ; <nl> - MarkInCompressedFile mrk_mark ; <nl> - readIntBinary ( mrk_mark . offset_in_compressed_file , mrk_hashing_buf ) ; <nl> - readIntBinary ( mrk_mark . offset_in_decompressed_block , mrk_hashing_buf ) ; <nl> - if ( mrk_file_extension = = " . mrk2 " ) <nl> - readIntBinary ( mrk_rows , mrk_hashing_buf ) ; <nl> - else <nl> - mrk_rows = index_granularity . 
getMarkRows ( mark_position ) ; <nl> - <nl> - return { mrk_mark , mrk_rows } ; <nl> - } <nl> - public : <nl> - HashingReadBuffer mrk_hashing_buf ; <nl> - <nl> - Stream ( <nl> - const String & path , <nl> - const String & base_name_ , <nl> - const String & bin_file_extension_ , <nl> - const String & mrk_file_extension_ , <nl> - const MergeTreeIndexGranularity & index_granularity_ ) <nl> - : base_name ( base_name_ ) <nl> - , bin_file_extension ( bin_file_extension_ ) <nl> - , mrk_file_extension ( mrk_file_extension_ ) <nl> - , bin_file_path ( path + base_name + bin_file_extension ) <nl> - , mrk_file_path ( path + base_name + mrk_file_extension ) <nl> - , index_granularity ( index_granularity_ ) <nl> - , file_buf ( bin_file_path ) <nl> - , compressed_hashing_buf ( file_buf ) <nl> - , uncompressing_buf ( compressed_hashing_buf ) <nl> - , uncompressed_hashing_buf ( uncompressing_buf ) <nl> - , mrk_file_buf ( mrk_file_path ) <nl> - , mrk_hashing_buf ( mrk_file_buf ) <nl> - { } <nl> - <nl> - void assertMark ( bool only_read = false ) <nl> - { <nl> - <nl> - auto [ mrk_mark , mrk_rows ] = readMarkFromFile ( ) ; <nl> - bool has_alternative_mark = false ; <nl> - MarkInCompressedFile alternative_data_mark = { } ; <nl> - MarkInCompressedFile data_mark = { } ; <nl> - <nl> - / / / If the mark should be exactly at the border of blocks , we can also use a mark pointing to the end of previous block , <nl> - / / / and the beginning of next . <nl> - if ( ! uncompressed_hashing_buf . hasPendingData ( ) ) <nl> - { <nl> - / / / Get a mark pointing to the end of previous block . <nl> - has_alternative_mark = true ; <nl> - alternative_data_mark . offset_in_compressed_file = compressed_hashing_buf . count ( ) - uncompressing_buf . getSizeCompressed ( ) ; <nl> - alternative_data_mark . offset_in_decompressed_block = uncompressed_hashing_buf . offset ( ) ; <nl> - <nl> - if ( mrk_mark = = alternative_data_mark ) <nl> - { <nl> - mark_position + + ; <nl> - return ; <nl> - } <nl> - <nl> - uncompressed_hashing_buf . next ( ) ; <nl> - <nl> - / / / At the end of file ` compressed_hashing_buf . count ( ) ` points to the end of the file even before ` calling next ( ) ` , <nl> - / / / and the check you just performed does not work correctly . For simplicity , we will not check the last mark . <nl> - if ( uncompressed_hashing_buf . eof ( ) ) <nl> - { <nl> - mark_position + + ; <nl> - return ; <nl> - } <nl> - } <nl> - <nl> - data_mark . offset_in_compressed_file = compressed_hashing_buf . count ( ) - uncompressing_buf . getSizeCompressed ( ) ; <nl> - data_mark . offset_in_decompressed_block = uncompressed_hashing_buf . offset ( ) ; <nl> - <nl> - if ( ! only_read & & ( mrk_mark ! = data_mark | | mrk_rows ! = index_granularity . getMarkRows ( mark_position ) ) ) <nl> - throw Exception ( " Incorrect mark : " + data_mark . toStringWithRows ( index_granularity . getMarkRows ( mark_position ) ) + <nl> - ( has_alternative_mark ? " or " + alternative_data_mark . toString ( ) : " " ) + " in data , " + <nl> - mrk_mark . toStringWithRows ( mrk_rows ) + " in " + mrk_file_path + " file " , ErrorCodes : : INCORRECT_MARK ) ; <nl> - <nl> - mark_position + + ; <nl> - } <nl> - <nl> - void assertEnd ( ) <nl> - { <nl> - if ( ! uncompressed_hashing_buf . eof ( ) ) <nl> - throw Exception ( " EOF expected in " + bin_file_path + " file " <nl> - + " at position " <nl> - + toString ( compressed_hashing_buf . count ( ) ) + " ( compressed ) , " <nl> - + toString ( uncompressed_hashing_buf . 
count ( ) ) + " ( uncompressed ) " , ErrorCodes : : CORRUPTED_DATA ) ; <nl> - <nl> - / / / Maybe we have final mark . <nl> - if ( index_granularity . hasFinalMark ( ) ) <nl> - { <nl> - auto final_mark_rows = readMarkFromFile ( ) . second ; <nl> - if ( final_mark_rows ! = 0 ) <nl> - throw Exception ( " Incorrect final mark at the end of " + mrk_file_path + " expected 0 rows , got " + toString ( final_mark_rows ) , ErrorCodes : : CORRUPTED_DATA ) ; <nl> - } <nl> - <nl> - if ( ! mrk_hashing_buf . eof ( ) ) <nl> - throw Exception ( " EOF expected in " + mrk_file_path + " file " <nl> - + " at position " <nl> - + toString ( mrk_hashing_buf . count ( ) ) , ErrorCodes : : CORRUPTED_DATA ) ; <nl> - } <nl> - <nl> - void saveChecksums ( MergeTreeData : : DataPart : : Checksums & checksums ) <nl> - { <nl> - checksums . files [ base_name + bin_file_extension ] = MergeTreeData : : DataPart : : Checksums : : Checksum ( <nl> - compressed_hashing_buf . count ( ) , compressed_hashing_buf . getHash ( ) , <nl> - uncompressed_hashing_buf . count ( ) , uncompressed_hashing_buf . getHash ( ) ) ; <nl> - <nl> - checksums . files [ base_name + mrk_file_extension ] = MergeTreeData : : DataPart : : Checksums : : Checksum ( <nl> - mrk_hashing_buf . count ( ) , mrk_hashing_buf . getHash ( ) ) ; <nl> - } <nl> - } ; <nl> - <nl> - } <nl> - <nl> - <nl> - MergeTreeData : : DataPart : : Checksums checkDataPart ( <nl> + IMergeTreeDataPart : : Checksums checkDataPart ( <nl> const String & full_path , <nl> - const MergeTreeIndexGranularity & adaptive_index_granularity , <nl> - const String & mrk_file_extension , <nl> + const NamesAndTypesList & columns_list , <nl> + const MergeTreeDataPartType & part_type , <nl> bool require_checksums , <nl> - const DataTypes & primary_key_data_types , <nl> - const MergeTreeIndices & indices , <nl> std : : function < bool ( ) > is_cancelled ) <nl> { <nl> - Logger * log = & Logger : : get ( " checkDataPart " ) ; <nl> - <nl> / * * Responsibility : <nl> * - read list of columns from columns . txt ; <nl> * - read checksums if exist ; <nl> - * - read ( and validate checksum ) of primary . idx ; obtain number of marks ; <nl> - * - read data files and marks for each stream of each column ; calculate and validate checksums ; <nl> - * - check that there are the same number of rows in each column ; <nl> - * - check that all marks files have the same size ; <nl> + * - validate list of columns and checksums <nl> * / <nl> <nl> CurrentMetrics : : Increment metric_increment { CurrentMetrics : : ReplicatedChecks } ; <nl> MergeTreeData : : DataPart : : Checksums checkDataPart ( <nl> if ( ! path . empty ( ) & & path . back ( ) ! = ' / ' ) <nl> path + = " / " ; <nl> <nl> - NamesAndTypesList columns ; <nl> + NamesAndTypesList columns_txt ; <nl> <nl> { <nl> ReadBufferFromFile buf ( path + " columns . txt " ) ; <nl> - columns . readText ( buf ) ; <nl> + columns_txt . readText ( buf ) ; <nl> assertEOF ( buf ) ; <nl> } <nl> <nl> - / / / Checksums from file checksums . txt . May be absent . If present , they are subsequently compared with the actual data checksums . <nl> - MergeTreeData : : DataPart : : Checksums checksums_txt ; <nl> - <nl> - if ( require_checksums | | Poco : : File ( path + " checksums . txt " ) . exists ( ) ) <nl> - { <nl> - ReadBufferFromFile buf ( path + " checksums . txt " ) ; <nl> - checksums_txt . read ( buf ) ; <nl> - assertEOF ( buf ) ; <nl> - } <nl> + if ( columns_txt ! = columns_list ) <nl> + throw Exception ( " Columns doesn ' t match in part " + path <nl> + + " . 
Expected : " + columns_list . toString ( ) <nl> + + " . Found : " + columns_txt . toString ( ) , ErrorCodes : : CORRUPTED_DATA ) ; <nl> <nl> / / / Real checksums based on contents of data . Must correspond to checksums . txt . If not - it means the data is broken . <nl> - MergeTreeData : : DataPart : : Checksums checksums_data ; <nl> + IMergeTreeDataPart : : Checksums checksums_data ; <nl> <nl> - size_t marks_in_primary_key = 0 ; <nl> - if ( ! primary_key_data_types . empty ( ) ) <nl> + auto checksum_compressed_file = [ ] ( const String & file_path ) <nl> { <nl> - ReadBufferFromFile file_buf ( path + " primary . idx " ) ; <nl> - HashingReadBuffer hashing_buf ( file_buf ) ; <nl> + ReadBufferFromFile file_buf ( file_path ) ; <nl> + HashingReadBuffer compressed_hashing_buf ( file_buf ) ; <nl> + CompressedReadBuffer uncompressing_buf ( compressed_hashing_buf ) ; <nl> + HashingReadBuffer uncompressed_hashing_buf ( uncompressing_buf ) ; <nl> <nl> - size_t key_size = primary_key_data_types . size ( ) ; <nl> - MutableColumns tmp_columns ( key_size ) ; <nl> - <nl> - for ( size_t j = 0 ; j < key_size ; + + j ) <nl> - tmp_columns [ j ] = primary_key_data_types [ j ] - > createColumn ( ) ; <nl> - <nl> - while ( ! hashing_buf . eof ( ) ) <nl> + uncompressed_hashing_buf . tryIgnore ( std : : numeric_limits < size_t > : : max ( ) ) ; <nl> + return IMergeTreeDataPart : : Checksums : : Checksum <nl> { <nl> - if ( is_cancelled ( ) ) <nl> - return { } ; <nl> + compressed_hashing_buf . count ( ) , compressed_hashing_buf . getHash ( ) , <nl> + uncompressed_hashing_buf . count ( ) , uncompressed_hashing_buf . getHash ( ) <nl> + } ; <nl> + } ; <nl> <nl> - + + marks_in_primary_key ; <nl> - for ( size_t j = 0 ; j < key_size ; + + j ) <nl> - primary_key_data_types [ j ] - > deserializeBinary ( * tmp_columns [ j ] . get ( ) , hashing_buf ) ; <nl> + if ( part_type = = MergeTreeDataPartType : : COMPACT ) <nl> + { <nl> + const auto & file_name = MergeTreeDataPartCompact : : DATA_FILE_NAME_WITH_EXTENSION ; <nl> + checksums_data . files [ file_name ] = checksum_compressed_file ( path + file_name ) ; <nl> + } <nl> + else if ( part_type = = MergeTreeDataPartType : : WIDE ) <nl> + { <nl> + for ( const auto & column : columns_list ) <nl> + { <nl> + column . type - > enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> + { <nl> + String file_name = IDataType : : getFileNameForStream ( column . name , substream_path ) + " . bin " ; <nl> + checksums_data . files [ file_name ] = checksum_compressed_file ( path + file_name ) ; <nl> + } , { } ) ; <nl> } <nl> - <nl> - size_t primary_idx_size = hashing_buf . count ( ) ; <nl> - <nl> - checksums_data . files [ " primary . idx " ] = MergeTreeData : : DataPart : : Checksums : : Checksum ( primary_idx_size , hashing_buf . getHash ( ) ) ; <nl> + } <nl> + else <nl> + { <nl> + throw Exception ( " Unknown type in part " + path , ErrorCodes : : UNKNOWN_PART_TYPE ) ; <nl> } <nl> <nl> - / / / Optional files count . txt , partition . dat , minmax_ * . idx , ttl . txt . Just calculate checksums for existing files . <nl> Poco : : DirectoryIterator dir_end ; <nl> for ( Poco : : DirectoryIterator dir_it ( path ) ; dir_it ! = dir_end ; + + dir_it ) <nl> { <nl> const String & file_name = dir_it . name ( ) ; <nl> - if ( file_name = = " count . txt " <nl> - | | file_name = = " partition . dat " <nl> - | | ( startsWith ( file_name , " minmax_ " ) & & endsWith ( file_name , " . idx " ) ) <nl> - | | file_name = = " ttl . txt " ) <nl> + auto checksum_it = checksums_data . 
files . find ( file_name ) ; <nl> + if ( checksum_it = = checksums_data . files . end ( ) & & file_name ! = " checksums . txt " & & file_name ! = " columns . txt " ) <nl> { <nl> ReadBufferFromFile file_buf ( dir_it - > path ( ) ) ; <nl> HashingReadBuffer hashing_buf ( file_buf ) ; <nl> hashing_buf . tryIgnore ( std : : numeric_limits < size_t > : : max ( ) ) ; <nl> - checksums_data . files [ file_name ] = MergeTreeData : : DataPart : : Checksums : : Checksum ( hashing_buf . count ( ) , hashing_buf . getHash ( ) ) ; <nl> + checksums_data . files [ file_name ] = IMergeTreeDataPart : : Checksums : : Checksum ( hashing_buf . count ( ) , hashing_buf . getHash ( ) ) ; <nl> } <nl> } <nl> <nl> - if ( is_cancelled ( ) ) <nl> - return { } ; <nl> - <nl> - / / / If count . txt file exists , use it as source of truth for number of rows . Otherwise just check that all columns have equal amount of rows . <nl> - std : : optional < size_t > rows ; <nl> + / / / Checksums from file checksums . txt . May be absent . If present , they are subsequently compared with the actual data checksums . <nl> + IMergeTreeDataPart : : Checksums checksums_txt ; <nl> <nl> - if ( Poco : : File ( path + " count . txt " ) . exists ( ) ) <nl> + if ( require_checksums | | Poco : : File ( path + " checksums . txt " ) . exists ( ) ) <nl> { <nl> - ReadBufferFromFile buf ( path + " count . txt " ) ; <nl> - size_t count = 0 ; <nl> - readText ( count , buf ) ; <nl> + ReadBufferFromFile buf ( path + " checksums . txt " ) ; <nl> + checksums_txt . read ( buf ) ; <nl> assertEOF ( buf ) ; <nl> - rows = count ; <nl> - } <nl> - <nl> - / / / Read and check skip indices . <nl> - for ( const auto & index : indices ) <nl> - { <nl> - Stream stream ( path , index - > getFileName ( ) , " . idx " , mrk_file_extension , adaptive_index_granularity ) ; <nl> - size_t mark_num = 0 ; <nl> - <nl> - while ( ! stream . uncompressed_hashing_buf . eof ( ) ) <nl> - { <nl> - if ( stream . mrk_hashing_buf . eof ( ) ) <nl> - throw Exception ( " Unexpected end of mrk file while reading index " + index - > name , <nl> - ErrorCodes : : CORRUPTED_DATA ) ; <nl> - try <nl> - { <nl> - stream . assertMark ( ) ; <nl> - } <nl> - catch ( Exception & e ) <nl> - { <nl> - e . addMessage ( " Cannot read mark " + toString ( mark_num ) <nl> - + " in file " + stream . mrk_file_path <nl> - + " , mrk file offset : " + toString ( stream . mrk_hashing_buf . count ( ) ) ) ; <nl> - throw ; <nl> - } <nl> - try <nl> - { <nl> - index - > createIndexGranule ( ) - > deserializeBinary ( stream . uncompressed_hashing_buf ) ; <nl> - } <nl> - catch ( Exception & e ) <nl> - { <nl> - e . addMessage ( " Cannot read granule " + toString ( mark_num ) <nl> - + " in file " + stream . bin_file_path <nl> - + " , mrk file offset : " + toString ( stream . mrk_hashing_buf . count ( ) ) ) ; <nl> - throw ; <nl> - } <nl> - + + mark_num ; <nl> - if ( is_cancelled ( ) ) <nl> - return { } ; <nl> - } <nl> - <nl> - stream . assertEnd ( ) ; <nl> - stream . saveChecksums ( checksums_data ) ; <nl> - } <nl> - <nl> - / / / Read all columns , calculate checksums and validate marks . <nl> - for ( const NameAndTypePair & name_type : columns ) <nl> - { <nl> - LOG_DEBUG ( log , " Checking column " + name_type . name + " in " + path ) ; <nl> - <nl> - std : : map < String , Stream > streams ; <nl> - size_t column_size = 0 ; <nl> - size_t mark_num = 0 ; <nl> - <nl> - IDataType : : DeserializeBinaryBulkStatePtr state ; <nl> - IDataType : : DeserializeBinaryBulkSettings settings ; <nl> - settings . 
getter = [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> - { <nl> - String file_name = IDataType : : getFileNameForStream ( name_type . name , substream_path ) ; <nl> - auto & stream = streams . try_emplace ( file_name , path , file_name , " . bin " , mrk_file_extension , adaptive_index_granularity ) . first - > second ; <nl> - return & stream . uncompressed_hashing_buf ; <nl> - } ; <nl> - <nl> - / / / Prefixes have to be read before data because first mark points after prefix <nl> - name_type . type - > deserializeBinaryBulkStatePrefix ( settings , state ) ; <nl> - <nl> - while ( true ) <nl> - { <nl> - <nl> - / / / Check that mark points to current position in file . <nl> - bool marks_eof = false ; <nl> - name_type . type - > enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> - { <nl> - String file_name = IDataType : : getFileNameForStream ( name_type . name , substream_path ) ; <nl> - <nl> - auto & stream = streams . try_emplace ( file_name , path , file_name , " . bin " , mrk_file_extension , adaptive_index_granularity ) . first - > second ; <nl> - try <nl> - { <nl> - / / / LowCardinality dictionary column is not read monotonically , so marks maybe inconsistent with <nl> - / / / offset position in file . So we just read data and marks file , but doesn ' t check marks equality . <nl> - bool only_read = ! substream_path . empty ( ) & & substream_path . back ( ) . type = = IDataType : : Substream : : DictionaryKeys ; <nl> - if ( ! stream . mrk_hashing_buf . eof ( ) ) <nl> - stream . assertMark ( only_read ) ; <nl> - else <nl> - marks_eof = true ; <nl> - } <nl> - catch ( Exception & e ) <nl> - { <nl> - e . addMessage ( " Cannot read mark " + toString ( mark_num ) + " at row " + toString ( column_size ) <nl> - + " in file " + stream . mrk_file_path <nl> - + " , mrk file offset : " + toString ( stream . mrk_hashing_buf . count ( ) ) ) ; <nl> - throw ; <nl> - } <nl> - } , settings . path ) ; <nl> - <nl> - size_t rows_after_mark = adaptive_index_granularity . getMarkRows ( mark_num ) ; <nl> - + + mark_num ; <nl> - <nl> - / / / Read index_granularity rows from column . <nl> - / / / NOTE Shared array sizes of Nested columns are read more than once . That ' s Ok . <nl> - <nl> - MutableColumnPtr tmp_column = name_type . type - > createColumn ( ) ; <nl> - name_type . type - > deserializeBinaryBulkWithMultipleStreams ( * tmp_column , rows_after_mark , settings , state ) ; <nl> - <nl> - size_t read_size = tmp_column - > size ( ) ; <nl> - column_size + = read_size ; <nl> - <nl> - / / / We already checked all marks except final ( it will be checked in assertEnd ( ) ) . <nl> - if ( mark_num = = adaptive_index_granularity . getMarksCountWithoutFinal ( ) ) <nl> - break ; <nl> - else if ( marks_eof ) <nl> - throw Exception ( " Unexpected end of mrk file while reading column " + name_type . name , ErrorCodes : : CORRUPTED_DATA ) ; <nl> - <nl> - if ( is_cancelled ( ) ) <nl> - return { } ; <nl> - } <nl> - <nl> - / / / Check that number of rows are equal in each column . <nl> - if ( ! rows ) <nl> - rows = column_size ; <nl> - else if ( * rows ! = column_size ) <nl> - throw Exception { " Unexpected number of rows in column " <nl> - + name_type . name + " ( " + toString ( column_size ) + " , expected : " + toString ( * rows ) + " ) " , <nl> - ErrorCodes : : SIZES_OF_COLUMNS_DOESNT_MATCH } ; <nl> - <nl> - / / / Save checksums for column . <nl> - name_type . 
type - > enumerateStreams ( [ & ] ( const IDataType : : SubstreamPath & substream_path ) <nl> - { <nl> - String file_name = IDataType : : getFileNameForStream ( name_type . name , substream_path ) ; <nl> - auto stream_it = streams . find ( file_name ) ; <nl> - if ( stream_it = = streams . end ( ) ) <nl> - throw Exception ( " Logical error : cannot find stream " + file_name , ErrorCodes : : LOGICAL_ERROR ) ; <nl> - <nl> - stream_it - > second . assertEnd ( ) ; <nl> - stream_it - > second . saveChecksums ( checksums_data ) ; <nl> - } , { } ) ; <nl> - <nl> - if ( is_cancelled ( ) ) <nl> - return { } ; <nl> } <nl> <nl> - if ( ! rows ) <nl> - throw Exception ( " No columns in data part " , ErrorCodes : : EMPTY_LIST_OF_COLUMNS_PASSED ) ; <nl> - <nl> - if ( ! primary_key_data_types . empty ( ) ) <nl> - { <nl> - size_t expected_marks = adaptive_index_granularity . getMarksCount ( ) ; <nl> - if ( expected_marks ! = marks_in_primary_key ) <nl> - { <nl> - throw Exception ( " Size of primary key doesn ' t match expected number of marks . " <nl> - " Number of rows in columns : " + toString ( * rows ) <nl> - + " , expected number of marks : " + toString ( expected_marks ) <nl> - + " , size of primary key : " + toString ( marks_in_primary_key ) , <nl> - ErrorCodes : : CORRUPTED_DATA ) ; <nl> - } <nl> - } <nl> + if ( is_cancelled ( ) ) <nl> + return { } ; <nl> <nl> if ( require_checksums | | ! checksums_txt . files . empty ( ) ) <nl> checksums_txt . checkEqual ( checksums_data , true ) ; <nl> MergeTreeData : : DataPart : : Checksums checkDataPart ( <nl> return checksums_data ; <nl> } <nl> <nl> - MergeTreeData : : DataPart : : Checksums checkDataPart ( <nl> + IMergeTreeDataPart : : Checksums checkDataPart ( <nl> MergeTreeData : : DataPartPtr data_part , <nl> bool require_checksums , <nl> - const DataTypes & primary_key_data_types , <nl> - const MergeTreeIndices & indices , <nl> std : : function < bool ( ) > is_cancelled ) <nl> { <nl> return checkDataPart ( <nl> data_part - > getFullPath ( ) , <nl> - data_part - > index_granularity , <nl> - data_part - > index_granularity_info . marks_file_extension , <nl> + data_part - > getColumns ( ) , <nl> + data_part - > getType ( ) , <nl> require_checksums , <nl> - primary_key_data_types , <nl> - indices , <nl> is_cancelled ) ; <nl> } <nl> <nl> - <nl> } <nl> mmm a / dbms / src / Storages / MergeTree / checkDataPart . h <nl> ppp b / dbms / src / Storages / MergeTree / checkDataPart . h <nl> <nl> namespace DB <nl> { <nl> <nl> - / * * Completely checks the part data <nl> - * - Calculates checksums and compares them with checksums . txt . <nl> - * - For arrays and strings , checks the correspondence of the size and amount of data . <nl> - * - Checks the correctness of marks . <nl> - * Throws an exception if the part is corrupted or if the check fails ( TODO : you can try to separate these cases ) . <nl> - * / <nl> - MergeTreeData : : DataPart : : Checksums checkDataPart ( <nl> + / / / Calculates checksums and compares them with checksums . txt . 
<nl> + IMergeTreeDataPart : : Checksums checkDataPart ( <nl> MergeTreeData : : DataPartPtr data_part , <nl> bool require_checksums , <nl> - const DataTypes & primary_key_data_types , <nl> - const MergeTreeIndices & indices = { } , / / / Check skip indices <nl> std : : function < bool ( ) > is_cancelled = [ ] { return false ; } ) ; <nl> <nl> - MergeTreeData : : DataPart : : Checksums checkDataPart ( <nl> + IMergeTreeDataPart : : Checksums checkDataPart ( <nl> const String & full_path , <nl> - const MergeTreeIndexGranularity & index_granularity , <nl> - const String & marks_file_extension , <nl> + const NamesAndTypesList & columns_list , <nl> + const MergeTreeDataPartType & part_type , <nl> bool require_checksums , <nl> - const DataTypes & primary_key_data_types , <nl> - const MergeTreeIndices & indices = { } , / / / Check skip indices <nl> std : : function < bool ( ) > is_cancelled = [ ] { return false ; } ) ; <nl> } <nl> mmm a / dbms / src / Storages / MutationCommands . h <nl> ppp b / dbms / src / Storages / MutationCommands . h <nl> <nl> <nl> # include < Parsers / ASTAlterQuery . h > <nl> # include < Storages / IStorage_fwd . h > <nl> + # include < Core / Names . h > <nl> <nl> # include < optional > <nl> # include < unordered_map > <nl> class MutationCommands : public std : : vector < MutationCommand > <nl> <nl> void writeText ( WriteBuffer & out ) const ; <nl> void readText ( ReadBuffer & in ) ; <nl> + <nl> + / / / Extra columns that we need to read except ones needed for expressions . <nl> + Names additional_columns ; <nl> } ; <nl> <nl> } <nl> mmm a / dbms / src / Storages / StorageMergeTree . cpp <nl> ppp b / dbms / src / Storages / StorageMergeTree . cpp <nl> struct CurrentlyMergingPartsTagger <nl> reserved_space = storage . tryReserveSpace ( total_size , future_part_ . parts [ 0 ] - > disk ) ; <nl> else <nl> { <nl> - MergeTreeDataPart : : TTLInfos ttl_infos ; <nl> + IMergeTreeDataPart : : TTLInfos ttl_infos ; <nl> size_t max_volume_index = 0 ; <nl> for ( auto & part_ptr : future_part_ . parts ) <nl> { <nl> bool StorageMergeTree : : tryMutatePart ( ) <nl> future_part . parts . push_back ( part ) ; <nl> future_part . part_info = new_part_info ; <nl> future_part . name = part - > getNewName ( new_part_info ) ; <nl> + future_part . type = part - > getType ( ) ; <nl> <nl> tagger . emplace ( future_part , MergeTreeDataMergerMutator : : estimateNeededDiskSpace ( { part } ) , * this , true ) ; <nl> break ; <nl> CheckResults StorageMergeTree : : checkData ( const ASTPtr & query , const Context & c <nl> { <nl> try <nl> { <nl> - auto calculated_checksums = checkDataPart ( part , false , primary_key_data_types , skip_indices ) ; <nl> + auto calculated_checksums = checkDataPart ( part , false ) ; <nl> calculated_checksums . checkEqual ( part - > checksums , true ) ; <nl> WriteBufferFromFile out ( tmp_checksums_path , 4096 ) ; <nl> part - > checksums . write ( out ) ; <nl> CheckResults StorageMergeTree : : checkData ( const ASTPtr & query , const Context & c <nl> { <nl> try <nl> { <nl> - checkDataPart ( part , true , primary_key_data_types , skip_indices ) ; <nl> + checkDataPart ( part , true ) ; <nl> results . emplace_back ( part - > name , true , " " ) ; <nl> } <nl> catch ( const Exception & ex ) <nl> mmm a / dbms / src / Storages / StorageReplicatedMergeTree . cpp <nl> ppp b / dbms / src / Storages / StorageReplicatedMergeTree . cpp <nl> <nl> # include < Storages / PartitionCommands . h > <nl> # include < Storages / ColumnsDescription . 
h > <nl> # include < Storages / StorageReplicatedMergeTree . h > <nl> - # include < Storages / MergeTree / MergeTreeDataPart . h > <nl> + # include < Storages / MergeTree / IMergeTreeDataPart . h > <nl> # include < Storages / MergeTree / MergeList . h > <nl> # include < Storages / MergeTree / ReplicatedMergeTreeTableMetadata . h > <nl> # include < Storages / MergeTree / ReplicatedMergeTreeBlockOutputStream . h > <nl> void StorageReplicatedMergeTree : : checkPartChecksumsAndAddCommitOps ( const zkutil : <nl> if ( part_name . empty ( ) ) <nl> part_name = part - > name ; <nl> <nl> - check ( part - > columns ) ; <nl> + check ( part - > getColumns ( ) ) ; <nl> int expected_columns_version = columns_version ; <nl> <nl> auto local_part_header = ReplicatedMergeTreePartHeader : : fromColumnsAndChecksums ( <nl> - part - > columns , part - > checksums ) ; <nl> + part - > getColumns ( ) , part - > checksums ) ; <nl> <nl> Strings replicas = zookeeper - > getChildren ( zookeeper_path + " / replicas " ) ; <nl> std : : shuffle ( replicas . begin ( ) , replicas . end ( ) , thread_local_rng ) ; <nl> void StorageReplicatedMergeTree : : checkPartChecksumsAndAddCommitOps ( const zkutil : <nl> ops . emplace_back ( zkutil : : makeCreateRequest ( <nl> part_path , " " , zkutil : : CreateMode : : Persistent ) ) ; <nl> ops . emplace_back ( zkutil : : makeCreateRequest ( <nl> - part_path + " / columns " , part - > columns . toString ( ) , zkutil : : CreateMode : : Persistent ) ) ; <nl> + part_path + " / columns " , part - > getColumns ( ) . toString ( ) , zkutil : : CreateMode : : Persistent ) ) ; <nl> ops . emplace_back ( zkutil : : makeCreateRequest ( <nl> part_path + " / checksums " , getChecksumsForZooKeeper ( part - > checksums ) , zkutil : : CreateMode : : Persistent ) ) ; <nl> } <nl> bool StorageReplicatedMergeTree : : tryExecuteMerge ( const LogEntry & entry ) <nl> size_t estimated_space_for_merge = MergeTreeDataMergerMutator : : estimateNeededDiskSpace ( parts ) ; <nl> <nl> / / / Can throw an exception while reserving space . <nl> - MergeTreeDataPart : : TTLInfos ttl_infos ; <nl> + IMergeTreeDataPart : : TTLInfos ttl_infos ; <nl> size_t max_volume_index = 0 ; <nl> for ( auto & part_ptr : parts ) <nl> { <nl> bool StorageReplicatedMergeTree : : tryExecuteMerge ( const LogEntry & entry ) <nl> <nl> auto table_lock = lockStructureForShare ( false , RWLockImpl : : NO_QUERY ) ; <nl> <nl> - FutureMergedMutatedPart future_merged_part ( parts ) ; <nl> + FutureMergedMutatedPart future_merged_part ( parts , entry . new_part_type ) ; <nl> if ( future_merged_part . name ! = entry . new_part_name ) <nl> { <nl> throw Exception ( " Future merged part name " + backQuote ( future_merged_part . name ) + " differs from part name in log entry : " <nl> bool StorageReplicatedMergeTree : : tryExecutePartMutation ( const StorageReplicatedM <nl> future_mutated_part . part_info = new_part_info ; <nl> future_mutated_part . name = entry . new_part_name ; <nl> future_mutated_part . updatePath ( * this , reserved_space ) ; <nl> + future_mutated_part . type = source_part - > getType ( ) ; <nl> <nl> auto table_id = getStorageID ( ) ; <nl> MergeList : : EntryPtr merge_entry = global_context . getMergeList ( ) . insert ( <nl> void StorageReplicatedMergeTree : : mergeSelectingTask ( ) <nl> merger_mutator . selectPartsToMerge ( future_merged_part , false , max_source_parts_size_for_merge , merge_pred ) ) <nl> { <nl> success = createLogEntryToMergeParts ( zookeeper , future_merged_part . parts , <nl> - future_merged_part . 
name , deduplicate , force_ttl ) ; <nl> + future_merged_part . name , future_merged_part . type , deduplicate , force_ttl ) ; <nl> } <nl> / / / If there are many mutations in queue it may happen , that we cannot enqueue enough merges to merge all new parts <nl> else if ( max_source_part_size_for_mutation > 0 & & queue . countMutations ( ) > 0 <nl> bool StorageReplicatedMergeTree : : createLogEntryToMergeParts ( <nl> zkutil : : ZooKeeperPtr & zookeeper , <nl> const DataPartsVector & parts , <nl> const String & merged_name , <nl> + const MergeTreeDataPartType & merged_part_type , <nl> bool deduplicate , <nl> bool force_ttl , <nl> ReplicatedMergeTreeLogEntryData * out_log_entry ) <nl> bool StorageReplicatedMergeTree : : createLogEntryToMergeParts ( <nl> entry . type = LogEntry : : MERGE_PARTS ; <nl> entry . source_replica = replica_name ; <nl> entry . new_part_name = merged_name ; <nl> + entry . new_part_type = merged_part_type ; <nl> entry . deduplicate = deduplicate ; <nl> entry . force_ttl = force_ttl ; <nl> entry . create_time = time ( nullptr ) ; <nl> <nl> for ( const auto & part : parts ) <nl> + { <nl> entry . source_parts . push_back ( part - > name ) ; <nl> + } <nl> <nl> String path_created = zookeeper - > create ( zookeeper_path + " / log / log - " , entry . toString ( ) , zkutil : : CreateMode : : PersistentSequential ) ; <nl> entry . znode_name = path_created . substr ( path_created . find_last_of ( ' / ' ) + 1 ) ; <nl> bool StorageReplicatedMergeTree : : createLogEntryToMergeParts ( <nl> } <nl> <nl> <nl> - bool StorageReplicatedMergeTree : : createLogEntryToMutatePart ( const MergeTreeDataPart & part , Int64 mutation_version ) <nl> + bool StorageReplicatedMergeTree : : createLogEntryToMutatePart ( const IMergeTreeDataPart & part , Int64 mutation_version ) <nl> { <nl> auto zookeeper = getZooKeeper ( ) ; <nl> <nl> bool StorageReplicatedMergeTree : : fetchPart ( const String & part_name , const Strin <nl> { <nl> const auto part_info = MergeTreePartInfo : : fromPartName ( part_name , format_version ) ; <nl> <nl> - if ( auto part = getPartIfExists ( part_info , { MergeTreeDataPart : : State : : Outdated , MergeTreeDataPart : : State : : Deleting } ) ) <nl> + if ( auto part = getPartIfExists ( part_info , { IMergeTreeDataPart : : State : : Outdated , IMergeTreeDataPart : : State : : Deleting } ) ) <nl> { <nl> LOG_DEBUG ( log , " Part " < < part - > name < < " should be deleted after previous attempt before fetch " ) ; <nl> / / / Force immediate parts cleanup to delete the part that was left from the previous fetch attempt . <nl> bool StorageReplicatedMergeTree : : optimize ( const ASTPtr & query , const ASTPtr & p <nl> future_merged_part , disk_space , can_merge , partition_id , true , nullptr ) ; <nl> ReplicatedMergeTreeLogEntryData merge_entry ; <nl> if ( selected & & ! createLogEntryToMergeParts ( zookeeper , future_merged_part . parts , <nl> - future_merged_part . name , deduplicate , force_ttl , & merge_entry ) ) <nl> + future_merged_part . name , future_merged_part . type , deduplicate , force_ttl , & merge_entry ) ) <nl> return handle_noop ( " Can ' t create merge queue node in ZooKeeper " ) ; <nl> if ( merge_entry . type ! = ReplicatedMergeTreeLogEntryData : : Type : : EMPTY ) <nl> merge_entries . push_back ( std : : move ( merge_entry ) ) ; <nl> bool StorageReplicatedMergeTree : : optimize ( const ASTPtr & query , const ASTPtr & p <nl> <nl> ReplicatedMergeTreeLogEntryData merge_entry ; <nl> if ( ! createLogEntryToMergeParts ( zookeeper , future_merged_part . 
parts , <nl> - future_merged_part . name , deduplicate , force_ttl , & merge_entry ) ) <nl> + future_merged_part . name , future_merged_part . type , deduplicate , force_ttl , & merge_entry ) ) <nl> return handle_noop ( " Can ' t create merge queue node in ZooKeeper " ) ; <nl> if ( merge_entry . type ! = ReplicatedMergeTreeLogEntryData : : Type : : EMPTY ) <nl> merge_entries . push_back ( std : : move ( merge_entry ) ) ; <nl> void StorageReplicatedMergeTree : : getCommitPartOps ( <nl> { <nl> ops . emplace_back ( zkutil : : makeCreateRequest ( <nl> replica_path + " / parts / " + part - > name , <nl> - ReplicatedMergeTreePartHeader : : fromColumnsAndChecksums ( part - > columns , part - > checksums ) . toString ( ) , <nl> + ReplicatedMergeTreePartHeader : : fromColumnsAndChecksums ( part - > getColumns ( ) , part - > checksums ) . toString ( ) , <nl> zkutil : : CreateMode : : Persistent ) ) ; <nl> } <nl> else <nl> void StorageReplicatedMergeTree : : getCommitPartOps ( <nl> zkutil : : CreateMode : : Persistent ) ) ; <nl> ops . emplace_back ( zkutil : : makeCreateRequest ( <nl> replica_path + " / parts / " + part - > name + " / columns " , <nl> - part - > columns . toString ( ) , <nl> + part - > getColumns ( ) . toString ( ) , <nl> zkutil : : CreateMode : : Persistent ) ) ; <nl> ops . emplace_back ( zkutil : : makeCreateRequest ( <nl> replica_path + " / parts / " + part - > name + " / checksums " , <nl> mmm a / dbms / src / Storages / StorageReplicatedMergeTree . h <nl> ppp b / dbms / src / Storages / StorageReplicatedMergeTree . h <nl> class StorageReplicatedMergeTree : public ext : : shared_ptr_helper < StorageReplicat <nl> zkutil : : ZooKeeperPtr & zookeeper , <nl> const DataPartsVector & parts , <nl> const String & merged_name , <nl> + const MergeTreeDataPartType & merged_part_type , <nl> bool deduplicate , <nl> bool force_ttl , <nl> ReplicatedMergeTreeLogEntryData * out_log_entry = nullptr ) ; <nl> <nl> - bool createLogEntryToMutatePart ( const MergeTreeDataPart & part , Int64 mutation_version ) ; <nl> + bool createLogEntryToMutatePart ( const IMergeTreeDataPart & part , Int64 mutation_version ) ; <nl> <nl> / / / Exchange parts . <nl> <nl> mmm a / dbms / src / Storages / System / StorageSystemParts . cpp <nl> ppp b / dbms / src / Storages / System / StorageSystemParts . cpp <nl> StorageSystemParts : : StorageSystemParts ( const std : : string & name_ ) <nl> { <nl> { " partition " , std : : make_shared < DataTypeString > ( ) } , <nl> { " name " , std : : make_shared < DataTypeString > ( ) } , <nl> + { " part_type " , std : : make_shared < DataTypeString > ( ) } , <nl> { " active " , std : : make_shared < DataTypeUInt8 > ( ) } , <nl> { " marks " , std : : make_shared < DataTypeUInt64 > ( ) } , <nl> { " rows " , std : : make_shared < DataTypeUInt64 > ( ) } , <nl> StorageSystemParts : : StorageSystemParts ( const std : : string & name_ ) <nl> <nl> void StorageSystemParts : : processNextStorage ( MutableColumns & columns_ , const StoragesInfo & info , bool has_state_column ) <nl> { <nl> - using State = MergeTreeDataPart : : State ; <nl> + using State = IMergeTreeDataPart : : State ; <nl> MergeTreeData : : DataPartStateVector all_parts_state ; <nl> MergeTreeData : : DataPartsVector all_parts ; <nl> <nl> void StorageSystemParts : : processNextStorage ( MutableColumns & columns_ , const Sto <nl> columns_ [ i + + ] - > insert ( out . 
str ( ) ) ; <nl> } <nl> columns_ [ i + + ] - > insert ( part - > name ) ; <nl> + columns_ [ i + + ] - > insert ( part - > getTypeName ( ) ) ; <nl> columns_ [ i + + ] - > insert ( part_state = = State : : Committed ) ; <nl> columns_ [ i + + ] - > insert ( part - > getMarksCount ( ) ) ; <nl> columns_ [ i + + ] - > insert ( part - > rows_count ) ; <nl> void StorageSystemParts : : processNextStorage ( MutableColumns & columns_ , const Sto <nl> <nl> MinimalisticDataPartChecksums helper ; <nl> { <nl> - / / / TODO : MergeTreeDataPart structure is too error - prone . <nl> + / / / TODO : IMergeTreeDataPart structure is too error - prone . <nl> std : : shared_lock < std : : shared_mutex > lock ( part - > columns_lock ) ; <nl> helper . computeTotalChecksums ( part - > checksums ) ; <nl> } <nl> mmm a / dbms / src / Storages / System / StorageSystemPartsColumns . cpp <nl> ppp b / dbms / src / Storages / System / StorageSystemPartsColumns . cpp <nl> StorageSystemPartsColumns : : StorageSystemPartsColumns ( const std : : string & name_ ) <nl> { <nl> { " partition " , std : : make_shared < DataTypeString > ( ) } , <nl> { " name " , std : : make_shared < DataTypeString > ( ) } , <nl> + { " part_type " , std : : make_shared < DataTypeString > ( ) } , <nl> { " active " , std : : make_shared < DataTypeUInt8 > ( ) } , <nl> { " marks " , std : : make_shared < DataTypeUInt64 > ( ) } , <nl> { " rows " , std : : make_shared < DataTypeUInt64 > ( ) } , <nl> void StorageSystemPartsColumns : : processNextStorage ( MutableColumns & columns_ , co <nl> auto index_size_in_bytes = part - > getIndexSizeInBytes ( ) ; <nl> auto index_size_in_allocated_bytes = part - > getIndexSizeInAllocatedBytes ( ) ; <nl> <nl> - using State = MergeTreeDataPart : : State ; <nl> + using State = IMergeTreeDataPart : : State ; <nl> <nl> - for ( const auto & column : part - > columns ) <nl> + for ( const auto & column : part - > getColumns ( ) ) <nl> <nl> { <nl> size_t j = 0 ; <nl> void StorageSystemPartsColumns : : processNextStorage ( MutableColumns & columns_ , co <nl> columns_ [ j + + ] - > insert ( out . str ( ) ) ; <nl> } <nl> columns_ [ j + + ] - > insert ( part - > name ) ; <nl> + columns_ [ j + + ] - > insert ( part - > getTypeName ( ) ) ; <nl> columns_ [ j + + ] - > insert ( part_state = = State : : Committed ) ; <nl> columns_ [ j + + ] - > insert ( part - > getMarksCount ( ) ) ; <nl> <nl> mmm a / dbms / src / Storages / tests / gtest_aux_funcs_for_adaptive_granularity . cpp <nl> ppp b / dbms / src / Storages / tests / gtest_aux_funcs_for_adaptive_granularity . cpp <nl> <nl> # include < Columns / ColumnVector . h > <nl> <nl> / / I know that inclusion of . cpp is not good at all <nl> - # include < Storages / MergeTree / IMergedBlockOutputStream . cpp > <nl> + # include < Storages / MergeTree / IMergeTreeDataPartWriter . cpp > <nl> <nl> using namespace DB ; <nl> static Block getBlockWithSize ( size_t required_size_in_bytes , size_t size_of_row_in_bytes ) <nl> mmm a / dbms / tests / integration / helpers / cluster . py <nl> ppp b / dbms / tests / integration / helpers / cluster . py <nl> def exec_in_container ( self , cmd , detach = False , * * kwargs ) : <nl> <nl> def contains_in_log ( self , substring ) : <nl> result = self . exec_in_container ( <nl> - [ " bash " , " - c " , " grep ' { } ' / var / log / clickhouse - server / clickhouse - server . log | | true " . format ( substring ) ] ) <nl> + [ " bash " , " - c " , ' grep " { } " / var / log / clickhouse - server / clickhouse - server . log | | true ' . 
format ( substring ) ] ) <nl> return len ( result ) > 0 <nl> <nl> def copy_file_to_container ( self , local_path , dest_path ) : <nl> new file mode 100644 <nl> index 00000000000 . . e69de29bb2d <nl> new file mode 100644 <nl> index 00000000000 . . d4e550f8893 <nl> mmm / dev / null <nl> ppp b / dbms / tests / integration / test_polymorphic_parts / configs / compact_parts . xml <nl> <nl> + < yandex > <nl> + < merge_tree > <nl> + < min_rows_for_wide_part > 512 < / min_rows_for_wide_part > <nl> + < / merge_tree > <nl> + < / yandex > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . 3cc3d787bea <nl> mmm / dev / null <nl> ppp b / dbms / tests / integration / test_polymorphic_parts / configs / no_leader . xml <nl> <nl> + < yandex > <nl> + < merge_tree > <nl> + < replicated_can_become_leader > 0 < / replicated_can_become_leader > <nl> + < / merge_tree > <nl> + < / yandex > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . caec593aa40 <nl> mmm / dev / null <nl> ppp b / dbms / tests / integration / test_polymorphic_parts / configs / users . d / not_optimize_count . xml <nl> <nl> + < yandex > <nl> + < profiles > <nl> + < default > <nl> + < optimize_trivial_count_query > 0 < / optimize_trivial_count_query > <nl> + < / default > <nl> + < / profiles > <nl> + < / yandex > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . 49c68eea52a <nl> mmm / dev / null <nl> ppp b / dbms / tests / integration / test_polymorphic_parts / test . py <nl> <nl> + import time <nl> + import pytest <nl> + import random <nl> + import string <nl> + <nl> + from helpers . test_tools import TSV <nl> + from helpers . test_tools import assert_eq_with_retry <nl> + from helpers . cluster import ClickHouseCluster <nl> + <nl> + cluster = ClickHouseCluster ( __file__ ) <nl> + <nl> + def get_random_array ( ) : <nl> + return [ random . randint ( 0 , 1000 ) % 1000 for _ in range ( random . randint ( 0 , 1000 ) ) ] <nl> + <nl> + def get_random_string ( ) : <nl> + length = random . randint ( 0 , 1000 ) <nl> + return ' ' . join ( random . choice ( string . ascii_uppercase + string . digits ) for _ in range ( length ) ) <nl> + <nl> + def insert_random_data ( table , node , size ) : <nl> + data = [ <nl> + ' ( ' + ' , ' . join ( ( <nl> + " ' 2019 - 10 - 11 ' " , <nl> + str ( i ) , <nl> + " ' " + get_random_string ( ) + " ' " , <nl> + str ( get_random_array ( ) ) ) ) + <nl> + ' ) ' for i in range ( size ) <nl> + ] <nl> + <nl> + node . query ( " INSERT INTO { } VALUES { } " . format ( table , ' , ' . join ( data ) ) ) <nl> + <nl> + def create_tables ( name , nodes , node_settings , shard ) : <nl> + for i , ( node , settings ) in enumerate ( zip ( nodes , node_settings ) ) : <nl> + node . query ( <nl> + ' ' ' <nl> + CREATE TABLE { name } ( date Date , id UInt32 , s String , arr Array ( Int32 ) ) <nl> + ENGINE = ReplicatedMergeTree ( ' / clickhouse / tables / test / { shard } / { name } ' , ' { repl } ' ) <nl> + PARTITION BY toYYYYMM ( date ) <nl> + ORDER BY id <nl> + SETTINGS index_granularity = { index_granularity } , index_granularity_bytes = { index_granularity_bytes } , <nl> + min_rows_for_wide_part = { min_rows_for_wide_part } , min_bytes_for_wide_part = { min_bytes_for_wide_part } <nl> + ' ' ' . format ( name = name , shard = shard , repl = i , * * settings ) ) <nl> + <nl> + def create_tables_old_format ( name , nodes , shard ) : <nl> + for i , node in enumerate ( nodes ) : <nl> + node . 
query ( <nl> + ' ' ' <nl> + CREATE TABLE { name } ( date Date , id UInt32 , s String , arr Array ( Int32 ) ) <nl> + ENGINE = ReplicatedMergeTree ( ' / clickhouse / tables / test / { shard } / { name } ' , ' { repl } ' , date , id , 64 ) <nl> + ' ' ' . format ( name = name , shard = shard , repl = i ) ) <nl> + <nl> + node1 = cluster . add_instance ( ' node1 ' , config_dir = " configs " , with_zookeeper = True ) <nl> + node2 = cluster . add_instance ( ' node2 ' , config_dir = " configs " , with_zookeeper = True ) <nl> + <nl> + settings_default = { ' index_granularity ' : 64 , ' index_granularity_bytes ' : 10485760 , ' min_rows_for_wide_part ' : 512 , ' min_bytes_for_wide_part ' : 0 } <nl> + settings_not_adaptive = { ' index_granularity ' : 64 , ' index_granularity_bytes ' : 0 , ' min_rows_for_wide_part ' : 512 , ' min_bytes_for_wide_part ' : 0 } <nl> + <nl> + node3 = cluster . add_instance ( ' node3 ' , config_dir = " configs " , with_zookeeper = True ) <nl> + node4 = cluster . add_instance ( ' node4 ' , config_dir = " configs " , main_configs = [ ' configs / no_leader . xml ' ] , with_zookeeper = True ) <nl> + <nl> + settings_compact = { ' index_granularity ' : 64 , ' index_granularity_bytes ' : 10485760 , ' min_rows_for_wide_part ' : 512 , ' min_bytes_for_wide_part ' : 0 } <nl> + settings_wide = { ' index_granularity ' : 64 , ' index_granularity_bytes ' : 10485760 , ' min_rows_for_wide_part ' : 0 , ' min_bytes_for_wide_part ' : 0 } <nl> + <nl> + node5 = cluster . add_instance ( ' node5 ' , config_dir = ' configs ' , main_configs = [ ' configs / compact_parts . xml ' ] , with_zookeeper = True ) <nl> + node6 = cluster . add_instance ( ' node6 ' , config_dir = ' configs ' , main_configs = [ ' configs / compact_parts . xml ' ] , with_zookeeper = True ) <nl> + <nl> + @ pytest . fixture ( scope = " module " ) <nl> + def start_cluster ( ) : <nl> + try : <nl> + cluster . start ( ) <nl> + <nl> + create_tables ( ' polymorphic_table ' , [ node1 , node2 ] , [ settings_default , settings_default ] , " shard1 " ) <nl> + create_tables ( ' non_adaptive_table ' , [ node1 , node2 ] , [ settings_not_adaptive , settings_default ] , " shard1 " ) <nl> + create_tables ( ' polymorphic_table_compact ' , [ node3 , node4 ] , [ settings_compact , settings_wide ] , " shard2 " ) <nl> + create_tables ( ' polymorphic_table_wide ' , [ node3 , node4 ] , [ settings_wide , settings_compact ] , " shard2 " ) <nl> + create_tables_old_format ( ' polymorphic_table ' , [ node5 , node6 ] , " shard3 " ) <nl> + <nl> + yield cluster <nl> + <nl> + finally : <nl> + cluster . shutdown ( ) <nl> + <nl> + @ pytest . mark . parametrize ( <nl> + ( ' first_node ' , ' second_node ' ) , <nl> + [ <nl> + ( node1 , node2 ) , <nl> + ( node5 , node6 ) <nl> + ] <nl> + ) <nl> + def test_polymorphic_parts_basics ( start_cluster , first_node , second_node ) : <nl> + first_node . query ( " SYSTEM STOP MERGES " ) <nl> + second_node . query ( " SYSTEM STOP MERGES " ) <nl> + <nl> + for size in [ 300 , 300 , 600 ] : <nl> + insert_random_data ( ' polymorphic_table ' , first_node , size ) <nl> + second_node . query ( " SYSTEM SYNC REPLICA polymorphic_table " , timeout = 20 ) <nl> + <nl> + assert first_node . query ( " SELECT count ( ) FROM polymorphic_table " ) = = " 1200 \ n " <nl> + assert second_node . query ( " SELECT count ( ) FROM polymorphic_table " ) = = " 1200 \ n " <nl> + <nl> + expected = " Compact \ t2 \ nWide \ t1 \ n " <nl> + <nl> + assert TSV ( first_node . query ( " SELECT part_type , count ( ) FROM system . 
parts " \ <nl> + " WHERE table = ' polymorphic_table ' AND active GROUP BY part_type ORDER BY part_type " ) ) = = TSV ( expected ) <nl> + assert TSV ( second_node . query ( " SELECT part_type , count ( ) FROM system . parts " \ <nl> + " WHERE table = ' polymorphic_table ' AND active GROUP BY part_type ORDER BY part_type " ) ) = = TSV ( expected ) <nl> + <nl> + first_node . query ( " SYSTEM START MERGES " ) <nl> + second_node . query ( " SYSTEM START MERGES " ) <nl> + <nl> + for _ in range ( 40 ) : <nl> + insert_random_data ( ' polymorphic_table ' , first_node , 10 ) <nl> + insert_random_data ( ' polymorphic_table ' , second_node , 10 ) <nl> + <nl> + first_node . query ( " SYSTEM SYNC REPLICA polymorphic_table " , timeout = 20 ) <nl> + second_node . query ( " SYSTEM SYNC REPLICA polymorphic_table " , timeout = 20 ) <nl> + <nl> + assert first_node . query ( " SELECT count ( ) FROM polymorphic_table " ) = = " 2000 \ n " <nl> + assert second_node . query ( " SELECT count ( ) FROM polymorphic_table " ) = = " 2000 \ n " <nl> + <nl> + first_node . query ( " OPTIMIZE TABLE polymorphic_table FINAL " ) <nl> + second_node . query ( " SYSTEM SYNC REPLICA polymorphic_table " , timeout = 20 ) <nl> + <nl> + assert first_node . query ( " SELECT count ( ) FROM polymorphic_table " ) = = " 2000 \ n " <nl> + assert second_node . query ( " SELECT count ( ) FROM polymorphic_table " ) = = " 2000 \ n " <nl> + <nl> + assert first_node . query ( " SELECT DISTINCT part_type FROM system . parts WHERE table = ' polymorphic_table ' AND active " ) = = " Wide \ n " <nl> + assert second_node . query ( " SELECT DISTINCT part_type FROM system . parts WHERE table = ' polymorphic_table ' AND active " ) = = " Wide \ n " <nl> + <nl> + # Check alters and mutations also work <nl> + first_node . query ( " ALTER TABLE polymorphic_table ADD COLUMN ss String " ) <nl> + first_node . query ( " ALTER TABLE polymorphic_table UPDATE ss = toString ( id ) WHERE 1 " ) <nl> + <nl> + second_node . query ( " SYSTEM SYNC REPLICA polymorphic_table " , timeout = 20 ) <nl> + <nl> + assert first_node . query ( " SELECT count ( ss ) FROM polymorphic_table " ) = = " 2000 \ n " <nl> + assert first_node . query ( " SELECT uniqExact ( ss ) FROM polymorphic_table " ) = = " 600 \ n " <nl> + <nl> + assert second_node . query ( " SELECT count ( ss ) FROM polymorphic_table " ) = = " 2000 \ n " <nl> + assert second_node . query ( " SELECT uniqExact ( ss ) FROM polymorphic_table " ) = = " 600 \ n " <nl> + <nl> + <nl> + # Check that follower replicas create parts of the same type that the leader has chosen at merge . <nl> + @ pytest . mark . parametrize ( <nl> + ( ' table ' , ' part_type ' ) , <nl> + [ <nl> + ( ' polymorphic_table_compact ' , ' Compact ' ) , <nl> + ( ' polymorphic_table_wide ' , ' Wide ' ) <nl> + ] <nl> + ) <nl> + def test_different_part_types_on_replicas ( start_cluster , table , part_type ) : <nl> + leader = node3 <nl> + follower = node4 <nl> + <nl> + assert leader . query ( " SELECT is_leader FROM system . replicas WHERE table = ' { } ' " . format ( table ) ) = = " 1 \ n " <nl> + assert node4 . query ( " SELECT is_leader FROM system . replicas WHERE table = ' { } ' " . format ( table ) ) = = " 0 \ n " <nl> + <nl> + for _ in range ( 3 ) : <nl> + insert_random_data ( table , leader , 100 ) <nl> + <nl> + leader . query ( " OPTIMIZE TABLE { } FINAL " . format ( table ) ) <nl> + follower . query ( " SYSTEM SYNC REPLICA { } " . format ( table ) , timeout = 20 ) <nl> + <nl> + expected = " { } \ t1 \ n " . format ( part_type ) <nl> + <nl> + assert TSV ( leader .
query ( " SELECT part_type , count ( ) FROM system . parts " \ <nl> + " WHERE table = ' { } ' AND active GROUP BY part_type ORDER BY part_type " . format ( table ) ) ) = = TSV ( expected ) <nl> + assert TSV ( follower . query ( " SELECT part_type , count ( ) FROM system . parts " \ <nl> + " WHERE table = ' { } ' AND active GROUP BY part_type ORDER BY part_type " . format ( table ) ) ) = = TSV ( expected ) <nl> + <nl> + <nl> + node7 = cluster . add_instance ( ' node7 ' , config_dir = " configs " , with_zookeeper = True , image = ' yandex / clickhouse - server : 19 . 17 . 8 . 54 ' , stay_alive = True , with_installed_binary = True ) <nl> + node8 = cluster . add_instance ( ' node8 ' , config_dir = " configs " , with_zookeeper = True ) <nl> + <nl> + settings7 = { ' index_granularity ' : 64 , ' index_granularity_bytes ' : 10485760 } <nl> + settings8 = { ' index_granularity ' : 64 , ' index_granularity_bytes ' : 10485760 , ' min_rows_for_wide_part ' : 512 , ' min_bytes_for_wide_part ' : 0 } <nl> + <nl> + @ pytest . fixture ( scope = " module " ) <nl> + def start_cluster_diff_versions ( ) : <nl> + try : <nl> + for name in [ ' polymorphic_table ' , ' polymorphic_table_2 ' ] : <nl> + cluster . start ( ) <nl> + node7 . query ( <nl> + ' ' ' <nl> + CREATE TABLE { name } ( date Date , id UInt32 , s String , arr Array ( Int32 ) ) <nl> + ENGINE = ReplicatedMergeTree ( ' / clickhouse / tables / test / shard5 / { name } ' , ' 1 ' ) <nl> + PARTITION BY toYYYYMM ( date ) <nl> + ORDER BY id <nl> + SETTINGS index_granularity = { index_granularity } , index_granularity_bytes = { index_granularity_bytes } <nl> + ' ' ' . format ( name = name , * * settings7 ) <nl> + ) <nl> + <nl> + node8 . query ( <nl> + ' ' ' <nl> + CREATE TABLE { name } ( date Date , id UInt32 , s String , arr Array ( Int32 ) ) <nl> + ENGINE = ReplicatedMergeTree ( ' / clickhouse / tables / test / shard5 / { name } ' , ' 2 ' ) <nl> + PARTITION BY toYYYYMM ( date ) <nl> + ORDER BY id <nl> + SETTINGS index_granularity = { index_granularity } , index_granularity_bytes = { index_granularity_bytes } , <nl> + min_rows_for_wide_part = { min_rows_for_wide_part } , min_bytes_for_wide_part = { min_bytes_for_wide_part } <nl> + ' ' ' . format ( name = name , * * settings8 ) <nl> + ) <nl> + <nl> + yield cluster <nl> + <nl> + finally : <nl> + cluster . shutdown ( ) <nl> + <nl> + <nl> + def test_polymorphic_parts_diff_versions ( start_cluster_diff_versions ) : <nl> + # Check that replication with Wide parts works between different versions . <nl> + <nl> + node_old = node7 <nl> + node_new = node8 <nl> + <nl> + insert_random_data ( ' polymorphic_table ' , node7 , 100 ) <nl> + node8 . query ( " SYSTEM SYNC REPLICA polymorphic_table " , timeout = 20 ) <nl> + <nl> + assert node8 . query ( " SELECT count ( ) FROM polymorphic_table " ) = = " 100 \ n " <nl> + assert node8 . query ( " SELECT DISTINCT part_type FROM system . parts WHERE table = ' polymorphic_table ' and active " ) = = " Wide \ n " <nl> + <nl> + <nl> + def test_polymorphic_parts_diff_versions_2 ( start_cluster_diff_versions ) : <nl> + # Replication doesn ' t work on old version if part is created in compact format , because <nl> + # this version doesn ' t know anything about it . It ' s considered to be ok . <nl> + <nl> + node_old = node7 <nl> + node_new = node8 <nl> + <nl> + insert_random_data ( ' polymorphic_table_2 ' , node_new , 100 ) <nl> + <nl> + assert node_new . query ( " SELECT count ( ) FROM polymorphic_table_2 " ) = = " 100 \ n " <nl> + assert node_old . 
query ( " SELECT count ( ) FROM polymorphic_table_2 " ) = = " 0 \ n " <nl> + with pytest . raises ( Exception ) : <nl> + node_old . query ( " SYSTEM SYNC REPLICA polymorphic_table_2 " , timeout = 3 ) <nl> + <nl> + node_old . restart_with_latest_version ( ) <nl> + <nl> + node_old . query ( " SYSTEM SYNC REPLICA polymorphic_table_2 " , timeout = 20 ) <nl> + <nl> + # Works after update <nl> + assert node_old . query ( " SELECT count ( ) FROM polymorphic_table_2 " ) = = " 100 \ n " <nl> + assert node_old . query ( " SELECT DISTINCT part_type FROM system . parts WHERE table = ' polymorphic_table_2 ' and active " ) = = " Compact \ n " <nl> + <nl> + <nl> + def test_polymorphic_parts_non_adaptive ( start_cluster ) : <nl> + node1 . query ( " SYSTEM STOP MERGES " ) <nl> + node2 . query ( " SYSTEM STOP MERGES " ) <nl> + <nl> + insert_random_data ( ' non_adaptive_table ' , node1 , 100 ) <nl> + node2 . query ( " SYSTEM SYNC REPLICA non_adaptive_table " , timeout = 20 ) <nl> + <nl> + insert_random_data ( ' non_adaptive_table ' , node2 , 100 ) <nl> + node1 . query ( " SYSTEM SYNC REPLICA non_adaptive_table " , timeout = 20 ) <nl> + <nl> + assert TSV ( node1 . query ( " SELECT part_type , count ( ) FROM system . parts " \ <nl> + " WHERE table = ' non_adaptive_table ' AND active GROUP BY part_type ORDER BY part_type " ) ) = = TSV ( " Wide \ t2 \ n " ) <nl> + assert TSV ( node2 . query ( " SELECT part_type , count ( ) FROM system . parts " \ <nl> + " WHERE table = ' non_adaptive_table ' AND active GROUP BY part_type ORDER BY part_type " ) ) = = TSV ( " Wide \ t2 \ n " ) <nl> + <nl> + assert node1 . contains_in_log ( " < Warning > default . non_adaptive_table : Table can ' t create parts with adaptive granularity " ) <nl> new file mode 100644 <nl> index 00000000000 . . c839ca13281 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 01055_compact_parts . 
reference <nl> <nl> + 0 0 1167657 [ 0 , 0 , 0 ] [ ' a ' , ' b ' , ' c ' ] baz <nl> + 1 1 2072334 [ 0 , 0 , 0 ] [ ' a ' , ' b ' , ' c ' ] bar <nl> + 2 4 843568 [ 0 , 0 , 0 ] [ ' a ' , ' b ' , ' c ' ] baz <nl> + 3 9 1748245 [ 0 , 0 , 0 ] [ ' a ' , ' b ' , ' c ' ] bar <nl> + 4 16 519479 [ 0 , 0 , 0 ] [ ' a ' , ' b ' , ' c ' ] baz <nl> + 5 25 1424156 [ 0 , 0 , 0 ] [ ' a ' , ' b ' , ' c ' ] bar <nl> + 6 36 195390 [ 0 , 0 , 0 ] [ ' a ' , ' b ' , ' c ' ] baz <nl> + 7 49 1100067 [ 0 , 0 , 0 ] [ ' a ' , ' b ' , ' c ' ] bar <nl> + 8 64 2004744 [ 0 , 0 , 0 ] [ ' a ' , ' b ' , ' c ' ] baz <nl> + 9 81 775978 [ 0 , 0 , 0 ] [ ' a ' , ' b ' , ' c ' ] bar <nl> + = = = = = = = = = = = = = = = = = = = = = <nl> + Compact <nl> + Compact 7 <nl> + Wide 3 <nl> + 0 0 1167657 [ 0 , 0 , 0 ] [ ' a ' , ' b ' , ' c ' ] baz <nl> + 0 0 645645 [ 1 , 2 ] [ ' ' , ' ' ] 0 <nl> + 0 0 804292 [ 1 , 2 ] [ ' ' , ' ' ] 3 <nl> + 1 1 1409675 [ 1 , 2 ] [ ' ' , ' ' ] 1 <nl> + 1 1 1568322 [ 1 , 2 ] [ ' ' , ' ' ] 4 <nl> + 1 1 2072334 [ 0 , 0 , 0 ] [ ' a ' , ' b ' , ' c ' ] bar <nl> + 2 4 40262 [ 1 , 2 ] [ ' ' , ' ' ] 2 <nl> + 2 4 843568 [ 0 , 0 , 0 ] [ ' a ' , ' b ' , ' c ' ] baz <nl> + 3 9 1748245 [ 0 , 0 , 0 ] [ ' a ' , ' b ' , ' c ' ] bar <nl> + 4 16 519479 [ 0 , 0 , 0 ] [ ' a ' , ' b ' , ' c ' ] baz <nl> + = = = = = = = = = = = = = = = = = = = = = <nl> + 0 0 1167657 [ 0 , 0 , 0 ] [ ' qwqw ' ] baz <nl> + 0 0 645645 [ 1 , 2 ] [ ' qwqw ' ] 0 <nl> + 0 0 804292 [ 1 , 2 ] [ ' qwqw ' ] 3 <nl> + 1 1 1409675 [ 1 , 2 ] [ ' qwqw ' ] 1 <nl> + 1 1 1568322 [ 1 , 2 ] [ ' qwqw ' ] 4 <nl> + 1 1 2072334 [ 0 , 0 , 0 ] [ ' qwqw ' ] bar <nl> + 2 4 40262 [ 1 , 2 ] [ ' qwqw ' ] 2 <nl> + 2 4 843568 [ 0 , 0 , 0 ] [ ' qwqw ' ] baz <nl> + 3 9 1748245 [ 0 , 0 , 0 ] [ ' qwqw ' ] bar <nl> + 4 16 519479 [ 0 , 0 , 0 ] [ ' qwqw ' ] baz <nl> + = = = = = = = = = = = = = = = = = = = = = <nl> + 2 42 40262 [ 1 , 2 ] [ ' qwqw ' ] 2 <nl> + 2 42 843568 [ 0 , 0 , 0 ] [ ' qwqw ' ] baz <nl> + 3 42 1748245 [ 0 , 0 , 0 ] [ ' qwqw ' ] bar <nl> + 4 42 519479 [ 0 , 0 , 0 ] [ ' qwqw ' ] baz <nl> + 5 42 1424156 [ 0 , 0 , 0 ] [ ' qwqw ' ] bar <nl> + 6 42 195390 [ 0 , 0 , 0 ] [ ' qwqw ' ] baz <nl> + 7 42 1100067 [ 0 , 0 , 0 ] [ ' qwqw ' ] bar <nl> + 8 42 2004744 [ 0 , 0 , 0 ] [ ' qwqw ' ] baz <nl> + 9 42 775978 [ 0 , 0 , 0 ] [ ' qwqw ' ] bar <nl> + 10 42 1680655 [ 0 , 0 , 0 ] [ ' qwqw ' ] baz <nl> + = = = = = = = = = = = = = = = = = = = = = <nl> new file mode 100755 <nl> index 00000000000 . . 05b0657ba7e <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 01055_compact_parts . sql <nl> <nl> + - - Testing basic functionality with compact parts <nl> + set mutations_sync = 2 ; <nl> + drop table if exists mt_compact ; <nl> + <nl> + create table mt_compact ( a UInt64 , b UInt64 DEFAULT a * a , s String , n Nested ( x UInt32 , y String ) , lc LowCardinality ( String ) ) <nl> + engine = MergeTree <nl> + order by a partition by a % 10 <nl> + settings index_granularity = 8 , <nl> + min_rows_for_wide_part = 10 ; <nl> + <nl> + insert into mt_compact ( a , s , n . y , lc ) select number , toString ( ( number * 2132214234 + 5434543 ) % 2133443 ) , [ ' a ' , ' b ' , ' c ' ] , number % 2 ? ' bar ' : ' baz ' from numbers ( 90 ) ; <nl> + <nl> + select * from mt_compact order by a limit 10 ; <nl> + select ' = = = = = = = = = = = = = = = = = = = = = ' ; <nl> + <nl> + select distinct part_type from system . parts where database = currentDatabase ( ) and table = ' mt_compact ' and active ; <nl> + <nl> + insert into mt_compact ( a , s , n . 
x , lc ) select number % 3 , toString ( ( number * 75434535 + 645645 ) % 2133443 ) , [ 1 , 2 ] , toString ( number ) from numbers ( 5 ) ; <nl> + <nl> + optimize table mt_compact final ; <nl> + <nl> + select part_type , count ( ) from system . parts where database = currentDatabase ( ) and table = ' mt_compact ' and active group by part_type order by part_type ; <nl> + select * from mt_compact order by a , s limit 10 ; <nl> + select ' = = = = = = = = = = = = = = = = = = = = = ' ; <nl> + <nl> + alter table mt_compact drop column n . y ; <nl> + alter table mt_compact add column n . y Array ( String ) DEFAULT [ ' qwqw ' ] after n . x ; <nl> + select * from mt_compact order by a , s limit 10 ; <nl> + select ' = = = = = = = = = = = = = = = = = = = = = ' ; <nl> + <nl> + alter table mt_compact update b = 42 where 1 ; <nl> + <nl> + select * from mt_compact where a > 1 order by a , s limit 10 ; <nl> + select ' = = = = = = = = = = = = = = = = = = = = = ' ; <nl> + <nl> + drop table if exists mt_compact ; <nl> new file mode 100644 <nl> index 00000000000 . . 932c45ad086 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 01055_compact_parts_1 . reference <nl> <nl> + CREATE TABLE default . mt_compact ( ` a ` Int , ` s ` String ) ENGINE = MergeTree PARTITION BY a ORDER BY a SETTINGS index_granularity_bytes = 0 , index_granularity = 8192 <nl> + CREATE TABLE default . mt_compact ( ` a ` Int , ` s ` String ) ENGINE = MergeTree PARTITION BY a ORDER BY a SETTINGS index_granularity_bytes = 0 , min_rows_for_wide_part = 0 , index_granularity = 8192 , parts_to_delay_insert = 300 <nl> new file mode 100644 <nl> index 00000000000 . . 91941b90860 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 01055_compact_parts_1 . sql <nl> <nl> + drop table if exists mt_compact ; <nl> + <nl> + create table mt_compact ( a Int , s String ) engine = MergeTree order by a partition by a <nl> + settings index_granularity_bytes = 0 ; <nl> + alter table mt_compact modify setting min_rows_for_wide_part = 1000 ; - - { serverError 48 } <nl> + show create table mt_compact ; <nl> + <nl> + create table mt_compact_2 ( a Int , s String ) engine = MergeTree order by a partition by a <nl> + settings min_rows_for_wide_part = 1000 ; <nl> + insert into mt_compact_2 values ( 1 , ' a ' ) ; <nl> + alter table mt_compact attach partition 1 from mt_compact_2 ; - - { serverError 36 } <nl> + <nl> + drop table mt_compact ; <nl> + <nl> + set send_logs_level = ' error ' ; <nl> + create table mt_compact ( a Int , s String ) engine = MergeTree order by a partition by a <nl> + settings index_granularity_bytes = 0 , min_rows_for_wide_part = 1000 ; <nl> + <nl> + - - Check that alter of other settings works <nl> + alter table mt_compact modify setting parts_to_delay_insert = 300 ; <nl> + alter table mt_compact modify setting min_rows_for_wide_part = 0 ; <nl> + <nl> + show create table mt_compact ; <nl> + <nl> + drop table mt_compact <nl> new file mode 100644 <nl> index 00000000000 . . 543593fb9b0 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 01055_compact_parts_granularity . reference <nl> <nl> + 100 <nl> + 1 9 <nl> new file mode 100644 <nl> index 00000000000 . . 8828ffc2da8 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 01055_compact_parts_granularity . sql <nl> <nl> + drop table if exists mt_compact ; <nl> + <nl> + - - Checks that granularity correctly computed from small parts . 
<nl> + <nl> + create table mt_compact ( a Int , s String ) engine = MergeTree order by a <nl> + settings min_rows_for_wide_part = 1000 , <nl> + index_granularity = 14 ; <nl> + <nl> + system stop merges mt_compact ; <nl> + set max_block_size = 1 ; <nl> + set min_insert_block_size_rows = 1 ; <nl> + insert into mt_compact select number , ' aaa ' from numbers ( 100 ) ; <nl> + <nl> + select count ( ) from system . parts where table = ' mt_compact ' and database = currentDatabase ( ) and active ; <nl> + <nl> + system start merges mt_compact ; <nl> + optimize table mt_compact final ; <nl> + <nl> + select count ( ) , sum ( marks ) from system . parts where table = ' mt_compact ' and database = currentDatabase ( ) and active ; <nl> + <nl> + drop table mt_compact ; <nl> new file mode 100644 <nl> index 00000000000 . . 0f5a8eb904e <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 01055_minmax_index_compact_parts . reference <nl> <nl> + 0 5 4 . 7 6 . 50 cba b 2014 - 01 - 04 <nl> + 1 5 4 . 7 6 . 50 cba b 2014 - 03 - 11 <nl> + 11 5 4 . 7 6 . 50 cba b 2014 - 06 - 11 <nl> + 12 5 4 . 7 6 . 50 cba b 2015 - 01 - 01 <nl> + " rows_read " : 4 , <nl> + " rows_read " : 2 , <nl> new file mode 100755 <nl> index 00000000000 . . 3e7659120c6 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 01055_minmax_index_compact_parts . sh <nl> <nl> + # ! / usr / bin / env bash <nl> + <nl> + # It ' s test 00837_minmax_index , buts with compact part . <nl> + # Checks , that basic functionality with compact parts and skip indices is not broken . <nl> + <nl> + CURDIR = $ ( cd " $ ( dirname " $ { BASH_SOURCE [ 0 ] } " ) " & & pwd ) <nl> + . $ CURDIR / . . / shell_config . sh <nl> + <nl> + $ CLICKHOUSE_CLIENT - - query = " DROP TABLE IF EXISTS minmax_idx ; " <nl> + <nl> + <nl> + $ CLICKHOUSE_CLIENT - n - - query = " <nl> + CREATE TABLE minmax_idx <nl> + ( <nl> + u64 UInt64 , <nl> + i32 Int32 , <nl> + f64 Float64 , <nl> + d Decimal ( 10 , 2 ) , <nl> + s String , <nl> + e Enum8 ( ' a ' = 1 , ' b ' = 2 , ' c ' = 3 ) , <nl> + dt Date , <nl> + INDEX idx_all ( i32 , i32 + f64 , d , s , e , dt ) TYPE minmax GRANULARITY 1 , <nl> + INDEX idx_all2 ( i32 , i32 + f64 , d , s , e , dt ) TYPE minmax GRANULARITY 2 , <nl> + INDEX idx_2 ( u64 + toYear ( dt ) , substring ( s , 2 , 4 ) ) TYPE minmax GRANULARITY 3 <nl> + ) ENGINE = MergeTree ( ) <nl> + ORDER BY u64 <nl> + SETTINGS index_granularity = 2 , min_rows_for_wide_part = 1000000 " <nl> + <nl> + <nl> + $ CLICKHOUSE_CLIENT - - query = " INSERT INTO minmax_idx VALUES <nl> + ( 0 , 5 , 4 . 7 , 6 . 5 , ' cba ' , ' b ' , ' 2014 - 01 - 04 ' ) , <nl> + ( 1 , 5 , 4 . 7 , 6 . 5 , ' cba ' , ' b ' , ' 2014 - 03 - 11 ' ) , <nl> + ( 2 , 2 , 4 . 5 , 2 . 5 , ' abc ' , ' a ' , ' 2014 - 01 - 01 ' ) , <nl> + ( 3 , 5 , 6 . 9 , 1 . 57 , ' bac ' , ' c ' , ' 2017 - 01 - 01 ' ) , <nl> + ( 4 , 2 , 4 . 5 , 2 . 5 , ' abc ' , ' a ' , ' 2016 - 01 - 01 ' ) , <nl> + ( 5 , 5 , 6 . 9 , 1 . 57 , ' bac ' , ' c ' , ' 2014 - 11 - 11 ' ) , <nl> + ( 6 , 2 , 4 . 5 , 2 . 5 , ' abc ' , ' a ' , ' 2014 - 02 - 11 ' ) , <nl> + ( 7 , 5 , 6 . 9 , 1 . 57 , ' bac ' , ' c ' , ' 2014 - 04 - 11 ' ) , <nl> + ( 8 , 2 , 4 . 5 , 2 . 5 , ' abc ' , ' a ' , ' 2014 - 05 - 11 ' ) , <nl> + ( 9 , 5 , 6 . 9 , 1 . 57 , ' bac ' , ' c ' , ' 2014 - 07 - 11 ' ) , <nl> + ( 11 , 5 , 4 . 7 , 6 . 5 , ' cba ' , ' b ' , ' 2014 - 06 - 11 ' ) , <nl> + ( 12 , 5 , 4 . 7 , 6 . 
5 , ' cba ' , ' b ' , ' 2015 - 01 - 01 ' ) " <nl> + <nl> + # simple select <nl> + $ CLICKHOUSE_CLIENT - - query = " SELECT * FROM minmax_idx WHERE i32 = 5 AND i32 + f64 < 12 AND 3 < d AND d < 7 AND ( s = ' bac ' OR s = ' cba ' ) ORDER BY dt " <nl> + $ CLICKHOUSE_CLIENT - - query = " SELECT * FROM minmax_idx WHERE i32 = 5 AND i32 + f64 < 12 AND 3 < d AND d < 7 AND ( s = ' bac ' OR s = ' cba ' ) ORDER BY dt FORMAT JSON " | grep " rows_read " <nl> + <nl> + # select with hole made by primary key <nl> + $ CLICKHOUSE_CLIENT - - query = " SELECT * FROM minmax_idx WHERE ( u64 < 2 OR u64 > 10 ) AND e ! = ' b ' ORDER BY dt " <nl> + $ CLICKHOUSE_CLIENT - - query = " SELECT * FROM minmax_idx WHERE ( u64 < 2 OR u64 > 10 ) AND e ! = ' b ' ORDER BY dt FORMAT JSON " | grep " rows_read " <nl> + <nl> + $ CLICKHOUSE_CLIENT - - query = " DROP TABLE minmax_idx " <nl> \ No newline at end of file <nl> mmm a / utils / convert - month - partitioned - parts / main . cpp <nl> ppp b / utils / convert - month - partitioned - parts / main . cpp <nl> <nl> # include < Storages / MergeTree / MergeTreePartInfo . h > <nl> - # include < Storages / MergeTree / MergeTreeDataPart . h > <nl> + # include < Storages / MergeTree / IMergeTreeDataPart . h > <nl> # include < DataTypes / DataTypeDate . h > <nl> # include < IO / ReadBufferFromFile . h > <nl> # include < IO / WriteBufferFromFile . h > <nl> void run ( String part_path , String date_column , String dest_path ) <nl> checksums . files [ " count . txt " ] . file_size = count_out_hashing . count ( ) ; <nl> checksums . files [ " count . txt " ] . file_hash = count_out_hashing . getHash ( ) ; <nl> <nl> - MergeTreeDataPart : : MinMaxIndex minmax_idx ( min_date , max_date ) ; <nl> + IMergeTreeDataPart : : MinMaxIndex minmax_idx ( min_date , max_date ) ; <nl> Names minmax_idx_columns = { date_column } ; <nl> DataTypes minmax_idx_column_types = { std : : make_shared < DataTypeDate > ( ) } ; <nl> minmax_idx . store ( minmax_idx_columns , minmax_idx_column_types , new_tmp_part_path_str , checksums ) ; <nl>
Merge pull request from CurtizJ / polymorphic - parts
ClickHouse/ClickHouse
d0fea62079a0fd4f6d0e6a83c1bb1d369807686a
2020-02-23T01:01:07Z
mmm a / dbms / src / Storages / MergeTree / MergeTreeData . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeData . cpp <nl> MergeTreeData : : DataPartsVector MergeTreeData : : getDataPartsVector ( const DataPartS <nl> DataPartsVector res ; <nl> { <nl> std : : lock_guard < std : : mutex > lock ( data_parts_mutex ) ; <nl> - std : : copy_if ( data_parts . begin ( ) , data_parts . end ( ) , std : : back_inserter ( res ) , <nl> - DataPart : : getStatesFilter ( { DataPartState : : Committed } ) ) ; <nl> + std : : copy_if ( data_parts . begin ( ) , data_parts . end ( ) , std : : back_inserter ( res ) , DataPart : : getStatesFilter ( affordable_states ) ) ; <nl> } <nl> return res ; <nl> } <nl> MergeTreeData : : DataParts MergeTreeData : : getDataParts ( const DataPartStates & affo <nl> DataParts res ; <nl> { <nl> std : : lock_guard < std : : mutex > lock ( data_parts_mutex ) ; <nl> - std : : copy_if ( data_parts . begin ( ) , data_parts . end ( ) , std : : inserter ( res , res . end ( ) ) , <nl> - DataPart : : getStatesFilter ( { DataPartState : : Committed } ) ) ; <nl> + std : : copy_if ( data_parts . begin ( ) , data_parts . end ( ) , std : : inserter ( res , res . end ( ) ) , DataPart : : getStatesFilter ( affordable_states ) ) ; <nl> } <nl> return res ; <nl> } <nl>
Fixed awful copypaste . [ # CLICKHOUSE - 3178 ]
ClickHouse/ClickHouse
d6ea9c3dcf03282acc8826f7d9defbb58f4fbc86
2017-10-26T14:16:06Z
new file mode 100644 <nl> index 000000000000 . . 953750a247f9 <nl> mmm / dev / null <nl> ppp b / spec / fixtures / module / process - stdout . js <nl> @ @ - 0 , 0 + 1 @ @ <nl> + process . stdout . write ( ' pipes stdio ' ) <nl> mmm a / spec / node - spec . js <nl> ppp b / spec / node - spec . js <nl> describe ( ' node feature ' , function ( ) { <nl> } ) <nl> child . send ( ' message ' ) <nl> } ) <nl> + <nl> + it ( ' pipes stdio ' , function ( done ) { <nl> + let child = child_process . fork ( path . join ( fixtures , ' module ' , ' process - stdout . js ' ) , { silent : true } ) <nl> + let data = ' ' <nl> + child . stdout . on ( ' data ' , ( chunk ) = > { <nl> + data + = String ( chunk ) <nl> + } ) <nl> + child . on ( ' exit ' , ( code ) = > { <nl> + assert . equal ( code , 0 ) <nl> + assert . equal ( data , ' pipes stdio ' ) <nl> + done ( ) <nl> + } ) <nl> + } ) <nl> } ) <nl> } ) <nl> <nl>
spec : child_process . fork should be able to pipe stdio
electron/electron
4d8994df921087653c2e134e39a1da16a957bf39
2016-05-31T02:05:58Z
mmm a / DIRECTORY . md <nl> ppp b / DIRECTORY . md <nl> <nl> * [ Segtree ] ( https : / / github . com / TheAlgorithms / C - Plus - Plus / blob / master / range_queries / segTree . cpp ) <nl> <nl> # # Search <nl> - * [ Binary Search ] ( https : / / github . com / TheAlgorithms / C - Plus - Plus / blob / master / search / Binary % 20Search . cpp ) <nl> + * [ Binary Search ] ( https : / / github . com / TheAlgorithms / C - Plus - Plus / blob / master / search / binary_search . cpp ) <nl> * [ Exponential Search ] ( https : / / github . com / TheAlgorithms / C - Plus - Plus / blob / master / search / exponential_search . cpp ) <nl> * [ Hash Search ] ( https : / / github . com / TheAlgorithms / C - Plus - Plus / blob / master / search / hash_search . cpp ) <nl> * [ Interpolation Search ] ( https : / / github . com / TheAlgorithms / C - Plus - Plus / blob / master / search / Interpolation % 20Search . cpp ) <nl>
updating DIRECTORY . md
TheAlgorithms/C-Plus-Plus
b8a6a6470f90561111b9a5e68097569e352e432e
2020-04-26T09:28:13Z
mmm a / cmake / modules / AddSwift . cmake <nl> ppp b / cmake / modules / AddSwift . cmake <nl> endfunction ( ) <nl> # Sources to add into this library . <nl> function ( add_swift_host_library name ) <nl> set ( options <nl> - FORCE_BUILD_OPTIMIZED <nl> SHARED <nl> STATIC ) <nl> set ( single_parameter_options ) <nl> set ( multiple_parameter_options <nl> - C_COMPILE_FLAGS <nl> - DEPENDS <nl> - FILE_DEPENDS <nl> - LINK_LIBRARIES <nl> LLVM_LINK_COMPONENTS ) <nl> <nl> cmake_parse_arguments ( ASHL <nl> function ( add_swift_host_library name ) <nl> $ { ARGN } ) <nl> set ( ASHL_SOURCES $ { ASHL_UNPARSED_ARGUMENTS } ) <nl> <nl> - if ( ASHL_FORCE_BUILD_OPTIMIZED ) <nl> - message ( SEND_ERROR " library $ { name } is using FORCE_BUILD_OPTIMIZED flag which is deprecated . Please use target_compile_options instead " ) <nl> - endif ( ) <nl> - if ( ASHL_C_COMPILE_FLAGS ) <nl> - message ( SEND_ERROR " library $ { name } is using C_COMPILE_FLAGS parameter which is deprecated . Please use target_compile_definitions , target_compile_options , or target_include_directories instead " ) <nl> - endif ( ) <nl> - if ( ASHL_DEPENDS ) <nl> - message ( SEND_ERROR " library $ { name } is using DEPENDS parameter which is deprecated . Please use add_dependencies instead " ) <nl> - endif ( ) <nl> - if ( ASHL_FILE_DEPENDS ) <nl> - message ( SEND_ERROR " library $ { name } is using FILE_DEPENDS parameter which is deprecated . " ) <nl> - endif ( ) <nl> - if ( ASHL_LINK_LIBRARIES ) <nl> - message ( SEND_ERROR " library $ { name } is using LINK_LIBRARIES parameter which is deprecated . Please use target_link_libraries instead " ) <nl> - endif ( ) <nl> - <nl> translate_flags ( ASHL " $ { options } " ) <nl> <nl> if ( NOT ASHL_SHARED AND NOT ASHL_STATIC ) <nl> endfunction ( ) <nl> # [ ARCHITECTURE architecture ] <nl> # Architecture to build for . <nl> function ( _add_swift_executable_single name ) <nl> - # Parse the arguments we were given . <nl> + set ( options ) <nl> + set ( single_parameter_options <nl> + ARCHITECTURE <nl> + SDK ) <nl> + set ( multiple_parameter_options <nl> + COMPILE_FLAGS <nl> + DEPENDS <nl> + LLVM_LINK_COMPONENTS ) <nl> cmake_parse_arguments ( SWIFTEXE_SINGLE <nl> - " EXCLUDE_FROM_ALL " <nl> - " SDK ; ARCHITECTURE " <nl> - " DEPENDS ; LLVM_LINK_COMPONENTS ; LINK_LIBRARIES ; COMPILE_FLAGS " <nl> + " $ { options } " <nl> + " $ { single_parameter_options } " <nl> + " $ { multiple_parameter_options } " <nl> $ { ARGN } ) <nl> <nl> set ( SWIFTEXE_SINGLE_SOURCES $ { SWIFTEXE_SINGLE_UNPARSED_ARGUMENTS } ) <nl> <nl> - if ( SWIFTEXE_SINGLE_EXCLUDE_FROM_ALL ) <nl> - message ( SEND_ERROR " $ { name } is using EXCLUDE_FROM_ALL option which is deprecated . " ) <nl> - endif ( ) <nl> - if ( SWIFTEXE_SINGLE_LINK_LIBRARIES ) <nl> - message ( SEND_ERROR " $ { name } is using LINK_LIBRARIES parameter which is deprecated . Please use target_link_libraries instead " ) <nl> - endif ( ) <nl> - <nl> # Check arguments . 
<nl> precondition ( SWIFTEXE_SINGLE_SDK MESSAGE " Should specify an SDK " ) <nl> precondition ( SWIFTEXE_SINGLE_ARCHITECTURE MESSAGE " Should specify an architecture " ) <nl> endmacro ( ) <nl> function ( add_swift_host_tool executable ) <nl> set ( options ) <nl> set ( single_parameter_options SWIFT_COMPONENT ) <nl> - set ( multiple_parameter_options LINK_LIBRARIES ) <nl> + set ( multiple_parameter_options ) <nl> <nl> cmake_parse_arguments ( ASHT <nl> " $ { options } " <nl> function ( add_swift_host_tool executable ) <nl> " $ { multiple_parameter_options } " <nl> $ { ARGN } ) <nl> <nl> - if ( ASHT_LINK_LIBRARIES ) <nl> - message ( SEND_ERROR " $ { executable } is using LINK_LIBRARIES parameter which is deprecated . Please use target_link_libraries instead " ) <nl> - endif ( ) <nl> - <nl> precondition ( ASHT_SWIFT_COMPONENT <nl> MESSAGE " Swift Component is required to add a host tool " ) <nl> <nl>
build : obsolete options which have long been deprecated
apple/swift
66627d803745985ae4ecb6c5d9fe5c5b5a3fac12
2020-02-13T22:19:12Z
mmm a / src / mongo / SConscript <nl> ppp b / src / mongo / SConscript <nl> env . Library ( ' clientdriver ' , [ <nl> " client / dbclient . cpp " , <nl> " client / dbclient_rs . cpp " , <nl> " client / dbclientcursor . cpp " , <nl> + ' client / native_sasl_client_session . cpp ' , <nl> " client / replica_set_monitor . cpp " , <nl> ' client / sasl_client_authenticate . cpp ' , <nl> + " client / sasl_client_authenticate_impl . cpp " , <nl> + ' client / sasl_client_session . cpp ' , <nl> " client / syncclusterconnection . cpp " , <nl> " db / dbmessage . cpp " <nl> ] , <nl> if env [ ' MONGO_BUILD_SASL_CLIENT ' ] : <nl> if env [ ' PYSYSPLATFORM ' ] = = " win32 " : <nl> saslLibs . extend ( [ " secur32 " ] ) <nl> <nl> - env . Library ( ' sasl_client_session ' , <nl> - [ ' client / sasl_client_session . cpp ' , <nl> + env . Library ( ' cyrus_sasl_client_session ' , <nl> + [ ' client / cyrus_sasl_client_session . cpp ' , <nl> ' client / sasl_sspi . cpp ' ] , <nl> LIBDEPS = [ <nl> + ' clientdriver ' , <nl> ' foundation ' , <nl> ' signal_handlers_synchronous ' , <nl> ] , <nl> SYSLIBDEPS = saslLibs ) <nl> - commonFiles . extend ( [ ' client / sasl_client_authenticate_impl . cpp ' ] ) <nl> - extraCommonLibdeps . append ( ' sasl_client_session ' ) <nl> + extraCommonLibdeps . append ( ' cyrus_sasl_client_session ' ) <nl> <nl> # handle processinfo * <nl> processInfoFiles = [ " util / processinfo . cpp " ] <nl> new file mode 100644 <nl> index 000000000000 . . 1c7d92c1924e <nl> mmm / dev / null <nl> ppp b / src / mongo / client / cyrus_sasl_client_session . cpp <nl> <nl> + / * Copyright 2014 MongoDB Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the GNU Affero General Public License , version 3 , <nl> + * as published by the Free Software Foundation . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU Affero General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU Affero General Public License <nl> + * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the GNU Affero General Public License in all respects <nl> + * for all of the code used other than as permitted herein . If you modify <nl> + * file ( s ) with this exception , you may extend this exception to your <nl> + * version of the file ( s ) , but you are not obligated to do so . If you do not <nl> + * wish to do so , delete this exception statement from your version . If you <nl> + * delete this exception statement from all source files in the program , <nl> + * then also delete it in the license file . <nl> + * / <nl> + <nl> + # include " mongo / platform / basic . h " <nl> + <nl> + # include " mongo / client / cyrus_sasl_client_session . h " <nl> + <nl> + # include " mongo / base / init . h " <nl> + # include " mongo / util / allocator . h " <nl> + # include " mongo / util / assert_util . h " <nl> + # include " mongo / util / concurrency / mutex . 
h " <nl> + # include " mongo / util / mongoutils / str . h " <nl> + # include " mongo / util / signal_handlers_synchronous . h " <nl> + <nl> + namespace mongo { <nl> + namespace { <nl> + <nl> + SaslClientSession * createCyrusSaslClientSession ( ) { <nl> + return new CyrusSaslClientSession ( ) ; <nl> + } <nl> + <nl> + / * <nl> + * Allocator functions to be used by the SASL library , if the client <nl> + * doesn ' t initialize the library for us . <nl> + * / <nl> + <nl> + / / Version 2 . 1 . 26 is the first version to use size_t in the allocator signatures <nl> + # if ( SASL_VERSION_FULL > = ( ( 2 < < 16 ) | ( 1 < < 8 ) | 26 ) ) <nl> + typedef size_t SaslAllocSize ; <nl> + # else <nl> + typedef unsigned long SaslAllocSize ; <nl> + # endif <nl> + <nl> + typedef int ( * SaslCallbackFn ) ( ) ; <nl> + <nl> + void * saslOurMalloc ( SaslAllocSize sz ) { <nl> + return mongoMalloc ( sz ) ; <nl> + } <nl> + <nl> + void * saslOurCalloc ( SaslAllocSize count , SaslAllocSize size ) { <nl> + void * ptr = calloc ( count , size ) ; <nl> + if ( ! ptr ) { <nl> + reportOutOfMemoryErrorAndExit ( ) ; <nl> + } <nl> + return ptr ; <nl> + } <nl> + <nl> + void * saslOurRealloc ( void * ptr , SaslAllocSize sz ) { <nl> + return mongoRealloc ( ptr , sz ) ; <nl> + } <nl> + <nl> + / * <nl> + * Mutex functions to be used by the SASL library , if the client doesn ' t initialize the library <nl> + * for us . <nl> + * / <nl> + <nl> + void * saslMutexAlloc ( void ) { <nl> + return new SimpleMutex ( " sasl " ) ; <nl> + } <nl> + <nl> + int saslMutexLock ( void * mutex ) { <nl> + static_cast < SimpleMutex * > ( mutex ) - > lock ( ) ; <nl> + return SASL_OK ; <nl> + } <nl> + <nl> + int saslMutexUnlock ( void * mutex ) { <nl> + static_cast < SimpleMutex * > ( mutex ) - > unlock ( ) ; <nl> + return SASL_OK ; <nl> + } <nl> + <nl> + void saslMutexFree ( void * mutex ) { <nl> + delete static_cast < SimpleMutex * > ( mutex ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Configures the SASL library to use allocator and mutex functions we specify , <nl> + * unless the client application has previously initialized the SASL library . <nl> + * / <nl> + MONGO_INITIALIZER ( CyrusSaslAllocatorsAndMutexes ) ( InitializerContext * ) { <nl> + sasl_set_alloc ( saslOurMalloc , <nl> + saslOurCalloc , <nl> + saslOurRealloc , <nl> + free ) ; <nl> + <nl> + sasl_set_mutex ( saslMutexAlloc , <nl> + saslMutexLock , <nl> + saslMutexUnlock , <nl> + saslMutexFree ) ; <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + int saslClientLogSwallow ( void * context , int priority , const char * message ) { <nl> + return SASL_OK ; / / do nothing <nl> + } <nl> + <nl> + / * * <nl> + * Initializes the client half of the SASL library , but is effectively a no - op if the client <nl> + * application has already done it . <nl> + * <nl> + * If a client wishes to override this initialization but keep the allocator and mutex <nl> + * initialization , it should implement a MONGO_INITIALIZER_GENERAL with <nl> + * CyrusSaslAllocatorsAndMutexes as a prerequisite and CyrusSaslClientContext as a <nl> + * dependent . If it wishes to override both , it should implement a MONGO_INITIALIZER_GENERAL <nl> + * with CyrusSaslAllocatorsAndMutexes and CyrusSaslClientContext as dependents , or <nl> + * initialize the library before calling mongo : : runGlobalInitializersOrDie ( ) . 
<nl> + * / <nl> + MONGO_INITIALIZER_WITH_PREREQUISITES ( CyrusSaslClientContext , <nl> + ( " NativeSaslClientContext " , <nl> + " CyrusSaslAllocatorsAndMutexes " ) ) <nl> + ( InitializerContext * context ) { <nl> + <nl> + static sasl_callback_t saslClientGlobalCallbacks [ ] = <nl> + { { SASL_CB_LOG , SaslCallbackFn ( saslClientLogSwallow ) , NULL / * context * / } , <nl> + { SASL_CB_LIST_END } } ; <nl> + <nl> + / / If the client application has previously called sasl_client_init ( ) , the callbacks passed <nl> + / / in here are ignored . <nl> + / / <nl> + / / TODO : Call sasl_client_done ( ) at shutdown when we have a story for orderly shutdown . <nl> + int result = sasl_client_init ( saslClientGlobalCallbacks ) ; <nl> + if ( result ! = SASL_OK ) { <nl> + return Status ( ErrorCodes : : UnknownError , <nl> + mongoutils : : str : : stream ( ) < < <nl> + " Could not initialize sasl client components ( " < < <nl> + sasl_errstring ( result , NULL , NULL ) < < <nl> + " ) " ) ; <nl> + } <nl> + <nl> + SaslClientSession : : create = createCyrusSaslClientSession ; <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Callback registered on the sasl_conn_t underlying a CyrusSaslClientSession to allow the Cyrus SASL <nl> + * library to query for the authentication id and other simple string configuration parameters . <nl> + * <nl> + * Note that in Mongo , the authentication and authorization ids ( authid and authzid ) are always <nl> + * the same . These correspond to SASL_CB_AUTHNAME and SASL_CB_USER . <nl> + * / <nl> + int saslClientGetSimple ( void * context , <nl> + int id , <nl> + const char * * result , <nl> + unsigned * resultLen ) throw ( ) { <nl> + CyrusSaslClientSession * session = static_cast < CyrusSaslClientSession * > ( context ) ; <nl> + if ( ! session | | ! result ) <nl> + return SASL_BADPARAM ; <nl> + <nl> + CyrusSaslClientSession : : Parameter requiredParameterId ; <nl> + switch ( id ) { <nl> + case SASL_CB_AUTHNAME : <nl> + case SASL_CB_USER : <nl> + requiredParameterId = CyrusSaslClientSession : : parameterUser ; <nl> + break ; <nl> + default : <nl> + return SASL_FAIL ; <nl> + } <nl> + <nl> + if ( ! session - > hasParameter ( requiredParameterId ) ) <nl> + return SASL_FAIL ; <nl> + StringData value = session - > getParameter ( requiredParameterId ) ; <nl> + * result = value . rawData ( ) ; <nl> + if ( resultLen ) <nl> + * resultLen = static_cast < unsigned > ( value . size ( ) ) ; <nl> + return SASL_OK ; <nl> + } <nl> + <nl> + / * * <nl> + * Callback registered on the sasl_conn_t underlying a CyrusSaslClientSession to allow <nl> + * the Cyrus SASL library to query for the password data . <nl> + * / <nl> + int saslClientGetPassword ( sasl_conn_t * conn , <nl> + void * context , <nl> + int id , <nl> + sasl_secret_t * * outSecret ) throw ( ) { <nl> + <nl> + CyrusSaslClientSession * session = static_cast < CyrusSaslClientSession * > ( context ) ; <nl> + if ( ! session | | ! 
outSecret ) <nl> + return SASL_BADPARAM ; <nl> + <nl> + sasl_secret_t * secret = session - > getPasswordAsSecret ( ) ; <nl> + if ( secret = = NULL ) { <nl> + sasl_seterror ( conn , 0 , " No password data provided " ) ; <nl> + return SASL_FAIL ; <nl> + } <nl> + <nl> + * outSecret = secret ; <nl> + return SASL_OK ; <nl> + } <nl> + } / / namespace <nl> + <nl> + CyrusSaslClientSession : : CyrusSaslClientSession ( ) : <nl> + SaslClientSession ( ) , <nl> + _saslConnection ( NULL ) , <nl> + _step ( 0 ) , <nl> + _done ( false ) { <nl> + <nl> + const sasl_callback_t callbackTemplate [ maxCallbacks ] = { <nl> + { SASL_CB_AUTHNAME , SaslCallbackFn ( saslClientGetSimple ) , this } , <nl> + { SASL_CB_USER , SaslCallbackFn ( saslClientGetSimple ) , this } , <nl> + { SASL_CB_PASS , SaslCallbackFn ( saslClientGetPassword ) , this } , <nl> + { SASL_CB_LIST_END } <nl> + } ; <nl> + std : : copy ( callbackTemplate , callbackTemplate + maxCallbacks , _callbacks ) ; <nl> + } <nl> + <nl> + CyrusSaslClientSession : : ~ CyrusSaslClientSession ( ) { <nl> + sasl_dispose ( & _saslConnection ) ; <nl> + } <nl> + <nl> + void CyrusSaslClientSession : : setParameter ( Parameter id , const StringData & value ) { <nl> + fassert ( 18665 , id > = 0 & & id < numParameters ) ; <nl> + if ( id = = parameterPassword ) { <nl> + / / The parameterPassword is stored as a sasl_secret_t , while other <nl> + / / parameters are stored directly . This facilitates memory ownership management for <nl> + / / getPasswordAsSecret ( ) . <nl> + _secret . reset ( new char [ sizeof ( sasl_secret_t ) + value . size ( ) + 1 ] ) ; <nl> + sasl_secret_t * secret = <nl> + static_cast < sasl_secret_t * > ( static_cast < void * > ( _secret . get ( ) ) ) ; <nl> + secret - > len = value . size ( ) ; <nl> + value . copyTo ( static_cast < char * > ( static_cast < void * > ( & secret - > data [ 0 ] ) ) , false ) ; <nl> + } <nl> + SaslClientSession : : setParameter ( id , value ) ; <nl> + } <nl> + <nl> + sasl_secret_t * CyrusSaslClientSession : : getPasswordAsSecret ( ) { <nl> + / / See comment in setParameter ( ) about the special storage of parameterPassword . <nl> + return static_cast < sasl_secret_t * > ( <nl> + static_cast < void * > ( _secret . get ( ) ) ) ; <nl> + } <nl> + <nl> + Status CyrusSaslClientSession : : initialize ( ) { <nl> + if ( _saslConnection ! = NULL ) <nl> + return Status ( ErrorCodes : : AlreadyInitialized , <nl> + " Cannot reinitialize CyrusSaslClientSession . " ) ; <nl> + <nl> + int result = sasl_client_new ( getParameter ( parameterServiceName ) . toString ( ) . c_str ( ) , <nl> + getParameter ( parameterServiceHostname ) . toString ( ) . c_str ( ) , <nl> + NULL , <nl> + NULL , <nl> + _callbacks , <nl> + 0 , <nl> + & _saslConnection ) ; <nl> + <nl> + if ( SASL_OK ! = result ) { <nl> + return Status ( ErrorCodes : : UnknownError , <nl> + mongoutils : : str : : stream ( ) < < sasl_errstring ( result , NULL , NULL ) ) ; <nl> + } <nl> + <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + Status CyrusSaslClientSession : : step ( const StringData & inputData , std : : string * outputData ) { <nl> + const char * output = NULL ; <nl> + unsigned outputSize = 0xFFFFFFFF ; <nl> + <nl> + int result ; <nl> + if ( _step = = 0 ) { <nl> + const char * actualMechanism ; <nl> + result = sasl_client_start ( _saslConnection , <nl> + getParameter ( parameterMechanism ) . toString ( ) . 
c_str ( ) , <nl> + NULL , <nl> + & output , <nl> + & outputSize , <nl> + & actualMechanism ) ; <nl> + } <nl> + else { <nl> + result = sasl_client_step ( _saslConnection , <nl> + inputData . rawData ( ) , <nl> + static_cast < unsigned > ( inputData . size ( ) ) , <nl> + NULL , <nl> + & output , <nl> + & outputSize ) ; <nl> + } <nl> + + + _step ; <nl> + switch ( result ) { <nl> + case SASL_OK : <nl> + _done = true ; <nl> + / / Fall through <nl> + case SASL_CONTINUE : <nl> + * outputData = std : : string ( output , outputSize ) ; <nl> + return Status : : OK ( ) ; <nl> + case SASL_NOMECH : <nl> + return Status ( ErrorCodes : : BadValue , sasl_errdetail ( _saslConnection ) ) ; <nl> + case SASL_BADAUTH : <nl> + return Status ( ErrorCodes : : AuthenticationFailed , sasl_errdetail ( _saslConnection ) ) ; <nl> + default : <nl> + return Status ( ErrorCodes : : ProtocolError , sasl_errdetail ( _saslConnection ) ) ; <nl> + } <nl> + } <nl> + } / / namespace <nl> new file mode 100644 <nl> index 000000000000 . . 2b6d04fe95fd <nl> mmm / dev / null <nl> ppp b / src / mongo / client / cyrus_sasl_client_session . h <nl> <nl> + / * Copyright 2014 MongoDB Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the GNU Affero General Public License , version 3 , <nl> + * as published by the Free Software Foundation . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU Affero General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU Affero General Public License <nl> + * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the GNU Affero General Public License in all respects <nl> + * for all of the code used other than as permitted herein . If you modify <nl> + * file ( s ) with this exception , you may extend this exception to your <nl> + * version of the file ( s ) , but you are not obligated to do so . If you do not <nl> + * wish to do so , delete this exception statement from your version . If you <nl> + * delete this exception statement from all source files in the program , <nl> + * then also delete it in the license file . <nl> + * / <nl> + <nl> + # include " mongo / client / sasl_client_session . h " <nl> + <nl> + # include < sasl / sasl . h > <nl> + <nl> + namespace mongo { <nl> + <nl> + / * * <nl> + * Implementation of the client side of a SASL authentication conversation . <nl> + * using the Cyrus SASL library . 
<nl> + * / <nl> + class MONGO_CLIENT_API CyrusSaslClientSession : public SaslClientSession { <nl> + MONGO_DISALLOW_COPYING ( CyrusSaslClientSession ) ; <nl> + public : <nl> + <nl> + CyrusSaslClientSession ( ) ; <nl> + ~ CyrusSaslClientSession ( ) ; <nl> + <nl> + / * * <nl> + * Overriding to store the password data in sasl_secret_t format <nl> + * / <nl> + virtual void setParameter ( Parameter id , const StringData & value ) ; <nl> + <nl> + / * * <nl> + * Returns the value of the parameterPassword parameter in the form of a sasl_secret_t , used <nl> + * by the Cyrus SASL library ' s SASL_CB_PASS callback . The session object owns the storage <nl> + * referenced by the returned sasl_secret_t * , which will remain in scope according to the <nl> + * same rules as given for SaslClientSession : : getParameter ( ) . <nl> + * / <nl> + sasl_secret_t * getPasswordAsSecret ( ) ; <nl> + <nl> + virtual Status initialize ( ) ; <nl> + <nl> + virtual Status step ( const StringData & inputData , std : : string * outputData ) ; <nl> + <nl> + virtual bool isDone ( ) const { return _done ; } <nl> + <nl> + private : <nl> + / / / Maximum number of Cyrus SASL callbacks stored in _callbacks . <nl> + static const int maxCallbacks = 4 ; <nl> + <nl> + / / / Underlying Cyrus SASL library connection object . <nl> + sasl_conn_t * _saslConnection ; <nl> + <nl> + / / Number of successfully completed conversation steps . <nl> + int _step ; <nl> + <nl> + / / / See isDone ( ) . <nl> + bool _done ; <nl> + <nl> + / / / Stored of password in sasl_secret_t format <nl> + boost : : scoped_array < char > _secret ; <nl> + <nl> + / / / Callbacks registered on _saslConnection for providing the Cyrus SASL library with <nl> + / / / parameter values , etc . <nl> + sasl_callback_t _callbacks [ maxCallbacks ] ; <nl> + } ; <nl> + <nl> + } / / namespace mongo <nl> new file mode 100644 <nl> index 000000000000 . . 705626dc1064 <nl> mmm / dev / null <nl> ppp b / src / mongo / client / native_sasl_client_session . cpp <nl> <nl> + / * Copyright 2014 MongoDB Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the GNU Affero General Public License , version 3 , <nl> + * as published by the Free Software Foundation . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU Affero General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU Affero General Public License <nl> + * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the GNU Affero General Public License in all respects <nl> + * for all of the code used other than as permitted herein . If you modify <nl> + * file ( s ) with this exception , you may extend this exception to your <nl> + * version of the file ( s ) , but you are not obligated to do so . If you do not <nl> + * wish to do so , delete this exception statement from your version . 
If you <nl> + * delete this exception statement from all source files in the program , <nl> + * then also delete it in the license file . <nl> + * / <nl> + <nl> + # include " mongo / platform / basic . h " <nl> + <nl> + # include " mongo / client / native_sasl_client_session . h " <nl> + <nl> + # include " mongo / base / init . h " <nl> + # include " mongo / util / mongoutils / str . h " <nl> + <nl> + namespace mongo { <nl> + namespace { <nl> + <nl> + SaslClientSession * createNativeSaslClientSession ( ) { <nl> + return new NativeSaslClientSession ( ) ; <nl> + } <nl> + <nl> + MONGO_INITIALIZER ( NativeSaslClientContext ) ( InitializerContext * context ) { <nl> + SaslClientSession : : create = createNativeSaslClientSession ; <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + } / / namespace <nl> + <nl> + NativeSaslClientSession : : NativeSaslClientSession ( ) : <nl> + SaslClientSession ( ) , <nl> + _step ( 0 ) , <nl> + _done ( false ) { <nl> + } <nl> + <nl> + NativeSaslClientSession : : ~ NativeSaslClientSession ( ) { } <nl> + <nl> + Status NativeSaslClientSession : : initialize ( ) { <nl> + return Status ( ErrorCodes : : BadValue , <nl> + mongoutils : : str : : stream ( ) < < " SASL authentication not supported in client " ) ; <nl> + } <nl> + <nl> + Status NativeSaslClientSession : : step ( const StringData & inputData , std : : string * outputData ) { <nl> + return Status ( ErrorCodes : : BadValue , <nl> + mongoutils : : str : : stream ( ) < < " SASL authentication not supported in client " ) ; <nl> + } <nl> + } / / namespace <nl> new file mode 100644 <nl> index 000000000000 . . d80b9cc57ed0 <nl> mmm / dev / null <nl> ppp b / src / mongo / client / native_sasl_client_session . h <nl> <nl> + / * Copyright 2014 MongoDB Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the GNU Affero General Public License , version 3 , <nl> + * as published by the Free Software Foundation . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU Affero General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU Affero General Public License <nl> + * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the GNU Affero General Public License in all respects <nl> + * for all of the code used other than as permitted herein . If you modify <nl> + * file ( s ) with this exception , you may extend this exception to your <nl> + * version of the file ( s ) , but you are not obligated to do so . If you do not <nl> + * wish to do so , delete this exception statement from your version . If you <nl> + * delete this exception statement from all source files in the program , <nl> + * then also delete it in the license file . <nl> + * / <nl> + <nl> + # include " mongo / client / sasl_client_session . 
h " <nl> + <nl> + namespace mongo { <nl> + <nl> + / * * <nl> + * Implementation of the client side of a SASL authentication conversation using the <nl> + * native SASL implementation . <nl> + * / <nl> + class MONGO_CLIENT_API NativeSaslClientSession : public SaslClientSession { <nl> + MONGO_DISALLOW_COPYING ( NativeSaslClientSession ) ; <nl> + public : <nl> + <nl> + NativeSaslClientSession ( ) ; <nl> + ~ NativeSaslClientSession ( ) ; <nl> + <nl> + virtual Status initialize ( ) ; <nl> + <nl> + virtual Status step ( const StringData & inputData , std : : string * outputData ) ; <nl> + <nl> + virtual bool isDone ( ) const { return _done ; } <nl> + <nl> + private : <nl> + / / / Number of successfully completed conversation steps . <nl> + int _step ; <nl> + <nl> + / / / See isDone ( ) . <nl> + bool _done ; <nl> + } ; <nl> + <nl> + } / / namespace mongo <nl> mmm a / src / mongo / client / sasl_client_authenticate_impl . cpp <nl> ppp b / src / mongo / client / sasl_client_authenticate_impl . cpp <nl> namespace { <nl> return ex . toStatus ( ) ; <nl> } <nl> <nl> - SaslClientSession session ; <nl> - Status status = configureSession ( & session , client , targetDatabase , saslParameters ) ; <nl> + boost : : scoped_ptr < SaslClientSession > session ( SaslClientSession : : create ( ) ) ; <nl> + Status status = configureSession ( session . get ( ) , client , targetDatabase , saslParameters ) ; <nl> + <nl> if ( ! status . isOK ( ) ) <nl> return status ; <nl> <nl> BSONObj saslFirstCommandPrefix = BSON ( <nl> saslStartCommandName < < 1 < < <nl> saslCommandMechanismFieldName < < <nl> - session . getParameter ( SaslClientSession : : parameterMechanism ) ) ; <nl> + session - > getParameter ( SaslClientSession : : parameterMechanism ) ) ; <nl> <nl> BSONObj saslFollowupCommandPrefix = BSON ( saslContinueCommandName < < 1 ) ; <nl> BSONObj saslCommandPrefix = saslFirstCommandPrefix ; <nl> BSONObj inputObj = BSON ( saslCommandPayloadFieldName < < " " ) ; <nl> bool isServerDone = false ; <nl> - while ( ! session . isDone ( ) ) { <nl> + while ( ! session - > isDone ( ) ) { <nl> std : : string payload ; <nl> BSONType type ; <nl> <nl> namespace { <nl> LOG ( saslLogLevel ) < < " sasl client input : " < < base64 : : encode ( payload ) < < endl ; <nl> <nl> std : : string responsePayload ; <nl> - status = session . step ( payload , & responsePayload ) ; <nl> + status = session - > step ( payload , & responsePayload ) ; <nl> if ( ! status . isOK ( ) ) <nl> return status ; <nl> <nl> mmm a / src / mongo / client / sasl_client_session . cpp <nl> ppp b / src / mongo / client / sasl_client_session . cpp <nl> <nl> # include " mongo / util / signal_handlers_synchronous . h " <nl> <nl> namespace mongo { <nl> - namespace { <nl> + SaslClientSession : : SaslClientSessionFactoryFn SaslClientSession : : create = NULL ; <nl> + <nl> + SaslClientSession : : SaslClientSession ( ) { } <nl> <nl> - / * <nl> - * Allocator functions to be used by the SASL library , if the client <nl> - * doesn ' t initialize the library for us . <nl> - * / <nl> - <nl> - / / Version 2 . 1 . 
26 is the first version to use size_t in the allocator signatures <nl> - # if ( SASL_VERSION_FULL > = ( ( 2 < < 16 ) | ( 1 < < 8 ) | 26 ) ) <nl> - typedef size_t SaslAllocSize ; <nl> - # else <nl> - typedef unsigned long SaslAllocSize ; <nl> - # endif <nl> - <nl> - typedef int ( * SaslCallbackFn ) ( ) ; <nl> - <nl> - void * saslOurMalloc ( SaslAllocSize sz ) { <nl> - return mongoMalloc ( sz ) ; <nl> - } <nl> - <nl> - void * saslOurCalloc ( SaslAllocSize count , SaslAllocSize size ) { <nl> - void * ptr = calloc ( count , size ) ; <nl> - if ( ! ptr ) { <nl> - reportOutOfMemoryErrorAndExit ( ) ; <nl> - } <nl> - return ptr ; <nl> - } <nl> - <nl> - void * saslOurRealloc ( void * ptr , SaslAllocSize sz ) { <nl> - return mongoRealloc ( ptr , sz ) ; <nl> - } <nl> - <nl> - / * <nl> - * Mutex functions to be used by the SASL library , if the client doesn ' t initialize the library <nl> - * for us . <nl> - * / <nl> - <nl> - void * saslMutexAlloc ( void ) { <nl> - return new SimpleMutex ( " sasl " ) ; <nl> - } <nl> - <nl> - int saslMutexLock ( void * mutex ) { <nl> - static_cast < SimpleMutex * > ( mutex ) - > lock ( ) ; <nl> - return SASL_OK ; <nl> - } <nl> - <nl> - int saslMutexUnlock ( void * mutex ) { <nl> - static_cast < SimpleMutex * > ( mutex ) - > unlock ( ) ; <nl> - return SASL_OK ; <nl> - } <nl> - <nl> - void saslMutexFree ( void * mutex ) { <nl> - delete static_cast < SimpleMutex * > ( mutex ) ; <nl> - } <nl> - <nl> - / * * <nl> - * Configures the SASL library to use allocator and mutex functions we specify , <nl> - * unless the client application has previously initialized the SASL library . <nl> - * / <nl> - MONGO_INITIALIZER ( CyrusSaslAllocatorsAndMutexes ) ( InitializerContext * ) { <nl> - sasl_set_alloc ( saslOurMalloc , <nl> - saslOurCalloc , <nl> - saslOurRealloc , <nl> - free ) ; <nl> - <nl> - sasl_set_mutex ( saslMutexAlloc , <nl> - saslMutexLock , <nl> - saslMutexUnlock , <nl> - saslMutexFree ) ; <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - int saslClientLogSwallow ( void * context , int priority , const char * message ) { <nl> - return SASL_OK ; / / do nothing <nl> - } <nl> - <nl> - / * * <nl> - * Initializes the client half of the SASL library , but is effectively a no - op if the client <nl> - * application has already done it . <nl> - * <nl> - * If a client wishes to override this initialization but keep the allocator and mutex <nl> - * initialization , it should implement a MONGO_INITIALIZER_GENERAL with <nl> - * CyrusSaslAllocatorsAndMutexes as a prerequisite and SaslClientContext as a dependent . If it <nl> - * wishes to override both , it should implement a MONGO_INITIALIZER_GENERAL with <nl> - * CyrusSaslAllocatorsAndMutexes and SaslClientContext as dependents , or initialize the library <nl> - * before calling mongo : : runGlobalInitializersOrDie ( ) . <nl> - * / <nl> - MONGO_INITIALIZER_WITH_PREREQUISITES ( SaslClientContext , ( " CyrusSaslAllocatorsAndMutexes " ) ) ( <nl> - InitializerContext * context ) { <nl> - <nl> - static sasl_callback_t saslClientGlobalCallbacks [ ] = <nl> - { { SASL_CB_LOG , SaslCallbackFn ( saslClientLogSwallow ) , NULL / * context * / } , <nl> - { SASL_CB_LIST_END } } ; <nl> - <nl> - / / If the client application has previously called sasl_client_init ( ) , the callbacks passed <nl> - / / in here are ignored . <nl> - / / <nl> - / / TODO : Call sasl_client_done ( ) at shutdown when we have a story for orderly shutdown . <nl> - int result = sasl_client_init ( saslClientGlobalCallbacks ) ; <nl> - if ( result ! 
= SASL_OK ) { <nl> - return Status ( ErrorCodes : : UnknownError , <nl> - mongoutils : : str : : stream ( ) < < <nl> - " Could not initialize sasl client components ( " < < <nl> - sasl_errstring ( result , NULL , NULL ) < < <nl> - " ) " ) ; <nl> - } <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - / * * <nl> - * Callback registered on the sasl_conn_t underlying a SaslClientSession to allow the Cyrus SASL <nl> - * library to query for the authentication id and other simple string configuration parameters . <nl> - * <nl> - * Note that in Mongo , the authentication and authorization ids ( authid and authzid ) are always <nl> - * the same . These correspond to SASL_CB_AUTHNAME and SASL_CB_USER . <nl> - * / <nl> - int saslClientGetSimple ( void * context , <nl> - int id , <nl> - const char * * result , <nl> - unsigned * resultLen ) throw ( ) { <nl> - SaslClientSession * session = static_cast < SaslClientSession * > ( context ) ; <nl> - if ( ! session | | ! result ) <nl> - return SASL_BADPARAM ; <nl> - <nl> - SaslClientSession : : Parameter requiredParameterId ; <nl> - switch ( id ) { <nl> - case SASL_CB_AUTHNAME : <nl> - case SASL_CB_USER : <nl> - requiredParameterId = SaslClientSession : : parameterUser ; <nl> - break ; <nl> - default : <nl> - return SASL_FAIL ; <nl> - } <nl> - <nl> - if ( ! session - > hasParameter ( requiredParameterId ) ) <nl> - return SASL_FAIL ; <nl> - StringData value = session - > getParameter ( requiredParameterId ) ; <nl> - * result = value . rawData ( ) ; <nl> - if ( resultLen ) <nl> - * resultLen = static_cast < unsigned > ( value . size ( ) ) ; <nl> - return SASL_OK ; <nl> - } <nl> - <nl> - / * * <nl> - * Callback registered on the sasl_conn_t underlying a SaslClientSession to allow the Cyrus SASL <nl> - * library to query for the password data . <nl> - * / <nl> - int saslClientGetPassword ( sasl_conn_t * conn , <nl> - void * context , <nl> - int id , <nl> - sasl_secret_t * * outSecret ) throw ( ) { <nl> - <nl> - SaslClientSession * session = static_cast < SaslClientSession * > ( context ) ; <nl> - if ( ! session | | ! 
outSecret ) <nl> - return SASL_BADPARAM ; <nl> - <nl> - sasl_secret_t * secret = session - > getPasswordAsSecret ( ) ; <nl> - if ( secret = = NULL ) { <nl> - sasl_seterror ( conn , 0 , " No password data provided " ) ; <nl> - return SASL_FAIL ; <nl> - } <nl> - <nl> - * outSecret = secret ; <nl> - return SASL_OK ; <nl> - } <nl> - <nl> - } / / namespace <nl> - <nl> - SaslClientSession : : SaslClientSession ( ) : <nl> - _saslConnection ( NULL ) , <nl> - _step ( 0 ) , <nl> - _done ( false ) { <nl> - <nl> - const sasl_callback_t callbackTemplate [ maxCallbacks ] = { <nl> - { SASL_CB_AUTHNAME , SaslCallbackFn ( saslClientGetSimple ) , this } , <nl> - { SASL_CB_USER , SaslCallbackFn ( saslClientGetSimple ) , this } , <nl> - { SASL_CB_PASS , SaslCallbackFn ( saslClientGetPassword ) , this } , <nl> - { SASL_CB_LIST_END } <nl> - } ; <nl> - std : : copy ( callbackTemplate , callbackTemplate + maxCallbacks , _callbacks ) ; <nl> - } <nl> - <nl> - SaslClientSession : : ~ SaslClientSession ( ) { <nl> - sasl_dispose ( & _saslConnection ) ; <nl> - } <nl> + SaslClientSession : : ~ SaslClientSession ( ) { } <nl> <nl> void SaslClientSession : : setParameter ( Parameter id , const StringData & value ) { <nl> fassert ( 16807 , id > = 0 & & id < numParameters ) ; <nl> DataBuffer & buffer = _parameters [ id ] ; <nl> - if ( id = = parameterPassword ) { <nl> - / / The parameterPassword is stored as a sasl_secret_t inside its DataBuffer , while other <nl> - / / parameters are stored directly . This facilitates memory ownership management for <nl> - / / getPasswordAsSecret ( ) . <nl> - buffer . size = sizeof ( sasl_secret_t ) + value . size ( ) ; <nl> - buffer . data . reset ( new char [ buffer . size + 1 ] ) ; <nl> - sasl_secret_t * secret = <nl> - static_cast < sasl_secret_t * > ( static_cast < void * > ( buffer . data . get ( ) ) ) ; <nl> - secret - > len = value . size ( ) ; <nl> - value . copyTo ( static_cast < char * > ( static_cast < void * > ( & secret - > data [ 0 ] ) ) , false ) ; <nl> - } <nl> - else { <nl> - buffer . size = value . size ( ) ; <nl> - buffer . data . reset ( new char [ buffer . size + 1 ] ) ; <nl> - / / Note that we append a terminal NUL to buffer . data , so it may be treated as a C - style <nl> - / / string . This is required for parameterServiceName , parameterServiceHostname , <nl> - / / parameterMechanism and parameterUser . <nl> - value . copyTo ( buffer . data . get ( ) , true ) ; <nl> - } <nl> + buffer . size = value . size ( ) ; <nl> + buffer . data . reset ( new char [ buffer . size + 1 ] ) ; <nl> + <nl> + / / Note that we append a terminal NUL to buffer . data , so it may be treated as a C - style <nl> + / / string . This is required for parameterServiceName , parameterServiceHostname , <nl> + / / parameterMechanism and parameterUser . <nl> + value . copyTo ( buffer . data . get ( ) , true ) ; <nl> } <nl> <nl> bool SaslClientSession : : hasParameter ( Parameter id ) { <nl> namespace { <nl> if ( ! hasParameter ( id ) ) <nl> return StringData ( ) ; <nl> <nl> - if ( id = = parameterPassword ) { <nl> - / / See comment in setParameter ( ) about the special storage of parameterPassword . <nl> - sasl_secret_t * secret = getPasswordAsSecret ( ) ; <nl> - return StringData ( static_cast < char * > ( static_cast < void * > ( secret - > data ) ) , secret - > len ) ; <nl> - } <nl> - else { <nl> - DataBuffer & buffer = _parameters [ id ] ; <nl> - return StringData ( buffer . data . get ( ) , buffer . 
size ) ; <nl> - } <nl> - } <nl> - <nl> - sasl_secret_t * SaslClientSession : : getPasswordAsSecret ( ) { <nl> - / / See comment in setParameter ( ) about the special storage of parameterPassword . <nl> - return static_cast < sasl_secret_t * > ( <nl> - static_cast < void * > ( _parameters [ parameterPassword ] . data . get ( ) ) ) ; <nl> - } <nl> - <nl> - Status SaslClientSession : : initialize ( ) { <nl> - if ( _saslConnection ! = NULL ) <nl> - return Status ( ErrorCodes : : AlreadyInitialized , " Cannot reinitialize SaslClientSession . " ) ; <nl> - <nl> - int result = sasl_client_new ( _parameters [ parameterServiceName ] . data . get ( ) , <nl> - _parameters [ parameterServiceHostname ] . data . get ( ) , <nl> - NULL , <nl> - NULL , <nl> - _callbacks , <nl> - 0 , <nl> - & _saslConnection ) ; <nl> - <nl> - if ( SASL_OK ! = result ) { <nl> - return Status ( ErrorCodes : : UnknownError , <nl> - mongoutils : : str : : stream ( ) < < sasl_errstring ( result , NULL , NULL ) ) ; <nl> - } <nl> - <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - Status SaslClientSession : : step ( const StringData & inputData , std : : string * outputData ) { <nl> - const char * output = NULL ; <nl> - unsigned outputSize = 0xFFFFFFFF ; <nl> - <nl> - int result ; <nl> - if ( _step = = 0 ) { <nl> - const char * actualMechanism ; <nl> - result = sasl_client_start ( _saslConnection , <nl> - getParameter ( parameterMechanism ) . toString ( ) . c_str ( ) , <nl> - NULL , <nl> - & output , <nl> - & outputSize , <nl> - & actualMechanism ) ; <nl> - } <nl> - else { <nl> - result = sasl_client_step ( _saslConnection , <nl> - inputData . rawData ( ) , <nl> - static_cast < unsigned > ( inputData . size ( ) ) , <nl> - NULL , <nl> - & output , <nl> - & outputSize ) ; <nl> - } <nl> - + + _step ; <nl> - switch ( result ) { <nl> - case SASL_OK : <nl> - _done = true ; <nl> - / / Fall through <nl> - case SASL_CONTINUE : <nl> - * outputData = std : : string ( output , outputSize ) ; <nl> - return Status : : OK ( ) ; <nl> - case SASL_NOMECH : <nl> - return Status ( ErrorCodes : : BadValue , sasl_errdetail ( _saslConnection ) ) ; <nl> - case SASL_BADAUTH : <nl> - return Status ( ErrorCodes : : AuthenticationFailed , sasl_errdetail ( _saslConnection ) ) ; <nl> - default : <nl> - return Status ( ErrorCodes : : ProtocolError , sasl_errdetail ( _saslConnection ) ) ; <nl> - } <nl> + DataBuffer & buffer = _parameters [ id ] ; <nl> + return StringData ( buffer . data . get ( ) , buffer . size ) ; <nl> } <nl> <nl> } / / namespace mongo <nl> mmm a / src / mongo / client / sasl_client_session . h <nl> ppp b / src / mongo / client / sasl_client_session . h <nl> <nl> * / <nl> <nl> # include < boost / scoped_array . hpp > <nl> - # include < sasl / sasl . h > <nl> # include < string > <nl> - # include < vector > <nl> <nl> # include " mongo / base / disallow_copying . h " <nl> # include " mongo / base / status . h " <nl> # include " mongo / base / string_data . h " <nl> # include " mongo / client / export_macros . h " <nl> + # include " mongo / stdx / functional . h " <nl> <nl> namespace mongo { <nl> <nl> / * * <nl> - * Implementation of the client side of a SASL authentication conversation . <nl> + * Base class for the client side of a SASL authentication conversation . <nl> * <nl> * To use , create an instance , then use setParameter ( ) to configure the authentication <nl> * parameters . 
Once all parameters are set , call initialize ( ) to initialize the client state <nl> namespace mongo { <nl> class MONGO_CLIENT_API SaslClientSession { <nl> MONGO_DISALLOW_COPYING ( SaslClientSession ) ; <nl> public : <nl> + typedef stdx : : function < SaslClientSession * ( ) > SaslClientSessionFactoryFn ; <nl> + static SaslClientSessionFactoryFn create ; <nl> + <nl> / * * <nl> * Identifiers of parameters used to configure a SaslClientSession . <nl> * / <nl> namespace mongo { <nl> } ; <nl> <nl> SaslClientSession ( ) ; <nl> - ~ SaslClientSession ( ) ; <nl> + virtual ~ SaslClientSession ( ) ; <nl> <nl> / * * <nl> * Sets the parameter identified by " id " to " value " . <nl> namespace mongo { <nl> * <nl> * The session object makes and owns a copy of the data in " value " . <nl> * / <nl> - void setParameter ( Parameter id , const StringData & value ) ; <nl> + virtual void setParameter ( Parameter id , const StringData & value ) ; <nl> <nl> / * * <nl> * Returns true if " id " identifies a parameter previously set by a call to setParameter ( ) . <nl> * / <nl> - bool hasParameter ( Parameter id ) ; <nl> + virtual bool hasParameter ( Parameter id ) ; <nl> <nl> / * * <nl> * Returns the value of a previously set parameter . <nl> namespace mongo { <nl> * valid until setParameter ( ) is called with the same value of " id " , or the session object <nl> * goes out of scope . <nl> * / <nl> - StringData getParameter ( Parameter id ) ; <nl> - <nl> - / * * <nl> - * Returns the value of the parameterPassword parameter in the form of a sasl_secret_t , used <nl> - * by the Cyrus SASL library ' s SASL_CB_PASS callback . The session object owns the storage <nl> - * referenced by the returned sasl_secret_t * , which will remain in scope according to the <nl> - * same rules as given for getParameter ( ) , above . <nl> - * / <nl> - sasl_secret_t * getPasswordAsSecret ( ) ; <nl> + virtual StringData getParameter ( Parameter id ) ; <nl> <nl> / * * <nl> * Initializes a session for use . <nl> * <nl> * Call exactly once , after setting any parameters you intend to set via setParameter ( ) . <nl> * / <nl> - Status initialize ( ) ; <nl> + virtual Status initialize ( ) = 0 ; <nl> <nl> / * * <nl> * Takes one step of the SASL protocol on behalf of the client . <nl> namespace mongo { <nl> * determine if the conversation has completed . When step ( ) returns Status : : OK ( ) and <nl> * isDone ( ) returns true , authentication has completed successfully . <nl> * / <nl> - Status step ( const StringData & inputData , std : : string * outputData ) ; <nl> + virtual Status step ( const StringData & inputData , std : : string * outputData ) = 0 ; <nl> <nl> / * * <nl> * Returns true if the authentication completed successfully . <nl> * / <nl> - bool isDone ( ) const { return _done ; } <nl> + virtual bool isDone ( ) const = 0 ; <nl> <nl> private : <nl> / * * <nl> namespace mongo { <nl> size_t size ; <nl> } ; <nl> <nl> - / / / Maximum number of Cyrus SASL callbacks stored in _callbacks . <nl> - static const int maxCallbacks = 4 ; <nl> - <nl> - / / / Underlying Cyrus SASL library connection object . <nl> - sasl_conn_t * _saslConnection ; <nl> - <nl> - / / / Callbacks registered on _saslConnection for providing the Cyrus SASL library with <nl> - / / / parameter values , etc . <nl> - sasl_callback_t _callbacks [ maxCallbacks ] ; <nl> - <nl> / / / Buffers for each of the settable parameters . <nl> DataBuffer _parameters [ numParameters ] ; <nl> - <nl> - / / / Number of successfully completed conversation steps . 
<nl> - int _step ; <nl> - <nl> - / / / See isDone ( ) . <nl> - bool _done ; <nl> } ; <nl> <nl> } / / namespace mongo <nl> mmm a / src / mongo / client / sasl_sspi . cpp <nl> ppp b / src / mongo / client / sasl_sspi . cpp <nl> namespace { <nl> * / <nl> MONGO_INITIALIZER_WITH_PREREQUISITES ( SaslSspiClientPlugin , <nl> ( " CyrusSaslAllocatorsAndMutexes " , <nl> - " SaslClientContext " ) ) <nl> + " CyrusSaslClientContext " ) ) <nl> ( InitializerContext * ) { <nl> <nl> int ret = sasl_client_add_plugin ( sspiPluginName , <nl> namespace { <nl> } <nl> MONGO_INITIALIZER_WITH_PREREQUISITES ( SaslCramClientPlugin , <nl> ( " CyrusSaslAllocatorsAndMutexes " , <nl> - " SaslClientContext " ) ) <nl> + " CyrusSaslClientContext " ) ) <nl> ( InitializerContext * ) { <nl> int ret = sasl_client_add_plugin ( " CRAMMD5 " , <nl> crammd5_client_plug_init ) ; <nl> namespace { <nl> <nl> MONGO_INITIALIZER_WITH_PREREQUISITES ( SaslPlainClientPlugin , <nl> ( " CyrusSaslAllocatorsAndMutexes " , <nl> - " SaslClientContext " ) ) <nl> + " CyrusSaslClientContext " ) ) <nl> ( InitializerContext * ) { <nl> int ret = sasl_client_add_plugin ( " PLAIN " , <nl> plain_client_plug_init ) ; <nl> mmm a / src / mongo / db / auth / sasl_authentication_session . cpp <nl> ppp b / src / mongo / db / auth / sasl_authentication_session . cpp <nl> <nl> # include " mongo / util / mongoutils / str . h " <nl> <nl> namespace mongo { <nl> - SaslAuthenticationSession : : SaslSessionFactoryFn SaslAuthenticationSession : : create = NULL ; <nl> + SaslAuthenticationSession : : SaslAuthenticationSessionFactoryFn <nl> + SaslAuthenticationSession : : create = NULL ; <nl> <nl> / / Mechanism name constants . <nl> const char SaslAuthenticationSession : : mechanismCRAMMD5 [ ] = " CRAM - MD5 " ; <nl> mmm a / src / mongo / db / auth / sasl_authentication_session . h <nl> ppp b / src / mongo / db / auth / sasl_authentication_session . h <nl> namespace mongo { <nl> MONGO_DISALLOW_COPYING ( SaslAuthenticationSession ) ; <nl> public : <nl> typedef stdx : : function < SaslAuthenticationSession * ( AuthorizationSession * ) > <nl> - SaslSessionFactoryFn ; <nl> - static SaslSessionFactoryFn create ; <nl> + SaslAuthenticationSessionFactoryFn ; <nl> + static SaslAuthenticationSessionFactoryFn create ; <nl> <nl> / / Mechanism name constants . <nl> static const char mechanismCRAMMD5 [ ] ; <nl>
SERVER - 7596 SASL client code refactor for SCRAM
mongodb/mongo
c14c1f113d56da4e32ac13a123028c916f0c8cff
2014-09-04T18:54:11Z
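The refactor above turns SaslClientSession into an abstract interface whose concrete backend is chosen through a static factory function. Below is a minimal, self-contained sketch of that factory-function pattern; the names DemoClientSession and CyrusLikeSession are illustrative stand-ins, not the MongoDB classes themselves.

// Sketch of an abstract session class selected via a static std::function
// factory, so different backends can be registered at startup without the
// callers knowing the concrete type. Assumed names throughout.
#include <functional>
#include <iostream>
#include <memory>
#include <string>

class DemoClientSession {
public:
    using FactoryFn = std::function<DemoClientSession*()>;
    static FactoryFn create;   // set once during initialization

    virtual ~DemoClientSession() = default;
    virtual std::string step(const std::string& input) = 0;
    virtual bool isDone() const = 0;
};

DemoClientSession::FactoryFn DemoClientSession::create = nullptr;

// One possible backend; a different build could register another one.
class CyrusLikeSession : public DemoClientSession {
public:
    std::string step(const std::string& input) override {
        done_ = true;
        return "response-to:" + input;
    }
    bool isDone() const override { return done_; }
private:
    bool done_ = false;
};

int main() {
    // Registration would normally happen in a global initializer.
    DemoClientSession::create = [] { return new CyrusLikeSession(); };

    std::unique_ptr<DemoClientSession> session(DemoClientSession::create());
    std::cout << session->step("challenge") << "\n";
    std::cout << (session->isDone() ? "done" : "continue") << "\n";
    return 0;
}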
mmm a / src / mongo / db / pagefault . cpp <nl> ppp b / src / mongo / db / pagefault . cpp <nl> namespace mongo { <nl> } <nl> else { <nl> cc ( ) . _pageFaultRetryableSection = this ; <nl> - cc ( ) . _hasWrittenThisOperation = false ; <nl> } <nl> } <nl> <nl>
SERVER - 11432 PageFaultRetryableSection shouldn ' t set hasWrittenThisOp
mongodb/mongo
3c70a417f2442c40ad60e290edc02a9c43806a94
2014-02-25T19:35:18Z
mmm a / scene / 3d / particles . cpp <nl> ppp b / scene / 3d / particles . cpp <nl> void Particles : : _notification ( int p_what ) { <nl> set_process_internal ( false ) ; <nl> } <nl> } <nl> + <nl> + if ( p_what = = NOTIFICATION_VISIBILITY_CHANGED ) { <nl> + / / make sure particles are updated before rendering occurs if they were active before <nl> + if ( is_visible_in_tree ( ) & & ! VS : : get_singleton ( ) - > particles_is_inactive ( particles ) ) { <nl> + VS : : get_singleton ( ) - > particles_request_process ( particles ) ; <nl> + } <nl> + } <nl> } <nl> <nl> void Particles : : _bind_methods ( ) { <nl> mmm a / servers / visual / visual_server_raster . h <nl> ppp b / servers / visual / visual_server_raster . h <nl> class VisualServerRaster : public VisualServer { <nl> BIND2 ( particles_set_process_material , RID , RID ) <nl> BIND2 ( particles_set_fixed_fps , RID , int ) <nl> BIND2 ( particles_set_fractional_delta , RID , bool ) <nl> + BIND1R ( bool , particles_is_inactive , RID ) <nl> + BIND1 ( particles_request_process , RID ) <nl> BIND1 ( particles_restart , RID ) <nl> <nl> BIND2 ( particles_set_draw_order , RID , VS : : ParticlesDrawOrder ) <nl> mmm a / servers / visual / visual_server_wrap_mt . h <nl> ppp b / servers / visual / visual_server_wrap_mt . h <nl> class VisualServerWrapMT : public VisualServer { <nl> FUNC2 ( particles_set_process_material , RID , RID ) <nl> FUNC2 ( particles_set_fixed_fps , RID , int ) <nl> FUNC2 ( particles_set_fractional_delta , RID , bool ) <nl> + FUNC1R ( bool , particles_is_inactive , RID ) <nl> + FUNC1 ( particles_request_process , RID ) <nl> FUNC1 ( particles_restart , RID ) <nl> <nl> FUNC2 ( particles_set_draw_order , RID , VS : : ParticlesDrawOrder ) <nl> mmm a / servers / visual_server . cpp <nl> ppp b / servers / visual_server . cpp <nl> void VisualServer : : _bind_methods ( ) { <nl> ClassDB : : bind_method ( D_METHOD ( " particles_set_process_material " , " particles " , " material " ) , & VisualServer : : particles_set_process_material ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " particles_set_fixed_fps " , " particles " , " fps " ) , & VisualServer : : particles_set_fixed_fps ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " particles_set_fractional_delta " , " particles " , " enable " ) , & VisualServer : : particles_set_fractional_delta ) ; <nl> + ClassDB : : bind_method ( D_METHOD ( " particles_is_inactive " , " particles " ) , & VisualServer : : particles_is_inactive ) ; <nl> + ClassDB : : bind_method ( D_METHOD ( " particles_request_process " , " particles " ) , & VisualServer : : particles_request_process ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " particles_restart " , " particles " ) , & VisualServer : : particles_restart ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " particles_set_draw_order " , " particles " , " order " ) , & VisualServer : : particles_set_draw_order ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " particles_set_draw_passes " , " particles " , " count " ) , & VisualServer : : particles_set_draw_passes ) ; <nl> mmm a / servers / visual_server . h <nl> ppp b / servers / visual_server . 
h <nl> class VisualServer : public Object { <nl> virtual void particles_set_process_material ( RID p_particles , RID p_material ) = 0 ; <nl> virtual void particles_set_fixed_fps ( RID p_particles , int p_fps ) = 0 ; <nl> virtual void particles_set_fractional_delta ( RID p_particles , bool p_enable ) = 0 ; <nl> + virtual bool particles_is_inactive ( RID p_particles ) = 0 ; <nl> + virtual void particles_request_process ( RID p_particles ) = 0 ; <nl> virtual void particles_restart ( RID p_particles ) = 0 ; <nl> <nl> enum ParticlesDrawOrder { <nl>
Merge pull request from nekomatata / particles - restart - glitch
godotengine/godot
7afa1a64eceae9a6a27d84335d79f3f9238bd277
2019-11-09T17:53:34Z
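The change above makes the Particles node request server-side processing when it becomes visible again, so the effect does not render stale. The sketch below shows that visibility-driven request pattern with stand-in types (ParticlesServer, ParticlesNode); it does not use the real Godot VisualServer API.

// Minimal sketch: on a visibility change, ask the server to process the
// particle system again if it was still active. All names are assumptions.
#include <iostream>

struct ParticlesServer {
    bool inactive = false;
    bool process_requested = false;
    bool particlesIsInactive() const { return inactive; }
    void particlesRequestProcess() { process_requested = true; }
};

struct ParticlesNode {
    ParticlesServer* server;
    bool visible_in_tree = false;

    // Called by the scene tree when visibility changes.
    void onVisibilityChanged(bool now_visible) {
        visible_in_tree = now_visible;
        if (visible_in_tree && !server->particlesIsInactive())
            server->particlesRequestProcess();
    }
};

int main() {
    ParticlesServer server;
    ParticlesNode node{&server};
    node.onVisibilityChanged(true);
    std::cout << (server.process_requested ? "process requested\n" : "idle\n");
    return 0;
}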
mmm a / aten / src / ATen / Version . cpp <nl> ppp b / aten / src / ATen / Version . cpp <nl> <nl> <nl> # include < caffe2 / core / common . h > <nl> <nl> + # include < ATen / native / DispatchStub . h > <nl> + <nl> # include < sstream > <nl> <nl> namespace at { <nl> std : : string get_openmp_version ( ) { <nl> return ss . str ( ) ; <nl> } <nl> <nl> + std : : string used_cpu_capability ( ) { <nl> + / / It is possible that we override the cpu_capability with <nl> + / / environment variable <nl> + std : : ostringstream ss ; <nl> + ss < < " CPU capability usage : " ; <nl> + auto capability = native : : get_cpu_capability ( ) ; <nl> + switch ( capability ) { <nl> + case native : : CPUCapability : : DEFAULT : <nl> + ss < < " NO AVX " ; <nl> + break ; <nl> + case native : : CPUCapability : : AVX : <nl> + ss < < " AVX " ; <nl> + break ; <nl> + case native : : CPUCapability : : AVX2 : <nl> + ss < < " AVX2 " ; <nl> + break ; <nl> + default : <nl> + break ; <nl> + } <nl> + return ss . str ( ) ; <nl> + } <nl> + <nl> std : : string show_config ( ) { <nl> std : : ostringstream ss ; <nl> ss < < " PyTorch built with : \ n " ; / / TODO add the version of PyTorch <nl> std : : string show_config ( ) { <nl> ss < < " - NNPACK is enabled \ n " ; <nl> # endif <nl> <nl> + ss < < " - " < < used_cpu_capability ( ) < < " \ n " ; <nl> + <nl> if ( hasCUDA ( ) ) { <nl> ss < < detail : : getCUDAHooks ( ) . showConfig ( ) ; <nl> } <nl> new file mode 100644 <nl> index 000000000000 . . 7b34a0af369c <nl> mmm / dev / null <nl> ppp b / test / cpp / api / dispatch . cpp <nl> <nl> + # include < gtest / gtest . h > <nl> + <nl> + # include < torch / torch . h > <nl> + # include < ATen / native / Pow . h > <nl> + # include < torch / types . h > <nl> + # include < torch / utils . h > <nl> + # include < test / cpp / api / support . h > <nl> + # include < iostream > <nl> + # include < vector > <nl> + # include < type_traits > <nl> + # include < cstdlib > <nl> + <nl> + using namespace at ; <nl> + using namespace torch : : test ; <nl> + <nl> + struct DispatchTest : torch : : test : : SeedingFixture { } ; <nl> + <nl> + TEST_F ( DispatchTest , TestAVX2 ) { <nl> + const std : : vector < int > ints { 1 , 2 , 3 , 4 } ; <nl> + const std : : vector < int > result { 1 , 4 , 27 , 256 } ; <nl> + const auto vals_tensor = torch : : tensor ( ints ) ; <nl> + const auto pows_tensor = torch : : tensor ( ints ) ; <nl> + setenv ( " ATEN_CPU_CAPABILITY " , " avx2 " , 1 ) ; <nl> + const auto actual_pow_avx2 = vals_tensor . pow ( pows_tensor ) ; <nl> + for ( int i = 0 ; i < 4 ; i + + ) { <nl> + ASSERT_EQ ( result [ i ] , actual_pow_avx2 [ i ] . item < int > ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST_F ( DispatchTest , TestAVX ) { <nl> + const std : : vector < int > ints { 1 , 2 , 3 , 4 } ; <nl> + const std : : vector < int > result { 1 , 4 , 27 , 256 } ; <nl> + const auto vals_tensor = torch : : tensor ( ints ) ; <nl> + const auto pows_tensor = torch : : tensor ( ints ) ; <nl> + setenv ( " ATEN_CPU_CAPABILITY " , " avx " , 1 ) ; <nl> + const auto actual_pow_avx = vals_tensor . pow ( pows_tensor ) ; <nl> + for ( int i = 0 ; i < 4 ; i + + ) { <nl> + ASSERT_EQ ( result [ i ] , actual_pow_avx [ i ] . 
item < int > ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST_F ( DispatchTest , TestDefault ) { <nl> + const std : : vector < int > ints { 1 , 2 , 3 , 4 } ; <nl> + const std : : vector < int > result { 1 , 4 , 27 , 256 } ; <nl> + const auto vals_tensor = torch : : tensor ( ints ) ; <nl> + const auto pows_tensor = torch : : tensor ( ints ) ; <nl> + setenv ( " ATEN_CPU_CAPABILITY " , " default " , 1 ) ; <nl> + const auto actual_pow_default = vals_tensor . pow ( pows_tensor ) ; <nl> + for ( int i = 0 ; i < 4 ; i + + ) { <nl> + ASSERT_EQ ( result [ i ] , actual_pow_default [ i ] . item < int > ( ) ) ; <nl> + } <nl> + } <nl>
Add the build for runtime dispatch for the AVX , AVX2 instruction sets ( )
pytorch/pytorch
09296c34a4b582316bfd40547e367bee0c883044
2020-03-10T22:32:57Z
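The tests above override the dispatched CPU capability through the ATEN_CPU_CAPABILITY environment variable. The sketch below shows the general shape of environment-variable-driven runtime dispatch under assumed names (DEMO_CPU_CAPABILITY, powKernel); it is not the actual ATen DispatchStub machinery.

// Minimal sketch: pick a "kernel" based on an environment-variable override.
// In a real library each branch would point at a separately compiled kernel;
// here all branches share one scalar implementation to keep the sketch short.
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>

enum class CpuCapability { Default, Avx, Avx2 };

CpuCapability detectCapability() {
    // The override takes precedence over hardware detection (omitted here).
    if (const char* env = std::getenv("DEMO_CPU_CAPABILITY")) {
        std::string value(env);
        if (value == "avx2") return CpuCapability::Avx2;
        if (value == "avx") return CpuCapability::Avx;
        return CpuCapability::Default;
    }
    return CpuCapability::Default;
}

std::vector<long> powKernel(const std::vector<int>& vals, const std::vector<int>& exps) {
    std::vector<long> out(vals.size(), 1);
    for (size_t i = 0; i < vals.size(); ++i)
        for (int e = 0; e < exps[i]; ++e) out[i] *= vals[i];
    return out;
}

int main() {
    switch (detectCapability()) {
        case CpuCapability::Avx2: std::cout << "dispatching AVX2 kernel\n"; break;
        case CpuCapability::Avx:  std::cout << "dispatching AVX kernel\n";  break;
        default:                  std::cout << "dispatching default kernel\n"; break;
    }
    for (long v : powKernel({1, 2, 3, 4}, {1, 2, 3, 4})) std::cout << v << ' ';
    std::cout << '\n';   // expected: 1 4 27 256
    return 0;
}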
mmm a / project / BuildDependencies / scripts / 0_package . list <nl> ppp b / project / BuildDependencies / scripts / 0_package . list <nl> freetype - 2 . 4 . 6 - win32 - 3 . 7z <nl> giflib - 5 . 0 . 5p - win32 . 7z <nl> gnutls - 3 . 2 . 3 - win32 . zip <nl> jsonschemabuilder - 1 . 0 . 0 - win32 - 3 . 7z <nl> - libass - 0 . 10 . 2 - win32 . 7z <nl> + libass - 0 . 12 . 1 - win32 . 7z <nl> libbluray - 0 . 4 . 0 - win32 . zip <nl> libcdio - 0 . 83 - win32 - 2 . 7z <nl> libcec - 2 . 2 . 0 - win32 - 1 . 7z <nl> mmm a / tools / depends / target / libass / Makefile <nl> ppp b / tools / depends / target / libass / Makefile <nl> DEPS = . . / . . / Makefile . include Makefile <nl> <nl> # lib name , version <nl> LIBNAME = libass <nl> - VERSION = 0 . 10 . 1 <nl> + VERSION = 0 . 12 . 1 <nl> SOURCE = $ ( LIBNAME ) - $ ( VERSION ) <nl> ARCHIVE = $ ( SOURCE ) . tar . gz <nl> <nl> $ ( LIBDYLIB ) : $ ( PLATFORM ) <nl> . installed - $ ( PLATFORM ) : $ ( LIBDYLIB ) <nl> $ ( MAKE ) - C $ ( PLATFORM ) install <nl> ifeq ( $ ( OS ) , android ) <nl> - rm - f $ ( PREFIX ) / lib / libass . la $ ( PREFIX ) / lib / libass . so $ ( PREFIX ) / lib / libass . so . 4 <nl> - mv - f $ ( PREFIX ) / lib / libass . so . 4 . 1 . 0 $ ( PREFIX ) / lib / libass . so <nl> - $ ( RPL ) - e " libass . so . 4 " " libass . so \ x00 \ x00 " $ ( PREFIX ) / lib / libass . so <nl> + rm - f $ ( PREFIX ) / lib / libass . la $ ( PREFIX ) / lib / libass . so $ ( PREFIX ) / lib / libass . so . 5 <nl> + mv - f $ ( PREFIX ) / lib / libass . so . 5 . 1 . 0 $ ( PREFIX ) / lib / libass . so <nl> + $ ( RPL ) - e " libass . so . 5 " " libass . so \ x00 \ x00 " $ ( PREFIX ) / lib / libass . so <nl> - $ ( READELF ) - - dynamic $ ( PREFIX ) / lib / libass . so | grep ibrary <nl> endif <nl> touch $ @ <nl>
bump libass to version 0 . 12 . 1
xbmc/xbmc
4b728cb09412ecd85fb4adc268e624a08a94c39c
2015-04-07T15:32:41Z
mmm a / dbms / src / Common / PoolWithFailoverBase . h <nl> ppp b / dbms / src / Common / PoolWithFailoverBase . h <nl> <nl> # include < cstdlib > <nl> # include < climits > <nl> # include < random > <nl> + # include < functional > <nl> # include < common / Types . h > <nl> # include < ext / scope_guard . h > <nl> # include < Core / Types . h > <nl>
Fix compile with gcc7
ClickHouse/ClickHouse
f6f85440d42583ed5cd78e7ef5420edb18817899
2017-06-12T23:50:34Z
mmm a / tensorflow / contrib / linear_optimizer / python / kernel_tests / sdca_ops_test . py <nl> ppp b / tensorflow / contrib / linear_optimizer / python / kernel_tests / sdca_ops_test . py <nl> def make_variable_dict ( max_age , max_gender ) : <nl> dense_features_weights = [ ] ) <nl> <nl> <nl> - def make_dense_variable_dict ( num_dense_features , num_examples ) : <nl> + def make_dense_variable_dict ( num_dense_features ) : <nl> feature_weights = ( [ <nl> tf . Variable ( tf . zeros ( [ 1 ] , <nl> dtype = tf . float32 ) ) <nl> for _ in xrange ( 0 , num_dense_features ) <nl> ] ) <nl> return dict ( sparse_features_weights = [ ] , <nl> - dense_features_weights = feature_weights , <nl> - dual = tf . Variable ( tf . zeros ( <nl> - [ num_examples ] , <nl> - dtype = tf . float32 ) ) , <nl> - primal_loss = tf . Variable ( tf . zeros ( <nl> - [ ] , <nl> - dtype = tf . float64 ) ) ) <nl> + dense_features_weights = feature_weights ) <nl> <nl> <nl> def get_binary_predictions_for_logistic ( predictions , cutoff = 0 . 5 ) : <nl> def testDenseFeaturesWithDefaultWeights ( self ) : <nl> dense_feature_values = [ [ 1 . 0 , 0 . 0 ] , [ 0 . 0 , 1 . 0 ] ] , <nl> weights = [ 1 . 0 , 1 . 0 ] , <nl> labels = [ 10 . 0 , - 5 . 0 ] ) <nl> - variables = make_dense_variable_dict ( 2 , 2 ) <nl> + variables = make_dense_variable_dict ( 2 ) <nl> options = dict ( symmetric_l2_regularization = 1 . 0 , <nl> symmetric_l1_regularization = 0 , <nl> loss_type = ' squared_loss ' ) <nl> def testDenseFeaturesWithArbitraryWeights ( self ) : <nl> dense_feature_values = [ [ 1 . 0 , 0 . 0 ] , [ 0 . 0 , 1 . 0 ] ] , <nl> weights = [ 20 . 0 , 10 . 0 ] , <nl> labels = [ 10 . 0 , - 5 . 0 ] ) <nl> - variables = make_dense_variable_dict ( 2 , 2 ) <nl> + variables = make_dense_variable_dict ( 2 ) <nl> options = dict ( symmetric_l2_regularization = 5 . 0 , <nl> symmetric_l1_regularization = 0 , <nl> loss_type = ' squared_loss ' ) <nl> def testDenseFeaturesPerfectlySeparable ( self ) : <nl> dense_feature_values = [ [ 1 . 0 , 1 . 0 ] , [ 1 . 0 , - 1 . 0 ] ] , <nl> weights = [ 1 . 0 , 1 . 0 ] , <nl> labels = [ 1 . 0 , 0 . 0 ] ) <nl> - variables = make_dense_variable_dict ( 2 , 2 ) <nl> + variables = make_dense_variable_dict ( 2 ) <nl> options = dict ( symmetric_l2_regularization = 1 . 0 , <nl> symmetric_l1_regularization = 0 , <nl> loss_type = ' hinge_loss ' ) <nl> def testDenseFeaturesSeparableWithinMargins ( self ) : <nl> dense_feature_values = [ [ 1 . 0 , 1 . 0 ] , [ 0 . 5 , - 0 . 5 ] ] , <nl> weights = [ 1 . 0 , 1 . 0 ] , <nl> labels = [ 1 . 0 , 0 . 0 ] ) <nl> - variables = make_dense_variable_dict ( 2 , 2 ) <nl> + variables = make_dense_variable_dict ( 2 ) <nl> options = dict ( symmetric_l2_regularization = 1 . 0 , <nl> symmetric_l1_regularization = 0 , <nl> loss_type = ' hinge_loss ' ) <nl> def testDenseFeaturesWeightedExamples ( self ) : <nl> dense_feature_values = [ [ 1 . 0 , 1 . 0 ] , [ 0 . 5 , - 0 . 5 ] ] , <nl> weights = [ 3 . 0 , 1 . 0 ] , <nl> labels = [ 1 . 0 , 0 . 0 ] ) <nl> - variables = make_dense_variable_dict ( 2 , 2 ) <nl> + variables = make_dense_variable_dict ( 2 ) <nl> options = dict ( symmetric_l2_regularization = 1 . 0 , <nl> symmetric_l1_regularization = 0 , <nl> loss_type = ' hinge_loss ' ) <nl>
Remove dual and primal_loss variables from the variables dict created in the auxiliary test method make_dense_variable_dict . These 2 variables are not used anywhere in SDCASolver . Right now this auxiliary method is more confusing than helpful .
tensorflow/tensorflow
d17ec785a85417f34adff9c311e82e8359855c12
2016-06-01T22:20:33Z
mmm a / tensorflow / core / profiler / convert / BUILD <nl> ppp b / tensorflow / core / profiler / convert / BUILD <nl> tf_cc_test ( <nl> " @ com_google_absl / / absl / strings " , <nl> ] , <nl> ) <nl> + <nl> + cc_library ( <nl> + name = " op_stats_combiner " , <nl> + srcs = [ " op_stats_combiner . cc " ] , <nl> + hdrs = [ " op_stats_combiner . h " ] , <nl> + deps = [ <nl> + " : op_metrics_db_combiner " , <nl> + " : xplane_to_tf_functions " , <nl> + " / / tensorflow / core : lib " , <nl> + " / / tensorflow / core / profiler / protobuf : diagnostics_proto_cc " , <nl> + " / / tensorflow / core / profiler / protobuf : hardware_types_proto_cc " , <nl> + " / / tensorflow / core / profiler / protobuf : kernel_stats_proto_cc " , <nl> + " / / tensorflow / core / profiler / protobuf : op_stats_proto_cc " , <nl> + " / / tensorflow / core / profiler / protobuf : steps_db_proto_cc " , <nl> + " / / tensorflow / core / profiler / utils : hardware_type_utils " , <nl> + " / / tensorflow / core / profiler / utils : step_interval " , <nl> + " @ com_google_absl / / absl / container : flat_hash_map " , <nl> + ] , <nl> + ) <nl> new file mode 100644 <nl> index 0000000000000 . . aafc61953a870 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / profiler / convert / op_stats_combiner . cc <nl> <nl> + / * Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / core / profiler / convert / op_stats_combiner . h " <nl> + <nl> + # include " absl / container / flat_hash_map . h " <nl> + # include " tensorflow / core / platform / logging . h " <nl> + # include " tensorflow / core / platform / macros . h " <nl> + # include " tensorflow / core / platform / protobuf . h " <nl> + # include " tensorflow / core / profiler / convert / op_metrics_db_combiner . h " <nl> + # include " tensorflow / core / profiler / convert / xplane_to_tf_functions . h " <nl> + # include " tensorflow / core / profiler / protobuf / diagnostics . pb . h " <nl> + # include " tensorflow / core / profiler / protobuf / hardware_types . pb . h " <nl> + # include " tensorflow / core / profiler / protobuf / kernel_stats . pb . h " <nl> + # include " tensorflow / core / profiler / protobuf / op_stats . pb . h " <nl> + # include " tensorflow / core / profiler / protobuf / steps_db . pb . h " <nl> + # include " tensorflow / core / profiler / utils / hardware_type_utils . h " <nl> + # include " tensorflow / core / profiler / utils / step_interval . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace profiler { <nl> + <nl> + namespace { <nl> + <nl> + / / Combines the src PerCoreStepInfo into the dst PerCoreStepInfo . 
<nl> + void CombinePerCoreStepInfo ( <nl> + int src_host_id , bool use_incomplete_step , const PerCoreStepInfo & src , <nl> + PerCoreStepInfo * dst , <nl> + OpMetricsDbCombiner * hlo_metrics_db_complete_steps_only_combiner , <nl> + OpMetricsDbCombiner * hlo_metrics_db_per_step_combiner ) { <nl> + DCHECK_EQ ( dst - > step_num ( ) , src . step_num ( ) ) ; <nl> + CombineCoreIdMap ( src_host_id , src . step_info_per_core ( ) , <nl> + dst - > mutable_step_info_per_core ( ) ) ; <nl> + if ( ! use_incomplete_step ) { <nl> + hlo_metrics_db_complete_steps_only_combiner - > Combine ( src . hlo_metrics_db ( ) ) ; <nl> + } <nl> + hlo_metrics_db_per_step_combiner - > Combine ( src . hlo_metrics_db ( ) ) ; <nl> + CombineCoreIdMap ( src_host_id , src . flow_db_per_core ( ) , <nl> + dst - > mutable_flow_db_per_core ( ) ) ; <nl> + CombineCoreIdMap ( src_host_id , src . all_reduce_db_per_core ( ) , <nl> + dst - > mutable_all_reduce_db_per_core ( ) ) ; <nl> + CombineCoreIdMap ( src_host_id , src . core_id_to_replica_id_map ( ) , <nl> + dst - > mutable_core_id_to_replica_id_map ( ) ) ; <nl> + } <nl> + <nl> + void CombineStepDatabase ( <nl> + int src_host_id , StepInterval step_intersection , <nl> + const StepDatabaseResult & src , StepDatabaseResult * dst , <nl> + OpMetricsDbCombiner * hlo_metrics_db_complete_steps_only_combiner , <nl> + std : : vector < OpMetricsDbCombiner > * hlo_metrics_db_per_step_combiners ) { <nl> + if ( src . use_incomplete_step ( ) ) { <nl> + dst - > set_use_incomplete_step ( true ) ; <nl> + } <nl> + for ( const PerCoreStepInfo & src_step_info : src . step_sequence ( ) ) { <nl> + uint32 step_num = src_step_info . step_num ( ) ; <nl> + if ( ! step_intersection . Contains ( step_num ) ) { <nl> + continue ; <nl> + } <nl> + uint32 dst_step_sequence_index = step_intersection . Index ( step_num ) ; <nl> + CombinePerCoreStepInfo ( <nl> + src_host_id , src . use_incomplete_step ( ) , src_step_info , <nl> + dst - > mutable_step_sequence ( dst_step_sequence_index ) , <nl> + hlo_metrics_db_complete_steps_only_combiner , <nl> + & ( * hlo_metrics_db_per_step_combiners ) [ dst_step_sequence_index ] ) ; <nl> + } <nl> + } <nl> + <nl> + void CombineRunEnvironment ( const RunEnvironment & src , RunEnvironment * dst ) { <nl> + dst - > mutable_hostnames ( ) - > insert ( src . hostnames ( ) . begin ( ) , <nl> + src . hostnames ( ) . end ( ) ) ; <nl> + dst - > set_host_count ( dst - > hostnames_size ( ) ) ; <nl> + if ( src . device_type ( ) ! = " CPU " ) { <nl> + dst - > set_device_type ( src . device_type ( ) ) ; <nl> + / / TODO ( b / 111402648 ) : Batch size may differ per - core . Currently , we report <nl> + / / the max batch size . We need to come up with a better measure . <nl> + dst - > set_per_core_batch_size ( <nl> + std : : max ( src . per_core_batch_size ( ) , dst - > per_core_batch_size ( ) ) ) ; <nl> + dst - > set_device_core_count ( src . device_core_count ( ) + <nl> + dst - > device_core_count ( ) ) ; <nl> + / / Replica count and num cores per replica must be same for all copies . <nl> + dst - > set_replica_count ( std : : max ( src . replica_count ( ) , dst - > replica_count ( ) ) ) ; <nl> + dst - > set_num_cores_per_replica ( <nl> + std : : max ( src . num_cores_per_replica ( ) , dst - > num_cores_per_replica ( ) ) ) ; <nl> + * dst - > mutable_topology ( ) = src . topology ( ) ; <nl> + } <nl> + dst - > set_task_count ( src . task_count ( ) + dst - > task_count ( ) ) ; <nl> + ( * dst - > mutable_host_independent_job_info ( ) ) = src . 
host_independent_job_info ( ) ; <nl> + for ( const auto & job_info : src . host_dependent_job_info ( ) ) { <nl> + * ( dst - > add_host_dependent_job_info ( ) ) = job_info ; <nl> + } <nl> + dst - > set_host_trace_level ( src . host_trace_level ( ) ) ; <nl> + } <nl> + <nl> + / / Combines the src PerfEnv into the dst PerfEnv . <nl> + void CombinePerfEnv ( const PerfEnv & src , PerfEnv * dst ) { <nl> + dst - > set_peak_tera_flops_per_second ( src . peak_tera_flops_per_second ( ) ) ; <nl> + dst - > set_peak_hbm_bw_giga_bytes_per_second ( <nl> + src . peak_hbm_bw_giga_bytes_per_second ( ) ) ; <nl> + dst - > set_ridge_point ( src . ridge_point ( ) ) ; <nl> + } <nl> + <nl> + / / Combines the src Diagnostics into the dst Diagnostics . <nl> + void CombineDiagnostics ( const Diagnostics & src , Diagnostics * dst ) { <nl> + dst - > mutable_info ( ) - > MergeFrom ( src . info ( ) ) ; <nl> + dst - > mutable_warnings ( ) - > MergeFrom ( src . warnings ( ) ) ; <nl> + dst - > mutable_errors ( ) - > MergeFrom ( src . errors ( ) ) ; <nl> + } <nl> + <nl> + } / / namespace <nl> + <nl> + bool IsCoordinator ( bool no_accelerator_in_system , HardwareType hardware_type ) { <nl> + / / A host is a coordinator if : <nl> + / / ( 1 ) The host doesn ' t have a device , and <nl> + / / ( 2 ) The system does use accelerator ( if not , it uses CPU only and so this <nl> + / / host should be regarded as a worker as well ) . <nl> + return ! HasDevice ( hardware_type ) & & ! no_accelerator_in_system ; <nl> + } <nl> + <nl> + uint32 GlobalCoreId ( int host_id , uint32 device_ordinal ) { <nl> + constexpr uint32 kMaxDevicesPerHost = 1000 ; / / power - of - 10 for debuggability <nl> + return host_id * kMaxDevicesPerHost + device_ordinal ; <nl> + } <nl> + <nl> + void CombineOpStats ( <nl> + bool no_accelerator_in_system , int src_host_id , HardwareType hardware_type , <nl> + StepInterval step_intersection , const OpStats & src , OpStats * dst , <nl> + OpMetricsDbCombiner * host_op_metrics_db_combiner , <nl> + OpMetricsDbCombiner * device_op_metrics_db_combiner , <nl> + OpMetricsDbCombiner * hlo_metrics_db_complete_steps_only_combiner , <nl> + std : : vector < OpMetricsDbCombiner > * hlo_metrics_db_per_step_combiners ) { <nl> + / / Combine host_metrics_db . <nl> + host_op_metrics_db_combiner - > Combine ( src . host_op_metrics_db ( ) ) ; <nl> + / / Combine device_metrics_db . <nl> + device_op_metrics_db_combiner - > Combine ( src . device_op_metrics_db ( ) ) ; <nl> + <nl> + / / Combine step_db . <nl> + if ( ! IsCoordinator ( no_accelerator_in_system , hardware_type ) ) { <nl> + CombineStepDatabase ( src_host_id , step_intersection , src . step_db ( ) , <nl> + dst - > mutable_step_db ( ) , <nl> + hlo_metrics_db_complete_steps_only_combiner , <nl> + hlo_metrics_db_per_step_combiners ) ; <nl> + } <nl> + <nl> + / / Combine run environment info . <nl> + CombineRunEnvironment ( src . run_environment ( ) , dst - > mutable_run_environment ( ) ) ; <nl> + <nl> + / / Combine the perf environment info . <nl> + CombinePerfEnv ( src . perf_env ( ) , dst - > mutable_perf_env ( ) ) ; <nl> + <nl> + / / Combine diagnostics . <nl> + CombineDiagnostics ( src . diagnostics ( ) , dst - > mutable_diagnostics ( ) ) ; <nl> + <nl> + / / Combine kernel stats . <nl> + dst - > mutable_kernel_stats_db ( ) - > mutable_reports ( ) - > MergeFrom ( <nl> + src . kernel_stats_db ( ) . reports ( ) ) ; <nl> + <nl> + / / Combine tf - function stats . <nl> + CombineTfFunctionDb ( src . 
tf_function_db ( ) , dst - > mutable_tf_function_db ( ) ) ; <nl> + } <nl> + <nl> + } / / namespace profiler <nl> + } / / namespace tensorflow <nl> new file mode 100644 <nl> index 0000000000000 . . 1b338c938e249 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / profiler / convert / op_stats_combiner . h <nl> <nl> + / * Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # ifndef TENSORFLOW_CORE_PROFILER_CONVERT_OP_STATS_COMBINER_H_ <nl> + # define TENSORFLOW_CORE_PROFILER_CONVERT_OP_STATS_COMBINER_H_ <nl> + <nl> + # include " absl / container / flat_hash_map . h " <nl> + # include " tensorflow / core / platform / logging . h " <nl> + # include " tensorflow / core / platform / macros . h " <nl> + # include " tensorflow / core / profiler / convert / op_metrics_db_combiner . h " <nl> + # include " tensorflow / core / profiler / protobuf / hardware_types . pb . h " <nl> + # include " tensorflow / core / profiler / protobuf / op_stats . pb . h " <nl> + # include " tensorflow / core / profiler / utils / step_interval . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace profiler { <nl> + <nl> + / / Whether a host is a coordinator . <nl> + bool IsCoordinator ( bool no_accelerator_in_system , HardwareType hardware_type ) ; <nl> + <nl> + / / Translates the core id from single host to the one for multiple - host . <nl> + / / We need this translation because the device_ordinal was assigned when a <nl> + / / single host response was given . Now , we need a global core_id to distinguish <nl> + / / it with multiple hosts . <nl> + uint32 GlobalCoreId ( int host_id , uint32 device_ordinal ) ; <nl> + <nl> + / / Combines the src map into the dst map . <nl> + / / The src map keys are local core_ids . The src_host_id is used to convert them <nl> + / / into global core_ids used as keys in the dst map . <nl> + / / REQUIRED : cores from src_host_id are not already in dst . <nl> + template < typename CoreIdMap > <nl> + void CombineCoreIdMap ( int src_host_id , const CoreIdMap & src , CoreIdMap * dst ) { <nl> + for ( const auto & core_id_and_value : src ) { <nl> + uint32 global_core_id = GlobalCoreId ( src_host_id , core_id_and_value . first ) ; <nl> + auto iter_and_inserted = <nl> + dst - > insert ( { global_core_id , core_id_and_value . second } ) ; <nl> + DCHECK ( iter_and_inserted . second ) <nl> + < < " Duplicated core_id : " < < iter_and_inserted . first - > first ; <nl> + } <nl> + } <nl> + <nl> + / / Combine the src OpStats into the dst OpStats . 
<nl> + void CombineOpStats ( <nl> + bool no_accelerator_in_system , int src_host_id , HardwareType hardware_type , <nl> + StepInterval step_intersection , const OpStats & src , OpStats * dst , <nl> + OpMetricsDbCombiner * host_op_metrics_db_combiner , <nl> + OpMetricsDbCombiner * device_op_metrics_db_combiner , <nl> + OpMetricsDbCombiner * hlo_metrics_db_complete_steps_only_combiner , <nl> + std : : vector < OpMetricsDbCombiner > * hlo_metrics_db_per_step_combiners ) ; <nl> + <nl> + } / / namespace profiler <nl> + } / / namespace tensorflow <nl> + <nl> + # endif / / TENSORFLOW_CORE_PROFILER_CONVERT_OP_STATS_COMBINER_H_ <nl> mmm a / tensorflow / core / profiler / utils / hardware_type_utils . cc <nl> ppp b / tensorflow / core / profiler / utils / hardware_type_utils . cc <nl> HardwareType ParseHardwareType ( absl : : string_view device_type ) { <nl> return HardwareType : : UNKNOWN_HARDWARE ; <nl> } <nl> <nl> + bool HasDevice ( HardwareType x ) { return x > tensorflow : : profiler : : CPU_ONLY ; } <nl> + <nl> } / / namespace profiler <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / profiler / utils / hardware_type_utils . h <nl> ppp b / tensorflow / core / profiler / utils / hardware_type_utils . h <nl> double GetFlopMaxThroughputPerSM ( const DeviceCapabilities & device_cap ) ; <nl> <nl> HardwareType ParseHardwareType ( absl : : string_view device_type ) ; <nl> <nl> + / / Returns true if the given hardware type has a device . <nl> + bool HasDevice ( HardwareType x ) ; <nl> + <nl> } / / namespace profiler <nl> } / / namespace tensorflow <nl> <nl>
Implement an OpStats combiner , with fixes for the tensorflow build .
tensorflow/tensorflow
4a85d439a03426536d0ac114973cc43bd1adcbcc
2020-09-03T20:51:41Z
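The combiner above rekeys per-core maps by a global core id of host_id * 1000 + device_ordinal before merging, so entries from different hosts cannot collide. The standalone sketch below reproduces that rekey-and-merge idea with plain std::map and an illustrative value type; the real code operates on protobuf maps.

// Minimal sketch of the core-id translation and per-host map merge.
#include <iostream>
#include <map>
#include <string>

constexpr unsigned kMaxDevicesPerHost = 1000;  // power of 10 for debuggability

unsigned globalCoreId(int host_id, unsigned device_ordinal) {
    return host_id * kMaxDevicesPerHost + device_ordinal;
}

// Merge a per-core map from one host into the combined map, rekeying by
// global core id. A duplicate global id means the same host was combined
// twice, which the real code treats as a programming error.
void combineCoreIdMap(int src_host_id,
                      const std::map<unsigned, std::string>& src,
                      std::map<unsigned, std::string>* dst) {
    for (const auto& entry : src) {
        unsigned global_id = globalCoreId(src_host_id, entry.first);
        if (!dst->insert({global_id, entry.second}).second)
            std::cerr << "duplicated core_id " << global_id << "\n";
    }
}

int main() {
    std::map<unsigned, std::string> combined;
    combineCoreIdMap(0, {{0, "host0/core0"}, {1, "host0/core1"}}, &combined);
    combineCoreIdMap(1, {{0, "host1/core0"}}, &combined);
    for (const auto& entry : combined)
        std::cout << entry.first << " -> " << entry.second << "\n";
    // Prints keys 0, 1 and 1000.
    return 0;
}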
mmm a / modules / dnn / include / opencv2 / dnn / all_layers . hpp <nl> ppp b / modules / dnn / include / opencv2 / dnn / all_layers . hpp <nl> CV__DNN_EXPERIMENTAL_NS_BEGIN <nl> bool globalPooling ; <nl> bool computeMaxIdx ; <nl> String padMode ; <nl> + bool ceilMode ; <nl> <nl> static Ptr < PoolingLayer > create ( const LayerParams & params ) ; <nl> } ; <nl> CV__DNN_EXPERIMENTAL_NS_BEGIN <nl> static Ptr < SoftmaxLayer > create ( const LayerParams & params ) ; <nl> } ; <nl> <nl> + class CV_EXPORTS LPNormalizeLayer : public Layer <nl> + { <nl> + public : <nl> + float pnorm , epsilon ; <nl> + <nl> + static Ptr < LPNormalizeLayer > create ( const LayerParams & params ) ; <nl> + } ; <nl> + <nl> class CV_EXPORTS InnerProductLayer : public Layer <nl> { <nl> public : <nl> CV__DNN_EXPERIMENTAL_NS_BEGIN <nl> { <nl> public : <nl> int axis ; <nl> + / * * <nl> + * @ brief Add zero padding in case of concatenation of blobs with different <nl> + * spatial sizes . <nl> + * <nl> + * Details : https : / / github . com / torch / nn / blob / master / doc / containers . md # depthconcat <nl> + * / <nl> + bool padding ; <nl> <nl> static Ptr < ConcatLayer > create ( const LayerParams & params ) ; <nl> } ; <nl> mmm a / modules / dnn / src / dnn . cpp <nl> ppp b / modules / dnn / src / dnn . cpp <nl> struct Net : : Impl <nl> / / ( and so we eliminate the concatenation layer , because the channels <nl> / / are concatenated implicitly ) . <nl> Ptr < ConcatLayer > concatLayer = ld . layerInstance . dynamicCast < ConcatLayer > ( ) ; <nl> - if ( ! concatLayer . empty ( ) & & concatLayer - > axis = = 1 & & <nl> + if ( ! concatLayer . empty ( ) & & concatLayer - > axis = = 1 & & ! concatLayer - > padding & & <nl> ld . outputBlobs . size ( ) = = 1 ) <nl> { <nl> Mat & output = ld . outputBlobs [ 0 ] ; <nl> mmm a / modules / dnn / src / init . cpp <nl> ppp b / modules / dnn / src / init . cpp <nl> void initializeLayerFactory ( ) <nl> CV_DNN_REGISTER_LAYER_CLASS ( InnerProduct , InnerProductLayer ) ; <nl> CV_DNN_REGISTER_LAYER_CLASS ( Softmax , SoftmaxLayer ) ; <nl> CV_DNN_REGISTER_LAYER_CLASS ( MVN , MVNLayer ) ; <nl> + CV_DNN_REGISTER_LAYER_CLASS ( LPNormalize , LPNormalizeLayer ) ; <nl> <nl> CV_DNN_REGISTER_LAYER_CLASS ( ReLU , ReLULayer ) ; <nl> CV_DNN_REGISTER_LAYER_CLASS ( ChannelsPReLU , ChannelsPReLULayer ) ; <nl> mmm a / modules / dnn / src / layers / concat_layer . cpp <nl> ppp b / modules / dnn / src / layers / concat_layer . cpp <nl> class ConcatLayerImpl : public ConcatLayer <nl> { <nl> setParamsFrom ( params ) ; <nl> axis = params . get < int > ( " axis " , 1 ) ; <nl> + padding = params . get < bool > ( " padding " , false ) ; <nl> } <nl> <nl> virtual bool getMemoryShapes ( const std : : vector < MatShape > & inputs , <nl> class ConcatLayerImpl : public ConcatLayer <nl> std : : vector < MatShape > & internals ) const <nl> { <nl> CV_Assert ( inputs . size ( ) > 0 ) ; <nl> - outputs . clear ( ) ; <nl> - outputs . push_back ( inputs [ 0 ] ) ; <nl> + outputs . resize ( 1 , inputs [ 0 ] ) ; <nl> int cAxis = clamp ( axis , inputs [ 0 ] ) ; <nl> <nl> int axisSum = 0 ; <nl> class ConcatLayerImpl : public ConcatLayer <nl> { <nl> MatShape curShape = inputs [ i ] ; <nl> <nl> - CV_Assert ( curShape . size ( ) = = outputs . back ( ) . size ( ) ) ; <nl> - for ( int curAxis = 0 ; curAxis < outputs . back ( ) . size ( ) ; curAxis + + ) <nl> + if ( padding ) <nl> { <nl> - if ( curAxis ! = cAxis & & outputs . back ( ) [ curAxis ] ! 
= curShape [ curAxis ] ) <nl> - CV_Error ( Error : : StsBadSize , " Inconsitent shape for ConcatLayer " ) ; <nl> + for ( int curAxis = 0 ; curAxis < outputs [ 0 ] . size ( ) ; curAxis + + ) <nl> + { <nl> + outputs [ 0 ] [ curAxis ] = std : : max ( outputs [ 0 ] [ curAxis ] , curShape [ curAxis ] ) ; <nl> + } <nl> + } <nl> + else <nl> + { <nl> + CV_Assert ( curShape . size ( ) = = outputs [ 0 ] . size ( ) ) ; <nl> + for ( int curAxis = 0 ; curAxis < outputs [ 0 ] . size ( ) ; curAxis + + ) <nl> + { <nl> + if ( curAxis ! = cAxis & & outputs [ 0 ] [ curAxis ] ! = curShape [ curAxis ] ) <nl> + CV_Error ( Error : : StsBadSize , " Inconsitent shape for ConcatLayer " ) ; <nl> + } <nl> } <nl> <nl> axisSum + = curShape [ cAxis ] ; <nl> } <nl> - <nl> - outputs . back ( ) [ cAxis ] = axisSum ; <nl> - <nl> + outputs [ 0 ] [ cAxis ] = axisSum ; <nl> return false ; <nl> } <nl> <nl> virtual bool supportBackend ( int backendId ) <nl> { <nl> return backendId = = DNN_BACKEND_DEFAULT | | <nl> - backendId = = DNN_BACKEND_HALIDE & & haveHalide ( ) & & axis = = 1 ; / / By channels <nl> + backendId = = DNN_BACKEND_HALIDE & & haveHalide ( ) & & axis = = 1 & & ! padding ; / / By channels <nl> } <nl> <nl> class ChannelConcatInvoker : public ParallelLoopBody <nl> class ConcatLayerImpl : public ConcatLayer <nl> int cAxis = clamp ( axis , inputs [ 0 ] - > dims ) ; <nl> Mat & outMat = outputs [ 0 ] ; <nl> <nl> - if ( cAxis = = 1 & & outMat . dims = = 4 ) <nl> + if ( padding ) <nl> + outMat . setTo ( 0 ) ; <nl> + <nl> + if ( cAxis = = 1 & & outMat . dims = = 4 & & ! padding ) <nl> { <nl> int nstripes = getNumThreads ( ) ; <nl> ChannelConcatInvoker : : run ( inputs , outMat , nstripes ) ; <nl> class ConcatLayerImpl : public ConcatLayer <nl> for ( size_t i = 0 ; i < inputs . size ( ) ; i + + ) <nl> { <nl> ranges [ cAxis ] . end = ranges [ cAxis ] . start + inputs [ i ] - > size [ cAxis ] ; <nl> + for ( int j = 0 ; j < outMat . dims ; + + j ) <nl> + { <nl> + if ( j = = cAxis ) continue ; <nl> + ranges [ j ] . start = ( outMat . size [ j ] - inputs [ i ] - > size [ j ] ) / 2 ; <nl> + ranges [ j ] . end = ranges [ j ] . start + inputs [ i ] - > size [ j ] ; <nl> + } <nl> inputs [ i ] - > copyTo ( outMat ( & ranges [ 0 ] ) ) ; <nl> ranges [ cAxis ] . start = ranges [ cAxis ] . end ; <nl> } <nl> mmm a / modules / dnn / src / layers / convolution_layer . cpp <nl> ppp b / modules / dnn / src / layers / convolution_layer . cpp <nl> class ConvolutionLayerImpl : public BaseConvolutionLayerImpl <nl> } <nl> <nl> int ngroups = inpCn / blobs [ 0 ] . size [ 1 ] ; <nl> - CV_Assert ( inpCn % ngroups = = 0 & & outCn % ngroups = = 0 ) ; <nl> + CV_Assert ( ngroups > 0 & & inpCn % ngroups = = 0 & & outCn % ngroups = = 0 ) ; <nl> <nl> int dims [ ] = { inputs [ 0 ] [ 0 ] , outCn , out . height , out . width } ; <nl> outputs . resize ( inputs . size ( ) , shape ( dims ) ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 9efb9b80b4e <nl> mmm / dev / null <nl> ppp b / modules / dnn / src / layers / lp_normalize_layer . cpp <nl> <nl> + / / This file is part of OpenCV project . <nl> + / / It is subject to the license terms in the LICENSE file found in the top - level directory <nl> + / / of this distribution and at http : / / opencv . org / license . html . <nl> + / / <nl> + / / Copyright ( C ) 2017 , Intel Corporation , all rights reserved . <nl> + / / Third party copyrights are property of their respective owners . <nl> + <nl> + # include " . . / precomp . hpp " <nl> + # include " layers_common . 
hpp " <nl> + # include < iostream > <nl> + namespace cv { namespace dnn { <nl> + <nl> + class LPNormalizeLayerImpl : public LPNormalizeLayer <nl> + { <nl> + public : <nl> + <nl> + LPNormalizeLayerImpl ( const LayerParams & params ) <nl> + { <nl> + setParamsFrom ( params ) ; <nl> + pnorm = params . get < float > ( " p " , 2 ) ; <nl> + epsilon = params . get < float > ( " eps " , 1e - 10f ) ; <nl> + CV_Assert ( pnorm > 0 ) ; <nl> + } <nl> + <nl> + bool getMemoryShapes ( const std : : vector < MatShape > & inputs , <nl> + const int requiredOutputs , <nl> + std : : vector < MatShape > & outputs , <nl> + std : : vector < MatShape > & internals ) const <nl> + { <nl> + Layer : : getMemoryShapes ( inputs , requiredOutputs , outputs , internals ) ; <nl> + if ( pnorm ! = 1 & & pnorm ! = 2 ) <nl> + { <nl> + internals . resize ( 1 , inputs [ 0 ] ) ; <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + virtual bool supportBackend ( int backendId ) <nl> + { <nl> + return backendId = = DNN_BACKEND_DEFAULT ; <nl> + } <nl> + <nl> + void forward ( std : : vector < Mat * > & inputs , std : : vector < Mat > & outputs , std : : vector < Mat > & internals ) <nl> + { <nl> + CV_TRACE_FUNCTION ( ) ; <nl> + CV_TRACE_ARG_VALUE ( name , " name " , name . c_str ( ) ) ; <nl> + <nl> + CV_Assert ( inputs [ 0 ] - > total ( ) = = outputs [ 0 ] . total ( ) ) ; <nl> + float norm ; <nl> + if ( pnorm = = 1 ) <nl> + norm = cv : : norm ( * inputs [ 0 ] , NORM_L1 ) ; <nl> + else if ( pnorm = = 2 ) <nl> + norm = cv : : norm ( * inputs [ 0 ] , NORM_L2 ) ; <nl> + else <nl> + { <nl> + pow ( abs ( * inputs [ 0 ] ) , pnorm , internals [ 0 ] ) ; <nl> + norm = pow ( sum ( internals [ 0 ] ) [ 0 ] , 1 . 0f / pnorm ) ; <nl> + } <nl> + multiply ( * inputs [ 0 ] , 1 . 0f / ( norm + epsilon ) , outputs [ 0 ] ) ; <nl> + } <nl> + <nl> + int64 getFLOPS ( const std : : vector < MatShape > & inputs , <nl> + const std : : vector < MatShape > & ) const <nl> + { <nl> + int64 flops = 0 ; <nl> + for ( int i = 0 ; i < inputs . size ( ) ; i + + ) <nl> + flops + = 3 * total ( inputs [ i ] ) ; <nl> + return flops ; <nl> + } <nl> + } ; <nl> + <nl> + Ptr < LPNormalizeLayer > LPNormalizeLayer : : create ( const LayerParams & params ) <nl> + { <nl> + return Ptr < LPNormalizeLayer > ( new LPNormalizeLayerImpl ( params ) ) ; <nl> + } <nl> + <nl> + } / / namespace dnn <nl> + } / / namespace cv <nl> mmm a / modules / dnn / src / layers / pooling_layer . cpp <nl> ppp b / modules / dnn / src / layers / pooling_layer . cpp <nl> namespace cv <nl> namespace dnn <nl> { <nl> <nl> - / / TODO : add ceil_mode param <nl> class PoolingLayerImpl : public PoolingLayer <nl> { <nl> public : <nl> class PoolingLayerImpl : public PoolingLayer <nl> getPoolingKernelParams ( params , kernel . height , kernel . width , globalPooling , <nl> pad . height , pad . width , stride . height , stride . width , padMode ) ; <nl> setParamsFrom ( params ) ; <nl> + ceilMode = params . get < bool > ( " ceil_mode " , true ) ; <nl> } <nl> <nl> void finalize ( const std : : vector < Mat * > & inputs , std : : vector < Mat > & outputs ) <nl> class PoolingLayerImpl : public PoolingLayer <nl> } <nl> else if ( padMode . empty ( ) ) <nl> { <nl> - / / Yeah , something strange Caffe scheme - ) <nl> - out . height = static_cast < int > ( ceil ( static_cast < float > ( in . height + 2 * pad . height - <nl> - kernel . height ) / stride . height ) ) + 1 ; <nl> - out . width = static_cast < int > ( ceil ( static_cast < float > ( in . width + 2 * pad . width - <nl> - kernel . width ) / stride . 
width ) ) + 1 ; <nl> + float height = ( float ) ( in . height + 2 * pad . height - kernel . height ) / stride . height ; <nl> + float width = ( float ) ( in . width + 2 * pad . width - kernel . width ) / stride . width ; <nl> + out . height = 1 + ( ceilMode ? ceil ( height ) : floor ( height ) ) ; <nl> + out . width = 1 + ( ceilMode ? ceil ( width ) : floor ( width ) ) ; <nl> <nl> if ( pad . height | | pad . width ) <nl> { <nl> mmm a / modules / dnn / src / layers / reshape_layer . cpp <nl> ppp b / modules / dnn / src / layers / reshape_layer . cpp <nl> static void computeShapeByReshapeMask ( const MatShape & srcShape , <nl> if ( explicitMask ) <nl> { <nl> int maskTotal = total ( maskShape ) ; <nl> - for ( int i = srcRange . start + 1 ; i < srcRange . end ; + + i ) <nl> + / / Go from the end of mask until we collect required total . <nl> + bool matched = false ; <nl> + for ( int i = srcRange . end - 1 ; i > = srcRange . start ; - - i ) <nl> { <nl> - if ( total ( srcShape , i , srcRange . end ) ! = maskTotal ) <nl> + if ( matched ) <nl> { <nl> - srcRange . start = i - 1 ; <nl> - break ; <nl> + if ( i = = 0 | | total ( srcShape , i , srcRange . end ) ! = maskTotal ) <nl> + { <nl> + srcRange . start = i + 1 ; <nl> + break ; <nl> + } <nl> + } <nl> + else <nl> + { <nl> + matched = total ( srcShape , i , srcRange . end ) = = maskTotal ; <nl> } <nl> } <nl> CV_Assert ( total ( srcShape , srcRange . start , srcRange . end ) = = maskTotal ) ; <nl> mmm a / modules / dnn / src / torch / torch_importer . cpp <nl> ppp b / modules / dnn / src / torch / torch_importer . cpp <nl> struct TorchImporter : public : : cv : : dnn : : Importer <nl> if ( vtype = = TYPE_TORCH ) <nl> { <nl> int index = readInt ( ) ; <nl> + int numModules = curModule - > modules . size ( ) ; <nl> readTorchObject ( index ) ; <nl> <nl> if ( tensors . count ( index ) ) / / tensor was readed <nl> struct TorchImporter : public : : cv : : dnn : : Importer <nl> DictValue scalar = DictValue : : arrayReal ( matCasted . ptr < double > ( ) , matCasted . total ( ) ) ; <nl> scalarParams . set ( key , scalar ) ; <nl> } <nl> + else <nl> + { <nl> + / / Only tensors and scalars are supported for table fields . <nl> + / / i . e . nn . Inception has field ` transfer ` which is an <nl> + / / activation layer . So we remove added modules as readTorchObject ( index ) . <nl> + while ( curModule - > modules . size ( ) > numModules ) <nl> + curModule - > modules . pop_back ( ) ; <nl> + } <nl> } <nl> else if ( vtype = = TYPE_NUMBER ) <nl> { <nl> struct TorchImporter : public : : cv : : dnn : : Importer <nl> layerParams . set ( " torch_index " , index ) ; <nl> <nl> if ( nnName = = " Sequential " | | nnName = = " Parallel " | | <nl> - nnName = = " Concat " | | nnName = = " ConcatTable " | | nnName = = " JoinTable " ) <nl> + nnName = = " Concat " | | nnName = = " ConcatTable " | | nnName = = " JoinTable " | | <nl> + nnName = = " DepthConcat " | | nnName = = " Inception " ) <nl> { <nl> Module * parentModule = curModule ; <nl> curModule - > modules . push_back ( newModule ) ; <nl> struct TorchImporter : public : : cv : : dnn : : Importer <nl> { <nl> layerParams . set ( " dimension " , scalarParams . get < int > ( " dimension " ) ) ; <nl> } <nl> + if ( nnName = = " DepthConcat " ) <nl> + { <nl> + layerParams . set ( " dimension " , scalarParams . 
get < int > ( " dimension " ) ) ; <nl> + } <nl> } <nl> - else if ( nnName = = " SpatialConvolution " ) <nl> + else if ( nnName = = " SpatialConvolution " | | nnName = = " SpatialConvolutionMM " ) <nl> { <nl> newModule - > apiType = " Convolution " ; <nl> readTorchTable ( scalarParams , tensorParams ) ; <nl> struct TorchImporter : public : : cv : : dnn : : Importer <nl> layerParams . set ( " num_output " , scalarParams . get < int > ( " nOutputPlane " ) ) ; <nl> convertTorchKernelsParams ( scalarParams , layerParams ) ; <nl> <nl> + if ( nnName = = " SpatialConvolutionMM " ) <nl> + { <nl> + / / Split weights from a [ outCh x inCh * kH * kW ] 2D matrix <nl> + / / onto a 4D [ outCh x inCh x kH x kW ] blob . <nl> + CV_Assert ( layerParams . blobs [ 0 ] . dims = = 2 ) ; <nl> + const int kernel = layerParams . blobs [ 0 ] . size [ 1 ] ; / / inCh * kH * kW <nl> + MatShape kernelShape ( 4 ) ; <nl> + kernelShape [ 0 ] = layerParams . blobs [ 0 ] . size [ 0 ] ; / / outCh . <nl> + kernelShape [ 2 ] = layerParams . get < int > ( " kernel_h " ) ; <nl> + kernelShape [ 3 ] = layerParams . get < int > ( " kernel_w " ) ; <nl> + kernelShape [ 1 ] = kernel / ( kernelShape [ 2 ] * kernelShape [ 3 ] ) ; / / inCh . <nl> + layerParams . blobs [ 0 ] = layerParams . blobs [ 0 ] . reshape ( 1 , kernelShape ) ; <nl> + } <nl> curModule - > modules . push_back ( newModule ) ; <nl> } <nl> + else if ( nnName = = " SpatialLPPooling " ) <nl> + { <nl> + / / nn . Sequential { <nl> + / / [ input - > ( 1 ) - > ( 2 ) - > output ] <nl> + / / ( 1 ) : nn . Sequential { <nl> + / / [ input - > ( 1 ) - > ( 2 ) - > ( 3 ) - > ( 4 ) - > output ] <nl> + / / ( 1 ) : nn . Power <nl> + / / ( 2 ) : nn . SpatialAveragePooling ( . . . ) <nl> + / / ( 3 ) : nn . MulConstant <nl> + / / ( 4 ) : nn . Power <nl> + / / } <nl> + / / ( 2 ) : nn . Sigmoid <nl> + / / } <nl> + / / nn . SpatialLPPooling is just a table so we skip it . <nl> + readTorchTable ( scalarParams , tensorParams ) ; <nl> + } <nl> else if ( nnName = = " SpatialMaxPooling " | | nnName = = " SpatialAveragePooling " ) <nl> { <nl> newModule - > apiType = " Pooling " ; <nl> struct TorchImporter : public : : cv : : dnn : : Importer <nl> layerParams . set ( " pool " , " AVE " ) ; <nl> convertTorchKernelsParams ( scalarParams , layerParams ) ; <nl> <nl> + CV_Assert ( scalarParams . has ( " ceil_mode " ) ) ; <nl> + layerParams . set ( " ceil_mode " , scalarParams . get < bool > ( " ceil_mode " ) ) ; <nl> + <nl> curModule - > modules . push_back ( newModule ) ; <nl> } <nl> else if ( nnName = = " Linear " ) <nl> struct TorchImporter : public : : cv : : dnn : : Importer <nl> layerParams . set ( " num_output " , weightBlob . size [ 0 ] ) ; <nl> curModule - > modules . push_back ( newModule ) ; <nl> } <nl> - else if ( nnName = = " Reshape " ) <nl> + else if ( nnName = = " Reshape " | | nnName = = " View " ) <nl> { <nl> newModule - > apiType = " Reshape " ; <nl> <nl> struct TorchImporter : public : : cv : : dnn : : Importer <nl> newModule - > apiType = " BatchNorm " ; <nl> readTorchTable ( scalarParams , tensorParams ) ; <nl> <nl> - CV_Assert ( tensorParams . count ( " running_var " ) & & <nl> - tensorParams . count ( " running_mean " ) ) ; <nl> - layerParams . blobs . push_back ( tensorParams [ " running_mean " ] . second ) ; <nl> - layerParams . blobs . push_back ( tensorParams [ " running_var " ] . second ) ; <nl> - <nl> CV_Assert ( scalarParams . has ( " eps " ) ) ; <nl> float eps = float ( scalarParams . get < double > ( " eps " ) ) ; <nl> layerParams . 
set ( " eps " , eps ) ; <nl> <nl> + CV_Assert ( ( tensorParams . count ( " running_var " ) | | tensorParams . count ( " running_std " ) ) & & <nl> + tensorParams . count ( " running_mean " ) ) ; <nl> + layerParams . blobs . push_back ( tensorParams [ " running_mean " ] . second ) ; <nl> + if ( tensorParams . count ( " running_var " ) ) <nl> + { <nl> + layerParams . blobs . push_back ( tensorParams [ " running_var " ] . second ) ; <nl> + } <nl> + else <nl> + { <nl> + layerParams . blobs . push_back ( tensorParams [ " running_std " ] . second ) ; <nl> + pow ( layerParams . blobs . back ( ) , - 2 , layerParams . blobs . back ( ) ) ; <nl> + subtract ( layerParams . blobs . back ( ) , eps , layerParams . blobs . back ( ) ) ; <nl> + } <nl> + <nl> if ( tensorParams . count ( " weight " ) ) <nl> { <nl> layerParams . set ( " has_weight " , true ) ; <nl> struct TorchImporter : public : : cv : : dnn : : Importer <nl> newModule - > apiType = " Identity " ; <nl> curModule - > modules . push_back ( newModule ) ; <nl> } <nl> + else if ( nnName = = " Normalize " ) <nl> + { <nl> + readTorchTable ( scalarParams , tensorParams ) ; <nl> + CV_Assert ( scalarParams . has ( " p " ) ) ; <nl> + <nl> + layerParams . set ( " p " , scalarParams . get < float > ( " p " ) ) ; <nl> + if ( scalarParams . has ( " eps " ) ) <nl> + layerParams . set ( " eps " , scalarParams . get < float > ( " eps " ) ) ; <nl> + <nl> + newModule - > apiType = " LPNormalize " ; <nl> + curModule - > modules . push_back ( newModule ) ; <nl> + } <nl> else if ( nnName = = " Padding " ) <nl> { <nl> readTorchTable ( scalarParams , tensorParams ) ; <nl> struct TorchImporter : public : : cv : : dnn : : Importer <nl> layerParams . set ( " log_softmax " , true ) ; <nl> curModule - > modules . push_back ( newModule ) ; <nl> } <nl> + else if ( nnName = = " SpatialCrossMapLRN " ) <nl> + { <nl> + newModule - > apiType = " LRN " ; <nl> + readTorchTable ( scalarParams , tensorParams ) ; <nl> + <nl> + CV_Assert ( scalarParams . has ( " alpha " ) ) ; <nl> + CV_Assert ( scalarParams . has ( " beta " ) ) ; <nl> + CV_Assert ( scalarParams . has ( " k " ) ) ; <nl> + CV_Assert ( scalarParams . has ( " size " ) ) ; <nl> + <nl> + layerParams . set ( " norm_region " , " ACROSS_CHANNELS " ) ; <nl> + layerParams . set ( " alpha " , scalarParams . get < float > ( " alpha " ) ) ; <nl> + layerParams . set ( " beta " , scalarParams . get < float > ( " beta " ) ) ; <nl> + layerParams . set ( " bias " , scalarParams . get < float > ( " k " ) ) ; <nl> + layerParams . set ( " local_size " , scalarParams . get < int > ( " size " ) ) ; <nl> + layerParams . set ( " norm_by_size " , true ) ; <nl> + <nl> + curModule - > modules . push_back ( newModule ) ; <nl> + } <nl> + else if ( nnName = = " Square " | | nnName = = " Sqrt " | | nnName = = " Power " ) <nl> + { <nl> + readTorchTable ( scalarParams , tensorParams ) ; <nl> + <nl> + float power ; <nl> + if ( nnName = = " Square " ) power = 2 . 0f ; <nl> + else if ( nnName = = " Sqrt " ) power = 0 . 5f ; <nl> + else if ( nnName = = " Power " ) power = scalarParams . get < float > ( " pow " , 1 . 0f ) ; <nl> + <nl> + newModule - > apiType = " Power " ; <nl> + layerParams . set ( " power " , power ) ; <nl> + curModule - > modules . push_back ( newModule ) ; <nl> + } <nl> + else if ( nnName = = " MulConstant " ) <nl> + { <nl> + readTorchTable ( scalarParams , tensorParams ) ; <nl> + CV_Assert ( scalarParams . has ( " constant_scalar " ) ) ; <nl> + newModule - > apiType = " Power " ; <nl> + layerParams . set ( " scale " , scalarParams . 
get < float > ( " constant_scalar " ) ) ; <nl> + curModule - > modules . push_back ( newModule ) ; <nl> + } <nl> else <nl> { <nl> CV_Error ( Error : : StsNotImplemented , " Unknown nn class \ " " + className + " \ " " ) ; <nl> struct TorchImporter : public : : cv : : dnn : : Importer <nl> } <nl> else <nl> { <nl> - if ( module - > thName = = " Sequential " ) <nl> + if ( module - > thName = = " Sequential " | | module - > thName = = " Inception " ) <nl> { <nl> for ( size_t i = 0 ; i < module - > modules . size ( ) ; i + + ) <nl> { <nl> struct TorchImporter : public : : cv : : dnn : : Importer <nl> addedModules . push_back ( std : : make_pair ( mergeId , module ) ) ; <nl> return mergeId ; <nl> } <nl> + else if ( module - > thName = = " DepthConcat " ) <nl> + { <nl> + int newId , mergeId ; <nl> + LayerParams mergeParams ; <nl> + mergeParams . set ( " axis " , module - > params . get < int > ( " dimension " ) - 1 ) ; <nl> + mergeParams . set ( " padding " , true ) ; <nl> + <nl> + std : : vector < int > branchIds ; <nl> + for ( int i = 0 ; i < ( int ) module - > modules . size ( ) ; i + + ) <nl> + { <nl> + newId = fill ( module - > modules [ i ] , addedModules , prevLayerId , prevOutNum ) ; <nl> + branchIds . push_back ( newId ) ; <nl> + } <nl> + <nl> + mergeId = net . addLayer ( generateLayerName ( " torchMerge " ) , " Concat " , mergeParams ) ; <nl> + <nl> + for ( int i = 0 ; i < branchIds . size ( ) ; i + + ) <nl> + { <nl> + net . connect ( branchIds [ i ] , 0 , mergeId , i ) ; <nl> + } <nl> + <nl> + addedModules . push_back ( std : : make_pair ( mergeId , module ) ) ; <nl> + return mergeId ; <nl> + } <nl> else if ( module - > thName = = " Parallel " ) <nl> { <nl> int newId , splitId , mergeId , reshapeId ; <nl> mmm a / modules / dnn / test / test_torch_importer . cpp <nl> ppp b / modules / dnn / test / test_torch_importer . 
cpp <nl> using namespace cv : : dnn ; <nl> template < typename TStr > <nl> static std : : string _tf ( TStr filename , bool inTorchDir = true ) <nl> { <nl> - String path = getOpenCVExtraDir ( ) + " / dnn / " ; <nl> + String path = " dnn / " ; <nl> if ( inTorchDir ) <nl> path + = " torch / " ; <nl> path + = filename ; <nl> - return path ; <nl> + return findDataFile ( path , false ) ; <nl> } <nl> <nl> TEST ( Torch_Importer , simple_read ) <nl> TEST ( Torch_Importer , run_reshape ) <nl> runTorchNet ( " net_reshape " ) ; <nl> runTorchNet ( " net_reshape_batch " ) ; <nl> runTorchNet ( " net_reshape_single_sample " ) ; <nl> + runTorchNet ( " net_reshape_channels " , " " , false , true ) ; <nl> } <nl> <nl> TEST ( Torch_Importer , run_linear ) <nl> TEST ( Torch_Importer , run_paralel ) <nl> TEST ( Torch_Importer , run_concat ) <nl> { <nl> runTorchNet ( " net_concat " , " l5_torchMerge " ) ; <nl> + runTorchNet ( " net_depth_concat " , " " , false , true ) ; <nl> } <nl> <nl> TEST ( Torch_Importer , run_deconv ) <nl> TEST ( Torch_Importer , net_logsoftmax ) <nl> runTorchNet ( " net_logsoftmax_spatial " ) ; <nl> } <nl> <nl> + TEST ( Torch_Importer , net_lp_pooling ) <nl> + { <nl> + runTorchNet ( " net_lp_pooling_square " , " " , false , true ) ; <nl> + runTorchNet ( " net_lp_pooling_power " , " " , false , true ) ; <nl> + } <nl> + <nl> + TEST ( Torch_Importer , net_conv_gemm_lrn ) <nl> + { <nl> + runTorchNet ( " net_conv_gemm_lrn " , " " , false , true ) ; <nl> + } <nl> + <nl> + TEST ( Torch_Importer , net_inception_block ) <nl> + { <nl> + runTorchNet ( " net_inception_block " , " " , false , true ) ; <nl> + } <nl> + <nl> + TEST ( Torch_Importer , net_normalize ) <nl> + { <nl> + runTorchNet ( " net_normalize " , " " , false , true ) ; <nl> + } <nl> + <nl> TEST ( Torch_Importer , ENet_accuracy ) <nl> { <nl> Net net ; <nl> TEST ( Torch_Importer , ENet_accuracy ) <nl> } <nl> } <nl> <nl> + TEST ( Torch_Importer , OpenFace_accuracy ) <nl> + { <nl> + const string model = findDataFile ( " dnn / openface_nn4 . small2 . v1 . t7 " , false ) ; <nl> + Net net = readNetFromTorch ( model ) ; <nl> + <nl> + Mat sample = imread ( findDataFile ( " cv / shared / lena . png " , false ) ) ; <nl> + Mat sampleF32 ( sample . size ( ) , CV_32FC3 ) ; <nl> + sample . convertTo ( sampleF32 , sampleF32 . type ( ) ) ; <nl> + sampleF32 / = 255 ; <nl> + resize ( sampleF32 , sampleF32 , Size ( 96 , 96 ) , 0 , 0 , INTER_NEAREST ) ; <nl> + <nl> + Mat inputBlob = blobFromImage ( sampleF32 ) ; <nl> + <nl> + net . setInput ( inputBlob ) ; <nl> + Mat out = net . forward ( ) ; <nl> + <nl> + Mat outRef = readTorchBlob ( _tf ( " net_openface_output . dat " ) , true ) ; <nl> + normAssert ( out , outRef ) ; <nl> + } <nl> + <nl> } <nl> <nl> # endif <nl>
Layers for OpenFace face recognition network
opencv/opencv
7dc6b1d7d4bb2f7930e36deba7ef1cbaee9591b7
2017-09-14T06:11:31Z
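A minimal sketch of how the OpenFace Torch model exercised by the new OpenFace_accuracy test above could be loaded through the public OpenCV DNN API; the file names and the main() wrapper are placeholders, and the preprocessing (float32 conversion, division by 255, nearest-neighbour resize to 96x96) mirrors the test in the diff.

#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

int main()
{
    // Torch .t7 model produced by OpenFace; the path is a placeholder.
    cv::dnn::Net net = cv::dnn::readNetFromTorch("openface_nn4.small2.v1.t7");

    cv::Mat img = cv::imread("face.png");                 // hypothetical 3-channel input
    cv::Mat imgF32;
    img.convertTo(imgF32, CV_32FC3, 1.0 / 255);           // scale pixels to [0, 1]
    cv::resize(imgF32, imgF32, cv::Size(96, 96), 0, 0, cv::INTER_NEAREST);

    net.setInput(cv::dnn::blobFromImage(imgF32));         // NCHW blob, no mean subtraction
    cv::Mat embedding = net.forward();                    // 128-D face descriptor
    return embedding.empty() ? 1 : 0;
}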
mmm a / src / rdb_protocol / protocol . cc <nl> ppp b / src / rdb_protocol / protocol . cc <nl> void read_t : : unshard ( read_response_t * responses , size_t count , <nl> } <nl> } <nl> <nl> + / / Only use snapshotting if we ' re doing a range get . <nl> + bool read_t : : use_snapshot ( ) const THROWS_NOTHING { <nl> + return boost : : get < rget_read_t > ( & read ) ; <nl> + } <nl> + <nl> + <nl> + <nl> / * write_t : : get_region ( ) implementation * / <nl> <nl> / / TODO : This entire type is suspect , given the performance for <nl> mmm a / src / rdb_protocol / protocol . hpp <nl> ppp b / src / rdb_protocol / protocol . hpp <nl> struct read_t { <nl> <nl> <nl> / / Only use snapshotting if we ' re doing a range get . <nl> - bool use_snapshot ( ) const THROWS_NOTHING { return boost : : get < rget_read_t > ( & read ) ; } <nl> + bool use_snapshot ( ) const THROWS_NOTHING ; <nl> <nl> / / Returns true if this read should be sent to every replica . <nl> bool all_read ( ) const THROWS_NOTHING { return boost : : get < sindex_status_t > ( & read ) ; } <nl>
Moved read_t : : use_snapshot to protocol . cc .
rethinkdb/rethinkdb
944ce9b01bc30c5d8e965d719b47faa96b02cd8d
2014-10-28T20:50:11Z
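The commit above is a pure header-to-source move. A minimal two-file sketch of the same pattern, with hypothetical names:

// query.hpp -- only the declaration stays in the header
struct query_t {
    bool is_range_read() const noexcept;
};

// query.cc -- the body moves next to the type's other out-of-line definitions
bool query_t::is_range_read() const noexcept {
    return false;   // the real check on the read variant would live here
}

This kind of move is typically made so that callers no longer recompile when the body changes and so the implementation detail (here, the boost::get on the read variant) sits with the rest of the .cc definitions.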
mmm a / src / runtime / vm / translator / hopt / irtranslator . cpp <nl> ppp b / src / runtime / vm / translator / hopt / irtranslator . cpp <nl> TranslatorX64 : : irTranslateInstrWork ( const Tracelet & t , <nl> } <nl> } <nl> <nl> + void <nl> + TranslatorX64 : : irPassPredictedAndInferredTypes ( const NormalizedInstruction & i ) { <nl> + ASSERT ( m_useHHIR ) ; <nl> + <nl> + if ( ! i . outStack | | i . breaksTracelet ) return ; <nl> + <nl> + NormalizedInstruction : : OutputUse u = i . outputIsUsed ( i . outStack ) ; <nl> + <nl> + if ( ( u = = NormalizedInstruction : : OutputUsed & & i . outputPredicted ) | | <nl> + ( u = = NormalizedInstruction : : OutputInferred ) ) { <nl> + JIT : : Type : : Tag jitType = JIT : : Type : : fromRuntimeType ( i . outStack - > rtt ) ; <nl> + if ( u = = NormalizedInstruction : : OutputInferred ) { <nl> + TRACE ( 1 , " HHIR : irPassPredictedAndInferredTypes : output inferred as % s \ n " , <nl> + JIT : : Type : : Strings [ jitType ] ) ; <nl> + m_hhbcTrans - > assertTypeStack ( 0 , jitType ) ; <nl> + } else { <nl> + TRACE ( 1 , " HHIR : irPassPredictedAndInferredTypes : output predicted as % s \ n " , <nl> + JIT : : Type : : Strings [ jitType ] ) ; <nl> + m_hhbcTrans - > checkTypeStack ( 0 , jitType , i . next - > offset ( ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> void <nl> TranslatorX64 : : irTranslateInstr ( const Tracelet & t , <nl> const NormalizedInstruction & i ) { <nl> TranslatorX64 : : irTranslateInstr ( const Tracelet & t , <nl> <nl> irTranslateInstrWork ( t , i ) ; <nl> <nl> - emitPredictionGuards ( i ) ; <nl> + irPassPredictedAndInferredTypes ( i ) ; <nl> } <nl> <nl> void TranslatorX64 : : irAssertType ( const Location & l , <nl> mmm a / src / runtime / vm / translator / translator - x64 . cpp <nl> ppp b / src / runtime / vm / translator / translator - x64 . cpp <nl> TranslatorX64 : : emitPredictionGuards ( const NormalizedInstruction & i ) { <nl> if ( ! i . outputPredicted | | i . breaksTracelet ) return ; <nl> NormalizedInstruction : : OutputUse u = i . outputIsUsed ( i . outStack ) ; <nl> <nl> - if ( m_useHHIR ) { <nl> - if ( u = = NormalizedInstruction : : OutputUsed | | <nl> - u = = NormalizedInstruction : : OutputInferred ) { <nl> - JIT : : Type : : Tag jitType = JIT : : Type : : fromRuntimeType ( i . outStack - > rtt ) ; <nl> - if ( u = = NormalizedInstruction : : OutputInferred ) { <nl> - TRACE ( 1 , " HHIR : emitPredictionGuards : output inferred to be % s \ n " , <nl> - JIT : : Type : : Strings [ jitType ] ) ; <nl> - m_hhbcTrans - > assertTypeStack ( 0 , jitType ) ; <nl> - } else { <nl> - TRACE ( 1 , " HHIR : emitPredictionGuards : output predicted to be % s \ n " , <nl> - JIT : : Type : : Strings [ jitType ] ) ; <nl> - m_hhbcTrans - > checkTypeStack ( 0 , jitType , i . next - > offset ( ) ) ; <nl> - } <nl> - } <nl> - return ; <nl> - } <nl> - <nl> switch ( u ) { <nl> case NormalizedInstruction : : OutputUsed : <nl> break ; <nl> mmm a / src / runtime / vm / translator / translator - x64 . h <nl> ppp b / src / runtime / vm / translator / translator - x64 . h <nl> PSEUDOINSTRS <nl> const TCA start , <nl> const TCA stubStart , <nl> vector < TransBCMapping > * bcMap ) ; <nl> + void irPassPredictedAndInferredTypes ( const NormalizedInstruction & i ) ; <nl> + <nl> void emitStringCheck ( Asm & _a , PhysReg base , int offset ) ; <nl> void emitTypeCheck ( Asm & _a , DataType dt , <nl> PhysReg base , int offset , <nl> mmm a / src / test / test_code_run . cpp <nl> ppp b / src / test / test_code_run . 
cpp <nl> bool TestCodeRun : : TestTypeAssertions ( ) { <nl> " $ d1 - > d1prop = 3 ; \ n " <nl> " $ d1 - > doStuff ( ) ; \ n " ) ; <nl> <nl> + MVCR ( " < ? php \ n " <nl> + " function foo ( ) { \ n " <nl> + " } \ n " <nl> + " function main ( ) { \ n " <nl> + " var_dump ( is_null ( foo ( ) ) ) ; \ n " <nl> + " } \ n " <nl> + " main ( ) ; \ n " ) ; <nl> + <nl> return true ; <nl> } <nl> <nl>
Pass to the IR statically inferred types for tracelet - produced values
facebook/hhvm
47c9c5c0c21883ca6f7870e4284f3e3781a85c83
2012-12-17T23:55:57Z
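A self-contained sketch, with stand-in types, of the predicted-versus-inferred distinction that the new irPassPredictedAndInferredTypes hook encodes: a statically inferred output type can be asserted without a runtime branch, while a merely predicted type needs a guard with a side exit taken when the prediction is wrong.

#include <cstdio>

// Stand-ins for the translator's real types; all names here are illustrative.
enum class TypeSource { Inferred, Predicted };

struct Builder {
    void assertType(const char* t)             { std::printf("assert %s\n", t); }
    void checkType(const char* t, int exitOff) { std::printf("check %s, side exit @%d\n", t, exitOff); }
};

// Inferred output types are asserted (no runtime guard); predicted ones are
// checked, with a side exit taken if the runtime value does not match.
void applyOutputType(Builder& b, TypeSource src, const char* type, int nextOffset)
{
    if (src == TypeSource::Inferred)
        b.assertType(type);
    else
        b.checkType(type, nextOffset);
}

int main()
{
    Builder b;
    applyOutputType(b, TypeSource::Inferred,  "Int", 0);
    applyOutputType(b, TypeSource::Predicted, "Str", 42);
    return 0;
}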
mmm a / ports / azure - kinect - sensor - sdk / CONTROL <nl> ppp b / ports / azure - kinect - sensor - sdk / CONTROL <nl> <nl> Source : azure - kinect - sensor - sdk <nl> - Version : 1 . 4 . 0 - alpha . 0 - 1 <nl> + Version : 1 . 4 . 0 - alpha . 0 - 2 <nl> Homepage : https : / / github . com / microsoft / Azure - Kinect - Sensor - SDK <nl> Description : Azure Kinect SDK is a cross platform ( Linux and Windows ) user mode SDK to read data from your Azure Kinect device . <nl> Build - Depends : azure - c - shared - utility , glfw3 , gtest , imgui , libusb , spdlog , cjson , ebml , libjpeg - turbo , matroska , libsoundio , libyuv <nl> deleted file mode 100644 <nl> index a8aa0773e40 . . 00000000000 <nl> mmm a / ports / azure - kinect - sensor - sdk / fix - components - path . patch <nl> ppp / dev / null <nl> <nl> mmmmmm a / src / record / sdk / CMakeLists . txt <nl> - ppp b / src / record / sdk / CMakeLists . txt <nl> - install ( <nl> - development <nl> - RUNTIME <nl> - DESTINATION <nl> - - $ { CMAKE_INSTALL_BINDIR } <nl> - + tools <nl> - COMPONENT <nl> - runtime <nl> - ) <nl> mmmmmm a / src / sdk / CMakeLists . txt <nl> - ppp b / src / sdk / CMakeLists . txt <nl> - install ( <nl> - development <nl> - RUNTIME <nl> - DESTINATION <nl> - - $ { CMAKE_INSTALL_BINDIR } <nl> - + tools <nl> - COMPONENT <nl> - runtime <nl> - ) <nl> mmm a / ports / azure - kinect - sensor - sdk / portfile . cmake <nl> ppp b / ports / azure - kinect - sensor - sdk / portfile . cmake <nl> vcpkg_from_github ( <nl> PATCHES <nl> fix - builds . patch <nl> disable - c4275 . patch <nl> - fix - components - path . patch <nl> ) <nl> <nl> vcpkg_find_acquire_program ( PYTHON3 ) <nl>
[ azure - kinect - sensor - sdk ] Fix * . dll install path ( )
microsoft/vcpkg
f787fa41130dd37092fda2dedbb6430489cd91ca
2020-01-27T20:03:32Z
mmm a / hphp / hack / src / monitor / serverMonitor . ml <nl> ppp b / hphp / hack / src / monitor / serverMonitor . ml <nl> module Make_monitor ( SC : ServerMonitorUtils . Server_config ) <nl> purgatory_clients : ( MonitorRpc . handoff_options * Unix . file_descr ) Queue . t ; <nl> } <nl> <nl> + type t = env * ServerMonitorUtils . monitor_config * Unix . file_descr <nl> + <nl> let fd_to_int ( x : Unix . file_descr ) : int = Obj . magic x <nl> <nl> let msg_to_channel fd msg = <nl> module Make_monitor ( SC : ServerMonitorUtils . Server_config ) <nl> <nl> let kill_server process = <nl> try Unix . kill process . pid Sys . sigusr2 with <nl> - | _ - > <nl> - Hh_logger . log <nl> + | _ - > Hh_logger . log <nl> " Failed to send sigusr2 signal to server process . Trying \ <nl> violently " ; <nl> try Unix . kill process . pid Sys . sigkill with e - > <nl> module Make_monitor ( SC : ServerMonitorUtils . Server_config ) <nl> " Accepting on socket failed . Ignoring client connection attempt . " ; <nl> env ) <nl> <nl> - let start_monitoring ~ waiting_client ~ max_purgatory_clients <nl> + let check_and_run_loop_once ( env , monitor_config , socket ) = <nl> + let env = check_and_run_loop_ env monitor_config socket in <nl> + env , monitor_config , socket <nl> + <nl> + let start_monitor ~ waiting_client ~ max_purgatory_clients <nl> server_start_options informant_init_env <nl> monitor_config = <nl> let socket = Socket . init_unix_socket monitor_config . socket_file in <nl> module Make_monitor ( SC : ServerMonitorUtils . Server_config ) <nl> server_start_options ; <nl> retries = 0 ; <nl> } in <nl> + env , monitor_config , socket <nl> + <nl> + let start_monitoring ~ waiting_client ~ max_purgatory_clients <nl> + server_start_options informant_init_env <nl> + monitor_config = <nl> + let env , monitor_config , socket = start_monitor ~ waiting_client ~ max_purgatory_clients <nl> + server_start_options informant_init_env monitor_config in <nl> check_and_run_loop env monitor_config socket <nl> end <nl> mmm a / hphp / hack / src / monitor / serverMonitor . mli <nl> ppp b / hphp / hack / src / monitor / serverMonitor . mli <nl> module Make_monitor : <nl> functor ( SC : ServerMonitorUtils . Server_config ) - > <nl> functor ( Informant : Informant_sig . S ) - > <nl> sig <nl> + type t <nl> + <nl> + ( * * Start a monitor without running the check loop . Useful for testing . * ) <nl> + val start_monitor : <nl> + waiting_client : Unix . file_descr option - > <nl> + max_purgatory_clients : int - > <nl> + SC . server_start_options - > <nl> + Informant . init_env - > <nl> + ServerMonitorUtils . monitor_config - > <nl> + t <nl> + <nl> + ( * * Run the check loop once . Useful for testing . * ) <nl> + val check_and_run_loop_once : t - > t <nl> + <nl> + ( * * Start the monitor and repeatedly run the check and run loop . <nl> + * Does not return . * ) <nl> val start_monitoring : <nl> waiting_client : Unix . file_descr option - > <nl> max_purgatory_clients : int - > <nl>
refactor ServerMonitor to make it testable
facebook/hhvm
e72ff6d84df82151f51594f05fdb81d082babab7
2017-10-31T23:16:21Z
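The OCaml refactor above follows a common make-the-loop-testable pattern: split the non-returning entry point into a setup step that returns the loop state and a function that runs the loop body exactly once. A rough C++ rendering of the same shape, with hypothetical names:

#include <cassert>

class Monitor {
public:
    // Like start_monitor: do the setup and hand back the state without looping.
    static Monitor start(int initial) { return Monitor{initial}; }

    // Like check_and_run_loop_once: a single iteration of the check-and-run loop.
    Monitor& step() { ++ticks_; return *this; }

    // Like start_monitoring: setup already done, loop forever (never returns).
    [[noreturn]] void run() { for (;;) step(); }

    int ticks() const { return ticks_; }

private:
    explicit Monitor(int initial) : ticks_(initial) {}
    int ticks_;
};

int main()
{
    Monitor m = Monitor::start(0);
    m.step().step();              // a test can observe the state between iterations
    assert(m.ticks() == 2);
    return 0;
}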
mmm a / plugins / net_plugin / net_plugin . cpp <nl> ppp b / plugins / net_plugin / net_plugin . cpp <nl> namespace eosio { <nl> my_impl - > chain_plug - > accept_transaction ( trx , <nl> [ weak , trx ] ( const static_variant < fc : : exception_ptr , transaction_trace_ptr > & result ) mutable { <nl> / / next ( this lambda ) called from application thread <nl> - bool accepted = false ; <nl> if ( result . contains < fc : : exception_ptr > ( ) ) { <nl> fc_dlog ( logger , " bad packed_transaction : $ { m } " , ( " m " , result . get < fc : : exception_ptr > ( ) - > what ( ) ) ) ; <nl> } else { <nl> const transaction_trace_ptr & trace = result . get < transaction_trace_ptr > ( ) ; <nl> - if ( ! trace - > except ) { <nl> + if ( ! trace - > except ) { <nl> fc_dlog ( logger , " chain accepted transaction , bcast $ { id } " , ( " id " , trace - > id ) ) ; <nl> - accepted = true ; <nl> - } <nl> - <nl> - if ( ! accepted ) { <nl> + } else { <nl> fc_elog ( logger , " bad packed_transaction : $ { m } " , ( " m " , trace - > except - > what ( ) ) ) ; <nl> } <nl> } <nl> + connection_ptr conn = weak . lock ( ) ; <nl> + if ( conn ) { <nl> + conn - > trx_in_progress_size - = calc_trx_size ( trx ) ; <nl> + } <nl> } ) ; <nl> } ) ; <nl> } <nl>
Add back in correct trx_in_progress_size reduction
EOSIO/eos
9aee3f7cb8b279d2827e0ff0670275ec95e6f8c7
2019-08-22T02:23:20Z
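The restored logic above hangs the counter decrement off a weak_ptr captured by the asynchronous callback, so the callback neither keeps the connection alive nor touches it after destruction. A self-contained sketch of that pattern, with hypothetical names and a synchronous stand-in for the async call:

#include <cstddef>
#include <functional>
#include <iostream>
#include <memory>

struct Connection {
    std::size_t trx_in_progress_size = 0;   // bytes of transactions still in flight
};

// Stand-in for the accept_transaction completion path: run the handler, then
// lock the weak_ptr and adjust the counter only if the connection still exists.
void on_transaction_processed(std::weak_ptr<Connection> weak, std::size_t trx_size,
                              const std::function<void()>& handler)
{
    handler();                                    // validate / broadcast, etc.
    if (auto conn = weak.lock())                  // connection may be gone by now
        conn->trx_in_progress_size -= trx_size;   // release the reserved bytes
}

int main()
{
    auto conn = std::make_shared<Connection>();
    conn->trx_in_progress_size = 512;             // reserved when the trx was queued
    on_transaction_processed(conn, 512, [] { /* work */ });
    std::cout << conn->trx_in_progress_size << "\n";   // prints 0
    return 0;
}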
mmm a / tensorflow / compiler / xla / service / gpu / BUILD <nl> ppp b / tensorflow / compiler / xla / service / gpu / BUILD <nl> tf_cc_test ( <nl> ) <nl> <nl> cc_library ( <nl> - name = " cudnn_conv_padding_legalization " , <nl> - srcs = [ " cudnn_conv_padding_legalization . cc " ] , <nl> - hdrs = [ " cudnn_conv_padding_legalization . h " ] , <nl> + name = " gpu_conv_padding_legalization " , <nl> + srcs = [ " gpu_conv_padding_legalization . cc " ] , <nl> + hdrs = [ " gpu_conv_padding_legalization . h " ] , <nl> deps = [ <nl> " : ir_emission_utils " , <nl> " / / tensorflow / compiler / xla : literal " , <nl> cc_library ( <nl> deps = [ <nl> " : alias_passthrough_params " , <nl> " : cudnn_batchnorm_rewriter " , <nl> - " : cudnn_conv_padding_legalization " , <nl> " : cudnn_conv_rewriter " , <nl> " : cudnn_pad_for_convolutions " , <nl> " : fusion_merger " , <nl> " : gpu_constants " , <nl> + " : gpu_conv_padding_legalization " , <nl> " : gpu_copy_insertion " , <nl> " : gpu_executable " , <nl> " : gpu_hlo_schedule " , <nl> cc_library ( <nl> deps = [ <nl> " : cublas_gemm_pad_for_tensor_cores " , <nl> " : cudnn_conv_algorithm_picker " , <nl> - " : cudnn_conv_padding_legalization " , <nl> " : cudnn_conv_rewriter " , <nl> " : cudnn_fused_conv_rewriter " , <nl> " : cudnn_pad_for_convolutions " , <nl> cc_library ( <nl> " : gemm_algorithm_picker " , <nl> " : gemm_rewriter " , <nl> " : gpu_compiler " , <nl> + " : gpu_conv_padding_legalization " , <nl> " : gpu_layout_assignment " , <nl> " : stream_executor_util " , <nl> " : target_constants " , <nl> cc_library ( <nl> " amdgpu_compiler . h " , <nl> ] , <nl> deps = [ <nl> - " : cudnn_conv_padding_legalization " , <nl> " : cudnn_conv_rewriter " , <nl> " : gpu_compiler " , <nl> + " : gpu_conv_padding_legalization " , <nl> " : gpu_layout_assignment " , <nl> " : target_constants " , <nl> " / / tensorflow / compiler / xla : statusor " , <nl> mmm a / tensorflow / compiler / xla / service / gpu / amdgpu_compiler . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / amdgpu_compiler . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / service / gpu / amdgpu_compiler . h " <nl> <nl> # include " tensorflow / compiler / xla / service / algebraic_simplifier . h " <nl> - # include " tensorflow / compiler / xla / service / gpu / cudnn_conv_padding_legalization . h " <nl> # include " tensorflow / compiler / xla / service / gpu / cudnn_conv_rewriter . h " <nl> + # include " tensorflow / compiler / xla / service / gpu / gpu_conv_padding_legalization . h " <nl> / / TODO ( whchung @ gmail . com ) : Add gpu_conv_algorithm_picker after its PR merged . <nl> # include " tensorflow / compiler / xla / service / gpu / gpu_layout_assignment . h " <nl> # include " tensorflow / compiler / xla / service / gpu / llvm_gpu_backend / gpu_backend_lib . h " <nl> Status AMDGPUCompiler : : OptimizeHloConvolutionCanonicalization ( <nl> pipeline . AddInvariantChecker < HloVerifier > ( / * layout_sensitive = * / false , <nl> / * allow_mixed_precision = * / false ) ; <nl> pipeline . AddPass < CudnnConvRewriter > ( ) ; <nl> - pipeline . AddPass < CudnnConvPaddingLegalization > ( ) ; <nl> + pipeline . AddPass < GpuConvPaddingLegalization > ( ) ; <nl> <nl> pipeline . AddPass < HloConstantFolding > ( ) ; <nl> TF_RETURN_IF_ERROR ( pipeline . Run ( hlo_module ) . status ( ) ) ; <nl> mmm a / tensorflow / compiler / xla / service / gpu / cudnn_conv_rewriter . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / cudnn_conv_rewriter . 
cc <nl> MatchBackwardFilter ( HloInstruction * conv ) { <nl> / / the amount of high padding the same as the amount of low padding as long <nl> / / as it is between min_padding_high and max_padding_high . If it is not in <nl> / / that range , we pick the one that ' s closest to dim - > padding_low ( ) and let <nl> - / / CudnnConvPaddingLegalization canonicalize the resultant backward <nl> + / / GpuConvPaddingLegalization canonicalize the resultant backward <nl> / / convolution later . Picking the closest one minimizes the cost of the kPad <nl> - / / instruction to be inserted by CudnnConvPaddingLegalization . <nl> + / / instruction to be inserted by GpuConvPaddingLegalization . <nl> if ( dim - > padding_low ( ) > = min_padding_high & & <nl> dim - > padding_low ( ) < = max_padding_high ) { <nl> dim - > set_padding_high ( dim - > padding_low ( ) ) ; <nl> MatchBackwardFilter ( HloInstruction * conv ) { <nl> " negative padding ( " <nl> < < dim - > padding_high ( ) <nl> < < " ) on right / bottom of the weight gradients , which is not " <nl> - " supported by CudnnConvPaddingLegalization ( b / 32744257 ) . " <nl> + " supported by GpuConvPaddingLegalization ( b / 32744257 ) . " <nl> " Falling back to " <nl> " unfused convolution for instruction : " <nl> < < conv - > ToString ( ) ; <nl> MatchBackwardInput ( HloInstruction * conv ) { <nl> LOG ( ERROR ) <nl> < < " The low padding of the backward convolution would be negative ( " <nl> < < backward_padding_low <nl> - < < " ) , which isn ' t supported by CudnnConvPaddingLegalization " <nl> + < < " ) , which isn ' t supported by GpuConvPaddingLegalization " <nl> " for now ( b / 32744257 ) . " ; <nl> return no_match_result ; <nl> } <nl> MatchBackwardInput ( HloInstruction * conv ) { <nl> dim - > set_padding_high ( backward_padding_low ) ; <nl> } else { <nl> / / Otherwise , we choose the amount that ' s closest to backward_padding_low , <nl> - / / and CudnnConvPaddingLegalization will later insert kSlice <nl> + / / and GpuConvPaddingLegalization will later insert kSlice <nl> / / instructions to enforce even padding . <nl> / / <nl> / / For example , consider the backward convolution pattern <nl> MatchBackwardInput ( HloInstruction * conv ) { <nl> dim - > set_padding_high ( max_padding_high ) ; <nl> } <nl> } <nl> - / / CudnnConvPaddingLegalization doesn ' t handle backward input <nl> + / / GpuConvPaddingLegalization doesn ' t handle backward input <nl> / / convolution with negative padding for now . So fall back to unfused <nl> / / convolution in case of negative padding . For example , <nl> / / ABCD = Conv ( abc , reverse ( xy ) , padding_high = 2 ) <nl> MatchBackwardInput ( HloInstruction * conv ) { <nl> " negative padding ( " <nl> < < dim - > padding_high ( ) <nl> < < " ) on right / bottom of the activations , which is not " <nl> - " supported by CudnnConvPaddingLegalization ( b / 32744257 ) . " <nl> + " supported by GpuConvPaddingLegalization ( b / 32744257 ) . " <nl> " Falling back to unfused convolution for instruction : " <nl> < < conv - > ToString ( ) ; <nl> return no_match_result ; <nl> mmm a / tensorflow / compiler / xla / service / gpu / cudnn_conv_rewriter_test . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / cudnn_conv_rewriter_test . 
cc <nl> TEST_F ( CudnnConvRewriterTest , BackwardInputConvolveLowPaddingTooLarge ) { <nl> / / padding_low = 2 , padding_high = 1 , base_dilation = 2 ) <nl> / / <nl> / / We should fuse BC even though padding on activations is uneven , because <nl> - / / CudnnConvPaddingLegalization will canonicalize the fusion HLO . <nl> + / / GpuConvPaddingLegalization will canonicalize the fusion HLO . <nl> TEST_F ( CudnnConvRewriterTest , BackwardInputConvolveUnevenPaddingOnActivations ) { <nl> auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> / / The gradients are in NCHW layout . <nl> TEST_F ( CudnnConvRewriterTest , BackwardInputConvolveUnevenPaddingOnActivations ) { <nl> / / BC = BackwardInput ( FC ) does : <nl> / / [ 4 ] = conv ( [ 3 ] , reverse ( [ 2 ] ) , padding_high = 2 ) <nl> / / <nl> - / / We currently don ' t fuse BC because CudnnConvPaddingLegalization <nl> + / / We currently don ' t fuse BC because GpuConvPaddingLegalization <nl> / / doesn ' t support negative padding on the gradients of backward convolution <nl> / / ( b / 32744257 ) . <nl> TEST_F ( CudnnConvRewriterTest , <nl> similarity index 97 % <nl> rename from tensorflow / compiler / xla / service / gpu / cudnn_conv_padding_legalization . cc <nl> rename to tensorflow / compiler / xla / service / gpu / gpu_conv_padding_legalization . cc <nl> mmm a / tensorflow / compiler / xla / service / gpu / cudnn_conv_padding_legalization . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / gpu_conv_padding_legalization . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " tensorflow / compiler / xla / service / gpu / cudnn_conv_padding_legalization . h " <nl> + # include " tensorflow / compiler / xla / service / gpu / gpu_conv_padding_legalization . h " <nl> <nl> # include " absl / memory / memory . h " <nl> # include " tensorflow / compiler / xla / literal . 
h " <nl> HloInstruction * MaybePaddedKernel ( const Window & conv_window , <nl> } <nl> } / / namespace <nl> <nl> - bool CudnnConvPaddingLegalization : : CanonicalizeForwardConvolution ( <nl> + bool GpuConvPaddingLegalization : : CanonicalizeForwardConvolution ( <nl> HloInstruction * conv ) { <nl> if ( IsForwardConvolutionCanonical ( * conv ) ) { <nl> return false ; <nl> void IncreasePaddingHighBy ( int64 delta , WindowDimension * window_dim ) { <nl> } <nl> } / / namespace <nl> <nl> - bool CudnnConvPaddingLegalization : : CanonicalizeBackwardFilterConvolution ( <nl> + bool GpuConvPaddingLegalization : : CanonicalizeBackwardFilterConvolution ( <nl> HloInstruction * backward_conv ) { <nl> CHECK_EQ ( backward_conv - > custom_call_target ( ) , <nl> kCudnnConvBackwardFilterCallTarget ) ; <nl> bool CudnnConvPaddingLegalization : : CanonicalizeBackwardFilterConvolution ( <nl> return true ; <nl> } <nl> <nl> - bool CudnnConvPaddingLegalization : : CanonicalizeBackwardInputConvolution ( <nl> + bool GpuConvPaddingLegalization : : CanonicalizeBackwardInputConvolution ( <nl> HloInstruction * backward_conv ) { <nl> if ( window_util : : HasSymmetricPadding ( backward_conv - > window ( ) ) ) { <nl> return false ; <nl> bool CudnnConvPaddingLegalization : : CanonicalizeBackwardInputConvolution ( <nl> return true ; <nl> } <nl> <nl> - StatusOr < bool > CudnnConvPaddingLegalization : : RunOnComputation ( <nl> + StatusOr < bool > GpuConvPaddingLegalization : : RunOnComputation ( <nl> HloComputation * computation ) { <nl> bool changed = false ; <nl> std : : vector < HloCustomCallInstruction * > convs ; <nl> StatusOr < bool > CudnnConvPaddingLegalization : : RunOnComputation ( <nl> return changed ; <nl> } <nl> <nl> - StatusOr < bool > CudnnConvPaddingLegalization : : Run ( HloModule * module ) { <nl> + StatusOr < bool > GpuConvPaddingLegalization : : Run ( HloModule * module ) { <nl> bool changed = false ; <nl> for ( HloComputation * computation : module - > MakeNonfusionComputations ( ) ) { <nl> TF_ASSIGN_OR_RETURN ( bool result , RunOnComputation ( computation ) ) ; <nl> similarity index 77 % <nl> rename from tensorflow / compiler / xla / service / gpu / cudnn_conv_padding_legalization . h <nl> rename to tensorflow / compiler / xla / service / gpu / gpu_conv_padding_legalization . h <nl> mmm a / tensorflow / compiler / xla / service / gpu / cudnn_conv_padding_legalization . h <nl> ppp b / tensorflow / compiler / xla / service / gpu / gpu_conv_padding_legalization . h <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_CUDNN_CONV_PADDING_LEGALIZATION_H_ <nl> - # define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_CUDNN_CONV_PADDING_LEGALIZATION_H_ <nl> + # ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GPU_CONV_PADDING_LEGALIZATION_H_ <nl> + # define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GPU_CONV_PADDING_LEGALIZATION_H_ <nl> <nl> # include " tensorflow / compiler / xla / service / hlo_pass_interface . h " <nl> <nl> namespace gpu { <nl> <nl> / / An HLO pass that canonicalizes convolution instructions for GPU codegen . It <nl> / / inserts Pad instructions before Convolution instructions with uncanonicalized <nl> - / / padding , so that they can be lowered to cuDNN convolution . 
<nl> - class CudnnConvPaddingLegalization : public HloModulePass { <nl> + / / padding , so that they can be lowered to Cudnn / Miopen convolution . <nl> + class GpuConvPaddingLegalization : public HloModulePass { <nl> public : <nl> absl : : string_view name ( ) const override { <nl> - return " cudnn - conv - padding - legalization " ; <nl> + return " gpu - conv - padding - legalization " ; <nl> } <nl> <nl> StatusOr < bool > Run ( HloModule * module ) override ; <nl> class CudnnConvPaddingLegalization : public HloModulePass { <nl> } / / namespace gpu <nl> } / / namespace xla <nl> <nl> - # endif / / TENSORFLOW_COMPILER_XLA_SERVICE_GPU_CUDNN_CONV_PADDING_LEGALIZATION_H_ <nl> + # endif / / TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GPU_CONV_PADDING_LEGALIZATION_H_ <nl> mmm a / tensorflow / compiler / xla / service / gpu / nvptx_compiler . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / nvptx_compiler . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / service / dump . h " <nl> # include " tensorflow / compiler / xla / service / gpu / cublas_gemm_pad_for_tensor_cores . h " <nl> # include " tensorflow / compiler / xla / service / gpu / cudnn_conv_algorithm_picker . h " <nl> - # include " tensorflow / compiler / xla / service / gpu / cudnn_conv_padding_legalization . h " <nl> # include " tensorflow / compiler / xla / service / gpu / cudnn_conv_rewriter . h " <nl> # include " tensorflow / compiler / xla / service / gpu / cudnn_fused_conv_rewriter . h " <nl> # include " tensorflow / compiler / xla / service / gpu / cudnn_pad_for_convolutions . h " <nl> # include " tensorflow / compiler / xla / service / gpu / cusolver_rewriter . h " <nl> # include " tensorflow / compiler / xla / service / gpu / gemm_algorithm_picker . h " <nl> # include " tensorflow / compiler / xla / service / gpu / gemm_rewriter . h " <nl> + # include " tensorflow / compiler / xla / service / gpu / gpu_conv_padding_legalization . h " <nl> # include " tensorflow / compiler / xla / service / gpu / gpu_layout_assignment . h " <nl> # include " tensorflow / compiler / xla / service / gpu / llvm_gpu_backend / gpu_backend_lib . h " <nl> # include " tensorflow / compiler / xla / service / gpu / stream_executor_util . h " <nl> Status NVPTXCompiler : : OptimizeHloConvolutionCanonicalization ( <nl> HloModule * hlo_module , se : : StreamExecutor * stream_exec , <nl> se : : DeviceMemoryAllocator * device_allocator ) { <nl> / / Convert convolutions into CustomCalls to cudnn , then canonicalize them <nl> - / / ( CudnnConvPaddingLegalization ) . Also expand cuSolver calls . <nl> + / / ( GpuConvPaddingLegalization ) . Also expand cuSolver calls . <nl> HloPassPipeline pipeline ( " conv_canonicalization " ) ; <nl> pipeline . AddInvariantChecker < HloVerifier > ( / * layout_sensitive = * / false , <nl> / * allow_mixed_precision = * / false ) ; <nl> pipeline . AddPass < CusolverRewriter > ( ) ; <nl> pipeline . AddPass < CudnnConvRewriter > ( ) ; <nl> pipeline . AddPass < CudnnFusedConvRewriter > ( ) ; <nl> - pipeline . AddPass < CudnnConvPaddingLegalization > ( ) ; <nl> + pipeline . AddPass < GpuConvPaddingLegalization > ( ) ; <nl> pipeline . AddPass < CudnnPadForConvolutions > ( IsVoltaOrLater ( * stream_exec ) ) ; <nl> / / CudnnConvPadForIntegerConvolutions and CudnnConvPadForTensorCores leaves <nl> / / behind unnecessary tuple / get - tuple - element pairs that TupleSimplifier <nl> Status NVPTXCompiler : : OptimizeHloConvolutionCanonicalization ( <nl> pass . 
AddPass < AlgebraicSimplifier > ( options ) ; <nl> } <nl> <nl> - / / CudnnConvRewriter , CudnnConvPaddingLegalization and <nl> + / / CudnnConvRewriter , GpuConvPaddingLegalization and <nl> / / CudnnConvPadForTensorCores may add instructions which can be simplified <nl> / / by constant folding . <nl> pipeline . AddPass < HloConstantFolding > ( ) ; <nl>
Merge pull request from ROCmSoftwarePlatform : google - upstream - pr - rename - cudnn_conv_padding_legalization
tensorflow/tensorflow
d3b2f11bff430bdb0930696df5df0a4a11c542b6
2019-09-20T15:19:53Z
mmm a / tensorflow / core / kernels / data / dataset_test_base . h <nl> ppp b / tensorflow / core / kernels / data / dataset_test_base . h <nl> class DatasetParams { <nl> <nl> virtual int op_version ( ) const { return op_version_ ; } <nl> <nl> + virtual bool sloppy ( ) const { return false ; } <nl> + <nl> protected : <nl> std : : vector < std : : shared_ptr < DatasetParams > > input_dataset_params_ ; <nl> DataTypeVector output_dtypes_ ; <nl> class DatasetOpsTestBaseV2 : public DatasetOpsTestBase { <nl> TEST_P ( ParameterizedGetNextTest , GetNext ) { \ <nl> auto test_case = GetParam ( ) ; \ <nl> TF_ASSERT_OK ( Initialize ( test_case . dataset_params ) ) ; \ <nl> - TF_ASSERT_OK ( CheckIteratorGetNext ( test_case . expected_outputs , \ <nl> - / * compare_order = * / true ) ) ; \ <nl> + TF_ASSERT_OK ( CheckIteratorGetNext ( \ <nl> + test_case . expected_outputs , \ <nl> + / * compare_order = * / ! test_case . dataset_params . sloppy ( ) ) ) ; \ <nl> } \ <nl> \ <nl> INSTANTIATE_TEST_SUITE_P ( \ <nl> mmm a / tensorflow / core / kernels / data / parallel_map_dataset_op_test . cc <nl> ppp b / tensorflow / core / kernels / data / parallel_map_dataset_op_test . cc <nl> namespace { <nl> <nl> constexpr char kNodeName [ ] = " parallel_map_dataset " ; <nl> <nl> - class ParallelMapDatasetOpTest : public DatasetOpsTestBase { <nl> - protected : <nl> - / / Creates a new ` ParallelMapDataset ` op kernel <nl> - Status CreateParallelMapDatasetOpKernel ( <nl> - const FunctionDefHelper : : AttrValueWrapper & func , <nl> - const DataTypeVector & output_types , <nl> - const std : : vector < PartialTensorShape > & output_shapes , <nl> - bool use_inter_op_parallelism , bool sloppy , bool preserve_cardinality , <nl> - std : : unique_ptr < OpKernel > * parallel_map_kernel ) { <nl> - NodeDef node_def = test : : function : : NDef ( <nl> - kNodeName , name_utils : : OpName ( ParallelMapDatasetOp : : kDatasetType ) , <nl> - { ParallelMapDatasetOp : : kInputDataset , <nl> - ParallelMapDatasetOp : : kNumParallelCalls } , <nl> - { { ParallelMapDatasetOp : : kFunc , func } , <nl> - { ParallelMapDatasetOp : : kTarguments , { } } , <nl> - { ParallelMapDatasetOp : : kOutputTypes , output_types } , <nl> - { ParallelMapDatasetOp : : kOutputShapes , output_shapes } , <nl> - { ParallelMapDatasetOp : : kUseInterOpParallelism , <nl> - use_inter_op_parallelism } , <nl> - { ParallelMapDatasetOp : : kSloppy , sloppy } , <nl> - { ParallelMapDatasetOp : : kPreserveCardinality , preserve_cardinality } } ) ; <nl> - TF_RETURN_IF_ERROR ( CreateOpKernel ( node_def , parallel_map_kernel ) ) ; <nl> + class ParallelMapDatasetParams : public DatasetParams { <nl> + public : <nl> + template < typename T > <nl> + ParallelMapDatasetParams ( T input_dataset_params , <nl> + std : : vector < Tensor > other_arguments , <nl> + int num_parallel_calls , <nl> + FunctionDefHelper : : AttrValueWrapper func , <nl> + std : : vector < FunctionDef > func_lib , <nl> + DataTypeVector type_arguments , <nl> + DataTypeVector output_dtypes , <nl> + std : : vector < PartialTensorShape > output_shapes , <nl> + bool use_inter_op_parallelism , bool sloppy , <nl> + bool preserve_cardinality , string node_name ) <nl> + : DatasetParams ( std : : move ( output_dtypes ) , std : : move ( output_shapes ) , <nl> + std : : move ( node_name ) ) , <nl> + other_arguments_ ( std : : move ( other_arguments ) ) , <nl> + num_parallel_calls_ ( num_parallel_calls ) , <nl> + func_ ( std : : move ( func ) ) , <nl> + func_lib_ ( std : : move ( func_lib ) ) , <nl> + type_arguments_ ( std : : move 
( type_arguments ) ) , <nl> + use_inter_op_parallelism_ ( use_inter_op_parallelism ) , <nl> + sloppy_ ( sloppy ) , <nl> + preserve_cardinality_ ( preserve_cardinality ) { <nl> + input_dataset_params_ . push_back ( absl : : make_unique < T > ( input_dataset_params ) ) ; <nl> + iterator_prefix_ = <nl> + name_utils : : IteratorPrefix ( input_dataset_params . dataset_type ( ) , <nl> + input_dataset_params . iterator_prefix ( ) ) ; <nl> + } <nl> + <nl> + std : : vector < Tensor > GetInputTensors ( ) const override { <nl> + auto input_tensors = other_arguments_ ; <nl> + input_tensors . emplace_back ( <nl> + CreateTensor < int32 > ( TensorShape ( { } ) , { num_parallel_calls_ } ) ) ; <nl> + return input_tensors ; <nl> + } <nl> + <nl> + Status GetInputNames ( std : : vector < string > * input_names ) const override { <nl> + input_names - > emplace_back ( ParallelMapDatasetOp : : kInputDataset ) ; <nl> + for ( int i = 0 ; i < other_arguments_ . size ( ) ; + + i ) { <nl> + input_names - > emplace_back ( <nl> + absl : : StrCat ( ParallelMapDatasetOp : : kOtherArguments , " _ " , i ) ) ; <nl> + } <nl> + input_names - > emplace_back ( ParallelMapDatasetOp : : kNumParallelCalls ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - / / Creates a new ` ParallelMapDataset ` op kernel context . <nl> - Status CreateParallelMapDatasetContext ( <nl> - OpKernel * const op_kernel , <nl> - gtl : : InlinedVector < TensorValue , 4 > * const inputs , <nl> - std : : unique_ptr < OpKernelContext > * context ) { <nl> - TF_RETURN_IF_ERROR ( CheckOpKernelInput ( * op_kernel , * inputs ) ) ; <nl> - TF_RETURN_IF_ERROR ( CreateOpKernelContext ( op_kernel , inputs , context ) ) ; <nl> + Status GetAttributes ( AttributeVector * attr_vector ) const override { <nl> + * attr_vector = { <nl> + { ParallelMapDatasetOp : : kFunc , func_ } , <nl> + { ParallelMapDatasetOp : : kTarguments , type_arguments_ } , <nl> + { ParallelMapDatasetOp : : kOutputShapes , output_shapes_ } , <nl> + { ParallelMapDatasetOp : : kOutputTypes , output_dtypes_ } , <nl> + { ParallelMapDatasetOp : : kUseInterOpParallelism , <nl> + use_inter_op_parallelism_ } , <nl> + { ParallelMapDatasetOp : : kSloppy , sloppy_ } , <nl> + { ParallelMapDatasetOp : : kPreserveCardinality , preserve_cardinality_ } } ; <nl> return Status : : OK ( ) ; <nl> } <nl> - } ; <nl> <nl> - struct RangeDatasetParam { <nl> - int64 start ; <nl> - int64 end ; <nl> - int64 step ; <nl> - } ; <nl> + string dataset_type ( ) const override { <nl> + return ParallelMapDatasetOp : : kDatasetType ; <nl> + } <nl> + <nl> + std : : vector < FunctionDef > func_lib ( ) const override { return func_lib_ ; } <nl> + <nl> + bool sloppy ( ) const override { return sloppy_ ; } <nl> <nl> - struct TestCase { <nl> - RangeDatasetParam range_data_param ; <nl> - Tensor num_parallel_calls ; <nl> - FunctionDefHelper : : AttrValueWrapper func ; <nl> - std : : vector < FunctionDef > func_lib ; <nl> - bool use_inter_op_parallelism ; <nl> - bool sloppy ; <nl> - bool preserve_cardinality ; <nl> - std : : vector < Tensor > expected_outputs ; <nl> - DataTypeVector expected_output_dtypes ; <nl> - std : : vector < PartialTensorShape > expected_output_shapes ; <nl> - int64 expected_cardinality ; <nl> - std : : vector < int > breakpoints ; <nl> + private : <nl> + std : : vector < Tensor > other_arguments_ ; <nl> + int num_parallel_calls_ ; <nl> + FunctionDefHelper : : AttrValueWrapper func_ ; <nl> + std : : vector < FunctionDef > func_lib_ ; <nl> + DataTypeVector type_arguments_ ; <nl> + bool use_inter_op_parallelism_ ; <nl> + 
bool sloppy_ ; <nl> + bool preserve_cardinality_ ; <nl> } ; <nl> <nl> + class ParallelMapDatasetOpTest : public DatasetOpsTestBaseV2 { } ; <nl> + <nl> FunctionDefHelper : : AttrValueWrapper MapFunc ( const string & func_name , <nl> const DataType & dtype ) { <nl> return FunctionDefHelper : : FunctionRef ( func_name , { { " T " , dtype } } ) ; <nl> FunctionDefHelper : : AttrValueWrapper MapFunc ( const string & func_name , <nl> <nl> / / test case 1 : num_parallel_calls = 1 , use_inter_op_parallelism = false , <nl> / / sloppy = false , preserve_cardinality = false , MapFunc = XTimesTwo <nl> - TestCase TestCase1 ( ) { <nl> - return { / * range_data_param * / { 0 , 10 , 3 } , <nl> - / * num_parallel_calls * / <nl> - CreateTensor < int32 > ( TensorShape ( { } ) , { 1 } ) , <nl> - / * func * / MapFunc ( " XTimesTwo " , DT_INT64 ) , <nl> - / * func_lib * / { test : : function : : XTimesTwo ( ) } , <nl> - / * use_inter_op_parallelism * / false , <nl> - / * sloppy * / false , <nl> - / * preserve_cardinality * / false , <nl> - / * expected_outputs * / <nl> - { CreateTensor < int64 > ( TensorShape ( { } ) , { 0 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 6 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 12 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 18 } ) } , <nl> - / * expected_output_dtypes * / { DT_INT64 } , <nl> - / * expected_output_shapes * / { PartialTensorShape ( { } ) } , <nl> - / * expected_cardinality * / 4 , <nl> - / * breakpoints * / { 0 , 1 , 9 } } ; <nl> + ParallelMapDatasetParams ParallelMapDatasetParams1 ( ) { <nl> + return ParallelMapDatasetParams ( RangeDatasetParams ( 0 , 10 , 3 ) , <nl> + / * other_arguments = * / { } , <nl> + / * num_parallel_calls = * / 1 , <nl> + / * func = * / MapFunc ( " XTimesTwo " , DT_INT64 ) , <nl> + / * func_lib * / { test : : function : : XTimesTwo ( ) } , <nl> + / * type_arguments = * / { } , <nl> + / * output_dtypes = * / { DT_INT64 } , <nl> + / * output_shapes = * / { PartialTensorShape ( { } ) } , <nl> + / * use_inter_op_parallelism = * / false , <nl> + / * sloppy = * / false , <nl> + / * preserve_cardinality = * / false , <nl> + / * node_name = * / kNodeName ) ; <nl> } <nl> <nl> / / test case 2 : num_parallel_calls = 2 , use_inter_op_parallelism = true , <nl> / / sloppy = true , preserve_cardinality = true , MapFunc = XTimesTwo <nl> - TestCase TestCase2 ( ) { <nl> - return { / * range_data_param * / { 0 , 10 , 3 } , <nl> - / * num_parallel_calls * / <nl> - CreateTensor < int32 > ( TensorShape ( { } ) , { 2 } ) , <nl> - / * func * / MapFunc ( " XTimesTwo " , DT_INT64 ) , <nl> - / * func_lib * / { test : : function : : XTimesTwo ( ) } , <nl> - / * use_inter_op_parallelism * / true , <nl> - / * sloppy * / true , <nl> - / * preserve_cardinality * / true , <nl> - / * expected_outputs * / <nl> - { CreateTensor < int64 > ( TensorShape ( { } ) , { 0 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 6 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 12 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 18 } ) } , <nl> - / * expected_output_dtypes * / { DT_INT64 } , <nl> - / * expected_output_shapes * / { PartialTensorShape ( { } ) } , <nl> - / * expected_cardinality * / 4 , <nl> - / * breakpoints * / { 0 , 1 , 5 } } ; <nl> + ParallelMapDatasetParams ParallelMapDatasetParams2 ( ) { <nl> + return ParallelMapDatasetParams ( RangeDatasetParams ( 0 , 10 , 3 ) , <nl> + / * other_arguments = * / { } , <nl> + / * num_parallel_calls = * / 2 , <nl> + / * func = * / MapFunc ( " 
XTimesTwo " , DT_INT64 ) , <nl> + / * func_lib * / { test : : function : : XTimesTwo ( ) } , <nl> + / * type_arguments = * / { } , <nl> + / * output_dtypes = * / { DT_INT64 } , <nl> + / * output_shapes = * / { PartialTensorShape ( { } ) } , <nl> + / * use_inter_op_parallelism = * / true , <nl> + / * sloppy = * / true , <nl> + / * preserve_cardinality = * / true , <nl> + / * node_name = * / kNodeName ) ; <nl> } <nl> <nl> / / test case 3 : num_parallel_calls = 3 , use_inter_op_parallelism = true , <nl> / / sloppy = false , preserve_cardinality = false , MapFunc = XTimesFour <nl> - TestCase TestCase3 ( ) { <nl> - return { <nl> - / * range_data_param * / { 0 , 10 , 3 } , <nl> - / * num_parallel_calls * / <nl> - CreateTensor < int32 > ( TensorShape ( { } ) , { 3 } ) , <nl> - / * func * / MapFunc ( " XTimesFour " , DT_INT64 ) , <nl> + ParallelMapDatasetParams ParallelMapDatasetParams3 ( ) { <nl> + return ParallelMapDatasetParams ( <nl> + RangeDatasetParams ( 0 , 10 , 3 ) , <nl> + / * other_arguments = * / { } , <nl> + / * num_parallel_calls = * / 3 , <nl> + / * func = * / MapFunc ( " XTimesFour " , DT_INT64 ) , <nl> / * func_lib * / { test : : function : : XTimesTwo ( ) , test : : function : : XTimesFour ( ) } , <nl> - / * use_inter_op_parallelism * / true , <nl> - / * sloppy * / false , <nl> - / * preserve_cardinality * / false , <nl> - / * expected_outputs * / <nl> - { CreateTensor < int64 > ( TensorShape ( { } ) , { 0 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 12 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 24 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 36 } ) } , <nl> - / * expected_output_dtypes * / { DT_INT64 } , <nl> - / * expected_output_shapes * / { PartialTensorShape ( { } ) } , <nl> - / * expected_cardinality * / 4 , <nl> - / * breakpoints * / { 0 , 1 , 5 } } ; <nl> + / * type_arguments = * / { } , <nl> + / * output_dtypes = * / { DT_INT64 } , <nl> + / * output_shapes = * / { PartialTensorShape ( { } ) } , <nl> + / * use_inter_op_parallelism = * / true , <nl> + / * sloppy = * / false , <nl> + / * preserve_cardinality = * / false , <nl> + / * node_name = * / kNodeName ) ; <nl> } <nl> <nl> / / test case 4 : num_parallel_calls = 4 , use_inter_op_parallelism = false , <nl> / / sloppy = false , preserve_cardinality = false , MapFunc = XTimesTwo <nl> - TestCase TestCase4 ( ) { <nl> - return { / * range_data_param * / { 0 , 10 , 3 } , <nl> - / * num_parallel_calls * / <nl> - CreateTensor < int32 > ( TensorShape ( { } ) , { 4 } ) , <nl> - / * func * / MapFunc ( " XTimesTwo " , DT_INT64 ) , <nl> - / * func_lib * / { test : : function : : XTimesTwo ( ) } , <nl> - / * use_inter_op_parallelism * / false , <nl> - / * sloppy * / false , <nl> - / * preserve_cardinality * / false , <nl> - / * expected_outputs * / <nl> - { CreateTensor < int64 > ( TensorShape ( { } ) , { 0 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 6 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 12 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 18 } ) } , <nl> - / * expected_output_dtypes * / { DT_INT64 } , <nl> - / * expected_output_shapes * / { PartialTensorShape ( { } ) } , <nl> - / * expected_cardinality * / 4 , <nl> - / * breakpoints * / { 0 , 1 , 5 } } ; <nl> + ParallelMapDatasetParams ParallelMapDatasetParams4 ( ) { <nl> + return ParallelMapDatasetParams ( RangeDatasetParams ( 0 , 10 , 3 ) , <nl> + / * other_arguments = * / { } , <nl> + / * num_parallel_calls = * / 4 , <nl> + / * func = * / MapFunc ( " XTimesTwo " , DT_INT64 
) , <nl> + / * func_lib * / { test : : function : : XTimesTwo ( ) } , <nl> + / * type_arguments = * / { } , <nl> + / * output_dtypes = * / { DT_INT64 } , <nl> + / * output_shapes = * / { PartialTensorShape ( { } ) } , <nl> + / * use_inter_op_parallelism = * / false , <nl> + / * sloppy = * / false , <nl> + / * preserve_cardinality = * / false , <nl> + / * node_name = * / kNodeName ) ; <nl> } <nl> <nl> / / test case 5 : num_parallel_calls = kAutotune , use_inter_op_parallelism = true , <nl> / / sloppy = true , preserve_cardinality = true , MapFunc = XTimesFour <nl> - TestCase TestCase5 ( ) { <nl> - return { <nl> - / * range_data_param * / { 0 , 10 , 3 } , <nl> - / * num_parallel_calls * / <nl> - CreateTensor < int32 > ( TensorShape ( { } ) , { model : : kAutotune } ) , <nl> - / * func * / MapFunc ( " XTimesFour " , DT_INT64 ) , <nl> + ParallelMapDatasetParams ParallelMapDatasetParams5 ( ) { <nl> + return ParallelMapDatasetParams ( <nl> + RangeDatasetParams ( 0 , 10 , 3 ) , <nl> + / * other_arguments = * / { } , <nl> + / * num_parallel_calls = * / model : : kAutotune , <nl> + / * func = * / MapFunc ( " XTimesFour " , DT_INT64 ) , <nl> / * func_lib * / { test : : function : : XTimesTwo ( ) , test : : function : : XTimesFour ( ) } , <nl> - / * use_inter_op_parallelism * / true , <nl> - / * sloppy * / true , <nl> - / * preserve_cardinality * / true , <nl> - / * expected_outputs * / <nl> - { CreateTensor < int64 > ( TensorShape ( { } ) , { 0 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 12 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 24 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 36 } ) } , <nl> - / * expected_output_dtypes * / { DT_INT64 } , <nl> - / * expected_output_shapes * / { PartialTensorShape ( { } ) } , <nl> - / * expected_cardinality * / 4 , <nl> - / * breakpoints * / { 0 , 1 , 5 } } ; <nl> + / * type_arguments = * / { } , <nl> + / * output_dtypes = * / { DT_INT64 } , <nl> + / * output_shapes = * / { PartialTensorShape ( { } ) } , <nl> + / * use_inter_op_parallelism = * / true , <nl> + / * sloppy = * / true , <nl> + / * preserve_cardinality = * / true , <nl> + / * node_name = * / kNodeName ) ; <nl> } <nl> <nl> / / test case 6 : num_parallel_calls = 4 , use_inter_op_parallelism = true , <nl> / / sloppy = false , preserve_cardinality = false , MapFunc = XTimesFour <nl> - TestCase TestCase6 ( ) { <nl> - return { <nl> - / * range_data_param * / { 0 , 10 , 3 } , <nl> - / * num_parallel_calls * / <nl> - CreateTensor < int32 > ( TensorShape ( { } ) , { 4 } ) , <nl> - / * func * / MapFunc ( " XTimesFour " , DT_INT64 ) , <nl> + ParallelMapDatasetParams ParallelMapDatasetParams6 ( ) { <nl> + return ParallelMapDatasetParams ( <nl> + RangeDatasetParams ( 0 , 10 , 3 ) , <nl> + / * other_arguments = * / { } , <nl> + / * num_parallel_calls = * / 4 , <nl> + / * func = * / MapFunc ( " XTimesFour " , DT_INT64 ) , <nl> / * func_lib * / { test : : function : : XTimesTwo ( ) , test : : function : : XTimesFour ( ) } , <nl> - / * use_inter_op_parallelism * / true , <nl> - / * sloppy * / false , <nl> - / * preserve_cardinality * / false , <nl> - / * expected_outputs * / <nl> - { CreateTensor < int64 > ( TensorShape ( { } ) , { 0 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 12 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 24 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 36 } ) } , <nl> - / * expected_output_dtypes * / { DT_INT64 } , <nl> - / * expected_output_shapes * / { PartialTensorShape ( { } ) } , <nl> - / * 
expected_cardinality * / 4 , <nl> - / * breakpoints * / { 0 , 1 , 5 } } ; <nl> + / * type_arguments = * / { } , <nl> + / * output_dtypes = * / { DT_INT64 } , <nl> + / * output_shapes = * / { PartialTensorShape ( { } ) } , <nl> + / * use_inter_op_parallelism = * / true , <nl> + / * sloppy = * / false , <nl> + / * preserve_cardinality = * / false , <nl> + / * node_name = * / kNodeName ) ; <nl> } <nl> <nl> / / TODO ( feihugis ) : make this test case work . <nl> / / test case 7 : num_parallel_calls = 2 , use_inter_op_parallelism = false , <nl> / / sloppy = false , preserve_cardinality = false , MapFunc = XTimesFour <nl> - TestCase TestCase7 ( ) { <nl> - return { <nl> - / * range_data_param * / { 0 , 10 , 3 } , <nl> - / * num_parallel_calls * / <nl> - CreateTensor < int32 > ( TensorShape ( { } ) , { 2 } ) , <nl> - / * func * / MapFunc ( " XTimesFour " , DT_INT64 ) , <nl> + ParallelMapDatasetParams ParallelMapDatasetParams7 ( ) { <nl> + return ParallelMapDatasetParams ( <nl> + RangeDatasetParams ( 0 , 10 , 3 ) , <nl> + / * other_arguments = * / { } , <nl> + / * num_parallel_calls = * / 2 , <nl> + / * func = * / MapFunc ( " XTimesFour " , DT_INT64 ) , <nl> / * func_lib * / { test : : function : : XTimesTwo ( ) , test : : function : : XTimesFour ( ) } , <nl> - / * use_inter_op_parallelism * / false , <nl> - / * sloppy * / false , <nl> - / * preserve_cardinality * / false , <nl> - / * expected_outputs * / <nl> - { CreateTensor < int64 > ( TensorShape ( { } ) , { 0 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 12 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 24 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 36 } ) } , <nl> - / * expected_output_dtypes * / { DT_INT64 } , <nl> - / * expected_output_shapes * / { PartialTensorShape ( { } ) } , <nl> - / * expected_cardinality * / 4 , <nl> - / * breakpoints * / { 0 , 1 , 5 } } ; <nl> + / * type_arguments = * / { } , <nl> + / * output_dtypes = * / { DT_INT64 } , <nl> + / * output_shapes = * / { PartialTensorShape ( { } ) } , <nl> + / * use_inter_op_parallelism = * / false , <nl> + / * sloppy = * / false , <nl> + / * preserve_cardinality = * / false , <nl> + / * node_name = * / kNodeName ) ; <nl> } <nl> <nl> / / TODO ( feihugis ) : make this test case work . 
<nl> / / test case 8 : num_parallel_calls = kAutotune , use_inter_op_parallelism = <nl> / / false , sloppy = true , preserve_cardinality = true , MapFunc = XTimesFour <nl> - TestCase TestCase8 ( ) { <nl> - return { <nl> - / * range_data_param * / { 0 , 10 , 3 } , <nl> - / * num_parallel_calls * / <nl> - CreateTensor < int32 > ( TensorShape ( { } ) , { model : : kAutotune } ) , <nl> - / * func * / MapFunc ( " XTimesFour " , DT_INT64 ) , <nl> + ParallelMapDatasetParams ParallelMapDatasetParams8 ( ) { <nl> + return ParallelMapDatasetParams ( <nl> + RangeDatasetParams ( 0 , 10 , 3 ) , <nl> + / * other_arguments = * / { } , <nl> + / * num_parallel_calls = * / model : : kAutotune , <nl> + / * func = * / MapFunc ( " XTimesFour " , DT_INT64 ) , <nl> / * func_lib * / { test : : function : : XTimesTwo ( ) , test : : function : : XTimesFour ( ) } , <nl> - / * use_inter_op_parallelism * / false , <nl> - / * sloppy * / true , <nl> - / * preserve_cardinality * / true , <nl> - / * expected_outputs * / <nl> - { CreateTensor < int64 > ( TensorShape ( { } ) , { 0 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 12 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 24 } ) , <nl> - CreateTensor < int64 > ( TensorShape ( { } ) , { 36 } ) } , <nl> - / * expected_output_dtypes * / { DT_INT64 } , <nl> - / * expected_output_shapes * / { PartialTensorShape ( { } ) } , <nl> - / * expected_cardinality * / 4 , <nl> - / * breakpoints * / { 0 , 1 , 5 } } ; <nl> + / * type_arguments = * / { } , <nl> + / * output_dtypes = * / { DT_INT64 } , <nl> + / * output_shapes = * / { PartialTensorShape ( { } ) } , <nl> + / * use_inter_op_parallelism = * / false , <nl> + / * sloppy = * / true , <nl> + / * preserve_cardinality = * / true , <nl> + / * node_name = * / kNodeName ) ; <nl> } <nl> <nl> - TestCase InvalidNumParallelCallsTestCase ( ) { <nl> - return { / * range_data_param * / { 0 , 10 , 3 } , <nl> - / * num_parallel_calls * / <nl> - CreateTensor < int32 > ( TensorShape ( { } ) , { - 4 } ) , <nl> - / * func * / MapFunc ( " XTimesTwo " , DT_INT64 ) , <nl> - / * func_lib * / { test : : function : : XTimesTwo ( ) } , <nl> - / * use_inter_op_parallelism * / true , <nl> - / * sloppy * / true , <nl> - / * preserve_cardinality * / true , <nl> - / * expected_outputs * / { } , <nl> - / * expected_output_dtypes * / { DT_INT64 } , <nl> - / * expected_output_shapes * / { PartialTensorShape ( { } ) } , <nl> - / * expected_cardinality * / - 1 , <nl> - / * breakpoints * / { 0 , 1 , 5 } } ; <nl> + ParallelMapDatasetParams ParallelMapDatasetParamsWithInvalidNumParallelCalls ( ) { <nl> + return ParallelMapDatasetParams ( RangeDatasetParams ( 0 , 10 , 3 ) , <nl> + / * other_arguments = * / { } , <nl> + / * num_parallel_calls = * / - 4 , <nl> + / * func = * / MapFunc ( " XTimesTwo " , DT_INT64 ) , <nl> + / * func_lib * / { test : : function : : XTimesTwo ( ) } , <nl> + / * type_arguments = * / { } , <nl> + / * output_dtypes = * / { DT_INT64 } , <nl> + / * output_shapes = * / { PartialTensorShape ( { } ) } , <nl> + / * use_inter_op_parallelism = * / true , <nl> + / * sloppy = * / true , <nl> + / * preserve_cardinality = * / true , <nl> + / * node_name = * / kNodeName ) ; <nl> } <nl> <nl> - class ParameterizedParallelMapDatasetOpTest <nl> - : public ParallelMapDatasetOpTest , <nl> - public : : testing : : WithParamInterface < TestCase > { } ; <nl> - <nl> - TEST_P ( ParameterizedParallelMapDatasetOpTest , GetNext ) { <nl> - int thread_num = 2 , cpu_num = 2 ; <nl> - TestCase test_case = GetParam ( ) ; <nl> - TF_ASSERT_OK ( 
InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( test_case . func_lib , cpu_num ) ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > parallel_map_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetOpKernel ( <nl> - test_case . func , test_case . expected_output_dtypes , <nl> - test_case . expected_output_shapes , test_case . use_inter_op_parallelism , <nl> - test_case . sloppy , test_case . preserve_cardinality , <nl> - & parallel_map_dataset_kernel ) ) ; <nl> - <nl> - DatasetBase * range_dataset ; <nl> - TF_ASSERT_OK ( CreateRangeDataset < int64 > ( <nl> - test_case . range_data_param . start , test_case . range_data_param . end , <nl> - test_case . range_data_param . step , " range " , & range_dataset ) ) ; <nl> - Tensor range_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - TF_ASSERT_OK ( <nl> - StoreDatasetInVariantTensor ( range_dataset , & range_dataset_tensor ) ) ; <nl> - Tensor num_parallel_calls = test_case . num_parallel_calls ; <nl> - gtl : : InlinedVector < TensorValue , 4 > parallel_map_dataset_inputs ( <nl> - { TensorValue ( & range_dataset_tensor ) , TensorValue ( & num_parallel_calls ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernelContext > parallel_map_dataset_context ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetContext ( <nl> - parallel_map_dataset_kernel . get ( ) , & parallel_map_dataset_inputs , <nl> - & parallel_map_dataset_context ) ) ; <nl> - DatasetBase * parallel_map_dataset ; <nl> - TF_ASSERT_OK ( CreateDataset ( parallel_map_dataset_kernel . get ( ) , <nl> - parallel_map_dataset_context . get ( ) , <nl> - & parallel_map_dataset ) ) ; <nl> - core : : ScopedUnref scoped_unref_map_dataset ( parallel_map_dataset ) ; <nl> - <nl> - std : : unique_ptr < IteratorContext > iterator_ctx ; <nl> - TF_ASSERT_OK ( <nl> - CreateIteratorContext ( parallel_map_dataset_context . get ( ) , & iterator_ctx ) ) ; <nl> - std : : unique_ptr < IteratorBase > iterator ; <nl> - TF_ASSERT_OK ( parallel_map_dataset - > MakeIterator ( iterator_ctx . get ( ) , <nl> - " Iterator " , & iterator ) ) ; <nl> - <nl> - bool end_of_sequence = false ; <nl> - std : : vector < Tensor > out_tensors ; <nl> - while ( ! end_of_sequence ) { <nl> - std : : vector < Tensor > next ; <nl> - TF_EXPECT_OK ( <nl> - iterator - > GetNext ( iterator_ctx . get ( ) , & next , & end_of_sequence ) ) ; <nl> - out_tensors . insert ( out_tensors . end ( ) , next . begin ( ) , next . end ( ) ) ; <nl> - } <nl> - <nl> - TF_EXPECT_OK ( ExpectEqual ( out_tensors , test_case . expected_outputs , <nl> - / * compare_order * / ! test_case . 
sloppy ) ) ; <nl> + std : : vector < GetNextTestCase < ParallelMapDatasetParams > > GetNextTestCases ( ) { <nl> + return { { / * dataset_params = * / ParallelMapDatasetParams1 ( ) , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( TensorShape { } , { { 0 } , { 6 } , { 12 } , { 18 } } ) } , <nl> + { / * dataset_params = * / ParallelMapDatasetParams2 ( ) , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( TensorShape { } , { { 0 } , { 6 } , { 12 } , { 18 } } ) } , <nl> + { / * dataset_params = * / ParallelMapDatasetParams3 ( ) , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( TensorShape { } , { { 0 } , { 12 } , { 24 } , { 36 } } ) } , <nl> + { / * dataset_params = * / ParallelMapDatasetParams4 ( ) , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( TensorShape { } , { { 0 } , { 6 } , { 12 } , { 18 } } ) } , <nl> + { / * dataset_params = * / ParallelMapDatasetParams5 ( ) , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( TensorShape { } , { { 0 } , { 12 } , { 24 } , { 36 } } ) } , <nl> + { / * dataset_params = * / <nl> + ParallelMapDatasetParams6 ( ) , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( TensorShape { } , { { 0 } , { 12 } , { 24 } , { 36 } } ) } } ; <nl> } <nl> <nl> + ITERATOR_GET_NEXT_TEST_P ( ParallelMapDatasetOpTest , ParallelMapDatasetParams , <nl> + GetNextTestCases ( ) ) <nl> + <nl> TEST_F ( ParallelMapDatasetOpTest , DatasetNodeName ) { <nl> - int thread_num = 2 , cpu_num = 2 ; <nl> - TestCase test_case = TestCase1 ( ) ; <nl> - TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( test_case . func_lib , cpu_num ) ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > parallel_map_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetOpKernel ( <nl> - test_case . func , test_case . expected_output_dtypes , <nl> - test_case . expected_output_shapes , test_case . use_inter_op_parallelism , <nl> - test_case . sloppy , test_case . preserve_cardinality , <nl> - & parallel_map_dataset_kernel ) ) ; <nl> - <nl> - DatasetBase * range_dataset ; <nl> - TF_ASSERT_OK ( CreateRangeDataset < int64 > ( <nl> - test_case . range_data_param . start , test_case . range_data_param . end , <nl> - test_case . range_data_param . step , " range " , & range_dataset ) ) ; <nl> - Tensor range_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - TF_ASSERT_OK ( <nl> - StoreDatasetInVariantTensor ( range_dataset , & range_dataset_tensor ) ) ; <nl> - Tensor num_parallel_calls = test_case . num_parallel_calls ; <nl> - gtl : : InlinedVector < TensorValue , 4 > parallel_map_dataset_inputs ( <nl> - { TensorValue ( & range_dataset_tensor ) , TensorValue ( & num_parallel_calls ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernelContext > parallel_map_dataset_context ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetContext ( <nl> - parallel_map_dataset_kernel . get ( ) , & parallel_map_dataset_inputs , <nl> - & parallel_map_dataset_context ) ) ; <nl> - DatasetBase * parallel_map_dataset ; <nl> - TF_ASSERT_OK ( CreateDataset ( parallel_map_dataset_kernel . get ( ) , <nl> - parallel_map_dataset_context . 
get ( ) , <nl> - & parallel_map_dataset ) ) ; <nl> - core : : ScopedUnref scoped_unref_map_dataset ( parallel_map_dataset ) ; <nl> - <nl> - EXPECT_EQ ( parallel_map_dataset - > node_name ( ) , kNodeName ) ; <nl> + auto dataset_params = ParallelMapDatasetParams1 ( ) ; <nl> + TF_ASSERT_OK ( Initialize ( dataset_params ) ) ; <nl> + TF_ASSERT_OK ( CheckDatasetNodeName ( dataset_params . node_name ( ) ) ) ; <nl> } <nl> <nl> TEST_F ( ParallelMapDatasetOpTest , DatasetTypeString ) { <nl> - int thread_num = 2 , cpu_num = 2 ; <nl> - TestCase test_case = TestCase1 ( ) ; <nl> - TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( test_case . func_lib , cpu_num ) ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > parallel_map_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetOpKernel ( <nl> - test_case . func , test_case . expected_output_dtypes , <nl> - test_case . expected_output_shapes , test_case . use_inter_op_parallelism , <nl> - test_case . sloppy , test_case . preserve_cardinality , <nl> - & parallel_map_dataset_kernel ) ) ; <nl> - <nl> - DatasetBase * range_dataset ; <nl> - TF_ASSERT_OK ( CreateRangeDataset < int64 > ( <nl> - test_case . range_data_param . start , test_case . range_data_param . end , <nl> - test_case . range_data_param . step , " range " , & range_dataset ) ) ; <nl> - Tensor range_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - TF_ASSERT_OK ( <nl> - StoreDatasetInVariantTensor ( range_dataset , & range_dataset_tensor ) ) ; <nl> - Tensor num_parallel_calls = test_case . num_parallel_calls ; <nl> - gtl : : InlinedVector < TensorValue , 4 > parallel_map_dataset_inputs ( <nl> - { TensorValue ( & range_dataset_tensor ) , TensorValue ( & num_parallel_calls ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernelContext > parallel_map_dataset_context ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetContext ( <nl> - parallel_map_dataset_kernel . get ( ) , & parallel_map_dataset_inputs , <nl> - & parallel_map_dataset_context ) ) ; <nl> - DatasetBase * parallel_map_dataset ; <nl> - TF_ASSERT_OK ( CreateDataset ( parallel_map_dataset_kernel . get ( ) , <nl> - parallel_map_dataset_context . get ( ) , <nl> - & parallel_map_dataset ) ) ; <nl> - core : : ScopedUnref scoped_unref_map_dataset ( parallel_map_dataset ) ; <nl> - <nl> - EXPECT_EQ ( parallel_map_dataset - > type_string ( ) , <nl> - name_utils : : OpName ( ParallelMapDatasetOp : : kDatasetType ) ) ; <nl> + auto dataset_params = ParallelMapDatasetParams1 ( ) ; <nl> + TF_ASSERT_OK ( Initialize ( dataset_params ) ) ; <nl> + TF_ASSERT_OK ( CheckDatasetTypeString ( <nl> + name_utils : : OpName ( ParallelMapDatasetOp : : kDatasetType ) ) ) ; <nl> } <nl> <nl> - TEST_P ( ParameterizedParallelMapDatasetOpTest , DatasetOutputDtypes ) { <nl> - int thread_num = 2 , cpu_num = 2 ; <nl> - TestCase test_case = GetParam ( ) ; <nl> - TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( test_case . func_lib , cpu_num ) ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > parallel_map_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetOpKernel ( <nl> - test_case . func , test_case . expected_output_dtypes , <nl> - test_case . expected_output_shapes , test_case . use_inter_op_parallelism , <nl> - test_case . sloppy , test_case . preserve_cardinality , <nl> - & parallel_map_dataset_kernel ) ) ; <nl> - <nl> - DatasetBase * range_dataset ; <nl> - TF_ASSERT_OK ( CreateRangeDataset < int64 > ( <nl> - test_case . range_data_param . 
start , test_case . range_data_param . end , <nl> - test_case . range_data_param . step , " range " , & range_dataset ) ) ; <nl> - Tensor range_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - TF_ASSERT_OK ( <nl> - StoreDatasetInVariantTensor ( range_dataset , & range_dataset_tensor ) ) ; <nl> - Tensor num_parallel_calls = test_case . num_parallel_calls ; <nl> - gtl : : InlinedVector < TensorValue , 4 > parallel_map_dataset_inputs ( <nl> - { TensorValue ( & range_dataset_tensor ) , TensorValue ( & num_parallel_calls ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernelContext > parallel_map_dataset_context ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetContext ( <nl> - parallel_map_dataset_kernel . get ( ) , & parallel_map_dataset_inputs , <nl> - & parallel_map_dataset_context ) ) ; <nl> - DatasetBase * parallel_map_dataset ; <nl> - TF_ASSERT_OK ( CreateDataset ( parallel_map_dataset_kernel . get ( ) , <nl> - parallel_map_dataset_context . get ( ) , <nl> - & parallel_map_dataset ) ) ; <nl> - core : : ScopedUnref scoped_unref_map_dataset ( parallel_map_dataset ) ; <nl> - <nl> - TF_EXPECT_OK ( VerifyTypesMatch ( parallel_map_dataset - > output_dtypes ( ) , <nl> - test_case . expected_output_dtypes ) ) ; <nl> + TEST_F ( ParallelMapDatasetOpTest , DatasetOutputDtypes ) { <nl> + auto dataset_params = ParallelMapDatasetParams1 ( ) ; <nl> + TF_ASSERT_OK ( Initialize ( dataset_params ) ) ; <nl> + TF_ASSERT_OK ( CheckDatasetOutputDtypes ( { DT_INT64 } ) ) ; <nl> } <nl> <nl> - TEST_P ( ParameterizedParallelMapDatasetOpTest , DatasetOutputShapes ) { <nl> - int thread_num = 2 , cpu_num = 2 ; <nl> - TestCase test_case = GetParam ( ) ; <nl> - TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( test_case . func_lib , cpu_num ) ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > parallel_map_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetOpKernel ( <nl> - test_case . func , test_case . expected_output_dtypes , <nl> - test_case . expected_output_shapes , test_case . use_inter_op_parallelism , <nl> - test_case . sloppy , test_case . preserve_cardinality , <nl> - & parallel_map_dataset_kernel ) ) ; <nl> - <nl> - DatasetBase * range_dataset ; <nl> - TF_ASSERT_OK ( CreateRangeDataset < int64 > ( <nl> - test_case . range_data_param . start , test_case . range_data_param . end , <nl> - test_case . range_data_param . step , " range " , & range_dataset ) ) ; <nl> - Tensor range_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - TF_ASSERT_OK ( <nl> - StoreDatasetInVariantTensor ( range_dataset , & range_dataset_tensor ) ) ; <nl> - Tensor num_parallel_calls = test_case . num_parallel_calls ; <nl> - gtl : : InlinedVector < TensorValue , 4 > parallel_map_dataset_inputs ( <nl> - { TensorValue ( & range_dataset_tensor ) , TensorValue ( & num_parallel_calls ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernelContext > parallel_map_dataset_context ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetContext ( <nl> - parallel_map_dataset_kernel . get ( ) , & parallel_map_dataset_inputs , <nl> - & parallel_map_dataset_context ) ) ; <nl> - DatasetBase * parallel_map_dataset ; <nl> - TF_ASSERT_OK ( CreateDataset ( parallel_map_dataset_kernel . get ( ) , <nl> - parallel_map_dataset_context . get ( ) , <nl> - & parallel_map_dataset ) ) ; <nl> - core : : ScopedUnref scoped_unref_map_dataset ( parallel_map_dataset ) ; <nl> - <nl> - TF_EXPECT_OK ( VerifyShapesCompatible ( parallel_map_dataset - > output_shapes ( ) , <nl> - test_case . 
expected_output_shapes ) ) ; <nl> + TEST_F ( ParallelMapDatasetOpTest , DatasetOutputShapes ) { <nl> + auto dataset_params = ParallelMapDatasetParams1 ( ) ; <nl> + TF_ASSERT_OK ( Initialize ( dataset_params ) ) ; <nl> + TF_ASSERT_OK ( CheckDatasetOutputShapes ( { PartialTensorShape ( { } ) } ) ) ; <nl> } <nl> <nl> - TEST_P ( ParameterizedParallelMapDatasetOpTest , Cardinality ) { <nl> - int thread_num = 2 , cpu_num = 2 ; <nl> - TestCase test_case = GetParam ( ) ; <nl> - TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( test_case . func_lib , cpu_num ) ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > parallel_map_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetOpKernel ( <nl> - test_case . func , test_case . expected_output_dtypes , <nl> - test_case . expected_output_shapes , test_case . use_inter_op_parallelism , <nl> - test_case . sloppy , test_case . preserve_cardinality , <nl> - & parallel_map_dataset_kernel ) ) ; <nl> - <nl> - DatasetBase * range_dataset ; <nl> - TF_ASSERT_OK ( CreateRangeDataset < int64 > ( <nl> - test_case . range_data_param . start , test_case . range_data_param . end , <nl> - test_case . range_data_param . step , " range " , & range_dataset ) ) ; <nl> - Tensor range_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - TF_ASSERT_OK ( <nl> - StoreDatasetInVariantTensor ( range_dataset , & range_dataset_tensor ) ) ; <nl> - Tensor num_parallel_calls = test_case . num_parallel_calls ; <nl> - gtl : : InlinedVector < TensorValue , 4 > parallel_map_dataset_inputs ( <nl> - { TensorValue ( & range_dataset_tensor ) , TensorValue ( & num_parallel_calls ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernelContext > parallel_map_dataset_context ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetContext ( <nl> - parallel_map_dataset_kernel . get ( ) , & parallel_map_dataset_inputs , <nl> - & parallel_map_dataset_context ) ) ; <nl> - DatasetBase * parallel_map_dataset ; <nl> - TF_ASSERT_OK ( CreateDataset ( parallel_map_dataset_kernel . get ( ) , <nl> - parallel_map_dataset_context . get ( ) , <nl> - & parallel_map_dataset ) ) ; <nl> - core : : ScopedUnref scoped_unref_map_dataset ( parallel_map_dataset ) ; <nl> - <nl> - EXPECT_EQ ( parallel_map_dataset - > Cardinality ( ) , <nl> - test_case . expected_cardinality ) ; <nl> + std : : vector < CardinalityTestCase < ParallelMapDatasetParams > > <nl> + CardinalityTestCases ( ) { <nl> + return { { / * dataset_params = * / ParallelMapDatasetParams1 ( ) , <nl> + / * expected_cardinality = * / 4 } , <nl> + { / * dataset_params = * / ParallelMapDatasetParams2 ( ) , <nl> + / * expected_cardinality = * / 4 } , <nl> + { / * dataset_params = * / ParallelMapDatasetParams3 ( ) , <nl> + / * expected_cardinality = * / 4 } , <nl> + { / * dataset_params = * / ParallelMapDatasetParams4 ( ) , <nl> + / * expected_cardinality = * / 4 } , <nl> + { / * dataset_params = * / ParallelMapDatasetParams5 ( ) , <nl> + / * expected_cardinality = * / 4 } , <nl> + { / * dataset_params = * / ParallelMapDatasetParams6 ( ) , <nl> + / * expected_cardinality = * / 4 } } ; <nl> } <nl> <nl> - TEST_P ( ParameterizedParallelMapDatasetOpTest , IteratorOutputDtypes ) { <nl> - int thread_num = 2 , cpu_num = 2 ; <nl> - TestCase test_case = GetParam ( ) ; <nl> - TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( test_case . 
func_lib , cpu_num ) ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > parallel_map_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetOpKernel ( <nl> - test_case . func , test_case . expected_output_dtypes , <nl> - test_case . expected_output_shapes , test_case . use_inter_op_parallelism , <nl> - test_case . sloppy , test_case . preserve_cardinality , <nl> - & parallel_map_dataset_kernel ) ) ; <nl> - <nl> - DatasetBase * range_dataset ; <nl> - TF_ASSERT_OK ( CreateRangeDataset < int64 > ( <nl> - test_case . range_data_param . start , test_case . range_data_param . end , <nl> - test_case . range_data_param . step , " range " , & range_dataset ) ) ; <nl> - Tensor range_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - TF_ASSERT_OK ( <nl> - StoreDatasetInVariantTensor ( range_dataset , & range_dataset_tensor ) ) ; <nl> - Tensor num_parallel_calls = test_case . num_parallel_calls ; <nl> - gtl : : InlinedVector < TensorValue , 4 > parallel_map_dataset_inputs ( <nl> - { TensorValue ( & range_dataset_tensor ) , TensorValue ( & num_parallel_calls ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernelContext > parallel_map_dataset_context ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetContext ( <nl> - parallel_map_dataset_kernel . get ( ) , & parallel_map_dataset_inputs , <nl> - & parallel_map_dataset_context ) ) ; <nl> - DatasetBase * parallel_map_dataset ; <nl> - TF_ASSERT_OK ( CreateDataset ( parallel_map_dataset_kernel . get ( ) , <nl> - parallel_map_dataset_context . get ( ) , <nl> - & parallel_map_dataset ) ) ; <nl> - core : : ScopedUnref scoped_unref_map_dataset ( parallel_map_dataset ) ; <nl> - <nl> - std : : unique_ptr < IteratorContext > iterator_ctx ; <nl> - TF_ASSERT_OK ( <nl> - CreateIteratorContext ( parallel_map_dataset_context . get ( ) , & iterator_ctx ) ) ; <nl> - std : : unique_ptr < IteratorBase > iterator ; <nl> - TF_ASSERT_OK ( parallel_map_dataset - > MakeIterator ( iterator_ctx . get ( ) , <nl> - " Iterator " , & iterator ) ) ; <nl> - <nl> - TF_EXPECT_OK ( VerifyTypesMatch ( iterator - > output_dtypes ( ) , <nl> - test_case . expected_output_dtypes ) ) ; <nl> + DATASET_CARDINALITY_TEST_P ( ParallelMapDatasetOpTest , ParallelMapDatasetParams , <nl> + CardinalityTestCases ( ) ) <nl> + <nl> + TEST_F ( ParallelMapDatasetOpTest , IteratorOutputDtypes ) { <nl> + auto dataset_params = ParallelMapDatasetParams1 ( ) ; <nl> + TF_ASSERT_OK ( Initialize ( dataset_params ) ) ; <nl> + TF_ASSERT_OK ( CheckIteratorOutputDtypes ( { DT_INT64 } ) ) ; <nl> } <nl> <nl> - TEST_P ( ParameterizedParallelMapDatasetOpTest , IteratorOutputShapes ) { <nl> - int thread_num = 2 , cpu_num = 2 ; <nl> - TestCase test_case = GetParam ( ) ; <nl> - TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( test_case . func_lib , cpu_num ) ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > parallel_map_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetOpKernel ( <nl> - test_case . func , test_case . expected_output_dtypes , <nl> - test_case . expected_output_shapes , test_case . use_inter_op_parallelism , <nl> - test_case . sloppy , test_case . preserve_cardinality , <nl> - & parallel_map_dataset_kernel ) ) ; <nl> - <nl> - DatasetBase * range_dataset ; <nl> - TF_ASSERT_OK ( CreateRangeDataset < int64 > ( <nl> - test_case . range_data_param . start , test_case . range_data_param . end , <nl> - test_case . range_data_param . 
step , " range " , & range_dataset ) ) ; <nl> - Tensor range_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - TF_ASSERT_OK ( <nl> - StoreDatasetInVariantTensor ( range_dataset , & range_dataset_tensor ) ) ; <nl> - Tensor num_parallel_calls = test_case . num_parallel_calls ; <nl> - gtl : : InlinedVector < TensorValue , 4 > parallel_map_dataset_inputs ( <nl> - { TensorValue ( & range_dataset_tensor ) , TensorValue ( & num_parallel_calls ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernelContext > parallel_map_dataset_context ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetContext ( <nl> - parallel_map_dataset_kernel . get ( ) , & parallel_map_dataset_inputs , <nl> - & parallel_map_dataset_context ) ) ; <nl> - DatasetBase * parallel_map_dataset ; <nl> - TF_ASSERT_OK ( CreateDataset ( parallel_map_dataset_kernel . get ( ) , <nl> - parallel_map_dataset_context . get ( ) , <nl> - & parallel_map_dataset ) ) ; <nl> - core : : ScopedUnref scoped_unref_map_dataset ( parallel_map_dataset ) ; <nl> - <nl> - std : : unique_ptr < IteratorContext > iterator_ctx ; <nl> - TF_ASSERT_OK ( <nl> - CreateIteratorContext ( parallel_map_dataset_context . get ( ) , & iterator_ctx ) ) ; <nl> - std : : unique_ptr < IteratorBase > iterator ; <nl> - TF_ASSERT_OK ( parallel_map_dataset - > MakeIterator ( iterator_ctx . get ( ) , <nl> - " Iterator " , & iterator ) ) ; <nl> - <nl> - TF_EXPECT_OK ( VerifyShapesCompatible ( iterator - > output_shapes ( ) , <nl> - test_case . expected_output_shapes ) ) ; <nl> + TEST_F ( ParallelMapDatasetOpTest , IteratorOutputShapes ) { <nl> + auto dataset_params = ParallelMapDatasetParams1 ( ) ; <nl> + TF_ASSERT_OK ( Initialize ( dataset_params ) ) ; <nl> + TF_ASSERT_OK ( CheckIteratorOutputShapes ( { PartialTensorShape ( { } ) } ) ) ; <nl> } <nl> <nl> - TEST_F ( ParallelMapDatasetOpTest , IteratorOutputPrefix ) { <nl> - int thread_num = 2 , cpu_num = 2 ; <nl> - TestCase test_case = TestCase1 ( ) ; <nl> - TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( test_case . func_lib , cpu_num ) ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > parallel_map_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetOpKernel ( <nl> - test_case . func , test_case . expected_output_dtypes , <nl> - test_case . expected_output_shapes , test_case . use_inter_op_parallelism , <nl> - test_case . sloppy , test_case . preserve_cardinality , <nl> - & parallel_map_dataset_kernel ) ) ; <nl> - <nl> - DatasetBase * range_dataset ; <nl> - TF_ASSERT_OK ( CreateRangeDataset < int64 > ( <nl> - test_case . range_data_param . start , test_case . range_data_param . end , <nl> - test_case . range_data_param . step , " range " , & range_dataset ) ) ; <nl> - Tensor range_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - TF_ASSERT_OK ( <nl> - StoreDatasetInVariantTensor ( range_dataset , & range_dataset_tensor ) ) ; <nl> - Tensor num_parallel_calls = test_case . num_parallel_calls ; <nl> - gtl : : InlinedVector < TensorValue , 4 > parallel_map_dataset_inputs ( <nl> - { TensorValue ( & range_dataset_tensor ) , TensorValue ( & num_parallel_calls ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernelContext > parallel_map_dataset_context ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetContext ( <nl> - parallel_map_dataset_kernel . get ( ) , & parallel_map_dataset_inputs , <nl> - & parallel_map_dataset_context ) ) ; <nl> - DatasetBase * parallel_map_dataset ; <nl> - TF_ASSERT_OK ( CreateDataset ( parallel_map_dataset_kernel . 
get ( ) , <nl> - parallel_map_dataset_context . get ( ) , <nl> - & parallel_map_dataset ) ) ; <nl> - core : : ScopedUnref scoped_unref_map_dataset ( parallel_map_dataset ) ; <nl> - <nl> - std : : unique_ptr < IteratorContext > iterator_ctx ; <nl> - TF_ASSERT_OK ( <nl> - CreateIteratorContext ( parallel_map_dataset_context . get ( ) , & iterator_ctx ) ) ; <nl> - std : : unique_ptr < IteratorBase > iterator ; <nl> - TF_ASSERT_OK ( parallel_map_dataset - > MakeIterator ( iterator_ctx . get ( ) , <nl> - " Iterator " , & iterator ) ) ; <nl> - <nl> - EXPECT_EQ ( iterator - > prefix ( ) , <nl> - name_utils : : IteratorPrefix ( ParallelMapDatasetOp : : kDatasetType , <nl> - " Iterator " ) ) ; <nl> + TEST_F ( ParallelMapDatasetOpTest , IteratorPrefix ) { <nl> + auto dataset_params = ParallelMapDatasetParams1 ( ) ; <nl> + TF_ASSERT_OK ( Initialize ( dataset_params ) ) ; <nl> + TF_ASSERT_OK ( CheckIteratorPrefix ( name_utils : : IteratorPrefix ( <nl> + ParallelMapDatasetOp : : kDatasetType , dataset_params . iterator_prefix ( ) ) ) ) ; <nl> } <nl> <nl> - TEST_P ( ParameterizedParallelMapDatasetOpTest , Roundtrip ) { <nl> - int thread_num = 3 , cpu_num = 2 ; <nl> - TestCase test_case = GetParam ( ) ; <nl> - TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( test_case . func_lib , cpu_num ) ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > parallel_map_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetOpKernel ( <nl> - test_case . func , test_case . expected_output_dtypes , <nl> - test_case . expected_output_shapes , test_case . use_inter_op_parallelism , <nl> - test_case . sloppy , test_case . preserve_cardinality , <nl> - & parallel_map_dataset_kernel ) ) ; <nl> - <nl> - DatasetBase * range_dataset ; <nl> - TF_ASSERT_OK ( CreateRangeDataset < int64 > ( <nl> - test_case . range_data_param . start , test_case . range_data_param . end , <nl> - test_case . range_data_param . step , " range " , & range_dataset ) ) ; <nl> - Tensor range_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - TF_ASSERT_OK ( <nl> - StoreDatasetInVariantTensor ( range_dataset , & range_dataset_tensor ) ) ; <nl> - Tensor num_parallel_calls = test_case . num_parallel_calls ; <nl> - gtl : : InlinedVector < TensorValue , 4 > parallel_map_dataset_inputs ( <nl> - { TensorValue ( & range_dataset_tensor ) , TensorValue ( & num_parallel_calls ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernelContext > parallel_map_dataset_context ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetContext ( <nl> - parallel_map_dataset_kernel . get ( ) , & parallel_map_dataset_inputs , <nl> - & parallel_map_dataset_context ) ) ; <nl> - DatasetBase * parallel_map_dataset ; <nl> - TF_ASSERT_OK ( CreateDataset ( parallel_map_dataset_kernel . get ( ) , <nl> - parallel_map_dataset_context . get ( ) , <nl> - & parallel_map_dataset ) ) ; <nl> - core : : ScopedUnref scoped_unref_map_dataset ( parallel_map_dataset ) ; <nl> - <nl> - std : : unique_ptr < IteratorContext > iterator_ctx ; <nl> - TF_ASSERT_OK ( <nl> - CreateIteratorContext ( parallel_map_dataset_context . get ( ) , & iterator_ctx ) ) ; <nl> - std : : unique_ptr < IteratorBase > iterator ; <nl> - TF_ASSERT_OK ( parallel_map_dataset - > MakeIterator ( iterator_ctx . 
get ( ) , <nl> - " Iterator " , & iterator ) ) ; <nl> + std : : vector < IteratorSaveAndRestoreTestCase < ParallelMapDatasetParams > > <nl> + IteratorSaveAndRestoreTestCases ( ) { <nl> + return { { / * dataset_params = * / ParallelMapDatasetParams1 ( ) , <nl> + / * breakpoints = * / { 0 , 1 , 5 } , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( TensorShape { } , { { 0 } , { 6 } , { 12 } , { 18 } } ) } , <nl> + { / * dataset_params = * / ParallelMapDatasetParams2 ( ) , <nl> + / * breakpoints = * / { 0 , 1 , 5 } , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( TensorShape { } , { { 0 } , { 6 } , { 12 } , { 18 } } ) } , <nl> + { / * dataset_params = * / ParallelMapDatasetParams3 ( ) , <nl> + / * breakpoints = * / { 0 , 1 , 5 } , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( TensorShape { } , { { 0 } , { 12 } , { 24 } , { 36 } } ) } , <nl> + { / * dataset_params = * / ParallelMapDatasetParams4 ( ) , <nl> + / * breakpoints = * / { 0 , 1 , 5 } , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( TensorShape { } , { { 0 } , { 6 } , { 12 } , { 18 } } ) } , <nl> + { / * dataset_params = * / ParallelMapDatasetParams5 ( ) , <nl> + / * breakpoints = * / { 0 , 1 , 5 } , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( TensorShape { } , { { 0 } , { 12 } , { 24 } , { 36 } } ) } , <nl> + { / * dataset_params = * / <nl> + ParallelMapDatasetParams6 ( ) , <nl> + / * breakpoints = * / { 0 , 1 , 5 } , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( TensorShape { } , { { 0 } , { 12 } , { 24 } , { 36 } } ) } } ; <nl> + } <nl> + <nl> + class ParameterizedIteratorSaveAndRestoreTest <nl> + : public ParallelMapDatasetOpTest , <nl> + public : : testing : : WithParamInterface < <nl> + IteratorSaveAndRestoreTestCase < ParallelMapDatasetParams > > { } ; <nl> + <nl> + TEST_P ( ParameterizedIteratorSaveAndRestoreTest , Roundtrip ) { <nl> + auto test_case = GetParam ( ) ; <nl> + TF_ASSERT_OK ( Initialize ( test_case . dataset_params ) ) ; <nl> <nl> std : : unique_ptr < SerializationContext > serialization_ctx ; <nl> TF_ASSERT_OK ( CreateSerializationContext ( & serialization_ctx ) ) ; <nl> - <nl> bool end_of_sequence = false ; <nl> std : : vector < Tensor > out_tensors ; <nl> int cur_iteration = 0 ; <nl> - const std : : vector < int > & breakpoints = test_case . breakpoints ; <nl> - for ( int breakpoint : breakpoints ) { <nl> + for ( int breakpoint : test_case . breakpoints ) { <nl> VariantTensorData data ; <nl> VariantTensorDataWriter writer ( & data ) ; <nl> - TF_EXPECT_OK ( iterator - > Save ( serialization_ctx . get ( ) , & writer ) ) ; <nl> + TF_EXPECT_OK ( iterator_ - > Save ( serialization_ctx . get ( ) , & writer ) ) ; <nl> TF_EXPECT_OK ( writer . Flush ( ) ) ; <nl> VariantTensorDataReader reader ( & data ) ; <nl> - TF_EXPECT_OK ( RestoreIterator ( iterator_ctx . get ( ) , & reader , " Iterator " , <nl> - * parallel_map_dataset , & iterator ) ) ; <nl> + TF_EXPECT_OK ( RestoreIterator ( iterator_ctx_ . get ( ) , & reader , <nl> + test_case . dataset_params . iterator_prefix ( ) , <nl> + * dataset_ , & iterator_ ) ) ; <nl> <nl> while ( cur_iteration < = breakpoint ) { <nl> std : : vector < Tensor > next ; <nl> TF_EXPECT_OK ( <nl> - iterator - > GetNext ( iterator_ctx . get ( ) , & next , & end_of_sequence ) ) ; <nl> + iterator_ - > GetNext ( iterator_ctx_ . get ( ) , & next , & end_of_sequence ) ) ; <nl> out_tensors . insert ( out_tensors . end ( ) , next . begin ( ) , next . 
end ( ) ) ; <nl> cur_iteration + + ; <nl> } <nl> } <nl> <nl> - TF_EXPECT_OK ( ExpectEqual ( out_tensors , test_case . expected_outputs , <nl> - / * compare_order * / ! test_case . sloppy ) ) ; <nl> + TF_EXPECT_OK ( <nl> + ExpectEqual ( out_tensors , test_case . expected_outputs , <nl> + / * compare_order = * / ! test_case . dataset_params . sloppy ( ) ) ) ; <nl> } <nl> <nl> + INSTANTIATE_TEST_CASE_P ( ParallelMapDatasetOpTest , <nl> + ParameterizedIteratorSaveAndRestoreTest , <nl> + : : testing : : ValuesIn ( IteratorSaveAndRestoreTestCases ( ) ) ) ; <nl> + <nl> TEST_F ( ParallelMapDatasetOpTest , InvalidNumParallelCalls ) { <nl> - int thread_num = 2 , cpu_num = 2 ; <nl> - TestCase test_case = InvalidNumParallelCallsTestCase ( ) ; <nl> - TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( test_case . func_lib , cpu_num ) ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > parallel_map_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetOpKernel ( <nl> - test_case . func , test_case . expected_output_dtypes , <nl> - test_case . expected_output_shapes , test_case . use_inter_op_parallelism , <nl> - test_case . sloppy , test_case . preserve_cardinality , <nl> - & parallel_map_dataset_kernel ) ) ; <nl> - <nl> - DatasetBase * range_dataset ; <nl> - TF_ASSERT_OK ( CreateRangeDataset < int64 > ( <nl> - test_case . range_data_param . start , test_case . range_data_param . end , <nl> - test_case . range_data_param . step , " range " , & range_dataset ) ) ; <nl> - Tensor range_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - TF_ASSERT_OK ( <nl> - StoreDatasetInVariantTensor ( range_dataset , & range_dataset_tensor ) ) ; <nl> - Tensor num_parallel_calls = test_case . num_parallel_calls ; <nl> - gtl : : InlinedVector < TensorValue , 4 > parallel_map_dataset_inputs ( <nl> - { TensorValue ( & range_dataset_tensor ) , TensorValue ( & num_parallel_calls ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernelContext > parallel_map_dataset_context ; <nl> - TF_ASSERT_OK ( CreateParallelMapDatasetContext ( <nl> - parallel_map_dataset_kernel . get ( ) , & parallel_map_dataset_inputs , <nl> - & parallel_map_dataset_context ) ) ; <nl> - DatasetBase * parallel_map_dataset ; <nl> - EXPECT_EQ ( <nl> - CreateDataset ( parallel_map_dataset_kernel . get ( ) , <nl> - parallel_map_dataset_context . get ( ) , & parallel_map_dataset ) <nl> - . code ( ) , <nl> - tensorflow : : error : : INVALID_ARGUMENT ) ; <nl> + auto dataset_params = ParallelMapDatasetParamsWithInvalidNumParallelCalls ( ) ; <nl> + EXPECT_EQ ( Initialize ( dataset_params ) . code ( ) , <nl> + tensorflow : : error : : INVALID_ARGUMENT ) ; <nl> } <nl> <nl> - INSTANTIATE_TEST_SUITE_P ( ParallelMapDatasetOpTest , <nl> - ParameterizedParallelMapDatasetOpTest , <nl> - : : testing : : ValuesIn ( std : : vector < TestCase > ( <nl> - { TestCase1 ( ) , TestCase2 ( ) , TestCase3 ( ) , <nl> - TestCase4 ( ) , TestCase5 ( ) , TestCase6 ( ) } ) ) ) ; <nl> - <nl> } / / namespace <nl> } / / namespace data <nl> } / / namespace tensorflow <nl>
Refactor ParallelMapDatasetOpTest
tensorflow/tensorflow
05f474c6041e4e73bd578e274f828f559db376f0
2019-10-25T22:17:49Z
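The refactored test cases above only list expected outputs, so as a rough cross-check, here is the equivalent pipeline expressed with the Python tf.data API. The map function and the parallelism level are assumptions (the `ParallelMapDatasetParamsN()` constructors are not part of this diff); they are chosen so that a range over 0..10 with step 3 produces the `{0, 6, 12, 18}` sequence expected from `ParallelMapDatasetParams1()`.

```python
# Illustrative only: a Python-level pipeline consistent with the expected
# outputs of ParallelMapDatasetParams1(). The map function (x * 2) and
# num_parallel_calls=2 are assumptions, not taken from the C++ params.
import tensorflow as tf

dataset = tf.data.Dataset.range(0, 10, 3)      # yields 0, 3, 6, 9
dataset = dataset.map(lambda x: x * 2,         # hypothetical map function
                      num_parallel_calls=2)    # assumed parallelism
for element in dataset:
    print(int(element))                        # 0, 6, 12, 18
```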
mmm a / README . md <nl> ppp b / README . md <nl> Git user attention <nl> 2 . After cloning the repo , please execute ` download - deps . py ` to download and install dependencies . <nl> <nl> $ cd cocos2d - x <nl> - $ python download - deps . py <nl> + cocos2d - x $ python download - deps . py <nl> <nl> 3 . After running ` download - deps . py ` . <nl> <nl> - $ cd cocos2d - x <nl> - $ git submodule update - - init <nl> + cocos2d - x $ git submodule update - - init <nl> <nl> How to start a new game <nl> mmmmmmmmmmmmmmmmmmmmm - - <nl>
Merge pull request from oscr / fix_readme
cocos2d/cocos2d-x
4d464999f451f7be76f3fc1cf1faed6090e50d12
2015-06-12T01:56:11Z
mmm a / admin / static / coffee / body . coffee <nl> ppp b / admin / static / coffee / body . coffee <nl> class MainContainer extends Backbone . View <nl> <nl> fetch_data : ( server_uuid ) = > <nl> query = r . expr <nl> - tables : r . db ( system_db ) . table ( ' table_config ' ) . merge ( { id : r . row ( " id " ) } ) . pluck ( ' db ' , ' name ' , ' id ' ) . coerceTo ( " ARRAY " ) <nl> - servers : r . db ( system_db ) . table ( ' server_config ' ) . merge ( { id : r . row ( " id " ) } ) . pluck ( ' name ' , ' id ' ) . coerceTo ( " ARRAY " ) <nl> + tables : r . db ( system_db ) . table ( ' table_config ' ) . merge ( ( row ) - > id : row ( " id " ) ) . pluck ( ' db ' , ' name ' , ' id ' ) . coerceTo ( " ARRAY " ) <nl> + servers : r . db ( system_db ) . table ( ' server_config ' ) . merge ( ( row ) - > id : row ( " id " ) ) . pluck ( ' name ' , ' id ' ) . coerceTo ( " ARRAY " ) <nl> issues : driver . queries . issues_with_ids ( ) <nl> num_issues : r . db ( system_db ) . table ( ' current_issues ' ) . count ( ) <nl> num_servers : r . db ( system_db ) . table ( ' server_config ' ) . count ( ) <nl>
Fixed incorrect r . row usage in webui
rethinkdb/rethinkdb
6aeb60a618c86b60bc068767beced7adb5e9fde4
2015-03-19T21:35:03Z
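The fix above swaps the implicit `r.row` term inside `merge` for an explicit row function. `r.row` is only valid in unambiguous, non-nested contexts, and here the `merge` calls sit inside subqueries that are themselves wrapped in a single `r.expr` object, which is the likely reason the implicit form misbehaved. A sketch of the same pattern with the RethinkDB Python driver (the driver choice and the use of the `rethinkdb` system database name are for illustration only):

```python
# Same fix pattern, sketched with the RethinkDB Python driver.
import rethinkdb as r

# Implicit-row form that can become ambiguous once queries are nested:
#   .merge({'id': r.row['id']})

# Explicit row-function form, matching the CoffeeScript fix above:
tables = (r.db('rethinkdb').table('table_config')
          .merge(lambda row: {'id': row['id']})
          .pluck('db', 'name', 'id')
          .coerce_to('array'))
```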
mmm a / src / mongo / SConscript <nl> ppp b / src / mongo / SConscript <nl> env . StaticLibrary ( " coredb " , [ <nl> " db / commands / hashcmd . cpp " , <nl> " db / commands / isself . cpp " , <nl> " db / commands / mr_common . cpp " , <nl> + " db / commands / rename_collection_common . cpp " , <nl> " db / commands / server_status . cpp " , <nl> " db / commands / parameters . cpp " , <nl> " db / pipeline / pipeline . cpp " , <nl> mmm a / src / mongo / db / auth / action_types . txt <nl> ppp b / src / mongo / db / auth / action_types . txt <nl> <nl> " captrunc " , <nl> " clean " , <nl> " clone " , <nl> + " cloneCollectionLocalSource " , <nl> + " cloneCollectionTarget " , <nl> " closeAllDatabases " , <nl> " collMod " , <nl> " collStats " , <nl> <nl> " reIndex " , <nl> " remove " , <nl> " removeShard " , <nl> - " renameCollection " , <nl> + " renameCollectionSameDB " , <nl> " repairDatabase " , <nl> " replSetElect " , <nl> " replSetFreeze " , <nl> mmm a / src / mongo / db / auth / authorization_manager . cpp <nl> ppp b / src / mongo / db / auth / authorization_manager . cpp <nl> namespace { <nl> / / Read role <nl> / / TODO : Remove OLD_READ once commands require the proper actions <nl> readRoleActions . addAction ( ActionType : : oldRead ) ; <nl> + readWriteRoleActions . addAction ( ActionType : : cloneCollectionLocalSource ) ; <nl> readRoleActions . addAction ( ActionType : : collStats ) ; <nl> readRoleActions . addAction ( ActionType : : dbHash ) ; <nl> readRoleActions . addAction ( ActionType : : dbStats ) ; <nl> namespace { <nl> readWriteRoleActions . addAllActionsFromSet ( readRoleActions ) ; <nl> / / TODO : Remove OLD_WRITE once commands require the proper actions <nl> readWriteRoleActions . addAction ( ActionType : : oldWrite ) ; <nl> + readWriteRoleActions . addAction ( ActionType : : cloneCollectionTarget ) ; <nl> readWriteRoleActions . addAction ( ActionType : : convertToCapped ) ; <nl> - readWriteRoleActions . addAction ( ActionType : : createCollection ) ; / / TODO : should db admin get this also ? <nl> + readWriteRoleActions . addAction ( ActionType : : createCollection ) ; / / db admin gets this also <nl> readWriteRoleActions . addAction ( ActionType : : dropCollection ) ; <nl> readWriteRoleActions . addAction ( ActionType : : dropIndexes ) ; <nl> readWriteRoleActions . addAction ( ActionType : : emptycapped ) ; <nl> readWriteRoleActions . addAction ( ActionType : : ensureIndex ) ; <nl> readWriteRoleActions . addAction ( ActionType : : insert ) ; <nl> readWriteRoleActions . addAction ( ActionType : : remove ) ; <nl> + readWriteRoleActions . addAction ( ActionType : : renameCollectionSameDB ) ; / / db admin gets this also <nl> readWriteRoleActions . addAction ( ActionType : : update ) ; <nl> <nl> / / User admin role <nl> namespace { <nl> dbAdminRoleActions . addAction ( ActionType : : collStats ) ; <nl> dbAdminRoleActions . addAction ( ActionType : : compact ) ; <nl> dbAdminRoleActions . addAction ( ActionType : : convertToCapped ) ; <nl> + dbAdminRoleActions . addAction ( ActionType : : createCollection ) ; / / read_write gets this also <nl> dbAdminRoleActions . addAction ( ActionType : : dbStats ) ; <nl> dbAdminRoleActions . addAction ( ActionType : : dropCollection ) ; <nl> dbAdminRoleActions . addAction ( ActionType : : profileEnable ) ; <nl> dbAdminRoleActions . addAction ( ActionType : : profileRead ) ; <nl> dbAdminRoleActions . addAction ( ActionType : : reIndex ) ; / / TODO : Should readWrite have this also ? 
This isn ' t consistent with ENSURE_INDEX and DROP_INDEXES <nl> - dbAdminRoleActions . addAction ( ActionType : : renameCollection ) ; <nl> + dbAdminRoleActions . addAction ( ActionType : : renameCollectionSameDB ) ; / / read_write gets this also <nl> dbAdminRoleActions . addAction ( ActionType : : validate ) ; <nl> <nl> / / Server admin role <nl> mmm a / src / mongo / db / cloner . cpp <nl> ppp b / src / mongo / db / cloner . cpp <nl> <nl> # include " mongo / bson / util / builder . h " <nl> # include " mongo / db / cloner . h " <nl> # include " mongo / db / commands . h " <nl> + # include " mongo / db / commands / rename_collection . h " <nl> # include " mongo / db / db . h " <nl> # include " mongo / db / instance . h " <nl> # include " mongo / db / jsobj . h " <nl> namespace mongo { <nl> virtual bool logTheOp ( ) { <nl> return true ; / / can ' t log steps when doing fast rename within a db , so always log the op rather than individual steps comprising it . <nl> } <nl> + virtual void addRequiredPrivileges ( const std : : string & dbname , <nl> + const BSONObj & cmdObj , <nl> + std : : vector < Privilege > * out ) { <nl> + rename_collection : : addPrivilegesRequiredForRenameCollection ( dbname , cmdObj , out ) ; <nl> + } <nl> virtual void help ( stringstream & help ) const { <nl> help < < " example : { renameCollection : foo . a , to : bar . b } " ; <nl> } <nl> new file mode 100644 <nl> index 000000000000 . . ac8ba4c7fa43 <nl> mmm / dev / null <nl> ppp b / src / mongo / db / commands / rename_collection . h <nl> <nl> + / * * <nl> + * Copyright ( C ) 2012 10gen Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the GNU Affero General Public License , version 3 , <nl> + * as published by the Free Software Foundation . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU Affero General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU Affero General Public License <nl> + * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> + * / <nl> + <nl> + # pragma once <nl> + <nl> + # include < string > <nl> + # include < vector > <nl> + <nl> + # include " mongo / db / auth / privilege . h " <nl> + # include " mongo / db / jsobj . h " <nl> + <nl> + namespace mongo { <nl> + namespace rename_collection { <nl> + <nl> + void addPrivilegesRequiredForRenameCollection ( const std : : string & dbname , <nl> + const BSONObj & cmdObj , <nl> + std : : vector < Privilege > * out ) ; <nl> + <nl> + } / / namespace rename_collection <nl> + } / / namespace mongo <nl> + <nl> + <nl> new file mode 100644 <nl> index 000000000000 . . 9b1541f14796 <nl> mmm / dev / null <nl> ppp b / src / mongo / db / commands / rename_collection_common . cpp <nl> <nl> + / * * <nl> + * Copyright ( C ) 2012 10gen Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the GNU Affero General Public License , version 3 , <nl> + * as published by the Free Software Foundation . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . 
See the <nl> + * GNU Affero General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU Affero General Public License <nl> + * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> + * / <nl> + <nl> + # include " mongo / db / commands / rename_collection . h " <nl> + <nl> + # include < string > <nl> + # include < vector > <nl> + <nl> + # include " mongo / db / auth / action_set . h " <nl> + # include " mongo / db / auth / action_type . h " <nl> + # include " mongo / db / auth / privilege . h " <nl> + # include " mongo / db / jsobj . h " <nl> + # include " mongo / db / namespacestring . h " <nl> + <nl> + namespace mongo { <nl> + namespace rename_collection { <nl> + <nl> + void addPrivilegesRequiredForRenameCollection ( const std : : string & dbname , <nl> + const BSONObj & cmdObj , <nl> + std : : vector < Privilege > * out ) { <nl> + NamespaceString sourceNS = NamespaceString ( cmdObj . getStringField ( " renameCollection " ) ) ; <nl> + NamespaceString targetNS = NamespaceString ( cmdObj . getStringField ( " to " ) ) ; <nl> + if ( sourceNS . db = = targetNS . db ) { <nl> + ActionSet actions ; <nl> + actions . addAction ( ActionType : : renameCollectionSameDB ) ; <nl> + out - > push_back ( Privilege ( dbname , actions ) ) ; <nl> + return ; <nl> + } <nl> + <nl> + ActionSet sourceActions ; <nl> + sourceActions . addAction ( ActionType : : cloneCollectionLocalSource ) ; <nl> + sourceActions . addAction ( ActionType : : dropCollection ) ; <nl> + out - > push_back ( Privilege ( sourceNS . ns ( ) , sourceActions ) ) ; <nl> + <nl> + ActionSet targetActions ; <nl> + targetActions . addAction ( ActionType : : createCollection ) ; <nl> + targetActions . addAction ( ActionType : : cloneCollectionTarget ) ; <nl> + targetActions . addAction ( ActionType : : ensureIndex ) ; <nl> + out - > push_back ( Privilege ( targetNS . ns ( ) , targetActions ) ) ; <nl> + } <nl> + <nl> + } / / namespace rename_collection <nl> + } / / namespace mongo <nl> mmm a / src / mongo / s / commands_public . cpp <nl> ppp b / src / mongo / s / commands_public . cpp <nl> <nl> # include " mongo / db / auth / privilege . h " <nl> # include " mongo / db / commands / find_and_modify . h " <nl> # include " mongo / db / commands / mr . h " <nl> + # include " mongo / db / commands / rename_collection . h " <nl> # include " . . / util / net / message . h " <nl> # include " . . / db / dbmessage . h " <nl> # include " . . / client / connpool . h " <nl> namespace mongo { <nl> virtual void addRequiredPrivileges ( const std : : string & dbname , <nl> const BSONObj & cmdObj , <nl> std : : vector < Privilege > * out ) { <nl> - ActionSet actions ; <nl> - actions . addAction ( ActionType : : renameCollection ) ; <nl> - out - > push_back ( Privilege ( dbname , actions ) ) ; <nl> + rename_collection : : addPrivilegesRequiredForRenameCollection ( dbname , cmdObj , out ) ; <nl> } <nl> bool run ( const string & dbName , BSONObj & cmdObj , int , string & errmsg , BSONObjBuilder & result , bool ) { <nl> string fullnsFrom = cmdObj . firstElement ( ) . valuestrsafe ( ) ; <nl>
SERVER - 7122 Assign required privileges to renameCollection command
mongodb/mongo
58c18a5eb7bd05e50adac43a39f24f625f41cd43
2012-12-19T23:09:29Z
mmm a / docs / development / build - instructions - linux . md <nl> ppp b / docs / development / build - instructions - linux . md <nl> <nl> # # Prerequisites <nl> <nl> * [ Node . js ] ( http : / / nodejs . org ) <nl> - * clang , development headers of GTK + and libnotify <nl> + * Clang 3 . 4 or later <nl> + * Development headers of GTK + and libnotify <nl> <nl> On Ubuntu you could install the libraries via : <nl> <nl>
Mention clang 3 . 4 is required , refs
electron/electron
5b167e3684f50c362e66fd58563d826cd9bc7357
2014-10-24T01:55:31Z
mmm a / tensorflow / python / ops / losses / util . py <nl> ppp b / tensorflow / python / ops / losses / util . py <nl> def get_losses ( scope = None , loss_collection = ops . GraphKeys . LOSSES ) : <nl> <nl> <nl> def get_regularization_losses ( scope = None ) : <nl> - " " " Gets the regularization losses . <nl> + " " " Gets the list of regularization losses . <nl> <nl> Args : <nl> scope : An optional scope for filtering the losses to return . <nl> def get_regularization_loss ( scope = None , name = " total_regularization_loss " ) : <nl> def get_total_loss ( add_regularization_losses = True , name = " total_loss " ) : <nl> " " " Returns a tensor whose value represents the total loss . <nl> <nl> - Notice that the function adds the given losses to the regularization losses . <nl> + In particular , this adds any losses you have added with ` tf . add_loss ( ) ` to <nl> + any regularization losses that have been added by regularization parameters <nl> + on layers constructors e . g . ` tf . layers ` . Be very sure to use this if you <nl> + are constructing a loss_op manually . Otherwise regularization arguments <nl> + on ` tf . layers ` methods will not function . <nl> <nl> Args : <nl> add_regularization_losses : A boolean indicating whether or not to use the <nl>
Fix losses documentation .
tensorflow/tensorflow
c590b00b2cbd757a94594da55de89d1b66a8b064
2017-05-02T21:30:23Z
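The reworded docstring above warns about a real pitfall: regularizers passed to `tf.layers` constructors only register their penalties in the `REGULARIZATION_LOSSES` collection, so a hand-built loss tensor silently ignores them unless it is combined via `get_total_loss()`. A minimal TF 1.x-style sketch of the intended usage (placeholder shapes and the regularizer scale are illustrative, not from this change):

```python
# Minimal sketch of the behaviour described by the updated docstring.
import tensorflow as tf

inputs = tf.placeholder(tf.float32, shape=[None, 8])
labels = tf.placeholder(tf.float32, shape=[None, 1])

# kernel_regularizer only adds an entry to the REGULARIZATION_LOSSES
# collection; it is not part of any loss tensor built by hand.
logits = tf.layers.dense(
    inputs, 1, kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-4))

# tf.losses.* ops register themselves in the LOSSES collection
# (equivalently, a custom tensor can be registered with tf.losses.add_loss).
tf.losses.mean_squared_error(labels=labels, predictions=logits)

# Combines the LOSSES collection with the regularization losses. Using the
# mean_squared_error tensor directly would drop the l2 penalty.
total_loss = tf.losses.get_total_loss(add_regularization_losses=True)
```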
mmm a / arangod / Aql / Ast . cpp <nl> ppp b / arangod / Aql / Ast . cpp <nl> AstNode * Ast : : createNodeReference ( Variable const * variable ) { <nl> return node ; <nl> } <nl> <nl> + / / / @ brief create an AST subquery reference node <nl> + AstNode * Ast : : createNodeSubqueryReference ( std : : string const & variableName ) { <nl> + AstNode * node = createNode ( NODE_TYPE_REFERENCE ) ; <nl> + node - > setFlag ( AstNodeFlagType : : FLAG_SUBQUERY_REFERENCE ) ; <nl> + <nl> + auto variable = _scopes . getVariable ( variableName ) ; <nl> + <nl> + if ( variable = = nullptr ) { <nl> + THROW_ARANGO_EXCEPTION_MESSAGE ( TRI_ERROR_INTERNAL , <nl> + " variable not found in reference AstNode " ) ; <nl> + } <nl> + <nl> + node - > setData ( variable ) ; <nl> + <nl> + return node ; <nl> + } <nl> + <nl> / / / @ brief create an AST variable access <nl> AstNode * Ast : : createNodeAccess ( Variable const * variable , <nl> std : : vector < basics : : AttributeName > const & field ) { <nl> mmm a / arangod / Aql / Ast . h <nl> ppp b / arangod / Aql / Ast . h <nl> class Ast { <nl> / / / @ brief create an AST reference node <nl> AstNode * createNodeReference ( Variable const * variable ) ; <nl> <nl> + / / / @ brief create an AST subquery reference node <nl> + AstNode * createNodeSubqueryReference ( std : : string const & variableName ) ; <nl> + <nl> / / / @ brief create an AST parameter node for a value literal <nl> AstNode * createNodeParameter ( char const * name , size_t length ) ; <nl> <nl> mmm a / arangod / Aql / AstNode . h <nl> ppp b / arangod / Aql / AstNode . h <nl> enum AstNodeFlagType : AstNodeFlagsType { <nl> FLAG_BIND_PARAMETER = 0x0020000 , / / node was created from a bind parameter <nl> FLAG_FINALIZED = 0x0040000 , / / node has been finalized and should not be modified ; only <nl> / / set and checked in maintainer mode <nl> + FLAG_SUBQUERY_REFERENCE = 0x0080000 , / / node references a subquery <nl> } ; <nl> <nl> / / / @ brief enumeration of AST node value types <nl> mmm a / arangod / Aql / grammar . cpp <nl> ppp b / arangod / Aql / grammar . 
cpp <nl> void Aqlerror ( YYLTYPE * locp , <nl> <nl> / / / @ brief check if any of the variables used in the INTO expression were <nl> / / / introduced by the COLLECT itself , in which case it would fail <nl> - static void CheckIntoVariables ( Parser * parser , AstNode const * expression , int line , int column , <nl> + static void CheckIntoVariables ( Parser * parser , AstNode const * expression , <nl> + int line , int column , <nl> : : arangodb : : containers : : HashSet < Variable const * > const & variablesIntroduced ) { <nl> if ( expression = = nullptr ) { <nl> return ; <nl> static const yytype_uint16 yyrline [ ] = <nl> 0 , 392 , 392 , 395 , 408 , 412 , 416 , 423 , 425 , 425 , <nl> 437 , 442 , 447 , 449 , 452 , 455 , 458 , 461 , 467 , 469 , <nl> 474 , 476 , 478 , 480 , 482 , 484 , 486 , 488 , 490 , 492 , <nl> - 494 , 499 , 506 , 513 , 519 , 526 , 553 , 577 , 590 , 613 , <nl> - 636 , 636 , 694 , 694 , 717 , 735 , 757 , 765 , 770 , 772 , <nl> - 777 , 784 , 794 , 794 , 808 , 817 , 829 , 853 , 909 , 928 , <nl> - 955 , 957 , 962 , 969 , 972 , 975 , 984 , 998 , 1015 , 1015 , <nl> - 1029 , 1029 , 1039 , 1039 , 1050 , 1053 , 1059 , 1065 , 1068 , 1071 , <nl> - 1074 , 1080 , 1085 , 1092 , 1100 , 1103 , 1109 , 1119 , 1129 , 1137 , <nl> - 1148 , 1153 , 1161 , 1172 , 1177 , 1180 , 1186 , 1190 , 1186 , 1242 , <nl> - 1245 , 1248 , 1254 , 1254 , 1264 , 1270 , 1273 , 1276 , 1279 , 1282 , <nl> - 1285 , 1291 , 1294 , 1310 , 1310 , 1319 , 1319 , 1329 , 1332 , 1335 , <nl> - 1341 , 1344 , 1347 , 1350 , 1353 , 1356 , 1359 , 1362 , 1365 , 1368 , <nl> - 1371 , 1374 , 1377 , 1380 , 1383 , 1386 , 1393 , 1400 , 1406 , 1412 , <nl> - 1418 , 1425 , 1428 , 1431 , 1434 , 1437 , 1440 , 1443 , 1446 , 1450 , <nl> - 1454 , 1461 , 1464 , 1470 , 1472 , 1477 , 1480 , 1480 , 1496 , 1499 , <nl> - 1505 , 1508 , 1514 , 1514 , 1523 , 1525 , 1530 , 1533 , 1539 , 1542 , <nl> - 1568 , 1588 , 1591 , 1605 , 1605 , 1614 , 1616 , 1621 , 1623 , 1628 , <nl> - 1642 , 1646 , 1655 , 1662 , 1665 , 1671 , 1674 , 1680 , 1683 , 1686 , <nl> - 1692 , 1695 , 1701 , 1704 , 1707 , 1711 , 1717 , 1721 , 1728 , 1734 , <nl> - 1734 , 1743 , 1747 , 1756 , 1759 , 1762 , 1768 , 1771 , 1777 , 1809 , <nl> - 1812 , 1815 , 1822 , 1832 , 1832 , 1845 , 1860 , 1874 , 1888 , 1888 , <nl> - 1931 , 1934 , 1940 , 1947 , 1957 , 1960 , 1963 , 1966 , 1969 , 1975 , <nl> - 1979 , 1983 , 1993 , 2000 , 2006 , 2009 , 2014 <nl> + 494 , 499 , 506 , 513 , 519 , 526 , 553 , 576 , 589 , 612 , <nl> + 635 , 635 , 693 , 693 , 725 , 743 , 765 , 773 , 778 , 780 , <nl> + 785 , 792 , 802 , 802 , 816 , 825 , 837 , 861 , 917 , 936 , <nl> + 963 , 965 , 970 , 977 , 980 , 983 , 992 , 1006 , 1023 , 1023 , <nl> + 1037 , 1037 , 1047 , 1047 , 1058 , 1061 , 1067 , 1073 , 1076 , 1079 , <nl> + 1082 , 1088 , 1093 , 1100 , 1108 , 1111 , 1117 , 1127 , 1137 , 1145 , <nl> + 1156 , 1161 , 1169 , 1180 , 1185 , 1188 , 1194 , 1198 , 1194 , 1250 , <nl> + 1253 , 1256 , 1262 , 1262 , 1272 , 1278 , 1281 , 1284 , 1287 , 1290 , <nl> + 1293 , 1299 , 1302 , 1318 , 1318 , 1327 , 1327 , 1337 , 1340 , 1343 , <nl> + 1349 , 1352 , 1355 , 1358 , 1361 , 1364 , 1367 , 1370 , 1373 , 1376 , <nl> + 1379 , 1382 , 1385 , 1388 , 1391 , 1394 , 1401 , 1408 , 1414 , 1420 , <nl> + 1426 , 1433 , 1436 , 1439 , 1442 , 1445 , 1448 , 1451 , 1454 , 1458 , <nl> + 1462 , 1469 , 1472 , 1478 , 1480 , 1485 , 1488 , 1488 , 1504 , 1507 , <nl> + 1513 , 1516 , 1522 , 1522 , 1531 , 1533 , 1538 , 1541 , 1547 , 1550 , <nl> + 1576 , 1596 , 1599 , 1613 , 1613 , 1622 , 1624 , 1629 , 1631 , 1636 , <nl> + 1650 , 1654 , 1663 , 1670 , 1673 , 1679 , 1682 , 1688 
, 1691 , 1694 , <nl> + 1700 , 1703 , 1709 , 1712 , 1715 , 1719 , 1725 , 1729 , 1736 , 1742 , <nl> + 1742 , 1751 , 1755 , 1764 , 1767 , 1770 , 1776 , 1779 , 1785 , 1817 , <nl> + 1820 , 1823 , 1830 , 1840 , 1840 , 1853 , 1868 , 1882 , 1896 , 1896 , <nl> + 1939 , 1942 , 1948 , 1955 , 1965 , 1968 , 1971 , 1974 , 1977 , 1983 , <nl> + 1987 , 1991 , 2001 , 2008 , 2014 , 2017 , 2022 <nl> } ; <nl> # endif <nl> <nl> YYLTYPE yylloc = yyloc_default ; <nl> node - > addMember ( ( yyvsp [ - 2 ] . node ) ) ; <nl> / / Options <nl> node - > addMember ( ( yyvsp [ 0 ] . node ) ) ; <nl> - <nl> } <nl> - # line 2487 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2486 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 37 : <nl> - # line 577 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 576 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto infoNode = parser - > ast ( ) - > createNodeArray ( ) ; <nl> / / Direction <nl> YYLTYPE yylloc = yyloc_default ; <nl> infoNode - > addMember ( ( yyvsp [ 0 ] . node ) ) ; <nl> ( yyval . node ) = infoNode ; <nl> } <nl> - # line 2502 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2501 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 38 : <nl> - # line 590 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 589 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> if ( ! TRI_CaseEqualString ( ( yyvsp [ - 3 ] . strval ) . value , " TO " ) ) { <nl> parser - > registerParseError ( TRI_ERROR_QUERY_PARSE , " unexpected qualifier ' % s ' , expecting ' TO ' " , ( yyvsp [ - 3 ] . strval ) . value , yylloc . first_line , yylloc . first_column ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> infoNode - > addMember ( opts ) ; <nl> ( yyval . node ) = infoNode ; <nl> } <nl> - # line 2527 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2526 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 39 : <nl> - # line 613 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 612 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> if ( ! TRI_CaseEqualString ( ( yyvsp [ - 3 ] . strval ) . value , " TO " ) ) { <nl> parser - > registerParseError ( TRI_ERROR_QUERY_PARSE , " unexpected qualifier ' % s ' , expecting ' TO ' " , ( yyvsp [ - 3 ] . strval ) . value , yylloc . first_line , yylloc . first_column ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> infoNode - > addMember ( opts ) ; <nl> ( yyval . node ) = infoNode ; <nl> } <nl> - # line 2552 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2551 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 40 : <nl> - # line 636 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 635 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / / first open a new scope ( after expression is evaluated ) <nl> parser - > ast ( ) - > scopes ( ) - > start ( arangodb : : aql : : AQL_SCOPE_FOR ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> <nl> parser - > pushStack ( variableNode ) ; <nl> } <nl> - # line 2576 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2575 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 41 : <nl> - # line 654 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 653 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / / now we can handle the optional SEARCH condition and OPTIONS . 
<nl> AstNode * variableNode = static_cast < AstNode * > ( parser - > popStack ( ) ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> } <nl> - # line 2621 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2620 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 42 : <nl> - # line 694 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 693 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / / first open a new scope ( after expression is evaluated ) <nl> parser - > ast ( ) - > scopes ( ) - > start ( arangodb : : aql : : AQL_SCOPE_FOR ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> parser - > pushStack ( graphInfoNode ) ; <nl> / / This stack push / pop magic is necessary to allow v , e , and p in the prune condition <nl> } <nl> - # line 2644 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2643 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 43 : <nl> - # line 711 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 710 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto graphInfoNode = static_cast < AstNode * > ( parser - > popStack ( ) ) ; <nl> auto variablesNode = static_cast < AstNode * > ( parser - > popStack ( ) ) ; <nl> + <nl> + auto prune = graphInfoNode - > getMember ( 3 ) ; <nl> + if ( prune ! = nullptr ) { <nl> + Ast : : traverseReadOnly ( prune , [ & ] ( AstNode const * node ) { <nl> + if ( node - > type = = NODE_TYPE_REFERENCE & & node - > hasFlag ( AstNodeFlagType : : FLAG_SUBQUERY_REFERENCE ) ) { <nl> + parser - > registerParseError ( TRI_ERROR_QUERY_PARSE , " prune condition must not use a subquery " , yylloc . first_line , yylloc . first_column ) ; <nl> + } <nl> + } ) ; <nl> + } <nl> auto node = parser - > ast ( ) - > createNodeTraversal ( variablesNode , graphInfoNode ) ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> } <nl> - # line 2655 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2663 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 44 : <nl> - # line 717 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 725 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / / first open a new scope ( after expression is evaluated ) <nl> parser - > ast ( ) - > scopes ( ) - > start ( arangodb : : aql : : AQL_SCOPE_FOR ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> <nl> } <nl> - # line 2678 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2686 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 45 : <nl> - # line 735 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 743 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / / first open a new scope ( after expression is evaluated ) <nl> parser - > ast ( ) - > scopes ( ) - > start ( arangodb : : aql : : AQL_SCOPE_FOR ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> <nl> } <nl> - # line 2701 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2709 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 46 : <nl> - # line 757 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 765 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / / operand is a reference . can use it directly <nl> auto node = parser - > ast ( ) - > createNodeFilter ( ( yyvsp [ 0 ] . 
node ) ) ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> } <nl> - # line 2711 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2719 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 47 : <nl> - # line 765 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 773 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> } <nl> - # line 2718 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2726 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 48 : <nl> - # line 770 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 778 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> } <nl> - # line 2725 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2733 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 49 : <nl> - # line 772 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 780 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> } <nl> - # line 2732 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2740 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 50 : <nl> - # line 777 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 785 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto node = parser - > ast ( ) - > createNodeLet ( ( yyvsp [ - 2 ] . strval ) . value , ( yyvsp [ - 2 ] . strval ) . length , ( yyvsp [ 0 ] . node ) , true ) ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> } <nl> - # line 2741 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2749 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 51 : <nl> - # line 784 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 792 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> if ( ! TRI_CaseEqualString ( ( yyvsp [ - 2 ] . strval ) . value , " COUNT " ) ) { <nl> parser - > registerParseError ( TRI_ERROR_QUERY_PARSE , " unexpected qualifier ' % s ' , expecting ' COUNT ' " , ( yyvsp [ - 2 ] . strval ) . value , yylloc . first_line , yylloc . first_column ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> <nl> ( yyval . strval ) = ( yyvsp [ 0 ] . strval ) ; <nl> } <nl> - # line 2753 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2761 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 52 : <nl> - # line 794 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 802 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto node = parser - > ast ( ) - > createNodeArray ( ) ; <nl> parser - > pushStack ( node ) ; <nl> } <nl> - # line 2762 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2770 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 53 : <nl> - # line 797 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 805 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto list = static_cast < AstNode * > ( parser - > popStack ( ) ) ; <nl> <nl> YYLTYPE yylloc = yyloc_default ; <nl> } <nl> ( yyval . node ) = list ; <nl> } <nl> - # line 2775 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2783 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 54 : <nl> - # line 808 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 816 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / * COLLECT WITH COUNT INTO var OPTIONS . . . 
* / <nl> auto scopes = parser - > ast ( ) - > scopes ( ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> auto node = parser - > ast ( ) - > createNodeCollectCount ( parser - > ast ( ) - > createNodeArray ( ) , ( yyvsp [ - 1 ] . strval ) . value , ( yyvsp [ - 1 ] . strval ) . length , ( yyvsp [ 0 ] . node ) ) ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> } <nl> - # line 2789 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2797 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 55 : <nl> - # line 817 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 825 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / * COLLECT var = expr WITH COUNT INTO var OPTIONS . . . * / <nl> auto scopes = parser - > ast ( ) - > scopes ( ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> auto node = parser - > ast ( ) - > createNodeCollectCount ( ( yyvsp [ - 2 ] . node ) , ( yyvsp [ - 1 ] . strval ) . value , ( yyvsp [ - 1 ] . strval ) . length , ( yyvsp [ 0 ] . node ) ) ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> } <nl> - # line 2806 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2814 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 56 : <nl> - # line 829 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 837 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / * AGGREGATE var = expr OPTIONS . . . * / <nl> : : arangodb : : containers : : HashSet < Variable const * > variablesIntroduced ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> auto node = parser - > ast ( ) - > createNodeCollect ( parser - > ast ( ) - > createNodeArray ( ) , ( yyvsp [ - 2 ] . node ) , into , intoExpression , nullptr , ( yyvsp [ - 1 ] . node ) ) ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> } <nl> - # line 2835 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2843 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 57 : <nl> - # line 853 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 861 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / * COLLECT var = expr AGGREGATE var = expr OPTIONS . . . * / <nl> : : arangodb : : containers : : HashSet < Variable const * > variablesIntroduced ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> auto node = parser - > ast ( ) - > createNodeCollect ( ( yyvsp [ - 3 ] . node ) , ( yyvsp [ - 2 ] . node ) , into , intoExpression , nullptr , ( yyvsp [ 0 ] . node ) ) ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> } <nl> - # line 2896 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2904 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 58 : <nl> - # line 909 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 917 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / * COLLECT var = expr INTO var OPTIONS . . . * / <nl> : : arangodb : : containers : : HashSet < Variable const * > variablesIntroduced ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> auto node = parser - > ast ( ) - > createNodeCollect ( ( yyvsp [ - 2 ] . node ) , parser - > ast ( ) - > createNodeArray ( ) , into , intoExpression , nullptr , ( yyvsp [ 0 ] . node ) ) ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> } <nl> - # line 2920 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2928 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 59 : <nl> - # line 928 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 936 " Aql / grammar . 
y " / * yacc . c : 1652 * / <nl> { <nl> / * COLLECT var = expr INTO var KEEP . . . OPTIONS . . . * / <nl> : : arangodb : : containers : : HashSet < Variable const * > variablesIntroduced ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> auto node = parser - > ast ( ) - > createNodeCollect ( ( yyvsp [ - 3 ] . node ) , parser - > ast ( ) - > createNodeArray ( ) , into , intoExpression , ( yyvsp [ - 1 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> } <nl> - # line 2949 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2957 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 60 : <nl> - # line 955 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 963 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> } <nl> - # line 2956 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2964 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 61 : <nl> - # line 957 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 965 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> } <nl> - # line 2963 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2971 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 62 : <nl> - # line 962 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 970 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto node = parser - > ast ( ) - > createNodeAssign ( ( yyvsp [ - 2 ] . strval ) . value , ( yyvsp [ - 2 ] . strval ) . length , ( yyvsp [ 0 ] . node ) ) ; <nl> parser - > pushArrayElement ( node ) ; <nl> } <nl> - # line 2972 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2980 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 63 : <nl> - # line 969 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 977 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = nullptr ; <nl> } <nl> - # line 2980 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2988 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 64 : <nl> - # line 972 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 980 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeValueString ( ( yyvsp [ 0 ] . strval ) . value , ( yyvsp [ 0 ] . strval ) . length ) ; <nl> } <nl> - # line 2988 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 2996 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 65 : <nl> - # line 975 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 983 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto node = parser - > ast ( ) - > createNodeArray ( ) ; <nl> node - > addMember ( parser - > ast ( ) - > createNodeValueString ( ( yyvsp [ - 2 ] . strval ) . value , ( yyvsp [ - 2 ] . strval ) . length ) ) ; <nl> node - > addMember ( ( yyvsp [ 0 ] . node ) ) ; <nl> ( yyval . node ) = node ; <nl> } <nl> - # line 2999 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3007 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 66 : <nl> - # line 984 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 992 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> if ( ! parser - > ast ( ) - > scopes ( ) - > existsVariable ( ( yyvsp [ 0 ] . strval ) . value , ( yyvsp [ 0 ] . strval ) . 
length ) ) { <nl> parser - > registerParseError ( TRI_ERROR_QUERY_PARSE , " use of unknown variable ' % s ' for KEEP " , ( yyvsp [ 0 ] . strval ) . value , yylloc . first_line , yylloc . first_column ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> node - > setFlag ( FLAG_KEEP_VARIABLENAME ) ; <nl> parser - > pushArrayElement ( node ) ; <nl> } <nl> - # line 3018 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3026 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 67 : <nl> - # line 998 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1006 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> if ( ! parser - > ast ( ) - > scopes ( ) - > existsVariable ( ( yyvsp [ 0 ] . strval ) . value , ( yyvsp [ 0 ] . strval ) . length ) ) { <nl> parser - > registerParseError ( TRI_ERROR_QUERY_PARSE , " use of unknown variable ' % s ' for KEEP " , ( yyvsp [ 0 ] . strval ) . value , yylloc . first_line , yylloc . first_column ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> node - > setFlag ( FLAG_KEEP_VARIABLENAME ) ; <nl> parser - > pushArrayElement ( node ) ; <nl> } <nl> - # line 3037 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3045 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 68 : <nl> - # line 1015 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1023 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> if ( ! TRI_CaseEqualString ( ( yyvsp [ 0 ] . strval ) . value , " KEEP " ) ) { <nl> parser - > registerParseError ( TRI_ERROR_QUERY_PARSE , " unexpected qualifier ' % s ' , expecting ' KEEP ' " , ( yyvsp [ 0 ] . strval ) . value , yylloc . first_line , yylloc . first_column ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> auto node = parser - > ast ( ) - > createNodeArray ( ) ; <nl> parser - > pushStack ( node ) ; <nl> } <nl> - # line 3050 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3058 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 69 : <nl> - # line 1022 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1030 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto list = static_cast < AstNode * > ( parser - > popStack ( ) ) ; <nl> ( yyval . node ) = list ; <nl> } <nl> - # line 3059 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3067 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 70 : <nl> - # line 1029 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1037 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto node = parser - > ast ( ) - > createNodeArray ( ) ; <nl> parser - > pushStack ( node ) ; <nl> } <nl> - # line 3068 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3076 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 71 : <nl> - # line 1032 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1040 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto list = static_cast < AstNode * > ( parser - > popStack ( ) ) ; <nl> ( yyval . node ) = list ; <nl> } <nl> - # line 3077 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3085 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 72 : <nl> - # line 1039 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1047 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto node = parser - > ast ( ) - > createNodeArray ( ) ; <nl> parser - > pushStack ( node ) ; <nl> } <nl> - # line 3086 " Aql / grammar . 
cpp " / * yacc . c : 1652 * / <nl> + # line 3094 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 73 : <nl> - # line 1042 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1050 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto list = static_cast < AstNode const * > ( parser - > popStack ( ) ) ; <nl> auto node = parser - > ast ( ) - > createNodeSort ( list ) ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> } <nl> - # line 3096 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3104 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 74 : <nl> - # line 1050 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1058 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> parser - > pushArrayElement ( ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3104 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3112 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 75 : <nl> - # line 1053 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1061 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> parser - > pushArrayElement ( ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3112 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3120 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 76 : <nl> - # line 1059 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1067 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeSortElement ( ( yyvsp [ - 1 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3120 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3128 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 77 : <nl> - # line 1065 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1073 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeValueBool ( true ) ; <nl> } <nl> - # line 3128 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3136 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 78 : <nl> - # line 1068 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1076 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeValueBool ( true ) ; <nl> } <nl> - # line 3136 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3144 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 79 : <nl> - # line 1071 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1079 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeValueBool ( false ) ; <nl> } <nl> - # line 3144 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3152 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 80 : <nl> - # line 1074 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1082 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 3152 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3160 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 81 : <nl> - # line 1080 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1088 " Aql / grammar . y " / * yacc . 
c : 1652 * / <nl> { <nl> auto offset = parser - > ast ( ) - > createNodeValueInt ( 0 ) ; <nl> auto node = parser - > ast ( ) - > createNodeLimit ( offset , ( yyvsp [ 0 ] . node ) ) ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> } <nl> - # line 3162 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3170 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 82 : <nl> - # line 1085 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1093 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto node = parser - > ast ( ) - > createNodeLimit ( ( yyvsp [ - 2 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> } <nl> - # line 3171 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3179 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 83 : <nl> - # line 1092 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1100 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto node = parser - > ast ( ) - > createNodeReturn ( ( yyvsp [ 0 ] . node ) ) ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> parser - > ast ( ) - > scopes ( ) - > endNested ( ) ; <nl> } <nl> - # line 3181 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3189 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 84 : <nl> - # line 1100 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1108 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 3189 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3197 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 85 : <nl> - # line 1103 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1111 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 3197 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3205 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 86 : <nl> - # line 1109 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1117 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> if ( ! parser - > configureWriteQuery ( ( yyvsp [ - 1 ] . node ) , ( yyvsp [ 0 ] . node ) ) ) { <nl> YYABORT ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> auto node = parser - > ast ( ) - > createNodeRemove ( ( yyvsp [ - 2 ] . node ) , ( yyvsp [ - 1 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> } <nl> - # line 3209 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3217 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 87 : <nl> - # line 1119 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1127 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> if ( ! parser - > configureWriteQuery ( ( yyvsp [ - 1 ] . node ) , ( yyvsp [ 0 ] . node ) ) ) { <nl> YYABORT ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> auto node = parser - > ast ( ) - > createNodeInsert ( ( yyvsp [ - 2 ] . node ) , ( yyvsp [ - 1 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> } <nl> - # line 3221 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3229 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 88 : <nl> - # line 1129 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1137 " Aql / grammar . y " / * yacc . 
c : 1652 * / <nl> { <nl> if ( ! parser - > configureWriteQuery ( ( yyvsp [ - 1 ] . node ) , ( yyvsp [ 0 ] . node ) ) ) { <nl> YYABORT ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> AstNode * node = parser - > ast ( ) - > createNodeUpdate ( nullptr , ( yyvsp [ - 2 ] . node ) , ( yyvsp [ - 1 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> } <nl> - # line 3234 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3242 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 89 : <nl> - # line 1137 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1145 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> if ( ! parser - > configureWriteQuery ( ( yyvsp [ - 1 ] . node ) , ( yyvsp [ 0 ] . node ) ) ) { <nl> YYABORT ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> AstNode * node = parser - > ast ( ) - > createNodeUpdate ( ( yyvsp [ - 4 ] . node ) , ( yyvsp [ - 2 ] . node ) , ( yyvsp [ - 1 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> } <nl> - # line 3247 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3255 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 90 : <nl> - # line 1148 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1156 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> } <nl> - # line 3254 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3262 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 91 : <nl> - # line 1153 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1161 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> if ( ! parser - > configureWriteQuery ( ( yyvsp [ - 1 ] . node ) , ( yyvsp [ 0 ] . node ) ) ) { <nl> YYABORT ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> AstNode * node = parser - > ast ( ) - > createNodeReplace ( nullptr , ( yyvsp [ - 2 ] . node ) , ( yyvsp [ - 1 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> } <nl> - # line 3267 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3275 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 92 : <nl> - # line 1161 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1169 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> if ( ! parser - > configureWriteQuery ( ( yyvsp [ - 1 ] . node ) , ( yyvsp [ 0 ] . node ) ) ) { <nl> YYABORT ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> AstNode * node = parser - > ast ( ) - > createNodeReplace ( ( yyvsp [ - 4 ] . node ) , ( yyvsp [ - 2 ] . node ) , ( yyvsp [ - 1 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> } <nl> - # line 3280 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3288 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 93 : <nl> - # line 1172 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1180 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> } <nl> - # line 3287 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3295 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 94 : <nl> - # line 1177 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1185 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . intval ) = static_cast < int64_t > ( NODE_TYPE_UPDATE ) ; <nl> } <nl> - # line 3295 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3303 " Aql / grammar . 
cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 95 : <nl> - # line 1180 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1188 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . intval ) = static_cast < int64_t > ( NODE_TYPE_REPLACE ) ; <nl> } <nl> - # line 3303 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3311 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 96 : <nl> - # line 1186 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1194 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / / reserve a variable named " $ OLD " , we might need it in the update expression <nl> / / and in a later return thing <nl> parser - > pushStack ( parser - > ast ( ) - > createNodeVariable ( TRI_CHAR_LENGTH_PAIR ( Variable : : NAME_OLD ) , true ) ) ; <nl> } <nl> - # line 3313 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3321 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 97 : <nl> - # line 1190 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1198 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> AstNode * variableNode = static_cast < AstNode * > ( parser - > popStack ( ) ) ; <nl> <nl> YYLTYPE yylloc = yyloc_default ; <nl> <nl> parser - > pushStack ( forNode ) ; <nl> } <nl> - # line 3357 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3365 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 98 : <nl> - # line 1228 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1236 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> AstNode * forNode = static_cast < AstNode * > ( parser - > popStack ( ) ) ; <nl> forNode - > changeMember ( 1 , ( yyvsp [ - 1 ] . node ) ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> auto node = parser - > ast ( ) - > createNodeUpsert ( static_cast < AstNodeType > ( ( yyvsp [ - 3 ] . intval ) ) , parser - > ast ( ) - > createNodeReference ( TRI_CHAR_LENGTH_PAIR ( Variable : : NAME_OLD ) ) , ( yyvsp [ - 4 ] . node ) , ( yyvsp [ - 2 ] . node ) , ( yyvsp [ - 1 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> } <nl> - # line 3373 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3381 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 99 : <nl> - # line 1242 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1250 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeQuantifier ( Quantifier : : ALL ) ; <nl> } <nl> - # line 3381 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3389 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 100 : <nl> - # line 1245 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1253 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeQuantifier ( Quantifier : : ANY ) ; <nl> } <nl> - # line 3389 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3397 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 101 : <nl> - # line 1248 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1256 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeQuantifier ( Quantifier : : NONE ) ; <nl> } <nl> - # line 3397 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3405 " Aql / grammar . cpp " / * yacc . 
c : 1652 * / <nl> break ; <nl> <nl> case 102 : <nl> - # line 1254 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1262 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto const scopeType = parser - > ast ( ) - > scopes ( ) - > type ( ) ; <nl> <nl> YYLTYPE yylloc = yyloc_default ; <nl> parser - > registerParseError ( TRI_ERROR_QUERY_PARSE , " cannot use DISTINCT modifier on top - level query element " , yylloc . first_line , yylloc . first_column ) ; <nl> } <nl> } <nl> - # line 3410 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3418 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 103 : <nl> - # line 1261 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1269 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeDistinct ( ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3418 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3426 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 104 : <nl> - # line 1264 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1272 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 3426 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3434 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 105 : <nl> - # line 1270 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1278 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 3434 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3442 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 106 : <nl> - # line 1273 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1281 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 3442 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3450 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 107 : <nl> - # line 1276 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1284 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 3450 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3458 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 108 : <nl> - # line 1279 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1287 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 3458 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3466 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 109 : <nl> - # line 1282 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1290 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 3466 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3474 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 110 : <nl> - # line 1285 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1293 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeRange ( ( yyvsp [ - 2 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3474 " Aql / grammar . cpp " / * yacc . 
c : 1652 * / <nl> + # line 3482 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 111 : <nl> - # line 1291 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1299 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . strval ) = ( yyvsp [ 0 ] . strval ) ; <nl> } <nl> - # line 3482 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3490 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 112 : <nl> - # line 1294 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1302 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> std : : string temp ( ( yyvsp [ - 2 ] . strval ) . value , ( yyvsp [ - 2 ] . strval ) . length ) ; <nl> temp . append ( " : : " ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> ( yyval . strval ) . value = p ; <nl> ( yyval . strval ) . length = temp . size ( ) ; <nl> } <nl> - # line 3500 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3508 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 113 : <nl> - # line 1310 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1318 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> parser - > pushStack ( ( yyvsp [ - 1 ] . strval ) . value ) ; <nl> <nl> auto node = parser - > ast ( ) - > createNodeArray ( ) ; <nl> parser - > pushStack ( node ) ; <nl> } <nl> - # line 3511 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3519 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 114 : <nl> - # line 1315 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1323 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto list = static_cast < AstNode const * > ( parser - > popStack ( ) ) ; <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeFunctionCall ( static_cast < char const * > ( parser - > popStack ( ) ) , list ) ; <nl> } <nl> - # line 3520 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3528 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 115 : <nl> - # line 1319 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1327 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto node = parser - > ast ( ) - > createNodeArray ( ) ; <nl> parser - > pushStack ( node ) ; <nl> } <nl> - # line 3529 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3537 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 116 : <nl> - # line 1322 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1330 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto list = static_cast < AstNode const * > ( parser - > popStack ( ) ) ; <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeFunctionCall ( TRI_CHAR_LENGTH_PAIR ( " LIKE " ) , list ) ; <nl> } <nl> - # line 3538 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3546 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 117 : <nl> - # line 1329 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1337 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeUnaryOperator ( NODE_TYPE_OPERATOR_UNARY_PLUS , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3546 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3554 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 118 : <nl> - # line 1332 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1340 " Aql / grammar . y " / * yacc . 
c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeUnaryOperator ( NODE_TYPE_OPERATOR_UNARY_MINUS , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3554 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3562 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 119 : <nl> - # line 1335 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1343 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeUnaryOperator ( NODE_TYPE_OPERATOR_UNARY_NOT , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3562 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3570 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 120 : <nl> - # line 1341 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1349 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryOperator ( NODE_TYPE_OPERATOR_BINARY_OR , ( yyvsp [ - 2 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3570 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3578 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 121 : <nl> - # line 1344 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1352 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryOperator ( NODE_TYPE_OPERATOR_BINARY_AND , ( yyvsp [ - 2 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3578 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3586 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 122 : <nl> - # line 1347 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1355 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryOperator ( NODE_TYPE_OPERATOR_BINARY_PLUS , ( yyvsp [ - 2 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3586 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3594 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 123 : <nl> - # line 1350 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1358 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryOperator ( NODE_TYPE_OPERATOR_BINARY_MINUS , ( yyvsp [ - 2 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3594 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3602 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 124 : <nl> - # line 1353 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1361 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryOperator ( NODE_TYPE_OPERATOR_BINARY_TIMES , ( yyvsp [ - 2 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3602 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3610 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 125 : <nl> - # line 1356 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1364 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryOperator ( NODE_TYPE_OPERATOR_BINARY_DIV , ( yyvsp [ - 2 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3610 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3618 " Aql / grammar . 
cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 126 : <nl> - # line 1359 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1367 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryOperator ( NODE_TYPE_OPERATOR_BINARY_MOD , ( yyvsp [ - 2 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3618 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3626 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 127 : <nl> - # line 1362 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1370 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryOperator ( NODE_TYPE_OPERATOR_BINARY_EQ , ( yyvsp [ - 2 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3626 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3634 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 128 : <nl> - # line 1365 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1373 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryOperator ( NODE_TYPE_OPERATOR_BINARY_NE , ( yyvsp [ - 2 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3634 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3642 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 129 : <nl> - # line 1368 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1376 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryOperator ( NODE_TYPE_OPERATOR_BINARY_LT , ( yyvsp [ - 2 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3642 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3650 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 130 : <nl> - # line 1371 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1379 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryOperator ( NODE_TYPE_OPERATOR_BINARY_GT , ( yyvsp [ - 2 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3650 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3658 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 131 : <nl> - # line 1374 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1382 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryOperator ( NODE_TYPE_OPERATOR_BINARY_LE , ( yyvsp [ - 2 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3658 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3666 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 132 : <nl> - # line 1377 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1385 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryOperator ( NODE_TYPE_OPERATOR_BINARY_GE , ( yyvsp [ - 2 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3666 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3674 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 133 : <nl> - # line 1380 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1388 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . 
node ) = parser - > ast ( ) - > createNodeBinaryOperator ( NODE_TYPE_OPERATOR_BINARY_IN , ( yyvsp [ - 2 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3674 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3682 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 134 : <nl> - # line 1383 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1391 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryOperator ( NODE_TYPE_OPERATOR_BINARY_NIN , ( yyvsp [ - 3 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3682 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3690 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 135 : <nl> - # line 1386 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1394 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> AstNode * arguments = parser - > ast ( ) - > createNodeArray ( 2 ) ; <nl> arguments - > addMember ( ( yyvsp [ - 3 ] . node ) ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> AstNode * expression = parser - > ast ( ) - > createNodeFunctionCall ( TRI_CHAR_LENGTH_PAIR ( " LIKE " ) , arguments ) ; <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeUnaryOperator ( NODE_TYPE_OPERATOR_UNARY_NOT , expression ) ; <nl> } <nl> - # line 3694 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3702 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 136 : <nl> - # line 1393 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1401 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> AstNode * arguments = parser - > ast ( ) - > createNodeArray ( 2 ) ; <nl> arguments - > addMember ( ( yyvsp [ - 3 ] . node ) ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> AstNode * expression = parser - > ast ( ) - > createNodeFunctionCall ( TRI_CHAR_LENGTH_PAIR ( " REGEX_TEST " ) , arguments ) ; <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeUnaryOperator ( NODE_TYPE_OPERATOR_UNARY_NOT , expression ) ; <nl> } <nl> - # line 3706 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3714 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 137 : <nl> - # line 1400 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1408 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> AstNode * arguments = parser - > ast ( ) - > createNodeArray ( 2 ) ; <nl> arguments - > addMember ( ( yyvsp [ - 3 ] . node ) ) ; <nl> arguments - > addMember ( ( yyvsp [ 0 ] . node ) ) ; <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeFunctionCall ( TRI_CHAR_LENGTH_PAIR ( " REGEX_TEST " ) , arguments ) ; <nl> } <nl> - # line 3717 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3725 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 138 : <nl> - # line 1406 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1414 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> AstNode * arguments = parser - > ast ( ) - > createNodeArray ( 2 ) ; <nl> arguments - > addMember ( ( yyvsp [ - 2 ] . node ) ) ; <nl> arguments - > addMember ( ( yyvsp [ 0 ] . node ) ) ; <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeFunctionCall ( TRI_CHAR_LENGTH_PAIR ( " LIKE " ) , arguments ) ; <nl> } <nl> - # line 3728 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3736 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 139 : <nl> - # line 1412 " Aql / grammar . 
y " / * yacc . c : 1652 * / <nl> + # line 1420 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> AstNode * arguments = parser - > ast ( ) - > createNodeArray ( 2 ) ; <nl> arguments - > addMember ( ( yyvsp [ - 2 ] . node ) ) ; <nl> arguments - > addMember ( ( yyvsp [ 0 ] . node ) ) ; <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeFunctionCall ( TRI_CHAR_LENGTH_PAIR ( " REGEX_TEST " ) , arguments ) ; <nl> } <nl> - # line 3739 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3747 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 140 : <nl> - # line 1418 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1426 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> AstNode * arguments = parser - > ast ( ) - > createNodeArray ( 2 ) ; <nl> arguments - > addMember ( ( yyvsp [ - 2 ] . node ) ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> AstNode * node = parser - > ast ( ) - > createNodeFunctionCall ( TRI_CHAR_LENGTH_PAIR ( " REGEX_TEST " ) , arguments ) ; <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeUnaryOperator ( NODE_TYPE_OPERATOR_UNARY_NOT , node ) ; <nl> } <nl> - # line 3751 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3759 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 141 : <nl> - # line 1425 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1433 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryArrayOperator ( NODE_TYPE_OPERATOR_BINARY_ARRAY_EQ , ( yyvsp [ - 3 ] . node ) , ( yyvsp [ 0 ] . node ) , ( yyvsp [ - 2 ] . node ) ) ; <nl> } <nl> - # line 3759 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3767 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 142 : <nl> - # line 1428 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1436 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryArrayOperator ( NODE_TYPE_OPERATOR_BINARY_ARRAY_NE , ( yyvsp [ - 3 ] . node ) , ( yyvsp [ 0 ] . node ) , ( yyvsp [ - 2 ] . node ) ) ; <nl> } <nl> - # line 3767 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3775 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 143 : <nl> - # line 1431 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1439 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryArrayOperator ( NODE_TYPE_OPERATOR_BINARY_ARRAY_LT , ( yyvsp [ - 3 ] . node ) , ( yyvsp [ 0 ] . node ) , ( yyvsp [ - 2 ] . node ) ) ; <nl> } <nl> - # line 3775 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3783 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 144 : <nl> - # line 1434 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1442 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryArrayOperator ( NODE_TYPE_OPERATOR_BINARY_ARRAY_GT , ( yyvsp [ - 3 ] . node ) , ( yyvsp [ 0 ] . node ) , ( yyvsp [ - 2 ] . node ) ) ; <nl> } <nl> - # line 3783 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3791 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 145 : <nl> - # line 1437 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1445 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . 
node ) = parser - > ast ( ) - > createNodeBinaryArrayOperator ( NODE_TYPE_OPERATOR_BINARY_ARRAY_LE , ( yyvsp [ - 3 ] . node ) , ( yyvsp [ 0 ] . node ) , ( yyvsp [ - 2 ] . node ) ) ; <nl> } <nl> - # line 3791 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3799 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 146 : <nl> - # line 1440 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1448 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryArrayOperator ( NODE_TYPE_OPERATOR_BINARY_ARRAY_GE , ( yyvsp [ - 3 ] . node ) , ( yyvsp [ 0 ] . node ) , ( yyvsp [ - 2 ] . node ) ) ; <nl> } <nl> - # line 3799 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3807 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 147 : <nl> - # line 1443 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1451 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryArrayOperator ( NODE_TYPE_OPERATOR_BINARY_ARRAY_IN , ( yyvsp [ - 3 ] . node ) , ( yyvsp [ 0 ] . node ) , ( yyvsp [ - 2 ] . node ) ) ; <nl> } <nl> - # line 3807 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3815 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 148 : <nl> - # line 1446 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1454 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto quantifier = parser - > ast ( ) - > createNodeQuantifier ( Quantifier : : ALL ) ; <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryArrayOperator ( NODE_TYPE_OPERATOR_BINARY_ARRAY_NIN , ( yyvsp [ - 4 ] . node ) , ( yyvsp [ 0 ] . node ) , quantifier ) ; <nl> } <nl> - # line 3816 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3824 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 149 : <nl> - # line 1450 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1458 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto quantifier = parser - > ast ( ) - > createNodeQuantifier ( Quantifier : : ANY ) ; <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryArrayOperator ( NODE_TYPE_OPERATOR_BINARY_ARRAY_NIN , ( yyvsp [ - 4 ] . node ) , ( yyvsp [ 0 ] . node ) , quantifier ) ; <nl> } <nl> - # line 3825 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3833 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 150 : <nl> - # line 1454 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1462 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto quantifier = parser - > ast ( ) - > createNodeQuantifier ( Quantifier : : NONE ) ; <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBinaryArrayOperator ( NODE_TYPE_OPERATOR_BINARY_ARRAY_NIN , ( yyvsp [ - 4 ] . node ) , ( yyvsp [ 0 ] . node ) , quantifier ) ; <nl> } <nl> - # line 3834 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3842 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 151 : <nl> - # line 1461 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1469 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeTernaryOperator ( ( yyvsp [ - 4 ] . node ) , ( yyvsp [ - 2 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3842 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3850 " Aql / grammar . 
cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 152 : <nl> - # line 1464 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1472 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeTernaryOperator ( ( yyvsp [ - 3 ] . node ) , ( yyvsp [ - 3 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3850 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3858 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 153 : <nl> - # line 1470 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1478 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> } <nl> - # line 3857 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3865 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 154 : <nl> - # line 1472 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1480 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> } <nl> - # line 3864 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3872 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 155 : <nl> - # line 1477 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1485 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 3872 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3880 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 156 : <nl> - # line 1480 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1488 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> parser - > ast ( ) - > scopes ( ) - > start ( arangodb : : aql : : AQL_SCOPE_SUBQUERY ) ; <nl> parser - > ast ( ) - > startSubQuery ( ) ; <nl> } <nl> - # line 3881 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3889 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 157 : <nl> - # line 1483 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1491 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> AstNode * node = parser - > ast ( ) - > endSubQuery ( ) ; <nl> parser - > ast ( ) - > scopes ( ) - > endCurrent ( ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> auto subQuery = parser - > ast ( ) - > createNodeLet ( variableName . c_str ( ) , variableName . size ( ) , node , false ) ; <nl> parser - > ast ( ) - > addOperation ( subQuery ) ; <nl> <nl> - ( yyval . node ) = parser - > ast ( ) - > createNodeReference ( variableName ) ; <nl> + ( yyval . node ) = parser - > ast ( ) - > createNodeSubqueryReference ( variableName ) ; <nl> } <nl> - # line 3896 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3904 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 158 : <nl> - # line 1496 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1504 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> parser - > pushArrayElement ( ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3904 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3912 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 159 : <nl> - # line 1499 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1507 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> parser - > pushArrayElement ( ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3912 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3920 " Aql / grammar . cpp " / * yacc . 
c : 1652 * / <nl> break ; <nl> <nl> case 160 : <nl> - # line 1505 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1513 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 3920 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3928 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 161 : <nl> - # line 1508 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1516 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 3928 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3936 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 162 : <nl> - # line 1514 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1522 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto node = parser - > ast ( ) - > createNodeArray ( ) ; <nl> parser - > pushArray ( node ) ; <nl> } <nl> - # line 3937 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3945 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 163 : <nl> - # line 1517 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1525 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > popArray ( ) ; <nl> } <nl> - # line 3945 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3953 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 164 : <nl> - # line 1523 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1531 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> } <nl> - # line 3952 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3960 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 165 : <nl> - # line 1525 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1533 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> } <nl> - # line 3959 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3967 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 166 : <nl> - # line 1530 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1538 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> parser - > pushArrayElement ( ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3967 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3975 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 167 : <nl> - # line 1533 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1541 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> parser - > pushArrayElement ( ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 3975 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3983 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 168 : <nl> - # line 1539 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1547 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = nullptr ; <nl> } <nl> - # line 3983 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 3991 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 169 : <nl> - # line 1542 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1550 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> if ( ( yyvsp [ 0 ] . node ) = = nullptr ) { <nl> ABORT_OOM <nl> YYLTYPE yylloc = yyloc_default ; <nl> <nl> ( yyval . 
node ) = node ; <nl> } <nl> - # line 4014 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4022 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 170 : <nl> - # line 1568 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1576 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> if ( ( yyvsp [ - 2 ] . node ) = = nullptr ) { <nl> ABORT_OOM <nl> YYLTYPE yylloc = yyloc_default ; <nl> node - > addMember ( ( yyvsp [ 0 ] . node ) ) ; <nl> ( yyval . node ) = node ; <nl> } <nl> - # line 4036 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4044 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 171 : <nl> - # line 1588 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1596 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = nullptr ; <nl> } <nl> - # line 4044 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4052 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 172 : <nl> - # line 1591 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1599 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> if ( ( yyvsp [ 0 ] . node ) = = nullptr ) { <nl> ABORT_OOM <nl> YYLTYPE yylloc = yyloc_default ; <nl> <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 4060 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4068 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 173 : <nl> - # line 1605 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1613 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto node = parser - > ast ( ) - > createNodeObject ( ) ; <nl> parser - > pushStack ( node ) ; <nl> } <nl> - # line 4069 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4077 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 174 : <nl> - # line 1608 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1616 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = static_cast < AstNode * > ( parser - > popStack ( ) ) ; <nl> } <nl> - # line 4077 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4085 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 175 : <nl> - # line 1614 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1622 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> } <nl> - # line 4084 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4092 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 176 : <nl> - # line 1616 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1624 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> } <nl> - # line 4091 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4099 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 177 : <nl> - # line 1621 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1629 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> } <nl> - # line 4098 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4106 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 178 : <nl> - # line 1623 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1631 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> } <nl> - # line 4105 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4113 " Aql / grammar . cpp " / * yacc . 
c : 1652 * / <nl> break ; <nl> <nl> case 179 : <nl> - # line 1628 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1636 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / / attribute - name - only ( comparable to JS enhanced object literals , e . g . { foo , bar } ) <nl> auto ast = parser - > ast ( ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> auto node = ast - > createNodeReference ( variable ) ; <nl> parser - > pushObjectElement ( ( yyvsp [ 0 ] . strval ) . value , ( yyvsp [ 0 ] . strval ) . length , node ) ; <nl> } <nl> - # line 4124 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4132 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 180 : <nl> - # line 1642 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1650 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / / attribute - name : attribute - value <nl> parser - > pushObjectElement ( ( yyvsp [ - 2 ] . strval ) . value , ( yyvsp [ - 2 ] . strval ) . length , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 4133 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4141 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 181 : <nl> - # line 1646 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1654 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / / bind - parameter : attribute - value <nl> if ( ( yyvsp [ - 2 ] . strval ) . length < 1 | | ( yyvsp [ - 2 ] . strval ) . value [ 0 ] = = ' @ ' ) { <nl> YYLTYPE yylloc = yyloc_default ; <nl> auto param = parser - > ast ( ) - > createNodeParameter ( ( yyvsp [ - 2 ] . strval ) . value , ( yyvsp [ - 2 ] . strval ) . length ) ; <nl> parser - > pushObjectElement ( param , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 4147 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4155 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 182 : <nl> - # line 1655 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1663 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / / [ attribute - name - expression ] : attribute - value <nl> parser - > pushObjectElement ( ( yyvsp [ - 3 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 4156 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4164 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 183 : <nl> - # line 1662 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1670 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . intval ) = 1 ; <nl> } <nl> - # line 4164 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4172 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 184 : <nl> - # line 1665 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1673 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . intval ) = ( yyvsp [ - 1 ] . intval ) + 1 ; <nl> } <nl> - # line 4172 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4180 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 185 : <nl> - # line 1671 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1679 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = nullptr ; <nl> } <nl> - # line 4180 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4188 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 186 : <nl> - # line 1674 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1682 " Aql / grammar . 
y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 4188 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4196 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 187 : <nl> - # line 1680 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1688 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = nullptr ; <nl> } <nl> - # line 4196 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4204 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 188 : <nl> - # line 1683 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1691 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeArrayLimit ( nullptr , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 4204 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4212 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 189 : <nl> - # line 1686 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1694 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeArrayLimit ( ( yyvsp [ - 2 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 4212 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4220 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 190 : <nl> - # line 1692 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1700 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = nullptr ; <nl> } <nl> - # line 4220 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4228 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 191 : <nl> - # line 1695 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1703 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 4228 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4236 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 192 : <nl> - # line 1701 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1709 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeValueString ( ( yyvsp [ 0 ] . strval ) . value , ( yyvsp [ 0 ] . strval ) . length ) ; <nl> } <nl> - # line 4236 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4244 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 193 : <nl> - # line 1704 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1712 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 4244 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4252 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 194 : <nl> - # line 1707 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1715 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto tmp = parser - > ast ( ) - > createNodeValueString ( ( yyvsp [ 0 ] . strval ) . value , ( yyvsp [ 0 ] . strval ) . length ) ; <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeCollectionDirection ( ( yyvsp [ - 1 ] . intval ) , tmp ) ; <nl> } <nl> - # line 4253 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4261 " Aql / grammar . cpp " / * yacc . 
c : 1652 * / <nl> break ; <nl> <nl> case 195 : <nl> - # line 1711 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1719 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeCollectionDirection ( ( yyvsp [ - 1 ] . intval ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 4261 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4269 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 196 : <nl> - # line 1717 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1725 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto node = static_cast < AstNode * > ( parser - > peekStack ( ) ) ; <nl> node - > addMember ( ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 4270 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4278 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 197 : <nl> - # line 1721 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1729 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto node = static_cast < AstNode * > ( parser - > peekStack ( ) ) ; <nl> node - > addMember ( ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> - # line 4279 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4287 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 198 : <nl> - # line 1728 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1736 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto node = parser - > ast ( ) - > createNodeArray ( ) ; <nl> node - > addMember ( ( yyvsp [ 0 ] . node ) ) ; <nl> auto const & resolver = parser - > query ( ) - > resolver ( ) ; <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeCollectionList ( node , resolver ) ; <nl> } <nl> - # line 4290 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4298 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 199 : <nl> - # line 1734 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1742 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto node = parser - > ast ( ) - > createNodeArray ( ) ; <nl> parser - > pushStack ( node ) ; <nl> node - > addMember ( ( yyvsp [ - 1 ] . node ) ) ; <nl> } <nl> - # line 4300 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4308 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 200 : <nl> - # line 1738 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1746 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto node = static_cast < AstNode * > ( parser - > popStack ( ) ) ; <nl> auto const & resolver = parser - > query ( ) - > resolver ( ) ; <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeCollectionList ( node , resolver ) ; <nl> } <nl> - # line 4310 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4318 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 201 : <nl> - # line 1743 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1751 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / / graph name <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 4319 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4327 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 202 : <nl> - # line 1747 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1755 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / / graph name <nl> ( yyval . 
node ) = parser - > ast ( ) - > createNodeValueString ( ( yyvsp [ 0 ] . strval ) . value , ( yyvsp [ 0 ] . strval ) . length ) ; <nl> } <nl> - # line 4328 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4336 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 203 : <nl> - # line 1756 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1764 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . intval ) = 2 ; <nl> } <nl> - # line 4336 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4344 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 204 : <nl> - # line 1759 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1767 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . intval ) = 1 ; <nl> } <nl> - # line 4344 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4352 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 205 : <nl> - # line 1762 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1770 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . intval ) = 0 ; <nl> } <nl> - # line 4352 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4360 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 206 : <nl> - # line 1768 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1776 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeDirection ( ( yyvsp [ 0 ] . intval ) , 1 ) ; <nl> } <nl> - # line 4360 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4368 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 207 : <nl> - # line 1771 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1779 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeDirection ( ( yyvsp [ 0 ] . intval ) , ( yyvsp [ - 1 ] . node ) ) ; <nl> } <nl> - # line 4368 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4376 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 208 : <nl> - # line 1777 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1785 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / / variable or collection or view <nl> auto ast = parser - > ast ( ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> <nl> ( yyval . node ) = node ; <nl> } <nl> - # line 4405 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4413 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 209 : <nl> - # line 1809 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1817 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 4413 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4421 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 210 : <nl> - # line 1812 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1820 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 4421 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4429 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 211 : <nl> - # line 1815 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1823 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = ( yyvsp [ 0 ] . 
node ) ; <nl> <nl> YYLTYPE yylloc = yyloc_default ; <nl> ABORT_OOM <nl> } <nl> } <nl> - # line 4433 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4441 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 212 : <nl> - # line 1822 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1830 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> if ( ( yyvsp [ - 1 ] . node ) - > type = = NODE_TYPE_EXPANSION ) { <nl> / / create a dummy passthru node that reduces and evaluates the expansion first <nl> YYLTYPE yylloc = yyloc_default ; <nl> ( yyval . node ) = ( yyvsp [ - 1 ] . node ) ; <nl> } <nl> } <nl> - # line 4448 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4456 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 213 : <nl> - # line 1832 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1840 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> parser - > ast ( ) - > scopes ( ) - > start ( arangodb : : aql : : AQL_SCOPE_SUBQUERY ) ; <nl> parser - > ast ( ) - > startSubQuery ( ) ; <nl> } <nl> - # line 4457 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4465 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 214 : <nl> - # line 1835 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1843 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> AstNode * node = parser - > ast ( ) - > endSubQuery ( ) ; <nl> parser - > ast ( ) - > scopes ( ) - > endCurrent ( ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeReference ( variableName ) ; <nl> } <nl> - # line 4472 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4480 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 215 : <nl> - # line 1845 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1853 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / / named variable access , e . g . variable . reference <nl> if ( ( yyvsp [ - 2 ] . node ) - > type = = NODE_TYPE_EXPANSION ) { <nl> YYLTYPE yylloc = yyloc_default ; <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeAttributeAccess ( ( yyvsp [ - 2 ] . node ) , ( yyvsp [ 0 ] . strval ) . value , ( yyvsp [ 0 ] . strval ) . length ) ; <nl> } <nl> } <nl> - # line 4492 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4500 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 216 : <nl> - # line 1860 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1868 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / / named variable access , e . g . variable . @ reference <nl> if ( ( yyvsp [ - 2 ] . node ) - > type = = NODE_TYPE_EXPANSION ) { <nl> YYLTYPE yylloc = yyloc_default ; <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeBoundAttributeAccess ( ( yyvsp [ - 2 ] . node ) , ( yyvsp [ 0 ] . node ) ) ; <nl> } <nl> } <nl> - # line 4511 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4519 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 217 : <nl> - # line 1874 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1882 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / / indexed variable access , e . g . variable [ index ] <nl> if ( ( yyvsp [ - 3 ] . node ) - > type = = NODE_TYPE_EXPANSION ) { <nl> YYLTYPE yylloc = yyloc_default ; <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeIndexedAccess ( ( yyvsp [ - 3 ] . node ) , ( yyvsp [ - 1 ] . 
node ) ) ; <nl> } <nl> } <nl> - # line 4530 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4538 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 218 : <nl> - # line 1888 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1896 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> / / variable expansion , e . g . variable [ * ] , with optional FILTER , LIMIT and RETURN clauses <nl> if ( ( yyvsp [ 0 ] . intval ) > 1 & & ( yyvsp [ - 2 ] . node ) - > type = = NODE_TYPE_EXPANSION ) { <nl> YYLTYPE yylloc = yyloc_default ; <nl> auto scopes = parser - > ast ( ) - > scopes ( ) ; <nl> scopes - > stackCurrentVariable ( scopes - > getVariable ( nextName ) ) ; <nl> } <nl> - # line 4558 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4566 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 219 : <nl> - # line 1910 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1918 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto scopes = parser - > ast ( ) - > scopes ( ) ; <nl> scopes - > unstackCurrentVariable ( ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeExpansion ( ( yyvsp [ - 5 ] . intval ) , iterator , parser - > ast ( ) - > createNodeReference ( variable - > name ) , ( yyvsp [ - 3 ] . node ) , ( yyvsp [ - 2 ] . node ) , ( yyvsp [ - 1 ] . node ) ) ; <nl> } <nl> } <nl> - # line 4581 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4589 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 220 : <nl> - # line 1931 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1939 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 4589 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4597 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 221 : <nl> - # line 1934 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1942 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 4597 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4605 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 222 : <nl> - # line 1940 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1948 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> if ( ( yyvsp [ 0 ] . node ) = = nullptr ) { <nl> ABORT_OOM <nl> YYLTYPE yylloc = yyloc_default ; <nl> <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 4609 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4617 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 223 : <nl> - # line 1947 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1955 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> if ( ( yyvsp [ 0 ] . node ) = = nullptr ) { <nl> ABORT_OOM <nl> YYLTYPE yylloc = yyloc_default ; <nl> <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 4621 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4629 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 224 : <nl> - # line 1957 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1965 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeValueString ( ( yyvsp [ 0 ] . strval ) . value , ( yyvsp [ 0 ] . strval ) . 
length ) ; <nl> } <nl> - # line 4629 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4637 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 225 : <nl> - # line 1960 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1968 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = ( yyvsp [ 0 ] . node ) ; <nl> } <nl> - # line 4637 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4645 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 226 : <nl> - # line 1963 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1971 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeValueNull ( ) ; <nl> } <nl> - # line 4645 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4653 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 227 : <nl> - # line 1966 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1974 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeValueBool ( true ) ; <nl> } <nl> - # line 4653 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4661 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 228 : <nl> - # line 1969 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1977 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeValueBool ( false ) ; <nl> } <nl> - # line 4661 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4669 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 229 : <nl> - # line 1975 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1983 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto const & resolver = parser - > query ( ) - > resolver ( ) ; <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeCollection ( resolver , ( yyvsp [ 0 ] . strval ) . value , ( yyvsp [ 0 ] . strval ) . length , arangodb : : AccessMode : : Type : : WRITE ) ; <nl> } <nl> - # line 4670 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4678 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 230 : <nl> - # line 1979 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1987 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> auto const & resolver = parser - > query ( ) - > resolver ( ) ; <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeCollection ( resolver , ( yyvsp [ 0 ] . strval ) . value , ( yyvsp [ 0 ] . strval ) . length , arangodb : : AccessMode : : Type : : WRITE ) ; <nl> } <nl> - # line 4679 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4687 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 231 : <nl> - # line 1983 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 1991 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> if ( ( yyvsp [ 0 ] . strval ) . length < 2 | | ( yyvsp [ 0 ] . strval ) . value [ 0 ] ! = ' @ ' ) { <nl> parser - > registerParseError ( TRI_ERROR_QUERY_BIND_PARAMETER_TYPE , TRI_errno_string ( TRI_ERROR_QUERY_BIND_PARAMETER_TYPE ) , ( yyvsp [ 0 ] . strval ) . value , yylloc . first_line , yylloc . first_column ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeParameterDatasource ( ( yyvsp [ 0 ] . strval ) . value , ( yyvsp [ 0 ] . strval ) . 
length ) ; <nl> } <nl> - # line 4691 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4699 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 232 : <nl> - # line 1993 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 2001 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> if ( ( yyvsp [ 0 ] . strval ) . length < 2 | | ( yyvsp [ 0 ] . strval ) . value [ 0 ] ! = ' @ ' ) { <nl> parser - > registerParseError ( TRI_ERROR_QUERY_BIND_PARAMETER_TYPE , TRI_errno_string ( TRI_ERROR_QUERY_BIND_PARAMETER_TYPE ) , ( yyvsp [ 0 ] . strval ) . value , yylloc . first_line , yylloc . first_column ) ; <nl> YYLTYPE yylloc = yyloc_default ; <nl> <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeParameterDatasource ( ( yyvsp [ 0 ] . strval ) . value , ( yyvsp [ 0 ] . strval ) . length ) ; <nl> } <nl> - # line 4703 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4711 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 233 : <nl> - # line 2000 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 2008 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . node ) = parser - > ast ( ) - > createNodeParameter ( ( yyvsp [ 0 ] . strval ) . value , ( yyvsp [ 0 ] . strval ) . length ) ; <nl> } <nl> - # line 4711 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4719 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 234 : <nl> - # line 2006 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 2014 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . strval ) = ( yyvsp [ 0 ] . strval ) ; <nl> } <nl> - # line 4719 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4727 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 235 : <nl> - # line 2009 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 2017 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . strval ) = ( yyvsp [ 0 ] . strval ) ; <nl> } <nl> - # line 4727 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4735 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> case 236 : <nl> - # line 2014 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> + # line 2022 " Aql / grammar . y " / * yacc . c : 1652 * / <nl> { <nl> ( yyval . strval ) = ( yyvsp [ 0 ] . strval ) ; <nl> } <nl> - # line 4735 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4743 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> break ; <nl> <nl> <nl> - # line 4739 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> + # line 4747 " Aql / grammar . cpp " / * yacc . c : 1652 * / <nl> default : break ; <nl> } <nl> / * User semantic actions sometimes alter yychar , and that requires <nl> mmm a / arangod / Aql / grammar . y <nl> ppp b / arangod / Aql / grammar . y <nl> prune_and_options : <nl> node - > addMember ( $ 2 ) ; <nl> / / Options <nl> node - > addMember ( $ 4 ) ; <nl> - <nl> } <nl> ; <nl> <nl> for_statement : <nl> } prune_and_options { <nl> auto graphInfoNode = static_cast < AstNode * > ( parser - > popStack ( ) ) ; <nl> auto variablesNode = static_cast < AstNode * > ( parser - > popStack ( ) ) ; <nl> + <nl> + auto prune = graphInfoNode - > getMember ( 3 ) ; <nl> + if ( prune ! 
= nullptr ) { <nl> + Ast : : traverseReadOnly ( prune , [ & ] ( AstNode const * node ) { <nl> + if ( node - > type = = NODE_TYPE_REFERENCE & & node - > hasFlag ( AstNodeFlagType : : FLAG_SUBQUERY_REFERENCE ) ) { <nl> + parser - > registerParseError ( TRI_ERROR_QUERY_PARSE , " prune condition must not use a subquery " , yylloc . first_line , yylloc . first_column ) ; <nl> + } <nl> + } ) ; <nl> + } <nl> auto node = parser - > ast ( ) - > createNodeTraversal ( variablesNode , graphInfoNode ) ; <nl> parser - > ast ( ) - > addOperation ( node ) ; <nl> } <nl> expression_or_query : <nl> auto subQuery = parser - > ast ( ) - > createNodeLet ( variableName . c_str ( ) , variableName . size ( ) , node , false ) ; <nl> parser - > ast ( ) - > addOperation ( subQuery ) ; <nl> <nl> - $ $ = parser - > ast ( ) - > createNodeReference ( variableName ) ; <nl> + $ $ = parser - > ast ( ) - > createNodeSubqueryReference ( variableName ) ; <nl> } <nl> ; <nl> <nl> mmm a / tests / js / server / aql / aql - graph - traverser . js <nl> ppp b / tests / js / server / aql / aql - graph - traverser . js <nl> function complexFilteringSuite ( ) { <nl> <nl> tearDownAll : cleanup , <nl> <nl> + testPruneWithSubquery : function ( ) { <nl> + let query = ` FOR v , e , p IN 1 . . 100 OUTBOUND @ start @ ecol PRUNE 2 < = LENGTH ( FOR w IN p . vertices FILTER w . _id = = v . _id RETURN 1 ) RETURN p ` ; <nl> + try { <nl> + let bindVars = { <nl> + ' @ eCol ' : en , <nl> + ' start ' : vertex . Tri1 <nl> + } ; <nl> + db . _query ( query , bindVars ) ; <nl> + fail ( ) ; <nl> + } catch ( err ) { <nl> + assertEqual ( err . errorNum , errors . ERROR_QUERY_PARSE . code ) ; <nl> + } <nl> + } , <nl> + <nl> testVertexEarlyPruneHighDepth : function ( ) { <nl> var query = ` WITH $ { vn } <nl> FOR v , e , p IN 100 OUTBOUND @ start @ @ eCol <nl>
disallow subqueries in AQL traversal PRUNE conditions ( )
arangodb/arangodb
a3da5cec88271223509cbfc6fc673889dd712a40
2019-10-14T14:01:55Z
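For context on the arangodb record above: the grammar.y change tags subquery results produced by expression_or_query with createNodeSubqueryReference instead of a plain reference, and the for_statement rule then walks the PRUNE condition with Ast::traverseReadOnly, registering TRI_ERROR_QUERY_PARSE ("prune condition must not use a subquery") whenever it meets a node carrying FLAG_SUBQUERY_REFERENCE. A minimal sketch of a query this now rejects, adapted from the new testPruneWithSubquery case (the @start and @@eCol bind parameters are illustrative placeholders for a start vertex and an edge collection):

    FOR v, e, p IN 1..100 OUTBOUND @start @@eCol
      PRUNE 2 <= LENGTH(FOR w IN p.vertices FILTER w._id == v._id RETURN 1)
      RETURN p

Only the PRUNE expression is checked by this diff; FILTER and RETURN clauses of a traversal are not touched, so subqueries outside PRUNE appear unaffected by this change.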
deleted file mode 100644 <nl> index c95d16a4ed27c . . 0000000000000 <nl> mmm a / tensorflow / lite / kernels / parse_example / BUILD <nl> ppp / dev / null <nl> <nl> - # Kernel for custom parse_example <nl> - package ( <nl> - default_visibility = [ <nl> - " / / visibility : public " , <nl> - ] , <nl> - licenses = [ " notice " ] , # Apache 2 . 0 <nl> - ) <nl> - <nl> - cc_library ( <nl> - name = " parse_example " , <nl> - srcs = [ <nl> - " example_proto_fast_parsing . cc " , <nl> - " parse_example . cc " , <nl> - ] , <nl> - hdrs = [ <nl> - " example_proto_fast_parsing . h " , <nl> - " parse_example . h " , <nl> - ] , <nl> - deps = [ <nl> - " @ com_google_absl / / absl / base " , <nl> - " @ com_google_absl / / absl / container : flat_hash_map " , <nl> - " @ flatbuffers " , <nl> - " / / tensorflow / lite : framework " , <nl> - " / / tensorflow / lite / c : common " , <nl> - " / / tensorflow / lite / kernels : kernel_util " , <nl> - " / / tensorflow / lite / kernels / internal : tensor " , <nl> - " / / tensorflow / lite : string_util " , <nl> - ] + select ( { <nl> - " / / tensorflow : android " : [ <nl> - " / / tensorflow / core : portable_tensorflow_lib_lite " , <nl> - ] , <nl> - " / / tensorflow : ios " : [ <nl> - " / / tensorflow / core : portable_tensorflow_lib_lite " , <nl> - ] , <nl> - " / / conditions : default " : [ <nl> - " / / tensorflow / core : core_cpu " , <nl> - " / / tensorflow / core : feature_util " , <nl> - " / / tensorflow / core : framework " , <nl> - " / / tensorflow / core : framework_internal " , <nl> - " / / tensorflow / core : lib " , <nl> - " / / tensorflow / core : lib_internal " , <nl> - " / / tensorflow / core : protos_all_cc " , <nl> - ] , <nl> - } ) , <nl> - ) <nl> - <nl> - cc_test ( <nl> - name = " parse_example_test " , <nl> - srcs = [ " parse_example_test . cc " ] , <nl> - deps = [ <nl> - " : parse_example " , <nl> - " @ flatbuffers " , <nl> - " / / tensorflow / lite / c : common " , <nl> - " / / tensorflow / lite / core / api : op_resolver " , <nl> - " / / tensorflow / lite / kernels : builtin_ops " , <nl> - " / / tensorflow / lite / kernels : test_main " , <nl> - " / / tensorflow / lite / kernels : test_util " , <nl> - " / / tensorflow / lite / schema : schema_fbs " , <nl> - " / / tensorflow / lite : framework " , <nl> - " / / tensorflow / lite : string_util " , <nl> - ] + select ( { <nl> - " / / tensorflow : android " : [ <nl> - " / / tensorflow / core : portable_tensorflow_lib_lite " , <nl> - ] , <nl> - " / / tensorflow : ios " : [ <nl> - " / / tensorflow / core : portable_tensorflow_lib_lite " , <nl> - ] , <nl> - " / / conditions : default " : [ <nl> - " / / tensorflow / core : protos_all_cc " , <nl> - " / / tensorflow / core / example : feature_util " , <nl> - " / / tensorflow / core / platform : protobuf " , <nl> - " / / tensorflow / core / platform : tstring " , <nl> - ] , <nl> - } ) , <nl> - ) <nl> deleted file mode 100644 <nl> index 5490963b5c480 . . 0000000000000 <nl> mmm a / tensorflow / lite / kernels / parse_example / example_proto_fast_parsing . cc <nl> ppp / dev / null <nl> <nl> - / * Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 
0 <nl> - <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - # include " tensorflow / lite / kernels / parse_example / example_proto_fast_parsing . h " <nl> - <nl> - namespace tensorflow { <nl> - namespace example { <nl> - <nl> - string ExampleName ( const gtl : : ArraySlice < tstring > example_names , int n ) { <nl> - return example_names . empty ( ) ? " < unknown > " : example_names [ n ] ; <nl> - } <nl> - <nl> - void CountSparseFeatures ( <nl> - const std : : vector < std : : vector < SparseBuffer > > & sparse_buffers , size_t d , <nl> - size_t * total_num_features , size_t * max_num_features ) { <nl> - for ( auto & sparse_values_tmp : sparse_buffers ) { <nl> - const std : : vector < size_t > & end_indices = <nl> - sparse_values_tmp [ d ] . example_end_indices ; <nl> - * total_num_features + = end_indices . back ( ) ; <nl> - * max_num_features = std : : max ( * max_num_features , end_indices [ 0 ] ) ; <nl> - for ( size_t i = 1 ; i < end_indices . size ( ) ; + + i ) { <nl> - size_t example_size = end_indices [ i ] - end_indices [ i - 1 ] ; <nl> - * max_num_features = std : : max ( * max_num_features , example_size ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - void CopySparseBufferToTensor ( DataType dtype , size_t offset , SparseBuffer * src , <nl> - Tensor * dst ) { <nl> - switch ( dtype ) { <nl> - case DT_INT64 : { <nl> - std : : copy ( src - > int64_list . begin ( ) , src - > int64_list . end ( ) , <nl> - dst - > flat < int64 > ( ) . data ( ) + offset ) ; <nl> - break ; <nl> - } <nl> - case DT_FLOAT : { <nl> - std : : copy ( src - > float_list . begin ( ) , src - > float_list . end ( ) , <nl> - dst - > flat < float > ( ) . data ( ) + offset ) ; <nl> - break ; <nl> - } <nl> - case DT_STRING : { <nl> - std : : move ( src - > bytes_list . begin ( ) , src - > bytes_list . end ( ) , <nl> - dst - > flat < tstring > ( ) . data ( ) + offset ) ; <nl> - break ; <nl> - } <nl> - default : <nl> - ReportUnexpectedDataType ( dtype ) ; <nl> - } <nl> - } <nl> - <nl> - uint8 PeekTag ( protobuf : : io : : CodedInputStream * stream ) { <nl> - DCHECK ( stream ! = nullptr ) ; <nl> - const void * ptr ; <nl> - int size ; <nl> - if ( ! stream - > GetDirectBufferPointer ( & ptr , & size ) ) return 0 ; <nl> - return * static_cast < const uint8 * > ( ptr ) ; <nl> - } <nl> - <nl> - bool ParseString ( protobuf : : io : : CodedInputStream * stream , StringPiece * result ) { <nl> - DCHECK ( stream ! = nullptr ) ; <nl> - DCHECK ( result ! = nullptr ) ; <nl> - uint32 length ; <nl> - if ( ! stream - > ReadVarint32 ( & length ) ) return false ; <nl> - if ( length = = 0 ) { <nl> - * result = StringPiece ( nullptr , 0 ) ; <nl> - return true ; <nl> - } <nl> - const void * stream_alias ; <nl> - int stream_size ; <nl> - if ( ! 
stream - > GetDirectBufferPointer ( & stream_alias , & stream_size ) ) { <nl> - return false ; <nl> - } <nl> - if ( static_cast < uint32 > ( stream_size ) < length ) return false ; <nl> - * result = StringPiece ( static_cast < const char * > ( stream_alias ) , length ) ; <nl> - stream - > Skip ( length ) ; <nl> - return true ; <nl> - } <nl> - <nl> - bool ParseFeatureMapEntry ( protobuf : : io : : CodedInputStream * stream , <nl> - parsed : : FeatureMapEntry * feature_map_entry ) { <nl> - DCHECK ( stream ! = nullptr ) ; <nl> - DCHECK ( feature_map_entry ! = nullptr ) ; <nl> - uint32 length ; <nl> - if ( ! stream - > ReadVarint32 ( & length ) ) return false ; <nl> - auto limit = stream - > PushLimit ( length ) ; <nl> - if ( ! stream - > ExpectTag ( kDelimitedTag ( 1 ) ) ) return false ; <nl> - if ( ! ParseString ( stream , & feature_map_entry - > first ) ) return false ; <nl> - if ( ! stream - > ExpectTag ( kDelimitedTag ( 2 ) ) ) return false ; <nl> - StringPiece feature_string_piece ; <nl> - if ( ! ParseString ( stream , & feature_string_piece ) ) return false ; <nl> - feature_map_entry - > second = parsed : : Feature ( feature_string_piece ) ; <nl> - if ( ! stream - > ExpectAtEnd ( ) ) return false ; <nl> - stream - > PopLimit ( limit ) ; <nl> - return true ; <nl> - } <nl> - <nl> - bool ParseFeatures ( protobuf : : io : : CodedInputStream * stream , <nl> - parsed : : Example * example ) { <nl> - DCHECK ( stream ! = nullptr ) ; <nl> - DCHECK ( example ! = nullptr ) ; <nl> - uint32 length ; <nl> - if ( ! stream - > ReadVarint32 ( & length ) ) return false ; <nl> - auto limit = stream - > PushLimit ( length ) ; <nl> - while ( ! stream - > ExpectAtEnd ( ) ) { <nl> - parsed : : FeatureMapEntry feature_map_entry ; <nl> - if ( ! stream - > ExpectTag ( kDelimitedTag ( 1 ) ) ) return false ; <nl> - if ( ! ParseFeatureMapEntry ( stream , & feature_map_entry ) ) return false ; <nl> - example - > push_back ( std : : move ( feature_map_entry ) ) ; <nl> - } <nl> - stream - > PopLimit ( limit ) ; <nl> - return true ; <nl> - } <nl> - <nl> - bool ParseExample ( protobuf : : io : : CodedInputStream * stream , <nl> - parsed : : Example * example ) { <nl> - DCHECK ( stream ! = nullptr ) ; <nl> - DCHECK ( example ! = nullptr ) ; <nl> - / / Loop over the input stream which may contain multiple serialized Example <nl> - / / protos merged together as strings . This behavior is consistent with Proto ' s <nl> - / / ParseFromString when string representations are concatenated . <nl> - while ( ! stream - > ExpectAtEnd ( ) ) { <nl> - if ( ! stream - > ExpectTag ( kDelimitedTag ( 1 ) ) ) { <nl> - if ( ! SkipExtraneousTag ( stream ) ) return false ; <nl> - } else { <nl> - if ( ! ParseFeatures ( stream , example ) ) return false ; <nl> - } <nl> - } <nl> - return true ; <nl> - } <nl> - <nl> - bool ParseExample ( StringPiece serialized , parsed : : Example * example ) { <nl> - DCHECK ( example ! = nullptr ) ; <nl> - protobuf : : io : : CodedInputStream stream ( <nl> - reinterpret_cast < const uint8 * > ( serialized . data ( ) ) , serialized . size ( ) ) ; <nl> - EnableAliasing ( & stream ) ; <nl> - return ParseExample ( & stream , example ) ; <nl> - } <nl> - <nl> - template < > <nl> - void CopyOrMoveBlock ( const tstring * b , const tstring * e , tstring * t ) { <nl> - std : : move ( b , e , t ) ; <nl> - } <nl> - <nl> - template < > <nl> - const SmallVector < int64 > & GetListFromBuffer < int64 > ( const SparseBuffer & buffer ) { <nl> - return buffer . 
int64_list ; <nl> - } <nl> - template < > <nl> - const SmallVector < float > & GetListFromBuffer < float > ( const SparseBuffer & buffer ) { <nl> - return buffer . float_list ; <nl> - } <nl> - template < > <nl> - const SmallVector < tstring > & GetListFromBuffer < tstring > ( <nl> - const SparseBuffer & buffer ) { <nl> - return buffer . bytes_list ; <nl> - } <nl> - <nl> - } / / namespace example <nl> - } / / namespace tensorflow <nl> deleted file mode 100644 <nl> index dc0252d256ea2 . . 0000000000000 <nl> mmm a / tensorflow / lite / kernels / parse_example / example_proto_fast_parsing . h <nl> ppp / dev / null <nl> <nl> - / * Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - # ifndef TENSORFLOW_LITE_KERNELS_PARSE_EXAMPLE_EXAMPLE_PROTO_FAST_PARSING_H_ <nl> - # define TENSORFLOW_LITE_KERNELS_PARSE_EXAMPLE_EXAMPLE_PROTO_FAST_PARSING_H_ <nl> - # include " tensorflow / core / util / example_proto_fast_parsing . h " <nl> - <nl> - # include < vector > <nl> - <nl> - # include " absl / base / casts . h " <nl> - # include " absl / container / flat_hash_map . h " <nl> - # include " tensorflow / core / example / example . pb . h " <nl> - # include " tensorflow / core / example / feature . pb . h " <nl> - # include " tensorflow / core / framework / allocator . h " <nl> - # include " tensorflow / core / framework / numeric_op . h " <nl> - # include " tensorflow / core / framework / op_kernel . h " <nl> - # include " tensorflow / core / framework / register_types . h " <nl> - # include " tensorflow / core / framework / types . pb . h " <nl> - # include " tensorflow / core / lib / core / blocking_counter . h " <nl> - # include " tensorflow / core / lib / core / errors . h " <nl> - # include " tensorflow / core / lib / core / threadpool . h " <nl> - # include " tensorflow / core / lib / gtl / inlined_vector . h " <nl> - # include " tensorflow / core / lib / monitoring / counter . h " <nl> - # include " tensorflow / core / platform / byte_order . h " <nl> - # include " tensorflow / core / platform / logging . h " <nl> - # include " tensorflow / core / platform / protobuf . h " <nl> - # include " tensorflow / core / util / presized_cuckoo_map . h " <nl> - # include " tensorflow / core / util / sparse / sparse_tensor . 
h " <nl> - <nl> - namespace tensorflow { <nl> - namespace example { <nl> - <nl> - template < typename T > <nl> - using SmallVector = gtl : : InlinedVector < T , 4 > ; <nl> - <nl> - template < typename T > <nl> - class LimitedArraySlice { <nl> - public : <nl> - using value_type = T ; <nl> - <nl> - LimitedArraySlice ( T * begin , size_t num_elements ) <nl> - : current_ ( begin ) , begin_ ( begin ) , end_ ( begin + num_elements ) { } <nl> - <nl> - / / May return negative if there were push_back calls after slice was filled . <nl> - int64 EndDistance ( ) const { return end_ - current_ ; } <nl> - <nl> - / / Attempts to push value to the back of this . If the slice has <nl> - / / already been filled , this method has no effect on the underlying data , but <nl> - / / it changes the number returned by EndDistance into negative values . <nl> - void push_back ( T & & value ) { <nl> - if ( EndDistance ( ) > 0 ) * current_ = std : : move ( value ) ; <nl> - + + current_ ; <nl> - } <nl> - <nl> - / / " Constructs " an element at the back of this by resizing the slice , and <nl> - / / returns a mutable reference to the new last element . <nl> - / / REQUIRES : EndDistance ( ) > 0 . <nl> - T & construct_at_end ( ) { <nl> - DCHECK_GT ( EndDistance ( ) , 0 ) ; <nl> - return * ( current_ + + ) ; <nl> - } <nl> - <nl> - / / Returns a mutable reference to the last element in the slice . <nl> - / / REQUIRES : size ( ) > 0 . <nl> - T & back ( ) { return * ( current_ - 1 ) ; } <nl> - <nl> - / / Returns the number of elements in the slice . <nl> - size_t size ( ) const { return std : : min ( current_ - begin_ , end_ - begin_ ) ; } <nl> - <nl> - / / Attempts to resize the vector to the given size . It does so by advancing <nl> - / / the pointer to the current element , possibly beyond the end of the slice . <nl> - / / As a consequence , calling ` size ( ) ` after ` resize ( x ) ` was called might <nl> - / / return a value less than ` x ` . <nl> - void resize ( size_t size ) { current_ = begin_ + size ; } <nl> - <nl> - / / Returns the pointer to the underlying data buffer . <nl> - T * data ( ) { return begin_ ; } <nl> - <nl> - private : <nl> - T * current_ ; <nl> - T * begin_ ; <nl> - T * end_ ; <nl> - } ; <nl> - <nl> - template < typename A > <nl> - auto EnableAliasing ( A * a ) - > decltype ( a - > EnableAliasing ( true ) , void ( ) ) { <nl> - a - > EnableAliasing ( true ) ; <nl> - } <nl> - <nl> - template < typename A > <nl> - void EnableAliasing ( A & & a ) { } <nl> - <nl> - uint8 PeekTag ( protobuf : : io : : CodedInputStream * stream ) ; <nl> - <nl> - constexpr uint8 kVarintTag ( uint32 tag ) { return ( tag < < 3 ) | 0 ; } <nl> - constexpr uint8 kDelimitedTag ( uint32 tag ) { return ( tag < < 3 ) | 2 ; } <nl> - constexpr uint8 kFixed32Tag ( uint32 tag ) { return ( tag < < 3 ) | 5 ; } <nl> - <nl> - namespace parsed { <nl> - <nl> - / / ParseDataType has to be called first , then appropriate ParseZzzzList . <nl> - class Feature { <nl> - public : <nl> - Feature ( ) { } <nl> - explicit Feature ( StringPiece serialized ) : serialized_ ( serialized ) { } <nl> - <nl> - Status ParseDataType ( DataType * dtype ) { <nl> - DCHECK ( dtype ! = nullptr ) ; <nl> - if ( serialized_ . empty ( ) ) { <nl> - * dtype = DT_INVALID ; <nl> - return Status : : OK ( ) ; <nl> - } <nl> - uint8 oneof_tag = static_cast < uint8 > ( * serialized_ . data ( ) ) ; <nl> - serialized_ . 
remove_prefix ( 1 ) ; <nl> - switch ( oneof_tag ) { <nl> - case kDelimitedTag ( 1 ) : <nl> - * dtype = DT_STRING ; <nl> - break ; <nl> - case kDelimitedTag ( 2 ) : <nl> - * dtype = DT_FLOAT ; <nl> - break ; <nl> - case kDelimitedTag ( 3 ) : <nl> - * dtype = DT_INT64 ; <nl> - break ; <nl> - default : <nl> - / / Initialize variable to avoid compiler warning <nl> - * dtype = DT_INVALID ; <nl> - return errors : : InvalidArgument ( " Unsupported datatype . " ) ; <nl> - } <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - bool GetNumElementsInBytesList ( int * num_elements ) { <nl> - protobuf : : io : : CodedInputStream stream ( <nl> - reinterpret_cast < const uint8 * > ( serialized_ . data ( ) ) , serialized_ . size ( ) ) ; <nl> - EnableAliasing ( & stream ) ; <nl> - uint32 length = 0 ; <nl> - if ( ! stream . ReadVarint32 ( & length ) ) return false ; <nl> - auto limit = stream . PushLimit ( length ) ; <nl> - * num_elements = 0 ; <nl> - while ( ! stream . ExpectAtEnd ( ) ) { <nl> - if ( ! stream . ExpectTag ( kDelimitedTag ( 1 ) ) ) return false ; <nl> - uint32 bytes_length = 0 ; <nl> - if ( ! stream . ReadVarint32 ( & bytes_length ) ) return false ; <nl> - if ( ! stream . Skip ( bytes_length ) ) return false ; <nl> - + + * num_elements ; <nl> - } <nl> - stream . PopLimit ( limit ) ; <nl> - return true ; <nl> - } <nl> - <nl> - / / Helper methods <nl> - tstring * construct_at_end ( LimitedArraySlice < tstring > * bytes_list ) { <nl> - if ( bytes_list - > EndDistance ( ) < = 0 ) { <nl> - return nullptr ; <nl> - } <nl> - return & bytes_list - > construct_at_end ( ) ; <nl> - } <nl> - tstring * construct_at_end ( SmallVector < tstring > * bytes_list ) { <nl> - return & bytes_list - > emplace_back ( ) ; <nl> - } <nl> - <nl> - template < typename Result > <nl> - bool ParseBytesList ( Result * bytes_list ) { <nl> - DCHECK ( bytes_list ! = nullptr ) ; <nl> - <nl> - protobuf : : io : : CodedInputStream stream ( <nl> - reinterpret_cast < const uint8 * > ( serialized_ . data ( ) ) , serialized_ . size ( ) ) ; <nl> - <nl> - EnableAliasing ( & stream ) ; <nl> - <nl> - uint32 length ; <nl> - if ( ! stream . ReadVarint32 ( & length ) ) return false ; <nl> - auto limit = stream . PushLimit ( length ) ; <nl> - <nl> - while ( ! stream . ExpectAtEnd ( ) ) { <nl> - if ( ! stream . ExpectTag ( kDelimitedTag ( 1 ) ) ) return false ; <nl> - / / parse string <nl> - uint32 bytes_length ; <nl> - if ( ! stream . ReadVarint32 ( & bytes_length ) ) return false ; <nl> - tstring * bytes = construct_at_end ( bytes_list ) ; <nl> - if ( bytes = = nullptr ) return false ; <nl> - bytes - > resize_uninitialized ( bytes_length ) ; <nl> - if ( ! stream . ReadRaw ( bytes - > data ( ) , bytes_length ) ) return false ; <nl> - } <nl> - stream . PopLimit ( limit ) ; <nl> - return true ; <nl> - } <nl> - <nl> - template < typename Result > <nl> - bool ParseFloatList ( Result * float_list ) { <nl> - DCHECK ( float_list ! = nullptr ) ; <nl> - protobuf : : io : : CodedInputStream stream ( <nl> - reinterpret_cast < const uint8 * > ( serialized_ . data ( ) ) , serialized_ . size ( ) ) ; <nl> - EnableAliasing ( & stream ) ; <nl> - uint32 length ; <nl> - if ( ! stream . ReadVarint32 ( & length ) ) return false ; <nl> - auto limit = stream . PushLimit ( length ) ; <nl> - <nl> - if ( ! stream . ExpectAtEnd ( ) ) { <nl> - uint8 peek_tag = PeekTag ( & stream ) ; <nl> - if ( peek_tag ! = kDelimitedTag ( 1 ) & & peek_tag ! 
= kFixed32Tag ( 1 ) ) { <nl> - return false ; <nl> - } <nl> - <nl> - constexpr int32 kNumFloatBytes = 4 ; <nl> - if ( peek_tag = = kDelimitedTag ( 1 ) ) { / / packed <nl> - if ( ! stream . ExpectTag ( kDelimitedTag ( 1 ) ) ) return false ; / / packed tag <nl> - uint32 packed_length ; <nl> - if ( ! stream . ReadVarint32 ( & packed_length ) ) return false ; <nl> - auto packed_limit = stream . PushLimit ( packed_length ) ; <nl> - <nl> - / / Store the initial size to know the offset we have to start writing <nl> - / / data from before resizing the output " vector " . <nl> - const size_t initial_size = float_list - > size ( ) ; <nl> - float_list - > resize ( initial_size + packed_length / kNumFloatBytes ) ; <nl> - <nl> - / / If the result data type is float and we are on a little endian <nl> - / / machine then we can simply memcpy the data from the proto into the <nl> - / / result vector . <nl> - if ( port : : kLittleEndian & & <nl> - sizeof ( typename Result : : value_type ) = = kNumFloatBytes ) { <nl> - / / Calculate the length of the buffer available what can be less than <nl> - / / what we requested in resize in case of a LimitedArraySlice . <nl> - const uint32 bytes_to_copy = <nl> - std : : min ( static_cast < uint32 > ( ( float_list - > size ( ) - initial_size ) * <nl> - kNumFloatBytes ) , <nl> - packed_length ) ; <nl> - if ( ! stream . ReadRaw ( float_list - > data ( ) + initial_size , bytes_to_copy ) ) <nl> - return false ; <nl> - } else { <nl> - int64 index = initial_size ; <nl> - while ( ! stream . ExpectAtEnd ( ) ) { <nl> - uint32 buffer32 ; <nl> - if ( ! stream . ReadLittleEndian32 ( & buffer32 ) ) return false ; <nl> - if ( index < float_list - > size ( ) ) { <nl> - float_list - > data ( ) [ index ] = absl : : bit_cast < float > ( buffer32 ) ; <nl> - + + index ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - stream . PopLimit ( packed_limit ) ; <nl> - } else { / / non - packed <nl> - const size_t initial_size = float_list - > size ( ) ; <nl> - / / 1 byte for the tag ( ` 1 ` encoded as Variant32 ) and kNumFloatBytes for <nl> - / / the value . <nl> - const int64 num_elements = <nl> - stream . BytesUntilLimit ( ) / ( 1 + kNumFloatBytes ) ; <nl> - float_list - > resize ( initial_size + num_elements ) ; <nl> - int64 index = initial_size ; <nl> - while ( ! stream . ExpectAtEnd ( ) ) { <nl> - if ( ! stream . ExpectTag ( kFixed32Tag ( 1 ) ) ) return false ; <nl> - uint32 buffer32 ; <nl> - if ( ! stream . ReadLittleEndian32 ( & buffer32 ) ) return false ; <nl> - float_list - > data ( ) [ index ] = absl : : bit_cast < float > ( buffer32 ) ; <nl> - + + index ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - stream . PopLimit ( limit ) ; <nl> - return true ; <nl> - } <nl> - <nl> - template < typename Result > <nl> - bool ParseInt64List ( Result * int64_list ) { <nl> - DCHECK ( int64_list ! = nullptr ) ; <nl> - protobuf : : io : : CodedInputStream stream ( <nl> - reinterpret_cast < const uint8 * > ( serialized_ . data ( ) ) , serialized_ . size ( ) ) ; <nl> - EnableAliasing ( & stream ) ; <nl> - uint32 length ; <nl> - if ( ! stream . ReadVarint32 ( & length ) ) return false ; <nl> - auto limit = stream . PushLimit ( length ) ; <nl> - <nl> - if ( ! stream . ExpectAtEnd ( ) ) { <nl> - uint8 peek_tag = PeekTag ( & stream ) ; <nl> - if ( peek_tag ! = kDelimitedTag ( 1 ) & & peek_tag ! = kVarintTag ( 1 ) ) { <nl> - return false ; <nl> - } <nl> - if ( peek_tag = = kDelimitedTag ( 1 ) ) { / / packed <nl> - if ( ! stream . 
ExpectTag ( kDelimitedTag ( 1 ) ) ) return false ; / / packed tag <nl> - uint32 packed_length ; <nl> - if ( ! stream . ReadVarint32 ( & packed_length ) ) return false ; <nl> - auto packed_limit = stream . PushLimit ( packed_length ) ; <nl> - <nl> - while ( ! stream . ExpectAtEnd ( ) ) { <nl> - protobuf_uint64 n ; / / There is no API for int64 <nl> - if ( ! stream . ReadVarint64 ( & n ) ) return false ; <nl> - int64_list - > push_back ( static_cast < int64 > ( n ) ) ; <nl> - } <nl> - <nl> - stream . PopLimit ( packed_limit ) ; <nl> - } else { / / non - packed <nl> - while ( ! stream . ExpectAtEnd ( ) ) { <nl> - if ( ! stream . ExpectTag ( kVarintTag ( 1 ) ) ) return false ; <nl> - protobuf_uint64 n ; / / There is no API for int64 <nl> - if ( ! stream . ReadVarint64 ( & n ) ) return false ; <nl> - int64_list - > push_back ( static_cast < int64 > ( n ) ) ; <nl> - } <nl> - } <nl> - } <nl> - stream . PopLimit ( limit ) ; <nl> - return true ; <nl> - } <nl> - <nl> - StringPiece GetSerialized ( ) const { return serialized_ ; } <nl> - <nl> - private : <nl> - StringPiece serialized_ ; <nl> - } ; <nl> - <nl> - using FeatureMapEntry = std : : pair < StringPiece , Feature > ; <nl> - using Example = std : : vector < FeatureMapEntry > ; <nl> - <nl> - } / / namespace parsed <nl> - <nl> - inline bool SkipExtraneousTag ( protobuf : : io : : CodedInputStream * stream ) { <nl> - uint32 data ; <nl> - protobuf_uint64 dummy ; <nl> - switch ( stream - > ReadTag ( ) & 0x7 ) { <nl> - case 0 : / / varint <nl> - if ( ! stream - > ReadVarint32 ( & data ) ) return false ; <nl> - return true ; <nl> - case 1 : / / fixed64 <nl> - if ( ! stream - > ReadLittleEndian64 ( & dummy ) ) return false ; <nl> - return true ; <nl> - case 2 : / / length delimited <nl> - if ( ! stream - > ReadVarint32 ( & data ) ) return false ; <nl> - stream - > Skip ( data ) ; <nl> - return true ; <nl> - case 3 : / / group begin <nl> - return false ; / / groups not supported . <nl> - case 4 : / / group end <nl> - return false ; / / groups not supported . <nl> - case 5 : / / fixed32 <nl> - if ( ! stream - > ReadLittleEndian32 ( & data ) ) return false ; <nl> - return true ; <nl> - } <nl> - return false ; / / unrecognized tag type <nl> - } <nl> - <nl> - bool ParseString ( protobuf : : io : : CodedInputStream * stream , StringPiece * result ) ; <nl> - <nl> - bool ParseFeatureMapEntry ( protobuf : : io : : CodedInputStream * stream , <nl> - parsed : : FeatureMapEntry * feature_map_entry ) ; <nl> - <nl> - bool ParseFeatures ( protobuf : : io : : CodedInputStream * stream , <nl> - parsed : : Example * example ) ; <nl> - <nl> - bool ParseExample ( protobuf : : io : : CodedInputStream * stream , <nl> - parsed : : Example * example ) ; <nl> - <nl> - bool ParseExample ( StringPiece serialized , parsed : : Example * example ) ; <nl> - <nl> - using Config = FastParseExampleConfig ; <nl> - <nl> - / / Enumeration for distinguishing feature types . <nl> - / / Note : FastParseSequenceExample constructs a map that includes Type values , <nl> - / / and relies on the fact that they are default - initialized to Dense . <nl> - enum class Type { Dense , Sparse , Ragged } ; <nl> - <nl> - / / Note : We use SparseBuffer for sparse , ragged , and dense_varlen features . <nl> - struct SparseBuffer { <nl> - / / Features are in one of the 3 vectors below depending on config ' s dtype . <nl> - / / Other 2 vectors remain empty . 
<nl> - SmallVector < tstring > bytes_list ; <nl> - SmallVector < float > float_list ; <nl> - SmallVector < int64 > int64_list ; <nl> - <nl> - / / Features of example i are elements with indices <nl> - / / from example_end_indices [ i - 1 ] to example_end_indices [ i ] - 1 on the <nl> - / / appropriate xxxxx_list <nl> - std : : vector < size_t > example_end_indices ; <nl> - } ; <nl> - <nl> - struct SeededHasher { <nl> - uint64 operator ( ) ( StringPiece s ) const { <nl> - return Hash64 ( s . data ( ) , s . size ( ) , seed ) ; <nl> - } <nl> - uint64 seed { 0xDECAFCAFFE } ; <nl> - } ; <nl> - <nl> - / / Use this in the " default " clause of switch statements when dispatching <nl> - / / on a dtype variable that was checked by CheckConfigDataType ( ) : <nl> - inline void ReportUnexpectedDataType ( DataType dtype ) { <nl> - DCHECK ( false ) <nl> - < < " Encountered unexpected DataType " < < DataTypeString ( dtype ) <nl> - < < " in variable that should have been checked by CheckConfigDataType ( ) . " ; <nl> - } <nl> - <nl> - template < typename T > <nl> - const SmallVector < T > & GetListFromBuffer ( const SparseBuffer & buffer ) ; <nl> - <nl> - template < > <nl> - const SmallVector < int64 > & GetListFromBuffer < int64 > ( const SparseBuffer & buffer ) ; <nl> - <nl> - template < > <nl> - const SmallVector < float > & GetListFromBuffer < float > ( const SparseBuffer & buffer ) ; <nl> - <nl> - template < > <nl> - const SmallVector < tstring > & GetListFromBuffer < tstring > ( <nl> - const SparseBuffer & buffer ) ; <nl> - <nl> - template < typename T > <nl> - void CopyOrMoveBlock ( const T * b , const T * e , T * t ) { <nl> - std : : copy ( b , e , t ) ; <nl> - } <nl> - template < > <nl> - void CopyOrMoveBlock ( const tstring * b , const tstring * e , tstring * t ) ; <nl> - <nl> - void CountSparseFeatures ( <nl> - const std : : vector < std : : vector < SparseBuffer > > & sparse_buffers , size_t d , <nl> - size_t * total_num_features , size_t * max_num_features ) ; <nl> - <nl> - void CopySparseBufferToTensor ( DataType dtype , size_t offset , SparseBuffer * src , <nl> - Tensor * dst ) ; <nl> - <nl> - / / A struct used by FastParseSequenceExample to hold the serialized proto <nl> - / / substrings for a single feature , plus some auxiliary information derived <nl> - / / from those protos ( such as the total value length ) . <nl> - struct FeatureProtos { <nl> - / / Proto substrings from each serialized SequenceExample that correspond <nl> - / / with this feature . ` protos_present ` records whether the proto had a <nl> - / / value defined ( even if that value is empty ) . <nl> - std : : vector < StringPiece > protos ; <nl> - std : : vector < bool > protos_present ; <nl> - <nl> - / / Information derived from protos : <nl> - size_t length ; / / total length for ragged / sparse , max row length for dense . <nl> - size_t num_rows ; / / only populated for ragged sequence features . <nl> - <nl> - / / Information from the config : <nl> - Type type ; / / Whether this feature is sparse , ragged , or dense . <nl> - DataType dtype ; <nl> - } ; <nl> - <nl> - / / Map from feature name to FeatureProtos for that feature . <nl> - using FeatureProtosMap = absl : : flat_hash_map < StringPiece , FeatureProtos > ; <nl> - <nl> - string ExampleName ( const gtl : : ArraySlice < tstring > example_names , int n ) ; <nl> - <nl> - / / Return the number of bytes elements parsed , or - 1 on error . If out is null , <nl> - / / this method simply counts the number of elements without any copying . 
<nl> - inline int ParseBytesFeature ( protobuf : : io : : CodedInputStream * stream , <nl> - tstring * out ) { <nl> - int num_elements = 0 ; <nl> - uint32 length ; <nl> - if ( ! stream - > ExpectTag ( kDelimitedTag ( 1 ) ) | | ! stream - > ReadVarint32 ( & length ) ) { <nl> - return - 1 ; <nl> - } <nl> - if ( length > 0 ) { <nl> - auto limit = stream - > PushLimit ( length ) ; <nl> - while ( ! stream - > ExpectAtEnd ( ) ) { <nl> - uint32 bytes_length ; <nl> - if ( ! stream - > ExpectTag ( kDelimitedTag ( 1 ) ) | | <nl> - ! stream - > ReadVarint32 ( & bytes_length ) ) { <nl> - return - 1 ; <nl> - } <nl> - if ( out = = nullptr ) { <nl> - stream - > Skip ( bytes_length ) ; <nl> - } else { <nl> - out - > resize_uninitialized ( bytes_length ) ; <nl> - if ( ! stream - > ReadRaw ( out - > data ( ) , bytes_length ) ) { <nl> - return - 1 ; <nl> - } <nl> - out + + ; <nl> - } <nl> - num_elements + + ; <nl> - } <nl> - stream - > PopLimit ( limit ) ; <nl> - } <nl> - return num_elements ; <nl> - } <nl> - <nl> - inline void PadFloatFeature ( int num_to_pad , float * out ) { <nl> - for ( int i = 0 ; i < num_to_pad ; i + + ) { <nl> - * out + + = 0 . 0 ; <nl> - } <nl> - } <nl> - <nl> - inline void PadInt64Feature ( int num_to_pad , int64 * out ) { <nl> - for ( int i = 0 ; i < num_to_pad ; i + + ) { <nl> - * out + + = 0 ; <nl> - } <nl> - } <nl> - <nl> - / / Return the number of float elements parsed , or - 1 on error . If out is null , <nl> - / / this method simply counts the number of elements without any copying . <nl> - inline int ParseFloatFeature ( protobuf : : io : : CodedInputStream * stream , <nl> - float * out ) { <nl> - int num_elements = 0 ; <nl> - uint32 length ; <nl> - if ( ! stream - > ExpectTag ( kDelimitedTag ( 2 ) ) | | ! stream - > ReadVarint32 ( & length ) ) { <nl> - return - 1 ; <nl> - } <nl> - if ( length > 0 ) { <nl> - auto limit = stream - > PushLimit ( length ) ; <nl> - uint8 peek_tag = PeekTag ( stream ) ; <nl> - if ( peek_tag = = kDelimitedTag ( 1 ) ) { / / packed <nl> - uint32 packed_length ; <nl> - if ( ! stream - > ExpectTag ( kDelimitedTag ( 1 ) ) | | <nl> - ! stream - > ReadVarint32 ( & packed_length ) ) { <nl> - return - 1 ; <nl> - } <nl> - auto packed_limit = stream - > PushLimit ( packed_length ) ; <nl> - while ( ! stream - > ExpectAtEnd ( ) ) { <nl> - uint32 buffer32 ; <nl> - if ( ! stream - > ReadLittleEndian32 ( & buffer32 ) ) { <nl> - return - 1 ; <nl> - } <nl> - if ( out ! = nullptr ) { <nl> - * out + + = absl : : bit_cast < float > ( buffer32 ) ; <nl> - } <nl> - num_elements + + ; <nl> - } <nl> - stream - > PopLimit ( packed_limit ) ; <nl> - } else if ( peek_tag = = kFixed32Tag ( 1 ) ) { <nl> - while ( ! stream - > ExpectAtEnd ( ) ) { <nl> - uint32 buffer32 ; <nl> - if ( ! stream - > ExpectTag ( kFixed32Tag ( 1 ) ) | | <nl> - ! stream - > ReadLittleEndian32 ( & buffer32 ) ) { <nl> - return - 1 ; <nl> - } <nl> - if ( out ! = nullptr ) { <nl> - * out + + = absl : : bit_cast < float > ( buffer32 ) ; <nl> - } <nl> - num_elements + + ; <nl> - } <nl> - } else { <nl> - / / Unknown tag . <nl> - return - 1 ; <nl> - } <nl> - stream - > PopLimit ( limit ) ; <nl> - } <nl> - return num_elements ; <nl> - } <nl> - <nl> - / / Return the number of int64 elements parsed , or - 1 on error . If out is null , <nl> - / / this method simply counts the number of elements without any copying . <nl> - inline int ParseInt64Feature ( protobuf : : io : : CodedInputStream * stream , <nl> - int64 * out ) { <nl> - int num_elements = 0 ; <nl> - uint32 length ; <nl> - if ( ! 
stream - > ExpectTag ( kDelimitedTag ( 3 ) ) | | ! stream - > ReadVarint32 ( & length ) ) { <nl> - return - 1 ; <nl> - } <nl> - if ( length > 0 ) { <nl> - auto limit = stream - > PushLimit ( length ) ; <nl> - uint8 peek_tag = PeekTag ( stream ) ; <nl> - if ( peek_tag = = kDelimitedTag ( 1 ) ) { / / packed <nl> - uint32 packed_length ; <nl> - if ( ! stream - > ExpectTag ( kDelimitedTag ( 1 ) ) | | <nl> - ! stream - > ReadVarint32 ( & packed_length ) ) { <nl> - return - 1 ; <nl> - } <nl> - auto packed_limit = stream - > PushLimit ( packed_length ) ; <nl> - while ( ! stream - > ExpectAtEnd ( ) ) { <nl> - protobuf_uint64 n ; / / There is no API for int64 <nl> - if ( ! stream - > ReadVarint64 ( & n ) ) { <nl> - return - 1 ; <nl> - } <nl> - if ( out ! = nullptr ) { <nl> - * out + + = n ; <nl> - } <nl> - num_elements + + ; <nl> - } <nl> - stream - > PopLimit ( packed_limit ) ; <nl> - } else if ( peek_tag = = kVarintTag ( 1 ) ) { <nl> - while ( ! stream - > ExpectAtEnd ( ) ) { <nl> - protobuf_uint64 n ; / / There is no API for int64 <nl> - if ( ! stream - > ExpectTag ( kVarintTag ( 1 ) ) | | ! stream - > ReadVarint64 ( & n ) ) { <nl> - return - 1 ; <nl> - } <nl> - if ( out ! = nullptr ) { <nl> - * out + + = n ; <nl> - } <nl> - num_elements + + ; <nl> - } <nl> - } else { <nl> - / / Unknown tag . <nl> - return - 1 ; <nl> - } <nl> - stream - > PopLimit ( limit ) ; <nl> - } <nl> - return num_elements ; <nl> - } <nl> - <nl> - / / Parses the next feature on ` stream ` into ` out ` starting at ` out_offset ` . <nl> - / / Updates ` out_offset ` , and returns the number of values added . <nl> - / / Returns - 1 if the next feature on ` stream ` doesn ' t match ` dtype ` . <nl> - inline int ParseFeature ( DataType dtype , protobuf : : io : : CodedInputStream * stream , <nl> - Tensor * out , size_t * out_offset ) { <nl> - int delta ; <nl> - switch ( dtype ) { <nl> - case DT_STRING : <nl> - delta = <nl> - ParseBytesFeature ( stream , out - > flat < tstring > ( ) . data ( ) + * out_offset ) ; <nl> - break ; <nl> - case DT_FLOAT : <nl> - delta = <nl> - ParseFloatFeature ( stream , out - > flat < float > ( ) . data ( ) + * out_offset ) ; <nl> - break ; <nl> - case DT_INT64 : <nl> - delta = <nl> - ParseInt64Feature ( stream , out - > flat < int64 > ( ) . data ( ) + * out_offset ) ; <nl> - break ; <nl> - default : <nl> - ReportUnexpectedDataType ( dtype ) ; <nl> - delta = 0 ; <nl> - } <nl> - if ( delta > 0 ) { <nl> - * out_offset + = delta ; <nl> - } <nl> - return delta ; <nl> - } <nl> - <nl> - / / Returns the length of the next feature on ` stream ` . <nl> - / / Returns - 1 if the next feature on ` stream ` doesn ' t match ` dtype ` . 
<nl> - inline int GetFeatureLength ( DataType dtype , <nl> - protobuf : : io : : CodedInputStream * stream ) { <nl> - switch ( dtype ) { <nl> - case DT_STRING : <nl> - return ParseBytesFeature ( stream , nullptr ) ; <nl> - case DT_FLOAT : <nl> - return ParseFloatFeature ( stream , nullptr ) ; <nl> - case DT_INT64 : <nl> - return ParseInt64Feature ( stream , nullptr ) ; <nl> - default : <nl> - ReportUnexpectedDataType ( dtype ) ; <nl> - return - 1 ; <nl> - } <nl> - } <nl> - <nl> - inline DataType ParseDataType ( protobuf : : io : : CodedInputStream * stream ) { <nl> - uint8 peek_tag = PeekTag ( stream ) ; <nl> - switch ( peek_tag ) { <nl> - case kDelimitedTag ( 1 ) : <nl> - return DT_STRING ; <nl> - case kDelimitedTag ( 2 ) : <nl> - return DT_FLOAT ; <nl> - case kDelimitedTag ( 3 ) : <nl> - return DT_INT64 ; <nl> - default : <nl> - return DT_INVALID ; <nl> - } <nl> - } <nl> - <nl> - inline bool SkipEmptyFeature ( protobuf : : io : : CodedInputStream * stream , <nl> - DataType dtype ) { <nl> - switch ( dtype ) { <nl> - case DT_STRING : <nl> - if ( ! stream - > ExpectTag ( kDelimitedTag ( 1 ) ) ) { <nl> - return false ; <nl> - } <nl> - break ; <nl> - case DT_FLOAT : <nl> - if ( ! stream - > ExpectTag ( kDelimitedTag ( 2 ) ) ) { <nl> - return false ; <nl> - } <nl> - break ; <nl> - case DT_INT64 : <nl> - if ( ! stream - > ExpectTag ( kDelimitedTag ( 3 ) ) ) { <nl> - return false ; <nl> - } <nl> - break ; <nl> - default : <nl> - return false ; <nl> - } <nl> - uint32 length ; <nl> - return stream - > ReadVarint32 ( & length ) & & length = = 0 ; <nl> - } <nl> - <nl> - } / / namespace example <nl> - } / / namespace tensorflow <nl> - <nl> - # endif / / TENSORFLOW_LITE_KERNELS_PARSE_EXAMPLE_EXAMPLE_PROTO_FAST_PARSING_H_ <nl> deleted file mode 100644 <nl> index 5b159d1993687 . . 0000000000000 <nl> mmm a / tensorflow / lite / kernels / parse_example / parse_example . cc <nl> ppp / dev / null <nl> <nl> - / * Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - # include " tensorflow / lite / kernels / parse_example / parse_example . h " <nl> - <nl> - # include < algorithm > <nl> - # include < cstddef > <nl> - # include < memory > <nl> - # include < unordered_map > <nl> - <nl> - # include " flatbuffers / flexbuffers . h " / / from @ flatbuffers <nl> - # include " tensorflow / core / example / feature . pb . h " <nl> - # include " tensorflow / core / framework / attr_value . pb . h " <nl> - # include " tensorflow / core / framework / node_def . pb . h " <nl> - # include " tensorflow / core / framework / op_kernel . h " <nl> - # include " tensorflow / core / lib / core / blocking_counter . h " <nl> - # include " tensorflow / core / platform / errors . 
h " <nl> - # include " tensorflow / core / platform / fingerprint . h " <nl> - # include " tensorflow / core / public / session_options . h " <nl> - # include " tensorflow / core / util / example_proto_fast_parsing . h " <nl> - # include " tensorflow / core / util / presized_cuckoo_map . h " <nl> - # include " tensorflow / lite / c / common . h " <nl> - # include " tensorflow / lite / kernels / internal / tensor . h " <nl> - # include " tensorflow / lite / kernels / kernel_util . h " <nl> - # include " tensorflow / lite / kernels / parse_example / example_proto_fast_parsing . h " <nl> - # include " tensorflow / lite / mutable_op_resolver . h " <nl> - # include " tensorflow / lite / string_util . h " <nl> - <nl> - namespace tflite { <nl> - namespace ops { <nl> - namespace custom { <nl> - namespace parse_example { <nl> - namespace { <nl> - <nl> - namespace tf = : : tensorflow ; <nl> - using tf : : Status ; <nl> - using tf : : StringPiece ; <nl> - using tf : : tstring ; <nl> - using tf : : example : : CopyOrMoveBlock ; <nl> - using tf : : example : : FastParseExampleConfig ; <nl> - using tf : : example : : GetListFromBuffer ; <nl> - using tf : : example : : LimitedArraySlice ; <nl> - using tf : : example : : ParseExample ; <nl> - using tf : : example : : SeededHasher ; <nl> - using tf : : example : : SmallVector ; <nl> - using tf : : example : : SparseBuffer ; <nl> - using tf : : example : : Type ; <nl> - using tf : : example : : parsed : : Example ; <nl> - <nl> - using ConfigIndex = tf : : PresizedCuckooMap < std : : pair < int32_t , Type > > ; <nl> - <nl> - struct TfLiteResult { <nl> - std : : vector < TfLiteTensor * > dense_values ; <nl> - std : : vector < TfLiteTensor * > sparse_values ; <nl> - std : : vector < TfLiteTensor * > sparse_indices ; <nl> - std : : vector < TfLiteTensor * > sparse_shapes ; <nl> - std : : map < int , tf : : Tensor > dense_tensors ; <nl> - } ; <nl> - <nl> - template < typename T > <nl> - void FillAndCopyVarLen ( const int d , const size_t num_elements , <nl> - const size_t num_elements_per_minibatch , <nl> - const FastParseExampleConfig & config , <nl> - std : : vector < SparseBuffer > & varlen_dense_buffers , <nl> - TfLiteTensor * values ) { <nl> - const tf : : Tensor & default_value = config . dense [ d ] . default_value ; <nl> - <nl> - / / Copy - fill the tensors ( creating the zero / fill - padding ) <nl> - std : : fill ( reinterpret_cast < T * > ( values - > data . raw ) , <nl> - reinterpret_cast < T * > ( values - > data . raw ) + num_elements , <nl> - default_value . flat < T > ( ) ( 0 ) ) ; <nl> - <nl> - auto data = reinterpret_cast < T * > ( values - > data . raw ) ; <nl> - <nl> - const SparseBuffer & buffer = varlen_dense_buffers [ d ] ; <nl> - / / Number of examples being stored in this buffer <nl> - const auto & end_indices = buffer . example_end_indices ; <nl> - const size_t examples_in_buffer = end_indices . size ( ) ; <nl> - <nl> - const auto & list = GetListFromBuffer < T > ( buffer ) ; <nl> - auto list_ptr = list . begin ( ) ; <nl> - <nl> - size_t elements_tally = 0 ; <nl> - / / Iterate through all the examples stored in this buffer . <nl> - for ( size_t j = 0 ; j < examples_in_buffer ; + + j ) { <nl> - / / Number of elements stored for this example . <nl> - const size_t num_elems = end_indices [ j ] - elements_tally ; <nl> - CopyOrMoveBlock ( list_ptr , list_ptr + num_elems , data ) ; <nl> - / / Move forward this many elements in the varlen buffer . 
<nl> - list_ptr + = num_elems ; <nl> - / / Move forward to the next minibatch entry in the values output . <nl> - data + = num_elements_per_minibatch ; <nl> - elements_tally = end_indices [ j ] ; <nl> - } <nl> - DCHECK ( elements_tally = = list . size ( ) ) ; <nl> - } <nl> - <nl> - bool ParseExample ( StringRef serialized , Example * example ) { <nl> - DCHECK ( example ! = nullptr ) ; <nl> - tf : : protobuf : : io : : CodedInputStream stream ( <nl> - reinterpret_cast < const uint8 * > ( serialized . str ) , serialized . len ) ; <nl> - tensorflow : : example : : EnableAliasing ( & stream ) ; <nl> - return ParseExample ( & stream , example ) ; <nl> - } <nl> - <nl> - Status FastParseSerializedExample ( <nl> - StringRef serialized_example , const tstring & example_name , <nl> - const size_t example_index , const FastParseExampleConfig & config , <nl> - bool * quick_filter , int quick_filter_size , <nl> - const std : : unique_ptr < ConfigIndex > & config_index , int config_index_size , <nl> - SeededHasher * hasher , std : : vector < TfLiteTensor * > * output_dense , <nl> - std : : vector < SparseBuffer > * output_varlen_dense , <nl> - std : : vector < SparseBuffer > * output_sparse , <nl> - std : : map < absl : : string_view , int > & stats , TfLiteResult * result ) { <nl> - DCHECK ( output_dense ! = nullptr ) ; <nl> - tensorflow : : example : : parsed : : Example parsed_example ; <nl> - if ( ! ParseExample ( serialized_example , & parsed_example ) ) { <nl> - return tf : : errors : : Internal ( " Failed to parse example " ) ; <nl> - } <nl> - std : : vector < int64 > dense_feature_last_example ( config . dense . size ( ) , - 1 ) ; <nl> - std : : vector < int64 > sparse_feature_last_example ( config . sparse . size ( ) , - 1 ) ; <nl> - / / Handle features present in the example . <nl> - const size_t parsed_example_size = parsed_example . size ( ) ; <nl> - for ( size_t i = 0 ; i < parsed_example_size ; + + i ) { <nl> - / / This is a logic that standard protobuf parsing is implementing . <nl> - / / I . e . last entry in the map overwrites all the previous ones . <nl> - tensorflow : : example : : parsed : : FeatureMapEntry & name_and_feature = <nl> - parsed_example [ parsed_example_size - i - 1 ] ; <nl> - const StringPiece feature_name = name_and_feature . first ; <nl> - tensorflow : : example : : parsed : : Feature & feature = name_and_feature . second ; <nl> - if ( feature_name . length ( ) > = quick_filter_size | | <nl> - ! quick_filter [ feature_name . length ( ) ] ) { <nl> - continue ; <nl> - } <nl> - const uint64_t h = ( * hasher ) ( feature_name ) ; <nl> - std : : pair < int32_t , Type > d_and_type ; <nl> - if ( ! config_index - > Find ( h , & d_and_type ) ) { <nl> - continue ; <nl> - } <nl> - size_t d = d_and_type . first ; <nl> - bool is_dense = d_and_type . second = = Type : : Dense ; <nl> - <nl> - auto example_error = [ & ] ( StringPiece suffix ) { <nl> - return tf : : errors : : Internal ( " Name : " , example_name , <nl> - " , Key : " , feature_name , <nl> - " , Index : " , example_index , " . " , suffix ) ; <nl> - } ; <nl> - <nl> - auto parse_error = [ & ] { <nl> - return example_error ( " Can ' t parse serialized Example . " ) ; <nl> - } ; <nl> - <nl> - tf : : DataType example_dtype ; <nl> - if ( feature . ParseDataType ( & example_dtype ) ! 
= Status : : OK ( ) ) { <nl> - return parse_error ( ) ; <nl> - } <nl> - if ( is_dense ) { <nl> - if ( example_dtype = = tf : : DT_INVALID ) continue ; <nl> - <nl> - dense_feature_last_example [ d ] = example_index ; <nl> - <nl> - if ( example_dtype ! = config . dense [ d ] . dtype ) { <nl> - return example_error ( absl : : StrCat ( <nl> - " Data types don ' t match . Data type : " , <nl> - DataTypeString ( example_dtype ) , <nl> - " but expected type : " , DataTypeString ( config . dense [ d ] . dtype ) ) ) ; <nl> - } <nl> - if ( ! config . dense [ d ] . variable_length ) { <nl> - TfLiteTensor * out = ( * output_dense ) [ d ] ; <nl> - <nl> - const std : : size_t num_elements = config . dense [ d ] . elements_per_stride ; <nl> - const std : : size_t offset = example_index * num_elements ; <nl> - <nl> - auto shape_error = [ & ] ( size_t size , StringPiece type_str ) { <nl> - return example_error ( absl : : StrCat ( <nl> - " Number of " , type_str , <nl> - " values ! = expected . " <nl> - " Values size : " , <nl> - size , <nl> - " but output shape : " , config . dense [ d ] . shape . DebugString ( ) ) ) ; <nl> - } ; <nl> - <nl> - switch ( config . dense [ d ] . dtype ) { <nl> - case tf : : DT_INT64 : { <nl> - auto out_p = reinterpret_cast < int64 * > ( out - > data . raw ) + offset ; <nl> - LimitedArraySlice < int64 > slice ( out_p , num_elements ) ; <nl> - if ( ! feature . ParseInt64List ( & slice ) ) return parse_error ( ) ; <nl> - if ( slice . EndDistance ( ) ! = 0 ) { <nl> - return shape_error ( num_elements - slice . EndDistance ( ) , " int64 " ) ; <nl> - } <nl> - break ; <nl> - } <nl> - case tf : : DT_FLOAT : { <nl> - auto out_p = reinterpret_cast < float * > ( out - > data . raw ) + offset ; <nl> - LimitedArraySlice < float > slice ( out_p , num_elements ) ; <nl> - if ( ! feature . ParseFloatList ( & slice ) ) return parse_error ( ) ; <nl> - if ( slice . EndDistance ( ) ! = 0 ) { <nl> - return shape_error ( num_elements - slice . EndDistance ( ) , " float " ) ; <nl> - } <nl> - break ; <nl> - } <nl> - case tf : : DT_STRING : { <nl> - auto & out_tensor = result - > dense_tensors [ d ] ; <nl> - auto out_p = out_tensor . flat < tstring > ( ) . data ( ) + offset ; <nl> - LimitedArraySlice < tstring > slice ( out_p , num_elements ) ; <nl> - if ( ! feature . ParseBytesList ( & slice ) ) return parse_error ( ) ; <nl> - if ( slice . EndDistance ( ) ! = 0 ) { <nl> - return shape_error ( num_elements - slice . EndDistance ( ) , " bytes " ) ; <nl> - } <nl> - break ; <nl> - } <nl> - default : <nl> - return tf : : errors : : Internal ( " Unrecognized dense type : " , <nl> - config . dense [ d ] . dtype ) ; <nl> - } <nl> - } else { / / if dense variable length <nl> - SparseBuffer & out = ( * output_varlen_dense ) [ d ] ; <nl> - <nl> - const std : : size_t num_elements = config . dense [ d ] . elements_per_stride ; <nl> - <nl> - if ( example_dtype ! = tf : : DT_INVALID & & <nl> - example_dtype ! = config . dense [ d ] . dtype ) { <nl> - return example_error ( absl : : StrCat ( <nl> - " Data types don ' t match . " , <nl> - " Expected type : " , DataTypeString ( config . dense [ d ] . dtype ) ) ) ; <nl> - } <nl> - <nl> - auto shape_error = [ & ] ( size_t size , StringPiece type_str ) { <nl> - return example_error ( <nl> - absl : : StrCat ( " Number of " , type_str , <nl> - " values is not a multiple of stride length . Saw " , <nl> - size , " values but output shape is : " , <nl> - config . dense [ d ] . shape . DebugString ( ) ) ) ; <nl> - } ; <nl> - <nl> - switch ( config . dense [ d ] . 
dtype ) { <nl> - case tf : : DT_INT64 : { <nl> - if ( example_dtype ! = tf : : DT_INVALID ) { <nl> - if ( ! feature . ParseInt64List ( & out . int64_list ) ) { <nl> - return parse_error ( ) ; <nl> - } <nl> - if ( out . int64_list . size ( ) % num_elements ! = 0 ) { <nl> - return shape_error ( out . int64_list . size ( ) , " int64 " ) ; <nl> - } <nl> - } <nl> - out . example_end_indices . push_back ( out . int64_list . size ( ) ) ; <nl> - break ; <nl> - } <nl> - case tf : : DT_FLOAT : { <nl> - if ( example_dtype ! = tf : : DT_INVALID ) { <nl> - if ( ! feature . ParseFloatList ( & out . float_list ) ) { <nl> - return parse_error ( ) ; <nl> - } <nl> - if ( out . float_list . size ( ) % num_elements ! = 0 ) { <nl> - return shape_error ( out . float_list . size ( ) , " float " ) ; <nl> - } <nl> - } <nl> - out . example_end_indices . push_back ( out . float_list . size ( ) ) ; <nl> - break ; <nl> - } <nl> - case tf : : DT_STRING : { <nl> - if ( example_dtype ! = tf : : DT_INVALID ) { <nl> - if ( ! feature . ParseBytesList ( & out . bytes_list ) ) { <nl> - return parse_error ( ) ; <nl> - } <nl> - if ( out . bytes_list . size ( ) % num_elements ! = 0 ) { <nl> - return shape_error ( out . bytes_list . size ( ) , " byte " ) ; <nl> - } <nl> - } <nl> - out . example_end_indices . push_back ( out . bytes_list . size ( ) ) ; <nl> - break ; <nl> - } <nl> - default : <nl> - return tf : : errors : : Internal ( " Should not happen : " , <nl> - config . dense [ d ] . dtype ) ; <nl> - } <nl> - } <nl> - } else { <nl> - / / is sparse or ragged <nl> - auto & last_example = sparse_feature_last_example ; <nl> - if ( last_example [ d ] = = example_index ) { <nl> - continue ; <nl> - } <nl> - last_example [ d ] = example_index ; <nl> - SparseBuffer & out = ( * output_sparse ) [ d ] ; <nl> - tf : : DataType feature_dtype = config . sparse [ d ] . dtype ; <nl> - if ( example_dtype ! = tf : : DT_INVALID & & example_dtype ! = feature_dtype ) { <nl> - return tf : : errors : : Internal ( " Data types don ' t match : " , example_dtype , <nl> - " ! = " , feature_dtype ) ; <nl> - } <nl> - switch ( feature_dtype ) { <nl> - case tf : : DT_INT64 : { <nl> - if ( example_dtype ! = tf : : DT_INVALID ) { <nl> - if ( ! feature . ParseInt64List ( & out . int64_list ) ) { <nl> - return parse_error ( ) ; <nl> - } <nl> - } <nl> - out . example_end_indices . push_back ( out . int64_list . size ( ) ) ; <nl> - break ; <nl> - } <nl> - case tf : : DT_FLOAT : { <nl> - if ( example_dtype ! = tf : : DT_INVALID ) { <nl> - if ( ! feature . ParseFloatList ( & out . float_list ) ) { <nl> - return parse_error ( ) ; <nl> - } <nl> - } <nl> - out . example_end_indices . push_back ( out . float_list . size ( ) ) ; <nl> - break ; <nl> - } <nl> - case tf : : DT_STRING : { <nl> - if ( example_dtype ! = tf : : DT_INVALID ) { <nl> - if ( ! feature . ParseBytesList ( & out . bytes_list ) ) { <nl> - return parse_error ( ) ; <nl> - } <nl> - } <nl> - out . example_end_indices . push_back ( out . bytes_list . size ( ) ) ; <nl> - break ; <nl> - } <nl> - default : <nl> - return tf : : errors : : Internal ( " Should not happen : " , feature_dtype ) ; <nl> - } <nl> - } <nl> - } <nl> - / / Handle missing dense features for fixed strides . <nl> - for ( size_t d = 0 ; d < config . dense . size ( ) ; + + d ) { <nl> - if ( config . dense [ d ] . variable_length ) continue ; <nl> - if ( dense_feature_last_example [ d ] = = example_index ) continue ; <nl> - if ( config . dense [ d ] . default_value . 
NumElements ( ) = = 0 ) { <nl> - return tf : : errors : : Internal ( <nl> - " Name : " , example_name , " , Feature : " , config . dense [ d ] . feature_name , <nl> - " ( data type : " , DataTypeString ( config . dense [ d ] . dtype ) , " ) " , <nl> - " is required but could not be found . " ) ; <nl> - } <nl> - const tf : : Tensor & in = config . dense [ d ] . default_value ; <nl> - TfLiteTensor * out = result - > dense_values [ d ] ; <nl> - const std : : size_t num_elements = in . shape ( ) . num_elements ( ) ; <nl> - const std : : size_t offset = example_index * num_elements ; <nl> - switch ( config . dense [ d ] . dtype ) { <nl> - case tf : : DT_INT64 : { <nl> - std : : copy_n ( in . flat < int64 > ( ) . data ( ) , num_elements , <nl> - out - > data . i64 + offset ) ; <nl> - break ; <nl> - } <nl> - case tf : : DT_FLOAT : { <nl> - std : : copy_n ( in . flat < float > ( ) . data ( ) , num_elements , <nl> - out - > data . f + offset ) ; <nl> - break ; <nl> - } <nl> - case tf : : DT_STRING : { <nl> - auto & out_tensor = result - > dense_tensors [ d ] ; <nl> - std : : copy_n ( in . flat < tstring > ( ) . data ( ) , num_elements , <nl> - out_tensor . flat < tstring > ( ) . data ( ) + offset ) ; <nl> - break ; <nl> - } <nl> - default : <nl> - return tf : : errors : : Internal ( " Should not happen : " , <nl> - config . dense [ d ] . dtype ) ; <nl> - } <nl> - } <nl> - for ( size_t d = 0 ; d < config . dense . size ( ) ; + + d ) { <nl> - if ( ! config . dense [ d ] . variable_length ) continue ; <nl> - if ( dense_feature_last_example [ d ] = = example_index ) continue ; <nl> - SparseBuffer & out = ( * output_varlen_dense ) [ d ] ; <nl> - size_t prev_example_end_index = <nl> - out . example_end_indices . empty ( ) ? 0 : out . example_end_indices . back ( ) ; <nl> - out . example_end_indices . push_back ( prev_example_end_index ) ; <nl> - } <nl> - <nl> - for ( size_t d = 0 ; d < config . sparse . size ( ) ; + + d ) { <nl> - if ( sparse_feature_last_example [ d ] = = example_index ) continue ; <nl> - SparseBuffer & out = ( * output_sparse ) [ d ] ; <nl> - size_t prev_example_end_index = <nl> - out . example_end_indices . empty ( ) ? 0 : out . example_end_indices . back ( ) ; <nl> - out . example_end_indices . push_back ( prev_example_end_index ) ; <nl> - } <nl> - <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - void CountSparseFeatures ( const SparseBuffer & sparse_buffer , <nl> - size_t * total_num_features , size_t * max_num_features ) { <nl> - const std : : vector < size_t > & end_indices = sparse_buffer . example_end_indices ; <nl> - * total_num_features + = end_indices . back ( ) ; <nl> - * max_num_features = std : : max ( * max_num_features , end_indices [ 0 ] ) ; <nl> - for ( size_t i = 1 ; i < end_indices . size ( ) ; + + i ) { <nl> - size_t example_size = end_indices [ i ] - end_indices [ i - 1 ] ; <nl> - * max_num_features = std : : max ( * max_num_features , example_size ) ; <nl> - } <nl> - } <nl> - <nl> - void CopySparseBufferToTensor ( tf : : DataType dtype , size_t offset , <nl> - SparseBuffer * src , TfLiteTensor * dst ) { <nl> - switch ( dtype ) { <nl> - case tf : : DT_INT64 : { <nl> - std : : copy ( src - > int64_list . begin ( ) , src - > int64_list . end ( ) , <nl> - reinterpret_cast < int64_t * > ( dst - > data . raw ) + offset ) ; <nl> - break ; <nl> - } <nl> - case tf : : DT_FLOAT : { <nl> - std : : copy ( src - > float_list . begin ( ) , src - > float_list . end ( ) , <nl> - reinterpret_cast < float * > ( dst - > data . 
raw ) + offset ) ; <nl> - break ; <nl> - } <nl> - case tf : : DT_STRING : { <nl> - DynamicBuffer buffer ; <nl> - for ( auto * begin = src - > bytes_list . begin ( ) ; <nl> - begin ! = src - > bytes_list . end ( ) ; begin + + ) { <nl> - buffer . AddString ( begin - > c_str ( ) , begin - > size ( ) ) ; <nl> - } <nl> - buffer . WriteToTensor ( dst , nullptr ) ; <nl> - break ; <nl> - } <nl> - default : <nl> - DCHECK ( false ) < < " Encountered unexpected DataType " <nl> - < < DataTypeString ( dtype ) <nl> - < < " in variable that should have been checked . " ; <nl> - } <nl> - } <nl> - <nl> - inline void CopyToBuffer ( gtl : : ArraySlice < tstring > vec , char * tensor_buffer , <nl> - int num_examples , int batch_size , <nl> - int elements_per_stride ) { <nl> - int i = 0 , k = 0 ; <nl> - int start = 0 ; <nl> - for ( ; i < num_examples ; + + i ) { <nl> - for ( int j = 0 ; j < elements_per_stride ; + + j ) { <nl> - memcpy ( tensor_buffer + start , vec [ k ] . c_str ( ) , vec [ k ] . size ( ) ) ; <nl> - start + = vec [ k ] . size ( ) ; <nl> - k + + ; <nl> - } <nl> - } <nl> - / / Will happen if the number of examples is less than the desired batch size . <nl> - for ( ; i < batch_size ; + + i ) { <nl> - for ( int j = 0 ; j < elements_per_stride ; + + j ) { <nl> - memcpy ( tensor_buffer + start , vec [ k ] . c_str ( ) , vec [ k ] . size ( ) ) ; <nl> - start + = vec [ k ] . size ( ) ; <nl> - k + + ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - Status FastParseExampleLite ( <nl> - const FastParseExampleConfig & config , const TfLiteTensor * serialized , <nl> - gtl : : ArraySlice < tstring > example_names , bool * quick_filter , <nl> - int quick_filter_size , const std : : unique_ptr < ConfigIndex > & config_index , <nl> - int config_index_size , SeededHasher * hasher , TfLiteResult * result , <nl> - std : : map < absl : : string_view , int > & stats , TfLiteContext * context ) { <nl> - if ( result = = nullptr ) { <nl> - return tf : : errors : : Internal ( " Result is null " ) ; <nl> - } <nl> - const int count = GetStringCount ( serialized ) ; <nl> - std : : vector < tf : : Tensor > fixed_dense_values ( config . dense . size ( ) ) ; <nl> - std : : vector < SparseBuffer > sparse_buffers ( config . sparse . size ( ) ) ; <nl> - std : : vector < SparseBuffer > varlen_dense_buffers ( config . dense . size ( ) ) ; <nl> - Status status_of_minibatch ; <nl> - for ( size_t e = 0 ; e < count ; + + e ) { <nl> - Status status_of_minibatch = FastParseSerializedExample ( <nl> - GetString ( serialized , e ) , <nl> - ( ! example_names . empty ( ) ? example_names [ e ] : " < unknown > " ) , e , config , <nl> - quick_filter , quick_filter_size , config_index , config_index_size , <nl> - hasher , & result - > dense_values , & varlen_dense_buffers , & sparse_buffers , <nl> - / * arena , * / stats , result ) ; <nl> - if ( ! status_of_minibatch . ok ( ) ) break ; <nl> - } <nl> - if ( ! status_of_minibatch . ok ( ) ) { <nl> - return status_of_minibatch ; <nl> - } <nl> - / / Merge SparseBuffers from all minibatches for every config . sparse . <nl> - / / auto MergeSparseMinibatches = [ & ] ( size_t d ) { <nl> - / / Loop over minibatches <nl> - for ( size_t d = 0 ; d < config . sparse . 
size ( ) ; + + d ) { <nl> - size_t total_num_features = 0 ; <nl> - size_t max_num_features = 0 ; <nl> - CountSparseFeatures ( sparse_buffers [ d ] , & total_num_features , <nl> - & max_num_features ) ; <nl> - tf : : TensorShape indices_shape ; <nl> - TfLiteTensor * indices = result - > sparse_indices [ d ] ; <nl> - TfLiteTensor * values = result - > sparse_values [ d ] ; <nl> - <nl> - TfLiteTensor * dense_shape = result - > sparse_shapes [ d ] ; <nl> - auto * dense_shape_ptr = reinterpret_cast < int64_t * > ( dense_shape - > data . raw ) ; <nl> - dense_shape_ptr [ 1 ] = max_num_features ; <nl> - <nl> - TfLiteIntArray * index_shape = TfLiteIntArrayCreate ( 2 ) ; <nl> - index_shape - > data [ 0 ] = total_num_features ; <nl> - index_shape - > data [ 1 ] = 2 ; <nl> - context - > ResizeTensor ( context , indices , index_shape ) ; <nl> - <nl> - TfLiteIntArray * output_shape = TfLiteIntArrayCreate ( 1 ) ; <nl> - output_shape - > data [ 0 ] = total_num_features ; <nl> - context - > ResizeTensor ( context , values , output_shape ) ; <nl> - <nl> - SparseBuffer & buffer = sparse_buffers [ d ] ; <nl> - <nl> - / / Update indices . <nl> - auto * indices_p = reinterpret_cast < int64_t * > ( indices - > data . raw ) ; <nl> - if ( ! indices_p ) { <nl> - return tf : : errors : : Internal ( " Indices tensor not allocated ! " ) ; <nl> - } <nl> - <nl> - if ( total_num_features > 0 ) { <nl> - int64_t * ix_p = indices_p ; <nl> - size_t example_index = 0 ; <nl> - int idx0 = 0 ; <nl> - size_t delta = 0 ; <nl> - for ( size_t example_end_index : buffer . example_end_indices ) { <nl> - size_t feature_index = 0 ; <nl> - for ( ; delta < example_end_index ; + + delta ) { <nl> - / / Column 0 : example index <nl> - if ( idx0 < total_num_features ) { <nl> - * ix_p = example_index ; <nl> - / / Column 1 : the feature index buffer example <nl> - * ( ix_p + 1 ) = feature_index ; <nl> - ix_p + = 2 ; <nl> - } <nl> - + + feature_index ; <nl> - + + idx0 ; <nl> - } <nl> - + + example_index ; <nl> - } <nl> - CopySparseBufferToTensor ( config . sparse [ d ] . dtype , 0 , & buffer , values ) ; <nl> - } <nl> - } <nl> - <nl> - / / Merge SparseBuffers from all minibatches for every config . dense having <nl> - / / variable_length . <nl> - for ( size_t d = 0 ; d < config . dense . size ( ) ; + + d ) { <nl> - if ( ! config . dense [ d ] . variable_length ) { <nl> - continue ; <nl> - } <nl> - size_t max_num_features = 0 ; <nl> - std : : vector < size_t > & end_indices = <nl> - varlen_dense_buffers [ d ] . example_end_indices ; <nl> - max_num_features = std : : max ( max_num_features , end_indices [ 0 ] ) ; <nl> - for ( size_t i = 1 ; i < end_indices . size ( ) ; + + i ) { <nl> - size_t example_size = end_indices [ i ] - end_indices [ i - 1 ] ; <nl> - max_num_features = std : : max ( max_num_features , example_size ) ; <nl> - } <nl> - <nl> - const size_t stride_size = config . dense [ d ] . elements_per_stride ; <nl> - const size_t max_num_elements = max_num_features / stride_size ; <nl> - tf : : TensorShape values_shape ; <nl> - DCHECK_EQ ( max_num_features % config . dense [ d ] . elements_per_stride , 0 ) ; <nl> - const size_t batch_size = GetStringCount ( serialized ) ; <nl> - values_shape . AddDim ( batch_size ) ; <nl> - values_shape . AddDim ( max_num_elements ) ; <nl> - for ( int i = 1 ; i < config . dense [ d ] . shape . dims ( ) ; + + i ) { <nl> - values_shape . AddDim ( config . dense [ d ] . shape . 
dim_size ( i ) ) ; <nl> - } <nl> - TfLiteTensor * values = result - > dense_values [ d ] ; <nl> - const size_t num_elements = GetTensorShape ( values ) . FlatSize ( ) ; <nl> - <nl> - / / Nothing to write , exit early . <nl> - if ( num_elements = = 0 ) { <nl> - continue ; <nl> - } <nl> - <nl> - const size_t num_elements_per_minibatch = num_elements / batch_size ; <nl> - switch ( config . dense [ d ] . dtype ) { <nl> - case tf : : DT_INT64 : { <nl> - FillAndCopyVarLen < int64 > ( d , num_elements , num_elements_per_minibatch , <nl> - config , varlen_dense_buffers , values ) ; <nl> - break ; <nl> - } <nl> - case tf : : DT_FLOAT : { <nl> - FillAndCopyVarLen < float > ( d , num_elements , num_elements_per_minibatch , <nl> - config , varlen_dense_buffers , values ) ; <nl> - break ; <nl> - } <nl> - default : <nl> - DCHECK ( false ) < < " Encountered unexpected DataType " <nl> - < < config . dense [ d ] . dtype <nl> - < < " in variable that should have been checked " ; <nl> - } <nl> - } <nl> - <nl> - / / Merge tflite string buffers if necessary . <nl> - for ( size_t d = 0 ; d < config . dense . size ( ) ; + + d ) { <nl> - if ( config . dense [ d ] . variable_length ) { <nl> - continue ; <nl> - } <nl> - if ( result - > dense_values [ d ] - > type = = kTfLiteString ) { <nl> - auto & in = result - > dense_tensors [ d ] ; <nl> - auto vec = in . vec < tstring > ( ) ; <nl> - const int batch_size = result - > dense_values [ d ] - > dims - > data [ 0 ] ; <nl> - const int elements_per_stride = config . dense [ d ] . elements_per_stride ; <nl> - int total_size = 0 ; <nl> - std : : vector < int32_t > offsets ; <nl> - offsets . reserve ( vec . size ( ) + 1 ) ; <nl> - offsets . push_back ( 0 ) ; <nl> - int k = 0 ; <nl> - for ( int i = 0 ; i < batch_size ; + + i ) { <nl> - for ( int j = 0 ; j < elements_per_stride ; + + j ) { <nl> - if ( i < count ) { <nl> - total_size + = vec ( k + + ) . size ( ) ; <nl> - offsets . push_back ( total_size ) ; <nl> - } else { <nl> - offsets . push_back ( total_size ) ; <nl> - } <nl> - } <nl> - } <nl> - int32_t num_strings = offsets . size ( ) - 1 ; <nl> - size_t required_bytes = total_size + sizeof ( int32_t ) * ( num_strings + 2 ) ; <nl> - char * tensor_buffer = <nl> - reinterpret_cast < char * > ( result - > dense_values [ d ] - > data . raw ) ; <nl> - if ( result - > dense_values [ d ] - > bytes < required_bytes ) { <nl> - if ( result - > dense_values [ d ] - > data . raw ) { <nl> - free ( result - > dense_values [ d ] - > data . raw ) ; <nl> - } <nl> - tensor_buffer = reinterpret_cast < char * > ( malloc ( required_bytes ) ) ; <nl> - result - > dense_values [ d ] - > data . raw = tensor_buffer ; <nl> - result - > dense_values [ d ] - > bytes = required_bytes ; <nl> - } <nl> - int32_t start = sizeof ( int32_t ) * ( num_strings + 2 ) ; <nl> - memcpy ( tensor_buffer , & num_strings , sizeof ( int32_t ) ) ; <nl> - for ( size_t i = 0 ; i < offsets . size ( ) ; i + + ) { <nl> - int32_t offset_i = start + offsets [ i ] ; <nl> - memcpy ( tensor_buffer + sizeof ( int32_t ) * ( i + 1 ) , & offset_i , <nl> - sizeof ( int32_t ) ) ; <nl> - } <nl> - gtl : : ArraySlice < tstring > slice ( vec . data ( ) , vec . 
size ( ) ) ; <nl> - CopyToBuffer ( slice , tensor_buffer + start , count , batch_size , <nl> - elements_per_stride ) ; <nl> - } <nl> - } <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - } / / namespace <nl> - <nl> - enum InputTensor { <nl> - kExampleTensor = 0 , <nl> - kNamesTensor = 1 , <nl> - kSparseKeysTensor = 2 , <nl> - kDenseKeysTensor = 3 , <nl> - kRaggedKeysTensor = 4 , <nl> - } ; <nl> - <nl> - struct OpData { <nl> - FastParseExampleConfig config ; <nl> - std : : vector < tf : : TensorShape > dense_shapes ; <nl> - int dense_size = 0 ; <nl> - int sparse_size = 0 ; <nl> - std : : unique_ptr < ConfigIndex > config_index ; <nl> - int config_index_size ; <nl> - SeededHasher hasher ; <nl> - TfLiteResult got ; <nl> - bool * quick_filter = nullptr ; <nl> - int quick_filter_size ; <nl> - bool created = false ; <nl> - ~ OpData ( ) { <nl> - if ( quick_filter ) { <nl> - free ( quick_filter ) ; <nl> - } <nl> - } <nl> - } ; <nl> - <nl> - void * Init ( TfLiteContext * context , const char * buffer , size_t length ) { <nl> - return new OpData ; <nl> - } <nl> - <nl> - template < typename T > <nl> - tf : : Tensor AsTensor ( const std : : vector < T > & val ) { <nl> - tf : : Tensor ret ( tf : : DataTypeToEnum < T > : : value , <nl> - { static_cast < int64 > ( val . size ( ) ) } ) ; <nl> - std : : copy_n ( val . begin ( ) , val . size ( ) , ret . flat < T > ( ) . data ( ) ) ; <nl> - return ret ; <nl> - } <nl> - <nl> - enum Version { <nl> - V1 , <nl> - V2 , <nl> - } ; <nl> - <nl> - tf : : TensorShape TfLiteToTfShape ( TfLiteIntArray * array ) { <nl> - tf : : TensorShape shape ; <nl> - for ( int i = 0 ; i < array - > size ; i + + ) { <nl> - shape . AddDim ( array - > data [ i ] ) ; <nl> - } <nl> - return shape ; <nl> - } <nl> - <nl> - template < Version version > <nl> - TfLiteStatus PrepareParseExample ( TfLiteContext * context , TfLiteNode * node ) { <nl> - OpData * data = reinterpret_cast < OpData * > ( node - > user_data ) ; <nl> - TF_LITE_ENSURE ( context , node - > custom_initial_data ) ; <nl> - data - > config . dense . clear ( ) ; <nl> - data - > config . sparse . clear ( ) ; <nl> - data - > got . dense_values . clear ( ) ; <nl> - const flexbuffers : : Vector & v = <nl> - flexbuffers : : GetRoot ( <nl> - reinterpret_cast < const uint8_t * > ( node - > custom_initial_data ) , <nl> - node - > custom_initial_data_size ) <nl> - . AsVector ( ) ; <nl> - if ( v . size ( ) = = 2 ) { <nl> - tf : : NodeDef nodedef ; <nl> - TF_LITE_ENSURE_EQ ( context , nodedef . ParseFromString ( v [ 1 ] . AsString ( ) . str ( ) ) , <nl> - true ) ; <nl> - if ( version = = V1 ) { <nl> - data - > dense_size = nodedef . attr ( ) . at ( " Ndense " ) . i ( ) ; <nl> - data - > sparse_size = nodedef . attr ( ) . at ( " Nsparse " ) . i ( ) ; <nl> - } else if ( version = = V2 ) { <nl> - data - > dense_size = nodedef . attr ( ) . at ( " Tdense " ) . list ( ) . type_size ( ) ; <nl> - data - > sparse_size = nodedef . attr ( ) . at ( " num_sparse " ) . i ( ) ; <nl> - } <nl> - auto dense_shapes = nodedef . attr ( ) . at ( " dense_shapes " ) . list ( ) ; <nl> - for ( int i = 0 ; i < dense_shapes . shape_size ( ) ; + + i ) { <nl> - data - > dense_shapes . push_back ( dense_shapes . shape ( i ) ) ; <nl> - } <nl> - } else { <nl> - const flexbuffers : : Map & m = <nl> - flexbuffers : : GetRoot ( <nl> - reinterpret_cast < const uint8_t * > ( node - > custom_initial_data ) , <nl> - node - > custom_initial_data_size ) <nl> - . AsMap ( ) ; <nl> - const flexbuffers : : TypedVector keys = m . 
Keys ( ) ; <nl> - int num_sparse = 0 ; <nl> - int num_dense = 0 ; <nl> - for ( int k = 0 ; k < keys . size ( ) ; + + k ) { <nl> - const std : : string key = keys [ k ] . ToString ( ) ; <nl> - const auto value = m [ key ] ; <nl> - if ( key = = " Nsparse " | | key = = " num_sparse " ) { <nl> - num_sparse = value . AsInt32 ( ) ; <nl> - } <nl> - if ( key = = " Ndense " ) { <nl> - num_dense = value . AsInt32 ( ) ; <nl> - } <nl> - } <nl> - data - > sparse_size = num_sparse ; <nl> - data - > dense_size = num_dense ; <nl> - if ( version = = V2 ) { <nl> - const TfLiteTensor * dense_key_tensor = <nl> - GetInput ( context , node , kDenseKeysTensor ) ; <nl> - data - > dense_size = GetTensorShape ( dense_key_tensor ) . FlatSize ( ) ; <nl> - } <nl> - } <nl> - <nl> - data - > config . dense . reserve ( data - > dense_size ) ; <nl> - data - > config . sparse . reserve ( data - > sparse_size ) ; <nl> - data - > dense_shapes . reserve ( data - > dense_size ) ; <nl> - const auto * serialized = GetInput ( context , node , 0 ) ; <nl> - const int batch_size = <nl> - serialized - > dims - > size > 0 ? serialized - > dims - > data [ 0 ] : 1 ; <nl> - <nl> - for ( int i = 0 ; i < data - > dense_size ; i + + ) { <nl> - TfLiteTensor * dense_key_tensor = <nl> - GetOutput ( context , node , data - > sparse_size * 3 + i ) ; <nl> - TfLiteIntArray * output_size = TfLiteIntArrayCopy ( dense_key_tensor - > dims ) ; <nl> - if ( data - > dense_size > 0 & & data - > dense_shapes . empty ( ) ) { <nl> - RuntimeShape runtime_shape = GetTensorShape ( dense_key_tensor ) ; <nl> - data - > dense_shapes . push_back ( TfLiteToTfShape ( output_size ) ) ; <nl> - } <nl> - output_size - > data [ 0 ] = batch_size * output_size - > data [ 0 ] ; <nl> - context - > ResizeTensor ( context , dense_key_tensor , output_size ) ; <nl> - } <nl> - <nl> - size_t offset = 0 ; <nl> - for ( int i = 0 ; i < data - > sparse_size ; i + + ) { <nl> - auto * parse_output = GetOutput ( context , node , i + offset ) ; <nl> - SetTensorToDynamic ( parse_output ) ; <nl> - TfLiteIntArray * sparse_size = TfLiteIntArrayCreate ( 2 ) ; <nl> - sparse_size - > data [ 0 ] = batch_size ; <nl> - sparse_size - > data [ 1 ] = 2 ; <nl> - context - > ResizeTensor ( context , parse_output , sparse_size ) ; <nl> - data - > got . sparse_indices . push_back ( parse_output ) ; <nl> - } <nl> - offset + = data - > sparse_size ; <nl> - for ( int i = 0 ; i < data - > sparse_size ; i + + ) { <nl> - auto * parse_output = GetOutput ( context , node , i + offset ) ; <nl> - SetTensorToDynamic ( parse_output ) ; <nl> - TfLiteIntArray * sparse_size = TfLiteIntArrayCreate ( 1 ) ; <nl> - sparse_size - > data [ 0 ] = 0 ; <nl> - context - > ResizeTensor ( context , parse_output , sparse_size ) ; <nl> - data - > got . sparse_values . push_back ( parse_output ) ; <nl> - } <nl> - offset + = data - > sparse_size ; <nl> - for ( int i = 0 ; i < data - > sparse_size ; i + + ) { <nl> - TfLiteTensor * parse_output = GetOutput ( context , node , i + offset ) ; <nl> - SetTensorToDynamic ( parse_output ) ; <nl> - TfLiteIntArray * sparse_size = TfLiteIntArrayCreate ( 1 ) ; <nl> - sparse_size - > data [ 0 ] = 2 ; <nl> - context - > ResizeTensor ( context , parse_output , sparse_size ) ; <nl> - auto * shapes_shape_t = reinterpret_cast < int64_t * > ( parse_output - > data . i64 ) ; <nl> - shapes_shape_t [ 0 ] = batch_size ; <nl> - shapes_shape_t [ 1 ] = 1 ; <nl> - data - > got . sparse_shapes . 
push_back ( parse_output ) ; <nl> - } <nl> - data - > created = false ; <nl> - return kTfLiteOk ; <nl> - } <nl> - <nl> - template < Version version > <nl> - TfLiteStatus EvalParseExample ( TfLiteContext * context , TfLiteNode * node ) { <nl> - OpData * data = reinterpret_cast < OpData * > ( node - > user_data ) ; <nl> - if ( ! data - > created ) { <nl> - for ( int i = 0 ; i < data - > sparse_size ; i + + ) { <nl> - int input_index = <nl> - version = = V1 ? kSparseKeysTensor + i : kSparseKeysTensor ; <nl> - int string_index = version = = V1 ? 0 : i ; <nl> - const TfLiteTensor * sparse_key_tensor = <nl> - GetInput ( context , node , input_index ) ; <nl> - const auto key = GetString ( sparse_key_tensor , string_index ) ; <nl> - const auto * sparse_output = <nl> - GetOutput ( context , node , i + data - > sparse_size ) ; <nl> - std : : string k ( key . str , key . len ) ; <nl> - switch ( sparse_output - > type ) { <nl> - case kTfLiteInt64 : <nl> - data - > config . sparse . emplace_back ( k , tf : : DataTypeToEnum < int64 > : : value ) ; <nl> - break ; <nl> - case kTfLiteFloat32 : <nl> - data - > config . sparse . emplace_back ( k , tf : : DataTypeToEnum < float > : : value ) ; <nl> - break ; <nl> - case kTfLiteString : <nl> - data - > config . sparse . emplace_back ( k , <nl> - tf : : DataTypeToEnum < tstring > : : value ) ; <nl> - break ; <nl> - default : <nl> - return kTfLiteError ; <nl> - } <nl> - } <nl> - <nl> - const auto & dense_shapes = data - > dense_shapes ; <nl> - for ( int i = 0 ; i < data - > dense_size ; i + + ) { <nl> - const int input_index = version = = V1 <nl> - ? kSparseKeysTensor + data - > sparse_size + i <nl> - : kDenseKeysTensor ; <nl> - const int dense_defaults_index = <nl> - version = = V1 <nl> - ? kSparseKeysTensor + data - > sparse_size + data - > dense_size + i <nl> - : kRaggedKeysTensor + i + 1 ; <nl> - int string_index = version = = V1 ? 0 : i ; <nl> - const TfLiteTensor * dense_key_tensor = <nl> - GetInput ( context , node , input_index ) ; <nl> - const auto * dense_output = <nl> - GetOutput ( context , node , i + data - > sparse_size * 3 ) ; <nl> - const auto * dense_defaults = <nl> - GetInput ( context , node , dense_defaults_index ) ; <nl> - const auto key = GetString ( dense_key_tensor , string_index ) ; <nl> - std : : string k ( key . str , key . len ) ; <nl> - const int elements_per_stride = <nl> - dense_shapes [ i ] . dims ( ) ? dense_shapes [ i ] . num_elements ( ) : 1 ; <nl> - switch ( dense_output - > type ) { <nl> - case kTfLiteInt64 : <nl> - data - > config . dense . emplace_back ( <nl> - k , tf : : DataTypeToEnum < int64 > : : value , dense_shapes [ i ] , <nl> - AsTensor < int64 > ( std : : vector < int64 > ( <nl> - dense_defaults - > data . i64 , <nl> - dense_defaults - > data . i64 + elements_per_stride ) ) , <nl> - false , elements_per_stride ) ; <nl> - break ; <nl> - case kTfLiteFloat32 : <nl> - data - > config . dense . emplace_back ( <nl> - k , tf : : DataTypeToEnum < float > : : value , dense_shapes [ i ] , <nl> - AsTensor < float > ( std : : vector < float > ( <nl> - dense_defaults - > data . f , <nl> - dense_defaults - > data . f + elements_per_stride ) ) , <nl> - false , elements_per_stride ) ; <nl> - break ; <nl> - case kTfLiteString : { <nl> - const int num_strings = GetStringCount ( dense_defaults ) ; <nl> - std : : vector < tstring > values ; <nl> - for ( int i = 0 ; i < num_strings ; + + i ) { <nl> - auto ref = GetString ( dense_defaults , i ) ; <nl> - values . emplace_back ( ref . str , ref . len ) ; <nl> - } <nl> - data - > config . 
dense . emplace_back ( <nl> - k , tf : : DataTypeToEnum < tstring > : : value , dense_shapes [ i ] , <nl> - AsTensor < tstring > ( values ) , false , elements_per_stride ) ; <nl> - break ; <nl> - } <nl> - default : <nl> - return kTfLiteError ; <nl> - } <nl> - } <nl> - <nl> - int offset = 3 * data - > sparse_size ; <nl> - for ( int i = 0 ; i < data - > dense_size ; i + + ) { <nl> - auto * parse_output = GetOutput ( context , node , i + offset ) ; <nl> - data - > got . dense_values . push_back ( parse_output ) ; <nl> - if ( parse_output - > type = = kTfLiteString ) { <nl> - tf : : TensorShape shape ; <nl> - if ( parse_output - > dims - > size = = 1 ) { <nl> - shape . AddDim ( parse_output - > dims - > data [ 0 ] ) ; <nl> - } else { <nl> - shape . AddDim ( GetTensorShape ( parse_output ) . FlatSize ( ) ) ; <nl> - } <nl> - data - > got . dense_tensors [ i ] = <nl> - tf : : Tensor ( tf : : DataTypeToEnum < tstring > : : value , shape ) ; <nl> - } <nl> - } <nl> - <nl> - size_t config_size = data - > config . dense . size ( ) ; <nl> - config_size + = data - > config . sparse . size ( ) ; <nl> - data - > config_index_size = config_size ; <nl> - auto config_index = std : : make_unique < ConfigIndex > ( config_size ) ; <nl> - bool ok = true ; <nl> - int max_length = 0 ; <nl> - for ( size_t d = 0 ; d < data - > config . dense . size ( ) ; + + d ) { <nl> - auto s = data - > config . dense [ d ] . feature_name ; <nl> - max_length = s . length ( ) > max_length ? s . length ( ) : max_length ; <nl> - } <nl> - for ( size_t d = 0 ; d < data - > config . sparse . size ( ) ; + + d ) { <nl> - auto s = data - > config . sparse [ d ] . feature_name ; <nl> - max_length = s . length ( ) > max_length ? s . length ( ) : max_length ; <nl> - } <nl> - if ( data - > quick_filter ) { <nl> - free ( data - > quick_filter ) ; <nl> - } <nl> - data - > quick_filter = <nl> - static_cast < bool * > ( malloc ( + + max_length * sizeof ( bool ) ) ) ; <nl> - memset ( data - > quick_filter , 0 , max_length * sizeof ( bool ) ) ; <nl> - data - > quick_filter_size = max_length ; <nl> - for ( size_t d = 0 ; d < data - > config . dense . size ( ) ; + + d ) { <nl> - const auto & s = data - > config . dense [ d ] . feature_name ; <nl> - data - > quick_filter [ s . length ( ) ] = true ; <nl> - } <nl> - for ( size_t d = 0 ; d < data - > config . sparse . size ( ) ; + + d ) { <nl> - const auto & s = data - > config . sparse [ d ] . feature_name ; <nl> - data - > quick_filter [ s . length ( ) ] = true ; <nl> - } <nl> - <nl> - for ( int i = 0 ; i < 1000 ; + + i ) { <nl> - for ( size_t d = 0 ; d < data - > config . dense . size ( ) ; + + d ) { <nl> - ok & = config_index - > InsertUnique ( <nl> - data - > hasher ( data - > config . dense [ d ] . feature_name ) , { d , Type : : Dense } ) ; <nl> - } <nl> - for ( size_t d = 0 ; d < data - > config . sparse . size ( ) ; + + d ) { <nl> - ok & = config_index - > InsertUnique ( <nl> - data - > hasher ( data - > config . sparse [ d ] . feature_name ) , <nl> - { d , Type : : Sparse } ) ; <nl> - } <nl> - if ( ok ) { <nl> - break ; <nl> - } <nl> - data - > hasher . seed + + ; <nl> - config_index - > Clear ( config_size ) ; <nl> - ok = true ; <nl> - } <nl> - if ( ! 
ok ) { <nl> - return kTfLiteError ; <nl> - } <nl> - data - > config_index = std : : move ( config_index ) ; <nl> - data - > created = true ; <nl> - } <nl> - <nl> - const TfLiteTensor * serialized = GetInput ( context , node , kExampleTensor ) ; <nl> - <nl> - std : : map < absl : : string_view , int > stats ; <nl> - const auto status = FastParseExampleLite ( <nl> - data - > config , serialized , { } , data - > quick_filter , data - > quick_filter_size , <nl> - data - > config_index , data - > config_index_size , & data - > hasher , & data - > got , <nl> - stats , context ) ; <nl> - if ( status ! = tf : : Status : : OK ( ) ) { <nl> - TF_LITE_KERNEL_LOG ( context , status . ToString ( ) . c_str ( ) ) ; <nl> - return kTfLiteError ; <nl> - } <nl> - return kTfLiteOk ; <nl> - } <nl> - <nl> - void Free ( TfLiteContext * context , void * buffer ) { <nl> - auto * obj = reinterpret_cast < OpData * > ( buffer ) ; <nl> - delete obj ; <nl> - } <nl> - <nl> - } / / namespace parse_example <nl> - <nl> - TfLiteRegistration * Register_PARSE_EXAMPLE ( ) { <nl> - static TfLiteRegistration r = { <nl> - parse_example : : Init , parse_example : : Free , <nl> - parse_example : : PrepareParseExample < parse_example : : V1 > , <nl> - parse_example : : EvalParseExample < parse_example : : V1 > } ; <nl> - return & r ; <nl> - } <nl> - <nl> - TfLiteRegistration * Register_PARSE_EXAMPLE_V2 ( ) { <nl> - static TfLiteRegistration r = { <nl> - parse_example : : Init , parse_example : : Free , <nl> - parse_example : : PrepareParseExample < parse_example : : V2 > , <nl> - parse_example : : EvalParseExample < parse_example : : V2 > } ; <nl> - return & r ; <nl> - } <nl> - <nl> - extern " C " void AddParseExampleOp ( : : tflite : : MutableOpResolver * resolver ) { <nl> - resolver - > AddCustom ( " ParseExample " , Register_PARSE_EXAMPLE ( ) ) ; <nl> - resolver - > AddCustom ( " ParseExampleV2 " , Register_PARSE_EXAMPLE_V2 ( ) ) ; <nl> - } <nl> - <nl> - } / / namespace custom <nl> - } / / namespace ops <nl> - } / / namespace tflite <nl> deleted file mode 100644 <nl> index ccda8579bbbcb . . 0000000000000 <nl> mmm a / tensorflow / lite / kernels / parse_example / parse_example . h <nl> ppp / dev / null <nl> <nl> - / * Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - # ifndef TENSORFLOW_LITE_KERNELS_PARSE_EXAMPLE_PARSE_EXAMPLE_H_ <nl> - # define TENSORFLOW_LITE_KERNELS_PARSE_EXAMPLE_PARSE_EXAMPLE_H_ <nl> - <nl> - # include " tensorflow / lite / mutable_op_resolver . 
h " <nl> - <nl> - namespace tflite { <nl> - namespace ops { <nl> - namespace custom { <nl> - <nl> - TfLiteRegistration * Register_PARSE_EXAMPLE ( ) ; <nl> - TfLiteRegistration * Register_PARSE_EXAMPLE_V2 ( ) ; <nl> - <nl> - extern " C " void AddParseExampleOp ( : : tflite : : MutableOpResolver * resolver ) ; <nl> - <nl> - } / / namespace custom <nl> - } / / namespace ops <nl> - } / / namespace tflite <nl> - <nl> - # endif / / TENSORFLOW_LITE_KERNELS_PARSE_EXAMPLE_PARSE_EXAMPLE_H_ <nl> deleted file mode 100644 <nl> index ca35da38c5231 . . 0000000000000 <nl> mmm a / tensorflow / lite / kernels / parse_example / parse_example_test . cc <nl> ppp / dev / null <nl> <nl> - / * Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - # include " tensorflow / lite / kernels / parse_example / parse_example . h " <nl> - <nl> - # include < initializer_list > <nl> - <nl> - # include " flatbuffers / flexbuffers . h " / / from @ flatbuffers <nl> - # include " tensorflow / core / example / feature_util . h " <nl> - # include " tensorflow / core / framework / node_def . pb . h " <nl> - # include " tensorflow / core / platform / protobuf . h " <nl> - # include " tensorflow / core / platform / tstring . h " <nl> - # include " tensorflow / lite / c / common . h " <nl> - # include " tensorflow / lite / core / api / op_resolver . h " <nl> - # include " tensorflow / lite / interpreter . h " <nl> - # include " tensorflow / lite / interpreter_builder . h " <nl> - # include " tensorflow / lite / kernels / register . h " <nl> - # include " tensorflow / lite / kernels / test_util . h " <nl> - # include " tensorflow / lite / model_builder . h " <nl> - # include " tensorflow / lite / schema / schema_generated . h " <nl> - # include " tensorflow / lite / string_util . 
h " <nl> - <nl> - namespace tflite { <nl> - namespace ops { <nl> - namespace custom { <nl> - <nl> - namespace tf = : : tensorflow ; <nl> - <nl> - const char * kNodeDefTxt = R " pb ( <nl> - name : " ParseExample / ParseExample " <nl> - op : " ParseExample " <nl> - input : " serialized " <nl> - input : " ParseExample / ParseExample / names " <nl> - input : " ParseExample / ParseExample / dense_keys_0 " <nl> - input : " ParseExample / Const " <nl> - attr { <nl> - key : " Ndense " <nl> - value { i : 1 } <nl> - } <nl> - attr { <nl> - key : " Nsparse " <nl> - value { i : 0 } <nl> - } <nl> - attr { <nl> - key : " Tdense " <nl> - value { list { type : DT_FLOAT } } <nl> - } <nl> - attr { <nl> - key : " dense_shapes " <nl> - value { list { shape { dim { size : 2 } } } } <nl> - } <nl> - attr { <nl> - key : " sparse_types " <nl> - value { list { type : DT_FLOAT } } <nl> - } <nl> - ) pb " ; <nl> - <nl> - const char * kNodeDefTxt2 = R " pb ( <nl> - name : " ParseExample / ParseExample " <nl> - op : " ParseExample " <nl> - input : " serialized " <nl> - input : " ParseExample / ParseExample / names " <nl> - input : " ParseExample / ParseExample / sparse_keys_0 " <nl> - attr { <nl> - key : " Ndense " <nl> - value { i : 0 } <nl> - } <nl> - attr { <nl> - key : " Nsparse " <nl> - value { i : 1 } <nl> - } <nl> - attr { <nl> - key : " Tdense " <nl> - value { } <nl> - } <nl> - attr { <nl> - key : " dense_shapes " <nl> - value { } <nl> - } <nl> - attr { <nl> - key : " sparse_types " <nl> - value { list { type : DT_FLOAT } } <nl> - } <nl> - ) pb " ; <nl> - <nl> - const char * kNodeDefTxt3 = R " pb ( <nl> - name : " ParseExample / ParseExample " <nl> - op : " ParseExample " <nl> - input : " serialized " <nl> - input : " ParseExample / ParseExample / names " <nl> - input : " ParseExample / ParseExample / sparse_keys_0 " <nl> - attr { <nl> - key : " Ndense " <nl> - value { i : 1 } <nl> - } <nl> - attr { <nl> - key : " Nsparse " <nl> - value { i : 0 } <nl> - } <nl> - attr { <nl> - key : " Tdense " <nl> - value { list { type : DT_STRING } } <nl> - } <nl> - attr { <nl> - key : " dense_shapes " <nl> - value { list { shape { dim { size : 1 } } } } <nl> - } <nl> - attr { <nl> - key : " sparse_types " <nl> - value { list { type : DT_FLOAT } } <nl> - } <nl> - ) pb " ; <nl> - <nl> - const char * kNodeDefTxt4 = R " pb ( <nl> - name : " ParseExample / ParseExample " <nl> - op : " ParseExample " <nl> - input : " serialized " <nl> - input : " ParseExample / ParseExample / names " <nl> - input : " ParseExample / ParseExample / sparse_keys_0 " <nl> - attr { <nl> - key : " Ndense " <nl> - value { i : 0 } <nl> - } <nl> - attr { <nl> - key : " Nsparse " <nl> - value { i : 1 } <nl> - } <nl> - attr { <nl> - key : " Tdense " <nl> - value { } <nl> - } <nl> - attr { <nl> - key : " dense_shapes " <nl> - value { } <nl> - } <nl> - attr { <nl> - key : " sparse_types " <nl> - value { list { type : DT_STRING } } <nl> - } <nl> - ) pb " ; <nl> - <nl> - template < typename DefaultType > <nl> - class ParseExampleOpModel : public SingleOpModel { <nl> - public : <nl> - ParseExampleOpModel ( std : : string serialized_example , <nl> - std : : vector < std : : string > sparse_keys , <nl> - std : : vector < std : : string > dense_keys , <nl> - std : : initializer_list < DefaultType > dense_defaults , <nl> - std : : vector < TensorType > dense_types , <nl> - std : : vector < TensorType > sparse_types , <nl> - const char * text_def , int dense_size = 2 ) { <nl> - / / Example <nl> - string_indices_ . 
push_back ( AddInput ( TensorData ( TensorType_STRING , { 1 } ) ) ) ; <nl> - / / Names <nl> - string_indices_ . push_back ( <nl> - AddConstInput < std : : string > ( TensorData ( TensorType_STRING , { 0 } ) , { " " } ) ) ; <nl> - std : : for_each ( sparse_keys . begin ( ) , sparse_keys . end ( ) , [ & ] ( auto & & ) { <nl> - string_indices_ . push_back ( AddInput ( TensorData ( TensorType_STRING , { 1 } ) ) ) ; <nl> - } ) ; <nl> - std : : for_each ( dense_keys . begin ( ) , dense_keys . end ( ) , [ & ] ( auto & & ) { <nl> - string_indices_ . push_back ( AddInput ( TensorData ( TensorType_STRING , { 1 } ) ) ) ; <nl> - } ) ; <nl> - if ( dense_size > 0 ) { <nl> - dense_defaults_ = AddConstInput < DefaultType > ( <nl> - TensorData ( dense_types [ 0 ] , { dense_size } ) , dense_defaults ) ; <nl> - } <nl> - if ( ! sparse_keys . empty ( ) ) { <nl> - for ( int i = 0 ; i < sparse_keys . size ( ) ; i + + ) { <nl> - sparse_indices_outputs_ . push_back ( AddOutput ( TensorType_INT64 ) ) ; <nl> - } <nl> - for ( int i = 0 ; i < sparse_keys . size ( ) ; i + + ) { <nl> - sparse_values_outputs_ . push_back ( AddOutput ( sparse_types [ i ] ) ) ; <nl> - } <nl> - for ( int i = 0 ; i < sparse_keys . size ( ) ; i + + ) { <nl> - sparse_shapes_outputs_ . push_back ( AddOutput ( { TensorType_INT64 , { 2 } } ) ) ; <nl> - } <nl> - } <nl> - for ( int i = 0 ; i < dense_keys . size ( ) ; i + + ) { <nl> - dense_outputs_ . push_back ( AddOutput ( { dense_types [ i ] , { dense_size } } ) ) ; <nl> - } <nl> - <nl> - tf : : NodeDef nodedef ; <nl> - tf : : protobuf : : TextFormat : : Parser parser ; <nl> - tf : : protobuf : : io : : ArrayInputStream input_stream ( text_def , strlen ( text_def ) ) ; <nl> - if ( ! parser . Parse ( & input_stream , & nodedef ) ) { <nl> - abort ( ) ; <nl> - } <nl> - std : : string serialized_nodedef ; <nl> - nodedef . SerializeToString ( & serialized_nodedef ) ; <nl> - flexbuffers : : Builder fbb ; <nl> - fbb . Vector ( [ & ] ( ) { <nl> - fbb . String ( nodedef . op ( ) ) ; <nl> - fbb . String ( serialized_nodedef ) ; <nl> - } ) ; <nl> - fbb . Finish ( ) ; <nl> - const auto buffer = fbb . 
GetBuffer ( ) ; <nl> - SetCustomOp ( " ParseExample " , buffer , Register_PARSE_EXAMPLE ) ; <nl> - BuildInterpreter ( { } ) ; <nl> - int idx = 0 ; <nl> - PopulateStringTensor ( string_indices_ [ idx + + ] , { serialized_example } ) ; <nl> - PopulateStringTensor ( string_indices_ [ idx + + ] , { " " } ) ; <nl> - for ( const auto & key : sparse_keys ) { <nl> - PopulateStringTensor ( string_indices_ [ idx + + ] , { key } ) ; <nl> - } <nl> - for ( const auto & key : dense_keys ) { <nl> - PopulateStringTensor ( string_indices_ [ idx + + ] , { key } ) ; <nl> - } <nl> - } <nl> - <nl> - template < typename T > <nl> - std : : vector < T > GetSparseIndicesOutput ( int i ) { <nl> - return ExtractVector < T > ( sparse_indices_outputs_ [ i ] ) ; <nl> - } <nl> - <nl> - template < typename T > <nl> - std : : vector < T > GetSparseValuesOutput ( int i ) { <nl> - return ExtractVector < T > ( sparse_values_outputs_ [ i ] ) ; <nl> - } <nl> - <nl> - template < typename T > <nl> - std : : vector < T > GetSparseShapesOutput ( int i ) { <nl> - return ExtractVector < T > ( sparse_shapes_outputs_ [ i ] ) ; <nl> - } <nl> - <nl> - template < typename T > <nl> - std : : vector < T > GetDenseOutput ( int i ) { <nl> - return ExtractVector < T > ( dense_outputs_ [ i ] ) ; <nl> - } <nl> - <nl> - std : : vector < std : : string > GetStringOutput ( int i ) { <nl> - auto * t = interpreter_ - > tensor ( i ) ; <nl> - int count = GetStringCount ( t ) ; <nl> - std : : vector < std : : string > v ; <nl> - for ( int i = 0 ; i < count ; + + i ) { <nl> - auto ref = GetString ( t , i ) ; <nl> - v . emplace_back ( ref . str , ref . len ) ; <nl> - } <nl> - return v ; <nl> - } <nl> - <nl> - int DenseDefaults ( ) { return dense_defaults_ ; } <nl> - <nl> - int SparseValuesOutputs ( int i ) { return sparse_values_outputs_ [ i ] ; } <nl> - <nl> - int DenseOutputs ( int i ) { return dense_outputs_ [ i ] ; } <nl> - <nl> - std : : vector < int > dense_outputs_ ; <nl> - std : : vector < int > sparse_indices_outputs_ ; <nl> - std : : vector < int > sparse_shapes_outputs_ ; <nl> - std : : vector < int > sparse_values_outputs_ ; <nl> - std : : vector < int > string_indices_ ; <nl> - int dense_defaults_ = - 1 ; <nl> - } ; <nl> - <nl> - TEST ( ParseExampleOpsTest , SimpleTest ) { <nl> - tf : : Example example ; <nl> - tf : : AppendFeatureValues < float > ( { 1 . 5f , 1 . 5f } , " time " , & example ) ; <nl> - tf : : AppendFeatureValues < float > ( { 1 . 0f , 1 . 0f } , " num " , & example ) ; <nl> - ParseExampleOpModel < float > m ( example . SerializeAsString ( ) , { } , { " time " } , <nl> - { 0 . f , 0 . f } , { TensorType_FLOAT32 } , { } , <nl> - kNodeDefTxt ) ; <nl> - m . Invoke ( ) ; <nl> - EXPECT_THAT ( m . GetDenseOutput < float > ( 0 ) , <nl> - ElementsAreArray ( ArrayFloatNear ( { 1 . 5f , 1 . 5f } ) ) ) ; <nl> - } <nl> - <nl> - TEST ( ParseExampleOpsTest , SparseTest ) { <nl> - tf : : Example example ; <nl> - tf : : AppendFeatureValues < float > ( { 1 . 5f } , " time " , & example ) ; <nl> - ParseExampleOpModel < float > m ( example . SerializeAsString ( ) , { " time " } , { } , { } , <nl> - { } , { TensorType_FLOAT32 } , kNodeDefTxt2 , 0 ) ; <nl> - m . Invoke ( ) ; <nl> - EXPECT_THAT ( m . GetSparseIndicesOutput < int64_t > ( 0 ) , <nl> - ElementsAreArray ( ArrayFloatNear ( { 0 , 0 } ) ) ) ; <nl> - EXPECT_THAT ( m . GetSparseValuesOutput < float > ( 0 ) , <nl> - ElementsAreArray ( ArrayFloatNear ( { 1 . 5f } ) ) ) ; <nl> - EXPECT_THAT ( m . 
GetSparseShapesOutput < int64_t > ( 0 ) , <nl> - ElementsAreArray ( ArrayFloatNear ( { 1 , 1 } ) ) ) ; <nl> - } <nl> - <nl> - TEST ( ParseExampleOpsTest , SimpleBytesTest ) { <nl> - tf : : Example example ; <nl> - const std : : string test_data = " simpletest " ; <nl> - tf : : AppendFeatureValues < tensorflow : : tstring > ( { test_data } , " time " , & example ) ; <nl> - tf : : AppendFeatureValues < float > ( { 1 . 0f , 1 . 0f } , " num " , & example ) ; <nl> - std : : string default_value = " missing " ; <nl> - ParseExampleOpModel < std : : string > m ( example . SerializeAsString ( ) , { } , { " time " } , <nl> - { default_value } , { TensorType_STRING } , { } , <nl> - kNodeDefTxt3 , 1 ) ; <nl> - m . PopulateStringTensor ( m . DenseDefaults ( ) , { default_value } ) ; <nl> - m . Invoke ( ) ; <nl> - std : : vector < string > c = m . GetStringOutput ( m . DenseOutputs ( 0 ) ) ; <nl> - EXPECT_EQ ( 1 , c . size ( ) ) ; <nl> - EXPECT_EQ ( test_data , c [ 0 ] ) ; <nl> - } <nl> - <nl> - TEST ( ParseExampleOpsTest , SparseBytesTest ) { <nl> - tf : : Example example ; <nl> - const std : : string test_data = " simpletest " ; <nl> - tf : : AppendFeatureValues < tensorflow : : tstring > ( { test_data , test_data } , " time " , <nl> - & example ) ; <nl> - tf : : AppendFeatureValues < float > ( { 1 . 0f , 1 . 0f } , " num " , & example ) ; <nl> - ParseExampleOpModel < std : : string > m ( example . SerializeAsString ( ) , { " time " } , { } , <nl> - { } , { } , { TensorType_STRING } , kNodeDefTxt4 , <nl> - 0 ) ; <nl> - m . Invoke ( ) ; <nl> - EXPECT_THAT ( m . GetSparseIndicesOutput < int64_t > ( 0 ) , <nl> - testing : : ElementsAreArray ( { 0 , 0 , 0 , 1 } ) ) ; <nl> - auto values = m . GetStringOutput ( m . SparseValuesOutputs ( 0 ) ) ; <nl> - EXPECT_EQ ( 2 , values . size ( ) ) ; <nl> - EXPECT_EQ ( test_data , values [ 0 ] ) ; <nl> - EXPECT_EQ ( test_data , values [ 1 ] ) ; <nl> - EXPECT_THAT ( m . GetSparseShapesOutput < int64_t > ( 0 ) , <nl> - testing : : ElementsAreArray ( { 1 , 2 } ) ) ; <nl> - } <nl> - <nl> - } / / namespace custom <nl> - } / / namespace ops <nl> - } / / namespace tflite <nl> mmm a / tensorflow / lite / testing / BUILD <nl> ppp b / tensorflow / lite / testing / BUILD <nl> cc_library ( <nl> " / / tensorflow / lite / kernels : reference_ops " , <nl> " / / tensorflow / lite / kernels : test_delegate_providers_lib " , <nl> " / / tensorflow / lite / kernels / hashtable : hashtable_op_kernels " , <nl> - " / / tensorflow / lite / kernels / parse_example : parse_example " , <nl> " / / tensorflow / lite / tools / evaluation : utils " , <nl> ] + select ( { <nl> " / / tensorflow : ios " : [ ] , <nl> mmm a / tensorflow / lite / testing / tflite_driver . cc <nl> ppp b / tensorflow / lite / testing / tflite_driver . cc <nl> limitations under the License . <nl> # endif <nl> # include " tensorflow / lite / kernels / custom_ops_register . h " <nl> # include " tensorflow / lite / kernels / hashtable / hashtable_ops . h " <nl> - # include " tensorflow / lite / kernels / parse_example / parse_example . h " <nl> # include " tensorflow / lite / kernels / register . h " <nl> # include " tensorflow / lite / kernels / register_ref . h " <nl> # include " tensorflow / lite / kernels / test_delegate_providers . h " <nl> TfLiteDriver : : TfLiteDriver ( DelegateType delegate_type , bool reference_kernel ) <nl> ops : : builtin : : BuiltinOpResolver * buildinop_resolver_ = <nl> reinterpret_cast < ops : : builtin : : BuiltinOpResolver * > ( resolver_ . 
get ( ) ) ; <nl> tflite : : ops : : custom : : AddHashtableOps ( buildinop_resolver_ ) ; <nl> - tflite : : ops : : custom : : AddParseExampleOp ( buildinop_resolver_ ) ; <nl> } <nl> <nl> switch ( delegate_type ) { <nl>
Add experimental custom parse_example op
tensorflow/tensorflow
af93fc729455e71279cc0860d551d9792c46e1a7
2020-12-15T18:36:16Z
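The ParseExample tests in the commit above build serialized `tf.Example` protos carrying a float feature named `time` and check both dense and sparse parsing. For readers who have not used that proto, here is a minimal sketch of the same round trip written against the regular TensorFlow Python API rather than the TFLite custom op; the feature name and values simply mirror the C++ tests.

```python
import tensorflow as tf

# Build and serialize a tf.Example carrying a two-element float feature.
example = tf.train.Example(features=tf.train.Features(feature={
    "time": tf.train.Feature(float_list=tf.train.FloatList(value=[1.5, 1.5])),
}))
serialized = example.SerializeToString()

# Dense parsing: a fixed-length [2] feature with a default value.
dense = tf.io.parse_example(
    [serialized],
    {"time": tf.io.FixedLenFeature([2], tf.float32, default_value=[0.0, 0.0])})
print(dense["time"].numpy())            # [[1.5 1.5]]

# Sparse parsing: the same feature read back as a SparseTensor.
sparse = tf.io.parse_example(
    [serialized], {"time": tf.io.VarLenFeature(tf.float32)})
print(sparse["time"].values.numpy())    # [1.5 1.5]
```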
mmm a / Documentation / Books / Users / General - Graphs / Management . mdpp <nl> ppp b / Documentation / Books / Users / General - Graphs / Management . mdpp <nl> <nl> - ! CHAPTER Graph Module <nl> + ! CHAPTER Graph Management <nl> <nl> - The graph module provides functions dealing with graph structures . <nl> + ! SECTION Create a graph <nl> <nl> - ! SECTION First Steps with Graphs <nl> - <nl> - A Graph consists of * vertices * and * edges * . Edges are stored as documents in * edge <nl> - collections * . A vertex can be a document of a * document collection * or of an edge <nl> - collection ( so edges can be used as vertices ) . Which collections are used within <nl> - a graph is defined via * edge definitions * . A graph can contain more than one edge <nl> - definition , at least one is needed . <nl> - <nl> - ! SUBSECTION Create a graph <nl> + < ! - - @ startDocuBlock JSF_general_graph_create - - > <nl> <nl> The creation of a graph requires the name of the graph and a definition of its edges . <nl> <nl> To add further edge definitions to the array one must call : <nl> <nl> ! SUBSUBSECTION Undirected Relation <nl> <nl> - <nl> - < br / > <nl> ` general - graph . _undirectedRelationDefinition ( relationName , vertexCollections ) ` <nl> * Define an undirected relation . * <nl> < br / > <nl> + < br / > <nl> Defines an undirected relation with the name * relationName * using the <nl> list of * vertexCollections * . This relation allows the user to store <nl> edges in any direction between any pair of vertices within the <nl> arangosh > graph . _undirectedRelationDefinition ( " marriage " , [ " female " , " male " ] ) ; <nl> ! SUBSUBSECTION Directed Relation <nl> <nl> <nl> - < br / > <nl> ` general - graph . _directedRelationDefinition ( relationName , fromVertexCollections , toVertexCollections ) ` <nl> * Define a directed relation . * <nl> < br / > <nl> + < br / > <nl> The * relationName * defines the name of this relation and references to the underlying edge collection . <nl> The * fromVertexCollections * is an Array of document collections holding the start vertices . <nl> The * toVertexCollections * is an Array of document collections holding the target vertices . <nl> arangosh > var g = graph . _create ( " myGraph " , [ ed1 ] ) ; <nl> ` ` ` <nl> < br / > <nl> <nl> + ! SUBSECTION List available graphs <nl> + <nl> <nl> - ! SUBSECTION Read a graph <nl> + < ! - - @ startDocuBlock JSF_general_graph_list_call - - > <nl> + ` general - graph . _list ( ) ` * List all graphs . * <nl> + < br / > <nl> + < br / > <nl> + <nl> + < ! - - @ startDocuBlock JSF_general_graph_list_info - - > <nl> + <nl> + < br / > <nl> + @ EXAMPLES <nl> + < br / > <nl> + <nl> + < ! - - @ startDocuBlock JSF_general_graph_list_examples - - > <nl> + ` ` ` <nl> + arangosh > var graph = require ( " org / arangodb / general - graph " ) ; <nl> + arangosh > graph . _list ( ) ; <nl> + [ <nl> + " social " <nl> + ] <nl> + ` ` ` <nl> + <nl> + ! SUBSECTION Load a graph <nl> <nl> ` ` ` js <nl> > var graph = require ( " org / arangodb / graph " ) ; <nl> dropCollections : bool - optional . * true * all collections of the graph will be de <nl> <nl> ! SUBSECTION Save <nl> <nl> + < ! - - @ startDocuBlock JSF_general_graph_vertex_collection_save - - > <nl> <nl> - Creates and saves a new vertex in collection * vertexCollectionName * <nl> + ` general - graph . vertexCollectionName . save ( data ) ` * Creates a new vertex * <nl> < br / > <nl> - ` general - graph . vertexCollectionName . 
save ( data ) ` <nl> < br / > <nl> + Creates a new vertex in collection * vertexCollectionName * . <nl> * data * : json - data of vertex <nl> < br / > <nl> @ EXAMPLES <nl> arangosh > g . _getToVertex ( " relation / aliceAndBob " ) <nl> } <nl> ` ` ` <nl> < br / > <nl> - <nl> - <nl> - <nl> - ! SECTION Some Methods <nl> - <nl> - ` ` ` javascript <nl> - graph . listCommonNeighbors ( vertex1 , vertex2 , options ) <nl> - ` ` ` <nl> - <nl> - vertex1 : string - vertex id <nl> - vertex2 : string - vertex id <nl> - options : <nl> - * see getNeighbors <nl> - <nl> - ` ` ` javascript <nl> - graph . amountCommonNeighbors ( vertex1 , vertex2 , options ) <nl> - ` ` ` <nl> - <nl> - vertex1 : string - vertex id <nl> - vertex2 : string - vertex id <nl> - options : <nl> - * see getNeighbors <nl> - <nl> - <nl> - <nl> - ` ` ` javascript <nl> - graph . listCommonProperties ( ( vertex1 , vertex2 ) <nl> - ` ` ` <nl> - <nl> - vertex1 : string - vertex id <nl> - vertex2 : string - vertex id <nl> - <nl> - <nl> - ` ` ` javascript <nl> - graph . amountCommonProperties ( ( vertex1 , vertex2 ) <nl> - ` ` ` <nl> - <nl> - vertex1 : string - vertex id <nl> - vertex2 : string - vertex id <nl> - <nl> - <nl> - <nl> - <nl> - ` ` ` javascript <nl> - graph . pathTo ( vertex1 , vertex2 , options ) <nl> - ` ` ` <nl> - <nl> - <nl> - vertex1 : string - vertex id <nl> - vertex2 : string - vertex id <nl> - options : see determinePredecessors <nl> - <nl> - <nl> - ` ` ` javascript <nl> - graph . distanceTo ( vertex1 , vertex2 , options ) <nl> - ` ` ` <nl> - <nl> - <nl> - vertex1 : string - vertex id <nl> - vertex2 : string - vertex id <nl> - options : see determinePredecessors <nl> - <nl> - <nl> - ` ` ` javascript <nl> - graph . determinePredecessors ( vertex1 , source , options ) <nl> - ` ` ` <nl> - <nl> - <nl> - vertex1 : string - vertex id <nl> - source : ? ? ? <nl> - options : <nl> - * cached : Boolean - > If true a cached version will be used <nl> - <nl> - <nl> - ` ` ` javascript <nl> - graph . pathesForTree ( vertex1 , tree , path_to_here ) <nl> - ` ` ` <nl> - <nl> - vertex1 : string - vertex id <nl> - tree : ? ? ? <nl> - path_to_here : Internal Array , should initially be undefined or an empty array <nl> - <nl> - <nl> - ` ` ` javascript <nl> - graph . getNeighbors ( vertex1 , options ) <nl> - ` ` ` <nl> - <nl> - vertex1 : string - vertex id <nl> - options : <nl> - <nl> - * direction : <nl> - " inbound " - > consider only inbound edges <nl> - " outbount " - > consider only outbound edges <nl> - " any " ( default ) - > consider both directions <nl> - * weight : attribute - name - > use this attribute to determine edgeweight <nl> - * weight_function : function - > use this function to calculate the weight <nl> - * default - weight - > us this value if weight could not be calculated otherwise , default is Infinity <nl> - * only : function - > will be invoked on any edge , neighbors will only be included if this returns true or is not defined . <nl> - <nl> - <nl> - ` ` ` javascript <nl> - graph . measurement ( vertex1 , measurement ) <nl> - ` ` ` <nl> - <nl> - vertex1 : string - vertex id <nl> - measurement : String <nl> - * " eccentricity " : Calculates the eccentricity of the vertex <nl> - * " betweenness " : Calculates the betweenness of the vertex <nl> - * " closeness " : Calculates the closeness of the vertex <nl> - <nl> - ! 
SECTION Using Graphs in AQL { # JSModuleGraphAQL } <nl> - <nl> - Complete Documentation can be copied from normal AQL documentation , with : <nl> - <nl> - * replace VertexCollection / EdgeCollection by Graph <nl> - <nl> - ! SUBSECTION PATHS <nl> - <nl> - * * * BUILD ON * * ` ahuacatl . js ` : 4090 ` GRAPH_PATHS ` - > uses ` COLLECTION ` on second arg , has to use ` COLLECTION ` or ` GRAPH ` accordingly . Has to pass the graph to traverser <nl> - <nl> - Paths returns a handle for all paths included in the graph : <nl> - <nl> - ` GRAPH_PATHS ( graphname , direction , followcycles ) ` <nl> - <nl> - * ` graphname ` defines the graph <nl> - * ` direction ` defines the direction <nl> - * ` followcycles ` defines if cyclic paths should be followed <nl> - <nl> - Example calls : <nl> - <nl> - ` ` ` javascript <nl> - FOR p in PATHS ( shop , " outbound " ) <nl> - FILTER p . source . _id = = " 123456 / 123456 " & & LENGTH ( p . edges ) = = 2 <nl> - RETURN p . vertices [ * ] . name <nl> - ` ` ` <nl> - <nl> - <nl> - ! SUBSECTION TRAVERSAL <nl> - <nl> - <nl> - ` GRAPH_TRAVERSAL ( graphname , startVertex , direction , options ) } ` <nl> - <nl> - * * * BUILD ON * * ` ahuacatl . js ` : 4243 ` TRAVERSAL_FUNC ` - > uses ` COLLECTION ` on first and second arg , has to use ` COLLECTION ` or ` GRAPH ` accordingly . Has to pass the graph to traverser <nl> - * * * TO CHANGE * * ` common / modules / org / arangodb / graph / traversal . js ` : 106 ` collectionDatasourceFactory ` should be able to work on Graphs <nl> - <nl> - Traverses the graph described by the ` graphname ` , <nl> - starting at the vertex identified by id ` startVertex ` . Vertex connectivity is <nl> - specified by the ` direction ` parameter : <nl> - <nl> - - ` " outbound " ` : Vertices are connected in ` _from ` to ` _to ` order <nl> - - ` " inbound " ` : Vertices are connected in ` _to ` to ` _from ` order <nl> - - ` " any " ` : Vertices are connected in both ` _to ` to ` _from ` and in <nl> - ` _from ` to ` _to ` order <nl> - <nl> - All this is defined already for TRAVERSAL , no changes should be applied here <nl> - <nl> - ` ` ` javascript <nl> - TRAVERSAL ( shop , " products / arangodb " , " outbound " , { <nl> - strategy : " depthfirst " , <nl> - order : " postorder " , <nl> - itemOrder : " backward " , <nl> - maxDepth : 6 , <nl> - paths : true <nl> - } ) <nl> - ` ` ` <nl> - <nl> - <nl> - ! SUBSECTION TRAVERSAL_TREES <nl> - <nl> - ` GRAPH_TRAVERSAL_TREE ( graphname , startVertex , direction , connectName , options ) ` <nl> - <nl> - * * * BUILD ON * * ` ahuacatl . js ` : 4243 ` TRAVERSAL_FUNC ` - > uses ` COLLECTION ` on first and second arg , has to use ` COLLECTION ` or ` GRAPH ` accordingly . Has to pass the graph to traverser <nl> - * * * TO CHANGE * * ` common / modules / org / arangodb / graph / traversal . js ` : 106 ` collectionDatasourceFactory ` should be able to work on Graphs <nl> - <nl> - ` ` ` javascript <nl> - GRAPH_TRAVERSAL_TREE ( shop , " products / arangodb " , " inbound " , " sold " , { <nl> - itemOrder : " forward " <nl> - } ) <nl> - ` ` ` <nl> - <nl> - Makes internal use of TRAVERSAL , modyfing that is sufficient . <nl> - <nl> - ! SUBSECTION SHORTEST_PATHS <nl> - <nl> - <nl> - * * * BUILD ON * * ` ahuacatl . js ` : 4243 ` TRAVERSAL_FUNC ` - > uses ` COLLECTION ` on first and second arg , has to use ` COLLECTION ` or ` GRAPH ` accordingly . Has to pass the graph to traverser <nl> - * * * TO CHANGE * * ` common / modules / org / arangodb / graph / traversal . 
js ` : 106 ` collectionDatasourceFactory ` should be able to work on Graphs <nl> - <nl> - ` GRAPH_SHORTEST_PATH ( graphname , startVertex , endVertex , direction , options ) ` : <nl> - <nl> - Equal to functionality of ` SHORTEST_PATH ` . <nl> - Makes internal use of TRAVERSAL , modyfing that is sufficient . <nl> - <nl> - <nl> - ! SUBSECTION EDGES <nl> - <nl> - <nl> - * * * BUILD ON * * ` ahuacatl . js ` : 4479 ` GRAPH_EDGES ` - > uses ` COLLECTION ` on first argument , has to use ` COLLECTION ` or ` GRAPH ` accordingly . <nl> - <nl> - ` GRAPH_EDGES ( graphname , startvertex , direction , edgeexamples , collectionRestrictions ) ` <nl> - <nl> - Same as original , but with optional ` collectionRestrictions ` to define which edge collections have to be included . Default is all . <nl> - <nl> - <nl> - ! SUBSECTION NEIGHBORS <nl> - <nl> - <nl> - * * * BUILD ON * * ` ahuacatl . js ` : 4508 ` GRAPH_NEIGHBORS ` - > uses ` COLLECTION ` on first , has to use ` COLLECTION ` or ` GRAPH ` accordingly . <nl> - <nl> - ` GRAPH_NEIGHBORS ( graphname , startvertex , direction , edgeexamples ) ` <nl> - <nl> - * Each of the graph functions in AQL ( ` PATHS ` , ` TRAVERSAL ` , ` TRAVERSAL_TREES ` , ` SHORTEST_PATHS ` , ` EDGES ` , ` NEIGHBORS ` ) will take the graph as its first argument ( which parts of the other arguments will be pushed to be defined in FILTER and not in the signature of the function was discussed , but postponed because it is a detail ) . <nl> - <nl>
Beautified Management documentation of graph module
arangodb/arangodb
e9f3f5fdc161a46793f092c5511cd23ef4a19d71
2014-06-17T07:55:40Z
new file mode 100644 <nl> index 0000000000 . . e98e7cd14f <nl> mmm / dev / null <nl> ppp b / change / react - native - windows - 2020 - 05 - 28 - 23 - 34 - 14 - projectImports . json <nl> <nl> + { <nl> + " type " : " prerelease " , <nl> + " comment " : " blacklist all ProjectImports . zip " , <nl> + " packageName " : " react - native - windows " , <nl> + " email " : " asklar @ microsoft . com " , <nl> + " dependentChangeType " : " patch " , <nl> + " date " : " 2020 - 05 - 29T06 : 34 : 13 . 953Z " <nl> + } <nl> mmm a / packages / E2ETest / metro . config . js <nl> ppp b / packages / E2ETest / metro . config . js <nl> module . exports = { <nl> ' react - native - windows ' : rnwPath , <nl> } , <nl> blacklistRE : blacklist ( [ <nl> - new RegExp ( ' . * E2ETest / msbuild . * ' . replace ( / [ / \ \ ] / g , ' \ \ / ' ) ) , / / Avoid error EBUSY : resource busy or locked , open ' D : \ a \ 1 \ s \ packages \ E2ETest \ msbuild . ProjectImports . zip ' in pipeline <nl> + / / Avoid error EBUSY : resource busy or locked , open ' D : \ a \ 1 \ s \ packages \ E2ETest \ msbuild . ProjectImports . zip ' in pipeline <nl> + / . * \ . ProjectImports \ . zip / , <nl> / / This stops " react - native run - windows " from causing the metro server to crash if its already running <nl> new RegExp ( <nl> ` $ { path . resolve ( __dirname , ' windows ' ) . replace ( / [ / \ \ ] / g , ' / ' ) } . * ` <nl> mmm a / packages / microsoft - reactnative - sampleapps / metro . config . js <nl> ppp b / packages / microsoft - reactnative - sampleapps / metro . config . js <nl> module . exports = { <nl> ' react - native - windows ' : rnwPath , <nl> } , <nl> blacklistRE : blacklist ( [ <nl> - new RegExp ( <nl> - ' . * microsoft - reactnative - sampleapps / msbuild . * ' . replace ( / [ / \ \ ] / g , ' \ \ / ' ) , <nl> - ) , / / Avoid error EBUSY : resource busy or locked , open ' D : \ a \ 1 \ s \ packages \ E2ETest \ msbuild . ProjectImports . zip ' in pipeline <nl> + / / Avoid error EBUSY : resource busy or locked , open ' D : \ a \ 1 \ s \ packages \ E2ETest \ msbuild . ProjectImports . zip ' in pipeline <nl> + / . * \ . ProjectImports \ . zip / , <nl> / / This stops " react - native run - windows " from causing the metro server to crash if its already running <nl> new RegExp ( <nl> ` $ { path . resolve ( __dirname , ' windows ' ) . replace ( / [ / \ \ ] / g , ' / ' ) } . * ` , <nl> mmm a / packages / playground / metro . config . js <nl> ppp b / packages / playground / metro . config . js <nl> module . exports = { <nl> ' react - native - windows ' : rnwPath , <nl> } , <nl> blacklistRE : blacklist ( [ <nl> + / / Avoid error EBUSY : resource busy or locked , open ' D : \ a \ 1 \ s \ packages \ playground \ msbuild . ProjectImports . zip ' in pipeline <nl> + / . * \ . ProjectImports \ . zip / , <nl> + <nl> / / This stops " react - native run - windows " from causing the metro server to crash if its already running <nl> new RegExp ( <nl> ` $ { path . resolve ( __dirname , ' windows ' ) . replace ( / [ / \ \ ] / g , ' / ' ) } . * ` , <nl> mmm a / vnext / local - cli / generator - windows / templates / metro . config . js <nl> ppp b / vnext / local - cli / generator - windows / templates / metro . config . js <nl> module . exports = { <nl> ` $ { path . resolve ( __dirname , ' windows ' ) . replace ( / [ / \ \ ] / g , ' / ' ) } . * ` , <nl> ) , <nl> / / This prevents " react - native run - windows " from hitting : EBUSY : resource busy or locked , open msbuild . ProjectImports . zip <nl> - new RegExp ( <nl> - ` $ { path <nl> - . 
resolve ( __dirname , ' msbuild . ProjectImports . zip ' ) <nl> - . replace ( / [ / \ \ ] / g , ' / ' ) } . * ` , <nl> - ) , <nl> + / . * \ . ProjectImports \ . zip / , <nl> ] ) , <nl> } , <nl> transformer : { <nl>
Block all ProjectImports . zip files ( )
microsoft/react-native-windows
e2d328a545a2520f760dbd0c782e2da20f0eb355
2020-05-30T01:04:10Z
mmm a / src / translations / flags / flags . qrc <nl> ppp b / src / translations / flags / flags . qrc <nl> <nl> < RCC > <nl> < qresource prefix = " / flags " > <nl> + < file alias = " ar_AR " > ar . png < / file > <nl> < file alias = " de_DE " > de . png < / file > <nl> < file alias = " es_ES " > es . png < / file > <nl> < file alias = " en_US " > us . png < / file > <nl>
Update flags . qrc
sqlitebrowser/sqlitebrowser
6ed563777ab8dad5d363f0a94189c0d745f4ff31
2016-07-11T00:25:24Z
new file mode 100644 <nl> index 000000000000 . . 06659fc7b064 <nl> mmm / dev / null <nl> ppp b / validation - test / compiler_crashers / 1480 - swift - typebase - getcanonicaltype - edited . swift <nl> <nl> + / / RUN : not - - crash % target - swift - frontend % s - parse <nl> + / / XFAIL : asan <nl> + <nl> + / / Distributed under the terms of the MIT license <nl> + / / Test case submitted to project by https : / / github . com / practicalswift ( practicalswift ) <nl> + / / Test case found by fuzzing <nl> + <nl> + / / This test was tweaked slightly from the original <nl> + / / 1480 - swift - typebase - getcanonicaltype . swift , because an unrelated change <nl> + / / happened to avoid the crash . <nl> + <nl> + func a ( a : a { <nl> + return x in c { <nl> + } <nl> + struct c { <nl> + class A , e : a { <nl> + } <nl> + } <nl> + protocol a { <nl> + } <nl> + } <nl> + protocol a { <nl> + protocol c in c { <nl> + case c : a { <nl> + } <nl> + } <nl> + protocol c : b where d : b : a { <nl> + return " ) <nl> + case . . c < c > ( n : String { <nl> + } <nl> + typealias b : d <nl> + init ( i ( self ] { <nl> + f : A ) { <nl> + self . join ( x ) { <nl> + return ! ( true { <nl> + } <nl> + var b = a ( ) - > Any ) ) <nl> + } <nl> + } <nl> + } <nl> + class A : String { <nl> + } <nl> + protocol a { <nl> + } <nl> + protocol a { <nl> + } <nl> + typealias d > ( ) <nl> new file mode 100644 <nl> index 000000000000 . . 4234cfc3ab73 <nl> mmm / dev / null <nl> ppp b / validation - test / compiler_crashers / 1509 - bool - edited . swift <nl> <nl> + / / RUN : not - - crash % target - swift - frontend % s - parse <nl> + <nl> + / / Distributed under the terms of the MIT license <nl> + / / Test case submitted to project by https : / / github . com / practicalswift ( practicalswift ) <nl> + / / Test case found by fuzzing <nl> + <nl> + / / This test was tweaked slightly from the original 1509 - bool . swift , because an <nl> + / / unrelated change happened to avoid the crash . <nl> + <nl> + ance ( x ) { self . c ] <nl> + var b { <nl> + } <nl> + case . . d < T > , U , ( c = = { <nl> + return self . h = = e ? = { <nl> + } <nl> + for c : SequenceType where f : C { <nl> + func b : Int <nl> + } <nl> + } <nl> + } <nl> + protocol a { <nl> + func b ( Any ] = " a < T ) { <nl> + typealias A : A { <nl> similarity index 91 % <nl> rename from validation - test / compiler_crashers / 1480 - swift - typebase - getcanonicaltype . swift <nl> rename to validation - test / compiler_crashers_fixed / 1480 - swift - typebase - getcanonicaltype . swift <nl> mmm a / validation - test / compiler_crashers / 1480 - swift - typebase - getcanonicaltype . swift <nl> ppp b / validation - test / compiler_crashers_fixed / 1480 - swift - typebase - getcanonicaltype . swift <nl> <nl> - / / RUN : not - - crash % target - swift - frontend % s - parse <nl> + / / RUN : not % target - swift - frontend % s - parse <nl> / / XFAIL : asan <nl> <nl> / / Distributed under the terms of the MIT license <nl> similarity index 86 % <nl> rename from validation - test / compiler_crashers / 1509 - bool . swift <nl> rename to validation - test / compiler_crashers_fixed / 1509 - bool . swift <nl> mmm a / validation - test / compiler_crashers / 1509 - bool . swift <nl> ppp b / validation - test / compiler_crashers_fixed / 1509 - bool . 
swift <nl> <nl> - / / RUN : not - - crash % target - swift - frontend % s - parse <nl> + / / RUN : not % target - swift - frontend % s - parse <nl> <nl> / / Distributed under the terms of the MIT license <nl> / / Test case submitted to project by https : / / github . com / practicalswift ( practicalswift ) <nl>
Update compiler crashers after r29031
apple/swift
190208b187d2b46c35d8242682e6a3aa5365be66
2015-05-26T17:48:07Z
mmm a / tests / queries / 0_stateless / 01602_insert_into_table_function_cluster . reference <nl> ppp b / tests / queries / 0_stateless / 01602_insert_into_table_function_cluster . reference <nl> <nl> + 0 <nl> + 0 <nl> + 1 <nl> + 1 <nl> + 2 <nl> + 2 <nl> + 3 <nl> + 3 <nl> + 4 <nl> + 4 <nl> + 5 <nl> + 5 <nl> + 6 <nl> + 6 <nl> + 7 <nl> + 7 <nl> + 8 <nl> + 8 <nl> + 9 <nl> + 9 <nl> mmm a / tests / queries / 0_stateless / 01602_insert_into_table_function_cluster . sql <nl> ppp b / tests / queries / 0_stateless / 01602_insert_into_table_function_cluster . sql <nl> <nl> DROP TABLE IF EXISTS default . x ; <nl> <nl> - CREATE TABLE default . x ON CLUSTER test_cluster_two_shards_localhost AS system . numbers ENGINE = Log ; <nl> + CREATE TABLE default . x ON CLUSTER test_shard_localhost AS system . numbers ENGINE = Log ; <nl> <nl> - INSERT INTO FUNCTION cluster ( ' test_cluster_two_shards_localhost ' , default , x , rand ( ) ) SELECT * FROM numbers ( 10 ) ; <nl> + INSERT INTO FUNCTION cluster ( ' test_shard_localhost ' , default , x ) SELECT * FROM numbers ( 10 ) ; <nl> + - - In fact , in this case ( just one shard ) , sharding key is not required <nl> + INSERT INTO FUNCTION cluster ( ' test_shard_localhost ' , default , x , rand ( ) ) SELECT * FROM numbers ( 10 ) ; <nl> <nl> - DROP TABLE default . x ON CLUSTER test_cluster_two_shards_localhost ; <nl> + SELECT * FROM default . x ORDER BY number ; <nl> + <nl> + DROP TABLE default . x ON CLUSTER test_shard_localhost ; <nl>
fix test
ClickHouse/ClickHouse
ec7202939c667fba476036d7eb6363e62b0e0d0e
2020-12-20T09:12:29Z
mmm a / README . md <nl> ppp b / README . md <nl> Supported Cars <nl> | Toyota | Corolla Hybrid 2020 | All | openpilot | 0mph | 0mph | <nl> | Toyota | Highlander 2017 - 19 | All | Stock < sup > 4 < / sup > | 0mph | 0mph | <nl> | Toyota | Highlander Hybrid 2017 - 19 | All | Stock < sup > 4 < / sup > | 0mph | 0mph | <nl> + | Toyota | Highlander 2020 Limited | All | openpilot | 0mph | 0mph | <nl> | Toyota | Prius 2016 | TSS - P | Stock < sup > 4 < / sup > | 0mph | 0mph | <nl> | Toyota | Prius 2017 - 19 | All | Stock < sup > 4 < / sup > | 0mph | 0mph | <nl> | Toyota | Prius Prime 2017 - 20 | All | Stock < sup > 4 < / sup > | 0mph | 0mph | <nl> mmm a / selfdrive / car / toyota / interface . py <nl> ppp b / selfdrive / car / toyota / interface . py <nl> def get_params ( candidate , fingerprint = gen_empty_fingerprint ( ) , has_relay = False , <nl> ret . lateralTuning . pid . kpV , ret . lateralTuning . pid . kiV = [ [ 0 . 6 ] , [ 0 . 1 ] ] <nl> ret . lateralTuning . pid . kf = 0 . 00006 <nl> <nl> + elif candidate = = CAR . HIGHLANDER_TSS2 : <nl> + stop_and_go = True <nl> + ret . safetyParam = 73 <nl> + ret . wheelbase = 2 . 84988 # 112 . 2 in = 2 . 84988 m <nl> + ret . steerRatio = 16 . 0 <nl> + tire_stiffness_factor = 0 . 8 <nl> + ret . mass = 4700 . * CV . LB_TO_KG + STD_CARGO_KG # 4260 + 4 - 5 people <nl> + ret . lateralTuning . pid . kpV , ret . lateralTuning . pid . kiV = [ [ 0 . 18 ] , [ 0 . 015 ] ] # community tuning <nl> + ret . lateralTuning . pid . kf = 0 . 00012 # community tuning <nl> + <nl> elif candidate in [ CAR . HIGHLANDER , CAR . HIGHLANDERH ] : <nl> stop_and_go = True <nl> ret . safetyParam = 73 <nl> def get_params ( candidate , fingerprint = gen_empty_fingerprint ( ) , has_relay = False , <nl> ret . lateralTuning . pid . kpV , ret . lateralTuning . pid . kiV = [ [ 0 . 6 ] , [ 0 . 1 ] ] <nl> ret . mass = 3370 . * CV . LB_TO_KG + STD_CARGO_KG <nl> ret . lateralTuning . pid . kf = 0 . 00007818594 <nl> - <nl> + <nl> elif candidate = = CAR . RAV4H_TSS2 : <nl> stop_and_go = True <nl> ret . safetyParam = 73 <nl> mmm a / selfdrive / car / toyota / values . py <nl> ppp b / selfdrive / car / toyota / values . 
py <nl> class CAR : <nl> CAMRY = " TOYOTA CAMRY 2018 " <nl> CAMRYH = " TOYOTA CAMRY HYBRID 2018 " <nl> HIGHLANDER = " TOYOTA HIGHLANDER 2017 " <nl> + HIGHLANDER_TSS2 = " TOYOTA HIGHLANDER 2020 " <nl> HIGHLANDERH = " TOYOTA HIGHLANDER HYBRID 2018 " <nl> AVALON = " TOYOTA AVALON 2016 " <nl> RAV4_TSS2 = " TOYOTA RAV4 2019 " <nl> class CAR : <nl> { <nl> 36 : 8 , 37 : 8 , 114 : 5 , 119 : 6 , 120 : 4 , 170 : 8 , 180 : 8 , 186 : 4 , 238 : 4 , 355 : 5 , 426 : 6 , 452 : 8 , 464 : 8 , 466 : 8 , 467 : 8 , 544 : 4 , 545 : 5 , 550 : 8 , 552 : 4 , 608 : 8 , 610 : 5 , 643 : 7 , 705 : 8 , 725 : 2 , 740 : 5 , 800 : 8 , 835 : 8 , 836 : 8 , 849 : 4 , 869 : 7 , 870 : 7 , 871 : 2 , 896 : 8 , 900 : 6 , 902 : 6 , 905 : 8 , 911 : 8 , 916 : 3 , 918 : 7 , 921 : 8 , 922 : 8 , 933 : 8 , 944 : 8 , 945 : 8 , 951 : 8 , 955 : 8 , 956 : 8 , 979 : 2 , 998 : 5 , 999 : 7 , 1000 : 8 , 1001 : 8 , 1008 : 2 , 1014 : 8 , 1017 : 8 , 1020 : 8 , 1041 : 8 , 1042 : 8 , 1043 : 8 , 1044 : 8 , 1056 : 8 , 1059 : 1 , 1114 : 8 , 1161 : 8 , 1162 : 8 , 1163 : 8 , 1176 : 8 , 1177 : 8 , 1178 : 8 , 1179 : 8 , 1180 : 8 , 1181 : 8 , 1182 : 8 , 1183 : 8 , 1189 : 8 , 1190 : 8 , 1191 : 8 , 1192 : 8 , 1196 : 8 , 1197 : 8 , 1198 : 8 , 1199 : 8 , 1206 : 8 , 1207 : 8 , 1212 : 8 , 1227 : 8 , 1235 : 8 , 1237 : 8 , 1263 : 8 , 1279 : 8 , 1408 : 8 , 1409 : 8 , 1410 : 8 , 1552 : 8 , 1553 : 8 , 1554 : 8 , 1556 : 8 , 1557 : 8 , 1561 : 8 , 1562 : 8 , 1568 : 8 , 1569 : 8 , 1570 : 8 , 1571 : 8 , 1572 : 8 , 1584 : 8 , 1585 : 8 , 1589 : 8 , 1592 : 8 , 1593 : 8 , 1595 : 8 , 1599 : 8 , 1656 : 8 , 1728 : 8 , 1745 : 8 , 1779 : 8 , 1872 : 8 , 1880 : 8 , 1904 : 8 , 1912 : 8 , 1988 : 8 , 1990 : 8 , 1996 : 8 , 1998 : 8 , 2015 : 8 , 2016 : 8 , 2024 : 8 <nl> } ] , <nl> + CAR . HIGHLANDER_TSS2 : [ { <nl> + # 2020 highlander limited <nl> + 36 : 8 , 37 : 8 , 114 : 5 , 119 : 6 , 120 : 4 , 170 : 8 , 180 : 8 , 186 : 4 , 355 : 5 , 401 : 8 , 426 : 6 , 452 : 8 , 464 : 8 , 466 : 8 , 467 : 8 , 544 : 4 , 550 : 8 , 552 : 4 , 562 : 6 , 565 : 8 , 608 : 8 , 610 : 8 , 643 : 7 , 658 : 8 , 705 : 8 , 728 : 8 , 740 : 5 , 761 : 8 , 764 : 8 , 765 : 8 , 800 : 8 , 810 : 2 , 812 : 8 , 814 : 8 , 818 : 8 , 824 : 8 , 830 : 7 , 835 : 8 , 836 : 8 , 865 : 8 , 869 : 7 , 870 : 7 , 871 : 2 , 877 : 8 , 881 : 8 , 885 : 8 , 889 : 8 , 896 : 8 , 898 : 8 , 900 : 6 , 902 : 6 , 905 : 8 , 918 : 8 , 921 : 8 , 933 : 8 , 934 : 8 , 935 : 8 , 944 : 8 , 945 : 8 , 951 : 8 , 955 : 8 , 956 : 8 , 976 : 1 , 987 : 8 , 998 : 5 , 999 : 7 , 1000 : 8 , 1001 : 8 , 1002 : 8 , 1014 : 8 , 1017 : 8 , 1020 : 8 , 1041 : 8 , 1042 : 8 , 1044 : 8 , 1056 : 8 , 1059 : 1 , 1063 : 8 , 1076 : 8 , 1077 : 8 , 1082 : 8 , 1114 : 8 , 1161 : 8 , 1162 : 8 , 1163 : 8 , 1164 : 8 , 1165 : 8 , 1166 : 8 , 1167 : 8 , 1228 : 8 , 1235 : 8 , 1237 : 8 , 1264 : 8 , 1279 : 8 , 1541 : 8 , 1552 : 8 , 1553 : 8 , 1556 : 8 , 1557 : 8 , 1568 : 8 , 1570 : 8 , 1571 : 8 , 1572 : 8 , 1592 : 8 , 1594 : 8 , 1595 : 8 , 1649 : 8 , 1696 : 8 , 1775 : 8 , 1779 : 8 , 1786 : 8 , 1787 : 8 , 1788 : 8 , 1789 : 8 , 1808 : 8 , 1816 : 8 , 1904 : 8 , 1912 : 8 , 1952 : 8 , 1960 : 8 , 1990 : 8 , 1998 : 8 <nl> + } ] , <nl> CAR . 
HIGHLANDERH : [ { <nl> 36 : 8 , 37 : 8 , 170 : 8 , 180 : 8 , 296 : 8 , 426 : 6 , 452 : 8 , 466 : 8 , 467 : 8 , 550 : 8 , 552 : 4 , 560 : 7 , 581 : 5 , 608 : 8 , 610 : 5 , 643 : 7 , 713 : 8 , 740 : 5 , 800 : 8 , 835 : 8 , 836 : 8 , 849 : 4 , 869 : 7 , 870 : 7 , 871 : 2 , 896 : 8 , 897 : 8 , 900 : 6 , 902 : 6 , 905 : 8 , 911 : 8 , 916 : 3 , 918 : 7 , 921 : 8 , 933 : 8 , 944 : 8 , 945 : 8 , 950 : 8 , 951 : 8 , 953 : 3 , 955 : 8 , 956 : 8 , 979 : 2 , 998 : 5 , 999 : 7 , 1000 : 8 , 1001 : 8 , 1005 : 2 , 1014 : 8 , 1017 : 8 , 1020 : 8 , 1041 : 8 , 1042 : 8 , 1043 : 8 , 1044 : 8 , 1056 : 8 , 1059 : 1 , 1112 : 8 , 1114 : 8 , 1161 : 8 , 1162 : 8 , 1163 : 8 , 1176 : 8 , 1177 : 8 , 1178 : 8 , 1179 : 8 , 1180 : 8 , 1181 : 8 , 1184 : 8 , 1185 : 8 , 1186 : 8 , 1189 : 8 , 1190 : 8 , 1191 : 8 , 1192 : 8 , 1196 : 8 , 1197 : 8 , 1198 : 8 , 1199 : 8 , 1206 : 8 , 1212 : 8 , 1227 : 8 , 1232 : 8 , 1235 : 8 , 1237 : 8 , 1263 : 8 , 1264 : 8 , 1279 : 8 , 1552 : 8 , 1553 : 8 , 1554 : 8 , 1556 : 8 , 1557 : 8 , 1561 : 8 , 1562 : 8 , 1568 : 8 , 1569 : 8 , 1570 : 8 , 1571 : 8 , 1572 : 8 , 1584 : 8 , 1589 : 8 , 1592 : 8 , 1593 : 8 , 1595 : 8 , 1599 : 8 , 1656 : 8 , 1728 : 8 , 1745 : 8 , 1779 : 8 , 1904 : 8 , 1912 : 8 , 1990 : 8 , 1998 : 8 <nl> } , <nl> class CAR : <nl> CAR . CAMRY : dbc_dict ( ' toyota_nodsu_pt_generated ' , ' toyota_adas ' ) , <nl> CAR . CAMRYH : dbc_dict ( ' toyota_camry_hybrid_2018_pt_generated ' , ' toyota_adas ' ) , <nl> CAR . HIGHLANDER : dbc_dict ( ' toyota_highlander_2017_pt_generated ' , ' toyota_adas ' ) , <nl> + CAR . HIGHLANDER_TSS2 : dbc_dict ( ' toyota_nodsu_pt_generated ' , ' toyota_tss2_adas ' ) , <nl> CAR . HIGHLANDERH : dbc_dict ( ' toyota_highlander_hybrid_2018_pt_generated ' , ' toyota_adas ' ) , <nl> CAR . AVALON : dbc_dict ( ' toyota_avalon_2017_pt_generated ' , ' toyota_adas ' ) , <nl> CAR . RAV4_TSS2 : dbc_dict ( ' toyota_nodsu_pt_generated ' , ' toyota_tss2_adas ' ) , <nl> class CAR : <nl> CAR . RAV4H_TSS2 : dbc_dict ( ' toyota_nodsu_hybrid_pt_generated ' , ' toyota_tss2_adas ' ) , <nl> } <nl> <nl> - NO_DSU_CAR = [ CAR . CHR , CAR . CHRH , CAR . CAMRY , CAR . CAMRYH , CAR . RAV4_TSS2 , CAR . COROLLA_TSS2 , CAR . COROLLAH_TSS2 , CAR . LEXUS_ES_TSS2 , CAR . LEXUS_ESH_TSS2 , CAR . RAV4H_TSS2 , CAR . LEXUS_RX_TSS2 ] <nl> - TSS2_CAR = [ CAR . RAV4_TSS2 , CAR . COROLLA_TSS2 , CAR . COROLLAH_TSS2 , CAR . LEXUS_ES_TSS2 , CAR . LEXUS_ESH_TSS2 , CAR . RAV4H_TSS2 , CAR . LEXUS_RX_TSS2 ] <nl> - NO_STOP_TIMER_CAR = [ CAR . RAV4H , CAR . HIGHLANDERH , CAR . HIGHLANDER , CAR . RAV4_TSS2 , CAR . COROLLA_TSS2 , CAR . COROLLAH_TSS2 , CAR . LEXUS_ES_TSS2 , CAR . LEXUS_ESH_TSS2 , CAR . SIENNA , CAR . RAV4H_TSS2 , CAR . LEXUS_RX_TSS2 ] # no resume button press required <nl> + NO_DSU_CAR = [ CAR . CHR , CAR . CHRH , CAR . CAMRY , CAR . CAMRYH , CAR . RAV4_TSS2 , CAR . COROLLA_TSS2 , CAR . COROLLAH_TSS2 , CAR . LEXUS_ES_TSS2 , CAR . LEXUS_ESH_TSS2 , CAR . RAV4H_TSS2 , CAR . LEXUS_RX_TSS2 , CAR . HIGHLANDER_TSS2 ] <nl> + TSS2_CAR = [ CAR . RAV4_TSS2 , CAR . COROLLA_TSS2 , CAR . COROLLAH_TSS2 , CAR . LEXUS_ES_TSS2 , CAR . LEXUS_ESH_TSS2 , CAR . RAV4H_TSS2 , CAR . LEXUS_RX_TSS2 , CAR . HIGHLANDER_TSS2 ] <nl> + NO_STOP_TIMER_CAR = [ CAR . RAV4H , CAR . HIGHLANDERH , CAR . HIGHLANDER , CAR . RAV4_TSS2 , CAR . COROLLA_TSS2 , CAR . COROLLAH_TSS2 , CAR . LEXUS_ES_TSS2 , CAR . LEXUS_ESH_TSS2 , CAR . SIENNA , CAR . RAV4H_TSS2 , CAR . LEXUS_RX_TSS2 , CAR . HIGHLANDER_TSS2 ] # no resume button press required <nl> mmm a / selfdrive / test / test_car_models . py <nl> ppp b / selfdrive / test / test_car_models . 
py <nl> def get_route_log ( route_name ) : <nl> # ' enableDsu ' : False , <nl> # } , <nl> # TODO : missingsome combos for highlander <nl> + " 0a302ffddbb3e3d3 | 2020 - 02 - 08 - - 16 - 19 - 08 " : { <nl> + ' carFingerprint ' : TOYOTA . HIGHLANDER_TSS2 , <nl> + ' enableCamera ' : True , <nl> + ' enableDsu ' : False , <nl> + } , <nl> " aa659debdd1a7b54 | 2018 - 08 - 31 - - 11 - 12 - 01 " : { <nl> ' carFingerprint ' : TOYOTA . HIGHLANDER , <nl> ' enableCamera ' : False , <nl>
2020 Toyota Highlander ( TSS2 ) ( )
commaai/openpilot
4ea729460762251686d74c1d0d5af23a93c7493d
2020-02-11T19:06:50Z
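The fingerprint added for `CAR.HIGHLANDER_TSS2` above maps CAN message IDs to payload lengths. openpilot's actual identification code is not shown in this record, so purely as an illustration of how such a table can be used, the following hypothetical Python sketch narrows a set of candidate cars by checking each observed (address, length) pair against the candidates' fingerprints; the fingerprint values below are invented placeholders, not real vehicle data.

```python
# Hypothetical fingerprints: CAN message ID -> payload length (placeholder values).
FINGERPRINTS = {
    "TOYOTA HIGHLANDER 2020": [{36: 8, 37: 8, 114: 5, 401: 8}],
    "TOYOTA HIGHLANDER 2017": [{36: 8, 37: 8, 114: 5, 119: 6}],
}

def eliminate(candidates, address, length):
    """Keep only candidates whose fingerprint contains this (address, length) pair."""
    kept = set()
    for car in candidates:
        if any(fp.get(address) == length for fp in FINGERPRINTS[car]):
            kept.add(car)
    return kept

candidates = set(FINGERPRINTS)
for address, length in [(36, 8), (401, 8)]:   # observed CAN messages
    candidates = eliminate(candidates, address, length)
print(candidates)   # only the 2020 placeholder entry survives the 401:8 message
```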
mmm a / src / containers / buffer_group . hpp <nl> ppp b / src / containers / buffer_group . hpp <nl> class const_buffer_group_t { <nl> } <nl> return * this ; <nl> } <nl> - iterator operator + + ( int ) { <nl> - return operator + + ( ) ; <nl> - } <nl> bool operator = = ( iterator const & other ) { <nl> return it = = other . it & & offset = = other . offset ; <nl> } <nl> mmm a / src / riak / structures . cc <nl> ppp b / src / riak / structures . cc <nl> object_t : : object_t ( std : : string const & key , std : : string const & bucket , riak_value <nl> <nl> / * grab the content type * / <nl> content_type . reserve ( val - > content_type_len ) ; <nl> - for ( unsigned i = 0 ; i < val - > content_type_len ; i + + ) { <nl> + for ( unsigned i = 0 ; i < val - > content_type_len ; + + i ) { <nl> content_type + = * it ; <nl> - it + + ; <nl> + + + it ; <nl> } <nl> } <nl> <nl> object_t : : object_t ( std : : string const & key , std : : string const & bucket , riak_value <nl> const_buffer_group_t : : iterator it = buffer_group . begin ( ) ; <nl> <nl> resize_content ( val - > value_len ) ; <nl> - for ( char * hd = content . get ( ) ; hd - content . get ( ) < val - > value_len ; hd + + ) { <nl> + for ( char * hd = content . get ( ) ; hd - content . get ( ) < val - > value_len ; + + hd ) { <nl> * hd = * it ; <nl> - it + + ; <nl> + + + it ; <nl> } <nl> } else { <nl> / / getting a range <nl> object_t : : object_t ( std : : string const & key , std : : string const & bucket , riak_value <nl> const_buffer_group_t : : iterator it = buffer_group . begin ( ) ; <nl> <nl> resize_content ( range . second - range . first ) ; <nl> - for ( char * hd = content . get ( ) ; hd - content . get ( ) < range . second - range . first ; hd + + ) { <nl> + for ( char * hd = content . get ( ) ; hd - content . get ( ) < range . second - range . first ; + + hd ) { <nl> * hd = * it ; <nl> - it + + ; <nl> + + + it ; <nl> } <nl> } <nl> } <nl> object_t : : object_t ( std : : string const & key , std : : string const & bucket , riak_value <nl> link_hdr_t link_hdr ; <nl> link_t link ; <nl> <nl> + / / TODO : Is this right ? * it should return a char , no ? <nl> + / / That ' s signed . Is it right for these to be signed <nl> + / / interpretations of the byte ? <nl> link_hdr . bucket_len = * it ; <nl> - it + + ; <nl> + + + it ; <nl> link_hdr . key_len = * it ; <nl> - it + + ; <nl> + + + it ; <nl> link_hdr . tag_len = * it ; <nl> - it + + ; <nl> + + + it ; <nl> <nl> link . bucket . reserve ( link_hdr . bucket_len ) ; <nl> for ( int j = 0 ; j < link_hdr . bucket_len ; j + + ) { <nl> link . bucket . push_back ( * it ) ; <nl> - it + + ; <nl> + + + it ; <nl> } <nl> <nl> <nl> link . key . reserve ( link_hdr . key_len ) ; <nl> for ( int j = 0 ; j < link_hdr . key_len ; j + + ) { <nl> link . key . push_back ( * it ) ; <nl> - it + + ; <nl> + + + it ; <nl> } <nl> <nl> link . key . reserve ( link_hdr . tag_len ) ; <nl> for ( int j = 0 ; j < link_hdr . tag_len ; j + + ) { <nl> link . tag . push_back ( * it ) ; <nl> - it + + ; <nl> + + + it ; <nl> } <nl> <nl> links . push_back ( link ) ; <nl>
Removed the broken postfix operator + + of const_buffer_group_t : : iterator or whatever it is called . Fixed callers of it , too .
rethinkdb/rethinkdb
8237bf7cc5c89692d969a215573c567a65af0c71
2012-03-15T07:36:31Z
mmm a / ports / spdlog / CONTROL <nl> ppp b / ports / spdlog / CONTROL <nl> <nl> Source : spdlog <nl> - Version : 1 . 3 . 1 <nl> + Version : 1 . 3 . 1 - 1 <nl> Homepage : https : / / github . com / gabime / spdlog <nl> Description : Very fast , header only , C + + logging library <nl> Build - Depends : fmt <nl> + <nl> + Feature : benchmark <nl> + Description : Use google benchmark <nl> + Build - Depends : benchmark <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . ad1239fd797 <nl> mmm / dev / null <nl> ppp b / ports / spdlog / fix - feature - export . patch <nl> <nl> pppmmm a / bench / CMakeLists . txt <nl> ppp + b / bench / CMakeLists . txt <nl> + add_executable ( formatter - bench formatter - bench . cpp ) <nl> + target_link_libraries ( formatter - bench PRIVATE benchmark : : benchmark spdlog : : spdlog Threads : : Threads ) <nl> + <nl> + file ( MAKE_DIRECTORY " $ { CMAKE_CURRENT_BINARY_DIR } / logs " ) <nl> + + <nl> + + install ( TARGETS bench async_bench latency <nl> + + RUNTIME DESTINATION tools / spdlog <nl> + + LIBRARY DESTINATION lib <nl> + + ARCHIVE DESTINATION lib ) <nl> + \ No newline at end of file <nl> mmm a / ports / spdlog / portfile . cmake <nl> ppp b / ports / spdlog / portfile . cmake <nl> <nl> # header - only library <nl> include ( vcpkg_common_functions ) <nl> + <nl> vcpkg_from_github ( <nl> OUT_SOURCE_PATH SOURCE_PATH <nl> REPO gabime / spdlog <nl> vcpkg_from_github ( <nl> HEAD_REF v1 . x <nl> PATCHES <nl> disable - master - project - check . patch <nl> + fix - feature - export . patch <nl> ) <nl> <nl> + set ( SPDLOG_USE_BENCHMARK OFF ) <nl> + if ( " benchmark " IN_LIST FEATURES ) <nl> + set ( SPDLOG_USE_BENCHMARK ON ) <nl> + endif ( ) <nl> + <nl> vcpkg_configure_cmake ( <nl> SOURCE_PATH $ { SOURCE_PATH } <nl> PREFER_NINJA <nl> OPTIONS <nl> - DSPDLOG_FMT_EXTERNAL = ON <nl> + - DSPDLOG_BUILD_BENCH = $ { SPDLOG_USE_BENCHMARK } <nl> ) <nl> <nl> vcpkg_install_cmake ( ) <nl> <nl> vcpkg_fixup_cmake_targets ( CONFIG_PATH lib / cmake / spdlog ) <nl> <nl> + vcpkg_copy_pdbs ( ) <nl> + <nl> file ( REMOVE_RECURSE $ { CURRENT_PACKAGES_DIR } / debug ) <nl> file ( REMOVE_RECURSE $ { CURRENT_PACKAGES_DIR } / lib ) <nl> <nl>
[ spdlog ] Add feature [ benchmark ] ( )
microsoft/vcpkg
49d1759ec85c31eb50d07b42d245b989b99df37c
2019-06-18T18:11:06Z
mmm a / src / tests / test . h <nl> ppp b / src / tests / test . h <nl> <nl> # include < gtest / gtest . h > <nl> # include < allegro . h > <nl> <nl> + # ifdef TEST_GUI <nl> + # include " jinete / jinete . h " <nl> + # endif <nl> + <nl> / / Allegro - friendly main ( ) routine <nl> int main ( int argc , char * argv [ ] ) <nl> { <nl>
Include jinete / jinete . h for GUI tests .
aseprite/aseprite
04f92262c53e82b03c8364e37f4822e572706464
2010-08-25T20:51:55Z
mmm a / hphp / hack / src / client / clientArgs . ml <nl> ppp b / hphp / hack / src / client / clientArgs . ml <nl> let parse_check_args cmd = <nl> let root , paths = <nl> match mode , args with <nl> | MODE_LINT , _ <nl> - | MODE_FILE_DEPENDENTS , _ - > ClientArgsUtils . get_root None , args <nl> - | _ , [ ] - > ClientArgsUtils . get_root None , [ ] <nl> - | _ , [ x ] - > ClientArgsUtils . get_root ( Some x ) , [ ] <nl> + | MODE_FILE_DEPENDENTS , _ - > Wwwroot . get None , args <nl> + | _ , [ ] - > Wwwroot . get None , [ ] <nl> + | _ , [ x ] - > Wwwroot . get ( Some x ) , [ ] <nl> | _ , _ - > <nl> Printf . fprintf stderr <nl> " Error : please provide at most one www directory \ n % ! " ; <nl> let parse_start_env command = <nl> let args = parse_without_command options usage command in <nl> let root = <nl> match args with <nl> - | [ ] - > ClientArgsUtils . get_root None <nl> - | [ x ] - > ClientArgsUtils . get_root ( Some x ) <nl> + | [ ] - > Wwwroot . get None <nl> + | [ x ] - > Wwwroot . get ( Some x ) <nl> | _ - > <nl> Printf . fprintf stderr <nl> " Error : please provide at most one www directory \ n % ! " ; <nl> let parse_stop_args ( ) = <nl> let args = parse_without_command options usage " stop " in <nl> let root = <nl> match args with <nl> - | [ ] - > ClientArgsUtils . get_root None <nl> - | [ x ] - > ClientArgsUtils . get_root ( Some x ) <nl> + | [ ] - > Wwwroot . get None <nl> + | [ x ] - > Wwwroot . get ( Some x ) <nl> | _ - > <nl> Printf . fprintf stderr <nl> " Error : please provide at most one www directory \ n % ! " ; <nl> let parse_debug_args ( ) = <nl> let args = parse_without_command options usage " debug " in <nl> let root = <nl> match args with <nl> - | [ ] - > ClientArgsUtils . get_root None <nl> - | [ x ] - > ClientArgsUtils . get_root ( Some x ) <nl> + | [ ] - > Wwwroot . get None <nl> + | [ x ] - > Wwwroot . get ( Some x ) <nl> | _ - > Printf . printf " % s \ n " usage ; exit 2 in <nl> CDebug { ClientDebug . <nl> root ; <nl> deleted file mode 100644 <nl> index 0746f20f92c . . 00000000000 <nl> mmm a / hphp / hack / src / client / clientArgsUtils . ml <nl> ppp / dev / null <nl> <nl> - ( * * <nl> - * Copyright ( c ) 2016 , Facebook , Inc . <nl> - * All rights reserved . <nl> - * <nl> - * This source code is licensed under the MIT license found in the <nl> - * LICENSE file in the " hack " directory of this source tree . <nl> - * <nl> - * ) <nl> - <nl> - <nl> - ( * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * ) <nl> - ( * * Utils for processing parsed client args . * ) <nl> - ( * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * ) <nl> - <nl> - let rec guess_root config start recursion_limit : Path . t option = <nl> - if start = Path . parent start then None ( * Reach fs root , nothing to do . * ) <nl> - else if Wwwroot . is_www_directory ~ config start then Some start <nl> - else if recursion_limit < = 0 then None <nl> - else guess_root config ( Path . parent start ) ( recursion_limit - 1 ) <nl> - <nl> - <nl> - let get_root ? ( config = " . hhconfig " ) path_opt = <nl> - let start_str = match path_opt with <nl> - | None - > " . " <nl> - | Some s - > s in <nl> - let start_path = Path . make start_str in <nl> - let root = match guess_root config start_path 50 with <nl> - | None - > start_path <nl> - | Some r - > r in <nl> - Wwwroot . 
assert_www_directory ~ config root ; <nl> - root <nl> mmm a / hphp / hack / src / client / clientLsp . ml <nl> ppp b / hphp / hack / src / client / clientLsp . ml <nl> let get_root_opt ( ) : Path . t option = <nl> None ( * haven ' t yet received initialize so we don ' t know * ) <nl> | Some initialize_params - > <nl> let path = Some ( Lsp_helpers . get_root initialize_params ) in <nl> - Some ( ClientArgsUtils . get_root path ) <nl> + Some ( Wwwroot . get path ) <nl> <nl> <nl> let get_root_wait ( ) : Path . t Lwt . t = <nl> let % lwt initialize_params = initialize_params_promise in <nl> let path = Lsp_helpers . get_root initialize_params in <nl> - Lwt . return ( ClientArgsUtils . get_root ( Some path ) ) <nl> + Lwt . return ( Wwwroot . get ( Some path ) ) <nl> <nl> <nl> let read_hhconfig_version ( ) : string = <nl> mmm a / hphp / hack / src / utils / wwwroot . ml <nl> ppp b / hphp / hack / src / utils / wwwroot . ml <nl> let assert_www_directory ? ( config = " . hhconfig " ) ( path : Path . t ) : unit = <nl> flush stderr ; <nl> exit 1 <nl> end <nl> + <nl> + let rec guess_root config start ~ recursion_limit : Path . t option = <nl> + if start = Path . parent start then None ( * Reached file system root * ) <nl> + else if is_www_directory ~ config start then Some start <nl> + else if recursion_limit < = 0 then None <nl> + else guess_root config ( Path . parent start ) ( recursion_limit - 1 ) <nl> + <nl> + let get ? ( config = " . hhconfig " ) ( path : string option ) : Path . t = <nl> + let start_str = match path with <nl> + | None - > " . " <nl> + | Some s - > s in <nl> + let start_path = Path . make start_str in <nl> + let root = match guess_root config start_path ~ recursion_limit : 50 with <nl> + | None - > start_path <nl> + | Some r - > r in <nl> + assert_www_directory ~ config root ; <nl> + root <nl> mmm a / hphp / hack / src / utils / wwwroot . mli <nl> ppp b / hphp / hack / src / utils / wwwroot . mli <nl> shape . * ) <nl> <nl> val is_www_directory : ? config : string - > Path . t - > bool <nl> val assert_www_directory : ? config : string - > Path . t - > unit <nl> + val get : ? config : string - > string option - > Path . t <nl>
Move get_root into a more common location
facebook/hhvm
1352321f9db7b75d07308c8dfebabbbb99a71166
2019-05-24T22:28:46Z
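The OCaml `guess_root` relocated in the commit above walks upward from a start directory until it finds a directory containing `.hhconfig`, stopping at the filesystem root or after a fixed number of parent hops, and `Wwwroot.get` falls back to the start path when nothing is found. As a language-neutral sketch of that search strategy (this is not the hh_client code), the same idea in Python:

```python
from pathlib import Path

def guess_root(start: str, config: str = ".hhconfig", recursion_limit: int = 50):
    """Walk up parent directories until one containing `config` is found."""
    current = Path(start).resolve()
    for _ in range(recursion_limit + 1):
        if (current / config).exists():
            return current
        if current.parent == current:          # reached the filesystem root
            return None
        current = current.parent
    return None

# Mirror Wwwroot.get: default to the starting directory when no root is found.
start = "."
root = guess_root(start) or Path(start).resolve()
print(root)
```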
mmm a / configure . py <nl> ppp b / configure . py <nl> def set_tf_cuda_compute_capabilities ( environ_cp ) : <nl> if ver < 3 : <nl> print ( ' Only compute capabilities 3 . 0 or higher are supported . ' ) <nl> all_valid = False <nl> - elif float ( compute_capability ) < 3 . 5 : <nl> + elif warning_flag = = False and float ( compute_capability ) < 3 . 5 : <nl> warning_flag = True <nl> <nl> if warning_flag : <nl>
modify the if condition that sets the warning flag for compute capabilities
tensorflow/tensorflow
6313548a1f3e786affe88092ed0021676f03c0bf
2019-02-19T09:16:38Z
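The change above gates the flag assignment so it only happens the first time a compute capability below 3.5 is seen, with the single warning still printed after the loop. A standalone sketch of that warn-once pattern, using made-up capability values rather than TensorFlow's configure script:

```python
compute_capabilities = ["3.0", "5.2", "3.0", "7.0"]   # example user input

warning_flag = False
for compute_capability in compute_capabilities:
    if not warning_flag and float(compute_capability) < 3.5:
        warning_flag = True        # remember that a warning is already pending

if warning_flag:
    print("WARNING: compute capabilities below 3.5 may not be fully supported.")
```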
mmm a / src / mongo / db / client . cpp <nl> ppp b / src / mongo / db / client . cpp <nl> <nl> # include " mongo / db / catalog / database_holder . h " <nl> # include " mongo / db / commands . h " <nl> # include " mongo / db / curop . h " <nl> - # include " mongo / db / dbwebserver . h " <nl> # include " mongo / db / instance . h " <nl> # include " mongo / db / json . h " <nl> # include " mongo / db / lasterror . h " <nl> namespace mongo { <nl> _threadId ( boost : : this_thread : : get_id ( ) ) , <nl> _connectionId ( p ? p - > connectionId ( ) : 0 ) , <nl> _inDirectClient ( false ) , <nl> - _txn ( NULL ) , <nl> - _shutdown ( false ) { <nl> + _txn ( NULL ) { <nl> <nl> _curOp = new CurOp ( this ) ; <nl> } <nl> namespace mongo { <nl> / / we can ' t clean up safely once we ' re in shutdown <nl> { <nl> boost : : lock_guard < boost : : mutex > clientLock ( clientsMutex ) ; <nl> - if ( ! _shutdown ) <nl> - clients . erase ( this ) ; <nl> + clients . erase ( this ) ; <nl> } <nl> <nl> CurOp * last ; <nl> namespace mongo { <nl> } <nl> <nl> bool Client : : shutdown ( ) { <nl> - _shutdown = true ; <nl> - if ( inShutdown ( ) ) <nl> - return false ; <nl> - { <nl> + if ( ! inShutdown ( ) ) { <nl> boost : : lock_guard < boost : : mutex > clientLock ( clientsMutex ) ; <nl> clients . erase ( this ) ; <nl> } <nl> - <nl> return false ; <nl> } <nl> <nl> mmm a / src / mongo / db / client . h <nl> ppp b / src / mongo / db / client . h <nl> namespace mongo { <nl> <nl> / / Changes , based on what operation is running . Some of this should be in OperationContext . <nl> CurOp * _curOp ; <nl> - <nl> - / / Tracks if Client : : shutdown ( ) gets called ( TODO : Is this necessary ? ) <nl> - bool _shutdown ; <nl> } ; <nl> <nl> / * * get the Client object for this thread . * / <nl> mmm a / src / mongo / s / s_only . cpp <nl> ppp b / src / mongo / s / s_only . cpp <nl> namespace mongo { <nl> ClientBasic ( serviceContext , p ) , <nl> _desc ( desc ) , <nl> _connectionId ( ) , <nl> - _inDirectClient ( false ) , <nl> - _shutdown ( false ) { <nl> + _inDirectClient ( false ) { <nl> } <nl> Client : : ~ Client ( ) { } <nl> bool Client : : shutdown ( ) { return true ; } <nl>
SERVER - 17817 Remove inoperative code from client . h / cpp
mongodb/mongo
9e2cc1588f25f81983de48f2c69130203e325175
2015-04-06T22:19:51Z
mmm a / csharp / src / Google . Protobuf / WritingPrimitives . cs <nl> ppp b / csharp / src / Google . Protobuf / WritingPrimitives . cs <nl> <nl> <nl> using System ; <nl> using System . Runtime . CompilerServices ; <nl> + using System . Runtime . InteropServices ; <nl> using System . Security ; <nl> using System . Text ; <nl> <nl> public static void WriteDouble ( ref Span < byte > buffer , ref WriterInternalState st <nl> / / / Writes a float field value , without a tag , to the stream . <nl> / / / < / summary > <nl> [ MethodImpl ( MethodImplOptions . AggressiveInlining ) ] <nl> - public static void WriteFloat ( ref Span < byte > buffer , ref WriterInternalState state , float value ) <nl> + public static unsafe void WriteFloat ( ref Span < byte > buffer , ref WriterInternalState state , float value ) <nl> { <nl> - / / TODO : avoid allocating a byte array ! ! ! <nl> - byte [ ] rawBytes = BitConverter . GetBytes ( value ) ; <nl> - if ( ! BitConverter . IsLittleEndian ) <nl> + const int length = sizeof ( float ) ; <nl> + if ( state . limit - state . position > = length ) <nl> { <nl> - ByteArray . Reverse ( rawBytes ) ; <nl> - } <nl> + / / if there ' s enough space in the buffer , write the float directly into the buffer <nl> + var floatSpan = buffer . Slice ( state . position , length ) ; <nl> + Unsafe . WriteUnaligned ( ref MemoryMarshal . GetReference ( floatSpan ) , value ) ; <nl> <nl> - if ( state . limit - state . position > = 4 ) <nl> - { <nl> - buffer [ state . position + + ] = rawBytes [ 0 ] ; <nl> - buffer [ state . position + + ] = rawBytes [ 1 ] ; <nl> - buffer [ state . position + + ] = rawBytes [ 2 ] ; <nl> - buffer [ state . position + + ] = rawBytes [ 3 ] ; <nl> + if ( ! BitConverter . IsLittleEndian ) <nl> + { <nl> + floatSpan . Reverse ( ) ; <nl> + } <nl> + state . position + = length ; <nl> } <nl> else <nl> { <nl> - WriteRawBytes ( ref buffer , ref state , rawBytes , 0 , 4 ) ; <nl> + Span < byte > floatSpan = stackalloc byte [ length ] ; <nl> + Unsafe . WriteUnaligned ( ref MemoryMarshal . GetReference ( floatSpan ) , value ) ; <nl> + <nl> + if ( ! BitConverter . IsLittleEndian ) <nl> + { <nl> + floatSpan . Reverse ( ) ; <nl> + } <nl> + <nl> + WriteRawByte ( ref buffer , ref state , floatSpan [ 0 ] ) ; <nl> + WriteRawByte ( ref buffer , ref state , floatSpan [ 1 ] ) ; <nl> + WriteRawByte ( ref buffer , ref state , floatSpan [ 2 ] ) ; <nl> + WriteRawByte ( ref buffer , ref state , floatSpan [ 3 ] ) ; <nl> } <nl> } <nl> <nl>
optimize WriteFloat
protocolbuffers/protobuf
a296413b5abdcd86c219d4589b6e825a157706fb
2020-06-05T12:41:07Z
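The WriteFloat change above removes the temporary `byte[]` from `BitConverter.GetBytes`: when the span has room, the four bytes of the float are written in place with an unaligned write (reversed on big-endian hosts); otherwise they go through a small stack buffer and the byte-at-a-time writer. A rough Python sketch of the same fast-path/slow-path split follows, using `struct` for the byte conversion; the `buffer`/`position`/`limit` names are illustrative stand-ins, not the C# API.

```python
import struct

FLOAT_SIZE = 4  # sizeof(float)

def write_raw_byte(buffer: bytearray, position: int, b: int) -> int:
    # Simplified stand-in for WriteRawByte: a real writer would flush or
    # grow the buffer here instead of appending.
    if position >= len(buffer):
        buffer.append(b)
        return len(buffer)
    buffer[position] = b
    return position + 1

def write_float(buffer: bytearray, position: int, limit: int, value: float) -> int:
    """Write `value` as 4 little-endian bytes; return the new position."""
    # struct handles byte order explicitly, so unlike the C# code there is
    # no separate reverse step for big-endian machines.
    raw = struct.pack("<f", value)
    if limit - position >= FLOAT_SIZE:
        # Fast path: enough room, copy the bytes straight into the buffer.
        buffer[position:position + FLOAT_SIZE] = raw
        return position + FLOAT_SIZE
    # Slow path: push the bytes through the raw-byte writer one at a time.
    for b in raw:
        position = write_raw_byte(buffer, position, b)
    return position

# Usage: the fast path fills the first four bytes of the buffer.
buf = bytearray(8)
pos = write_float(buf, 0, len(buf), 1.5)
assert bytes(buf[:4]) == struct.pack("<f", 1.5) and pos == 4
```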
mmm a / tensorflow / contrib / metrics / python / ops / metric_ops . py <nl> ppp b / tensorflow / contrib / metrics / python / ops / metric_ops . py <nl> def _safe_div ( numerator , denominator , name ) : <nl> name = name ) <nl> <nl> <nl> - def _create_local ( name , shape , collections = None , validate_shape = True , <nl> + def _create_local ( name , <nl> + shape , <nl> + collections = None , <nl> + validate_shape = True , <nl> dtype = dtypes . float32 ) : <nl> " " " Creates a new local variable . <nl> <nl> def _assert_weights_rank ( weights , values ) : <nl> return check_ops . assert_rank_in ( weights , ( 0 , array_ops . rank ( values ) ) ) <nl> <nl> <nl> - def _count_condition ( values , weights = None , metrics_collections = None , <nl> + def _count_condition ( values , <nl> + weights = None , <nl> + metrics_collections = None , <nl> updates_collections = None ) : <nl> " " " Sums the weights of cases where the given values are True . <nl> <nl> def _count_condition ( values , weights = None , metrics_collections = None , <nl> return value_tensor , update_op <nl> <nl> <nl> - def streaming_true_positives ( predictions , labels , weights = None , <nl> + def streaming_true_positives ( predictions , <nl> + labels , <nl> + weights = None , <nl> metrics_collections = None , <nl> updates_collections = None , <nl> name = None ) : <nl> def streaming_true_positives ( predictions , labels , weights = None , <nl> tuple . <nl> " " " <nl> return metrics . true_positives ( <nl> - predictions = predictions , labels = labels , weights = weights , <nl> + predictions = predictions , <nl> + labels = labels , <nl> + weights = weights , <nl> metrics_collections = metrics_collections , <nl> - updates_collections = updates_collections , name = name ) <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> - def streaming_true_negatives ( predictions , labels , weights = None , <nl> + def streaming_true_negatives ( predictions , <nl> + labels , <nl> + weights = None , <nl> metrics_collections = None , <nl> updates_collections = None , <nl> name = None ) : <nl> def streaming_true_negatives ( predictions , labels , weights = None , <nl> either ` metrics_collections ` or ` updates_collections ` are not a list or <nl> tuple . <nl> " " " <nl> - with variable_scope . variable_scope ( <nl> - name , ' true_negatives ' , ( predictions , labels , weights ) ) : <nl> + with variable_scope . variable_scope ( name , ' true_negatives ' , <nl> + ( predictions , labels , weights ) ) : <nl> <nl> predictions , labels , weights = _remove_squeezable_dimensions ( <nl> predictions = math_ops . cast ( predictions , dtype = dtypes . bool ) , <nl> labels = math_ops . cast ( labels , dtype = dtypes . bool ) , <nl> weights = weights ) <nl> - is_true_negative = math_ops . logical_and ( math_ops . equal ( labels , False ) , <nl> - math_ops . equal ( predictions , False ) ) <nl> + is_true_negative = math_ops . logical_and ( <nl> + math_ops . equal ( labels , False ) , math_ops . equal ( predictions , False ) ) <nl> return _count_condition ( is_true_negative , weights , metrics_collections , <nl> updates_collections ) <nl> <nl> <nl> - def streaming_false_positives ( predictions , labels , weights = None , <nl> + def streaming_false_positives ( predictions , <nl> + labels , <nl> + weights = None , <nl> metrics_collections = None , <nl> updates_collections = None , <nl> name = None ) : <nl> def streaming_false_positives ( predictions , labels , weights = None , <nl> tuple . <nl> " " " <nl> return metrics . 
false_positives ( <nl> - predictions = predictions , labels = labels , weights = weights , <nl> + predictions = predictions , <nl> + labels = labels , <nl> + weights = weights , <nl> metrics_collections = metrics_collections , <nl> - updates_collections = updates_collections , name = name ) <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> - def streaming_false_negatives ( predictions , labels , weights = None , <nl> + def streaming_false_negatives ( predictions , <nl> + labels , <nl> + weights = None , <nl> metrics_collections = None , <nl> updates_collections = None , <nl> name = None ) : <nl> def streaming_false_negatives ( predictions , labels , weights = None , <nl> or tuple . <nl> " " " <nl> return metrics . false_negatives ( <nl> - predictions = predictions , labels = labels , weights = weights , <nl> + predictions = predictions , <nl> + labels = labels , <nl> + weights = weights , <nl> metrics_collections = metrics_collections , <nl> - updates_collections = updates_collections , name = name ) <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> # TODO ( ptucker ) : Move this somewhere common , to share with ops / losses / losses . py . <nl> def _broadcast_weights ( weights , values ) : <nl> with ops . name_scope ( None , ' broadcast_weights ' , ( values , weights ) ) as scope : <nl> weights_shape = weights . get_shape ( ) <nl> values_shape = values . get_shape ( ) <nl> - if ( weights_shape . is_fully_defined ( ) and <nl> - values_shape . is_fully_defined ( ) and <nl> + if ( weights_shape . is_fully_defined ( ) and values_shape . is_fully_defined ( ) and <nl> weights_shape . is_compatible_with ( values_shape ) ) : <nl> return weights <nl> with ops . control_dependencies ( ( _assert_weights_rank ( weights , values ) , ) ) : <nl> - return math_ops . multiply ( <nl> - weights , array_ops . ones_like ( values ) , name = scope ) <nl> + return math_ops . multiply ( weights , array_ops . ones_like ( values ) , name = scope ) <nl> <nl> <nl> - def streaming_mean ( values , weights = None , metrics_collections = None , <nl> - updates_collections = None , name = None ) : <nl> + def streaming_mean ( values , <nl> + weights = None , <nl> + metrics_collections = None , <nl> + updates_collections = None , <nl> + name = None ) : <nl> " " " Computes the ( weighted ) mean of the given values . <nl> <nl> The ` streaming_mean ` function creates two local variables , ` total ` and ` count ` <nl> def streaming_mean ( values , weights = None , metrics_collections = None , <nl> or tuple . <nl> " " " <nl> return metrics . mean ( <nl> - values = values , weights = weights , metrics_collections = metrics_collections , <nl> - updates_collections = updates_collections , name = name ) <nl> + values = values , <nl> + weights = weights , <nl> + metrics_collections = metrics_collections , <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> - def streaming_mean_tensor ( values , weights = None , metrics_collections = None , <nl> - updates_collections = None , name = None ) : <nl> + def streaming_mean_tensor ( values , <nl> + weights = None , <nl> + metrics_collections = None , <nl> + updates_collections = None , <nl> + name = None ) : <nl> " " " Computes the element - wise ( weighted ) mean of the given tensors . <nl> <nl> In contrast to the ` streaming_mean ` function which returns a scalar with the <nl> def streaming_mean_tensor ( values , weights = None , metrics_collections = None , <nl> or tuple . 
<nl> " " " <nl> return metrics . mean_tensor ( <nl> - values = values , weights = weights , metrics_collections = metrics_collections , <nl> - updates_collections = updates_collections , name = name ) <nl> + values = values , <nl> + weights = weights , <nl> + metrics_collections = metrics_collections , <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> - def streaming_accuracy ( predictions , labels , weights = None , <nl> - metrics_collections = None , updates_collections = None , <nl> + def streaming_accuracy ( predictions , <nl> + labels , <nl> + weights = None , <nl> + metrics_collections = None , <nl> + updates_collections = None , <nl> name = None ) : <nl> " " " Calculates how often ` predictions ` matches ` labels ` . <nl> <nl> def streaming_accuracy ( predictions , labels , weights = None , <nl> tuple . <nl> " " " <nl> return metrics . accuracy ( <nl> - predictions = predictions , labels = labels , weights = weights , <nl> + predictions = predictions , <nl> + labels = labels , <nl> + weights = weights , <nl> metrics_collections = metrics_collections , <nl> - updates_collections = updates_collections , name = name ) <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> - def streaming_precision ( predictions , labels , weights = None , <nl> - metrics_collections = None , updates_collections = None , <nl> + def streaming_precision ( predictions , <nl> + labels , <nl> + weights = None , <nl> + metrics_collections = None , <nl> + updates_collections = None , <nl> name = None ) : <nl> " " " Computes the precision of the predictions with respect to the labels . <nl> <nl> def streaming_precision ( predictions , labels , weights = None , <nl> tuple . <nl> " " " <nl> return metrics . precision ( <nl> - predictions = predictions , labels = labels , weights = weights , <nl> + predictions = predictions , <nl> + labels = labels , <nl> + weights = weights , <nl> metrics_collections = metrics_collections , <nl> - updates_collections = updates_collections , name = name ) <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> - def streaming_recall ( predictions , labels , weights = None , <nl> - metrics_collections = None , updates_collections = None , <nl> + def streaming_recall ( predictions , <nl> + labels , <nl> + weights = None , <nl> + metrics_collections = None , <nl> + updates_collections = None , <nl> name = None ) : <nl> " " " Computes the recall of the predictions with respect to the labels . <nl> <nl> def streaming_recall ( predictions , labels , weights = None , <nl> tuple . <nl> " " " <nl> return metrics . recall ( <nl> - predictions = predictions , labels = labels , weights = weights , <nl> + predictions = predictions , <nl> + labels = labels , <nl> + weights = weights , <nl> metrics_collections = metrics_collections , <nl> - updates_collections = updates_collections , name = name ) <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> - def _true_negatives ( labels , predictions , weights = None , <nl> + def _true_negatives ( labels , <nl> + predictions , <nl> + weights = None , <nl> metrics_collections = None , <nl> updates_collections = None , <nl> name = None ) : <nl> def _true_negatives ( labels , predictions , weights = None , <nl> either ` metrics_collections ` or ` updates_collections ` are not a list or <nl> tuple . <nl> " " " <nl> - with variable_scope . 
variable_scope ( <nl> - name , ' true_negatives ' , ( predictions , labels , weights ) ) : <nl> + with variable_scope . variable_scope ( name , ' true_negatives ' , <nl> + ( predictions , labels , weights ) ) : <nl> <nl> predictions , labels , weights = _remove_squeezable_dimensions ( <nl> predictions = math_ops . cast ( predictions , dtype = dtypes . bool ) , <nl> labels = math_ops . cast ( labels , dtype = dtypes . bool ) , <nl> weights = weights ) <nl> - is_true_negative = math_ops . logical_and ( math_ops . equal ( labels , False ) , <nl> - math_ops . equal ( predictions , False ) ) <nl> + is_true_negative = math_ops . logical_and ( <nl> + math_ops . equal ( labels , False ) , math_ops . equal ( predictions , False ) ) <nl> return _count_condition ( is_true_negative , weights , metrics_collections , <nl> updates_collections ) <nl> <nl> <nl> - def streaming_false_positive_rate ( predictions , labels , weights = None , <nl> + def streaming_false_positive_rate ( predictions , <nl> + labels , <nl> + weights = None , <nl> metrics_collections = None , <nl> updates_collections = None , <nl> name = None ) : <nl> def streaming_false_positive_rate ( predictions , labels , weights = None , <nl> either ` metrics_collections ` or ` updates_collections ` are not a list or <nl> tuple . <nl> " " " <nl> - with variable_scope . variable_scope ( <nl> - name , ' false_positive_rate ' , ( predictions , labels , weights ) ) : <nl> + with variable_scope . variable_scope ( name , ' false_positive_rate ' , <nl> + ( predictions , labels , weights ) ) : <nl> predictions , labels , weights = _remove_squeezable_dimensions ( <nl> predictions = math_ops . cast ( predictions , dtype = dtypes . bool ) , <nl> labels = math_ops . cast ( labels , dtype = dtypes . bool ) , <nl> weights = weights ) <nl> <nl> false_p , false_positives_update_op = metrics . false_positives ( <nl> - labels , predictions , weights , metrics_collections = None , <nl> - updates_collections = None , name = None ) <nl> + labels , <nl> + predictions , <nl> + weights , <nl> + metrics_collections = None , <nl> + updates_collections = None , <nl> + name = None ) <nl> true_n , true_negatives_update_op = _true_negatives ( <nl> - labels , predictions , weights , metrics_collections = None , <nl> - updates_collections = None , name = None ) <nl> + labels , <nl> + predictions , <nl> + weights , <nl> + metrics_collections = None , <nl> + updates_collections = None , <nl> + name = None ) <nl> <nl> def compute_fpr ( fp , tn , name ) : <nl> return array_ops . where ( <nl> - math_ops . greater ( fp + tn , 0 ) , <nl> - math_ops . div ( fp , fp + tn ) , <nl> - 0 , <nl> - name ) <nl> + math_ops . greater ( fp + tn , 0 ) , math_ops . div ( fp , fp + tn ) , 0 , name ) <nl> <nl> fpr = compute_fpr ( false_p , true_n , ' value ' ) <nl> - update_op = compute_fpr ( <nl> - false_positives_update_op , true_negatives_update_op , ' update_op ' ) <nl> + update_op = compute_fpr ( false_positives_update_op , true_negatives_update_op , <nl> + ' update_op ' ) <nl> <nl> if metrics_collections : <nl> ops . 
add_to_collections ( metrics_collections , fpr ) <nl> def compute_fpr ( fp , tn , name ) : <nl> return fpr , update_op <nl> <nl> <nl> - def streaming_false_negative_rate ( predictions , labels , weights = None , <nl> + def streaming_false_negative_rate ( predictions , <nl> + labels , <nl> + weights = None , <nl> metrics_collections = None , <nl> updates_collections = None , <nl> name = None ) : <nl> def streaming_false_negative_rate ( predictions , labels , weights = None , <nl> either ` metrics_collections ` or ` updates_collections ` are not a list or <nl> tuple . <nl> " " " <nl> - with variable_scope . variable_scope ( <nl> - name , ' false_negative_rate ' , ( predictions , labels , weights ) ) : <nl> + with variable_scope . variable_scope ( name , ' false_negative_rate ' , <nl> + ( predictions , labels , weights ) ) : <nl> predictions , labels , weights = _remove_squeezable_dimensions ( <nl> predictions = math_ops . cast ( predictions , dtype = dtypes . bool ) , <nl> labels = math_ops . cast ( labels , dtype = dtypes . bool ) , <nl> weights = weights ) <nl> <nl> false_n , false_negatives_update_op = metrics . false_negatives ( <nl> - labels , predictions , weights , metrics_collections = None , <nl> - updates_collections = None , name = None ) <nl> + labels , <nl> + predictions , <nl> + weights , <nl> + metrics_collections = None , <nl> + updates_collections = None , <nl> + name = None ) <nl> true_p , true_positives_update_op = metrics . true_positives ( <nl> - labels , predictions , weights , metrics_collections = None , <nl> - updates_collections = None , name = None ) <nl> + labels , <nl> + predictions , <nl> + weights , <nl> + metrics_collections = None , <nl> + updates_collections = None , <nl> + name = None ) <nl> <nl> def compute_fnr ( fn , tp , name ) : <nl> return array_ops . where ( <nl> - math_ops . greater ( fn + tp , 0 ) , <nl> - math_ops . div ( fn , fn + tp ) , <nl> - 0 , <nl> - name ) <nl> + math_ops . greater ( fn + tp , 0 ) , math_ops . div ( fn , fn + tp ) , 0 , name ) <nl> <nl> fnr = compute_fnr ( false_n , true_p , ' value ' ) <nl> - update_op = compute_fnr ( <nl> - false_negatives_update_op , true_positives_update_op , ' update_op ' ) <nl> + update_op = compute_fnr ( false_negatives_update_op , true_positives_update_op , <nl> + ' update_op ' ) <nl> <nl> if metrics_collections : <nl> ops . add_to_collections ( metrics_collections , fnr ) <nl> def compute_fnr ( fn , tp , name ) : <nl> return fnr , update_op <nl> <nl> <nl> - def _streaming_confusion_matrix_at_thresholds ( <nl> - predictions , labels , thresholds , weights = None , includes = None ) : <nl> + def _streaming_confusion_matrix_at_thresholds ( predictions , <nl> + labels , <nl> + thresholds , <nl> + weights = None , <nl> + includes = None ) : <nl> " " " Computes true_positives , false_negatives , true_negatives , false_positives . <nl> <nl> This function creates up to four local variables , ` true_positives ` , <nl> def _streaming_confusion_matrix_at_thresholds ( <nl> if weights is not None : <nl> broadcast_weights = weights_broadcast_ops . broadcast_weights ( <nl> math_ops . to_float ( weights ) , predictions ) <nl> - weights_tiled = array_ops . tile ( array_ops . reshape ( <nl> - broadcast_weights , [ 1 , - 1 ] ) , [ num_thresholds , 1 ] ) <nl> + weights_tiled = array_ops . tile ( <nl> + array_ops . reshape ( broadcast_weights , [ 1 , - 1 ] ) , [ num_thresholds , 1 ] ) <nl> thresh_tiled . get_shape ( ) . assert_is_compatible_with ( <nl> weights_tiled . 
get_shape ( ) ) <nl> else : <nl> def _streaming_confusion_matrix_at_thresholds ( <nl> math_ops . logical_and ( label_is_pos , pred_is_pos ) ) <nl> if weights_tiled is not None : <nl> is_true_positive * = weights_tiled <nl> - update_ops [ ' tp ' ] = state_ops . assign_add ( <nl> - true_positives , math_ops . reduce_sum ( is_true_positive , 1 ) ) <nl> + update_ops [ ' tp ' ] = state_ops . assign_add ( true_positives , <nl> + math_ops . reduce_sum ( <nl> + is_true_positive , 1 ) ) <nl> values [ ' tp ' ] = true_positives <nl> <nl> if ' fn ' in includes : <nl> def _streaming_confusion_matrix_at_thresholds ( <nl> math_ops . logical_and ( label_is_pos , pred_is_neg ) ) <nl> if weights_tiled is not None : <nl> is_false_negative * = weights_tiled <nl> - update_ops [ ' fn ' ] = state_ops . assign_add ( <nl> - false_negatives , math_ops . reduce_sum ( is_false_negative , 1 ) ) <nl> + update_ops [ ' fn ' ] = state_ops . assign_add ( false_negatives , <nl> + math_ops . reduce_sum ( <nl> + is_false_negative , 1 ) ) <nl> values [ ' fn ' ] = false_negatives <nl> <nl> if ' tn ' in includes : <nl> def _streaming_confusion_matrix_at_thresholds ( <nl> math_ops . logical_and ( label_is_neg , pred_is_neg ) ) <nl> if weights_tiled is not None : <nl> is_true_negative * = weights_tiled <nl> - update_ops [ ' tn ' ] = state_ops . assign_add ( <nl> - true_negatives , math_ops . reduce_sum ( is_true_negative , 1 ) ) <nl> + update_ops [ ' tn ' ] = state_ops . assign_add ( true_negatives , <nl> + math_ops . reduce_sum ( <nl> + is_true_negative , 1 ) ) <nl> values [ ' tn ' ] = true_negatives <nl> <nl> if ' fp ' in includes : <nl> def _streaming_confusion_matrix_at_thresholds ( <nl> math_ops . logical_and ( label_is_neg , pred_is_pos ) ) <nl> if weights_tiled is not None : <nl> is_false_positive * = weights_tiled <nl> - update_ops [ ' fp ' ] = state_ops . assign_add ( <nl> - false_positives , math_ops . reduce_sum ( is_false_positive , 1 ) ) <nl> + update_ops [ ' fp ' ] = state_ops . assign_add ( false_positives , <nl> + math_ops . 
reduce_sum ( <nl> + is_false_positive , 1 ) ) <nl> values [ ' fp ' ] = false_positives <nl> <nl> return values , update_ops <nl> <nl> <nl> - def streaming_true_positives_at_thresholds ( <nl> - predictions , labels , thresholds , weights = None ) : <nl> + def streaming_true_positives_at_thresholds ( predictions , <nl> + labels , <nl> + thresholds , <nl> + weights = None ) : <nl> values , update_ops = _streaming_confusion_matrix_at_thresholds ( <nl> predictions , labels , thresholds , weights = weights , includes = ( ' tp ' , ) ) <nl> return values [ ' tp ' ] , update_ops [ ' tp ' ] <nl> <nl> <nl> - def streaming_false_negatives_at_thresholds ( <nl> - predictions , labels , thresholds , weights = None ) : <nl> + def streaming_false_negatives_at_thresholds ( predictions , <nl> + labels , <nl> + thresholds , <nl> + weights = None ) : <nl> values , update_ops = _streaming_confusion_matrix_at_thresholds ( <nl> predictions , labels , thresholds , weights = weights , includes = ( ' fn ' , ) ) <nl> return values [ ' fn ' ] , update_ops [ ' fn ' ] <nl> <nl> <nl> - def streaming_false_positives_at_thresholds ( <nl> - predictions , labels , thresholds , weights = None ) : <nl> + def streaming_false_positives_at_thresholds ( predictions , <nl> + labels , <nl> + thresholds , <nl> + weights = None ) : <nl> values , update_ops = _streaming_confusion_matrix_at_thresholds ( <nl> predictions , labels , thresholds , weights = weights , includes = ( ' fp ' , ) ) <nl> return values [ ' fp ' ] , update_ops [ ' fp ' ] <nl> <nl> <nl> - def streaming_true_negatives_at_thresholds ( <nl> - predictions , labels , thresholds , weights = None ) : <nl> + def streaming_true_negatives_at_thresholds ( predictions , <nl> + labels , <nl> + thresholds , <nl> + weights = None ) : <nl> values , update_ops = _streaming_confusion_matrix_at_thresholds ( <nl> predictions , labels , thresholds , weights = weights , includes = ( ' tn ' , ) ) <nl> return values [ ' tn ' ] , update_ops [ ' tn ' ] <nl> def streaming_curve_points ( labels = None , <nl> either ` metrics_collections ` or ` updates_collections ` are not a list or <nl> tuple . <nl> " " " <nl> - with variable_scope . variable_scope ( name , ' curve_points ' , ( labels , predictions , <nl> - weights ) ) : <nl> + with variable_scope . variable_scope ( name , ' curve_points ' , <nl> + ( labels , predictions , weights ) ) : <nl> if curve ! = ' ROC ' and curve ! = ' PR ' : <nl> raise ValueError ( ' curve must be either ROC or PR , % s unknown ' % ( curve ) ) <nl> kepsilon = 1e - 7 # to account for floating point imprecisions <nl> def compute_points ( tp , fn , tn , fp ) : <nl> return points , update_op <nl> <nl> <nl> - def streaming_auc ( predictions , labels , weights = None , num_thresholds = 200 , <nl> - metrics_collections = None , updates_collections = None , <nl> - curve = ' ROC ' , name = None ) : <nl> + def streaming_auc ( predictions , <nl> + labels , <nl> + weights = None , <nl> + num_thresholds = 200 , <nl> + metrics_collections = None , <nl> + updates_collections = None , <nl> + curve = ' ROC ' , <nl> + name = None ) : <nl> " " " Computes the approximate AUC via a Riemann sum . <nl> <nl> The ` streaming_auc ` function creates four local variables , ` true_positives ` , <nl> def streaming_auc ( predictions , labels , weights = None , num_thresholds = 200 , <nl> tuple . <nl> " " " <nl> return metrics . 
auc ( <nl> - predictions = predictions , labels = labels , weights = weights , <nl> - metrics_collections = metrics_collections , num_thresholds = num_thresholds , <nl> - curve = curve , updates_collections = updates_collections , name = name ) <nl> + predictions = predictions , <nl> + labels = labels , <nl> + weights = weights , <nl> + metrics_collections = metrics_collections , <nl> + num_thresholds = num_thresholds , <nl> + curve = curve , <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> - def streaming_specificity_at_sensitivity ( <nl> - predictions , labels , sensitivity , weights = None , num_thresholds = 200 , <nl> - metrics_collections = None , updates_collections = None , name = None ) : <nl> + def streaming_specificity_at_sensitivity ( predictions , <nl> + labels , <nl> + sensitivity , <nl> + weights = None , <nl> + num_thresholds = 200 , <nl> + metrics_collections = None , <nl> + updates_collections = None , <nl> + name = None ) : <nl> " " " Computes the specificity at a given sensitivity . <nl> <nl> The ` streaming_specificity_at_sensitivity ` function creates four local <nl> def streaming_specificity_at_sensitivity ( <nl> or ` updates_collections ` are not a list or tuple . <nl> " " " <nl> return metrics . specificity_at_sensitivity ( <nl> - sensitivity = sensitivity , num_thresholds = num_thresholds , <nl> - predictions = predictions , labels = labels , weights = weights , <nl> + sensitivity = sensitivity , <nl> + num_thresholds = num_thresholds , <nl> + predictions = predictions , <nl> + labels = labels , <nl> + weights = weights , <nl> metrics_collections = metrics_collections , <nl> - updates_collections = updates_collections , name = name ) <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> - def streaming_sensitivity_at_specificity ( <nl> - predictions , labels , specificity , weights = None , num_thresholds = 200 , <nl> - metrics_collections = None , updates_collections = None , name = None ) : <nl> + def streaming_sensitivity_at_specificity ( predictions , <nl> + labels , <nl> + specificity , <nl> + weights = None , <nl> + num_thresholds = 200 , <nl> + metrics_collections = None , <nl> + updates_collections = None , <nl> + name = None ) : <nl> " " " Computes the sensitivity at a given specificity . <nl> <nl> The ` streaming_sensitivity_at_specificity ` function creates four local <nl> def streaming_sensitivity_at_specificity ( <nl> or ` updates_collections ` are not a list or tuple . <nl> " " " <nl> return metrics . sensitivity_at_specificity ( <nl> - specificity = specificity , num_thresholds = num_thresholds , <nl> - predictions = predictions , labels = labels , weights = weights , <nl> + specificity = specificity , <nl> + num_thresholds = num_thresholds , <nl> + predictions = predictions , <nl> + labels = labels , <nl> + weights = weights , <nl> metrics_collections = metrics_collections , <nl> - updates_collections = updates_collections , name = name ) <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> - def streaming_precision_at_thresholds ( predictions , labels , thresholds , <nl> + def streaming_precision_at_thresholds ( predictions , <nl> + labels , <nl> + thresholds , <nl> weights = None , <nl> metrics_collections = None , <nl> - updates_collections = None , name = None ) : <nl> + updates_collections = None , <nl> + name = None ) : <nl> " " " Computes precision values for different ` thresholds ` on ` predictions ` . 
<nl> <nl> The ` streaming_precision_at_thresholds ` function creates four local variables , <nl> def streaming_precision_at_thresholds ( predictions , labels , thresholds , <nl> " " " <nl> return metrics . precision_at_thresholds ( <nl> thresholds = thresholds , <nl> - predictions = predictions , labels = labels , weights = weights , <nl> + predictions = predictions , <nl> + labels = labels , <nl> + weights = weights , <nl> metrics_collections = metrics_collections , <nl> - updates_collections = updates_collections , name = name ) <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> - def streaming_recall_at_thresholds ( predictions , labels , thresholds , <nl> - weights = None , metrics_collections = None , <nl> - updates_collections = None , name = None ) : <nl> + def streaming_recall_at_thresholds ( predictions , <nl> + labels , <nl> + thresholds , <nl> + weights = None , <nl> + metrics_collections = None , <nl> + updates_collections = None , <nl> + name = None ) : <nl> " " " Computes various recall values for different ` thresholds ` on ` predictions ` . <nl> <nl> The ` streaming_recall_at_thresholds ` function creates four local variables , <nl> def streaming_recall_at_thresholds ( predictions , labels , thresholds , <nl> " " " <nl> return metrics . recall_at_thresholds ( <nl> thresholds = thresholds , <nl> - predictions = predictions , labels = labels , weights = weights , <nl> + predictions = predictions , <nl> + labels = labels , <nl> + weights = weights , <nl> metrics_collections = metrics_collections , <nl> - updates_collections = updates_collections , name = name ) <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> - def streaming_false_positive_rate_at_thresholds ( <nl> - predictions , labels , thresholds , weights = None , metrics_collections = None , <nl> - updates_collections = None , name = None ) : <nl> + def streaming_false_positive_rate_at_thresholds ( predictions , <nl> + labels , <nl> + thresholds , <nl> + weights = None , <nl> + metrics_collections = None , <nl> + updates_collections = None , <nl> + name = None ) : <nl> " " " Computes various fpr values for different ` thresholds ` on ` predictions ` . <nl> <nl> The ` streaming_false_positive_rate_at_thresholds ` function creates two <nl> def streaming_false_positive_rate_at_thresholds ( <nl> either ` metrics_collections ` or ` updates_collections ` are not a list or <nl> tuple . <nl> " " " <nl> - with variable_scope . variable_scope ( <nl> - name , ' false_positive_rate_at_thresholds ' , <nl> - ( predictions , labels , weights ) ) : <nl> + with variable_scope . variable_scope ( name , ' false_positive_rate_at_thresholds ' , <nl> + ( predictions , labels , weights ) ) : <nl> values , update_ops = _streaming_confusion_matrix_at_thresholds ( <nl> predictions , labels , thresholds , weights , includes = ( ' fp ' , ' tn ' ) ) <nl> <nl> # Avoid division by zero . <nl> epsilon = 1e - 7 <nl> + <nl> def compute_fpr ( fp , tn , name ) : <nl> return math_ops . div ( fp , epsilon + fp + tn , name = ' fpr_ ' + name ) <nl> <nl> fpr = compute_fpr ( values [ ' fp ' ] , values [ ' tn ' ] , ' value ' ) <nl> - update_op = compute_fpr ( <nl> - update_ops [ ' fp ' ] , update_ops [ ' tn ' ] , ' update_op ' ) <nl> + update_op = compute_fpr ( update_ops [ ' fp ' ] , update_ops [ ' tn ' ] , ' update_op ' ) <nl> <nl> if metrics_collections : <nl> ops . 
add_to_collections ( metrics_collections , fpr ) <nl> def compute_fpr ( fp , tn , name ) : <nl> return fpr , update_op <nl> <nl> <nl> - def streaming_false_negative_rate_at_thresholds ( <nl> - predictions , labels , thresholds , weights = None , metrics_collections = None , <nl> - updates_collections = None , name = None ) : <nl> + def streaming_false_negative_rate_at_thresholds ( predictions , <nl> + labels , <nl> + thresholds , <nl> + weights = None , <nl> + metrics_collections = None , <nl> + updates_collections = None , <nl> + name = None ) : <nl> " " " Computes various fnr values for different ` thresholds ` on ` predictions ` . <nl> <nl> The ` streaming_false_negative_rate_at_thresholds ` function creates two <nl> def streaming_false_negative_rate_at_thresholds ( <nl> either ` metrics_collections ` or ` updates_collections ` are not a list or <nl> tuple . <nl> " " " <nl> - with variable_scope . variable_scope ( <nl> - name , ' false_negative_rate_at_thresholds ' , <nl> - ( predictions , labels , weights ) ) : <nl> + with variable_scope . variable_scope ( name , ' false_negative_rate_at_thresholds ' , <nl> + ( predictions , labels , weights ) ) : <nl> values , update_ops = _streaming_confusion_matrix_at_thresholds ( <nl> predictions , labels , thresholds , weights , includes = ( ' fn ' , ' tp ' ) ) <nl> <nl> # Avoid division by zero . <nl> epsilon = 1e - 7 <nl> + <nl> def compute_fnr ( fn , tp , name ) : <nl> return math_ops . div ( fn , epsilon + fn + tp , name = ' fnr_ ' + name ) <nl> <nl> fnr = compute_fnr ( values [ ' fn ' ] , values [ ' tp ' ] , ' value ' ) <nl> - update_op = compute_fnr ( <nl> - update_ops [ ' fn ' ] , update_ops [ ' tp ' ] , ' update_op ' ) <nl> + update_op = compute_fnr ( update_ops [ ' fn ' ] , update_ops [ ' tp ' ] , ' update_op ' ) <nl> <nl> if metrics_collections : <nl> ops . add_to_collections ( metrics_collections , fnr ) <nl> def _at_k_name ( name , k = None , class_id = None ) : <nl> <nl> @ deprecated ( ' 2016 - 11 - 08 ' , ' Please use ` streaming_sparse_recall_at_k ` , ' <nl> ' and reshape labels from [ batch_size ] to [ batch_size , 1 ] . ' ) <nl> - def streaming_recall_at_k ( predictions , labels , k , weights = None , <nl> - metrics_collections = None , updates_collections = None , <nl> + def streaming_recall_at_k ( predictions , <nl> + labels , <nl> + k , <nl> + weights = None , <nl> + metrics_collections = None , <nl> + updates_collections = None , <nl> name = None ) : <nl> " " " Computes the recall @ k of the predictions with respect to dense labels . <nl> <nl> def streaming_recall_at_k ( predictions , labels , k , weights = None , <nl> tuple . <nl> " " " <nl> in_top_k = math_ops . to_float ( nn . in_top_k ( predictions , labels , k ) ) <nl> - return streaming_mean ( in_top_k , <nl> - weights , <nl> - metrics_collections , <nl> - updates_collections , <nl> - name or _at_k_name ( ' recall ' , k ) ) <nl> + return streaming_mean ( in_top_k , weights , metrics_collections , <nl> + updates_collections , name or _at_k_name ( ' recall ' , k ) ) <nl> <nl> <nl> # TODO ( ptucker ) : Validate range of values in labels ? <nl> def streaming_sparse_recall_at_k ( predictions , <nl> are not a list or tuple . <nl> " " " <nl> return metrics . 
recall_at_k ( <nl> - k = k , class_id = class_id , <nl> - predictions = predictions , labels = labels , weights = weights , <nl> + k = k , <nl> + class_id = class_id , <nl> + predictions = predictions , <nl> + labels = labels , <nl> + weights = weights , <nl> metrics_collections = metrics_collections , <nl> - updates_collections = updates_collections , name = name ) <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> # TODO ( ptucker ) : Validate range of values in labels ? <nl> def streaming_sparse_precision_at_k ( predictions , <nl> are not a list or tuple . <nl> " " " <nl> return metrics . sparse_precision_at_k ( <nl> - k = k , class_id = class_id , <nl> - predictions = predictions , labels = labels , weights = weights , <nl> + k = k , <nl> + class_id = class_id , <nl> + predictions = predictions , <nl> + labels = labels , <nl> + weights = weights , <nl> metrics_collections = metrics_collections , <nl> - updates_collections = updates_collections , name = name ) <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> # TODO ( ptucker ) : Validate range of values in labels ? <nl> def streaming_sparse_precision_at_top_k ( top_k_predictions , <nl> ValueError : If ` top_k_predictions ` has rank < 2 . <nl> " " " <nl> default_name = _at_k_name ( ' precision ' , class_id = class_id ) <nl> - with ops . name_scope ( <nl> - name , default_name , <nl> - ( top_k_predictions , labels , weights ) ) as name_scope : <nl> + with ops . name_scope ( name , default_name , <nl> + ( top_k_predictions , labels , weights ) ) as name_scope : <nl> return metrics_impl . _sparse_precision_at_top_k ( # pylint : disable = protected - access <nl> labels = labels , <nl> predictions_idx = top_k_predictions , <nl> def sparse_recall_at_top_k ( labels , <nl> are not a list or tuple . <nl> " " " <nl> default_name = _at_k_name ( ' recall ' , class_id = class_id ) <nl> - with ops . name_scope ( name , default_name , ( top_k_predictions , labels , <nl> - weights ) ) as name_scope : <nl> + with ops . name_scope ( name , default_name , <nl> + ( top_k_predictions , labels , weights ) ) as name_scope : <nl> return metrics_impl . _sparse_recall_at_top_k ( # pylint : disable = protected - access <nl> labels = labels , <nl> predictions_idx = top_k_predictions , <nl> def streaming_sparse_average_precision_at_k ( predictions , <nl> value matches ` metric ` . <nl> " " " <nl> return metrics . sparse_average_precision_at_k ( <nl> - k = k , predictions = predictions , labels = labels , weights = weights , <nl> + k = k , <nl> + predictions = predictions , <nl> + labels = labels , <nl> + weights = weights , <nl> metrics_collections = metrics_collections , <nl> - updates_collections = updates_collections , name = name ) <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> def streaming_sparse_average_precision_at_top_k ( top_k_predictions , <nl> def streaming_sparse_average_precision_at_top_k ( top_k_predictions , <nl> name = name ) <nl> <nl> <nl> - def streaming_mean_absolute_error ( predictions , labels , weights = None , <nl> + def streaming_mean_absolute_error ( predictions , <nl> + labels , <nl> + weights = None , <nl> metrics_collections = None , <nl> updates_collections = None , <nl> name = None ) : <nl> def streaming_mean_absolute_error ( predictions , labels , weights = None , <nl> tuple . <nl> " " " <nl> return metrics . 
mean_absolute_error ( <nl> - predictions = predictions , labels = labels , weights = weights , <nl> + predictions = predictions , <nl> + labels = labels , <nl> + weights = weights , <nl> metrics_collections = metrics_collections , <nl> - updates_collections = updates_collections , name = name ) <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> - def streaming_mean_relative_error ( predictions , labels , normalizer , weights = None , <nl> + def streaming_mean_relative_error ( predictions , <nl> + labels , <nl> + normalizer , <nl> + weights = None , <nl> metrics_collections = None , <nl> updates_collections = None , <nl> name = None ) : <nl> def streaming_mean_relative_error ( predictions , labels , normalizer , weights = None , <nl> tuple . <nl> " " " <nl> return metrics . mean_relative_error ( <nl> - normalizer = normalizer , predictions = predictions , labels = labels , <nl> - weights = weights , metrics_collections = metrics_collections , <nl> - updates_collections = updates_collections , name = name ) <nl> + normalizer = normalizer , <nl> + predictions = predictions , <nl> + labels = labels , <nl> + weights = weights , <nl> + metrics_collections = metrics_collections , <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> - def streaming_mean_squared_error ( predictions , labels , weights = None , <nl> + def streaming_mean_squared_error ( predictions , <nl> + labels , <nl> + weights = None , <nl> metrics_collections = None , <nl> updates_collections = None , <nl> name = None ) : <nl> def streaming_mean_squared_error ( predictions , labels , weights = None , <nl> tuple . <nl> " " " <nl> return metrics . mean_squared_error ( <nl> - predictions = predictions , labels = labels , weights = weights , <nl> + predictions = predictions , <nl> + labels = labels , <nl> + weights = weights , <nl> metrics_collections = metrics_collections , <nl> - updates_collections = updates_collections , name = name ) <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> - def streaming_root_mean_squared_error ( predictions , labels , weights = None , <nl> + def streaming_root_mean_squared_error ( predictions , <nl> + labels , <nl> + weights = None , <nl> metrics_collections = None , <nl> updates_collections = None , <nl> name = None ) : <nl> def streaming_root_mean_squared_error ( predictions , labels , weights = None , <nl> tuple . <nl> " " " <nl> return metrics . root_mean_squared_error ( <nl> - predictions = predictions , labels = labels , weights = weights , <nl> + predictions = predictions , <nl> + labels = labels , <nl> + weights = weights , <nl> metrics_collections = metrics_collections , <nl> - updates_collections = updates_collections , name = name ) <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> def streaming_covariance ( predictions , <nl> def streaming_covariance ( predictions , <nl> ValueError : If labels and predictions are of different sizes or if either <nl> ` metrics_collections ` or ` updates_collections ` are not a list or tuple . <nl> " " " <nl> - with variable_scope . variable_scope ( <nl> - name , ' covariance ' , ( predictions , labels , weights ) ) : <nl> + with variable_scope . variable_scope ( name , ' covariance ' , <nl> + ( predictions , labels , weights ) ) : <nl> predictions , labels , weights = _remove_squeezable_dimensions ( <nl> predictions , labels , weights ) <nl> predictions . get_shape ( ) . assert_is_compatible_with ( labels . 
get_shape ( ) ) <nl> def streaming_covariance ( predictions , <nl> # prev_mean_label is E [ y_A ] in the update equation <nl> prev_mean_label = update_mean_label - delta_mean_label <nl> <nl> - unweighted_batch_coresiduals = ( <nl> - ( predictions - batch_mean_prediction ) * ( labels - batch_mean_label ) ) <nl> + unweighted_batch_coresiduals = ( ( predictions - batch_mean_prediction ) * <nl> + ( labels - batch_mean_label ) ) <nl> # batch_comoment is C_B in the update equation <nl> if weights is None : <nl> batch_comoment = math_ops . reduce_sum ( unweighted_batch_coresiduals ) <nl> else : <nl> - batch_comoment = math_ops . reduce_sum ( unweighted_batch_coresiduals * <nl> - weights ) <nl> + batch_comoment = math_ops . reduce_sum ( <nl> + unweighted_batch_coresiduals * weights ) <nl> <nl> # View delta_comoment as = C_AB - C_A in the update equation above . <nl> # Since C_A is stored in a var , by how much do we need to increment that var <nl> # to make the var = C_AB ? <nl> - delta_comoment = ( batch_comoment + <nl> - ( prev_mean_prediction - batch_mean_prediction ) * <nl> - ( prev_mean_label - batch_mean_label ) * <nl> - ( prev_count * batch_count / update_count ) ) <nl> + delta_comoment = ( <nl> + batch_comoment + ( prev_mean_prediction - batch_mean_prediction ) * <nl> + ( prev_mean_label - batch_mean_label ) * <nl> + ( prev_count * batch_count / update_count ) ) <nl> update_comoment = state_ops . assign_add ( comoment , delta_comoment ) <nl> <nl> covariance = array_ops . where ( <nl> def streaming_pearson_correlation ( predictions , <nl> ` weights ` is the wrong size , or if either ` metrics_collections ` or <nl> ` updates_collections ` are not a ` list ` or ` tuple ` . <nl> " " " <nl> - with variable_scope . variable_scope ( <nl> - name , ' pearson_r ' , ( predictions , labels , weights ) ) : <nl> + with variable_scope . variable_scope ( name , ' pearson_r ' , <nl> + ( predictions , labels , weights ) ) : <nl> predictions , labels , weights = _remove_squeezable_dimensions ( <nl> predictions , labels , weights ) <nl> predictions . get_shape ( ) . assert_is_compatible_with ( labels . get_shape ( ) ) <nl> def streaming_pearson_correlation ( predictions , <nl> <nl> pearson_r = math_ops . truediv ( <nl> cov , <nl> - math_ops . multiply ( math_ops . sqrt ( var_predictions ) , <nl> - math_ops . sqrt ( var_labels ) ) , <nl> + math_ops . multiply ( <nl> + math_ops . sqrt ( var_predictions ) , math_ops . sqrt ( var_labels ) ) , <nl> name = ' pearson_r ' ) <nl> update_op = math_ops . truediv ( <nl> update_cov , <nl> - math_ops . multiply ( math_ops . sqrt ( update_var_predictions ) , <nl> - math_ops . sqrt ( update_var_labels ) ) , <nl> + math_ops . multiply ( <nl> + math_ops . sqrt ( update_var_predictions ) , <nl> + math_ops . sqrt ( update_var_labels ) ) , <nl> name = ' update_op ' ) <nl> <nl> if metrics_collections : <nl> def streaming_pearson_correlation ( predictions , <nl> <nl> # TODO ( nsilberman ) : add a ' normalized ' flag so that the user can request <nl> # normalization if the inputs are not normalized . <nl> - def streaming_mean_cosine_distance ( predictions , labels , dim , weights = None , <nl> + def streaming_mean_cosine_distance ( predictions , <nl> + labels , <nl> + dim , <nl> + weights = None , <nl> metrics_collections = None , <nl> updates_collections = None , <nl> name = None ) : <nl> def streaming_mean_cosine_distance ( predictions , labels , dim , weights = None , <nl> predictions , labels , weights ) <nl> predictions . get_shape ( ) . assert_is_compatible_with ( labels . 
get_shape ( ) ) <nl> radial_diffs = math_ops . multiply ( predictions , labels ) <nl> - radial_diffs = math_ops . reduce_sum ( radial_diffs , <nl> - reduction_indices = [ dim , ] , <nl> - keep_dims = True ) <nl> - mean_distance , update_op = streaming_mean ( radial_diffs , weights , <nl> - None , <nl> - None , <nl> + radial_diffs = math_ops . reduce_sum ( <nl> + radial_diffs , reduction_indices = [ <nl> + dim , <nl> + ] , keep_dims = True ) <nl> + mean_distance , update_op = streaming_mean ( radial_diffs , weights , None , None , <nl> name or ' mean_cosine_distance ' ) <nl> mean_distance = math_ops . subtract ( 1 . 0 , mean_distance ) <nl> update_op = math_ops . subtract ( 1 . 0 , update_op ) <nl> def streaming_mean_cosine_distance ( predictions , labels , dim , weights = None , <nl> return mean_distance , update_op <nl> <nl> <nl> - def streaming_percentage_less ( values , threshold , weights = None , <nl> + def streaming_percentage_less ( values , <nl> + threshold , <nl> + weights = None , <nl> metrics_collections = None , <nl> updates_collections = None , <nl> name = None ) : <nl> def streaming_percentage_less ( values , threshold , weights = None , <nl> or tuple . <nl> " " " <nl> return metrics . percentage_below ( <nl> - values = values , threshold = threshold , weights = weights , <nl> + values = values , <nl> + threshold = threshold , <nl> + weights = weights , <nl> metrics_collections = metrics_collections , <nl> - updates_collections = updates_collections , name = name ) <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> def streaming_mean_iou ( predictions , <nl> def streaming_mean_iou ( predictions , <nl> tuple . <nl> " " " <nl> return metrics . mean_iou ( <nl> - num_classes = num_classes , predictions = predictions , labels = labels , <nl> - weights = weights , metrics_collections = metrics_collections , <nl> - updates_collections = updates_collections , name = name ) <nl> + num_classes = num_classes , <nl> + predictions = predictions , <nl> + labels = labels , <nl> + weights = weights , <nl> + metrics_collections = metrics_collections , <nl> + updates_collections = updates_collections , <nl> + name = name ) <nl> <nl> <nl> def _next_array_size ( required_size , growth_factor = 1 . 5 ) : <nl> def _next_array_size ( required_size , growth_factor = 1 . 5 ) : <nl> tf . Tensor with dtype = int32 giving the next array size . <nl> " " " <nl> exponent = math_ops . ceil ( <nl> - math_ops . log ( math_ops . cast ( required_size , dtypes . float32 ) ) <nl> - / math_ops . log ( math_ops . cast ( growth_factor , dtypes . float32 ) ) ) <nl> - return math_ops . cast ( math_ops . ceil ( growth_factor * * exponent ) , dtypes . int32 ) <nl> + math_ops . log ( math_ops . cast ( required_size , dtypes . float32 ) ) / math_ops . log ( <nl> + math_ops . cast ( growth_factor , dtypes . float32 ) ) ) <nl> + return math_ops . cast ( math_ops . ceil ( growth_factor * * exponent ) , dtypes . int32 ) <nl> <nl> <nl> def streaming_concat ( values , <nl> def streaming_concat ( values , <nl> if not 0 < = axis < ndim : <nl> raise ValueError ( ' axis = % r not in [ 0 , % r ) ' % ( axis , ndim ) ) <nl> <nl> - fixed_shape = [ dim . value for n , dim in enumerate ( values_shape ) <nl> - if n ! = axis ] <nl> + fixed_shape = [ dim . value for n , dim in enumerate ( values_shape ) if n ! 
= axis ] <nl> if any ( value is None for value in fixed_shape ) : <nl> raise ValueError ( ' all dimensions of ` values ` other than the dimension to ' <nl> ' concatenate along must have statically known size ' ) <nl> def _remove_squeezable_dimensions ( predictions , labels , weights ) : <nl> # Use static rank . <nl> if weights_rank - predictions_rank = = 1 : <nl> weights = array_ops . squeeze ( weights , [ - 1 ] ) <nl> - elif ( weights_rank is None ) or ( <nl> - weights_shape . dims [ - 1 ] . is_compatible_with ( 1 ) ) : <nl> + elif ( weights_rank is <nl> + None ) or ( weights_shape . dims [ - 1 ] . is_compatible_with ( 1 ) ) : <nl> # Use dynamic rank <nl> weights = control_flow_ops . cond ( <nl> - math_ops . equal ( array_ops . rank ( weights ) , <nl> - math_ops . add ( array_ops . rank ( predictions ) , 1 ) ) , <nl> - lambda : array_ops . squeeze ( weights , [ - 1 ] ) , <nl> - lambda : weights ) <nl> + math_ops . equal ( <nl> + array_ops . rank ( weights ) , <nl> + math_ops . add ( array_ops . rank ( predictions ) , 1 ) ) , <nl> + lambda : array_ops . squeeze ( weights , [ - 1 ] ) , lambda : weights ) <nl> return predictions , labels , weights <nl> <nl> <nl>
Formatting metric_ops .
tensorflow/tensorflow
71bdc0efa737e3094033f0c6ea3779b1fc3c8a94
2017-10-20T18:38:20Z
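The commit above is formatting-only, but the reflowed helpers make the two division guards in metric_ops easier to see: `compute_fpr` returns `fp / (fp + tn)` with a branch that yields 0 when there are no negatives, while the thresholded variants add a small epsilon to the denominator instead of branching. A plain-Python sketch of those two guards, with no TensorFlow dependency, is shown below; the function names echo the helpers in the diff but the code is only the scalar arithmetic.

```python
def compute_fpr(fp: float, tn: float) -> float:
    # Matches the array_ops.where form: return 0 when fp + tn == 0.
    denominator = fp + tn
    return fp / denominator if denominator > 0 else 0.0

def compute_fpr_at_threshold(fp: float, tn: float, epsilon: float = 1e-7) -> float:
    # Matches the thresholded variant: bias the denominator by epsilon so
    # the division is always defined, at the cost of a tiny downward bias.
    return fp / (epsilon + fp + tn)

assert compute_fpr(0.0, 0.0) == 0.0
assert abs(compute_fpr(3.0, 7.0) - 0.3) < 1e-12
```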
mmm a / src / compiler / typer . cc <nl> ppp b / src / compiler / typer . cc <nl> Type * Typer : : Visitor : : FalsifyUndefined ( Type * type , Typer * t ) { <nl> <nl> Type * Typer : : Visitor : : Rangify ( Type * type , Typer * t ) { <nl> if ( type - > IsRange ( ) ) return type ; / / Shortcut . <nl> - if ( ! type - > Is ( t - > integer ) & & ! type - > Is ( Type : : Integral32 ( ) ) ) { <nl> - return type ; / / Give up . <nl> - } <nl> + if ( ! type - > Is ( t - > integer ) ) return type ; / / Give up . <nl> Factory * f = t - > isolate ( ) - > factory ( ) ; <nl> return Type : : Range ( f - > NewNumber ( type - > Min ( ) ) , f - > NewNumber ( type - > Max ( ) ) , <nl> t - > zone ( ) ) ; <nl>
Revert " [ turbofan ] Fix bug in Rangify . "
v8/v8
84355c829592a3e538357a631451bcab05280b29
2014-11-03T12:36:45Z
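The revert above narrows `Rangify` again: a type is turned into a `Range(min, max)` only when it is a subtype of the typer's `integer` type, and the extra `Integral32` escape hatch is removed. The toy Python sketch below shows that guard in isolation; the type model here is invented for illustration and is not V8's type system.

```python
from dataclasses import dataclass

@dataclass(frozen=True)
class RangeType:
    min: float
    max: float

@dataclass(frozen=True)
class Type:
    min: float
    max: float
    is_integer: bool  # stand-in for type->Is(t->integer)

def rangify(t):
    if isinstance(t, RangeType):
        return t            # already a range: shortcut
    if not t.is_integer:
        return t            # after the revert: give up unless integral
    return RangeType(t.min, t.max)

# A non-integer type passes through unchanged; an integer one collapses
# to its [min, max] range.
assert rangify(Type(0.5, 2.5, is_integer=False)) == Type(0.5, 2.5, is_integer=False)
assert rangify(Type(0, 10, is_integer=True)) == RangeType(0, 10)
```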
mmm a / include / swift / ABI / MetadataValues . h <nl> ppp b / include / swift / ABI / MetadataValues . h <nl> enum class FunctionMetadataConvention : uint8_t { <nl> template < typename int_type > <nl> class TargetFunctionTypeFlags { <nl> enum : int_type { <nl> - NumArgumentsMask = 0x00FFFFFFU , <nl> - ConventionMask = 0x0F000000U , <nl> - ConventionShift = 24U , <nl> - ThrowsMask = 0x10000000U , <nl> + NumParametersMask = 0x00FFFFFFU , <nl> + ConventionMask = 0x0F000000U , <nl> + ConventionShift = 25U , <nl> + ThrowsMask = 0x10000000U , <nl> + ParamFlagsMask = 0x01000000U , <nl> } ; <nl> int_type Data ; <nl> <nl> class TargetFunctionTypeFlags { <nl> public : <nl> constexpr TargetFunctionTypeFlags ( ) : Data ( 0 ) { } <nl> <nl> - constexpr TargetFunctionTypeFlags withNumArguments ( unsigned numArguments ) const { <nl> - return TargetFunctionTypeFlags ( ( Data & ~ NumArgumentsMask ) | numArguments ) ; <nl> + constexpr TargetFunctionTypeFlags <nl> + withNumParameters ( unsigned numParams ) const { <nl> + return TargetFunctionTypeFlags ( ( Data & ~ NumParametersMask ) | numParams ) ; <nl> } <nl> <nl> constexpr TargetFunctionTypeFlags < int_type > <nl> class TargetFunctionTypeFlags { <nl> return TargetFunctionTypeFlags < int_type > ( ( Data & ~ ThrowsMask ) | <nl> ( throws ? ThrowsMask : 0 ) ) ; <nl> } <nl> - <nl> - unsigned getNumArguments ( ) const { <nl> - return Data & NumArgumentsMask ; <nl> + <nl> + constexpr TargetFunctionTypeFlags < int_type > <nl> + withParameterFlags ( bool hasFlags ) const { <nl> + return TargetFunctionTypeFlags < int_type > ( ( Data & ~ ParamFlagsMask ) | <nl> + ( hasFlags ? ParamFlagsMask : 0 ) ) ; <nl> } <nl> - <nl> + <nl> + unsigned getNumParameters ( ) const { return Data & NumParametersMask ; } <nl> + <nl> FunctionMetadataConvention getConvention ( ) const { <nl> return FunctionMetadataConvention ( ( Data & ConventionMask ) > > ConventionShift ) ; <nl> } <nl> class TargetFunctionTypeFlags { <nl> bool throws ( ) const { <nl> return bool ( Data & ThrowsMask ) ; <nl> } <nl> - <nl> + <nl> + bool hasParameterFlags ( ) const { return bool ( Data & ParamFlagsMask ) ; } <nl> + <nl> int_type getIntValue ( ) const { <nl> return Data ; <nl> } <nl> class TargetFunctionTypeFlags { <nl> } ; <nl> using FunctionTypeFlags = TargetFunctionTypeFlags < size_t > ; <nl> <nl> + template < typename int_type > <nl> + class TargetParameterTypeFlags { <nl> + enum : int_type { <nl> + InOutMask = 1 < < 0 , <nl> + SharedMask = 1 < < 1 , <nl> + VariadicMask = 1 < < 2 , <nl> + } ; <nl> + int_type Data ; <nl> + <nl> + constexpr TargetParameterTypeFlags ( int_type Data ) : Data ( Data ) { } <nl> + <nl> + public : <nl> + constexpr TargetParameterTypeFlags ( ) : Data ( 0 ) { } <nl> + <nl> + constexpr TargetParameterTypeFlags < int_type > withInOut ( bool isInOut ) const { <nl> + return TargetParameterTypeFlags < int_type > ( ( Data & ~ InOutMask ) | <nl> + ( isInOut ? InOutMask : 0 ) ) ; <nl> + } <nl> + <nl> + constexpr TargetParameterTypeFlags < int_type > withShared ( bool isShared ) const { <nl> + return TargetParameterTypeFlags < int_type > ( ( Data & ~ SharedMask ) | <nl> + ( isShared ? SharedMask : 0 ) ) ; <nl> + } <nl> + <nl> + constexpr TargetParameterTypeFlags < int_type > <nl> + withVariadic ( bool isVariadic ) const { <nl> + return TargetParameterTypeFlags < int_type > ( ( Data & ~ VariadicMask ) | <nl> + ( isVariadic ? 
VariadicMask : 0 ) ) ; <nl> + } <nl> + <nl> + bool isNone ( ) const { return Data = = 0 ; } <nl> + bool isInOut ( ) const { return Data & InOutMask ; } <nl> + bool isShared ( ) const { return Data & SharedMask ; } <nl> + bool isVariadic ( ) const { return Data & VariadicMask ; } <nl> + <nl> + int_type getIntValue ( ) const { return Data ; } <nl> + <nl> + static TargetParameterTypeFlags < int_type > fromIntValue ( int_type Data ) { <nl> + return TargetParameterTypeFlags ( Data ) ; <nl> + } <nl> + <nl> + bool operator = = ( TargetParameterTypeFlags < int_type > other ) const { <nl> + return Data = = other . Data ; <nl> + } <nl> + bool operator ! = ( TargetParameterTypeFlags < int_type > other ) const { <nl> + return Data ! = other . Data ; <nl> + } <nl> + } ; <nl> + using ParameterFlags = TargetParameterTypeFlags < uint32_t > ; <nl> + <nl> / / / Field types and flags as represented in a nominal type ' s field / case type <nl> / / / vector . <nl> class FieldType { <nl> mmm a / include / swift / Reflection / TypeRef . h <nl> ppp b / include / swift / Reflection / TypeRef . h <nl> class FunctionTypeRef final : public TypeRef { <nl> for ( const auto & Param : Parameters ) { <nl> ID . addString ( Param . getLabel ( ) . str ( ) ) ; <nl> ID . addPointer ( Param . getType ( ) ) ; <nl> - ID . addInteger ( static_cast < uint32_t > ( Param . getFlags ( ) . toRaw ( ) ) ) ; <nl> + ID . addInteger ( static_cast < uint32_t > ( Param . getFlags ( ) . getIntValue ( ) ) ) ; <nl> } <nl> ID . addPointer ( Result ) ; <nl> ID . addInteger ( static_cast < uint64_t > ( Flags . getIntValue ( ) ) ) ; <nl> mmm a / include / swift / Remote / MetadataReader . h <nl> ppp b / include / swift / Remote / MetadataReader . h <nl> <nl> # ifndef SWIFT_REMOTE_METADATAREADER_H <nl> # define SWIFT_REMOTE_METADATAREADER_H <nl> <nl> - # include " swift / AST / Types . h " <nl> # include " swift / Runtime / Metadata . h " <nl> # include " swift / Remote / MemoryReader . h " <nl> # include " swift / Demangling / Demangler . h " <nl> namespace remote { <nl> template < typename BuiltType > class FunctionParam { <nl> StringRef Label ; <nl> BuiltType Type ; <nl> - ParameterTypeFlags Flags ; <nl> + ParameterFlags Flags ; <nl> <nl> - FunctionParam ( StringRef label , BuiltType type , ParameterTypeFlags flags ) <nl> + FunctionParam ( StringRef label , BuiltType type , ParameterFlags flags ) <nl> : Label ( label ) , Type ( type ) , Flags ( flags ) { } <nl> <nl> public : <nl> template < typename BuiltType > class FunctionParam { <nl> <nl> StringRef getLabel ( ) const { return Label ; } <nl> BuiltType getType ( ) const { return Type ; } <nl> - ParameterTypeFlags getFlags ( ) const { return Flags ; } <nl> + ParameterFlags getFlags ( ) const { return Flags ; } <nl> <nl> void setLabel ( StringRef label ) { Label = label ; } <nl> void setType ( BuiltType type ) { Type = type ; } <nl> template < typename BuiltType > class FunctionParam { <nl> void setVariadic ( ) { Flags = Flags . withVariadic ( true ) ; } <nl> void setShared ( ) { Flags = Flags . withShared ( true ) ; } <nl> void setInOut ( ) { Flags = Flags . 
withInOut ( true ) ; } <nl> + void setFlags ( ParameterFlags flags ) { Flags = flags ; } ; <nl> <nl> FunctionParam withLabel ( StringRef label ) const { <nl> return FunctionParam ( label , Type , Flags ) ; <nl> template < typename BuiltType > class FunctionParam { <nl> return FunctionParam ( Label , type , Flags ) ; <nl> } <nl> <nl> - FunctionParam withFlags ( ParameterTypeFlags flags ) const { <nl> + FunctionParam withFlags ( ParameterFlags flags ) const { <nl> return FunctionParam ( Label , Type , flags ) ; <nl> } <nl> } ; <nl> class MetadataReader { <nl> } <nl> case MetadataKind : : Function : { <nl> auto Function = cast < TargetFunctionTypeMetadata < Runtime > > ( Meta ) ; <nl> + auto * const parameters = Function - > getParameters ( ) ; <nl> <nl> std : : vector < FunctionParam < BuiltType > > Parameters ; <nl> - StoredPointer ArgumentAddress = MetadataAddress + <nl> - sizeof ( TargetFunctionTypeMetadata < Runtime > ) ; <nl> - for ( StoredPointer i = 0 ; i < Function - > getNumArguments ( ) ; + + i , <nl> - ArgumentAddress + = sizeof ( StoredPointer ) ) { <nl> - StoredPointer FlaggedArgumentAddress ; <nl> - if ( ! Reader - > readInteger ( RemoteAddress ( ArgumentAddress ) , <nl> - & FlaggedArgumentAddress ) ) <nl> + for ( unsigned i = 0 , n = Function - > getNumParameters ( ) ; i ! = n ; + + i ) { <nl> + StoredPointer ParamMetadata ; <nl> + if ( ! Reader - > readInteger ( RemoteAddress ( parameters + i ) , & ParamMetadata ) ) <nl> return BuiltType ( ) ; <nl> <nl> - FunctionParam < BuiltType > Param ; <nl> - <nl> - / / TODO : Use target - agnostic FlaggedPointer to mask this ! <nl> - const auto InOutMask = ( StoredPointer ) 1 ; <nl> - / / FIXME : Add import parameter related flags from metadata <nl> - if ( ( FlaggedArgumentAddress & InOutMask ) ! = 0 ) <nl> - Param . setInOut ( ) ; <nl> - <nl> - FlaggedArgumentAddress & = ~ InOutMask ; <nl> - if ( auto ParamTypeRef = readTypeFromMetadata ( FlaggedArgumentAddress ) ) { <nl> - Param . setType ( ParamTypeRef ) ; <nl> - Parameters . push_back ( std : : move ( Param ) ) ; <nl> - } else { <nl> + auto ParamTypeRef = readTypeFromMetadata ( ParamMetadata ) ; <nl> + if ( ! ParamTypeRef ) <nl> return BuiltType ( ) ; <nl> - } <nl> + <nl> + FunctionParam < BuiltType > Param ; <nl> + Param . setType ( ParamTypeRef ) ; <nl> + Param . setFlags ( Function - > getParameterFlags ( i ) ) ; <nl> + Parameters . push_back ( std : : move ( Param ) ) ; <nl> } <nl> <nl> auto Result = readTypeFromMetadata ( Function - > ResultType ) ; <nl> if ( ! Result ) <nl> return BuiltType ( ) ; <nl> <nl> - auto flags = FunctionTypeFlags ( ) . withConvention ( Function - > getConvention ( ) ) <nl> - . withThrows ( Function - > throws ( ) ) ; <nl> + auto flags = FunctionTypeFlags ( ) <nl> + . withConvention ( Function - > getConvention ( ) ) <nl> + . withThrows ( Function - > throws ( ) ) <nl> + . withParameterFlags ( Function - > hasParameterFlags ( ) ) ; <nl> auto BuiltFunction = <nl> Builder . 
createFunctionType ( Parameters , Result , flags ) ; <nl> TypeCache [ MetadataAddress ] = BuiltFunction ; <nl> class MetadataReader { <nl> return _readMetadata < TargetExistentialMetatypeMetadata > ( address ) ; <nl> case MetadataKind : : ForeignClass : <nl> return _readMetadata < TargetForeignClassMetadata > ( address ) ; <nl> - case MetadataKind : : Function : <nl> - return _readMetadata < TargetFunctionTypeMetadata > ( address ) ; <nl> + case MetadataKind : : Function : { <nl> + StoredSize flagsValue ; <nl> + auto flagsAddr = <nl> + address + TargetFunctionTypeMetadata < Runtime > : : OffsetToFlags ; <nl> + if ( ! Reader - > readInteger ( RemoteAddress ( flagsAddr ) , & flagsValue ) ) <nl> + return nullptr ; <nl> + <nl> + auto flags = <nl> + TargetFunctionTypeFlags < StoredSize > : : fromIntValue ( flagsValue ) ; <nl> + <nl> + auto totalSize = sizeof ( TargetFunctionTypeMetadata < Runtime > ) + <nl> + flags . getNumParameters ( ) * sizeof ( StoredPointer ) ; <nl> + <nl> + if ( flags . hasParameterFlags ( ) ) <nl> + totalSize + = flags . getNumParameters ( ) * sizeof ( uint32_t ) ; <nl> + <nl> + return _readMetadata ( address , totalSize ) ; <nl> + } <nl> case MetadataKind : : HeapGenericLocalVariable : <nl> return _readMetadata < TargetGenericBoxHeapMetadata > ( address ) ; <nl> case MetadataKind : : HeapLocalVariable : <nl> mmm a / include / swift / Runtime / Metadata . h <nl> ppp b / include / swift / Runtime / Metadata . h <nl> using EnumMetadata = TargetEnumMetadata < InProcess > ; <nl> template < typename Runtime > <nl> struct TargetFunctionTypeMetadata : public TargetMetadata < Runtime > { <nl> using StoredSize = typename Runtime : : StoredSize ; <nl> - <nl> - / / TODO : Make this target agnostic <nl> - using Argument = FlaggedPointer < const TargetMetadata < Runtime > * , 0 > ; <nl> + using Parameter = const TargetMetadata < Runtime > * ; <nl> <nl> TargetFunctionTypeFlags < StoredSize > Flags ; <nl> <nl> / / / The type metadata for the result type . <nl> ConstTargetMetadataPointer < Runtime , swift : : TargetMetadata > ResultType ; <nl> <nl> - TargetPointer < Runtime , Argument > getArguments ( ) { <nl> - return reinterpret_cast < TargetPointer < Runtime , Argument > > ( this + 1 ) ; <nl> + Parameter * getParameters ( ) { return reinterpret_cast < Parameter * > ( this + 1 ) ; } <nl> + <nl> + const Parameter * getParameters ( ) const { <nl> + return reinterpret_cast < const Parameter * > ( this + 1 ) ; <nl> } <nl> <nl> - TargetPointer < Runtime , const Argument > getArguments ( ) const { <nl> - return reinterpret_cast < TargetPointer < Runtime , const Argument > > ( this + 1 ) ; <nl> + ParameterFlags getParameterFlags ( unsigned index ) const { <nl> + assert ( index < getNumParameters ( ) ) ; <nl> + auto flags = hasParameterFlags ( ) ? getParameterFlags ( ) [ index ] : 0 ; <nl> + return ParameterFlags : : fromIntValue ( flags ) ; <nl> } <nl> - <nl> - StoredSize getNumArguments ( ) const { <nl> - return Flags . getNumArguments ( ) ; <nl> + <nl> + StoredSize getNumParameters ( ) const { <nl> + return Flags . getNumParameters ( ) ; <nl> } <nl> FunctionMetadataConvention getConvention ( ) const { <nl> return Flags . getConvention ( ) ; <nl> } <nl> bool throws ( ) const { return Flags . throws ( ) ; } <nl> + bool hasParameterFlags ( ) const { return Flags . 
hasParameterFlags ( ) ; } <nl> <nl> static constexpr StoredSize OffsetToFlags = sizeof ( TargetMetadata < Runtime > ) ; <nl> <nl> static bool classof ( const TargetMetadata < Runtime > * metadata ) { <nl> return metadata - > getKind ( ) = = MetadataKind : : Function ; <nl> } <nl> + <nl> + uint32_t * getParameterFlags ( ) { <nl> + return reinterpret_cast < uint32_t * > ( getParameters ( ) + getNumParameters ( ) ) ; <nl> + } <nl> + <nl> + const uint32_t * getParameterFlags ( ) const { <nl> + return reinterpret_cast < const uint32_t * > ( getParameters ( ) + <nl> + getNumParameters ( ) ) ; <nl> + } <nl> } ; <nl> using FunctionTypeMetadata = TargetFunctionTypeMetadata < InProcess > ; <nl> <nl> swift_getGenericWitnessTable ( GenericWitnessTable * genericTable , <nl> / / / \ brief Fetch a uniqued metadata for a function type . <nl> SWIFT_RUNTIME_EXPORT <nl> const FunctionTypeMetadata * <nl> - swift_getFunctionTypeMetadata ( const void * flagsArgsAndResult [ ] ) ; <nl> + swift_getFunctionTypeMetadata ( FunctionTypeFlags flags , <nl> + const Metadata * const * parameters , <nl> + const uint32_t * parameterFlags , <nl> + const Metadata * result ) ; <nl> <nl> SWIFT_RUNTIME_EXPORT <nl> const FunctionTypeMetadata * <nl> swift_getFunctionTypeMetadata1 ( FunctionTypeFlags flags , <nl> - const void * arg0 , <nl> - const Metadata * resultMetadata ) ; <nl> + const Metadata * arg0 , <nl> + const Metadata * result ) ; <nl> + <nl> + SWIFT_RUNTIME_EXPORT <nl> + const FunctionTypeMetadata * <nl> + swift_getFunctionTypeMetadata1WithFlags ( FunctionTypeFlags flags , <nl> + const Metadata * arg0 , <nl> + ParameterFlags flags0 , <nl> + const Metadata * result ) ; <nl> <nl> SWIFT_RUNTIME_EXPORT <nl> const FunctionTypeMetadata * <nl> swift_getFunctionTypeMetadata2 ( FunctionTypeFlags flags , <nl> - const void * arg0 , <nl> - const void * arg1 , <nl> - const Metadata * resultMetadata ) ; <nl> + const Metadata * arg0 , <nl> + const Metadata * arg1 , <nl> + const Metadata * result ) ; <nl> <nl> SWIFT_RUNTIME_EXPORT <nl> const FunctionTypeMetadata * <nl> - swift_getFunctionTypeMetadata3 ( FunctionTypeFlags flags , <nl> - const void * arg0 , <nl> - const void * arg1 , <nl> - const void * arg2 , <nl> - const Metadata * resultMetadata ) ; <nl> + swift_getFunctionTypeMetadata2WithFlags ( FunctionTypeFlags flags , <nl> + const Metadata * arg0 , <nl> + ParameterFlags flags0 , <nl> + const Metadata * arg1 , <nl> + ParameterFlags flags1 , <nl> + const Metadata * result ) ; <nl> + <nl> + SWIFT_RUNTIME_EXPORT <nl> + const FunctionTypeMetadata * swift_getFunctionTypeMetadata3 ( <nl> + FunctionTypeFlags flags , <nl> + const Metadata * arg0 , <nl> + const Metadata * arg1 , <nl> + const Metadata * arg2 , <nl> + const Metadata * result ) ; <nl> + <nl> + SWIFT_RUNTIME_EXPORT <nl> + const FunctionTypeMetadata * swift_getFunctionTypeMetadata3WithFlags ( <nl> + FunctionTypeFlags flags , <nl> + const Metadata * arg0 , <nl> + ParameterFlags flags0 , <nl> + const Metadata * arg1 , <nl> + ParameterFlags flags1 , <nl> + const Metadata * arg2 , <nl> + ParameterFlags flags2 , <nl> + const Metadata * result ) ; <nl> <nl> / / / \ brief Fetch a uniqued metadata for a thin function type . <nl> SWIFT_RUNTIME_EXPORT <nl> mmm a / include / swift / Runtime / RuntimeFunctions . def <nl> ppp b / include / swift / Runtime / RuntimeFunctions . 
def <nl> FUNCTION ( ArrayDestroy , swift_arrayDestroy , DefaultCC , <nl> ARGS ( OpaquePtrTy , SizeTy , TypeMetadataPtrTy ) , <nl> ATTRS ( NoUnwind ) ) <nl> <nl> - / / Metadata * swift_getFunctionTypeMetadata ( const void * * args ) ; <nl> + / / Metadata * swift_getFunctionTypeMetadata ( unsigned long flags , <nl> + / / const Metadata * * parameters , <nl> + / / const uint32_t * parameterFlags , <nl> + / / const Metadata * result ) ; <nl> FUNCTION ( GetFunctionMetadata , swift_getFunctionTypeMetadata , DefaultCC , <nl> RETURNS ( TypeMetadataPtrTy ) , <nl> - ARGS ( Int8PtrTy - > getPointerTo ( 0 ) ) , <nl> + ARGS ( SizeTy , <nl> + TypeMetadataPtrTy - > getPointerTo ( 0 ) , <nl> + Int32Ty - > getPointerTo ( 0 ) , <nl> + TypeMetadataPtrTy ) , <nl> ATTRS ( NoUnwind , ReadNone ) ) <nl> <nl> - / / Metadata * swift_getFunctionTypeMetadata1 ( unsigned long flags , const void * arg0 , const Metadata * resultMetadata ) ; <nl> + / / Metadata * swift_getFunctionTypeMetadata1 ( unsigned long flags , <nl> + / / const Metadata * arg0 , <nl> + / / const Metadata * resultMetadata ) ; <nl> FUNCTION ( GetFunctionMetadata1 , swift_getFunctionTypeMetadata1 , DefaultCC , <nl> + RETURNS ( TypeMetadataPtrTy ) , <nl> + ARGS ( SizeTy , TypeMetadataPtrTy , TypeMetadataPtrTy ) , <nl> + ATTRS ( NoUnwind , ReadNone ) ) <nl> + <nl> + / / Metadata * swift_getFunctionTypeMetadata1WithFlags ( unsigned long flags , <nl> + / / const Metadata * arg0 , <nl> + / / const uint32_t paramFlags , <nl> + / / const Metadata * resultMetadata ) ; <nl> + FUNCTION ( GetFunctionMetadata1WithFlags , swift_getFunctionTypeMetadata1WithFlags , <nl> + DefaultCC , <nl> RETURNS ( TypeMetadataPtrTy ) , <nl> - ARGS ( SizeTy , Int8PtrTy , TypeMetadataPtrTy ) , <nl> + ARGS ( SizeTy , TypeMetadataPtrTy , Int32Ty , TypeMetadataPtrTy ) , <nl> ATTRS ( NoUnwind , ReadNone ) ) <nl> <nl> - / / Metadata * swift_getFunctionTypeMetadata2 ( unsigned long flags , const void * arg0 , const void * arg1 , const Metadata * resultMetadata ) ; <nl> - FUNCTION ( GetFunctionMetadata2 , swift_getFunctionTypeMetadata2 , DefaultCC , <nl> + / / Metadata * swift_getFunctionTypeMetadata2 ( unsigned long flags , <nl> + / / const Metadata * arg0 , <nl> + / / const Metadata * arg1 , <nl> + / / const Metadata * resultMetadata ) ; <nl> + FUNCTION ( GetFunctionMetadata2 , swift_getFunctionTypeMetadata2 , <nl> + DefaultCC , <nl> + RETURNS ( TypeMetadataPtrTy ) , <nl> + ARGS ( SizeTy , TypeMetadataPtrTy , TypeMetadataPtrTy , TypeMetadataPtrTy ) , <nl> + ATTRS ( NoUnwind , ReadNone ) ) <nl> + <nl> + / / Metadata * swift_getFunctionTypeMetadata2WithFlags ( unsigned long flags , <nl> + / / const Metadata * arg0 , <nl> + / / const uint32_t flags0 , <nl> + / / const Metadata * arg1 , <nl> + / / const uint32_t flags1 , <nl> + / / const Metadata * resultMetadata ) ; <nl> + FUNCTION ( GetFunctionMetadata2WithFlags , swift_getFunctionTypeMetadata2WithFlags , <nl> + DefaultCC , <nl> RETURNS ( TypeMetadataPtrTy ) , <nl> - ARGS ( SizeTy , Int8PtrTy , Int8PtrTy , TypeMetadataPtrTy ) , <nl> + ARGS ( SizeTy , TypeMetadataPtrTy , Int32Ty , TypeMetadataPtrTy , Int32Ty , <nl> + TypeMetadataPtrTy ) , <nl> ATTRS ( NoUnwind , ReadNone ) ) <nl> <nl> - / / Metadata * swift_getFunctionTypeMetadata3 ( unsigned long flags , const void * arg0 , const void * arg1 , const void * arg2 , const Metadata * resultMetadata ) ; <nl> - FUNCTION ( GetFunctionMetadata3 , swift_getFunctionTypeMetadata3 , DefaultCC , <nl> + / / Metadata * swift_getFunctionTypeMetadata3 ( unsigned long flags , <nl> + / / const Metadata * arg0 
, <nl> + / / const Metadata * arg1 , <nl> + / / const Metadata * arg2 , <nl> + / / const Metadata * resultMetadata ) ; <nl> + FUNCTION ( GetFunctionMetadata3 , swift_getFunctionTypeMetadata3 , <nl> + DefaultCC , <nl> + RETURNS ( TypeMetadataPtrTy ) , <nl> + ARGS ( SizeTy , TypeMetadataPtrTy , TypeMetadataPtrTy , TypeMetadataPtrTy , <nl> + TypeMetadataPtrTy ) , <nl> + ATTRS ( NoUnwind , ReadNone ) ) <nl> + <nl> + / / Metadata * swift_getFunctionTypeMetadata3WithFlags ( unsigned long flags , <nl> + / / const Metadata * arg0 , <nl> + / / const uint32_t flags0 , <nl> + / / const Metadata * arg1 , <nl> + / / const uint32_t flags1 , <nl> + / / const Metadata * arg2 , <nl> + / / const uint32_t flags2 , <nl> + / / const Metadata * resultMetadata ) ; <nl> + FUNCTION ( GetFunctionMetadata3WithFlags , swift_getFunctionTypeMetadata3WithFlags , <nl> + DefaultCC , <nl> RETURNS ( TypeMetadataPtrTy ) , <nl> - ARGS ( SizeTy , Int8PtrTy , Int8PtrTy , Int8PtrTy , TypeMetadataPtrTy ) , <nl> + ARGS ( SizeTy , TypeMetadataPtrTy , Int32Ty , TypeMetadataPtrTy , Int32Ty , <nl> + TypeMetadataPtrTy , Int32Ty , TypeMetadataPtrTy ) , <nl> ATTRS ( NoUnwind , ReadNone ) ) <nl> <nl> / / Metadata * swift_getForeignTypeMetadata ( Metadata * nonUnique ) ; <nl> mmm a / lib / IRGen / GenMeta . cpp <nl> ppp b / lib / IRGen / GenMeta . cpp <nl> namespace { <nl> " metadata ref for generic function type " ) ; <nl> return llvm : : UndefValue : : get ( IGF . IGM . TypeMetadataPtrTy ) ; <nl> } <nl> - <nl> - llvm : : Value * extractAndMarkResultType ( CanFunctionType type ) { <nl> - / / If the function type throws , set the lower bit of the return type <nl> - / / address , so that we can carry this information over to the function <nl> - / / type metadata . <nl> - auto metadata = IGF . emitTypeMetadataRef ( type - > getResult ( ) - > <nl> - getCanonicalType ( ) ) ; <nl> - return metadata ; <nl> - } <nl> <nl> - llvm : : Value * extractAndMarkInOut ( AnyFunctionType : : CanParam param ) { <nl> - / / If the type is inout , get the metadata for its inner object type <nl> - / / instead , and then set the lowest bit to help the runtime unique <nl> - / / the metadata type for this function . <nl> + llvm : : Value * getFunctionParameterRef ( AnyFunctionType : : CanParam param ) { <nl> auto type = param . getType ( ) ; <nl> - if ( param . getParameterFlags ( ) . isInOut ( ) ) { <nl> - auto objectType = type - > getInOutObjectType ( ) - > getCanonicalType ( ) ; <nl> - auto metadata = IGF . emitTypeMetadataRef ( objectType ) ; <nl> - auto metadataInt = IGF . Builder . CreatePtrToInt ( metadata , IGF . IGM . SizeTy ) ; <nl> - auto inoutFlag = llvm : : ConstantInt : : get ( IGF . IGM . SizeTy , 1 ) ; <nl> - auto marked = IGF . Builder . CreateOr ( metadataInt , inoutFlag ) ; <nl> - return IGF . Builder . CreateIntToPtr ( marked , IGF . IGM . Int8PtrTy ) ; <nl> - } <nl> - <nl> - auto metadata = IGF . emitTypeMetadataRef ( type ) ; <nl> - return IGF . Builder . CreateBitCast ( metadata , IGF . IGM . Int8PtrTy ) ; <nl> + if ( param . getParameterFlags ( ) . isInOut ( ) ) <nl> + type = type - > getInOutObjectType ( ) - > getCanonicalType ( ) ; <nl> + return IGF . emitTypeMetadataRef ( type ) ; <nl> } <nl> <nl> llvm : : Value * visitFunctionType ( CanFunctionType type ) { <nl> if ( auto metatype = tryGetLocal ( type ) ) <nl> return metatype ; <nl> <nl> - auto resultMetadata = extractAndMarkResultType ( type ) ; <nl> + auto result = <nl> + IGF . emitTypeMetadataRef ( type - > getResult ( ) - > getCanonicalType ( ) ) ; <nl> <nl> auto params = type . 
getParams ( ) ; <nl> auto numParams = params . size ( ) ; <nl> <nl> + bool hasFlags = false ; <nl> + for ( auto param : params ) { <nl> + if ( ! param . getParameterFlags ( ) . isNone ( ) ) { <nl> + hasFlags = true ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> / / Map the convention to a runtime metadata value . <nl> FunctionMetadataConvention metadataConvention ; <nl> switch ( type - > getRepresentation ( ) ) { <nl> namespace { <nl> } <nl> <nl> auto flagsVal = FunctionTypeFlags ( ) <nl> - . withNumArguments ( numParams ) <nl> + . withNumParameters ( numParams ) <nl> . withConvention ( metadataConvention ) <nl> - . withThrows ( type - > throws ( ) ) ; <nl> + . withThrows ( type - > throws ( ) ) <nl> + . withParameterFlags ( hasFlags ) ; <nl> <nl> auto flags = llvm : : ConstantInt : : get ( IGF . IGM . SizeTy , <nl> flagsVal . getIntValue ( ) ) ; <nl> <nl> + auto collectParameters = <nl> + [ & ] ( llvm : : function_ref < void ( unsigned , llvm : : Value * , <nl> + ParameterFlags flags ) > <nl> + processor ) { <nl> + for ( auto index : indices ( params ) ) { <nl> + auto param = params [ index ] ; <nl> + auto flags = param . getParameterFlags ( ) ; <nl> + <nl> + auto parameterFlags = ParameterFlags ( ) <nl> + . withInOut ( flags . isInOut ( ) ) <nl> + . withShared ( flags . isShared ( ) ) <nl> + . withVariadic ( flags . isVariadic ( ) ) ; <nl> + <nl> + processor ( index , getFunctionParameterRef ( param ) , parameterFlags ) ; <nl> + } <nl> + } ; <nl> + <nl> + auto constructSimpleCall = <nl> + [ & ] ( llvm : : SmallVectorImpl < llvm : : Value * > & arguments ) <nl> + - > llvm : : Constant * { <nl> + arguments . push_back ( flags ) ; <nl> + <nl> + collectParameters ( [ & ] ( unsigned i , llvm : : Value * typeRef , <nl> + ParameterFlags flags ) { <nl> + arguments . push_back ( typeRef ) ; <nl> + if ( hasFlags ) <nl> + arguments . push_back ( <nl> + llvm : : ConstantInt : : get ( IGF . IGM . Int32Ty , flags . getIntValue ( ) ) ) ; <nl> + } ) ; <nl> + <nl> + arguments . push_back ( result ) ; <nl> + <nl> + switch ( params . size ( ) ) { <nl> + case 1 : <nl> + return hasFlags ? IGF . IGM . getGetFunctionMetadata1WithFlagsFn ( ) <nl> + : IGF . IGM . getGetFunctionMetadata1Fn ( ) ; <nl> + <nl> + case 2 : <nl> + return hasFlags ? IGF . IGM . getGetFunctionMetadata2WithFlagsFn ( ) <nl> + : IGF . IGM . getGetFunctionMetadata2Fn ( ) ; <nl> + <nl> + case 3 : <nl> + return hasFlags ? IGF . IGM . getGetFunctionMetadata3WithFlagsFn ( ) <nl> + : IGF . IGM . getGetFunctionMetadata3Fn ( ) ; <nl> + <nl> + default : <nl> + llvm_unreachable ( " supports only 1 / 2 / 3 parameter functions " ) ; <nl> + } <nl> + } ; <nl> + <nl> + auto getArrayFor = [ & ] ( llvm : : Type * elementType , unsigned size , <nl> + const llvm : : Twine & name ) - > Address { <nl> + auto arrayTy = llvm : : ArrayType : : get ( elementType , size ) ; <nl> + return IGF . createAlloca ( arrayTy , IGF . IGM . getPointerAlignment ( ) , name ) ; <nl> + } ; <nl> + <nl> switch ( numParams ) { <nl> - case 1 : { <nl> - auto arg0 = extractAndMarkInOut ( params [ 0 ] ) ; <nl> - auto call = IGF . Builder . CreateCall ( IGF . IGM . getGetFunctionMetadata1Fn ( ) , <nl> - { flags , arg0 , resultMetadata } ) ; <nl> + case 1 : <nl> + case 2 : <nl> + case 3 : { <nl> + llvm : : SmallVector < llvm : : Value * , 8 > arguments ; <nl> + auto * metadataFn = constructSimpleCall ( arguments ) ; <nl> + auto * call = IGF . Builder . 
CreateCall ( metadataFn , arguments ) ; <nl> call - > setDoesNotThrow ( ) ; <nl> return setLocal ( CanType ( type ) , call ) ; <nl> - } <nl> + } <nl> <nl> - case 2 : { <nl> - auto arg0 = extractAndMarkInOut ( params [ 0 ] ) ; <nl> - auto arg1 = extractAndMarkInOut ( params [ 1 ] ) ; <nl> - auto call = IGF . Builder . CreateCall ( <nl> - IGF . IGM . getGetFunctionMetadata2Fn ( ) , <nl> - { flags , arg0 , arg1 , resultMetadata } ) ; <nl> - call - > setDoesNotThrow ( ) ; <nl> - return setLocal ( CanType ( type ) , call ) ; <nl> - } <nl> + default : <nl> + auto * const Int32Ptr = IGF . IGM . Int32Ty - > getPointerTo ( ) ; <nl> <nl> - case 3 : { <nl> - auto arg0 = extractAndMarkInOut ( params [ 0 ] ) ; <nl> - auto arg1 = extractAndMarkInOut ( params [ 1 ] ) ; <nl> - auto arg2 = extractAndMarkInOut ( params [ 2 ] ) ; <nl> - auto call = IGF . Builder . CreateCall ( <nl> - IGF . IGM . getGetFunctionMetadata3Fn ( ) , <nl> - { flags , arg0 , arg1 , arg2 , <nl> - resultMetadata } ) ; <nl> - call - > setDoesNotThrow ( ) ; <nl> - return setLocal ( CanType ( type ) , call ) ; <nl> - } <nl> + llvm : : SmallVector < llvm : : Value * , 8 > arguments ; <nl> + arguments . push_back ( flags ) ; <nl> <nl> - default : <nl> - auto arrayTy = llvm : : ArrayType : : get ( IGF . IGM . Int8PtrTy , numParams + 2 ) ; <nl> - Address buffer = IGF . createAlloca ( arrayTy , <nl> - IGF . IGM . getPointerAlignment ( ) , <nl> - " function - arguments " ) ; <nl> - IGF . Builder . CreateLifetimeStart ( buffer , <nl> + Address parameters ; <nl> + if ( ! params . empty ( ) ) { <nl> + parameters = getArrayFor ( IGF . IGM . TypeMetadataPtrTy , numParams , <nl> + " function - parameters " ) ; <nl> + <nl> + IGF . Builder . CreateLifetimeStart ( parameters , <nl> IGF . IGM . getPointerSize ( ) * numParams ) ; <nl> - Address pointerToFirstArg = IGF . Builder . CreateStructGEP ( buffer , 0 , <nl> - Size ( 0 ) ) ; <nl> - Address flagsPtr = IGF . Builder . CreateBitCast ( pointerToFirstArg , <nl> - IGF . IGM . SizeTy - > getPointerTo ( ) ) ; <nl> - IGF . Builder . CreateStore ( flags , flagsPtr ) ; <nl> - <nl> - for ( auto i : indices ( params ) ) { <nl> - auto argMetadata = extractAndMarkInOut ( params [ i ] ) ; <nl> - Address argPtr = IGF . Builder . CreateStructGEP ( buffer , i + 1 , <nl> + <nl> + ConstantInitBuilder paramFlags ( IGF . IGM ) ; <nl> + auto flagsArr = paramFlags . beginArray ( ) ; <nl> + collectParameters ( [ & ] ( unsigned i , llvm : : Value * typeRef , <nl> + ParameterFlags flags ) { <nl> + auto argPtr = IGF . Builder . CreateStructGEP ( parameters , i , <nl> IGF . IGM . getPointerSize ( ) ) ; <nl> - IGF . Builder . CreateStore ( argMetadata , argPtr ) ; <nl> + IGF . Builder . CreateStore ( typeRef , argPtr ) ; <nl> + if ( hasFlags ) <nl> + flagsArr . addInt32 ( flags . getIntValue ( ) ) ; <nl> + } ) ; <nl> + <nl> + auto parametersPtr = <nl> + IGF . Builder . CreateStructGEP ( parameters , 0 , Size ( 0 ) ) ; <nl> + arguments . push_back ( parametersPtr . getAddress ( ) ) ; <nl> + <nl> + if ( hasFlags ) { <nl> + auto * flagsVar = flagsArr . finishAndCreateGlobal ( <nl> + " parameter - flags " , IGF . IGM . getPointerAlignment ( ) , <nl> + / * constant * / true ) ; <nl> + arguments . push_back ( IGF . Builder . CreateBitCast ( flagsVar , Int32Ptr ) ) ; <nl> + } else { <nl> + flagsArr . abandon ( ) ; <nl> + arguments . push_back ( llvm : : ConstantPointerNull : : get ( Int32Ptr ) ) ; <nl> } <nl> + } else { <nl> + arguments . push_back ( llvm : : ConstantPointerNull : : get ( <nl> + IGF . IGM . 
TypeMetadataPtrTy - > getPointerTo ( ) ) ) ; <nl> + arguments . push_back ( llvm : : ConstantPointerNull : : get ( Int32Ptr ) ) ; <nl> + } <nl> <nl> - Address resultPtr = IGF . Builder . CreateStructGEP ( <nl> - buffer , numParams + 1 , IGF . IGM . getPointerSize ( ) ) ; <nl> - resultPtr = IGF . Builder . CreateBitCast ( resultPtr , <nl> - IGF . IGM . TypeMetadataPtrTy - > getPointerTo ( ) ) ; <nl> - IGF . Builder . CreateStore ( resultMetadata , resultPtr ) ; <nl> + arguments . push_back ( result ) ; <nl> <nl> - auto call = IGF . Builder . CreateCall ( IGF . IGM . getGetFunctionMetadataFn ( ) , <nl> - pointerToFirstArg . getAddress ( ) ) ; <nl> - call - > setDoesNotThrow ( ) ; <nl> + auto call = IGF . Builder . CreateCall ( IGF . IGM . getGetFunctionMetadataFn ( ) , <nl> + arguments ) ; <nl> + call - > setDoesNotThrow ( ) ; <nl> <nl> - IGF . Builder . CreateLifetimeEnd ( buffer , <nl> + if ( parameters . isValid ( ) ) <nl> + IGF . Builder . CreateLifetimeEnd ( parameters , <nl> IGF . IGM . getPointerSize ( ) * numParams ) ; <nl> <nl> - return setLocal ( type , call ) ; <nl> + return setLocal ( type , call ) ; <nl> } <nl> } <nl> <nl> mmm a / lib / RemoteAST / RemoteAST . cpp <nl> ppp b / lib / RemoteAST / RemoteAST . cpp <nl> class RemoteASTTypeBuilder { <nl> <nl> auto label = Ctx . getIdentifier ( param . getLabel ( ) ) ; <nl> auto flags = param . getFlags ( ) ; <nl> - funcParams . push_back ( AnyFunctionType : : Param ( type , label , flags ) ) ; <nl> + auto parameterFlags = ParameterTypeFlags ( ) <nl> + . withInOut ( flags . isInOut ( ) ) <nl> + . withShared ( flags . isShared ( ) ) <nl> + . withVariadic ( flags . isVariadic ( ) ) ; <nl> + <nl> + funcParams . push_back ( AnyFunctionType : : Param ( type , label , parameterFlags ) ) ; <nl> } <nl> <nl> return FunctionType : : get ( funcParams , output , einfo ) ; <nl> mmm a / stdlib / public / Reflection / TypeRef . cpp <nl> ppp b / stdlib / public / Reflection / TypeRef . cpp <nl> class PrintTypeRef : public TypeRefVisitor < PrintTypeRef , void > { <nl> if ( flags . isShared ( ) ) <nl> printHeader ( " shared " ) ; <nl> <nl> - if ( flags . isEscaping ( ) ) <nl> - printHeader ( " escaping " ) ; <nl> - <nl> printRec ( param . getType ( ) ) ; <nl> <nl> if ( ! flags . isNone ( ) ) { <nl> mmm a / stdlib / public / runtime / Casting . cpp <nl> ppp b / stdlib / public / runtime / Casting . cpp <nl> static bool _dynamicCastToFunction ( OpaqueValue * dest , <nl> / / The result and argument types must match . <nl> if ( srcFn - > ResultType ! = targetFn - > ResultType ) <nl> return _fail ( src , srcType , targetType , flags ) ; <nl> - if ( srcFn - > getNumArguments ( ) ! = targetFn - > getNumArguments ( ) ) <nl> + if ( srcFn - > getNumParameters ( ) ! = targetFn - > getNumParameters ( ) ) <nl> return _fail ( src , srcType , targetType , flags ) ; <nl> - for ( unsigned i = 0 , e = srcFn - > getNumArguments ( ) ; i < e ; + + i ) <nl> - if ( srcFn - > getArguments ( ) [ i ] ! = targetFn - > getArguments ( ) [ i ] ) <nl> + <nl> + if ( srcFn - > hasParameterFlags ( ) ! = targetFn - > hasParameterFlags ( ) ) <nl> + return _fail ( src , srcType , targetType , flags ) ; <nl> + <nl> + for ( unsigned i = 0 , e = srcFn - > getNumParameters ( ) ; i < e ; + + i ) { <nl> + if ( srcFn - > getParameters ( ) [ i ] ! = targetFn - > getParameters ( ) [ i ] | | <nl> + srcFn - > getParameterFlags ( i ) ! 
= targetFn - > getParameterFlags ( i ) ) <nl> return _fail ( src , srcType , targetType , flags ) ; <nl> - <nl> + } <nl> + <nl> return _succeed ( dest , src , srcType , flags ) ; <nl> } <nl> <nl> mmm a / stdlib / public / runtime / Demangle . cpp <nl> ppp b / stdlib / public / runtime / Demangle . cpp <nl> swift : : _swift_buildDemanglingForMetadata ( const Metadata * type , <nl> } <nl> <nl> std : : vector < NodePointer > inputs ; <nl> - for ( unsigned i = 0 , e = func - > getNumArguments ( ) ; i < e ; + + i ) { <nl> - auto arg = func - > getArguments ( ) [ i ] ; <nl> - auto input = _swift_buildDemanglingForMetadata ( arg . getPointer ( ) , Dem ) ; <nl> - if ( arg . getFlag ( ) ) { <nl> + for ( unsigned i = 0 , e = func - > getNumParameters ( ) ; i < e ; + + i ) { <nl> + auto param = func - > getParameters ( ) [ i ] ; <nl> + auto flags = func - > getParameterFlags ( i ) ; <nl> + auto input = _swift_buildDemanglingForMetadata ( param , Dem ) ; <nl> + <nl> + if ( flags . isInOut ( ) ) { <nl> NodePointer inout = Dem . createNode ( Node : : Kind : : InOut ) ; <nl> inout - > addChild ( input , Dem ) ; <nl> input = inout ; <nl> + } else if ( flags . isShared ( ) ) { <nl> + NodePointer shared = Dem . createNode ( Node : : Kind : : Shared ) ; <nl> + shared - > addChild ( input , Dem ) ; <nl> + input = shared ; <nl> } <nl> inputs . push_back ( input ) ; <nl> } <nl> mmm a / stdlib / public / runtime / Metadata . cpp <nl> ppp b / stdlib / public / runtime / Metadata . cpp <nl> <nl> / / <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> <nl> - # include " llvm / Support / MathExtras . h " <nl> - # include " swift / Demangling / Demangler . h " <nl> + # include " swift / Runtime / Metadata . h " <nl> + # include " MetadataCache . h " <nl> # include " swift / Basic / LLVM . h " <nl> - # include " swift / Basic / Range . h " <nl> # include " swift / Basic / Lazy . h " <nl> + # include " swift / Basic / Range . h " <nl> + # include " swift / Demangling / Demangler . h " <nl> # include " swift / Runtime / Casting . h " <nl> # include " swift / Runtime / HeapObject . h " <nl> - # include " swift / Runtime / Metadata . h " <nl> # include " swift / Runtime / Mutex . h " <nl> # include " swift / Strings . h " <nl> - # include " MetadataCache . h " <nl> + # include " llvm / Support / MathExtras . h " <nl> + # include " llvm / Support / PointerLikeTypeTraits . 
h " <nl> # include < algorithm > <nl> - # include < condition_variable > <nl> - # include < new > <nl> # include < cctype > <nl> + # include < condition_variable > <nl> # include < iostream > <nl> + # include < new > <nl> # if defined ( _WIN32 ) <nl> # define WIN32_LEAN_AND_MEAN <nl> / / Avoid defining macro max ( ) , min ( ) which conflict with std : : max ( ) , std : : min ( ) <nl> class FunctionCacheEntry { <nl> FullMetadata < FunctionTypeMetadata > Data ; <nl> <nl> struct Key { <nl> - const void * const * FlagsArgsAndResult ; <nl> + const FunctionTypeFlags Flags ; <nl> <nl> - FunctionTypeFlags getFlags ( ) const { <nl> - return FunctionTypeFlags : : fromIntValue ( size_t ( FlagsArgsAndResult [ 0 ] ) ) ; <nl> - } <nl> + const Metadata * const * Parameters ; <nl> + const uint32_t * ParameterFlags ; <nl> + const Metadata * Result ; <nl> + <nl> + Key ( FunctionTypeFlags flags , <nl> + const Metadata * const * params , <nl> + const uint32_t * paramFlags , <nl> + const Metadata * result ) <nl> + : Flags ( flags ) , Parameters ( params ) , ParameterFlags ( paramFlags ) , <nl> + Result ( result ) { } <nl> + <nl> + FunctionTypeFlags getFlags ( ) const { return Flags ; } <nl> + const Metadata * const * getParameters ( ) const { return Parameters ; } <nl> + const Metadata * getResult ( ) const { return Result ; } <nl> <nl> - const Metadata * getResult ( ) const { <nl> - auto opaqueResult = FlagsArgsAndResult [ getFlags ( ) . getNumArguments ( ) + 1 ] ; <nl> - return reinterpret_cast < const Metadata * > ( opaqueResult ) ; <nl> + const uint32_t * getParameterFlags ( ) const { <nl> + return ParameterFlags ; <nl> } <nl> <nl> - const void * const * getArguments ( ) const { <nl> - return getFlags ( ) . getNumArguments ( ) = = 0 <nl> - ? nullptr : & FlagsArgsAndResult [ 1 ] ; <nl> + : : ParameterFlags getParameterFlags ( unsigned index ) const { <nl> + assert ( index < Flags . getNumParameters ( ) ) ; <nl> + auto flags = Flags . hasParameterFlags ( ) ? ParameterFlags [ index ] : 0 ; <nl> + return ParameterFlags : : fromIntValue ( flags ) ; <nl> } <nl> } ; <nl> <nl> class FunctionCacheEntry { <nl> if ( auto result = comparePointers ( key . getResult ( ) , Data . ResultType ) ) <nl> return result ; <nl> <nl> - for ( unsigned i = 0 , e = keyFlags . getNumArguments ( ) ; i ! = e ; + + i ) { <nl> + for ( unsigned i = 0 , e = keyFlags . getNumParameters ( ) ; i ! = e ; + + i ) { <nl> + if ( auto result = comparePointers ( key . getParameters ( ) [ i ] , <nl> + Data . getParameters ( ) [ i ] ) ) <nl> + return result ; <nl> + <nl> if ( auto result = <nl> - comparePointers ( key . getArguments ( ) [ i ] , <nl> - Data . getArguments ( ) [ i ] . getOpaqueValue ( ) ) ) <nl> + compareIntegers ( key . getParameterFlags ( i ) . getIntValue ( ) , <nl> + Data . getParameterFlags ( i ) . getIntValue ( ) ) ) <nl> return result ; <nl> } <nl> <nl> return 0 ; <nl> } <nl> - <nl> static size_t getExtraAllocationSize ( Key key ) { <nl> - return key . getFlags ( ) . getNumArguments ( ) <nl> - * sizeof ( FunctionTypeMetadata : : Argument ) ; <nl> + return getExtraAllocationSize ( key . Flags ) ; <nl> } <nl> + <nl> size_t getExtraAllocationSize ( ) const { <nl> - return Data . Flags . getNumArguments ( ) <nl> - * sizeof ( FunctionTypeMetadata : : Argument ) ; <nl> + return getExtraAllocationSize ( Data . Flags ) ; <nl> + } <nl> + <nl> + static size_t getExtraAllocationSize ( const FunctionTypeFlags flags ) { <nl> + const auto numParams = flags . 
getNumParameters ( ) ; <nl> + auto size = numParams * sizeof ( FunctionTypeMetadata : : Parameter ) ; <nl> + if ( flags . hasParameterFlags ( ) ) <nl> + size + = numParams * sizeof ( uint32_t ) ; <nl> + <nl> + const auto alignment = sizeof ( void * ) ; <nl> + return ( size + alignment - 1 ) & ~ ( alignment - 1 ) ; <nl> } <nl> } ; <nl> <nl> static SimpleGlobalCache < FunctionCacheEntry > FunctionTypes ; <nl> <nl> const FunctionTypeMetadata * <nl> swift : : swift_getFunctionTypeMetadata1 ( FunctionTypeFlags flags , <nl> - const void * arg0 , <nl> + const Metadata * arg0 , <nl> const Metadata * result ) { <nl> - assert ( flags . getNumArguments ( ) = = 1 <nl> + assert ( flags . getNumParameters ( ) = = 1 <nl> + & & " wrong number of arguments in function metadata flags ? ! " ) ; <nl> + const Metadata * parameters [ ] = { arg0 } ; <nl> + return swift_getFunctionTypeMetadata ( flags , parameters , nullptr , result ) ; <nl> + } <nl> + <nl> + const FunctionTypeMetadata * <nl> + swift : : swift_getFunctionTypeMetadata1WithFlags ( FunctionTypeFlags flags , <nl> + const Metadata * arg0 , <nl> + ParameterFlags flags0 , <nl> + const Metadata * result ) { <nl> + assert ( flags . getNumParameters ( ) = = 1 <nl> & & " wrong number of arguments in function metadata flags ? ! " ) ; <nl> - const void * flagsArgsAndResult [ ] = { <nl> - reinterpret_cast < const void * > ( flags . getIntValue ( ) ) , <nl> - arg0 , <nl> - static_cast < const void * > ( result ) <nl> - } ; <nl> - return swift_getFunctionTypeMetadata ( flagsArgsAndResult ) ; <nl> - } <nl> - const FunctionTypeMetadata * <nl> + const Metadata * parameters [ ] = { arg0 } ; <nl> + const uint32_t parameterFlags [ ] = { flags0 . getIntValue ( ) } ; <nl> + return swift_getFunctionTypeMetadata ( flags , <nl> + parameters , <nl> + parameterFlags , <nl> + result ) ; <nl> + } <nl> + <nl> + const FunctionTypeMetadata * <nl> swift : : swift_getFunctionTypeMetadata2 ( FunctionTypeFlags flags , <nl> - const void * arg0 , <nl> - const void * arg1 , <nl> + const Metadata * arg0 , <nl> + const Metadata * arg1 , <nl> const Metadata * result ) { <nl> - assert ( flags . getNumArguments ( ) = = 2 <nl> + assert ( flags . getNumParameters ( ) = = 2 <nl> + & & " wrong number of arguments in function metadata flags ? ! " ) ; <nl> + const Metadata * parameters [ ] = { arg0 , arg1 } ; <nl> + return swift_getFunctionTypeMetadata ( flags , parameters , nullptr , result ) ; <nl> + } <nl> + <nl> + const FunctionTypeMetadata * <nl> + swift : : swift_getFunctionTypeMetadata2WithFlags ( FunctionTypeFlags flags , <nl> + const Metadata * arg0 , <nl> + ParameterFlags flags0 , <nl> + const Metadata * arg1 , <nl> + ParameterFlags flags1 , <nl> + const Metadata * result ) { <nl> + assert ( flags . getNumParameters ( ) = = 2 <nl> & & " wrong number of arguments in function metadata flags ? ! " ) ; <nl> - const void * flagsArgsAndResult [ ] = { <nl> - reinterpret_cast < const void * > ( flags . getIntValue ( ) ) , <nl> - arg0 , <nl> - arg1 , <nl> - static_cast < const void * > ( result ) <nl> - } ; <nl> - return swift_getFunctionTypeMetadata ( flagsArgsAndResult ) ; <nl> - } <nl> - const FunctionTypeMetadata * <nl> + const Metadata * parameters [ ] = { arg0 , arg1 } ; <nl> + const uint32_t parameterFlags [ ] = { <nl> + flags0 . getIntValue ( ) , <nl> + flags1 . 
getIntValue ( ) <nl> + } ; <nl> + return swift_getFunctionTypeMetadata ( flags , <nl> + parameters , <nl> + parameterFlags , <nl> + result ) ; <nl> + } <nl> + <nl> + const FunctionTypeMetadata * <nl> swift : : swift_getFunctionTypeMetadata3 ( FunctionTypeFlags flags , <nl> - const void * arg0 , <nl> - const void * arg1 , <nl> - const void * arg2 , <nl> + const Metadata * arg0 , <nl> + const Metadata * arg1 , <nl> + const Metadata * arg2 , <nl> const Metadata * result ) { <nl> - assert ( flags . getNumArguments ( ) = = 3 <nl> + assert ( flags . getNumParameters ( ) = = 3 <nl> & & " wrong number of arguments in function metadata flags ? ! " ) ; <nl> - const void * flagsArgsAndResult [ ] = { <nl> - reinterpret_cast < const void * > ( flags . getIntValue ( ) ) , <nl> - arg0 , <nl> - arg1 , <nl> - arg2 , <nl> - static_cast < const void * > ( result ) <nl> - } ; <nl> - return swift_getFunctionTypeMetadata ( flagsArgsAndResult ) ; <nl> + const Metadata * parameters [ ] = { arg0 , arg1 , arg2 } ; <nl> + return swift_getFunctionTypeMetadata ( flags , parameters , nullptr , result ) ; <nl> + } <nl> + <nl> + const FunctionTypeMetadata * <nl> + swift : : swift_getFunctionTypeMetadata3WithFlags ( FunctionTypeFlags flags , <nl> + const Metadata * arg0 , <nl> + ParameterFlags flags0 , <nl> + const Metadata * arg1 , <nl> + ParameterFlags flags1 , <nl> + const Metadata * arg2 , <nl> + ParameterFlags flags2 , <nl> + const Metadata * result ) { <nl> + assert ( flags . getNumParameters ( ) = = 3 <nl> + & & " wrong number of arguments in function metadata flags ? ! " ) ; <nl> + const Metadata * parameters [ ] = { arg0 , arg1 , arg2 } ; <nl> + const uint32_t parameterFlags [ ] = { <nl> + flags0 . getIntValue ( ) , <nl> + flags1 . getIntValue ( ) , <nl> + flags2 . getIntValue ( ) <nl> + } ; <nl> + return swift_getFunctionTypeMetadata ( flags , <nl> + parameters , <nl> + parameterFlags , <nl> + result ) ; <nl> } <nl> <nl> const FunctionTypeMetadata * <nl> - swift : : swift_getFunctionTypeMetadata ( const void * flagsArgsAndResult [ ] ) { <nl> - FunctionCacheEntry : : Key key = { flagsArgsAndResult } ; <nl> + swift : : swift_getFunctionTypeMetadata ( FunctionTypeFlags flags , <nl> + const Metadata * const * parameters , <nl> + const uint32_t * parameterFlags , <nl> + const Metadata * result ) { <nl> + FunctionCacheEntry : : Key key = { flags , parameters , parameterFlags , result } ; <nl> return & FunctionTypes . getOrInsert ( key ) . first - > Data ; <nl> } <nl> <nl> FunctionCacheEntry : : FunctionCacheEntry ( Key key ) { <nl> break ; <nl> } <nl> <nl> - unsigned numArguments = flags . getNumArguments ( ) ; <nl> + unsigned numParameters = flags . getNumParameters ( ) ; <nl> <nl> Data . setKind ( MetadataKind : : Function ) ; <nl> Data . Flags = flags ; <nl> Data . ResultType = key . getResult ( ) ; <nl> <nl> - for ( size_t i = 0 ; i < numArguments ; + + i ) { <nl> - auto opaqueArg = key . getArguments ( ) [ i ] ; <nl> - auto arg = FunctionTypeMetadata : : Argument : : getFromOpaqueValue ( opaqueArg ) ; <nl> - Data . getArguments ( ) [ i ] = arg ; <nl> + for ( unsigned i = 0 ; i < numParameters ; + + i ) { <nl> + Data . getParameters ( ) [ i ] = key . getParameters ( ) [ i ] ; <nl> + if ( flags . hasParameterFlags ( ) ) <nl> + Data . getParameterFlags ( ) [ i ] = key . getParameterFlags ( i ) . getIntValue ( ) ; <nl> } <nl> } <nl> <nl> mmm a / test / IRGen / c_function_pointer . sil <nl> ppp b / test / IRGen / c_function_pointer . sil <nl> entry : <nl> <nl> / / CHECK - LABEL : define linkonce_odr hidden % swift . 
type * @ _T0yyXCMa ( ) <nl> / / - - 0x3000001 - - C convention , 1 argument <nl> - / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata ( i8 * * % 3 ) <nl> - <nl> + / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata ( [ [ WORD : i ( 32 | 64 ) ] ] 100663296 , % swift . type * * null , i32 * null , % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) ) <nl> mmm a / test / IRGen / dynamic_cast_functions . swift <nl> ppp b / test / IRGen / dynamic_cast_functions . swift <nl> let t2 : Any . Type = ( ( ( Int , Int ) ) - > ( ) ) . self <nl> <nl> / / CHECK : ok <nl> print ( ( t1 = = t2 ) ? " fail " : " ok " ) <nl> + <nl> + let i : ( inout Int ) - > Void = { _ in } <nl> + let j : ( __shared Int ) - > Void = { _ in } <nl> + let k : ( Int , inout Int ) - > Void = { _ , _ in } <nl> + let l : ( inout Int , Float , inout String ) - > Void = { _ , _ , _ in } <nl> + let m : ( __shared Int , String , inout Float , Double ) - > Void = { _ , _ , _ , _ in } <nl> + <nl> + let i_any : Any = i <nl> + let j_any : Any = j <nl> + let k_any : Any = k <nl> + let l_any : Any = l <nl> + let m_any : Any = m <nl> + <nl> + / / CHECK : ok <nl> + print ( ( i_any as ? ( Int ) - > Void ) ! = nil ? " fail " : " ok " ) <nl> + / / CHECK : ok <nl> + print ( ( i_any as ? ( __shared Int ) - > Void ) ! = nil ? " fail " : " ok " ) <nl> + / / CHECK : ok <nl> + print ( ( i_any as ? ( inout Int ) - > Void ) ! = nil ? " ok " : " fail " ) <nl> + <nl> + / / CHECK : ok <nl> + print ( ( j_any as ? ( Int ) - > Void ) ! = nil ? " fail " : " ok " ) <nl> + / / CHECK : ok <nl> + print ( ( j_any as ? ( inout Int ) - > Void ) ! = nil ? " fail " : " ok " ) <nl> + / / CHECK : ok <nl> + print ( ( j_any as ? ( __shared Int ) - > Void ) ! = nil ? " ok " : " fail " ) <nl> + <nl> + / / CHECK : ok <nl> + print ( ( k_any as ? ( Int , Int ) - > Void ) ! = nil ? " fail " : " ok " ) <nl> + / / CHECK : ok <nl> + print ( ( k_any as ? ( Int , inout Int ) - > Void ) ! = nil ? " ok " : " fail " ) <nl> + / / CHECK : ok <nl> + print ( ( k_any as ? ( inout Int , Int ) - > Void ) ! = nil ? " fail " : " ok " ) <nl> + / / CHECK : ok <nl> + print ( ( k_any as ? ( inout Int , __shared Int ) - > Void ) ! = nil ? " fail " : " ok " ) <nl> + <nl> + / / CHECK : ok <nl> + print ( ( l_any as ? ( Int , Float , String ) - > Void ) ! = nil ? " fail " : " ok " ) <nl> + / / CHECK : ok <nl> + print ( ( l_any as ? ( Int , Float , inout String ) - > Void ) ! = nil ? " fail " : " ok " ) <nl> + / / CHECK : ok <nl> + print ( ( l_any as ? ( Int , inout Float , String ) - > Void ) ! = nil ? " fail " : " ok " ) <nl> + / / CHECK : ok <nl> + print ( ( l_any as ? ( inout Int , Float , String ) - > Void ) ! = nil ? " fail " : " ok " ) <nl> + / / CHECK : ok <nl> + print ( ( l_any as ? ( inout Int , inout Float , String ) - > Void ) ! = nil ? " fail " : " ok " ) <nl> + / / CHECK : ok <nl> + print ( ( l_any as ? ( inout Int , Float , __shared String ) - > Void ) ! = nil ? " fail " : " ok " ) <nl> + / / CHECK : ok <nl> + print ( ( l_any as ? ( inout Int , Float , inout String ) - > Void ) ! = nil ? " ok " : " fail " ) <nl> + <nl> + / / CHECK : ok <nl> + print ( ( m_any as ? ( Int , String , Float , Double ) - > Void ) ! = nil ? " fail " : " ok " ) <nl> + / / CHECK : ok <nl> + print ( ( m_any as ? ( Int , String , Float , inout Double ) - > Void ) ! = nil ? " fail " : " ok " ) <nl> + / / CHECK : ok <nl> + print ( ( m_any as ? ( Int , String , Float , __shared Double ) - > Void ) ! = nil ? 
" fail " : " ok " ) <nl> + / / CHECK : ok <nl> + print ( ( m_any as ? ( Int , String , inout Float , Double ) - > Void ) ! = nil ? " fail " : " ok " ) <nl> + / / CHECK : ok <nl> + print ( ( m_any as ? ( Int , __shared String , Float , Double ) - > Void ) ! = nil ? " fail " : " ok " ) <nl> + / / CHECK : ok <nl> + print ( ( m_any as ? ( inout Int , String , __shared Float , Double ) - > Void ) ! = nil ? " fail " : " ok " ) <nl> + / / CHECK : ok <nl> + print ( ( m_any as ? ( __shared Int , String , Float , inout Double ) - > Void ) ! = nil ? " fail " : " ok " ) <nl> + / / CHECK : ok <nl> + print ( ( m_any as ? ( Int , __shared String , inout Float , Double ) - > Void ) ! = nil ? " fail " : " ok " ) <nl> + / / CHECK : ok <nl> + print ( ( m_any as ? ( __shared Int , String , inout Float , Double ) - > Void ) ! = nil ? " ok " : " fail " ) <nl> mmm a / test / IRGen / function_metadata . swift <nl> ppp b / test / IRGen / function_metadata . swift <nl> func arch < F > ( _ f : F ) { } <nl> <nl> / / CHECK : define hidden swiftcc void @ _T017function_metadata9test_archyyF ( ) <nl> func test_arch ( ) { <nl> - / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata ( i8 * * % 3 ) { { # [ 0 - 9 ] + } } <nl> + / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata ( [ [ WORD : i ( 32 | 64 ) ] ] 0 , % swift . type * * null , i32 * null , % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) ) <nl> arch ( { ( ) - > ( ) in } ) <nl> <nl> - / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata1 ( [ [ WORD : i ( 32 | 64 ) ] ] 1 , i8 * bitcast ( % swift . type * @ _T0SiN to i8 * ) , % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) ) { { # [ 0 - 9 ] + } } <nl> + / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata1 ( [ [ WORD : i ( 32 | 64 ) ] ] 1 , % swift . type * @ _T0SiN , % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) ) { { # [ 0 - 9 ] + } } <nl> arch ( { ( x : Int ) - > ( ) in } ) <nl> <nl> - / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata1 ( [ [ WORD ] ] 1 , i8 * bitcast ( % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) to i8 * ) , % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) ) <nl> + / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata1 ( [ [ WORD ] ] 1 , % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) , % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) ) <nl> arch ( { ( _ : ( ) ) - > ( ) in } ) <nl> <nl> - / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata1 ( [ [ WORD ] ] 1 , i8 * inttoptr ( [ [ WORD ] ] or ( [ [ WORD ] ] ptrtoint ( % swift . type * @ _T0SiN to [ [ WORD ] ] ) , [ [ WORD ] ] 1 ) to i8 * ) , % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) ) <nl> + / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata1WithFlags ( [ [ WORD ] ] 16777217 , % swift . type * @ _T0SiN , i32 1 , % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) ) <nl> arch ( { ( x : inout Int ) - > ( ) in } ) <nl> <nl> - / / CHECK : call % swift . 
type * @ swift_getFunctionTypeMetadata1 ( [ [ WORD ] ] 1 , i8 * % 3 , % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) ) <nl> + / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata1 ( [ [ WORD ] ] 1 , % swift . type * % 2 , % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) ) <nl> arch ( { ( x : ( Int , Float ) ) - > ( ) in } ) <nl> <nl> - / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata2 ( [ [ WORD ] ] 2 , i8 * inttoptr ( [ [ WORD ] ] or ( [ [ WORD ] ] ptrtoint ( % swift . type * @ _T0SiN to [ [ WORD ] ] ) , [ [ WORD ] ] 1 ) to i8 * ) , i8 * bitcast ( % swift . type * @ _T0SiN to i8 * ) , % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) ) <nl> + / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata2WithFlags ( [ [ WORD ] ] 16777218 , % swift . type * @ _T0SiN , i32 1 , % swift . type * @ _T0SiN , i32 0 , % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) ) <nl> arch ( { ( x : inout Int , y : Int ) - > ( ) in } ) <nl> <nl> - / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata2 ( [ [ WORD ] ] 2 , i8 * bitcast ( % swift . type * @ _T0SfN to i8 * ) , i8 * bitcast ( % swift . type * @ _T0SiN to i8 * ) , % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) ) <nl> + / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata2 ( [ [ WORD ] ] 2 , % swift . type * @ _T0SfN , % swift . type * @ _T0SiN , % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) ) <nl> arch ( { ( a : Float , b : Int ) - > ( ) in } ) <nl> <nl> - / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata3 ( [ [ WORD ] ] 3 , i8 * inttoptr ( [ [ WORD ] ] or ( [ [ WORD ] ] ptrtoint ( % swift . type * @ _T0SiN to [ [ WORD ] ] ) , [ [ WORD ] ] 1 ) to i8 * ) , i8 * bitcast ( % swift . type * @ _T0SfN to i8 * ) , i8 * bitcast ( % swift . type * @ _T0SSN to i8 * ) , % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) ) <nl> + / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata3WithFlags ( [ [ WORD ] ] 16777219 , % swift . type * @ _T0SiN , i32 1 , % swift . type * @ _T0SfN , i32 0 , % swift . type * @ _T0SSN , i32 0 , % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) ) <nl> arch ( { ( x : inout Int , y : Float , z : String ) - > ( ) in } ) <nl> <nl> - / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata3 ( [ [ WORD ] ] 3 , i8 * bitcast ( % swift . type * @ _T0SfN to i8 * ) , i8 * bitcast ( % swift . type * @ _T0SfN to i8 * ) , i8 * bitcast ( % swift . type * @ _T0SiN to i8 * ) , % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) ) <nl> + / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata3 ( [ [ WORD ] ] 3 , % swift . type * @ _T0SfN , % swift . type * @ _T0SfN , % swift . type * @ _T0SiN , % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) ) <nl> arch ( { ( a : Float , b : Float , c : Int ) - > ( ) in } ) <nl> <nl> - / / CHECK : [ [ T0 : % . 
* ] ] = getelementptr inbounds [ 6 x i8 * ] , [ 6 x i8 * ] * % function - arguments , i32 0 , i32 0 <nl> - / / CHECK : store [ [ WORD ] ] 4 <nl> - / / CHECK : getelementptr inbounds [ 6 x i8 * ] , [ 6 x i8 * ] * % function - arguments , i32 0 , i32 1 <nl> - / / CHECK : store i8 * inttoptr ( [ [ WORD ] ] or ( [ [ WORD ] ] ptrtoint ( % swift . type * @ _T0SiN to [ [ WORD ] ] ) , [ [ WORD ] ] 1 ) to i8 * ) <nl> - / / CHECK : getelementptr inbounds [ 6 x i8 * ] , [ 6 x i8 * ] * % function - arguments , i32 0 , i32 2 <nl> - / / CHECK : store i8 * bitcast ( % swift . type * @ _T0SdN to i8 * ) <nl> - / / CHECK : getelementptr inbounds [ 6 x i8 * ] , [ 6 x i8 * ] * % function - arguments , i32 0 , i32 3 <nl> - / / CHECK : store i8 * bitcast ( % swift . type * @ _T0SSN to i8 * ) <nl> - / / CHECK : getelementptr inbounds [ 6 x i8 * ] , [ 6 x i8 * ] * % function - arguments , i32 0 , i32 4 <nl> - / / CHECK : store i8 * bitcast ( % swift . type * @ _T0s4Int8VN to i8 * ) <nl> - / / CHECK : getelementptr inbounds [ 6 x i8 * ] , [ 6 x i8 * ] * % function - arguments , i32 0 , i32 5 <nl> - / / CHECK : store % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) <nl> - / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata ( i8 * * [ [ T0 ] ] ) { { # [ 0 - 9 ] + } } <nl> + / / CHECK : getelementptr inbounds [ 4 x % swift . type * ] , [ 4 x % swift . type * ] * % function - parameters , i32 0 , i32 0 <nl> + / / CHECK : store % swift . type * @ _T0SiN , % swift . type * * [ [ T : % . * ] ] , align [ [ ALIGN : ( 4 | 8 ) ] ] <nl> + / / CHECK : getelementptr inbounds [ 4 x % swift . type * ] , [ 4 x % swift . type * ] * % function - parameters , i32 0 , i32 1 <nl> + / / CHECK : store % swift . type * @ _T0SdN , % swift . type * * [ [ T : % . * ] ] , align [ [ ALIGN : ( 4 | 8 ) ] ] <nl> + / / CHECK : getelementptr inbounds [ 4 x % swift . type * ] , [ 4 x % swift . type * ] * % function - parameters , i32 0 , i32 2 <nl> + / / CHECK : store % swift . type * @ _T0SSN , % swift . type * * [ [ T : % . * ] ] , align [ [ ALIGN : ( 4 | 8 ) ] ] <nl> + / / CHECK : getelementptr inbounds [ 4 x % swift . type * ] , [ 4 x % swift . type * ] * % function - parameters , i32 0 , i32 3 <nl> + / / CHECK : store % swift . type * @ _T0s4Int8VN , % swift . type * * [ [ T : % . * ] ] , align [ [ ALIGN : ( 4 | 8 ) ] ] <nl> + / / CHECK : [ [ T : % . * ] ] = getelementptr inbounds [ 4 x % swift . type * ] , [ 4 x % swift . type * ] * % function - parameters , i32 0 , i32 0 <nl> + / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata ( [ [ WORD ] ] 16777220 , % swift . type * * % 7 , i32 * getelementptr inbounds ( [ 4 x i32 ] , [ 4 x i32 ] * @ parameter - flags , i32 0 , i32 0 ) , % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) ) <nl> arch ( { ( x : inout Int , y : Double , z : String , w : Int8 ) - > ( ) in } ) <nl> } <nl> mmm a / test / IRGen / objc_block . sil <nl> ppp b / test / IRGen / objc_block . sil <nl> entry ( % b : $ * @ convention ( block ) ( ) - > ( ) ) : <nl> <nl> / / CHECK - LABEL : define { { ( protected ) ? } } swiftcc void @ generic_with_block ( % objc_block * * noalias nocapture dereferenceable ( { { . * } } ) ) <nl> / / - - 0x100_0001 = block convention , 1 arg <nl> - / / CHECK : call % swift . type * @ swift_getFunctionTypeMetadata ( i8 * * % 3 ) <nl> - <nl> + / / CHECK : call % swift . 
type * @ swift_getFunctionTypeMetadata ( [ [ WORD : i ( 32 | 64 ) ] ] 33554432 , % swift . type * * null , i32 * null , % swift . type * getelementptr inbounds ( % swift . full_type , % swift . full_type * @ _T0ytN , i32 0 , i32 1 ) ) <nl> mmm a / unittests / Reflection / TypeRef . cpp <nl> ppp b / unittests / Reflection / TypeRef . cpp <nl> TEST ( TypeRefTest , UniqueFunctionTypeRef ) { <nl> EXPECT_NE ( F4 , F1 ) ; <nl> <nl> / / Test parameter with and without inout / shared / variadic and / or label . <nl> - ParameterTypeFlags paramFlags ; <nl> + ParameterFlags paramFlags ; <nl> auto inoutFlags = paramFlags . withInOut ( true ) ; <nl> auto variadicFlags = paramFlags . withVariadic ( true ) ; <nl> auto sharedFlags = paramFlags . withShared ( true ) ; <nl>
Merge pull request from xedin / fn - metadata - changes
apple/swift
e23a6f1359c7334e721ad1482f601a0cf3c486c9
2017-11-07T03:50:19Z
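The apple/swift record above moves function type metadata from flag-tagged argument pointers to a plain array of parameter metadata pointers followed, only when some parameter is inout/shared/variadic, by one uint32_t flag word per parameter. A minimal C++ sketch of that idea, with illustrative names (ParamFlags, extraAllocationSize) and assumed bit positions rather than the actual Swift runtime definitions:

#include <cassert>
#include <cstddef>
#include <cstdint>

class ParamFlags {
  uint32_t Data = 0;
  // Assumed bit layout; the real runtime defines its own masks.
  static constexpr uint32_t InOutMask    = 1u << 0;
  static constexpr uint32_t SharedMask   = 1u << 1;
  static constexpr uint32_t VariadicMask = 1u << 2;
  explicit constexpr ParamFlags(uint32_t d) : Data(d) {}
public:
  constexpr ParamFlags() = default;
  constexpr ParamFlags withInOut(bool b) const {
    return ParamFlags(b ? (Data | InOutMask) : (Data & ~InOutMask));
  }
  constexpr ParamFlags withShared(bool b) const {
    return ParamFlags(b ? (Data | SharedMask) : (Data & ~SharedMask));
  }
  constexpr bool isInOut() const  { return Data & InOutMask; }
  constexpr bool isShared() const { return Data & SharedMask; }
  constexpr bool isNone() const   { return Data == 0; }
  constexpr uint32_t getIntValue() const { return Data; }
  static constexpr ParamFlags fromIntValue(uint32_t d) { return ParamFlags(d); }
};

// Bytes needed after the fixed-size metadata header: one pointer per
// parameter, plus one uint32_t per parameter when flags are present,
// rounded up to pointer alignment (mirrors getExtraAllocationSize above).
static size_t extraAllocationSize(size_t numParams, bool hasParameterFlags) {
  size_t size = numParams * sizeof(void *);
  if (hasParameterFlags)
    size += numParams * sizeof(uint32_t);
  const size_t alignment = sizeof(void *);
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  ParamFlags f = ParamFlags().withInOut(true);
  assert(f.isInOut() && !f.isShared() && !f.isNone());
  assert(ParamFlags::fromIntValue(f.getIntValue()).isInOut());

  // An (inout Int, Float) -> Void style signature: 2 parameters, flags needed.
  assert(extraAllocationSize(2, /*hasParameterFlags=*/true) % sizeof(void *) == 0);
  return 0;
}

The rounding step matters because the 4-byte flag words are smaller than a pointer on 64-bit targets, so the trailing storage computed for the cache entry could otherwise end at a size that is not a multiple of the pointer size, which is what the rounding in getExtraAllocationSize above guards against.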
mmm a / aten / src / ATen / StorageImpl . h <nl> ppp b / aten / src / ATen / StorageImpl . h <nl> <nl> # include < ATen / Allocator . h > <nl> # include < ATen / ScalarType . h > <nl> # include < ATen / ScalarTypeUtils . h > <nl> - # include < TH / THTypeConversion . hpp > <nl> <nl> # include < ATen / core / intrusive_ptr . h > <nl> <nl> struct AT_API StorageImpl : public c10 : : intrusive_ptr_target { <nl> template < typename T > <nl> inline T * data ( ) const { <nl> auto data_type_T = <nl> - at : : scalarTypeToDataType ( at : : CTypeToScalarType < th : : from_type < T > > : : to ( ) ) ; <nl> + at : : scalarTypeToDataType ( at : : CTypeToScalarType < T > : : to ( ) ) ; <nl> if ( dtype ( ) ! = data_type_T ) { <nl> AT_ERROR ( <nl> " Attempt to access StorageImpl having data type " , <nl> mmm a / aten / src / ATen / gen . py <nl> ppp b / aten / src / ATen / gen . py <nl> def generate_storage_type_and_tensor ( backend , density , scalar_type , declarations <nl> if scalar_name = = " Half " : <nl> env [ ' SparseTensor ' ] = ' Tensor ' <nl> if backend = = " CUDA " : <nl> - env [ ' to_th_type ' ] = ' HalfFix < __half , Half > ' <nl> - env [ ' to_at_type ' ] = ' HalfFix < Half , __half > ' <nl> env [ ' AS_REAL ' ] = ' convert < half , double > ' <nl> - env [ ' THScalarType ' ] = ' half ' <nl> - else : <nl> - env [ ' to_th_type ' ] = ' HalfFix < THHalf , Half > ' <nl> - env [ ' to_at_type ' ] = ' HalfFix < Half , THHalf > ' <nl> - elif scalar_name = = ' Long ' : <nl> - env [ ' to_th_type ' ] = ' long ' <nl> - env [ ' to_at_type ' ] = ' int64_t ' <nl> - else : <nl> - env [ ' to_th_type ' ] = ' ' <nl> - env [ ' to_at_type ' ] = ' ' <nl> <nl> declarations , definitions = function_wrapper . create_derived ( <nl> env , declarations ) <nl> mmm a / aten / src / TH / CMakeLists . txt <nl> ppp b / aten / src / TH / CMakeLists . txt <nl> INSTALL ( FILES <nl> THTensor . hpp <nl> THStorageFunctions . hpp <nl> THGenerator . hpp <nl> - THTypeConversion . hpp <nl> DESTINATION " $ { ATEN_INSTALL_INCLUDE_SUBDIR } / TH " ) <nl> <nl> INSTALL ( FILES <nl> mmm a / aten / src / TH / THHalf . h <nl> ppp b / aten / src / TH / THHalf . h <nl> <nl> # define TH_HALF_H <nl> <nl> # include < TH / THGeneral . h > <nl> - # include < stdint . h > <nl> <nl> - / * Neither built - in nor included from Cutorch , use our definition lifted from CUDA * / <nl> - # if defined ( __GNUC__ ) <nl> - # define __thalign__ ( n ) __attribute__ ( ( aligned ( n ) ) ) <nl> - # elif defined ( _WIN32 ) <nl> - # define __thalign__ ( n ) __declspec ( align ( n ) ) <nl> - # else <nl> - # define __thalign__ ( n ) <nl> + # ifdef __cplusplus <nl> + # include < ATen / TensorImpl . 
h > <nl> # endif <nl> <nl> - typedef struct __thalign__ ( 2 ) { <nl> - unsigned short x ; <nl> - } __THHalf ; <nl> - <nl> - typedef struct __thalign__ ( 4 ) { <nl> - unsigned int x ; <nl> - } __THHalf2 ; <nl> - <nl> - typedef __THHalf THHalf ; <nl> - typedef __THHalf2 THHalf2 ; <nl> + # ifdef __cplusplus <nl> + # define THHalf at : : Half <nl> + # else <nl> + typedef struct at_Half at_Half ; <nl> + # define THHalf at_Half <nl> + # endif <nl> <nl> TH_API void TH_float2halfbits ( float * , unsigned short * ) ; <nl> TH_API void TH_halfbits2float ( unsigned short * , float * ) ; <nl> <nl> TH_API THHalf TH_float2half ( float ) ; <nl> - TH_API float TH_half2float ( THHalf ) ; <nl> - <nl> - # ifndef TH_HALF_BITS_TO_LITERAL <nl> - # define TH_HALF_BITS_TO_LITERAL ( n ) { n } <nl> - # endif <nl> - <nl> - # define TH_HALF_ZERO 0x0U <nl> - # define TH_HALF_INF 0x7C00U <nl> + TH_API float TH_half2float ( THHalf ) ; <nl> <nl> - # undef __thalign__ <nl> # endif <nl> mmm a / aten / src / TH / THStorageFunctions . hpp <nl> ppp b / aten / src / TH / THStorageFunctions . hpp <nl> <nl> <nl> # include < ATen / ScalarType . h > <nl> # include < ATen / ScalarTypeUtils . h > <nl> - # include " THTypeConversion . hpp " <nl> # include < atomic > <nl> <nl> / / Note [ Weak references for intrusive refcounting ] <nl> deleted file mode 100644 <nl> index d40169e7180e . . 000000000000 <nl> mmm a / aten / src / TH / THTypeConversion . hpp <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - <nl> - # include < ATen / core / Half . h > <nl> - # include " THHalf . h " <nl> - <nl> - / / Type traits to convert types to TH - specific types . Used primarily to <nl> - / / convert at : : Half to TH ' s half type . This makes the conversion explicit . <nl> - / / FIXME : we should just use the same type <nl> - <nl> - namespace th { <nl> - <nl> - template < typename T > <nl> - struct FromTypeConversion { <nl> - using type = T ; <nl> - } ; <nl> - <nl> - template < > <nl> - struct FromTypeConversion < THHalf > { <nl> - using type = at : : Half ; <nl> - } ; <nl> - <nl> - template < typename T > <nl> - using from_type = typename FromTypeConversion < T > : : type ; <nl> - } <nl> mmm a / aten / src / TH / generic / THStorage . cpp <nl> ppp b / aten / src / TH / generic / THStorage . cpp <nl> size_t THStorage_ ( elementSize ) ( ) <nl> <nl> THStorage * THStorage_ ( new ) ( void ) <nl> { <nl> - return THStorage_new ( at : : CTypeToScalarType < th : : from_type < real > > : : to ( ) ) ; <nl> + return THStorage_new ( at : : CTypeToScalarType < real > : : to ( ) ) ; <nl> } <nl> <nl> THStorage * THStorage_ ( newWithSize ) ( ptrdiff_t size ) <nl> { <nl> THStorage * storage = c10 : : make_intrusive < at : : StorageImpl > ( <nl> - at : : scalarTypeToDataType ( at : : CTypeToScalarType < th : : from_type < real > > : : to ( ) ) , <nl> + at : : scalarTypeToDataType ( at : : CTypeToScalarType < real > : : to ( ) ) , <nl> size , <nl> getTHDefaultAllocator ( ) , <nl> true ) . release ( ) ; <nl> THStorage * THStorage_ ( newWithAllocator ) ( ptrdiff_t size , <nl> at : : Allocator * allocator ) <nl> { <nl> THStorage * storage = c10 : : make_intrusive < at : : StorageImpl > ( <nl> - at : : scalarTypeToDataType ( at : : CTypeToScalarType < th : : from_type < real > > : : to ( ) ) , <nl> + at : : scalarTypeToDataType ( at : : CTypeToScalarType < real > : : to ( ) ) , <nl> size , <nl> allocator , <nl> true ) . 
release ( ) ; <nl> THStorage * THStorage_ ( newWithAllocator ) ( ptrdiff_t size , <nl> <nl> THStorage * THStorage_ ( newWithMapping ) ( const char * filename , ptrdiff_t size , int flags ) <nl> { <nl> - auto scalar_type = at : : CTypeToScalarType < th : : from_type < real > > : : to ( ) ; <nl> + auto scalar_type = at : : CTypeToScalarType < real > : : to ( ) ; <nl> size_t actual_size = - 1 ; <nl> THStorage * storage = c10 : : make_intrusive < at : : StorageImpl > ( <nl> at : : scalarTypeToDataType ( scalar_type ) , <nl> void THStorage_ ( free ) ( THStorage * storage ) <nl> THStorage * THStorage_ ( newWithDataAndAllocator ) ( at : : DataPtr & & data , ptrdiff_t size , <nl> at : : Allocator * allocator ) { <nl> THStorage * storage = c10 : : make_intrusive < at : : StorageImpl > ( <nl> - at : : scalarTypeToDataType ( at : : CTypeToScalarType < th : : from_type < real > > : : to ( ) ) , <nl> + at : : scalarTypeToDataType ( at : : CTypeToScalarType < real > : : to ( ) ) , <nl> size , <nl> std : : move ( data ) , <nl> allocator , <nl>
Remove the THHalf type (THHalf becomes an alias for at::Half)
pytorch/pytorch
c4e1adf29d0b22fa5ff0ea2206a22f4d035c36cb
2018-08-29T23:44:45Z
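The hunks above delete the th::from_type conversion trait and turn THHalf into an alias for at::Half, so storage code can map a C++ element type to its scalar type directly. A minimal sketch of that pattern follows, assuming only the at::CTypeToScalarType trait and the headers already shown in the diff; everything else (the helper name, the standalone usage) is illustrative, not part of the commit.

// Sketch under the assumptions above: resolve an ATen scalar type straight
// from the element type, with no th::from_type<T> indirection.
#include <ATen/ScalarType.h>   // at::ScalarType, at::CTypeToScalarType
#include <ATen/core/Half.h>    // at::Half; THHalf is now just an alias for it

template <typename T>
at::ScalarType element_scalar_type() {
  // Pre-commit TH code spelled this as
  //   at::CTypeToScalarType<th::from_type<T>>::to()
  // to translate THHalf into at::Half first; with the alias in place the
  // direct form is enough.
  return at::CTypeToScalarType<T>::to();
}

// Usage (illustrative): element_scalar_type<at::Half>() yields the Half
// scalar type, matching what THStorage_(new) now hands to
// at::scalarTypeToDataType in the hunks above.

Collapsing the two half representations into one type is what lets the generated code in gen.py drop the to_th_type / to_at_type plumbing as well.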
mmm a / cocos / ui / UIRichText . cpp <nl> ppp b / cocos / ui / UIRichText . cpp <nl> <nl> <nl> # include " ui / UIRichText . h " <nl> <nl> + # include < sstream > <nl> # include < vector > <nl> # include < locale > <nl> <nl> <nl> # include " base / ccUTF8 . h " <nl> # include " ui / UIHelper . h " <nl> <nl> - NS_CC_BEGIN <nl> + USING_NS_CC ; <nl> + using namespace cocos2d : : ui ; <nl> <nl> - namespace ui { <nl> - class ListenerComponent : public Component <nl> + class ListenerComponent : public Component <nl> + { <nl> + public : <nl> + static const std : : string COMPONENT_NAME ; / * ! < component name * / <nl> + <nl> + static ListenerComponent * create ( Node * parent , const std : : string & url , const RichText : : OpenUrlHandler handleOpenUrl = nullptr ) <nl> { <nl> - public : <nl> - static ListenerComponent * create ( Label * parent , const std : : string & url ) <nl> - { <nl> - auto component = new ( std : : nothrow ) ListenerComponent ( parent , url ) ; <nl> - component - > autorelease ( ) ; <nl> - return component ; <nl> - } <nl> + auto component = new ( std : : nothrow ) ListenerComponent ( parent , url , handleOpenUrl ) ; <nl> + component - > autorelease ( ) ; <nl> + return component ; <nl> + } <nl> <nl> - explicit ListenerComponent ( Label * parent , const std : : string & url ) <nl> - : _parent ( parent ) <nl> - , _url ( url ) <nl> - { <nl> - _touchListener = cocos2d : : EventListenerTouchAllAtOnce : : create ( ) ; <nl> - _touchListener - > onTouchesEnded = CC_CALLBACK_2 ( ListenerComponent : : onTouchesEnded , this ) ; <nl> + explicit ListenerComponent ( Node * parent , const std : : string & url , const RichText : : OpenUrlHandler handleOpenUrl ) <nl> + : _parent ( parent ) <nl> + , _url ( url ) <nl> + , _handleOpenUrl ( handleOpenUrl ) <nl> + { <nl> + setName ( ListenerComponent : : COMPONENT_NAME ) ; <nl> + <nl> + _touchListener = cocos2d : : EventListenerTouchAllAtOnce : : create ( ) ; <nl> + _touchListener - > onTouchesEnded = CC_CALLBACK_2 ( ListenerComponent : : onTouchesEnded , this ) ; <nl> <nl> - Director : : getInstance ( ) - > getEventDispatcher ( ) - > addEventListenerWithSceneGraphPriority ( _touchListener , _parent ) ; <nl> - _touchListener - > retain ( ) ; <nl> - } <nl> + Director : : getInstance ( ) - > getEventDispatcher ( ) - > addEventListenerWithSceneGraphPriority ( _touchListener , _parent ) ; <nl> + _touchListener - > retain ( ) ; <nl> + } <nl> <nl> - virtual ~ ListenerComponent ( ) <nl> - { <nl> - Director : : getInstance ( ) - > getEventDispatcher ( ) - > removeEventListener ( _touchListener ) ; <nl> - _touchListener - > release ( ) ; <nl> - } <nl> + virtual ~ ListenerComponent ( ) <nl> + { <nl> + Director : : getInstance ( ) - > getEventDispatcher ( ) - > removeEventListener ( _touchListener ) ; <nl> + _touchListener - > release ( ) ; <nl> + } <nl> <nl> - void onTouchesEnded ( const std : : vector < Touch * > & touches , Event * event ) <nl> + void onTouchesEnded ( const std : : vector < Touch * > & touches , Event * event ) <nl> + { <nl> + for ( const auto & touch : touches ) <nl> { <nl> - for ( const auto & touch : touches ) <nl> - { <nl> - / / FIXME : Node : : getBoundBox ( ) doesn ' t return it in local coordinates . . . so create one manually . <nl> - Rect localRect = Rect ( Vec2 : : ZERO , _parent - > getContentSize ( ) ) ; <nl> - if ( localRect . 
containsPoint ( _parent - > convertTouchToNodeSpace ( touch ) ) ) <nl> - Application : : getInstance ( ) - > openURL ( _url ) ; <nl> + / / FIXME : Node : : getBoundBox ( ) doesn ' t return it in local coordinates . . . so create one manually . <nl> + Rect localRect = Rect ( Vec2 : : ZERO , _parent - > getContentSize ( ) ) ; <nl> + if ( localRect . containsPoint ( _parent - > convertTouchToNodeSpace ( touch ) ) ) { <nl> + if ( _handleOpenUrl ) { <nl> + _handleOpenUrl ( _url ) ; <nl> + } <nl> } <nl> } <nl> + } <nl> + <nl> + void setOpenUrlHandler ( const RichText : : OpenUrlHandler & handleOpenUrl ) <nl> + { <nl> + _handleOpenUrl = handleOpenUrl ; <nl> + } <nl> <nl> - private : <nl> - Label * _parent ; / / weak ref . <nl> - std : : string _url ; <nl> - EventDispatcher * _eventDispatcher ; / / weak ref . <nl> - EventListenerTouchAllAtOnce * _touchListener ; / / strong ref . <nl> - } ; <nl> - <nl> + private : <nl> + Node * _parent ; / / weak ref . <nl> + std : : string _url ; <nl> + RichText : : OpenUrlHandler _handleOpenUrl ; <nl> + EventDispatcher * _eventDispatcher ; / / weak ref . <nl> + EventListenerTouchAllAtOnce * _touchListener ; / / strong ref . <nl> + } ; <nl> + const std : : string ListenerComponent : : COMPONENT_NAME ( " cocos2d_ui_UIRichText_ListenerComponent " ) ; <nl> <nl> bool RichElement : : init ( int tag , const Color3B & color , GLubyte opacity ) <nl> { <nl> bool RichElement : : init ( int tag , const Color3B & color , GLubyte opacity ) <nl> _opacity = opacity ; <nl> return true ; <nl> } <nl> - <nl> - <nl> - RichElementText * RichElementText : : create ( int tag , const Color3B & color , GLubyte opacity , const std : : string & text , const std : : string & fontName , float fontSize , uint32_t flags , const std : : string & url ) <nl> + <nl> + bool RichElement : : equalType ( Type type ) <nl> + { <nl> + return ( _type = = type ) ; <nl> + } <nl> + <nl> + void RichElement : : setColor ( const Color3B & color ) <nl> + { <nl> + _color = color ; <nl> + } <nl> + <nl> + RichElementText * RichElementText : : create ( int tag , const Color3B & color , GLubyte opacity , const std : : string & text , <nl> + const std : : string & fontName , float fontSize , uint32_t flags , const std : : string & url , <nl> + const Color3B & outlineColor , int outlineSize , <nl> + const Color3B & shadowColor , const cocos2d : : Size & shadowOffset , int shadowBlurRadius , <nl> + const Color3B & glowColor ) <nl> { <nl> RichElementText * element = new ( std : : nothrow ) RichElementText ( ) ; <nl> - if ( element & & element - > init ( tag , color , opacity , text , fontName , fontSize , flags , url ) ) <nl> + if ( element & & element - > init ( tag , color , opacity , text , fontName , fontSize , flags , url , <nl> + outlineColor , outlineSize , shadowColor , shadowOffset , shadowBlurRadius , glowColor ) ) <nl> { <nl> element - > autorelease ( ) ; <nl> return element ; <nl> RichElementText * RichElementText : : create ( int tag , const Color3B & color , GLubyte <nl> return nullptr ; <nl> } <nl> <nl> - bool RichElementText : : init ( int tag , const Color3B & color , GLubyte opacity , const std : : string & text , const std : : string & fontName , float fontSize , uint32_t flags , const std : : string & url ) <nl> + bool RichElementText : : init ( int tag , const Color3B & color , GLubyte opacity , const std : : string & text , <nl> + const std : : string & fontName , float fontSize , uint32_t flags , const std : : string & url , <nl> + const Color3B & outlineColor , int outlineSize , <nl> + const 
Color3B & shadowColor , const cocos2d : : Size & shadowOffset , int shadowBlurRadius , <nl> + const Color3B & glowColor ) <nl> { <nl> if ( RichElement : : init ( tag , color , opacity ) ) <nl> { <nl> bool RichElementText : : init ( int tag , const Color3B & color , GLubyte opacity , const <nl> _fontSize = fontSize ; <nl> _flags = flags ; <nl> _url = url ; <nl> + _outlineColor = outlineColor ; <nl> + _outlineSize = outlineSize ; <nl> + _shadowColor = shadowColor ; <nl> + _shadowOffset = shadowOffset ; <nl> + _shadowBlurRadius = shadowBlurRadius ; <nl> + _glowColor = glowColor ; <nl> return true ; <nl> } <nl> return false ; <nl> } <nl> <nl> - RichElementImage * RichElementImage : : create ( int tag , const Color3B & color , GLubyte opacity , const std : : string & filePath ) <nl> + RichElementImage * RichElementImage : : create ( int tag , const Color3B & color , GLubyte opacity , const std : : string & filePath , const std : : string & url ) <nl> { <nl> RichElementImage * element = new ( std : : nothrow ) RichElementImage ( ) ; <nl> - if ( element & & element - > init ( tag , color , opacity , filePath ) ) <nl> + if ( element & & element - > init ( tag , color , opacity , filePath , url ) ) <nl> { <nl> element - > autorelease ( ) ; <nl> return element ; <nl> RichElementImage * RichElementImage : : create ( int tag , const Color3B & color , GLubyt <nl> return nullptr ; <nl> } <nl> <nl> - bool RichElementImage : : init ( int tag , const Color3B & color , GLubyte opacity , const std : : string & filePath ) <nl> + bool RichElementImage : : init ( int tag , const Color3B & color , GLubyte opacity , const std : : string & filePath , const std : : string & url ) <nl> { <nl> if ( RichElement : : init ( tag , color , opacity ) ) <nl> { <nl> _filePath = filePath ; <nl> _width = - 1 ; <nl> _height = - 1 ; <nl> + _url = url ; <nl> return true ; <nl> } <nl> return false ; <nl> } <nl> + <nl> void RichElementImage : : setWidth ( int width ) <nl> { <nl> _width = width ; <nl> void RichElementImage : : setHeight ( int height ) <nl> _height = height ; <nl> } <nl> <nl> + void RichElementImage : : setUrl ( const std : : string & url ) <nl> + { <nl> + _url = url ; <nl> + } <nl> + <nl> RichElementCustomNode * RichElementCustomNode : : create ( int tag , const Color3B & color , GLubyte opacity , cocos2d : : Node * customNode ) <nl> { <nl> RichElementCustomNode * element = new ( std : : nothrow ) RichElementCustomNode ( ) ; <nl> RichElementNewLine * RichElementNewLine : : create ( int tag , const Color3B & color , GL <nl> CC_SAFE_DELETE ( element ) ; <nl> return nullptr ; <nl> } <nl> - <nl> - RichText : : RichText ( ) <nl> - : _formatTextDirty ( true ) <nl> - , _leftSpaceWidth ( 0 . 0f ) <nl> - , _verticalSpace ( 0 . 0f ) <nl> - , _wrapMode ( WRAP_PER_WORD ) <nl> - { <nl> - } <nl> - <nl> - RichText : : ~ RichText ( ) <nl> - { <nl> - _richElements . 
clear ( ) ; <nl> - } <nl> - <nl> - RichText * RichText : : create ( ) <nl> - { <nl> - RichText * widget = new ( std : : nothrow ) RichText ( ) ; <nl> - if ( widget & & widget - > init ( ) ) <nl> - { <nl> - widget - > autorelease ( ) ; <nl> - return widget ; <nl> - } <nl> - CC_SAFE_DELETE ( widget ) ; <nl> - return nullptr ; <nl> - } <nl> - <nl> - RichText * RichText : : createWithXML ( const std : : string & xml ) <nl> - { <nl> - RichText * widget = new ( std : : nothrow ) RichText ( ) ; <nl> - if ( widget & & widget - > initWithXML ( xml ) ) <nl> - { <nl> - widget - > autorelease ( ) ; <nl> - return widget ; <nl> - } <nl> - CC_SAFE_DELETE ( widget ) ; <nl> - return nullptr ; <nl> - } <nl> - <nl> - bool RichText : : init ( ) <nl> - { <nl> - if ( Widget : : init ( ) ) <nl> - { <nl> - return true ; <nl> - } <nl> - return false ; <nl> - } <nl> <nl> + / * * @ brief parse a XML . * / <nl> class MyXMLVisitor : public tinyxml2 : : XMLVisitor <nl> { <nl> + public : <nl> + / * * @ brief underline or strikethrough * / <nl> + enum class StyleLine { <nl> + NONE , <nl> + UNDERLINE , / * ! < underline * / <nl> + STRIKETHROUGH / * ! < a typographical presentation of words with a horizontal line through their center * / <nl> + } ; <nl> + <nl> + / * * @ brief outline , shadow or glow * / <nl> + enum class StyleEffect { <nl> + NONE , <nl> + OUTLINE , / * ! < outline effect enabled * / <nl> + SHADOW , / * ! < shadow effect enabled * / <nl> + GLOW / * ! < glow effect enabled @ discussion Limiting use to only when the Label created with true type font . * / <nl> + } ; <nl> + <nl> + / * * @ brief the attributes of text tag * / <nl> struct Attributes <nl> { <nl> - std : : string face ; <nl> - std : : string url ; <nl> - float fontSize ; <nl> - Color3B color ; <nl> - bool hasColor ; <nl> - bool bold ; <nl> - bool italics ; <nl> - bool underline ; <nl> - bool strikethrough ; <nl> - <nl> + std : : string face ; / * ! < font name * / <nl> + std : : string url ; / * ! < url is a attribute of a anchor tag * / <nl> + float fontSize ; / * ! < font size * / <nl> + Color3B color ; / * ! < font color * / <nl> + bool hasColor ; / * ! < or color is specified ? * / <nl> + bool bold ; / * ! < bold text * / <nl> + bool italics ; / * ! < italic text * / <nl> + StyleLine line ; / * ! < underline or strikethrough * / <nl> + StyleEffect effect ; / * ! < outline , shadow or glow * / <nl> + Color3B outlineColor ; / * ! < the color of the outline * / <nl> + int outlineSize ; / * ! < the outline effect size value * / <nl> + Color3B shadowColor ; / * ! < the shadow effect color value * / <nl> + cocos2d : : Size shadowOffset ; / * ! < shadow effect offset value * / <nl> + int shadowBlurRadius ; / * ! < the shadow effect blur radius * / <nl> + Color3B glowColor ; / * ! 
< the glow effect color value * / <nl> + <nl> void setColor ( const Color3B & acolor ) <nl> { <nl> color = acolor ; <nl> class MyXMLVisitor : public tinyxml2 : : XMLVisitor <nl> Attributes ( ) <nl> : bold ( false ) <nl> , italics ( false ) <nl> - , underline ( false ) <nl> - , strikethrough ( false ) <nl> + , line ( StyleLine : : NONE ) <nl> , hasColor ( false ) <nl> , fontSize ( - 1 ) <nl> + , effect ( StyleEffect : : NONE ) <nl> { <nl> } <nl> } ; <nl> - <nl> + <nl> + private : <nl> std : : vector < Attributes > _fontElements ; <nl> - <nl> + <nl> RichText * _richText ; <nl> + <nl> + struct TagBehavior { <nl> + bool isFontElement ; <nl> + RichText : : VisitEnterHandler handleVisitEnter ; <nl> + } ; <nl> + typedef std : : unordered_map < std : : string , TagBehavior > TagTables ; <nl> + <nl> + static TagTables _tagTables ; <nl> + <nl> + public : <nl> + explicit MyXMLVisitor ( RichText * richText ) ; <nl> + virtual ~ MyXMLVisitor ( ) ; <nl> + <nl> + Color3B getColor ( ) const ; <nl> + <nl> + float getFontSize ( ) const ; <nl> + <nl> + std : : string getFace ( ) const ; <nl> + <nl> + std : : string getURL ( ) const ; <nl> + <nl> + bool getBold ( ) const ; <nl> + <nl> + bool getItalics ( ) const ; <nl> + <nl> + bool getUnderline ( ) const ; <nl> + <nl> + bool getStrikethrough ( ) const ; <nl> + <nl> + std : : tuple < bool , Color3B , int > getOutline ( ) const ; <nl> + <nl> + std : : tuple < bool , Color3B , cocos2d : : Size , int > getShadow ( ) const ; <nl> + <nl> + std : : tuple < bool , Color3B > getGlow ( ) const ; <nl> + <nl> + / / / Visit an element . <nl> + virtual bool VisitEnter ( const tinyxml2 : : XMLElement & element , const tinyxml2 : : XMLAttribute * firstAttribute ) ; <nl> + <nl> + / / / Visit an element . <nl> + virtual bool VisitExit ( const tinyxml2 : : XMLElement & element ) ; <nl> + <nl> + / / / Visit a text node . <nl> + virtual bool Visit ( const tinyxml2 : : XMLText & text ) ; <nl> + <nl> + void pushBackFontElement ( const Attributes & attribs ) ; <nl> + <nl> + void popBackFontElement ( ) ; <nl> + <nl> + void pushBackElement ( RichElement * element ) ; <nl> + <nl> + static void setTagDescription ( const std : : string & tag , bool isFontElement , RichText : : VisitEnterHandler handleVisitEnter ) ; <nl> + <nl> + static void removeTagDescription ( const std : : string & tag ) ; <nl> + <nl> + private : <nl> + ValueMap tagAttrMapWithXMLElement ( const tinyxml2 : : XMLElement & element ) ; <nl> + } ; <nl> <nl> - Color3B getColor ( ) const <nl> - { <nl> - for ( auto i = _fontElements . rbegin ( ) ; i ! = _fontElements . rend ( ) ; + + i ) <nl> - { <nl> - if ( i - > hasColor ) <nl> - return i - > color ; <nl> + MyXMLVisitor : : TagTables MyXMLVisitor : : _tagTables ; <nl> + <nl> + MyXMLVisitor : : MyXMLVisitor ( RichText * richText ) <nl> + : _richText ( richText ) <nl> + , _fontElements ( 20 ) <nl> + { <nl> + MyXMLVisitor : : setTagDescription ( " font " , true , [ ] ( const ValueMap & tagAttrValueMap ) { <nl> + / / supported attributes : <nl> + / / size , color , align , face <nl> + ValueMap attrValueMap ; <nl> + <nl> + if ( tagAttrValueMap . find ( " size " ) ! = tagAttrValueMap . end ( ) ) { <nl> + attrValueMap [ RichText : : KEY_FONT_SIZE ] = tagAttrValueMap . at ( " size " ) . asString ( ) ; <nl> + } <nl> + if ( tagAttrValueMap . find ( " color " ) ! = tagAttrValueMap . end ( ) ) { <nl> + attrValueMap [ RichText : : KEY_FONT_COLOR_STRING ] = tagAttrValueMap . at ( " color " ) . asString ( ) ; <nl> } <nl> - return Color3B : : WHITE ; <nl> + if ( tagAttrValueMap . 
find ( " face " ) ! = tagAttrValueMap . end ( ) ) { <nl> + attrValueMap [ RichText : : KEY_FONT_FACE ] = tagAttrValueMap . at ( " face " ) . asString ( ) ; <nl> + } <nl> + <nl> + return make_pair ( attrValueMap , nullptr ) ; <nl> + } ) ; <nl> + <nl> + MyXMLVisitor : : setTagDescription ( " b " , true , [ ] ( const ValueMap & tagAttrValueMap ) { <nl> + / / no supported attributes <nl> + ValueMap attrValueMap ; <nl> + attrValueMap [ RichText : : KEY_TEXT_BOLD ] = true ; <nl> + return make_pair ( attrValueMap , nullptr ) ; <nl> + } ) ; <nl> + <nl> + MyXMLVisitor : : setTagDescription ( " i " , true , [ ] ( const ValueMap & tagAttrValueMap ) { <nl> + / / no supported attributes <nl> + ValueMap attrValueMap ; <nl> + attrValueMap [ RichText : : KEY_TEXT_ITALIC ] = true ; <nl> + return make_pair ( attrValueMap , nullptr ) ; <nl> + } ) ; <nl> + <nl> + MyXMLVisitor : : setTagDescription ( " del " , true , [ ] ( const ValueMap & tagAttrValueMap ) { <nl> + / / no supported attributes <nl> + ValueMap attrValueMap ; <nl> + attrValueMap [ RichText : : KEY_TEXT_LINE ] = RichText : : VALUE_TEXT_LINE_DEL ; <nl> + return make_pair ( attrValueMap , nullptr ) ; <nl> + } ) ; <nl> + <nl> + MyXMLVisitor : : setTagDescription ( " u " , true , [ ] ( const ValueMap & tagAttrValueMap ) { <nl> + / / no supported attributes <nl> + ValueMap attrValueMap ; <nl> + attrValueMap [ RichText : : KEY_TEXT_LINE ] = RichText : : VALUE_TEXT_LINE_UNDER ; <nl> + return make_pair ( attrValueMap , nullptr ) ; <nl> + } ) ; <nl> + <nl> + MyXMLVisitor : : setTagDescription ( " small " , true , [ ] ( const ValueMap & tagAttrValueMap ) { <nl> + ValueMap attrValueMap ; <nl> + attrValueMap [ RichText : : KEY_FONT_SMALL ] = true ; <nl> + return make_pair ( attrValueMap , nullptr ) ; <nl> + } ) ; <nl> + <nl> + MyXMLVisitor : : setTagDescription ( " big " , true , [ ] ( const ValueMap & tagAttrValueMap ) { <nl> + ValueMap attrValueMap ; <nl> + attrValueMap [ RichText : : KEY_FONT_BIG ] = true ; <nl> + return make_pair ( attrValueMap , nullptr ) ; <nl> + } ) ; <nl> + <nl> + MyXMLVisitor : : setTagDescription ( " img " , false , [ ] ( const ValueMap & tagAttrValueMap ) { <nl> + / / supported attributes : <nl> + / / src , height , width <nl> + std : : string src ; <nl> + int height = - 1 ; <nl> + int width = - 1 ; <nl> + <nl> + if ( tagAttrValueMap . find ( " src " ) ! = tagAttrValueMap . end ( ) ) { <nl> + src = tagAttrValueMap . at ( " src " ) . asString ( ) ; <nl> + } <nl> + if ( tagAttrValueMap . find ( " height " ) ! = tagAttrValueMap . end ( ) ) { <nl> + height = tagAttrValueMap . at ( " height " ) . asInt ( ) ; <nl> + } <nl> + if ( tagAttrValueMap . find ( " width " ) ! = tagAttrValueMap . end ( ) ) { <nl> + width = tagAttrValueMap . at ( " width " ) . asInt ( ) ; <nl> + } <nl> + <nl> + RichElementImage * elementImg = nullptr ; <nl> + if ( src . length ( ) ) { <nl> + elementImg = RichElementImage : : create ( 0 , Color3B : : WHITE , 255 , src ) ; <nl> + if ( 0 < = height ) elementImg - > setHeight ( height ) ; <nl> + if ( 0 < = width ) elementImg - > setWidth ( width ) ; <nl> + } <nl> + return make_pair ( ValueMap ( ) , elementImg ) ; <nl> + } ) ; <nl> + <nl> + MyXMLVisitor : : setTagDescription ( " a " , true , [ ] ( const ValueMap & tagAttrValueMap ) { <nl> + / / supported attributes : <nl> + ValueMap attrValueMap ; <nl> + <nl> + if ( tagAttrValueMap . find ( " href " ) ! = tagAttrValueMap . end ( ) ) { <nl> + attrValueMap [ RichText : : KEY_URL ] = tagAttrValueMap . at ( " href " ) . 
asString ( ) ; <nl> + } <nl> + return make_pair ( attrValueMap , nullptr ) ; <nl> + } ) ; <nl> + <nl> + MyXMLVisitor : : setTagDescription ( " br " , false , [ ] ( const ValueMap & tagAttrValueMap ) { <nl> + RichElementNewLine * richElement = RichElementNewLine : : create ( 0 , Color3B : : WHITE , 255 ) ; <nl> + return make_pair ( ValueMap ( ) , richElement ) ; <nl> + } ) ; <nl> + <nl> + MyXMLVisitor : : setTagDescription ( " outline " , true , [ ] ( const ValueMap & tagAttrValueMap ) { <nl> + / / supported attributes : <nl> + / / color , cize <nl> + ValueMap attrValueMap ; <nl> + <nl> + attrValueMap [ RichText : : KEY_TEXT_STYLE ] = RichText : : VALUE_TEXT_STYLE_OUTLINE ; <nl> + if ( tagAttrValueMap . find ( " color " ) ! = tagAttrValueMap . end ( ) ) { <nl> + attrValueMap [ RichText : : KEY_TEXT_OUTLINE_COLOR ] = tagAttrValueMap . at ( " color " ) . asString ( ) ; <nl> + } <nl> + if ( tagAttrValueMap . find ( " size " ) ! = tagAttrValueMap . end ( ) ) { <nl> + attrValueMap [ RichText : : KEY_TEXT_OUTLINE_SIZE ] = tagAttrValueMap . at ( " size " ) . asString ( ) ; <nl> + } <nl> + return make_pair ( attrValueMap , nullptr ) ; <nl> + } ) ; <nl> + <nl> + MyXMLVisitor : : setTagDescription ( " shadow " , true , [ ] ( const ValueMap & tagAttrValueMap ) { <nl> + / / supported attributes : <nl> + / / color , offsetWidth , offsetHeight , blurRadius <nl> + ValueMap attrValueMap ; <nl> + <nl> + attrValueMap [ RichText : : KEY_TEXT_STYLE ] = RichText : : VALUE_TEXT_STYLE_SHADOW ; <nl> + if ( tagAttrValueMap . find ( " color " ) ! = tagAttrValueMap . end ( ) ) { <nl> + attrValueMap [ RichText : : KEY_TEXT_SHADOW_COLOR ] = tagAttrValueMap . at ( " color " ) . asString ( ) ; <nl> + } <nl> + if ( tagAttrValueMap . find ( " offsetWidth " ) ! = tagAttrValueMap . end ( ) ) { <nl> + attrValueMap [ RichText : : KEY_TEXT_SHADOW_OFFSET_WIDTH ] = tagAttrValueMap . at ( " offsetWidth " ) . asString ( ) ; <nl> + } <nl> + if ( tagAttrValueMap . find ( " offsetHeight " ) ! = tagAttrValueMap . end ( ) ) { <nl> + attrValueMap [ RichText : : KEY_TEXT_SHADOW_OFFSET_HEIGHT ] = tagAttrValueMap . at ( " offsetHeight " ) . asString ( ) ; <nl> + } <nl> + if ( tagAttrValueMap . find ( " blurRadius " ) ! = tagAttrValueMap . end ( ) ) { <nl> + attrValueMap [ RichText : : KEY_TEXT_SHADOW_BLUR_RADIUS ] = tagAttrValueMap . at ( " blurRadius " ) . asString ( ) ; <nl> + } <nl> + return make_pair ( attrValueMap , nullptr ) ; <nl> + } ) ; <nl> + <nl> + MyXMLVisitor : : setTagDescription ( " glow " , true , [ ] ( const ValueMap & tagAttrValueMap ) { <nl> + / / supported attributes : <nl> + / / color <nl> + ValueMap attrValueMap ; <nl> + <nl> + attrValueMap [ RichText : : KEY_TEXT_STYLE ] = RichText : : VALUE_TEXT_STYLE_GLOW ; <nl> + if ( tagAttrValueMap . find ( " color " ) ! = tagAttrValueMap . end ( ) ) { <nl> + attrValueMap [ RichText : : KEY_TEXT_GLOW_COLOR ] = tagAttrValueMap . at ( " color " ) . asString ( ) ; <nl> + } <nl> + return make_pair ( attrValueMap , nullptr ) ; <nl> + } ) ; <nl> + } <nl> + <nl> + MyXMLVisitor : : ~ MyXMLVisitor ( ) <nl> + { <nl> + } <nl> + <nl> + Color3B MyXMLVisitor : : getColor ( ) const <nl> + { <nl> + for ( auto i = _fontElements . rbegin ( ) ; i ! = _fontElements . rend ( ) ; + + i ) <nl> + { <nl> + if ( i - > hasColor ) <nl> + return i - > color ; <nl> } <nl> + return Color3B : : WHITE ; <nl> + } <nl> <nl> - float getFontSize ( ) const <nl> + float MyXMLVisitor : : getFontSize ( ) const <nl> + { <nl> + for ( auto i = _fontElements . rbegin ( ) ; i ! = _fontElements . 
rend ( ) ; + + i ) <nl> { <nl> - for ( auto i = _fontElements . rbegin ( ) ; i ! = _fontElements . rend ( ) ; + + i ) <nl> - { <nl> - if ( i - > fontSize ! = - 1 ) <nl> - return i - > fontSize ; <nl> - } <nl> - return 12 ; <nl> + if ( i - > fontSize ! = - 1 ) <nl> + return i - > fontSize ; <nl> } <nl> + return 12 ; <nl> + } <nl> <nl> - std : : string getFace ( ) const <nl> + std : : string MyXMLVisitor : : getFace ( ) const <nl> + { <nl> + for ( auto i = _fontElements . rbegin ( ) ; i ! = _fontElements . rend ( ) ; + + i ) <nl> { <nl> - for ( auto i = _fontElements . rbegin ( ) ; i ! = _fontElements . rend ( ) ; + + i ) <nl> - { <nl> - if ( i - > face . size ( ) ! = 0 ) <nl> - return i - > face ; <nl> - } <nl> - return " fonts / Marker Felt . ttf " ; <nl> + if ( i - > face . size ( ) ! = 0 ) <nl> + return i - > face ; <nl> } <nl> + return " fonts / Marker Felt . ttf " ; <nl> + } <nl> <nl> - std : : string getURL ( ) const <nl> + std : : string MyXMLVisitor : : getURL ( ) const <nl> + { <nl> + for ( auto i = _fontElements . rbegin ( ) ; i ! = _fontElements . rend ( ) ; + + i ) <nl> { <nl> - for ( auto i = _fontElements . rbegin ( ) ; i ! = _fontElements . rend ( ) ; + + i ) <nl> - { <nl> - if ( i - > url . size ( ) ! = 0 ) <nl> - return i - > url ; <nl> - } <nl> - return " " ; <nl> + if ( i - > url . size ( ) ! = 0 ) <nl> + return i - > url ; <nl> } <nl> + return " " ; <nl> + } <nl> <nl> - bool getBold ( ) const <nl> + bool MyXMLVisitor : : getBold ( ) const <nl> + { <nl> + for ( auto i = _fontElements . rbegin ( ) ; i ! = _fontElements . rend ( ) ; + + i ) <nl> { <nl> - for ( auto i = _fontElements . rbegin ( ) ; i ! = _fontElements . rend ( ) ; + + i ) <nl> - { <nl> - if ( i - > bold ) <nl> - return true ; <nl> - } <nl> - return false ; <nl> + if ( i - > bold ) <nl> + return true ; <nl> } <nl> + return false ; <nl> + } <nl> <nl> - bool getItalics ( ) const <nl> + bool MyXMLVisitor : : getItalics ( ) const <nl> + { <nl> + for ( auto i = _fontElements . rbegin ( ) ; i ! = _fontElements . rend ( ) ; + + i ) <nl> { <nl> - for ( auto i = _fontElements . rbegin ( ) ; i ! = _fontElements . rend ( ) ; + + i ) <nl> - { <nl> - if ( i - > italics ) <nl> - return true ; <nl> - } <nl> - return false ; <nl> + if ( i - > italics ) <nl> + return true ; <nl> } <nl> + return false ; <nl> + } <nl> <nl> - bool getUnderline ( ) const <nl> + bool MyXMLVisitor : : getUnderline ( ) const <nl> + { <nl> + for ( auto i = _fontElements . rbegin ( ) ; i ! = _fontElements . rend ( ) ; + + i ) <nl> { <nl> - for ( auto i = _fontElements . rbegin ( ) ; i ! = _fontElements . rend ( ) ; + + i ) <nl> - { <nl> - if ( i - > underline ) <nl> - return true ; <nl> - } <nl> - return false ; <nl> + if ( i - > line = = StyleLine : : UNDERLINE ) <nl> + return true ; <nl> } <nl> + return false ; <nl> + } <nl> <nl> - bool getStrikethrough ( ) const <nl> + bool MyXMLVisitor : : getStrikethrough ( ) const <nl> + { <nl> + for ( auto i = _fontElements . rbegin ( ) ; i ! = _fontElements . rend ( ) ; + + i ) <nl> { <nl> - for ( auto i = _fontElements . rbegin ( ) ; i ! = _fontElements . 
rend ( ) ; + + i ) <nl> - { <nl> - if ( i - > strikethrough ) <nl> - return true ; <nl> - } <nl> - return false ; <nl> + if ( i - > line = = StyleLine : : STRIKETHROUGH ) <nl> + return true ; <nl> } <nl> + return false ; <nl> + } <nl> <nl> - public : <nl> - explicit MyXMLVisitor ( RichText * richText ) <nl> - : _richText ( richText ) <nl> - , _fontElements ( 20 ) <nl> - { } <nl> - virtual ~ MyXMLVisitor ( ) { } <nl> + std : : tuple < bool , Color3B , int > MyXMLVisitor : : getOutline ( ) const <nl> + { <nl> + for ( auto i = _fontElements . rbegin ( ) ; i ! = _fontElements . rend ( ) ; + + i ) <nl> + { <nl> + if ( i - > effect = = StyleEffect : : OUTLINE ) <nl> + return std : : make_tuple ( true , i - > outlineColor , i - > outlineSize ) ; <nl> + } <nl> + return std : : make_tuple ( false , Color3B : : WHITE , - 1 ) ; <nl> + } <nl> <nl> - / / / Visit an element . <nl> - virtual bool VisitEnter ( const tinyxml2 : : XMLElement & element , const tinyxml2 : : XMLAttribute * firstAttribute ) <nl> + std : : tuple < bool , Color3B , cocos2d : : Size , int > MyXMLVisitor : : getShadow ( ) const <nl> + { <nl> + for ( auto i = _fontElements . rbegin ( ) ; i ! = _fontElements . rend ( ) ; + + i ) <nl> { <nl> - auto elementName = element . Value ( ) ; <nl> + if ( i - > effect = = StyleEffect : : SHADOW ) <nl> + return std : : make_tuple ( true , i - > shadowColor , i - > shadowOffset , i - > shadowBlurRadius ) ; <nl> + } <nl> + return std : : make_tuple ( false , Color3B : : BLACK , Size ( 2 . 0 , - 2 . 0 ) , 0 ) ; <nl> + } <nl> <nl> - if ( strcmp ( elementName , " font " ) = = 0 ) <nl> - { <nl> - / / supported attributes : <nl> - / / size , color , align , face <nl> - auto size = element . Attribute ( " size " ) ; <nl> - auto color = element . Attribute ( " color " ) ; <nl> - auto face = element . Attribute ( " face " ) ; <nl> - <nl> - Attributes attribs ; <nl> - if ( size ) <nl> - attribs . fontSize = atof ( size ) ; <nl> - if ( color ) <nl> - { <nl> - if ( strlen ( color ) = = 7 ) <nl> - { <nl> - int r , g , b ; <nl> - sscanf ( color , " % * c % 2x % 2x % 2x " , & r , & g , & b ) ; <nl> - attribs . setColor ( Color3B ( r , g , b ) ) ; <nl> + std : : tuple < bool , Color3B > MyXMLVisitor : : getGlow ( ) const <nl> + { <nl> + for ( auto i = _fontElements . rbegin ( ) ; i ! = _fontElements . rend ( ) ; + + i ) <nl> + { <nl> + if ( i - > effect = = StyleEffect : : GLOW ) <nl> + return std : : make_tuple ( true , i - > glowColor ) ; <nl> + } <nl> + return std : : make_tuple ( false , Color3B : : WHITE ) ; <nl> + } <nl> + <nl> + bool MyXMLVisitor : : VisitEnter ( const tinyxml2 : : XMLElement & element , const tinyxml2 : : XMLAttribute * firstAttribute ) <nl> + { <nl> + auto elementName = element . Value ( ) ; <nl> + auto it = _tagTables . find ( elementName ) ; <nl> + if ( it ! = _tagTables . end ( ) ) { <nl> + auto tagBehavior = it - > second ; <nl> + if ( tagBehavior . handleVisitEnter ! = nullptr ) { <nl> + ValueMap & & tagAttrValueMap = tagAttrMapWithXMLElement ( element ) ; <nl> + auto result = tagBehavior . handleVisitEnter ( tagAttrValueMap ) ; <nl> + ValueMap & attrValueMap = result . first ; <nl> + RichElement * richElement = result . second ; <nl> + if ( ! attrValueMap . empty ( ) ) { <nl> + Attributes attributes ; <nl> + <nl> + if ( attrValueMap . find ( RichText : : KEY_FONT_SIZE ) ! = attrValueMap . end ( ) ) { <nl> + attributes . fontSize = attrValueMap . at ( RichText : : KEY_FONT_SIZE ) . asFloat ( ) ; <nl> + } <nl> + if ( attrValueMap . find ( RichText : : KEY_FONT_SMALL ) ! 
= attrValueMap . end ( ) ) { <nl> + attributes . fontSize = getFontSize ( ) * 0 . 8 ; <nl> } <nl> - else <nl> - attribs . setColor ( Color3B : : WHITE ) ; <nl> + if ( attrValueMap . find ( RichText : : KEY_FONT_BIG ) ! = attrValueMap . end ( ) ) { <nl> + attributes . fontSize = getFontSize ( ) * 1 . 25 ; <nl> + } <nl> + if ( attrValueMap . find ( RichText : : KEY_FONT_COLOR_STRING ) ! = attrValueMap . end ( ) ) { <nl> + attributes . setColor ( _richText - > color3BWithString ( attrValueMap . at ( RichText : : KEY_FONT_COLOR_STRING ) . asString ( ) ) ) ; <nl> + } <nl> + if ( attrValueMap . find ( RichText : : KEY_FONT_FACE ) ! = attrValueMap . end ( ) ) { <nl> + attributes . face = attrValueMap . at ( RichText : : KEY_FONT_FACE ) . asString ( ) ; <nl> + } <nl> + if ( attrValueMap . find ( RichText : : KEY_TEXT_BOLD ) ! = attrValueMap . end ( ) ) { <nl> + attributes . bold = true ; <nl> + } <nl> + if ( attrValueMap . find ( RichText : : KEY_TEXT_ITALIC ) ! = attrValueMap . end ( ) ) { <nl> + attributes . italics = true ; <nl> + } <nl> + if ( attrValueMap . find ( RichText : : KEY_TEXT_LINE ) ! = attrValueMap . end ( ) ) { <nl> + auto keyTextLine = attrValueMap . at ( RichText : : KEY_TEXT_LINE ) . asString ( ) ; <nl> + if ( keyTextLine = = RichText : : VALUE_TEXT_LINE_DEL ) { <nl> + attributes . line = StyleLine : : STRIKETHROUGH ; <nl> + } <nl> + else if ( keyTextLine = = RichText : : VALUE_TEXT_LINE_UNDER ) { <nl> + attributes . line = StyleLine : : UNDERLINE ; <nl> + } <nl> + } <nl> + if ( attrValueMap . find ( RichText : : KEY_URL ) ! = attrValueMap . end ( ) ) { <nl> + attributes . url = attrValueMap . at ( RichText : : KEY_URL ) . asString ( ) ; <nl> + attributes . setColor ( _richText - > getAnchorFontColor3B ( ) ) ; <nl> + if ( _richText - > isAnchorTextBoldEnabled ( ) ) { <nl> + attributes . bold = true ; <nl> + } <nl> + if ( _richText - > isAnchorTextItalicEnabled ( ) ) { <nl> + attributes . italics = true ; <nl> + } <nl> + if ( _richText - > isAnchorTextUnderlineEnabled ( ) ) { <nl> + attributes . line = StyleLine : : UNDERLINE ; <nl> + } <nl> + if ( _richText - > isAnchorTextDelEnabled ( ) ) { <nl> + attributes . line = StyleLine : : STRIKETHROUGH ; <nl> + } <nl> + if ( _richText - > isAnchorTextOutlineEnabled ( ) ) { <nl> + attributes . effect = StyleEffect : : OUTLINE ; <nl> + attributes . outlineColor = _richText - > getAnchorTextOutlineColor3B ( ) ; <nl> + attributes . outlineSize = _richText - > getAnchorTextOutlineSize ( ) ; <nl> + } <nl> + if ( _richText - > isAnchorTextShadowEnabled ( ) ) { <nl> + attributes . effect = StyleEffect : : SHADOW ; <nl> + attributes . shadowColor = _richText - > getAnchorTextShadowColor3B ( ) ; <nl> + attributes . shadowOffset = _richText - > getAnchorTextShadowOffset ( ) ; <nl> + attributes . shadowBlurRadius = _richText - > getAnchorTextShadowBlurRadius ( ) ; <nl> + } <nl> + if ( _richText - > isAnchorTextGlowEnabled ( ) ) { <nl> + attributes . effect = StyleEffect : : GLOW ; <nl> + attributes . glowColor = _richText - > getAnchorTextGlowColor3B ( ) ; <nl> + } <nl> + } <nl> + if ( attrValueMap . find ( RichText : : KEY_TEXT_STYLE ) ! = attrValueMap . end ( ) ) { <nl> + auto keyTextStyle = attrValueMap . at ( RichText : : KEY_TEXT_STYLE ) . asString ( ) ; <nl> + if ( keyTextStyle = = RichText : : VALUE_TEXT_STYLE_OUTLINE ) { <nl> + attributes . effect = StyleEffect : : OUTLINE ; <nl> + if ( attrValueMap . find ( RichText : : KEY_TEXT_OUTLINE_COLOR ) ! = attrValueMap . end ( ) ) { <nl> + attributes . 
outlineColor = _richText - > color3BWithString ( attrValueMap . at ( RichText : : KEY_TEXT_OUTLINE_COLOR ) . asString ( ) ) ; <nl> + } <nl> + if ( attrValueMap . find ( RichText : : KEY_TEXT_OUTLINE_SIZE ) ! = attrValueMap . end ( ) ) { <nl> + attributes . outlineSize = attrValueMap . at ( RichText : : KEY_TEXT_OUTLINE_SIZE ) . asInt ( ) ; <nl> + } <nl> + } <nl> + else if ( keyTextStyle = = RichText : : VALUE_TEXT_STYLE_SHADOW ) { <nl> + attributes . effect = StyleEffect : : SHADOW ; <nl> + if ( attrValueMap . find ( RichText : : KEY_TEXT_SHADOW_COLOR ) ! = attrValueMap . end ( ) ) { <nl> + attributes . shadowColor = _richText - > color3BWithString ( attrValueMap . at ( RichText : : KEY_TEXT_SHADOW_COLOR ) . asString ( ) ) ; <nl> + } <nl> + if ( ( attrValueMap . find ( RichText : : KEY_TEXT_SHADOW_OFFSET_WIDTH ) ! = attrValueMap . end ( ) ) <nl> + & & ( attrValueMap . find ( RichText : : KEY_TEXT_SHADOW_OFFSET_HEIGHT ) ! = attrValueMap . end ( ) ) ) { <nl> + attributes . shadowOffset = Size ( attrValueMap . at ( RichText : : KEY_TEXT_SHADOW_OFFSET_WIDTH ) . asFloat ( ) , <nl> + attrValueMap . at ( RichText : : KEY_TEXT_SHADOW_OFFSET_HEIGHT ) . asFloat ( ) ) ; <nl> + } <nl> + if ( attrValueMap . find ( RichText : : KEY_TEXT_SHADOW_BLUR_RADIUS ) ! = attrValueMap . end ( ) ) { <nl> + attributes . shadowBlurRadius = attrValueMap . at ( RichText : : KEY_TEXT_SHADOW_BLUR_RADIUS ) . asInt ( ) ; <nl> + } <nl> + } <nl> + else if ( keyTextStyle = = RichText : : VALUE_TEXT_STYLE_GLOW ) { <nl> + attributes . effect = StyleEffect : : GLOW ; <nl> + if ( attrValueMap . find ( RichText : : KEY_TEXT_GLOW_COLOR ) ! = attrValueMap . end ( ) ) { <nl> + attributes . glowColor = _richText - > color3BWithString ( attrValueMap . at ( RichText : : KEY_TEXT_GLOW_COLOR ) . asString ( ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + pushBackFontElement ( attributes ) ; <nl> + } <nl> + if ( richElement ) { <nl> + if ( richElement - > equalType ( RichElement : : Type : : IMAGE ) ) { <nl> + richElement - > setColor ( getColor ( ) ) ; <nl> + auto * richElementImage = static_cast < RichElementImage * > ( richElement ) ; <nl> + richElementImage - > setUrl ( getURL ( ) ) ; <nl> + } <nl> + else if ( richElement - > equalType ( RichElement : : Type : : NEWLINE ) ) { <nl> + richElement - > setColor ( getColor ( ) ) ; <nl> + } <nl> + pushBackElement ( richElement ) ; <nl> } <nl> - if ( face ) <nl> - attribs . face = face ; <nl> - <nl> - _fontElements . push_back ( attribs ) ; <nl> - } <nl> - else if ( strcmp ( elementName , " b " ) = = 0 ) <nl> - { <nl> - / / no supported attributes <nl> - Attributes attribs ; <nl> - attribs . bold = 1 ; <nl> - _fontElements . push_back ( attribs ) ; <nl> - } <nl> - else if ( strcmp ( elementName , " i " ) = = 0 ) <nl> - { <nl> - / / no supported attributes <nl> - Attributes attribs ; <nl> - attribs . italics = 1 ; <nl> - _fontElements . push_back ( attribs ) ; <nl> - } <nl> - else if ( strcmp ( elementName , " del " ) = = 0 ) <nl> - { <nl> - / / no supported attributes <nl> - Attributes attribs ; <nl> - attribs . strikethrough = true ; <nl> - _fontElements . push_back ( attribs ) ; <nl> - } <nl> - else if ( strcmp ( elementName , " u " ) = = 0 ) <nl> - { <nl> - / / no supported attributes <nl> - Attributes attribs ; <nl> - attribs . underline = true ; <nl> - _fontElements . push_back ( attribs ) ; <nl> - } <nl> - else if ( strcmp ( elementName , " small " ) = = 0 ) <nl> - { <nl> - Attributes attribs ; <nl> - attribs . fontSize = getFontSize ( ) * 0 . 8 ; <nl> - _fontElements . 
push_back ( attribs ) ; <nl> } <nl> + } <nl> + return true ; <nl> + } <nl> <nl> - else if ( strcmp ( elementName , " big " ) = = 0 ) <nl> - { <nl> - Attributes attribs ; <nl> - attribs . fontSize = getFontSize ( ) * 1 . 25 ; <nl> - _fontElements . push_back ( attribs ) ; <nl> + bool MyXMLVisitor : : VisitExit ( const tinyxml2 : : XMLElement & element ) <nl> + { <nl> + auto elementName = element . Value ( ) ; <nl> + auto it = _tagTables . find ( elementName ) ; <nl> + if ( it ! = _tagTables . end ( ) ) { <nl> + auto tagBehavior = it - > second ; <nl> + if ( tagBehavior . isFontElement ) { <nl> + popBackFontElement ( ) ; <nl> } <nl> + } <nl> + return true ; <nl> + } <nl> <nl> - else if ( strcmp ( elementName , " img " ) = = 0 ) <nl> - { <nl> - / / supported attributes : <nl> - / / src , height , width <nl> - auto src = element . Attribute ( " src " ) ; <nl> - auto height = element . Attribute ( " height " ) ; <nl> - auto width = element . Attribute ( " width " ) ; <nl> - <nl> - if ( src ) { <nl> - auto elementNL = RichElementImage : : create ( 0 , getColor ( ) , 255 , src ) ; <nl> - <nl> - if ( height ) <nl> - elementNL - > setHeight ( atoi ( height ) ) ; <nl> - if ( width ) <nl> - elementNL - > setWidth ( atoi ( width ) ) ; <nl> - <nl> - _richText - > pushBackElement ( elementNL ) ; <nl> - } <nl> - } <nl> - else if ( strcmp ( elementName , " a " ) = = 0 ) <nl> - { <nl> - / / supported attributes : <nl> - Attributes attribs ; <nl> - auto href = element . Attribute ( " href " ) ; <nl> - attribs . setColor ( Color3B : : BLUE ) ; <nl> - attribs . underline = true ; <nl> - attribs . url = href ; <nl> - _fontElements . push_back ( attribs ) ; <nl> - } <nl> - else if ( strcmp ( elementName , " br " ) = = 0 ) <nl> - { <nl> - auto color = getColor ( ) ; <nl> - auto elementNL = RichElementNewLine : : create ( 0 , color , 255 ) ; <nl> - _richText - > pushBackElement ( elementNL ) ; <nl> + bool MyXMLVisitor : : Visit ( const tinyxml2 : : XMLText & text ) <nl> + { <nl> + auto color = getColor ( ) ; <nl> + auto face = getFace ( ) ; <nl> + auto fontSize = getFontSize ( ) ; <nl> + auto italics = getItalics ( ) ; <nl> + auto underline = getUnderline ( ) ; <nl> + auto strikethrough = getStrikethrough ( ) ; <nl> + auto bold = getBold ( ) ; <nl> + auto url = getURL ( ) ; <nl> + auto outline = getOutline ( ) ; <nl> + auto shadow = getShadow ( ) ; <nl> + auto glow = getGlow ( ) ; <nl> + <nl> + uint32_t flags = 0 ; <nl> + if ( italics ) <nl> + flags | = RichElementText : : ITALICS_FLAG ; <nl> + if ( bold ) <nl> + flags | = RichElementText : : BOLD_FLAG ; <nl> + if ( underline ) <nl> + flags | = RichElementText : : UNDERLINE_FLAG ; <nl> + if ( strikethrough ) <nl> + flags | = RichElementText : : STRIKETHROUGH_FLAG ; <nl> + if ( url . size ( ) > 0 ) <nl> + flags | = RichElementText : : URL_FLAG ; <nl> + if ( std : : get < 0 > ( outline ) ) <nl> + flags | = RichElementText : : OUTLINE_FLAG ; <nl> + if ( std : : get < 0 > ( shadow ) ) <nl> + flags | = RichElementText : : SHADOW_FLAG ; <nl> + if ( std : : get < 0 > ( glow ) ) <nl> + flags | = RichElementText : : GLOW_FLAG ; <nl> + <nl> + auto element = RichElementText : : create ( 0 , color , 255 , text . 
Value ( ) , face , fontSize , flags , url , <nl> + std : : get < 1 > ( outline ) , std : : get < 2 > ( outline ) , <nl> + std : : get < 1 > ( shadow ) , std : : get < 2 > ( shadow ) , std : : get < 3 > ( shadow ) , <nl> + std : : get < 1 > ( glow ) ) ; <nl> + _richText - > pushBackElement ( element ) ; <nl> + return true ; <nl> + } <nl> + <nl> + void MyXMLVisitor : : pushBackFontElement ( const MyXMLVisitor : : Attributes & attribs ) <nl> + { <nl> + _fontElements . push_back ( attribs ) ; <nl> + } <nl> + <nl> + void MyXMLVisitor : : popBackFontElement ( ) <nl> + { <nl> + _fontElements . pop_back ( ) ; <nl> + } <nl> + <nl> + void MyXMLVisitor : : pushBackElement ( RichElement * element ) <nl> + { <nl> + _richText - > pushBackElement ( element ) ; <nl> + } <nl> + <nl> + void MyXMLVisitor : : setTagDescription ( const std : : string & tag , bool isFontElement , RichText : : VisitEnterHandler handleVisitEnter ) <nl> + { <nl> + MyXMLVisitor : : _tagTables [ tag ] = { isFontElement , handleVisitEnter } ; <nl> + } <nl> + <nl> + void MyXMLVisitor : : removeTagDescription ( const std : : string & tag ) <nl> + { <nl> + MyXMLVisitor : : _tagTables . erase ( tag ) ; <nl> + } <nl> + <nl> + ValueMap MyXMLVisitor : : tagAttrMapWithXMLElement ( const tinyxml2 : : XMLElement & element ) <nl> + { <nl> + ValueMap tagAttrValueMap ; <nl> + for ( const tinyxml2 : : XMLAttribute * attr = element . FirstAttribute ( ) ; attr ! = nullptr ; attr = attr - > Next ( ) ) { <nl> + if ( attr - > Name ( ) & & attr - > Value ( ) ) { <nl> + tagAttrValueMap [ std : : string ( attr - > Name ( ) ) ] = std : : string ( attr - > Value ( ) ) ; <nl> } <nl> - return true ; <nl> } <nl> - / / / Visit an element . <nl> - virtual bool VisitExit ( const tinyxml2 : : XMLElement & element ) <nl> + return tagAttrValueMap ; <nl> + } <nl> + <nl> + const std : : string RichText : : KEY_VERTICAL_SPACE ( " KEY_VERTICAL_SPACE " ) ; <nl> + const std : : string RichText : : KEY_WRAP_MODE ( " KEY_WRAP_MODE " ) ; <nl> + const std : : string RichText : : KEY_FONT_COLOR_STRING ( " KEY_FONT_COLOR_STRING " ) ; <nl> + const std : : string RichText : : KEY_FONT_SIZE ( " KEY_FONT_SIZE " ) ; <nl> + const std : : string RichText : : KEY_FONT_SMALL ( " KEY_FONT_SMALL " ) ; <nl> + const std : : string RichText : : KEY_FONT_BIG ( " KEY_FONT_BIG " ) ; <nl> + const std : : string RichText : : KEY_FONT_FACE ( " KEY_FONT_FACE " ) ; <nl> + const std : : string RichText : : KEY_TEXT_BOLD ( " KEY_TEXT_BOLD " ) ; <nl> + const std : : string RichText : : KEY_TEXT_ITALIC ( " KEY_TEXT_ITALIC " ) ; <nl> + const std : : string RichText : : KEY_TEXT_LINE ( " KEY_TEXT_LINE " ) ; <nl> + const std : : string RichText : : VALUE_TEXT_LINE_NONE ( " VALUE_TEXT_LINE_NONE " ) ; <nl> + const std : : string RichText : : VALUE_TEXT_LINE_DEL ( " VALUE_TEXT_LINE_DEL " ) ; <nl> + const std : : string RichText : : VALUE_TEXT_LINE_UNDER ( " VALUE_TEXT_LINE_UNDER " ) ; <nl> + const std : : string RichText : : KEY_TEXT_STYLE ( " KEY_TEXT_STYLE " ) ; <nl> + const std : : string RichText : : VALUE_TEXT_STYLE_NONE ( " VALUE_TEXT_STYLE_NONE " ) ; <nl> + const std : : string RichText : : VALUE_TEXT_STYLE_OUTLINE ( " VALUE_TEXT_STYLE_OUTLINE " ) ; <nl> + const std : : string RichText : : VALUE_TEXT_STYLE_SHADOW ( " VALUE_TEXT_STYLE_SHADOW " ) ; <nl> + const std : : string RichText : : VALUE_TEXT_STYLE_GLOW ( " VALUE_TEXT_STYLE_GLOW " ) ; <nl> + const std : : string RichText : : KEY_TEXT_OUTLINE_COLOR ( " KEY_TEXT_OUTLINE_COLOR " ) ; <nl> + const std : : string RichText : : KEY_TEXT_OUTLINE_SIZE ( " 
KEY_TEXT_OUTLINE_SIZE " ) ; <nl> + const std : : string RichText : : KEY_TEXT_SHADOW_COLOR ( " KEY_TEXT_SHADOW_COLOR " ) ; <nl> + const std : : string RichText : : KEY_TEXT_SHADOW_OFFSET_WIDTH ( " KEY_TEXT_SHADOW_OFFSET_WIDTH " ) ; <nl> + const std : : string RichText : : KEY_TEXT_SHADOW_OFFSET_HEIGHT ( " KEY_TEXT_SHADOW_OFFSET_HEIGHT " ) ; <nl> + const std : : string RichText : : KEY_TEXT_SHADOW_BLUR_RADIUS ( " KEY_TEXT_SHADOW_BLUR_RADIUS " ) ; <nl> + const std : : string RichText : : KEY_TEXT_GLOW_COLOR ( " KEY_TEXT_GLOW_COLOR " ) ; <nl> + const std : : string RichText : : KEY_URL ( " KEY_URL " ) ; <nl> + const std : : string RichText : : KEY_ANCHOR_FONT_COLOR_STRING ( " KEY_ANCHOR_FONT_COLOR_STRING " ) ; <nl> + const std : : string RichText : : KEY_ANCHOR_TEXT_BOLD ( " KEY_ANCHOR_TEXT_BOLD " ) ; <nl> + const std : : string RichText : : KEY_ANCHOR_TEXT_ITALIC ( " KEY_ANCHOR_TEXT_ITALIC " ) ; <nl> + const std : : string RichText : : KEY_ANCHOR_TEXT_LINE ( " KEY_ANCHOR_TEXT_LINE " ) ; <nl> + const std : : string RichText : : KEY_ANCHOR_TEXT_STYLE ( " KEY_ANCHOR_TEXT_STYLE " ) ; <nl> + const std : : string RichText : : KEY_ANCHOR_TEXT_OUTLINE_COLOR ( " KEY_ANCHOR_TEXT_OUTLINE_COLOR " ) ; <nl> + const std : : string RichText : : KEY_ANCHOR_TEXT_OUTLINE_SIZE ( " KEY_ANCHOR_TEXT_OUTLINE_SIZE " ) ; <nl> + const std : : string RichText : : KEY_ANCHOR_TEXT_SHADOW_COLOR ( " KEY_ANCHOR_TEXT_SHADOW_COLOR " ) ; <nl> + const std : : string RichText : : KEY_ANCHOR_TEXT_SHADOW_OFFSET_WIDTH ( " KEY_ANCHOR_TEXT_SHADOW_OFFSET_WIDTH " ) ; <nl> + const std : : string RichText : : KEY_ANCHOR_TEXT_SHADOW_OFFSET_HEIGHT ( " KEY_ANCHOR_TEXT_SHADOW_OFFSET_HEIGHT " ) ; <nl> + const std : : string RichText : : KEY_ANCHOR_TEXT_SHADOW_BLUR_RADIUS ( " KEY_ANCHOR_TEXT_SHADOW_BLUR_RADIUS " ) ; <nl> + const std : : string RichText : : KEY_ANCHOR_TEXT_GLOW_COLOR ( " KEY_ANCHOR_TEXT_GLOW_COLOR " ) ; <nl> + <nl> + RichText : : RichText ( ) <nl> + : _formatTextDirty ( true ) <nl> + , _leftSpaceWidth ( 0 . 0f ) <nl> + { <nl> + _defaults [ KEY_VERTICAL_SPACE ] = 0 . 0f ; <nl> + _defaults [ KEY_WRAP_MODE ] = static_cast < int > ( WrapMode : : WRAP_PER_WORD ) ; <nl> + _defaults [ KEY_FONT_COLOR_STRING ] = " # ffffff " ; <nl> + _defaults [ KEY_FONT_SIZE ] = 12 . 0f ; <nl> + _defaults [ KEY_FONT_FACE ] = " Verdana " ; <nl> + _defaults [ KEY_ANCHOR_FONT_COLOR_STRING ] = " # 0000FF " ; <nl> + _defaults [ KEY_ANCHOR_TEXT_BOLD ] = false ; <nl> + _defaults [ KEY_ANCHOR_TEXT_ITALIC ] = false ; <nl> + _defaults [ KEY_ANCHOR_TEXT_LINE ] = VALUE_TEXT_LINE_NONE ; <nl> + _defaults [ KEY_ANCHOR_TEXT_STYLE ] = VALUE_TEXT_STYLE_NONE ; <nl> + } <nl> + <nl> + RichText : : ~ RichText ( ) <nl> + { <nl> + _richElements . clear ( ) ; <nl> + } <nl> + <nl> + RichText * RichText : : create ( ) <nl> + { <nl> + RichText * widget = new ( std : : nothrow ) RichText ( ) ; <nl> + if ( widget & & widget - > init ( ) ) <nl> { <nl> - auto elementName = element . Value ( ) ; <nl> - if ( ( strcmp ( elementName , " font " ) = = 0 ) | | <nl> - ( strcmp ( elementName , " i " ) = = 0 ) | | <nl> - ( strcmp ( elementName , " b " ) = = 0 ) | | <nl> - ( strcmp ( elementName , " del " ) = = 0 ) | | <nl> - ( strcmp ( elementName , " u " ) = = 0 ) | | <nl> - ( strcmp ( elementName , " small " ) = = 0 ) | | <nl> - ( strcmp ( elementName , " big " ) = = 0 ) | | <nl> - ( strcmp ( elementName , " a " ) = = 0 ) ) <nl> - { <nl> - _fontElements . 
pop_back ( ) ; <nl> - } <nl> - return true ; <nl> + widget - > autorelease ( ) ; <nl> + return widget ; <nl> } <nl> + CC_SAFE_DELETE ( widget ) ; <nl> + return nullptr ; <nl> + } <nl> <nl> - / / / Visit a text node . <nl> - virtual bool Visit ( const tinyxml2 : : XMLText & text ) <nl> + RichText * RichText : : createWithXML ( const std : : string & xml , const ValueMap & defaults , const OpenUrlHandler & handleOpenUrl ) <nl> + { <nl> + RichText * widget = new ( std : : nothrow ) RichText ( ) ; <nl> + if ( widget & & widget - > initWithXML ( xml , defaults , handleOpenUrl ) ) <nl> + { <nl> + widget - > autorelease ( ) ; <nl> + return widget ; <nl> + } <nl> + CC_SAFE_DELETE ( widget ) ; <nl> + return nullptr ; <nl> + } <nl> + <nl> + bool RichText : : init ( ) <nl> + { <nl> + if ( Widget : : init ( ) ) <nl> { <nl> - auto color = getColor ( ) ; <nl> - auto face = getFace ( ) ; <nl> - auto fontSize = getFontSize ( ) ; <nl> - auto italics = getItalics ( ) ; <nl> - auto underline = getUnderline ( ) ; <nl> - auto strikethrough = getStrikethrough ( ) ; <nl> - auto bold = getBold ( ) ; <nl> - auto url = getURL ( ) ; <nl> - <nl> - uint32_t flags = 0 ; <nl> - if ( italics ) <nl> - flags | = RichElementText : : ITALICS_FLAG ; <nl> - if ( bold ) <nl> - flags | = RichElementText : : BOLD_FLAG ; <nl> - if ( underline ) <nl> - flags | = RichElementText : : UNDERLINE_FLAG ; <nl> - if ( strikethrough ) <nl> - flags | = RichElementText : : STRIKETHROUGH_FLAG ; <nl> - if ( url . size ( ) > 0 ) <nl> - flags | = RichElementText : : URL_FLAG ; <nl> - <nl> - auto element = RichElementText : : create ( 0 , color , 255 , text . Value ( ) , face , fontSize , flags , url ) ; <nl> - _richText - > pushBackElement ( element ) ; <nl> return true ; <nl> } <nl> - } ; <nl> + return false ; <nl> + } <nl> <nl> - bool RichText : : initWithXML ( const std : : string & origxml ) <nl> + bool RichText : : initWithXML ( const std : : string & origxml , const ValueMap & defaults , const OpenUrlHandler & handleOpenUrl ) <nl> { <nl> + static std : : function < std : : string ( RichText * ) > startTagFont = [ ] ( RichText * richText ) { <nl> + std : : string fontFace = richText - > getFontFace ( ) ; <nl> + std : : stringstream ss ; <nl> + ss < < richText - > getFontSize ( ) ; <nl> + std : : string fontSize = ss . str ( ) ; <nl> + std : : string fontColor = richText - > getFontColor ( ) ; <nl> + return " < font face = \ " " + fontFace + " \ " size = \ " " + fontSize + " \ " color = \ " " + fontColor + " \ " > " ; <nl> + } ; <nl> if ( Widget : : init ( ) ) <nl> { <nl> + setDefaults ( defaults ) ; <nl> + setOpenUrlHandler ( handleOpenUrl ) ; <nl> + <nl> tinyxml2 : : XMLDocument document ; <nl> <nl> / / solves to issues : <nl> / / - creates defaults values <nl> / / - makes sure that the xml well formed and starts with an element <nl> - auto xml = " < font face = \ " Verdana \ " size = \ " 12 \ " color = \ " # ffffff \ " > " + origxml + " < / font > " ; <nl> + std : : string xml = startTagFont ( this ) ; <nl> + xml + = origxml ; <nl> + xml + = " < / font > " ; <nl> <nl> auto error = document . Parse ( xml . c_str ( ) , xml . length ( ) ) ; <nl> if ( error = = tinyxml2 : : XML_SUCCESS ) <nl> void RichText : : removeElement ( RichElement * element ) <nl> <nl> RichText : : WrapMode RichText : : getWrapMode ( ) const <nl> { <nl> - return _wrapMode ; <nl> + return static_cast < RichText : : WrapMode > ( _defaults . at ( KEY_WRAP_MODE ) . 
asInt ( ) ) ; <nl> } <nl> <nl> void RichText : : setWrapMode ( RichText : : WrapMode wrapMode ) <nl> { <nl> - if ( _wrapMode ! = wrapMode ) <nl> + if ( static_cast < RichText : : WrapMode > ( _defaults . at ( KEY_WRAP_MODE ) . asInt ( ) ) ! = wrapMode ) <nl> { <nl> - _wrapMode = wrapMode ; <nl> + _defaults [ KEY_WRAP_MODE ] = static_cast < int > ( wrapMode ) ; <nl> _formatTextDirty = true ; <nl> } <nl> } <nl> <nl> + void RichText : : setFontColor ( const std : : string & color ) <nl> + { <nl> + _defaults [ KEY_FONT_COLOR_STRING ] = color ; <nl> + } <nl> + <nl> + std : : string RichText : : getFontColor ( ) <nl> + { <nl> + return _defaults . at ( KEY_FONT_COLOR_STRING ) . asString ( ) ; <nl> + } <nl> + <nl> + cocos2d : : Color3B RichText : : getFontColor3B ( ) <nl> + { <nl> + return color3BWithString ( getFontColor ( ) ) ; <nl> + } <nl> + <nl> + void RichText : : setFontSize ( float size ) <nl> + { <nl> + _defaults [ KEY_FONT_SIZE ] = size ; <nl> + } <nl> + <nl> + float RichText : : getFontSize ( ) <nl> + { <nl> + return _defaults . at ( KEY_FONT_SIZE ) . asFloat ( ) ; <nl> + } <nl> + <nl> + void RichText : : setFontFace ( const std : : string & face ) <nl> + { <nl> + _defaults [ KEY_FONT_FACE ] = face ; <nl> + } <nl> + <nl> + std : : string RichText : : getFontFace ( ) <nl> + { <nl> + return _defaults . at ( KEY_FONT_FACE ) . asString ( ) ; <nl> + } <nl> + <nl> + void RichText : : setAnchorFontColor ( const std : : string & color ) <nl> + { <nl> + _defaults [ KEY_ANCHOR_FONT_COLOR_STRING ] = color ; <nl> + } <nl> + <nl> + std : : string RichText : : getAnchorFontColor ( ) <nl> + { <nl> + return _defaults . at ( KEY_ANCHOR_FONT_COLOR_STRING ) . asString ( ) ; <nl> + } <nl> + <nl> + cocos2d : : Color3B RichText : : getAnchorFontColor3B ( ) <nl> + { <nl> + return color3BWithString ( getAnchorFontColor ( ) ) ; <nl> + } <nl> + <nl> + void RichText : : setAnchorTextBold ( bool enable ) <nl> + { <nl> + _defaults [ KEY_ANCHOR_TEXT_BOLD ] = enable ; <nl> + } <nl> + <nl> + bool RichText : : isAnchorTextBoldEnabled ( ) <nl> + { <nl> + return _defaults [ KEY_ANCHOR_TEXT_BOLD ] . asBool ( ) ; <nl> + } <nl> + <nl> + void RichText : : setAnchorTextItalic ( bool enable ) <nl> + { <nl> + _defaults [ KEY_ANCHOR_TEXT_ITALIC ] = enable ; <nl> + } <nl> + <nl> + bool RichText : : isAnchorTextItalicEnabled ( ) <nl> + { <nl> + return _defaults [ KEY_ANCHOR_TEXT_ITALIC ] . asBool ( ) ; <nl> + } <nl> + <nl> + void RichText : : setAnchorTextDel ( bool enable ) <nl> + { <nl> + if ( enable ) <nl> + _defaults [ KEY_ANCHOR_TEXT_LINE ] = VALUE_TEXT_LINE_DEL ; <nl> + else if ( _defaults [ KEY_ANCHOR_TEXT_LINE ] . asString ( ) = = VALUE_TEXT_LINE_DEL ) <nl> + _defaults [ KEY_ANCHOR_TEXT_LINE ] = VALUE_TEXT_LINE_NONE ; <nl> + } <nl> + <nl> + bool RichText : : isAnchorTextDelEnabled ( ) <nl> + { <nl> + return ( _defaults [ KEY_ANCHOR_TEXT_LINE ] . asString ( ) = = VALUE_TEXT_LINE_DEL ) ; <nl> + } <nl> + <nl> + void RichText : : setAnchorTextUnderline ( bool enable ) <nl> + { <nl> + if ( enable ) <nl> + _defaults [ KEY_ANCHOR_TEXT_LINE ] = VALUE_TEXT_LINE_UNDER ; <nl> + else if ( _defaults [ KEY_ANCHOR_TEXT_LINE ] . asString ( ) = = VALUE_TEXT_LINE_UNDER ) <nl> + _defaults [ KEY_ANCHOR_TEXT_LINE ] = VALUE_TEXT_LINE_NONE ; <nl> + } <nl> + <nl> + bool RichText : : isAnchorTextUnderlineEnabled ( ) <nl> + { <nl> + return ( _defaults [ KEY_ANCHOR_TEXT_LINE ] . 
asString ( ) = = VALUE_TEXT_LINE_UNDER ) ; <nl> + } <nl> + <nl> + void RichText : : setAnchorTextOutline ( bool enable , const Color3B & outlineColor , int outlineSize ) <nl> + { <nl> + if ( enable ) <nl> + _defaults [ KEY_ANCHOR_TEXT_STYLE ] = VALUE_TEXT_STYLE_OUTLINE ; <nl> + else if ( _defaults [ KEY_ANCHOR_TEXT_STYLE ] . asString ( ) = = VALUE_TEXT_STYLE_OUTLINE ) <nl> + _defaults [ KEY_ANCHOR_TEXT_STYLE ] = VALUE_TEXT_STYLE_NONE ; <nl> + _defaults [ KEY_ANCHOR_TEXT_OUTLINE_COLOR ] = stringWithColor3B ( outlineColor ) ; <nl> + _defaults [ KEY_ANCHOR_TEXT_OUTLINE_SIZE ] = outlineSize ; <nl> + } <nl> + <nl> + bool RichText : : isAnchorTextOutlineEnabled ( ) <nl> + { <nl> + return ( _defaults [ KEY_ANCHOR_TEXT_STYLE ] . asString ( ) = = VALUE_TEXT_STYLE_OUTLINE ) ; <nl> + } <nl> + <nl> + Color3B RichText : : getAnchorTextOutlineColor3B ( ) <nl> + { <nl> + if ( _defaults . find ( KEY_ANCHOR_TEXT_OUTLINE_COLOR ) ! = _defaults . end ( ) ) { <nl> + return color3BWithString ( _defaults . at ( KEY_ANCHOR_TEXT_OUTLINE_COLOR ) . asString ( ) ) ; <nl> + } <nl> + return Color3B ( ) ; <nl> + } <nl> + <nl> + int RichText : : getAnchorTextOutlineSize ( ) <nl> + { <nl> + if ( _defaults . find ( KEY_ANCHOR_TEXT_OUTLINE_SIZE ) ! = _defaults . end ( ) ) { <nl> + return _defaults . at ( KEY_ANCHOR_TEXT_OUTLINE_SIZE ) . asInt ( ) ; <nl> + } <nl> + return - 1 ; <nl> + } <nl> + <nl> + void RichText : : setAnchorTextShadow ( bool enable , const Color3B & shadowColor , const Size & offset , int blurRadius ) <nl> + { <nl> + if ( enable ) <nl> + _defaults [ KEY_ANCHOR_TEXT_STYLE ] = VALUE_TEXT_STYLE_SHADOW ; <nl> + else if ( _defaults [ KEY_ANCHOR_TEXT_STYLE ] . asString ( ) = = VALUE_TEXT_STYLE_SHADOW ) <nl> + _defaults [ KEY_ANCHOR_TEXT_STYLE ] = VALUE_TEXT_STYLE_NONE ; <nl> + _defaults [ KEY_ANCHOR_TEXT_SHADOW_COLOR ] = stringWithColor3B ( shadowColor ) ; <nl> + _defaults [ KEY_ANCHOR_TEXT_SHADOW_OFFSET_WIDTH ] = offset . width ; <nl> + _defaults [ KEY_ANCHOR_TEXT_SHADOW_OFFSET_HEIGHT ] = offset . height ; <nl> + _defaults [ KEY_ANCHOR_TEXT_SHADOW_BLUR_RADIUS ] = blurRadius ; <nl> + } <nl> + <nl> + bool RichText : : isAnchorTextShadowEnabled ( ) <nl> + { <nl> + return ( _defaults [ KEY_ANCHOR_TEXT_STYLE ] . asString ( ) = = VALUE_TEXT_STYLE_SHADOW ) ; <nl> + } <nl> + <nl> + Color3B RichText : : getAnchorTextShadowColor3B ( ) <nl> + { <nl> + if ( _defaults . find ( KEY_ANCHOR_TEXT_SHADOW_COLOR ) ! = _defaults . end ( ) ) { <nl> + return color3BWithString ( _defaults . at ( KEY_ANCHOR_TEXT_SHADOW_COLOR ) . asString ( ) ) ; <nl> + } <nl> + return Color3B ( ) ; <nl> + } <nl> + <nl> + Size RichText : : getAnchorTextShadowOffset ( ) <nl> + { <nl> + float width = 2 . 0f ; <nl> + float height = - 2 . 0f ; <nl> + if ( _defaults . find ( KEY_ANCHOR_TEXT_SHADOW_OFFSET_WIDTH ) ! = _defaults . end ( ) ) { <nl> + width = _defaults . at ( KEY_ANCHOR_TEXT_SHADOW_OFFSET_WIDTH ) . asFloat ( ) ; <nl> + } <nl> + if ( _defaults . find ( KEY_ANCHOR_TEXT_SHADOW_OFFSET_HEIGHT ) ! = _defaults . end ( ) ) { <nl> + height = _defaults . at ( KEY_ANCHOR_TEXT_SHADOW_OFFSET_HEIGHT ) . asFloat ( ) ; <nl> + } <nl> + return Size ( width , height ) ; <nl> + } <nl> + <nl> + int RichText : : getAnchorTextShadowBlurRadius ( ) <nl> + { <nl> + if ( _defaults . find ( KEY_ANCHOR_TEXT_SHADOW_BLUR_RADIUS ) ! = _defaults . end ( ) ) { <nl> + return _defaults . at ( KEY_ANCHOR_TEXT_SHADOW_BLUR_RADIUS ) . 
asInt ( ) ; <nl> + } <nl> + return 0 ; <nl> + } <nl> + <nl> + void RichText : : setAnchorTextGlow ( bool enable , const Color3B & glowColor ) <nl> + { <nl> + if ( enable ) <nl> + _defaults [ KEY_ANCHOR_TEXT_STYLE ] = VALUE_TEXT_STYLE_GLOW ; <nl> + else if ( _defaults [ KEY_ANCHOR_TEXT_STYLE ] . asString ( ) = = VALUE_TEXT_STYLE_GLOW ) <nl> + _defaults [ KEY_ANCHOR_TEXT_STYLE ] = VALUE_TEXT_STYLE_NONE ; <nl> + _defaults [ KEY_ANCHOR_TEXT_GLOW_COLOR ] = stringWithColor3B ( glowColor ) ; <nl> + } <nl> + <nl> + bool RichText : : isAnchorTextGlowEnabled ( ) <nl> + { <nl> + return ( _defaults [ KEY_ANCHOR_TEXT_STYLE ] . asString ( ) = = VALUE_TEXT_STYLE_GLOW ) ; <nl> + } <nl> + <nl> + Color3B RichText : : getAnchorTextGlowColor3B ( ) <nl> + { <nl> + if ( _defaults . find ( KEY_ANCHOR_TEXT_GLOW_COLOR ) ! = _defaults . end ( ) ) { <nl> + return color3BWithString ( _defaults . at ( KEY_ANCHOR_TEXT_GLOW_COLOR ) . asString ( ) ) ; <nl> + } <nl> + return Color3B ( ) ; <nl> + } <nl> + <nl> + void RichText : : setDefaults ( const ValueMap & defaults ) <nl> + { <nl> + if ( defaults . find ( KEY_VERTICAL_SPACE ) ! = defaults . end ( ) ) { <nl> + _defaults [ KEY_VERTICAL_SPACE ] = defaults . at ( KEY_VERTICAL_SPACE ) . asFloat ( ) ; <nl> + } <nl> + if ( defaults . find ( KEY_WRAP_MODE ) ! = defaults . end ( ) ) { <nl> + _defaults [ KEY_WRAP_MODE ] = defaults . at ( KEY_WRAP_MODE ) . asInt ( ) ; <nl> + } <nl> + if ( defaults . find ( KEY_FONT_COLOR_STRING ) ! = defaults . end ( ) ) { <nl> + _defaults [ KEY_FONT_COLOR_STRING ] = defaults . at ( KEY_FONT_COLOR_STRING ) . asString ( ) ; <nl> + } <nl> + if ( defaults . find ( KEY_FONT_SIZE ) ! = defaults . end ( ) ) { <nl> + _defaults [ KEY_FONT_SIZE ] = defaults . at ( KEY_FONT_SIZE ) . asFloat ( ) ; <nl> + } <nl> + if ( defaults . find ( KEY_FONT_FACE ) ! = defaults . end ( ) ) { <nl> + _defaults [ KEY_FONT_FACE ] = defaults . at ( KEY_FONT_FACE ) . asString ( ) ; <nl> + } <nl> + if ( defaults . find ( KEY_ANCHOR_FONT_COLOR_STRING ) ! = defaults . end ( ) ) { <nl> + _defaults [ KEY_ANCHOR_FONT_COLOR_STRING ] = defaults . at ( KEY_ANCHOR_FONT_COLOR_STRING ) . asString ( ) ; <nl> + } <nl> + if ( defaults . find ( KEY_ANCHOR_TEXT_BOLD ) ! = defaults . end ( ) ) { <nl> + _defaults [ KEY_ANCHOR_TEXT_BOLD ] = defaults . at ( KEY_ANCHOR_TEXT_BOLD ) . asBool ( ) ; <nl> + } <nl> + if ( defaults . find ( KEY_ANCHOR_TEXT_ITALIC ) ! = defaults . end ( ) ) { <nl> + _defaults [ KEY_ANCHOR_TEXT_ITALIC ] = defaults . at ( KEY_ANCHOR_TEXT_ITALIC ) . asBool ( ) ; <nl> + } <nl> + if ( defaults . find ( KEY_ANCHOR_TEXT_LINE ) ! = defaults . end ( ) ) { <nl> + _defaults [ KEY_ANCHOR_TEXT_LINE ] = defaults . at ( KEY_ANCHOR_TEXT_LINE ) . asString ( ) ; <nl> + } <nl> + if ( defaults . find ( KEY_ANCHOR_TEXT_STYLE ) ! = defaults . end ( ) ) { <nl> + _defaults [ KEY_ANCHOR_TEXT_STYLE ] = defaults . at ( KEY_ANCHOR_TEXT_STYLE ) . asString ( ) ; <nl> + } <nl> + if ( defaults . find ( KEY_ANCHOR_TEXT_OUTLINE_COLOR ) ! = defaults . end ( ) ) { <nl> + _defaults [ KEY_ANCHOR_TEXT_OUTLINE_COLOR ] = defaults . at ( KEY_ANCHOR_TEXT_OUTLINE_COLOR ) . asString ( ) ; <nl> + } <nl> + if ( defaults . find ( KEY_ANCHOR_TEXT_OUTLINE_SIZE ) ! = defaults . end ( ) ) { <nl> + _defaults [ KEY_ANCHOR_TEXT_OUTLINE_SIZE ] = defaults . at ( KEY_ANCHOR_TEXT_OUTLINE_SIZE ) . asInt ( ) ; <nl> + } <nl> + if ( defaults . find ( KEY_ANCHOR_TEXT_SHADOW_COLOR ) ! = defaults . end ( ) ) { <nl> + _defaults [ KEY_ANCHOR_TEXT_SHADOW_COLOR ] = defaults . at ( KEY_ANCHOR_TEXT_SHADOW_COLOR ) . 
asString ( ) ; <nl> + } <nl> + if ( defaults . find ( KEY_ANCHOR_TEXT_SHADOW_OFFSET_WIDTH ) ! = defaults . end ( ) ) { <nl> + _defaults [ KEY_ANCHOR_TEXT_SHADOW_OFFSET_WIDTH ] = defaults . at ( KEY_ANCHOR_TEXT_SHADOW_OFFSET_WIDTH ) . asFloat ( ) ; <nl> + } <nl> + if ( defaults . find ( KEY_ANCHOR_TEXT_SHADOW_OFFSET_HEIGHT ) ! = defaults . end ( ) ) { <nl> + _defaults [ KEY_ANCHOR_TEXT_SHADOW_OFFSET_HEIGHT ] = defaults . at ( KEY_ANCHOR_TEXT_SHADOW_OFFSET_HEIGHT ) . asFloat ( ) ; <nl> + } <nl> + if ( defaults . find ( KEY_ANCHOR_TEXT_SHADOW_BLUR_RADIUS ) ! = defaults . end ( ) ) { <nl> + _defaults [ KEY_ANCHOR_TEXT_SHADOW_BLUR_RADIUS ] = defaults . at ( KEY_ANCHOR_TEXT_SHADOW_BLUR_RADIUS ) . asInt ( ) ; <nl> + } <nl> + if ( defaults . find ( KEY_ANCHOR_TEXT_GLOW_COLOR ) ! = defaults . end ( ) ) { <nl> + _defaults [ KEY_ANCHOR_TEXT_GLOW_COLOR ] = defaults . at ( KEY_ANCHOR_TEXT_GLOW_COLOR ) . asString ( ) ; <nl> + } <nl> + } <nl> + <nl> + ValueMap RichText : : getDefaults ( ) const <nl> + { <nl> + ValueMap defaults ; <nl> + return defaults ; <nl> + } <nl> + <nl> + cocos2d : : Color3B RichText : : color3BWithString ( const std : : string & color ) <nl> + { <nl> + if ( color . length ( ) = = 4 ) { <nl> + int r , g , b ; <nl> + sscanf ( color . c_str ( ) , " % * c % 1x % 1x % 1x " , & r , & g , & b ) ; <nl> + r + = r * 16 ; <nl> + g + = g * 16 ; <nl> + b + = b * 16 ; <nl> + return Color3B ( r , g , b ) ; <nl> + } <nl> + else if ( color . length ( ) = = 7 ) { <nl> + int r , g , b ; <nl> + sscanf ( color . c_str ( ) , " % * c % 2x % 2x % 2x " , & r , & g , & b ) ; <nl> + return Color3B ( r , g , b ) ; <nl> + } <nl> + else if ( color . length ( ) = = 9 ) { <nl> + int r , g , b , a ; <nl> + sscanf ( color . c_str ( ) , " % * c % 2x % 2x % 2x % 2x " , & r , & g , & b , & a ) ; <nl> + return Color3B ( r , g , b ) ; <nl> + } <nl> + return Color3B : : WHITE ; <nl> + } <nl> + <nl> + std : : string RichText : : stringWithColor3B ( const cocos2d : : Color3B & color3b ) <nl> + { <nl> + int r = color3b . r ; <nl> + int g = color3b . g ; <nl> + int b = color3b . b ; <nl> + char buf [ 8 ] ; <nl> + snprintf ( buf , sizeof ( buf ) , " # % 02x % 02x % 02x " , r , g , b ) ; <nl> + return std : : string ( buf , 7 ) ; <nl> + } <nl> + <nl> + std : : string RichText : : stringWithColor4B ( const cocos2d : : Color4B & color4b ) <nl> + { <nl> + int r = color4b . r ; <nl> + int g = color4b . g ; <nl> + int b = color4b . b ; <nl> + int a = color4b . 
a ; <nl> + char buf [ 10 ] ; <nl> + snprintf ( buf , sizeof ( buf ) , " # % 02x % 02x % 02x % 02x " , r , g , b , a ) ; <nl> + return std : : string ( buf , 9 ) ; <nl> + } <nl> + <nl> + void RichText : : setTagDescription ( const std : : string & tag , bool isFontElement , VisitEnterHandler handleVisitEnter ) <nl> + { <nl> + MyXMLVisitor : : setTagDescription ( tag , isFontElement , handleVisitEnter ) ; <nl> + } <nl> + <nl> + void RichText : : removeTagDescription ( const std : : string & tag ) <nl> + { <nl> + MyXMLVisitor : : removeTagDescription ( tag ) ; <nl> + } <nl> + <nl> + void RichText : : openUrl ( const std : : string & url ) <nl> + { <nl> + if ( _handleOpenUrl ) { <nl> + _handleOpenUrl ( url ) ; <nl> + } <nl> + else { <nl> + Application : : getInstance ( ) - > openURL ( url ) ; <nl> + } <nl> + } <nl> + <nl> + void RichText : : setOpenUrlHandler ( const OpenUrlHandler & handleOpenUrl ) <nl> + { <nl> + _handleOpenUrl = handleOpenUrl ; <nl> + } <nl> + <nl> void RichText : : formatText ( ) <nl> { <nl> if ( _formatTextDirty ) <nl> void RichText : : formatText ( ) <nl> if ( elmtText - > _flags & RichElementText : : STRIKETHROUGH_FLAG ) <nl> label - > enableStrikethrough ( ) ; <nl> if ( elmtText - > _flags & RichElementText : : URL_FLAG ) <nl> - label - > addComponent ( ListenerComponent : : create ( label , elmtText - > _url ) ) ; <nl> + label - > addComponent ( ListenerComponent : : create ( label , elmtText - > _url , <nl> + std : : bind ( & RichText : : openUrl , this , std : : placeholders : : _1 ) ) ) ; <nl> + if ( elmtText - > _flags & RichElementText : : OUTLINE_FLAG ) { <nl> + label - > enableOutline ( Color4B ( elmtText - > _outlineColor ) , elmtText - > _outlineSize ) ; <nl> + } <nl> + if ( elmtText - > _flags & RichElementText : : SHADOW_FLAG ) { <nl> + label - > enableShadow ( Color4B ( elmtText - > _shadowColor ) , <nl> + elmtText - > _shadowOffset , <nl> + elmtText - > _shadowBlurRadius ) ; <nl> + } <nl> + if ( elmtText - > _flags & RichElementText : : GLOW_FLAG ) { <nl> + label - > enableGlow ( Color4B ( elmtText - > _glowColor ) ) ; <nl> + } <nl> elementRenderer = label ; <nl> break ; <nl> } <nl> void RichText : : formatText ( ) <nl> elementRenderer - > setScaleY ( elmtImage - > _height / currentSize . height ) ; <nl> elementRenderer - > setContentSize ( Size ( currentSize . width * elementRenderer - > getScaleX ( ) , <nl> currentSize . 
height * elementRenderer - > getScaleY ( ) ) ) ; <nl> + elementRenderer - > addComponent ( ListenerComponent : : create ( elementRenderer , <nl> + elmtImage - > _url , <nl> + std : : bind ( & RichText : : openUrl , this , std : : placeholders : : _1 ) ) ) ; <nl> } <nl> break ; <nl> } <nl> void RichText : : formatText ( ) <nl> case RichElement : : Type : : TEXT : <nl> { <nl> RichElementText * elmtText = static_cast < RichElementText * > ( element ) ; <nl> - handleTextRenderer ( elmtText - > _text , elmtText - > _fontName , elmtText - > _fontSize , elmtText - > _color , elmtText - > _opacity , elmtText - > _flags , elmtText - > _url ) ; <nl> + handleTextRenderer ( elmtText - > _text , elmtText - > _fontName , elmtText - > _fontSize , elmtText - > _color , <nl> + elmtText - > _opacity , elmtText - > _flags , elmtText - > _url , <nl> + elmtText - > _outlineColor , elmtText - > _outlineSize , <nl> + elmtText - > _shadowColor , elmtText - > _shadowOffset , elmtText - > _shadowBlurRadius , <nl> + elmtText - > _glowColor ) ; <nl> break ; <nl> } <nl> case RichElement : : Type : : IMAGE : <nl> { <nl> RichElementImage * elmtImage = static_cast < RichElementImage * > ( element ) ; <nl> - handleImageRenderer ( elmtImage - > _filePath , elmtImage - > _color , elmtImage - > _opacity , elmtImage - > _width , elmtImage - > _height ) ; <nl> + handleImageRenderer ( elmtImage - > _filePath , elmtImage - > _color , elmtImage - > _opacity , elmtImage - > _width , elmtImage - > _height , elmtImage - > _url ) ; <nl> break ; <nl> } <nl> case RichElement : : Type : : CUSTOM : <nl> int RichText : : findSplitPositionForChar ( cocos2d : : Label * label , const std : : string & <nl> return leftLength ; <nl> } <nl> <nl> - void RichText : : handleTextRenderer ( const std : : string & text , const std : : string & fontName , float fontSize , const Color3B & color , GLubyte opacity , uint32_t flags , const std : : string & url ) <nl> + void RichText : : handleTextRenderer ( const std : : string & text , const std : : string & fontName , float fontSize , const Color3B & color , <nl> + GLubyte opacity , uint32_t flags , const std : : string & url , <nl> + const Color3B & outlineColor , int outlineSize , <nl> + const Color3B & shadowColor , const cocos2d : : Size & shadowOffset , int shadowBlurRadius , <nl> + const Color3B & glowColor ) <nl> { <nl> auto fileExist = FileUtils : : getInstance ( ) - > isFileExist ( fontName ) ; <nl> Label * textRenderer = nullptr ; <nl> void RichText : : handleTextRenderer ( const std : : string & text , const std : : string & fo <nl> if ( flags & RichElementText : : STRIKETHROUGH_FLAG ) <nl> textRenderer - > enableStrikethrough ( ) ; <nl> if ( flags & RichElementText : : URL_FLAG ) <nl> - textRenderer - > addComponent ( ListenerComponent : : create ( textRenderer , url ) ) ; <nl> + textRenderer - > addComponent ( ListenerComponent : : create ( textRenderer , <nl> + url , <nl> + std : : bind ( & RichText : : openUrl , this , std : : placeholders : : _1 ) ) ) ; <nl> + if ( flags & RichElementText : : OUTLINE_FLAG ) { <nl> + textRenderer - > enableOutline ( Color4B ( outlineColor ) , outlineSize ) ; <nl> + } <nl> + if ( flags & RichElementText : : SHADOW_FLAG ) { <nl> + textRenderer - > enableShadow ( Color4B ( shadowColor ) , shadowOffset , shadowBlurRadius ) ; <nl> + } <nl> + if ( flags & RichElementText : : GLOW_FLAG ) { <nl> + textRenderer - > enableGlow ( Color4B ( glowColor ) ) ; <nl> + } <nl> <nl> float textRendererWidth = textRenderer - > getContentSize ( ) . 
width ; <nl> _leftSpaceWidth - = textRendererWidth ; <nl> if ( _leftSpaceWidth < 0 . 0f ) <nl> { <nl> int leftLength = 0 ; <nl> - if ( _wrapMode = = WRAP_PER_WORD ) <nl> + if ( static_cast < RichText : : WrapMode > ( _defaults . at ( KEY_WRAP_MODE ) . asInt ( ) ) = = WRAP_PER_WORD ) <nl> leftLength = findSplitPositionForWord ( textRenderer , text ) ; <nl> else <nl> leftLength = findSplitPositionForChar ( textRenderer , text ) ; <nl> void RichText : : handleTextRenderer ( const std : : string & text , const std : : string & fo <nl> if ( flags & RichElementText : : STRIKETHROUGH_FLAG ) <nl> leftRenderer - > enableStrikethrough ( ) ; <nl> if ( flags & RichElementText : : URL_FLAG ) <nl> - leftRenderer - > addComponent ( ListenerComponent : : create ( leftRenderer , url ) ) ; <nl> - <nl> + leftRenderer - > addComponent ( ListenerComponent : : create ( leftRenderer , <nl> + url , <nl> + std : : bind ( & RichText : : openUrl , this , std : : placeholders : : _1 ) ) ) ; <nl> + if ( flags & RichElementText : : OUTLINE_FLAG ) { <nl> + leftRenderer - > enableOutline ( Color4B ( outlineColor ) , outlineSize ) ; <nl> + } <nl> + if ( flags & RichElementText : : SHADOW_FLAG ) { <nl> + leftRenderer - > enableShadow ( Color4B ( shadowColor ) , shadowOffset , shadowBlurRadius ) ; <nl> + } <nl> + if ( flags & RichElementText : : GLOW_FLAG ) { <nl> + leftRenderer - > enableGlow ( Color4B ( glowColor ) ) ; <nl> + } <nl> } <nl> } <nl> <nl> addNewLine ( ) ; <nl> - handleTextRenderer ( cutWords , fontName , fontSize , color , opacity , flags , url ) ; <nl> + handleTextRenderer ( cutWords , fontName , fontSize , color , opacity , flags , url , <nl> + outlineColor , outlineSize , <nl> + shadowColor , shadowOffset , shadowBlurRadius , <nl> + glowColor ) ; <nl> } <nl> else <nl> { <nl> void RichText : : handleTextRenderer ( const std : : string & text , const std : : string & fo <nl> } <nl> } <nl> <nl> - void RichText : : handleImageRenderer ( const std : : string & filePath , const Color3B & color , GLubyte opacity , int width , int height ) <nl> + void RichText : : handleImageRenderer ( const std : : string & filePath , const Color3B & color , GLubyte opacity , int width , int height , const std : : string url ) <nl> { <nl> Sprite * imageRenderer = Sprite : : create ( filePath ) ; <nl> if ( imageRenderer ) <nl> void RichText : : handleImageRenderer ( const std : : string & filePath , const Color3B & c <nl> currentSize . height * imageRenderer - > getScaleY ( ) ) ) ; <nl> <nl> handleCustomRenderer ( imageRenderer ) ; <nl> + imageRenderer - > addComponent ( ListenerComponent : : create ( imageRenderer , <nl> + url , <nl> + std : : bind ( & RichText : : openUrl , this , std : : placeholders : : _1 ) ) ) ; <nl> } <nl> } <nl> <nl> void RichText : : formarRenderers ( ) <nl> { <nl> Vector < Node * > * row = ( _elementRenders [ i ] ) ; <nl> float nextPosX = 0 . 0f ; <nl> - nextPosY - = ( maxHeights [ i ] + _verticalSpace ) ; <nl> + nextPosY - = ( maxHeights [ i ] + _defaults . at ( KEY_VERTICAL_SPACE ) . 
asFloat ( ) ) ; <nl> <nl> for ( ssize_t j = 0 ; j < row - > size ( ) ; j + + ) <nl> { <nl> void RichText : : pushToContainer ( cocos2d : : Node * renderer ) <nl> <nl> void RichText : : setVerticalSpace ( float space ) <nl> { <nl> - _verticalSpace = space ; <nl> + _defaults [ KEY_VERTICAL_SPACE ] = space ; <nl> } <nl> <nl> void RichText : : ignoreContentAdaptWithSize ( bool ignore ) <nl> std : : string RichText : : getDescription ( ) const <nl> { <nl> return " RichText " ; <nl> } <nl> - <nl> - } <nl> - <nl> - NS_CC_END <nl> mmm a / cocos / ui / UIRichText . h <nl> ppp b / cocos / ui / UIRichText . h <nl> <nl> <nl> # include " ui / UIWidget . h " <nl> # include " ui / GUIExport . h " <nl> + # include " base / CCValue . h " <nl> <nl> NS_CC_BEGIN <nl> / * * <nl> NS_CC_BEGIN <nl> class Label ; <nl> <nl> namespace ui { <nl> - <nl> + <nl> / * * <nl> * @ brief Rich text element base class . <nl> * It defines the basic common properties for all rich text element . <nl> class CC_GUI_DLL RichElement : public Ref <nl> * / <nl> enum class Type <nl> { <nl> - TEXT , <nl> - IMAGE , <nl> - CUSTOM , <nl> - NEWLINE <nl> + TEXT , / * ! < RichElementText * / <nl> + IMAGE , / * ! < RichElementImage * / <nl> + CUSTOM , / * ! < RichElementCustomNode * / <nl> + NEWLINE / * ! < RichElementNewLine * / <nl> } ; <nl> <nl> / * * <nl> class CC_GUI_DLL RichElement : public Ref <nl> * @ return True if initialize success , false otherwise . <nl> * / <nl> bool init ( int tag , const Color3B & color , GLubyte opacity ) ; <nl> + <nl> + bool equalType ( Type type ) ; <nl> + void setColor ( const Color3B & color ) ; <nl> protected : <nl> - Type _type ; <nl> - int _tag ; <nl> - Color3B _color ; <nl> - GLubyte _opacity ; <nl> + Type _type ; / * ! < Rich element type . * / <nl> + int _tag ; / * ! < A integer tag value . * / <nl> + Color3B _color ; / * ! < A color in ` Color3B ` . * / <nl> + GLubyte _opacity ; / * ! < A opacity value in ` GLubyte ` . * / <nl> friend class RichText ; <nl> } ; <nl> <nl> class CC_GUI_DLL RichElementText : public RichElement <nl> { _type = Type : : TEXT ; } ; <nl> <nl> enum { <nl> - ITALICS_FLAG = 1 < < 0 , <nl> - BOLD_FLAG = 1 < < 1 , <nl> - UNDERLINE_FLAG = 1 < < 2 , <nl> - STRIKETHROUGH_FLAG = 1 < < 3 , <nl> - URL_FLAG = 1 < < 4 <nl> + ITALICS_FLAG = 1 < < 0 , / * ! < italic text * / <nl> + BOLD_FLAG = 1 < < 1 , / * ! < bold text * / <nl> + UNDERLINE_FLAG = 1 < < 2 , / * ! < underline * / <nl> + STRIKETHROUGH_FLAG = 1 < < 3 , / * ! < strikethrough * / <nl> + URL_FLAG = 1 < < 4 , / * ! < url of anchor * / <nl> + OUTLINE_FLAG = 1 < < 5 , / * ! < outline effect * / <nl> + SHADOW_FLAG = 1 < < 6 , / * ! < shadow effect * / <nl> + GLOW_FLAG = 1 < < 7 / * ! < glow effect * / <nl> } ; <nl> <nl> / * * <nl> class CC_GUI_DLL RichElementText : public RichElement <nl> * @ param text Content string . <nl> * @ param fontName Content font name . <nl> * @ param fontSize Content font size . <nl> - * @ param flags : italics , bold , underline or strikethrough <nl> + * @ param flags : italics , bold , underline , strikethrough , url , outline , shadow or glow <nl> + * @ param url uniform resource locator <nl> + * @ param outlineColor the color of the outline <nl> + * @ param outlineSize the outline effect size value <nl> + * @ param shadowColor the shadow effect color value <nl> + * @ param shadowOffset shadow effect offset value <nl> + * @ param shadowBlurRadius the shadow effect blur radius <nl> + * @ param glowColor glow color <nl> * @ return True if initialize success , false otherwise . 
<nl> * / <nl> - bool init ( int tag , const Color3B & color , GLubyte opacity , const std : : string & text , const std : : string & fontName , float fontSize , uint32_t flags , const std : : string & url ) ; <nl> - <nl> + bool init ( int tag , const Color3B & color , GLubyte opacity , const std : : string & text , <nl> + const std : : string & fontName , float fontSize , uint32_t flags , const std : : string & url , <nl> + const Color3B & outlineColor = Color3B : : WHITE , int outlineSize = - 1 , <nl> + const Color3B & shadowColor = Color3B : : BLACK , const cocos2d : : Size & shadowOffset = Size ( 2 . 0 , - 2 . 0 ) , int shadowBlurRadius = 0 , <nl> + const Color3B & glowColor = Color3B : : WHITE ) ; <nl> <nl> / * * <nl> * @ brief Create a RichElementText with various arguments . <nl> class CC_GUI_DLL RichElementText : public RichElement <nl> * @ param text Content string . <nl> * @ param fontName Content font name . <nl> * @ param fontSize Content font size . <nl> - * @ param flags : italics , bold , underline or strikethrough <nl> + * @ param flags : italics , bold , underline , strikethrough , url , outline , shadow or glow <nl> + * @ param url uniform resource locator <nl> + * @ param outlineColor the color of the outline <nl> + * @ param outlineSize the outline effect size value <nl> + * @ param shadowColor the shadow effect color value <nl> + * @ param shadowOffset shadow effect offset value <nl> + * @ param shadowBlurRadius the shadow effect blur radius <nl> + * @ param glowColor glow color <nl> * @ return RichElementText instance . <nl> * / <nl> static RichElementText * create ( int tag , const Color3B & color , GLubyte opacity , const std : : string & text , <nl> - const std : : string & fontName , float fontSize , uint32_t flags = 0 , const std : : string & url = " " ) ; <nl> + const std : : string & fontName , float fontSize , uint32_t flags = 0 , const std : : string & url = " " , <nl> + const Color3B & outlineColor = Color3B : : WHITE , int outlineSize = - 1 , <nl> + const Color3B & shadowColor = Color3B : : BLACK , const cocos2d : : Size & shadowOffset = Size ( 2 . 0 , - 2 . 0 ) , int shadowBlurRadius = 0 , <nl> + const Color3B & glowColor = Color3B : : WHITE ) ; <nl> protected : <nl> std : : string _text ; <nl> std : : string _fontName ; <nl> float _fontSize ; <nl> uint32_t _flags ; <nl> std : : string _url ; <nl> + Color3B _outlineColor ; / * ! < the color of the outline * / <nl> + int _outlineSize ; / * ! < the outline effect size value * / <nl> + Color3B _shadowColor ; / * ! < the shadow effect color value * / <nl> + cocos2d : : Size _shadowOffset ; / * ! < shadow effect offset value * / <nl> + int _shadowBlurRadius ; / * ! < the shadow effect blur radius * / <nl> + Color3B _glowColor ; / * ! < attributes of glow tag * / <nl> friend class RichText ; <nl> - <nl> } ; <nl> <nl> / * * <nl> class CC_GUI_DLL RichElementImage : public RichElement <nl> * @ param color A color in Color3B . <nl> * @ param opacity A opacity in GLubyte . <nl> * @ param filePath A image file name . <nl> + * @ param url uniform resource locator <nl> * @ return True if initialize success , false otherwise . <nl> * / <nl> - bool init ( int tag , const Color3B & color , GLubyte opacity , const std : : string & filePath ) ; <nl> + bool init ( int tag , const Color3B & color , GLubyte opacity , const std : : string & filePath , const std : : string & url = " " ) ; <nl> <nl> <nl> / * * <nl> class CC_GUI_DLL RichElementImage : public RichElement <nl> * @ param color A color in Color3B . 
<nl> * @ param opacity A opacity in GLubyte . <nl> * @ param filePath A image file name . <nl> + * @ param url uniform resource locator <nl> * @ return A RichElementImage instance . <nl> * / <nl> - static RichElementImage * create ( int tag , const Color3B & color , GLubyte opacity , const std : : string & filePath ) ; <nl> + static RichElementImage * create ( int tag , const Color3B & color , GLubyte opacity , const std : : string & filePath , const std : : string & url = " " ) ; <nl> <nl> void setWidth ( int width ) ; <nl> void setHeight ( int height ) ; <nl> + void setUrl ( const std : : string & url ) ; <nl> protected : <nl> std : : string _filePath ; <nl> Rect _textureRect ; <nl> class CC_GUI_DLL RichElementImage : public RichElement <nl> friend class RichText ; <nl> int _width ; <nl> int _height ; <nl> + std : : string _url ; / * ! < attributes of anchor tag * / <nl> } ; <nl> <nl> / * * <nl> class CC_GUI_DLL RichElementNewLine : public RichElement <nl> protected : <nl> friend class RichText ; <nl> } ; <nl> - <nl> + <nl> / * * <nl> * @ brief A container for displaying various RichElements . <nl> * We could use it to display texts with images easily . <nl> class CC_GUI_DLL RichText : public Widget <nl> WRAP_PER_CHAR <nl> } ; <nl> <nl> + / * * <nl> + * @ brief call to open a resource specified by a URL <nl> + * @ param url a URL <nl> + * / <nl> + typedef std : : function < void ( const std : : string & url ) > OpenUrlHandler ; <nl> + <nl> + / * * <nl> + * @ brief called on the specified tag <nl> + * @ param tagAttrValueMap the attributes of a tag <nl> + * @ result text attributes and RichElement <nl> + * / <nl> + typedef std : : function < std : : pair < ValueMap , RichElement * > ( const ValueMap & tagAttrValueMap ) > VisitEnterHandler ; <nl> + <nl> + static const std : : string KEY_VERTICAL_SPACE ; / * ! < key of vertical space * / <nl> + static const std : : string KEY_WRAP_MODE ; / * ! < key of per word , or per char * / <nl> + static const std : : string KEY_FONT_COLOR_STRING ; / * ! < key of font color * / <nl> + static const std : : string KEY_FONT_SIZE ; / * ! < key of font size * / <nl> + static const std : : string KEY_FONT_SMALL ; / * ! < key of font size small * / <nl> + static const std : : string KEY_FONT_BIG ; / * ! < key of font size big * / <nl> + static const std : : string KEY_FONT_FACE ; / * ! < key of font name * / <nl> + static const std : : string KEY_TEXT_BOLD ; / * ! < key of text bold * / <nl> + static const std : : string KEY_TEXT_ITALIC ; / * ! < key of text italic * / <nl> + static const std : : string KEY_TEXT_LINE ; / * ! < key of line style * / <nl> + static const std : : string VALUE_TEXT_LINE_NONE ; / * ! < value of none * / <nl> + static const std : : string VALUE_TEXT_LINE_DEL ; / * ! < value of strikethrough line * / <nl> + static const std : : string VALUE_TEXT_LINE_UNDER ; / * ! < value of underline * / <nl> + static const std : : string KEY_TEXT_STYLE ; / * ! < key of effect style * / <nl> + static const std : : string VALUE_TEXT_STYLE_NONE ; / * ! < value of none * / <nl> + static const std : : string VALUE_TEXT_STYLE_OUTLINE ; / * ! < value of outline * / <nl> + static const std : : string VALUE_TEXT_STYLE_SHADOW ; / * ! < value of shadow * / <nl> + static const std : : string VALUE_TEXT_STYLE_GLOW ; / * ! < value of glow * / <nl> + static const std : : string KEY_TEXT_OUTLINE_COLOR ; / * ! < key of outline color * / <nl> + static const std : : string KEY_TEXT_OUTLINE_SIZE ; / * ! 
< key of outline size * / <nl> + static const std : : string KEY_TEXT_SHADOW_COLOR ; / * ! < key of shadow color * / <nl> + static const std : : string KEY_TEXT_SHADOW_OFFSET_WIDTH ; / * ! < key of shadow offset ( width ) * / <nl> + static const std : : string KEY_TEXT_SHADOW_OFFSET_HEIGHT ; / * ! < key of shadow offset ( height ) * / <nl> + static const std : : string KEY_TEXT_SHADOW_BLUR_RADIUS ; / * ! < key of shadow blur radius * / <nl> + static const std : : string KEY_TEXT_GLOW_COLOR ; / * ! < key of glow color * / <nl> + static const std : : string KEY_URL ; / * ! < key of url * / <nl> + static const std : : string KEY_ANCHOR_FONT_COLOR_STRING ; / * ! < key of font color of anchor tag * / <nl> + static const std : : string KEY_ANCHOR_TEXT_BOLD ; / * ! < key of text bold of anchor tag * / <nl> + static const std : : string KEY_ANCHOR_TEXT_ITALIC ; / * ! < key of text italic of anchor tag * / <nl> + static const std : : string KEY_ANCHOR_TEXT_LINE ; / * ! < key of line style of anchor tag * / <nl> + static const std : : string KEY_ANCHOR_TEXT_STYLE ; / * ! < key of effect style of anchor tag * / <nl> + static const std : : string KEY_ANCHOR_TEXT_OUTLINE_COLOR ; / * ! < key of outline color of anchor tag * / <nl> + static const std : : string KEY_ANCHOR_TEXT_OUTLINE_SIZE ; / * ! < key of outline size of anchor tag * / <nl> + static const std : : string KEY_ANCHOR_TEXT_SHADOW_COLOR ; / * ! < key of shadow color of anchor tag * / <nl> + static const std : : string KEY_ANCHOR_TEXT_SHADOW_OFFSET_WIDTH ; / * ! < key of shadow offset ( width ) of anchor tag * / <nl> + static const std : : string KEY_ANCHOR_TEXT_SHADOW_OFFSET_HEIGHT ; / * ! < key of shadow offset ( height ) of anchor tag * / <nl> + static const std : : string KEY_ANCHOR_TEXT_SHADOW_BLUR_RADIUS ; / * ! < key of shadow blur radius of anchor tag * / <nl> + static const std : : string KEY_ANCHOR_TEXT_GLOW_COLOR ; / * ! < key of glow color of anchor tag * / <nl> + <nl> / * * <nl> * @ brief Default constructor . <nl> * @ js ctor <nl> class CC_GUI_DLL RichText : public Widget <nl> * <nl> * @ return RichText instance . <nl> * / <nl> - static RichText * createWithXML ( const std : : string & xml ) ; <nl> + static RichText * createWithXML ( const std : : string & xml , const ValueMap & defaults = ValueMap ( ) , const OpenUrlHandler & handleOpenUrl = nullptr ) ; <nl> <nl> / * * <nl> * @ brief Insert a RichElement at a given index . <nl> class CC_GUI_DLL RichText : public Widget <nl> virtual void ignoreContentAdaptWithSize ( bool ignore ) override ; <nl> virtual std : : string getDescription ( ) const override ; <nl> <nl> - / * * @ brief sets the wrapping mode : WRAP_PER_CHAR or WRAP_PER_WORD <nl> + void setWrapMode ( WrapMode wrapMode ) ; / * ! < sets the wrapping mode : WRAP_PER_CHAR or WRAP_PER_WORD * / <nl> + WrapMode getWrapMode ( ) const ; / * ! < returns the current wrapping mode * / <nl> + void setFontColor ( const std : : string & color ) ; / * ! < Set the font color . @ param color the # RRGGBB hexadecimal notation . * / <nl> + std : : string getFontColor ( ) ; / * ! < return the current font color * / <nl> + Color3B getFontColor3B ( ) ; / * ! < return the current font color * / <nl> + void setFontSize ( float size ) ; / * ! < Set the font size . @ param size the font size . * / <nl> + float getFontSize ( ) ; / * ! < return the current font size * / <nl> + void setFontFace ( const std : : string & face ) ; / * ! < Set the font face . @ param face the font face . * / <nl> + std : : string getFontFace ( ) ; / * ! 
< return the current font face * / <nl> + void setAnchorFontColor ( const std : : string & color ) ; / * ! < Set the font color of a - tag . @ param face the font color . * / <nl> + std : : string getAnchorFontColor ( ) ; / * ! < return the current font color of a - tag * / <nl> + cocos2d : : Color3B getAnchorFontColor3B ( ) ; / * ! < return the current font color of a - tag * / <nl> + void setAnchorTextBold ( bool enable ) ; / * ! < enable bold text of a - tag * / <nl> + bool isAnchorTextBoldEnabled ( ) ; / * ! < valid style is bold text of a - tag ? * / <nl> + void setAnchorTextItalic ( bool enable ) ; / * ! < enable italic text of a - tag * / <nl> + bool isAnchorTextItalicEnabled ( ) ; / * ! < valid style is italic text of a - tag ? * / <nl> + void setAnchorTextDel ( bool enable ) ; / * ! < enable the strikethrough of a - tag * / <nl> + bool isAnchorTextDelEnabled ( ) ; / * ! < valid strikethrough of a - tag ? * / <nl> + void setAnchorTextUnderline ( bool enable ) ; / * ! < enable the underline of a - tag * / <nl> + bool isAnchorTextUnderlineEnabled ( ) ; / * ! < valid underline of a - tag ? * / <nl> + / * * @ breif enable the outline of a - tag * / <nl> + void setAnchorTextOutline ( bool enable , const Color3B & outlineColor = Color3B : : WHITE , int outlineSize = - 1 ) ; <nl> + bool isAnchorTextOutlineEnabled ( ) ; / * ! < valid outline of a - tag ? * / <nl> + Color3B getAnchorTextOutlineColor3B ( ) ; / * ! < return the current text outline color of a - tag * / <nl> + int getAnchorTextOutlineSize ( ) ; / * ! < return the current text outline size of a - tag * / <nl> + / * * @ breif enable the shadow of a - tag * / <nl> + void setAnchorTextShadow ( bool enable , const Color3B & shadowColor = Color3B : : BLACK , const Size & offset = Size ( 2 . 0 , - 2 . 0 ) , int blurRadius = 0 ) ; <nl> + bool isAnchorTextShadowEnabled ( ) ; / * ! < valid shadow of a - tag ? * / <nl> + Color3B getAnchorTextShadowColor3B ( ) ; / * ! < return the current text shadow color of a - tag * / <nl> + Size getAnchorTextShadowOffset ( ) ; / * ! < return the current text shadow offset of a - tag * / <nl> + int getAnchorTextShadowBlurRadius ( ) ; / * ! < return the current text shadow blur radius of a - tag * / <nl> + void setAnchorTextGlow ( bool enable , const Color3B & glowColor = Color3B : : WHITE ) ; / * ! < enable the glow of a - tag * / <nl> + bool isAnchorTextGlowEnabled ( ) ; / * ! < valid glow of a - tag ? * / <nl> + Color3B getAnchorTextGlowColor3B ( ) ; / * ! < return the current text glow color of a - tag * / <nl> + void setDefaults ( const ValueMap & defaults ) ; / * ! < set the default values * / <nl> + ValueMap getDefaults ( ) const ; / * ! < returns the default values * / <nl> + <nl> + cocos2d : : Color3B color3BWithString ( const std : : string & color ) ; / * ! < convert a color string into a Color3B . * / <nl> + std : : string stringWithColor3B ( const cocos2d : : Color3B & color3b ) ; / * ! < convert a Color3B into a color string . * / <nl> + std : : string stringWithColor4B ( const cocos2d : : Color4B & color4b ) ; / * ! < convert a Color4B into a color string . * / <nl> + <nl> + / * * <nl> + * @ brief add a callback to own tag . 
<nl> + * @ param tag tag ' s name <nl> + * @ param isFontElement use attributes of text tag <nl> + * @ param handleVisitEnter callback <nl> * / <nl> - void setWrapMode ( WrapMode wrapMode ) ; <nl> + static void setTagDescription ( const std : : string & tag , bool isFontElement , VisitEnterHandler handleVisitEnter ) ; <nl> <nl> - / * * @ brief returns the current wrapping mode * / <nl> - WrapMode getWrapMode ( ) const ; <nl> + / * * <nl> + * @ brief remove a callback to own tag . <nl> + * @ param tag tag ' s name <nl> + * / <nl> + static void removeTagDescription ( const std : : string & tag ) ; <nl> <nl> + void openUrl ( const std : : string & url ) ; <nl> + <nl> + / * * <nl> + * @ brief Asks the callback to open a resource specified by a URL . <nl> + * @ discussion If you set up your own URL in the anchor tag , it is used to intercept the URL open . <nl> + * @ param handleOpenUrl the callback <nl> + * / <nl> + void setOpenUrlHandler ( const OpenUrlHandler & handleOpenUrl ) ; <nl> + <nl> CC_CONSTRUCTOR_ACCESS : <nl> virtual bool init ( ) override ; <nl> <nl> - bool initWithXML ( const std : : string & xml ) ; <nl> + bool initWithXML ( const std : : string & xml , const ValueMap & defaults = ValueMap ( ) , const OpenUrlHandler & handleOpenUrl = nullptr ) ; <nl> <nl> protected : <nl> virtual void adaptRenderers ( ) override ; <nl> <nl> virtual void initRenderer ( ) override ; <nl> void pushToContainer ( Node * renderer ) ; <nl> - void handleTextRenderer ( const std : : string & text , const std : : string & fontName , float fontSize , const Color3B & color , GLubyte opacity , uint32_t flags , const std : : string & url = " " ) ; <nl> - void handleImageRenderer ( const std : : string & fileParh , const Color3B & color , GLubyte opacity , int width , int height ) ; <nl> + void handleTextRenderer ( const std : : string & text , const std : : string & fontName , float fontSize , const Color3B & color , <nl> + GLubyte opacity , uint32_t flags , const std : : string & url = " " , <nl> + const Color3B & outlineColor = Color3B : : WHITE , int outlineSize = - 1 , <nl> + const Color3B & shadowColor = Color3B : : BLACK , const cocos2d : : Size & shadowOffset = Size ( 2 . 0 , - 2 . 0 ) , int shadowBlurRadius = 0 , <nl> + const Color3B & glowColor = Color3B : : WHITE ) ; <nl> + void handleImageRenderer ( const std : : string & fileParh , const Color3B & color , GLubyte opacity , int width , int height , const std : : string url ) ; <nl> void handleCustomRenderer ( Node * renderer ) ; <nl> void formarRenderers ( ) ; <nl> void addNewLine ( ) ; <nl> class CC_GUI_DLL RichText : public Widget <nl> Vector < RichElement * > _richElements ; <nl> std : : vector < Vector < Node * > * > _elementRenders ; <nl> float _leftSpaceWidth ; <nl> - float _verticalSpace ; <nl> <nl> - / / per word , or per char <nl> - WrapMode _wrapMode ; <nl> + ValueMap _defaults ; / * ! < default values * / <nl> + OpenUrlHandler _handleOpenUrl ; / * ! < the callback for open URL * / <nl> } ; <nl> <nl> } <nl> mmm a / tests / cpp - tests / Classes / UITest / CocoStudioGUITest / UIRichTextTest / UIRichTextTest . cpp <nl> ppp b / tests / cpp - tests / Classes / UITest / CocoStudioGUITest / UIRichTextTest / UIRichTextTest . 
cpp <nl> UIRichTextTests : : UIRichTextTests ( ) <nl> ADD_TEST_CASE ( UIRichTextXMLSUIB3 ) ; <nl> ADD_TEST_CASE ( UIRichTextXMLImg ) ; <nl> ADD_TEST_CASE ( UIRichTextXMLUrl ) ; <nl> + ADD_TEST_CASE ( UIRichTextXMLUrlImg ) ; <nl> ADD_TEST_CASE ( UIRichTextXMLFace ) ; <nl> ADD_TEST_CASE ( UIRichTextXMLBR ) ; <nl> ADD_TEST_CASE ( UIRichTextXMLInvalid ) ; <nl> + ADD_TEST_CASE ( UIRichTextXMLOutline ) ; <nl> + ADD_TEST_CASE ( UIRichTextXMLShadow ) ; <nl> + ADD_TEST_CASE ( UIRichTextXMLGlow ) ; <nl> + ADD_TEST_CASE ( UIRichTextXMLExtend ) ; <nl> } <nl> <nl> <nl> void UIRichTextXMLUrl : : switchWrapMode ( Ref * pSender , Widget : : TouchEventType type ) <nl> } <nl> } <nl> <nl> + / / <nl> + / / UIRichTextXMLUrlImg <nl> + / / <nl> + bool UIRichTextXMLUrlImg : : init ( ) <nl> + { <nl> + if ( UIScene : : init ( ) ) <nl> + { <nl> + Size widgetSize = _widget - > getContentSize ( ) ; <nl> + <nl> + / / Add the alert <nl> + Text * alert = Text : : create ( " RichText " , " fonts / Marker Felt . ttf " , 30 ) ; <nl> + alert - > setColor ( Color3B ( 159 , 168 , 176 ) ) ; <nl> + alert - > setPosition ( Vec2 ( widgetSize . width / 2 . 0f , widgetSize . height / 2 . 0f - alert - > getContentSize ( ) . height * 3 . 125 ) ) ; <nl> + _widget - > addChild ( alert ) ; <nl> + <nl> + <nl> + Button * button = Button : : create ( " cocosui / animationbuttonnormal . png " , " cocosui / animationbuttonpressed . png " ) ; <nl> + button - > setTouchEnabled ( true ) ; <nl> + button - > setTitleText ( " switch " ) ; <nl> + button - > setPosition ( Vec2 ( widgetSize . width * 1 / 3 , widgetSize . height / 2 . 0f + button - > getContentSize ( ) . height * 2 . 5 ) ) ; <nl> + button - > addTouchEventListener ( CC_CALLBACK_2 ( UIRichTextXMLUrlImg : : touchEvent , this ) ) ; <nl> + button - > setLocalZOrder ( 10 ) ; <nl> + _widget - > addChild ( button ) ; <nl> + <nl> + Button * button2 = Button : : create ( " cocosui / animationbuttonnormal . png " , " cocosui / animationbuttonpressed . png " ) ; <nl> + button2 - > setTouchEnabled ( true ) ; <nl> + button2 - > setTitleText ( " wrap mode " ) ; <nl> + button2 - > setPosition ( Vec2 ( widgetSize . width * 2 / 3 , widgetSize . height / 2 . 0f + button2 - > getContentSize ( ) . height * 2 . 5 ) ) ; <nl> + button2 - > addTouchEventListener ( CC_CALLBACK_2 ( UIRichTextXMLUrlImg : : switchWrapMode , this ) ) ; <nl> + button2 - > setLocalZOrder ( 10 ) ; <nl> + _widget - > addChild ( button2 ) ; <nl> + <nl> + <nl> + / / RichText <nl> + _richText = RichText : : createWithXML ( " And this link will redirect you to google : < a href = ' http : / / www . google . com ' > < img src = \ " cocosui / ccicon . png \ " height = \ " 48 \ " width = \ " 48 \ " / > < / a > " ) ; <nl> + _richText - > ignoreContentAdaptWithSize ( false ) ; <nl> + _richText - > setContentSize ( Size ( 100 , 100 ) ) ; <nl> + <nl> + _richText - > setPosition ( Vec2 ( widgetSize . width / 2 , widgetSize . 
height / 2 ) ) ; <nl> + _richText - > setLocalZOrder ( 10 ) ; <nl> + <nl> + <nl> + _widget - > addChild ( _richText ) ; <nl> + <nl> + / / test remove all children , this call won ' t effect the test <nl> + _richText - > removeAllChildren ( ) ; <nl> + <nl> + return true ; <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> + void UIRichTextXMLUrlImg : : touchEvent ( Ref * pSender , Widget : : TouchEventType type ) <nl> + { <nl> + switch ( type ) <nl> + { <nl> + case Widget : : TouchEventType : : ENDED : <nl> + { <nl> + if ( _richText - > isIgnoreContentAdaptWithSize ( ) ) <nl> + { <nl> + _richText - > ignoreContentAdaptWithSize ( false ) ; <nl> + _richText - > setContentSize ( Size ( 100 , 100 ) ) ; <nl> + } <nl> + else <nl> + { <nl> + _richText - > ignoreContentAdaptWithSize ( true ) ; <nl> + } <nl> + } <nl> + break ; <nl> + <nl> + default : <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + void UIRichTextXMLUrlImg : : switchWrapMode ( Ref * pSender , Widget : : TouchEventType type ) <nl> + { <nl> + if ( type = = Widget : : TouchEventType : : ENDED ) <nl> + { <nl> + auto wrapMode = _richText - > getWrapMode ( ) ; <nl> + wrapMode = ( wrapMode = = RichText : : WRAP_PER_WORD ) ? RichText : : WRAP_PER_CHAR : RichText : : WRAP_PER_WORD ; <nl> + _richText - > setWrapMode ( wrapMode ) ; <nl> + } <nl> + } <nl> + <nl> / / <nl> / / UIRichTextXMLFace <nl> / / <nl> bool UIRichTextXMLInvalid : : init ( ) <nl> return false ; <nl> } <nl> <nl> + / / <nl> + / / UIRichTextXMLOutline <nl> + / / <nl> + bool UIRichTextXMLOutline : : init ( ) <nl> + { <nl> + if ( UIScene : : init ( ) ) <nl> + { <nl> + Size widgetSize = _widget - > getContentSize ( ) ; <nl> + <nl> + / / Add the alert <nl> + Text * alert = Text : : create ( " Outline " , " fonts / Marker Felt . ttf " , 30 ) ; <nl> + alert - > setColor ( Color3B ( 159 , 168 , 176 ) ) ; <nl> + alert - > setPosition ( Vec2 ( widgetSize . width / 2 . 0f , widgetSize . height / 2 . 0f - alert - > getContentSize ( ) . height * 3 . 125 ) ) ; <nl> + _widget - > addChild ( alert ) ; <nl> + <nl> + <nl> + Button * button = Button : : create ( " cocosui / animationbuttonnormal . png " , " cocosui / animationbuttonpressed . png " ) ; <nl> + button - > setTouchEnabled ( true ) ; <nl> + button - > setTitleText ( " switch " ) ; <nl> + button - > setPosition ( Vec2 ( widgetSize . width * 1 / 3 , widgetSize . height / 2 . 0f + button - > getContentSize ( ) . height * 2 . 5 ) ) ; <nl> + button - > addTouchEventListener ( CC_CALLBACK_2 ( UIRichTextXMLOutline : : touchEvent , this ) ) ; <nl> + button - > setLocalZOrder ( 10 ) ; <nl> + _widget - > addChild ( button ) ; <nl> + <nl> + Button * button2 = Button : : create ( " cocosui / animationbuttonnormal . png " , " cocosui / animationbuttonpressed . png " ) ; <nl> + button2 - > setTouchEnabled ( true ) ; <nl> + button2 - > setTitleText ( " wrap mode " ) ; <nl> + button2 - > setPosition ( Vec2 ( widgetSize . width * 2 / 3 , widgetSize . height / 2 . 0f + button2 - > getContentSize ( ) . height * 2 . 5 ) ) ; <nl> + button2 - > addTouchEventListener ( CC_CALLBACK_2 ( UIRichTextXMLOutline : : switchWrapMode , this ) ) ; <nl> + button2 - > setLocalZOrder ( 10 ) ; <nl> + _widget - > addChild ( button2 ) ; <nl> + <nl> + <nl> + / / RichText <nl> + _richText = RichText : : createWithXML ( " < font face = ' fonts / Marker Felt . 
ttf ' size = \ " 24 \ " > < outline color = \ " # D2B48C \ " size = \ " 2 \ " > OUTLINE < / outline > < / font > " ) ; <nl> + _richText - > ignoreContentAdaptWithSize ( false ) ; <nl> + _richText - > setContentSize ( Size ( 100 , 100 ) ) ; <nl> + <nl> + _richText - > setPosition ( Vec2 ( widgetSize . width / 2 , widgetSize . height / 2 ) ) ; <nl> + _richText - > setLocalZOrder ( 10 ) ; <nl> + <nl> + <nl> + _widget - > addChild ( _richText ) ; <nl> + <nl> + / / test remove all children , this call won ' t effect the test <nl> + _richText - > removeAllChildren ( ) ; <nl> + <nl> + return true ; <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> + void UIRichTextXMLOutline : : touchEvent ( Ref * pSender , Widget : : TouchEventType type ) <nl> + { <nl> + switch ( type ) <nl> + { <nl> + case Widget : : TouchEventType : : ENDED : <nl> + { <nl> + if ( _richText - > isIgnoreContentAdaptWithSize ( ) ) <nl> + { <nl> + _richText - > ignoreContentAdaptWithSize ( false ) ; <nl> + _richText - > setContentSize ( Size ( 100 , 100 ) ) ; <nl> + } <nl> + else <nl> + { <nl> + _richText - > ignoreContentAdaptWithSize ( true ) ; <nl> + } <nl> + } <nl> + break ; <nl> + <nl> + default : <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + void UIRichTextXMLOutline : : switchWrapMode ( Ref * pSender , Widget : : TouchEventType type ) <nl> + { <nl> + if ( type = = Widget : : TouchEventType : : ENDED ) <nl> + { <nl> + auto wrapMode = _richText - > getWrapMode ( ) ; <nl> + wrapMode = ( wrapMode = = RichText : : WRAP_PER_WORD ) ? RichText : : WRAP_PER_CHAR : RichText : : WRAP_PER_WORD ; <nl> + _richText - > setWrapMode ( wrapMode ) ; <nl> + } <nl> + } <nl> + <nl> + / / <nl> + / / UIRichTextXMLShadow <nl> + / / <nl> + bool UIRichTextXMLShadow : : init ( ) <nl> + { <nl> + if ( UIScene : : init ( ) ) <nl> + { <nl> + Size widgetSize = _widget - > getContentSize ( ) ; <nl> + <nl> + / / Add the alert <nl> + Text * alert = Text : : create ( " Shadow " , " fonts / Marker Felt . ttf " , 30 ) ; <nl> + alert - > setColor ( Color3B ( 159 , 168 , 176 ) ) ; <nl> + alert - > setPosition ( Vec2 ( widgetSize . width / 2 . 0f , widgetSize . height / 2 . 0f - alert - > getContentSize ( ) . height * 3 . 125 ) ) ; <nl> + _widget - > addChild ( alert ) ; <nl> + <nl> + <nl> + Button * button = Button : : create ( " cocosui / animationbuttonnormal . png " , " cocosui / animationbuttonpressed . png " ) ; <nl> + button - > setTouchEnabled ( true ) ; <nl> + button - > setTitleText ( " switch " ) ; <nl> + button - > setPosition ( Vec2 ( widgetSize . width * 1 / 3 , widgetSize . height / 2 . 0f + button - > getContentSize ( ) . height * 2 . 5 ) ) ; <nl> + button - > addTouchEventListener ( CC_CALLBACK_2 ( UIRichTextXMLShadow : : touchEvent , this ) ) ; <nl> + button - > setLocalZOrder ( 10 ) ; <nl> + _widget - > addChild ( button ) ; <nl> + <nl> + Button * button2 = Button : : create ( " cocosui / animationbuttonnormal . png " , " cocosui / animationbuttonpressed . png " ) ; <nl> + button2 - > setTouchEnabled ( true ) ; <nl> + button2 - > setTitleText ( " wrap mode " ) ; <nl> + button2 - > setPosition ( Vec2 ( widgetSize . width * 2 / 3 , widgetSize . height / 2 . 0f + button2 - > getContentSize ( ) . height * 2 . 
5 ) ) ; <nl> + button2 - > addTouchEventListener ( CC_CALLBACK_2 ( UIRichTextXMLShadow : : switchWrapMode , this ) ) ; <nl> + button2 - > setLocalZOrder ( 10 ) ; <nl> + _widget - > addChild ( button2 ) ; <nl> + <nl> + <nl> + / / RichText <nl> + _richText = RichText : : createWithXML ( " < font size = \ " 24 \ " > < shadow color = \ " # 4169E1 \ " offsetWidth = \ " 8 \ " offsetHeight = \ " - 8 \ " blurRadius = \ " 2 \ " > SHADOW < / shadow > < / font > " ) ; <nl> + _richText - > ignoreContentAdaptWithSize ( false ) ; <nl> + _richText - > setContentSize ( Size ( 100 , 100 ) ) ; <nl> + <nl> + _richText - > setPosition ( Vec2 ( widgetSize . width / 2 , widgetSize . height / 2 ) ) ; <nl> + _richText - > setLocalZOrder ( 10 ) ; <nl> + <nl> + <nl> + _widget - > addChild ( _richText ) ; <nl> + <nl> + / / test remove all children , this call won ' t effect the test <nl> + _richText - > removeAllChildren ( ) ; <nl> + <nl> + return true ; <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> + void UIRichTextXMLShadow : : touchEvent ( Ref * pSender , Widget : : TouchEventType type ) <nl> + { <nl> + switch ( type ) <nl> + { <nl> + case Widget : : TouchEventType : : ENDED : <nl> + { <nl> + if ( _richText - > isIgnoreContentAdaptWithSize ( ) ) <nl> + { <nl> + _richText - > ignoreContentAdaptWithSize ( false ) ; <nl> + _richText - > setContentSize ( Size ( 100 , 100 ) ) ; <nl> + } <nl> + else <nl> + { <nl> + _richText - > ignoreContentAdaptWithSize ( true ) ; <nl> + } <nl> + } <nl> + break ; <nl> + <nl> + default : <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + void UIRichTextXMLShadow : : switchWrapMode ( Ref * pSender , Widget : : TouchEventType type ) <nl> + { <nl> + if ( type = = Widget : : TouchEventType : : ENDED ) <nl> + { <nl> + auto wrapMode = _richText - > getWrapMode ( ) ; <nl> + wrapMode = ( wrapMode = = RichText : : WRAP_PER_WORD ) ? RichText : : WRAP_PER_CHAR : RichText : : WRAP_PER_WORD ; <nl> + _richText - > setWrapMode ( wrapMode ) ; <nl> + } <nl> + } <nl> + <nl> + / / <nl> + / / UIRichTextXMLGlow <nl> + / / <nl> + bool UIRichTextXMLGlow : : init ( ) <nl> + { <nl> + if ( UIScene : : init ( ) ) <nl> + { <nl> + Size widgetSize = _widget - > getContentSize ( ) ; <nl> + <nl> + / / Add the alert <nl> + Text * alert = Text : : create ( " Glow " , " fonts / Marker Felt . ttf " , 30 ) ; <nl> + alert - > setColor ( Color3B ( 159 , 168 , 176 ) ) ; <nl> + alert - > setPosition ( Vec2 ( widgetSize . width / 2 . 0f , widgetSize . height / 2 . 0f - alert - > getContentSize ( ) . height * 3 . 125 ) ) ; <nl> + _widget - > addChild ( alert ) ; <nl> + <nl> + <nl> + Button * button = Button : : create ( " cocosui / animationbuttonnormal . png " , " cocosui / animationbuttonpressed . png " ) ; <nl> + button - > setTouchEnabled ( true ) ; <nl> + button - > setTitleText ( " switch " ) ; <nl> + button - > setPosition ( Vec2 ( widgetSize . width * 1 / 3 , widgetSize . height / 2 . 0f + button - > getContentSize ( ) . height * 2 . 5 ) ) ; <nl> + button - > addTouchEventListener ( CC_CALLBACK_2 ( UIRichTextXMLGlow : : touchEvent , this ) ) ; <nl> + button - > setLocalZOrder ( 10 ) ; <nl> + _widget - > addChild ( button ) ; <nl> + <nl> + Button * button2 = Button : : create ( " cocosui / animationbuttonnormal . png " , " cocosui / animationbuttonpressed . png " ) ; <nl> + button2 - > setTouchEnabled ( true ) ; <nl> + button2 - > setTitleText ( " wrap mode " ) ; <nl> + button2 - > setPosition ( Vec2 ( widgetSize . width * 2 / 3 , widgetSize . height / 2 . 0f + button2 - > getContentSize ( ) . height * 2 . 
5 ) ) ; <nl> + button2 - > addTouchEventListener ( CC_CALLBACK_2 ( UIRichTextXMLGlow : : switchWrapMode , this ) ) ; <nl> + button2 - > setLocalZOrder ( 10 ) ; <nl> + _widget - > addChild ( button2 ) ; <nl> + <nl> + <nl> + / / RichText <nl> + _richText = RichText : : createWithXML ( " < font face = \ " fonts / Marker Felt . ttf \ " size = \ " 24 \ " > < glow color = \ " # AFEEEE \ " > GLOW < / glow > < / font > " ) ; <nl> + _richText - > ignoreContentAdaptWithSize ( false ) ; <nl> + _richText - > setContentSize ( Size ( 100 , 100 ) ) ; <nl> + <nl> + _richText - > setPosition ( Vec2 ( widgetSize . width / 2 , widgetSize . height / 2 ) ) ; <nl> + _richText - > setLocalZOrder ( 10 ) ; <nl> + <nl> + <nl> + _widget - > addChild ( _richText ) ; <nl> + <nl> + / / test remove all children , this call won ' t effect the test <nl> + _richText - > removeAllChildren ( ) ; <nl> + <nl> + return true ; <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> + void UIRichTextXMLGlow : : touchEvent ( Ref * pSender , Widget : : TouchEventType type ) <nl> + { <nl> + switch ( type ) <nl> + { <nl> + case Widget : : TouchEventType : : ENDED : <nl> + { <nl> + if ( _richText - > isIgnoreContentAdaptWithSize ( ) ) <nl> + { <nl> + _richText - > ignoreContentAdaptWithSize ( false ) ; <nl> + _richText - > setContentSize ( Size ( 100 , 100 ) ) ; <nl> + } <nl> + else <nl> + { <nl> + _richText - > ignoreContentAdaptWithSize ( true ) ; <nl> + } <nl> + } <nl> + break ; <nl> + <nl> + default : <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + void UIRichTextXMLGlow : : switchWrapMode ( Ref * pSender , Widget : : TouchEventType type ) <nl> + { <nl> + if ( type = = Widget : : TouchEventType : : ENDED ) <nl> + { <nl> + auto wrapMode = _richText - > getWrapMode ( ) ; <nl> + wrapMode = ( wrapMode = = RichText : : WRAP_PER_WORD ) ? RichText : : WRAP_PER_CHAR : RichText : : WRAP_PER_WORD ; <nl> + _richText - > setWrapMode ( wrapMode ) ; <nl> + } <nl> + } <nl> + <nl> + / / <nl> + / / UIRichTextXMLExtend <nl> + / / <nl> + bool UIRichTextXMLExtend : : init ( ) <nl> + { <nl> + if ( UIScene : : init ( ) ) <nl> + { <nl> + Size widgetSize = _widget - > getContentSize ( ) ; <nl> + <nl> + / / Add the alert <nl> + Text * alert = Text : : create ( " Extend " , " fonts / Marker Felt . ttf " , 30 ) ; <nl> + alert - > setColor ( Color3B ( 159 , 168 , 176 ) ) ; <nl> + alert - > setPosition ( Vec2 ( widgetSize . width / 2 . 0f , widgetSize . height / 2 . 0f - alert - > getContentSize ( ) . height * 3 . 125 ) ) ; <nl> + _widget - > addChild ( alert ) ; <nl> + <nl> + <nl> + Button * button = Button : : create ( " cocosui / animationbuttonnormal . png " , " cocosui / animationbuttonpressed . png " ) ; <nl> + button - > setTouchEnabled ( true ) ; <nl> + button - > setTitleText ( " switch " ) ; <nl> + button - > setPosition ( Vec2 ( widgetSize . width * 1 / 3 , widgetSize . height / 2 . 0f + button - > getContentSize ( ) . height * 2 . 5 ) ) ; <nl> + button - > addTouchEventListener ( CC_CALLBACK_2 ( UIRichTextXMLExtend : : touchEvent , this ) ) ; <nl> + button - > setLocalZOrder ( 10 ) ; <nl> + _widget - > addChild ( button ) ; <nl> + <nl> + Button * button2 = Button : : create ( " cocosui / animationbuttonnormal . png " , " cocosui / animationbuttonpressed . png " ) ; <nl> + button2 - > setTouchEnabled ( true ) ; <nl> + button2 - > setTitleText ( " wrap mode " ) ; <nl> + button2 - > setPosition ( Vec2 ( widgetSize . width * 2 / 3 , widgetSize . height / 2 . 0f + button2 - > getContentSize ( ) . height * 2 . 
5 ) ) ; <nl> + button2 - > addTouchEventListener ( CC_CALLBACK_2 ( UIRichTextXMLExtend : : switchWrapMode , this ) ) ; <nl> + button2 - > setLocalZOrder ( 10 ) ; <nl> + _widget - > addChild ( button2 ) ; <nl> + <nl> + / * Tag extension * / <nl> + RichText : : setTagDescription ( " CloseNormal " , false , [ ] ( const ValueMap & tagAttrValueMap ) { <nl> + RichElementImage * richElement = RichElementImage : : create ( 0 , Color3B : : WHITE , 255 , " cocosui / CloseNormal . png " ) ; <nl> + return make_pair ( ValueMap ( ) , richElement ) ; <nl> + } ) ; <nl> + RichText : : setTagDescription ( " CloseSelected " , false , [ ] ( const ValueMap & tagAttrValueMap ) { <nl> + RichElementImage * richElement = RichElementImage : : create ( 0 , Color3B : : WHITE , 255 , " cocosui / CloseSelected . png " ) ; <nl> + return make_pair ( ValueMap ( ) , richElement ) ; <nl> + } ) ; <nl> + <nl> + / * Defaults * / <nl> + ValueMap defaults ; <nl> + defaults [ RichText : : KEY_FONT_COLOR_STRING ] = " # FFF " ; <nl> + defaults [ RichText : : KEY_FONT_SIZE ] = 12 . 0f ; <nl> + defaults [ RichText : : KEY_FONT_FACE ] = " fonts / Marker Felt . ttf " ; <nl> + defaults [ RichText : : KEY_ANCHOR_FONT_COLOR_STRING ] = " # f0f8ff " ; <nl> + defaults [ RichText : : KEY_ANCHOR_TEXT_BOLD ] = false ; <nl> + defaults [ RichText : : KEY_ANCHOR_TEXT_ITALIC ] = false ; <nl> + / / defaults [ RichText : : KEY_ANCHOR_TEXT_LINE ] = RichText : : VALUE_TEXT_LINE_NONE ; <nl> + / / defaults [ RichText : : KEY_ANCHOR_TEXT_LINE ] = RichText : : VALUE_TEXT_LINE_DEL ; <nl> + defaults [ RichText : : KEY_ANCHOR_TEXT_LINE ] = RichText : : VALUE_TEXT_LINE_UNDER ; <nl> + / / defaults [ RichText : : KEY_ANCHOR_TEXT_STYLE ] = RichText : : VALUE_TEXT_STYLE_NONE ; <nl> + / / defaults [ RichText : : KEY_ANCHOR_TEXT_STYLE ] = RichText : : VALUE_TEXT_STYLE_OUTLINE ; <nl> + / / defaults [ RichText : : KEY_ANCHOR_TEXT_STYLE ] = RichText : : VALUE_TEXT_STYLE_SHADOW ; <nl> + / / defaults [ RichText : : KEY_ANCHOR_TEXT_STYLE ] = RichText : : VALUE_TEXT_STYLE_GLOW ; <nl> + defaults [ RichText : : KEY_ANCHOR_TEXT_OUTLINE_COLOR ] = " # D2B48C " ; <nl> + defaults [ RichText : : KEY_ANCHOR_TEXT_OUTLINE_SIZE ] = 4 ; <nl> + defaults [ RichText : : KEY_ANCHOR_TEXT_SHADOW_COLOR ] = " # 4169E1 " ; <nl> + defaults [ RichText : : KEY_ANCHOR_TEXT_SHADOW_OFFSET_WIDTH ] = 4 . 0f ; <nl> + defaults [ RichText : : KEY_ANCHOR_TEXT_SHADOW_OFFSET_HEIGHT ] = - 4 . 0f ; <nl> + defaults [ RichText : : KEY_ANCHOR_TEXT_SHADOW_BLUR_RADIUS ] = 0 ; <nl> + defaults [ RichText : : KEY_ANCHOR_TEXT_GLOW_COLOR ] = " # AFEEEE " ; <nl> + <nl> + / / RichText <nl> + _richText = RichText : : createWithXML ( " < span > CloseNormal - tag : < br / > < CloseNormal / > < br / > < br / > CloseSelected - tag : < br / > < CloseSelected > < / CloseSelected > < / span > " , <nl> + defaults , <nl> + [ ] ( const std : : string & url ) { <nl> + Application : : getInstance ( ) - > openURL ( url ) ; <nl> + } ) ; <nl> + _richText - > ignoreContentAdaptWithSize ( false ) ; <nl> + _richText - > setContentSize ( Size ( 100 , 100 ) ) ; <nl> + <nl> + _richText - > setPosition ( Vec2 ( widgetSize . width / 2 , widgetSize . 
height / 2 ) ) ; <nl> + _richText - > setLocalZOrder ( 10 ) ; <nl> + <nl> + <nl> + _widget - > addChild ( _richText ) ; <nl> + <nl> + / / test remove all children , this call won ' t effect the test <nl> + _richText - > removeAllChildren ( ) ; <nl> + <nl> + return true ; <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> + void UIRichTextXMLExtend : : touchEvent ( Ref * pSender , Widget : : TouchEventType type ) <nl> + { <nl> + switch ( type ) <nl> + { <nl> + case Widget : : TouchEventType : : ENDED : <nl> + { <nl> + if ( _richText - > isIgnoreContentAdaptWithSize ( ) ) <nl> + { <nl> + _richText - > ignoreContentAdaptWithSize ( false ) ; <nl> + _richText - > setContentSize ( Size ( 100 , 100 ) ) ; <nl> + } <nl> + else <nl> + { <nl> + _richText - > ignoreContentAdaptWithSize ( true ) ; <nl> + } <nl> + } <nl> + break ; <nl> + <nl> + default : <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + void UIRichTextXMLExtend : : switchWrapMode ( Ref * pSender , Widget : : TouchEventType type ) <nl> + { <nl> + if ( type = = Widget : : TouchEventType : : ENDED ) <nl> + { <nl> + auto wrapMode = _richText - > getWrapMode ( ) ; <nl> + wrapMode = ( wrapMode = = RichText : : WRAP_PER_WORD ) ? RichText : : WRAP_PER_CHAR : RichText : : WRAP_PER_WORD ; <nl> + _richText - > setWrapMode ( wrapMode ) ; <nl> + } <nl> + } <nl> mmm a / tests / cpp - tests / Classes / UITest / CocoStudioGUITest / UIRichTextTest / UIRichTextTest . h <nl> ppp b / tests / cpp - tests / Classes / UITest / CocoStudioGUITest / UIRichTextTest / UIRichTextTest . h <nl> class UIRichTextXMLUrl : public UIScene <nl> cocos2d : : ui : : RichText * _richText ; <nl> } ; <nl> <nl> + class UIRichTextXMLUrlImg : public UIScene <nl> + { <nl> + public : <nl> + CREATE_FUNC ( UIRichTextXMLUrlImg ) ; <nl> + <nl> + bool init ( ) override ; <nl> + void touchEvent ( cocos2d : : Ref * sender , cocos2d : : ui : : Widget : : TouchEventType type ) ; <nl> + void switchWrapMode ( cocos2d : : Ref * sender , cocos2d : : ui : : Widget : : TouchEventType type ) ; <nl> + <nl> + protected : <nl> + cocos2d : : ui : : RichText * _richText ; <nl> + } ; <nl> + <nl> class UIRichTextXMLFace : public UIScene <nl> { <nl> public : <nl> class UIRichTextXMLInvalid : public UIScene <nl> cocos2d : : ui : : RichText * _richText ; <nl> } ; <nl> <nl> + class UIRichTextXMLOutline : public UIScene <nl> + { <nl> + public : <nl> + CREATE_FUNC ( UIRichTextXMLOutline ) ; <nl> + <nl> + bool init ( ) override ; <nl> + void touchEvent ( cocos2d : : Ref * sender , cocos2d : : ui : : Widget : : TouchEventType type ) ; <nl> + void switchWrapMode ( cocos2d : : Ref * sender , cocos2d : : ui : : Widget : : TouchEventType type ) ; <nl> + <nl> + protected : <nl> + cocos2d : : ui : : RichText * _richText ; <nl> + } ; <nl> <nl> + class UIRichTextXMLShadow : public UIScene <nl> + { <nl> + public : <nl> + CREATE_FUNC ( UIRichTextXMLShadow ) ; <nl> + <nl> + bool init ( ) override ; <nl> + void touchEvent ( cocos2d : : Ref * sender , cocos2d : : ui : : Widget : : TouchEventType type ) ; <nl> + void switchWrapMode ( cocos2d : : Ref * sender , cocos2d : : ui : : Widget : : TouchEventType type ) ; <nl> + <nl> + protected : <nl> + cocos2d : : ui : : RichText * _richText ; <nl> + } ; <nl> + <nl> + class UIRichTextXMLGlow : public UIScene <nl> + { <nl> + public : <nl> + CREATE_FUNC ( UIRichTextXMLGlow ) ; <nl> + <nl> + bool init ( ) override ; <nl> + void touchEvent ( cocos2d : : Ref * sender , cocos2d : : ui : : Widget : : TouchEventType type ) ; <nl> + void switchWrapMode ( cocos2d : : Ref * sender , cocos2d : : ui 
: : Widget : : TouchEventType type ) ; <nl> + <nl> + protected : <nl> + cocos2d : : ui : : RichText * _richText ; <nl> + } ; <nl> + <nl> + class UIRichTextXMLExtend : public UIScene <nl> + { <nl> + public : <nl> + CREATE_FUNC ( UIRichTextXMLExtend ) ; <nl> + <nl> + bool init ( ) override ; <nl> + void touchEvent ( cocos2d : : Ref * sender , cocos2d : : ui : : Widget : : TouchEventType type ) ; <nl> + void switchWrapMode ( cocos2d : : Ref * sender , cocos2d : : ui : : Widget : : TouchEventType type ) ; <nl> + <nl> + protected : <nl> + cocos2d : : ui : : RichText * _richText ; <nl> + } ; <nl> <nl> # endif / * defined ( __TestCpp__UIRichTextTest__ ) * / <nl> mmm a / tools / tojs / cocos2dx_ui . ini <nl> ppp b / tools / tojs / cocos2dx_ui . ini <nl> headers = % ( cocosdir ) s / cocos / ui / CocosGUI . h % ( cocosdir ) s / cocos / ui / UIScrollViewBar <nl> <nl> # what classes to produce code for . You can use regular expressions here . When testing the regular <nl> # expression , it will be enclosed in " ^ $ " , like this : " ^ Menu * $ " . <nl> - classes = Helper Layout Widget Button CheckBox ImageView Text TextAtlas TextBMFont RichText RichElement RichElementText RichElementImage RichElementCustomNode LoadingBar Slider TextField UICCTextField ScrollView ScrollViewBar PageView ListView LayoutParameter LinearLayoutParameter RelativeLayoutParameter VideoPlayer HBox VBox RelativeBox Scale9Sprite EditBox $ LayoutComponent RadioButtonGroup RadioButton AbstractCheckButton TabControl TabHeader <nl> + classes = Helper Layout Widget Button CheckBox ImageView Text TextAtlas TextBMFont RichText RichElement RichElementText RichElementImage RichElementCustomNode RichElementNewLine LoadingBar Slider TextField UICCTextField ScrollView ScrollViewBar PageView ListView LayoutParameter LinearLayoutParameter RelativeLayoutParameter VideoPlayer HBox VBox RelativeBox Scale9Sprite EditBox $ LayoutComponent RadioButtonGroup RadioButton AbstractCheckButton TabControl TabHeader <nl> <nl> - classes_need_extend = Layout Widget Button CheckBox ImageView Text TextAtlas TextBMFont RichText RichElement RichElementText RichElementImage RichElementCustomNode LoadingBar Slider TextField ScrollView ScrollViewBar PageView ListView VideoPlayer HBox VBox RelativeBox Scale9Sprite EditBox $ LayoutComponent RadioButtonGroup RadioButton AbstractCheckButton TabControl TabHeader <nl> + classes_need_extend = Layout Widget Button CheckBox ImageView Text TextAtlas TextBMFont RichText RichElement RichElementText RichElementImage RichElementCustomNode RichElementNewLine LoadingBar Slider TextField ScrollView ScrollViewBar PageView ListView VideoPlayer HBox VBox RelativeBox Scale9Sprite EditBox $ LayoutComponent RadioButtonGroup RadioButton AbstractCheckButton TabControl TabHeader <nl> <nl> # what should we skip ? in the format ClassName : : [ function function ] <nl> # ClassName is a regular expression , but will be used like this : " ^ ClassName $ " functions are also <nl> skip = * : : [ ^ visit $ copyWith . * onEnter . * onExit . * ^ description $ getObjectType . * H <nl> Layer : : [ getInputManager ] , <nl> LayoutParameter : : [ ( s | g ) etMargin ] , <nl> ImageView : : [ doubleClickEvent checkDoubleClick ] , <nl> - RichText : : [ getVirtualRendererSize ] , <nl> + RichText : : [ getVirtualRendererSize setTagDescription removeTagDescription setOpenUrlHandler ] , <nl> EditBox : : [ ( g | s ) etDelegate ^ keyboard . 
* touchDownAction getScriptEditBoxHandler registerScriptEditBoxHandler unregisterScriptEditBoxHandler ] <nl> <nl> rename_functions = Widget : : [ init = _init ] , <nl> mmm a / tools / tolua / cocos2dx_ui . ini <nl> ppp b / tools / tolua / cocos2dx_ui . ini <nl> classes = Helper Widget Layout Button CheckBox ImageView Text TextAtlas TextBMFo <nl> <nl> skip = * : : [ ^ visit $ copyWith . * onEnter . * onExit . * ^ description $ getObjectType . * HSV onTouch . * onAcc . * onKey . * onRegisterTouchListener ccTouch . * ( g | s ) etDelegate ] , <nl> Widget : : [ addTouchEventListener addClickEventListener addCCSEventListener ] , <nl> - LayoutParameter : : [ ( s | g ) etMargin ] <nl> + LayoutParameter : : [ ( s | g ) etMargin ] , <nl> + RichText : : [ setTagDescription removeTagDescription setOpenUrlHandler ] <nl> <nl> rename_functions = <nl> <nl>
Merge pull request from m - yukio / feature / richtext_xml_refactor
cocos2d/cocos2d-x
b8eae430fbdef5efa4226646c646009135d58e75
2016-04-21T03:05:29Z
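The UIRichTextXMLExtend test added above exercises two toggles: a tap on the widget flips between a fixed content size and size-to-content, and a second handler flips the wrap mode between per-word and per-char. Below is a minimal stand-alone sketch of that toggle logic in plain C++; the struct and function names are illustrative only and do not use the cocos2d-x API.

#include <cassert>

enum class WrapMode { PerWord, PerChar };

// Tiny model of the two RichText properties the test flips.
struct RichTextModel {
  bool ignoreContentAdapt = true;   // size-to-content by default
  WrapMode wrap = WrapMode::PerWord;
  int width = 0, height = 0;
};

// Mirrors the touchEvent handler: ENDED toggles between a fixed 100x100
// box and size-to-content.
void OnTouchEnded(RichTextModel& rt) {
  if (rt.ignoreContentAdapt) {
    rt.ignoreContentAdapt = false;
    rt.width = rt.height = 100;
  } else {
    rt.ignoreContentAdapt = true;
  }
}

// Mirrors switchWrapMode: flip between the two wrap modes.
void SwitchWrapMode(RichTextModel& rt) {
  rt.wrap = (rt.wrap == WrapMode::PerWord) ? WrapMode::PerChar
                                           : WrapMode::PerWord;
}

int main() {
  RichTextModel rt;
  OnTouchEnded(rt);
  assert(!rt.ignoreContentAdapt && rt.width == 100);
  SwitchWrapMode(rt);
  assert(rt.wrap == WrapMode::PerChar);
  OnTouchEnded(rt);
  assert(rt.ignoreContentAdapt);
  return 0;
}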
mmm a / tensorflow / compiler / mlir / tensorflow / ir / tf_generated_ops . td <nl> ppp b / tensorflow / compiler / mlir / tensorflow / ir / tf_generated_ops . td <nl> subsequent operation and then be optimized away , however . ) <nl> } ] ; <nl> } <nl> <nl> + def TF_BucketizeOp : TF_Op < " Bucketize " , [ NoSideEffect , SameOperandsAndResultShape ] > { <nl> + let summary = " Bucketizes ' input ' based on ' boundaries ' . " ; <nl> + <nl> + let description = [ { <nl> + For example , if the inputs are <nl> + boundaries = [ 0 , 10 , 100 ] <nl> + input = [ [ - 5 , 10000 ] <nl> + [ 150 , 10 ] <nl> + [ 5 , 100 ] ] <nl> + <nl> + then the output will be <nl> + output = [ [ 0 , 3 ] <nl> + [ 3 , 2 ] <nl> + [ 1 , 3 ] ] <nl> + } ] ; <nl> + <nl> + let arguments = ( ins <nl> + TensorOf < [ F32 , F64 , I32 , I64 ] > : $ input , <nl> + <nl> + F32ArrayAttr : $ boundaries <nl> + ) ; <nl> + <nl> + let results = ( outs <nl> + I32Tensor : $ output <nl> + ) ; <nl> + <nl> + TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr < 0 > ; <nl> + } <nl> + <nl> def TF_CaseOp : TF_Op < " Case " , [ ] > { <nl> let summary = [ { <nl> An n - way switch statement which calls a single branch function . <nl> convolutional neural networks ( NIPS 2012 ) ] ( http : / / papers . nips . cc / paper / 4824 - imag <nl> TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr < 0 > ; <nl> } <nl> <nl> + def TF_LRNGradOp : TF_Op < " LRNGrad " , [ NoSideEffect ] > { <nl> + let summary = " Gradients for Local Response Normalization . " ; <nl> + <nl> + let description = [ { <nl> + } ] ; <nl> + <nl> + let arguments = ( ins <nl> + TensorOf < [ BF16 , F16 , F32 ] > : $ input_grads , <nl> + TensorOf < [ BF16 , F16 , F32 ] > : $ input_image , <nl> + TensorOf < [ BF16 , F16 , F32 ] > : $ output_image , <nl> + <nl> + DefaultValuedAttr < I64Attr , " 5 " > : $ depth_radius , <nl> + DefaultValuedAttr < F32Attr , " 1 . 0f " > : $ bias , <nl> + DefaultValuedAttr < F32Attr , " 1 . 0f " > : $ alpha , <nl> + DefaultValuedAttr < F32Attr , " 0 . 5f " > : $ beta <nl> + ) ; <nl> + <nl> + let results = ( outs <nl> + TensorOf < [ BF16 , F16 , F32 ] > : $ output <nl> + ) ; <nl> + <nl> + TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr < 0 > ; <nl> + } <nl> + <nl> def TF_LeakyReluOp : TF_Op < " LeakyRelu " , [ NoSideEffect , SameOperandsAndResultType ] > { <nl> let summary = " Computes rectified linear : ` max ( features , features * alpha ) ` . " ; <nl> <nl>
Auto generate TensorFlow Bucketize and LRNGrad ops
tensorflow/tensorflow
3862d97f8586f3caf3856834948dc9cc7d00ff92
2020-06-13T01:12:57Z
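The Bucketize op registered above maps every input value to the index of the bucket it falls into, which for a sorted boundary list is simply the number of boundaries less than or equal to the value. The following stand-alone sketch reproduces the example from the op description using std::upper_bound; it mirrors the documented behaviour only, not the TensorFlow kernel itself.

#include <algorithm>
#include <cassert>
#include <vector>

// Bucket index of v = number of sorted boundaries <= v, which is exactly
// the distance std::upper_bound reports.
int Bucketize(float v, const std::vector<float>& boundaries) {
  return static_cast<int>(
      std::upper_bound(boundaries.begin(), boundaries.end(), v) -
      boundaries.begin());
}

int main() {
  const std::vector<float> boundaries = {0.f, 10.f, 100.f};
  // Same inputs and outputs as the example in the op description.
  assert(Bucketize(-5.f, boundaries) == 0);
  assert(Bucketize(10000.f, boundaries) == 3);
  assert(Bucketize(150.f, boundaries) == 3);
  assert(Bucketize(10.f, boundaries) == 2);
  assert(Bucketize(5.f, boundaries) == 1);
  assert(Bucketize(100.f, boundaries) == 3);
  return 0;
}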
diff - - git a / Dynamic Programming / Edit Distance . cpp b / Dynamic Programming / Edit Distance . cpp <nl> new file mode 100644 <nl> index 0000000000 . . d99d03c5a1 <nl> mmm / dev / null <nl> ppp b / Dynamic Programming / Edit Distance . cpp <nl> <nl> + / * Given two strings str1 & str2 <nl> + * and below operations that can <nl> + * be performed on str1 . Find <nl> + * minimum number of edits <nl> + * ( operations ) required to convert <nl> + * ' str1 ' into ' str2 ' : <nl> + * a . Insert <nl> + * b . Remove <nl> + * c . Replace <nl> + * All of the above operations are <nl> + * of equal cost <nl> + * / <nl> + <nl> + # include < iostream > <nl> + # include < string > <nl> + using namespace std ; <nl> + <nl> + int min ( int x , int y , int z ) { <nl> + return min ( min ( x , y ) , z ) ; <nl> + } <nl> + <nl> + / * A Naive recursive C + + program to find <nl> + * minimum number of operations to convert <nl> + * str1 to str2 . <nl> + * O ( 3 ^ m ) <nl> + * / <nl> + int editDist ( string str1 , string str2 , int m , int n ) { <nl> + if ( m = = 0 ) return n ; <nl> + if ( n = = 0 ) return m ; <nl> + <nl> + / / If last characters are same then continue <nl> + / / for the rest of them . <nl> + if ( str1 [ m - 1 ] = = str2 [ n - 1 ] ) <nl> + return editDist ( str1 , str2 , m - 1 , n - 1 ) ; <nl> + <nl> + / / If last not same , then 3 possibilities <nl> + / / a . Insert b . Remove c . Replace <nl> + / / Get min of three and continue for rest . <nl> + return 1 + min ( editDist ( str1 , str2 , m , n - 1 ) , <nl> + editDist ( str1 , str2 , m - 1 , n ) , <nl> + editDist ( str1 , str2 , m - 1 , n - 1 ) <nl> + ) ; <nl> + } <nl> + <nl> + / * A DP based program <nl> + * O ( m x n ) <nl> + * / <nl> + int editDistDP ( string str1 , string str2 , int m , int n ) { <nl> + <nl> + / / Create Table for SubProblems <nl> + int dp [ m + 1 ] [ n + 1 ] ; <nl> + <nl> + / / Fill dp [ ] [ ] in bottom up manner <nl> + for ( int i = 0 ; i < = m ; i + + ) { <nl> + for ( int j = 0 ; j < = n ; j + + ) { <nl> + / / If str1 empty . Then add all of str2 <nl> + if ( i = = 0 ) <nl> + dp [ i ] [ j ] = j ; <nl> + <nl> + / / If str2 empty . Then add all of str1 <nl> + else if ( j = = 0 ) <nl> + dp [ i ] [ j ] = i ; <nl> + <nl> + / / If character same . Recur for remaining <nl> + else if ( str1 [ i - 1 ] = = str2 [ j - 1 ] ) <nl> + dp [ i ] [ j ] = dp [ i - 1 ] [ j - 1 ] ; <nl> + <nl> + else <nl> + dp [ i ] [ j ] = 1 + min ( dp [ i ] [ j - 1 ] , / / Insert <nl> + dp [ i - 1 ] [ j ] , / / Remove <nl> + dp [ i - 1 ] [ j - 1 ] / / Replace <nl> + ) ; <nl> + } <nl> + } <nl> + <nl> + return dp [ m ] [ n ] ; <nl> + } <nl> + <nl> + int main ( ) { <nl> + string str1 = " sunday " ; <nl> + string str2 = " saturday " ; <nl> + <nl> + cout < < editDist ( str1 , str2 , str1 . length ( ) , str2 . length ( ) ) < < endl ; <nl> + cout < < editDistDP ( str1 , str2 , str1 . length ( ) , str2 . length ( ) ) < < endl ; <nl> + <nl> + return 0 ; <nl> + } <nl>
Edit Distance Algorithm
TheAlgorithms/C-Plus-Plus
85dc1cbbe13ca54c5fe3d81640102a52eb0df0e7
2017-10-28T10:06:20Z
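The editDistDP table in the commit above can be reduced to two rolling rows, since each cell only needs the previous row and the cell just computed to its left. A short, self-contained sketch of that space optimisation follows; it is an illustration added here, not part of the original commit.

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

// Same recurrence as editDistDP, but keeping only two rows of the table.
int editDistTwoRows(const std::string& a, const std::string& b) {
  const int m = static_cast<int>(a.size());
  const int n = static_cast<int>(b.size());
  std::vector<int> prev(n + 1), cur(n + 1);
  for (int j = 0; j <= n; ++j) prev[j] = j;   // "" -> b[0..j)
  for (int i = 1; i <= m; ++i) {
    cur[0] = i;                               // a[0..i) -> ""
    for (int j = 1; j <= n; ++j) {
      if (a[i - 1] == b[j - 1])
        cur[j] = prev[j - 1];                 // characters match, no edit
      else
        cur[j] = 1 + std::min({cur[j - 1],    // insert
                               prev[j],       // remove
                               prev[j - 1]}); // replace
    }
    std::swap(prev, cur);
  }
  return prev[n];
}

int main() {
  assert(editDistTwoRows("sunday", "saturday") == 3);
  assert(editDistTwoRows("kitten", "sitting") == 3);
  assert(editDistTwoRows("", "abc") == 3);
  return 0;
}

Both formulations return the same values; only the memory footprint drops from O(m*n) to O(n).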
mmm a / src / compiler / ruby_generator . cc <nl> ppp b / src / compiler / ruby_generator . cc <nl> <nl> * / <nl> <nl> # include < cctype > <nl> - # include < string > <nl> # include < map > <nl> # include < vector > <nl> <nl> + # include " src / compiler / config . h " <nl> # include " src / compiler / ruby_generator . h " <nl> # include " src / compiler / ruby_generator_helpers - inl . h " <nl> # include " src / compiler / ruby_generator_map - inl . h " <nl> # include " src / compiler / ruby_generator_string - inl . h " <nl> - # include < google / protobuf / io / printer . h > <nl> - # include < google / protobuf / io / zero_copy_stream_impl_lite . h > <nl> - # include < google / protobuf / descriptor . pb . h > <nl> - # include < google / protobuf / descriptor . h > <nl> - <nl> - using google : : protobuf : : FileDescriptor ; <nl> - using google : : protobuf : : ServiceDescriptor ; <nl> - using google : : protobuf : : MethodDescriptor ; <nl> - using google : : protobuf : : io : : Printer ; <nl> - using google : : protobuf : : io : : StringOutputStream ; <nl> + <nl> + using grpc : : protobuf : : FileDescriptor ; <nl> + using grpc : : protobuf : : ServiceDescriptor ; <nl> + using grpc : : protobuf : : MethodDescriptor ; <nl> + using grpc : : protobuf : : io : : Printer ; <nl> + using grpc : : protobuf : : io : : StringOutputStream ; <nl> using std : : map ; <nl> using std : : vector ; <nl> <nl> namespace grpc_ruby_generator { <nl> namespace { <nl> <nl> / / Prints out the method using the ruby gRPC DSL . <nl> - void PrintMethod ( const MethodDescriptor * method , const std : : string & package , <nl> + void PrintMethod ( const MethodDescriptor * method , const grpc : : string & package , <nl> Printer * out ) { <nl> - std : : string input_type = RubyTypeOf ( method - > input_type ( ) - > name ( ) , package ) ; <nl> + grpc : : string input_type = RubyTypeOf ( method - > input_type ( ) - > name ( ) , package ) ; <nl> if ( method - > client_streaming ( ) ) { <nl> input_type = " stream ( " + input_type + " ) " ; <nl> } <nl> - std : : string output_type = RubyTypeOf ( method - > output_type ( ) - > name ( ) , package ) ; <nl> + grpc : : string output_type = RubyTypeOf ( method - > output_type ( ) - > name ( ) , package ) ; <nl> if ( method - > server_streaming ( ) ) { <nl> output_type = " stream ( " + output_type + " ) " ; <nl> } <nl> - std : : map < std : : string , std : : string > method_vars = <nl> + std : : map < grpc : : string , grpc : : string > method_vars = <nl> ListToDict ( { " mth . name " , method - > name ( ) , " input . type " , input_type , <nl> " output . type " , output_type , } ) ; <nl> out - > Print ( method_vars , " rpc : $ mth . name $ , $ input . type $ , $ output . type $ \ n " ) ; <nl> } <nl> <nl> / / Prints out the service using the ruby gRPC DSL . <nl> - void PrintService ( const ServiceDescriptor * service , const std : : string & package , <nl> + void PrintService ( const ServiceDescriptor * service , const grpc : : string & package , <nl> Printer * out ) { <nl> if ( service - > method_count ( ) = = 0 ) { <nl> return ; <nl> } <nl> <nl> / / Begin the service module <nl> - std : : map < std : : string , std : : string > module_vars = <nl> + std : : map < grpc : : string , grpc : : string > module_vars = <nl> ListToDict ( { " module . name " , CapitalizeFirst ( service - > name ( ) ) , } ) ; <nl> out - > Print ( module_vars , " module $ module . 
name $ \ n " ) ; <nl> out - > Indent ( ) ; <nl> <nl> / / TODO ( temiola ) : add documentation <nl> - std : : string doc = " TODO : add proto service documentation here " ; <nl> - std : : map < std : : string , std : : string > template_vars = <nl> + grpc : : string doc = " TODO : add proto service documentation here " ; <nl> + std : : map < grpc : : string , grpc : : string > template_vars = <nl> ListToDict ( { " Documentation " , doc , } ) ; <nl> out - > Print ( " \ n " ) ; <nl> out - > Print ( template_vars , " # $ Documentation $ \ n " ) ; <nl> void PrintService ( const ServiceDescriptor * service , const std : : string & package , <nl> out - > Print ( " \ n " ) ; <nl> out - > Print ( " self . marshal_class_method = : encode \ n " ) ; <nl> out - > Print ( " self . unmarshal_class_method = : decode \ n " ) ; <nl> - std : : map < std : : string , std : : string > pkg_vars = <nl> + std : : map < grpc : : string , grpc : : string > pkg_vars = <nl> ListToDict ( { " service . name " , service - > name ( ) , " pkg . name " , package , } ) ; <nl> out - > Print ( pkg_vars , " self . service_name = ' $ pkg . name $ . $ service . name $ ' \ n " ) ; <nl> out - > Print ( " \ n " ) ; <nl> void PrintService ( const ServiceDescriptor * service , const std : : string & package , <nl> <nl> } / / namespace <nl> <nl> - std : : string GetServices ( const FileDescriptor * file ) { <nl> - std : : string output ; <nl> + grpc : : string GetServices ( const FileDescriptor * file ) { <nl> + grpc : : string output ; <nl> StringOutputStream output_stream ( & output ) ; <nl> Printer out ( & output_stream , ' $ ' ) ; <nl> <nl> std : : string GetServices ( const FileDescriptor * file ) { <nl> } <nl> <nl> / / Write out a file header . <nl> - std : : map < std : : string , std : : string > header_comment_vars = ListToDict ( <nl> + std : : map < grpc : : string , grpc : : string > header_comment_vars = ListToDict ( <nl> { " file . name " , file - > name ( ) , " file . package " , file - > package ( ) , } ) ; <nl> out . Print ( " # Generated by the protocol buffer compiler . DO NOT EDIT ! \ n " ) ; <nl> out . Print ( header_comment_vars , <nl> std : : string GetServices ( const FileDescriptor * file ) { <nl> / / Write out require statemment to import the separately generated file <nl> / / that defines the messages used by the service . This is generated by the <nl> / / main ruby plugin . <nl> - std : : map < std : : string , std : : string > dep_vars = <nl> + std : : map < grpc : : string , grpc : : string > dep_vars = <nl> ListToDict ( { " dep . name " , MessagesRequireName ( file ) , } ) ; <nl> out . Print ( dep_vars , " require ' $ dep . name $ ' \ n " ) ; <nl> <nl> / / Write out services within the modules <nl> out . Print ( " \ n " ) ; <nl> - std : : vector < std : : string > modules = Split ( file - > package ( ) , ' . ' ) ; <nl> + std : : vector < grpc : : string > modules = Split ( file - > package ( ) , ' . ' ) ; <nl> for ( size_t i = 0 ; i < modules . size ( ) ; + + i ) { <nl> - std : : map < std : : string , std : : string > module_vars = <nl> + std : : map < grpc : : string , grpc : : string > module_vars = <nl> ListToDict ( { " module . name " , CapitalizeFirst ( modules [ i ] ) , } ) ; <nl> out . Print ( module_vars , " module $ module . name $ \ n " ) ; <nl> out . Indent ( ) ; <nl> mmm a / src / compiler / ruby_generator . h <nl> ppp b / src / compiler / ruby_generator . 
h <nl> <nl> # ifndef GRPC_INTERNAL_COMPILER_RUBY_GENERATOR_H <nl> # define GRPC_INTERNAL_COMPILER_RUBY_GENERATOR_H <nl> <nl> - # include < string > <nl> - <nl> - namespace google { <nl> - namespace protobuf { <nl> - class FileDescriptor ; <nl> - } / / namespace protobuf <nl> - } / / namespace google <nl> + # include " src / compiler / config . h " <nl> <nl> namespace grpc_ruby_generator { <nl> <nl> - std : : string GetServices ( const google : : protobuf : : FileDescriptor * file ) ; <nl> + grpc : : string GetServices ( const grpc : : protobuf : : FileDescriptor * file ) ; <nl> <nl> } / / namespace grpc_ruby_generator <nl> <nl> mmm a / src / compiler / ruby_generator_helpers - inl . h <nl> ppp b / src / compiler / ruby_generator_helpers - inl . h <nl> <nl> # ifndef GRPC_INTERNAL_COMPILER_RUBY_GENERATOR_HELPERS_INL_H <nl> # define GRPC_INTERNAL_COMPILER_RUBY_GENERATOR_HELPERS_INL_H <nl> <nl> - # include < string > <nl> - <nl> - # include < google / protobuf / descriptor . h > <nl> + # include " src / compiler / config . h " <nl> # include " src / compiler / ruby_generator_string - inl . h " <nl> <nl> namespace grpc_ruby_generator { <nl> <nl> - inline bool ServicesFilename ( const google : : protobuf : : FileDescriptor * file , <nl> - std : : string * file_name_or_error ) { <nl> + inline bool ServicesFilename ( const grpc : : protobuf : : FileDescriptor * file , <nl> + grpc : : string * file_name_or_error ) { <nl> / / Get output file name . <nl> static const unsigned proto_suffix_length = 6 ; / / length of " . proto " <nl> if ( file - > name ( ) . size ( ) > proto_suffix_length & & <nl> inline bool ServicesFilename ( const google : : protobuf : : FileDescriptor * file , <nl> } <nl> } <nl> <nl> - inline std : : string MessagesRequireName ( <nl> - const google : : protobuf : : FileDescriptor * file ) { <nl> + inline grpc : : string MessagesRequireName ( <nl> + const grpc : : protobuf : : FileDescriptor * file ) { <nl> return Replace ( file - > name ( ) , " . proto " , " " ) ; <nl> } <nl> <nl> mmm a / src / compiler / ruby_generator_map - inl . h <nl> ppp b / src / compiler / ruby_generator_map - inl . h <nl> <nl> # ifndef GRPC_INTERNAL_COMPILER_RUBY_GENERATOR_MAP_INL_H <nl> # define GRPC_INTERNAL_COMPILER_RUBY_GENERATOR_MAP_INL_H <nl> <nl> + # include " src / compiler / config . h " <nl> + <nl> # include < iostream > <nl> # include < initializer_list > <nl> # include < map > <nl> # include < ostream > / / NOLINT <nl> - # include < string > <nl> # include < vector > <nl> <nl> using std : : initializer_list ; <nl> namespace grpc_ruby_generator { <nl> <nl> / / Converts an initializer list of the form { key0 , value0 , key1 , value1 , . . . } <nl> / / into a map of key * to value * . Is merely a readability helper for later code . <nl> - inline std : : map < std : : string , std : : string > ListToDict ( <nl> - const initializer_list < std : : string > & values ) { <nl> + inline std : : map < grpc : : string , grpc : : string > ListToDict ( <nl> + const initializer_list < grpc : : string > & values ) { <nl> if ( values . size ( ) % 2 ! = 0 ) { <nl> std : : cerr < < " Not every ' key ' has a value in ` values ` . " <nl> < < std : : endl ; <nl> } <nl> - std : : map < std : : string , std : : string > value_map ; <nl> + std : : map < grpc : : string , grpc : : string > value_map ; <nl> auto value_iter = values . begin ( ) ; <nl> for ( unsigned i = 0 ; i < values . 
size ( ) / 2 ; + + i ) { <nl> - std : : string key = * value_iter ; <nl> + grpc : : string key = * value_iter ; <nl> + + value_iter ; <nl> - std : : string value = * value_iter ; <nl> + grpc : : string value = * value_iter ; <nl> value_map [ key ] = value ; <nl> + + value_iter ; <nl> } <nl> mmm a / src / compiler / ruby_generator_string - inl . h <nl> ppp b / src / compiler / ruby_generator_string - inl . h <nl> <nl> # ifndef GRPC_INTERNAL_COMPILER_RUBY_GENERATOR_STRING_INL_H <nl> # define GRPC_INTERNAL_COMPILER_RUBY_GENERATOR_STRING_INL_H <nl> <nl> + # include " src / compiler / config . h " <nl> + <nl> # include < algorithm > <nl> - # include < string > <nl> # include < sstream > <nl> # include < vector > <nl> <nl> using std : : transform ; <nl> namespace grpc_ruby_generator { <nl> <nl> / / Split splits a string using char into elems . <nl> - inline std : : vector < std : : string > & Split ( const std : : string & s , char delim , <nl> - std : : vector < std : : string > * elems ) { <nl> + inline std : : vector < grpc : : string > & Split ( const grpc : : string & s , char delim , <nl> + std : : vector < grpc : : string > * elems ) { <nl> std : : stringstream ss ( s ) ; <nl> - std : : string item ; <nl> + grpc : : string item ; <nl> while ( getline ( ss , item , delim ) ) { <nl> elems - > push_back ( item ) ; <nl> } <nl> inline std : : vector < std : : string > & Split ( const std : : string & s , char delim , <nl> } <nl> <nl> / / Split splits a string using char , returning the result in a vector . <nl> - inline std : : vector < std : : string > Split ( const std : : string & s , char delim ) { <nl> - std : : vector < std : : string > elems ; <nl> + inline std : : vector < grpc : : string > Split ( const grpc : : string & s , char delim ) { <nl> + std : : vector < grpc : : string > elems ; <nl> Split ( s , delim , & elems ) ; <nl> return elems ; <nl> } <nl> <nl> / / Replace replaces from with to in s . <nl> - inline std : : string Replace ( std : : string s , const std : : string & from , <nl> - const std : : string & to ) { <nl> + inline grpc : : string Replace ( grpc : : string s , const grpc : : string & from , <nl> + const grpc : : string & to ) { <nl> size_t start_pos = s . find ( from ) ; <nl> - if ( start_pos = = std : : string : : npos ) { <nl> + if ( start_pos = = grpc : : string : : npos ) { <nl> return s ; <nl> } <nl> s . replace ( start_pos , from . length ( ) , to ) ; <nl> inline std : : string Replace ( std : : string s , const std : : string & from , <nl> } <nl> <nl> / / ReplaceAll replaces all instances of search with replace in s . <nl> - inline std : : string ReplaceAll ( std : : string s , const std : : string & search , <nl> - const std : : string & replace ) { <nl> + inline grpc : : string ReplaceAll ( grpc : : string s , const grpc : : string & search , <nl> + const grpc : : string & replace ) { <nl> size_t pos = 0 ; <nl> - while ( ( pos = s . find ( search , pos ) ) ! = std : : string : : npos ) { <nl> + while ( ( pos = s . find ( search , pos ) ) ! = grpc : : string : : npos ) { <nl> s . replace ( pos , search . length ( ) , replace ) ; <nl> pos + = replace . length ( ) ; <nl> } <nl> inline std : : string ReplaceAll ( std : : string s , const std : : string & search , <nl> } <nl> <nl> / / ReplacePrefix replaces from with to in s if search is a prefix of s . 
<nl> - inline bool ReplacePrefix ( std : : string * s , const std : : string & from , <nl> - const std : : string & to ) { <nl> + inline bool ReplacePrefix ( grpc : : string * s , const grpc : : string & from , <nl> + const grpc : : string & to ) { <nl> size_t start_pos = s - > find ( from ) ; <nl> - if ( start_pos = = std : : string : : npos | | start_pos ! = 0 ) { <nl> + if ( start_pos = = grpc : : string : : npos | | start_pos ! = 0 ) { <nl> return false ; <nl> } <nl> s - > replace ( start_pos , from . length ( ) , to ) ; <nl> inline bool ReplacePrefix ( std : : string * s , const std : : string & from , <nl> } <nl> <nl> / / CapitalizeFirst capitalizes the first char in a string . <nl> - inline std : : string CapitalizeFirst ( std : : string s ) { <nl> + inline grpc : : string CapitalizeFirst ( grpc : : string s ) { <nl> if ( s . empty ( ) ) { <nl> return s ; <nl> } <nl> inline std : : string CapitalizeFirst ( std : : string s ) { <nl> } <nl> <nl> / / RubyTypeOf updates a proto type to the required ruby equivalent . <nl> - inline std : : string RubyTypeOf ( const std : : string & a_type , <nl> - const std : : string & package ) { <nl> - std : : string res ( a_type ) ; <nl> + inline grpc : : string RubyTypeOf ( const grpc : : string & a_type , <nl> + const grpc : : string & package ) { <nl> + grpc : : string res ( a_type ) ; <nl> ReplacePrefix ( & res , package , " " ) ; / / remove the leading package if present <nl> ReplacePrefix ( & res , " . " , " " ) ; / / remove the leading . ( no package ) <nl> - if ( res . find ( ' . ' ) = = std : : string : : npos ) { <nl> + if ( res . find ( ' . ' ) = = grpc : : string : : npos ) { <nl> return res ; <nl> } else { <nl> - std : : vector < std : : string > prefixes_and_type = Split ( res , ' . ' ) ; <nl> + std : : vector < grpc : : string > prefixes_and_type = Split ( res , ' . ' ) ; <nl> for ( unsigned int i = 0 ; i < prefixes_and_type . size ( ) ; + + i ) { <nl> if ( i ! = 0 ) { <nl> res + = " : : " ; / / switch ' . ' to the ruby module delim <nl> mmm a / src / compiler / ruby_plugin . cc <nl> ppp b / src / compiler / ruby_plugin . cc <nl> <nl> * / <nl> <nl> / / Generates Ruby gRPC service interface out of Protobuf IDL . <nl> - / / <nl> - / / This is a Proto2 compiler plugin . See net / proto2 / compiler / proto / plugin . proto <nl> - / / and net / proto2 / compiler / public / plugin . h for more information on plugins . <nl> <nl> # include < memory > <nl> - # include < string > <nl> <nl> + # include " src / compiler / config . h " <nl> # include " src / compiler / ruby_generator . h " <nl> # include " src / compiler / ruby_generator_helpers - inl . h " <nl> - # include < google / protobuf / compiler / code_generator . h > <nl> - # include < google / protobuf / compiler / plugin . h > <nl> - # include < google / protobuf / io / coded_stream . h > <nl> - # include < google / protobuf / io / zero_copy_stream . h > <nl> - # include < google / protobuf / descriptor . 
h > <nl> <nl> - class RubyGrpcGenerator : public google : : protobuf : : compiler : : CodeGenerator { <nl> + class RubyGrpcGenerator : public grpc : : protobuf : : compiler : : CodeGenerator { <nl> public : <nl> RubyGrpcGenerator ( ) { } <nl> ~ RubyGrpcGenerator ( ) { } <nl> <nl> - bool Generate ( const google : : protobuf : : FileDescriptor * file , <nl> - const std : : string & parameter , <nl> - google : : protobuf : : compiler : : GeneratorContext * context , <nl> - std : : string * error ) const { <nl> - std : : string code = grpc_ruby_generator : : GetServices ( file ) ; <nl> + bool Generate ( const grpc : : protobuf : : FileDescriptor * file , <nl> + const grpc : : string & parameter , <nl> + grpc : : protobuf : : compiler : : GeneratorContext * context , <nl> + grpc : : string * error ) const { <nl> + grpc : : string code = grpc_ruby_generator : : GetServices ( file ) ; <nl> if ( code . size ( ) = = 0 ) { <nl> return true ; / / don ' t generate a file if there are no services <nl> } <nl> <nl> / / Get output file name . <nl> - std : : string file_name ; <nl> + grpc : : string file_name ; <nl> if ( ! grpc_ruby_generator : : ServicesFilename ( file , & file_name ) ) { <nl> return false ; <nl> } <nl> - std : : unique_ptr < google : : protobuf : : io : : ZeroCopyOutputStream > output ( <nl> + std : : unique_ptr < grpc : : protobuf : : io : : ZeroCopyOutputStream > output ( <nl> context - > Open ( file_name ) ) ; <nl> - google : : protobuf : : io : : CodedOutputStream coded_out ( output . get ( ) ) ; <nl> + grpc : : protobuf : : io : : CodedOutputStream coded_out ( output . get ( ) ) ; <nl> coded_out . WriteRaw ( code . data ( ) , code . size ( ) ) ; <nl> return true ; <nl> } <nl> class RubyGrpcGenerator : public google : : protobuf : : compiler : : CodeGenerator { <nl> <nl> int main ( int argc , char * argv [ ] ) { <nl> RubyGrpcGenerator generator ; <nl> - return google : : protobuf : : compiler : : PluginMain ( argc , argv , & generator ) ; <nl> + return grpc : : protobuf : : compiler : : PluginMain ( argc , argv , & generator ) ; <nl> } <nl>
Use grpc : : counterparts in ruby code generator
grpc/grpc
478568e7c9566598445410a8a112eaa02289dc23
2015-03-24T05:09:22Z
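The change above routes the Ruby generator through project-level aliases (grpc::string, grpc::protobuf::...) pulled in from src/compiler/config.h instead of naming std:: and google::protobuf:: types directly. The contents of config.h are not shown in this diff, so the sketch below only illustrates the general indirection pattern with made-up names (myproj); it is not the real gRPC header.

#include <cctype>
#include <iostream>
#include <string>

namespace myproj {
// One place to decide what "string" means for the whole code base;
// swapping the underlying type later touches only this alias.
using string = std::string;
}  // namespace myproj

// Generator-style code written only against the alias.
myproj::string Capitalize(myproj::string s) {
  if (!s.empty())
    s[0] = static_cast<char>(std::toupper(static_cast<unsigned char>(s[0])));
  return s;
}

int main() {
  std::cout << Capitalize("ruby_generator") << "\n";  // prints "Ruby_generator"
  return 0;
}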
mmm a / ports / libvorbis / portfile . cmake <nl> ppp b / ports / libvorbis / portfile . cmake <nl> if ( NOT EXISTS " $ { CURRENT_BUILDTREES_DIR } / src / . git " ) <nl> WORKING_DIRECTORY $ { DOWNLOADS } / vorbis . git <nl> LOGNAME worktree <nl> ) <nl> - message ( STATUS " Patching " ) <nl> - vcpkg_execute_required_process ( <nl> - COMMAND $ { GIT } apply $ { CMAKE_CURRENT_LIST_DIR } / 0001 - Add - vorbisenc . c - to - vorbis - library . patch <nl> - $ { CMAKE_CURRENT_LIST_DIR } / 0002 - Allow - deprecated - functions . patch <nl> - - - ignore - whitespace - - whitespace = fix <nl> - WORKING_DIRECTORY $ { CURRENT_BUILDTREES_DIR } / src <nl> - LOGNAME patch <nl> - ) <nl> endif ( ) <nl> + vcpkg_apply_patches ( SOURCE_PATH $ { CURRENT_BUILDTREES_DIR } / src <nl> + PATCHES <nl> + $ { CMAKE_CURRENT_LIST_DIR } / 0001 - Add - vorbisenc . c - to - vorbis - library . patch <nl> + $ { CMAKE_CURRENT_LIST_DIR } / 0002 - Allow - deprecated - functions . patch <nl> + ) <nl> <nl> file ( TO_NATIVE_PATH " $ { VCPKG_ROOT_DIR } / installed / $ { TARGET_TRIPLET } / include " OGG_INCLUDE ) <nl> file ( TO_NATIVE_PATH " $ { VCPKG_ROOT_DIR } / installed / $ { TARGET_TRIPLET } / lib / ogg . lib " OGG_LIB_REL ) <nl>
[ libvorbis ] Fixup missed commit in a2f3a4b
microsoft/vcpkg
7a080e79163d8e3bdf9eb33b5d761d2d0ca89696
2017-02-02T03:05:55Z
mmm a / browser / api / atom_api_protocol . cc <nl> ppp b / browser / api / atom_api_protocol . cc <nl> <nl> # include " content / public / browser / browser_thread . h " <nl> # include " net / url_request / url_request_context . h " <nl> # include " net / url_request / url_request_context_getter . h " <nl> + # include " net / url_request / url_request_error_job . h " <nl> + # include " net / url_request / url_request_file_job . h " <nl> # include " net / url_request / url_request_job_factory_impl . h " <nl> + # include " net / url_request / url_request_simple_job . h " <nl> # include " vendor / node / src / node . h " <nl> <nl> namespace atom { <nl> net : : URLRequestJobFactoryImpl * GetRequestJobFactory ( ) { <nl> return job_factory ; <nl> } <nl> <nl> + / / Ask JS which type of job it wants , and then delegate corresponding methods . <nl> + class AdapterRequestJob : public net : : URLRequestJob { <nl> + public : <nl> + AdapterRequestJob ( net : : URLRequest * request , <nl> + net : : NetworkDelegate * network_delegate ) <nl> + : URLRequestJob ( request , network_delegate ) { } <nl> + <nl> + protected : <nl> + virtual void Start ( ) OVERRIDE { <nl> + DCHECK ( ! real_job_ ) ; <nl> + real_job_ = new net : : URLRequestErrorJob ( <nl> + request ( ) , network_delegate ( ) , net : : ERR_NOT_IMPLEMENTED ) ; <nl> + real_job_ - > Start ( ) ; <nl> + } <nl> + <nl> + virtual void Kill ( ) OVERRIDE { <nl> + DCHECK ( real_job_ ) ; <nl> + real_job_ - > Kill ( ) ; <nl> + } <nl> + <nl> + virtual bool ReadRawData ( net : : IOBuffer * buf , <nl> + int buf_size , <nl> + int * bytes_read ) OVERRIDE { <nl> + DCHECK ( real_job_ ) ; <nl> + / / The ReadRawData is a protected method . <nl> + switch ( type_ ) { <nl> + case FILE_JOB : <nl> + return static_cast < net : : URLRequestFileJob * > ( real_job_ . get ( ) ) - > <nl> + ReadRawData ( buf , buf_size , bytes_read ) ; <nl> + default : <nl> + return net : : URLRequestJob : : ReadRawData ( buf , buf_size , bytes_read ) ; <nl> + } <nl> + } <nl> + <nl> + virtual bool IsRedirectResponse ( GURL * location , <nl> + int * http_status_code ) OVERRIDE { <nl> + DCHECK ( real_job_ ) ; <nl> + return real_job_ - > IsRedirectResponse ( location , http_status_code ) ; <nl> + } <nl> + <nl> + virtual net : : Filter * SetupFilter ( ) const OVERRIDE { <nl> + DCHECK ( real_job_ ) ; <nl> + return real_job_ - > SetupFilter ( ) ; <nl> + } <nl> + <nl> + virtual bool GetMimeType ( std : : string * mime_type ) const OVERRIDE { <nl> + DCHECK ( real_job_ ) ; <nl> + return real_job_ - > GetMimeType ( mime_type ) ; <nl> + } <nl> + <nl> + virtual bool GetCharset ( std : : string * charset ) OVERRIDE { <nl> + DCHECK ( real_job_ ) ; <nl> + return real_job_ - > GetCharset ( charset ) ; <nl> + } <nl> + <nl> + private : <nl> + enum JOB_TYPE { <nl> + ERROR_JOB , <nl> + STRING_JOB , <nl> + FILE_JOB , <nl> + } ; <nl> + <nl> + void GetJobTypeInUI ( ) { <nl> + } <nl> + <nl> + void CreateJobAndStart ( ) { <nl> + } <nl> + <nl> + scoped_refptr < net : : URLRequestJob > real_job_ ; <nl> + <nl> + JOB_TYPE type_ ; <nl> + <nl> + DISALLOW_COPY_AND_ASSIGN ( AdapterRequestJob ) ; <nl> + } ; <nl> + <nl> + / / Always return the same AdapterRequestJob for all requests , because the <nl> + / / content API needs the ProtocolHandler to return a job immediately , and <nl> + / / getting the real job from the JS requires asynchronous calls , so we have <nl> + / / to create an adapter job first . 
<nl> class AdapterProtocolHandler <nl> : public net : : URLRequestJobFactory : : ProtocolHandler { <nl> public : <nl> class AdapterProtocolHandler <nl> virtual net : : URLRequestJob * MaybeCreateJob ( <nl> net : : URLRequest * request , <nl> net : : NetworkDelegate * network_delegate ) const OVERRIDE { <nl> - return NULL ; <nl> + return new AdapterRequestJob ( request , network_delegate ) ; <nl> } <nl> <nl> private : <nl>
Return AdapterRequestJob when creating job .
electron/electron
f63661256fe07f18b14ade620f69b47f8487440b
2013-08-24T11:33:23Z
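The commit comment above spells out the constraint: the protocol handler must hand back a job object synchronously, while the real job type is only known after an asynchronous round-trip to JS, so an adapter job is returned first and delegates to the concrete job later. Below is a stand-alone sketch of that adapter/delegation shape in plain C++; the class names are illustrative and this is not the Chromium net:: API.

#include <iostream>
#include <memory>
#include <string>

class Job {
 public:
  virtual ~Job() = default;
  virtual void Start() = 0;
  virtual std::string MimeType() const = 0;
};

class ErrorJob : public Job {
 public:
  void Start() override { std::cout << "ErrorJob: not implemented\n"; }
  std::string MimeType() const override { return ""; }
};

class FileJob : public Job {
 public:
  explicit FileJob(std::string path) : path_(std::move(path)) {}
  void Start() override { std::cout << "FileJob: reading " << path_ << "\n"; }
  std::string MimeType() const override { return "text/plain"; }

 private:
  std::string path_;
};

// Handed to the caller immediately; picks the real job only in Start().
class AdapterJob : public Job {
 public:
  void Start() override {
    // In the real code this decision would come back from an async callback.
    real_ = std::make_unique<FileJob>("/tmp/example.txt");
    real_->Start();
  }
  std::string MimeType() const override {
    return real_ ? real_->MimeType() : "";
  }

 private:
  std::unique_ptr<Job> real_;
};

int main() {
  std::unique_ptr<Job> job = std::make_unique<AdapterJob>();  // returned synchronously
  job->Start();
  std::cout << "mime: " << job->MimeType() << "\n";
  return 0;
}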
mmm a / src / python / grpcio / tests / _runner . py <nl> ppp b / src / python / grpcio / tests / _runner . py <nl> <nl> - # Copyright 2015 , Google Inc . <nl> + # Copyright 2015 - 2016 , Google Inc . <nl> # All rights reserved . <nl> # <nl> # Redistribution and use in source and binary forms , with or without <nl> class Runner ( object ) : <nl> <nl> def run ( self , suite ) : <nl> " " " See setuptools ' test_runner setup argument for information . " " " <nl> + # only run test cases with id starting with given prefix <nl> + testcase_filter = os . getenv ( ' GPRC_PYTHON_TESTRUNNER_FILTER ' ) <nl> + filtered_cases = [ ] <nl> + for case in _loader . iterate_suite_cases ( suite ) : <nl> + if not testcase_filter or case . id ( ) . startswith ( testcase_filter ) : <nl> + filtered_cases . append ( case ) <nl> + <nl> # Ensure that every test case has no collision with any other test case in <nl> # the augmented results . <nl> augmented_cases = [ AugmentedCase ( case , uuid . uuid4 ( ) ) <nl> - for case in _loader . iterate_suite_cases ( suite ) ] <nl> + for case in filtered_cases ] <nl> case_id_by_case = dict ( ( augmented_case . case , augmented_case . id ) <nl> for augmented_case in augmented_cases ) <nl> result_out = StringIO . StringIO ( ) <nl> new file mode 100644 <nl> index 00000000000 . . 388d040d5ca <nl> mmm / dev / null <nl> ppp b / src / python / grpcio / tests / tests . json <nl> <nl> + [ <nl> + " _base_interface_test . AsyncEasyTest " , <nl> + " _base_interface_test . AsyncPeasyTest " , <nl> + " _base_interface_test . SyncEasyTest " , <nl> + " _base_interface_test . SyncPeasyTest " , <nl> + " _beta_features_test . BetaFeaturesTest " , <nl> + " _beta_features_test . ContextManagementAndLifecycleTest " , <nl> + " _channel_test . ChannelTest " , <nl> + " _connectivity_channel_test . ChannelConnectivityTest " , <nl> + " _core_over_links_base_interface_test . AsyncEasyTest " , <nl> + " _core_over_links_base_interface_test . AsyncPeasyTest " , <nl> + " _core_over_links_base_interface_test . SyncEasyTest " , <nl> + " _core_over_links_base_interface_test . SyncPeasyTest " , <nl> + " _crust_over_core_face_interface_test . DynamicInvokerBlockingInvocationInlineServiceTest " , <nl> + " _crust_over_core_face_interface_test . DynamicInvokerEventInvocationSynchronousEventServiceTest " , <nl> + " _crust_over_core_face_interface_test . DynamicInvokerFutureInvocationAsynchronousEventServiceTest " , <nl> + " _crust_over_core_face_interface_test . GenericInvokerBlockingInvocationInlineServiceTest " , <nl> + " _crust_over_core_face_interface_test . GenericInvokerEventInvocationSynchronousEventServiceTest " , <nl> + " _crust_over_core_face_interface_test . GenericInvokerFutureInvocationAsynchronousEventServiceTest " , <nl> + " _crust_over_core_face_interface_test . MultiCallableInvokerBlockingInvocationInlineServiceTest " , <nl> + " _crust_over_core_face_interface_test . MultiCallableInvokerEventInvocationSynchronousEventServiceTest " , <nl> + " _crust_over_core_face_interface_test . MultiCallableInvokerFutureInvocationAsynchronousEventServiceTest " , <nl> + " _crust_over_core_over_links_face_interface_test . DynamicInvokerBlockingInvocationInlineServiceTest " , <nl> + " _crust_over_core_over_links_face_interface_test . DynamicInvokerEventInvocationSynchronousEventServiceTest " , <nl> + " _crust_over_core_over_links_face_interface_test . DynamicInvokerFutureInvocationAsynchronousEventServiceTest " , <nl> + " _crust_over_core_over_links_face_interface_test . 
GenericInvokerBlockingInvocationInlineServiceTest " , <nl> + " _crust_over_core_over_links_face_interface_test . GenericInvokerEventInvocationSynchronousEventServiceTest " , <nl> + " _crust_over_core_over_links_face_interface_test . GenericInvokerFutureInvocationAsynchronousEventServiceTest " , <nl> + " _crust_over_core_over_links_face_interface_test . MultiCallableInvokerBlockingInvocationInlineServiceTest " , <nl> + " _crust_over_core_over_links_face_interface_test . MultiCallableInvokerEventInvocationSynchronousEventServiceTest " , <nl> + " _crust_over_core_over_links_face_interface_test . MultiCallableInvokerFutureInvocationAsynchronousEventServiceTest " , <nl> + " _face_interface_test . DynamicInvokerBlockingInvocationInlineServiceTest " , <nl> + " _face_interface_test . DynamicInvokerEventInvocationSynchronousEventServiceTest " , <nl> + " _face_interface_test . DynamicInvokerFutureInvocationAsynchronousEventServiceTest " , <nl> + " _face_interface_test . GenericInvokerBlockingInvocationInlineServiceTest " , <nl> + " _face_interface_test . GenericInvokerEventInvocationSynchronousEventServiceTest " , <nl> + " _face_interface_test . GenericInvokerFutureInvocationAsynchronousEventServiceTest " , <nl> + " _face_interface_test . MultiCallableInvokerBlockingInvocationInlineServiceTest " , <nl> + " _face_interface_test . MultiCallableInvokerEventInvocationSynchronousEventServiceTest " , <nl> + " _face_interface_test . MultiCallableInvokerFutureInvocationAsynchronousEventServiceTest " , <nl> + " _implementations_test . ChannelCredentialsTest " , <nl> + " _insecure_interop_test . InsecureInteropTest " , <nl> + " _intermediary_low_test . CancellationTest " , <nl> + " _intermediary_low_test . EchoTest " , <nl> + " _intermediary_low_test . ExpirationTest " , <nl> + " _intermediary_low_test . LonelyClientTest " , <nl> + " _later_test . LaterTest " , <nl> + " _logging_pool_test . LoggingPoolTest " , <nl> + " _lonely_invocation_link_test . LonelyInvocationLinkTest " , <nl> + " _low_test . HangingServerShutdown " , <nl> + " _low_test . InsecureServerInsecureClient " , <nl> + " _not_found_test . NotFoundTest " , <nl> + " _sanity_test . Sanity " , <nl> + " _secure_interop_test . SecureInteropTest " , <nl> + " _transmission_test . RoundTripTest " , <nl> + " _transmission_test . TransmissionTest " , <nl> + " _utilities_test . ChannelConnectivityTest " , <nl> + " beta_python_plugin_test . PythonPluginTest " , <nl> + " cygrpc_test . InsecureServerInsecureClient " , <nl> + " cygrpc_test . SecureServerSecureClient " , <nl> + " cygrpc_test . TypeSmokeTest " <nl> + ] <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . 2f88fa04122 <nl> mmm / dev / null <nl> ppp b / src / python / grpcio / tests / unit / _sanity / __init__ . py <nl> <nl> + # Copyright 2016 , Google Inc . <nl> + # All rights reserved . <nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions are <nl> + # met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . <nl> + # * Redistributions in binary form must reproduce the above <nl> + # copyright notice , this list of conditions and the following disclaimer <nl> + # in the documentation and / or other materials provided with the <nl> + # distribution . <nl> + # * Neither the name of Google Inc . 
nor the names of its <nl> + # contributors may be used to endorse or promote products derived from <nl> + # this software without specific prior written permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + <nl> new file mode 100644 <nl> index 00000000000 . . 0a5a715c0e1 <nl> mmm / dev / null <nl> ppp b / src / python / grpcio / tests / unit / _sanity / _sanity_test . py <nl> <nl> + # Copyright 2016 , Google Inc . <nl> + # All rights reserved . <nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions are <nl> + # met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . <nl> + # * Redistributions in binary form must reproduce the above <nl> + # copyright notice , this list of conditions and the following disclaimer <nl> + # in the documentation and / or other materials provided with the <nl> + # distribution . <nl> + # * Neither the name of Google Inc . nor the names of its <nl> + # contributors may be used to endorse or promote products derived from <nl> + # this software without specific prior written permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + import json <nl> + import unittest <nl> + <nl> + import tests <nl> + <nl> + <nl> + class Sanity ( unittest . TestCase ) : <nl> + <nl> + def testTestsJsonUpToDate ( self ) : <nl> + " " " Autodiscovers all test suites and checks that tests . json is up to date " " " <nl> + loader = tests . Loader ( ) <nl> + loader . loadTestsFromNames ( [ ' tests ' ] ) <nl> + test_suite_names = [ <nl> + test_case_class . id ( ) . rsplit ( ' . ' , 1 ) [ 0 ] <nl> + for test_case_class in tests . _loader . iterate_suite_cases ( loader . 
suite ) ] <nl> + test_suite_names = sorted ( set ( test_suite_names ) ) <nl> + <nl> + with open ( ' src / python / grpcio / tests / tests . json ' ) as tests_json_file : <nl> + tests_json = json . load ( tests_json_file ) <nl> + self . assertListEqual ( test_suite_names , tests_json ) <nl> + <nl> + <nl> + if __name__ = = ' __main__ ' : <nl> + unittest . main ( verbosity = 2 ) <nl> mmm a / tools / run_tests / build_python . sh <nl> ppp b / tools / run_tests / build_python . sh <nl> export GRPC_PYTHON_ENABLE_CYTHON_TRACING = 1 <nl> tox - - notest <nl> <nl> $ ROOT / . tox / py27 / bin / python $ ROOT / setup . py build <nl> + $ ROOT / . tox / py27 / bin / python $ ROOT / setup . py build_py <nl> + $ ROOT / . tox / py27 / bin / python $ ROOT / setup . py gather - - test <nl> mmm a / tools / run_tests / run_python . sh <nl> ppp b / tools / run_tests / run_python . sh <nl> export LDFLAGS = " - L $ ROOT / libs / $ CONFIG " <nl> export GRPC_PYTHON_BUILD_WITH_CYTHON = 1 <nl> export GRPC_PYTHON_ENABLE_CYTHON_TRACING = 1 <nl> <nl> - tox <nl> + if [ " $ CONFIG " = " gcov " ] <nl> + then <nl> + tox <nl> + else <nl> + $ ROOT / . tox / py27 / bin / python $ ROOT / setup . py test <nl> + fi <nl> <nl> mkdir - p $ ROOT / reports <nl> rm - rf $ ROOT / reports / python - coverage <nl> mmm a / tools / run_tests / run_tests . py <nl> ppp b / tools / run_tests / run_tests . py <nl> def configure ( self , config , args ) : <nl> _check_compiler ( self . args . compiler , [ ' default ' ] ) <nl> <nl> def test_specs ( self ) : <nl> + # load list of known test suites <nl> + with open ( ' src / python / grpcio / tests / tests . json ' ) as tests_json_file : <nl> + tests_json = json . load ( tests_json_file ) <nl> environment = dict ( _FORCE_ENVIRON_FOR_WRAPPERS ) <nl> environment [ ' PYVER ' ] = ' 2 . 7 ' <nl> - return [ self . config . job_spec ( <nl> - [ ' tools / run_tests / run_python . sh ' ] , <nl> - None , <nl> - environ = environment , <nl> - shortname = ' py . test ' , <nl> - timeout_seconds = 15 * 60 <nl> - ) ] <nl> + if self . config . build_config ! = ' gcov ' : <nl> + return [ self . config . job_spec ( <nl> + [ ' tools / run_tests / run_python . sh ' ] , <nl> + None , <nl> + environ = dict ( environment . items ( ) + <nl> + [ ( ' GPRC_PYTHON_TESTRUNNER_FILTER ' , suite_name ) ] ) , <nl> + shortname = ' py . test . % s ' % suite_name , <nl> + timeout_seconds = 5 * 60 ) <nl> + for suite_name in tests_json ] <nl> + else : <nl> + return [ self . config . job_spec ( [ ' tools / run_tests / run_python . sh ' ] , <nl> + None , <nl> + environ = environment , <nl> + shortname = ' py . test . coverage ' , <nl> + timeout_seconds = 15 * 60 ) ] <nl> + <nl> <nl> def pre_build_steps ( self ) : <nl> return [ ] <nl>
make python test suites run in parallel
grpc/grpc
072ebaa1537673e100b0a027d3935252da14fccb
2016-03-03T01:46:18Z
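The runner change above filters the discovered test cases by an id prefix taken from an environment variable, so run_tests.py can launch one job per suite listed in tests.json. The snippet below is a minimal stand-alone sketch of that prefix-filtering step, written in C++ purely for illustration; the real logic lives in the Python runner, and the variable name copies the spelling used in the diff.

#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>

// Keep only the test ids that start with the given prefix; an unset or
// empty prefix means "run everything", matching the Python behaviour.
std::vector<std::string> FilterByPrefix(const std::vector<std::string>& ids,
                                        const char* prefix) {
  if (prefix == nullptr || *prefix == '\0') return ids;
  std::vector<std::string> kept;
  for (const auto& id : ids)
    if (id.rfind(prefix, 0) == 0)  // id starts with prefix
      kept.push_back(id);
  return kept;
}

int main() {
  const std::vector<std::string> ids = {
      "_sanity_test.Sanity.testTestsJsonUpToDate",
      "_channel_test.ChannelTest.testConnect",
      "_low_test.InsecureServerInsecureClient.testEcho"};
  const char* prefix = std::getenv("GPRC_PYTHON_TESTRUNNER_FILTER");
  for (const auto& id : FilterByPrefix(ids, prefix)) std::cout << id << "\n";
  return 0;
}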
mmm a / docs / Arrays . rst <nl> ppp b / docs / Arrays . rst <nl> Bridging Rules and Terminology for all Types <nl> protocol _BridgedToObjectiveC { <nl> typealias _ObjectiveCType : AnyObject <nl> func _bridgeToObjectiveC ( ) - > _ObjectiveCType <nl> - class func _bridgeFromObjectiveC ( _ : _ObjectiveCType ) - > Self <nl> + class func _forceBridgeFromObjectiveC ( _ : _ObjectiveCType ) - > Self <nl> } <nl> <nl> . . Note : : classes and ` ` @ objc ` ` existentials shall not conform to <nl> Bridging Rules and Terminology for all Types <nl> <nl> protocol _ConditionallyBridgedToObjectiveC : _BridgedToObjectiveC { <nl> class func _isBridgedToObjectiveC ( ) - > Bool <nl> - class func _bridgeFromObjectiveCConditional ( _ : _ObjectiveCType ) - > Self ? <nl> + class func _conditionallyBridgeFromObjectiveC ( _ : _ObjectiveCType ) - > Self ? <nl> } <nl> <nl> Bridging from , or * bridging back * to , a type ` ` T ` ` conforming to <nl> ` ` _ConditionallyBridgedToObjectiveC ` ` when <nl> ` ` T . _isBridgedToObjectiveC ( ) ` ` is ` ` false ` ` is a user programming <nl> error that may be diagnosed at <nl> - runtime . ` ` _bridgeFromObjectiveCConditional ` ` can be used to attempt <nl> + runtime . ` ` _conditionallyBridgeFromObjectiveC ` ` can be used to attempt <nl> to bridge back , and return ` ` nil ` ` if the entire object cannot be <nl> bridged . <nl> <nl> Bridging Rules and Terminology for all Types <nl> then a value ` ` x ` ` of type ` ` T ` ` is * * bridged * * as <nl> ` ` T . _ObjectiveCType ` ` via ` ` x . _bridgeToObjectiveC ( ) ` ` , and an object <nl> ` ` y ` ` of ` ` T . _ObjectiveCType ` ` is * * bridged back * * to ` ` T ` ` via <nl> - ` ` T . _bridgeFromObjectiveC ( y ) ` ` <nl> + ` ` T . _forceBridgeFromObjectiveC ( y ) ` ` <nl> <nl> - Otherwise , ` ` T ` ` * * does not bridge * * to Objective - C <nl> <nl> mmm a / include / swift / AST / KnownIdentifiers . def <nl> ppp b / include / swift / AST / KnownIdentifiers . def <nl> <nl> # endif <nl> <nl> IDENTIFIER ( atIndexedSubscript ) <nl> - IDENTIFIER_WITH_NAME ( bridgeFromObjectiveC , " _bridgeFromObjectiveC " ) <nl> - IDENTIFIER_WITH_NAME ( bridgeFromObjectiveCConditional , <nl> - " _bridgeFromObjectiveCConditional " ) <nl> + IDENTIFIER_WITH_NAME ( forceBridgeFromObjectiveC , " _forceBridgeFromObjectiveC " ) <nl> + IDENTIFIER_WITH_NAME ( conditionallyBridgeFromObjectiveC , <nl> + " _conditionallyBridgeFromObjectiveC " ) <nl> IDENTIFIER_WITH_NAME ( bridgeToObjectiveC , " _bridgeToObjectiveC " ) <nl> IDENTIFIER ( dealloc ) <nl> IDENTIFIER ( deinit ) <nl> mmm a / include / swift / AST / KnownProtocols . def <nl> ppp b / include / swift / AST / KnownProtocols . def <nl> PROTOCOL ( Equatable ) <nl> PROTOCOL ( Hashable ) <nl> PROTOCOL ( Comparable ) <nl> <nl> - PROTOCOL ( _BridgedToObjectiveCType ) <nl> - PROTOCOL ( _ConditionallyBridgedToObjectiveCType ) <nl> + PROTOCOL ( _ObjectiveCBridgeable ) <nl> <nl> LITERAL_CONVERTIBLE_PROTOCOL ( ArrayLiteralConvertible ) <nl> LITERAL_CONVERTIBLE_PROTOCOL ( BooleanLiteralConvertible ) <nl> mmm a / include / swift / Serialization / ModuleFormat . h <nl> ppp b / include / swift / Serialization / ModuleFormat . h <nl> namespace index_block { <nl> Hashable , <nl> Comparable , <nl> <nl> - _BridgedToObjectiveCType , <nl> - _ConditionallyBridgedToObjectiveCType , <nl> + _ObjectiveCBridgeable <nl> } ; <nl> <nl> using KnownProtocolLayout = BCGenericRecordLayout < <nl> mmm a / lib / Sema / CSApply . cpp <nl> ppp b / lib / Sema / CSApply . cpp <nl> namespace { <nl> <nl> / / Find the _BridgedToObjectiveC protocol . 
<nl> auto bridgedProto <nl> - = tc . Context . getProtocol ( KnownProtocolKind : : _BridgedToObjectiveCType ) ; <nl> + = tc . Context . getProtocol ( KnownProtocolKind : : _ObjectiveCBridgeable ) ; <nl> <nl> / / Find the conformance of the value type to _BridgedToObjectiveC . <nl> Type valueType = value - > getType ( ) - > getRValueType ( ) ; <nl> namespace { <nl> / / / \ param valueType The value type to which we are bridging . <nl> / / / <nl> / / / \ returns a value of type \ c valueType that stores the bridged result . <nl> - Expr * bridgeFromObjectiveC ( Expr * object , Type valueType ) { <nl> + Expr * forceBridgeFromObjectiveC ( Expr * object , Type valueType ) { <nl> auto & tc = cs . getTypeChecker ( ) ; <nl> <nl> / / Find the _BridgedToObjectiveC protocol . <nl> auto bridgedProto <nl> - = tc . Context . getProtocol ( KnownProtocolKind : : _BridgedToObjectiveCType ) ; <nl> + = tc . Context . getProtocol ( KnownProtocolKind : : _ObjectiveCBridgeable ) ; <nl> <nl> / / Find the conformance of the value type to _BridgedToObjectiveC . <nl> ProtocolConformance * conformance = nullptr ; <nl> namespace { <nl> / / Form the call . <nl> return tc . callWitness ( TypeExpr : : createImplicit ( valueType , tc . Context ) , <nl> cs . DC , bridgedProto , conformance , <nl> - tc . Context . Id_bridgeFromObjectiveC , <nl> + tc . Context . Id_forceBridgeFromObjectiveC , <nl> { object } , <nl> diag : : broken_bridged_to_objc_protocol ) ; <nl> } <nl> namespace { <nl> / / / \ param valueType The value type to which we are bridging . <nl> / / / <nl> / / / \ returns a value of type \ c valueType ? that stores the bridged result . <nl> - Expr * bridgeFromObjectiveCConditional ( Expr * object , Type valueType ) { <nl> + Expr * conditionallyBridgeFromObjectiveC ( Expr * object , Type valueType ) { <nl> auto & tc = cs . getTypeChecker ( ) ; <nl> <nl> - / / Find the _ConditionallyBridgedToObjectiveC protocol . <nl> - auto conditionalBridgedProto <nl> - = tc . Context . getProtocol ( <nl> - KnownProtocolKind : : _ConditionallyBridgedToObjectiveCType ) ; <nl> + / / Find the _ObjCBridgeable protocol . <nl> + auto bridgeableProto <nl> + = tc . Context . getProtocol ( KnownProtocolKind : : _ObjectiveCBridgeable ) ; <nl> <nl> - / / Check whether the value type conforms to <nl> - / / _ConditionallyBridgedToObjectiveC . If so , we have a specific <nl> - / / entry point for conditional bridging . <nl> + / / Check whether the value type conforms to _ObjCBridgeable . If <nl> + / / so , we have a specific entry point for conditional bridging . <nl> ProtocolConformance * conditionalConformance = nullptr ; <nl> - if ( tc . conformsToProtocol ( valueType , conditionalBridgedProto , cs . DC , <nl> + if ( tc . conformsToProtocol ( valueType , bridgeableProto , cs . DC , <nl> & conditionalConformance ) ) { <nl> Expr * valueMetatype = TypeExpr : : createImplicit ( valueType , tc . Context ) ; <nl> Expr * args [ 1 ] = { object } ; <nl> - return tc . callWitness ( valueMetatype , cs . DC , conditionalBridgedProto , <nl> + return tc . callWitness ( valueMetatype , cs . DC , bridgeableProto , <nl> conditionalConformance , <nl> - tc . Context . Id_bridgeFromObjectiveCConditional , <nl> + tc . Context . Id_conditionallyBridgeFromObjectiveC , <nl> args , diag : : broken_bridged_to_objc_protocol ) ; <nl> } <nl> <nl> - Expr * result = bridgeFromObjectiveC ( object , valueType ) ; <nl> + Expr * result = forceBridgeFromObjectiveC ( object , valueType ) ; <nl> if ( ! 
result ) <nl> return nullptr ; <nl> <nl> namespace { <nl> result - > setImplicit ( true ) ; <nl> } <nl> <nl> - result = bridgeFromObjectiveCConditional ( result , destValueType ) ; <nl> + result = conditionallyBridgeFromObjectiveC ( result , destValueType ) ; <nl> if ( ! result ) <nl> return nullptr ; <nl> <nl> namespace { <nl> OptionalType : : get ( destValueType ) ) ; <nl> } <nl> } else { <nl> - result = bridgeFromObjectiveC ( result , destValueType ) ; <nl> + result = forceBridgeFromObjectiveC ( result , destValueType ) ; <nl> if ( ! result ) <nl> return nullptr ; <nl> <nl> Expr * ExprRewriter : : coerceToType ( Expr * expr , Type toType , <nl> } <nl> <nl> case ConversionRestrictionKind : : BridgeFromObjC : <nl> - return bridgeFromObjectiveC ( expr , toType ) ; <nl> + return forceBridgeFromObjectiveC ( expr , toType ) ; <nl> } <nl> } <nl> <nl> mmm a / lib / Sema / TypeCheckConstraints . cpp <nl> ppp b / lib / Sema / TypeCheckConstraints . cpp <nl> CheckedCastKind TypeChecker : : typeCheckCheckedCast ( Type fromType , <nl> return CheckedCastKind : : Unresolved ; <nl> } <nl> <nl> - / / / If the expression is a an implicit call to _bridgeFromObjectiveC or <nl> - / / / _bridgeFromObjectiveCConditional , returns the argument of that call . <nl> + / / / If the expression is a an implicit call to _forceBridgeFromObjectiveC or <nl> + / / / _conditionallyBridgeFromObjectiveC , returns the argument of that call . <nl> static Expr * lookThroughBridgeFromObjCCall ( ASTContext & ctx , Expr * expr ) { <nl> auto call = dyn_cast < CallExpr > ( expr ) ; <nl> if ( ! call | | ! call - > isImplicit ( ) ) <nl> static Expr * lookThroughBridgeFromObjCCall ( ASTContext & ctx , Expr * expr ) { <nl> <nl> auto callee = memberAccess - > getCalledValue ( ) ; <nl> if ( ! callee | | ! callee - > hasName ( ) | | <nl> - ( ! callee - > getFullName ( ) . matchesRef ( ctx . Id_bridgeFromObjectiveC ) & & <nl> + ( ! callee - > getFullName ( ) . matchesRef ( ctx . Id_forceBridgeFromObjectiveC ) & & <nl> ! callee - > getFullName ( ) . matchesRef ( <nl> - ctx . Id_bridgeFromObjectiveCConditional ) ) ) <nl> + ctx . Id_conditionallyBridgeFromObjectiveC ) ) ) <nl> return nullptr ; <nl> <nl> return call - > getArg ( ) ; <nl> ExplicitCastExpr * swift : : findForcedDowncast ( ASTContext & ctx , Expr * expr ) { <nl> isa < UnresolvedCheckedCastExpr > ( sub ) ) <nl> return cast < ExplicitCastExpr > ( sub ) ; <nl> <nl> - / / Otherwise , try to look through an implicit _bridgeFromObjectiveC ( ) call . <nl> + / / Otherwise , try to look through an implicit _forceBridgeFromObjectiveC ( ) call . <nl> if ( auto arg = lookThroughBridgeFromObjCCall ( ctx , sub ) ) { <nl> sub = skipOptionalEvalAndBinds ( arg ) ; <nl> if ( isa < ForcedCheckedCastExpr > ( sub ) | | <nl> mmm a / lib / Sema / TypeCheckType . cpp <nl> ppp b / lib / Sema / TypeCheckType . cpp <nl> Type TypeChecker : : getBridgedToObjC ( const DeclContext * dc , Type type ) { <nl> <nl> / / Retrieve the _BridgedToObjectiveC protocol . <nl> auto bridgedProto <nl> - = Context . getProtocol ( KnownProtocolKind : : _BridgedToObjectiveCType ) ; <nl> + = Context . getProtocol ( KnownProtocolKind : : _ObjectiveCBridgeable ) ; <nl> if ( ! bridgedProto ) <nl> return nullptr ; <nl> <nl> mmm a / stdlib / core / ArrayBridge . swift <nl> ppp b / stdlib / core / ArrayBridge . swift <nl> public func _arrayBridgeFromObjectiveCConditional < Base , BridgesToDerived > ( <nl> ElementwiseBridging : <nl> do { <nl> for object : Base in source { <nl> - let value = Swift . 
_bridgeFromObjectiveCConditional ( <nl> + let value = Swift . _conditionallyBridgeFromObjectiveC ( <nl> unsafeBitCast ( object , AnyObject . self ) , BridgesToDerived . self ) <nl> if _slowPath ( value = = nil ) { <nl> break ElementwiseBridging <nl> mmm a / stdlib / core / BridgeObjectiveC . swift <nl> ppp b / stdlib / core / BridgeObjectiveC . swift <nl> public func withUnsafePointers < A0 , A1 , A2 , Result > ( <nl> } <nl> <nl> / / / A Swift Array or Dictionary of types conforming to <nl> - / / / ` _BridgedToObjectiveCType ` can be passed to ObjectiveC as an NSArray or <nl> + / / / ` _ObjectiveCBridgeable ` can be passed to ObjectiveC as an NSArray or <nl> / / / NSDictionary , respectively . The elements of the resulting NSArray <nl> / / / or NSDictionary will be the result of calling ` _bridgeToObjectiveC ` <nl> / / / on each elmeent of the source container . <nl> - public protocol _BridgedToObjectiveCType { <nl> + public protocol _ObjectiveCBridgeable { <nl> typealias _ObjectiveCType : AnyObject <nl> <nl> - / / Workaround : right now protocol witness tables don ' t include associated <nl> - / / types , so we can not find ' _ObjectiveCType ' from them . <nl> + / / / Return true iff instances of ` Self ` can be converted to <nl> + / / / Objective - C . Even if this method returns ` true ` , A given <nl> + / / / instance of ` Self . _ObjectiveCType ` may , or may not , convert <nl> + / / / successfully to ` Self ` ; for example , an ` NSArray ` will only <nl> + / / / convert successfully to ` [ String ] ` if it contains only <nl> + / / / ` NSString ` \ s . <nl> + class func _isBridgedToObjectiveC ( ) - > Bool <nl> + <nl> + / / _getObjectiveCType is a workaround : right now protocol witness <nl> + / / tables don ' t include associated types , so we can not find <nl> + / / ' _ObjectiveCType . self ' from them . <nl> + <nl> + / / / Must return ` _ObjectiveCType . self ` . <nl> class func _getObjectiveCType ( ) - > Any . Type <nl> <nl> + / / / Convert ` self ` to Objective - C <nl> func _bridgeToObjectiveC ( ) - > _ObjectiveCType <nl> <nl> / / / Bridge from an Objective - C object of the bridged class type to a <nl> public protocol _BridgedToObjectiveCType { <nl> / / / via as ) , and may defer complete checking until later . For <nl> / / / example , when bridging from NSArray to Array < T > , we can defer <nl> / / / the checking for the individual elements of the array . <nl> - class func _bridgeFromObjectiveC ( source : _ObjectiveCType ) - > Self <nl> - } <nl> - <nl> - / / / Whether a given type conforming to this protocol bridges to <nl> - / / / ObjectiveC is only knowable at runtime . Array < T > is an example ; <nl> - / / / it bridges to ObjectiveC iff T does . <nl> - public protocol _ConditionallyBridgedToObjectiveCType : <nl> - _BridgedToObjectiveCType { <nl> - class func _isBridgedToObjectiveC ( ) - > Bool <nl> + class func _forceBridgeFromObjectiveC ( source : _ObjectiveCType ) - > Self <nl> <nl> / / / Try to bridge from an Objective - C object of the bridged class <nl> / / / type to a value of the Self type . <nl> public protocol _ConditionallyBridgedToObjectiveCType : <nl> / / / <nl> / / / Returns the bridged value if bridging succeeded , nil if bridging <nl> / / / did not succeed . <nl> - class func _bridgeFromObjectiveCConditional ( source : _ObjectiveCType ) - > Self ? <nl> + class func _conditionallyBridgeFromObjectiveC ( <nl> + source : _ObjectiveCType <nl> + ) - > Self ? 
<nl> } <nl> <nl> / / = = = mmm Bridging facilities written in Objective - C mmmmmmmmmmmmmmmmmmmmm - - = = = / / <nl> public protocol _ConditionallyBridgedToObjectiveCType : <nl> / / / <nl> / / / - If ` T ` is a class type , it is alaways bridged verbatim , the function <nl> / / / returns ` x ` ; <nl> - / / / - otherwise , ` T ` conforms to ` _BridgedToObjectiveCType ` : <nl> - / / / + if ` T ` conforms to ` _ConditionallyBridgedToObjectiveCType ` and <nl> - / / / ` T . _isBridgedToObjectiveC ( ) ` returns ` false ` , then the result is empty ; <nl> + / / / <nl> + / / / - otherwise , ` T ` conforms to ` _ObjectiveCBridgeable ` : <nl> + / / / + if ` T . _isBridgedToObjectiveC ( ) ` returns ` false ` , then the <nl> + / / / result is empty ; <nl> / / / + otherwise , returns the result of ` x . _bridgeToObjectiveC ( ) ` ; <nl> + / / / <nl> / / / - otherwise , the result is empty . <nl> public func _bridgeToObjectiveC < T > ( x : T ) - > AnyObject ? { <nl> if _fastPath ( _isClassOrObjCExistential ( T . self ) ) { <nl> func _bridgeNonVerbatimToObjectiveC < T > ( x : T ) - > AnyObject ? <nl> / / / - If ` T ` is a class type : <nl> / / / - if the dynamic type of ` x ` is ` T ` or a subclass of it , it is bridged <nl> / / / verbatim , the function returns ` x ` ; <nl> - / / / - otherwise , if ` T ` conforms to ` _BridgedToObjectiveCType ` : <nl> + / / / - otherwise , if ` T ` conforms to ` _ObjectiveCBridgeable ` : <nl> / / / + if the dynamic type of ` x ` is not ` T . _getObjectiveCType ( ) ` <nl> / / / or a subclass of it , trap <nl> - / / / + otherwise , returns the result of ` T . _bridgeFromObjectiveC ( x ) ` ; <nl> + / / / + otherwise , returns the result of ` T . _forceBridgeFromObjectiveC ( x ) ` ; <nl> / / / - otherwise , trap <nl> - public func _bridgeFromObjectiveC < T > ( x : AnyObject , _ : T . Type ) - > T { <nl> + public func _forceBridgeFromObjectiveC < T > ( x : AnyObject , _ : T . Type ) - > T { <nl> if _fastPath ( _isClassOrObjCExistential ( T . self ) ) { <nl> return x as T <nl> } <nl> public func _bridgeFromObjectiveC < T > ( x : AnyObject , _ : T . Type ) - > T { <nl> / / / - If ` T ` is a class type : <nl> / / / - if the dynamic type of ` x ` is ` T ` or a subclass of it , it is bridged <nl> / / / verbatim , the function returns ` x ` ; <nl> - / / / - otherwise , if ` T ` conforms to ` _BridgedToObjectiveCType ` : <nl> - / / / + if ` T ` conforms to ` _ConditionallyBridgedToObjectiveCType ` and <nl> - / / / ` T . _isBridgedToObjectiveC ( ) ` returns ` false ` , then the result is empty ; <nl> + / / / - otherwise , if ` T ` conforms to ` _ObjectiveCBridgeable ` : <nl> + / / / + if ` T . _isBridgedToObjectiveC ( ) ` returns ` false ` , then the result is <nl> + / / / empty ; <nl> / / / + otherwise , if the dynamic type of ` x ` is not ` T . _getObjectiveCType ( ) ` <nl> / / / or a subclass of it , the result is empty ; <nl> / / / + otherwise , returns the result of <nl> - / / / ` T . _bridgeFromObjectiveCConditional ( x ) ` ; <nl> + / / / ` T . _conditionallyBridgeFromObjectiveC ( x ) ` ; <nl> / / / - otherwise , the result is empty . <nl> - public func _bridgeFromObjectiveCConditional < T > ( x : AnyObject , _ : T . Type ) - > T ? { <nl> + public func _conditionallyBridgeFromObjectiveC < T > ( <nl> + x : AnyObject , _ : T . Type <nl> + ) - > T ? { <nl> if _fastPath ( _isClassOrObjCExistential ( T . self ) ) { <nl> return x as ? T <nl> } <nl> func _bridgeNonVerbatimFromObjectiveCConditional < T > ( x : AnyObject , <nl> / / / representation . 
<nl> / / / <nl> / / / - If ` T ` is a class type , returns ` true ` ; <nl> - / / / - otherwise , ` T ` conforms to <nl> - / / / ` _ConditionallyBridgedToObjectiveCType ` , returns <nl> + / / / - otherwise , if ` T ` conforms to ` _ObjectiveCBridgeable ` , returns <nl> / / / ` T . _isBridgedToObjectiveC ( ) ` ; <nl> - / / / - otherwise , if ` T ` conforms to ` _BridgedToObjectiveCType ` , returns ` true ` . <nl> public func _isBridgedToObjectiveC < T > ( _ : T . Type ) - > Bool { <nl> if _fastPath ( _isClassOrObjCExistential ( T . self ) ) { <nl> return true <nl> public func _isBridgedToObjectiveC < T > ( _ : T . Type ) - > Bool { <nl> func _isBridgedNonVerbatimToObjectiveC < T > ( _ : T . Type ) - > Bool <nl> <nl> / / / A type that ' s bridged " verbatim " does not conform to <nl> - / / / ` _BridgedToObjectiveCType ` , and can have its bits reinterpreted as an <nl> + / / / ` _ObjectiveCBridgeable ` , and can have its bits reinterpreted as an <nl> / / / ` AnyObject ` . When this function returns true , the storage of an <nl> / / / ` Array < T > ` can be ` unsafeBitCast ` as an array of ` AnyObject ` . <nl> public func _isBridgedVerbatimToObjectiveC < T > ( _ : T . Type ) - > Bool { <nl> mmm a / stdlib / core / Dictionary . swift <nl> ppp b / stdlib / core / Dictionary . swift <nl> final class _NativeDictionaryStorageOwner < Key : Hashable , Value > <nl> } <nl> <nl> override func bridgingObjectForKey ( aKey : AnyObject , dummy : ( ) ) - > AnyObject ? { <nl> - let nativeKey = _bridgeFromObjectiveC ( aKey , Key . self ) <nl> + let nativeKey = _forceBridgeFromObjectiveC ( aKey , Key . self ) <nl> if let nativeValue = nativeStorage . maybeGet ( nativeKey ) { <nl> return _bridgeToObjectiveCUnconditional ( nativeValue ) <nl> } <nl> enum _VariantDictionaryStorage < Key : Hashable , Value > : <nl> var oldCocoaGenerator = _CocoaDictionaryGenerator ( cocoaDictionary ) <nl> while let ( key : AnyObject , value : AnyObject ) = oldCocoaGenerator . next ( ) { <nl> newNativeStorage . unsafeAddNew ( <nl> - key : _bridgeFromObjectiveC ( key , Key . self ) , <nl> - value : _bridgeFromObjectiveC ( value , Value . self ) ) <nl> + key : _forceBridgeFromObjectiveC ( key , Key . self ) , <nl> + value : _forceBridgeFromObjectiveC ( value , Value . self ) ) <nl> } <nl> newNativeStorage . count = cocoaDictionary . count <nl> <nl> enum _VariantDictionaryStorage < Key : Hashable , Value > : <nl> case . Cocoa ( let cocoaStorage ) : <nl> var ( anyObjectKey : AnyObject , anyObjectValue : AnyObject ) = <nl> cocoaStorage . assertingGet ( i . _cocoaIndex ) <nl> - let nativeKey = _bridgeFromObjectiveC ( anyObjectKey , Key . self ) <nl> - let nativeValue = _bridgeFromObjectiveC ( anyObjectValue , Value . self ) <nl> + let nativeKey = _forceBridgeFromObjectiveC ( anyObjectKey , Key . self ) <nl> + let nativeValue = _forceBridgeFromObjectiveC ( anyObjectValue , Value . self ) <nl> return ( nativeKey , nativeValue ) <nl> } <nl> } <nl> enum _VariantDictionaryStorage < Key : Hashable , Value > : <nl> / / FIXME : This assumes that Key and Value are bridged verbatim . <nl> let anyObjectKey : AnyObject = _bridgeToObjectiveCUnconditional ( key ) <nl> let anyObjectValue : AnyObject = cocoaStorage . assertingGet ( anyObjectKey ) <nl> - return _bridgeFromObjectiveC ( anyObjectValue , Value . self ) <nl> + return _forceBridgeFromObjectiveC ( anyObjectValue , Value . self ) <nl> } <nl> } <nl> <nl> enum _VariantDictionaryStorage < Key : Hashable , Value > : <nl> case . 
Cocoa ( let cocoaStorage ) : <nl> let anyObjectKey : AnyObject = _bridgeToObjectiveCUnconditional ( key ) <nl> if let anyObjectValue : AnyObject = cocoaStorage . maybeGet ( anyObjectKey ) { <nl> - return _bridgeFromObjectiveC ( anyObjectValue , Value . self ) <nl> + return _forceBridgeFromObjectiveC ( anyObjectValue , Value . self ) <nl> } <nl> return . None <nl> } <nl> enum _VariantDictionaryStorage < Key : Hashable , Value > : <nl> cocoaIndex . allKeys [ cocoaIndex . currentKeyIndex ] <nl> migrateDataToNativeStorage ( cocoaStorage ) <nl> nativeRemoveObjectForKey ( <nl> - _bridgeFromObjectiveC ( anyObjectKey , Key . self ) ) <nl> + _forceBridgeFromObjectiveC ( anyObjectKey , Key . self ) ) <nl> } <nl> } <nl> <nl> public struct DictionaryGenerator < Key : Hashable , Value > : GeneratorType { <nl> case . _Cocoa ( var cocoaGenerator ) : <nl> if let ( anyObjectKey : AnyObject , anyObjectValue : AnyObject ) = <nl> cocoaGenerator . next ( ) { <nl> - let nativeKey = _bridgeFromObjectiveC ( anyObjectKey , Key . self ) <nl> - let nativeValue = _bridgeFromObjectiveC ( anyObjectValue , Value . self ) <nl> + let nativeKey = _forceBridgeFromObjectiveC ( anyObjectKey , Key . self ) <nl> + let nativeValue = _forceBridgeFromObjectiveC ( anyObjectValue , Value . self ) <nl> return ( nativeKey , nativeValue ) <nl> } <nl> return . None <nl> public func = = < Key : Equatable , Value : Equatable > ( <nl> let optRhsValue : AnyObject ? = <nl> rhsCocoa . maybeGet ( _bridgeToObjectiveCUnconditional ( key ) ) <nl> if let rhsValue : AnyObject = optRhsValue { <nl> - if value = = _bridgeFromObjectiveC ( rhsValue , Value . self ) { <nl> + if value = = _forceBridgeFromObjectiveC ( rhsValue , Value . self ) { <nl> continue <nl> } <nl> } <nl> public func _dictionaryBridgeFromObjectiveCConditional < <nl> return nil <nl> } <nl> } else { <nl> - if let bridgedKey = _bridgeFromObjectiveCConditional ( <nl> + if let bridgedKey = _conditionallyBridgeFromObjectiveC ( <nl> _reinterpretCastToAnyObject ( key ) , SwiftKey . self ) { <nl> resultKey = bridgedKey <nl> } else { <nl> public func _dictionaryBridgeFromObjectiveCConditional < <nl> return nil <nl> } <nl> } else { <nl> - if let bridgedValue = _bridgeFromObjectiveCConditional ( <nl> + if let bridgedValue = _conditionallyBridgeFromObjectiveC ( <nl> _reinterpretCastToAnyObject ( value ) , SwiftValue . self ) { <nl> resultValue = bridgedValue <nl> } else { <nl> mmm a / stdlib / core / ImplicitlyUnwrappedOptional . swift <nl> ppp b / stdlib / core / ImplicitlyUnwrappedOptional . swift <nl> func _injectNothingIntoImplicitlyUnwrappedOptional < T > ( ) - > T ! { <nl> return . None <nl> } <nl> <nl> - extension ImplicitlyUnwrappedOptional : _ConditionallyBridgedToObjectiveCType { <nl> + extension ImplicitlyUnwrappedOptional : _ObjectiveCBridgeable { <nl> public static func _getObjectiveCType ( ) - > Any . Type { <nl> return Swift . _getBridgedObjectiveCType ( T . self ) ! <nl> } <nl> extension ImplicitlyUnwrappedOptional : _ConditionallyBridgedToObjectiveCType { <nl> } <nl> } <nl> <nl> - public static func _bridgeFromObjectiveC ( x : AnyObject ) - > T ! { <nl> - return Swift . _bridgeFromObjectiveC ( x , T . self ) <nl> + public static func _forceBridgeFromObjectiveC ( x : AnyObject ) - > T ! { <nl> + return Swift . _forceBridgeFromObjectiveC ( x , T . self ) <nl> } <nl> <nl> - public static func _bridgeFromObjectiveCConditional ( x : AnyObject ) - > T ! ? { <nl> - let bridged : T ? = Swift . _bridgeFromObjectiveCConditional ( x , T . 
self ) <nl> + public static func _conditionallyBridgeFromObjectiveC ( x : AnyObject ) - > T ! ? { <nl> + let bridged : T ? = Swift . _conditionallyBridgeFromObjectiveC ( x , T . self ) <nl> if let value = bridged { <nl> return value <nl> } <nl> mmm a / stdlib / objc / Foundation / Foundation . swift <nl> ppp b / stdlib / objc / Foundation / Foundation . swift <nl> class NSConstantString { } <nl> <nl> @ asmname ( " swift_convertStringToNSString " ) internal <nl> func _convertStringToNSString ( string : String ) - > NSString { <nl> - return String . _bridgeFromObjectiveC ( string ) <nl> + return String . _forceBridgeFromObjectiveC ( string ) <nl> } <nl> <nl> internal func _convertNSStringToString ( nsstring : NSString ) - > String { <nl> - return String . _bridgeFromObjectiveC ( nsstring ) <nl> + return String . _forceBridgeFromObjectiveC ( nsstring ) <nl> } <nl> <nl> extension NSString : StringLiteralConvertible { <nl> extension String { <nl> } <nl> } <nl> <nl> - extension String : _BridgedToObjectiveCType { <nl> + extension String : _ObjectiveCBridgeable { <nl> + public static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> public static func _getObjectiveCType ( ) - > Any . Type { <nl> return NSString . self <nl> } <nl> extension String : _BridgedToObjectiveCType { <nl> return unsafeBitCast ( _bridgeToObjectiveCImpl ( ) , NSString . self ) <nl> } <nl> <nl> - public static func _bridgeFromObjectiveC ( x : NSString ) - > String { <nl> + public static func _forceBridgeFromObjectiveC ( x : NSString ) - > String { <nl> return String ( _cocoaString : unsafeBitCast ( x , NSString . self ) ) <nl> } <nl> + <nl> + public static func _conditionallyBridgeFromObjectiveC ( <nl> + x : NSString <nl> + ) - > String ? { <nl> + return self . _forceBridgeFromObjectiveC ( x ) <nl> + } <nl> } <nl> <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> extension String : _BridgedToObjectiveCType { <nl> / / conversion to NSNumber is automatic ( auto - boxing ) , while conversion <nl> / / back to a specific numeric type requires a cast . <nl> / / FIXME : Incomplete list of types . <nl> - extension Int : _BridgedToObjectiveCType { <nl> + extension Int : _ObjectiveCBridgeable { <nl> + public static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> public init ( _ number : NSNumber ) { <nl> value = number . integerValue . value <nl> } <nl> extension Int : _BridgedToObjectiveCType { <nl> return NSNumber ( integer : self ) <nl> } <nl> <nl> - public static func _bridgeFromObjectiveC ( x : NSNumber ) - > Int { <nl> + public static func _forceBridgeFromObjectiveC ( x : NSNumber ) - > Int { <nl> return x . integerValue <nl> } <nl> + <nl> + public static func _conditionallyBridgeFromObjectiveC ( x : NSNumber ) - > Int ? { <nl> + return self . _forceBridgeFromObjectiveC ( x ) <nl> + } <nl> } <nl> <nl> - extension UInt : _BridgedToObjectiveCType { <nl> + extension UInt : _ObjectiveCBridgeable { <nl> + public static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> public init ( _ number : NSNumber ) { <nl> value = number . unsignedIntegerValue . value <nl> } <nl> extension UInt : _BridgedToObjectiveCType { <nl> return NSNumber ( unsignedInteger : Int ( self . value ) ) <nl> } <nl> <nl> - public static func _bridgeFromObjectiveC ( x : NSNumber ) - > UInt { <nl> + public static func _forceBridgeFromObjectiveC ( x : NSNumber ) - > UInt { <nl> return UInt ( x . unsignedIntegerValue . 
value ) <nl> } <nl> + public static func _conditionallyBridgeFromObjectiveC ( x : NSNumber ) - > UInt ? { <nl> + return self . _forceBridgeFromObjectiveC ( x ) <nl> + } <nl> } <nl> <nl> - extension Float : _BridgedToObjectiveCType { <nl> + extension Float : _ObjectiveCBridgeable { <nl> + public static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> public init ( _ number : NSNumber ) { <nl> self = number . floatValue <nl> } <nl> extension Float : _BridgedToObjectiveCType { <nl> return NSNumber ( float : self ) <nl> } <nl> <nl> - public static func _bridgeFromObjectiveC ( x : NSNumber ) - > Float { <nl> + public static func _forceBridgeFromObjectiveC ( x : NSNumber ) - > Float { <nl> return x . floatValue <nl> } <nl> + <nl> + public static func _conditionallyBridgeFromObjectiveC ( x : NSNumber ) - > Float ? { <nl> + return self . _forceBridgeFromObjectiveC ( x ) <nl> + } <nl> } <nl> <nl> - extension Double : _BridgedToObjectiveCType { <nl> + extension Double : _ObjectiveCBridgeable { <nl> + public static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> public init ( _ number : NSNumber ) { <nl> self = number . doubleValue <nl> } <nl> extension Double : _BridgedToObjectiveCType { <nl> return NSNumber ( double : self ) <nl> } <nl> <nl> - public static func _bridgeFromObjectiveC ( x : NSNumber ) - > Double { <nl> + public static func _forceBridgeFromObjectiveC ( x : NSNumber ) - > Double { <nl> return x . doubleValue <nl> } <nl> + <nl> + public static func _conditionallyBridgeFromObjectiveC ( <nl> + x : NSNumber <nl> + ) - > Double ? { <nl> + return self . _forceBridgeFromObjectiveC ( x ) <nl> + } <nl> } <nl> <nl> - extension Bool : _BridgedToObjectiveCType { <nl> + extension Bool : _ObjectiveCBridgeable { <nl> + public static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> public init ( _ number : NSNumber ) { <nl> if number . boolValue { self = true } <nl> else { self = false } <nl> extension Bool : _BridgedToObjectiveCType { <nl> return NSNumber ( bool : self ) <nl> } <nl> <nl> - public static func _bridgeFromObjectiveC ( x : NSNumber ) - > Bool { <nl> + public static func _forceBridgeFromObjectiveC ( x : NSNumber ) - > Bool { <nl> return x . boolValue <nl> } <nl> + <nl> + public static func _conditionallyBridgeFromObjectiveC ( x : NSNumber ) - > Bool ? { <nl> + return self . _forceBridgeFromObjectiveC ( x ) <nl> + } <nl> } <nl> <nl> / / CGFloat bridging . <nl> - extension CGFloat : _BridgedToObjectiveCType { <nl> + extension CGFloat : _ObjectiveCBridgeable { <nl> + public static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> public init ( _ number : NSNumber ) { <nl> self . native = CGFloat . NativeType ( number ) <nl> } <nl> extension CGFloat : _BridgedToObjectiveCType { <nl> return self . native . _bridgeToObjectiveC ( ) <nl> } <nl> <nl> - public static func _bridgeFromObjectiveC ( x : NSNumber ) - > CGFloat { <nl> - return CGFloat ( CGFloat . NativeType . _bridgeFromObjectiveC ( x ) ) <nl> + public static func _forceBridgeFromObjectiveC ( x : NSNumber ) - > CGFloat { <nl> + return CGFloat ( CGFloat . NativeType . _forceBridgeFromObjectiveC ( x ) ) <nl> + } <nl> + <nl> + public static func _conditionallyBridgeFromObjectiveC ( <nl> + x : NSNumber <nl> + ) - > CGFloat ? { <nl> + return self . 
_forceBridgeFromObjectiveC ( x ) <nl> } <nl> } <nl> <nl> extension NSArray : ArrayLiteralConvertible { <nl> / / / to Objective - C code as a method that accepts an ` NSArray ` . This operation <nl> / / / is referred to as a " forced conversion " in . . / . . / . . / docs / Arrays . rst <nl> public func _convertNSArrayToArray < T > ( source : NSArray ) - > [ T ] { <nl> - return Array . _bridgeFromObjectiveC ( source ) <nl> + return Array . _forceBridgeFromObjectiveC ( source ) <nl> } <nl> <nl> / / / The entry point for converting ` Array ` to ` NSArray ` in bridge <nl> public func _convertArrayToNSArray < T > ( arr : [ T ] ) - > NSArray { <nl> return arr . _bridgeToObjectiveC ( ) <nl> } <nl> <nl> - extension Array : _ConditionallyBridgedToObjectiveCType { <nl> + extension Array : _ObjectiveCBridgeable { <nl> public static func _isBridgedToObjectiveC ( ) - > Bool { <nl> return Swift . _isBridgedToObjectiveC ( T . self ) <nl> } <nl> extension Array : _ConditionallyBridgedToObjectiveCType { <nl> return unsafeBitCast ( self . _buffer . _asCocoaArray ( ) , NSArray . self ) <nl> } <nl> <nl> - public static func _bridgeFromObjectiveC ( source : NSArray ) - > Array { <nl> + public static func _forceBridgeFromObjectiveC ( source : NSArray ) - > Array { <nl> _precondition ( <nl> Swift . _isBridgedToObjectiveC ( T . self ) , <nl> " array element type is not bridged to Objective - C " ) <nl> extension Array : _ConditionallyBridgedToObjectiveCType { <nl> return _arrayBridgeFromObjectiveC ( anyObjectArr ) <nl> } <nl> <nl> - public static func _bridgeFromObjectiveCConditional ( source : NSArray ) <nl> + public static func _conditionallyBridgeFromObjectiveC ( source : NSArray ) <nl> - > Array ? { <nl> / / Construct the result array by conditionally bridging each element . <nl> var anyObjectArr <nl> public func _convertNSDictionaryToDictionary < <nl> Key : Hashable , Value > ( d : NSDictionary ) <nl> - > [ Key : Value ] { <nl> / / Note : there should be * a good justification * for doing something else <nl> - / / than just dispatching to ` _bridgeFromObjectiveC ` . <nl> - return Dictionary . _bridgeFromObjectiveC ( d ) <nl> + / / than just dispatching to ` _forceBridgeFromObjectiveC ` . <nl> + return Dictionary . _forceBridgeFromObjectiveC ( d ) <nl> } <nl> <nl> / / / The entry point for bridging ` Dictionary ` to ` NSDictionary ` in bridge <nl> public func _convertDictionaryToNSDictionary < Key , Value > ( <nl> } <nl> <nl> / / Dictionary < Key , Value > is conditionally bridged to NSDictionary <nl> - extension Dictionary : _ConditionallyBridgedToObjectiveCType { <nl> + extension Dictionary : _ObjectiveCBridgeable { <nl> public static func _getObjectiveCType ( ) - > Any . Type { <nl> return NSDictionary . self <nl> } <nl> extension Dictionary : _ConditionallyBridgedToObjectiveCType { <nl> return unsafeBitCast ( _bridgeToObjectiveCImpl ( ) , NSDictionary . self ) <nl> } <nl> <nl> - public static func _bridgeFromObjectiveC ( d : NSDictionary ) - > Dictionary { <nl> + public static func _forceBridgeFromObjectiveC ( d : NSDictionary ) - > Dictionary { <nl> if let result = [ Key : Value ] . _bridgeFromObjectiveCAdoptingNativeStorage ( <nl> d as AnyObject ) { <nl> return result <nl> extension Dictionary : _ConditionallyBridgedToObjectiveCType { <nl> ( anyObjectKey : AnyObject ! , anyObjectValue : AnyObject ! , <nl> stop : UnsafeMutablePointer < ObjCBool > ) in <nl> builder . add ( <nl> - key : Swift . _bridgeFromObjectiveC ( anyObjectKey , Key . self ) , <nl> - value : Swift . 
_bridgeFromObjectiveC ( anyObjectValue , Value . self ) ) <nl> + key : Swift . _forceBridgeFromObjectiveC ( anyObjectKey , Key . self ) , <nl> + value : Swift . _forceBridgeFromObjectiveC ( anyObjectValue , Value . self ) ) <nl> } <nl> return builder . take ( ) <nl> } <nl> <nl> - public static func _bridgeFromObjectiveCConditional ( <nl> + public static func _conditionallyBridgeFromObjectiveC ( <nl> x : NSDictionary <nl> ) - > Dictionary ? { <nl> let anyDict = x as [ NSObject : AnyObject ] <nl> mmm a / stdlib / objc / Foundation / NSValue . swift <nl> ppp b / stdlib / objc / Foundation / NSValue . swift <nl> <nl> / / <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> <nl> - extension NSRange : _BridgedToObjectiveCType { <nl> + extension NSRange : _ObjectiveCBridgeable { <nl> + public static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> public static func _getObjectiveCType ( ) - > Any . Type { <nl> return NSValue . self <nl> } <nl> extension NSRange : _BridgedToObjectiveCType { <nl> return NSValue ( range : self ) <nl> } <nl> <nl> - public static func _bridgeFromObjectiveC ( x : NSValue ) - > NSRange { <nl> + public static func _forceBridgeFromObjectiveC ( x : NSValue ) - > NSRange { <nl> return x . rangeValue <nl> } <nl> + <nl> + public static func _conditionallyBridgeFromObjectiveC ( <nl> + x : NSValue <nl> + ) - > NSRange ? { <nl> + return self . _forceBridgeFromObjectiveC ( x ) <nl> + } <nl> } <nl> mmm a / stdlib / runtime / Casting . cpp <nl> ppp b / stdlib / runtime / Casting . cpp <nl> extern " C " OpaqueExistentialContainer swift_stdlib_dynamicCastToExistential1 ( <nl> namespace { <nl> <nl> / / protocol _BridgedToObjectiveC { <nl> - struct _BridgedToObjectiveCWitnessTable { <nl> + struct _ObjectiveCBridgeableWitnessTable { <nl> / / typealias _ObjectiveCType : class <nl> const Metadata * ObjectiveCType ; <nl> <nl> + / / class func _isBridgedToObjectiveC ( ) - > bool <nl> + bool ( * isBridgedToObjectiveC ) ( const Metadata * value , const Metadata * T ) ; <nl> + <nl> / / class func _getObjectiveCType ( ) - > Any . Type <nl> const Metadata * ( * getObjectiveCType ) ( const Metadata * self , <nl> const Metadata * selfType ) ; <nl> <nl> / / func _bridgeToObjectiveC ( ) - > _ObjectiveCType <nl> HeapObject * ( * bridgeToObjectiveC ) ( OpaqueValue * self , const Metadata * Self ) ; <nl> - / / class func _bridgeFromObjectiveC ( x : _ObjectiveCType ) - > Self <nl> - OpaqueExistentialContainer ( * bridgeFromObjectiveC ) ( HeapObject * sourceValue , <nl> + / / class func _forceBridgeFromObjectiveC ( x : _ObjectiveCType ) - > Self <nl> + OpaqueExistentialContainer ( * forceBridgeFromObjectiveC ) ( HeapObject * sourceValue , <nl> const Metadata * self , <nl> const Metadata * selfType ) ; <nl> - } ; <nl> - / / } <nl> - <nl> - / / protocol _ConditionallyBridgedToObjectiveC { <nl> - struct _ConditionallyBridgedToObjectiveCWitnessTable { <nl> - / / My untrained eye can ' t find this offset in the generated LLVM IR , <nl> - / / but I do see it being applied in x86 assembly . It disappears <nl> - / / when inheritance from _BridgedToObjectiveC is removed . If it <nl> - / / presents any portability problems we can drop that inheritance <nl> - / / relationship . 
<nl> - const void * const probablyPointsAtBridgedToObjectiveCWitnessTable ; <nl> <nl> - / / class func _isBridgedToObjectiveC ( ) - > bool <nl> - bool ( * isBridgedToObjectiveC ) ( const Metadata * value , const Metadata * T ) ; <nl> - <nl> - / / class func _bridgeFromObjectiveCConditional ( x : _ObjectiveCType ) - > Self ? <nl> - OpaqueExistentialContainer ( * bridgeFromObjectiveCConditional ) ( <nl> + / / class func _conditionallyBridgeFromObjectiveC ( x : _ObjectiveCType ) - > Self ? <nl> + OpaqueExistentialContainer ( * conditionallyBridgeFromObjectiveC ) ( <nl> HeapObject * sourceValue , <nl> const Metadata * self , <nl> const Metadata * selfType ) ; <nl> struct _ConditionallyBridgedToObjectiveCWitnessTable { <nl> <nl> } / / unnamed namespace <nl> <nl> - extern " C " const ProtocolDescriptor _TMpSs24_BridgedToObjectiveCType ; <nl> - extern " C " const ProtocolDescriptor _TMpSs37_ConditionallyBridgedToObjectiveCType ; <nl> + extern " C " const ProtocolDescriptor _TMpSs21_ObjectiveCBridgeable ; <nl> <nl> / / = = = mmm Bridging helpers for the Swift stdlib mmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> / / Functions that must discover and possibly use an arbitrary type ' s <nl> / / conformance to a given protocol . See . . / core / BridgeObjectiveC . swift for <nl> / / documentation . <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> - static const _BridgedToObjectiveCWitnessTable * <nl> + static const _ObjectiveCBridgeableWitnessTable * <nl> findBridgeWitness ( const Metadata * T ) { <nl> - auto w = swift_conformsToProtocol ( T , & _TMpSs24_BridgedToObjectiveCType , nullptr ) ; <nl> - return reinterpret_cast < const _BridgedToObjectiveCWitnessTable * > ( w ) ; <nl> - } <nl> - <nl> - static const _ConditionallyBridgedToObjectiveCWitnessTable * <nl> - findConditionalBridgeWitness ( const Metadata * T ) { <nl> - auto w = swift_conformsToProtocol ( <nl> - T , & _TMpSs37_ConditionallyBridgedToObjectiveCType , nullptr ) ; <nl> - <nl> - return reinterpret_cast < <nl> - const _ConditionallyBridgedToObjectiveCWitnessTable * > ( w ) ; <nl> + auto w = swift_conformsToProtocol ( T , & _TMpSs21_ObjectiveCBridgeable , nullptr ) ; <nl> + return reinterpret_cast < const _ObjectiveCBridgeableWitnessTable * > ( w ) ; <nl> } <nl> <nl> static inline bool swift_isClassOrObjCExistentialImpl ( const Metadata * T ) { <nl> extern " C " HeapObject * swift_bridgeNonVerbatimToObjectiveC ( <nl> ) { <nl> assert ( ! swift_isClassOrObjCExistentialImpl ( T ) ) ; <nl> <nl> - auto const bridgeWitness = findBridgeWitness ( T ) ; <nl> - <nl> - if ( bridgeWitness ) { <nl> - if ( auto conditionalWitness = findConditionalBridgeWitness ( T ) ) { <nl> - if ( ! conditionalWitness - > isBridgedToObjectiveC ( T , T ) ) <nl> + if ( const auto * bridgeWitness = findBridgeWitness ( T ) ) { <nl> + if ( ! bridgeWitness - > isBridgedToObjectiveC ( T , T ) ) <nl> return nullptr ; <nl> - } <nl> auto result = bridgeWitness - > bridgeToObjectiveC ( value , T ) ; <nl> / / Witnesses take ' self ' at + 0 , so we still need to consume the + 1 argument . <nl> T - > vw_destroy ( value ) ; <nl> swift_bridgeNonVerbatimFromObjectiveC ( <nl> const Metadata * nativeType_ <nl> ) { <nl> / / Check if the type conforms to _BridgedToObjectiveC . 
<nl> - const auto * bridgeWitness = findBridgeWitness ( nativeType ) ; <nl> - if ( bridgeWitness ) { <nl> + if ( const auto * bridgeWitness = findBridgeWitness ( nativeType ) ) { <nl> / / if the type also conforms to _ConditionallyBridgedToObjectiveC , <nl> / / make sure it bridges at runtime <nl> - auto conditionalWitness = findConditionalBridgeWitness ( nativeType ) ; <nl> - if ( <nl> - conditionalWitness = = nullptr <nl> - | | conditionalWitness - > isBridgedToObjectiveC ( nativeType , nativeType ) <nl> - ) { <nl> + if ( bridgeWitness - > isBridgedToObjectiveC ( nativeType , nativeType ) ) { <nl> / / Check if sourceValue has the _ObjectiveCType type required by the <nl> / / protocol . <nl> const Metadata * objectiveCType = <nl> swift_bridgeNonVerbatimFromObjectiveC ( <nl> objectiveCType ) ) ; <nl> <nl> if ( sourceValueAsObjectiveCType ) { <nl> - / / The type matches . _bridgeFromObjectiveC returns ` Self ` , so <nl> + / / The type matches . _forceBridgeFromObjectiveC returns ` Self ` , so <nl> / / we can just return it directly . <nl> - return bridgeWitness - > bridgeFromObjectiveC ( <nl> + return bridgeWitness - > forceBridgeFromObjectiveC ( <nl> static_cast < HeapObject * > ( sourceValueAsObjectiveCType ) , <nl> nativeType , nativeType ) ; <nl> } <nl> swift_bridgeNonVerbatimFromObjectiveCConditional ( <nl> <nl> / / If the type also conforms to _ConditionallyBridgedToObjectiveC , <nl> / / use conditional bridging . <nl> - if ( auto conditionalWitness = findConditionalBridgeWitness ( nativeType ) ) { <nl> - return conditionalWitness - > bridgeFromObjectiveCConditional ( <nl> - static_cast < HeapObject * > ( sourceValueAsObjectiveCType ) , <nl> - nativeType , nativeType ) ; <nl> - } <nl> - <nl> - / / Perform direct bridging . _bridgeFromObjectiveC returns ` Self ` , so <nl> - / / we need to wrap it in an optional . <nl> - OpaqueExistentialContainer value <nl> - = bridgeWitness - > bridgeFromObjectiveC ( <nl> - static_cast < HeapObject * > ( sourceValueAsObjectiveCType ) , <nl> - nativeType , nativeType ) ; <nl> - <nl> - return _TFSs24_injectValueIntoOptionalU__FQ_GSqQ__ ( <nl> - reinterpret_cast < OpaqueValue * > ( & value ) , <nl> - nativeType ) ; <nl> + return bridgeWitness - > conditionallyBridgeFromObjectiveC ( <nl> + static_cast < HeapObject * > ( sourceValueAsObjectiveCType ) , <nl> + nativeType , nativeType ) ; <nl> } <nl> <nl> / / func isBridgedNonVerbatimToObjectiveC < T > ( x : T . Type ) - > Bool <nl> extern " C " bool swift_isBridgedNonVerbatimToObjectiveC ( <nl> assert ( ! swift_isClassOrObjCExistentialImpl ( T ) ) ; <nl> <nl> auto bridgeWitness = findBridgeWitness ( T ) ; <nl> - <nl> - if ( bridgeWitness ) { <nl> - auto conditionalWitness = findConditionalBridgeWitness ( T ) ; <nl> - return ! conditionalWitness | | <nl> - conditionalWitness - > isBridgedToObjectiveC ( value , T ) ; <nl> - } <nl> - <nl> - return false ; <nl> + return bridgeWitness & & bridgeWitness - > isBridgedToObjectiveC ( value , T ) ; <nl> } <nl> <nl> / / func isClassOrObjCExistential < T > ( x : T . Type ) - > Bool <nl> mmm a / test / Constraints / bridging . swift <nl> ppp b / test / Constraints / bridging . 
swift <nl> class BridgedClass : NSObject , NSCopying { <nl> <nl> class BridgedClassSub : BridgedClass { } <nl> <nl> - struct BridgedStruct : Hashable , _BridgedToObjectiveCType { <nl> + struct BridgedStruct : Hashable , _ObjectiveCBridgeable { <nl> var hashValue : Int { return 0 } <nl> <nl> + static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> static func _getObjectiveCType ( ) - > Any . Type { <nl> return BridgedClass . self <nl> } <nl> struct BridgedStruct : Hashable , _BridgedToObjectiveCType { <nl> return BridgedClass ( ) <nl> } <nl> <nl> - static func _bridgeFromObjectiveC ( x : BridgedClass ) - > BridgedStruct { <nl> + static func _forceBridgeFromObjectiveC ( x : BridgedClass ) - > BridgedStruct { <nl> return BridgedStruct ( ) <nl> } <nl> + <nl> + static func _conditionallyBridgeFromObjectiveC ( <nl> + x : BridgedClass <nl> + ) - > BridgedStruct ? { <nl> + return self . _forceBridgeFromObjectiveC ( x ) <nl> + } <nl> } <nl> <nl> func = = ( x : BridgedStruct , y : BridgedStruct ) - > Bool { return true } <nl> mmm a / test / Inputs / clang - importer - sdk / swift - modules / Foundation . swift <nl> ppp b / test / Inputs / clang - importer - sdk / swift - modules / Foundation . swift <nl> internal func _convertNSDictionaryToDictionary < K : NSObject , V : AnyObject > ( <nl> return Dictionary < K , V > ( ) <nl> } <nl> <nl> - extension String : _BridgedToObjectiveCType { <nl> + extension String : _ObjectiveCBridgeable { <nl> + public static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> public static func _getObjectiveCType ( ) - > Any . Type { <nl> return NSString . self <nl> } <nl> public func _bridgeToObjectiveC ( ) - > NSString { <nl> return NSString ( ) <nl> } <nl> - public static func _bridgeFromObjectiveC ( x : NSString ) - > String { <nl> + public static func _forceBridgeFromObjectiveC ( x : NSString ) - > String { <nl> _fatalError ( " implement " ) <nl> } <nl> + public static func _conditionallyBridgeFromObjectiveC ( <nl> + x : NSString <nl> + ) - > String ? { <nl> + return self . _forceBridgeFromObjectiveC ( x ) <nl> + } <nl> } <nl> <nl> - extension Int : _BridgedToObjectiveCType { <nl> + extension Int : _ObjectiveCBridgeable { <nl> + public static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> public static func _getObjectiveCType ( ) - > Any . Type { <nl> return NSNumber . self <nl> } <nl> public func _bridgeToObjectiveC ( ) - > NSNumber { <nl> return NSNumber ( ) <nl> } <nl> - public static func _bridgeFromObjectiveC ( x : NSNumber ) - > Int { <nl> + public static func _forceBridgeFromObjectiveC ( x : NSNumber ) - > Int { <nl> _fatalError ( " implement " ) <nl> } <nl> + public static func _conditionallyBridgeFromObjectiveC ( <nl> + x : NSNumber <nl> + ) - > Int ? { <nl> + return self . _forceBridgeFromObjectiveC ( x ) <nl> + } <nl> } <nl> <nl> - extension Array : _BridgedToObjectiveCType { <nl> + extension Array : _ObjectiveCBridgeable { <nl> + public static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> public static func _getObjectiveCType ( ) - > Any . Type { <nl> return NSArray . 
self <nl> } <nl> public func _bridgeToObjectiveC ( ) - > NSArray { <nl> return NSArray ( ) <nl> } <nl> - public static func _bridgeFromObjectiveC ( x : NSArray ) - > Array { <nl> + public static func _forceBridgeFromObjectiveC ( x : NSArray ) - > Array { <nl> return [ ] <nl> } <nl> + public static func _conditionallyBridgeFromObjectiveC ( <nl> + x : NSArray <nl> + ) - > Array ? { <nl> + return self . _forceBridgeFromObjectiveC ( x ) <nl> + } <nl> } <nl> <nl> - extension Dictionary : _BridgedToObjectiveCType { <nl> + extension Dictionary : _ObjectiveCBridgeable { <nl> + public static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> public static func _getObjectiveCType ( ) - > Any . Type { <nl> return NSDictionary . self <nl> } <nl> public func _bridgeToObjectiveC ( ) - > NSDictionary { <nl> return NSDictionary ( ) <nl> } <nl> - public static func _bridgeFromObjectiveC ( x : NSDictionary ) - > Dictionary { <nl> + public static func _forceBridgeFromObjectiveC ( x : NSDictionary ) - > Dictionary { <nl> return [ : ] <nl> } <nl> + public static func _conditionallyBridgeFromObjectiveC ( <nl> + x : NSDictionary <nl> + ) - > Dictionary ? { <nl> + return self . _forceBridgeFromObjectiveC ( x ) <nl> + } <nl> } <nl> <nl> - extension CGFloat : _BridgedToObjectiveCType { <nl> + extension CGFloat : _ObjectiveCBridgeable { <nl> + public static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> public static func _getObjectiveCType ( ) - > Any . Type { <nl> return NSNumber . self <nl> } <nl> public func _bridgeToObjectiveC ( ) - > NSNumber { <nl> return NSNumber ( ) <nl> } <nl> - public static func _bridgeFromObjectiveC ( x : NSNumber ) - > CGFloat { <nl> + public static func _forceBridgeFromObjectiveC ( x : NSNumber ) - > CGFloat { <nl> return CGFloat ( ) <nl> } <nl> + public static func _conditionallyBridgeFromObjectiveC ( <nl> + x : NSNumber <nl> + ) - > CGFloat ? { <nl> + return self . _forceBridgeFromObjectiveC ( x ) <nl> + } <nl> } <nl> mmm a / test / Interpreter / SDK / CoreGraphics_CGFloat . swift <nl> ppp b / test / Interpreter / SDK / CoreGraphics_CGFloat . swift <nl> func bridging ( ) { <nl> <nl> / / Array bridging . <nl> var arr : [ CGFloat ] = [ 3 . 14159 , 2 . 71818 ] <nl> - <nl> + <nl> / / Array - > NSArray <nl> / / CHECK - NEXT : ( <nl> / / CHECK - NEXT : " 3 . 14 <nl> mmm a / test / SILGen / Inputs / Foundation . swift <nl> ppp b / test / SILGen / Inputs / Foundation . swift <nl> func _convertNSDictionaryToDictionary < K : NSObject , V : AnyObject > ( <nl> return Dictionary < K , V > ( ) <nl> } <nl> <nl> - extension String : _BridgedToObjectiveCType { <nl> + extension String : _ObjectiveCBridgeable { <nl> + public static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> public static func _getObjectiveCType ( ) - > Any . Type { <nl> return NSString . self <nl> } <nl> public func _bridgeToObjectiveC ( ) - > NSString { <nl> return NSString ( ) <nl> } <nl> - public static func _bridgeFromObjectiveC ( x : NSString ) - > String { <nl> - fatal ( " implement " ) <nl> + public static func _forceBridgeFromObjectiveC ( x : NSString ) - > String { <nl> + fatalError ( " implement " ) <nl> + } <nl> + public static func _conditionallyBridgeFromObjectiveC ( <nl> + x : NSString <nl> + ) - > String ? { <nl> + return self . 
_forceBridgeFromObjectiveC ( x ) <nl> } <nl> } <nl> <nl> - extension Int : _BridgedToObjectiveCType { <nl> + extension Int : _ObjectiveCBridgeable { <nl> + public static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> public static func _getObjectiveCType ( ) - > Any . Type { <nl> return NSNumber . self <nl> } <nl> public func _bridgeToObjectiveC ( ) - > NSNumber { <nl> return NSNumber ( ) <nl> } <nl> - public static func _bridgeFromObjectiveC ( x : NSNumber ) - > Int { <nl> + public static func _forceBridgeFromObjectiveC ( x : NSNumber ) - > Int { <nl> fatal ( " implement " ) <nl> } <nl> + public static func _conditionallyBridgeFromObjectiveC ( <nl> + x : NSNumber <nl> + ) - > Int ? { <nl> + return self . _forceBridgeFromObjectiveC ( x ) <nl> + } <nl> } <nl> <nl> - extension Array : _ConditionallyBridgedToObjectiveCType { <nl> + extension Array : _ObjectiveCBridgeable { <nl> public static func _getObjectiveCType ( ) - > Any . Type { <nl> return NSArray . self <nl> } <nl> public func _bridgeToObjectiveC ( ) - > NSArray { <nl> return NSArray ( ) <nl> } <nl> - public static func _bridgeFromObjectiveC ( x : NSArray ) - > Array { <nl> + public static func _forceBridgeFromObjectiveC ( x : NSArray ) - > Array { <nl> fatal ( " implement " ) <nl> } <nl> - static func _bridgeFromObjectiveCConditional ( x : NSArray ) - > Array ? { <nl> + public static func _conditionallyBridgeFromObjectiveC ( x : NSArray ) - > Array ? { <nl> return nil <nl> } <nl> - static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + public static func _isBridgedToObjectiveC ( ) - > Bool { <nl> return Swift . _isBridgedToObjectiveC ( T . self ) <nl> } <nl> } <nl> <nl> - extension Dictionary : _ConditionallyBridgedToObjectiveCType { <nl> + extension Dictionary : _ObjectiveCBridgeable { <nl> public static func _getObjectiveCType ( ) - > Any . Type { <nl> return NSDictionary . self <nl> } <nl> public func _bridgeToObjectiveC ( ) - > NSDictionary { <nl> return NSDictionary ( ) <nl> } <nl> - public static func _bridgeFromObjectiveC ( x : NSDictionary ) - > Dictionary { <nl> + public static func _forceBridgeFromObjectiveC ( x : NSDictionary ) - > Dictionary { <nl> fatal ( " implement " ) <nl> } <nl> - static func _bridgeFromObjectiveCConditional ( x : NSDictionary ) - > Dictionary ? { <nl> + public static func _conditionallyBridgeFromObjectiveC ( x : NSDictionary ) - > Dictionary ? { <nl> return nil <nl> } <nl> - static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + public static func _isBridgedToObjectiveC ( ) - > Bool { <nl> return Swift . _isBridgedToObjectiveC ( T . self ) <nl> } <nl> } <nl> mmm a / test / SILGen / collection_downcast . swift <nl> ppp b / test / SILGen / collection_downcast . swift <nl> class BridgedObjC : NSObject { } <nl> <nl> func = = ( x : BridgedObjC , y : BridgedObjC ) - > Bool { return true } <nl> <nl> - struct BridgedSwift : Hashable , _BridgedToObjectiveCType { <nl> + struct BridgedSwift : Hashable , _ObjectiveCBridgeable { <nl> + static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> var hashValue : Int { return 0 } <nl> <nl> static func _getObjectiveCType ( ) - > Any . 
Type { <nl> struct BridgedSwift : Hashable , _BridgedToObjectiveCType { <nl> return BridgedObjC ( ) <nl> } <nl> <nl> - static func _bridgeFromObjectiveC ( x : BridgedObjC ) - > BridgedSwift { <nl> + static func _forceBridgeFromObjectiveC ( x : BridgedObjC ) - > BridgedSwift { <nl> + return BridgedSwift ( ) <nl> + } <nl> + static func _conditionallyBridgeFromObjectiveC ( <nl> + x : BridgedObjC <nl> + ) - > BridgedSwift ? { <nl> return BridgedSwift ( ) <nl> } <nl> } <nl> func testArrayDowncast ( array : [ AnyObject ] ) - > [ BridgedObjC ] { <nl> / / CHECK - LABEL : sil @ _TF19collection_downcast27testArrayDowncastFromObject <nl> / / CHECK : bb0 ( [ [ OBJ : % [ 0 - 9 ] + ] ] : $ AnyObject ) : <nl> func testArrayDowncastFromObject ( obj : AnyObject ) - > [ BridgedObjC ] { <nl> - / / CHECK : [ [ BRIDGE_FN : % [ 0 - 9 ] + ] ] = function_ref @ _TFSa21_bridgeFromObjectiveCU__fMGSaQ__FCSo7NSArrayGSaQ__ : $ @ thin < τ_0_0 > ( @ owned NSArray , @ thin Array < τ_0_0 > . Type ) - > @ owned Array < τ_0_0 > <nl> + / / CHECK : [ [ BRIDGE_FN : % [ 0 - 9 ] + ] ] = function_ref @ _TFSa26_forceBridgeFromObjectiveCU__fMGSaQ__FCSo7NSArrayGSaQ__ : $ @ thin < τ_0_0 > ( @ owned NSArray , @ thin Array < τ_0_0 > . Type ) - > @ owned Array < τ_0_0 > <nl> / / CHECK : [ [ ARRAY_META : % [ 0 - 9 ] + ] ] = metatype $ @ thin Array < BridgedObjC > . Type <nl> / / CHECK : [ [ NSARRAY_OBJ : % [ 0 - 9 ] + ] ] = unconditional_checked_cast [ [ OBJ ] ] : $ AnyObject to $ NSArray <nl> / / CHECK : apply [ [ BRIDGE_FN ] ] < BridgedObjC > ( [ [ NSARRAY_OBJ ] ] , [ [ ARRAY_META ] ] ) : $ @ thin < τ_0_0 > ( @ owned NSArray , @ thin Array < τ_0_0 > . Type ) - > @ owned Array < τ_0_0 > <nl> func testArrayDowncastFromObject ( obj : AnyObject ) - > [ BridgedObjC ] { <nl> / / CHECK - LABEL : sil @ _TF19collection_downcast28testArrayDowncastFromNSArray <nl> / / CHECK : bb0 ( [ [ NSARRAY_OBJ : % [ 0 - 9 ] + ] ] : $ NSArray ) : <nl> func testArrayDowncastFromNSArray ( obj : NSArray ) - > [ BridgedObjC ] { <nl> - / / CHECK : [ [ BRIDGE_FN : % [ 0 - 9 ] + ] ] = function_ref @ _TFSa21_bridgeFromObjectiveCU__fMGSaQ__FCSo7NSArrayGSaQ__ : $ @ thin < τ_0_0 > ( @ owned NSArray , @ thin Array < τ_0_0 > . Type ) - > @ owned Array < τ_0_0 > <nl> + / / CHECK : [ [ BRIDGE_FN : % [ 0 - 9 ] + ] ] = function_ref @ _TFSa26_forceBridgeFromObjectiveCU__fMGSaQ__FCSo7NSArrayGSaQ__ : $ @ thin < τ_0_0 > ( @ owned NSArray , @ thin Array < τ_0_0 > . Type ) - > @ owned Array < τ_0_0 > <nl> / / CHECK : [ [ ARRAY_META : % [ 0 - 9 ] + ] ] = metatype $ @ thin Array < BridgedObjC > . Type <nl> / / CHECK : apply [ [ BRIDGE_FN ] ] < BridgedObjC > ( [ [ NSARRAY_OBJ ] ] , [ [ ARRAY_META ] ] ) : $ @ thin < τ_0_0 > ( @ owned NSArray , @ thin Array < τ_0_0 > . Type ) - > @ owned Array < τ_0_0 > <nl> return obj as [ BridgedObjC ] <nl> func testArrayIsaBridged ( array : [ AnyObject ] ) - > Bool { <nl> / / CHECK : bb0 ( [ [ OBJ : % [ 0 - 9 ] + ] ] : $ AnyObject ) : <nl> func testDictionaryDowncastFromObject ( obj : AnyObject ) <nl> - > Dictionary < BridgedObjC , BridgedObjC > { <nl> - / / CHECK : [ [ BRIDGE_FN : % [ 0 - 9 ] + ] ] = function_ref @ _TFVSs10Dictionary21_bridgeFromObjectiveCUSs8Hashable___fMGS_Q_Q0__FCSo12NSDictionaryGS_Q_Q0__ : $ @ thin < τ_0_0 , τ_0_1 where τ_0_0 : Hashable > ( @ owned NSDictionary , @ thin Dictionary < τ_0_0 , τ_0_1 > . 
Type ) - > @ owned Dictionary < τ_0_0 , τ_0_1 > <nl> + / / CHECK : [ [ BRIDGE_FN : % [ 0 - 9 ] + ] ] = function_ref @ _TFVSs10Dictionary26_forceBridgeFromObjectiveCUSs8Hashable___fMGS_Q_Q0__FCSo12NSDictionaryGS_Q_Q0__ : $ @ thin < τ_0_0 , τ_0_1 where τ_0_0 : Hashable > ( @ owned NSDictionary , @ thin Dictionary < τ_0_0 , τ_0_1 > . Type ) - > @ owned Dictionary < τ_0_0 , τ_0_1 > <nl> / / CHECK : [ [ DICT_META : % [ 0 - 9 ] + ] ] = metatype $ @ thin Dictionary < BridgedObjC , BridgedObjC > . Type <nl> / / CHECK : [ [ NSDICT_OBJ : % [ 0 - 9 ] + ] ] = unconditional_checked_cast [ [ OBJ ] ] : $ AnyObject to $ NSDictionary <nl> / / CHECK : apply [ [ BRIDGE_FN ] ] < BridgedObjC , BridgedObjC > ( [ [ NSDICT_OBJ ] ] , [ [ DICT_META ] ] ) : $ @ thin < τ_0_0 , τ_0_1 where τ_0_0 : Hashable > ( @ owned NSDictionary , @ thin Dictionary < τ_0_0 , τ_0_1 > . Type ) - > @ owned Dictionary < τ_0_0 , τ_0_1 > <nl> mmm a / test / SILGen / collection_upcast . swift <nl> ppp b / test / SILGen / collection_upcast . swift <nl> class BridgedObjC : NSObject { } <nl> <nl> func = = ( x : BridgedObjC , y : BridgedObjC ) - > Bool { return true } <nl> <nl> - struct BridgedSwift : Hashable , _BridgedToObjectiveCType { <nl> + struct BridgedSwift : Hashable , _ObjectiveCBridgeable { <nl> + static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> var hashValue : Int { return 0 } <nl> <nl> static func _getObjectiveCType ( ) - > Any . Type { <nl> struct BridgedSwift : Hashable , _BridgedToObjectiveCType { <nl> return BridgedObjC ( ) <nl> } <nl> <nl> - static func _bridgeFromObjectiveC ( x : BridgedObjC ) - > BridgedSwift { <nl> + static func _forceBridgeFromObjectiveC ( x : BridgedObjC ) - > BridgedSwift { <nl> + return BridgedSwift ( ) <nl> + } <nl> + static func _conditionallyBridgeFromObjectiveC ( <nl> + x : BridgedObjC <nl> + ) - > BridgedSwift ? { <nl> return BridgedSwift ( ) <nl> } <nl> } <nl> mmm a / test / SILGen / switch_objc . swift <nl> ppp b / test / SILGen / switch_objc . swift <nl> func matchesEither ( # input : Hive , # a : Hive , # b : Hive ) - > Bool { <nl> / / CHECK - LABEL : sil @ _TF11switch_objc9bridgedIs <nl> / / CHECK : bb0 ( [ [ OBJ : % [ 0 - 9 ] + ] ] : $ AnyObject ) : <nl> func bridgedIs ( obj : AnyObject ) { <nl> - / / CHECK : function_ref @ _TFSS21_bridgeFromObjectiveCfMSSFCSo8NSStringSS <nl> + / / CHECK : function_ref @ _TFSS34_conditionallyBridgeFromObjectiveCfMSSFCSo8NSStringGSqSS <nl> / / CHECK : checked_cast_br [ [ OBJ ] ] : $ AnyObject to $ NSString <nl> obj as ? String <nl> <nl> - / / CHECK : [ [ ARRAY_BRIDGE_FN : % [ 0 - 9 ] + ] ] = function_ref @ _TFSa21_bridgeFromObjectiveCU__fMGSaQ__FCSo7NSArrayGSaQ__ <nl> + / / CHECK : [ [ ARRAY_BRIDGE_FN : % [ 0 - 9 ] + ] ] = function_ref @ _TFSa34_conditionallyBridgeFromObjectiveCU__fMGSaQ__FCSo7NSArrayGSqGSaQ__ <nl> / / CHECK : apply [ [ ARRAY_BRIDGE_FN ] ] < NSString > <nl> obj as ? [ NSString ] <nl> } <nl> mmm a / test / expr / cast / array_bridge . swift <nl> ppp b / test / expr / cast / array_bridge . swift <nl> class A { <nl> var x = 0 <nl> } <nl> <nl> - struct B : _BridgedToObjectiveCType { <nl> + struct B : _ObjectiveCBridgeable { <nl> + static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> static func _getObjectiveCType ( ) - > Any . Type { <nl> return A . 
self <nl> } <nl> func _bridgeToObjectiveC ( ) - > A { <nl> return A ( ) <nl> } <nl> - static func _bridgeFromObjectiveC ( x : A ) - > B { <nl> + static func _forceBridgeFromObjectiveC ( x : A ) - > B { <nl> + return B ( ) <nl> + } <nl> + static func _conditionallyBridgeFromObjectiveC ( x : A ) - > B ? { <nl> return B ( ) <nl> } <nl> } <nl> class E { <nl> var x = 0 <nl> } <nl> <nl> - struct F : _BridgedToObjectiveCType { <nl> + struct F : _ObjectiveCBridgeable { <nl> + static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> static func _getObjectiveCType ( ) - > Any . Type { <nl> return E . self <nl> } <nl> func _bridgeToObjectiveC ( ) - > E { <nl> return E ( ) <nl> } <nl> - static func _bridgeFromObjectiveC ( x : E ) - > F { <nl> + static func _forceBridgeFromObjectiveC ( x : E ) - > F { <nl> + return F ( ) <nl> + } <nl> + static func _conditionallyBridgeFromObjectiveC ( x : E ) - > F ? { <nl> return F ( ) <nl> } <nl> } <nl> class G { <nl> var x = 0 <nl> } <nl> <nl> - struct H : _ConditionallyBridgedToObjectiveCType { <nl> + struct H : _ObjectiveCBridgeable { <nl> static func _getObjectiveCType ( ) - > Any . Type { <nl> return G . self <nl> } <nl> func _bridgeToObjectiveC ( ) - > G { <nl> return G ( ) <nl> } <nl> - static func _bridgeFromObjectiveC ( x : G ) - > H { <nl> + static func _forceBridgeFromObjectiveC ( x : G ) - > H { <nl> return H ( ) <nl> } <nl> - static func _bridgeFromObjectiveCConditional ( x : G ) - > H ? { <nl> + static func _conditionallyBridgeFromObjectiveC ( x : G ) - > H ? { <nl> _preconditionFailure ( " implement " ) <nl> } <nl> static func _isBridgedToObjectiveC ( ) - > Bool { <nl> var h : [ H ] = [ ] <nl> g = h / / should type check , but cause a failure at runtime <nl> <nl> <nl> - struct I : _BridgedToObjectiveCType { <nl> + struct I : _ObjectiveCBridgeable { <nl> + static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> static func _getObjectiveCType ( ) - > Any . Type { <nl> return A . self <nl> } <nl> func _bridgeToObjectiveC ( ) - > AnyObject { <nl> return A ( ) <nl> } <nl> - static func _bridgeFromObjectiveC ( x : AnyObject ) - > I { <nl> + static func _forceBridgeFromObjectiveC ( x : AnyObject ) - > I { <nl> + return I ( ) <nl> + } <nl> + static func _conditionallyBridgeFromObjectiveC ( x : AnyObject ) - > I ? { <nl> return I ( ) <nl> } <nl> } <nl> mmm a / test / expr / cast / array_downcast . swift <nl> ppp b / test / expr / cast / array_downcast . swift <nl> class A { <nl> var x = 0 <nl> } <nl> <nl> - struct B : _BridgedToObjectiveCType { <nl> + struct B : _ObjectiveCBridgeable { <nl> + static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> static func _getObjectiveCType ( ) - > Any . Type { <nl> return A . self <nl> } <nl> func _bridgeToObjectiveC ( ) - > A { <nl> return A ( ) <nl> } <nl> - static func _bridgeFromObjectiveC ( x : A ) - > B { <nl> + static func _forceBridgeFromObjectiveC ( x : A ) - > B { <nl> + return B ( ) <nl> + } <nl> + static func _conditionallyBridgeFromObjectiveC ( x : A ) - > B ? { <nl> return B ( ) <nl> } <nl> } <nl> mmm a / test / expr / cast / array_downcast_Foundation . swift <nl> ppp b / test / expr / cast / array_downcast_Foundation . swift <nl> func testDowncastOptionalObject ( obj : AnyObject ? ! ) - > [ String ] ? { <nl> / / CHECK - NEXT : ( inject_into_optional implicit type = ' [ String ] ? 
' <nl> / / CHECK - NEXT : ( call_expr implicit type = ' [ String ] ' <nl> / / CHECK - NEXT : ( dot_syntax_call_expr implicit type = ' ( NSArray ) - > Array < String > ' <nl> - / / CHECK - NEXT : ( declref_expr implicit type = ' ( Array < String > . Type ) - > ( NSArray ) - > Array < String > ' decl = Foundation . ( file ) . Array . _bridgeFromObjectiveC <nl> + / / CHECK - NEXT : ( declref_expr implicit type = ' ( Array < String > . Type ) - > ( NSArray ) - > Array < String > ' decl = Foundation . ( file ) . Array . _forceBridgeFromObjectiveC <nl> / / CHECK - NEXT : ( type_expr implicit type = ' [ String ] . Type ' typerepr = ' < null > ' ) ) <nl> / / CHECK - NEXT : ( forced_checked_cast_expr type = ' NSArray ' { { . * existential_to_concrete } } <nl> / / CHECK - NEXT : ( bind_optional_expr implicit type = ' AnyObject ' <nl> mmm a / test / expr / cast / bridged . swift <nl> ppp b / test / expr / cast / bridged . swift <nl> class NSObject { } <nl> class BridgedClass : NSObject { <nl> } <nl> <nl> - struct BridgedStruct : _BridgedToObjectiveCType { <nl> + struct BridgedStruct : _ObjectiveCBridgeable { <nl> + static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> static func _getObjectiveCType ( ) - > Any . Type { <nl> return BridgedClass . self <nl> } <nl> struct BridgedStruct : _BridgedToObjectiveCType { <nl> return BridgedClass ( ) <nl> } <nl> <nl> - static func _bridgeFromObjectiveC ( x : BridgedClass ) - > BridgedStruct { <nl> + static func _forceBridgeFromObjectiveC ( x : BridgedClass ) - > BridgedStruct { <nl> + return BridgedStruct ( ) <nl> + } <nl> + static func _conditionallyBridgeFromObjectiveC ( x : BridgedClass ) - > BridgedStruct ? { <nl> return BridgedStruct ( ) <nl> } <nl> } <nl> mmm a / test / expr / cast / dictionary_bridge . swift <nl> ppp b / test / expr / cast / dictionary_bridge . swift <nl> class ObjC : Root { <nl> <nl> class DerivesObjC : ObjC { } <nl> <nl> - struct BridgedToObjC : Hashable , _BridgedToObjectiveCType { <nl> + struct BridgedToObjC : Hashable , _ObjectiveCBridgeable { <nl> + static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> static func _getObjectiveCType ( ) - > Any . Type { <nl> return ObjC . self <nl> } <nl> func _bridgeToObjectiveC ( ) - > ObjC { <nl> return ObjC ( ) <nl> } <nl> - static func _bridgeFromObjectiveC ( x : ObjC ) - > BridgedToObjC { <nl> + static func _forceBridgeFromObjectiveC ( x : ObjC ) - > BridgedToObjC { <nl> + return BridgedToObjC ( ) <nl> + } <nl> + static func _conditionallyBridgeFromObjectiveC ( x : ObjC ) - > BridgedToObjC ? { <nl> return BridgedToObjC ( ) <nl> } <nl> <nl> mmm a / test / stdlib / ArrayBridge . swift <nl> ppp b / test / stdlib / ArrayBridge . swift <nl> class BridgedObjC : Base , Printable , Barable { <nl> var bridgeFromOperationCount = 0 <nl> var bridgeToOperationCount = 0 <nl> <nl> - struct BridgedSwift : Printable , _ConditionallyBridgedToObjectiveCType { <nl> + struct BridgedSwift : Printable , _ObjectiveCBridgeable { <nl> static func _getObjectiveCType ( ) - > Any . Type { <nl> return BridgedObjC . self <nl> } <nl> struct BridgedSwift : Printable , _ConditionallyBridgedToObjectiveCType { <nl> return true <nl> } <nl> <nl> - static func _bridgeFromObjectiveC ( x : BridgedObjC ) - > BridgedSwift { <nl> + static func _forceBridgeFromObjectiveC ( x : BridgedObjC ) - > BridgedSwift { <nl> assert ( x . value > = 0 , " not bridged " ) <nl> + + bridgeFromOperationCount <nl> return BridgedSwift ( x . 
value ) <nl> } <nl> <nl> - static func _bridgeFromObjectiveCConditional ( x : BridgedObjC ) - > BridgedSwift ? { <nl> + static func _conditionallyBridgeFromObjectiveC ( x : BridgedObjC ) - > BridgedSwift ? { <nl> return x . value > = 0 ? BridgedSwift ( x . value ) : nil <nl> } <nl> <nl> func testBridgedVerbatim ( ) { <nl> testBridgedVerbatim ( ) <nl> <nl> / / = = = mmm Explicitly Bridged mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - = = = / / <nl> - / / BridgedSwift conforms to _BridgedToObjectiveCType <nl> + / / BridgedSwift conforms to _ObjectiveCBridgeable <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> func testExplicitlyBridged ( ) { <nl> / / CHECK - LABEL : testExplicitlyBridged ( ) <nl> func testExplicitlyBridged ( ) { <nl> <nl> / / Make sure we can bridge back . <nl> let roundTripBridgedSwifts <nl> - = [ BridgedSwift ] . _bridgeFromObjectiveC ( bridgedSwiftsAsNSArray ) <nl> + = [ BridgedSwift ] . _forceBridgeFromObjectiveC ( bridgedSwiftsAsNSArray ) <nl> / / CHECK - NEXT - NOT : [ BridgedSwift # [ [ id00 ] ] ( 42 ) , BridgedSwift # [ [ id01 ] ] ( 17 ) ] <nl> / / CHECK - NEXT : [ BridgedSwift # [ [ id10 : [ 0 - 9 ] + ] ] ( 42 ) , BridgedSwift # [ [ id11 : [ 0 - 9 ] + ] ] ( 17 ) ] <nl> println ( " roundTripBridgedSwifts = \ ( roundTripBridgedSwifts ) ) " ) <nl> func testExplicitlyBridged ( ) { <nl> <nl> / / . . . and bridge * that * back <nl> let bridgedBackSwifts <nl> - = [ BridgedSwift ] . _bridgeFromObjectiveC ( cocoaBridgedSwifts ) <nl> + = [ BridgedSwift ] . _forceBridgeFromObjectiveC ( cocoaBridgedSwifts ) <nl> / / CHECK - NEXT - NOT : [ BridgedSwift # [ [ id00 ] ] ( 42 ) , BridgedSwift # [ [ id01 ] ] ( 17 ) ] <nl> / / CHECK - NEXT - NOT : [ BridgedSwift # [ [ id10 ] ] ( 42 ) , BridgedSwift # [ [ id11 ] ] ( 17 ) ] <nl> / / CHECK - NEXT : [ BridgedSwift # { { [ 0 - 9 ] + } } ( 42 ) , BridgedSwift # { { [ 0 - 9 ] + } } ( 17 ) ] <nl> mmm a / test / stdlib / BridgeNonVerbatim . swift <nl> ppp b / test / stdlib / BridgeNonVerbatim . swift <nl> func = = ( x : Tracked , y : Tracked ) - > Bool { <nl> return x . value = = y . value <nl> } <nl> <nl> - struct X : _BridgedToObjectiveCType { <nl> + struct X : _ObjectiveCBridgeable { <nl> + static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> init ( _ value : Int ) { <nl> self . value = value <nl> } <nl> struct X : _BridgedToObjectiveCType { <nl> return Tracked ( value ) <nl> } <nl> <nl> - static func _bridgeFromObjectiveC ( x : Tracked ) - > X { <nl> + static func _forceBridgeFromObjectiveC ( x : Tracked ) - > X { <nl> + return X ( x . value ) <nl> + } <nl> + <nl> + static func _conditionallyBridgeFromObjectiveC ( x : Tracked ) - > X ? { <nl> return X ( x . value ) <nl> } <nl> <nl> mmm a / test / stdlib / Bridgeable . swift <nl> ppp b / test / stdlib / Bridgeable . swift <nl> func testBridging < T > ( x : T , name : String ) { <nl> } <nl> <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> - struct BridgedValueType : _BridgedToObjectiveCType { <nl> + struct BridgedValueType : _ObjectiveCBridgeable { <nl> + static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> static func _getObjectiveCType ( ) - > Any . Type { <nl> return C . 
self <nl> } <nl> func _bridgeToObjectiveC ( ) - > C { <nl> return C ( ) <nl> } <nl> - static func _bridgeFromObjectiveC ( x : C ) - > BridgedValueType { <nl> + static func _forceBridgeFromObjectiveC ( x : C ) - > BridgedValueType { <nl> + _preconditionFailure ( " implement " ) <nl> + } <nl> + static func _conditionallyBridgeFromObjectiveC ( x : C ) - > BridgedValueType ? { <nl> _preconditionFailure ( " implement " ) <nl> } <nl> } <nl> class PlainClass { } <nl> testBridging ( PlainClass ( ) , " PlainClass " ) <nl> <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> - struct ConditionallyBridged < T > <nl> - : _BridgedToObjectiveCType , _ConditionallyBridgedToObjectiveCType { <nl> + struct ConditionallyBridged < T > : _ObjectiveCBridgeable { <nl> static func _getObjectiveCType ( ) - > Any . Type { <nl> return C . self <nl> } <nl> func _bridgeToObjectiveC ( ) - > C { <nl> return C ( ) <nl> } <nl> - static func _bridgeFromObjectiveC ( x : C ) - > ConditionallyBridged < T > { <nl> + static func _forceBridgeFromObjectiveC ( x : C ) - > ConditionallyBridged < T > { <nl> _preconditionFailure ( " implement " ) <nl> } <nl> - static func _bridgeFromObjectiveCConditional ( x : C ) <nl> + static func _conditionallyBridgeFromObjectiveC ( x : C ) <nl> - > ConditionallyBridged < T > ? { <nl> _preconditionFailure ( " implement " ) <nl> } <nl> mmm a / test / stdlib / Dictionary . swift <nl> ppp b / test / stdlib / Dictionary . swift <nl> var bridgedKeySerial = 0 <nl> var _bridgedKeyBridgeOperations = 0 <nl> <nl> struct TestBridgedKeyTy <nl> - : Equatable , Hashable , Printable , _BridgedToObjectiveCType { <nl> + : Equatable , Hashable , Printable , _ObjectiveCBridgeable { <nl> + static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> static var bridgeOperations : Int { <nl> get { <nl> return _bridgedKeyBridgeOperations <nl> struct TestBridgedKeyTy <nl> return TestObjCKeyTy ( value ) <nl> } <nl> <nl> - static func _bridgeFromObjectiveC ( x : TestObjCKeyTy ) - > TestBridgedKeyTy { <nl> + static func _forceBridgeFromObjectiveC ( x : TestObjCKeyTy ) - > TestBridgedKeyTy { <nl> TestBridgedKeyTy . bridgeOperations + + <nl> return TestBridgedKeyTy ( x . value ) <nl> } <nl> + <nl> + static func _conditionallyBridgeFromObjectiveC ( <nl> + x : TestObjCKeyTy <nl> + ) - > TestBridgedKeyTy ? { <nl> + return self . _forceBridgeFromObjectiveC ( x ) <nl> + } <nl> <nl> var value : Int <nl> var _hashValue : Int <nl> var bridgedValueCount = 0 <nl> var bridgedValueSerial = 0 <nl> var _bridgedValueBridgeOperations = 0 <nl> <nl> - struct TestBridgedValueTy : Printable , _BridgedToObjectiveCType { <nl> + struct TestBridgedValueTy : Printable , _ObjectiveCBridgeable { <nl> + static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> static var bridgeOperations : Int { <nl> get { <nl> return _bridgedValueBridgeOperations <nl> struct TestBridgedValueTy : Printable , _BridgedToObjectiveCType { <nl> return TestObjCValueTy ( value ) <nl> } <nl> <nl> - static func _bridgeFromObjectiveC ( x : TestObjCValueTy ) - > TestBridgedValueTy { <nl> + static func _forceBridgeFromObjectiveC ( <nl> + x : TestObjCValueTy <nl> + ) - > TestBridgedValueTy { <nl> TestBridgedValueTy . bridgeOperations + + <nl> return TestBridgedValueTy ( x . value ) <nl> } <nl> <nl> + static func _conditionallyBridgeFromObjectiveC ( <nl> + x : TestObjCValueTy <nl> + ) - > TestBridgedValueTy ? { <nl> + return self . 
_forceBridgeFromObjectiveC ( x ) <nl> + } <nl> + <nl> var value : Int <nl> var serial : Int <nl> } <nl> <nl> struct TestBridgedEquatableValueTy <nl> - : Equatable , Printable , _BridgedToObjectiveCType { <nl> + : Equatable , Printable , _ObjectiveCBridgeable { <nl> + <nl> + static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> init ( _ value : Int ) { <nl> + + bridgedValueCount <nl> serial = + + bridgedValueSerial <nl> struct TestBridgedEquatableValueTy <nl> return TestObjCEquatableValueTy ( value ) <nl> } <nl> <nl> - static func _bridgeFromObjectiveC ( x : TestObjCEquatableValueTy ) - > TestBridgedEquatableValueTy { <nl> + static func _forceBridgeFromObjectiveC ( <nl> + x : TestObjCEquatableValueTy <nl> + ) - > TestBridgedEquatableValueTy { <nl> return TestBridgedEquatableValueTy ( x . value ) <nl> } <nl> <nl> + static func _conditionallyBridgeFromObjectiveC ( <nl> + x : TestObjCEquatableValueTy <nl> + ) - > TestBridgedEquatableValueTy ? { <nl> + return self . _forceBridgeFromObjectiveC ( x ) <nl> + } <nl> + <nl> var value : Int <nl> var serial : Int <nl> } <nl> func getBridgedVerbatimDictionaryAndNSMutableDictionary ( ) <nl> <nl> func getBridgedNonverbatimDictionary ( ) - > Dictionary < TestBridgedKeyTy , TestBridgedValueTy > { <nl> var nsd = getAsNSDictionary ( [ 10 : 1010 , 20 : 1020 , 30 : 1030 ] ) <nl> - return Dictionary . _bridgeFromObjectiveC ( nsd ) <nl> + return Dictionary . _forceBridgeFromObjectiveC ( nsd ) <nl> } <nl> <nl> func getBridgedNonverbatimDictionary ( d : Dictionary < Int , Int > ) - > Dictionary < TestBridgedKeyTy , TestBridgedValueTy > { <nl> var nsd = getAsNSDictionary ( d ) <nl> - return Dictionary . _bridgeFromObjectiveC ( nsd ) <nl> + return Dictionary . _forceBridgeFromObjectiveC ( nsd ) <nl> } <nl> <nl> func getBridgedNonverbatimDictionaryAndNSMutableDictionary ( ) <nl> - > ( Dictionary < TestBridgedKeyTy , TestBridgedValueTy > , NSMutableDictionary ) { <nl> var nsd = getAsNSMutableDictionary ( [ 10 : 1010 , 20 : 1020 , 30 : 1030 ] ) <nl> - return ( Dictionary . _bridgeFromObjectiveC ( nsd ) , nsd ) <nl> + return ( Dictionary . _forceBridgeFromObjectiveC ( nsd ) , nsd ) <nl> } <nl> <nl> func getBridgedVerbatimEquatableDictionary ( d : Dictionary < Int , Int > ) - > Dictionary < NSObject , TestObjCEquatableValueTy > { <nl> func getBridgedVerbatimEquatableDictionary ( d : Dictionary < Int , Int > ) - > Dictionar <nl> <nl> func getBridgedNonverbatimEquatableDictionary ( d : Dictionary < Int , Int > ) - > Dictionary < TestBridgedKeyTy , TestBridgedEquatableValueTy > { <nl> var nsd = getAsEquatableNSDictionary ( d ) <nl> - return Dictionary . _bridgeFromObjectiveC ( nsd ) <nl> + return Dictionary . _forceBridgeFromObjectiveC ( nsd ) <nl> } <nl> <nl> func getHugeBridgedVerbatimDictionaryHelper ( ) - > NSDictionary { <nl> func getHugeBridgedVerbatimDictionary ( ) - > Dictionary < NSObject , AnyObject > { <nl> <nl> func getHugeBridgedNonverbatimDictionary ( ) - > Dictionary < TestBridgedKeyTy , TestBridgedValueTy > { <nl> var nsd = getHugeBridgedVerbatimDictionaryHelper ( ) <nl> - return Dictionary . _bridgeFromObjectiveC ( nsd ) <nl> + return Dictionary . 
_forceBridgeFromObjectiveC ( nsd ) <nl> } <nl> <nl> / / / A mock dictionary that stores its keys and values in parallel arrays , which <nl> func getParallelArrayBridgedVerbatimDictionary ( ) - > Dictionary < NSObject , AnyObje <nl> <nl> func getParallelArrayBridgedNonverbatimDictionary ( ) - > Dictionary < TestBridgedKeyTy , TestBridgedValueTy > { <nl> var nsd : NSDictionary = ParallelArrayDictionary ( ) <nl> - return Dictionary . _bridgeFromObjectiveC ( nsd ) <nl> + return Dictionary . _forceBridgeFromObjectiveC ( nsd ) <nl> } <nl> <nl> func test_BridgedFromObjC_Verbatim_DictionaryIsCopied ( ) { <nl> mmm a / test / stdlib / DictionaryTraps . swift <nl> ppp b / test / stdlib / DictionaryTraps . swift <nl> class TestObjCKeyTy : NSObject { <nl> var value : Int <nl> } <nl> <nl> - struct TestBridgedKeyTy : Hashable , _BridgedToObjectiveCType { <nl> + struct TestBridgedKeyTy : Hashable , _ObjectiveCBridgeable { <nl> + static func _isBridgedToObjectiveC ( ) - > Bool { <nl> + return true <nl> + } <nl> + <nl> init ( _ value : Int ) { self . value = value } <nl> <nl> var hashValue : Int { return value } <nl> struct TestBridgedKeyTy : Hashable , _BridgedToObjectiveCType { <nl> return TestObjCKeyTy ( value ) <nl> } <nl> <nl> - static func _bridgeFromObjectiveC ( x : TestObjCKeyTy ) - > TestBridgedKeyTy { <nl> + static func _forceBridgeFromObjectiveC ( x : TestObjCKeyTy ) - > TestBridgedKeyTy { <nl> + return TestBridgedKeyTy ( x . value ) <nl> + } <nl> + <nl> + static func _conditionallyBridgeFromObjectiveC ( <nl> + x : TestObjCKeyTy <nl> + ) - > TestBridgedKeyTy ? { <nl> return TestBridgedKeyTy ( x . value ) <nl> } <nl> <nl> mmm a / test / stdlib / NSValueBridging . swift <nl> ppp b / test / stdlib / NSValueBridging . swift <nl> var nsValueBridging = TestCase ( " NSValueBridging " ) <nl> <nl> nsValueBridging . test ( " NSRange " ) { <nl> let nsValue = _bridgeToObjectiveC ( NSRange ( location : 17 , length : 19 ) ) as NSValue <nl> - let swiftValue : NSRange = _bridgeFromObjectiveC ( nsValue , NSRange . self ) <nl> + let swiftValue : NSRange = _forceBridgeFromObjectiveC ( nsValue , NSRange . self ) <nl> expectEqual ( 17 , swiftValue . location ) <nl> expectEqual ( 19 , swiftValue . length ) <nl> } <nl> mmm a / test / stdlib / Runtime . swift <nl> ppp b / test / stdlib / Runtime . swift <nl> struct NotBridgedValueType { <nl> var a : ClassA = ClassA ( value : 4242 ) <nl> } <nl> <nl> - struct BridgedValueType : _ConditionallyBridgedToObjectiveCType { <nl> + struct BridgedValueType : _ObjectiveCBridgeable { <nl> static func _getObjectiveCType ( ) - > Any . Type { <nl> return ClassA . self <nl> } <nl> struct BridgedValueType : _ConditionallyBridgedToObjectiveCType { <nl> return true <nl> } <nl> <nl> - static func _bridgeFromObjectiveC ( x : ClassA ) - > BridgedValueType { <nl> + static func _forceBridgeFromObjectiveC ( x : ClassA ) - > BridgedValueType { <nl> assert ( x . value % 2 = = 0 , " not bridged to Objective - C " ) <nl> return BridgedValueType ( value : x . value ) <nl> } <nl> <nl> - static func _bridgeFromObjectiveCConditional ( x : ClassA ) - > BridgedValueType ? { <nl> + static func _conditionallyBridgeFromObjectiveC ( <nl> + x : ClassA <nl> + ) - > BridgedValueType ? { <nl> if x . value % 2 = = 0 { <nl> return BridgedValueType ( value : x . 
value ) <nl> } <nl> struct BridgedValueType : _ConditionallyBridgedToObjectiveCType { <nl> var value : Int <nl> } <nl> <nl> - struct BridgedLargeValueType : _ConditionallyBridgedToObjectiveCType { <nl> + struct BridgedLargeValueType : _ObjectiveCBridgeable { <nl> init ( value : Int ) { <nl> value0 = value <nl> value1 = value <nl> struct BridgedLargeValueType : _ConditionallyBridgedToObjectiveCType { <nl> return true <nl> } <nl> <nl> - static func _bridgeFromObjectiveC ( x : ClassA ) - > BridgedLargeValueType { <nl> + static func _forceBridgeFromObjectiveC ( x : ClassA ) - > BridgedLargeValueType { <nl> assert ( x . value % 2 = = 0 , " not bridged to Objective - C " ) <nl> return BridgedLargeValueType ( value : x . value ) <nl> } <nl> <nl> - static func _bridgeFromObjectiveCConditional ( x : ClassA ) - > BridgedLargeValueType ? { <nl> + static func _conditionallyBridgeFromObjectiveC ( x : ClassA ) - > BridgedLargeValueType ? { <nl> if x . value % 2 = = 0 { <nl> return BridgedLargeValueType ( value : x . value ) <nl> } <nl> struct BridgedLargeValueType : _ConditionallyBridgedToObjectiveCType { <nl> } <nl> <nl> <nl> - struct ConditionallyBridgedValueType < T > <nl> - : _ConditionallyBridgedToObjectiveCType { <nl> + struct ConditionallyBridgedValueType < T > : _ObjectiveCBridgeable { <nl> static func _getObjectiveCType ( ) - > Any . Type { <nl> return ClassA . self <nl> } <nl> struct ConditionallyBridgedValueType < T > <nl> return ClassA ( value : value ) <nl> } <nl> <nl> - static func _bridgeFromObjectiveC ( x : ClassA ) - > ConditionallyBridgedValueType { <nl> + static func _forceBridgeFromObjectiveC ( <nl> + x : ClassA <nl> + ) - > ConditionallyBridgedValueType { <nl> assert ( x . value % 2 = = 0 , " not bridged from Objective - C " ) <nl> return ConditionallyBridgedValueType ( value : x . value ) <nl> } <nl> <nl> - static func _bridgeFromObjectiveCConditional ( x : ClassA ) <nl> + static func _conditionallyBridgeFromObjectiveC ( x : ClassA ) <nl> - > ConditionallyBridgedValueType ? { <nl> if x . value % 2 = = 0 { <nl> return ConditionallyBridgedValueType ( value : x . value ) <nl> Runtime . test ( " bridgeToObjectiveC " ) { <nl> expectTrue ( _bridgeToObjectiveC ( bridgedVerbatimRef ) = = = bridgedVerbatimRef ) <nl> } <nl> <nl> - Runtime . test ( " bridgeFromObjectiveC " ) { <nl> + Runtime . test ( " forceBridgeFromObjectiveC " ) { <nl> / / Bridge back using NotBridgedValueType . <nl> - expectEmpty ( _bridgeFromObjectiveCConditional ( <nl> + expectEmpty ( _conditionallyBridgeFromObjectiveC ( <nl> ClassA ( value : 21 ) , NotBridgedValueType . self ) ) <nl> <nl> - expectEmpty ( _bridgeFromObjectiveCConditional ( <nl> + expectEmpty ( _conditionallyBridgeFromObjectiveC ( <nl> ClassA ( value : 42 ) , NotBridgedValueType . self ) ) <nl> <nl> - expectEmpty ( _bridgeFromObjectiveCConditional ( <nl> + expectEmpty ( _conditionallyBridgeFromObjectiveC ( <nl> BridgedVerbatimRefType ( ) , NotBridgedValueType . self ) ) <nl> <nl> / / Bridge back using BridgedValueType . <nl> - expectEmpty ( _bridgeFromObjectiveCConditional ( <nl> + expectEmpty ( _conditionallyBridgeFromObjectiveC ( <nl> ClassA ( value : 21 ) , BridgedValueType . self ) ) <nl> <nl> - expectEqual ( 42 , _bridgeFromObjectiveC ( <nl> + expectEqual ( 42 , _forceBridgeFromObjectiveC ( <nl> ClassA ( value : 42 ) , BridgedValueType . self ) . value ) <nl> - expectEqual ( 42 , _bridgeFromObjectiveCConditional ( <nl> + expectEqual ( 42 , _conditionallyBridgeFromObjectiveC ( <nl> ClassA ( value : 42 ) , BridgedValueType . self ) ! . 
value ) <nl> <nl> - expectEmpty ( _bridgeFromObjectiveCConditional ( <nl> + expectEmpty ( _conditionallyBridgeFromObjectiveC ( <nl> BridgedVerbatimRefType ( ) , BridgedValueType . self ) ) <nl> <nl> / / Bridge back using BridgedLargeValueType . <nl> - expectEmpty ( _bridgeFromObjectiveCConditional ( <nl> + expectEmpty ( _conditionallyBridgeFromObjectiveC ( <nl> ClassA ( value : 21 ) , BridgedLargeValueType . self ) ) <nl> <nl> - expectEqual ( 42 , _bridgeFromObjectiveC ( <nl> + expectEqual ( 42 , _forceBridgeFromObjectiveC ( <nl> ClassA ( value : 42 ) , BridgedLargeValueType . self ) . value ) <nl> - expectEqual ( 42 , _bridgeFromObjectiveCConditional ( <nl> + expectEqual ( 42 , _conditionallyBridgeFromObjectiveC ( <nl> ClassA ( value : 42 ) , BridgedLargeValueType . self ) ! . value ) <nl> <nl> - expectEmpty ( _bridgeFromObjectiveCConditional ( <nl> + expectEmpty ( _conditionallyBridgeFromObjectiveC ( <nl> BridgedVerbatimRefType ( ) , BridgedLargeValueType . self ) ) <nl> <nl> / / Bridge back using BridgedVerbatimRefType . <nl> - expectEmpty ( _bridgeFromObjectiveCConditional ( <nl> + expectEmpty ( _conditionallyBridgeFromObjectiveC ( <nl> ClassA ( value : 21 ) , BridgedVerbatimRefType . self ) ) <nl> <nl> - expectEmpty ( _bridgeFromObjectiveCConditional ( <nl> + expectEmpty ( _conditionallyBridgeFromObjectiveC ( <nl> ClassA ( value : 42 ) , BridgedVerbatimRefType . self ) ) <nl> <nl> var bridgedVerbatimRef = BridgedVerbatimRefType ( ) <nl> - expectTrue ( _bridgeFromObjectiveC ( <nl> + expectTrue ( _forceBridgeFromObjectiveC ( <nl> bridgedVerbatimRef , BridgedVerbatimRefType . self ) = = = bridgedVerbatimRef ) <nl> - expectTrue ( _bridgeFromObjectiveCConditional ( <nl> + expectTrue ( _conditionallyBridgeFromObjectiveC ( <nl> bridgedVerbatimRef , BridgedVerbatimRefType . self ) ! = = = bridgedVerbatimRef ) <nl> } <nl> <nl>
[ stdlib ] Consolidate the _BridgedToObjectiveCType and _ConditionallyBridgedToObjectiveCType bridging protocols into a single _ObjectiveCBridgeable protocol , renaming _bridgeFromObjectiveC to _forceBridgeFromObjectiveC and _bridgeFromObjectiveCConditional to _conditionallyBridgeFromObjectiveC
apple/swift
9a13a7148bb7f9dbce48c954e0f07f0740b19c77
2014-07-29T01:30:27Z
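
For reference, a minimal sketch of the conformance pattern that the updated test files in this diff converge on. The type names MyObjCClass and MyValue are hypothetical, and the exact declaration of _ObjectiveCBridgeable is not part of this diff, so the member set below is inferred from the conformances shown above (for example struct B, BridgedStruct, and TestBridgedKeyTy); it is an illustration, not the stdlib's own definition.

// Hypothetical Objective-C-side class; plays the role of A / ClassA in the
// tests above.
class MyObjCClass {
  var value = 0
  init(value: Int) { self.value = value }
}

// Hypothetical Swift value type adopting the consolidated protocol.
struct MyValue : _ObjectiveCBridgeable {
  var value = 0

  // Previously implied by adopting _BridgedToObjectiveCType versus
  // _ConditionallyBridgedToObjectiveCType; now an explicit requirement.
  static func _isBridgedToObjectiveC() -> Bool {
    return true
  }

  static func _getObjectiveCType() -> Any.Type {
    return MyObjCClass.self
  }

  func _bridgeToObjectiveC() -> MyObjCClass {
    return MyObjCClass(value: value)
  }

  // Renamed from _bridgeFromObjectiveC in the old protocol.
  static func _forceBridgeFromObjectiveC(x: MyObjCClass) -> MyValue {
    return MyValue(value: x.value)
  }

  // Renamed from _bridgeFromObjectiveCConditional; now required of every
  // conformance, even unconditionally bridged types.
  static func _conditionallyBridgeFromObjectiveC(x: MyObjCClass) -> MyValue? {
    return MyValue(value: x.value)
  }
}

// The free-function entry points follow the same naming scheme, as exercised
// by the updated Runtime.swift and NSValueBridging.swift tests:
//   _forceBridgeFromObjectiveC(obj, MyValue.self)          // traps on failure
//   _conditionallyBridgeFromObjectiveC(obj, MyValue.self)  // returns MyValue?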