Dataset schema (per-field string lengths, min to max):

  diff   string, length 41 to 2.03M
  msg    string, length 1 to 1.5k
  repo   string, length 5 to 40
  sha    string, length 40 to 40
  time   string, length 20 to 20
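The `diff` field appears to store tokenized unified diffs: ` <nl> ` seems to act as the line separator, and the leading `mmm a / ...` / `ppp b / ...` markers look like stand-ins for the usual `--- a/...` / `+++ b/...` file headers, with punctuation space-separated. Below is a minimal sketch of how such a string could be turned back into readable diff lines; the helper name and the token conventions are inferences from the rows shown here, not documented properties of the dataset.

```cpp
// Minimal sketch (assumptions: " <nl> " separates lines; "mmm"/"ppp" at the
// start of a line stand for the "---"/"+++" diff file headers).
#include <iostream>
#include <string>
#include <vector>

std::vector<std::string> detokenizeDiff(const std::string& raw) {
    std::vector<std::string> lines;
    const std::string sep = " <nl> ";
    std::size_t start = 0;
    for (;;) {
        std::size_t pos = raw.find(sep, start);
        std::string line = (pos == std::string::npos) ? raw.substr(start)
                                                      : raw.substr(start, pos - start);
        if (line.rfind("mmm ", 0) == 0) line.replace(0, 3, "---");      // "--- a/..."
        else if (line.rfind("ppp ", 0) == 0) line.replace(0, 3, "+++"); // "+++ b/..."
        lines.push_back(line);
        if (pos == std::string::npos) break;
        start = pos + sep.size();
    }
    return lines;
}

int main() {
    std::string raw = "mmm a / release <nl> ppp b / release <nl> fi";
    for (const auto& line : detokenizeDiff(raw)) std::cout << line << '\n';
}
```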
mmm a / xbmc / GUIInfoManager . cpp <nl> ppp b / xbmc / GUIInfoManager . cpp <nl> std : : string CGUIInfoManager : : GetMultiInfoLabel ( const GUIInfo & info , int contextW <nl> } <nl> else if ( info . m_info = = CONTAINER_CONTENT ) <nl> { <nl> - CGUIWindow * window = GetWindowWithCondition ( contextWindow , WINDOW_CONDITION_IS_MEDIA_WINDOW ) ; ; <nl> + CGUIWindow * window = GetWindowWithCondition ( contextWindow , WINDOW_CONDITION_IS_MEDIA_WINDOW ) ; <nl> if ( window ) <nl> return static_cast < CGUIMediaWindow * > ( window ) - > CurrentDirectory ( ) . GetContent ( ) ; <nl> } <nl> mmm a / xbmc / addons / interfaces / Filesystem . cpp <nl> ppp b / xbmc / addons / interfaces / Filesystem . cpp <nl> char * Interface_Filesystem : : make_legal_filename ( void * kodiBase , const char * file <nl> return nullptr ; <nl> } <nl> <nl> - std : : string string = CUtil : : MakeLegalFileName ( filename ) ; ; <nl> + std : : string string = CUtil : : MakeLegalFileName ( filename ) ; <nl> char * buffer = strdup ( string . c_str ( ) ) ; <nl> return buffer ; <nl> } <nl> char * Interface_Filesystem : : make_legal_path ( void * kodiBase , const char * path ) <nl> return nullptr ; <nl> } <nl> <nl> - std : : string string = CUtil : : MakeLegalPath ( path ) ; ; <nl> + std : : string string = CUtil : : MakeLegalPath ( path ) ; <nl> char * buffer = strdup ( string . c_str ( ) ) ; <nl> return buffer ; <nl> } <nl> mmm a / xbmc / guilib / GUISliderControl . cpp <nl> ppp b / xbmc / guilib / GUISliderControl . cpp <nl> void CGUISliderControl : : Process ( unsigned int currentTime , CDirtyRegionList & dirt <nl> if ( m_orientation = = HORIZONTAL ) <nl> fScale = m_height = = 0 ? 1 . 0f : m_height / m_guiBackground . GetTextureHeight ( ) ; <nl> else <nl> - fScale = m_width = = 0 ? 1 . 0f : m_width / nibUpper . GetTextureWidth ( ) ; ; <nl> + fScale = m_width = = 0 ? 1 . 0f : m_width / nibUpper . GetTextureWidth ( ) ; <nl> dirty | = ProcessSelector ( nibUpper , currentTime , fScale , RangeSelectorUpper ) ; <nl> } <nl> <nl> mmm a / xbmc / guilib / GUIWindow . cpp <nl> ppp b / xbmc / guilib / GUIWindow . cpp <nl> bool CGUIWindow : : Load ( TiXmlElement * pRootElement ) <nl> } <nl> else if ( strValue = = " depth " & & pChild - > FirstChild ( ) ) <nl> { <nl> - float stereo = static_cast < float > ( atof ( pChild - > FirstChild ( ) - > Value ( ) ) ) ; ; <nl> + float stereo = static_cast < float > ( atof ( pChild - > FirstChild ( ) - > Value ( ) ) ) ; <nl> m_stereo = std : : max ( - 1 . f , std : : min ( 1 . f , stereo ) ) ; <nl> } <nl> else if ( strValue = = " controls " ) <nl> mmm a / xbmc / interfaces / legacy / WindowInterceptor . h <nl> ppp b / xbmc / interfaces / legacy / WindowInterceptor . h <nl> namespace XBMCAddon <nl> <nl> bool IsDialogRunning ( ) const override { XBMC_TRACE ; return checkedb ( IsDialogRunning ( ) ) ; } ; <nl> bool IsDialog ( ) const override { XBMC_TRACE ; return checkedb ( IsDialog ( ) ) ; } ; <nl> - bool IsMediaWindow ( ) const override { XBMC_TRACE ; return checkedb ( IsMediaWindow ( ) ) ; ; } ; <nl> + bool IsMediaWindow ( ) const override { XBMC_TRACE ; return checkedb ( IsMediaWindow ( ) ) ; } ; <nl> <nl> void SetRenderOrder ( int renderOrder ) override { XBMC_TRACE ; P : : m_renderOrder = renderOrder ; } <nl> <nl> mmm a / xbmc / windowing / osx / WinSystemOSX . mm <nl> ppp b / xbmc / windowing / osx / WinSystemOSX . 
mm <nl> static void DisplayReconfigured ( CGDirectDisplayID display , <nl> / / we don ' t have a window to get the current screen on <nl> / / in that case ResizeWindow is called at a later stage from SetFullScreen ( false ) <nl> / / and we can grab the correct display number here then <nl> - m_lastDisplayNr = GetDisplayIndex ( GetDisplayIDFromScreen ( [ window screen ] ) ) ; ; <nl> + m_lastDisplayNr = GetDisplayIndex ( GetDisplayIDFromScreen ( [ window screen ] ) ) ; <nl> } <nl> } <nl> <nl>
Merge pull request from hudokkow / bubble_bobble
xbmc/xbmc
8893f3529c7977972e431c646bb9e5fa6002a676
2017-07-01T06:40:42Z
mmm a / tensorflow / go / op / wrappers . go <nl> ppp b / tensorflow / go / op / wrappers . go <nl> func DepthwiseConv2dNativeBackpropFilterDataFormat ( value string ) DepthwiseConv2d <nl> / / element on that dimension . The dimension order is determined by the value of <nl> / / ` data_format ` , see above for details . Dilations in the batch and depth <nl> / / dimensions must be 1 . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func DepthwiseConv2dNativeBackpropFilterDilations ( value [ ] int64 ) DepthwiseConv2dNativeBackpropFilterAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func SampleDistortedBoundingBoxV2Seed2 ( value int64 ) SampleDistortedBoundingBoxV2 <nl> / / <nl> / / value : The cropped area of the image must have an aspect ratio = <nl> / / width / height within this range . <nl> - / / If not specified , defaults to { f : 0 . 75 f : 1 . 33 } <nl> + / / If not specified , defaults to { f : 0 . 75 f : 1 . 33 } <nl> func SampleDistortedBoundingBoxV2AspectRatioRange ( value [ ] float32 ) SampleDistortedBoundingBoxV2Attr { <nl> return func ( m optionalAttr ) { <nl> m [ " aspect_ratio_range " ] = value <nl> func SampleDistortedBoundingBoxV2AspectRatioRange ( value [ ] float32 ) SampleDistort <nl> / / <nl> / / value : The cropped area of the image must contain a fraction of the <nl> / / supplied image within this range . <nl> - / / If not specified , defaults to { f : 0 . 05 f : 1 } <nl> + / / If not specified , defaults to { f : 0 . 05 f : 1 } <nl> func SampleDistortedBoundingBoxV2AreaRange ( value [ ] float32 ) SampleDistortedBoundingBoxV2Attr { <nl> return func ( m optionalAttr ) { <nl> m [ " area_range " ] = value <nl> func SampleDistortedBoundingBoxMinObjectCovered ( value float32 ) SampleDistortedBo <nl> / / <nl> / / value : The cropped area of the image must have an aspect ratio = <nl> / / width / height within this range . <nl> - / / If not specified , defaults to { f : 0 . 75 f : 1 . 33 } <nl> + / / If not specified , defaults to { f : 0 . 75 f : 1 . 33 } <nl> func SampleDistortedBoundingBoxAspectRatioRange ( value [ ] float32 ) SampleDistortedBoundingBoxAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " aspect_ratio_range " ] = value <nl> func SampleDistortedBoundingBoxAspectRatioRange ( value [ ] float32 ) SampleDistorted <nl> / / <nl> / / value : The cropped area of the image must contain a fraction of the <nl> / / supplied image within this range . <nl> - / / If not specified , defaults to { f : 0 . 05 f : 1 } <nl> + / / If not specified , defaults to { f : 0 . 05 f : 1 } <nl> func SampleDistortedBoundingBoxAreaRange ( value [ ] float32 ) SampleDistortedBoundingBoxAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " area_range " ] = value <nl> func ImageSummaryMaxImages ( value int64 ) ImageSummaryAttr { <nl> / / ImageSummaryBadColor sets the optional bad_color attribute to value . <nl> / / <nl> / / value : Color to use for pixels with non - finite values . <nl> - / / If not specified , defaults to { dtype : DT_UINT8 tensor_shape : { dim : { size : 4 } } int_val : 255 int_val : 0 int_val : 0 int_val : 255 } <nl> + / / If not specified , defaults to { dtype : DT_UINT8 tensor_shape : { dim : { size : 4 } } int_val : 255 int_val : 0 int_val : 0 int_val : 255 } <nl> func ImageSummaryBadColor ( value tf . 
Tensor ) ImageSummaryAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " bad_color " ] = value <nl> func Conv3DBackpropFilterV2DataFormat ( value string ) Conv3DBackpropFilterV2Attr { <nl> / / filter element on that dimension . The dimension order is determined by the <nl> / / value of ` data_format ` , see above for details . Dilations in the batch and <nl> / / depth dimensions must be 1 . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 i : 1 } <nl> func Conv3DBackpropFilterV2Dilations ( value [ ] int64 ) Conv3DBackpropFilterV2Attr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func Conv2DBackpropInputDataFormat ( value string ) Conv2DBackpropInputAttr { <nl> / / element on that dimension . The dimension order is determined by the value of <nl> / / ` data_format ` , see above for details . Dilations in the batch and depth <nl> / / dimensions must be 1 . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func Conv2DBackpropInputDilations ( value [ ] int64 ) Conv2DBackpropInputAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func Conv2DDataFormat ( value string ) Conv2DAttr { <nl> / / filter element on that dimension . The dimension order is determined by the <nl> / / value of ` data_format ` , see above for details . Dilations in the batch and <nl> / / depth dimensions must be 1 . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func Conv2DDilations ( value [ ] int64 ) Conv2DAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutType ( value tf . DataTy <nl> / / QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeDilations sets the optional dilations attribute to value . <nl> / / <nl> / / value : List of dilation values . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeDilations ( value [ ] int64 ) QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func QuantizedDepthwiseConv2DWithBiasAndReluOutType ( value tf . DataType ) Quantized <nl> / / QuantizedDepthwiseConv2DWithBiasAndReluDilations sets the optional dilations attribute to value . <nl> / / <nl> / / value : List of dilation values . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func QuantizedDepthwiseConv2DWithBiasAndReluDilations ( value [ ] int64 ) QuantizedDepthwiseConv2DWithBiasAndReluAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func QuantizedDepthwiseConv2DWithBiasOutType ( value tf . DataType ) QuantizedDepthwi <nl> / / QuantizedDepthwiseConv2DWithBiasDilations sets the optional dilations attribute to value . <nl> / / <nl> / / value : List of dilation values . 
<nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func QuantizedDepthwiseConv2DWithBiasDilations ( value [ ] int64 ) QuantizedDepthwiseConv2DWithBiasAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func QuantizedDepthwiseConv2DOutType ( value tf . DataType ) QuantizedDepthwiseConv2D <nl> / / QuantizedDepthwiseConv2DDilations sets the optional dilations attribute to value . <nl> / / <nl> / / value : List of dilation values . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func QuantizedDepthwiseConv2DDilations ( value [ ] int64 ) QuantizedDepthwiseConv2DAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func QuantizedConv2DPerChannelOutType ( value tf . DataType ) QuantizedConv2DPerChann <nl> / / QuantizedConv2DPerChannelDilations sets the optional dilations attribute to value . <nl> / / <nl> / / value : list of dilation values . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func QuantizedConv2DPerChannelDilations ( value [ ] int64 ) QuantizedConv2DPerChannelAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func Conv3DBackpropInputV2DataFormat ( value string ) Conv3DBackpropInputV2Attr { <nl> / / filter element on that dimension . The dimension order is determined by the <nl> / / value of ` data_format ` , see above for details . Dilations in the batch and <nl> / / depth dimensions must be 1 . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 i : 1 } <nl> func Conv3DBackpropInputV2Dilations ( value [ ] int64 ) Conv3DBackpropInputV2Attr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func AvgPool3DGrad ( scope * Scope , orig_input_shape tf . Output , grad tf . Output , ksi <nl> type Conv3DBackpropFilterAttr func ( optionalAttr ) <nl> <nl> / / Conv3DBackpropFilterDilations sets the optional dilations attribute to value . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 i : 1 } <nl> func Conv3DBackpropFilterDilations ( value [ ] int64 ) Conv3DBackpropFilterAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func Conv3DDataFormat ( value string ) Conv3DAttr { <nl> / / filter element on that dimension . The dimension order is determined by the <nl> / / value of ` data_format ` , see above for details . Dilations in the batch and <nl> / / depth dimensions must be 1 . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 i : 1 } <nl> func Conv3DDilations ( value [ ] int64 ) Conv3DAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func DepthwiseConv2dNativeBackpropInputDataFormat ( value string ) DepthwiseConv2dN <nl> / / element on that dimension . The dimension order is determined by the value of <nl> / / ` data_format ` , see above for details . Dilations in the batch and depth <nl> / / dimensions must be 1 . 
<nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func DepthwiseConv2dNativeBackpropInputDilations ( value [ ] int64 ) DepthwiseConv2dNativeBackpropInputAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func DepthwiseConv2dNativeBackpropInput ( scope * Scope , input_sizes tf . Output , fil <nl> type Conv3DBackpropInputAttr func ( optionalAttr ) <nl> <nl> / / Conv3DBackpropInputDilations sets the optional dilations attribute to value . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 i : 1 } <nl> func Conv3DBackpropInputDilations ( value [ ] int64 ) Conv3DBackpropInputAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func DepthwiseConv2dNativeDataFormat ( value string ) DepthwiseConv2dNativeAttr { <nl> / / element on that dimension . The dimension order is determined by the value of <nl> / / ` data_format ` , see above for details . Dilations in the batch and depth <nl> / / dimensions must be 1 . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func DepthwiseConv2dNativeDilations ( value [ ] int64 ) DepthwiseConv2dNativeAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func QuantizedConv2DOutType ( value tf . DataType ) QuantizedConv2DAttr { <nl> / / filter element on that dimension . The dimension order is determined by the <nl> / / value of ` data_format ` , see above for details . Dilations in the batch and <nl> / / depth dimensions must be 1 . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func QuantizedConv2DDilations ( value [ ] int64 ) QuantizedConv2DAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func Conv2DBackpropFilterDataFormat ( value string ) Conv2DBackpropFilterAttr { <nl> / / element on that dimension . The dimension order is determined by the value of <nl> / / ` data_format ` , see above for details . Dilations in the batch and depth <nl> / / dimensions must be 1 . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func Conv2DBackpropFilterDilations ( value [ ] int64 ) Conv2DBackpropFilterAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl>
Go : Update generated wrapper functions for TensorFlow ops .
tensorflow/tensorflow
326e06710105000c73d2c0730aac54094c932d57
2020-03-06T23:16:32Z
mmm a / AUTHORS <nl> ppp b / AUTHORS <nl> Developers : <nl> zhiqiangxu <nl> Fixed a logic error in ControlUtils : : RectUnion . <nl> Fixed an issue that there is an useless conversion in ScrollView : : onTouchBegan . <nl> + Deleted several lines of useless code in ScrollView : : deaccelerateScrolling . <nl> <nl> yinkaile ( 2youyouo2 ) <nl> Maintainer of Armature Bone Animation . <nl>
Update AUTHORS [ ci skip ]
cocos2d/cocos2d-x
147828ac872aab35d2ec6498f7c00654fb11150e
2014-02-20T12:21:42Z
mmm a / tests / cpp - tests / Classes / UITest / CocoStudioGUITest / UIRichTextTest / UIRichTextTest . cpp <nl> ppp b / tests / cpp - tests / Classes / UITest / CocoStudioGUITest / UIRichTextTest / UIRichTextTest . cpp <nl> bool UIRichTextTest : : init ( ) <nl> _richText - > ignoreContentAdaptWithSize ( false ) ; <nl> _richText - > setContentSize ( Size ( 100 , 100 ) ) ; <nl> <nl> - RichElementText * re1 = RichElementText : : create ( 1 , Color3B : : WHITE , 255 , str1 , " Marker Felt " , 10 ) ; <nl> + RichElementText * re1 = RichElementText : : create ( 1 , Color3B : : WHITE , 255 , str1 , " SimSun " , 10 ) ; <nl> RichElementText * re2 = RichElementText : : create ( 2 , Color3B : : YELLOW , 255 , " And this is yellow . " , " Helvetica " , 10 ) ; <nl> - RichElementText * re3 = RichElementText : : create ( 3 , Color3B : : GRAY , 255 , str2 , " Helvetica " , 10 ) ; <nl> + RichElementText * re3 = RichElementText : : create ( 3 , Color3B : : GRAY , 255 , str2 , " Yu Mincho " , 10 ) ; <nl> RichElementText * re4 = RichElementText : : create ( 4 , Color3B : : GREEN , 255 , " And green with TTF support . " , " fonts / Marker Felt . ttf " , 10 ) ; <nl> RichElementText * re5 = RichElementText : : create ( 5 , Color3B : : RED , 255 , " Last one is red " , " Helvetica " , 10 ) ; <nl> <nl>
Merge pull request from WenhaiLin / v3 - UIRichTextTest - fix
cocos2d/cocos2d-x
d59db143705ad9e8d3085a8bb97c1e5d49bc1abe
2015-06-21T01:55:38Z
mmm a / drivers / gles3 / rasterizer_scene_gles3 . cpp <nl> ppp b / drivers / gles3 / rasterizer_scene_gles3 . cpp <nl> void RasterizerSceneGLES3 : : render_scene ( const Transform & p_cam_transform , const <nl> if ( storage - > frame . current_rt - > buffers . active ) { <nl> current_fbo = storage - > frame . current_rt - > buffers . fbo ; <nl> } else { <nl> + if ( storage - > frame . current_rt - > effects . mip_maps [ 0 ] . sizes . size ( ) = = 0 ) { <nl> + ERR_PRINT_ONCE ( " Can ' t use canvas background mode in a render target configured without sampling " ) ; <nl> + return ; <nl> + } <nl> current_fbo = storage - > frame . current_rt - > effects . mip_maps [ 0 ] . sizes [ 0 ] . fbo ; <nl> } <nl> <nl>
throw error when user tries to use Canvas background without sample buffer
godotengine/godot
e65d2184b95ce22553dd6df729e79db3f75c30aa
2019-09-16T04:07:07Z
mmm a / release <nl> ppp b / release <nl> fi <nl> <nl> if [ [ $ TEST ! = ' yes ' ] ] <nl> then <nl> - gen_revision_author <nl> + # now done in external release scripts via - - version <nl> + # gen_revision_author <nl> else <nl> REVISION = 99999 <nl> fi <nl>
Try fix double version up during release
ClickHouse/ClickHouse
8607cac261d39fa501083a04d27dcd1217ff43b4
2017-03-16T16:46:05Z
mmm a / include / swift / AST / DiagnosticsParse . def <nl> ppp b / include / swift / AST / DiagnosticsParse . def <nl> ERROR ( layout_alignment_should_be_positive , none , <nl> ERROR ( expected_rparen_layout_constraint , none , <nl> " expected ' ) ' to complete layout constraint " , ( ) ) <nl> ERROR ( layout_constraints_only_inside_specialize_attr , none , <nl> - " layout constraints are only allowed inside @ _specialize attributes " , ( ) ) <nl> + " layout constraints are only allowed inside ' _specialize ' attributes " , ( ) ) <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> / / Pattern parsing diagnostics <nl> ERROR ( attr_escaping_conflicts_noescape , none , <nl> <nl> / / _specialize <nl> ERROR ( attr_specialize_missing_colon , none , <nl> - " missing ' : ' after % 0 in @ _specialize attribute " , ( StringRef ) ) <nl> + " missing ' : ' after % 0 in ' _specialize ' attribute " , ( StringRef ) ) <nl> <nl> ERROR ( attr_specialize_missing_comma , none , <nl> - " missing ' , ' in @ _specialize attribute " , ( ) ) <nl> + " missing ' , ' in ' _specialize ' attribute " , ( ) ) <nl> <nl> ERROR ( attr_specialize_unknown_parameter_name , none , <nl> - " unknown parameter % 0 in @ _specialize attribute " , ( StringRef ) ) <nl> + " unknown parameter % 0 in ' _specialize attribute ' " , ( StringRef ) ) <nl> <nl> ERROR ( attr_specialize_expected_bool_value , none , <nl> - " expected a boolean true or false value in @ _specialize attribute " , ( ) ) <nl> + " expected a boolean true or false value in ' _specialize ' attribute " , ( ) ) <nl> <nl> ERROR ( attr_specialize_missing_parameter_label_or_where_clause , none , <nl> - " expected a parameter label or a where clause in @ _specialize attribute " , ( ) ) <nl> + " expected a parameter label or a where clause in ' _specialize ' attribute " , ( ) ) <nl> <nl> ERROR ( attr_specialize_parameter_already_defined , none , <nl> - " parameter % 0 was already defined in @ _specialize attribute " , ( StringRef ) ) <nl> + " parameter ' % 0 ' was already defined in ' _specialize ' attribute " , ( StringRef ) ) <nl> <nl> ERROR ( attr_specialize_expected_partial_or_full , none , <nl> - " expected ' partial ' or ' full ' as values of the ' kind ' parameter in @ _specialize attribute " , ( ) ) <nl> + " expected ' partial ' or ' full ' as values of the ' kind ' parameter in ' _specialize ' attribute " , ( ) ) <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> / / Generics parsing diagnostics <nl> mmm a / include / swift / AST / DiagnosticsSema . def <nl> ppp b / include / swift / AST / DiagnosticsSema . 
def <nl> NOTE ( resilience_decl_declared_here , <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> <nl> ERROR ( specialize_attr_nongeneric_trailing_where , none , <nl> - " trailing ' where ' clause in @ _specialize attribute of non - generic function % 0 " , ( Identifier ) ) <nl> + " trailing ' where ' clause in ' _specialize ' attribute of non - generic function % 0 " , ( Identifier ) ) <nl> ERROR ( specialize_missing_where_clause , none , <nl> - " missing ' where ' clause in @ _specialize attribute " , ( ) ) <nl> + " missing ' where ' clause in ' _specialize ' attribute " , ( ) ) <nl> ERROR ( specialize_empty_where_clause , none , <nl> - " empty ' where ' clause in @ _specialize attribute " , ( ) ) <nl> + " empty ' where ' clause in ' _specialize ' attribute " , ( ) ) <nl> ERROR ( specialize_attr_non_concrete_same_type_req , none , <nl> - " Only concrete type same - type requirements are supported by @ _specialize attribute " , ( ) ) <nl> + " Only concrete type same - type requirements are supported by ' _specialize ' attribute " , ( ) ) <nl> ERROR ( specialize_attr_only_generic_param_req , none , <nl> - " Only requirements on generic parameters are supported by @ _specialize attribute " , ( ) ) <nl> + " Only requirements on generic parameters are supported by ' _specialize ' attribute " , ( ) ) <nl> ERROR ( specialize_attr_only_one_concrete_same_type_req , none , <nl> - " Only one concrete type should be used in the same - type requirement in @ _specialize attribute " , ( ) ) <nl> + " Only one concrete type should be used in the same - type requirement in ' _specialize ' attribute " , ( ) ) <nl> ERROR ( specialize_attr_non_nominal_type_constraint_req , none , <nl> - " Only conformances to nominal types are supported by @ _specialize attribute " , ( ) ) <nl> + " Only conformances to nominal types are supported by ' _specialize ' attribute " , ( ) ) <nl> ERROR ( specialize_attr_non_protocol_type_constraint_req , none , <nl> - " Only conformances to protocol types are supported by @ _specialize attribute " , ( ) ) <nl> + " Only conformances to protocol types are supported by ' _specialize ' attribute " , ( ) ) <nl> ERROR ( specialize_attr_type_parameter_count_mismatch , none , <nl> " % select { too many | too few } 2 type parameters are specified " <nl> - " in @ _specialize attribute ( got % 1 , but expected % 0 ) " , <nl> + " in ' _specialize ' attribute ( got % 1 , but expected % 0 ) " , <nl> ( unsigned , unsigned , bool ) ) <nl> ERROR ( specialize_attr_missing_constraint , none , <nl> - " Missing constraint for % 0 in @ _specialize attribute " , ( DeclName ) ) <nl> + " Missing constraint for % 0 in ' _specialize ' attribute " , ( DeclName ) ) <nl> ERROR ( specialize_attr_unsupported_kind_of_req , none , <nl> - " Only same - type and layout requirements are supported by @ _specialize attribute " , ( ) ) <nl> + " Only same - type and layout requirements are supported by ' _specialize ' attribute " , ( ) ) <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> / / Variable usage diagnostics <nl> mmm a / test / attr / attr_specialize . swift <nl> ppp b / test / attr / attr_specialize . 
swift <nl> public func oneGenericParam < T > ( _ t : T ) - > T { <nl> <nl> / / CHECK : @ _specialize ( exported : false , kind : full , where T = = Int , U = = Int ) <nl> @ _specialize ( where T = = Int , U = = Int ) <nl> - @ _specialize ( where T = = Int ) / / expected - error { { too few type parameters are specified in @ _specialize attribute ( got 1 , but expected 2 ) } } expected - error { { Missing constraint for ' U ' in @ _specialize attribute } } <nl> + @ _specialize ( where T = = Int ) / / expected - error { { too few type parameters are specified in ' _specialize ' attribute ( got 1 , but expected 2 ) } } expected - error { { Missing constraint for ' U ' in ' _specialize ' attribute } } <nl> public func twoGenericParams < T , U > ( _ t : T , u : U ) - > ( T , U ) { <nl> return ( t , u ) <nl> } <nl> <nl> - @ _specialize ( where T = = Int ) / / expected - error { { trailing ' where ' clause in @ _specialize attribute of non - generic function ' nonGenericParam ' } } <nl> + @ _specialize ( where T = = Int ) / / expected - error { { trailing ' where ' clause in ' _specialize ' attribute of non - generic function ' nonGenericParam ' } } <nl> func nonGenericParam ( x : Int ) { } <nl> <nl> / / Specialize contextual types . <nl> func nonGenericParam ( x : Int ) { } <nl> class G < T > { <nl> / / CHECK : @ _specialize ( exported : false , kind : full , where T = = Int ) <nl> @ _specialize ( where T = = Int ) <nl> - @ _specialize ( where T = = T ) / / expected - error { { Only concrete type same - type requirements are supported by @ _specialize attribute } } <nl> - @ _specialize ( where T = = S < T > ) / / expected - error { { Only concrete type same - type requirements are supported by @ _specialize attribute } } <nl> + @ _specialize ( where T = = T ) / / expected - error { { Only concrete type same - type requirements are supported by ' _specialize ' attribute } } <nl> + @ _specialize ( where T = = S < T > ) / / expected - error { { Only concrete type same - type requirements are supported by ' _specialize ' attribute } } <nl> @ _specialize ( where T = = Int , U = = Int ) / / expected - error { { use of undeclared type ' U ' } } <nl> func noGenericParams ( ) { } <nl> <nl> class G < T > { <nl> @ _specialize ( where T = = Int , U = = Float ) <nl> / / CHECK : @ _specialize ( exported : false , kind : full , where T = = Int , U = = S < Int > ) <nl> @ _specialize ( where T = = Int , U = = S < Int > ) <nl> - @ _specialize ( where T = = Int ) / / expected - error { { too few type parameters are specified in @ _specialize attribute ( got 1 , but expected 2 ) } } expected - error { { Missing constraint for ' U ' in @ _specialize attribute } } <nl> + @ _specialize ( where T = = Int ) / / expected - error { { too few type parameters are specified in ' _specialize ' attribute ( got 1 , but expected 2 ) } } expected - error { { Missing constraint for ' U ' in ' _specialize ' attribute } } <nl> func oneGenericParam < U > ( _ t : T , u : U ) - > ( U , T ) { <nl> return ( u , t ) <nl> } <nl> func sameTypeRequirement < T : HasElt > ( _ t : T ) where T . 
Element = = Float { } <nl> @ _specialize ( where T = = NonSub ) / / expected - error { { ' T ' requires that ' NonSub ' inherit from ' Base ' } } <nl> func superTypeRequirement < T : Base > ( _ t : T ) { } <nl> <nl> - @ _specialize ( where X : _Trivial ( 8 ) , Y = = Int ) / / expected - error { { trailing ' where ' clause in @ _specialize attribute of non - generic function ' requirementOnNonGenericFunction ' } } <nl> + @ _specialize ( where X : _Trivial ( 8 ) , Y = = Int ) / / expected - error { { trailing ' where ' clause in ' _specialize ' attribute of non - generic function ' requirementOnNonGenericFunction ' } } <nl> public func requirementOnNonGenericFunction ( x : Int , y : Int ) { <nl> } <nl> <nl> - @ _specialize ( where Y = = Int ) / / expected - error { { too few type parameters are specified in @ _specialize attribute ( got 1 , but expected 2 ) } } expected - error { { Missing constraint for ' X ' in @ _specialize attribute } } <nl> + @ _specialize ( where Y = = Int ) / / expected - error { { too few type parameters are specified in ' _specialize ' attribute ( got 1 , but expected 2 ) } } expected - error { { Missing constraint for ' X ' in ' _specialize ' attribute } } <nl> public func missingRequirement < X : P , Y > ( x : X , y : Y ) { <nl> } <nl> <nl> @ _specialize ( where ) / / expected - error { { expected identifier for type name } } <nl> - @ _specialize ( ) / / expected - error { { expected a parameter label or a where clause in @ _specialize attribute } } expected - error { { expected declaration } } <nl> + @ _specialize ( ) / / expected - error { { expected a parameter label or a where clause in ' _specialize ' attribute } } expected - error { { expected declaration } } <nl> public func funcWithEmptySpecializeAttr < X : P , Y > ( x : X , y : Y ) { <nl> } <nl> <nl> <nl> @ _specialize ( where X : _Trivial ( 8 ) , Y : _Trivial ( 32 ) , Z = = Int ) / / expected - error { { use of undeclared type ' Z ' } } <nl> @ _specialize ( where X : _Trivial ( 8 ) , Y : _Trivial ( 32 , 4 ) ) <nl> - @ _specialize ( where X = = Int ) / / expected - error { { too few type parameters are specified in @ _specialize attribute ( got 1 , but expected 2 ) } } expected - error { { Missing constraint for ' Y ' in @ _specialize attribute } } <nl> - @ _specialize ( where Y : _Trivial ( 32 ) ) / / expected - error { { too few type parameters are specified in @ _specialize attribute ( got 1 , but expected 2 ) } } expected - error { { Missing constraint for ' X ' in @ _specialize attribute } } <nl> - @ _specialize ( where Y : P ) / / expected - error { { Only same - type and layout requirements are supported by @ _specialize attribute } } expected - error { { too few type parameters are specified in @ _specialize attribute ( got 1 , but expected 2 ) } } expected - error { { Missing constraint for ' X ' in @ _specialize attribute } } <nl> - @ _specialize ( where Y : MyClass ) / / expected - error { { use of undeclared type ' MyClass ' } } expected - error { { too few type parameters are specified in @ _specialize attribute ( got 1 , but expected 2 ) } } expected - error { { Missing constraint for ' X ' in @ _specialize attribute } } <nl> + @ _specialize ( where X = = Int ) / / expected - error { { too few type parameters are specified in ' _specialize ' attribute ( got 1 , but expected 2 ) } } expected - error { { Missing constraint for ' Y ' in ' _specialize ' attribute } } <nl> + @ _specialize ( where Y : _Trivial ( 32 ) ) / / expected - error { { too few type parameters are specified in ' _specialize 
' attribute ( got 1 , but expected 2 ) } } expected - error { { Missing constraint for ' X ' in ' _specialize ' attribute } } <nl> + @ _specialize ( where Y : P ) / / expected - error { { Only same - type and layout requirements are supported by ' _specialize ' attribute } } expected - error { { too few type parameters are specified in ' _specialize ' attribute ( got 1 , but expected 2 ) } } expected - error { { Missing constraint for ' X ' in ' _specialize ' attribute } } <nl> + @ _specialize ( where Y : MyClass ) / / expected - error { { use of undeclared type ' MyClass ' } } expected - error { { too few type parameters are specified in ' _specialize ' attribute ( got 1 , but expected 2 ) } } expected - error { { Missing constraint for ' X ' in ' _specialize ' attribute } } <nl> @ _specialize ( where X : _Trivial ( 8 ) , Y = = Int ) <nl> @ _specialize ( where X = = Int , Y = = Int ) <nl> - @ _specialize ( where X = = Int , X = = Int ) / / expected - error { { too few type parameters are specified in @ _specialize attribute ( got 1 , but expected 2 ) } } expected - error { { Missing constraint for ' Y ' in @ _specialize attribute } } <nl> + @ _specialize ( where X = = Int , X = = Int ) / / expected - error { { too few type parameters are specified in ' _specialize ' attribute ( got 1 , but expected 2 ) } } expected - error { { Missing constraint for ' Y ' in ' _specialize ' attribute } } <nl> @ _specialize ( where Y : _Trivial ( 32 ) , X = = Float ) <nl> - @ _specialize ( where X1 = = Int , Y1 = = Int ) / / expected - error { { use of undeclared type ' X1 ' } } expected - error { { use of undeclared type ' Y1 ' } } expected - error { { too few type parameters are specified in @ _specialize attribute ( got 0 , but expected 2 ) } } expected - error { { Missing constraint for ' X ' in @ _specialize attribute } } expected - error { { Missing constraint for ' Y ' in @ _specialize attribute } } <nl> + @ _specialize ( where X1 = = Int , Y1 = = Int ) / / expected - error { { use of undeclared type ' X1 ' } } expected - error { { use of undeclared type ' Y1 ' } } expected - error { { too few type parameters are specified in ' _specialize ' attribute ( got 0 , but expected 2 ) } } expected - error { { Missing constraint for ' X ' in ' _specialize ' attribute } } expected - error { { Missing constraint for ' Y ' in ' _specialize ' attribute } } <nl> public func funcWithTwoGenericParameters < X , Y > ( x : X , y : Y ) { <nl> } <nl> <nl> @ _specialize ( where X = = Int , Y = = Int ) <nl> @ _specialize ( exported : true , where X = = Int , Y = = Int ) <nl> @ _specialize ( exported : false , where X = = Int , Y = = Int ) <nl> - @ _specialize ( exported : false where X = = Int , Y = = Int ) / / expected - error { { missing ' , ' in @ _specialize attribute } } <nl> - @ _specialize ( exported : yes , where X = = Int , Y = = Int ) / / expected - error { { expected a boolean true or false value in @ _specialize attribute } } <nl> - @ _specialize ( exported : , where X = = Int , Y = = Int ) / / expected - error { { expected a boolean true or false value in @ _specialize attribute } } <nl> + @ _specialize ( exported : false where X = = Int , Y = = Int ) / / expected - error { { missing ' , ' in ' _specialize ' attribute } } <nl> + @ _specialize ( exported : yes , where X = = Int , Y = = Int ) / / expected - error { { expected a boolean true or false value in ' _specialize ' attribute } } <nl> + @ _specialize ( exported : , where X = = Int , Y = = Int ) / / expected - error { { expected a boolean true or false 
value in ' _specialize ' attribute } } <nl> <nl> @ _specialize ( kind : partial , where X = = Int , Y = = Int ) <nl> @ _specialize ( kind : partial , where X = = Int ) <nl> @ _specialize ( kind : full , where X = = Int , Y = = Int ) <nl> - @ _specialize ( kind : any , where X = = Int , Y = = Int ) / / expected - error { { expected ' partial ' or ' full ' as values of the ' kind ' parameter in @ _specialize attribute } } <nl> - @ _specialize ( kind : false , where X = = Int , Y = = Int ) / / expected - error { { expected ' partial ' or ' full ' as values of the ' kind ' parameter in @ _specialize attribute } } <nl> - @ _specialize ( kind : partial where X = = Int , Y = = Int ) / / expected - error { { missing ' , ' in @ _specialize attribute } } <nl> + @ _specialize ( kind : any , where X = = Int , Y = = Int ) / / expected - error { { expected ' partial ' or ' full ' as values of the ' kind ' parameter in ' _specialize ' attribute } } <nl> + @ _specialize ( kind : false , where X = = Int , Y = = Int ) / / expected - error { { expected ' partial ' or ' full ' as values of the ' kind ' parameter in ' _specialize ' attribute } } <nl> + @ _specialize ( kind : partial where X = = Int , Y = = Int ) / / expected - error { { missing ' , ' in ' _specialize ' attribute } } <nl> @ _specialize ( kind : partial , where X = = Int , Y = = Int ) <nl> @ _specialize ( kind : , where X = = Int , Y = = Int ) <nl> <nl> @ _specialize ( exported : true , kind : partial , where X = = Int , Y = = Int ) <nl> - @ _specialize ( exported : true , exported : true , where X = = Int , Y = = Int ) / / expected - error { { parameter exported was already defined in @ _specialize attribute } } <nl> + @ _specialize ( exported : true , exported : true , where X = = Int , Y = = Int ) / / expected - error { { parameter ' exported ' was already defined in ' _specialize ' attribute } } <nl> @ _specialize ( kind : partial , exported : true , where X = = Int , Y = = Int ) <nl> - @ _specialize ( kind : partial , kind : partial , where X = = Int , Y = = Int ) / / expected - error { { parameter kind was already defined in @ _specialize attribute } } <nl> + @ _specialize ( kind : partial , kind : partial , where X = = Int , Y = = Int ) / / expected - error { { parameter ' kind ' was already defined in ' _specialize ' attribute } } <nl> <nl> @ _specialize ( where X = = Int , Y = = Int , exported : true , kind : partial ) / / expected - error { { use of undeclared type ' exported ' } } expected - error { { use of undeclared type ' kind ' } } expected - error { { use of undeclared type ' partial ' } } expected - error { { expected identifier for type name } } <nl> public func anotherFuncWithTwoGenericParameters < X : P , Y > ( x : X , y : Y ) { <nl> } <nl> <nl> - @ _specialize ( where T : P ) / / expected - error { { Only same - type and layout requirements are supported by @ _specialize attribute } } <nl> - @ _specialize ( where T : Int ) / / expected - error { { Only conformances to protocol types are supported by @ _specialize attribute } } expected - error { { Only same - type and layout requirements are supported by @ _specialize attribute } } <nl> + @ _specialize ( where T : P ) / / expected - error { { Only same - type and layout requirements are supported by ' _specialize ' attribute } } <nl> + @ _specialize ( where T : Int ) / / expected - error { { Only conformances to protocol types are supported by ' _specialize ' attribute } } expected - error { { Only same - type and layout requirements are supported by ' _specialize ' 
attribute } } <nl> <nl> - @ _specialize ( where T : S1 ) / / expected - error { { Only conformances to protocol types are supported by @ _specialize attribute } } expected - error { { Only same - type and layout requirements are supported by @ _specialize attribute } } <nl> - @ _specialize ( where T : C1 ) / / expected - error { { Only conformances to protocol types are supported by @ _specialize attribute } } expected - error { { Only same - type and layout requirements are supported by @ _specialize attribute } } <nl> - @ _specialize ( where Int : P ) / / expected - error { { type ' Int ' in conformance requirement does not refer to a generic parameter or associated type } } expected - error { { Only same - type and layout requirements are supported by @ _specialize attribute } } expected - error { { too few type parameters are specified in @ _specialize attribute ( got 0 , but expected 1 ) } } expected - error { { Missing constraint for ' T ' in @ _specialize attribute } } <nl> + @ _specialize ( where T : S1 ) / / expected - error { { Only conformances to protocol types are supported by ' _specialize ' attribute } } expected - error { { Only same - type and layout requirements are supported by ' _specialize ' attribute } } <nl> + @ _specialize ( where T : C1 ) / / expected - error { { Only conformances to protocol types are supported by ' _specialize ' attribute } } expected - error { { Only same - type and layout requirements are supported by ' _specialize ' attribute } } <nl> + @ _specialize ( where Int : P ) / / expected - error { { type ' Int ' in conformance requirement does not refer to a generic parameter or associated type } } expected - error { { Only same - type and layout requirements are supported by ' _specialize ' attribute } } expected - error { { too few type parameters are specified in ' _specialize ' attribute ( got 0 , but expected 1 ) } } expected - error { { Missing constraint for ' T ' in ' _specialize ' attribute } } <nl> func funcWithForbiddenSpecializeRequirement < T > ( _ t : T ) { <nl> } <nl> <nl> @ _specialize ( where T : _Trivial ( 32 ) , T : _Trivial ( 64 ) , T : _Trivial , T : _RefCountedObject ) / / expected - error { { multiple layout constraints cannot be used at the same time : ' _Trivial ( 64 ) ' and ' _Trivial ( 32 ) ' } } expected - note { { previous layout constraint declaration ' _Trivial ( 32 ) ' was here } } expected - error { { multiple layout constraints cannot be used at the same time : ' _Trivial ' and ' _Trivial ( 32 ) ' } } expected - note { { previous layout constraint declaration ' _Trivial ( 32 ) ' was here } } expected - error { { multiple layout constraints cannot be used at the same time : ' _RefCountedObject ' and ' _Trivial ( 32 ) ' } } expected - note { { previous layout constraint declaration ' _Trivial ( 32 ) ' was here } } <nl> - @ _specialize ( where Array < T > = = Int ) / / expected - error { { neither type in same - type refers to a generic parameter or associated type } } expected - error { { Only requirements on generic parameters are supported by @ _specialize attribute } } <nl> - @ _specialize ( where T . 
Element = = Int ) / / expected - error { { Only requirements on generic parameters are supported by @ _specialize attribute } } <nl> + @ _specialize ( where Array < T > = = Int ) / / expected - error { { neither type in same - type refers to a generic parameter or associated type } } expected - error { { Only requirements on generic parameters are supported by ' _specialize ' attribute } } <nl> + @ _specialize ( where T . Element = = Int ) / / expected - error { { Only requirements on generic parameters are supported by ' _specialize ' attribute } } <nl> public func funcWithComplexSpecializeRequirements < T : ProtocolWithDep > ( t : T ) - > Int { <nl> return 55555 <nl> } <nl> public func copyValueAndReturn < S > ( _ t : S , s : inout S ) - > S where S : P { <nl> <nl> struct OuterStruct < S > { <nl> struct MyStruct < T > { <nl> - @ _specialize ( where T = = Int , U = = Float ) / / expected - error { { too few type parameters are specified in @ _specialize attribute ( got 2 , but expected 3 ) } } expected - error { { Missing constraint for ' S ' in @ _specialize attribute } } <nl> + @ _specialize ( where T = = Int , U = = Float ) / / expected - error { { too few type parameters are specified in ' _specialize ' attribute ( got 2 , but expected 3 ) } } expected - error { { Missing constraint for ' S ' in ' _specialize ' attribute } } <nl> public func foo < U > ( u : U ) { <nl> } <nl> <nl> public func copy3 < S > ( _ s : S ) - > S { <nl> return s <nl> } <nl> <nl> - public func funcWithWhereClause < T > ( t : T ) where T : P , T : _Trivial ( 64 ) { / / expected - error { { layout constraints are only allowed inside @ _specialize attributes } } <nl> + public func funcWithWhereClause < T > ( t : T ) where T : P , T : _Trivial ( 64 ) { / / expected - error { { layout constraints are only allowed inside ' _specialize ' attributes } } <nl> } <nl>
Make diagnostics for @ _specialize look similar to other attributes
apple/swift
88d6e5c43b94325413c4a2f24a4634fe7e5f0fc5
2017-01-19T00:43:42Z
mmm a / folly / io / async / AsyncSocket . cpp <nl> ppp b / folly / io / async / AsyncSocket . cpp <nl> <nl> # include < errno . h > <nl> # include < limits . h > <nl> # include < sys / types . h > <nl> + # include < sstream > <nl> # include < thread > <nl> <nl> + # if __linux__ <nl> + # include < linux / sockios . h > <nl> + # include < sys / ioctl . h > <nl> + # endif <nl> + <nl> # if FOLLY_HAVE_VLA <nl> # define FOLLY_HAVE_VLA_01 1 <nl> # else <nl> int AsyncSocket : : setRecvBufSize ( size_t bufsize ) { <nl> return 0 ; <nl> } <nl> <nl> + # if __linux__ <nl> + size_t AsyncSocket : : getSendBufInUse ( ) const { <nl> + if ( fd_ = = NetworkSocket ( ) ) { <nl> + std : : stringstream issueString ; <nl> + issueString < < " AsyncSocket : : getSendBufInUse ( ) called on non - open socket " <nl> + < < this < < " ( state = " < < state_ < < " ) " ; <nl> + VLOG ( 4 ) < < issueString . str ( ) ; <nl> + throw std : : logic_error ( issueString . str ( ) ) ; <nl> + } <nl> + <nl> + size_t returnValue = 0 ; <nl> + if ( - 1 = = : : ioctl ( fd_ . toFd ( ) , SIOCOUTQ , & returnValue ) ) { <nl> + int errnoCopy = errno ; <nl> + std : : stringstream issueString ; <nl> + issueString < < " Failed to get the tx used bytes on Socket : " < < this <nl> + < < " ( fd = " < < fd_ < < " , state = " < < state_ <nl> + < < " ) : " < < errnoStr ( errnoCopy ) ; <nl> + VLOG ( 2 ) < < issueString . str ( ) ; <nl> + throw std : : logic_error ( issueString . str ( ) ) ; <nl> + } <nl> + <nl> + return returnValue ; <nl> + } <nl> + <nl> + size_t AsyncSocket : : getRecvBufInUse ( ) const { <nl> + if ( fd_ = = NetworkSocket ( ) ) { <nl> + std : : stringstream issueString ; <nl> + issueString < < " AsyncSocket : : getRecvBufInUse ( ) called on non - open socket " <nl> + < < this < < " ( state = " < < state_ < < " ) " ; <nl> + VLOG ( 4 ) < < issueString . str ( ) ; <nl> + throw std : : logic_error ( issueString . str ( ) ) ; <nl> + } <nl> + <nl> + size_t returnValue = 0 ; <nl> + if ( - 1 = = : : ioctl ( fd_ . toFd ( ) , SIOCINQ , & returnValue ) ) { <nl> + std : : stringstream issueString ; <nl> + int errnoCopy = errno ; <nl> + issueString < < " Failed to get the rx used bytes on Socket : " < < this <nl> + < < " ( fd = " < < fd_ < < " , state = " < < state_ <nl> + < < " ) : " < < errnoStr ( errnoCopy ) ; <nl> + VLOG ( 2 ) < < issueString . str ( ) ; <nl> + throw std : : logic_error ( issueString . str ( ) ) ; <nl> + } <nl> + <nl> + return returnValue ; <nl> + } <nl> + # endif <nl> + <nl> int AsyncSocket : : setTCPProfile ( int profd ) { <nl> if ( fd_ = = NetworkSocket ( ) ) { <nl> VLOG ( 4 ) < < " AsyncSocket : : setTCPProfile ( ) called on non - open socket " < < this <nl> mmm a / folly / io / async / AsyncSocket . h <nl> ppp b / folly / io / async / AsyncSocket . 
h <nl> class AsyncSocket : virtual public AsyncTransportWrapper { <nl> * / <nl> int setRecvBufSize ( size_t bufsize ) ; <nl> <nl> + # if __linux__ <nl> + / * * <nl> + * @ brief This method is used to get the number of bytes that are currently <nl> + * stored in the TCP send / tx buffer <nl> + * <nl> + * @ return the number of bytes in the send / tx buffer or folly : : none if there <nl> + * was a problem <nl> + * / <nl> + size_t getSendBufInUse ( ) const ; <nl> + <nl> + / * * <nl> + * @ brief This method is used to get the number of bytes that are currently <nl> + * stored in the TCP receive / rx buffer <nl> + * <nl> + * @ return the number of bytes in the receive / rx buffer or folly : : none if <nl> + * there was a problem <nl> + * / <nl> + size_t getRecvBufInUse ( ) const ; <nl> + # endif <nl> + <nl> / * * <nl> * Sets a specific tcp personality <nl> * Available only on kernels 3 . 2 and greater <nl> mmm a / folly / io / async / test / AsyncSocketTest2 . cpp <nl> ppp b / folly / io / async / test / AsyncSocketTest2 . cpp <nl> TEST ( AsyncSocketTest , V4TosReflectTest ) { <nl> ASSERT_EQ ( value , 0x2c ) ; <nl> } <nl> # endif <nl> + <nl> + # if __linux__ <nl> + TEST ( AsyncSocketTest , getBufInUse ) { <nl> + EventBase eventBase ; <nl> + std : : shared_ptr < AsyncServerSocket > server ( <nl> + AsyncServerSocket : : newSocket ( & eventBase ) ) ; <nl> + server - > bind ( 0 ) ; <nl> + server - > listen ( 5 ) ; <nl> + <nl> + std : : shared_ptr < AsyncSocket > client = AsyncSocket : : newSocket ( & eventBase ) ; <nl> + client - > connect ( nullptr , server - > getAddress ( ) ) ; <nl> + <nl> + NetworkSocket servfd = server - > getNetworkSocket ( ) ; <nl> + <nl> + auto clientAccepted = <nl> + AsyncSocket : : newSocket ( nullptr , netops : : accept ( servfd , nullptr , nullptr ) ) ; <nl> + <nl> + clientAccepted - > setRecvBufSize ( 3000 ) ; <nl> + <nl> + std : : string testData ; <nl> + <nl> + for ( int i = 0 ; i < 10000 ; + + i ) { <nl> + testData + = " 0123456789 " ; <nl> + } <nl> + <nl> + client - > write ( nullptr , ( const void * ) testData . c_str ( ) , testData . size ( ) ) ; <nl> + <nl> + eventBase . loop ( ) ; <nl> + <nl> + size_t recvBufSize = clientAccepted - > getRecvBufInUse ( ) ; <nl> + size_t sendBufSize = client - > getSendBufInUse ( ) ; <nl> + <nl> + EXPECT_EQ ( ( recvBufSize + sendBufSize ) , testData . size ( ) ) ; <nl> + EXPECT_GE ( recvBufSize , 0 ) ; <nl> + EXPECT_GE ( sendBufSize , 0 ) ; <nl> + } <nl> + # endif <nl>
Adding method getSendBufInUse and getRecvBufInUse to
facebook/folly
a0394d84f2d5c3e50ebfd0566f9d3acb52cfab5a
2019-06-27T19:02:59Z
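The folly commit above reads the number of bytes currently sitting in a TCP socket's send and receive buffers via the Linux-only SIOCOUTQ / SIOCINQ ioctls. The following is a standalone sketch of the same technique on a plain file descriptor; it assumes a connected Linux TCP socket `fd` owned by the caller and is an illustration, not folly's actual API.

```cpp
// Minimal Linux-only sketch: query TCP buffer occupancy with the same
// SIOCOUTQ / SIOCINQ ioctls used in the folly change. Error handling is
// reduced to a return code for brevity.
#include <cstdio>
#include <linux/sockios.h>  // SIOCOUTQ, SIOCINQ
#include <sys/ioctl.h>

// Returns 0 on success and fills the two counters, -1 if an ioctl fails.
int tcpBufferUsage(int fd, int* unsentTx, int* unreadRx) {
    if (ioctl(fd, SIOCOUTQ, unsentTx) == -1) return -1;  // bytes queued in the send buffer
    if (ioctl(fd, SIOCINQ, unreadRx) == -1) return -1;   // bytes waiting in the receive buffer
    return 0;
}

// Example use (assumes `sock` is a connected TCP socket):
//   int tx = 0, rx = 0;
//   if (tcpBufferUsage(sock, &tx, &rx) == 0)
//       std::printf("tx in flight: %d bytes, rx pending: %d bytes\n", tx, rx);
//   else
//       std::perror("ioctl");
```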
mmm a / editor / import / resource_importer_texture . cpp <nl> ppp b / editor / import / resource_importer_texture . cpp <nl> bool ResourceImporterTexture : : get_option_visibility ( const String & p_option , cons <nl> } <nl> <nl> int ResourceImporterTexture : : get_preset_count ( ) const { <nl> - return 4 ; <nl> + return 3 ; <nl> } <nl> <nl> String ResourceImporterTexture : : get_preset_name ( int p_idx ) const { <nl> static const char * preset_names [ ] = { <nl> - " 2D , Detect 3D " , <nl> + " 2D / 3D ( Auto - Detect ) " , <nl> " 2D " , <nl> - " 2D Pixel " , <nl> - " 3D " <nl> + " 3D " , <nl> } ; <nl> <nl> return preset_names [ p_idx ] ; <nl> mmm a / editor / import / resource_importer_texture . h <nl> ppp b / editor / import / resource_importer_texture . h <nl> class ResourceImporterTexture : public ResourceImporter { <nl> enum Preset { <nl> PRESET_DETECT , <nl> PRESET_2D , <nl> - PRESET_2D_PIXEL , <nl> PRESET_3D , <nl> } ; <nl> <nl>
Remove the obsolete " 2D Pixel " import preset
godotengine/godot
26161de739803f3f80c76de01fd4032d39d48e47
2020-06-14T13:08:09Z
mmm a / arangod / Aql / ConditionFinder . cpp <nl> ppp b / arangod / Aql / ConditionFinder . cpp <nl> bool ConditionFinder : : before ( ExecutionNode * en ) { <nl> } <nl> <nl> case EN : : CALCULATION : { <nl> - auto outvars = en - > getVariablesSetHere ( ) ; <nl> - TRI_ASSERT ( outvars . size ( ) = = 1 ) ; <nl> - <nl> _variableDefinitions . emplace ( <nl> - outvars [ 0 ] - > id , <nl> + ExecutionNode : : castTo < CalculationNode const * > ( en ) - > outVariable ( ) - > id , <nl> ExecutionNode : : castTo < CalculationNode const * > ( en ) - > expression ( ) - > node ( ) ) ; <nl> TRI_IF_FAILURE ( " ConditionFinder : : variableDefinition " ) { <nl> THROW_ARANGO_EXCEPTION ( TRI_ERROR_DEBUG ) ; <nl> mmm a / arangod / Aql / OptimizerRules . cpp <nl> ppp b / arangod / Aql / OptimizerRules . cpp <nl> void arangodb : : aql : : removeRedundantCalculationsRule ( Optimizer * opt , <nl> continue ; <nl> } <nl> <nl> - auto outvar = n - > getVariablesSetHere ( ) ; <nl> - TRI_ASSERT ( outvar . size ( ) = = 1 ) ; <nl> + arangodb : : aql : : Variable const * outvar = nn - > outVariable ( ) ; <nl> <nl> try { <nl> nn - > expression ( ) - > stringifyIfNotTooLong ( & buffer ) ; <nl> void arangodb : : aql : : removeRedundantCalculationsRule ( Optimizer * opt , <nl> <nl> if ( isEqual ) { <nl> / / expressions are identical <nl> - auto outvars = current - > getVariablesSetHere ( ) ; <nl> - TRI_ASSERT ( outvars . size ( ) = = 1 ) ; <nl> - <nl> / / check if target variable is already registered as a replacement <nl> / / this covers the following case : <nl> / / - replacements is set to B = > C <nl> / / - we ' re now inserting a replacement A = > B <nl> / / the goal now is to enter a replacement A = > C instead of A = > B <nl> - auto target = outvars [ 0 ] ; <nl> + auto target = ExecutionNode : : castTo < CalculationNode const * > ( current ) - > outVariable ( ) ; <nl> while ( target ! = nullptr ) { <nl> auto it = replacements . find ( target - > id ) ; <nl> <nl> void arangodb : : aql : : removeRedundantCalculationsRule ( Optimizer * opt , <nl> break ; <nl> } <nl> } <nl> - replacements . emplace ( outvar [ 0 ] - > id , target ) ; <nl> + replacements . emplace ( outvar - > id , target ) ; <nl> <nl> / / also check if the insertion enables further shortcuts <nl> / / this covers the following case : <nl> void arangodb : : aql : : removeRedundantCalculationsRule ( Optimizer * opt , <nl> / / - we have just inserted a replacement B = > C <nl> / / the goal now is to change the replacement A = > B to A = > C <nl> for ( auto it = replacements . begin ( ) ; it ! = replacements . end ( ) ; + + it ) { <nl> - if ( ( * it ) . second = = outvar [ 0 ] ) { <nl> + if ( ( * it ) . second = = outvar ) { <nl> ( * it ) . second = target ; <nl> } <nl> } <nl> void arangodb : : aql : : removeUnnecessaryCalculationsRule ( Optimizer * opt , <nl> arangodb : : HashSet < ExecutionNode * > toUnlink ; <nl> <nl> for ( auto const & n : nodes ) { <nl> + arangodb : : aql : : Variable const * outVariable = nullptr ; <nl> + <nl> if ( n - > getType ( ) = = EN : : CALCULATION ) { <nl> auto nn = ExecutionNode : : castTo < CalculationNode * > ( n ) ; <nl> <nl> void arangodb : : aql : : removeUnnecessaryCalculationsRule ( Optimizer * opt , <nl> / / If this node is non - deterministic , we must not optimize it away ! 
<nl> continue ; <nl> } <nl> + <nl> + outVariable = nn - > outVariable ( ) ; <nl> / / will remove calculation when we get here <nl> } else if ( n - > getType ( ) = = EN : : SUBQUERY ) { <nl> auto nn = ExecutionNode : : castTo < SubqueryNode * > ( n ) ; <nl> void arangodb : : aql : : removeUnnecessaryCalculationsRule ( Optimizer * opt , <nl> continue ; <nl> } <nl> / / will remove subquery when we get here <nl> + outVariable = nn - > outVariable ( ) ; <nl> + } else { <nl> + TRI_ASSERT ( false ) ; <nl> + continue ; <nl> } <nl> <nl> - auto outvars = n - > getVariablesSetHere ( ) ; <nl> - TRI_ASSERT ( outvars . size ( ) = = 1 ) ; <nl> + TRI_ASSERT ( outVariable ! = nullptr ) ; <nl> <nl> - if ( ! n - > isVarUsedLater ( outvars [ 0 ] ) ) { <nl> + if ( ! n - > isVarUsedLater ( outVariable ) ) { <nl> / / The variable whose value is calculated here is not used at <nl> / / all further down the pipeline ! We remove the whole <nl> / / calculation node , <nl> void arangodb : : aql : : removeUnnecessaryCalculationsRule ( Optimizer * opt , <nl> if ( ! hasCollectWithOutVariable ) { <nl> / / no COLLECT found , now replace <nl> std : : unordered_map < VariableId , Variable const * > replacements ; <nl> - replacements . emplace ( outvars [ 0 ] - > id , <nl> + replacements . emplace ( outVariable - > id , <nl> static_cast < Variable const * > ( rootNode - > getData ( ) ) ) ; <nl> <nl> RedundantCalculationsReplacer finder ( plan - > getAst ( ) , replacements ) ; <nl> void arangodb : : aql : : removeUnnecessaryCalculationsRule ( Optimizer * opt , <nl> <nl> while ( current ! = nullptr ) { <nl> current - > getVariablesUsedHere ( vars ) ; <nl> - if ( vars . find ( outvars [ 0 ] ) ! = vars . end ( ) ) { <nl> + if ( vars . find ( outVariable ) ! = vars . end ( ) ) { <nl> if ( current - > getType ( ) = = EN : : COLLECT ) { <nl> if ( ExecutionNode : : castTo < CollectNode const * > ( current ) - > hasOutVariableButNoCount ( ) ) { <nl> / / COLLECT with an INTO variable will collect all variables from <nl> void arangodb : : aql : : removeUnnecessaryCalculationsRule ( Optimizer * opt , <nl> TRI_ASSERT ( otherExpression ! = nullptr ) ; <nl> <nl> if ( rootNode - > type ! = NODE_TYPE_ATTRIBUTE_ACCESS & & <nl> - Ast : : countReferences ( otherExpression - > node ( ) , outvars [ 0 ] ) > 1 ) { <nl> + Ast : : countReferences ( otherExpression - > node ( ) , outVariable ) > 1 ) { <nl> / / used more than once . . . better give up <nl> continue ; <nl> } <nl> void arangodb : : aql : : removeUnnecessaryCalculationsRule ( Optimizer * opt , <nl> } <nl> <nl> TRI_ASSERT ( other ! = nullptr ) ; <nl> - otherExpression - > replaceVariableReference ( outvars [ 0 ] , rootNode ) ; <nl> + otherExpression - > replaceVariableReference ( outVariable , rootNode ) ; <nl> <nl> toUnlink . emplace ( n ) ; <nl> } <nl> struct SortToIndexNode final : public WalkerWorker < ExecutionNode > { <nl> return false ; / / skip . we don ' t care . <nl> <nl> case EN : : CALCULATION : { <nl> - auto outvars = en - > getVariablesSetHere ( ) ; <nl> - TRI_ASSERT ( outvars . size ( ) = = 1 ) ; <nl> - <nl> _variableDefinitions . 
emplace ( <nl> - outvars [ 0 ] - > id , <nl> + ExecutionNode : : castTo < CalculationNode const * > ( en ) - > outVariable ( ) - > id , <nl> ExecutionNode : : castTo < CalculationNode const * > ( en ) - > expression ( ) - > node ( ) ) ; <nl> return false ; <nl> } <nl> void arangodb : : aql : : scatterInClusterRule ( Optimizer * opt , std : : unique_ptr < Executi <nl> vocbase = idxNode - > vocbase ( ) ; <nl> collection = idxNode - > collection ( ) ; <nl> TRI_ASSERT ( collection ! = nullptr ) ; <nl> - auto outVars = idxNode - > getVariablesSetHere ( ) ; <nl> - TRI_ASSERT ( outVars . size ( ) = = 1 ) ; <nl> - Variable const * sortVariable = outVars [ 0 ] ; <nl> + Variable const * sortVariable = idxNode - > outVariable ( ) ; <nl> bool isSortAscending = idxNode - > options ( ) . ascending ; <nl> auto allIndexes = idxNode - > getIndexes ( ) ; <nl> TRI_ASSERT ( ! allIndexes . empty ( ) ) ; <nl> class RemoveToEnumCollFinder final : public WalkerWorker < ExecutionNode > { <nl> auto expr = cn - > expression ( ) ; <nl> if ( expr - > isAttributeAccess ( ) ) { <nl> / / check the variable is the same as the remove variable <nl> - auto vars = cn - > getVariablesSetHere ( ) ; <nl> - if ( vars . size ( ) ! = 1 | | vars [ 0 ] ! = rn - > inVariable ( ) ) { <nl> + if ( cn - > outVariable ( ) ! = rn - > inVariable ( ) ) { <nl> break ; / / abort . . . <nl> } <nl> / / check the remove node ' s collection is sharded over _key <nl> class RemoveToEnumCollFinder final : public WalkerWorker < ExecutionNode > { <nl> auto fn = ExecutionNode : : castTo < FilterNode const * > ( _lastNode ) ; <nl> <nl> / / check these are a Calc - Filter pair <nl> - if ( cn - > getVariablesSetHere ( ) [ 0 ] ! = fn - > inVariable ( ) ) { <nl> + if ( cn - > outVariable ( ) ! = fn - > inVariable ( ) ) { <nl> break ; / / abort . . . <nl> } <nl> <nl> void arangodb : : aql : : replaceOrWithInRule ( Optimizer * opt , std : : unique_ptr < Executio <nl> <nl> auto fn = ExecutionNode : : castTo < FilterNode const * > ( n ) ; <nl> auto cn = ExecutionNode : : castTo < CalculationNode * > ( dep ) ; <nl> - auto outVar = cn - > getVariablesSetHere ( ) ; <nl> + auto outVar = cn - > outVariable ( ) ; <nl> <nl> - if ( outVar . size ( ) ! = 1 | | outVar [ 0 ] ! = fn - > inVariable ( ) ) { <nl> + if ( outVar ! = fn - > inVariable ( ) ) { <nl> continue ; <nl> } <nl> <nl> void arangodb : : aql : : replaceOrWithInRule ( Optimizer * opt , std : : unique_ptr < Executio <nl> THROW_ARANGO_EXCEPTION ( TRI_ERROR_DEBUG ) ; <nl> } <nl> <nl> - newNode = new CalculationNode ( plan . get ( ) , plan - > nextId ( ) , expr , outVar [ 0 ] ) ; <nl> + newNode = new CalculationNode ( plan . get ( ) , plan - > nextId ( ) , expr , outVar ) ; <nl> } catch ( . . . ) { <nl> delete expr ; <nl> throw ; <nl> void arangodb : : aql : : removeRedundantOrRule ( Optimizer * opt , <nl> <nl> auto fn = ExecutionNode : : castTo < FilterNode const * > ( n ) ; <nl> auto cn = ExecutionNode : : castTo < CalculationNode * > ( dep ) ; <nl> - auto outVar = cn - > getVariablesSetHere ( ) ; <nl> + auto outVar = cn - > outVariable ( ) ; <nl> <nl> - if ( outVar . size ( ) ! = 1 | | outVar [ 0 ] ! = fn - > inVariable ( ) ) { <nl> + if ( outVar ! = fn - > inVariable ( ) ) { <nl> continue ; <nl> } <nl> if ( cn - > expression ( ) - > node ( ) - > type ! = NODE_TYPE_OPERATOR_BINARY_OR ) { <nl> void arangodb : : aql : : removeRedundantOrRule ( Optimizer * opt , <nl> Expression * expr = new Expression ( plan . get ( ) , plan - > getAst ( ) , astNode ) ; <nl> <nl> try { <nl> - newNode = new CalculationNode ( plan . 
get ( ) , plan - > nextId ( ) , expr , outVar [ 0 ] ) ; <nl> + newNode = new CalculationNode ( plan . get ( ) , plan - > nextId ( ) , expr , outVar ) ; <nl> } catch ( . . . ) { <nl> delete expr ; <nl> throw ; <nl>
avoid a few dynamic memory allocations in the optimizer ( )
arangodb/arangodb
be53ccda9708613eade6fff89b6c542e37efac5b
2019-03-13T11:55:19Z
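Note on the arangodb record above: the patch replaces calls to the generic ExecutionNode::getVariablesSetHere(), which returns a std::vector, with the node-specific outVariable() accessor on nodes that are known to set exactly one variable, so the temporary vector (and its heap allocation) disappears from the optimizer's hot paths and the size()==1 assertion becomes unnecessary. The following is a minimal compilable sketch of that pattern; the class and function names here are simplified stand-ins for illustration, not the real ArangoDB planner types.

#include <vector>

// Simplified stand-ins for the planner classes touched in the diff above;
// the real ExecutionNode / CalculationNode interfaces are far richer.
struct Variable { int id; };

struct ExecutionNode {
  virtual ~ExecutionNode() = default;
  // Generic interface: every call materializes a std::vector (a heap
  // allocation), even for node types that always set exactly one variable.
  virtual std::vector<Variable const*> getVariablesSetHere() const = 0;
};

struct CalculationNode : ExecutionNode {
  Variable const* out = nullptr;

  std::vector<Variable const*> getVariablesSetHere() const override {
    return {out};  // builds a one-element temporary vector on every call
  }

  // Dedicated accessor: returns the single output variable directly,
  // with no container and no allocation.
  Variable const* outVariable() const { return out; }
};

int variableIdOf(ExecutionNode const* en) {
  // Before the patch (allocates a one-element vector just to read [0]):
  //   auto outvars = en->getVariablesSetHere();
  //   return outvars[0]->id;

  // After the patch (direct pointer access, no allocation):
  auto const* cn = static_cast<CalculationNode const*>(en);
  return cn->outVariable()->id;
}

The same trade-off shows up in the other call sites the diff touches (the SortToIndexNode and RemoveToEnumCollFinder walkers, removeRedundantCalculationsRule, and the OR-rewrite rules): wherever the concrete node type is already known, the direct accessor is cheaper and the explicit "exactly one output variable" check can be dropped.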
mmm a / src / compiler / arm / instruction - selector - arm . cc <nl> ppp b / src / compiler / arm / instruction - selector - arm . cc <nl> void InstructionSelector : : VisitStore ( Node * node ) { <nl> Node * index = node - > InputAt ( 1 ) ; <nl> Node * value = node - > InputAt ( 2 ) ; <nl> <nl> - StoreRepresentation store_rep = OpParameter < StoreRepresentation > ( node ) ; <nl> + StoreRepresentation store_rep = StoreRepresentationOf ( node - > op ( ) ) ; <nl> WriteBarrierKind write_barrier_kind = store_rep . write_barrier_kind ( ) ; <nl> - MachineRepresentation rep = store_rep . machine_type ( ) . representation ( ) ; <nl> + MachineRepresentation rep = store_rep . representation ( ) ; <nl> <nl> if ( write_barrier_kind ! = kNoWriteBarrier ) { <nl> DCHECK_EQ ( MachineRepresentation : : kTagged , rep ) ; <nl> void InstructionSelector : : VisitCheckedLoad ( Node * node ) { <nl> <nl> <nl> void InstructionSelector : : VisitCheckedStore ( Node * node ) { <nl> - MachineRepresentation rep = <nl> - CheckedStoreRepresentationOf ( node - > op ( ) ) . representation ( ) ; <nl> + MachineRepresentation rep = CheckedStoreRepresentationOf ( node - > op ( ) ) ; <nl> ArmOperandGenerator g ( this ) ; <nl> Node * const buffer = node - > InputAt ( 0 ) ; <nl> Node * const offset = node - > InputAt ( 1 ) ; <nl> mmm a / src / compiler / arm64 / instruction - selector - arm64 . cc <nl> ppp b / src / compiler / arm64 / instruction - selector - arm64 . cc <nl> void InstructionSelector : : VisitStore ( Node * node ) { <nl> Node * index = node - > InputAt ( 1 ) ; <nl> Node * value = node - > InputAt ( 2 ) ; <nl> <nl> - StoreRepresentation store_rep = OpParameter < StoreRepresentation > ( node ) ; <nl> + StoreRepresentation store_rep = StoreRepresentationOf ( node - > op ( ) ) ; <nl> WriteBarrierKind write_barrier_kind = store_rep . write_barrier_kind ( ) ; <nl> - MachineRepresentation rep = store_rep . machine_type ( ) . representation ( ) ; <nl> + MachineRepresentation rep = store_rep . representation ( ) ; <nl> <nl> / / TODO ( arm64 ) : I guess this could be done in a better way . <nl> if ( write_barrier_kind ! = kNoWriteBarrier ) { <nl> void InstructionSelector : : VisitCheckedLoad ( Node * node ) { <nl> <nl> <nl> void InstructionSelector : : VisitCheckedStore ( Node * node ) { <nl> - MachineRepresentation rep = <nl> - CheckedStoreRepresentationOf ( node - > op ( ) ) . representation ( ) ; <nl> + MachineRepresentation rep = CheckedStoreRepresentationOf ( node - > op ( ) ) ; <nl> Arm64OperandGenerator g ( this ) ; <nl> Node * const buffer = node - > InputAt ( 0 ) ; <nl> Node * const offset = node - > InputAt ( 1 ) ; <nl> mmm a / src / compiler / basic - block - instrumentor . cc <nl> ppp b / src / compiler / basic - block - instrumentor . cc <nl> BasicBlockProfiler : : Data * BasicBlockInstrumentor : : Instrument ( <nl> graph - > start ( ) , graph - > start ( ) ) ; <nl> Node * inc = graph - > NewNode ( machine . Int32Add ( ) , load , one ) ; <nl> Node * store = <nl> - graph - > NewNode ( machine . Store ( StoreRepresentation ( MachineType : : Uint32 ( ) , <nl> - kNoWriteBarrier ) ) , <nl> + graph - > NewNode ( machine . Store ( StoreRepresentation ( <nl> + MachineRepresentation : : kWord32 , kNoWriteBarrier ) ) , <nl> base , zero , inc , graph - > start ( ) , graph - > start ( ) ) ; <nl> / / Insert the new nodes . <nl> static const int kArraySize = 6 ; <nl> mmm a / src / compiler / change - lowering . cc <nl> ppp b / src / compiler / change - lowering . 
cc <nl> Node * ChangeLowering : : AllocateHeapNumberWithValue ( Node * value , Node * control ) { <nl> } <nl> Node * heap_number = graph ( ) - > NewNode ( allocate_heap_number_operator_ . get ( ) , <nl> target , context , effect , control ) ; <nl> - Node * store = graph ( ) - > NewNode ( machine ( ) - > Store ( StoreRepresentation ( <nl> - MachineType : : Float64 ( ) , kNoWriteBarrier ) ) , <nl> - heap_number , HeapNumberValueIndexConstant ( ) , <nl> - value , heap_number , control ) ; <nl> + Node * store = graph ( ) - > NewNode ( <nl> + machine ( ) - > Store ( StoreRepresentation ( MachineRepresentation : : kFloat64 , <nl> + kNoWriteBarrier ) ) , <nl> + heap_number , HeapNumberValueIndexConstant ( ) , value , heap_number , control ) ; <nl> return graph ( ) - > NewNode ( common ( ) - > FinishRegion ( ) , heap_number , store ) ; <nl> } <nl> <nl> Reduction ChangeLowering : : StoreField ( Node * node ) { <nl> type ) ; <nl> Node * offset = jsgraph ( ) - > IntPtrConstant ( access . offset - access . tag ( ) ) ; <nl> node - > InsertInput ( graph ( ) - > zone ( ) , 1 , offset ) ; <nl> - NodeProperties : : ChangeOp ( <nl> - node , machine ( ) - > Store ( StoreRepresentation ( access . machine_type , kind ) ) ) ; <nl> + NodeProperties : : ChangeOp ( node , <nl> + machine ( ) - > Store ( StoreRepresentation ( <nl> + access . machine_type . representation ( ) , kind ) ) ) ; <nl> return Changed ( node ) ; <nl> } <nl> <nl> Reduction ChangeLowering : : StoreElement ( Node * node ) { <nl> node - > ReplaceInput ( 1 , ComputeIndex ( access , node - > InputAt ( 1 ) ) ) ; <nl> NodeProperties : : ChangeOp ( <nl> node , machine ( ) - > Store ( StoreRepresentation ( <nl> - access . machine_type , <nl> + access . machine_type . representation ( ) , <nl> ComputeWriteBarrierKind ( access . base_is_tagged , <nl> access . machine_type . representation ( ) , <nl> access . type , type ) ) ) ) ; <nl> mmm a / src / compiler / ia32 / instruction - selector - ia32 . cc <nl> ppp b / src / compiler / ia32 / instruction - selector - ia32 . cc <nl> void InstructionSelector : : VisitStore ( Node * node ) { <nl> Node * index = node - > InputAt ( 1 ) ; <nl> Node * value = node - > InputAt ( 2 ) ; <nl> <nl> - StoreRepresentation store_rep = OpParameter < StoreRepresentation > ( node ) ; <nl> + StoreRepresentation store_rep = StoreRepresentationOf ( node - > op ( ) ) ; <nl> WriteBarrierKind write_barrier_kind = store_rep . write_barrier_kind ( ) ; <nl> - MachineRepresentation rep = store_rep . machine_type ( ) . representation ( ) ; <nl> + MachineRepresentation rep = store_rep . representation ( ) ; <nl> <nl> if ( write_barrier_kind ! = kNoWriteBarrier ) { <nl> DCHECK_EQ ( MachineRepresentation : : kTagged , rep ) ; <nl> void InstructionSelector : : VisitCheckedLoad ( Node * node ) { <nl> <nl> <nl> void InstructionSelector : : VisitCheckedStore ( Node * node ) { <nl> - MachineRepresentation rep = <nl> - CheckedStoreRepresentationOf ( node - > op ( ) ) . representation ( ) ; <nl> + MachineRepresentation rep = CheckedStoreRepresentationOf ( node - > op ( ) ) ; <nl> IA32OperandGenerator g ( this ) ; <nl> Node * const buffer = node - > InputAt ( 0 ) ; <nl> Node * const offset = node - > InputAt ( 1 ) ; <nl> mmm a / src / compiler / interpreter - assembler . cc <nl> ppp b / src / compiler / interpreter - assembler . 
cc <nl> Node * InterpreterAssembler : : LoadRegister ( Node * reg_index ) { <nl> <nl> Node * InterpreterAssembler : : StoreRegister ( Node * value , Node * reg_index ) { <nl> return raw_assembler_ - > Store ( <nl> - MachineType : : AnyTagged ( ) , RegisterFileRawPointer ( ) , <nl> + MachineRepresentation : : kTagged , RegisterFileRawPointer ( ) , <nl> RegisterFrameOffset ( reg_index ) , value , kNoWriteBarrier ) ; <nl> } <nl> <nl> Node * InterpreterAssembler : : StoreContextSlot ( Node * context , Node * slot_index , <nl> Node * offset = <nl> IntPtrAdd ( WordShl ( slot_index , kPointerSizeLog2 ) , <nl> Int32Constant ( Context : : kHeaderSize - kHeapObjectTag ) ) ; <nl> - return raw_assembler_ - > Store ( MachineType : : AnyTagged ( ) , context , offset , value , <nl> - kFullWriteBarrier ) ; <nl> + return raw_assembler_ - > Store ( MachineRepresentation : : kTagged , context , offset , <nl> + value , kFullWriteBarrier ) ; <nl> } <nl> <nl> <nl> mmm a / src / compiler / js - generic - lowering . cc <nl> ppp b / src / compiler / js - generic - lowering . cc <nl> void JSGenericLowering : : LowerJSStoreContext ( Node * node ) { <nl> node - > ReplaceInput ( 1 , jsgraph ( ) - > Int32Constant ( Context : : SlotOffset ( <nl> static_cast < int > ( access . index ( ) ) ) ) ) ; <nl> NodeProperties : : ChangeOp ( <nl> - node , machine ( ) - > Store ( StoreRepresentation ( MachineType : : AnyTagged ( ) , <nl> + node , machine ( ) - > Store ( StoreRepresentation ( MachineRepresentation : : kTagged , <nl> kFullWriteBarrier ) ) ) ; <nl> } <nl> <nl> void JSGenericLowering : : LowerJSStoreMessage ( Node * node ) { <nl> node - > RemoveInput ( NodeProperties : : FirstContextIndex ( node ) ) ; <nl> node - > InsertInput ( zone ( ) , 0 , jsgraph ( ) - > ExternalConstant ( message_address ) ) ; <nl> node - > InsertInput ( zone ( ) , 1 , jsgraph ( ) - > IntPtrConstant ( 0 ) ) ; <nl> - StoreRepresentation representation ( MachineType : : AnyTagged ( ) , kNoWriteBarrier ) ; <nl> + StoreRepresentation representation ( MachineRepresentation : : kTagged , <nl> + kNoWriteBarrier ) ; <nl> NodeProperties : : ChangeOp ( node , machine ( ) - > Store ( representation ) ) ; <nl> } <nl> <nl> mmm a / src / compiler / machine - operator - reducer . cc <nl> ppp b / src / compiler / machine - operator - reducer . cc <nl> Reduction MachineOperatorReducer : : ReduceTruncateFloat64ToInt32 ( Node * node ) { <nl> <nl> Reduction MachineOperatorReducer : : ReduceStore ( Node * node ) { <nl> MachineRepresentation const rep = <nl> - StoreRepresentationOf ( node - > op ( ) ) . machine_type ( ) . representation ( ) ; <nl> + StoreRepresentationOf ( node - > op ( ) ) . representation ( ) ; <nl> Node * const value = node - > InputAt ( 2 ) ; <nl> switch ( value - > opcode ( ) ) { <nl> case IrOpcode : : kWord32And : { <nl> mmm a / src / compiler / machine - operator . cc <nl> ppp b / src / compiler / machine - operator . cc <nl> std : : ostream & operator < < ( std : : ostream & os , WriteBarrierKind kind ) { <nl> <nl> <nl> bool operator = = ( StoreRepresentation lhs , StoreRepresentation rhs ) { <nl> - return lhs . machine_type ( ) = = rhs . machine_type ( ) & & <nl> + return lhs . representation ( ) = = rhs . representation ( ) & & <nl> lhs . write_barrier_kind ( ) = = rhs . write_barrier_kind ( ) ; <nl> } <nl> <nl> bool operator ! = ( StoreRepresentation lhs , StoreRepresentation rhs ) { <nl> <nl> <nl> size_t hash_value ( StoreRepresentation rep ) { <nl> - return base : : hash_combine ( rep . machine_type ( ) , rep . 
write_barrier_kind ( ) ) ; <nl> + return base : : hash_combine ( rep . representation ( ) , rep . write_barrier_kind ( ) ) ; <nl> } <nl> <nl> <nl> std : : ostream & operator < < ( std : : ostream & os , StoreRepresentation rep ) { <nl> - return os < < " ( " < < rep . machine_type ( ) < < " : " < < rep . write_barrier_kind ( ) <nl> + return os < < " ( " < < rep . representation ( ) < < " : " < < rep . write_barrier_kind ( ) <nl> < < " ) " ; <nl> } <nl> <nl> CheckedStoreRepresentation CheckedStoreRepresentationOf ( Operator const * op ) { <nl> V ( AnyTagged ) <nl> <nl> <nl> + # define MACHINE_REPRESENTATION_LIST ( V ) \ <nl> + V ( kFloat32 ) \ <nl> + V ( kFloat64 ) \ <nl> + V ( kWord8 ) \ <nl> + V ( kWord16 ) \ <nl> + V ( kWord32 ) \ <nl> + V ( kWord64 ) \ <nl> + V ( kTagged ) <nl> + <nl> + <nl> struct MachineOperatorGlobalCache { <nl> # define PURE ( Name , properties , value_input_count , control_input_count , \ <nl> output_count ) \ <nl> struct MachineOperatorGlobalCache { <nl> : Operator1 < StoreRepresentation > ( \ <nl> IrOpcode : : kStore , Operator : : kNoRead | Operator : : kNoThrow , \ <nl> " Store " , 3 , 1 , 1 , 0 , 1 , 0 , \ <nl> - StoreRepresentation ( MachineType : : Type ( ) , write_barrier_kind ) ) { } \ <nl> + StoreRepresentation ( MachineRepresentation : : Type , \ <nl> + write_barrier_kind ) ) { } \ <nl> } ; \ <nl> struct Store # # Type # # NoWriteBarrier # # Operator final \ <nl> : public Store # # Type # # Operator { \ <nl> struct MachineOperatorGlobalCache { <nl> CheckedStore # # Type # # Operator ( ) \ <nl> : Operator1 < CheckedStoreRepresentation > ( \ <nl> IrOpcode : : kCheckedStore , Operator : : kNoRead | Operator : : kNoThrow , \ <nl> - " CheckedStore " , 4 , 1 , 1 , 0 , 1 , 0 , MachineType : : Type ( ) ) { } \ <nl> + " CheckedStore " , 4 , 1 , 1 , 0 , 1 , 0 , MachineRepresentation : : Type ) { \ <nl> + } \ <nl> } ; \ <nl> Store # # Type # # NoWriteBarrier # # Operator kStore # # Type # # NoWriteBarrier ; \ <nl> Store # # Type # # MapWriteBarrier # # Operator kStore # # Type # # MapWriteBarrier ; \ <nl> struct MachineOperatorGlobalCache { <nl> kStore # # Type # # PointerWriteBarrier ; \ <nl> Store # # Type # # FullWriteBarrier # # Operator kStore # # Type # # FullWriteBarrier ; \ <nl> CheckedStore # # Type # # Operator kCheckedStore # # Type ; <nl> - MACHINE_TYPE_LIST ( STORE ) <nl> + MACHINE_REPRESENTATION_LIST ( STORE ) <nl> # undef STORE <nl> } ; <nl> <nl> const Operator * MachineOperatorBuilder : : Load ( LoadRepresentation rep ) { <nl> <nl> <nl> const Operator * MachineOperatorBuilder : : Store ( StoreRepresentation store_rep ) { <nl> - MachineType type = store_rep . machine_type ( ) ; <nl> - # define STORE ( Type ) \ <nl> - if ( type = = MachineType : : Type ( ) ) { \ <nl> + switch ( store_rep . representation ( ) ) { <nl> + # define STORE ( kRep ) \ <nl> + case MachineRepresentation : : kRep : \ <nl> switch ( store_rep . write_barrier_kind ( ) ) { \ <nl> case kNoWriteBarrier : \ <nl> - return & cache_ . k # # Store # # Type # # NoWriteBarrier ; \ <nl> + return & cache_ . k # # Store # # kRep # # NoWriteBarrier ; \ <nl> case kMapWriteBarrier : \ <nl> - return & cache_ . k # # Store # # Type # # MapWriteBarrier ; \ <nl> + return & cache_ . k # # Store # # kRep # # MapWriteBarrier ; \ <nl> case kPointerWriteBarrier : \ <nl> - return & cache_ . k # # Store # # Type # # PointerWriteBarrier ; \ <nl> + return & cache_ . k # # Store # # kRep # # PointerWriteBarrier ; \ <nl> case kFullWriteBarrier : \ <nl> - return & cache_ . 
k # # Store # # Type # # FullWriteBarrier ; \ <nl> + return & cache_ . k # # Store # # kRep # # FullWriteBarrier ; \ <nl> } \ <nl> - } <nl> - MACHINE_TYPE_LIST ( STORE ) <nl> + break ; <nl> + MACHINE_REPRESENTATION_LIST ( STORE ) <nl> # undef STORE <nl> + default : <nl> + break ; <nl> + } <nl> UNREACHABLE ( ) ; <nl> return nullptr ; <nl> } <nl> const Operator * MachineOperatorBuilder : : CheckedLoad ( <nl> <nl> const Operator * MachineOperatorBuilder : : CheckedStore ( <nl> CheckedStoreRepresentation rep ) { <nl> - # define STORE ( Type ) \ <nl> - if ( rep = = MachineType : : Type ( ) ) { \ <nl> - return & cache_ . kCheckedStore # # Type ; \ <nl> - } <nl> - MACHINE_TYPE_LIST ( STORE ) <nl> + switch ( rep ) { <nl> + # define STORE ( kRep ) \ <nl> + case MachineRepresentation : : kRep : \ <nl> + return & cache_ . kCheckedStore # # kRep ; <nl> + MACHINE_REPRESENTATION_LIST ( STORE ) <nl> # undef STORE <nl> + default : <nl> + break ; <nl> + } <nl> UNREACHABLE ( ) ; <nl> return nullptr ; <nl> } <nl> mmm a / src / compiler / machine - operator . h <nl> ppp b / src / compiler / machine - operator . h <nl> LoadRepresentation LoadRepresentationOf ( Operator const * ) ; <nl> / / correct write barrier . <nl> class StoreRepresentation final { <nl> public : <nl> - StoreRepresentation ( MachineType machine_type , <nl> + StoreRepresentation ( MachineRepresentation representation , <nl> WriteBarrierKind write_barrier_kind ) <nl> - : machine_type_ ( machine_type ) , write_barrier_kind_ ( write_barrier_kind ) { } <nl> + : representation_ ( representation ) , <nl> + write_barrier_kind_ ( write_barrier_kind ) { } <nl> <nl> - MachineType machine_type ( ) const { return machine_type_ ; } <nl> + MachineRepresentation representation ( ) const { return representation_ ; } <nl> WriteBarrierKind write_barrier_kind ( ) const { return write_barrier_kind_ ; } <nl> <nl> private : <nl> - MachineType machine_type_ ; <nl> + MachineRepresentation representation_ ; <nl> WriteBarrierKind write_barrier_kind_ ; <nl> } ; <nl> <nl> CheckedLoadRepresentation CheckedLoadRepresentationOf ( Operator const * ) ; <nl> <nl> <nl> / / A CheckedStore needs a MachineType . <nl> - typedef MachineType CheckedStoreRepresentation ; <nl> + typedef MachineRepresentation CheckedStoreRepresentation ; <nl> <nl> CheckedStoreRepresentation CheckedStoreRepresentationOf ( Operator const * ) ; <nl> <nl> mmm a / src / compiler / mips / instruction - selector - mips . cc <nl> ppp b / src / compiler / mips / instruction - selector - mips . cc <nl> void InstructionSelector : : VisitStore ( Node * node ) { <nl> Node * index = node - > InputAt ( 1 ) ; <nl> Node * value = node - > InputAt ( 2 ) ; <nl> <nl> - StoreRepresentation store_rep = OpParameter < StoreRepresentation > ( node ) ; <nl> + StoreRepresentation store_rep = StoreRepresentationOf ( node - > op ( ) ) ; <nl> WriteBarrierKind write_barrier_kind = store_rep . write_barrier_kind ( ) ; <nl> - MachineRepresentation rep = store_rep . machine_type ( ) . representation ( ) ; <nl> + MachineRepresentation rep = store_rep . representation ( ) ; <nl> <nl> / / TODO ( mips ) : I guess this could be done in a better way . <nl> if ( write_barrier_kind ! = kNoWriteBarrier ) { <nl> void InstructionSelector : : VisitCheckedLoad ( Node * node ) { <nl> <nl> <nl> void InstructionSelector : : VisitCheckedStore ( Node * node ) { <nl> - MachineRepresentation rep = <nl> - CheckedStoreRepresentationOf ( node - > op ( ) ) . 
representation ( ) ; <nl> + MachineRepresentation rep = CheckedStoreRepresentationOf ( node - > op ( ) ) ; <nl> MipsOperandGenerator g ( this ) ; <nl> Node * const buffer = node - > InputAt ( 0 ) ; <nl> Node * const offset = node - > InputAt ( 1 ) ; <nl> mmm a / src / compiler / mips64 / instruction - selector - mips64 . cc <nl> ppp b / src / compiler / mips64 / instruction - selector - mips64 . cc <nl> void InstructionSelector : : VisitStore ( Node * node ) { <nl> Node * index = node - > InputAt ( 1 ) ; <nl> Node * value = node - > InputAt ( 2 ) ; <nl> <nl> - StoreRepresentation store_rep = OpParameter < StoreRepresentation > ( node ) ; <nl> + StoreRepresentation store_rep = StoreRepresentationOf ( node - > op ( ) ) ; <nl> WriteBarrierKind write_barrier_kind = store_rep . write_barrier_kind ( ) ; <nl> - MachineRepresentation rep = store_rep . machine_type ( ) . representation ( ) ; <nl> + MachineRepresentation rep = store_rep . representation ( ) ; <nl> <nl> / / TODO ( mips ) : I guess this could be done in a better way . <nl> if ( write_barrier_kind ! = kNoWriteBarrier ) { <nl> void InstructionSelector : : VisitCheckedLoad ( Node * node ) { <nl> <nl> <nl> void InstructionSelector : : VisitCheckedStore ( Node * node ) { <nl> - MachineRepresentation rep = <nl> - CheckedStoreRepresentationOf ( node - > op ( ) ) . representation ( ) ; <nl> + MachineRepresentation rep = CheckedStoreRepresentationOf ( node - > op ( ) ) ; <nl> Mips64OperandGenerator g ( this ) ; <nl> Node * const buffer = node - > InputAt ( 0 ) ; <nl> Node * const offset = node - > InputAt ( 1 ) ; <nl> mmm a / src / compiler / raw - machine - assembler . h <nl> ppp b / src / compiler / raw - machine - assembler . h <nl> class RawMachineAssembler { <nl> Node * Load ( MachineType rep , Node * base , Node * index ) { <nl> return AddNode ( machine ( ) - > Load ( rep ) , base , index ) ; <nl> } <nl> - Node * Store ( MachineType rep , Node * base , Node * value , <nl> + Node * Store ( MachineRepresentation rep , Node * base , Node * value , <nl> WriteBarrierKind write_barrier ) { <nl> return Store ( rep , base , IntPtrConstant ( 0 ) , value , write_barrier ) ; <nl> } <nl> - Node * Store ( MachineType rep , Node * base , Node * index , Node * value , <nl> + Node * Store ( MachineRepresentation rep , Node * base , Node * index , Node * value , <nl> WriteBarrierKind write_barrier ) { <nl> return AddNode ( machine ( ) - > Store ( StoreRepresentation ( rep , write_barrier ) ) , <nl> base , index , value ) ; <nl> class RawMachineAssembler { <nl> Node * LoadFromPointer ( void * address , MachineType rep , int32_t offset = 0 ) { <nl> return Load ( rep , PointerConstant ( address ) , Int32Constant ( offset ) ) ; <nl> } <nl> - Node * StoreToPointer ( void * address , MachineType rep , Node * node ) { <nl> + Node * StoreToPointer ( void * address , MachineRepresentation rep , Node * node ) { <nl> return Store ( rep , PointerConstant ( address ) , node , kNoWriteBarrier ) ; <nl> } <nl> Node * StringConstant ( const char * string ) { <nl> mmm a / src / compiler / simplified - lowering . cc <nl> ppp b / src / compiler / simplified - lowering . cc <nl> class RepresentationSelector { <nl> StoreRepresentation rep = StoreRepresentationOf ( node - > op ( ) ) ; <nl> ProcessInput ( node , 0 , UseInfo : : AnyTagged ( ) ) ; / / tagged pointer <nl> ProcessInput ( node , 1 , UseInfo : : PointerInt ( ) ) ; / / index <nl> - ProcessInput ( node , 2 , TruncatingUseInfoFromRepresentation ( <nl> - rep . machine_type ( ) . 
representation ( ) ) ) ; <nl> + ProcessInput ( node , 2 , <nl> + TruncatingUseInfoFromRepresentation ( rep . representation ( ) ) ) ; <nl> ProcessRemainingInputs ( node , 3 ) ; <nl> SetOutput ( node , MachineType : : None ( ) ) ; <nl> break ; <nl> void SimplifiedLowering : : DoLoadBuffer ( Node * node , MachineType output_type , <nl> <nl> void SimplifiedLowering : : DoStoreBuffer ( Node * node ) { <nl> DCHECK_EQ ( IrOpcode : : kStoreBuffer , node - > opcode ( ) ) ; <nl> - MachineType const type = BufferAccessOf ( node - > op ( ) ) . machine_type ( ) ; <nl> - NodeProperties : : ChangeOp ( node , machine ( ) - > CheckedStore ( type ) ) ; <nl> + MachineRepresentation const rep = <nl> + BufferAccessOf ( node - > op ( ) ) . machine_type ( ) . representation ( ) ; <nl> + NodeProperties : : ChangeOp ( node , machine ( ) - > CheckedStore ( rep ) ) ; <nl> } <nl> <nl> <nl> mmm a / src / compiler / wasm - compiler . cc <nl> ppp b / src / compiler / wasm - compiler . cc <nl> Node * WasmGraphBuilder : : StoreGlobal ( uint32_t index , Node * val ) { <nl> Node * addr = jsgraph ( ) - > IntPtrConstant ( <nl> module_ - > globals_area + module_ - > module - > globals - > at ( index ) . offset ) ; <nl> const Operator * op = jsgraph ( ) - > machine ( ) - > Store ( <nl> - StoreRepresentation ( mem_type , kNoWriteBarrier ) ) ; <nl> + StoreRepresentation ( mem_type . representation ( ) , kNoWriteBarrier ) ) ; <nl> Node * node = graph ( ) - > NewNode ( op , addr , jsgraph ( ) - > Int32Constant ( 0 ) , val , <nl> * effect_ , * control_ ) ; <nl> * effect_ = node ; <nl> Node * WasmGraphBuilder : : StoreMem ( MachineType memtype , Node * index , <nl> if ( module_ & & module_ - > asm_js ) { <nl> / / asm . js semantics use CheckedStore ( i . e . ignore OOB writes ) . <nl> DCHECK_EQ ( 0 , offset ) ; <nl> - const Operator * op = jsgraph ( ) - > machine ( ) - > CheckedStore ( memtype ) ; <nl> + const Operator * op = <nl> + jsgraph ( ) - > machine ( ) - > CheckedStore ( memtype . representation ( ) ) ; <nl> store = graph ( ) - > NewNode ( op , MemBuffer ( 0 ) , index , MemSize ( 0 ) , val , * effect_ , <nl> * control_ ) ; <nl> } else { <nl> / / WASM semantics throw on OOB . Introduce explicit bounds check . <nl> BoundsCheckMem ( memtype , index , offset ) ; <nl> - StoreRepresentation rep ( memtype , kNoWriteBarrier ) ; <nl> + StoreRepresentation rep ( memtype . representation ( ) , kNoWriteBarrier ) ; <nl> store = <nl> graph ( ) - > NewNode ( jsgraph ( ) - > machine ( ) - > Store ( rep ) , MemBuffer ( offset ) , <nl> index , val , * effect_ , * control_ ) ; <nl> mmm a / src / compiler / x64 / instruction - selector - x64 . cc <nl> ppp b / src / compiler / x64 / instruction - selector - x64 . cc <nl> void InstructionSelector : : VisitStore ( Node * node ) { <nl> Node * index = node - > InputAt ( 1 ) ; <nl> Node * value = node - > InputAt ( 2 ) ; <nl> <nl> - StoreRepresentation store_rep = OpParameter < StoreRepresentation > ( node ) ; <nl> + StoreRepresentation store_rep = StoreRepresentationOf ( node - > op ( ) ) ; <nl> WriteBarrierKind write_barrier_kind = store_rep . write_barrier_kind ( ) ; <nl> - MachineRepresentation rep = store_rep . machine_type ( ) . representation ( ) ; <nl> + MachineRepresentation rep = store_rep . representation ( ) ; <nl> <nl> if ( write_barrier_kind ! 
= kNoWriteBarrier ) { <nl> DCHECK_EQ ( MachineRepresentation : : kTagged , rep ) ; <nl> void InstructionSelector : : VisitCheckedLoad ( Node * node ) { <nl> <nl> <nl> void InstructionSelector : : VisitCheckedStore ( Node * node ) { <nl> - MachineRepresentation rep = <nl> - CheckedStoreRepresentationOf ( node - > op ( ) ) . representation ( ) ; <nl> + MachineRepresentation rep = CheckedStoreRepresentationOf ( node - > op ( ) ) ; <nl> X64OperandGenerator g ( this ) ; <nl> Node * const buffer = node - > InputAt ( 0 ) ; <nl> Node * const offset = node - > InputAt ( 1 ) ; <nl> mmm a / test / cctest / compiler / codegen - tester . cc <nl> ppp b / test / cctest / compiler / codegen - tester . cc <nl> TEST ( RunBufferedRawMachineAssemblerTesterTester ) { <nl> { <nl> BufferedRawMachineAssemblerTester < void > m ; <nl> int64_t result ; <nl> - m . Store ( MachineTypeForC < int64_t > ( ) , m . PointerConstant ( & result ) , <nl> - m . Int64Constant ( 0x12500000000 ) , kNoWriteBarrier ) ; <nl> + m . Store ( MachineTypeForC < int64_t > ( ) . representation ( ) , <nl> + m . PointerConstant ( & result ) , m . Int64Constant ( 0x12500000000 ) , <nl> + kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( 0 ) ) ; <nl> m . Call ( ) ; <nl> CHECK_EQ ( 0x12500000000 , result ) ; <nl> TEST ( RunBufferedRawMachineAssemblerTesterTester ) { <nl> { <nl> BufferedRawMachineAssemblerTester < void > m ( MachineType : : Float64 ( ) ) ; <nl> double result ; <nl> - m . Store ( MachineTypeForC < double > ( ) , m . PointerConstant ( & result ) , <nl> - m . Parameter ( 0 ) , kNoWriteBarrier ) ; <nl> + m . Store ( MachineTypeForC < double > ( ) . representation ( ) , <nl> + m . PointerConstant ( & result ) , m . Parameter ( 0 ) , kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( 0 ) ) ; <nl> FOR_FLOAT64_INPUTS ( i ) { <nl> m . Call ( * i ) ; <nl> TEST ( RunBufferedRawMachineAssemblerTesterTester ) { <nl> BufferedRawMachineAssemblerTester < void > m ( MachineType : : Int64 ( ) , <nl> MachineType : : Int64 ( ) ) ; <nl> int64_t result ; <nl> - m . Store ( MachineTypeForC < int64_t > ( ) , m . PointerConstant ( & result ) , <nl> + m . Store ( MachineTypeForC < int64_t > ( ) . representation ( ) , <nl> + m . PointerConstant ( & result ) , <nl> m . Int64Add ( m . Parameter ( 0 ) , m . Parameter ( 1 ) ) , kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( 0 ) ) ; <nl> FOR_INT64_INPUTS ( i ) { <nl> TEST ( RunBufferedRawMachineAssemblerTesterTester ) { <nl> MachineType : : Int64 ( ) , MachineType : : Int64 ( ) , MachineType : : Int64 ( ) ) ; <nl> int64_t result ; <nl> m . Store ( <nl> - MachineTypeForC < int64_t > ( ) , m . PointerConstant ( & result ) , <nl> + MachineTypeForC < int64_t > ( ) . representation ( ) , m . PointerConstant ( & result ) , <nl> m . Int64Add ( m . Int64Add ( m . Parameter ( 0 ) , m . Parameter ( 1 ) ) , m . Parameter ( 2 ) ) , <nl> kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( 0 ) ) ; <nl> TEST ( RunBufferedRawMachineAssemblerTesterTester ) { <nl> MachineType : : Int64 ( ) , MachineType : : Int64 ( ) , MachineType : : Int64 ( ) , <nl> MachineType : : Int64 ( ) ) ; <nl> int64_t result ; <nl> - m . Store ( MachineTypeForC < int64_t > ( ) , m . PointerConstant ( & result ) , <nl> + m . Store ( MachineTypeForC < int64_t > ( ) . representation ( ) , <nl> + m . PointerConstant ( & result ) , <nl> m . Int64Add ( m . Int64Add ( m . Int64Add ( m . Parameter ( 0 ) , m . Parameter ( 1 ) ) , <nl> m . Parameter ( 2 ) ) , <nl> m . 
Parameter ( 3 ) ) , <nl> mmm a / test / cctest / compiler / codegen - tester . h <nl> ppp b / test / cctest / compiler / codegen - tester . h <nl> class BufferedRawMachineAssemblerTester <nl> / / Store node is provided as a parameter . By storing the return value in <nl> / / memory it is possible to return 64 bit values . <nl> void Return ( Node * input ) { <nl> - Store ( MachineTypeForC < ReturnType > ( ) , <nl> + Store ( MachineTypeForC < ReturnType > ( ) . representation ( ) , <nl> RawMachineAssembler : : Parameter ( return_parameter_index_ ) , input , <nl> kNoWriteBarrier ) ; <nl> RawMachineAssembler : : Return ( Int32Constant ( 1234 ) ) ; <nl> class BinopTester { <nl> <nl> void AddReturn ( Node * val ) { <nl> if ( use_result_buffer ) { <nl> - T - > Store ( rep , T - > PointerConstant ( & result ) , T - > Int32Constant ( 0 ) , val , <nl> - kNoWriteBarrier ) ; <nl> + T - > Store ( rep . representation ( ) , T - > PointerConstant ( & result ) , <nl> + T - > Int32Constant ( 0 ) , val , kNoWriteBarrier ) ; <nl> T - > Return ( T - > Int32Constant ( CHECK_VALUE ) ) ; <nl> } else { <nl> T - > Return ( val ) ; <nl> mmm a / test / cctest / compiler / test - changes - lowering . cc <nl> ppp b / test / cctest / compiler / test - changes - lowering . cc <nl> TEST ( RunChangeTaggedToFloat64 ) { <nl> <nl> t . BuildStoreAndLower ( t . simplified ( ) - > ChangeTaggedToFloat64 ( ) , <nl> t . machine ( ) - > Store ( StoreRepresentation ( <nl> - MachineType : : Float64 ( ) , kNoWriteBarrier ) ) , <nl> + MachineRepresentation : : kFloat64 , kNoWriteBarrier ) ) , <nl> & result ) ; <nl> <nl> { <nl> mmm a / test / cctest / compiler / test - machine - operator - reducer . cc <nl> ppp b / test / cctest / compiler / test - machine - operator - reducer . cc <nl> TEST ( ReduceLoadStore ) { <nl> } <nl> <nl> { <nl> - Node * store = R . graph . NewNode ( R . machine . Store ( StoreRepresentation ( <nl> - MachineType : : Int32 ( ) , kNoWriteBarrier ) ) , <nl> - base , index , load , load , R . graph . start ( ) ) ; <nl> + Node * store = <nl> + R . graph . NewNode ( R . machine . Store ( StoreRepresentation ( <nl> + MachineRepresentation : : kWord32 , kNoWriteBarrier ) ) , <nl> + base , index , load , load , R . graph . start ( ) ) ; <nl> MachineOperatorReducer reducer ( & R . jsgraph ) ; <nl> Reduction reduction = reducer . Reduce ( store ) ; <nl> CHECK ( ! reduction . Changed ( ) ) ; / / stores should not be reduced . <nl> mmm a / test / cctest / compiler / test - run - machops . cc <nl> ppp b / test / cctest / compiler / test - run - machops . cc <nl> TEST ( RunLoadStoreFloat32Offset ) { <nl> / / generate load [ # base + # index ] <nl> Node * load = m . Load ( MachineType : : Float32 ( ) , m . PointerConstant ( from ) , <nl> m . IntPtrConstant ( offset ) ) ; <nl> - m . Store ( MachineType : : Float32 ( ) , m . PointerConstant ( to ) , <nl> + m . Store ( MachineRepresentation : : kFloat32 , m . PointerConstant ( to ) , <nl> m . IntPtrConstant ( offset ) , load , kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( magic ) ) ; <nl> <nl> TEST ( RunLoadStoreFloat64Offset ) { <nl> / / generate load [ # base + # index ] <nl> Node * load = m . Load ( MachineType : : Float64 ( ) , m . PointerConstant ( from ) , <nl> m . IntPtrConstant ( offset ) ) ; <nl> - m . Store ( MachineType : : Float64 ( ) , m . PointerConstant ( to ) , <nl> + m . Store ( MachineRepresentation : : kFloat64 , m . PointerConstant ( to ) , <nl> m . IntPtrConstant ( offset ) , load , kNoWriteBarrier ) ; <nl> m . Return ( m . 
Int32Constant ( magic ) ) ; <nl> <nl> static void RunLoadStore ( MachineType rep ) { <nl> Node * index0 = m . IntPtrConstant ( x * sizeof ( buffer [ 0 ] ) ) ; <nl> Node * load = m . Load ( rep , base , index0 ) ; <nl> Node * index1 = m . IntPtrConstant ( y * sizeof ( buffer [ 0 ] ) ) ; <nl> - m . Store ( rep , base , index1 , load , kNoWriteBarrier ) ; <nl> + m . Store ( rep . representation ( ) , base , index1 , load , kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( OK ) ) ; <nl> <nl> CHECK ( buffer [ x ] ! = buffer [ y ] ) ; <nl> TEST ( RunFloatDiamond ) { <nl> m . Goto ( & end ) ; <nl> m . Bind ( & end ) ; <nl> Node * phi = m . Phi ( MachineRepresentation : : kFloat32 , k2 , k1 ) ; <nl> - m . Store ( MachineType : : Float32 ( ) , m . PointerConstant ( & buffer ) , <nl> + m . Store ( MachineRepresentation : : kFloat32 , m . PointerConstant ( & buffer ) , <nl> m . IntPtrConstant ( 0 ) , phi , kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( magic ) ) ; <nl> <nl> TEST ( RunDoubleDiamond ) { <nl> m . Goto ( & end ) ; <nl> m . Bind ( & end ) ; <nl> Node * phi = m . Phi ( MachineRepresentation : : kFloat64 , k2 , k1 ) ; <nl> - m . Store ( MachineType : : Float64 ( ) , m . PointerConstant ( & buffer ) , <nl> + m . Store ( MachineRepresentation : : kFloat64 , m . PointerConstant ( & buffer ) , <nl> m . Int32Constant ( 0 ) , phi , kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( magic ) ) ; <nl> <nl> TEST ( RunRefDiamond ) { <nl> m . Goto ( & end ) ; <nl> m . Bind ( & end ) ; <nl> Node * phi = m . Phi ( MachineRepresentation : : kTagged , k2 , k1 ) ; <nl> - m . Store ( MachineType : : AnyTagged ( ) , m . PointerConstant ( & buffer ) , <nl> + m . Store ( MachineRepresentation : : kTagged , m . PointerConstant ( & buffer ) , <nl> m . Int32Constant ( 0 ) , phi , kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( magic ) ) ; <nl> <nl> TEST ( RunDoubleRefDiamond ) { <nl> m . Bind ( & end ) ; <nl> Node * dphi = m . Phi ( MachineRepresentation : : kFloat64 , d2 , d1 ) ; <nl> Node * rphi = m . Phi ( MachineRepresentation : : kTagged , r2 , r1 ) ; <nl> - m . Store ( MachineType : : Float64 ( ) , m . PointerConstant ( & dbuffer ) , <nl> + m . Store ( MachineRepresentation : : kFloat64 , m . PointerConstant ( & dbuffer ) , <nl> m . Int32Constant ( 0 ) , dphi , kNoWriteBarrier ) ; <nl> - m . Store ( MachineType : : AnyTagged ( ) , m . PointerConstant ( & rbuffer ) , <nl> + m . Store ( MachineRepresentation : : kTagged , m . PointerConstant ( & rbuffer ) , <nl> m . Int32Constant ( 0 ) , rphi , kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( magic ) ) ; <nl> <nl> TEST ( RunDoubleRefDoubleDiamond ) { <nl> Node * dphi2 = m . Phi ( MachineRepresentation : : kFloat64 , d1 , dphi1 ) ; <nl> Node * rphi2 = m . Phi ( MachineRepresentation : : kTagged , r1 , rphi1 ) ; <nl> <nl> - m . Store ( MachineType : : Float64 ( ) , m . PointerConstant ( & dbuffer ) , <nl> + m . Store ( MachineRepresentation : : kFloat64 , m . PointerConstant ( & dbuffer ) , <nl> m . Int32Constant ( 0 ) , dphi2 , kNoWriteBarrier ) ; <nl> - m . Store ( MachineType : : AnyTagged ( ) , m . PointerConstant ( & rbuffer ) , <nl> + m . Store ( MachineRepresentation : : kTagged , m . PointerConstant ( & rbuffer ) , <nl> m . Int32Constant ( 0 ) , rphi2 , kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( magic ) ) ; <nl> <nl> TEST ( RunDoubleLoopPhi ) { <nl> m . Bind ( & body ) ; <nl> m . Goto ( & header ) ; <nl> m . Bind ( & end ) ; <nl> - m . Store ( MachineType : : Float64 ( ) , m . 
PointerConstant ( & buffer ) , <nl> + m . Store ( MachineRepresentation : : kFloat64 , m . PointerConstant ( & buffer ) , <nl> m . Int32Constant ( 0 ) , phi , kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( magic ) ) ; <nl> <nl> static void LoadStoreTruncation ( MachineType kRepresentation ) { <nl> RawMachineAssemblerTester < int32_t > m ; <nl> Node * a = m . LoadFromPointer ( & input , kRepresentation ) ; <nl> Node * ap1 = m . Int32Add ( a , m . Int32Constant ( 1 ) ) ; <nl> - m . StoreToPointer ( & input , kRepresentation , ap1 ) ; <nl> + m . StoreToPointer ( & input , kRepresentation . representation ( ) , ap1 ) ; <nl> m . Return ( ap1 ) ; <nl> <nl> const IntType max = std : : numeric_limits < IntType > : : max ( ) ; <nl> TEST ( RunTestIntPtrArithmetic ) { <nl> Node * output = m . PointerConstant ( & outputs [ kInputSize - 1 ] ) ; <nl> Node * elem_size = m . IntPtrConstant ( sizeof ( inputs [ 0 ] ) ) ; <nl> for ( int i = 0 ; i < kInputSize ; i + + ) { <nl> - m . Store ( MachineType : : Int32 ( ) , output , m . Load ( MachineType : : Int32 ( ) , input ) , <nl> - kNoWriteBarrier ) ; <nl> + m . Store ( MachineRepresentation : : kWord32 , output , <nl> + m . Load ( MachineType : : Int32 ( ) , input ) , kNoWriteBarrier ) ; <nl> input = m . IntPtrAdd ( input , elem_size ) ; <nl> output = m . IntPtrSub ( output , elem_size ) ; <nl> } <nl> TEST ( RunSpillLotsOfThings ) { <nl> accs [ i ] = acc ; <nl> } <nl> for ( int i = 0 ; i < kInputSize ; i + + ) { <nl> - m . StoreToPointer ( & outputs [ i ] , MachineType : : Int32 ( ) , accs [ i ] ) ; <nl> + m . StoreToPointer ( & outputs [ i ] , MachineRepresentation : : kWord32 , accs [ i ] ) ; <nl> } <nl> m . Return ( one ) ; <nl> m . Call ( ) ; <nl> TEST ( RunSpillConstantsAndParameters ) { <nl> accs [ i ] = acc ; <nl> } <nl> for ( int i = 0 ; i < kInputSize ; i + + ) { <nl> - m . StoreToPointer ( & outputs [ i ] , MachineType : : Int32 ( ) , accs [ i ] ) ; <nl> + m . StoreToPointer ( & outputs [ i ] , MachineRepresentation : : kWord32 , accs [ i ] ) ; <nl> } <nl> m . Return ( m . Int32Add ( acc , m . Int32Add ( m . Parameter ( 0 ) , m . Parameter ( 1 ) ) ) ) ; <nl> FOR_INT32_INPUTS ( i ) { <nl> TEST ( RunInt32AddWithOverflowP ) { <nl> Node * add = m . Int32AddWithOverflow ( bt . param0 , bt . param1 ) ; <nl> Node * val = m . Projection ( 0 , add ) ; <nl> Node * ovf = m . Projection ( 1 , add ) ; <nl> - m . StoreToPointer ( & actual_val , MachineType : : Int32 ( ) , val ) ; <nl> + m . StoreToPointer ( & actual_val , MachineRepresentation : : kWord32 , val ) ; <nl> bt . AddReturn ( ovf ) ; <nl> FOR_INT32_INPUTS ( i ) { <nl> FOR_INT32_INPUTS ( j ) { <nl> TEST ( RunInt32AddWithOverflowImm ) { <nl> Node * add = m . Int32AddWithOverflow ( m . Int32Constant ( * i ) , m . Parameter ( 0 ) ) ; <nl> Node * val = m . Projection ( 0 , add ) ; <nl> Node * ovf = m . Projection ( 1 , add ) ; <nl> - m . StoreToPointer ( & actual_val , MachineType : : Int32 ( ) , val ) ; <nl> + m . StoreToPointer ( & actual_val , MachineRepresentation : : kWord32 , val ) ; <nl> m . Return ( ovf ) ; <nl> FOR_INT32_INPUTS ( j ) { <nl> int expected_ovf = bits : : SignedAddOverflow32 ( * i , * j , & expected_val ) ; <nl> TEST ( RunInt32AddWithOverflowImm ) { <nl> Node * add = m . Int32AddWithOverflow ( m . Parameter ( 0 ) , m . Int32Constant ( * i ) ) ; <nl> Node * val = m . Projection ( 0 , add ) ; <nl> Node * ovf = m . Projection ( 1 , add ) ; <nl> - m . StoreToPointer ( & actual_val , MachineType : : Int32 ( ) , val ) ; <nl> + m . 
StoreToPointer ( & actual_val , MachineRepresentation : : kWord32 , val ) ; <nl> m . Return ( ovf ) ; <nl> FOR_INT32_INPUTS ( j ) { <nl> int expected_ovf = bits : : SignedAddOverflow32 ( * i , * j , & expected_val ) ; <nl> TEST ( RunInt32AddWithOverflowImm ) { <nl> m . Int32AddWithOverflow ( m . Int32Constant ( * i ) , m . Int32Constant ( * j ) ) ; <nl> Node * val = m . Projection ( 0 , add ) ; <nl> Node * ovf = m . Projection ( 1 , add ) ; <nl> - m . StoreToPointer ( & actual_val , MachineType : : Int32 ( ) , val ) ; <nl> + m . StoreToPointer ( & actual_val , MachineRepresentation : : kWord32 , val ) ; <nl> m . Return ( ovf ) ; <nl> int expected_ovf = bits : : SignedAddOverflow32 ( * i , * j , & expected_val ) ; <nl> CHECK_EQ ( expected_ovf , m . Call ( ) ) ; <nl> TEST ( RunInt32SubWithOverflowP ) { <nl> Node * add = m . Int32SubWithOverflow ( bt . param0 , bt . param1 ) ; <nl> Node * val = m . Projection ( 0 , add ) ; <nl> Node * ovf = m . Projection ( 1 , add ) ; <nl> - m . StoreToPointer ( & actual_val , MachineType : : Int32 ( ) , val ) ; <nl> + m . StoreToPointer ( & actual_val , MachineRepresentation : : kWord32 , val ) ; <nl> bt . AddReturn ( ovf ) ; <nl> FOR_INT32_INPUTS ( i ) { <nl> FOR_INT32_INPUTS ( j ) { <nl> TEST ( RunInt32SubWithOverflowImm ) { <nl> Node * add = m . Int32SubWithOverflow ( m . Int32Constant ( * i ) , m . Parameter ( 0 ) ) ; <nl> Node * val = m . Projection ( 0 , add ) ; <nl> Node * ovf = m . Projection ( 1 , add ) ; <nl> - m . StoreToPointer ( & actual_val , MachineType : : Int32 ( ) , val ) ; <nl> + m . StoreToPointer ( & actual_val , MachineRepresentation : : kWord32 , val ) ; <nl> m . Return ( ovf ) ; <nl> FOR_INT32_INPUTS ( j ) { <nl> int expected_ovf = bits : : SignedSubOverflow32 ( * i , * j , & expected_val ) ; <nl> TEST ( RunInt32SubWithOverflowImm ) { <nl> Node * add = m . Int32SubWithOverflow ( m . Parameter ( 0 ) , m . Int32Constant ( * i ) ) ; <nl> Node * val = m . Projection ( 0 , add ) ; <nl> Node * ovf = m . Projection ( 1 , add ) ; <nl> - m . StoreToPointer ( & actual_val , MachineType : : Int32 ( ) , val ) ; <nl> + m . StoreToPointer ( & actual_val , MachineRepresentation : : kWord32 , val ) ; <nl> m . Return ( ovf ) ; <nl> FOR_INT32_INPUTS ( j ) { <nl> int expected_ovf = bits : : SignedSubOverflow32 ( * j , * i , & expected_val ) ; <nl> TEST ( RunInt32SubWithOverflowImm ) { <nl> m . Int32SubWithOverflow ( m . Int32Constant ( * i ) , m . Int32Constant ( * j ) ) ; <nl> Node * val = m . Projection ( 0 , add ) ; <nl> Node * ovf = m . Projection ( 1 , add ) ; <nl> - m . StoreToPointer ( & actual_val , MachineType : : Int32 ( ) , val ) ; <nl> + m . StoreToPointer ( & actual_val , MachineRepresentation : : kWord32 , val ) ; <nl> m . Return ( ovf ) ; <nl> int expected_ovf = bits : : SignedSubOverflow32 ( * i , * j , & expected_val ) ; <nl> CHECK_EQ ( expected_ovf , m . Call ( ) ) ; <nl> TEST ( RunChangeInt32ToInt64P ) { <nl> if ( kPointerSize < 8 ) return ; <nl> int64_t actual = - 1 ; <nl> RawMachineAssemblerTester < int32_t > m ( MachineType : : Int32 ( ) ) ; <nl> - m . StoreToPointer ( & actual , MachineType : : Int64 ( ) , <nl> + m . StoreToPointer ( & actual , MachineRepresentation : : kWord64 , <nl> m . ChangeInt32ToInt64 ( m . Parameter ( 0 ) ) ) ; <nl> m . Return ( m . Int32Constant ( 0 ) ) ; <nl> FOR_INT32_INPUTS ( i ) { <nl> TEST ( RunChangeUint32ToUint64P ) { <nl> if ( kPointerSize < 8 ) return ; <nl> int64_t actual = - 1 ; <nl> RawMachineAssemblerTester < int32_t > m ( MachineType : : Uint32 ( ) ) ; <nl> - m . 
StoreToPointer ( & actual , MachineType : : Uint64 ( ) , <nl> + m . StoreToPointer ( & actual , MachineRepresentation : : kWord64 , <nl> m . ChangeUint32ToUint64 ( m . Parameter ( 0 ) ) ) ; <nl> m . Return ( m . Int32Constant ( 0 ) ) ; <nl> FOR_UINT32_INPUTS ( i ) { <nl> TEST ( RunCheckedStoreInt64 ) { <nl> Node * index = m . Parameter ( 0 ) ; <nl> Node * length = m . Int32Constant ( 16 ) ; <nl> Node * value = m . Int64Constant ( write ) ; <nl> - Node * store = m . AddNode ( m . machine ( ) - > CheckedStore ( MachineType : : Int64 ( ) ) , base , <nl> - index , length , value ) ; <nl> + Node * store = <nl> + m . AddNode ( m . machine ( ) - > CheckedStore ( MachineRepresentation : : kWord64 ) , base , <nl> + index , length , value ) ; <nl> USE ( store ) ; <nl> m . Return ( m . Int32Constant ( 11 ) ) ; <nl> <nl> TEST ( RunBitcastInt64ToFloat64 ) { <nl> double output = 0 . 0 ; <nl> RawMachineAssemblerTester < int32_t > m ; <nl> m . StoreToPointer ( <nl> - & output , MachineType : : Float64 ( ) , <nl> + & output , MachineRepresentation : : kFloat64 , <nl> m . BitcastInt64ToFloat64 ( m . LoadFromPointer ( & input , MachineType : : Int64 ( ) ) ) ) ; <nl> m . Return ( m . Int32Constant ( 11 ) ) ; <nl> FOR_INT64_INPUTS ( i ) { <nl> TEST ( RunTryTruncateFloat32ToInt64WithCheck ) { <nl> Node * trunc = m . TryTruncateFloat32ToInt64 ( m . Parameter ( 0 ) ) ; <nl> Node * val = m . Projection ( 0 , trunc ) ; <nl> Node * check = m . Projection ( 1 , trunc ) ; <nl> - m . StoreToPointer ( & success , MachineType : : Int64 ( ) , check ) ; <nl> + m . StoreToPointer ( & success , MachineRepresentation : : kWord64 , check ) ; <nl> m . Return ( val ) ; <nl> <nl> FOR_FLOAT32_INPUTS ( i ) { <nl> TEST ( RunTryTruncateFloat64ToInt64WithCheck ) { <nl> Node * trunc = m . TryTruncateFloat64ToInt64 ( m . Parameter ( 0 ) ) ; <nl> Node * val = m . Projection ( 0 , trunc ) ; <nl> Node * check = m . Projection ( 1 , trunc ) ; <nl> - m . StoreToPointer ( & success , MachineType : : Int64 ( ) , check ) ; <nl> + m . StoreToPointer ( & success , MachineRepresentation : : kWord64 , check ) ; <nl> m . Return ( val ) ; <nl> <nl> FOR_FLOAT64_INPUTS ( i ) { <nl> TEST ( RunTryTruncateFloat32ToUint64WithCheck ) { <nl> Node * trunc = m . TryTruncateFloat32ToUint64 ( m . Parameter ( 0 ) ) ; <nl> Node * val = m . Projection ( 0 , trunc ) ; <nl> Node * check = m . Projection ( 1 , trunc ) ; <nl> - m . StoreToPointer ( & success , MachineType : : Int64 ( ) , check ) ; <nl> + m . StoreToPointer ( & success , MachineRepresentation : : kWord64 , check ) ; <nl> m . Return ( val ) ; <nl> <nl> FOR_FLOAT32_INPUTS ( i ) { <nl> TEST ( RunTryTruncateFloat64ToUint64WithCheck ) { <nl> Node * trunc = m . TryTruncateFloat64ToUint64 ( m . Parameter ( 0 ) ) ; <nl> Node * val = m . Projection ( 0 , trunc ) ; <nl> Node * check = m . Projection ( 1 , trunc ) ; <nl> - m . StoreToPointer ( & success , MachineType : : Int64 ( ) , check ) ; <nl> + m . StoreToPointer ( & success , MachineRepresentation : : kWord64 , check ) ; <nl> m . Return ( val ) ; <nl> <nl> FOR_FLOAT64_INPUTS ( i ) { <nl> TEST ( RunBitcastInt32ToFloat32 ) { <nl> float output = 0 . 0 ; <nl> RawMachineAssemblerTester < int32_t > m ; <nl> m . StoreToPointer ( <nl> - & output , MachineType : : Float32 ( ) , <nl> + & output , MachineRepresentation : : kFloat32 , <nl> m . BitcastInt32ToFloat32 ( m . LoadFromPointer ( & input , MachineType : : Int32 ( ) ) ) ) ; <nl> m . Return ( m . 
Int32Constant ( 11 ) ) ; <nl> FOR_INT32_INPUTS ( i ) { <nl> mmm a / test / cctest / compiler / test - run - native - calls . cc <nl> ppp b / test / cctest / compiler / test - run - native - calls . cc <nl> class ArgsBuffer { <nl> Node * StoreOutput ( RawMachineAssembler & raw , Node * value ) { <nl> Node * base = raw . PointerConstant ( & output ) ; <nl> Node * offset = raw . Int32Constant ( 0 ) ; <nl> - return raw . Store ( MachineTypeForC < CType > ( ) , base , offset , value , <nl> - kNoWriteBarrier ) ; <nl> + return raw . Store ( MachineTypeForC < CType > ( ) . representation ( ) , base , offset , <nl> + value , kNoWriteBarrier ) ; <nl> } <nl> <nl> / / Computes the next set of inputs by updating the { input } array . <nl> static void CopyTwentyInt32 ( CallDescriptor * desc ) { <nl> Node * base = raw . PointerConstant ( output ) ; <nl> for ( int i = 0 ; i < kNumParams ; i + + ) { <nl> Node * offset = raw . Int32Constant ( i * sizeof ( int32_t ) ) ; <nl> - raw . Store ( MachineType : : Int32 ( ) , base , offset , raw . Parameter ( i ) , <nl> + raw . Store ( MachineRepresentation : : kWord32 , base , offset , raw . Parameter ( i ) , <nl> kNoWriteBarrier ) ; <nl> } <nl> raw . Return ( raw . Int32Constant ( 42 ) ) ; <nl> void MixedParamTest ( int start ) { <nl> } <nl> <nl> Node * call = raw . CallN ( desc , target , args ) ; <nl> - Node * store = raw . StoreToPointer ( output , sig - > GetReturn ( ) , call ) ; <nl> + Node * store = <nl> + raw . StoreToPointer ( output , sig - > GetReturn ( ) . representation ( ) , call ) ; <nl> USE ( store ) ; <nl> expected_ret = static_cast < int32_t > ( constant ) ; <nl> raw . Return ( raw . Int32Constant ( expected_ret ) ) ; <nl> mmm a / test / cctest / compiler / test - simplified - lowering . cc <nl> ppp b / test / cctest / compiler / test - simplified - lowering . cc <nl> TEST ( LowerStoreField_to_store ) { <nl> CHECK_EQ ( val , store - > InputAt ( 2 ) ) ; <nl> CheckFieldAccessArithmetic ( access , store ) ; <nl> <nl> - StoreRepresentation rep = OpParameter < StoreRepresentation > ( store ) ; <nl> + StoreRepresentation rep = StoreRepresentationOf ( store - > op ( ) ) ; <nl> if ( kMachineReps [ i ] . representation ( ) = = MachineRepresentation : : kTagged ) { <nl> CHECK_EQ ( kFullWriteBarrier , rep . write_barrier_kind ( ) ) ; <nl> } <nl> - CHECK_EQ ( kMachineReps [ i ] , rep . machine_type ( ) ) ; <nl> + CHECK_EQ ( kMachineReps [ i ] . representation ( ) , rep . representation ( ) ) ; <nl> } <nl> } <nl> { <nl> TEST ( LowerStoreField_to_store ) { <nl> t . LowerAllNodesAndLowerChanges ( ) ; <nl> CHECK_EQ ( IrOpcode : : kStore , store - > opcode ( ) ) ; <nl> CHECK_EQ ( t . p1 , store - > InputAt ( 2 ) ) ; <nl> - StoreRepresentation rep = OpParameter < StoreRepresentation > ( store ) ; <nl> + StoreRepresentation rep = StoreRepresentationOf ( store - > op ( ) ) ; <nl> CHECK_EQ ( kNoWriteBarrier , rep . write_barrier_kind ( ) ) ; <nl> } <nl> } <nl> TEST ( LowerStoreElement_to_store ) { <nl> CHECK_EQ ( val , store - > InputAt ( 2 ) ) ; <nl> CheckElementAccessArithmetic ( access , store ) ; <nl> <nl> - StoreRepresentation rep = OpParameter < StoreRepresentation > ( store ) ; <nl> + StoreRepresentation rep = StoreRepresentationOf ( store - > op ( ) ) ; <nl> if ( kMachineReps [ i ] . representation ( ) = = MachineRepresentation : : kTagged ) { <nl> CHECK_EQ ( kFullWriteBarrier , rep . write_barrier_kind ( ) ) ; <nl> } <nl> - CHECK_EQ ( kMachineReps [ i ] , rep . machine_type ( ) ) ; <nl> + CHECK_EQ ( kMachineReps [ i ] . representation ( ) , rep . 
representation ( ) ) ; <nl> } <nl> } <nl> { <nl> TEST ( LowerStoreElement_to_store ) { <nl> t . LowerAllNodesAndLowerChanges ( ) ; <nl> CHECK_EQ ( IrOpcode : : kStore , store - > opcode ( ) ) ; <nl> CHECK_EQ ( t . p2 , store - > InputAt ( 2 ) ) ; <nl> - StoreRepresentation rep = OpParameter < StoreRepresentation > ( store ) ; <nl> + StoreRepresentation rep = StoreRepresentationOf ( store - > op ( ) ) ; <nl> CHECK_EQ ( kNoWriteBarrier , rep . write_barrier_kind ( ) ) ; <nl> } <nl> } <nl> mmm a / test / unittests / compiler / arm / instruction - selector - arm - unittest . cc <nl> ppp b / test / unittests / compiler / arm / instruction - selector - arm - unittest . cc <nl> TEST_P ( InstructionSelectorMemoryAccessTest , StoreWithParameters ) { <nl> const MemoryAccess memacc = GetParam ( ) ; <nl> StreamBuilder m ( this , MachineType : : Int32 ( ) , MachineType : : Pointer ( ) , <nl> MachineType : : Int32 ( ) , memacc . type ) ; <nl> - m . Store ( memacc . type , m . Parameter ( 0 ) , m . Parameter ( 1 ) , m . Parameter ( 2 ) , <nl> - kNoWriteBarrier ) ; <nl> + m . Store ( memacc . type . representation ( ) , m . Parameter ( 0 ) , m . Parameter ( 1 ) , <nl> + m . Parameter ( 2 ) , kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( 0 ) ) ; <nl> Stream s = m . Build ( ) ; <nl> ASSERT_EQ ( 1U , s . size ( ) ) ; <nl> TEST_P ( InstructionSelectorMemoryAccessTest , StoreWithImmediateIndex ) { <nl> TRACED_FOREACH ( int32_t , index , memacc . immediates ) { <nl> StreamBuilder m ( this , MachineType : : Int32 ( ) , MachineType : : Pointer ( ) , <nl> memacc . type ) ; <nl> - m . Store ( memacc . type , m . Parameter ( 0 ) , m . Int32Constant ( index ) , m . Parameter ( 1 ) , <nl> - kNoWriteBarrier ) ; <nl> + m . Store ( memacc . type . representation ( ) , m . Parameter ( 0 ) , <nl> + m . Int32Constant ( index ) , m . Parameter ( 1 ) , kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( 0 ) ) ; <nl> Stream s = m . Build ( ) ; <nl> ASSERT_EQ ( 1U , s . size ( ) ) ; <nl> mmm a / test / unittests / compiler / arm64 / instruction - selector - arm64 - unittest . cc <nl> ppp b / test / unittests / compiler / arm64 / instruction - selector - arm64 - unittest . cc <nl> TEST_P ( InstructionSelectorMemoryAccessTest , StoreWithParameters ) { <nl> const MemoryAccess memacc = GetParam ( ) ; <nl> StreamBuilder m ( this , MachineType : : Int32 ( ) , MachineType : : Pointer ( ) , <nl> MachineType : : Int32 ( ) , memacc . type ) ; <nl> - m . Store ( memacc . type , m . Parameter ( 0 ) , m . Parameter ( 1 ) , m . Parameter ( 2 ) , <nl> - kNoWriteBarrier ) ; <nl> + m . Store ( memacc . type . representation ( ) , m . Parameter ( 0 ) , m . Parameter ( 1 ) , <nl> + m . Parameter ( 2 ) , kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( 0 ) ) ; <nl> Stream s = m . Build ( ) ; <nl> ASSERT_EQ ( 1U , s . size ( ) ) ; <nl> TEST_P ( InstructionSelectorMemoryAccessTest , StoreWithImmediateIndex ) { <nl> TRACED_FOREACH ( int32_t , index , memacc . immediates ) { <nl> StreamBuilder m ( this , MachineType : : Int32 ( ) , MachineType : : Pointer ( ) , <nl> memacc . type ) ; <nl> - m . Store ( memacc . type , m . Parameter ( 0 ) , m . Int32Constant ( index ) , m . Parameter ( 1 ) , <nl> - kNoWriteBarrier ) ; <nl> + m . Store ( memacc . type . representation ( ) , m . Parameter ( 0 ) , <nl> + m . Int32Constant ( index ) , m . Parameter ( 1 ) , kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( 0 ) ) ; <nl> Stream s = m . Build ( ) ; <nl> ASSERT_EQ ( 1U , s . 
size ( ) ) ; <nl> mmm a / test / unittests / compiler / change - lowering - unittest . cc <nl> ppp b / test / unittests / compiler / change - lowering - unittest . cc <nl> TARGET_TEST_P ( ChangeLoweringCommonTest , StoreFieldSmi ) { <nl> Reduction r = Reduce ( store ) ; <nl> <nl> ASSERT_TRUE ( r . Changed ( ) ) ; <nl> - EXPECT_THAT ( <nl> - r . replacement ( ) , <nl> - IsStore ( StoreRepresentation ( MachineType : : AnyTagged ( ) , kNoWriteBarrier ) , <nl> - p0 , IsIntPtrConstant ( access . offset - access . tag ( ) ) , p1 , <nl> - graph ( ) - > start ( ) , graph ( ) - > start ( ) ) ) ; <nl> + EXPECT_THAT ( r . replacement ( ) , <nl> + IsStore ( StoreRepresentation ( MachineRepresentation : : kTagged , <nl> + kNoWriteBarrier ) , <nl> + p0 , IsIntPtrConstant ( access . offset - access . tag ( ) ) , p1 , <nl> + graph ( ) - > start ( ) , graph ( ) - > start ( ) ) ) ; <nl> } <nl> <nl> <nl> TARGET_TEST_P ( ChangeLoweringCommonTest , StoreFieldTagged ) { <nl> Reduction r = Reduce ( store ) ; <nl> <nl> ASSERT_TRUE ( r . Changed ( ) ) ; <nl> - EXPECT_THAT ( <nl> - r . replacement ( ) , <nl> - IsStore ( StoreRepresentation ( MachineType : : AnyTagged ( ) , kFullWriteBarrier ) , <nl> - p0 , IsIntPtrConstant ( access . offset - access . tag ( ) ) , p1 , <nl> - graph ( ) - > start ( ) , graph ( ) - > start ( ) ) ) ; <nl> + EXPECT_THAT ( r . replacement ( ) , <nl> + IsStore ( StoreRepresentation ( MachineRepresentation : : kTagged , <nl> + kFullWriteBarrier ) , <nl> + p0 , IsIntPtrConstant ( access . offset - access . tag ( ) ) , p1 , <nl> + graph ( ) - > start ( ) , graph ( ) - > start ( ) ) ) ; <nl> } <nl> <nl> <nl> TARGET_TEST_P ( ChangeLoweringCommonTest , StoreElementTagged ) { <nl> index_match = IsChangeUint32ToUint64 ( index_match ) ; <nl> } <nl> <nl> - EXPECT_THAT ( <nl> - r . replacement ( ) , <nl> - IsStore ( StoreRepresentation ( MachineType : : AnyTagged ( ) , kFullWriteBarrier ) , <nl> - p0 , index_match , p2 , graph ( ) - > start ( ) , graph ( ) - > start ( ) ) ) ; <nl> + EXPECT_THAT ( r . replacement ( ) , <nl> + IsStore ( StoreRepresentation ( MachineRepresentation : : kTagged , <nl> + kFullWriteBarrier ) , <nl> + p0 , index_match , p2 , graph ( ) - > start ( ) , graph ( ) - > start ( ) ) ) ; <nl> } <nl> <nl> <nl> TARGET_TEST_P ( ChangeLoweringCommonTest , StoreElementUint8 ) { <nl> index_match = IsChangeUint32ToUint64 ( index_match ) ; <nl> } <nl> <nl> - EXPECT_THAT ( <nl> - r . replacement ( ) , <nl> - IsStore ( StoreRepresentation ( MachineType : : Uint8 ( ) , kNoWriteBarrier ) , p0 , <nl> - index_match , p2 , graph ( ) - > start ( ) , graph ( ) - > start ( ) ) ) ; <nl> + EXPECT_THAT ( r . replacement ( ) , <nl> + IsStore ( StoreRepresentation ( MachineRepresentation : : kWord8 , <nl> + kNoWriteBarrier ) , <nl> + p0 , index_match , p2 , graph ( ) - > start ( ) , graph ( ) - > start ( ) ) ) ; <nl> } <nl> <nl> <nl> TARGET_TEST_F ( ChangeLowering32Test , ChangeInt32ToTagged ) { <nl> Capture < Node * > add , branch , heap_number , if_true ; <nl> EXPECT_THAT ( <nl> r . 
replacement ( ) , <nl> - IsPhi ( <nl> - MachineRepresentation : : kTagged , <nl> - IsFinishRegion ( <nl> - AllOf ( CaptureEq ( & heap_number ) , <nl> - IsAllocateHeapNumber ( _ , CaptureEq ( & if_true ) ) ) , <nl> - IsStore ( <nl> - StoreRepresentation ( MachineType : : Float64 ( ) , kNoWriteBarrier ) , <nl> - CaptureEq ( & heap_number ) , <nl> - IsIntPtrConstant ( HeapNumber : : kValueOffset - kHeapObjectTag ) , <nl> - IsChangeInt32ToFloat64 ( value ) , CaptureEq ( & heap_number ) , <nl> - CaptureEq ( & if_true ) ) ) , <nl> - IsProjection ( <nl> - 0 , AllOf ( CaptureEq ( & add ) , IsInt32AddWithOverflow ( value , value ) ) ) , <nl> - IsMerge ( AllOf ( CaptureEq ( & if_true ) , IsIfTrue ( CaptureEq ( & branch ) ) ) , <nl> - IsIfFalse ( AllOf ( CaptureEq ( & branch ) , <nl> - IsBranch ( IsProjection ( 1 , CaptureEq ( & add ) ) , <nl> - graph ( ) - > start ( ) ) ) ) ) ) ) ; <nl> + IsPhi ( MachineRepresentation : : kTagged , <nl> + IsFinishRegion ( <nl> + AllOf ( CaptureEq ( & heap_number ) , <nl> + IsAllocateHeapNumber ( _ , CaptureEq ( & if_true ) ) ) , <nl> + IsStore ( <nl> + StoreRepresentation ( MachineRepresentation : : kFloat64 , <nl> + kNoWriteBarrier ) , <nl> + CaptureEq ( & heap_number ) , <nl> + IsIntPtrConstant ( HeapNumber : : kValueOffset - kHeapObjectTag ) , <nl> + IsChangeInt32ToFloat64 ( value ) , CaptureEq ( & heap_number ) , <nl> + CaptureEq ( & if_true ) ) ) , <nl> + IsProjection ( 0 , AllOf ( CaptureEq ( & add ) , <nl> + IsInt32AddWithOverflow ( value , value ) ) ) , <nl> + IsMerge ( AllOf ( CaptureEq ( & if_true ) , IsIfTrue ( CaptureEq ( & branch ) ) ) , <nl> + IsIfFalse ( AllOf ( CaptureEq ( & branch ) , <nl> + IsBranch ( IsProjection ( 1 , CaptureEq ( & add ) ) , <nl> + graph ( ) - > start ( ) ) ) ) ) ) ) ; <nl> } <nl> <nl> <nl> TARGET_TEST_F ( ChangeLowering32Test , ChangeUint32ToTagged ) { <nl> AllOf ( CaptureEq ( & heap_number ) , <nl> IsAllocateHeapNumber ( _ , CaptureEq ( & if_false ) ) ) , <nl> IsStore ( <nl> - StoreRepresentation ( MachineType : : Float64 ( ) , kNoWriteBarrier ) , <nl> + StoreRepresentation ( MachineRepresentation : : kFloat64 , <nl> + kNoWriteBarrier ) , <nl> CaptureEq ( & heap_number ) , <nl> IsInt32Constant ( HeapNumber : : kValueOffset - kHeapObjectTag ) , <nl> IsChangeUint32ToFloat64 ( value ) , CaptureEq ( & heap_number ) , <nl> TARGET_TEST_F ( ChangeLowering64Test , ChangeUint32ToTagged ) { <nl> AllOf ( CaptureEq ( & heap_number ) , <nl> IsAllocateHeapNumber ( _ , CaptureEq ( & if_false ) ) ) , <nl> IsStore ( <nl> - StoreRepresentation ( MachineType : : Float64 ( ) , kNoWriteBarrier ) , <nl> + StoreRepresentation ( MachineRepresentation : : kFloat64 , <nl> + kNoWriteBarrier ) , <nl> CaptureEq ( & heap_number ) , <nl> IsInt64Constant ( HeapNumber : : kValueOffset - kHeapObjectTag ) , <nl> IsChangeUint32ToFloat64 ( value ) , CaptureEq ( & heap_number ) , <nl> mmm a / test / unittests / compiler / ia32 / instruction - selector - ia32 - unittest . cc <nl> ppp b / test / unittests / compiler / ia32 / instruction - selector - ia32 - unittest . cc <nl> TEST_P ( InstructionSelectorMemoryAccessTest , StoreWithParameters ) { <nl> const MemoryAccess memacc = GetParam ( ) ; <nl> StreamBuilder m ( this , MachineType : : Int32 ( ) , MachineType : : Pointer ( ) , <nl> MachineType : : Int32 ( ) , memacc . type ) ; <nl> - m . Store ( memacc . type , m . Parameter ( 0 ) , m . Parameter ( 1 ) , m . Parameter ( 2 ) , <nl> - kNoWriteBarrier ) ; <nl> + m . Store ( memacc . type . representation ( ) , m . Parameter ( 0 ) , m . Parameter ( 1 ) , <nl> + m . 
Parameter ( 2 ) , kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( 0 ) ) ; <nl> Stream s = m . Build ( ) ; <nl> ASSERT_EQ ( 1U , s . size ( ) ) ; <nl> TEST_P ( InstructionSelectorMemoryAccessTest , StoreWithImmediateBase ) { <nl> TRACED_FOREACH ( int32_t , base , kImmediates ) { <nl> StreamBuilder m ( this , MachineType : : Int32 ( ) , MachineType : : Int32 ( ) , <nl> memacc . type ) ; <nl> - m . Store ( memacc . type , m . Int32Constant ( base ) , m . Parameter ( 0 ) , m . Parameter ( 1 ) , <nl> - kNoWriteBarrier ) ; <nl> + m . Store ( memacc . type . representation ( ) , m . Int32Constant ( base ) , m . Parameter ( 0 ) , <nl> + m . Parameter ( 1 ) , kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( 0 ) ) ; <nl> Stream s = m . Build ( ) ; <nl> ASSERT_EQ ( 1U , s . size ( ) ) ; <nl> TEST_P ( InstructionSelectorMemoryAccessTest , StoreWithImmediateIndex ) { <nl> TRACED_FOREACH ( int32_t , index , kImmediates ) { <nl> StreamBuilder m ( this , MachineType : : Int32 ( ) , MachineType : : Pointer ( ) , <nl> memacc . type ) ; <nl> - m . Store ( memacc . type , m . Parameter ( 0 ) , m . Int32Constant ( index ) , m . Parameter ( 1 ) , <nl> - kNoWriteBarrier ) ; <nl> + m . Store ( memacc . type . representation ( ) , m . Parameter ( 0 ) , <nl> + m . Int32Constant ( index ) , m . Parameter ( 1 ) , kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( 0 ) ) ; <nl> Stream s = m . Build ( ) ; <nl> ASSERT_EQ ( 1U , s . size ( ) ) ; <nl> class AddressingModeUnitTest : public InstructionSelectorTest { <nl> void Run ( Node * base , Node * load_index , Node * store_index , <nl> AddressingMode mode ) { <nl> Node * load = m - > Load ( MachineType : : Int32 ( ) , base , load_index ) ; <nl> - m - > Store ( MachineType : : Int32 ( ) , base , store_index , load , kNoWriteBarrier ) ; <nl> + m - > Store ( MachineRepresentation : : kWord32 , base , store_index , load , <nl> + kNoWriteBarrier ) ; <nl> m - > Return ( m - > Int32Constant ( 0 ) ) ; <nl> Stream s = m - > Build ( ) ; <nl> ASSERT_EQ ( 2U , s . size ( ) ) ; <nl> mmm a / test / unittests / compiler / interpreter - assembler - unittest . cc <nl> ppp b / test / unittests / compiler / interpreter - assembler - unittest . cc <nl> TARGET_TEST_F ( InterpreterAssemblerTest , StoreRegister ) { <nl> Node * store_reg_node = m . StoreRegister ( store_value , reg_index_node ) ; <nl> EXPECT_THAT ( <nl> store_reg_node , <nl> - m . IsStore ( <nl> - StoreRepresentation ( MachineType : : AnyTagged ( ) , kNoWriteBarrier ) , <nl> - IsParameter ( Linkage : : kInterpreterRegisterFileParameter ) , <nl> - IsWordShl ( reg_index_node , IsInt32Constant ( kPointerSizeLog2 ) ) , <nl> - store_value ) ) ; <nl> + m . IsStore ( StoreRepresentation ( MachineRepresentation : : kTagged , <nl> + kNoWriteBarrier ) , <nl> + IsParameter ( Linkage : : kInterpreterRegisterFileParameter ) , <nl> + IsWordShl ( reg_index_node , IsInt32Constant ( kPointerSizeLog2 ) ) , <nl> + store_value ) ) ; <nl> } <nl> } <nl> <nl> TARGET_TEST_F ( InterpreterAssemblerTest , StoreContextSlot ) { <nl> IsIntPtrAdd ( IsWordShl ( slot_index , IsInt32Constant ( kPointerSizeLog2 ) ) , <nl> IsInt32Constant ( Context : : kHeaderSize - kHeapObjectTag ) ) ; <nl> EXPECT_THAT ( store_context_slot , <nl> - m . IsStore ( StoreRepresentation ( MachineType : : AnyTagged ( ) , <nl> + m . IsStore ( StoreRepresentation ( MachineRepresentation : : kTagged , <nl> kFullWriteBarrier ) , <nl> context , offset , value ) ) ; <nl> } <nl> mmm a / test / unittests / compiler / machine - operator - reducer - unittest . 
cc <nl> ppp b / test / unittests / compiler / machine - operator - reducer - unittest . cc <nl> TEST_F ( MachineOperatorReducerTest , Float64LessThanOrEqualWithFloat32Constant ) { <nl> <nl> <nl> TEST_F ( MachineOperatorReducerTest , StoreRepWord8WithWord32And ) { <nl> - const StoreRepresentation rep ( MachineType : : Uint8 ( ) , kNoWriteBarrier ) ; <nl> + const StoreRepresentation rep ( MachineRepresentation : : kWord8 , kNoWriteBarrier ) ; <nl> Node * const base = Parameter ( 0 ) ; <nl> Node * const index = Parameter ( 1 ) ; <nl> Node * const value = Parameter ( 2 ) ; <nl> TEST_F ( MachineOperatorReducerTest , StoreRepWord8WithWord32And ) { <nl> <nl> <nl> TEST_F ( MachineOperatorReducerTest , StoreRepWord8WithWord32SarAndWord32Shl ) { <nl> - const StoreRepresentation rep ( MachineType : : Uint8 ( ) , kNoWriteBarrier ) ; <nl> + const StoreRepresentation rep ( MachineRepresentation : : kWord8 , kNoWriteBarrier ) ; <nl> Node * const base = Parameter ( 0 ) ; <nl> Node * const index = Parameter ( 1 ) ; <nl> Node * const value = Parameter ( 2 ) ; <nl> TEST_F ( MachineOperatorReducerTest , StoreRepWord8WithWord32SarAndWord32Shl ) { <nl> <nl> <nl> TEST_F ( MachineOperatorReducerTest , StoreRepWord16WithWord32And ) { <nl> - const StoreRepresentation rep ( MachineType : : Uint16 ( ) , kNoWriteBarrier ) ; <nl> + const StoreRepresentation rep ( MachineRepresentation : : kWord16 , <nl> + kNoWriteBarrier ) ; <nl> Node * const base = Parameter ( 0 ) ; <nl> Node * const index = Parameter ( 1 ) ; <nl> Node * const value = Parameter ( 2 ) ; <nl> TEST_F ( MachineOperatorReducerTest , StoreRepWord16WithWord32And ) { <nl> <nl> <nl> TEST_F ( MachineOperatorReducerTest , StoreRepWord16WithWord32SarAndWord32Shl ) { <nl> - const StoreRepresentation rep ( MachineType : : Uint16 ( ) , kNoWriteBarrier ) ; <nl> + const StoreRepresentation rep ( MachineRepresentation : : kWord16 , <nl> + kNoWriteBarrier ) ; <nl> Node * const base = Parameter ( 0 ) ; <nl> Node * const index = Parameter ( 1 ) ; <nl> Node * const value = Parameter ( 2 ) ; <nl> mmm a / test / unittests / compiler / machine - operator - unittest . cc <nl> ppp b / test / unittests / compiler / machine - operator - unittest . 
cc <nl> const MachineType kMachineTypesForAccess [ ] = { <nl> MachineType : : Int32 ( ) , MachineType : : Uint32 ( ) , MachineType : : Int64 ( ) , <nl> MachineType : : Uint64 ( ) , MachineType : : AnyTagged ( ) } ; <nl> <nl> + <nl> + const MachineRepresentation kRepresentationsForStore [ ] = { <nl> + MachineRepresentation : : kFloat32 , MachineRepresentation : : kFloat64 , <nl> + MachineRepresentation : : kWord8 , MachineRepresentation : : kWord16 , <nl> + MachineRepresentation : : kWord32 , MachineRepresentation : : kWord64 , <nl> + MachineRepresentation : : kTagged } ; <nl> + <nl> } / / namespace <nl> <nl> <nl> INSTANTIATE_TEST_CASE_P ( <nl> <nl> class MachineStoreOperatorTest <nl> : public MachineOperatorTestWithParam < <nl> - : : testing : : tuple < MachineType , WriteBarrierKind > > { <nl> + : : testing : : tuple < MachineRepresentation , WriteBarrierKind > > { <nl> protected : <nl> StoreRepresentation GetParam ( ) const { <nl> return StoreRepresentation ( <nl> - : : testing : : get < 0 > ( MachineOperatorTestWithParam < <nl> - : : testing : : tuple < MachineType , WriteBarrierKind > > : : GetParam ( ) ) , <nl> - : : testing : : get < 1 > ( MachineOperatorTestWithParam < <nl> - : : testing : : tuple < MachineType , WriteBarrierKind > > : : GetParam ( ) ) ) ; <nl> + : : testing : : get < 0 > ( <nl> + MachineOperatorTestWithParam < : : testing : : tuple < <nl> + MachineRepresentation , WriteBarrierKind > > : : GetParam ( ) ) , <nl> + : : testing : : get < 1 > ( <nl> + MachineOperatorTestWithParam < : : testing : : tuple < <nl> + MachineRepresentation , WriteBarrierKind > > : : GetParam ( ) ) ) ; <nl> } <nl> } ; <nl> <nl> INSTANTIATE_TEST_CASE_P ( <nl> MachineOperatorTest , MachineStoreOperatorTest , <nl> : : testing : : Combine ( <nl> : : testing : : ValuesIn ( kMachineReps ) , <nl> - : : testing : : Combine ( : : testing : : ValuesIn ( kMachineTypesForAccess ) , <nl> + : : testing : : Combine ( : : testing : : ValuesIn ( kRepresentationsForStore ) , <nl> : : testing : : Values ( kNoWriteBarrier , <nl> kFullWriteBarrier ) ) ) ) ; <nl> # endif <nl> mmm a / test / unittests / compiler / mips / instruction - selector - mips - unittest . cc <nl> ppp b / test / unittests / compiler / mips / instruction - selector - mips - unittest . cc <nl> TEST_P ( InstructionSelectorMemoryAccessTest , StoreWithParameters ) { <nl> const MemoryAccess memacc = GetParam ( ) ; <nl> StreamBuilder m ( this , MachineType : : Int32 ( ) , MachineType : : Pointer ( ) , <nl> MachineType : : Int32 ( ) , memacc . type ) ; <nl> - m . Store ( memacc . type , m . Parameter ( 0 ) , m . Parameter ( 1 ) , kNoWriteBarrier ) ; <nl> + m . Store ( memacc . type . representation ( ) , m . Parameter ( 0 ) , m . Parameter ( 1 ) , <nl> + kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( 0 ) ) ; <nl> Stream s = m . Build ( ) ; <nl> ASSERT_EQ ( 1U , s . size ( ) ) ; <nl> TEST_P ( InstructionSelectorMemoryAccessImmTest , StoreWithImmediateIndex ) { <nl> TRACED_FOREACH ( int32_t , index , memacc . immediates ) { <nl> StreamBuilder m ( this , MachineType : : Int32 ( ) , MachineType : : Pointer ( ) , <nl> memacc . type ) ; <nl> - m . Store ( memacc . type , m . Parameter ( 0 ) , m . Int32Constant ( index ) , m . Parameter ( 1 ) , <nl> - kNoWriteBarrier ) ; <nl> + m . Store ( memacc . type . representation ( ) , m . Parameter ( 0 ) , <nl> + m . Int32Constant ( index ) , m . Parameter ( 1 ) , kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( 0 ) ) ; <nl> Stream s = m . Build ( ) ; <nl> ASSERT_EQ ( 1U , s . 
size ( ) ) ; <nl> TEST_P ( InstructionSelectorMemoryAccessImmMoreThan16bitTest , <nl> TRACED_FOREACH ( int32_t , index , memacc . immediates ) { <nl> StreamBuilder m ( this , MachineType : : Int32 ( ) , MachineType : : Pointer ( ) , <nl> memacc . type ) ; <nl> - m . Store ( memacc . type , m . Parameter ( 0 ) , m . Int32Constant ( index ) , m . Parameter ( 1 ) , <nl> - kNoWriteBarrier ) ; <nl> + m . Store ( memacc . type . representation ( ) , m . Parameter ( 0 ) , <nl> + m . Int32Constant ( index ) , m . Parameter ( 1 ) , kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( 0 ) ) ; <nl> Stream s = m . Build ( ) ; <nl> ASSERT_EQ ( 2U , s . size ( ) ) ; <nl> mmm a / test / unittests / compiler / mips64 / instruction - selector - mips64 - unittest . cc <nl> ppp b / test / unittests / compiler / mips64 / instruction - selector - mips64 - unittest . cc <nl> TEST_P ( InstructionSelectorMemoryAccessTest , StoreWithParameters ) { <nl> const MemoryAccess memacc = GetParam ( ) ; <nl> StreamBuilder m ( this , MachineType : : Int32 ( ) , MachineType : : Pointer ( ) , <nl> MachineType : : Int32 ( ) , memacc . type ) ; <nl> - m . Store ( memacc . type , m . Parameter ( 0 ) , m . Parameter ( 1 ) , kNoWriteBarrier ) ; <nl> + m . Store ( memacc . type . representation ( ) , m . Parameter ( 0 ) , m . Parameter ( 1 ) , <nl> + kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( 0 ) ) ; <nl> Stream s = m . Build ( ) ; <nl> ASSERT_EQ ( 1U , s . size ( ) ) ; <nl> TEST_P ( InstructionSelectorMemoryAccessImmTest , StoreWithImmediateIndex ) { <nl> TRACED_FOREACH ( int32_t , index , memacc . immediates ) { <nl> StreamBuilder m ( this , MachineType : : Int32 ( ) , MachineType : : Pointer ( ) , <nl> memacc . type ) ; <nl> - m . Store ( memacc . type , m . Parameter ( 0 ) , m . Int32Constant ( index ) , m . Parameter ( 1 ) , <nl> - kNoWriteBarrier ) ; <nl> + m . Store ( memacc . type . representation ( ) , m . Parameter ( 0 ) , <nl> + m . Int32Constant ( index ) , m . Parameter ( 1 ) , kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( 0 ) ) ; <nl> Stream s = m . Build ( ) ; <nl> ASSERT_EQ ( 1U , s . size ( ) ) ; <nl> TEST_P ( InstructionSelectorMemoryAccessImmMoreThan16bitTest , <nl> TRACED_FOREACH ( int32_t , index , memacc . immediates ) { <nl> StreamBuilder m ( this , MachineType : : Int32 ( ) , MachineType : : Pointer ( ) , <nl> memacc . type ) ; <nl> - m . Store ( memacc . type , m . Parameter ( 0 ) , m . Int32Constant ( index ) , m . Parameter ( 1 ) , <nl> - kNoWriteBarrier ) ; <nl> + m . Store ( memacc . type . representation ( ) , m . Parameter ( 0 ) , <nl> + m . Int32Constant ( index ) , m . Parameter ( 1 ) , kNoWriteBarrier ) ; <nl> m . Return ( m . Int32Constant ( 0 ) ) ; <nl> Stream s = m . Build ( ) ; <nl> ASSERT_EQ ( 2U , s . size ( ) ) ; <nl> mmm a / test / unittests / compiler / x64 / instruction - selector - x64 - unittest . cc <nl> ppp b / test / unittests / compiler / x64 / instruction - selector - x64 - unittest . cc <nl> TEST_P ( InstructionSelectorMemoryAccessTest , StoreWithParameters ) { <nl> const MemoryAccess memacc = GetParam ( ) ; <nl> StreamBuilder m ( this , MachineType : : Int32 ( ) , MachineType : : Pointer ( ) , <nl> MachineType : : Int32 ( ) , memacc . type ) ; <nl> - m . Store ( memacc . type , m . Parameter ( 0 ) , m . Parameter ( 1 ) , m . Parameter ( 2 ) , <nl> - kNoWriteBarrier ) ; <nl> + m . Store ( memacc . type . representation ( ) , m . Parameter ( 0 ) , m . Parameter ( 1 ) , <nl> + m . Parameter ( 2 ) , kNoWriteBarrier ) ; <nl> m . Return ( m . 
Int32Constant ( 0 ) ) ; <nl> Stream s = m . Build ( ) ; <nl> ASSERT_EQ ( 1U , s . size ( ) ) ; <nl>
[ turbofan ] Store nodes use only MachineRepresentation , not MachineType .
v8/v8
56673804e026b297a5165e6f807f55aaaad6c6ee
2015-12-11T15:34:16Z
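(Aside, not part of the dataset record above: a minimal, self-contained C++ sketch of the API shape the commit message describes, where a store descriptor carries only a MachineRepresentation plus a write-barrier kind instead of a full MachineType. The class and member names below are hypothetical stand-ins for illustration, not taken from the v8 sources.)

#include <cassert>

// Toy stand-ins for the concepts named in the commit message; not v8 code.
enum class MachineRepresentation { kWord8, kWord16, kWord32, kWord64,
                                   kFloat32, kFloat64, kTagged };
enum WriteBarrierKind { kNoWriteBarrier, kFullWriteBarrier };

// A store only needs to know the width/kind of the slot being written
// (its representation) and whether a write barrier is required; the
// signedness information carried by a full machine type is irrelevant
// to the store itself.
class StoreRepresentation {
 public:
  StoreRepresentation(MachineRepresentation rep, WriteBarrierKind wb)
      : representation_(rep), write_barrier_kind_(wb) {}
  MachineRepresentation representation() const { return representation_; }
  WriteBarrierKind write_barrier_kind() const { return write_barrier_kind_; }

 private:
  MachineRepresentation representation_;
  WriteBarrierKind write_barrier_kind_;
};

int main() {
  // Callers hand over a representation, mirroring the
  // memacc.type.representation() calls visible in the diff above.
  StoreRepresentation rep(MachineRepresentation::kTagged, kNoWriteBarrier);
  assert(rep.write_barrier_kind() == kNoWriteBarrier);
  return 0;
}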
mmm a / src / rdb_protocol / val . cc <nl> ppp b / src / rdb_protocol / val . cc <nl> counted_t < func_t > val_t : : as_func ( function_shortcut_t shortcut ) { <nl> unreachable ( ) ; <nl> } <nl> <nl> - / / We use a switch here so that people have to update it if they add another <nl> - / / shortcut . <nl> switch ( shortcut ) { <nl> case CONSTANT_SHORTCUT : <nl> return new_constant_func ( as_datum ( ) , backtrace ( ) ) ; <nl>
Removed unnecessary comment about the benefits of switch statements .
rethinkdb/rethinkdb
01a6f6dbe1b3db23013ab5b761a89b4bcb5f8ec3
2014-06-03T02:26:46Z
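(Aside, not part of the dataset record above: the comment deleted in this commit alluded to a common C++ pattern, switching over an enum with no default case so that the compiler flags any newly added enumerator that is not handled. A minimal, self-contained sketch of that pattern follows; the enumerator and function names other than CONSTANT_SHORTCUT are hypothetical, not the rethinkdb definitions. With -Wswitch (enabled by -Wall in GCC, on by default in Clang), adding another shortcut value without a matching case produces a warning at this switch.)

// Hypothetical enum and handler illustrating the exhaustive-switch pattern.
enum function_shortcut_t { NO_SHORTCUT, CONSTANT_SHORTCUT, GET_FIELD_SHORTCUT };

const char* describe(function_shortcut_t shortcut) {
  // No default case on purpose: the compiler's enum-coverage check is what
  // forces this function to be updated when a new shortcut is introduced.
  switch (shortcut) {
    case NO_SHORTCUT:        return "no shortcut";
    case CONSTANT_SHORTCUT:  return "constant function";
    case GET_FIELD_SHORTCUT: return "field getter";
  }
  return "unreachable";
}

int main() {
  return describe(CONSTANT_SHORTCUT) != nullptr ? 0 : 1;
}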
mmm a / src / compiler / access - info . cc <nl> ppp b / src / compiler / access - info . cc <nl> bool AccessInfoFactory : : ComputeElementAccessInfo ( <nl> bool AccessInfoFactory : : ComputeElementAccessInfos ( <nl> FeedbackNexus nexus , MapHandles const & maps , AccessMode access_mode , <nl> ZoneVector < ElementAccessInfo > * access_infos ) const { <nl> - ProcessedFeedback processed ( broker ( ) - > zone ( ) ) ; <nl> - <nl> + ElementAccessFeedback const * processed ; <nl> if ( FLAG_concurrent_inlining ) { <nl> - / / TODO ( neis ) : When concurrent inlining is ready , <nl> - / / - change the printing below to not look into the heap , <nl> - / / - remove the call to ProcessFeedbackMapsForElementAccess , <nl> - / / - remove the Allow * scopes , <nl> - AllowCodeDependencyChange dependency_change_ ; <nl> - AllowHandleAllocation handle_allocation_ ; <nl> - AllowHandleDereference handle_dereference_ ; <nl> - AllowHeapAllocation heap_allocation_ ; <nl> - <nl> - / / We have already processed the feedback for this nexus during <nl> - / / serialization . Use that data ! We still process the incoming { maps } ( even <nl> - / / though we don ' t use them ) so that we can print a comparison . <nl> - ProcessFeedbackMapsForElementAccess ( broker ( ) , maps , & processed ) ; <nl> - ProcessedFeedback const & preprocessed = broker ( ) - > GetFeedback ( nexus ) ; <nl> TRACE_BROKER ( broker ( ) , <nl> " ComputeElementAccessInfos : using preprocessed feedback " <nl> < < " ( slot " < < nexus . slot ( ) < < " of " <nl> - < < Brief ( * nexus . vector_handle ( ) ) < < " ; " <nl> - < < preprocessed . receiver_maps . size ( ) < < " / " <nl> - < < preprocessed . transitions . size ( ) < < " vs " <nl> - < < processed . receiver_maps . size ( ) < < " / " <nl> - < < processed . transitions . size ( ) < < " ) . \ n " ) ; <nl> - processed . receiver_maps = preprocessed . receiver_maps ; <nl> - processed . transitions = preprocessed . transitions ; <nl> + < < " feedback vector handle " <nl> + < < nexus . vector_handle ( ) . address ( ) < < " ) . \ n " ) ; <nl> + processed = broker ( ) - > GetElementAccessFeedback ( FeedbackSource ( nexus ) ) ; <nl> } else { <nl> - ProcessFeedbackMapsForElementAccess ( broker ( ) , maps , & processed ) ; <nl> + processed = broker ( ) - > ProcessFeedbackMapsForElementAccess ( maps ) ; <nl> } <nl> <nl> - if ( processed . receiver_maps . empty ( ) ) return false ; <nl> + if ( processed = = nullptr ) return false ; <nl> <nl> if ( access_mode = = AccessMode : : kLoad | | access_mode = = AccessMode : : kHas ) { <nl> / / For polymorphic loads of similar elements kinds ( i . e . all tagged or all <nl> bool AccessInfoFactory : : ComputeElementAccessInfos ( <nl> / / much faster than transitioning the elements to the worst case , trading a <nl> / / TransitionElementsKind for a CheckMaps , avoiding mutation of the array . <nl> ElementAccessInfo access_info ; <nl> - if ( ConsolidateElementLoad ( processed , & access_info ) ) { <nl> + if ( ConsolidateElementLoad ( * processed , & access_info ) ) { <nl> access_infos - > push_back ( access_info ) ; <nl> return true ; <nl> } <nl> } <nl> <nl> - for ( Handle < Map > receiver_map : processed . receiver_maps ) { <nl> + for ( Handle < Map > receiver_map : processed - > receiver_maps ) { <nl> / / Compute the element access information . <nl> ElementAccessInfo access_info ; <nl> if ( ! 
ComputeElementAccessInfo ( receiver_map , access_mode , & access_info ) ) { <nl> bool AccessInfoFactory : : ComputeElementAccessInfos ( <nl> } <nl> <nl> / / Collect the possible transitions for the { receiver_map } . <nl> - for ( auto transition : processed . transitions ) { <nl> + for ( auto transition : processed - > transitions ) { <nl> if ( transition . second . equals ( receiver_map ) ) { <nl> access_info . AddTransitionSource ( transition . first ) ; <nl> } <nl> Maybe < ElementsKind > GeneralizeElementsKind ( ElementsKind this_kind , <nl> } / / namespace <nl> <nl> bool AccessInfoFactory : : ConsolidateElementLoad ( <nl> - ProcessedFeedback const & processed , ElementAccessInfo * access_info ) const { <nl> - ProcessedFeedback : : MapIterator it = processed . all_maps ( broker ( ) ) ; <nl> + ElementAccessFeedback const & processed , <nl> + ElementAccessInfo * access_info ) const { <nl> + ElementAccessFeedback : : MapIterator it = processed . all_maps ( broker ( ) ) ; <nl> MapRef first_map = it . current ( ) ; <nl> InstanceType instance_type = first_map . instance_type ( ) ; <nl> ElementsKind elements_kind = first_map . elements_kind ( ) ; <nl> mmm a / src / compiler / access - info . h <nl> ppp b / src / compiler / access - info . h <nl> namespace compiler { <nl> <nl> / / Forward declarations . <nl> class CompilationDependencies ; <nl> + class ElementAccessFeedback ; <nl> class Type ; <nl> class TypeCache ; <nl> - struct ProcessedFeedback ; <nl> <nl> / / Whether we are loading a property or storing to a property . <nl> / / For a store during literal creation , do not walk up the prototype chain . <nl> class AccessInfoFactory final { <nl> ZoneVector < PropertyAccessInfo > * access_infos ) const ; <nl> <nl> private : <nl> - bool ConsolidateElementLoad ( ProcessedFeedback const & processed , <nl> + bool ConsolidateElementLoad ( ElementAccessFeedback const & processed , <nl> ElementAccessInfo * access_info ) const ; <nl> bool LookupSpecialFieldAccessor ( Handle < Map > map , Handle < Name > name , <nl> PropertyAccessInfo * access_info ) const ; <nl> mmm a / src / compiler / js - heap - broker . cc <nl> ppp b / src / compiler / js - heap - broker . cc <nl> <nl> # include " src / objects / js - regexp - inl . h " <nl> # include " src / objects / module - inl . h " <nl> # include " src / utils . h " <nl> + # include " src / vector - slot - pair . h " <nl> <nl> namespace v8 { <nl> namespace internal { <nl> void JSBoundFunctionRef : : Serialize ( ) { <nl> data ( ) - > AsJSBoundFunction ( ) - > Serialize ( broker ( ) ) ; <nl> } <nl> <nl> + void PropertyCellRef : : Serialize ( ) { <nl> + if ( broker ( ) - > mode ( ) = = JSHeapBroker : : kDisabled ) return ; <nl> + CHECK_EQ ( broker ( ) - > mode ( ) , JSHeapBroker : : kSerializing ) ; <nl> + data ( ) - > AsPropertyCell ( ) - > Serialize ( broker ( ) ) ; <nl> + } <nl> + <nl> bool CanInlineElementAccess ( MapRef const & map ) { <nl> if ( ! map . IsJSObjectMap ( ) ) return false ; <nl> if ( map . 
is_access_check_needed ( ) ) return false ; <nl> bool CanInlineElementAccess ( MapRef const & map ) { <nl> return false ; <nl> } <nl> <nl> - ProcessedFeedback : : ProcessedFeedback ( Zone * zone ) <nl> - : receiver_maps ( zone ) , transitions ( zone ) { } <nl> + GlobalAccessFeedback : : GlobalAccessFeedback ( PropertyCellRef cell ) <nl> + : ProcessedFeedback ( kGlobalAccess ) , <nl> + cell_or_context_ ( cell ) , <nl> + index_and_immutable_ ( 0 / * doesn ' t matter * / ) { } <nl> + <nl> + GlobalAccessFeedback : : GlobalAccessFeedback ( ContextRef script_context , <nl> + int slot_index , bool immutable ) <nl> + : ProcessedFeedback ( kGlobalAccess ) , <nl> + cell_or_context_ ( script_context ) , <nl> + index_and_immutable_ ( FeedbackNexus : : SlotIndexBits : : encode ( slot_index ) | <nl> + FeedbackNexus : : ImmutabilityBit : : encode ( immutable ) ) { <nl> + DCHECK_EQ ( this - > slot_index ( ) , slot_index ) ; <nl> + DCHECK_EQ ( this - > immutable ( ) , immutable ) ; <nl> + } <nl> + <nl> + bool GlobalAccessFeedback : : IsPropertyCell ( ) const { <nl> + return cell_or_context_ . IsPropertyCell ( ) ; <nl> + } <nl> + PropertyCellRef GlobalAccessFeedback : : property_cell ( ) const { <nl> + DCHECK ( IsPropertyCell ( ) ) ; <nl> + return cell_or_context_ . AsPropertyCell ( ) ; <nl> + } <nl> + <nl> + ContextRef GlobalAccessFeedback : : script_context ( ) const { <nl> + DCHECK ( IsScriptContextSlot ( ) ) ; <nl> + return cell_or_context_ . AsContext ( ) ; <nl> + } <nl> + int GlobalAccessFeedback : : slot_index ( ) const { <nl> + CHECK ( IsScriptContextSlot ( ) ) ; <nl> + return FeedbackNexus : : SlotIndexBits : : decode ( index_and_immutable_ ) ; <nl> + } <nl> + bool GlobalAccessFeedback : : immutable ( ) const { <nl> + CHECK ( IsScriptContextSlot ( ) ) ; <nl> + return FeedbackNexus : : ImmutabilityBit : : decode ( index_and_immutable_ ) ; <nl> + } <nl> + <nl> + ElementAccessFeedback : : ElementAccessFeedback ( Zone * zone ) <nl> + : ProcessedFeedback ( kElementAccess ) , <nl> + receiver_maps ( zone ) , <nl> + transitions ( zone ) { } <nl> <nl> - ProcessedFeedback : : MapIterator : : MapIterator ( ProcessedFeedback const & processed , <nl> - JSHeapBroker * broker ) <nl> + ElementAccessFeedback : : MapIterator : : MapIterator ( <nl> + ElementAccessFeedback const & processed , JSHeapBroker * broker ) <nl> : processed_ ( processed ) , broker_ ( broker ) { <nl> CHECK_LT ( processed . receiver_maps . size ( ) , <nl> std : : numeric_limits < size_t > : : max ( ) - processed . transitions . size ( ) ) ; <nl> } <nl> <nl> - bool ProcessedFeedback : : MapIterator : : done ( ) const { <nl> + bool ElementAccessFeedback : : MapIterator : : done ( ) const { <nl> return index_ > = <nl> processed_ . receiver_maps . size ( ) + processed_ . transitions . size ( ) ; <nl> } <nl> <nl> - void ProcessedFeedback : : MapIterator : : advance ( ) { index_ + + ; } <nl> + void ElementAccessFeedback : : MapIterator : : advance ( ) { index_ + + ; } <nl> <nl> - MapRef ProcessedFeedback : : MapIterator : : current ( ) const { <nl> + MapRef ElementAccessFeedback : : MapIterator : : current ( ) const { <nl> CHECK ( ! done ( ) ) ; <nl> size_t receiver_maps_size = processed_ . receiver_maps . 
size ( ) ; <nl> Handle < Map > map ; <nl> MapRef ProcessedFeedback : : MapIterator : : current ( ) const { <nl> return MapRef ( broker_ , map ) ; <nl> } <nl> <nl> - ProcessedFeedback : : MapIterator ProcessedFeedback : : all_maps ( <nl> + ElementAccessFeedback : : MapIterator ElementAccessFeedback : : all_maps ( <nl> JSHeapBroker * broker ) const { <nl> return MapIterator ( * this , broker ) ; <nl> } <nl> <nl> - ProcessedFeedback & JSHeapBroker : : CreateEmptyFeedback ( <nl> - FeedbackNexus const & nexus ) { <nl> - auto insertion = feedback_ . insert ( { nexus , ProcessedFeedback ( zone ( ) ) } ) ; <nl> + FeedbackSource : : FeedbackSource ( FeedbackNexus const & nexus ) <nl> + : vector ( nexus . vector_handle ( ) ) , slot ( nexus . slot ( ) ) { } <nl> + <nl> + FeedbackSource : : FeedbackSource ( VectorSlotPair const & pair ) <nl> + : vector ( pair . vector ( ) ) , slot ( pair . slot ( ) ) { } <nl> + <nl> + void JSHeapBroker : : SetFeedback ( FeedbackSource const & source , <nl> + ProcessedFeedback const * feedback ) { <nl> + auto insertion = feedback_ . insert ( { source , feedback } ) ; <nl> CHECK ( insertion . second ) ; <nl> - return insertion . first - > second ; <nl> } <nl> <nl> - bool JSHeapBroker : : HasFeedback ( FeedbackNexus const & nexus ) const { <nl> - return feedback_ . find ( nexus ) ! = feedback_ . end ( ) ; <nl> + bool JSHeapBroker : : HasFeedback ( FeedbackSource const & source ) const { <nl> + return feedback_ . find ( source ) ! = feedback_ . end ( ) ; <nl> } <nl> <nl> - ProcessedFeedback & JSHeapBroker : : GetFeedback ( FeedbackNexus const & nexus ) { <nl> - auto it = feedback_ . find ( nexus ) ; <nl> + ProcessedFeedback const * JSHeapBroker : : GetFeedback ( <nl> + FeedbackSource const & source ) const { <nl> + auto it = feedback_ . find ( source ) ; <nl> CHECK_NE ( it , feedback_ . end ( ) ) ; <nl> return it - > second ; <nl> } <nl> <nl> - void ProcessFeedbackMapsForElementAccess ( JSHeapBroker * broker , <nl> - MapHandles const & maps , <nl> - ProcessedFeedback * processed ) { <nl> - CHECK ( processed - > receiver_maps . empty ( ) ) ; <nl> - CHECK ( processed - > transitions . empty ( ) ) ; <nl> + ElementAccessFeedback const * JSHeapBroker : : GetElementAccessFeedback ( <nl> + FeedbackSource const & source ) const { <nl> + ProcessedFeedback const * feedback = GetFeedback ( source ) ; <nl> + if ( feedback = = nullptr ) return nullptr ; <nl> + CHECK_EQ ( feedback - > kind ( ) , ProcessedFeedback : : kElementAccess ) ; <nl> + return static_cast < ElementAccessFeedback const * > ( feedback ) ; <nl> + } <nl> <nl> + GlobalAccessFeedback const * JSHeapBroker : : GetGlobalAccessFeedback ( <nl> + FeedbackSource const & source ) const { <nl> + ProcessedFeedback const * feedback = GetFeedback ( source ) ; <nl> + if ( feedback = = nullptr ) return nullptr ; <nl> + CHECK_EQ ( feedback - > kind ( ) , ProcessedFeedback : : kGlobalAccess ) ; <nl> + return static_cast < GlobalAccessFeedback const * > ( feedback ) ; <nl> + } <nl> + <nl> + ElementAccessFeedback const * JSHeapBroker : : ProcessFeedbackMapsForElementAccess ( <nl> + MapHandles const & maps ) { <nl> / / Collect possible transition targets . <nl> MapHandles possible_transition_targets ; <nl> possible_transition_targets . reserve ( maps . size ( ) ) ; <nl> for ( Handle < Map > map : maps ) { <nl> - if ( CanInlineElementAccess ( MapRef ( broker , map ) ) & & <nl> + if ( CanInlineElementAccess ( MapRef ( this , map ) ) & & <nl> IsFastElementsKind ( map - > elements_kind ( ) ) & & <nl> GetInitialFastElementsKind ( ) ! 
= map - > elements_kind ( ) ) { <nl> possible_transition_targets . push_back ( map ) ; <nl> } <nl> } <nl> <nl> + if ( maps . empty ( ) ) return nullptr ; <nl> + <nl> + ElementAccessFeedback * result = new ( zone ( ) ) ElementAccessFeedback ( zone ( ) ) ; <nl> + <nl> / / Separate the actual receiver maps and the possible transition sources . <nl> for ( Handle < Map > map : maps ) { <nl> / / Don ' t generate elements kind transitions from stable maps . <nl> - Map transition_target = <nl> - map - > is_stable ( ) ? Map ( ) <nl> - : map - > FindElementsKindTransitionedMap ( <nl> - broker - > isolate ( ) , possible_transition_targets ) ; <nl> + Map transition_target = map - > is_stable ( ) <nl> + ? Map ( ) <nl> + : map - > FindElementsKindTransitionedMap ( <nl> + isolate ( ) , possible_transition_targets ) ; <nl> if ( transition_target . is_null ( ) ) { <nl> - processed - > receiver_maps . push_back ( map ) ; <nl> + result - > receiver_maps . push_back ( map ) ; <nl> } else { <nl> - processed - > transitions . emplace_back ( <nl> - map , handle ( transition_target , broker - > isolate ( ) ) ) ; <nl> + result - > transitions . emplace_back ( map , <nl> + handle ( transition_target , isolate ( ) ) ) ; <nl> } <nl> } <nl> <nl> # ifdef ENABLE_SLOW_DCHECKS <nl> / / No transition sources appear in { receiver_maps } . <nl> / / All transition targets appear in { receiver_maps } . <nl> - for ( auto & transition : processed - > transitions ) { <nl> - USE ( transition ) ; <nl> + for ( auto & transition : result - > transitions ) { <nl> CHECK ( std : : none_of ( <nl> - processed - > receiver_maps . cbegin ( ) , processed - > receiver_maps . cend ( ) , <nl> + result - > receiver_maps . cbegin ( ) , result - > receiver_maps . cend ( ) , <nl> [ & ] ( Handle < Map > map ) { return map . equals ( transition . first ) ; } ) ) ; <nl> CHECK ( std : : any_of ( <nl> - processed - > receiver_maps . cbegin ( ) , processed - > receiver_maps . cend ( ) , <nl> + result - > receiver_maps . cbegin ( ) , result - > receiver_maps . cend ( ) , <nl> [ & ] ( Handle < Map > map ) { return map . equals ( transition . second ) ; } ) ) ; <nl> } <nl> # endif <nl> + CHECK ( ! result - > receiver_maps . empty ( ) ) ; <nl> + <nl> + return result ; <nl> + } <nl> + <nl> + GlobalAccessFeedback const * JSHeapBroker : : ProcessFeedbackForGlobalAccess ( <nl> + FeedbackSource const & source ) { <nl> + FeedbackNexus nexus ( source . vector , source . slot ) ; <nl> + DCHECK ( nexus . kind ( ) = = FeedbackSlotKind : : kLoadGlobalInsideTypeof | | <nl> + nexus . kind ( ) = = FeedbackSlotKind : : kLoadGlobalNotInsideTypeof | | <nl> + nexus . kind ( ) = = FeedbackSlotKind : : kStoreGlobalSloppy | | <nl> + nexus . kind ( ) = = FeedbackSlotKind : : kStoreGlobalStrict ) ; <nl> + if ( nexus . ic_state ( ) ! = MONOMORPHIC | | nexus . GetFeedback ( ) - > IsCleared ( ) ) { <nl> + return nullptr ; <nl> + } <nl> + <nl> + Handle < Object > feedback_value ( nexus . GetFeedback ( ) - > GetHeapObjectOrSmi ( ) , <nl> + isolate ( ) ) ; <nl> + <nl> + if ( feedback_value - > IsSmi ( ) ) { <nl> + / / The wanted name belongs to a script - scope variable and the feedback tells <nl> + / / us where to find its value . 
<nl> + int number = feedback_value - > Number ( ) ; <nl> + int const script_context_index = <nl> + FeedbackNexus : : ContextIndexBits : : decode ( number ) ; <nl> + int const context_slot_index = FeedbackNexus : : SlotIndexBits : : decode ( number ) ; <nl> + bool const immutable = FeedbackNexus : : ImmutabilityBit : : decode ( number ) ; <nl> + Handle < Context > context = ScriptContextTable : : GetContext ( <nl> + isolate ( ) , native_context ( ) . script_context_table ( ) . object ( ) , <nl> + script_context_index ) ; <nl> + { <nl> + ObjectRef contents ( this , <nl> + handle ( context - > get ( context_slot_index ) , isolate ( ) ) ) ; <nl> + CHECK ( ! contents . equals ( <nl> + ObjectRef ( this , isolate ( ) - > factory ( ) - > the_hole_value ( ) ) ) ) ; <nl> + } <nl> + return new ( zone ( ) ) GlobalAccessFeedback ( ContextRef ( this , context ) , <nl> + context_slot_index , immutable ) ; <nl> + } <nl> + <nl> + CHECK ( feedback_value - > IsPropertyCell ( ) ) ; <nl> + / / The wanted name belongs ( or did belong ) to a property on the global <nl> + / / object and the feedback is the cell holding its value . <nl> + PropertyCellRef cell ( this , Handle < PropertyCell > : : cast ( feedback_value ) ) ; <nl> + cell . Serialize ( ) ; <nl> + return new ( zone ( ) ) GlobalAccessFeedback ( cell ) ; <nl> } <nl> <nl> # undef BIMODAL_ACCESSOR <nl> mmm a / src / compiler / js - heap - broker . h <nl> ppp b / src / compiler / js - heap - broker . h <nl> namespace v8 { <nl> namespace internal { <nl> <nl> class BytecodeArray ; <nl> + class VectorSlotPair ; <nl> class FixedDoubleArray ; <nl> class HeapNumber ; <nl> class InternalizedString ; <nl> class PropertyCellRef : public HeapObjectRef { <nl> Handle < PropertyCell > object ( ) const ; <nl> <nl> PropertyDetails property_details ( ) const ; <nl> + <nl> + void Serialize ( ) ; <nl> ObjectRef value ( ) const ; <nl> } ; <nl> <nl> class InternalizedStringRef : public StringRef { <nl> static const uint32_t kNotAnArrayIndex = - 1 ; / / 2 ^ 32 - 1 is not a valid index . <nl> } ; <nl> <nl> - struct ProcessedFeedback { <nl> - explicit ProcessedFeedback ( Zone * zone ) ; <nl> + class ProcessedFeedback : public ZoneObject { <nl> + public : <nl> + enum Kind { kElementAccess , kGlobalAccess } ; <nl> + Kind kind ( ) const { return kind_ ; } <nl> + <nl> + protected : <nl> + explicit ProcessedFeedback ( Kind kind ) : kind_ ( kind ) { } <nl> + <nl> + private : <nl> + Kind const kind_ ; <nl> + } ; <nl> + <nl> + class GlobalAccessFeedback : public ProcessedFeedback { <nl> + public : <nl> + explicit GlobalAccessFeedback ( PropertyCellRef cell ) ; <nl> + GlobalAccessFeedback ( ContextRef script_context , int slot_index , <nl> + bool immutable ) ; <nl> + <nl> + bool IsPropertyCell ( ) const ; <nl> + PropertyCellRef property_cell ( ) const ; <nl> + <nl> + bool IsScriptContextSlot ( ) const { return ! IsPropertyCell ( ) ; } <nl> + ContextRef script_context ( ) const ; <nl> + int slot_index ( ) const ; <nl> + bool immutable ( ) const ; <nl> + <nl> + private : <nl> + ObjectRef const cell_or_context_ ; <nl> + int const index_and_immutable_ ; <nl> + } ; <nl> + <nl> + class ElementAccessFeedback : public ProcessedFeedback { <nl> + public : <nl> + explicit ElementAccessFeedback ( Zone * zone ) ; <nl> <nl> / / No transition sources appear in { receiver_maps } . <nl> / / All transition targets appear in { receiver_maps } . 
<nl> struct ProcessedFeedback { <nl> MapRef current ( ) const ; <nl> <nl> private : <nl> - friend struct ProcessedFeedback ; <nl> + friend class ElementAccessFeedback ; <nl> <nl> - explicit MapIterator ( ProcessedFeedback const & processed , <nl> + explicit MapIterator ( ElementAccessFeedback const & processed , <nl> JSHeapBroker * broker ) ; <nl> <nl> - ProcessedFeedback const & processed_ ; <nl> + ElementAccessFeedback const & processed_ ; <nl> JSHeapBroker * const broker_ ; <nl> size_t index_ = 0 ; <nl> } ; <nl> struct ProcessedFeedback { <nl> MapIterator all_maps ( JSHeapBroker * broker ) const ; <nl> } ; <nl> <nl> + struct FeedbackSource { <nl> + FeedbackSource ( Handle < FeedbackVector > vector_ , FeedbackSlot slot_ ) <nl> + : vector ( vector_ ) , slot ( slot_ ) { } <nl> + explicit FeedbackSource ( FeedbackNexus const & nexus ) ; <nl> + explicit FeedbackSource ( VectorSlotPair const & pair ) ; <nl> + <nl> + Handle < FeedbackVector > const vector ; <nl> + FeedbackSlot const slot ; <nl> + <nl> + struct Hash { <nl> + size_t operator ( ) ( FeedbackSource const & source ) const { <nl> + return base : : hash_combine ( source . vector . address ( ) , source . slot ) ; <nl> + } <nl> + } ; <nl> + <nl> + struct Equal { <nl> + bool operator ( ) ( FeedbackSource const & lhs , <nl> + FeedbackSource const & rhs ) const { <nl> + return lhs . vector . equals ( rhs . vector ) & & lhs . slot = = rhs . slot ; <nl> + } <nl> + } ; <nl> + } ; <nl> + <nl> class V8_EXPORT_PRIVATE JSHeapBroker : public NON_EXPORTED_BASE ( ZoneObject ) { <nl> public : <nl> JSHeapBroker ( Isolate * isolate , Zone * broker_zone ) ; <nl> class V8_EXPORT_PRIVATE JSHeapBroker : public NON_EXPORTED_BASE ( ZoneObject ) { <nl> / / % ObjectPrototype % . <nl> bool IsArrayOrObjectPrototype ( const JSObjectRef & object ) const ; <nl> <nl> - ProcessedFeedback & CreateEmptyFeedback ( FeedbackNexus const & nexus ) ; <nl> - bool HasFeedback ( FeedbackNexus const & nexus ) const ; <nl> - ProcessedFeedback & GetFeedback ( FeedbackNexus const & nexus ) ; <nl> + bool HasFeedback ( FeedbackSource const & source ) const ; <nl> + / / The processed { feedback } can be { nullptr } , indicating that the original <nl> + / / feedback didn ' t contain information relevant for Turbofan . <nl> + void SetFeedback ( FeedbackSource const & source , <nl> + ProcessedFeedback const * feedback ) ; <nl> + ProcessedFeedback const * GetFeedback ( FeedbackSource const & source ) const ; <nl> + <nl> + / / Convenience wrappers around GetFeedback . <nl> + ElementAccessFeedback const * GetElementAccessFeedback ( <nl> + FeedbackSource const & source ) const ; <nl> + GlobalAccessFeedback const * GetGlobalAccessFeedback ( <nl> + FeedbackSource const & source ) const ; <nl> + <nl> + / / TODO ( neis ) : Move these into serializer when we ' re always in the background . <nl> + ElementAccessFeedback const * ProcessFeedbackMapsForElementAccess ( <nl> + MapHandles const & maps ) ; <nl> + GlobalAccessFeedback const * ProcessFeedbackForGlobalAccess ( <nl> + FeedbackSource const & source ) ; <nl> <nl> std : : ostream & Trace ( ) ; <nl> void IncrementTracingIndentation ( ) ; <nl> class V8_EXPORT_PRIVATE JSHeapBroker : public NON_EXPORTED_BASE ( ZoneObject ) { <nl> void SerializeShareableObjects ( ) ; <nl> void CollectArrayAndObjectPrototypes ( ) ; <nl> <nl> - struct FeedbackNexusHash { <nl> - size_t operator ( ) ( FeedbackNexus const & nexus ) const { <nl> - return base : : hash_combine ( nexus . vector_handle ( ) . address ( ) , nexus . 
slot ( ) ) ; <nl> - } <nl> - } ; <nl> - struct FeedbackNexusEqual { <nl> - bool operator ( ) ( FeedbackNexus const & lhs , FeedbackNexus const & rhs ) const { <nl> - return lhs . vector_handle ( ) . equals ( rhs . vector_handle ( ) ) & & <nl> - lhs . slot ( ) = = rhs . slot ( ) ; <nl> - } <nl> - } ; <nl> - <nl> Isolate * const isolate_ ; <nl> Zone * const broker_zone_ ; <nl> Zone * current_zone_ ; <nl> class V8_EXPORT_PRIVATE JSHeapBroker : public NON_EXPORTED_BASE ( ZoneObject ) { <nl> StdoutStream trace_out_ ; <nl> unsigned trace_indentation_ = 0 ; <nl> PerIsolateCompilerCache * compiler_cache_ ; <nl> - ZoneUnorderedMap < FeedbackNexus , ProcessedFeedback , FeedbackNexusHash , <nl> - FeedbackNexusEqual > <nl> + ZoneUnorderedMap < FeedbackSource , ProcessedFeedback const * , <nl> + FeedbackSource : : Hash , FeedbackSource : : Equal > <nl> feedback_ ; <nl> <nl> static const size_t kMinimalRefsBucketCount = 8 ; / / must be power of 2 <nl> Reduction NoChangeBecauseOfMissingData ( JSHeapBroker * broker , <nl> / / Miscellaneous definitions that should be moved elsewhere once concurrent <nl> / / compilation is finished . <nl> bool CanInlineElementAccess ( MapRef const & map ) ; <nl> - void ProcessFeedbackMapsForElementAccess ( JSHeapBroker * broker , <nl> - MapHandles const & maps , <nl> - ProcessedFeedback * processed ) ; <nl> <nl> # define TRACE_BROKER ( broker , x ) \ <nl> do { \ <nl> mmm a / src / compiler / js - native - context - specialization . cc <nl> ppp b / src / compiler / js - native - context - specialization . cc <nl> Reduction JSNativeContextSpecialization : : ReduceGlobalAccess ( <nl> it . TryLookupCachedProperty ( ) ; <nl> if ( it . state ( ) ! = LookupIterator : : DATA ) return NoChange ( ) ; <nl> if ( ! it . GetHolder < JSObject > ( ) - > IsJSGlobalObject ( ) ) return NoChange ( ) ; <nl> - Handle < PropertyCell > property_cell = it . GetPropertyCell ( ) ; <nl> + PropertyCellRef property_cell ( broker ( ) , it . GetPropertyCell ( ) ) ; <nl> + property_cell . Serialize ( ) ; <nl> return ReduceGlobalAccess ( node , receiver , value , name , access_mode , index , <nl> property_cell ) ; <nl> } <nl> <nl> Reduction JSNativeContextSpecialization : : ReduceGlobalAccess ( <nl> Node * node , Node * receiver , Node * value , Handle < Name > name , <nl> - AccessMode access_mode , Node * index , Handle < PropertyCell > property_cell ) { <nl> + AccessMode access_mode , Node * index , PropertyCellRef const & property_cell ) { <nl> Node * effect = NodeProperties : : GetEffectInput ( node ) ; <nl> Node * control = NodeProperties : : GetControlInput ( node ) ; <nl> <nl> - Handle < Object > property_cell_value ( property_cell - > value ( ) , isolate ( ) ) ; <nl> - if ( property_cell_value . is_identical_to ( factory ( ) - > the_hole_value ( ) ) ) { <nl> + ObjectRef property_cell_value = property_cell . value ( ) ; <nl> + if ( property_cell_value . IsHeapObject ( ) & & <nl> + property_cell_value . AsHeapObject ( ) . map ( ) . oddball_type ( ) = = <nl> + OddballType : : kHole ) { <nl> / / The property cell is no longer valid . <nl> return NoChange ( ) ; <nl> } <nl> <nl> - PropertyDetails property_details = property_cell - > property_details ( ) ; <nl> + PropertyDetails property_details = property_cell . property_details ( ) ; <nl> PropertyCellType property_cell_type = property_details . cell_type ( ) ; <nl> DCHECK_EQ ( kData , property_details . 
kind ( ) ) ; <nl> <nl> Reduction JSNativeContextSpecialization : : ReduceGlobalAccess ( <nl> } else if ( property_cell_type = = PropertyCellType : : kConstantType ) { <nl> / / There ' s also no fast - path to store to a global cell which pretended <nl> / / to be stable , but is no longer stable now . <nl> - if ( property_cell_value - > IsHeapObject ( ) & & <nl> - ! Handle < HeapObject > : : cast ( property_cell_value ) - > map ( ) - > is_stable ( ) ) { <nl> + if ( property_cell_value . IsHeapObject ( ) & & <nl> + ! property_cell_value . AsHeapObject ( ) . map ( ) . is_stable ( ) ) { <nl> return NoChange ( ) ; <nl> } <nl> } <nl> Reduction JSNativeContextSpecialization : : ReduceGlobalAccess ( <nl> / / can be deleted or reconfigured to an accessor property ) . <nl> if ( property_details . cell_type ( ) ! = PropertyCellType : : kMutable | | <nl> property_details . IsConfigurable ( ) ) { <nl> - dependencies ( ) - > DependOnGlobalProperty ( <nl> - PropertyCellRef ( broker ( ) , property_cell ) ) ; <nl> + dependencies ( ) - > DependOnGlobalProperty ( property_cell ) ; <nl> } <nl> <nl> / / Load from constant / undefined global property can be constant - folded . <nl> Reduction JSNativeContextSpecialization : : ReduceGlobalAccess ( <nl> value = access_mode = = AccessMode : : kHas <nl> ? jsgraph ( ) - > TrueConstant ( ) <nl> : jsgraph ( ) - > Constant ( property_cell_value ) ; <nl> - CHECK ( <nl> - ! property_cell_value . is_identical_to ( factory ( ) - > the_hole_value ( ) ) ) ; <nl> + DCHECK ( ! property_cell_value . IsHeapObject ( ) | | <nl> + property_cell_value . AsHeapObject ( ) . map ( ) . oddball_type ( ) ! = <nl> + OddballType : : kHole ) ; <nl> } else { <nl> DCHECK_NE ( AccessMode : : kHas , access_mode ) ; <nl> <nl> Reduction JSNativeContextSpecialization : : ReduceGlobalAccess ( <nl> MachineRepresentation representation = MachineRepresentation : : kTagged ; <nl> if ( property_details . cell_type ( ) = = PropertyCellType : : kConstantType ) { <nl> / / Compute proper type based on the current value in the cell . <nl> - if ( property_cell_value - > IsSmi ( ) ) { <nl> + if ( property_cell_value . IsSmi ( ) ) { <nl> property_cell_value_type = Type : : SignedSmall ( ) ; <nl> representation = MachineRepresentation : : kTaggedSigned ; <nl> - } else if ( property_cell_value - > IsNumber ( ) ) { <nl> + } else if ( property_cell_value . IsHeapNumber ( ) ) { <nl> property_cell_value_type = Type : : Number ( ) ; <nl> representation = MachineRepresentation : : kTaggedPointer ; <nl> } else { <nl> - MapRef property_cell_value_map ( <nl> - broker ( ) , handle ( HeapObject : : cast ( * property_cell_value ) - > map ( ) , <nl> - isolate ( ) ) ) ; <nl> + MapRef property_cell_value_map = <nl> + property_cell_value . AsHeapObject ( ) . 
map ( ) ; <nl> property_cell_value_type = Type : : For ( property_cell_value_map ) ; <nl> representation = MachineRepresentation : : kTaggedPointer ; <nl> <nl> Reduction JSNativeContextSpecialization : : ReduceGlobalAccess ( <nl> value = effect = graph ( ) - > NewNode ( <nl> simplified ( ) - > LoadField ( ForPropertyCellValue ( <nl> representation , property_cell_value_type , map , name ) ) , <nl> - jsgraph ( ) - > HeapConstant ( property_cell ) , effect , control ) ; <nl> + jsgraph ( ) - > Constant ( property_cell ) , effect , control ) ; <nl> } <nl> } <nl> } else { <nl> Reduction JSNativeContextSpecialization : : ReduceGlobalAccess ( <nl> case PropertyCellType : : kConstant : { <nl> / / Record a code dependency on the cell , and just deoptimize if the new <nl> / / value doesn ' t match the previous value stored inside the cell . <nl> - dependencies ( ) - > DependOnGlobalProperty ( <nl> - PropertyCellRef ( broker ( ) , property_cell ) ) ; <nl> + dependencies ( ) - > DependOnGlobalProperty ( property_cell ) ; <nl> Node * check = <nl> graph ( ) - > NewNode ( simplified ( ) - > ReferenceEqual ( ) , value , <nl> jsgraph ( ) - > Constant ( property_cell_value ) ) ; <nl> Reduction JSNativeContextSpecialization : : ReduceGlobalAccess ( <nl> / / Record a code dependency on the cell , and just deoptimize if the new <nl> / / values ' type doesn ' t match the type of the previous value in the <nl> / / cell . <nl> - dependencies ( ) - > DependOnGlobalProperty ( <nl> - PropertyCellRef ( broker ( ) , property_cell ) ) ; <nl> + dependencies ( ) - > DependOnGlobalProperty ( property_cell ) ; <nl> Type property_cell_value_type ; <nl> MachineRepresentation representation = MachineRepresentation : : kTagged ; <nl> - if ( property_cell_value - > IsHeapObject ( ) ) { <nl> + if ( property_cell_value . IsHeapObject ( ) ) { <nl> / / We cannot do anything if the { property_cell_value } s map is no <nl> / / longer stable . <nl> - Handle < Map > property_cell_value_map ( <nl> - Handle < HeapObject > : : cast ( property_cell_value ) - > map ( ) , isolate ( ) ) ; <nl> - DCHECK ( property_cell_value_map - > is_stable ( ) ) ; <nl> - dependencies ( ) - > DependOnStableMap ( <nl> - MapRef ( broker ( ) , property_cell_value_map ) ) ; <nl> + MapRef property_cell_value_map = <nl> + property_cell_value . AsHeapObject ( ) . map ( ) ; <nl> + dependencies ( ) - > DependOnStableMap ( property_cell_value_map ) ; <nl> <nl> / / Check that the { value } is a HeapObject . <nl> value = effect = graph ( ) - > NewNode ( simplified ( ) - > CheckHeapObject ( ) , <nl> value , effect , control ) ; <nl> <nl> / / Check { value } map against the { property_cell } map . <nl> - effect = <nl> - graph ( ) - > NewNode ( simplified ( ) - > CheckMaps ( <nl> - CheckMapsFlag : : kNone , <nl> - ZoneHandleSet < Map > ( property_cell_value_map ) ) , <nl> - value , effect , control ) ; <nl> + effect = graph ( ) - > NewNode ( <nl> + simplified ( ) - > CheckMaps ( <nl> + CheckMapsFlag : : kNone , <nl> + ZoneHandleSet < Map > ( property_cell_value_map . 
object ( ) ) ) , <nl> + value , effect , control ) ; <nl> property_cell_value_type = Type : : OtherInternal ( ) ; <nl> representation = MachineRepresentation : : kTaggedPointer ; <nl> } else { <nl> Reduction JSNativeContextSpecialization : : ReduceGlobalAccess ( <nl> effect = graph ( ) - > NewNode ( simplified ( ) - > StoreField ( ForPropertyCellValue ( <nl> representation , property_cell_value_type , <nl> MaybeHandle < Map > ( ) , name ) ) , <nl> - jsgraph ( ) - > HeapConstant ( property_cell ) , value , <nl> + jsgraph ( ) - > Constant ( property_cell ) , value , <nl> effect , control ) ; <nl> break ; <nl> } <nl> case PropertyCellType : : kMutable : { <nl> / / Record a code dependency on the cell , and just deoptimize if the <nl> / / property ever becomes read - only . <nl> - dependencies ( ) - > DependOnGlobalProperty ( <nl> - PropertyCellRef ( broker ( ) , property_cell ) ) ; <nl> + dependencies ( ) - > DependOnGlobalProperty ( property_cell ) ; <nl> effect = graph ( ) - > NewNode ( <nl> simplified ( ) - > StoreField ( ForPropertyCellValue ( <nl> MachineRepresentation : : kTagged , Type : : NonInternal ( ) , <nl> MaybeHandle < Map > ( ) , name ) ) , <nl> - jsgraph ( ) - > HeapConstant ( property_cell ) , value , effect , control ) ; <nl> + jsgraph ( ) - > Constant ( property_cell ) , value , effect , control ) ; <nl> break ; <nl> } <nl> } <nl> Reduction JSNativeContextSpecialization : : ReduceGlobalAccess ( <nl> <nl> Reduction JSNativeContextSpecialization : : ReduceJSLoadGlobal ( Node * node ) { <nl> DCHECK_EQ ( IrOpcode : : kJSLoadGlobal , node - > opcode ( ) ) ; <nl> - Node * effect = NodeProperties : : GetEffectInput ( node ) ; <nl> + DisallowHeapAccessIf no_heap_acess ( FLAG_concurrent_inlining ) ; <nl> <nl> LoadGlobalParameters const & p = LoadGlobalParametersOf ( node - > op ( ) ) ; <nl> if ( ! p . feedback ( ) . IsValid ( ) ) return NoChange ( ) ; <nl> - FeedbackNexus nexus ( p . feedback ( ) . vector ( ) , p . feedback ( ) . slot ( ) ) ; <nl> - <nl> - DCHECK ( nexus . kind ( ) = = FeedbackSlotKind : : kLoadGlobalInsideTypeof | | <nl> - nexus . kind ( ) = = FeedbackSlotKind : : kLoadGlobalNotInsideTypeof ) ; <nl> - if ( nexus . ic_state ( ) ! = MONOMORPHIC | | nexus . GetFeedback ( ) - > IsCleared ( ) ) { <nl> - return NoChange ( ) ; <nl> + FeedbackSource source ( p . feedback ( ) ) ; <nl> + <nl> + GlobalAccessFeedback const * processed ; <nl> + if ( FLAG_concurrent_inlining ) { <nl> + processed = broker ( ) - > GetGlobalAccessFeedback ( source ) ; <nl> + TRACE_BROKER ( broker ( ) , " ReduceJSLoadGlobal : using preprocessed feedback " <nl> + < < " ( slot " < < p . feedback ( ) . slot ( ) <nl> + < < " of feedback vector handle " <nl> + < < p . feedback ( ) . vector ( ) . address ( ) < < " ) . \ n " ) ; <nl> + } else { <nl> + processed = broker ( ) - > ProcessFeedbackForGlobalAccess ( source ) ; <nl> } <nl> - Handle < Object > feedback ( nexus . GetFeedback ( ) - > GetHeapObjectOrSmi ( ) , isolate ( ) ) ; <nl> - <nl> - if ( feedback - > IsSmi ( ) ) { <nl> - / / The wanted name belongs to a script - scope variable and the feedback tells <nl> - / / us where to find its value . 
<nl> - <nl> - int number = feedback - > Number ( ) ; <nl> - int const script_context_index = <nl> - FeedbackNexus : : ContextIndexBits : : decode ( number ) ; <nl> - int const context_slot_index = FeedbackNexus : : SlotIndexBits : : decode ( number ) ; <nl> - bool const immutable = FeedbackNexus : : ImmutabilityBit : : decode ( number ) ; <nl> - Handle < Context > context = ScriptContextTable : : GetContext ( <nl> - isolate ( ) , native_context ( ) . script_context_table ( ) . object ( ) , <nl> - script_context_index ) ; <nl> - <nl> - { <nl> - ObjectRef contents ( broker ( ) , <nl> - handle ( context - > get ( context_slot_index ) , isolate ( ) ) ) ; <nl> - CHECK ( ! contents . equals ( ObjectRef ( broker ( ) , factory ( ) - > the_hole_value ( ) ) ) ) ; <nl> - } <nl> <nl> - Node * context_constant = jsgraph ( ) - > Constant ( context ) ; <nl> - Node * value = effect = graph ( ) - > NewNode ( <nl> - javascript ( ) - > LoadContext ( 0 , context_slot_index , immutable ) , <nl> - context_constant , effect ) ; <nl> + if ( processed = = nullptr ) return NoChange ( ) ; <nl> + <nl> + if ( processed - > IsScriptContextSlot ( ) ) { <nl> + Node * effect = NodeProperties : : GetEffectInput ( node ) ; <nl> + Node * script_context = jsgraph ( ) - > Constant ( processed - > script_context ( ) ) ; <nl> + Node * value = effect = <nl> + graph ( ) - > NewNode ( javascript ( ) - > LoadContext ( 0 , processed - > slot_index ( ) , <nl> + processed - > immutable ( ) ) , <nl> + script_context , effect ) ; <nl> ReplaceWithValue ( node , value , effect ) ; <nl> return Replace ( value ) ; <nl> } <nl> <nl> - CHECK ( feedback - > IsPropertyCell ( ) ) ; <nl> - / / The wanted name belongs ( or did belong ) to a property on the global object <nl> - / / and the feedback is the cell holding its value . <nl> + CHECK ( processed - > IsPropertyCell ( ) ) ; <nl> return ReduceGlobalAccess ( node , nullptr , nullptr , p . name ( ) , AccessMode : : kLoad , <nl> - nullptr , Handle < PropertyCell > : : cast ( feedback ) ) ; <nl> + nullptr , processed - > property_cell ( ) ) ; <nl> } <nl> <nl> Reduction JSNativeContextSpecialization : : ReduceJSStoreGlobal ( Node * node ) { <nl> DCHECK_EQ ( IrOpcode : : kJSStoreGlobal , node - > opcode ( ) ) ; <nl> + DisallowHeapAccessIf no_heap_acess ( FLAG_concurrent_inlining ) ; <nl> + <nl> Node * value = NodeProperties : : GetValueInput ( node , 0 ) ; <nl> - Node * effect = NodeProperties : : GetEffectInput ( node ) ; <nl> - Node * control = NodeProperties : : GetControlInput ( node ) ; <nl> <nl> StoreGlobalParameters const & p = StoreGlobalParametersOf ( node - > op ( ) ) ; <nl> if ( ! p . feedback ( ) . IsValid ( ) ) return NoChange ( ) ; <nl> - FeedbackNexus nexus ( p . feedback ( ) . vector ( ) , p . feedback ( ) . slot ( ) ) ; <nl> - <nl> - DCHECK ( nexus . kind ( ) = = FeedbackSlotKind : : kStoreGlobalSloppy | | <nl> - nexus . kind ( ) = = FeedbackSlotKind : : kStoreGlobalStrict ) ; <nl> - if ( nexus . ic_state ( ) ! = MONOMORPHIC | | nexus . GetFeedback ( ) - > IsCleared ( ) ) { <nl> - return NoChange ( ) ; <nl> + FeedbackSource source ( p . feedback ( ) ) ; <nl> + <nl> + GlobalAccessFeedback const * processed ; <nl> + if ( FLAG_concurrent_inlining ) { <nl> + processed = broker ( ) - > GetGlobalAccessFeedback ( source ) ; <nl> + TRACE_BROKER ( broker ( ) , " ReduceJSStoreGlobal : using preprocessed feedback " <nl> + < < " ( slot " < < p . feedback ( ) . slot ( ) <nl> + < < " of feedback vector handle " <nl> + < < p . feedback ( ) . vector ( ) . address ( ) < < " ) . 
\ n " ) ; <nl> + } else { <nl> + processed = broker ( ) - > ProcessFeedbackForGlobalAccess ( source ) ; <nl> } <nl> - Handle < Object > feedback ( nexus . GetFeedback ( ) - > GetHeapObjectOrSmi ( ) , isolate ( ) ) ; <nl> - <nl> - if ( feedback - > IsSmi ( ) ) { <nl> - / / The wanted name belongs to a script - scope variable and the feedback tells <nl> - / / us where to find its value . <nl> - <nl> - int const script_context_index = <nl> - FeedbackNexus : : ContextIndexBits : : decode ( feedback - > Number ( ) ) ; <nl> - int const context_slot_index = <nl> - FeedbackNexus : : SlotIndexBits : : decode ( feedback - > Number ( ) ) ; <nl> - bool const immutable = <nl> - FeedbackNexus : : ImmutabilityBit : : decode ( feedback - > Number ( ) ) ; <nl> - Handle < Context > context = ScriptContextTable : : GetContext ( <nl> - isolate ( ) , native_context ( ) . script_context_table ( ) . object ( ) , <nl> - script_context_index ) ; <nl> - <nl> - if ( immutable ) return NoChange ( ) ; <nl> <nl> - { <nl> - ObjectRef contents ( broker ( ) , <nl> - handle ( context - > get ( context_slot_index ) , isolate ( ) ) ) ; <nl> - CHECK ( ! contents . equals ( ObjectRef ( broker ( ) , factory ( ) - > the_hole_value ( ) ) ) ) ; <nl> - } <nl> + if ( processed = = nullptr ) return NoChange ( ) ; <nl> <nl> - Node * context_constant = jsgraph ( ) - > Constant ( context ) ; <nl> - effect = graph ( ) - > NewNode ( javascript ( ) - > StoreContext ( 0 , context_slot_index ) , <nl> - value , context_constant , effect , control ) ; <nl> + if ( processed - > IsScriptContextSlot ( ) ) { <nl> + if ( processed - > immutable ( ) ) return NoChange ( ) ; <nl> + Node * effect = NodeProperties : : GetEffectInput ( node ) ; <nl> + Node * control = NodeProperties : : GetControlInput ( node ) ; <nl> + Node * script_context = jsgraph ( ) - > Constant ( processed - > script_context ( ) ) ; <nl> + effect = <nl> + graph ( ) - > NewNode ( javascript ( ) - > StoreContext ( 0 , processed - > slot_index ( ) ) , <nl> + value , script_context , effect , control ) ; <nl> ReplaceWithValue ( node , value , effect , control ) ; <nl> return Replace ( value ) ; <nl> } <nl> <nl> - CHECK ( feedback - > IsPropertyCell ( ) ) ; <nl> - / / The wanted name belongs ( or did belong ) to a property on the global object <nl> - / / and the feedback is the cell holding its value . <nl> - return ReduceGlobalAccess ( node , nullptr , value , p . name ( ) , AccessMode : : kStore , <nl> - nullptr , Handle < PropertyCell > : : cast ( feedback ) ) ; <nl> + if ( processed - > IsPropertyCell ( ) ) { <nl> + return ReduceGlobalAccess ( node , nullptr , value , p . name ( ) , <nl> + AccessMode : : kStore , nullptr , <nl> + processed - > property_cell ( ) ) ; <nl> + } <nl> + <nl> + UNREACHABLE ( ) ; <nl> } <nl> <nl> Reduction JSNativeContextSpecialization : : ReduceNamedAccess ( <nl> mmm a / src / compiler / js - native - context - specialization . h <nl> ppp b / src / compiler / js - native - context - specialization . 
h <nl> class V8_EXPORT_PRIVATE JSNativeContextSpecialization final <nl> Node * index = nullptr ) ; <nl> Reduction ReduceGlobalAccess ( Node * node , Node * receiver , Node * value , <nl> Handle < Name > name , AccessMode access_mode , <nl> - Node * index , Handle < PropertyCell > property_cell ) ; <nl> + Node * index , <nl> + PropertyCellRef const & property_cell ) ; <nl> Reduction ReduceKeyedLoadFromHeapConstant ( Node * node , Node * index , <nl> FeedbackNexus const & nexus , <nl> AccessMode access_mode , <nl> mmm a / src / compiler / serializer - for - background - compilation . cc <nl> ppp b / src / compiler / serializer - for - background - compilation . cc <nl> void SerializerForBackgroundCompilation : : VisitConstructWithSpread ( <nl> ProcessCallOrConstruct ( callee , new_target , arguments , slot , true ) ; <nl> } <nl> <nl> + void SerializerForBackgroundCompilation : : ProcessFeedbackForGlobalAccess ( <nl> + FeedbackSlot slot ) { <nl> + if ( slot . IsInvalid ( ) ) return ; <nl> + if ( environment ( ) - > function ( ) . feedback_vector . is_null ( ) ) return ; <nl> + FeedbackSource source ( environment ( ) - > function ( ) . feedback_vector , slot ) ; <nl> + if ( ! broker ( ) - > HasFeedback ( source ) ) { <nl> + broker ( ) - > SetFeedback ( source , <nl> + broker ( ) - > ProcessFeedbackForGlobalAccess ( source ) ) ; <nl> + } <nl> + / / TODO ( neis , mvstanton ) : In the case of an immutable script context slot , we <nl> + / / must also serialize that slot such that ContextRef : : get can retrieve the <nl> + / / value . <nl> + } <nl> + <nl> + void SerializerForBackgroundCompilation : : VisitLdaGlobal ( <nl> + BytecodeArrayIterator * iterator ) { <nl> + FeedbackSlot slot = iterator - > GetSlotOperand ( 1 ) ; <nl> + ProcessFeedbackForGlobalAccess ( slot ) ; <nl> + / / TODO ( neis , mvstanton ) : In the case of an immutable script context slot , add <nl> + / / the value as constant hint here and below <nl> + environment ( ) - > accumulator_hints ( ) . Clear ( ) ; <nl> + } <nl> + <nl> + void SerializerForBackgroundCompilation : : VisitLdaGlobalInsideTypeof ( <nl> + BytecodeArrayIterator * iterator ) { <nl> + VisitLdaGlobal ( iterator ) ; <nl> + } <nl> + <nl> + void SerializerForBackgroundCompilation : : VisitLdaLookupGlobalSlot ( <nl> + BytecodeArrayIterator * iterator ) { <nl> + VisitLdaGlobal ( iterator ) ; <nl> + } <nl> + <nl> + void SerializerForBackgroundCompilation : : VisitLdaLookupGlobalSlotInsideTypeof ( <nl> + BytecodeArrayIterator * iterator ) { <nl> + VisitLdaGlobal ( iterator ) ; <nl> + } <nl> + <nl> + void SerializerForBackgroundCompilation : : VisitStaGlobal ( <nl> + BytecodeArrayIterator * iterator ) { <nl> + FeedbackSlot slot = iterator - > GetSlotOperand ( 1 ) ; <nl> + ProcessFeedbackForGlobalAccess ( slot ) ; <nl> + } <nl> + <nl> + / / Note : We never use the same feeedback slot for multiple access modes . <nl> void SerializerForBackgroundCompilation : : ProcessFeedbackForKeyedPropertyAccess ( <nl> FeedbackSlot slot , AccessMode mode ) { <nl> if ( slot . IsInvalid ( ) ) return ; <nl> if ( environment ( ) - > function ( ) . feedback_vector . is_null ( ) ) return ; <nl> <nl> FeedbackNexus nexus ( environment ( ) - > function ( ) . feedback_vector , slot ) ; <nl> - if ( broker ( ) - > HasFeedback ( nexus ) ) return ; <nl> + FeedbackSource source ( nexus ) ; <nl> + if ( broker ( ) - > HasFeedback ( source ) ) return ; <nl> <nl> if ( nexus . 
ic_state ( ) = = MEGAMORPHIC ) return ; <nl> <nl> void SerializerForBackgroundCompilation : : ProcessFeedbackForKeyedPropertyAccess ( <nl> <nl> MapHandles maps ; <nl> nexus . ExtractMaps ( & maps ) ; <nl> - ProcessedFeedback & processed = broker ( ) - > CreateEmptyFeedback ( nexus ) ; <nl> - ProcessFeedbackMapsForElementAccess ( broker ( ) , maps , & processed ) ; <nl> + ElementAccessFeedback const * processed = <nl> + broker ( ) - > ProcessFeedbackMapsForElementAccess ( maps ) ; <nl> + broker ( ) - > SetFeedback ( source , processed ) ; <nl> + if ( processed = = nullptr ) return ; <nl> <nl> - for ( ProcessedFeedback : : MapIterator it = processed . all_maps ( broker ( ) ) ; <nl> + for ( ElementAccessFeedback : : MapIterator it = processed - > all_maps ( broker ( ) ) ; <nl> ! it . done ( ) ; it . advance ( ) ) { <nl> switch ( mode ) { <nl> case AccessMode : : kHas : <nl> mmm a / src / compiler / serializer - for - background - compilation . h <nl> ppp b / src / compiler / serializer - for - background - compilation . h <nl> namespace compiler { <nl> V ( CreateUnmappedArguments ) \ <nl> V ( LdaContextSlot ) \ <nl> V ( LdaCurrentContextSlot ) \ <nl> - V ( LdaGlobal ) \ <nl> - V ( LdaGlobalInsideTypeof ) \ <nl> V ( LdaImmutableContextSlot ) \ <nl> V ( LdaImmutableCurrentContextSlot ) \ <nl> V ( LdaNamedProperty ) \ <nl> namespace compiler { <nl> V ( ThrowSuperNotCalledIfHole ) \ <nl> V ( ThrowSuperAlreadyCalledIfNotHole ) <nl> <nl> - # define SUPPORTED_BYTECODE_LIST ( V ) \ <nl> - V ( CallAnyReceiver ) \ <nl> - V ( CallNoFeedback ) \ <nl> - V ( CallProperty ) \ <nl> - V ( CallProperty0 ) \ <nl> - V ( CallProperty1 ) \ <nl> - V ( CallProperty2 ) \ <nl> - V ( CallUndefinedReceiver ) \ <nl> - V ( CallUndefinedReceiver0 ) \ <nl> - V ( CallUndefinedReceiver1 ) \ <nl> - V ( CallUndefinedReceiver2 ) \ <nl> - V ( CallWithSpread ) \ <nl> - V ( Construct ) \ <nl> - V ( ConstructWithSpread ) \ <nl> - V ( CreateClosure ) \ <nl> - V ( ExtraWide ) \ <nl> - V ( Illegal ) \ <nl> - V ( LdaConstant ) \ <nl> - V ( LdaKeyedProperty ) \ <nl> - V ( LdaNull ) \ <nl> - V ( Ldar ) \ <nl> - V ( LdaSmi ) \ <nl> - V ( LdaUndefined ) \ <nl> - V ( LdaZero ) \ <nl> - V ( Mov ) \ <nl> - V ( Return ) \ <nl> - V ( StackCheck ) \ <nl> - V ( StaInArrayLiteral ) \ <nl> - V ( StaKeyedProperty ) \ <nl> - V ( Star ) \ <nl> - V ( TestIn ) \ <nl> - V ( Wide ) \ <nl> - CLEAR_ENVIRONMENT_LIST ( V ) \ <nl> - CLEAR_ACCUMULATOR_LIST ( V ) \ <nl> - CONDITIONAL_JUMPS_LIST ( V ) \ <nl> - UNCONDITIONAL_JUMPS_LIST ( V ) \ <nl> + # define SUPPORTED_BYTECODE_LIST ( V ) \ <nl> + V ( CallAnyReceiver ) \ <nl> + V ( CallNoFeedback ) \ <nl> + V ( CallProperty ) \ <nl> + V ( CallProperty0 ) \ <nl> + V ( CallProperty1 ) \ <nl> + V ( CallProperty2 ) \ <nl> + V ( CallUndefinedReceiver ) \ <nl> + V ( CallUndefinedReceiver0 ) \ <nl> + V ( CallUndefinedReceiver1 ) \ <nl> + V ( CallUndefinedReceiver2 ) \ <nl> + V ( CallWithSpread ) \ <nl> + V ( Construct ) \ <nl> + V ( ConstructWithSpread ) \ <nl> + V ( CreateClosure ) \ <nl> + V ( ExtraWide ) \ <nl> + V ( Illegal ) \ <nl> + V ( LdaConstant ) \ <nl> + V ( LdaGlobal ) \ <nl> + V ( LdaGlobalInsideTypeof ) \ <nl> + V ( LdaKeyedProperty ) \ <nl> + V ( LdaLookupGlobalSlot ) \ <nl> + V ( LdaLookupGlobalSlotInsideTypeof ) \ <nl> + V ( LdaNull ) \ <nl> + V ( Ldar ) \ <nl> + V ( LdaSmi ) \ <nl> + V ( LdaUndefined ) \ <nl> + V ( LdaZero ) \ <nl> + V ( Mov ) \ <nl> + V ( Return ) \ <nl> + V ( StackCheck ) \ <nl> + V ( StaGlobal ) \ <nl> + V ( StaInArrayLiteral ) \ <nl> + V ( StaKeyedProperty ) \ <nl> + V ( Star ) \ 
<nl> + V ( TestIn ) \ <nl> + V ( Wide ) \ <nl> + CLEAR_ENVIRONMENT_LIST ( V ) \ <nl> + CLEAR_ACCUMULATOR_LIST ( V ) \ <nl> + CONDITIONAL_JUMPS_LIST ( V ) \ <nl> + UNCONDITIONAL_JUMPS_LIST ( V ) \ <nl> INGORED_BYTECODE_LIST ( V ) <nl> <nl> class JSHeapBroker ; <nl> class SerializerForBackgroundCompilation { <nl> base : : Optional < Hints > new_target , <nl> const HintsVector & arguments , bool with_spread ) ; <nl> <nl> + void ProcessFeedbackForGlobalAccess ( FeedbackSlot slot ) ; <nl> void ProcessFeedbackForKeyedPropertyAccess ( FeedbackSlot slot , <nl> AccessMode mode ) ; <nl> <nl>
[turbofan] Preprocess feedback for global accesses (partially)
v8/v8
04bb707e524ba0d97a700e06a04d43b19e50d265
2019-03-14T14:42:48Z
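The commit above replaces ad-hoc FeedbackNexus decoding in ReduceJSLoadGlobal/ReduceJSStoreGlobal with feedback that the broker has already preprocessed into a tagged GlobalAccessFeedback object (script-context slot vs. property cell). Below is a minimal, self-contained C++ sketch of that pattern; the FeedbackSource, Broker, and GlobalAccessFeedback types here are simplified stand-ins rather than V8's real classes, and the point is only to show a consumer branching purely on preprocessed feedback instead of touching the raw feedback vector.

#include <cstdio>
#include <map>
#include <string>
#include <tuple>
#include <utility>

struct FeedbackSource {  // stand-in for (feedback vector, slot)
  int vector_id;
  int slot;
  bool operator<(const FeedbackSource& o) const {
    return std::tie(vector_id, slot) < std::tie(o.vector_id, o.slot);
  }
};

class GlobalAccessFeedback {
 public:
  static GlobalAccessFeedback ScriptContextSlot(int slot_index, bool immutable) {
    return GlobalAccessFeedback(Kind::kScriptContextSlot, slot_index, immutable, "");
  }
  static GlobalAccessFeedback PropertyCell(std::string name) {
    return GlobalAccessFeedback(Kind::kPropertyCell, -1, false, std::move(name));
  }
  bool IsScriptContextSlot() const { return kind_ == Kind::kScriptContextSlot; }
  bool IsPropertyCell() const { return kind_ == Kind::kPropertyCell; }
  int slot_index() const { return slot_index_; }
  bool immutable() const { return immutable_; }
  const std::string& cell_name() const { return cell_name_; }

 private:
  enum class Kind { kScriptContextSlot, kPropertyCell };
  GlobalAccessFeedback(Kind kind, int slot_index, bool immutable, std::string name)
      : kind_(kind), slot_index_(slot_index), immutable_(immutable),
        cell_name_(std::move(name)) {}
  Kind kind_;
  int slot_index_;
  bool immutable_;
  std::string cell_name_;
};

// The broker computes and caches feedback once (e.g. while serializing for
// background compilation); later consumers only read the cache.
class Broker {
 public:
  bool HasFeedback(const FeedbackSource& s) const { return cache_.count(s) != 0; }
  void SetFeedback(const FeedbackSource& s, GlobalAccessFeedback f) {
    cache_.emplace(s, std::move(f));
  }
  const GlobalAccessFeedback* GetFeedback(const FeedbackSource& s) const {
    auto it = cache_.find(s);
    return it == cache_.end() ? nullptr : &it->second;
  }

 private:
  std::map<FeedbackSource, GlobalAccessFeedback> cache_;
};

// Reducer-style consumer: branch only on the preprocessed, tagged feedback.
void ReduceGlobalLoad(const Broker& broker, const FeedbackSource& source) {
  const GlobalAccessFeedback* processed = broker.GetFeedback(source);
  if (processed == nullptr) {
    std::puts("insufficient feedback: no change");
  } else if (processed->IsScriptContextSlot()) {
    std::printf("lower to LoadContext(slot %d, immutable=%d)\n",
                processed->slot_index(), static_cast<int>(processed->immutable()));
  } else {
    std::printf("lower to property-cell load of '%s'\n", processed->cell_name().c_str());
  }
}

int main() {
  Broker broker;
  FeedbackSource a{1, 0}, b{1, 1};
  if (!broker.HasFeedback(a))
    broker.SetFeedback(a, GlobalAccessFeedback::ScriptContextSlot(3, true));
  if (!broker.HasFeedback(b))
    broker.SetFeedback(b, GlobalAccessFeedback::PropertyCell("x"));
  ReduceGlobalLoad(broker, a);
  ReduceGlobalLoad(broker, b);
  ReduceGlobalLoad(broker, FeedbackSource{2, 0});  // never processed -> no change
}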
mmm a / src / base / adapters . h <nl> ppp b / src / base / adapters . h <nl> <nl> # ifndef V8_BASE_ADAPTERS_H_ <nl> # define V8_BASE_ADAPTERS_H_ <nl> <nl> + # include < iterator > <nl> + <nl> # include " src / base / macros . h " <nl> <nl> namespace v8 { <nl> namespace base { <nl> template < typename T > <nl> class ReversedAdapter { <nl> public : <nl> - typedef decltype ( static_cast < T * > ( nullptr ) - > rbegin ( ) ) Iterator ; <nl> + using Iterator = <nl> + std : : reverse_iterator < decltype ( std : : begin ( std : : declval < T > ( ) ) ) > ; <nl> <nl> explicit ReversedAdapter ( T & t ) : t_ ( t ) { } <nl> - ReversedAdapter ( const ReversedAdapter & ra ) : t_ ( ra . t_ ) { } <nl> + ReversedAdapter ( const ReversedAdapter & ra ) = default ; <nl> <nl> - Iterator begin ( ) const { return t_ . rbegin ( ) ; } <nl> - Iterator end ( ) const { return t_ . rend ( ) ; } <nl> + / / TODO ( clemensh ) : Use std : : rbegin / std : : rend once we have C + + 14 support . <nl> + Iterator begin ( ) const { return Iterator ( std : : end ( t_ ) ) ; } <nl> + Iterator end ( ) const { return Iterator ( std : : begin ( t_ ) ) ; } <nl> <nl> private : <nl> T & t_ ; <nl> mmm a / src / builtins / ia32 / builtins - ia32 . cc <nl> ppp b / src / builtins / ia32 / builtins - ia32 . cc <nl> <nl> <nl> # if V8_TARGET_ARCH_IA32 <nl> <nl> + # include " src / base / adapters . h " <nl> # include " src / code - factory . h " <nl> # include " src / debug / debug . h " <nl> # include " src / deoptimizer . h " <nl> # include " src / frame - constants . h " <nl> # include " src / frames . h " <nl> # include " src / objects - inl . h " <nl> + # include " src / wasm / wasm - linkage . h " <nl> <nl> namespace v8 { <nl> namespace internal { <nl> void Builtins : : Generate_WasmCompileLazy ( MacroAssembler * masm ) { <nl> / / Save all parameter registers ( see wasm - linkage . cc ) . They might be <nl> / / overwritten in the runtime call below . We don ' t have any callee - saved <nl> / / registers in wasm , so no need to store anything else . <nl> - constexpr Register gp_regs [ ] { eax , ebx , ecx , edx } ; <nl> - constexpr XMMRegister xmm_regs [ ] { xmm1 , xmm2 , xmm3 , xmm4 , xmm5 , xmm6 } ; <nl> - <nl> - for ( auto reg : gp_regs ) { <nl> + for ( Register reg : wasm : : kGpParamRegisters ) { <nl> + if ( reg = = kWasmInstanceRegister ) continue ; <nl> __ Push ( reg ) ; <nl> } <nl> - __ sub ( esp , Immediate ( 16 * arraysize ( xmm_regs ) ) ) ; <nl> - for ( int i = 0 , e = arraysize ( xmm_regs ) ; i < e ; + + i ) { <nl> - __ movdqu ( Operand ( esp , 16 * i ) , xmm_regs [ i ] ) ; <nl> + __ sub ( esp , Immediate ( 16 * arraysize ( wasm : : kFpParamRegisters ) ) ) ; <nl> + int offset = 0 ; <nl> + for ( DoubleRegister reg : wasm : : kFpParamRegisters ) { <nl> + __ movdqu ( Operand ( esp , offset ) , reg ) ; <nl> + offset + = 16 ; <nl> } <nl> <nl> / / Pass the WASM instance as an explicit argument to WasmCompileLazy . <nl> __ Push ( kWasmInstanceRegister ) ; <nl> / / Initialize the JavaScript context with 0 . CEntry will use it to <nl> / / set the current context on the isolate . <nl> - __ Move ( esi , Smi : : kZero ) ; <nl> + __ Move ( kContextRegister , Smi : : kZero ) ; <nl> __ CallRuntime ( Runtime : : kWasmCompileLazy ) ; <nl> / / The entrypoint address is the first return value . <nl> __ mov ( edi , kReturnRegister0 ) ; <nl> void Builtins : : Generate_WasmCompileLazy ( MacroAssembler * masm ) { <nl> __ mov ( kWasmInstanceRegister , kReturnRegister1 ) ; <nl> <nl> / / Restore registers . 
<nl> - for ( int i = arraysize ( xmm_regs ) - 1 ; i > = 0 ; - - i ) { <nl> - __ movdqu ( xmm_regs [ i ] , Operand ( esp , 16 * i ) ) ; <nl> + for ( DoubleRegister reg : base : : Reversed ( wasm : : kFpParamRegisters ) ) { <nl> + offset - = 16 ; <nl> + __ movdqu ( reg , Operand ( esp , offset ) ) ; <nl> } <nl> - __ add ( esp , Immediate ( 16 * arraysize ( xmm_regs ) ) ) ; <nl> - for ( int i = arraysize ( gp_regs ) - 1 ; i > = 0 ; - - i ) { <nl> - __ Pop ( gp_regs [ i ] ) ; <nl> + DCHECK_EQ ( 0 , offset ) ; <nl> + __ add ( esp , Immediate ( 16 * arraysize ( wasm : : kFpParamRegisters ) ) ) ; <nl> + for ( Register reg : base : : Reversed ( wasm : : kGpParamRegisters ) ) { <nl> + if ( reg = = kWasmInstanceRegister ) continue ; <nl> + __ Pop ( reg ) ; <nl> } <nl> } <nl> / / Finally , jump to the entrypoint . <nl> mmm a / src / builtins / x64 / builtins - x64 . cc <nl> ppp b / src / builtins / x64 / builtins - x64 . cc <nl> <nl> <nl> # if V8_TARGET_ARCH_X64 <nl> <nl> + # include " src / base / adapters . h " <nl> # include " src / code - factory . h " <nl> # include " src / counters . h " <nl> # include " src / deoptimizer . h " <nl> <nl> # include " src / frames . h " <nl> # include " src / objects - inl . h " <nl> # include " src / objects / debug - objects . h " <nl> + # include " src / wasm / wasm - linkage . h " <nl> <nl> namespace v8 { <nl> namespace internal { <nl> void Builtins : : Generate_WasmCompileLazy ( MacroAssembler * masm ) { <nl> / / Save all parameter registers ( see wasm - linkage . cc ) . They might be <nl> / / overwritten in the runtime call below . We don ' t have any callee - saved <nl> / / registers in wasm , so no need to store anything else . <nl> - constexpr Register gp_regs [ ] { rax , rbx , rcx , rdx , rdi } ; <nl> - constexpr XMMRegister xmm_regs [ ] { xmm1 , xmm2 , xmm3 , xmm4 , xmm5 , xmm6 } ; <nl> - <nl> - for ( auto reg : gp_regs ) { <nl> + for ( Register reg : wasm : : kGpParamRegisters ) { <nl> + if ( reg = = kWasmInstanceRegister ) continue ; <nl> __ Push ( reg ) ; <nl> } <nl> - __ subp ( rsp , Immediate ( 16 * arraysize ( xmm_regs ) ) ) ; <nl> - for ( int i = 0 , e = arraysize ( xmm_regs ) ; i < e ; + + i ) { <nl> - __ movdqu ( Operand ( rsp , 16 * i ) , xmm_regs [ i ] ) ; <nl> + __ subp ( rsp , Immediate ( 16 * arraysize ( wasm : : kFpParamRegisters ) ) ) ; <nl> + int offset = 0 ; <nl> + for ( DoubleRegister reg : wasm : : kFpParamRegisters ) { <nl> + __ movdqu ( Operand ( rsp , offset ) , reg ) ; <nl> + offset + = 16 ; <nl> } <nl> <nl> / / Pass the WASM instance as an explicit argument to WasmCompileLazy . <nl> __ Push ( kWasmInstanceRegister ) ; <nl> / / Initialize the JavaScript context with 0 . CEntry will use it to <nl> / / set the current context on the isolate . <nl> - __ Move ( rsi , Smi : : kZero ) ; <nl> + __ Move ( kContextRegister , Smi : : kZero ) ; <nl> __ CallRuntime ( Runtime : : kWasmCompileLazy ) ; <nl> / / The entrypoint address is the first return value . <nl> __ movq ( r11 , kReturnRegister0 ) ; <nl> void Builtins : : Generate_WasmCompileLazy ( MacroAssembler * masm ) { <nl> __ movq ( kWasmInstanceRegister , kReturnRegister1 ) ; <nl> <nl> / / Restore registers . 
<nl> - for ( int i = arraysize ( xmm_regs ) - 1 ; i > = 0 ; - - i ) { <nl> - __ movdqu ( xmm_regs [ i ] , Operand ( rsp , 16 * i ) ) ; <nl> + for ( DoubleRegister reg : base : : Reversed ( wasm : : kFpParamRegisters ) ) { <nl> + offset - = 16 ; <nl> + __ movdqu ( reg , Operand ( rsp , offset ) ) ; <nl> } <nl> - __ addp ( rsp , Immediate ( 16 * arraysize ( xmm_regs ) ) ) ; <nl> - for ( int i = arraysize ( gp_regs ) - 1 ; i > = 0 ; - - i ) { <nl> - __ Pop ( gp_regs [ i ] ) ; <nl> + DCHECK_EQ ( 0 , offset ) ; <nl> + __ addp ( rsp , Immediate ( 16 * arraysize ( wasm : : kFpParamRegisters ) ) ) ; <nl> + for ( Register reg : base : : Reversed ( wasm : : kGpParamRegisters ) ) { <nl> + if ( reg = = kWasmInstanceRegister ) continue ; <nl> + __ Pop ( reg ) ; <nl> } <nl> } <nl> / / Finally , jump to the entrypoint . <nl>
[wasm] Reuse information from wasm-linkage.h
v8/v8
7631358e34f75882a9e0ac9e333253b4ad77d73e
2018-05-15T12:06:34Z
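Two ideas from the commit above are easy to lift out: base/adapters.h now builds its reverse iteration on std::reverse_iterator over std::begin/std::end, and the WasmCompileLazy builtins use that adapter to pop the wasm parameter registers in exactly the opposite order they were pushed. The following is a small sketch of the same adapter idea, assuming only C++11/14 and a stand-in register list; it mirrors the shape of V8's base::Reversed and wasm::kGpParamRegisters but is not copied from them.

#include <array>
#include <cstdio>
#include <iterator>

// Wrap any container or array so a range-for walks it back to front.
template <typename T>
class Reversed {
 public:
  using Iterator = std::reverse_iterator<decltype(std::begin(std::declval<T&>()))>;
  explicit Reversed(T& t) : t_(t) {}
  Iterator begin() const { return Iterator(std::end(t_)); }   // reverse begin = end
  Iterator end() const { return Iterator(std::begin(t_)); }   // reverse end = begin
 private:
  T& t_;
};

template <typename T>
Reversed<T> MakeReversed(T& t) { return Reversed<T>(t); }

int main() {
  // Stand-in for a parameter-register array such as wasm::kGpParamRegisters.
  std::array<const char*, 4> regs = {"eax", "ecx", "edx", "ebx"};
  for (const char* r : regs) std::printf("push %s\n", r);               // save order
  for (const char* r : MakeReversed(regs)) std::printf("pop %s\n", r);  // restore order
  return 0;
}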
mmm a / src / video_core / engines / shader_bytecode . h <nl> ppp b / src / video_core / engines / shader_bytecode . h <nl> class OpCode { <nl> INST ( " 111000100100mmm - " , Id : : BRA , Type : : Flow , " BRA " ) , <nl> INST ( " 1111000011111mmm " , Id : : SYNC , Type : : Flow , " SYNC " ) , <nl> INST ( " 111000110100mmm " , Id : : BRK , Type : : Flow , " BRK " ) , <nl> + INST ( " 111000110000mmm - " , Id : : EXIT , Type : : Flow , " EXIT " ) , <nl> INST ( " 1111000011110mmm " , Id : : DEPBAR , Type : : Synch , " DEPBAR " ) , <nl> INST ( " 1110111111011mmm " , Id : : LD_A , Type : : Memory , " LD_A " ) , <nl> INST ( " 1110111101001mmm " , Id : : LD_S , Type : : Memory , " LD_S " ) , <nl> class OpCode { <nl> INST ( " 1101111100mmmmmm " , Id : : TLD4S , Type : : Texture , " TLD4S " ) , <nl> INST ( " 110111110110mmm - " , Id : : TMML_B , Type : : Texture , " TMML_B " ) , <nl> INST ( " 1101111101011mmm " , Id : : TMML , Type : : Texture , " TMML " ) , <nl> - INST ( " 111000110000mmm - " , Id : : EXIT , Type : : Trivial , " EXIT " ) , <nl> INST ( " 11100000mmmmmm - - " , Id : : IPA , Type : : Trivial , " IPA " ) , <nl> INST ( " 1111101111100mmm " , Id : : OUT_R , Type : : Trivial , " OUT_R " ) , <nl> INST ( " 1110111111010mmm " , Id : : ISBERD , Type : : Trivial , " ISBERD " ) , <nl>
shader_bytecode: Mark EXIT as flow instruction
yuzu-emu/yuzu
a32c52b1d87daee717dc1d155e9bb59636be49ec
2019-06-04T16:18:35Z
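For context on the table edited above: each INST entry pairs a bit-pattern string with an opcode id and type, where '0'/'1' are fixed bits and letters or '-' are don't-cares, so moving EXIT next to BRA/SYNC/BRK only changes its Type, not its encoding. Below is a rough sketch of how such a pattern is commonly compiled into a mask/expected pair and matched; this is an illustration of the general technique, not yuzu's actual OpCode matcher.

#include <cstdint>
#include <cstdio>
#include <string>

struct Matcher {
  uint16_t mask = 0;    // 1 where the bit is fixed by the pattern
  uint16_t expect = 0;  // required value of those fixed bits
};

Matcher CompilePattern(const std::string& pattern) {
  Matcher m;
  for (char c : pattern) {
    m.mask <<= 1;
    m.expect <<= 1;
    if (c == '0' || c == '1') {  // anything else ('m', '-') is a wildcard
      m.mask |= 1;
      m.expect |= (c == '1');
    }
  }
  return m;
}

bool Matches(const Matcher& m, uint16_t bits) {
  return (bits & m.mask) == m.expect;
}

int main() {
  // Same shape as the EXIT pattern above, treated here as a 16-bit field.
  Matcher exit_like = CompilePattern("111000110000mmm-");
  std::printf("%d\n", Matches(exit_like, 0xE300));  // fixed bits agree -> match
  std::printf("%d\n", Matches(exit_like, 0xE340));  // differs in a fixed bit -> no match
}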
mmm a / summerofcode / ideas . md <nl> ppp b / summerofcode / ideas . md <nl> to know the gRPC code and team ! <nl> <nl> * * Required skills for all projects : * * git version control , collaborative <nl> software development on github . com , and software development in at least one <nl> - of gRPC ' s ten languages on at least one of Linux , Mac OS X , and Windows . <nl> + of gRPC ' s ten languages on at least one of Linux , macOS , and Windows . <nl> <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> <nl> - gRPC C Core : <nl> + gRPC Core : <nl> <nl> - 1 . Port gRPC to one of the major BSD platforms ( [ FreeBSD ] ( https : / / freebsd . org ) , [ NetBSD ] ( https : / / netbsd . org ) , and [ OpenBSD ] ( https : / / openbsd . org ) ) and create packages for them . Add [ kqueue ] ( https : / / www . freebsd . org / cgi / man . cgi ? query = kqueue ) support in the process . <nl> - * * * Required skills : * * C programming language , BSD operating system . <nl> - * * * Likely mentors : * * [ Nicolas Noble ] ( https : / / github . com / nicolasnoble ) , <nl> - [ Vijay Pai ] ( https : / / github . com / vjpai ) . <nl> + 1 . Implement [ " early OK " semantics ] ( https : / / github . com / grpc / grpc / issues / 7032 ) . The gRPC wire protocol allows servers to complete an RPC with OK status without having processed all requests ever sent to the client ; it ' s the gRPC Core that currently restricts applications from so behaving . This behavioral gap in the gRPC Core should be filled in . <nl> + * * * Required skills : * * C programming language , C + + programming language . <nl> + * * * Likely mentors : * * [ Nathaniel Manista ] ( https : / / github . com / nathanielmanistaatgoogle ) , [ Nicolas Noble ] ( https : / / github . com / nicolasnoble ) . <nl> <nl> + 1 . [ Make channel - connectivity - watching cancellable ] ( https : / / github . com / grpc / grpc / issues / 3064 ) . Anything worth waiting for is worth cancelling . The fact that channel connectivity is currently poll - based means that clean shutdown of gRPC channels can take as long as the poll interval . No one should have to wait two hundred milliseconds to garbage - collect an object . <nl> + * * * Required skills : * * C programming language , C + + programming language , Python programming language . <nl> + * * * Likely mentors : * * [ Nathaniel Manista ] ( https : / / github . com / nathanielmanistaatgoogle ) , [ Vijay Pai ] ( https : / / github . com / vjpai ) . <nl> <nl> gRPC Python : <nl> <nl> - 1 . Port gRPC Python to [ PyPy ] ( http : / / pypy . org ) . Investigate the state of [ Cython support ] ( http : / / docs . cython . org / src / userguide / pypy . html ) to do this or potentially explore [ cffi ] ( https : / / cffi . readthedocs . org / en / latest / ) . <nl> - * * * Required skills : * * Python programming language , PyPy Python interpreter . <nl> - * * * Likely mentors : * * [ Nathaniel Manista ] ( https : / / github . com / nathanielmanistaatgoogle ) , [ Masood Malekghassemi ] ( https : / / github . com / soltanmm ) . <nl> - 1 . Develop and test Python 3 . 5 Support for gRPC . Make necessary changes to port gRPC and package it for supported platforms . <nl> - * * * Required skills : * * Python programming language , Python 3 . 5 interpreter . <nl> - * * * Likely mentors : * * [ Nathaniel Manista ] ( https : / / github . com / nathanielmanistaatgoogle ) , [ Masood Malekghassemi ] ( https : / / github . com / soltanmm ) . <nl> + 1 . 
Support static type - checking of both gRPC Python itself and of code that uses gRPC Python . No one likes dynamic typing and Python is finally outgrowing it ! There are probably errors in the implementation of gRPC Python that [ pytype ] ( https : / / github . com / google / pytype ) or [ mypy ] ( http : / / mypy - lang . org / ) could detect . There are certainly errors in other code that uses gRPC Python that they could detect . <nl> + * * * Required skills : * * Python programming language , open source development across multiple repositories and projects . <nl> + * * * Likely mentors : * * [ Nathaniel Manista ] ( https : / / github . com / nathanielmanistaatgoogle ) , [ Kailash Sethuraman ] ( https : / / github . com / hsaliak ) , [ Ken Payson ] ( https : / / github . com / kpayson64 ) , [ Mehrdad Afshari ] ( https : / / github . com / mehrdada ) . <nl> <nl> - gRPC Ruby / Java : <nl> - <nl> - 1 . [ jRuby ] ( http : / / jruby . org ) support for gRPC . Develop a jRuby wrapper for gRPC based on grpc - java and ensure that it is API compatible with the existing Ruby implementation and passes all tests . <nl> - * * * Required skills : * * Java programming language , Ruby programming language . <nl> - * * * Likely mentors : * * [ Michael Lumish ] ( https : / / github . com / murgatroid99 ) , [ Eric Anderson ] ( https : / / github . com / ejona86 ) . <nl> - <nl> - <nl> - gRPC Wire Protocol : <nl> - <nl> - 1 . Develop a [ Wireshark ] ( https : / / wireshark . org ) plugin for the gRPC protocol . Provide documentation and tutorials for this plugin . <nl> - * * * Bonus : * * consider set - up and use with mobile clients . <nl> - * * * Required skills : * * Wireshark software . <nl> - * * * Likely mentors : * * [ Nicolas Noble ] ( https : / / github . com / nicolasnoble ) . <nl> + 1 . [ Enable building of gRPC Python with Bazel ] ( https : / / github . com / grpc / grpc / issues / 8079 ) . Bazel is the designated replacement for our constellation of crufty build scripts , but it ' s still under active development itself . Up for a challenge ? gRPC Python could easily be the most complex codebase to be built with Bazel . <nl> + * * * Required skills : * * Python programming language , Bazel toolchain , Cython , open source development across multiple repositories and projects . <nl> + * * * Likely mentors : * * [ Nathaniel Manista ] ( https : / / github . com / nathanielmanistaatgoogle ) , [ Ken Payson ] ( https : / / github . com / kpayson64 ) , [ Mehrdad Afshari ] ( https : / / github . com / mehrdada ) . <nl>
Merge pull request from nathanielmanistaatgoogle/summer-of-code
grpc/grpc
20e7074e4101b4fdbae1764caa952301b38957c4
2018-01-23T04:07:11Z
mmm a / dlib / dnn / curand_dlibapi . h <nl> ppp b / dlib / dnn / curand_dlibapi . h <nl> namespace dlib <nl> <nl> void fill_gaussian ( <nl> tensor & data , <nl> - float mean , <nl> - float stddev <nl> + float mean = 0 , <nl> + float stddev = 1 <nl> ) ; <nl> / * ! <nl> requires <nl> mmm a / dlib / dnn / tensor_tools . h <nl> ppp b / dlib / dnn / tensor_tools . h <nl> namespace dlib { namespace tt <nl> <nl> void fill_gaussian ( <nl> tensor & data , <nl> - float mean , <nl> - float stddev <nl> + float mean = 0 , <nl> + float stddev = 1 <nl> ) ; <nl> / * ! <nl> requires <nl>
Made fill_gaussian() default to a standard normal distribution.
davisking/dlib
7272bc74c2667450fb621720437e256b4330ae91
2016-06-11T12:03:40Z
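The dlib change above is purely an interface default: fill_gaussian() keeps its behavior but can now be called without the mean/stddev arguments to get a standard normal N(0, 1) fill. A tiny sketch of the calling pattern follows, using a hypothetical free function over std::vector<float> instead of dlib's tensor type; note that, as in the headers above, the defaults live on the declaration only.

#include <cstdio>
#include <random>
#include <vector>

// Defaults belong on the declaration; the definition must not repeat them.
void fill_gaussian(std::vector<float>& data, float mean = 0, float stddev = 1);

void fill_gaussian(std::vector<float>& data, float mean, float stddev) {
  std::mt19937 rng(42);  // fixed seed so the demo is reproducible
  std::normal_distribution<float> dist(mean, stddev);
  for (float& x : data) x = dist(rng);
}

int main() {
  std::vector<float> a(4), b(4);
  fill_gaussian(a);             // N(0, 1), thanks to the new defaults
  fill_gaussian(b, 5.f, 0.1f);  // explicit mean/stddev still work
  std::printf("%f %f\n", a[0], b[0]);
}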
mmm a / Telegram / SourceFiles / app . cpp <nl> ppp b / Telegram / SourceFiles / app . cpp <nl> namespace { <nl> typedef QMap < ChannelId , ReplyMarkups > ChannelReplyMarkups ; <nl> ChannelReplyMarkups channelReplyMarkups ; <nl> <nl> + PhotoItems photoItems ; <nl> VideoItems videoItems ; <nl> AudioItems audioItems ; <nl> DocumentItems documentItems ; <nl> namespace App { <nl> cSetStickerSetsOrder ( StickerSetsOrder ( ) ) ; <nl> cSetLastStickersUpdate ( 0 ) ; <nl> cSetReportSpamStatuses ( ReportSpamStatuses ( ) ) ; <nl> + : : photoItems . clear ( ) ; <nl> : : videoItems . clear ( ) ; <nl> : : audioItems . clear ( ) ; <nl> : : documentItems . clear ( ) ; <nl> namespace App { <nl> return result ; <nl> } <nl> <nl> + void regPhotoItem ( PhotoData * data , HistoryItem * item ) { <nl> + : : photoItems [ data ] . insert ( item , NullType ( ) ) ; <nl> + } <nl> + <nl> + void unregPhotoItem ( PhotoData * data , HistoryItem * item ) { <nl> + : : photoItems [ data ] . remove ( item ) ; <nl> + } <nl> + <nl> + const PhotoItems & photoItems ( ) { <nl> + return : : photoItems ; <nl> + } <nl> + <nl> void regVideoItem ( VideoData * data , HistoryItem * item ) { <nl> : : videoItems [ data ] . insert ( item , NullType ( ) ) ; <nl> } <nl> mmm a / Telegram / SourceFiles / app . h <nl> ppp b / Telegram / SourceFiles / app . h <nl> class FileUploader ; <nl> # include " layout . h " <nl> <nl> typedef QMap < HistoryItem * , NullType > HistoryItemsMap ; <nl> + typedef QMap < PhotoData * , HistoryItemsMap > PhotoItems ; <nl> typedef QMap < VideoData * , HistoryItemsMap > VideoItems ; <nl> typedef QMap < AudioData * , HistoryItemsMap > AudioItems ; <nl> typedef QMap < DocumentData * , HistoryItemsMap > DocumentItems ; <nl> namespace App { <nl> QImage readImage ( QByteArray data , QByteArray * format = 0 , bool opaque = true , bool * animated = 0 ) ; <nl> QImage readImage ( const QString & file , QByteArray * format = 0 , bool opaque = true , bool * animated = 0 , QByteArray * content = 0 ) ; <nl> <nl> + void regPhotoItem ( PhotoData * data , HistoryItem * item ) ; <nl> + void unregPhotoItem ( PhotoData * data , HistoryItem * item ) ; <nl> + const PhotoItems & photoItems ( ) ; <nl> + <nl> void regVideoItem ( VideoData * data , HistoryItem * item ) ; <nl> void unregVideoItem ( VideoData * data , HistoryItem * item ) ; <nl> const VideoItems & videoItems ( ) ; <nl> mmm a / Telegram / SourceFiles / facades . cpp <nl> ppp b / Telegram / SourceFiles / facades . cpp <nl> namespace Ui { <nl> return false ; <nl> } <nl> <nl> + bool isMediaViewShown ( ) { <nl> + if ( Window * w = App : : wnd ( ) ) return w - > ui_isMediaViewShown ( ) ; <nl> + return false ; <nl> + } <nl> + <nl> void clipRedraw ( ClipReader * reader ) { <nl> const GifItems & items ( App : : gifItems ( ) ) ; <nl> GifItems : : const_iterator it = items . constFind ( reader ) ; <nl> namespace Notify { <nl> if ( MainWidget * m = App : : main ( ) ) m - > notify_migrateUpdated ( peer ) ; <nl> } <nl> <nl> + void mediaViewHidden ( ) { <nl> + if ( MainWidget * m = App : : main ( ) ) m - > notify_mediaViewHidden ( ) ; <nl> + } <nl> + <nl> void clipReinit ( ClipReader * reader ) { <nl> const GifItems & items ( App : : gifItems ( ) ) ; <nl> GifItems : : const_iterator it = items . constFind ( reader ) ; <nl> mmm a / Telegram / SourceFiles / facades . h <nl> ppp b / Telegram / SourceFiles / facades . 
h <nl> namespace Ui { / / openssl doesn ' t allow me to use UI : ( <nl> void showLayer ( LayeredWidget * box , ShowLayerOptions options = CloseOtherLayers ) ; <nl> void hideLayer ( bool fast = false ) ; <nl> bool isLayerShown ( ) ; <nl> + bool isMediaViewShown ( ) ; <nl> <nl> void clipRedraw ( ClipReader * reader ) ; <nl> <nl> namespace Notify { <nl> <nl> void migrateUpdated ( PeerData * peer ) ; <nl> <nl> + void mediaViewHidden ( ) ; <nl> + <nl> void clipReinit ( ClipReader * reader ) ; <nl> <nl> void historyItemResized ( const HistoryItem * item , bool scrollToIt = false ) ; <nl> mmm a / Telegram / SourceFiles / gui / animation . cpp <nl> ppp b / Telegram / SourceFiles / gui / animation . cpp <nl> void ClipReader : : start ( int32 framew , int32 frameh , int32 outerw , int32 outerh , b <nl> } <nl> <nl> QPixmap ClipReader : : current ( int32 framew , int32 frameh , int32 outerw , int32 outerh , uint64 ms ) { <nl> - _lastDisplayMs . set ( ms ) ; <nl> _currentDisplayed . set ( true ) ; <nl> - if ( _paused . get ( ) ) { <nl> - _paused . set ( false ) ; <nl> - if ( _clipManagers . size ( ) < = _threadIndex ) error ( ) ; <nl> - if ( _state ! = ClipError ) { <nl> - _clipManagers . at ( _threadIndex ) - > update ( this ) ; <nl> + if ( ms ) { <nl> + _lastDisplayMs . set ( ms ) ; <nl> + if ( _paused . get ( ) ) { <nl> + _paused . set ( false ) ; <nl> + if ( _clipManagers . size ( ) < = _threadIndex ) error ( ) ; <nl> + if ( _state ! = ClipError ) { <nl> + _clipManagers . at ( _threadIndex ) - > update ( this ) ; <nl> + } <nl> } <nl> } <nl> <nl> class FFMpegReaderImplementation : public ClipReaderImplementation { <nl> continue ; <nl> } <nl> <nl> - return false ; <nl> + if ( res ! = AVERROR_EOF | | ! _hadFrame ) { / / try to skip end of file <nl> + return false ; <nl> + } <nl> + freePacket ( ) ; <nl> + _avpkt . data = NULL ; <nl> + _avpkt . size = 0 ; <nl> + continue ; <nl> } <nl> if ( res > 0 ) decoded = res ; <nl> } <nl> mmm a / Telegram / SourceFiles / gui / images . cpp <nl> ppp b / Telegram / SourceFiles / gui / images . cpp <nl> Copyright ( c ) 2014 - 2015 John Preston , https : / / desktop . telegram . org <nl> # include " pspecific . h " <nl> <nl> namespace { <nl> - typedef QMap < QString , LocalImage * > LocalImages ; <nl> + typedef QMap < QString , Image * > LocalImages ; <nl> LocalImages localImages ; <nl> <nl> Image * blank ( ) { <nl> ImagePtr : : ImagePtr ( int32 width , int32 height , const MTPFileLocation & location , I <nl> Parent ( ( location . type ( ) = = mtpc_fileLocation ) ? ( Image * ) ( getImage ( StorageImageLocation ( width , height , location . c_fileLocation ( ) ) ) ) : def . v ( ) ) { <nl> } <nl> <nl> + Image : : Image ( const QString & file , QByteArray fmt ) : _forgot ( false ) { <nl> + _data = QPixmap : : fromImage ( App : : readImage ( file , & fmt , false , 0 , & _saved ) , Qt : : ColorOnly ) ; <nl> + _format = fmt ; <nl> + if ( ! _data . isNull ( ) ) { <nl> + globalAquiredSize + = int64 ( _data . width ( ) ) * _data . height ( ) * 4 ; <nl> + } <nl> + } <nl> + <nl> + Image : : Image ( const QByteArray & filecontent , QByteArray fmt ) : _forgot ( false ) { <nl> + _data = QPixmap : : fromImage ( App : : readImage ( filecontent , & fmt , false ) , Qt : : ColorOnly ) ; <nl> + _format = fmt ; <nl> + _saved = filecontent ; <nl> + if ( ! _data . isNull ( ) ) { <nl> + globalAquiredSize + = int64 ( _data . width ( ) ) * _data . 
height ( ) * 4 ; <nl> + } <nl> + } <nl> + <nl> + Image : : Image ( const QPixmap & pixmap , QByteArray format ) : _format ( format ) , _forgot ( false ) , _data ( pixmap ) { <nl> + if ( ! _data . isNull ( ) ) { <nl> + globalAquiredSize + = int64 ( _data . width ( ) ) * _data . height ( ) * 4 ; <nl> + } <nl> + } <nl> + <nl> + Image : : Image ( const QByteArray & filecontent , QByteArray fmt , const QPixmap & pixmap ) : _saved ( filecontent ) , _format ( fmt ) , _forgot ( false ) , _data ( pixmap ) { <nl> + _data = pixmap ; <nl> + _format = fmt ; <nl> + _saved = filecontent ; <nl> + if ( ! _data . isNull ( ) ) { <nl> + globalAquiredSize + = int64 ( _data . width ( ) ) * _data . height ( ) * 4 ; <nl> + } <nl> + } <nl> + <nl> const QPixmap & Image : : pix ( int32 w , int32 h ) const { <nl> - restore ( ) ; <nl> checkload ( ) ; <nl> <nl> if ( w < = 0 | | ! width ( ) | | ! height ( ) ) { <nl> const QPixmap & Image : : pix ( int32 w , int32 h ) const { <nl> } <nl> <nl> const QPixmap & Image : : pixRounded ( int32 w , int32 h ) const { <nl> - restore ( ) ; <nl> checkload ( ) ; <nl> <nl> if ( w < = 0 | | ! width ( ) | | ! height ( ) ) { <nl> const QPixmap & Image : : pixRounded ( int32 w , int32 h ) const { <nl> } <nl> <nl> const QPixmap & Image : : pixBlurred ( int32 w , int32 h ) const { <nl> - restore ( ) ; <nl> checkload ( ) ; <nl> <nl> if ( w < = 0 | | ! width ( ) | | ! height ( ) ) { <nl> const QPixmap & Image : : pixBlurred ( int32 w , int32 h ) const { <nl> } <nl> <nl> const QPixmap & Image : : pixColored ( const style : : color & add , int32 w , int32 h ) const { <nl> - restore ( ) ; <nl> checkload ( ) ; <nl> <nl> if ( w < = 0 | | ! width ( ) | | ! height ( ) ) { <nl> const QPixmap & Image : : pixColored ( const style : : color & add , int32 w , int32 h ) cons <nl> } <nl> <nl> const QPixmap & Image : : pixBlurredColored ( const style : : color & add , int32 w , int32 h ) const { <nl> - restore ( ) ; <nl> checkload ( ) ; <nl> <nl> if ( w < = 0 | | ! width ( ) | | ! height ( ) ) { <nl> const QPixmap & Image : : pixBlurredColored ( const style : : color & add , int32 w , int32 <nl> } <nl> <nl> const QPixmap & Image : : pixSingle ( int32 w , int32 h , int32 outerw , int32 outerh ) const { <nl> - restore ( ) ; <nl> checkload ( ) ; <nl> <nl> if ( w < = 0 | | ! width ( ) | | ! height ( ) ) { <nl> const QPixmap & Image : : pixSingle ( int32 w , int32 h , int32 outerw , int32 outerh ) co <nl> } <nl> <nl> const QPixmap & Image : : pixBlurredSingle ( int32 w , int32 h , int32 outerw , int32 outerh ) const { <nl> - restore ( ) ; <nl> checkload ( ) ; <nl> <nl> if ( w < = 0 | | ! width ( ) | | ! height ( ) ) { <nl> QPixmap imagePix ( QImage img , int32 w , int32 h , bool smooth , bool blurred , bool r <nl> } <nl> <nl> QPixmap Image : : pixNoCache ( int32 w , int32 h , bool smooth , bool blurred , bool rounded , int32 outerw , int32 outerh ) const { <nl> + if ( ! loading ( ) ) const_cast < Image * > ( this ) - > load ( ) ; <nl> restore ( ) ; <nl> - loaded ( ) ; <nl> - <nl> - const QPixmap & p ( pixData ( ) ) ; <nl> - if ( p . isNull ( ) ) return blank ( ) - > pix ( ) ; <nl> + if ( _data . isNull ( ) ) return blank ( ) - > pix ( ) ; <nl> <nl> if ( isNull ( ) & & outerw > 0 & & outerh > 0 ) { <nl> outerw * = cIntRetinaFactor ( ) ; <nl> QPixmap Image : : pixNoCache ( int32 w , int32 h , bool smooth , bool blurred , bool roun <nl> if ( rounded ) imageRound ( result ) ; <nl> return QPixmap : : fromImage ( result , Qt : : ColorOnly ) ; <nl> } <nl> - return imagePix ( p . 
toImage ( ) , w , h , smooth , blurred , rounded , outerw , outerh ) ; <nl> + return imagePix ( _data . toImage ( ) , w , h , smooth , blurred , rounded , outerw , outerh ) ; <nl> } <nl> <nl> QPixmap Image : : pixColoredNoCache ( const style : : color & add , int32 w , int32 h , bool smooth ) const { <nl> + const_cast < Image * > ( this ) - > load ( ) ; <nl> restore ( ) ; <nl> - loaded ( ) ; <nl> + if ( _data . isNull ( ) ) return blank ( ) - > pix ( ) ; <nl> <nl> - const QPixmap & p ( pixData ( ) ) ; <nl> - if ( p . isNull ( ) ) { <nl> - return blank ( ) - > pix ( ) ; <nl> - } <nl> - if ( w < = 0 | | ! width ( ) | | ! height ( ) | | ( w = = width ( ) & & ( h < = 0 | | h = = height ( ) ) ) ) return QPixmap : : fromImage ( imageColored ( add , p . toImage ( ) ) ) ; <nl> + QImage img = _data . toImage ( ) ; <nl> + if ( w < = 0 | | ! width ( ) | | ! height ( ) | | ( w = = width ( ) & & ( h < = 0 | | h = = height ( ) ) ) ) return QPixmap : : fromImage ( imageColored ( add , img ) ) ; <nl> if ( h < = 0 ) { <nl> - return QPixmap : : fromImage ( imageColored ( add , p . toImage ( ) . scaledToWidth ( w , smooth ? Qt : : SmoothTransformation : Qt : : FastTransformation ) ) , Qt : : ColorOnly ) ; <nl> + return QPixmap : : fromImage ( imageColored ( add , img . scaledToWidth ( w , smooth ? Qt : : SmoothTransformation : Qt : : FastTransformation ) ) , Qt : : ColorOnly ) ; <nl> } <nl> - return QPixmap : : fromImage ( imageColored ( add , p . toImage ( ) . scaled ( w , h , Qt : : IgnoreAspectRatio , smooth ? Qt : : SmoothTransformation : Qt : : FastTransformation ) ) , Qt : : ColorOnly ) ; <nl> + return QPixmap : : fromImage ( imageColored ( add , img . scaled ( w , h , Qt : : IgnoreAspectRatio , smooth ? Qt : : SmoothTransformation : Qt : : FastTransformation ) ) , Qt : : ColorOnly ) ; <nl> } <nl> <nl> QPixmap Image : : pixBlurredColoredNoCache ( const style : : color & add , int32 w , int32 h ) const { <nl> + const_cast < Image * > ( this ) - > load ( ) ; <nl> restore ( ) ; <nl> - loaded ( ) ; <nl> - <nl> - const QPixmap & p ( pixData ( ) ) ; <nl> - if ( p . isNull ( ) ) return blank ( ) - > pix ( ) ; <nl> + if ( _data . isNull ( ) ) return blank ( ) - > pix ( ) ; <nl> <nl> - QImage img = imageBlur ( p . toImage ( ) ) ; <nl> + QImage img = imageBlur ( _data . toImage ( ) ) ; <nl> if ( h < = 0 ) { <nl> img = img . scaledToWidth ( w , Qt : : SmoothTransformation ) ; <nl> } else { <nl> QPixmap Image : : pixBlurredColoredNoCache ( const style : : color & add , int32 w , int32 <nl> } <nl> <nl> void Image : : forget ( ) const { <nl> - if ( forgot ) return ; <nl> + if ( _forgot ) return ; <nl> <nl> - const QPixmap & p ( pixData ( ) ) ; <nl> - if ( p . isNull ( ) ) return ; <nl> + if ( _data . isNull ( ) ) return ; <nl> <nl> invalidateSizeCache ( ) ; <nl> - if ( saved . isEmpty ( ) ) { <nl> - QBuffer buffer ( & saved ) ; <nl> - if ( format . toLower ( ) = = " webp " ) { <nl> - int a = 0 ; <nl> - } <nl> - if ( ! p . save ( & buffer , format ) ) { <nl> - if ( p . save ( & buffer , " PNG " ) ) { <nl> - format = " PNG " ; <nl> + if ( _saved . isEmpty ( ) ) { <nl> + QBuffer buffer ( & _saved ) ; <nl> + if ( ! _data . save ( & buffer , _format ) ) { <nl> + if ( _data . save ( & buffer , " PNG " ) ) { <nl> + _format = " PNG " ; <nl> } else { <nl> return ; <nl> } <nl> } <nl> } <nl> - globalAquiredSize - = int64 ( p . width ( ) ) * p . height ( ) * 4 ; <nl> - doForget ( ) ; <nl> - forgot = true ; <nl> + globalAquiredSize - = int64 ( _data . width ( ) ) * _data . 
height ( ) * 4 ; <nl> + _data = QPixmap ( ) ; <nl> + _forgot = true ; <nl> } <nl> <nl> void Image : : restore ( ) const { <nl> - if ( ! forgot ) return ; <nl> - doRestore ( ) ; <nl> - const QPixmap & p ( pixData ( ) ) ; <nl> - if ( ! p . isNull ( ) ) { <nl> - globalAquiredSize + = int64 ( p . width ( ) ) * p . height ( ) * 4 ; <nl> + if ( ! _forgot ) return ; <nl> + <nl> + QBuffer buffer ( & _saved ) ; <nl> + QImageReader reader ( & buffer , _format ) ; <nl> + # if QT_VERSION > = QT_VERSION_CHECK ( 5 , 5 , 0 ) <nl> + reader . setAutoTransform ( true ) ; <nl> + # endif <nl> + _data = QPixmap : : fromImageReader ( & reader , Qt : : ColorOnly ) ; <nl> + <nl> + if ( ! _data . isNull ( ) ) { <nl> + globalAquiredSize + = int64 ( _data . width ( ) ) * _data . height ( ) * 4 ; <nl> } <nl> - forgot = false ; <nl> + _forgot = false ; <nl> } <nl> <nl> void Image : : invalidateSizeCache ( ) const { <nl> void Image : : invalidateSizeCache ( ) const { <nl> _sizesCache . clear ( ) ; <nl> } <nl> <nl> - LocalImage : : LocalImage ( const QString & file , QByteArray fmt ) { <nl> - data = QPixmap : : fromImage ( App : : readImage ( file , & fmt , false , 0 , & saved ) , Qt : : ColorOnly ) ; <nl> - format = fmt ; <nl> - if ( ! data . isNull ( ) ) { <nl> - globalAquiredSize + = int64 ( data . width ( ) ) * data . height ( ) * 4 ; <nl> - } <nl> - } <nl> - <nl> - LocalImage : : LocalImage ( const QByteArray & filecontent , QByteArray fmt ) { <nl> - data = QPixmap : : fromImage ( App : : readImage ( filecontent , & fmt , false ) , Qt : : ColorOnly ) ; <nl> - format = fmt ; <nl> - saved = filecontent ; <nl> - if ( ! data . isNull ( ) ) { <nl> - globalAquiredSize + = int64 ( data . width ( ) ) * data . height ( ) * 4 ; <nl> - } <nl> - } <nl> - <nl> - LocalImage : : LocalImage ( const QPixmap & pixmap , QByteArray format ) : Image ( format ) , data ( pixmap ) { <nl> - if ( ! data . isNull ( ) ) { <nl> - globalAquiredSize + = int64 ( data . width ( ) ) * data . height ( ) * 4 ; <nl> - } <nl> - } <nl> - <nl> - LocalImage : : LocalImage ( const QByteArray & filecontent , QByteArray fmt , const QPixmap & pixmap ) { <nl> - data = pixmap ; <nl> - format = fmt ; <nl> - saved = filecontent ; <nl> - if ( ! data . isNull ( ) ) { <nl> - globalAquiredSize + = int64 ( data . width ( ) ) * data . height ( ) * 4 ; <nl> - } <nl> - } <nl> - <nl> - const QPixmap & LocalImage : : pixData ( ) const { <nl> - return data ; <nl> - } <nl> - <nl> - int32 LocalImage : : width ( ) const { <nl> - restore ( ) ; <nl> - return data . width ( ) ; <nl> - } <nl> - <nl> - int32 LocalImage : : height ( ) const { <nl> - restore ( ) ; <nl> - return data . height ( ) ; <nl> - } <nl> - <nl> - LocalImage : : ~ LocalImage ( ) { <nl> - if ( ! data . isNull ( ) ) { <nl> - globalAquiredSize - = int64 ( data . width ( ) ) * data . height ( ) * 4 ; <nl> + Image : : ~ Image ( ) { <nl> + invalidateSizeCache ( ) ; <nl> + if ( ! _data . isNull ( ) ) { <nl> + globalAquiredSize - = int64 ( _data . width ( ) ) * _data . height ( ) * 4 ; <nl> } <nl> } <nl> <nl> - LocalImage * getImage ( const QString & file , QByteArray format ) { <nl> + Image * getImage ( const QString & file , QByteArray format ) { <nl> QFileInfo f ( file ) ; <nl> QString key = qsl ( " / / : % 1 / / : % 2 / / : " ) . arg ( f . size ( ) ) . arg ( f . lastModified ( ) . toTime_t ( ) ) + file ; <nl> LocalImages : : const_iterator i = localImages . constFind ( key ) ; <nl> if ( i = = localImages . cend ( ) ) { <nl> - i = localImages . 
insert ( key , new LocalImage ( file , format ) ) ; <nl> + i = localImages . insert ( key , new Image ( file , format ) ) ; <nl> } <nl> return i . value ( ) ; <nl> } <nl> <nl> - LocalImage * getImage ( const QByteArray & filecontent , QByteArray format ) { <nl> - return new LocalImage ( filecontent , format ) ; <nl> + Image * getImage ( const QByteArray & filecontent , QByteArray format ) { <nl> + return new Image ( filecontent , format ) ; <nl> } <nl> <nl> - LocalImage * getImage ( const QPixmap & pixmap , QByteArray format ) { <nl> - return new LocalImage ( pixmap , format ) ; <nl> + Image * getImage ( const QPixmap & pixmap , QByteArray format ) { <nl> + return new Image ( pixmap , format ) ; <nl> } <nl> <nl> - LocalImage * getImage ( const QByteArray & filecontent , QByteArray format , const QPixmap & pixmap ) { <nl> - return new LocalImage ( filecontent , format , pixmap ) ; <nl> + Image * getImage ( const QByteArray & filecontent , QByteArray format , const QPixmap & pixmap ) { <nl> + return new Image ( filecontent , format , pixmap ) ; <nl> } <nl> <nl> void clearStorageImages ( ) { <nl> StorageImage : : StorageImage ( const StorageImageLocation & location , QByteArray & byt <nl> } <nl> } <nl> <nl> - const QPixmap & StorageImage : : pixData ( ) const { <nl> - return _data ; <nl> - } <nl> - <nl> int32 StorageImage : : width ( ) const { <nl> return _location . width ( ) ; <nl> } <nl> int32 StorageImage : : height ( ) const { <nl> return _location . height ( ) ; <nl> } <nl> <nl> - void StorageImage : : checkload ( ) const { <nl> + void StorageImage : : doCheckload ( ) const { <nl> if ( ! amLoading ( ) | | ! _loader - > done ( ) ) return ; <nl> <nl> QPixmap data = _loader - > imagePixmap ( ) ; <nl> void StorageImage : : checkload ( ) const { <nl> globalAquiredSize - = int64 ( _data . width ( ) ) * _data . height ( ) * 4 ; <nl> } <nl> <nl> - format = _loader - > imageFormat ( ) ; <nl> + _format = _loader - > imageFormat ( ) ; <nl> _data = data ; <nl> - saved = _loader - > bytes ( ) ; <nl> - const_cast < StorageImage * > ( this ) - > _size = saved . size ( ) ; <nl> + _saved = _loader - > bytes ( ) ; <nl> + const_cast < StorageImage * > ( this ) - > _size = _saved . size ( ) ; <nl> const_cast < StorageImage * > ( this ) - > _location . setSize ( _data . width ( ) , _data . height ( ) ) ; <nl> globalAquiredSize + = int64 ( _data . width ( ) ) * _data . height ( ) * 4 ; <nl> <nl> void StorageImage : : checkload ( ) const { <nl> _loader - > rpcInvalidate ( ) ; <nl> _loader = 0 ; <nl> <nl> - forgot = false ; <nl> + _forgot = false ; <nl> } <nl> <nl> void StorageImage : : setData ( QByteArray & bytes , const QByteArray & bytesFormat ) { <nl> void StorageImage : : setData ( QByteArray & bytes , const QByteArray & bytesFormat ) { <nl> _loader - > rpcInvalidate ( ) ; <nl> _loader = 0 ; <nl> } <nl> - saved = bytes ; <nl> - format = fmt ; <nl> - forgot = false ; <nl> + _saved = bytes ; <nl> + _format = fmt ; <nl> + _forgot = false ; <nl> } <nl> <nl> void StorageImage : : automaticLoad ( const HistoryItem * item ) { <nl> StorageImage : : ~ StorageImage ( ) { <nl> } <nl> <nl> bool StorageImage : : loaded ( ) const { <nl> - checkload ( ) ; <nl> - return ( ! _data . isNull ( ) | | ! saved . isNull ( ) ) ; <nl> - } <nl> - <nl> - bool StorageImage : : loading ( ) const { <nl> - return amLoading ( ) ; <nl> + doCheckload ( ) ; <nl> + return ( ! _data . isNull ( ) | | ! _saved . 
isNull ( ) ) ; <nl> } <nl> <nl> bool StorageImage : : displayLoading ( ) const { <nl> mmm a / Telegram / SourceFiles / gui / images . h <nl> ppp b / Telegram / SourceFiles / gui / images . h <nl> class HistoryItem ; <nl> class Image { <nl> public : <nl> <nl> - Image ( QByteArray format = " PNG " ) : format ( format ) , forgot ( false ) { <nl> - } <nl> + Image ( const QString & file , QByteArray format = QByteArray ( ) ) ; <nl> + Image ( const QByteArray & filecontent , QByteArray format = QByteArray ( ) ) ; <nl> + Image ( const QPixmap & pixmap , QByteArray format = QByteArray ( ) ) ; <nl> + Image ( const QByteArray & filecontent , QByteArray format , const QPixmap & pixmap ) ; <nl> <nl> virtual void automaticLoad ( const HistoryItem * item ) { / / auto load photo <nl> } <nl> class Image { <nl> QPixmap pixColoredNoCache ( const style : : color & add , int32 w = 0 , int32 h = 0 , bool smooth = false ) const ; <nl> QPixmap pixBlurredColoredNoCache ( const style : : color & add , int32 w , int32 h = 0 ) const ; <nl> <nl> - virtual int32 width ( ) const = 0 ; <nl> - virtual int32 height ( ) const = 0 ; <nl> + virtual int32 width ( ) const { <nl> + restore ( ) ; <nl> + return _data . width ( ) ; <nl> + } <nl> + <nl> + virtual int32 height ( ) const { <nl> + restore ( ) ; <nl> + return _data . height ( ) ; <nl> + } <nl> <nl> virtual void load ( bool loadFirst = false , bool prior = true ) { <nl> } <nl> + <nl> virtual void loadEvenCancelled ( bool loadFirst = false , bool prior = true ) { <nl> } <nl> <nl> bool isNull ( ) const ; <nl> <nl> void forget ( ) const ; <nl> - void restore ( ) const ; <nl> <nl> QByteArray savedFormat ( ) const { <nl> - return format ; <nl> + return _format ; <nl> } <nl> QByteArray savedData ( ) const { <nl> - return saved ; <nl> + return _saved ; <nl> } <nl> <nl> - virtual ~ Image ( ) { <nl> - invalidateSizeCache ( ) ; <nl> - } <nl> + virtual ~ Image ( ) ; <nl> <nl> protected : <nl> + Image ( QByteArray format = " PNG " ) : _format ( format ) , _forgot ( false ) { <nl> + } <nl> <nl> + void restore ( ) const ; <nl> virtual void checkload ( ) const { <nl> } <nl> - <nl> - virtual const QPixmap & pixData ( ) const = 0 ; <nl> - virtual void doForget ( ) const = 0 ; <nl> - virtual void doRestore ( ) const = 0 ; <nl> - <nl> void invalidateSizeCache ( ) const ; <nl> <nl> - mutable QByteArray saved , format ; <nl> - mutable bool forgot ; <nl> + mutable QByteArray _saved , _format ; <nl> + mutable bool _forgot ; <nl> + mutable QPixmap _data ; <nl> <nl> private : <nl> <nl> class Image { <nl> <nl> } ; <nl> <nl> - class LocalImage : public Image { <nl> - public : <nl> - <nl> - LocalImage ( const QString & file , QByteArray format = QByteArray ( ) ) ; <nl> - LocalImage ( const QByteArray & filecontent , QByteArray format = QByteArray ( ) ) ; <nl> - LocalImage ( const QPixmap & pixmap , QByteArray format = QByteArray ( ) ) ; <nl> - LocalImage ( const QByteArray & filecontent , QByteArray format , const QPixmap & pixmap ) ; <nl> - <nl> - int32 width ( ) const ; <nl> - int32 height ( ) const ; <nl> - <nl> - ~ LocalImage ( ) ; <nl> - <nl> - protected : <nl> - <nl> - const QPixmap & pixData ( ) const ; <nl> - void doForget ( ) const { <nl> - data = QPixmap ( ) ; <nl> - } <nl> - void doRestore ( ) const { <nl> - QBuffer buffer ( & saved ) ; <nl> - QImageReader reader ( & buffer , format ) ; <nl> - # if QT_VERSION > = QT_VERSION_CHECK ( 5 , 5 , 0 ) <nl> - reader . 
setAutoTransform ( true ) ; <nl> - # endif <nl> - data = QPixmap : : fromImageReader ( & reader , Qt : : ColorOnly ) ; <nl> - } <nl> - <nl> - private : <nl> - <nl> - mutable QPixmap data ; <nl> - } ; <nl> - <nl> - LocalImage * getImage ( const QString & file , QByteArray format ) ; <nl> - LocalImage * getImage ( const QByteArray & filecontent , QByteArray format ) ; <nl> - LocalImage * getImage ( const QPixmap & pixmap , QByteArray format ) ; <nl> - LocalImage * getImage ( const QByteArray & filecontent , QByteArray format , const QPixmap & pixmap ) ; <nl> + Image * getImage ( const QString & file , QByteArray format ) ; <nl> + Image * getImage ( const QByteArray & filecontent , QByteArray format ) ; <nl> + Image * getImage ( const QPixmap & pixmap , QByteArray format ) ; <nl> + Image * getImage ( const QByteArray & filecontent , QByteArray format , const QPixmap & pixmap ) ; <nl> <nl> typedef QPair < uint64 , uint64 > StorageKey ; <nl> inline uint64 storageMix32To64 ( int32 a , int32 b ) { <nl> class StorageImage : public Image { <nl> void automaticLoad ( const HistoryItem * item ) ; / / auto load photo <nl> <nl> bool loaded ( ) const ; <nl> - bool loading ( ) const ; <nl> + bool loading ( ) const { <nl> + return amLoading ( ) ; <nl> + } <nl> bool displayLoading ( ) const ; <nl> void cancel ( ) ; <nl> float64 progress ( ) const ; <nl> class StorageImage : public Image { <nl> ~ StorageImage ( ) ; <nl> <nl> protected : <nl> - <nl> - const QPixmap & pixData ( ) const ; <nl> - void checkload ( ) const ; <nl> - void doForget ( ) const { <nl> - _data = QPixmap ( ) ; <nl> - } <nl> - void doRestore ( ) const { <nl> - QBuffer buffer ( & saved ) ; <nl> - QImageReader reader ( & buffer , format ) ; <nl> - # if QT_VERSION > = QT_VERSION_CHECK ( 5 , 5 , 0 ) <nl> - reader . setAutoTransform ( true ) ; <nl> - # endif <nl> - _data = QPixmap : : fromImageReader ( & reader , Qt : : ColorOnly ) ; <nl> + void checkload ( ) const { <nl> + doCheckload ( ) ; <nl> } <nl> <nl> private : <nl> - mutable QPixmap _data ; <nl> StorageImageLocation _location ; <nl> int32 _size ; <nl> mutable mtpFileLoader * _loader ; <nl> class StorageImage : public Image { <nl> bool amLoading ( ) const { <nl> return _loader & & _loader ! = CancelledFileLoader ; <nl> } <nl> + void doCheckload ( ) const ; <nl> <nl> } ; <nl> <nl> mmm a / Telegram / SourceFiles / history . cpp <nl> ppp b / Telegram / SourceFiles / history . cpp <nl> void RadialAnimation : : draw ( Painter & p , const QRect & inner , int32 thickness , cons <nl> p . 
setOpacity ( o ) ; <nl> } <nl> <nl> + namespace { <nl> + int32 videoMaxStatusWidth ( VideoData * video ) { <nl> + int32 result = st : : normalFont - > width ( formatDownloadText ( video - > size , video - > size ) ) ; <nl> + result = qMax ( result , st : : normalFont - > width ( formatDurationAndSizeText ( video - > duration , video - > size ) ) ) ; <nl> + return result ; <nl> + } <nl> + <nl> + int32 audioMaxStatusWidth ( AudioData * audio ) { <nl> + int32 result = st : : normalFont - > width ( formatDownloadText ( audio - > size , audio - > size ) ) ; <nl> + result = qMax ( result , st : : normalFont - > width ( formatPlayedText ( audio - > duration , audio - > duration ) ) ) ; <nl> + result = qMax ( result , st : : normalFont - > width ( formatDurationAndSizeText ( audio - > duration , audio - > size ) ) ) ; <nl> + return result ; <nl> + } <nl> + <nl> + int32 documentMaxStatusWidth ( DocumentData * document ) { <nl> + int32 result = st : : normalFont - > width ( formatDownloadText ( document - > size , document - > size ) ) ; <nl> + if ( SongData * song = document - > song ( ) ) { <nl> + result = qMax ( result , st : : normalFont - > width ( formatPlayedText ( song - > duration , song - > duration ) ) ) ; <nl> + result = qMax ( result , st : : normalFont - > width ( formatDurationAndSizeText ( song - > duration , document - > size ) ) ) ; <nl> + } else { <nl> + result = qMax ( result , st : : normalFont - > width ( formatSizeText ( document - > size ) ) ) ; <nl> + } <nl> + return result ; <nl> + } <nl> + <nl> + int32 gifMaxStatusWidth ( DocumentData * document ) { <nl> + int32 result = st : : normalFont - > width ( formatDownloadText ( document - > size , document - > size ) ) ; <nl> + result = qMax ( result , st : : normalFont - > width ( formatGifAndSizeText ( document - > size ) ) ) ; <nl> + return result ; <nl> + } <nl> + } <nl> + <nl> + HistoryFileMedia : : HistoryFileMedia ( ) : HistoryMedia ( ) <nl> + , _animation ( 0 ) { <nl> + } <nl> + <nl> + void HistoryFileMedia : : linkOver ( HistoryItem * parent , const TextLinkPtr & lnk ) { <nl> + if ( ( lnk = = _savel | | lnk = = _cancell ) & & ! dataLoaded ( ) ) { <nl> + ensureAnimation ( parent ) ; <nl> + _animation - > a_thumbOver . start ( 1 ) ; <nl> + _animation - > _a_thumbOver . start ( ) ; <nl> + } <nl> + } <nl> + <nl> + void HistoryFileMedia : : linkOut ( HistoryItem * parent , const TextLinkPtr & lnk ) { <nl> + if ( _animation & & ( lnk = = _savel | | lnk = = _cancell ) ) { <nl> + _animation - > a_thumbOver . start ( 0 ) ; <nl> + _animation - > _a_thumbOver . start ( ) ; <nl> + } <nl> + } <nl> + <nl> + void HistoryFileMedia : : setLinks ( ITextLink * openl , ITextLink * savel , ITextLink * cancell ) { <nl> + _openl . reset ( openl ) ; <nl> + _savel . reset ( savel ) ; <nl> + _cancell . reset ( cancell ) ; <nl> + } <nl> + <nl> + void HistoryFileMedia : : setStatusSize ( int32 newSize , int32 fullSize , int32 duration , qint64 realDuration ) const { <nl> + _statusSize = newSize ; <nl> + if ( _statusSize = = FileStatusSizeReady ) { <nl> + _statusText = ( duration > = 0 ) ? formatDurationAndSizeText ( duration , fullSize ) : ( duration < - 1 ? formatGifAndSizeText ( fullSize ) : formatSizeText ( fullSize ) ) ; <nl> + } else if ( _statusSize = = FileStatusSizeLoaded ) { <nl> + _statusText = ( duration > = 0 ) ? formatDurationText ( duration ) : ( duration < - 1 ? 
qsl ( " GIF " ) : formatSizeText ( fullSize ) ) ; <nl> + } else if ( _statusSize = = FileStatusSizeFailed ) { <nl> + _statusText = lang ( lng_attach_failed ) ; <nl> + } else if ( _statusSize > = 0 ) { <nl> + _statusText = formatDownloadText ( _statusSize , fullSize ) ; <nl> + } else { <nl> + _statusText = formatPlayedText ( - _statusSize - 1 , realDuration ) ; <nl> + } <nl> + } <nl> + <nl> + void HistoryFileMedia : : step_thumbOver ( const HistoryItem * parent , float64 ms , bool timer ) { <nl> + float64 dt = ms / st : : msgFileOverDuration ; <nl> + if ( dt > = 1 ) { <nl> + _animation - > a_thumbOver . finish ( ) ; <nl> + _animation - > _a_thumbOver . stop ( ) ; <nl> + checkAnimationFinished ( ) ; <nl> + } else { <nl> + _animation - > a_thumbOver . update ( dt , anim : : linear ) ; <nl> + } <nl> + if ( timer ) { <nl> + Ui : : redrawHistoryItem ( parent ) ; <nl> + } <nl> + } <nl> + <nl> + void HistoryFileMedia : : step_radial ( const HistoryItem * parent , uint64 ms , bool timer ) { <nl> + _animation - > radial . update ( dataProgress ( ) , dataFinished ( ) , ms ) ; <nl> + if ( ! _animation - > radial . animating ( ) ) { <nl> + checkAnimationFinished ( ) ; <nl> + } <nl> + if ( timer ) { <nl> + Ui : : redrawHistoryItem ( parent ) ; <nl> + } <nl> + } <nl> + <nl> + void HistoryFileMedia : : ensureAnimation ( const HistoryItem * parent ) const { <nl> + if ( ! _animation ) { <nl> + _animation = new AnimationData ( <nl> + animation ( parent , const_cast < HistoryFileMedia * > ( this ) , & HistoryFileMedia : : step_thumbOver ) , <nl> + animation ( parent , const_cast < HistoryFileMedia * > ( this ) , & HistoryFileMedia : : step_radial ) ) ; <nl> + } <nl> + } <nl> + <nl> + void HistoryFileMedia : : checkAnimationFinished ( ) { <nl> + if ( _animation & & ! _animation - > _a_thumbOver . animating ( ) & & ! _animation - > radial . animating ( ) ) { <nl> + if ( dataLoaded ( ) ) { <nl> + delete _animation ; <nl> + _animation = 0 ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + HistoryFileMedia : : ~ HistoryFileMedia ( ) { <nl> + if ( _animation ) { <nl> + delete _animation ; <nl> + setBadPointer ( _animation ) ; <nl> + } <nl> + } <nl> + <nl> HistoryPhoto : : HistoryPhoto ( const MTPDphoto & photo , const QString & caption , HistoryItem * parent ) : HistoryFileMedia ( ) <nl> , _data ( App : : feedPhoto ( photo ) ) <nl> , _pixw ( 1 ) <nl> , _pixh ( 1 ) <nl> , _caption ( st : : minPhotoSize - st : : msgPadding . left ( ) - st : : msgPadding . right ( ) ) { <nl> - setLinks ( new PhotoLink ( _data ) , new PhotoLink ( _data ) , new PhotoCancelLink ( _data ) ) ; <nl> + setLinks ( new PhotoLink ( _data ) , new PhotoSaveLink ( _data ) , new PhotoCancelLink ( _data ) ) ; <nl> <nl> if ( ! caption . isEmpty ( ) ) { <nl> _caption . 
setText ( st : : msgFont , caption + parent - > skipBlock ( ) , itemTextNoMonoOptions ( parent ) ) ; <nl> HistoryPhoto : : HistoryPhoto ( PhotoData * photo ) : HistoryFileMedia ( ) <nl> , _data ( photo ) <nl> , _pixw ( 1 ) <nl> , _pixh ( 1 ) { <nl> - setLinks ( new PhotoLink ( _data ) , new PhotoLink ( _data ) , new PhotoCancelLink ( _data ) ) ; <nl> + setLinks ( new PhotoLink ( _data ) , new PhotoSaveLink ( _data ) , new PhotoCancelLink ( _data ) ) ; <nl> <nl> init ( ) ; <nl> } <nl> HistoryPhoto : : HistoryPhoto ( PeerData * chat , const MTPDphoto & photo , int32 width ) <nl> , _data ( App : : feedPhoto ( photo ) ) <nl> , _pixw ( 1 ) <nl> , _pixh ( 1 ) { <nl> - setLinks ( new PhotoLink ( _data , chat ) , new PhotoLink ( _data , chat ) , new PhotoCancelLink ( _data ) ) ; <nl> + setLinks ( new PhotoLink ( _data , chat ) , new PhotoSaveLink ( _data , chat ) , new PhotoCancelLink ( _data ) ) ; <nl> <nl> _width = width ; <nl> init ( ) ; <nl> HistoryPhoto : : HistoryPhoto ( const HistoryPhoto & other ) : HistoryFileMedia ( ) <nl> , _pixw ( other . _pixw ) <nl> , _pixh ( other . _pixh ) <nl> , _caption ( other . _caption ) { <nl> - setLinks ( new PhotoLink ( _data ) , new PhotoLink ( _data ) , new PhotoCancelLink ( _data ) ) ; <nl> + setLinks ( new PhotoLink ( _data ) , new PhotoSaveLink ( _data ) , new PhotoCancelLink ( _data ) ) ; <nl> <nl> init ( ) ; <nl> } <nl> <nl> - <nl> void HistoryPhoto : : init ( ) { <nl> _data - > thumb - > load ( ) ; <nl> } <nl> void HistoryPhoto : : updateFrom ( const MTPMessageMedia & media , HistoryItem * parent , <nl> } <nl> } <nl> <nl> + void HistoryPhoto : : regItem ( HistoryItem * item ) { <nl> + App : : regPhotoItem ( _data , item ) ; <nl> + } <nl> + <nl> + void HistoryPhoto : : unregItem ( HistoryItem * item ) { <nl> + App : : unregPhotoItem ( _data , item ) ; <nl> + } <nl> + <nl> const QString HistoryPhoto : : inDialogsText ( ) const { <nl> return _caption . isEmpty ( ) ? lang ( lng_in_dlg_photo ) : _caption . 
original ( 0 , 0xFFFF , Text : : ExpandLinksNone ) ; <nl> } <nl> ImagePtr HistoryPhoto : : replyPreview ( ) { <nl> return _data - > makeReplyPreview ( ) ; <nl> } <nl> <nl> - namespace { <nl> - int32 videoMaxStatusWidth ( VideoData * video ) { <nl> - int32 result = st : : normalFont - > width ( formatDownloadText ( video - > size , video - > size ) ) ; <nl> - result = qMax ( result , st : : normalFont - > width ( formatDurationAndSizeText ( video - > duration , video - > size ) ) ) ; <nl> - return result ; <nl> - } <nl> - <nl> - int32 audioMaxStatusWidth ( AudioData * audio ) { <nl> - int32 result = st : : normalFont - > width ( formatDownloadText ( audio - > size , audio - > size ) ) ; <nl> - result = qMax ( result , st : : normalFont - > width ( formatPlayedText ( audio - > duration , audio - > duration ) ) ) ; <nl> - result = qMax ( result , st : : normalFont - > width ( formatDurationAndSizeText ( audio - > duration , audio - > size ) ) ) ; <nl> - return result ; <nl> - } <nl> - <nl> - int32 documentMaxStatusWidth ( DocumentData * document ) { <nl> - int32 result = st : : normalFont - > width ( formatDownloadText ( document - > size , document - > size ) ) ; <nl> - if ( SongData * song = document - > song ( ) ) { <nl> - result = qMax ( result , st : : normalFont - > width ( formatPlayedText ( song - > duration , song - > duration ) ) ) ; <nl> - result = qMax ( result , st : : normalFont - > width ( formatDurationAndSizeText ( song - > duration , document - > size ) ) ) ; <nl> - } else { <nl> - result = qMax ( result , st : : normalFont - > width ( formatSizeText ( document - > size ) ) ) ; <nl> - } <nl> - return result ; <nl> - } <nl> - <nl> - int32 gifMaxStatusWidth ( DocumentData * document ) { <nl> - int32 result = st : : normalFont - > width ( formatDownloadText ( document - > size , document - > size ) ) ; <nl> - result = qMax ( result , st : : normalFont - > width ( formatGifAndSizeText ( document - > size ) ) ) ; <nl> - return result ; <nl> - } <nl> - } <nl> - <nl> - HistoryFileMedia : : HistoryFileMedia ( ) : HistoryMedia ( ) <nl> - , _animation ( 0 ) { <nl> - } <nl> - <nl> - void HistoryFileMedia : : linkOver ( HistoryItem * parent , const TextLinkPtr & lnk ) { <nl> - if ( ( lnk = = _savel | | lnk = = _cancell ) & & ! dataLoaded ( ) ) { <nl> - ensureAnimation ( parent ) ; <nl> - _animation - > a_thumbOver . start ( 1 ) ; <nl> - _animation - > _a_thumbOver . start ( ) ; <nl> - } <nl> - } <nl> - <nl> - void HistoryFileMedia : : linkOut ( HistoryItem * parent , const TextLinkPtr & lnk ) { <nl> - if ( _animation & & ( lnk = = _savel | | lnk = = _cancell ) ) { <nl> - _animation - > a_thumbOver . start ( 0 ) ; <nl> - _animation - > _a_thumbOver . start ( ) ; <nl> - } <nl> - } <nl> - <nl> - void HistoryFileMedia : : setLinks ( ITextLink * openl , ITextLink * savel , ITextLink * cancell ) { <nl> - _openl . reset ( openl ) ; <nl> - _savel . reset ( savel ) ; <nl> - _cancell . reset ( cancell ) ; <nl> - } <nl> - <nl> - void HistoryFileMedia : : setStatusSize ( int32 newSize , int32 fullSize , int32 duration , qint64 realDuration ) const { <nl> - _statusSize = newSize ; <nl> - if ( _statusSize = = FileStatusSizeReady ) { <nl> - _statusText = ( duration > = 0 ) ? formatDurationAndSizeText ( duration , fullSize ) : ( duration < - 1 ? formatGifAndSizeText ( fullSize ) : formatSizeText ( fullSize ) ) ; <nl> - } else if ( _statusSize = = FileStatusSizeLoaded ) { <nl> - _statusText = ( duration > = 0 ) ? formatDurationText ( duration ) : ( duration < - 1 ? 
qsl ( " GIF " ) : formatSizeText ( fullSize ) ) ; <nl> - } else if ( _statusSize = = FileStatusSizeFailed ) { <nl> - _statusText = lang ( lng_attach_failed ) ; <nl> - } else if ( _statusSize > = 0 ) { <nl> - _statusText = formatDownloadText ( _statusSize , fullSize ) ; <nl> - } else { <nl> - _statusText = formatPlayedText ( - _statusSize - 1 , realDuration ) ; <nl> - } <nl> - } <nl> - <nl> - void HistoryFileMedia : : step_thumbOver ( const HistoryItem * parent , float64 ms , bool timer ) { <nl> - float64 dt = ms / st : : msgFileOverDuration ; <nl> - if ( dt > = 1 ) { <nl> - _animation - > a_thumbOver . finish ( ) ; <nl> - _animation - > _a_thumbOver . stop ( ) ; <nl> - checkAnimationFinished ( ) ; <nl> - } else { <nl> - _animation - > a_thumbOver . update ( dt , anim : : linear ) ; <nl> - } <nl> - if ( timer ) { <nl> - Ui : : redrawHistoryItem ( parent ) ; <nl> - } <nl> - } <nl> - <nl> - void HistoryFileMedia : : step_radial ( const HistoryItem * parent , uint64 ms , bool timer ) { <nl> - _animation - > radial . update ( dataProgress ( ) , dataFinished ( ) , ms ) ; <nl> - if ( ! _animation - > radial . animating ( ) ) { <nl> - checkAnimationFinished ( ) ; <nl> - } <nl> - if ( timer ) { <nl> - Ui : : redrawHistoryItem ( parent ) ; <nl> - } <nl> - } <nl> - <nl> - void HistoryFileMedia : : ensureAnimation ( const HistoryItem * parent ) const { <nl> - if ( ! _animation ) { <nl> - _animation = new AnimationData ( <nl> - animation ( parent , const_cast < HistoryFileMedia * > ( this ) , & HistoryFileMedia : : step_thumbOver ) , <nl> - animation ( parent , const_cast < HistoryFileMedia * > ( this ) , & HistoryFileMedia : : step_radial ) ) ; <nl> - } <nl> - } <nl> - <nl> - void HistoryFileMedia : : checkAnimationFinished ( ) { <nl> - if ( _animation & & ! _animation - > _a_thumbOver . animating ( ) & & ! _animation - > radial . animating ( ) ) { <nl> - if ( dataLoaded ( ) ) { <nl> - delete _animation ; <nl> - _animation = 0 ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - HistoryFileMedia : : ~ HistoryFileMedia ( ) { <nl> - if ( _animation ) { <nl> - delete _animation ; <nl> - setBadPointer ( _animation ) ; <nl> - } <nl> - } <nl> - <nl> HistoryVideo : : HistoryVideo ( const MTPDvideo & video , const QString & caption , HistoryItem * parent ) : HistoryFileMedia ( ) <nl> , _data ( App : : feedVideo ( video ) ) <nl> , _thumbw ( 1 ) <nl> void HistoryGif : : draw ( Painter & p , const HistoryItem * parent , const QRect & r , boo <nl> QRect rthumb ( rtlrect ( skipx , skipy , width , height , _width ) ) ; <nl> <nl> if ( animating ) { <nl> - p . drawPixmap ( rthumb . topLeft ( ) , _gif - > current ( _thumbw , _thumbh , width , height , ms ) ) ; <nl> + p . drawPixmap ( rthumb . topLeft ( ) , _gif - > current ( _thumbw , _thumbh , width , height , Ui : : isMediaViewShown ( ) ? 0 : ms ) ) ; <nl> } else { <nl> p . drawPixmap ( rthumb . topLeft ( ) , _data - > thumb - > pixBlurredSingle ( _thumbw , _thumbh , width , height ) ) ; <nl> } <nl> mmm a / Telegram / SourceFiles / history . h <nl> ppp b / Telegram / SourceFiles / history . h <nl> class HistoryPhoto : public HistoryFileMedia { <nl> <nl> void updateFrom ( const MTPMessageMedia & media , HistoryItem * parent , bool allowEmitResize ) ; <nl> <nl> + void regItem ( HistoryItem * item ) ; <nl> + void unregItem ( HistoryItem * item ) ; <nl> + <nl> bool hasReplyPreview ( ) const { <nl> return ! _data - > thumb - > isNull ( ) ; <nl> } <nl> mmm a / Telegram / SourceFiles / historywidget . cpp <nl> ppp b / Telegram / SourceFiles / historywidget . 
cpp <nl> void HistoryInner : : saveContextImage ( ) { <nl> if ( ! lnk ) return ; <nl> <nl> PhotoData * photo = lnk - > photo ( ) ; <nl> - if ( ! photo | | ! photo - > date | | ! photo - > full - > loaded ( ) ) return ; <nl> + if ( ! photo | | ! photo - > date | | ! photo - > loaded ( ) ) return ; <nl> <nl> QString file ; <nl> if ( filedialogGetSaveFile ( file , lang ( lng_save_photo ) , qsl ( " JPEG Image ( * . jpg ) ; ; All files ( * . * ) " ) , filedialogDefaultName ( qsl ( " photo " ) , qsl ( " . jpg " ) ) ) ) { <nl> void HistoryInner : : copyContextImage ( ) { <nl> if ( ! lnk ) return ; <nl> <nl> PhotoData * photo = lnk - > photo ( ) ; <nl> - if ( ! photo | | ! photo - > date | | ! photo - > full - > loaded ( ) ) return ; <nl> + if ( ! photo | | ! photo - > date | | ! photo - > loaded ( ) ) return ; <nl> <nl> QApplication : : clipboard ( ) - > setPixmap ( photo - > full - > pix ( ) ) ; <nl> } <nl> void HistoryInner : : onUpdateSelected ( ) { <nl> _dragItem - > getSymbol ( second , afterSymbol , uponSymbol , m . x ( ) , m . y ( ) ) ; <nl> if ( afterSymbol & & _dragSelType = = TextSelectLetters ) + + second ; <nl> uint32 selState = _dragItem - > adjustSelection ( qMin ( second , _dragSymbol ) , qMax ( second , _dragSymbol ) , _dragSelType ) ; <nl> - _selected [ _dragItem ] = selState ; <nl> + if ( _selected [ _dragItem ] ! = selState ) { <nl> + _selected [ _dragItem ] = selState ; <nl> + Ui : : redrawHistoryItem ( _dragItem ) ; <nl> + } <nl> if ( ! _wasSelectedText & & ( selState = = FullSelection | | ( selState & 0xFFFF ) ! = ( ( selState > > 16 ) & 0xFFFF ) ) ) { <nl> _wasSelectedText = true ; <nl> setFocus ( ) ; <nl> void HistoryWidget : : notify_migrateUpdated ( PeerData * peer ) { <nl> } <nl> } <nl> <nl> + void HistoryWidget : : notify_mediaViewHidden ( ) { <nl> + if ( _list ) _list - > update ( ) ; <nl> + } <nl> + <nl> void HistoryWidget : : notify_historyItemResized ( const HistoryItem * row , bool scrollToIt ) { <nl> updateListSize ( 0 , false , false , row , scrollToIt ) ; <nl> } <nl> mmm a / Telegram / SourceFiles / historywidget . h <nl> ppp b / Telegram / SourceFiles / historywidget . h <nl> class HistoryWidget : public TWidget , public RPCSender { <nl> void notify_botCommandsChanged ( UserData * user ) ; <nl> void notify_userIsBotChanged ( UserData * user ) ; <nl> void notify_migrateUpdated ( PeerData * peer ) ; <nl> + void notify_mediaViewHidden ( ) ; <nl> void notify_historyItemResized ( const HistoryItem * item , bool scrollToIt ) ; <nl> <nl> ~ HistoryWidget ( ) ; <nl> mmm a / Telegram / SourceFiles / layout . cpp <nl> ppp b / Telegram / SourceFiles / layout . cpp <nl> int32 LayoutOverviewPhoto : : resizeGetHeight ( int32 width ) { <nl> } <nl> <nl> void LayoutOverviewPhoto : : paint ( Painter & p , const QRect & clip , uint32 selection , const PaintContext * context ) const { <nl> - bool good = _data - > full - > loaded ( ) ; <nl> + bool good = _data - > loaded ( ) ; <nl> if ( ! good ) { <nl> - _data - > medium - > load ( false , false ) ; <nl> + _data - > medium - > automaticLoad ( _parent ) ; <nl> good = _data - > medium - > loaded ( ) ; <nl> } <nl> if ( ( good & & ! _goodLoaded ) | | _pix . width ( ) ! = _width * cIntRetinaFactor ( ) ) { <nl> void LayoutOverviewPhoto : : paint ( Painter & p , const QRect & clip , uint32 selection , <nl> <nl> int32 size = _width * cIntRetinaFactor ( ) ; <nl> if ( _goodLoaded | | _data - > thumb - > loaded ( ) ) { <nl> - QImage img = ( _data - > full - > loaded ( ) ? _data - > full : ( _data - > medium - > loaded ( ) ? 
_data - > medium : _data - > thumb ) ) - > pix ( ) . toImage ( ) ; <nl> + QImage img = ( _data - > loaded ( ) ? _data - > full : ( _data - > medium - > loaded ( ) ? _data - > medium : _data - > thumb ) ) - > pix ( ) . toImage ( ) ; <nl> if ( ! _goodLoaded ) { <nl> img = imageBlur ( img ) ; <nl> } <nl> LayoutOverviewLink : : LayoutOverviewLink ( HistoryMedia * media , HistoryItem * parent ) <nl> } <nl> int32 tw = 0 , th = 0 ; <nl> if ( _page & & _page - > photo ) { <nl> - if ( ! _page - > photo - > full - > loaded ( ) ) _page - > photo - > thumb - > load ( false , false ) ; <nl> + if ( ! _page - > photo - > loaded ( ) ) _page - > photo - > thumb - > load ( false , false ) ; <nl> <nl> tw = convertScale ( _page - > photo - > thumb - > width ( ) ) ; <nl> th = convertScale ( _page - > photo - > thumb - > height ( ) ) ; <nl> void LayoutOverviewLink : : paint ( Painter & p , const QRect & clip , uint32 selection , <nl> if ( clip . intersects ( rtlrect ( 0 , top , st : : dlgPhotoSize , st : : dlgPhotoSize , _width ) ) ) { <nl> if ( _page & & _page - > photo ) { <nl> QPixmap pix ; <nl> - if ( _page - > photo - > full - > loaded ( ) ) { <nl> + if ( _page - > photo - > loaded ( ) ) { <nl> pix = _page - > photo - > full - > pixSingle ( _pixw , _pixh , st : : dlgPhotoSize , st : : dlgPhotoSize ) ; <nl> } else if ( _page - > photo - > medium - > loaded ( ) ) { <nl> pix = _page - > photo - > medium - > pixSingle ( _pixw , _pixh , st : : dlgPhotoSize , st : : dlgPhotoSize ) ; <nl> mmm a / Telegram / SourceFiles / mainwidget . cpp <nl> ppp b / Telegram / SourceFiles / mainwidget . cpp <nl> void MainWidget : : notify_migrateUpdated ( PeerData * peer ) { <nl> history . notify_migrateUpdated ( peer ) ; <nl> } <nl> <nl> + void MainWidget : : notify_mediaViewHidden ( ) { <nl> + history . notify_mediaViewHidden ( ) ; <nl> + } <nl> + <nl> void MainWidget : : ui_redrawHistoryItem ( const HistoryItem * item ) { <nl> if ( ! item ) return ; <nl> <nl> mmm a / Telegram / SourceFiles / mainwidget . h <nl> ppp b / Telegram / SourceFiles / mainwidget . h <nl> class MainWidget : public TWidget , public RPCSender { <nl> void notify_userIsBotChanged ( UserData * bot ) ; <nl> void notify_userIsContactChanged ( UserData * user , bool fromThisApp ) ; <nl> void notify_migrateUpdated ( PeerData * peer ) ; <nl> + void notify_mediaViewHidden ( ) ; <nl> void notify_historyItemResized ( const HistoryItem * row , bool scrollToIt ) ; <nl> void notify_historyItemLayoutChanged ( const HistoryItem * item ) ; <nl> <nl> mmm a / Telegram / SourceFiles / mediaview . cpp <nl> ppp b / Telegram / SourceFiles / mediaview . cpp <nl> void MediaView : : updateControls ( ) { <nl> _docCancel . hide ( ) ; <nl> } <nl> <nl> - _saveVisible = ( ( _photo & & _photo - > full - > loaded ( ) ) | | ( _doc & & ( _doc - > loaded ( true ) | | ( ! fileShown ( ) & & ( _photo | | _doc ) ) ) ) ) ; <nl> + _saveVisible = ( ( _photo & & _photo - > loaded ( ) ) | | ( _doc & & ( _doc - > loaded ( true ) | | ( ! fileShown ( ) & & ( _photo | | _doc ) ) ) ) ) ; <nl> _saveNav = myrtlrect ( width ( ) - st : : mvIconSize . width ( ) * 2 , height ( ) - st : : mvIconSize . height ( ) , st : : mvIconSize . width ( ) , st : : mvIconSize . height ( ) ) ; <nl> _saveNavIcon = centersprite ( _saveNav , st : : mvSave ) ; <nl> _moreNav = myrtlrect ( width ( ) - st : : mvIconSize . width ( ) , height ( ) - st : : mvIconSize . height ( ) , st : : mvIconSize . width ( ) , st : : mvIconSize . 
height ( ) ) ; <nl> void MediaView : : updateDropdown ( ) { <nl> _btnToMessage - > setVisible ( _msgid > 0 ) ; <nl> _btnShowInFolder - > setVisible ( _doc & & ! _doc - > already ( true ) . isEmpty ( ) ) ; <nl> _btnSaveAs - > setVisible ( true ) ; <nl> - _btnCopy - > setVisible ( ( _doc & & fileShown ( ) ) | | ( _photo & & _photo - > full - > loaded ( ) ) ) ; <nl> + _btnCopy - > setVisible ( ( _doc & & fileShown ( ) ) | | ( _photo & & _photo - > loaded ( ) ) ) ; <nl> _btnForward - > setVisible ( _canForward ) ; <nl> _btnDelete - > setVisible ( _canDelete | | ( _photo & & App : : self ( ) & & App : : self ( ) - > photoId = = _photo - > id ) | | ( _photo & & _photo - > peer & & _photo - > peer - > photoId = = _photo - > id & & ( _photo - > peer - > isChat ( ) | | ( _photo - > peer - > isChannel ( ) & & _photo - > peer - > asChannel ( ) - > amCreator ( ) ) ) ) ) ; <nl> _btnViewAll - > setVisible ( ( _overview ! = OverviewCount ) & & _history ) ; <nl> void MediaView : : onSaveAs ( ) { <nl> updateOver ( _lastMouseMovePos ) ; <nl> } <nl> } else { <nl> - if ( ! _photo | | ! _photo - > full - > loaded ( ) ) return ; <nl> + if ( ! _photo | | ! _photo - > loaded ( ) ) return ; <nl> <nl> psBringToBack ( this ) ; <nl> bool gotName = filedialogGetSaveFile ( file , lang ( lng_save_photo ) , qsl ( " JPEG Image ( * . jpg ) ; ; All files ( * . * ) " ) , filedialogDefaultName ( qsl ( " photo " ) , qsl ( " . jpg " ) ) ) ; <nl> void MediaView : : onDownload ( ) { <nl> updateOver ( _lastMouseMovePos ) ; <nl> } <nl> } else { <nl> - if ( ! _photo | | ! _photo - > full - > loaded ( ) ) { <nl> + if ( ! _photo | | ! _photo - > loaded ( ) ) { <nl> _saveVisible = false ; <nl> update ( _saveNav ) ; <nl> } else { <nl> void MediaView : : onCopy ( ) { <nl> QApplication : : clipboard ( ) - > setPixmap ( QPixmap : : fromImage ( _gif - > frameOriginal ( ) ) ) ; <nl> } <nl> } else { <nl> - if ( ! _photo | | ! _photo - > full - > loaded ( ) ) return ; <nl> + if ( ! _photo | | ! _photo - > loaded ( ) ) return ; <nl> <nl> QApplication : : clipboard ( ) - > setPixmap ( _photo - > full - > pix ( ) ) ; <nl> } <nl> void MediaView : : displayPhoto ( PhotoData * photo , HistoryItem * item ) { <nl> _from = _user ; <nl> } <nl> updateControls ( ) ; <nl> - _photo - > full - > loadEvenCancelled ( ) ; <nl> + _photo - > download ( ) ; <nl> if ( isHidden ( ) ) { <nl> psUpdateOverlayed ( this ) ; <nl> show ( ) ; <nl> void MediaView : : paintEvent ( QPaintEvent * e ) { <nl> / / photo <nl> if ( _photo ) { <nl> int32 w = _width * cIntRetinaFactor ( ) ; <nl> - if ( _full < = 0 & & _photo - > full - > loaded ( ) ) { <nl> + if ( _full < = 0 & & _photo - > loaded ( ) ) { <nl> int32 h = int ( ( _photo - > full - > height ( ) * ( qreal ( w ) / qreal ( _photo - > full - > width ( ) ) ) ) + 0 . 9999 ) ; <nl> _current = _photo - > full - > pixNoCache ( w , h , true ) ; <nl> if ( cRetina ( ) ) _current . 
setDevicePixelRatio ( cRetinaFactor ( ) ) ; <nl> void MediaView : : preloadData ( int32 delta ) { <nl> if ( HistoryItem * item = App : : histItemById ( previewHistory - > channelId ( ) , previewHistory - > overview [ _overview ] [ previewIndex ] ) ) { <nl> if ( HistoryMedia * media = item - > getMedia ( ) ) { <nl> switch ( media - > type ( ) ) { <nl> - case MediaTypePhoto : static_cast < HistoryPhoto * > ( media ) - > photo ( ) - > full - > loadEvenCancelled ( ) ; break ; <nl> + case MediaTypePhoto : static_cast < HistoryPhoto * > ( media ) - > photo ( ) - > download ( ) ; break ; <nl> case MediaTypeDocument : <nl> case MediaTypeGif : { <nl> DocumentData * doc = media - > getDocument ( ) ; <nl> void MediaView : : preloadData ( int32 delta ) { <nl> } <nl> for ( int32 i = from ; i < = to ; + + i ) { <nl> if ( i > = 0 & & i < _user - > photos . size ( ) & & i ! = _index ) { <nl> - _user - > photos [ i ] - > full - > loadEvenCancelled ( ) ; <nl> + _user - > photos [ i ] - > download ( ) ; <nl> } <nl> } <nl> int32 forgetIndex = _index - delta * 2 ; <nl> void MediaView : : hide ( ) { <nl> a_cOpacity = anim : : fvalue ( 1 , 1 ) ; <nl> QWidget : : hide ( ) ; <nl> stopGif ( ) ; <nl> + <nl> + Notify : : mediaViewHidden ( ) ; <nl> } <nl> <nl> void MediaView : : onMenuDestroy ( QObject * obj ) { <nl> mmm a / Telegram / SourceFiles / settingswidget . cpp <nl> ppp b / Telegram / SourceFiles / settingswidget . cpp <nl> void SettingsInner : : mousePressEvent ( QMouseEvent * e ) { <nl> Ui : : showLayer ( new EditNameTitleBox ( self ( ) ) ) ; <nl> } else if ( QRect ( _left , st : : setTop , st : : setPhotoSize , st : : setPhotoSize ) . contains ( e - > pos ( ) ) ) { <nl> if ( _photoLink ) { <nl> - App : : photo ( self ( ) - > photoId ) - > full - > load ( ) ; <nl> + App : : photo ( self ( ) - > photoId ) - > download ( ) ; <nl> _photoLink - > onClick ( e - > button ( ) ) ; <nl> } else { <nl> onUpdatePhoto ( ) ; <nl> mmm a / Telegram / SourceFiles / structs . cpp <nl> ppp b / Telegram / SourceFiles / structs . cpp <nl> void PhotoData : : automaticLoad ( const HistoryItem * item ) { <nl> full - > automaticLoad ( item ) ; <nl> } <nl> <nl> + void PhotoData : : download ( ) { <nl> + full - > loadEvenCancelled ( ) ; <nl> + notifyLayoutChanged ( ) ; <nl> + } <nl> + <nl> bool PhotoData : : loaded ( ) const { <nl> - return full - > loaded ( ) ; <nl> + bool wasLoading = loading ( ) ; <nl> + if ( full - > loaded ( ) ) { <nl> + if ( wasLoading ) { <nl> + notifyLayoutChanged ( ) ; <nl> + } <nl> + return true ; <nl> + } <nl> + return false ; <nl> } <nl> <nl> bool PhotoData : : loading ( ) const { <nl> bool PhotoData : : displayLoading ( ) const { <nl> <nl> void PhotoData : : cancel ( ) { <nl> full - > cancel ( ) ; <nl> + notifyLayoutChanged ( ) ; <nl> + } <nl> + <nl> + void PhotoData : : notifyLayoutChanged ( ) const { <nl> + const PhotoItems & items ( App : : photoItems ( ) ) ; <nl> + PhotoItems : : const_iterator i = items . constFind ( const_cast < PhotoData * > ( this ) ) ; <nl> + if ( i ! = items . cend ( ) ) { <nl> + for ( HistoryItemsMap : : const_iterator j = i - > cbegin ( ) , e = i - > cend ( ) ; j ! = e ; + + j ) { <nl> + Notify : : historyItemLayoutChanged ( j . key ( ) ) ; <nl> + } <nl> + } <nl> } <nl> <nl> float64 PhotoData : : progress ( ) const { <nl> - return loading ( ) ? full - > progress ( ) : ( uploading ( ) ? ( float64 ( uploadingData - > offset ) / uploadingData - > size ) : ( loaded ( ) ? 
1 : 0 ) ) ; <nl> + if ( uploading ( ) ) { <nl> + if ( uploadingData - > size > 0 ) { <nl> + return float64 ( uploadingData - > offset ) / uploadingData - > size ; <nl> + } <nl> + return 0 ; <nl> + } <nl> + return full - > progress ( ) ; <nl> } <nl> <nl> int32 PhotoData : : loadOffset ( ) const { <nl> void PhotoLink : : onClick ( Qt : : MouseButton button ) const { <nl> } <nl> } <nl> <nl> + void PhotoSaveLink : : onClick ( Qt : : MouseButton button ) const { <nl> + if ( button ! = Qt : : LeftButton ) return ; <nl> + <nl> + PhotoData * data = photo ( ) ; <nl> + if ( ! data - > date ) return ; <nl> + <nl> + data - > download ( ) ; <nl> + } <nl> + <nl> void PhotoCancelLink : : onClick ( Qt : : MouseButton button ) const { <nl> if ( button ! = Qt : : LeftButton ) return ; <nl> <nl> bool VideoData : : displayLoading ( ) const { <nl> } <nl> <nl> float64 VideoData : : progress ( ) const { <nl> - return loading ( ) ? _loader - > currentProgress ( ) : ( uploading ( ) ? ( float64 ( uploadOffset ) / size ) : ( loaded ( ) ? 1 : 0 ) ) ; <nl> + if ( uploading ( ) ) { <nl> + if ( size > 0 ) { <nl> + return float64 ( uploadOffset ) / size ; <nl> + } <nl> + return 0 ; <nl> + } <nl> + return loading ( ) ? _loader - > currentProgress ( ) : ( loaded ( ) ? 1 : 0 ) ; <nl> } <nl> <nl> int32 VideoData : : loadOffset ( ) const { <nl> bool AudioData : : displayLoading ( ) const { <nl> } <nl> <nl> float64 AudioData : : progress ( ) const { <nl> - return loading ( ) ? _loader - > currentProgress ( ) : ( uploading ( ) ? ( float64 ( uploadOffset ) / size ) : ( loaded ( ) ? 1 : 0 ) ) ; <nl> + if ( uploading ( ) ) { <nl> + if ( size > 0 ) { <nl> + return float64 ( uploadOffset ) / size ; <nl> + } <nl> + return 0 ; <nl> + } <nl> + return loading ( ) ? _loader - > currentProgress ( ) : ( loaded ( ) ? 1 : 0 ) ; <nl> } <nl> <nl> int32 AudioData : : loadOffset ( ) const { <nl> bool DocumentData : : displayLoading ( ) const { <nl> } <nl> <nl> float64 DocumentData : : progress ( ) const { <nl> - return loading ( ) ? _loader - > currentProgress ( ) : ( uploading ( ) ? ( float64 ( uploadOffset ) / size ) : ( loaded ( ) ? 1 : 0 ) ) ; <nl> + if ( uploading ( ) ) { <nl> + if ( size > 0 ) { <nl> + return float64 ( uploadOffset ) / size ; <nl> + } <nl> + return 0 ; <nl> + } <nl> + return loading ( ) ? _loader - > currentProgress ( ) : ( loaded ( ) ? 1 : 0 ) ; <nl> } <nl> <nl> int32 DocumentData : : loadOffset ( ) const { <nl> mmm a / Telegram / SourceFiles / structs . h <nl> ppp b / Telegram / SourceFiles / structs . h <nl> class PhotoData { <nl> <nl> void automaticLoad ( const HistoryItem * item ) ; <nl> <nl> + void download ( ) ; <nl> bool loaded ( ) const ; <nl> bool loading ( ) const ; <nl> bool displayLoading ( ) const ; <nl> class PhotoData { <nl> } ; <nl> UploadingData * uploadingData ; <nl> <nl> - / / int32 cachew ; <nl> - / / QPixmap cache ; <nl> + private : <nl> + void notifyLayoutChanged ( ) const ; <nl> + <nl> } ; <nl> <nl> class PhotoLink : public ITextLink { <nl> class PhotoLink : public ITextLink { <nl> <nl> } ; <nl> <nl> + class PhotoSaveLink : public PhotoLink { <nl> + TEXT_LINK_CLASS ( PhotoSaveLink ) <nl> + <nl> + public : <nl> + PhotoSaveLink ( PhotoData * photo , PeerData * peer = 0 ) : PhotoLink ( photo , peer ) { <nl> + } <nl> + void onClick ( Qt : : MouseButton button ) const ; <nl> + <nl> + } ; <nl> + <nl> class PhotoCancelLink : public PhotoLink { <nl> TEXT_LINK_CLASS ( PhotoCancelLink ) <nl> <nl> mmm a / Telegram / SourceFiles / window . cpp <nl> ppp b / Telegram / SourceFiles / window . 
cpp <nl> bool Window : : ui_isLayerShown ( ) { <nl> return ! ! layerBg ; <nl> } <nl> <nl> + bool Window : : ui_isMediaViewShown ( ) { <nl> + return _mediaView & & ! _mediaView - > isHidden ( ) ; <nl> + } <nl> + <nl> void Window : : notify_clipReinit ( ClipReader * reader ) { <nl> if ( _mediaView & & ! _mediaView - > isHidden ( ) ) { <nl> _mediaView - > notify_clipReinit ( reader ) ; <nl> mmm a / Telegram / SourceFiles / window . h <nl> ppp b / Telegram / SourceFiles / window . h <nl> class Window : public PsMainWindow { <nl> void ui_clipRedraw ( ClipReader * reader ) ; <nl> void ui_showLayer ( LayeredWidget * box , ShowLayerOptions options ) ; <nl> bool ui_isLayerShown ( ) ; <nl> + bool ui_isMediaViewShown ( ) ; <nl> <nl> void notify_clipReinit ( ClipReader * reader ) ; <nl> <nl>
Pausing gifs when mediaview is opened, improved photos handling - download on click if autoload disabled
telegramdesktop/tdesktop
a5622cfe3bff7f531ee3cec31353711b0d441881
2015-12-25T13:09:14Z
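The tdesktop commit above repeats one refactoring across PhotoData, VideoData, AudioData and DocumentData: progress() now reports the upload ratio only while uploading (guarding a zero size) and otherwise defers to the downloader. The following is a minimal standalone sketch of that pattern, using simplified stand-in types rather than the real tdesktop classes.

```cpp
#include <cstdint>
#include <iostream>

// Simplified stand-ins for the real tdesktop types; only the fields the
// progress computation needs are modelled here.
using float64 = double;

struct UploadState {
    int32_t offset = 0;  // bytes already uploaded
    int32_t size = 0;    // total upload size
};

// Mirrors the reworked progress() logic from the diff: while uploading,
// return offset/size (0 when the size is unknown); otherwise fall back to
// whatever the downloader reports.
float64 mediaProgress(bool uploading, const UploadState &upload, float64 loaderProgress) {
    if (uploading) {
        return (upload.size > 0) ? float64(upload.offset) / upload.size : 0.;
    }
    return loaderProgress;
}

int main() {
    std::cout << mediaProgress(true, {512, 2048}, 0.) << '\n';  // 0.25 while uploading
    std::cout << mediaProgress(false, {}, 0.9) << '\n';         // 0.9 reported by the loader
}
```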
mmm a / source / common / protobuf / utility . cc <nl> ppp b / source / common / protobuf / utility . cc <nl> uint64_t convertPercent ( double percent , uint64_t max_value ) { <nl> return max_value * ( percent / 100 . 0 ) ; <nl> } <nl> <nl> + bool evaluateFractionalPercent ( envoy : : type : : FractionalPercent percent , uint64_t random_value ) { <nl> + return random_value % fractionalPercentDenominatorToInt ( percent . denominator ( ) ) < <nl> + percent . numerator ( ) ; <nl> + } <nl> + <nl> uint64_t fractionalPercentDenominatorToInt ( <nl> const envoy : : type : : FractionalPercent : : DenominatorType & denominator ) { <nl> switch ( denominator ) { <nl> mmm a / source / common / protobuf / utility . h <nl> ppp b / source / common / protobuf / utility . h <nl> namespace ProtobufPercentHelper { <nl> uint64_t checkAndReturnDefault ( uint64_t default_value , uint64_t max_value ) ; <nl> uint64_t convertPercent ( double percent , uint64_t max_value ) ; <nl> <nl> + / * * <nl> + * Given a fractional percent chance of a given event occurring , evaluate to a yes / no decision <nl> + * based on a provided random value . <nl> + * @ param percent the chance of a given event happening . <nl> + * @ param random_value supplies a numerical value to use to evaluate the event . <nl> + * @ return bool decision about whether the event should occur . <nl> + * / <nl> + bool evaluateFractionalPercent ( envoy : : type : : FractionalPercent percent , uint64_t random_value ) ; <nl> + <nl> / * * <nl> * Convert a fractional percent denominator enum into an integer . <nl> * @ param denominator supplies denominator to convert . <nl> mmm a / source / common / runtime / runtime_impl . cc <nl> ppp b / source / common / runtime / runtime_impl . cc <nl> bool SnapshotImpl : : featureEnabled ( const std : : string & key , <nl> const envoy : : type : : FractionalPercent & default_value , <nl> uint64_t random_value ) const { <nl> const auto & entry = values_ . find ( key ) ; <nl> - uint64_t numerator , denominator ; <nl> + envoy : : type : : FractionalPercent percent ; <nl> if ( entry ! = values_ . end ( ) & & entry - > second . fractional_percent_value_ . has_value ( ) ) { <nl> - numerator = entry - > second . fractional_percent_value_ - > numerator ( ) ; <nl> - denominator = ProtobufPercentHelper : : fractionalPercentDenominatorToInt ( <nl> - entry - > second . fractional_percent_value_ - > denominator ( ) ) ; <nl> + percent = entry - > second . fractional_percent_value_ . value ( ) ; <nl> } else if ( entry ! = values_ . end ( ) & & entry - > second . uint_value_ . has_value ( ) ) { <nl> - / / The runtime value must have been specified as an integer rather than a fractional percent <nl> - / / proto . To preserve legacy semantics , we ' ll assume this represents a percentage . <nl> - numerator = entry - > second . uint_value_ . value ( ) ; <nl> - denominator = 100 ; <nl> + / / Check for > 100 because the runtime value is assumed to be specified as <nl> + / / an integer , and it also ensures that truncating the uint64_t runtime <nl> + / / value into a uint32_t percent numerator later is safe <nl> + if ( entry - > second . uint_value_ . value ( ) > 100 ) { <nl> + return true ; <nl> + } <nl> + <nl> + / / The runtime value was specified as an integer rather than a fractional <nl> + / / percent proto . To preserve legacy semantics , we treat it as a percentage <nl> + / / ( i . e . denominator of 100 ) . <nl> + percent . set_numerator ( entry - > second . uint_value_ . value ( ) ) ; <nl> + percent . 
set_denominator ( envoy : : type : : FractionalPercent : : HUNDRED ) ; <nl> } else { <nl> - numerator = default_value . numerator ( ) ; <nl> - denominator = <nl> - ProtobufPercentHelper : : fractionalPercentDenominatorToInt ( default_value . denominator ( ) ) ; <nl> + percent = default_value ; <nl> } <nl> <nl> - return random_value % denominator < numerator ; <nl> + return ProtobufPercentHelper : : evaluateFractionalPercent ( percent , random_value ) ; <nl> } <nl> <nl> uint64_t SnapshotImpl : : getInteger ( const std : : string & key , uint64_t default_value ) const { <nl> mmm a / test / common / protobuf / utility_test . cc <nl> ppp b / test / common / protobuf / utility_test . cc <nl> TEST_F ( ProtobufUtilityTest , convertPercentNaN ) { <nl> EnvoyException ) ; <nl> } <nl> <nl> + namespace ProtobufPercentHelper { <nl> + <nl> + TEST_F ( ProtobufUtilityTest , evaluateFractionalPercent ) { <nl> + { / / 0 / 100 ( default ) <nl> + envoy : : type : : FractionalPercent percent ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 0 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 50 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 100 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 1000 ) ) ; <nl> + } <nl> + { / / 5 / 100 <nl> + envoy : : type : : FractionalPercent percent ; <nl> + percent . set_numerator ( 5 ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 0 ) ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 4 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 5 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 50 ) ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 100 ) ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 104 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 105 ) ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 204 ) ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 1000 ) ) ; <nl> + } <nl> + { / / 75 / 100 <nl> + envoy : : type : : FractionalPercent percent ; <nl> + percent . set_numerator ( 75 ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 0 ) ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 4 ) ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 5 ) ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 74 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 80 ) ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 100 ) ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 104 ) ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 105 ) ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 200 ) ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 274 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 280 ) ) ; <nl> + } <nl> + { / / 5 / 10000 <nl> + envoy : : type : : FractionalPercent percent ; <nl> + percent . set_denominator ( envoy : : type : : FractionalPercent : : TEN_THOUSAND ) ; <nl> + percent . 
set_numerator ( 5 ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 0 ) ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 4 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 5 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 50 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 100 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 9000 ) ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 10000 ) ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 10004 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 10005 ) ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 20004 ) ) ; <nl> + } <nl> + { / / 5 / MILLION <nl> + envoy : : type : : FractionalPercent percent ; <nl> + percent . set_denominator ( envoy : : type : : FractionalPercent : : MILLION ) ; <nl> + percent . set_numerator ( 5 ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 0 ) ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 4 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 5 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 50 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 100 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 9000 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 10000 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 10004 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 10005 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 900005 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 900000 ) ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 1000000 ) ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 1000004 ) ) ; <nl> + EXPECT_FALSE ( evaluateFractionalPercent ( percent , 1000005 ) ) ; <nl> + EXPECT_TRUE ( evaluateFractionalPercent ( percent , 2000004 ) ) ; <nl> + } <nl> + } <nl> + <nl> + } / / namespace ProtobufPercentHelper <nl> + <nl> TEST_F ( ProtobufUtilityTest , RepeatedPtrUtilDebugString ) { <nl> Protobuf : : RepeatedPtrField < ProtobufWkt : : UInt32Value > repeated ; <nl> EXPECT_EQ ( " [ ] " , RepeatedPtrUtil : : debugString ( repeated ) ) ; <nl> mmm a / test / common / runtime / runtime_impl_test . cc <nl> ppp b / test / common / runtime / runtime_impl_test . cc <nl> TEST_F ( DiskBackedLoaderImplTest , OverrideFolderDoesNotExist ) { <nl> EXPECT_EQ ( " hello " , loader - > snapshot ( ) . get ( " file1 " ) ) ; <nl> } <nl> <nl> + TEST_F ( DiskBackedLoaderImplTest , PercentHandling ) { <nl> + setup ( ) ; <nl> + run ( " test / common / runtime / test_data / current " , " envoy_override " ) ; <nl> + <nl> + envoy : : type : : FractionalPercent default_value ; <nl> + <nl> + / / Smoke test integer value of 0 , should be interpreted as 0 % <nl> + { <nl> + loader - > mergeValues ( { { " foo " , " 0 " } } ) ; <nl> + <nl> + EXPECT_FALSE ( loader - > snapshot ( ) . featureEnabled ( " foo " , default_value , 0 ) ) ; <nl> + EXPECT_FALSE ( loader - > snapshot ( ) . featureEnabled ( " foo " , default_value , 5 ) ) ; <nl> + } <nl> + <nl> + / / Smoke test integer value of 5 , should be interpreted as 5 % <nl> + { <nl> + loader - > mergeValues ( { { " foo " , " 5 " } } ) ; <nl> + EXPECT_TRUE ( loader - > snapshot ( ) . featureEnabled ( " foo " , default_value , 0 ) ) ; <nl> + EXPECT_TRUE ( loader - > snapshot ( ) . 
featureEnabled ( " foo " , default_value , 4 ) ) ; <nl> + EXPECT_FALSE ( loader - > snapshot ( ) . featureEnabled ( " foo " , default_value , 5 ) ) ; <nl> + EXPECT_TRUE ( loader - > snapshot ( ) . featureEnabled ( " foo " , default_value , 100 ) ) ; <nl> + } <nl> + <nl> + / / Verify uint64 - > uint32 conversion by using a runtime value with all 0s in <nl> + / / the bottom 32 bits . If it were to be naively treated as a uint32_t then it <nl> + / / would appear as 0 % , but it should be 100 % because we assume the <nl> + / / denominator is 100 <nl> + { <nl> + / / NOTE : high_value has to have the property that the lowest 32 bits % 100 <nl> + / / is less than 100 . If it ' s greater than 100 the test will pass whether or <nl> + / / not the uint32 conversion is handled properly . <nl> + uint64_t high_value = 1UL < < 60 ; <nl> + std : : string high_value_str = std : : to_string ( high_value ) ; <nl> + loader - > mergeValues ( { { " foo " , high_value_str } } ) ; <nl> + EXPECT_TRUE ( loader - > snapshot ( ) . featureEnabled ( " foo " , default_value , 0 ) ) ; <nl> + EXPECT_TRUE ( loader - > snapshot ( ) . featureEnabled ( " foo " , default_value , 50 ) ) ; <nl> + EXPECT_TRUE ( loader - > snapshot ( ) . featureEnabled ( " foo " , default_value , 100 ) ) ; <nl> + EXPECT_TRUE ( loader - > snapshot ( ) . featureEnabled ( " foo " , default_value , 12389 ) ) ; <nl> + EXPECT_TRUE ( loader - > snapshot ( ) . featureEnabled ( " foo " , default_value , 23859235 ) ) ; <nl> + } <nl> + } <nl> + <nl> void testNewOverrides ( Loader & loader , Stats : : Store & store ) { <nl> / / New string <nl> loader . mergeValues ( { { " foo " , " bar " } } ) ; <nl>
Add helper to ProtobufPercentHelper for evaluating FractionalPercent()
envoyproxy/envoy
851859b7a214bad1c66043e49dff2f04b417608b
2019-02-20T19:04:39Z
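The helper introduced in the Envoy diff above reduces to a single modulo comparison. Below is a minimal sketch of that check; FractionalPercentSketch is a hypothetical stand-in for the envoy::type::FractionalPercent proto, whose denominator is really an enum converted by fractionalPercentDenominatorToInt().

```cpp
#include <cstdint>
#include <iostream>

// Hypothetical stand-in for envoy::type::FractionalPercent; the real message
// stores the denominator as an enum (HUNDRED, TEN_THOUSAND, MILLION).
struct FractionalPercentSketch {
    uint32_t numerator = 0;
    uint64_t denominator = 100;
};

// The event "fires" when the random value, reduced modulo the denominator,
// falls below the numerator, which is the comparison evaluateFractionalPercent()
// performs in the diff above.
bool evaluateSketch(const FractionalPercentSketch &percent, uint64_t random_value) {
    return (random_value % percent.denominator) < percent.numerator;
}

int main() {
    FractionalPercentSketch five_percent{5, 100};
    std::cout << evaluateSketch(five_percent, 4) << '\n';    // 1: 4 % 100 = 4, which is < 5
    std::cout << evaluateSketch(five_percent, 105) << '\n';  // 0: 105 % 100 = 5, not < 5
}
```

The runtime path in the diff additionally treats a legacy integer value above 100 as always-on before building the percent message, which also keeps the later uint64-to-uint32 numerator truncation safe.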
mmm a / tools / heap - stats / trace - file - reader . js <nl> ppp b / tools / heap - stats / trace - file - reader . js <nl> class TraceFileReader extends HTMLElement { <nl> const return_data = ( result . includes ( ' V8 . GC_Objects_Stats ' ) ) ? <nl> this . createModelFromChromeTraceFile ( contents ) : <nl> this . createModelFromV8TraceFile ( contents ) ; <nl> + this . extendAndSanitizeModel ( return_data ) ; <nl> this . updateLabel ( ' Finished loading \ ' ' + file . name + ' \ ' . ' ) ; <nl> this . dispatchEvent ( new CustomEvent ( <nl> ' change ' , { bubbles : true , composed : true , detail : return_data } ) ) ; <nl> } <nl> <nl> - createOrUpdateEntryIfNeeded ( data , keys , entry ) { <nl> + createOrUpdateEntryIfNeeded ( data , entry ) { <nl> console . assert ( entry . isolate , ' entry should have an isolate ' ) ; <nl> - if ( ! ( entry . isolate in keys ) ) { <nl> - keys [ entry . isolate ] = new Set ( ) ; <nl> - } <nl> if ( ! ( entry . isolate in data ) ) { <nl> data [ entry . isolate ] = { <nl> non_empty_instance_types : new Set ( ) , <nl> class TraceFileReader extends HTMLElement { <nl> } <nl> } <nl> <nl> - createDatasetIfNeeded ( data , keys , entry , data_set ) { <nl> + createDatasetIfNeeded ( data , entry , data_set ) { <nl> if ( ! ( data_set in data [ entry . isolate ] . gcs [ entry . id ] ) ) { <nl> data [ entry . isolate ] . gcs [ entry . id ] [ data_set ] = { <nl> instance_type_data : { } , <nl> class TraceFileReader extends HTMLElement { <nl> } <nl> } <nl> <nl> - addInstanceTypeData ( <nl> - data , keys , isolate , gc_id , data_set , instance_type , entry ) { <nl> - keys [ isolate ] . add ( data_set ) ; <nl> + addInstanceTypeData ( data , isolate , gc_id , data_set , instance_type , entry ) { <nl> data [ isolate ] . gcs [ gc_id ] [ data_set ] . instance_type_data [ instance_type ] = { <nl> overall : entry . overall , <nl> count : entry . count , <nl> class TraceFileReader extends HTMLElement { <nl> } <nl> } <nl> <nl> - extendAndSanitizeModel ( data , keys ) { <nl> + extendAndSanitizeModel ( data ) { <nl> const checkNonNegativeProperty = ( obj , property ) = > { <nl> console . assert ( obj [ property ] > = 0 , ' negative property ' , obj , property ) ; <nl> } ; <nl> <nl> for ( const isolate of Object . keys ( data ) ) { <nl> - for ( const gc of Object . keys ( data [ isolate ] . gcs ) ) { <nl> - for ( const data_set_key of keys [ isolate ] ) { <nl> + const isolate_data = data [ isolate ] ; <nl> + for ( const gc of Object . keys ( isolate_data . gcs ) ) { <nl> + const gc_data = isolate_data . gcs [ gc ] ; <nl> + for ( const data_set_key of isolate_data . data_sets ) { <nl> const data_set = data [ isolate ] . gcs [ gc ] [ data_set_key ] ; <nl> / / Create a ranked instance type array that sorts instance types by <nl> / / memory size ( overall ) . <nl> class TraceFileReader extends HTMLElement { <nl> } <nl> <nl> createModelFromChromeTraceFile ( contents ) { <nl> - console . log ( ' Processing log as chrome trace file . ' ) ; <nl> - const data = Object . create ( null ) ; / / Final data container . <nl> - const keys = Object . create ( null ) ; / / Collecting ' keys ' per isolate . <nl> + / / Trace files support two formats . <nl> + / / { traceEvents : [ data ] } <nl> + const kObjectTraceFile = { <nl> + name : ' object ' , <nl> + endToken : ' ] } ' , <nl> + getDataArray : o = > o . 
traceEvents <nl> + } ; <nl> + / / [ data ] <nl> + const kArrayTraceFile = { <nl> + name : ' array ' , <nl> + endToken : ' ] ' , <nl> + getDataArray : o = > o <nl> + } ; <nl> + const handler = <nl> + ( contents [ 0 ] [ 0 ] = = = ' { ' ) ? kObjectTraceFile : kArrayTraceFile ; <nl> + console . log ( ` Processing log as chrome trace file ( $ { handler . name } ) . ` ) ; <nl> <nl> / / Pop last line in log as it might be broken . <nl> contents . pop ( ) ; <nl> / / Remove trailing comma . <nl> contents [ contents . length - 1 ] = contents [ contents . length - 1 ] . slice ( 0 , - 1 ) ; <nl> / / Terminate JSON . <nl> - const sanitized_contents = [ . . . contents , ' ] } ' ] . join ( ' ' ) ; <nl> + const sanitized_contents = [ . . . contents , handler . endToken ] . join ( ' ' ) ; <nl> + <nl> + const data = Object . create ( null ) ; / / Final data container . <nl> try { <nl> const raw_data = JSON . parse ( sanitized_contents ) ; <nl> - const objects_stats_data = <nl> - raw_data . traceEvents . filter ( e = > e . name = = ' V8 . GC_Objects_Stats ' ) ; <nl> - objects_stats_data . forEach ( trace_data = > { <nl> - const actual_data = trace_data . args ; <nl> - const data_sets = new Set ( Object . keys ( actual_data ) ) ; <nl> - Object . keys ( actual_data ) . forEach ( data_set = > { <nl> - const string_entry = actual_data [ data_set ] ; <nl> - try { <nl> - const entry = JSON . parse ( string_entry ) ; <nl> - this . createOrUpdateEntryIfNeeded ( data , keys , entry ) ; <nl> - this . createDatasetIfNeeded ( data , keys , entry , data_set ) ; <nl> - const isolate = entry . isolate ; <nl> - const time = entry . time ; <nl> - const gc_id = entry . id ; <nl> - data [ isolate ] . gcs [ gc_id ] . time = time ; <nl> - data [ isolate ] . gcs [ gc_id ] [ data_set ] . bucket_sizes = <nl> - entry . bucket_sizes ; <nl> - for ( let [ instance_type , value ] of Object . entries ( <nl> - entry . type_data ) ) { <nl> - / / Trace file format uses markers that do not have actual <nl> - / / properties . <nl> - if ( ! ( ' overall ' in value ) ) continue ; <nl> - this . addInstanceTypeData ( <nl> - data , keys , isolate , gc_id , data_set , instance_type , value ) ; <nl> - } <nl> - } catch ( e ) { <nl> - console . log ( ' Unable to parse data set entry ' , e ) ; <nl> - } <nl> - } ) ; <nl> - } ) ; <nl> + const raw_array_data = handler . getDataArray ( raw_data ) ; <nl> + raw_array_data . filter ( e = > e . name = = = ' V8 . GC_Objects_Stats ' ) <nl> + . forEach ( trace_data = > { <nl> + const actual_data = trace_data . args ; <nl> + const data_sets = new Set ( Object . keys ( actual_data ) ) ; <nl> + Object . keys ( actual_data ) . forEach ( data_set = > { <nl> + const string_entry = actual_data [ data_set ] ; <nl> + try { <nl> + const entry = JSON . parse ( string_entry ) ; <nl> + this . createOrUpdateEntryIfNeeded ( data , entry ) ; <nl> + this . createDatasetIfNeeded ( data , entry , data_set ) ; <nl> + const isolate = entry . isolate ; <nl> + const time = entry . time ; <nl> + const gc_id = entry . id ; <nl> + data [ isolate ] . gcs [ gc_id ] . time = time ; <nl> + data [ isolate ] . gcs [ gc_id ] [ data_set ] . bucket_sizes = <nl> + entry . bucket_sizes ; <nl> + for ( let [ instance_type , value ] of Object . entries ( <nl> + entry . type_data ) ) { <nl> + / / Trace file format uses markers that do not have actual <nl> + / / properties . <nl> + if ( ! ( ' overall ' in value ) ) continue ; <nl> + this . 
addInstanceTypeData ( <nl> + data , isolate , gc_id , data_set , instance_type , value ) ; <nl> + } <nl> + } catch ( e ) { <nl> + console . log ( ' Unable to parse data set entry ' , e ) ; <nl> + } <nl> + } ) ; <nl> + } ) ; <nl> } catch ( e ) { <nl> console . error ( ' Unable to parse chrome trace file . ' , e ) ; <nl> } <nl> - this . extendAndSanitizeModel ( data , keys ) ; <nl> return data ; <nl> } <nl> <nl> class TraceFileReader extends HTMLElement { <nl> } ) ; <nl> <nl> const data = Object . create ( null ) ; / / Final data container . <nl> - const keys = Object . create ( null ) ; / / Collecting ' keys ' per isolate . <nl> - <nl> for ( var entry of contents ) { <nl> if ( entry = = = null | | entry . type = = = undefined ) { <nl> continue ; <nl> } <nl> if ( entry . type = = = ' zone ' ) { <nl> - this . createOrUpdateEntryIfNeeded ( data , keys , entry ) ; <nl> + this . createOrUpdateEntryIfNeeded ( data , entry ) ; <nl> const stacktrace = ( ' stacktrace ' in entry ) ? entry . stacktrace : [ ] ; <nl> data [ entry . isolate ] . samples . zone [ entry . time ] = { <nl> allocated : entry . allocated , <nl> class TraceFileReader extends HTMLElement { <nl> } ; <nl> } else if ( <nl> entry . type = = = ' zonecreation ' | | entry . type = = = ' zonedestruction ' ) { <nl> - this . createOrUpdateEntryIfNeeded ( data , keys , entry ) ; <nl> + this . createOrUpdateEntryIfNeeded ( data , entry ) ; <nl> data [ entry . isolate ] . zonetags . push ( <nl> Object . assign ( { opening : entry . type = = = ' zonecreation ' } , entry ) ) ; <nl> } else if ( entry . type = = = ' gc_descriptor ' ) { <nl> - this . createOrUpdateEntryIfNeeded ( data , keys , entry ) ; <nl> + this . createOrUpdateEntryIfNeeded ( data , entry ) ; <nl> data [ entry . isolate ] . gcs [ entry . id ] . time = entry . time ; <nl> if ( ' zone ' in entry ) <nl> data [ entry . isolate ] . gcs [ entry . id ] . malloced = entry . zone ; <nl> } else if ( entry . type = = = ' instance_type_data ' ) { <nl> if ( entry . id in data [ entry . isolate ] . gcs ) { <nl> - this . createOrUpdateEntryIfNeeded ( data , keys , entry ) ; <nl> - this . createDatasetIfNeeded ( data , keys , entry , entry . key ) ; <nl> + this . createOrUpdateEntryIfNeeded ( data , entry ) ; <nl> + this . createDatasetIfNeeded ( data , entry , entry . key ) ; <nl> this . addInstanceTypeData ( <nl> - data , keys , entry . isolate , entry . id , entry . key , <nl> + data , entry . isolate , entry . id , entry . key , <nl> entry . instance_type_name , entry ) ; <nl> } <nl> } else if ( entry . type = = = ' bucket_sizes ' ) { <nl> if ( entry . id in data [ entry . isolate ] . gcs ) { <nl> - this . createOrUpdateEntryIfNeeded ( data , keys , entry ) ; <nl> - this . createDatasetIfNeeded ( data , keys , entry , entry . key ) ; <nl> + this . createOrUpdateEntryIfNeeded ( data , entry ) ; <nl> + this . createDatasetIfNeeded ( data , entry , entry . key ) ; <nl> data [ entry . isolate ] . gcs [ entry . id ] [ entry . key ] . bucket_sizes = <nl> entry . sizes ; <nl> } <nl> class TraceFileReader extends HTMLElement { <nl> console . log ( ' Unknown entry type : ' + entry . type ) ; <nl> } <nl> } <nl> - this . extendAndSanitizeModel ( data , keys ) ; <nl> return data ; <nl> } <nl> } <nl>
[object-stats] Visualizer: Allow loading trace file format using array
v8/v8
093cfad6293869598cc6829d2f81c6870f7c37a6
2018-02-01T15:22:29Z
mmm a / tensorflow / contrib / lite / experimental / c / c_api . cc <nl> ppp b / tensorflow / contrib / lite / experimental / c / c_api . cc <nl> void * TFL_TensorData ( const TFL_Tensor * tensor ) { <nl> return static_cast < void * > ( tensor - > data . raw ) ; <nl> } <nl> <nl> + const char * TFL_TensorName ( const TFL_Tensor * tensor ) { return tensor - > name ; } <nl> + <nl> TFL_Status TFL_TensorCopyFromBuffer ( TFL_Tensor * tensor , const void * input_data , <nl> size_t input_data_size ) { <nl> if ( tensor - > bytes ! = input_data_size ) { <nl> mmm a / tensorflow / contrib / lite / experimental / c / c_api . h <nl> ppp b / tensorflow / contrib / lite / experimental / c / c_api . h <nl> TFL_CAPI_EXPORT extern int32_t TFL_InterpreterGetOutputTensorCount ( <nl> <nl> / / Returns the tensor associated with the output index . <nl> / / REQUIRES : 0 < = input_index < TFL_InterpreterGetOutputTensorCount ( tensor ) <nl> + / / <nl> + / / NOTE : The shape and underlying data buffer for output tensors may be not <nl> + / / be available until after the output tensor has been both sized and allocated . <nl> + / / In general , best practice is to interact with the output tensor * after * <nl> + / / calling TFL_InterpreterInvoke ( ) . <nl> TFL_CAPI_EXPORT extern const TFL_Tensor * TFL_InterpreterGetOutputTensor ( <nl> const TFL_Interpreter * interpreter , int32_t output_index ) ; <nl> <nl> TFL_CAPI_EXPORT extern size_t TFL_TensorByteSize ( const TFL_Tensor * tensor ) ; <nl> <nl> / / Returns a pointer to the underlying data buffer . <nl> / / <nl> - / / Note : The result may be null if tensors have not yet been allocated , e . g . , <nl> + / / NOTE : The result may be null if tensors have not yet been allocated , e . g . , <nl> / / if the Tensor has just been created or resized and ` TFL_AllocateTensors ( ) ` <nl> / / has yet to be called , or if the output tensor is dynamically sized and the <nl> / / interpreter hasn ' t been invoked . <nl> TFL_CAPI_EXPORT extern void * TFL_TensorData ( const TFL_Tensor * tensor ) ; <nl> <nl> + / / Returns the ( null - terminated ) name of the tensor . <nl> + TFL_CAPI_EXPORT extern const char * TFL_TensorName ( const TFL_Tensor * tensor ) ; <nl> + <nl> / / Copies from the provided input buffer into the tensor ' s buffer . <nl> / / REQUIRES : input_data_size = = TFL_TensorByteSize ( tensor ) <nl> TFL_CAPI_EXPORT extern TFL_Status TFL_TensorCopyFromBuffer ( <nl> mmm a / tensorflow / contrib / lite / experimental / c / c_api_test . cc <nl> ppp b / tensorflow / contrib / lite / experimental / c / c_api_test . cc <nl> TEST ( CApiSimple , Smoke ) { <nl> EXPECT_EQ ( TFL_TensorNumDims ( input_tensor ) , 1 ) ; <nl> EXPECT_EQ ( TFL_TensorDim ( input_tensor , 0 ) , 2 ) ; <nl> EXPECT_EQ ( TFL_TensorByteSize ( input_tensor ) , sizeof ( float ) * 2 ) ; <nl> + EXPECT_NE ( TFL_TensorData ( input_tensor ) , nullptr ) ; <nl> + EXPECT_STREQ ( TFL_TensorName ( input_tensor ) , " input " ) ; <nl> <nl> std : : array < float , 2 > input = { 1 . f , 3 . f } ; <nl> ASSERT_EQ ( TFL_TensorCopyFromBuffer ( input_tensor , input . 
data ( ) , <nl> TEST ( CApiSimple , Smoke ) { <nl> EXPECT_EQ ( TFL_TensorNumDims ( output_tensor ) , 1 ) ; <nl> EXPECT_EQ ( TFL_TensorDim ( output_tensor , 0 ) , 2 ) ; <nl> EXPECT_EQ ( TFL_TensorByteSize ( output_tensor ) , sizeof ( float ) * 2 ) ; <nl> + EXPECT_NE ( TFL_TensorData ( output_tensor ) , nullptr ) ; <nl> + EXPECT_STREQ ( TFL_TensorName ( output_tensor ) , " output " ) ; <nl> <nl> std : : array < float , 2 > output ; <nl> ASSERT_EQ ( TFL_TensorCopyToBuffer ( output_tensor , output . data ( ) , <nl>
Add TFL_TensorName() to C API
tensorflow/tensorflow
dfd6aba381a7867906ba2d173c1720bb38dec6a8
2018-09-07T18:07:26Z
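A hedged usage sketch for the new accessor: it assumes an interpreter has already been constructed, sized and allocated elsewhere (creation and teardown are omitted), and it uses only C API calls that appear in the diff; the header path is likewise taken from the diff.

```cpp
#include <cstdint>
#include <cstdio>

#include "tensorflow/contrib/lite/experimental/c/c_api.h"  // path as in the diff

// Prints the name and byte size of every output tensor. Per the note added in
// the diff, output tensors are best inspected after TFL_InterpreterInvoke().
void PrintOutputTensors(const TFL_Interpreter* interpreter) {
  const int32_t count = TFL_InterpreterGetOutputTensorCount(interpreter);
  for (int32_t i = 0; i < count; ++i) {
    const TFL_Tensor* tensor = TFL_InterpreterGetOutputTensor(interpreter, i);
    std::printf("output %d: %s (%zu bytes)\n", static_cast<int>(i),
                TFL_TensorName(tensor),        // accessor added by this change
                TFL_TensorByteSize(tensor));
  }
}
```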
mmm a / src / inspector / js_protocol . json <nl> ppp b / src / inspector / js_protocol . json <nl> <nl> } <nl> ] <nl> } , <nl> + { <nl> + " name " : " setMaxCallStackSizeToCapture " , <nl> + " experimental " : true , <nl> + " parameters " : [ <nl> + { <nl> + " name " : " size " , <nl> + " type " : " integer " <nl> + } <nl> + ] <nl> + } , <nl> { <nl> " name " : " terminateExecution " , <nl> " description " : " Terminate current or next JavaScript execution . \ nWill cancel the termination when the outer - most script execution ends . " , <nl> mmm a / src / inspector / js_protocol . pdl <nl> ppp b / src / inspector / js_protocol . pdl <nl> domain Runtime <nl> parameters <nl> boolean enabled <nl> <nl> + experimental command setMaxCallStackSizeToCapture <nl> + parameters <nl> + integer size <nl> + <nl> # Terminate current or next JavaScript execution . <nl> # Will cancel the termination when the outer - most script execution ends . <nl> experimental command terminateExecution <nl> mmm a / src / inspector / v8 - runtime - agent - impl . cc <nl> ppp b / src / inspector / v8 - runtime - agent - impl . cc <nl> Response V8RuntimeAgentImpl : : setCustomObjectFormatterEnabled ( bool enabled ) { <nl> return Response : : OK ( ) ; <nl> } <nl> <nl> + Response V8RuntimeAgentImpl : : setMaxCallStackSizeToCapture ( int size ) { <nl> + if ( size < 0 ) { <nl> + return Response : : Error ( " maxCallStackSizeToCapture should be non - negative " ) ; <nl> + } <nl> + V8StackTraceImpl : : maxCallStackSizeToCapture = size ; <nl> + return Response : : OK ( ) ; <nl> + } <nl> + <nl> Response V8RuntimeAgentImpl : : discardConsoleEntries ( ) { <nl> V8ConsoleMessageStorage * storage = <nl> m_inspector - > ensureConsoleMessageStorage ( m_session - > contextGroupId ( ) ) ; <nl> mmm a / src / inspector / v8 - runtime - agent - impl . h <nl> ppp b / src / inspector / v8 - runtime - agent - impl . h <nl> class V8RuntimeAgentImpl : public protocol : : Runtime : : Backend { <nl> Response releaseObjectGroup ( const String16 & objectGroup ) override ; <nl> Response runIfWaitingForDebugger ( ) override ; <nl> Response setCustomObjectFormatterEnabled ( bool ) override ; <nl> + Response setMaxCallStackSizeToCapture ( int ) override ; <nl> Response discardConsoleEntries ( ) override ; <nl> Response compileScript ( const String16 & expression , const String16 & sourceURL , <nl> bool persistScript , Maybe < int > executionContextId , <nl> mmm a / src / inspector / v8 - stack - trace - impl . cc <nl> ppp b / src / inspector / v8 - stack - trace - impl . cc <nl> <nl> <nl> namespace v8_inspector { <nl> <nl> + int V8StackTraceImpl : : maxCallStackSizeToCapture = 200 ; <nl> + <nl> namespace { <nl> <nl> static const v8 : : StackTrace : : StackTraceOptions stackTraceOptions = <nl> std : : unique_ptr < V8StackTrace > V8StackTraceImpl : : clone ( ) { <nl> } <nl> <nl> StringView V8StackTraceImpl : : firstNonEmptySourceURL ( ) const { <nl> - for ( size_t i = 0 ; i < m_frames . size ( ) ; + + i ) { <nl> - if ( m_frames [ i ] - > sourceURL ( ) . length ( ) ) { <nl> - return toStringView ( m_frames [ i ] - > sourceURL ( ) ) ; <nl> + StackFrameIterator current ( this ) ; <nl> + while ( ! current . done ( ) ) { <nl> + if ( current . frame ( ) - > sourceURL ( ) . length ( ) ) { <nl> + return toStringView ( current . frame ( ) - > sourceURL ( ) ) ; <nl> } <nl> + current . next ( ) ; <nl> } <nl> return StringView ( ) ; <nl> } <nl> mmm a / src / inspector / v8 - stack - trace - impl . h <nl> ppp b / src / inspector / v8 - stack - trace - impl . 
h <nl> class V8StackTraceImpl : public V8StackTrace { <nl> public : <nl> static void setCaptureStackTraceForUncaughtExceptions ( v8 : : Isolate * , <nl> bool capture ) ; <nl> - static const int maxCallStackSizeToCapture = 200 ; <nl> + static int maxCallStackSizeToCapture ; <nl> static std : : unique_ptr < V8StackTraceImpl > create ( V8Debugger * , <nl> int contextGroupId , <nl> v8 : : Local < v8 : : StackTrace > , <nl> new file mode 100644 <nl> index 00000000000 . . f628d80e1fe <nl> mmm / dev / null <nl> ppp b / test / inspector / runtime / set - max - call - stack - size - expected . txt <nl> <nl> + Checks Runtime . setMaxCallStackSizeToCapture . <nl> + Test with max size 0 . <nl> + { <nl> + args : [ <nl> + [ 0 ] : { <nl> + type : string <nl> + value : Nested call . <nl> + } <nl> + ] <nl> + executionContextId : < executionContextId > <nl> + timestamp : < timestamp > <nl> + type : log <nl> + } <nl> + Test with max size 1 . <nl> + { <nl> + args : [ <nl> + [ 0 ] : { <nl> + type : string <nl> + value : Nested call . <nl> + } <nl> + ] <nl> + executionContextId : < executionContextId > <nl> + stackTrace : { <nl> + callFrames : [ <nl> + [ 0 ] : { <nl> + columnNumber : 10 <nl> + functionName : bar <nl> + lineNumber : 2 <nl> + scriptId : < scriptId > <nl> + url : test . js <nl> + } <nl> + ] <nl> + parent : { <nl> + callFrames : [ <nl> + [ 0 ] : { <nl> + columnNumber : 2 <nl> + functionName : test <nl> + lineNumber : 10 <nl> + scriptId : < scriptId > <nl> + url : test . js <nl> + } <nl> + ] <nl> + description : setTimeout <nl> + } <nl> + } <nl> + timestamp : < timestamp > <nl> + type : log <nl> + } <nl> + Test with max size 2 . <nl> + { <nl> + args : [ <nl> + [ 0 ] : { <nl> + type : string <nl> + value : Nested call . <nl> + } <nl> + ] <nl> + executionContextId : < executionContextId > <nl> + stackTrace : { <nl> + callFrames : [ <nl> + [ 0 ] : { <nl> + columnNumber : 10 <nl> + functionName : bar <nl> + lineNumber : 2 <nl> + scriptId : < scriptId > <nl> + url : test . js <nl> + } <nl> + [ 1 ] : { <nl> + columnNumber : 2 <nl> + functionName : foo <nl> + lineNumber : 6 <nl> + scriptId : < scriptId > <nl> + url : test . js <nl> + } <nl> + ] <nl> + parent : { <nl> + callFrames : [ <nl> + [ 0 ] : { <nl> + columnNumber : 2 <nl> + functionName : test <nl> + lineNumber : 10 <nl> + scriptId : < scriptId > <nl> + url : test . js <nl> + } <nl> + [ 1 ] : { <nl> + columnNumber : 0 <nl> + functionName : <nl> + lineNumber : 0 <nl> + scriptId : < scriptId > <nl> + url : expr . js <nl> + } <nl> + ] <nl> + description : setTimeout <nl> + } <nl> + } <nl> + timestamp : < timestamp > <nl> + type : log <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . cb76ab69870 <nl> mmm / dev / null <nl> ppp b / test / inspector / runtime / set - max - call - stack - size . js <nl> <nl> + / / Copyright 2018 the V8 project authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + let { session , contextGroup , Protocol } = InspectorTest . start ( ' Checks Runtime . setMaxCallStackSizeToCapture . ' ) ; <nl> + <nl> + Protocol . Runtime . enable ( ) ; <nl> + Protocol . Runtime . onConsoleAPICalled ( <nl> + message = > InspectorTest . logMessage ( message . params ) ) ; <nl> + <nl> + contextGroup . addScript ( ` <nl> + function bar ( ) { <nl> + console . log ( " Nested call . 
" ) ; <nl> + } <nl> + <nl> + function foo ( ) { <nl> + bar ( ) ; <nl> + } <nl> + <nl> + async function test ( ) { <nl> + setTimeout ( foo , 0 ) ; <nl> + } <nl> + / / # sourceURL = test . js ` ) ; <nl> + <nl> + Protocol . Runtime . setAsyncCallStackDepth ( { maxDepth : 10 } ) ; <nl> + ( async function test ( ) { <nl> + await Protocol . Runtime . setMaxCallStackSizeToCapture ( { size : 0 } ) ; <nl> + InspectorTest . log ( ' Test with max size 0 . ' ) ; <nl> + await Protocol . Runtime . evaluate ( { expression : ' test ( ) / / # sourceURL = expr . js ' } ) ; <nl> + await Protocol . Runtime . setMaxCallStackSizeToCapture ( { size : 1 } ) ; <nl> + InspectorTest . log ( ' Test with max size 1 . ' ) ; <nl> + await Protocol . Runtime . evaluate ( { expression : ' test ( ) / / # sourceURL = expr . js ' } ) ; <nl> + await Protocol . Runtime . setMaxCallStackSizeToCapture ( { size : 2 } ) ; <nl> + InspectorTest . log ( ' Test with max size 2 . ' ) ; <nl> + await Protocol . Runtime . evaluate ( { expression : ' test ( ) / / # sourceURL = expr . js ' } ) ; <nl> + InspectorTest . completeTest ( ) ; <nl> + } ) ( ) ; <nl>
Add more support for flexible stack trace capturing .
v8/v8
9758552aa83c7d60c269268056994332645a125b
2018-06-04T22:59:12Z
mmm a / tensorflow / python / keras / layers / preprocessing / categorical . py <nl> ppp b / tensorflow / python / keras / layers / preprocessing / categorical . py <nl> <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . framework import sparse_tensor <nl> + from tensorflow . python . framework import tensor_shape <nl> from tensorflow . python . framework import tensor_spec <nl> from tensorflow . python . keras . engine . base_layer import Layer <nl> from tensorflow . python . ops import lookup_ops <nl> + from tensorflow . python . ops import sparse_ops <nl> <nl> <nl> class CategoryLookup ( Layer ) : <nl> def compute_output_signature ( self , input_spec ) : <nl> shape = output_shape , dtype = output_dtype ) <nl> else : <nl> return tensor_spec . TensorSpec ( shape = output_shape , dtype = output_dtype ) <nl> + <nl> + <nl> + class CategoryCrossing ( Layer ) : <nl> + " " " Category crossing layer . <nl> + <nl> + This layer transforms multiple categorical inputs to categorical outputs <nl> + by Cartesian product , and hash the output if necessary . Without hashing <nl> + ( ` num_bins = None ` ) the output dtype is string , with hashing the output dtype <nl> + is int64 . <nl> + <nl> + Arguments : <nl> + depth : depth of input crossing . By default None , all inputs are crossed into <nl> + one output . It can also be an int or tuple / list of ints . Passing an <nl> + integer will create combinations of crossed outputs with depth up to that <nl> + integer , i . e . , [ 1 , 2 , . . . , ` depth ` ) , and passing a tuple of integers will <nl> + create crossed outputs with depth for the specified values in the tuple , <nl> + i . e . , ` depth ` = ( N1 , N2 ) will create all possible crossed outputs with depth <nl> + equal to N1 or N2 . Passing ` None ` means a single crossed output with all <nl> + inputs . For example , with inputs ` a ` , ` b ` and ` c ` , ` depth = 2 ` means the <nl> + output will be [ a ; b ; c ; cross ( a , b ) ; cross ( bc ) ; cross ( ca ) ] . <nl> + num_bins : Number of hash bins . By default None , no hashing is performed . <nl> + name : Name to give to the layer . <nl> + * * kwargs : Keyword arguments to construct a layer . <nl> + <nl> + Input shape : a list of string or int tensors or sparse tensors of shape <nl> + ` [ batch_size , d1 , . . . , dm ] ` <nl> + <nl> + Output shape : a single string or int tensor or sparse tensor of shape <nl> + ` [ batch_size , d1 , . . . 
, dm ] ` <nl> + <nl> + Example : ( ` depth ` = None ) <nl> + If the layer receives three inputs : <nl> + ` a = [ [ 1 ] , [ 4 ] ] ` , ` b = [ [ 2 ] , [ 5 ] ] ` , ` c = [ [ 3 ] , [ 6 ] ] ` <nl> + the output will be a string tensor if not hashed : <nl> + ` [ [ b ' 1_X_2_X_3 ' ] , [ b ' 4_X_5_X_6 ' ] ] ` <nl> + the output will be an int64 tensor if hashed : <nl> + ` [ [ hash ( b ' 1_X_2_X_3 ' ) ] , [ hash ( b ' 4_X_5_X_6 ' ) ] ] ` <nl> + <nl> + Example : ( ` depth ` is an integer ) <nl> + With the same input above , and if ` depth ` = 2 , <nl> + the output will be a list of 6 string tensors if not hashed : <nl> + ` [ [ b ' 1 ' ] , [ b ' 4 ' ] ] ` <nl> + ` [ [ b ' 2 ' ] , [ b ' 5 ' ] ] ` <nl> + ` [ [ b ' 3 ' ] , [ b ' 6 ' ] ] ` <nl> + ` [ [ b ' 1_X_2 ' ] , [ b ' 4_X_5 ' ] ] ` , <nl> + ` [ [ b ' 2_X_3 ' ] , [ b ' 5_X_6 ' ] ] ` , <nl> + ` [ [ b ' 3_X_1 ' ] , [ b ' 6_X_4 ' ] ] ` <nl> + the output will be a list of 6 int64 tensors if hashed : <nl> + ` [ [ hash ( b ' 1 ' ) ] , [ hash ( b ' 4 ' ) ] ] ` <nl> + ` [ [ hash ( b ' 2 ' ) ] , [ hash ( b ' 5 ' ) ] ] ` <nl> + ` [ [ hash ( b ' 3 ' ) ] , [ hash ( b ' 6 ' ) ] ] ` <nl> + ` [ [ hash ( b ' 1_X_2 ' ) ] , [ hash ( b ' 4_X_5 ' ) ] ] ` , <nl> + ` [ [ hash ( b ' 2_X_3 ' ) ] , [ hash ( b ' 5_X_6 ' ) ] ] ` , <nl> + ` [ [ hash ( b ' 3_X_1 ' ) ] , [ hash ( b ' 6_X_4 ' ) ] ] ` <nl> + <nl> + Example : ( ` depth ` is a tuple / list of integers ) <nl> + With the same input above , and if ` depth ` = ( 2 , 3 ) <nl> + the output will be a list of 4 string tensors if not hashed : <nl> + ` [ [ b ' 1_X_2 ' ] , [ b ' 4_X_5 ' ] ] ` , <nl> + ` [ [ b ' 2_X_3 ' ] , [ b ' 5_X_6 ' ] ] ` , <nl> + ` [ [ b ' 3_X_1 ' ] , [ b ' 6_X_4 ' ] ] ` , <nl> + ` [ [ b ' 1_X_2_X_3 ' ] , [ b ' 4_X_5_X_6 ' ] ] ` <nl> + the output will be a list of 4 int64 tensors if hashed : <nl> + ` [ [ hash ( b ' 1_X_2 ' ) ] , [ hash ( b ' 4_X_5 ' ) ] ] ` , <nl> + ` [ [ hash ( b ' 2_X_3 ' ) ] , [ hash ( b ' 5_X_6 ' ) ] ] ` , <nl> + ` [ [ hash ( b ' 3_X_1 ' ) ] , [ hash ( b ' 6_X_4 ' ) ] ] ` , <nl> + ` [ [ hash ( b ' 1_X_2_X_3 ' ) ] , [ hash ( b ' 4_X_5_X_6 ' ) ] ] ` <nl> + " " " <nl> + <nl> + def __init__ ( self , depth = None , num_bins = None , name = None , * * kwargs ) : <nl> + # TODO ( tanzheny ) : Add support for depth . <nl> + # TODO ( tanzheny ) : Consider making seperator configurable . <nl> + if depth is not None : <nl> + raise NotImplementedError ( ' ` depth ` is not supported yet . ' ) <nl> + self . num_bins = num_bins <nl> + self . depth = depth <nl> + super ( CategoryCrossing , self ) . __init__ ( name = name , * * kwargs ) <nl> + <nl> + def call ( self , inputs ) : <nl> + sparse_output = False <nl> + if any ( [ isinstance ( inp , sparse_tensor . SparseTensor ) for inp in inputs ] ) : <nl> + sparse_output = True <nl> + if self . num_bins is not None : <nl> + output = sparse_ops . sparse_cross_hashed ( <nl> + inputs , num_buckets = self . num_bins ) <nl> + else : <nl> + output = sparse_ops . sparse_cross ( inputs ) <nl> + if not sparse_output : <nl> + output = sparse_ops . sparse_tensor_to_dense ( output ) <nl> + return output <nl> + <nl> + def compute_output_shape ( self , input_shape ) : <nl> + if not isinstance ( input_shape , ( tuple , list ) ) : <nl> + raise ValueError ( ' A ` CategoryCrossing ` layer should be called ' <nl> + ' on a list of inputs . ' ) <nl> + input_shapes = input_shape <nl> + batch_size = None <nl> + for inp_shape in input_shapes : <nl> + inp_tensor_shape = tensor_shape . TensorShape ( inp_shape ) . as_list ( ) <nl> + if len ( inp_tensor_shape ) ! 
= 2 : <nl> + raise ValueError ( ' Inputs must be rank 2 , get { } ' . format ( input_shapes ) ) <nl> + if batch_size is None : <nl> + batch_size = inp_tensor_shape [ 0 ] <nl> + # The second dimension is dynamic based on inputs . <nl> + output_shape = [ batch_size , None ] <nl> + return tensor_shape . TensorShape ( output_shape ) <nl> + <nl> + def compute_output_signature ( self , input_spec ) : <nl> + input_shapes = [ x . shape for x in input_spec ] <nl> + output_shape = self . compute_output_shape ( input_shapes ) <nl> + output_dtype = dtypes . int64 if self . num_bins else dtypes . string <nl> + return sparse_tensor . SparseTensorSpec ( <nl> + shape = output_shape , dtype = output_dtype ) <nl> + <nl> + def get_config ( self ) : <nl> + config = { ' depth ' : self . depth , ' num_bins ' : self . num_bins } <nl> + base_config = super ( CategoryCrossing , self ) . get_config ( ) <nl> + return dict ( list ( base_config . items ( ) ) + list ( config . items ( ) ) ) <nl> mmm a / tensorflow / python / keras / layers / preprocessing / categorical_test . py <nl> ppp b / tensorflow / python / keras / layers / preprocessing / categorical_test . py <nl> <nl> from tensorflow . python . framework import sparse_tensor <nl> from tensorflow . python . framework import tensor_shape <nl> from tensorflow . python . framework import tensor_spec <nl> + from tensorflow . python . framework import test_util as tf_test_util <nl> from tensorflow . python . keras import keras_parameterized <nl> from tensorflow . python . keras . layers . preprocessing import categorical <nl> from tensorflow . python . ops import array_ops <nl> def test_vocab_list_sparse_input ( self ) : <nl> self . assertAllClose ( np . asarray ( [ 0 , 1 , 2 , 2 , 0 ] ) , output . values ) <nl> <nl> <nl> + @ keras_parameterized . run_all_keras_modes ( always_skip_v1 = True ) <nl> + class CategoryCrossingTest ( keras_parameterized . TestCase ) : <nl> + <nl> + def test_crossing_basic ( self ) : <nl> + layer = categorical . CategoryCrossing ( ) <nl> + inputs_0 = sparse_tensor . SparseTensor ( <nl> + indices = [ [ 0 , 0 ] , [ 1 , 0 ] , [ 1 , 1 ] ] , <nl> + values = [ ' a ' , ' b ' , ' c ' ] , <nl> + dense_shape = [ 2 , 2 ] ) <nl> + inputs_1 = sparse_tensor . SparseTensor ( <nl> + indices = [ [ 0 , 1 ] , [ 1 , 2 ] ] , values = [ ' d ' , ' e ' ] , dense_shape = [ 2 , 3 ] ) <nl> + output = layer ( [ inputs_0 , inputs_1 ] ) <nl> + self . assertAllClose ( np . asarray ( [ [ 0 , 0 ] , [ 1 , 0 ] , [ 1 , 1 ] ] ) , output . indices ) <nl> + self . assertAllEqual ( [ b ' a_X_d ' , b ' b_X_e ' , b ' c_X_e ' ] , output . values ) <nl> + <nl> + def test_crossing_hashed_basic ( self ) : <nl> + layer = categorical . CategoryCrossing ( num_bins = 1 ) <nl> + inputs_0 = sparse_tensor . SparseTensor ( <nl> + indices = [ [ 0 , 0 ] , [ 1 , 0 ] , [ 1 , 1 ] ] , <nl> + values = [ ' a ' , ' b ' , ' c ' ] , <nl> + dense_shape = [ 2 , 2 ] ) <nl> + inputs_1 = sparse_tensor . SparseTensor ( <nl> + indices = [ [ 0 , 1 ] , [ 1 , 2 ] ] , values = [ ' d ' , ' e ' ] , dense_shape = [ 2 , 3 ] ) <nl> + output = layer ( [ inputs_0 , inputs_1 ] ) <nl> + self . assertAllClose ( np . asarray ( [ [ 0 , 0 ] , [ 1 , 0 ] , [ 1 , 1 ] ] ) , output . indices ) <nl> + self . assertAllClose ( [ 0 , 0 , 0 ] , output . values ) <nl> + <nl> + def test_crossing_hashed_two_bins ( self ) : <nl> + layer = categorical . CategoryCrossing ( num_bins = 2 ) <nl> + inputs_0 = sparse_tensor . 
SparseTensor ( <nl> + indices = [ [ 0 , 0 ] , [ 1 , 0 ] , [ 1 , 1 ] ] , <nl> + values = [ ' a ' , ' b ' , ' c ' ] , <nl> + dense_shape = [ 2 , 2 ] ) <nl> + inputs_1 = sparse_tensor . SparseTensor ( <nl> + indices = [ [ 0 , 1 ] , [ 1 , 2 ] ] , values = [ ' d ' , ' e ' ] , dense_shape = [ 2 , 3 ] ) <nl> + output = layer ( [ inputs_0 , inputs_1 ] ) <nl> + self . assertAllClose ( np . asarray ( [ [ 0 , 0 ] , [ 1 , 0 ] , [ 1 , 1 ] ] ) , output . indices ) <nl> + self . assertEqual ( output . values . numpy ( ) . max ( ) , 1 ) <nl> + self . assertEqual ( output . values . numpy ( ) . min ( ) , 0 ) <nl> + <nl> + def test_crossing_with_dense_inputs ( self ) : <nl> + layer = categorical . CategoryCrossing ( ) <nl> + inputs_0 = np . asarray ( [ [ 1 , 2 ] ] ) <nl> + inputs_1 = np . asarray ( [ [ 1 , 3 ] ] ) <nl> + output = layer ( [ inputs_0 , inputs_1 ] ) <nl> + self . assertAllEqual ( [ [ b ' 1_X_1 ' , b ' 1_X_3 ' , b ' 2_X_1 ' , b ' 2_X_3 ' ] ] , output ) <nl> + <nl> + def test_crossing_hashed_with_dense_inputs ( self ) : <nl> + layer = categorical . CategoryCrossing ( num_bins = 2 ) <nl> + inputs_0 = np . asarray ( [ [ 1 , 2 ] ] ) <nl> + inputs_1 = np . asarray ( [ [ 1 , 3 ] ] ) <nl> + output = layer ( [ inputs_0 , inputs_1 ] ) <nl> + self . assertAllClose ( [ [ 1 , 1 , 0 , 0 ] ] , output ) <nl> + <nl> + def test_crossing_compute_output_signature ( self ) : <nl> + input_shapes = [ <nl> + tensor_shape . TensorShape ( [ 2 , 2 ] ) , <nl> + tensor_shape . TensorShape ( [ 2 , 3 ] ) <nl> + ] <nl> + input_specs = [ <nl> + tensor_spec . TensorSpec ( input_shape , dtypes . string ) <nl> + for input_shape in input_shapes <nl> + ] <nl> + layer = categorical . CategoryCrossing ( ) <nl> + output_spec = layer . compute_output_signature ( input_specs ) <nl> + self . assertEqual ( output_spec . shape . dims [ 0 ] , input_shapes [ 0 ] . dims [ 0 ] ) <nl> + self . assertEqual ( output_spec . dtype , dtypes . string ) <nl> + <nl> + layer = categorical . CategoryCrossing ( num_bins = 2 ) <nl> + output_spec = layer . compute_output_signature ( input_specs ) <nl> + self . assertEqual ( output_spec . shape . dims [ 0 ] , input_shapes [ 0 ] . dims [ 0 ] ) <nl> + self . assertEqual ( output_spec . dtype , dtypes . int64 ) <nl> + <nl> + @ tf_test_util . run_v2_only <nl> + def test_config_with_custom_name ( self ) : <nl> + layer = categorical . CategoryCrossing ( num_bins = 2 , name = ' hashing ' ) <nl> + config = layer . get_config ( ) <nl> + layer_1 = categorical . CategoryCrossing . from_config ( config ) <nl> + self . assertEqual ( layer_1 . name , layer . name ) <nl> + <nl> + layer = categorical . CategoryCrossing ( name = ' hashing ' ) <nl> + config = layer . get_config ( ) <nl> + layer_1 = categorical . CategoryCrossing . from_config ( config ) <nl> + self . assertEqual ( layer_1 . name , layer . name ) <nl> + <nl> + def test_incorrect_depth ( self ) : <nl> + with self . assertRaises ( NotImplementedError ) : <nl> + categorical . CategoryCrossing ( depth = 1 ) <nl> + <nl> + <nl> if __name__ = = ' __main__ ' : <nl> test . main ( ) <nl>
Create CategoryCrossing Layer .
tensorflow/tensorflow
e7dca8b51c6a8618f791b55d38521f83ce066dc2
2020-02-04T23:48:36Z
mmm a / lib / IDE / CodeCompletion . cpp <nl> ppp b / lib / IDE / CodeCompletion . cpp <nl> class CompletionLookup final : public swift : : VisibleDeclConsumer { <nl> bool HaveLParen = false ; <nl> bool HaveRParen = false ; <nl> bool IsSuperRefExpr = false ; <nl> + bool IsSelfRefExpr = false ; <nl> bool IsKeyPathExpr = false ; <nl> bool IsDynamicLookup = false ; <nl> bool PreferFunctionReferencesToCalls = false ; <nl> class CompletionLookup final : public swift : : VisibleDeclConsumer { <nl> IsSuperRefExpr = true ; <nl> } <nl> <nl> + void setIsSelfRefExpr ( bool value ) { IsSelfRefExpr = value ; } <nl> + <nl> void setIsKeyPathExpr ( ) { <nl> IsKeyPathExpr = true ; <nl> } <nl> class CompletionLookup final : public swift : : VisibleDeclConsumer { <nl> } <nl> <nl> void addConstructorCall ( const ConstructorDecl * CD , DeclVisibilityKind Reason , <nl> - Optional < Type > Result , <nl> + Optional < Type > Result , bool IsOnMetatype = true , <nl> Identifier addName = Identifier ( ) ) { <nl> foundFunction ( CD ) ; <nl> Type MemberType = getTypeOfMember ( CD ) ; <nl> class CompletionLookup final : public swift : : VisibleDeclConsumer { <nl> - > castTo < AnyFunctionType > ( ) ; <nl> <nl> bool needInit = false ; <nl> - if ( IsSuperRefExpr ) { <nl> + if ( ! IsOnMetatype ) { <nl> assert ( addName . empty ( ) ) ; <nl> assert ( isa < ConstructorDecl > ( CurrDeclContext ) & & <nl> " can call super . init only inside a constructor " ) ; <nl> class CompletionLookup final : public swift : : VisibleDeclConsumer { <nl> for ( auto * init : initializers ) { <nl> if ( shouldHideDeclFromCompletionResults ( init ) ) <nl> continue ; <nl> - addConstructorCall ( cast < ConstructorDecl > ( init ) , Reason , None , name ) ; <nl> + addConstructorCall ( cast < ConstructorDecl > ( init ) , Reason , None , <nl> + / * IsOnMetatype = * / true , name ) ; <nl> } <nl> } <nl> } <nl> class CompletionLookup final : public swift : : VisibleDeclConsumer { <nl> } <nl> addConstructorCall ( CD , Reason , Result ) ; <nl> } <nl> - if ( IsSuperRefExpr ) { <nl> + if ( IsSuperRefExpr | | IsSelfRefExpr ) { <nl> if ( ! isa < ConstructorDecl > ( CurrDeclContext ) ) <nl> return ; <nl> - addConstructorCall ( CD , Reason , None ) ; <nl> + addConstructorCall ( CD , Reason , None , / * IsOnMetatype = * / false ) ; <nl> } <nl> return ; <nl> } <nl> void CodeCompletionCallbacksImpl : : doneParsing ( ) { <nl> if ( ExprType ) { <nl> Lookup . setIsStaticMetatype ( ParsedExpr - > isStaticallyDerivedMetatype ( ) ) ; <nl> } <nl> + if ( auto * DRE = dyn_cast_or_null < DeclRefExpr > ( ParsedExpr ) ) { <nl> + Lookup . setIsSelfRefExpr ( DRE - > getDecl ( ) - > getName ( ) = = Context . Id_self ) ; <nl> + } <nl> + <nl> if ( isInsideObjCSelector ( ) ) <nl> Lookup . includeInstanceMembers ( ) ; <nl> if ( PreferFunctionReferencesToCalls ) <nl> mmm a / test / IDE / complete_after_self . swift <nl> ppp b / test / IDE / complete_after_self . swift <nl> <nl> / / RUN : % FileCheck % s - check - prefix = CONSTRUCTOR_SELF_NO_DOT_1 < % t . self . txt <nl> / / RUN : % FileCheck % s - check - prefix = COMMON_SELF_NO_DOT_1 < % t . self . txt <nl> <nl> + / / RUN : % target - swift - ide - test - code - completion - source - filename % s - code - completion - token = CONSTRUCTOR_NONSELF_NO_DOT_1 > % t . self . txt <nl> + / / RUN : % FileCheck % s - check - prefix = COMMON_SELF_NO_DOT_1 < % t . self . txt <nl> + / / RUN : % FileCheck % s - check - prefix = NO_INIT < % t . self . 
txt <nl> + <nl> / / RUN : % target - swift - ide - test - code - completion - source - filename % s - code - completion - token = CONSTRUCTOR_SELF_DOT_1 > % t . self . txt <nl> / / RUN : % FileCheck % s - check - prefix = CONSTRUCTOR_SELF_DOT_1 < % t . self . txt <nl> / / RUN : % FileCheck % s - check - prefix = COMMON_SELF_DOT_1 < % t . self . txt <nl> <nl> + / / RUN : % target - swift - ide - test - code - completion - source - filename % s - code - completion - token = CONSTRUCTOR_NONSELF_DOT_1 > % t . self . txt <nl> + / / RUN : % FileCheck % s - check - prefix = COMMON_SELF_DOT_1 < % t . self . txt <nl> + / / RUN : % FileCheck % s - check - prefix = NO_INIT < % t . self . txt <nl> + <nl> / / RUN : % target - swift - ide - test - code - completion - source - filename % s - code - completion - token = DESTRUCTOR_SELF_NO_DOT_1 > % t . self . txt <nl> / / RUN : % FileCheck % s - check - prefix = DESTRUCTOR_SELF_NO_DOT_1 < % t . self . txt <nl> / / RUN : % FileCheck % s - check - prefix = COMMON_SELF_NO_DOT_1 < % t . self . txt <nl> + / / RUN : % FileCheck % s - check - prefix = NO_INIT < % t . self . txt <nl> <nl> / / RUN : % target - swift - ide - test - code - completion - source - filename % s - code - completion - token = DESTRUCTOR_SELF_DOT_1 > % t . self . txt <nl> / / RUN : % FileCheck % s - check - prefix = DESTRUCTOR_SELF_DOT_1 < % t . self . txt <nl> / / RUN : % FileCheck % s - check - prefix = COMMON_SELF_DOT_1 < % t . self . txt <nl> + / / RUN : % FileCheck % s - check - prefix = NO_INIT < % t . self . txt <nl> <nl> / / RUN : % target - swift - ide - test - code - completion - source - filename % s - code - completion - token = FUNC_SELF_NO_DOT_1 > % t . self . txt <nl> / / RUN : % FileCheck % s - check - prefix = FUNC_SELF_NO_DOT_1 < % t . self . txt <nl> / / RUN : % FileCheck % s - check - prefix = COMMON_SELF_NO_DOT_1 < % t . self . txt <nl> + / / RUN : % FileCheck % s - check - prefix = NO_INIT < % t . self . txt <nl> <nl> / / RUN : % target - swift - ide - test - code - completion - source - filename % s - code - completion - token = FUNC_SELF_DOT_1 > % t . self . txt <nl> / / RUN : % FileCheck % s - check - prefix = FUNC_SELF_DOT_1 < % t . self . txt <nl> / / RUN : % FileCheck % s - check - prefix = COMMON_SELF_DOT_1 < % t . self . txt <nl> + / / RUN : % FileCheck % s - check - prefix = NO_INIT < % t . self . txt <nl> <nl> / / RUN : % target - swift - ide - test - code - completion - source - filename % s - code - completion - token = FUNC_STATIC_SELF_NO_DOT_1 > % t . self . txt <nl> / / RUN : % FileCheck % s - check - prefix = FUNC_STATIC_SELF_NO_DOT_1 < % t . self . txt <nl> <nl> / / RUN : % target - swift - ide - test - code - completion - source - filename % s - code - completion - token = FUNC_STATIC_SELF_DOT_1 > % t . self . txt <nl> / / RUN : % FileCheck % s - check - prefix = FUNC_STATIC_SELF_DOT_1 < % t . self . txt <nl> <nl> + / / RUN : % target - swift - ide - test - code - completion - source - filename % s - code - completion - token = STRUCT_CONSTRUCTOR_SELF_DOT_1 > % t . self . txt <nl> + / / RUN : % FileCheck % s - check - prefix = STRUCT_CONSTRUCTOR_SELF_DOT_1 < % t . self . txt <nl> + <nl> + / / RUN : % target - swift - ide - test - code - completion - source - filename % s - code - completion - token = STRUCT_CONSTRUCTOR_NONSELF_DOT_1 > % t . self . txt <nl> + / / RUN : % FileCheck % s - check - prefix = NO_INIT < % t . self . 
txt <nl> + <nl> + / / RUN : % target - swift - ide - test - code - completion - source - filename % s - code - completion - token = STRUCT_FUNC_SELF_DOT_1 > % t . self . txt <nl> + / / RUN : % FileCheck % s - check - prefix = NO_INIT < % t . self . txt <nl> + <nl> / / = = = mmm <nl> / / = = = mmm Tests for code completion after ' self ' . <nl> / / = = = mmm <nl> class ThisDerived1 : ThisBase1 { <nl> <nl> init ( ) { <nl> self # ^ CONSTRUCTOR_SELF_NO_DOT_1 ^ # <nl> - / / CONSTRUCTOR_SELF_NO_DOT_1 : Begin completions , 20 items <nl> + / / CONSTRUCTOR_SELF_NO_DOT_1 : Begin completions , 23 items <nl> + / / CONSTRUCTOR_SELF_NO_DOT_1 - DAG : Decl [ Constructor ] / CurrNominal : . init ( ) [ # ThisDerived1 # ] ; <nl> + / / CONSTRUCTOR_SELF_NO_DOT_1 - DAG : Decl [ Constructor ] / CurrNominal : . init ( { # a : Int # } ) [ # ThisDerived1 # ] ; <nl> / / CONSTRUCTOR_SELF_NO_DOT_1 : End completions <nl> + let d : ThisDerived1 <nl> + d # ^ CONSTRUCTOR_NONSELF_NO_DOT_1 ^ # <nl> + / / NO_INIT - NOT : init ( ) <nl> } <nl> <nl> init ( a : Int ) { <nl> self . # ^ CONSTRUCTOR_SELF_DOT_1 ^ # <nl> - / / CONSTRUCTOR_SELF_DOT_1 : Begin completions , 15 items <nl> + / / CONSTRUCTOR_SELF_DOT_1 : Begin completions , 18 items <nl> + / / CONSTRUCTOR_SELF_DOT_1 - DAG : Decl [ Constructor ] / CurrNominal : init ( ) [ # ThisDerived1 # ] ; <nl> + / / CONSTRUCTOR_SELF_DOT_1 - DAG : Decl [ Constructor ] / CurrNominal : init ( { # a : Int # } ) [ # ThisDerived1 # ] ; <nl> / / CONSTRUCTOR_SELF_DOT_1 : End completions <nl> + let d : ThisDerived1 <nl> + d . # ^ CONSTRUCTOR_NONSELF_DOT_1 ^ # <nl> } <nl> <nl> deinit { <nl> class ThisDerived1 : ThisBase1 { <nl> / / FUNC_STATIC_SELF_NO_DOT_1 - NEXT : Decl [ Class ] / CurrNominal : . DerivedExtNestedClass [ # ThisDerived1 . DerivedExtNestedClass # ] <nl> / / FUNC_STATIC_SELF_NO_DOT_1 - NEXT : Decl [ Enum ] / CurrNominal : . DerivedExtNestedEnum [ # ThisDerived1 . DerivedExtNestedEnum # ] <nl> / / FUNC_STATIC_SELF_NO_DOT_1 - NEXT : Decl [ TypeAlias ] / CurrNominal : . DerivedExtNestedTypealias [ # Int # ] <nl> + / / FUNC_STATIC_SELF_NO_DOT_1 - NEXT : Decl [ Constructor ] / CurrNominal : ( { # someExtensionArg : Int # } ) [ # ThisDerived1 # ] <nl> / / FUNC_STATIC_SELF_NO_DOT_1 - NEXT : Decl [ InstanceMethod ] / Super : . baseFunc0 ( { # self : ThisBase1 # } ) [ # ( ) - > Void # ] <nl> / / FUNC_STATIC_SELF_NO_DOT_1 - NEXT : Decl [ InstanceMethod ] / Super : . baseFunc1 ( { # self : ThisBase1 # } ) [ # ( Int ) - > Void # ] <nl> / / FUNC_STATIC_SELF_NO_DOT_1 - NEXT : Decl [ StaticVar ] / Super : . baseStaticVar [ # Int # ] <nl> extension ThisDerived1 { <nl> } <nl> <nl> typealias DerivedExtNestedTypealias = Int <nl> + <nl> + convenience init ( someExtensionArg : Int ) { <nl> + self . # ^ EXTENSION_CONSTRUCTOR_SELF_DOT_1 ^ # <nl> + } <nl> + } <nl> + <nl> + struct S1 { <nl> + init ( ) { } <nl> + init ( x : Int ) { <nl> + self . # ^ STRUCT_CONSTRUCTOR_SELF_DOT_1 ^ # <nl> + / / STRUCT_CONSTRUCTOR_SELF_DOT_1 : Begin completions , 3 items <nl> + / / STRUCT_CONSTRUCTOR_SELF_DOT_1 - DAG : Decl [ Constructor ] / CurrNominal : init ( ) [ # S1 # ] ; <nl> + / / STRUCT_CONSTRUCTOR_SELF_DOT_1 - DAG : Decl [ Constructor ] / CurrNominal : init ( { # x : Int # } ) [ # S1 # ] ; <nl> + / / STRUCT_CONSTRUCTOR_SELF_DOT_1 - DAG : Decl [ InstanceMethod ] / CurrNominal : f ( ) [ # Void # ] ; <nl> + / / STRUCT_CONSTRUCTOR_SELF_DOT_1 : End completions <nl> + let s : S1 <nl> + s . # ^ STRUCT_CONSTRUCTOR_NONSELF_DOT_1 ^ # <nl> + } <nl> + func f ( ) { <nl> + self . # ^ STRUCT_FUNC_SELF_DOT_1 ^ # <nl> + } <nl> } <nl>
Merge pull request from benlangmuir / cc - self - init
apple/swift
c39bfac44241b240c77e48509437418537fd61a6
2016-09-12T21:14:30Z
mmm a / bazel / foreign_cc / BUILD <nl> ppp b / bazel / foreign_cc / BUILD <nl> configure_make ( <nl> # https : / / github . com / envoyproxy / envoy / issues / 6084 <nl> # TODO ( htuch ) : Remove when # 6084 is fixed <nl> " / / bazel : asan_build " : { " ENVOY_CONFIG_ASAN " : " 1 " } , <nl> + " / / bazel : msan_build " : { " ENVOY_CONFIG_MSAN " : " 1 " } , <nl> " / / conditions : default " : { } , <nl> } ) , <nl> lib_source = " @ com_github_luajit_luajit / / : all " , <nl> configure_make ( <nl> # https : / / github . com / envoyproxy / envoy / issues / 6084 <nl> # TODO ( htuch ) : Remove when # 6084 is fixed <nl> " / / bazel : asan_build " : { " ENVOY_CONFIG_ASAN " : " 1 " } , <nl> + " / / bazel : msan_build " : { " ENVOY_CONFIG_MSAN " : " 1 " } , <nl> " / / conditions : default " : { } , <nl> } ) , <nl> lib_source = " @ com_github_moonjit_moonjit / / : all " , <nl> mmm a / bazel / foreign_cc / luajit . patch <nl> ppp b / bazel / foreign_cc / luajit . patch <nl> index 0000000 . . 9c71271 <nl> + <nl> + # Remove LuaJIT from ASAN for now . <nl> + # TODO ( htuch ) : Remove this when https : / / github . com / envoyproxy / envoy / issues / 6084 is resolved . <nl> - + if " ENVOY_CONFIG_ASAN " in os . environ : <nl> + + if " ENVOY_CONFIG_ASAN " in os . environ or " ENVOY_CONFIG_MSAN " in os . environ : <nl> + os . environ [ " TARGET_CFLAGS " ] + = " - fsanitize - blacklist = % s / com_github_luajit_luajit / clang - asan - blocklist . txt " % os . environ [ " PWD " ] <nl> + with open ( " clang - asan - blocklist . txt " , " w " ) as f : <nl> + f . write ( " fun : * \ n " ) <nl> mmm a / bazel / foreign_cc / moonjit . patch <nl> ppp b / bazel / foreign_cc / moonjit . patch <nl> index 0000000 . . 9c71271 <nl> + <nl> + # Remove LuaJIT from ASAN for now . <nl> + # TODO ( htuch ) : Remove this when https : / / github . com / envoyproxy / envoy / issues / 6084 is resolved . <nl> - + if " ENVOY_CONFIG_ASAN " in os . environ : <nl> + + if " ENVOY_CONFIG_ASAN " in os . environ or " ENVOY_CONFIG_MSAN " in os . environ : <nl> + os . environ [ " TARGET_CFLAGS " ] + = " - fsanitize - blacklist = % s / com_github_moonjit_moonjit / clang - asan - blocklist . txt " % os . environ [ " PWD " ] <nl> + with open ( " clang - asan - blocklist . txt " , " w " ) as f : <nl> + f . write ( " fun : * \ n " ) <nl> mmm a / source / common / network / address_impl . cc <nl> ppp b / source / common / network / address_impl . cc <nl> Address : : InstanceConstSharedPtr addressFromSockAddr ( const sockaddr_storage & ss , <nl> <nl> Ipv4Instance : : Ipv4Instance ( const sockaddr_in * address , absl : : string_view sock_interface ) <nl> : InstanceBase ( Type : : Ip , sock_interface ) { <nl> + memset ( & ip_ . ipv4_ . address_ , 0 , sizeof ( ip_ . ipv4_ . address_ ) ) ; <nl> ip_ . ipv4_ . address_ = * address ; <nl> ip_ . friendly_address_ = sockaddrToString ( * address ) ; <nl> <nl> mmm a / source / common / network / utility . cc <nl> ppp b / source / common / network / utility . cc <nl> Address : : InstanceConstSharedPtr Utility : : parseInternetAddressAndPort ( const std : : <nl> throwWithMalformedIp ( ip_address ) ; <nl> } <nl> sockaddr_in sa4 ; <nl> + memset ( & sa4 , 0 , sizeof ( sa4 ) ) ; <nl> if ( ip_str . empty ( ) | | inet_pton ( AF_INET , ip_str . c_str ( ) , & sa4 . sin_addr ) ! = 1 ) { <nl> throwWithMalformedIp ( ip_address ) ; <nl> } <nl> mmm a / source / extensions / filters / udp / dns_filter / dns_parser . cc <nl> ppp b / source / extensions / filters / udp / dns_filter / dns_parser . 
cc <nl> DnsAnswerRecordPtr DnsMessageParser : : parseDnsAnswerRecord ( const Buffer : : Instance <nl> case DNS_RECORD_TYPE_A : <nl> if ( available_bytes > = sizeof ( uint32_t ) ) { <nl> sockaddr_in sa4 ; <nl> + memset ( & sa4 , 0 , sizeof ( sa4 ) ) ; <nl> sa4 . sin_addr . s_addr = buffer - > peekLEInt < uint32_t > ( data_offset ) ; <nl> ip_addr = std : : make_shared < Network : : Address : : Ipv4Instance > ( & sa4 ) ; <nl> data_offset + = data_length ; <nl> DnsAnswerRecordPtr DnsMessageParser : : parseDnsAnswerRecord ( const Buffer : : Instance <nl> case DNS_RECORD_TYPE_AAAA : <nl> if ( available_bytes > = sizeof ( absl : : uint128 ) ) { <nl> sockaddr_in6 sa6 ; <nl> + memset ( & sa6 , 0 , sizeof ( sa6 ) ) ; <nl> uint8_t * address6_bytes = reinterpret_cast < uint8_t * > ( & sa6 . sin6_addr . s6_addr ) ; <nl> static constexpr size_t count = sizeof ( absl : : uint128 ) / sizeof ( uint8_t ) ; <nl> for ( size_t index = 0 ; index < count ; index + + ) { <nl> mmm a / source / extensions / tracers / xray / xray_configuration . h <nl> ppp b / source / extensions / tracers / xray / xray_configuration . h <nl> enum class SamplingDecision { <nl> struct XRayHeader { <nl> std : : string trace_id_ ; <nl> std : : string parent_id_ ; <nl> - SamplingDecision sample_decision_ ; <nl> + SamplingDecision sample_decision_ { } ; <nl> } ; <nl> <nl> } / / namespace XRay <nl> mmm a / test / exe / main_common_test . cc <nl> ppp b / test / exe / main_common_test . cc <nl> INSTANTIATE_TEST_SUITE_P ( IpVersions , MainCommonDeathTest , <nl> <nl> TEST_P ( MainCommonDeathTest , OutOfMemoryHandler ) { <nl> # if defined ( __clang_analyzer__ ) | | ( defined ( __has_feature ) & & ( __has_feature ( thread_sanitizer ) | | \ <nl> - __has_feature ( address_sanitizer ) ) ) <nl> + __has_feature ( address_sanitizer ) | | \ <nl> + __has_feature ( memory_sanitizer ) ) ) <nl> ENVOY_LOG_MISC ( critical , <nl> " MainCommonTest : : OutOfMemoryHandler not supported by this compiler configuration " ) ; <nl> # else <nl> mmm a / test / extensions / filters / network / postgres_proxy / postgres_decoder_test . cc <nl> ppp b / test / extensions / filters / network / postgres_proxy / postgres_decoder_test . cc <nl> class PostgresProxyDecoderTestBase { <nl> <nl> / / fields often used <nl> Buffer : : OwnedImpl data_ ; <nl> - char buf_ [ 256 ] ; <nl> + char buf_ [ 256 ] { } ; <nl> std : : string payload_ ; <nl> } ; <nl> <nl>
build : fix several uses of uninitialized values detected by MSAN ( )
envoyproxy/envoy
6a71f16c664f2e3a874d2a991262d49e6ed485c8
2020-08-06T16:05:27Z
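The Envoy record above silences MSAN reports by zero-initializing `sockaddr` structures that are only partially populated before use. A minimal, self-contained sketch of the same pattern follows (illustrative only; the function name and signature are assumptions, not code from the Envoy tree):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <cstring>
#include <string>

// Parse an IPv4 address string into a fully initialized sockaddr_in.
// Without the memset, the struct's padding and sin_zero bytes stay
// uninitialized, and MSAN flags any later copy or byte-wise comparison
// of the whole struct.
bool parse_ipv4(const std::string& ip, uint16_t port, sockaddr_in* out) {
  std::memset(out, 0, sizeof(*out));  // zero everything before filling fields
  out->sin_family = AF_INET;
  out->sin_port = htons(port);
  return inet_pton(AF_INET, ip.c_str(), &out->sin_addr) == 1;
}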
mmm a / tensorflow / c / eager / gradients_test . cc <nl> ppp b / tensorflow / c / eager / gradients_test . cc <nl> TEST_P ( CppGradients , TestAddGrad ) { <nl> } <nl> <nl> GradientRegistry registry ; <nl> - Status s = RegisterGradientAdd ( & registry ) ; <nl> + Status s = RegisterGradients ( & registry ) ; <nl> ASSERT_EQ ( errors : : OK , s . code ( ) ) < < s . error_message ( ) ; <nl> <nl> / / Pseudo - code : <nl> mmm a / tensorflow / c / eager / mnist_gradients_util . cc <nl> ppp b / tensorflow / c / eager / mnist_gradients_util . cc <nl> Status MNISTGradModel ( AbstractContext * ctx , <nl> / * output_gradients = * / { } , & out_grads ) ) ; <nl> <nl> / / Only release 2nd temp output as first holds loss values . <nl> - / / temp_outputs [ 1 ] - > Unref ( ) ; <nl> + temp_outputs [ 1 ] - > Unref ( ) ; <nl> <nl> outputs [ 0 ] = out_grads [ 0 ] ; / / dW1 <nl> outputs [ 1 ] = out_grads [ 1 ] ; / / dW2 <nl> mmm a / tensorflow / c / experimental / gradients / math_grad . cc <nl> ppp b / tensorflow / c / experimental / gradients / math_grad . cc <nl> using tensorflow : : ops : : Mul ; <nl> using tensorflow : : ops : : MatMul ; <nl> using tensorflow : : ops : : ReluGrad ; <nl> using tensorflow : : ops : : SparseSoftmaxCrossEntropyLoss ; <nl> + using tensorflow : : ops : : ZerosLike ; <nl> <nl> namespace tensorflow { <nl> namespace gradients { <nl> class MatMulGradientFunction : public GradientFunction { <nl> std : : vector < AbstractTensorHandle * > matmul_outputs ( 1 ) ; <nl> <nl> / / Gradient for A <nl> - std : : string name = " matm_A_ " + std : : to_string ( counter ) ; <nl> + std : : string name = " mm_A_ " + std : : to_string ( counter ) ; <nl> TF_RETURN_IF_ERROR ( MatMul ( ctx - > ctx , { upstream_grad , forward_inputs [ 1 ] } , <nl> absl : : MakeSpan ( matmul_outputs ) , name . c_str ( ) , <nl> / * transpose_a = * / false , <nl> class ReluGradientFunction : public GradientFunction { <nl> std : : vector < AbstractTensorHandle * > forward_outputs ; <nl> } ; <nl> <nl> + <nl> + / / FIX ZEROSLIKE <nl> class SparseSoftmaxCrossEntropyLossGradientFunction : public GradientFunction { <nl> public : <nl> explicit SparseSoftmaxCrossEntropyLossGradientFunction ( <nl> class SparseSoftmaxCrossEntropyLossGradientFunction : public GradientFunction { <nl> Status Compute ( Context * ctx , <nl> absl : : Span < AbstractTensorHandle * const > grad_inputs , <nl> std : : vector < AbstractTensorHandle * > * grad_outputs ) override { <nl> - / / Forward Inputs : [ scores , labels ] <nl> - <nl> + <nl> grad_outputs - > resize ( 2 ) ; <nl> - / / std : : vector < AbstractTensorHandle * > sm_outputs ( 2 ) ; <nl> - <nl> - / / / / Calculate Grad <nl> - / / std : : string name = " sm_loss " + std : : to_string ( counter ) ; <nl> - <nl> - / / TF_RETURN_IF_ERROR ( SparseSoftmaxCrossEntropyLoss ( <nl> - / / ctx - > ctx , { forward_inputs [ 0 ] , forward_inputs [ 1 ] } , <nl> - / / absl : : MakeSpan ( sm_outputs ) , name . c_str ( ) ) ) ; <nl> - <nl> - / / TODO ( amturati ) : fix error where we have to return the softmax loss as the <nl> - / / 2nd grad for the labels to avoid mangled stack trace . Also avoid running <nl> - / / forward operation again , check to see if forward_outputs are being <nl> - / / passed . <nl> + std : : string name = " Identity_Softmax_Grad_A_ " + std : : to_string ( counter ) ; <nl> + std : : vector < AbstractTensorHandle * > id_outputs ( 1 ) ; <nl> + TF_RETURN_IF_ERROR ( ops : : Identity ( ctx - > ctx , { forward_outputs [ 1 ] } , <nl> + absl : : MakeSpan ( id_outputs ) , <nl> + name . 
c_str ( ) ) ) ; <nl> + ( * grad_outputs ) [ 0 ] = id_outputs [ 0 ] ; <nl> <nl> - / / SparseSoftmaxCrossEntropyLoss returns [ loss_vals , grads ] , so return 2nd <nl> - / / output . <nl> - ( * grad_outputs ) [ 0 ] = forward_outputs [ 1 ] ; / / sm_outputs [ 1 ] ; / / return backprop for scores <nl> - ( * grad_outputs ) [ 1 ] = forward_outputs [ 0 ] ; / / nullptr causes Mangled Stack Trace <nl> + / / TODO ( amturati ) : check to see if ZerosLike is ok instead of nullptr <nl> + name = " Zeros_Softmax_Grad_ " + std : : to_string ( counter ) ; <nl> + TF_RETURN_IF_ERROR ( ops : : ZerosLike ( ctx - > ctx , { forward_inputs [ 1 ] } , <nl> + absl : : MakeSpan ( id_outputs ) , <nl> + name . c_str ( ) ) ) ; <nl> + ( * grad_outputs ) [ 1 ] = id_outputs [ 0 ] ; / / nullptr causes Mangled Stack Trace <nl> <nl> counter + = 1 ; <nl> return Status : : OK ( ) ; <nl> mmm a / tensorflow / c / experimental / ops / array_ops . cc <nl> ppp b / tensorflow / c / experimental / ops / array_ops . cc <nl> Status Identity ( AbstractContext * ctx , <nl> return identity_op - > Execute ( outputs , & num_retvals ) ; <nl> } <nl> <nl> + Status ZerosLike ( AbstractContext * ctx , <nl> + absl : : Span < AbstractTensorHandle * const > inputs , <nl> + absl : : Span < AbstractTensorHandle * > outputs , const char * name ) { <nl> + AbstractOperationPtr z_op ( ctx - > CreateOperation ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( <nl> + z_op - > Reset ( " ZerosLike " , / * raw_device_name = * / nullptr ) ) ; <nl> + if ( isa < tensorflow : : tracing : : TracingOperation > ( z_op . get ( ) ) ) { <nl> + TF_RETURN_IF_ERROR ( dyn_cast < tracing : : TracingOperation > ( z_op . get ( ) ) <nl> + - > SetOpName ( name ) ) ; <nl> + } <nl> + TF_RETURN_IF_ERROR ( z_op - > AddInput ( inputs [ 0 ] ) ) ; <nl> + int num_retvals = 1 ; <nl> + return z_op - > Execute ( outputs , & num_retvals ) ; <nl> + } <nl> + <nl> } / / namespace ops <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / c / experimental / ops / array_ops . h <nl> ppp b / tensorflow / c / experimental / ops / array_ops . h <nl> Status Identity ( AbstractContext * ctx , <nl> absl : : Span < AbstractTensorHandle * const > inputs , <nl> absl : : Span < AbstractTensorHandle * > outputs , const char * name ) ; <nl> <nl> + Status ZerosLike ( AbstractContext * ctx , <nl> + absl : : Span < AbstractTensorHandle * const > inputs , <nl> + absl : : Span < AbstractTensorHandle * > outputs , const char * name ) ; <nl> + <nl> } / / namespace ops <nl> } / / namespace tensorflow <nl> <nl>
fixed forward outputs for grads
tensorflow/tensorflow
4e74675d28d5c269451f0902c576f2fe93a1eef2
2020-08-17T22:57:35Z
mmm a / src / rpc / directory / read_manager . tcc <nl> ppp b / src / rpc / directory / read_manager . tcc <nl> void directory_read_manager_t < metadata_t > : : on_message ( peer_id_t source_peer , std <nl> <nl> template < class metadata_t > <nl> void directory_read_manager_t < metadata_t > : : on_disconnect ( peer_id_t peer ) THROWS_NOTHING { <nl> + ASSERT_FINITE_CORO_WAITING ; <nl> assert_thread ( ) ; <nl> <nl> / * Remove the ` global_peer_info_t ` object from the table * / <nl> rassert ( sessions . count ( peer ) = = 1 ) ; <nl> session_t * session_to_destroy = sessions . release ( sessions . find ( peer ) ) . release ( ) ; <nl> <nl> + bool got_initialization = session_to_destroy - > got_initial_message . is_pulsed ( ) ; <nl> + <nl> / * Start interrupting any running calls to ` propagate_update ( ) ` . We need to <nl> explicitly interrupt them rather than letting them finish on their own <nl> because , if the network reordered messages , they might wait indefinitely for <nl> void directory_read_manager_t < metadata_t > : : on_disconnect ( peer_id_t peer ) THROWS_ <nl> ) ) ; <nl> <nl> / * Notify every thread that the peer has disconnected * / <nl> - fifo_enforcer_write_token_t propagation_fifo_token = propagation_fifo_source . enter_write ( ) ; <nl> - for ( int i = 0 ; i < get_num_threads ( ) ; i + + ) { <nl> - coro_t : : spawn_sometime ( boost : : bind ( <nl> - & directory_read_manager_t : : propagate_disconnect_on_thread , this , <nl> - i , propagation_fifo_token , peer , <nl> - auto_drainer_t : : lock_t ( & global_drainer ) <nl> - ) ) ; <nl> + if ( got_initialization ) { <nl> + fifo_enforcer_write_token_t propagation_fifo_token = propagation_fifo_source . enter_write ( ) ; <nl> + for ( int i = 0 ; i < get_num_threads ( ) ; i + + ) { <nl> + coro_t : : spawn_sometime ( boost : : bind ( <nl> + & directory_read_manager_t : : propagate_disconnect_on_thread , this , <nl> + i , propagation_fifo_token , peer , <nl> + auto_drainer_t : : lock_t ( & global_drainer ) <nl> + ) ) ; <nl> + } <nl> } <nl> } <nl> <nl> void directory_read_manager_t < metadata_t > : : propagate_initialization ( peer_id_t pe <nl> per_thread_keepalive . assert_is_holding ( per_thread_drainers . get ( ) ) ; <nl> on_thread_t thread_switcher ( home_thread ( ) ) ; <nl> <nl> + ASSERT_FINITE_CORO_WAITING ; <nl> / * Check to make sure that the peer didn ' t die while we were coming from the <nl> thread on which ` on_message ( ) ` was run * / <nl> typename boost : : ptr_map < peer_id_t , session_t > : : iterator it = sessions . find ( peer ) ; <nl> void directory_read_manager_t < metadata_t > : : propagate_initialize_on_thread ( int de <nl> thread_info . get ( ) - > peers_list_publisher . publish ( <nl> boost : : bind ( & directory_read_manager_t : : ping_connection_watcher , peer , _1 ) <nl> ) ; <nl> + <nl> } <nl> <nl> template < class metadata_t > <nl>
Fixes the bug exposed by destructor race unit test .
rethinkdb/rethinkdb
e0c9cea4afadca30b7680d553ebcb7e41803b7c2
2012-02-20T20:33:25Z
deleted file mode 100644 <nl> index 6cfc9d18a57 . . 00000000000 <nl> mmm a / docs / tools / purge_cache_for_changed_files . py <nl> ppp / dev / null <nl> <nl> - # ! / usr / bin / env python3 <nl> - <nl> - import subprocess <nl> - import requests <nl> - import os <nl> - import time <nl> - <nl> - FNAME_START = " ppp " <nl> - <nl> - CLOUDFLARE_URL = " https : / / api . cloudflare . com / client / v4 / zones / 4fc6fb1d46e87851605aa7fa69ca6fe0 / purge_cache " <nl> - <nl> - # we have changes in revision and commit sha on all pages <nl> - # so such changes have to be ignored <nl> - MIN_CHANGED_WORDS = 4 <nl> - <nl> - <nl> - def collect_changed_files ( ) : <nl> - proc = subprocess . Popen ( " git diff HEAD ~ 1 - - word - diff = porcelain | grep - e ' ^ + [ ^ + ] \ | ^ \ - [ ^ \ - ] \ | ^ \ + \ + \ + ' " , stdout = subprocess . PIPE , shell = True ) <nl> - changed_files = [ ] <nl> - current_file_name = " " <nl> - changed_words = [ ] <nl> - while True : <nl> - line = proc . stdout . readline ( ) . decode ( " utf - 8 " ) . strip ( ) <nl> - if not line : <nl> - break <nl> - if FNAME_START in line : <nl> - if changed_words : <nl> - if len ( changed_words ) > MIN_CHANGED_WORDS : <nl> - changed_files . append ( current_file_name ) <nl> - changed_words = [ ] <nl> - current_file_name = line [ 6 : ] <nl> - else : <nl> - changed_words . append ( line ) <nl> - return changed_files <nl> - <nl> - <nl> - def filter_and_transform_changed_files ( changed_files , base_domain ) : <nl> - result = [ ] <nl> - for f in changed_files : <nl> - if f . endswith ( " . html " ) : <nl> - result . append ( base_domain + f . replace ( " index . html " , " " ) ) <nl> - return result <nl> - <nl> - <nl> - def convert_to_dicts ( changed_files , batch_size ) : <nl> - result = [ ] <nl> - current_batch = { " files " : [ ] } <nl> - for f in changed_files : <nl> - if len ( current_batch [ " files " ] ) > = batch_size : <nl> - result . append ( current_batch ) <nl> - current_batch = { " files " : [ ] } <nl> - current_batch [ " files " ] . append ( f ) <nl> - <nl> - if current_batch [ " files " ] : <nl> - result . append ( current_batch ) <nl> - return result <nl> - <nl> - <nl> - def post_data ( prepared_batches , token ) : <nl> - headers = { " Authorization " : " Bearer { } " . format ( token ) } <nl> - for batch in prepared_batches : <nl> - print ( ( " Pugring cache for " , " , " . join ( batch [ " files " ] ) ) ) <nl> - response = requests . post ( CLOUDFLARE_URL , json = batch , headers = headers ) <nl> - response . raise_for_status ( ) <nl> - time . sleep ( 3 ) <nl> - <nl> - <nl> - if __name__ = = " __main__ " : <nl> - token = os . getenv ( " CLOUDFLARE_TOKEN " ) <nl> - if not token : <nl> - raise Exception ( " Env variable CLOUDFLARE_TOKEN is empty " ) <nl> - base_domain = os . getenv ( " BASE_DOMAIN " , " https : / / content . clickhouse . tech / " ) <nl> - changed_files = collect_changed_files ( ) <nl> - print ( ( " Found " , len ( changed_files ) , " changed files " ) ) <nl> - filtered_files = filter_and_transform_changed_files ( changed_files , base_domain ) <nl> - print ( ( " Files rest after filtering " , len ( filtered_files ) ) ) <nl> - prepared_batches = convert_to_dicts ( filtered_files , 25 ) <nl> - post_data ( prepared_batches , token ) <nl> mmm a / docs / tools / release . sh <nl> ppp b / docs / tools / release . sh <nl> then <nl> if [ [ ! - z " $ { CLOUDFLARE_TOKEN } " ] ] <nl> then <nl> sleep 1m <nl> - python3 " $ { BASE_DIR } / purge_cache_for_changed_files . py " <nl> + # https : / / api . cloudflare . 
com / # zone - purge - files - by - cache - tags , - host - or - prefix <nl> + POST_DATA = ' { " hosts " : " clickhouse . tech " } ' <nl> + curl - X POST " https : / / api . cloudflare . com / client / v4 / zones / 4fc6fb1d46e87851605aa7fa69ca6fe0 / purge_cache " - H " Authorization : Bearer $ { CLOUDFLARE_TOKEN } " - H " Content - Type : application / json " - - data " $ { POST_DATA } " <nl> fi <nl> fi <nl>
Fix website
ClickHouse/ClickHouse
ddfe9f61e0f360aad55beceb282a411e4737c5c3
2020-12-23T10:57:17Z
mmm a / src / builtins / builtins - async - gen . cc <nl> ppp b / src / builtins / builtins - async - gen . cc <nl> void AsyncBuiltinsAssembler : : InitializeNativeClosure ( Node * context , <nl> Node * native_context , <nl> Node * function , <nl> Node * context_index ) { <nl> - Node * const function_map = LoadContextElement ( <nl> - native_context , Context : : STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX ) ; <nl> + TNode < Map > function_map = CAST ( LoadContextElement ( <nl> + native_context , Context : : STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX ) ) ; <nl> / / Ensure that we don ' t have to initialize prototype_or_initial_map field of <nl> / / JSFunction . <nl> CSA_ASSERT ( this , WordEqual ( LoadMapInstanceSizeInWords ( function_map ) , <nl> void AsyncBuiltinsAssembler : : InitializeNativeClosure ( Node * context , <nl> StoreObjectFieldRoot ( function , JSFunction : : kFeedbackCellOffset , <nl> RootIndex : : kManyClosuresCell ) ; <nl> <nl> - Node * shared_info = LoadContextElement ( native_context , context_index ) ; <nl> - CSA_ASSERT ( this , IsSharedFunctionInfo ( shared_info ) ) ; <nl> + TNode < SharedFunctionInfo > shared_info = <nl> + CAST ( LoadContextElement ( native_context , context_index ) ) ; <nl> StoreObjectFieldNoWriteBarrier ( <nl> function , JSFunction : : kSharedFunctionInfoOffset , shared_info ) ; <nl> StoreObjectFieldNoWriteBarrier ( function , JSFunction : : kContextOffset , context ) ; <nl> <nl> - Node * const code = GetSharedFunctionInfoCode ( shared_info ) ; <nl> + / / For the native closures that are initialized here ( for ` await ` ) <nl> + / / we know that their SharedFunctionInfo : : function_data ( ) slot <nl> + / / contains a builtin index ( as Smi ) , so there ' s no need to use <nl> + / / CodeStubAssembler : : GetSharedFunctionInfoCode ( ) helper here , <nl> + / / which almost doubles the size of ` await ` builtins ( unnecessarily ) . <nl> + TNode < Smi > builtin_id = LoadObjectField < Smi > ( <nl> + shared_info , SharedFunctionInfo : : kFunctionDataOffset ) ; <nl> + TNode < Code > code = LoadBuiltin ( builtin_id ) ; <nl> StoreObjectFieldNoWriteBarrier ( function , JSFunction : : kCodeOffset , code ) ; <nl> } <nl> <nl>
[ async ] Reduce generated size of ` await ` builtins .
v8/v8
8201da29135383354de756218e73ede1f9793610
2018-10-16T05:40:53Z
mmm a / src / parsing / parser . cc <nl> ppp b / src / parsing / parser . cc <nl> FunctionLiteral * Parser : : ParseFunctionLiteral ( <nl> bool use_temp_zone = <nl> ( FLAG_lazy_inner_functions <nl> ? can_preparse <nl> - : ( allow_lazy ( ) & & <nl> + : ( allow_lazy ( ) & & function_type = = FunctionLiteral : : kDeclaration & & <nl> eager_compile_hint = = FunctionLiteral : : kShouldLazyCompile ) ) & & <nl> ! ( FLAG_validate_asm & & scope ( ) - > IsAsmModule ( ) ) ; <nl> bool is_lazy_inner_function = <nl>
Restore kDeclaration as a condition for temp - zone parsing
v8/v8
c5df7fe2f3b5bd4182fe0b51c849cb6c35aadb6b
2016-10-12T12:53:36Z
mmm a / binaries / benchmark_helper . cc <nl> ppp b / binaries / benchmark_helper . cc <nl> <nl> # include " caffe2 / core / logging . h " <nl> # include " caffe2 / core / net . h " <nl> # include " caffe2 / core / operator . h " <nl> + # include " caffe2 / core / tensor_int8 . h " <nl> # include " caffe2 / utils / bench_utils . h " <nl> # include " caffe2 / utils / string_utils . h " <nl> # include " observers / net_observer_reporter_print . h " <nl> void loadInput ( <nl> CAFFE_THROW ( " Not support GPU on mobile . " ) ; <nl> # endif <nl> } else { <nl> - caffe2 : : TensorCPU * tensor = BlobGetMutableTensor ( blob , caffe2 : : CPU ) ; <nl> - CHECK_NOTNULL ( tensor ) ; <nl> - tensor - > Resize ( input_dims ) ; <nl> if ( input_type_list [ i ] = = " uint8_t " ) { <nl> - tensor - > mutable_data < uint8_t > ( ) ; <nl> + caffe2 : : int8 : : Int8TensorCPU * tensor = <nl> + blob - > GetMutable < caffe2 : : int8 : : Int8TensorCPU > ( ) ; <nl> + CHECK_NOTNULL ( tensor ) ; <nl> + tensor - > t . Resize ( input_dims ) ; <nl> + tensor - > t . mutable_data < uint8_t > ( ) ; <nl> } else if ( input_type_list [ i ] = = " float " ) { <nl> + caffe2 : : TensorCPU * tensor = BlobGetMutableTensor ( blob , caffe2 : : CPU ) ; <nl> + CHECK_NOTNULL ( tensor ) ; <nl> + tensor - > Resize ( input_dims ) ; <nl> tensor - > mutable_data < float > ( ) ; <nl> } else { <nl> CAFFE_THROW ( " Unsupported input type : " , input_type_list [ i ] ) ; <nl> mmm a / binaries / speed_benchmark . cc <nl> ppp b / binaries / speed_benchmark . cc <nl> <nl> # include " caffe2 / core / init . h " <nl> # include " caffe2 / core / logging . h " <nl> # include " caffe2 / core / operator . h " <nl> + # include " caffe2 / core / tensor_int8 . h " <nl> # ifdef CAFFE2_OPTIMIZER <nl> # include " caffe2 / opt / optimizer . h " <nl> # endif <nl> int main ( int argc , char * * argv ) { <nl> if ( blob = = nullptr ) { <nl> blob = workspace - > CreateBlob ( input_names [ i ] ) ; <nl> } <nl> - caffe2 : : TensorCPU * tensor = BlobGetMutableTensor ( blob , caffe2 : : CPU ) ; <nl> - CHECK_NOTNULL ( tensor ) ; <nl> - tensor - > Resize ( input_dims ) ; <nl> if ( input_type_list [ i ] = = " uint8_t " ) { <nl> - tensor - > mutable_data < uint8_t > ( ) ; <nl> + caffe2 : : int8 : : Int8TensorCPU * tensor = <nl> + blob - > GetMutable < caffe2 : : int8 : : Int8TensorCPU > ( ) ; <nl> + CHECK_NOTNULL ( tensor ) ; <nl> + tensor - > t . Resize ( input_dims ) ; <nl> + tensor - > t . mutable_data < uint8_t > ( ) ; <nl> } else if ( input_type_list [ i ] = = " float " ) { <nl> + caffe2 : : TensorCPU * tensor = BlobGetMutableTensor ( blob , caffe2 : : CPU ) ; <nl> + CHECK_NOTNULL ( tensor ) ; <nl> + tensor - > Resize ( input_dims ) ; <nl> tensor - > mutable_data < float > ( ) ; <nl> - } else { <nl> + } else { <nl> CAFFE_THROW ( " Unsupported input type : " , input_type_list [ i ] ) ; <nl> } <nl> } <nl>
Use caffe2 : : int8 : : Int8TensorCPU when input type is uint8_t ( )
pytorch/pytorch
04b0774964d8ccc3aa2c97ca21c37b1fc7a15a1a
2018-10-02T21:57:28Z
mmm a / src / grabber / grabber / include / event_grabber . hpp <nl> ppp b / src / grabber / grabber / include / event_grabber . hpp <nl> class event_grabber final { <nl> <nl> if ( dev - > get_serial_number_string ( ) = = " org . pqrs . driver . VirtualHIDKeyboard " ) { <nl> dev - > open ( ) ; <nl> + dev - > create_transaction ( ) ; <nl> dev - > schedule ( ) ; <nl> std : : cout < < " set virtual_keyboard_ " < < std : : endl ; <nl> self - > virtual_keyboard_ = dev ; <nl> mmm a / src / grabber / grabber / include / human_interface_device . hpp <nl> ppp b / src / grabber / grabber / include / human_interface_device . hpp <nl> class human_interface_device final { <nl> public : <nl> human_interface_device ( IOHIDDeviceRef _Nonnull device ) : device_ ( device ) , <nl> queue_ ( nullptr ) , <nl> + transaction_ ( nullptr ) , <nl> grabbed_ ( false ) { <nl> CFRetain ( device_ ) ; <nl> <nl> class human_interface_device final { <nl> if ( elements ) { <nl> for ( CFIndex i = 0 ; i < CFArrayGetCount ( elements ) ; + + i ) { <nl> auto element = static_cast < IOHIDElementRef > ( const_cast < void * > ( CFArrayGetValueAtIndex ( elements , i ) ) ) ; <nl> - uint32_t usage_page = IOHIDElementGetUsagePage ( element ) ; <nl> - uint32_t usage = IOHIDElementGetUsage ( element ) ; <nl> - uint64_t key = ( static_cast < uint64_t > ( usage_page ) < < 32 | usage ) ; <nl> - <nl> + auto key = elements_key ( IOHIDElementGetUsagePage ( element ) , IOHIDElementGetUsage ( element ) ) ; <nl> if ( elements_ . find ( key ) = = elements_ . end ( ) ) { <nl> CFRetain ( element ) ; <nl> elements_ [ key ] = element ; <nl> class human_interface_device final { <nl> close ( ) ; <nl> } <nl> <nl> + if ( transaction_ ) { <nl> + CFRelease ( transaction_ ) ; <nl> + } <nl> + <nl> if ( queue_ ) { <nl> CFRelease ( queue_ ) ; <nl> } <nl> class human_interface_device final { <nl> return IOHIDDeviceClose ( device_ , kIOHIDOptionsTypeNone ) ; <nl> } <nl> <nl> + void create_transaction ( void ) { <nl> + if ( transaction_ ) { <nl> + return ; <nl> + } <nl> + <nl> + transaction_ = IOHIDTransactionCreate ( kCFAllocatorDefault , device_ , kIOHIDTransactionDirectionTypeOutput , kIOHIDOptionsTypeNone ) ; <nl> + } <nl> + <nl> void schedule ( void ) { <nl> IOHIDDeviceScheduleWithRunLoop ( device_ , CFRunLoopGetCurrent ( ) , kCFRunLoopDefaultMode ) ; <nl> if ( queue_ ) { <nl> IOHIDQueueScheduleWithRunLoop ( queue_ , CFRunLoopGetCurrent ( ) , kCFRunLoopDefaultMode ) ; <nl> } <nl> + if ( transaction_ ) { <nl> + IOHIDTransactionScheduleWithRunLoop ( transaction_ , CFRunLoopGetCurrent ( ) , kCFRunLoopDefaultMode ) ; <nl> + } <nl> } <nl> <nl> void unschedule ( void ) { <nl> + if ( transaction_ ) { <nl> + IOHIDTransactionUnscheduleFromRunLoop ( transaction_ , CFRunLoopGetCurrent ( ) , kCFRunLoopDefaultMode ) ; <nl> + } <nl> if ( queue_ ) { <nl> IOHIDQueueUnscheduleFromRunLoop ( queue_ , CFRunLoopGetCurrent ( ) , kCFRunLoopDefaultMode ) ; <nl> } <nl> class human_interface_device final { <nl> element = value_element ; <nl> CFRetain ( value ) ; <nl> } else { <nl> - uint64_t key = ( static_cast < uint64_t > ( usage_page ) < < 32 ) | usage ; <nl> + auto key = elements_key ( usage_page , usage ) ; <nl> auto it = elements_ . find ( key ) ; <nl> if ( it = = elements_ . end ( ) ) { <nl> value = nullptr ; <nl> class human_interface_device final { <nl> } <nl> } <nl> <nl> - std : : cout < < " set_value usage_page : " < < IOHIDElementGetUsagePage ( element ) <nl> - < < " usage : " < < IOHIDElementGetUsage ( element ) <nl> - < < std : : endl ; <nl> + if ( ! 
element ) { <nl> + return kIOReturnError ; <nl> + } <nl> + <nl> + if ( ! transaction_ ) { <nl> + return kIOReturnError ; <nl> + } <nl> + <nl> + std : : cout < < std : : hex < < " 0x " < < usage_page < < " 0x " < < usage < < std : : endl ; <nl> <nl> - std : : cout < < " device : " < < device_ < < std : : endl <nl> - < < " element - device : " < < IOHIDElementGetDevice ( element ) < < std : : endl ; <nl> + IOHIDTransactionAddElement ( transaction_ , element ) ; <nl> + IOHIDTransactionSetValue ( transaction_ , element , value , kIOHIDOptionsTypeNone ) ; <nl> + auto result = IOHIDTransactionCommit ( transaction_ ) ; <nl> <nl> + # if 0 <nl> auto result = IOHIDDeviceSetValue ( device_ , element , value ) ; <nl> + # endif <nl> + <nl> if ( value ) { <nl> CFRelease ( value ) ; <nl> } <nl> return result ; <nl> } <nl> <nl> + IOReturn set_report ( IOHIDReportType report_type , CFIndex report_id , const uint8_t * _Nonnull report , CFIndex report_length ) { <nl> + return IOHIDDeviceSetReport ( device_ , report_type , report_id , report , report_length ) ; <nl> + } <nl> + <nl> long get_max_input_report_size ( void ) const { <nl> long value = 0 ; <nl> get_long_property ( CFSTR ( kIOHIDMaxInputReportSizeKey ) , value ) ; <nl> class human_interface_device final { <nl> } <nl> <nl> private : <nl> + uint64_t elements_key ( uint32_t usage_page , uint32_t usage ) { <nl> + return ( static_cast < uint64_t > ( usage_page ) < < 32 | usage ) ; <nl> + } <nl> + <nl> IOHIDDeviceRef _Nonnull device_ ; <nl> IOHIDQueueRef _Nullable queue_ ; <nl> + IOHIDTransactionRef _Nullable transaction_ ; <nl> std : : unordered_map < uint64_t , IOHIDElementRef > elements_ ; <nl> <nl> bool grabbed_ ; <nl>
add transaction
pqrs-org/Karabiner-Elements
799d9f778d194053a7e0bb15077ac3cabc6032f1
2016-07-21T13:40:18Z
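The Karabiner-Elements record above routes output values through an IOHIDTransaction instead of calling IOHIDDeviceSetValue directly. A hedged sketch of that IOKit create/add/set/commit sequence follows (illustrative only; the helper name, per-call transaction lifetime, and error handling are assumptions, not project code):

#include <IOKit/IOReturn.h>
#include <IOKit/hid/IOHIDDevice.h>
#include <IOKit/hid/IOHIDTransaction.h>
#include <IOKit/hid/IOHIDValue.h>

// Commit a single integer value to one output element of an already-opened
// HID device via an output-direction transaction.
IOReturn commit_output_value(IOHIDDeviceRef device, IOHIDElementRef element,
                             CFIndex integer_value) {
  IOHIDTransactionRef transaction = IOHIDTransactionCreate(
      kCFAllocatorDefault, device, kIOHIDTransactionDirectionTypeOutput,
      kIOHIDOptionsTypeNone);
  if (!transaction) {
    return kIOReturnError;
  }
  IOHIDValueRef value = IOHIDValueCreateWithIntegerValue(
      kCFAllocatorDefault, element, /*timeStamp=*/0, integer_value);
  IOHIDTransactionAddElement(transaction, element);
  IOHIDTransactionSetValue(transaction, element, value, kIOHIDOptionsTypeNone);
  IOReturn result = IOHIDTransactionCommit(transaction);
  CFRelease(value);
  CFRelease(transaction);
  return result;
}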
mmm a / src / Makefile <nl> ppp b / src / Makefile <nl> <nl> CXX = g + + <nl> LDFLAGS = - lrt - laio <nl> CXXFLAGS = - Iinclude <nl> - SERVER_EXEC_NAME = rethink <nl> <nl> DEP_DIR = dep <nl> OBJ_DIR = obj <nl> + SERVER_EXEC_NAME = rethink <nl> <nl> # Configure debug vs . release <nl> ifeq ( $ { DEBUG } , 1 ) <nl> $ ( DEP_DIR ) / % . d : % . cc <nl> rm - f $ @ . $ $ $ $ <nl> - include $ ( DEPS ) <nl> <nl> - # Define pretty printing functions <nl> + # Define pretty printing functions <nl> ifeq ( $ { VERBOSE } , 1 ) <nl> quiet_cc = <nl> quiet_ld = <nl>
Minor makefile rearrangement
rethinkdb/rethinkdb
f538d233e3638aadf321c2c3a438dfaca1a27f2f
2009-12-09T13:07:19Z
mmm a / java / Makefile <nl> ppp b / java / Makefile <nl> <nl> NATIVE_JAVA_CLASSES = org . rocksdb . AbstractComparator \ <nl> + org . rocksdb . AbstractLogger \ <nl> org . rocksdb . AbstractSlice \ <nl> org . rocksdb . BackupableDB \ <nl> org . rocksdb . BackupableDBOptions \ <nl> ifeq ( $ ( PLATFORM ) , OS_MACOSX ) <nl> ROCKSDB_JAR = rocksdbjni - $ ( ROCKSDB_MAJOR ) . $ ( ROCKSDB_MINOR ) . $ ( ROCKSDB_PATCH ) - osx . jar <nl> endif <nl> <nl> - JAVA_TESTS = org . rocksdb . BackupableDBOptionsTest \ <nl> + JAVA_TESTS = org . rocksdb . AbstractLoggerTest \ <nl> + org . rocksdb . BackupableDBOptionsTest \ <nl> org . rocksdb . BackupableDBTest \ <nl> org . rocksdb . BlockBasedTableConfigTest \ <nl> org . rocksdb . CheckPointTest \ <nl> new file mode 100644 <nl> index 0000000000 . . 170be63960 <nl> mmm / dev / null <nl> ppp b / java / rocksjni / loggerjnicallback . cc <nl> <nl> + / / Copyright ( c ) 2014 , Facebook , Inc . All rights reserved . <nl> + / / This source code is licensed under the BSD - style license found in the <nl> + / / LICENSE file in the root directory of this source tree . An additional grant <nl> + / / of patent rights can be found in the PATENTS file in the same directory . <nl> + / / <nl> + / / This file implements the callback " bridge " between Java and C + + for <nl> + / / rocksdb : : Comparator . <nl> + <nl> + # include " include / org_rocksdb_AbstractLogger . h " <nl> + <nl> + # include " rocksjni / loggerjnicallback . h " <nl> + # include " rocksjni / portal . h " <nl> + <nl> + namespace rocksdb { <nl> + <nl> + LoggerJniCallback : : LoggerJniCallback ( <nl> + JNIEnv * env , jobject jAbstractLogger ) { <nl> + <nl> + const jint rs = env - > GetJavaVM ( & m_jvm ) ; <nl> + assert ( rs = = JNI_OK ) ; <nl> + <nl> + / / Note : we want to access the Java Logger instance <nl> + / / across multiple method calls , so we create a global ref <nl> + m_jAbstractLogger = env - > NewGlobalRef ( jAbstractLogger ) ; <nl> + m_jLogMethodId = AbstractLoggerJni : : getLogMethodId ( env ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Get JNIEnv for current native thread <nl> + * / <nl> + JNIEnv * LoggerJniCallback : : getJniEnv ( ) const { <nl> + JNIEnv * env ; <nl> + jint rs = m_jvm - > AttachCurrentThread ( reinterpret_cast < void * * > ( & env ) , NULL ) ; <nl> + assert ( rs = = JNI_OK ) ; <nl> + return env ; <nl> + } <nl> + <nl> + void LoggerJniCallback : : Logv ( const char * format , va_list ap ) { <nl> + / / We implement this method because it is virtual but we don ' t <nl> + / / use it because we need to know about the log level . <nl> + } <nl> + <nl> + void LoggerJniCallback : : Logv ( const InfoLogLevel log_level , <nl> + const char * format , va_list ap ) { <nl> + if ( GetInfoLogLevel ( ) < = log_level ) { <nl> + JNIEnv * env = getJniEnv ( ) ; <nl> + / / We try twice : the first time with a fixed - size stack allocated buffer , <nl> + / / and the second time with a much larger dynamically allocated buffer . 
<nl> + char buffer [ 500 ] ; <nl> + for ( int iter = 0 ; iter < 2 ; iter + + ) { <nl> + char * base ; <nl> + int bufsize ; <nl> + if ( iter = = 0 ) { <nl> + bufsize = sizeof ( buffer ) ; <nl> + base = buffer ; <nl> + } else { <nl> + bufsize = 30000 ; <nl> + base = new char [ bufsize ] ; <nl> + } <nl> + char * p = base ; <nl> + char * limit = base + bufsize ; <nl> + / / Print the message <nl> + if ( p < limit ) { <nl> + va_list backup_ap ; <nl> + va_copy ( backup_ap , ap ) ; <nl> + p + = vsnprintf ( p , limit - p , format , backup_ap ) ; <nl> + va_end ( backup_ap ) ; <nl> + } <nl> + / / Truncate to available space if necessary <nl> + if ( p > = limit ) { <nl> + if ( iter = = 0 ) { <nl> + continue ; / / Try again with larger buffer <nl> + } else { <nl> + p = limit - 1 ; <nl> + } <nl> + } <nl> + assert ( p < limit ) ; <nl> + * p + + = ' \ 0 ' ; <nl> + <nl> + / / determine InfoLogLevel java enum instance <nl> + jobject jlog_level ; <nl> + switch ( log_level ) { <nl> + case rocksdb : : InfoLogLevel : : DEBUG_LEVEL : <nl> + jlog_level = InfoLogLevelJni : : DEBUG_LEVEL ( env ) ; <nl> + break ; <nl> + case rocksdb : : InfoLogLevel : : INFO_LEVEL : <nl> + jlog_level = InfoLogLevelJni : : INFO_LEVEL ( env ) ; <nl> + break ; <nl> + case rocksdb : : InfoLogLevel : : ERROR_LEVEL : <nl> + jlog_level = InfoLogLevelJni : : ERROR_LEVEL ( env ) ; <nl> + case rocksdb : : InfoLogLevel : : FATAL_LEVEL : <nl> + jlog_level = InfoLogLevelJni : : FATAL_LEVEL ( env ) ; <nl> + default : <nl> + jlog_level = InfoLogLevelJni : : FATAL_LEVEL ( env ) ; <nl> + break ; <nl> + } <nl> + / / pass java string to callback handler <nl> + env - > CallVoidMethod ( <nl> + m_jAbstractLogger , <nl> + m_jLogMethodId , <nl> + jlog_level , <nl> + env - > NewStringUTF ( base ) ) ; <nl> + <nl> + if ( base ! 
= buffer ) { <nl> + delete [ ] base ; <nl> + } <nl> + break ; <nl> + } <nl> + m_jvm - > DetachCurrentThread ( ) ; <nl> + } <nl> + } <nl> + <nl> + LoggerJniCallback : : ~ LoggerJniCallback ( ) { <nl> + JNIEnv * env = getJniEnv ( ) ; <nl> + env - > DeleteGlobalRef ( m_jAbstractLogger ) ; <nl> + m_jvm - > DetachCurrentThread ( ) ; <nl> + } <nl> + <nl> + } / / namespace rocksdb <nl> + <nl> + / * <nl> + * Class : org_rocksdb_AbstractLogger <nl> + * Method : createNewLoggerOptions <nl> + * Signature : ( J ) V <nl> + * / <nl> + void Java_org_rocksdb_AbstractLogger_createNewLoggerOptions ( <nl> + JNIEnv * env , jobject jobj , jlong joptions ) { <nl> + rocksdb : : LoggerJniCallback * c = <nl> + new rocksdb : : LoggerJniCallback ( env , jobj ) ; <nl> + / / set log level <nl> + c - > SetInfoLogLevel ( reinterpret_cast < rocksdb : : Options * > <nl> + ( joptions ) - > info_log_level ) ; <nl> + std : : shared_ptr < rocksdb : : LoggerJniCallback > * pLoggerJniCallback = <nl> + new std : : shared_ptr < rocksdb : : LoggerJniCallback > ; <nl> + * pLoggerJniCallback = std : : shared_ptr < rocksdb : : LoggerJniCallback > ( c ) ; <nl> + rocksdb : : AbstractLoggerJni : : setHandle ( env , jobj , pLoggerJniCallback ) ; <nl> + } <nl> + <nl> + / * <nl> + * Class : org_rocksdb_AbstractLogger <nl> + * Method : createNewLoggerDbOptions <nl> + * Signature : ( J ) V <nl> + * / <nl> + void Java_org_rocksdb_AbstractLogger_createNewLoggerDbOptions ( <nl> + JNIEnv * env , jobject jobj , jlong jdb_options ) { <nl> + rocksdb : : LoggerJniCallback * c = <nl> + new rocksdb : : LoggerJniCallback ( env , jobj ) ; <nl> + / / set log level <nl> + c - > SetInfoLogLevel ( reinterpret_cast < rocksdb : : DBOptions * > <nl> + ( jdb_options ) - > info_log_level ) ; <nl> + std : : shared_ptr < rocksdb : : LoggerJniCallback > * pLoggerJniCallback = <nl> + new std : : shared_ptr < rocksdb : : LoggerJniCallback > ; <nl> + * pLoggerJniCallback = std : : shared_ptr < rocksdb : : LoggerJniCallback > ( c ) ; <nl> + rocksdb : : AbstractLoggerJni : : setHandle ( env , jobj , pLoggerJniCallback ) ; <nl> + } <nl> + <nl> + / * <nl> + * Class : org_rocksdb_AbstractLogger <nl> + * Method : disposeInternal <nl> + * Signature : ( J ) V <nl> + * / <nl> + void Java_org_rocksdb_AbstractLogger_disposeInternal ( <nl> + JNIEnv * env , jobject jobj , jlong jhandle ) { <nl> + std : : shared_ptr < rocksdb : : LoggerJniCallback > * handle = <nl> + reinterpret_cast < std : : shared_ptr < rocksdb : : LoggerJniCallback > * > ( jhandle ) ; <nl> + handle - > reset ( ) ; <nl> + } <nl> new file mode 100644 <nl> index 0000000000 . . 23bf2ba232 <nl> mmm / dev / null <nl> ppp b / java / rocksjni / loggerjnicallback . h <nl> <nl> + / / Copyright ( c ) 2014 , Facebook , Inc . All rights reserved . <nl> + / / This source code is licensed under the BSD - style license found in the <nl> + / / LICENSE file in the root directory of this source tree . An additional grant <nl> + / / of patent rights can be found in the PATENTS file in the same directory . <nl> + / / <nl> + / / This file implements the callback " bridge " between Java and C + + for <nl> + / / rocksdb : : Logger <nl> + <nl> + # ifndef JAVA_ROCKSJNI_LOGGERJNICALLBACK_H_ <nl> + # define JAVA_ROCKSJNI_LOGGERJNICALLBACK_H_ <nl> + <nl> + # include < jni . h > <nl> + # include < string > <nl> + # include " port / port . h " <nl> + # include " rocksdb / env . 
h " <nl> + <nl> + namespace rocksdb { <nl> + <nl> + class LoggerJniCallback : public Logger { <nl> + public : <nl> + LoggerJniCallback ( JNIEnv * env , jobject jAbstractLogger ) ; <nl> + virtual ~ LoggerJniCallback ( ) ; <nl> + / / Write an entry to the log file with the specified format . <nl> + virtual void Logv ( const char * format , va_list ap ) ; <nl> + / / Write an entry to the log file with the specified log level <nl> + / / and format . Any log with level under the internal log level <nl> + / / of * this ( see @ SetInfoLogLevel and @ GetInfoLogLevel ) will not be <nl> + / / printed . <nl> + virtual void Logv ( const InfoLogLevel log_level , const char * format , va_list ap ) ; <nl> + <nl> + protected : <nl> + JNIEnv * getJniEnv ( ) const ; <nl> + private : <nl> + JavaVM * m_jvm ; <nl> + jobject m_jAbstractLogger ; <nl> + jmethodID m_jLogMethodId ; <nl> + } ; <nl> + } <nl> + <nl> + # endif <nl> mmm a / java / rocksjni / options . cc <nl> ppp b / java / rocksjni / options . cc <nl> void Java_org_rocksdb_Options_setRateLimiter ( <nl> reinterpret_cast < rocksdb : : RateLimiter * > ( jrate_limiter_handle ) ) ; <nl> } <nl> <nl> + / * <nl> + * Class : org_rocksdb_Options <nl> + * Method : setLogger <nl> + * Signature : ( JJ ) V <nl> + * / <nl> + void Java_org_rocksdb_Options_setLogger ( <nl> + JNIEnv * env , jobject jobj , jlong jhandle , jlong jlogger_handle ) { <nl> + std : : shared_ptr < rocksdb : : LoggerJniCallback > * pLogger = <nl> + reinterpret_cast < std : : shared_ptr < rocksdb : : LoggerJniCallback > * > ( <nl> + jlogger_handle ) ; <nl> + reinterpret_cast < rocksdb : : Options * > ( jhandle ) - > info_log = * pLogger ; <nl> + } <nl> + <nl> / * <nl> * Class : org_rocksdb_Options <nl> * Method : setInfoLogLevel <nl> void Java_org_rocksdb_DBOptions_setRateLimiter ( <nl> reinterpret_cast < rocksdb : : RateLimiter * > ( jrate_limiter_handle ) ) ; <nl> } <nl> <nl> + / * <nl> + * Class : org_rocksdb_DBOptions <nl> + * Method : setLogger <nl> + * Signature : ( JJ ) V <nl> + * / <nl> + void Java_org_rocksdb_DBOptions_setLogger ( <nl> + JNIEnv * env , jobject jobj , jlong jhandle , jlong jlogger_handle ) { <nl> + std : : shared_ptr < rocksdb : : LoggerJniCallback > * pLogger = <nl> + reinterpret_cast < std : : shared_ptr < rocksdb : : LoggerJniCallback > * > ( <nl> + jlogger_handle ) ; <nl> + reinterpret_cast < rocksdb : : DBOptions * > ( jhandle ) - > info_log = * pLogger ; <nl> + } <nl> + <nl> / * <nl> * Class : org_rocksdb_DBOptions <nl> * Method : setInfoLogLevel <nl> mmm a / java / rocksjni / portal . h <nl> ppp b / java / rocksjni / portal . h <nl> <nl> # include " rocksdb / utilities / backupable_db . h " <nl> # include " rocksdb / utilities / write_batch_with_index . h " <nl> # include " rocksjni / comparatorjnicallback . h " <nl> + # include " rocksjni / loggerjnicallback . h " <nl> # include " rocksjni / writebatchhandlerjnicallback . h " <nl> <nl> namespace rocksdb { <nl> class WriteEntryJni { <nl> } <nl> } ; <nl> <nl> + class InfoLogLevelJni { <nl> + public : <nl> + <nl> + / / Get the DEBUG_LEVEL enum field of org . rocksdb . InfoLogLevel <nl> + static jobject DEBUG_LEVEL ( JNIEnv * env ) { <nl> + return getEnum ( env , " DEBUG_LEVEL " ) ; <nl> + } <nl> + <nl> + / / Get the INFO_LEVEL enum field of org . rocksdb . InfoLogLevel <nl> + static jobject INFO_LEVEL ( JNIEnv * env ) { <nl> + return getEnum ( env , " INFO_LEVEL " ) ; <nl> + } <nl> + <nl> + / / Get the WARN_LEVEL enum field of org . rocksdb . 
InfoLogLevel <nl> + static jobject WARN_LEVEL ( JNIEnv * env ) { <nl> + return getEnum ( env , " WARN_LEVEL " ) ; <nl> + } <nl> + <nl> + / / Get the ERROR_LEVEL enum field of org . rocksdb . InfoLogLevel <nl> + static jobject ERROR_LEVEL ( JNIEnv * env ) { <nl> + return getEnum ( env , " ERROR_LEVEL " ) ; <nl> + } <nl> + <nl> + / / Get the FATAL_LEVEL enum field of org . rocksdb . InfoLogLevel <nl> + static jobject FATAL_LEVEL ( JNIEnv * env ) { <nl> + return getEnum ( env , " FATAL_LEVEL " ) ; <nl> + } <nl> + <nl> + private : <nl> + / / Get the java class id of org . rocksdb . WBWIRocksIterator . WriteType . <nl> + static jclass getJClass ( JNIEnv * env ) { <nl> + jclass jclazz = env - > FindClass ( " org / rocksdb / InfoLogLevel " ) ; <nl> + assert ( jclazz ! = nullptr ) ; <nl> + return jclazz ; <nl> + } <nl> + <nl> + / / Get an enum field of org . rocksdb . WBWIRocksIterator . WriteType <nl> + static jobject getEnum ( JNIEnv * env , const char name [ ] ) { <nl> + jclass jclazz = getJClass ( env ) ; <nl> + jfieldID jfid = <nl> + env - > GetStaticFieldID ( jclazz , name , <nl> + " Lorg / rocksdb / InfoLogLevel ; " ) ; <nl> + assert ( jfid ! = nullptr ) ; <nl> + return env - > GetStaticObjectField ( jclazz , jfid ) ; <nl> + } <nl> + } ; <nl> + <nl> + / / The portal class for org . rocksdb . AbstractLogger <nl> + class AbstractLoggerJni : public RocksDBNativeClass < <nl> + std : : shared_ptr < rocksdb : : LoggerJniCallback > * , AbstractLoggerJni > { <nl> + public : <nl> + static jclass getJClass ( JNIEnv * env ) { <nl> + return RocksDBNativeClass : : getJClass ( env , <nl> + " org / rocksdb / AbstractLogger " ) ; <nl> + } <nl> + <nl> + / / Get the java method ` name ` of org . rocksdb . AbstractLogger . <nl> + static jmethodID getLogMethodId ( JNIEnv * env ) { <nl> + static jmethodID mid = env - > GetMethodID ( <nl> + getJClass ( env ) , " log " , <nl> + " ( Lorg / rocksdb / InfoLogLevel ; Ljava / lang / String ; ) V " ) ; <nl> + assert ( mid ! = nullptr ) ; <nl> + return mid ; <nl> + } <nl> + } ; <nl> + <nl> class JniUtil { <nl> public : <nl> / * <nl> new file mode 100644 <nl> index 0000000000 . . e7a40f3243 <nl> mmm / dev / null <nl> ppp b / java / src / main / java / org / rocksdb / AbstractLogger . java <nl> <nl> + package org . rocksdb ; <nl> + <nl> + / * * <nl> + * < p > This class provides a custom logger functionality <nl> + * in Java which wraps { @ code RocksDB } logging facilities . <nl> + * < / p > <nl> + * <nl> + * < p > Using this class RocksDB can log with common <nl> + * Java logging APIs like Log4j or Slf4j without keeping <nl> + * database logs in the filesystem . < / p > <nl> + * / <nl> + public abstract class AbstractLogger extends RocksObject { <nl> + <nl> + / * * <nl> + * < p > AbstractLogger constructor . < / p > <nl> + * <nl> + * < p > < strong > Important : < / strong > the log level set within <nl> + * the { @ link org . rocksdb . Options } instance will be used as <nl> + * maximum log level of RocksDB . < / p > <nl> + * <nl> + * @ param options { @ link org . rocksdb . Options } instance . <nl> + * / <nl> + public AbstractLogger ( Options options ) { <nl> + createNewLoggerOptions ( options . nativeHandle_ ) ; <nl> + } <nl> + <nl> + / * * <nl> + * < p > AbstractLogger constructor . < / p > <nl> + * <nl> + * < p > < strong > Important : < / strong > the log level set within <nl> + * the { @ link org . rocksdb . DBOptions } instance will be used <nl> + * as maximum log level of RocksDB . < / p > <nl> + * <nl> + * @ param dboptions { @ link org . rocksdb . 
DBOptions } instance . <nl> + * / <nl> + public AbstractLogger ( DBOptions dboptions ) { <nl> + createNewLoggerDbOptions ( dboptions . nativeHandle_ ) ; <nl> + } <nl> + <nl> + protected abstract void log ( InfoLogLevel infoLogLevel , <nl> + String logMsg ) ; <nl> + <nl> + / * * <nl> + * Deletes underlying C + + slice pointer . <nl> + * Note that this function should be called only after all <nl> + * RocksDB instances referencing the slice are closed . <nl> + * Otherwise an undefined behavior will occur . <nl> + * / <nl> + @ Override <nl> + protected void disposeInternal ( ) { <nl> + assert ( isInitialized ( ) ) ; <nl> + disposeInternal ( nativeHandle_ ) ; <nl> + } <nl> + <nl> + protected native void createNewLoggerOptions ( <nl> + long options ) ; <nl> + protected native void createNewLoggerDbOptions ( <nl> + long dbOptions ) ; <nl> + private native void disposeInternal ( long handle ) ; <nl> + } <nl> mmm a / java / src / main / java / org / rocksdb / DBOptions . java <nl> ppp b / java / src / main / java / org / rocksdb / DBOptions . java <nl> public DBOptions setRateLimiterConfig ( <nl> return this ; <nl> } <nl> <nl> + @ Override <nl> + public DBOptions setLogger ( final AbstractLogger logger ) { <nl> + assert ( isInitialized ( ) ) ; <nl> + setLogger ( nativeHandle_ , logger . nativeHandle_ ) ; <nl> + return this ; <nl> + } <nl> + <nl> @ Override <nl> public DBOptions setInfoLogLevel ( <nl> final InfoLogLevel infoLogLevel ) { <nl> private native void setParanoidChecks ( <nl> private native boolean paranoidChecks ( long handle ) ; <nl> private native void setRateLimiter ( long handle , <nl> long rateLimiterHandle ) ; <nl> + private native void setLogger ( long handle , <nl> + long loggerHandle ) ; <nl> private native void setInfoLogLevel ( long handle , byte logLevel ) ; <nl> private native byte infoLogLevel ( long handle ) ; <nl> private native void setMaxOpenFiles ( long handle , int maxOpenFiles ) ; <nl> mmm a / java / src / main / java / org / rocksdb / DBOptionsInterface . java <nl> ppp b / java / src / main / java / org / rocksdb / DBOptionsInterface . java <nl> <nl> * / <nl> Object setRateLimiterConfig ( RateLimiterConfig config ) ; <nl> <nl> + / * * <nl> + * < p > Any internal progress / error information generated by <nl> + * the db will be written to the Logger if it is non - nullptr , <nl> + * or to a file stored in the same directory as the DB <nl> + * contents if info_log is nullptr . < / p > <nl> + * <nl> + * < p > Default : nullptr < / p > <nl> + * <nl> + * @ param logger { @ link AbstractLogger } instance . <nl> + * @ return the instance of the current Object . <nl> + * / <nl> + Object setLogger ( AbstractLogger logger ) ; <nl> + <nl> / * * <nl> * < p > Sets the RocksDB log level . Default level is INFO < / p > <nl> * <nl> mmm a / java / src / main / java / org / rocksdb / Options . java <nl> ppp b / java / src / main / java / org / rocksdb / Options . java <nl> public Options setRateLimiterConfig ( final RateLimiterConfig config ) { <nl> return this ; <nl> } <nl> <nl> + @ Override <nl> + public Options setLogger ( final AbstractLogger logger ) { <nl> + assert ( isInitialized ( ) ) ; <nl> + setLogger ( nativeHandle_ , logger . 
nativeHandle_ ) ; <nl> + return this ; <nl> + } <nl> + <nl> @ Override <nl> public Options setInfoLogLevel ( final InfoLogLevel infoLogLevel ) { <nl> assert ( isInitialized ( ) ) ; <nl> private native void setParanoidChecks ( <nl> private native boolean paranoidChecks ( long handle ) ; <nl> private native void setRateLimiter ( long handle , <nl> long rateLimiterHandle ) ; <nl> + private native void setLogger ( long handle , <nl> + long loggerHandle ) ; <nl> private native void setInfoLogLevel ( long handle , byte logLevel ) ; <nl> private native byte infoLogLevel ( long handle ) ; <nl> private native void setMaxOpenFiles ( long handle , int maxOpenFiles ) ; <nl> new file mode 100644 <nl> index 0000000000 . . 64062f1a9a <nl> mmm / dev / null <nl> ppp b / java / src / test / java / org / rocksdb / AbstractLoggerTest . java <nl> <nl> + package org . rocksdb ; <nl> + <nl> + import org . junit . ClassRule ; <nl> + import org . junit . Rule ; <nl> + import org . junit . Test ; <nl> + import org . junit . rules . TemporaryFolder ; <nl> + <nl> + import java . util . ArrayList ; <nl> + import java . util . List ; <nl> + import java . util . concurrent . atomic . AtomicInteger ; <nl> + <nl> + import static org . assertj . core . api . Assertions . assertThat ; <nl> + <nl> + public class AbstractLoggerTest { <nl> + @ ClassRule <nl> + public static final RocksMemoryResource rocksMemoryResource = <nl> + new RocksMemoryResource ( ) ; <nl> + <nl> + @ Rule <nl> + public TemporaryFolder dbFolder = new TemporaryFolder ( ) ; <nl> + <nl> + private AtomicInteger logMessageCounter = new AtomicInteger ( ) ; <nl> + <nl> + @ Test <nl> + public void customLogger ( ) throws RocksDBException { <nl> + RocksDB db = null ; <nl> + logMessageCounter . set ( 0 ) ; <nl> + try { <nl> + <nl> + / / Setup options <nl> + final Options options = new Options ( ) . <nl> + setInfoLogLevel ( InfoLogLevel . DEBUG_LEVEL ) . <nl> + setCreateIfMissing ( true ) ; <nl> + <nl> + / / Create new logger with max log level passed by options <nl> + AbstractLogger abstractLogger = new AbstractLogger ( options ) { <nl> + @ Override <nl> + protected void log ( InfoLogLevel infoLogLevel , String logMsg ) { <nl> + assertThat ( logMsg ) . isNotNull ( ) ; <nl> + assertThat ( logMsg . length ( ) ) . isGreaterThan ( 0 ) ; <nl> + logMessageCounter . incrementAndGet ( ) ; <nl> + } <nl> + } ; <nl> + <nl> + / / Set custom logger to options <nl> + options . setLogger ( abstractLogger ) ; <nl> + <nl> + db = RocksDB . open ( options , dbFolder . getRoot ( ) . getAbsolutePath ( ) ) ; <nl> + <nl> + / / there should be more than zero received log messages in <nl> + / / debug level . <nl> + assertThat ( logMessageCounter . get ( ) ) . isGreaterThan ( 0 ) ; <nl> + } finally { <nl> + if ( db ! = null ) { <nl> + db . close ( ) ; <nl> + } <nl> + } <nl> + logMessageCounter . set ( 0 ) ; <nl> + } <nl> + <nl> + <nl> + @ Test <nl> + public void fatalLogger ( ) throws RocksDBException { <nl> + RocksDB db = null ; <nl> + logMessageCounter . set ( 0 ) ; <nl> + <nl> + try { <nl> + / / Setup options <nl> + final Options options = new Options ( ) . <nl> + setInfoLogLevel ( InfoLogLevel . FATAL_LEVEL ) . <nl> + setCreateIfMissing ( true ) ; <nl> + <nl> + / / Create new logger with max log level passed by options <nl> + AbstractLogger abstractLogger = new AbstractLogger ( options ) { <nl> + @ Override <nl> + protected void log ( InfoLogLevel infoLogLevel , String logMsg ) { <nl> + assertThat ( logMsg ) . isNotNull ( ) ; <nl> + assertThat ( logMsg . length ( ) ) . 
isGreaterThan ( 0 ) ; <nl> + logMessageCounter . incrementAndGet ( ) ; <nl> + } <nl> + } ; <nl> + <nl> + / / Set custom logger to options <nl> + options . setLogger ( abstractLogger ) ; <nl> + <nl> + db = RocksDB . open ( options , dbFolder . getRoot ( ) . getAbsolutePath ( ) ) ; <nl> + <nl> + / / there should be zero messages <nl> + / / using fatal level as log level . <nl> + assertThat ( logMessageCounter . get ( ) ) . isEqualTo ( 0 ) ; <nl> + } finally { <nl> + if ( db ! = null ) { <nl> + db . close ( ) ; <nl> + } <nl> + } <nl> + logMessageCounter . set ( 0 ) ; <nl> + } <nl> + <nl> + @ Test <nl> + public void dbOptionsLogger ( ) throws RocksDBException { <nl> + RocksDB db = null ; <nl> + List < ColumnFamilyHandle > cfHandles = new ArrayList < > ( ) ; <nl> + List < ColumnFamilyDescriptor > cfDescriptors = new ArrayList < > ( ) ; <nl> + cfDescriptors . add ( new ColumnFamilyDescriptor ( RocksDB . DEFAULT_COLUMN_FAMILY ) ) ; <nl> + <nl> + logMessageCounter . set ( 0 ) ; <nl> + try { <nl> + / / Setup options <nl> + final DBOptions options = new DBOptions ( ) . <nl> + setInfoLogLevel ( InfoLogLevel . FATAL_LEVEL ) . <nl> + setCreateIfMissing ( true ) ; <nl> + <nl> + / / Create new logger with max log level passed by options <nl> + AbstractLogger abstractLogger = new AbstractLogger ( options ) { <nl> + @ Override <nl> + protected void log ( InfoLogLevel infoLogLevel , String logMsg ) { <nl> + assertThat ( logMsg ) . isNotNull ( ) ; <nl> + assertThat ( logMsg . length ( ) ) . isGreaterThan ( 0 ) ; <nl> + logMessageCounter . incrementAndGet ( ) ; <nl> + } <nl> + } ; <nl> + <nl> + / / Set custom logger to options <nl> + options . setLogger ( abstractLogger ) ; <nl> + db = RocksDB . open ( options , dbFolder . getRoot ( ) . getAbsolutePath ( ) , <nl> + cfDescriptors , cfHandles ) ; <nl> + / / there should be zero messages <nl> + / / using fatal level as log level . <nl> + assertThat ( logMessageCounter . get ( ) ) . isEqualTo ( 0 ) ; <nl> + logMessageCounter . set ( 0 ) ; <nl> + } finally { <nl> + for ( ColumnFamilyHandle columnFamilyHandle : cfHandles ) { <nl> + columnFamilyHandle . dispose ( ) ; <nl> + } <nl> + if ( db ! = null ) { <nl> + db . close ( ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl>
[ RocksJava ] Logging JNI callback
facebook/rocksdb
a3bd4142f2074ecac4856ece3105092cbe4e9da1
2015-03-14T19:57:18Z
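A minimal sketch of the two-pass formatting idea used in LoggerJniCallback::Logv above, under a hypothetical helper name (format_message): format into a fixed stack buffer first, and only fall back to a single heap allocation when the message does not fit. The exact buffer sizes and truncation policy of the diff are not reproduced here.

#include <cstdarg>
#include <cstdio>
#include <string>

// format_message is a hypothetical name, not part of the RocksDB API.
std::string format_message(const char* format, ...) {
  char stack_buf[500];                           // first pass: stack buffer
  va_list ap;
  va_start(ap, format);
  int needed = std::vsnprintf(stack_buf, sizeof(stack_buf), format, ap);
  va_end(ap);
  if (needed < 0)
    return std::string();                        // encoding error
  if (needed < static_cast<int>(sizeof(stack_buf)))
    return std::string(stack_buf, needed);       // common case: no heap allocation
  std::string big(static_cast<size_t>(needed) + 1, '\0');
  va_start(ap, format);                          // second pass: exact-size heap buffer
  std::vsnprintf(&big[0], big.size(), format, ap);
  va_end(ap);
  big.resize(needed);                            // drop the trailing '\0'
  return big;
}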
mmm a / src / core / lib / iomgr / resolve_address_uv . c <nl> ppp b / src / core / lib / iomgr / resolve_address_uv . c <nl> static int retry_named_port_failure ( int status , request * r , <nl> int retry_status ; <nl> uv_getaddrinfo_t * req = gpr_malloc ( sizeof ( uv_getaddrinfo_t ) ) ; <nl> req - > data = r ; <nl> - r - > port = svc [ i ] [ 1 ] ; <nl> + r - > port = gpr_strdup ( svc [ i ] [ 1 ] ) ; <nl> retry_status = uv_getaddrinfo ( uv_default_loop ( ) , req , getaddrinfo_cb , <nl> r - > host , r - > port , r - > hints ) ; <nl> if ( retry_status < 0 | | getaddrinfo_cb = = NULL ) { <nl> static void getaddrinfo_callback ( uv_getaddrinfo_t * req , int status , <nl> GRPC_CLOSURE_SCHED ( & exec_ctx , r - > on_done , error ) ; <nl> grpc_exec_ctx_finish ( & exec_ctx ) ; <nl> gpr_free ( r - > hints ) ; <nl> + gpr_free ( r - > host ) ; <nl> + gpr_free ( r - > port ) ; <nl> gpr_free ( r ) ; <nl> uv_freeaddrinfo ( res ) ; <nl> } <nl> mmm a / src / core / lib / iomgr / tcp_client_uv . c <nl> ppp b / src / core / lib / iomgr / tcp_client_uv . c <nl> typedef struct grpc_uv_tcp_connect { <nl> static void uv_tcp_connect_cleanup ( grpc_exec_ctx * exec_ctx , <nl> grpc_uv_tcp_connect * connect ) { <nl> grpc_resource_quota_unref_internal ( exec_ctx , connect - > resource_quota ) ; <nl> + gpr_free ( connect - > addr_name ) ; <nl> gpr_free ( connect ) ; <nl> } <nl> <nl> static void uv_tc_on_connect ( uv_connect_t * req , int status ) { <nl> } <nl> done = ( - - connect - > refs = = 0 ) ; <nl> if ( done ) { <nl> + grpc_exec_ctx_flush ( & exec_ctx ) ; <nl> uv_tcp_connect_cleanup ( & exec_ctx , connect ) ; <nl> } <nl> GRPC_CLOSURE_SCHED ( & exec_ctx , closure , error ) ; <nl> static void tcp_client_connect_impl ( grpc_exec_ctx * exec_ctx , <nl> connect - > resource_quota = resource_quota ; <nl> uv_tcp_init ( uv_default_loop ( ) , connect - > tcp_handle ) ; <nl> connect - > connect_req . data = connect ; <nl> + connect - > refs = 1 ; <nl> <nl> if ( GRPC_TRACER_ON ( grpc_tcp_trace ) ) { <nl> gpr_log ( GPR_DEBUG , " CLIENT_CONNECT : % s : asynchronously connecting " , <nl> mmm a / src / core / lib / iomgr / tcp_server_uv . c <nl> ppp b / src / core / lib / iomgr / tcp_server_uv . c <nl> static void on_connect ( uv_stream_t * server , int status ) { <nl> sp - > server - > on_accept_cb ( & exec_ctx , sp - > server - > on_accept_cb_arg , ep , NULL , <nl> acceptor ) ; <nl> grpc_exec_ctx_finish ( & exec_ctx ) ; <nl> + gpr_free ( peer_name_string ) ; <nl> } <nl> } <nl> <nl> mmm a / src / core / lib / iomgr / tcp_uv . c <nl> ppp b / src / core / lib / iomgr / tcp_uv . c <nl> typedef struct { <nl> static void tcp_free ( grpc_exec_ctx * exec_ctx , grpc_tcp * tcp ) { <nl> grpc_slice_unref_internal ( exec_ctx , tcp - > read_slice ) ; <nl> grpc_resource_user_unref ( exec_ctx , tcp - > resource_user ) ; <nl> + gpr_free ( tcp - > handle ) ; <nl> + gpr_free ( tcp - > peer_string ) ; <nl> gpr_free ( tcp ) ; <nl> } <nl> <nl>
Merge pull request from murgatroid99 / uv_memory_leak_fixes
grpc/grpc
ee145be9fee92fb95e37844504f45c159dc2eea4
2017-07-11T00:21:25Z
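A minimal sketch of the ownership rule these leak fixes enforce, with hypothetical names (resolve_request, make_request, on_resolved): any string stashed inside an asynchronous request is duplicated when the request is built and freed exactly once when the completion callback is done with it, never kept as a borrowed pointer.

#include <cstdlib>
#include <cstring>

struct resolve_request {         // hypothetical stand-in for the uv request wrapper
  char* host;                    // owned copy, freed in on_resolved
  char* port;                    // owned copy, freed in on_resolved
};

static char* dup_cstring(const char* s) {
  char* copy = static_cast<char*>(std::malloc(std::strlen(s) + 1));
  std::strcpy(copy, s);
  return copy;
}

resolve_request* make_request(const char* host, const char* port) {
  resolve_request* r = new resolve_request;
  r->host = dup_cstring(host);   // the caller's buffer may be gone by the time
  r->port = dup_cstring(port);   // the event loop invokes the callback
  return r;
}

void on_resolved(resolve_request* r) {
  // ... hand the result to the pending closure ...
  std::free(r->host);
  std::free(r->port);
  delete r;                      // every allocation has exactly one owner
}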
mmm a / src / Makefile . am <nl> ppp b / src / Makefile . am <nl> BITCOIN_CORE_H = \ <nl> protocol . h \ <nl> pubkey . h \ <nl> random . h \ <nl> + reverselock . h \ <nl> rpcclient . h \ <nl> rpcprotocol . h \ <nl> rpcserver . h \ <nl> mmm a / src / Makefile . test . include <nl> ppp b / src / Makefile . test . include <nl> BITCOIN_TESTS = \ <nl> test / pmt_tests . cpp \ <nl> test / policyestimator_tests . cpp \ <nl> test / pow_tests . cpp \ <nl> + test / reverselock_tests . cpp \ <nl> test / rpc_tests . cpp \ <nl> test / sanity_tests . cpp \ <nl> test / scheduler_tests . cpp \ <nl> new file mode 100644 <nl> index 000000000000 . . 567636e16afa <nl> mmm / dev / null <nl> ppp b / src / reverselock . h <nl> <nl> + / / Copyright ( c ) 2015 The Bitcoin Core developers <nl> + / / Distributed under the MIT software license , see the accompanying <nl> + / / file COPYING or http : / / www . opensource . org / licenses / mit - license . php . <nl> + <nl> + # ifndef BITCOIN_REVERSELOCK_H <nl> + # define BITCOIN_REVERSELOCK_H <nl> + <nl> + / * * <nl> + * An RAII - style reverse lock . Unlocks on construction and locks on destruction . <nl> + * / <nl> + template < typename Lock > <nl> + class reverse_lock <nl> + { <nl> + public : <nl> + <nl> + explicit reverse_lock ( Lock & lock ) : lock ( lock ) { <nl> + lock . unlock ( ) ; <nl> + } <nl> + <nl> + ~ reverse_lock ( ) { <nl> + lock . lock ( ) ; <nl> + } <nl> + <nl> + private : <nl> + reverse_lock ( reverse_lock const & ) ; <nl> + reverse_lock & operator = ( reverse_lock const & ) ; <nl> + <nl> + Lock & lock ; <nl> + } ; <nl> + <nl> + # endif / / BITCOIN_REVERSELOCK_H <nl> mmm a / src / scheduler . cpp <nl> ppp b / src / scheduler . cpp <nl> <nl> <nl> # include " scheduler . h " <nl> <nl> + # include " reverselock . h " <nl> + <nl> # include < assert . h > <nl> # include < boost / bind . hpp > <nl> - # include < boost / thread / reverse_lock . hpp > <nl> # include < utility > <nl> <nl> CScheduler : : CScheduler ( ) : nThreadsServicingQueue ( 0 ) , stopRequested ( false ) , stopWhenEmpty ( false ) <nl> void CScheduler : : serviceQueue ( ) <nl> { <nl> / / Unlock before calling f , so it can reschedule itself or another task <nl> / / without deadlocking : <nl> - boost : : reverse_lock < boost : : unique_lock < boost : : mutex > > rlock ( lock ) ; <nl> + reverse_lock < boost : : unique_lock < boost : : mutex > > rlock ( lock ) ; <nl> f ( ) ; <nl> } <nl> } catch ( . . . ) { <nl> new file mode 100644 <nl> index 000000000000 . . e7e627ae0f24 <nl> mmm / dev / null <nl> ppp b / src / test / reverselock_tests . cpp <nl> <nl> + / / Copyright ( c ) 2015 The Bitcoin Core developers <nl> + / / Distributed under the MIT software license , see the accompanying <nl> + / / file COPYING or http : / / www . opensource . org / licenses / mit - license . php . <nl> + <nl> + # include " reverselock . h " <nl> + # include " test / test_bitcoin . h " <nl> + <nl> + # include < boost / test / unit_test . hpp > <nl> + <nl> + BOOST_FIXTURE_TEST_SUITE ( reverselock_tests , BasicTestingSetup ) <nl> + <nl> + BOOST_AUTO_TEST_CASE ( reverselock_basics ) <nl> + { <nl> + boost : : mutex mutex ; <nl> + boost : : unique_lock < boost : : mutex > lock ( mutex ) ; <nl> + <nl> + BOOST_CHECK ( lock . owns_lock ( ) ) ; <nl> + { <nl> + reverse_lock < boost : : unique_lock < boost : : mutex > > rlock ( lock ) ; <nl> + BOOST_CHECK ( ! lock . owns_lock ( ) ) ; <nl> + } <nl> + BOOST_CHECK ( lock . 
owns_lock ( ) ) ; <nl> + } <nl> + <nl> + BOOST_AUTO_TEST_CASE ( reverselock_errors ) <nl> + { <nl> + boost : : mutex mutex ; <nl> + boost : : unique_lock < boost : : mutex > lock ( mutex ) ; <nl> + <nl> + / / Make sure trying to reverse lock an unlocked lock fails <nl> + lock . unlock ( ) ; <nl> + <nl> + BOOST_CHECK ( ! lock . owns_lock ( ) ) ; <nl> + <nl> + bool failed = false ; <nl> + try { <nl> + reverse_lock < boost : : unique_lock < boost : : mutex > > rlock ( lock ) ; <nl> + } catch ( . . . ) { <nl> + failed = true ; <nl> + } <nl> + <nl> + BOOST_CHECK ( failed ) ; <nl> + BOOST_CHECK ( ! lock . owns_lock ( ) ) ; <nl> + <nl> + / / Make sure trying to lock a lock after it has been reverse locked fails <nl> + failed = false ; <nl> + bool locked = false ; <nl> + <nl> + lock . lock ( ) ; <nl> + BOOST_CHECK ( lock . owns_lock ( ) ) ; <nl> + <nl> + try { <nl> + reverse_lock < boost : : unique_lock < boost : : mutex > > rlock ( lock ) ; <nl> + lock . lock ( ) ; <nl> + locked = true ; <nl> + } catch ( . . . ) { <nl> + failed = true ; <nl> + } <nl> + <nl> + BOOST_CHECK ( locked & & failed ) ; <nl> + BOOST_CHECK ( lock . owns_lock ( ) ) ; <nl> + } <nl> + <nl> + BOOST_AUTO_TEST_SUITE_END ( ) <nl>
Replace boost : : reverse_lock with our own .
bitcoin/bitcoin
86270c816411680c33a60adfa768c7a647fce08f
2015-09-03T19:13:40Z
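A minimal usage sketch of the reverse_lock template added above. It only calls unlock() and lock() on the wrapped lock, so a std::unique_lock works just as well as the boost::unique_lock used in scheduler.cpp; the surrounding function is hypothetical.

#include "reverselock.h"
#include <mutex>

std::mutex queue_mutex;

void service_one_task() {        // hypothetical caller, mirroring CScheduler::serviceQueue
  std::unique_lock<std::mutex> lock(queue_mutex);
  // ... pop the next task while holding the lock ...
  {
    reverse_lock<std::unique_lock<std::mutex>> rlock(lock);
    // The mutex is released here, so the task may call back into the
    // scheduler and enqueue more work without deadlocking.
    // task();
  }
  // The mutex is re-acquired here before the queue is touched again.
}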
mmm a / src / gfx / rect . h <nl> ppp b / src / gfx / rect . h <nl> class RectT <nl> return * this ; <nl> } <nl> <nl> + const RectT & operator | = ( const RectT & rc ) { <nl> + operator = ( createUnion ( rc ) ) ; <nl> + return * this ; <nl> + } <nl> + <nl> + const RectT & operator & = ( const RectT & rc ) { <nl> + operator = ( createIntersect ( rc ) ) ; <nl> + return * this ; <nl> + } <nl> + <nl> RectT operator + ( const BorderT < T > & br ) const { <nl> return RectT ( * this ) . enlarge ( br ) ; <nl> } <nl> class RectT <nl> return RectT ( * this ) . shrink ( br ) ; <nl> } <nl> <nl> + RectT operator | ( const RectT & other ) const { <nl> + return createUnion ( other ) ; <nl> + } <nl> + <nl> + RectT operator & ( const RectT & other ) const { <nl> + return createIntersect ( other ) ; <nl> + } <nl> + <nl> bool operator = = ( const RectT & rc ) const { <nl> return <nl> x = = rc . x & & w = = rc . w & & <nl>
Add operator | and operator & to gfx : : Rect
aseprite/aseprite
62b9f30f929140bac138e7545950987b4d6035b5
2015-03-19T13:10:29Z
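A minimal usage sketch of the new operators, assuming the (x, y, w, h) constructor and isEmpty() found elsewhere in gfx::Rect: union and intersection now read like ordinary arithmetic instead of explicit createUnion / createIntersect calls.

#include "gfx/rect.h"

void mark_and_clip() {                                   // hypothetical helper
  gfx::Rect dirty(10, 10, 32, 32);
  dirty |= gfx::Rect(50, 40, 16, 16);                    // bounding box of both (createUnion)
  gfx::Rect visible = dirty & gfx::Rect(0, 0, 64, 48);   // overlap only (createIntersect)
  if (!visible.isEmpty()) {
    // repaint just the part of the dirty region that is on screen
  }
}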
mmm a / tensorflow / contrib / reduce_slice_ops / kernels / reduce_slice_ops . cc <nl> ppp b / tensorflow / contrib / reduce_slice_ops / kernels / reduce_slice_ops . cc <nl> using thread : : ThreadPool ; <nl> <nl> namespace functor { <nl> <nl> + # define Sum ( a , b ) ( ( a ) + ( b ) ) <nl> + # define Prod ( a , b ) ( ( a ) * ( b ) ) <nl> + # define Max ( a , b ) ( ( a ) > ( b ) ? ( a ) : ( b ) ) <nl> + # define Min ( a , b ) ( ( a ) < ( b ) ? ( a ) : ( b ) ) <nl> + <nl> # define CPUReduceSliceFunctorReduceop ( reduceop , beginning ) \ <nl> template < typename T , typename Index > \ <nl> struct ReduceSliceFunctor # # reduceop < CPUDevice , T , Index > { \ <nl> TF_CALL_REAL_NUMBER_TYPES ( REGISTER_GPU_REDUCE_SLICE_KERNELS_ALL ) ; <nl> # undef REGISTER_GPU_REDUCE_SLICE_KERNELS <nl> # undef REGISTER_GPU_REDUCE_SLICE_KERNELS_ALL <nl> <nl> + # undef Sum <nl> + # undef Prod <nl> + # undef Min <nl> + # undef Max <nl> + <nl> # endif / / GOOGLE_CUDA <nl> <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / contrib / reduce_slice_ops / kernels / reduce_slice_ops . h <nl> ppp b / tensorflow / contrib / reduce_slice_ops / kernels / reduce_slice_ops . h <nl> limitations under the License . <nl> # include " tensorflow / core / framework / tensor_shape . h " <nl> # include " tensorflow / core / framework / tensor_types . h " <nl> <nl> - # define Sum ( a , b ) ( ( a ) + ( b ) ) <nl> - # define Prod ( a , b ) ( ( a ) * ( b ) ) <nl> - # define Max ( a , b ) ( ( a ) > ( b ) ? ( a ) : ( b ) ) <nl> - # define Min ( a , b ) ( ( a ) < ( b ) ? ( a ) : ( b ) ) <nl> - <nl> namespace tensorflow { <nl> <nl> class OpKernelContext ; <nl> mmm a / tensorflow / contrib / reduce_slice_ops / kernels / reduce_slice_ops_gpu . cu . cc <nl> ppp b / tensorflow / contrib / reduce_slice_ops / kernels / reduce_slice_ops_gpu . cu . cc <nl> using GPUDevice = Eigen : : GpuDevice ; <nl> <nl> namespace functor { <nl> <nl> + # define Sum ( a , b ) ( ( a ) + ( b ) ) <nl> + # define Prod ( a , b ) ( ( a ) * ( b ) ) <nl> + # define Max ( a , b ) ( ( a ) > ( b ) ? ( a ) : ( b ) ) <nl> + # define Min ( a , b ) ( ( a ) < ( b ) ? ( a ) : ( b ) ) <nl> + <nl> # define GPUReduceSliceFunctorReduceop ( reduceop , beginning ) \ <nl> template < typename T , typename Index > \ <nl> __global__ void ReduceSliceDeviceKernel # # reduceop ( \ <nl> TF_CALL_REAL_NUMBER_TYPES ( DEFINE_GPU_SPECS ) <nl> # undef DEFINE_GPU_REDUCEOP_SPECS_INDEX <nl> # undef DEFINE_GPU_SPECS <nl> <nl> + # undef Sum <nl> + # undef Prod <nl> + # undef Min <nl> + # undef Max <nl> + <nl> } / / namespace functor <nl> } / / namespace tensorflow <nl> <nl>
Move macros to cc from headers . This will allow us to undef the macros .
tensorflow/tensorflow
eddd87f0e3b7ae67a4476436b77cf311e0d448d5
2018-10-11T22:52:52Z
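A minimal sketch of the hazard the move avoids, using hypothetical code: a function-like macro that escapes a header rewrites unrelated uses of the same name and double-evaluates its arguments, whereas defining it in the .cc / .cu.cc files and #undef-ing it afterwards keeps the name file-local.

#define Max(a, b) ((a) > (b) ? (a) : (b))     // as if it had leaked from a header

int next_value() {                            // hypothetical, has a side effect
  static int v = 0;
  return ++v;
}

int demo() {
  // Double evaluation: when the first argument wins, next_value() runs twice.
  return Max(next_value(), 0);
}

// A later declaration in any file that includes the header is silently mangled:
//   int Max(int lhs, int rhs);
// expands to
//   int ((int lhs) > (int rhs) ? (int lhs) : (int rhs));
// which does not compile. Scoping the #define / #undef pair to one source file
// avoids both problems.
#undef Max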
mmm a / test / test_nn . py <nl> ppp b / test / test_nn . py <nl> def smoothl1loss_no_reduce_test ( ) : <nl> def multilabelmarginloss_1d_no_reduce_test ( ) : <nl> t = Variable ( torch . rand ( 10 ) . mul ( 10 ) . floor ( ) . long ( ) ) <nl> return dict ( <nl> - fullname = ' MultiLabelMarginLoss_no_reduce ' , <nl> + fullname = ' MultiLabelMarginLoss_1d_no_reduce ' , <nl> constructor = wrap_functional ( <nl> lambda i : F . multilabel_margin_loss ( i , t . type_as ( i ) . long ( ) , reduce = False ) ) , <nl> input_fn = lambda : torch . randn ( 10 ) , <nl> def multilabelmarginloss_index_neg_test ( ) : <nl> def multilabelmarginloss_no_reduce_test ( ) : <nl> t = Variable ( torch . rand ( 5 , 10 ) . mul ( 10 ) . floor ( ) . long ( ) ) <nl> return dict ( <nl> - fullname = ' MultiLabelMarginLoss_1d_no_reduce ' , <nl> + fullname = ' MultiLabelMarginLoss_no_reduce ' , <nl> constructor = wrap_functional ( <nl> lambda i : F . multilabel_margin_loss ( i , t . type_as ( i ) . long ( ) , reduce = False ) ) , <nl> input_fn = lambda : torch . randn ( 5 , 10 ) , <nl>
fix MultiLabelMarginLoss test names ( )
pytorch/pytorch
e75b434ca27f261d63c5306ecd986aa2f868272f
2018-02-07T10:28:36Z
mmm a / requirements . txt <nl> ppp b / requirements . txt <nl> coverage > = 4 . 0 <nl> cython > = 0 . 23 <nl> enum34 > = 1 . 0 . 4 <nl> futures > = 2 . 2 . 0 <nl> - protobuf > = 3 . 0 . 0 <nl> + protobuf > = 3 . 2 . 0 <nl> six > = 1 . 10 <nl> wheel > = 0 . 29 <nl>
Merge pull request from mehrdada / server - reflection - for - grpc - python
grpc/grpc
c35a5b0e74b036acaf9129118fe449c72bb5f696
2017-03-08T21:31:06Z
mmm a / tensorflow / compiler / xla / client / xla_builder . cc <nl> ppp b / tensorflow / compiler / xla / client / xla_builder . cc <nl> Status XlaBuilder : : SetDynamicBinding ( int64 dynamic_size_param_num , <nl> for ( int64 index : target_param_index ) { <nl> param_shape_ptr = param_shape_ptr - > mutable_tuple_shapes ( index ) ; <nl> } <nl> - param_shape_ptr - > set_dynamic_dimension ( target_dim_num , <nl> - / * is_dynamic = * / true ) ; <nl> + / / TODO ( b / 121223198 ) : Set ` is_dynamic ` to the parameter shape when XLA <nl> + / / backend can handle dynamic dimensions . <nl> * instr . mutable_shape ( ) = param_shape . ToProto ( ) ; <nl> } <nl> } <nl> mmm a / tensorflow / compiler / xla / client / xla_builder_test . cc <nl> ppp b / tensorflow / compiler / xla / client / xla_builder_test . cc <nl> TEST_F ( XlaBuilderTest , ProtoMatches ) { <nl> TEST_F ( XlaBuilderTest , DynamicParameter ) { <nl> XlaBuilder b ( TestName ( ) ) ; <nl> Shape tuple_param_shape = ShapeUtil : : MakeTupleShape ( <nl> - { ShapeUtil : : MakeShape ( F32 , { 5 } ) , ShapeUtil : : MakeShape ( F32 , { 6 } ) } ) ; <nl> + { ShapeUtil : : MakeShape ( F32 , { 5 } ) , ShapeUtil : : MakeShape ( F32 , { 6 } , { true } ) } ) ; <nl> auto p0 = Parameter ( & b , 0 , tuple_param_shape , " p0 " ) ; <nl> Parameter ( & b , 1 , ShapeUtil : : MakeShape ( U32 , { } ) , " p1 " ) ; <nl> ASSERT_IS_OK ( b . SetDynamicBinding ( / * dynamic_size_param_num = * / 1 , <nl> TEST_F ( XlaBuilderTest , DynamicParameter ) { <nl> TEST_F ( XlaBuilderTest , DynamicUnary ) { <nl> XlaBuilder b ( TestName ( ) ) ; <nl> Shape tuple_param_shape = ShapeUtil : : MakeTupleShape ( <nl> - { ShapeUtil : : MakeShape ( F32 , { 5 } ) , ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> + { ShapeUtil : : MakeShape ( F32 , { 5 } , { true } ) , ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> auto p0 = Parameter ( & b , 0 , tuple_param_shape , " p0 " ) ; <nl> ASSERT_IS_OK ( b . SetDynamicBinding ( / * dynamic_size_param_num = * / 0 , <nl> / * dynamic_size_param_index = * / { 1 } , <nl> TEST_F ( XlaBuilderTest , DynamicUnary ) { <nl> TEST_F ( XlaBuilderTest , DynamicBinary ) { <nl> XlaBuilder b ( TestName ( ) ) ; <nl> Shape tuple_param_shape = ShapeUtil : : MakeTupleShape ( <nl> - { ShapeUtil : : MakeShape ( F32 , { 5 } ) , ShapeUtil : : MakeShape ( F32 , { 5 } ) , <nl> - ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> + { ShapeUtil : : MakeShape ( F32 , { 5 } , { true } ) , <nl> + ShapeUtil : : MakeShape ( F32 , { 5 } , { true } ) , ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> auto p0 = Parameter ( & b , 0 , tuple_param_shape , " p0 " ) ; <nl> ASSERT_IS_OK ( b . SetDynamicBinding ( / * dynamic_size_param_num = * / 0 , <nl> / * dynamic_size_param_index = * / { 2 } , <nl> TEST_F ( XlaBuilderTest , DynamicBinary ) { <nl> TEST_F ( XlaBuilderTest , DynamicBinaryHasBroadcast ) { <nl> XlaBuilder b ( TestName ( ) ) ; <nl> Shape tuple_param_shape = ShapeUtil : : MakeTupleShape ( <nl> - { ShapeUtil : : MakeShape ( F32 , { 5 , 4 } ) , ShapeUtil : : MakeShape ( F32 , { 5 } ) , <nl> - ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> + { ShapeUtil : : MakeShape ( F32 , { 5 , 4 } , { true , false } ) , <nl> + ShapeUtil : : MakeShape ( F32 , { 5 } , { true } ) , ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> auto p0 = Parameter ( & b , 0 , tuple_param_shape , " p0 " ) ; <nl> ASSERT_IS_OK ( b . 
SetDynamicBinding ( / * dynamic_size_param_num = * / 0 , <nl> / * dynamic_size_param_index = * / { 2 } , <nl> TEST_F ( XlaBuilderTest , DynamicBinaryHasBroadcast ) { <nl> TEST_F ( XlaBuilderTest , DynamicBroadcast ) { <nl> XlaBuilder b ( TestName ( ) ) ; <nl> Shape tuple_param_shape = ShapeUtil : : MakeTupleShape ( <nl> - { ShapeUtil : : MakeShape ( F32 , { 5 , 4 } ) , ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> + { ShapeUtil : : MakeShape ( F32 , { 5 , 4 } , { true , false } ) , <nl> + ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> auto p0 = Parameter ( & b , 0 , tuple_param_shape , " p0 " ) ; <nl> ASSERT_IS_OK ( b . SetDynamicBinding ( / * dynamic_size_param_num = * / 0 , <nl> / * dynamic_size_param_index = * / { 1 } , <nl> TEST_F ( XlaBuilderTest , DynamicBroadcast ) { <nl> TEST_F ( XlaBuilderTest , DynamicBinaryHasDegenerateBroadcast ) { <nl> XlaBuilder b ( TestName ( ) ) ; <nl> Shape tuple_param_shape = ShapeUtil : : MakeTupleShape ( <nl> - { ShapeUtil : : MakeShape ( F32 , { 10 } ) , ShapeUtil : : MakeShape ( F32 , { 1 , 15 } ) , <nl> - ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> + { ShapeUtil : : MakeShape ( F32 , { 10 } , { true } ) , <nl> + ShapeUtil : : MakeShape ( F32 , { 1 , 15 } ) , ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> auto p0 = Parameter ( & b , 0 , tuple_param_shape , " p0 " ) ; <nl> ASSERT_IS_OK ( b . SetDynamicBinding ( / * dynamic_size_param_num = * / 0 , <nl> / * dynamic_size_param_index = * / { 1 } , <nl> TEST_F ( XlaBuilderTest , DynamicBinaryHasDegenerateBroadcast ) { <nl> TEST_F ( XlaBuilderTest , DynamicSelectOnlyPredDynamic ) { <nl> XlaBuilder b ( TestName ( ) ) ; <nl> Shape tuple_param_shape = ShapeUtil : : MakeTupleShape ( <nl> - { ShapeUtil : : MakeShape ( PRED , { 10 } ) , ShapeUtil : : MakeShape ( F32 , { 10 } ) , <nl> - ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> + { ShapeUtil : : MakeShape ( PRED , { 10 } , { true } ) , <nl> + ShapeUtil : : MakeShape ( F32 , { 10 } ) , ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> auto p0 = Parameter ( & b , 0 , tuple_param_shape , " p0 " ) ; <nl> ASSERT_IS_OK ( b . SetDynamicBinding ( / * dynamic_size_param_num = * / 0 , <nl> / * dynamic_size_param_index = * / { 1 } , <nl> TEST_F ( XlaBuilderTest , DynamicSelectOnlyPredDynamic ) { <nl> TEST_F ( XlaBuilderTest , DynamicPad ) { <nl> XlaBuilder b ( TestName ( ) ) ; <nl> Shape tuple_param_shape = ShapeUtil : : MakeTupleShape ( <nl> - { ShapeUtil : : MakeShape ( F32 , { 5 , 4 } ) , ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> + { ShapeUtil : : MakeShape ( F32 , { 5 , 4 } , { true , false } ) , <nl> + ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> auto p0 = Parameter ( & b , 0 , tuple_param_shape , " p0 " ) ; <nl> auto pad_val = ConstantR0 < float > ( & b , - 1 ) ; <nl> ASSERT_IS_OK ( b . 
SetDynamicBinding ( / * dynamic_size_param_num = * / 0 , <nl> TEST_F ( XlaBuilderTest , DynamicPad ) { <nl> TEST_F ( XlaBuilderTest , DynamicConvolution ) { <nl> XlaBuilder b ( TestName ( ) ) ; <nl> Shape tuple_param_shape = ShapeUtil : : MakeTupleShape ( <nl> - { ShapeUtil : : MakeShape ( F32 , { 1 , 2 , 2 , 128 } ) , <nl> - ShapeUtil : : MakeShape ( F32 , { 2 , 2 , 128 , 8 } ) , ShapeUtil : : MakeShape ( U32 , { } ) , <nl> - ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> + { ShapeUtil : : MakeShape ( F32 , { 1 , 2 , 2 , 128 } , { true , false , false , false } ) , <nl> + ShapeUtil : : MakeShape ( F32 , { 2 , 2 , 128 , 8 } , { false , false , true , false } ) , <nl> + ShapeUtil : : MakeShape ( U32 , { } ) , ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> auto p0 = Parameter ( & b , 0 , tuple_param_shape , " p0 " ) ; <nl> ASSERT_IS_OK ( b . SetDynamicBinding ( / * dynamic_size_param_num = * / 0 , <nl> / * dynamic_size_param_index = * / { 2 } , <nl> TEST_F ( XlaBuilderTest , DynamicConvolution ) { <nl> TEST_F ( XlaBuilderTest , DynamicDot ) { <nl> XlaBuilder b ( TestName ( ) ) ; <nl> Shape tuple_param_shape = ShapeUtil : : MakeTupleShape ( <nl> - { ShapeUtil : : MakeShape ( F32 , { 2 , 3 , 4 } ) , <nl> - ShapeUtil : : MakeShape ( F32 , { 2 , 4 , 5 } ) , ShapeUtil : : MakeShape ( U32 , { } ) , <nl> - ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> + { ShapeUtil : : MakeShape ( F32 , { 2 , 3 , 4 } , { true , true , false } ) , <nl> + ShapeUtil : : MakeShape ( F32 , { 2 , 4 , 5 } , { true , false , false } ) , <nl> + ShapeUtil : : MakeShape ( U32 , { } ) , ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> auto p0 = Parameter ( & b , 0 , tuple_param_shape , " p0 " ) ; <nl> ASSERT_IS_OK ( b . SetDynamicBinding ( / * dynamic_size_param_num = * / 0 , <nl> / * dynamic_size_param_index = * / { 2 } , <nl> TEST_F ( XlaBuilderTest , DynamicDot ) { <nl> TEST_F ( XlaBuilderTest , DynamicReduce ) { <nl> XlaBuilder b ( TestName ( ) ) ; <nl> Shape tuple_param_shape = ShapeUtil : : MakeTupleShape ( <nl> - { ShapeUtil : : MakeShape ( F32 , { 5 , 4 , 3 } ) , ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> + { ShapeUtil : : MakeShape ( F32 , { 5 , 4 , 3 } , { false , true , false } ) , <nl> + ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> auto p0 = Parameter ( & b , 0 , tuple_param_shape , " p0 " ) ; <nl> auto init = ConstantR0 < float > ( & b , 0 ) ; <nl> ASSERT_IS_OK ( b . SetDynamicBinding ( / * dynamic_size_param_num = * / 0 , <nl> TEST_F ( XlaBuilderTest , DynamicReduce ) { <nl> TEST_F ( XlaBuilderTest , DynamicReduceWindow ) { <nl> XlaBuilder b ( TestName ( ) ) ; <nl> Shape tuple_param_shape = ShapeUtil : : MakeTupleShape ( <nl> - { ShapeUtil : : MakeShape ( F32 , { 2 , 4 , 8 } ) , ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> + { ShapeUtil : : MakeShape ( F32 , { 2 , 4 , 8 } , { true , false , false } ) , <nl> + ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> auto p0 = Parameter ( & b , 0 , tuple_param_shape , " p0 " ) ; <nl> auto init = ConstantR0 < float > ( & b , 0 . f ) ; <nl> ASSERT_IS_OK ( b . 
SetDynamicBinding ( / * dynamic_size_param_num = * / 0 , <nl> TEST_F ( XlaBuilderTest , DynamicReduceWindow ) { <nl> TEST_F ( XlaBuilderTest , DynamicSelectAndScatter ) { <nl> XlaBuilder b ( TestName ( ) ) ; <nl> Shape tuple_param_shape = ShapeUtil : : MakeTupleShape ( <nl> - { ShapeUtil : : MakeShape ( F32 , { 2 , 4 , 8 } ) , <nl> - ShapeUtil : : MakeShape ( F32 , { 2 , 2 , 2 } ) , ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> + { ShapeUtil : : MakeShape ( F32 , { 2 , 4 , 8 } , { true , false , false } ) , <nl> + ShapeUtil : : MakeShape ( F32 , { 2 , 2 , 2 } , { true , false , false } ) , <nl> + ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> auto p0 = Parameter ( & b , 0 , tuple_param_shape , " p0 " ) ; <nl> auto init = ConstantR0 < float > ( & b , 0 . f ) ; <nl> XlaBuilder bsum ( TestName ( ) ) ; <nl> TEST_F ( XlaBuilderTest , DynamicSelectAndScatter ) { <nl> TEST_F ( XlaBuilderTest , DynamicReshape ) { <nl> XlaBuilder b ( TestName ( ) ) ; <nl> Shape tuple_param_shape = ShapeUtil : : MakeTupleShape ( <nl> - { ShapeUtil : : MakeShape ( F32 , { 2 , 3 , 4 , 5 , 6 } ) , <nl> + { ShapeUtil : : MakeShape ( F32 , { 2 , 3 , 4 , 5 , 6 } , <nl> + { false , false , true , true , false , false } ) , <nl> ShapeUtil : : MakeShape ( U32 , { } ) , ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> auto p0 = Parameter ( & b , 0 , tuple_param_shape , " p0 " ) ; <nl> ASSERT_IS_OK ( b . SetDynamicBinding ( / * dynamic_size_param_num = * / 0 , <nl> TEST_F ( XlaBuilderTest , DynamicReshape ) { <nl> TEST_F ( XlaBuilderTest , DynamicSelect ) { <nl> XlaBuilder b ( TestName ( ) ) ; <nl> Shape tuple_param_shape = ShapeUtil : : MakeTupleShape ( <nl> - { ShapeUtil : : MakeShape ( F32 , { 4 , 5 , 6 } ) , <nl> - ShapeUtil : : MakeShape ( F32 , { 4 , 5 , 6 } ) , ShapeUtil : : MakeShape ( U32 , { } ) , <nl> - ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> + { ShapeUtil : : MakeShape ( F32 , { 4 , 5 , 6 } , { false , true , false } ) , <nl> + ShapeUtil : : MakeShape ( F32 , { 4 , 5 , 6 } , { false , true , false } ) , <nl> + ShapeUtil : : MakeShape ( U32 , { } ) , ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> auto p0 = Parameter ( & b , 0 , tuple_param_shape , " p0 " ) ; <nl> auto pred = Parameter ( & b , 1 , ShapeUtil : : MakeShape ( PRED , { } ) , " pred " ) ; <nl> ASSERT_IS_OK ( b . SetDynamicBinding ( / * dynamic_size_param_num = * / 0 , <nl> TEST_F ( XlaBuilderTest , DynamicSelect ) { <nl> TEST_F ( XlaBuilderTest , DynamicSelectNotCompatible ) { <nl> XlaBuilder b ( TestName ( ) ) ; <nl> Shape tuple_param_shape = ShapeUtil : : MakeTupleShape ( <nl> - { ShapeUtil : : MakeShape ( F32 , { 4 , 5 , 6 } ) , <nl> - ShapeUtil : : MakeShape ( F32 , { 4 , 5 , 6 } ) , ShapeUtil : : MakeShape ( U32 , { } ) , <nl> - ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> + { ShapeUtil : : MakeShape ( F32 , { 4 , 5 , 6 } , { false , true , false } ) , <nl> + ShapeUtil : : MakeShape ( F32 , { 4 , 5 , 6 } , { false , false , true } ) , <nl> + ShapeUtil : : MakeShape ( U32 , { } ) , ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> auto p0 = Parameter ( & b , 0 , tuple_param_shape , " p0 " ) ; <nl> auto pred = Parameter ( & b , 1 , ShapeUtil : : MakeShape ( PRED , { } ) , " pred " ) ; <nl> ASSERT_IS_OK ( b . 
SetDynamicBinding ( / * dynamic_size_param_num = * / 0 , <nl> TEST_F ( XlaBuilderTest , DynamicSelectNotCompatible ) { <nl> TEST_F ( XlaBuilderTest , DynamicTranspose ) { <nl> XlaBuilder b ( TestName ( ) ) ; <nl> Shape tuple_param_shape = ShapeUtil : : MakeTupleShape ( <nl> - { ShapeUtil : : MakeShape ( F32 , { 3 , 5 } ) , ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> + { ShapeUtil : : MakeShape ( F32 , { 3 , 5 } , { true , false } ) , <nl> + ShapeUtil : : MakeShape ( U32 , { } ) } ) ; <nl> auto p0 = Parameter ( & b , 0 , tuple_param_shape , " p0 " ) ; <nl> ASSERT_IS_OK ( b . SetDynamicBinding ( / * dynamic_size_param_num = * / 0 , <nl> / * dynamic_size_param_index = * / { 1 } , <nl> mmm a / tensorflow / compiler / xla / service / dynamic_dimension_inference . cc <nl> ppp b / tensorflow / compiler / xla / service / dynamic_dimension_inference . cc <nl> class DynamicDimensionInferenceVisitor : public DfsHloVisitorWithDefault { <nl> <nl> Status HandleWhile ( HloInstruction * hlo ) override ; <nl> <nl> + Status HandleSlice ( HloInstruction * hlo ) override ; <nl> + <nl> private : <nl> using OperandDynamicDimensionFn = std : : function < Status ( <nl> HloInstruction * operand , ShapeIndex index , int64 dimension , <nl> Status DynamicDimensionInferenceVisitor : : HandleBroadcast ( HloInstruction * hlo ) { <nl> hlo , [ & ] ( HloInstruction * operand , ShapeIndex index , int64 dimension , <nl> int64 operand_index , HloInstruction * dynamic_size ) { <nl> int64 broadcast_dim = hlo - > dimensions ( dimension ) ; <nl> - parent_ - > SetDynamicSize ( hlo , index , broadcast_dim , dynamic_size ) ; <nl> + parent_ - > SetDynamicSize ( hlo , { } , broadcast_dim , dynamic_size ) ; <nl> return Status : : OK ( ) ; <nl> } ) ; <nl> } <nl> Status DynamicDimensionInferenceVisitor : : HandleSelectAndScatter ( <nl> } ) ; <nl> } <nl> <nl> + Status DynamicDimensionInferenceVisitor : : HandleSlice ( HloInstruction * hlo ) { <nl> + return ForEachOperandDynamicDimension ( <nl> + hlo , [ & ] ( HloInstruction * operand , ShapeIndex / * index * / , int64 dimension , <nl> + int64 / * operand_index * / , HloInstruction * dynamic_size ) { <nl> + if ( hlo - > slice_starts ( dimension ) ! = 0 | | <nl> + hlo - > slice_strides ( dimension ) ! = 1 | | <nl> + hlo - > slice_limits ( dimension ) ! = <nl> + operand - > shape ( ) . dimensions ( dimension ) ) { <nl> + return Unimplemented ( <nl> + " Dynamic dimension propagation on Slice where it doesn ' t slice " <nl> + " out an entire dimension is not supported % s " , <nl> + hlo - > ToString ( ) ) ; <nl> + } <nl> + <nl> + parent_ - > SetDynamicSize ( hlo , { } , dimension , dynamic_size ) ; <nl> + <nl> + return Status : : OK ( ) ; <nl> + } ) ; <nl> + } <nl> + <nl> Status DynamicDimensionInferenceVisitor : : HandleWhile ( HloInstruction * hlo ) { <nl> / / While loop is handled by passing dynamic size hlos as parameters into the <nl> / / hlo while loop . This is done by replacing the original while with a new <nl> mmm a / tensorflow / compiler / xla / service / dynamic_dimension_inference_test . cc <nl> ppp b / tensorflow / compiler / xla / service / dynamic_dimension_inference_test . cc <nl> TEST_F ( DynamicDimensionInferenceTest , SelectAndScatterTest ) { <nl> EXPECT_EQ ( inference_ - > GetDynamicSize ( sns , { } , 0 ) , size_param ) ; <nl> } <nl> <nl> + TEST_F ( DynamicDimensionInferenceTest , SliceTest ) { <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + <nl> + auto data_param = builder . 
AddInstruction ( HloInstruction : : CreateParameter ( <nl> + 0 , ShapeUtil : : MakeShape ( F32 , { 5 , 7 } ) , " data_param " ) ) ; <nl> + auto size_param = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 1 , scalar_shape_ , " size_param " ) ) ; <nl> + <nl> + auto * slice = builder . AddInstruction ( HloInstruction : : CreateSlice ( <nl> + ShapeUtil : : MakeShape ( F32 , { 5 , 7 } ) , data_param , / * start_indices = * / { 0 , 0 } , <nl> + / * limit_indices = * / { 5 , 7 } , / * strides = * / { 1 , 1 } ) ) ; <nl> + <nl> + module_ - > AddEntryComputation ( builder . Build ( ) ) ; <nl> + / / Set up dynamic parameter binding . <nl> + TF_CHECK_OK ( module_ - > dynamic_parameter_binding ( ) . Bind ( <nl> + DynamicParameterBinding : : DynamicParameter { 1 , { } } , <nl> + DynamicParameterBinding : : DynamicDimension { 0 , { } , 1 } ) ) ; <nl> + <nl> + TF_ASSERT_OK ( RunInference ( ) ) ; <nl> + EXPECT_EQ ( inference_ - > GetDynamicSize ( slice , { } , 1 ) , size_param ) ; <nl> + } <nl> + <nl> } / / namespace <nl> } / / namespace xla <nl> mmm a / tensorflow / compiler / xla / service / dynamic_padder . cc <nl> ppp b / tensorflow / compiler / xla / service / dynamic_padder . cc <nl> StatusOr < HloInstruction * > ChooseIdentityValue ( HloInstruction * inst ) { <nl> case HloOpcode : : kPad : { <nl> return inst - > mutable_operand ( 1 ) ; <nl> } <nl> + <nl> + case HloOpcode : : kSelectAndScatter : { <nl> + return inst - > mutable_operand ( 2 ) ; <nl> + } <nl> case HloOpcode : : kParameter : <nl> case HloOpcode : : kGetDimensionSize : <nl> case HloOpcode : : kReshape : <nl> case HloOpcode : : kTuple : <nl> case HloOpcode : : kAllReduce : <nl> case HloOpcode : : kBroadcast : <nl> + case HloOpcode : : kTranspose : <nl> + case HloOpcode : : kSlice : <nl> return nullptr ; <nl> default : <nl> return UnimplementedStrCat ( " Unimplimented padding for instruction : " , <nl>
[ XLA ] Support Slice in dynamic dimension inference and handle padding values for SelectAndScatter .
tensorflow/tensorflow
48f2aa607a37e808f89b8b741f15ea98853dacb3
2019-03-01T01:34:05Z
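A minimal sketch of the pattern the new XlaBuilder tests follow, written as it would appear inside a test body: dimension 0 of the data operand is marked dynamic in its shape, and a scalar tuple element is bound as its runtime size. The trailing SetDynamicBinding arguments (target parameter number, index, and dimension) are assumed from the binding code in xla_builder.cc above.

XlaBuilder b("dynamic_dim_sketch");                       // hypothetical test name
Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
    {ShapeUtil::MakeShape(F32, {5, 4}, {true, false}),    // dim 0 is dynamic
     ShapeUtil::MakeShape(U32, {})});                     // its runtime size
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
ASSERT_IS_OK(b.SetDynamicBinding(/*dynamic_size_param_num=*/0,
                                 /*dynamic_size_param_index=*/{1},
                                 /*target_param_num=*/0,
                                 /*target_param_index=*/{0},
                                 /*target_dim_num=*/0));
// Ops built from tuple element 0 of p0 now carry the dynamic size through
// passes such as DynamicDimensionInference.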
mmm a / src / init . cpp <nl> ppp b / src / init . cpp <nl> std : : string HelpMessage ( HelpMessageMode mode ) <nl> strUsage + = " \ n " + _ ( " Debugging / Testing options : " ) + " \ n " ; <nl> if ( GetBoolArg ( " - help - debug " , false ) ) <nl> { <nl> - strUsage + = " - benchmark " + _ ( " Show benchmark information ( default : 0 ) " ) + " \ n " ; <nl> strUsage + = " - checkpoints " + _ ( " Only accept block chain matching built - in checkpoints ( default : 1 ) " ) + " \ n " ; <nl> strUsage + = " - dblogsize = < n > " + _ ( " Flush database activity from memory pool to disk log every < n > megabytes ( default : 100 ) " ) + " \ n " ; <nl> strUsage + = " - disablesafemode " + _ ( " Disable safemode , override a real safe mode event ( default : 0 ) " ) + " \ n " ; <nl> std : : string HelpMessage ( HelpMessageMode mode ) <nl> strUsage + = " - debug = < category > " + _ ( " Output debugging information ( default : 0 , supplying < category > is optional ) " ) + " \ n " ; <nl> strUsage + = " " + _ ( " If < category > is not supplied , output all debugging information . " ) + " \ n " ; <nl> strUsage + = " " + _ ( " < category > can be : " ) ; <nl> - strUsage + = " addrman , alert , coindb , db , lock , rand , rpc , selectcoins , mempool , net " ; / / Don ' t translate these and qt below <nl> + strUsage + = " addrman , alert , bench , coindb , db , lock , rand , rpc , selectcoins , mempool , net " ; / / Don ' t translate these and qt below <nl> if ( mode = = HMM_BITCOIN_QT ) <nl> strUsage + = " , qt " ; <nl> strUsage + = " . \ n " ; <nl> bool AppInit2 ( boost : : thread_group & threadGroup ) <nl> if ( GetBoolArg ( " - tor " , false ) ) <nl> return InitError ( _ ( " Error : Unsupported argument - tor found , use - onion . " ) ) ; <nl> <nl> - fBenchmark = GetBoolArg ( " - benchmark " , false ) ; <nl> + if ( GetBoolArg ( " - benchmark " , false ) ) <nl> + InitWarning ( _ ( " Warning : Unsupported argument - benchmark ignored , use - debug = bench . " ) ) ; <nl> + <nl> / / Checkmempool defaults to true in regtest mode <nl> mempool . setSanityCheck ( GetBoolArg ( " - checkmempool " , Params ( ) . DefaultCheckMemPool ( ) ) ) ; <nl> Checkpoints : : fEnabled = GetBoolArg ( " - checkpoints " , true ) ; <nl> mmm a / src / main . cpp <nl> ppp b / src / main . cpp <nl> CConditionVariable cvBlockChange ; <nl> int nScriptCheckThreads = 0 ; <nl> bool fImporting = false ; <nl> bool fReindex = false ; <nl> - bool fBenchmark = false ; <nl> bool fTxIndex = false ; <nl> bool fIsBareMultisigStd = true ; <nl> unsigned int nCoinCacheSize = 5000 ; <nl> void ThreadScriptCheck ( ) { <nl> scriptcheckqueue . Thread ( ) ; <nl> } <nl> <nl> + static int64_t nTimeVerify = 0 ; <nl> + static int64_t nTimeConnect = 0 ; <nl> + static int64_t nTimeIndex = 0 ; <nl> + static int64_t nTimeCallbacks = 0 ; <nl> + static int64_t nTimeTotal = 0 ; <nl> + <nl> bool ConnectBlock ( CBlock & block , CValidationState & state , CBlockIndex * pindex , CCoinsViewCache & view , bool fJustCheck ) <nl> { <nl> AssertLockHeld ( cs_main ) ; <nl> bool ConnectBlock ( CBlock & block , CValidationState & state , CBlockIndex * pindex , C <nl> <nl> CCheckQueueControl < CScriptCheck > control ( fScriptChecks & & nScriptCheckThreads ? & scriptcheckqueue : NULL ) ; <nl> <nl> - int64_t nStart = GetTimeMicros ( ) ; <nl> + int64_t nTimeStart = GetTimeMicros ( ) ; <nl> int64_t nFees = 0 ; <nl> int nInputs = 0 ; <nl> unsigned int nSigOps = 0 ; <nl> bool ConnectBlock ( CBlock & block , CValidationState & state , CBlockIndex * pindex , C <nl> vPos . 
push_back ( std : : make_pair ( tx . GetHash ( ) , pos ) ) ; <nl> pos . nTxOffset + = : : GetSerializeSize ( tx , SER_DISK , CLIENT_VERSION ) ; <nl> } <nl> - int64_t nTime = GetTimeMicros ( ) - nStart ; <nl> - if ( fBenchmark ) <nl> - LogPrintf ( " - Connect % u transactions : % . 2fms ( % . 3fms / tx , % . 3fms / txin ) \ n " , ( unsigned ) block . vtx . size ( ) , 0 . 001 * nTime , 0 . 001 * nTime / block . vtx . size ( ) , nInputs < = 1 ? 0 : 0 . 001 * nTime / ( nInputs - 1 ) ) ; <nl> + int64_t nTime1 = GetTimeMicros ( ) ; nTimeConnect + = nTime1 - nTimeStart ; <nl> + LogPrint ( " bench " , " - Connect % u transactions : % . 2fms ( % . 3fms / tx , % . 3fms / txin ) [ % . 2fs ] \ n " , ( unsigned ) block . vtx . size ( ) , 0 . 001 * ( nTime1 - nTimeStart ) , 0 . 001 * ( nTime1 - nTimeStart ) / block . vtx . size ( ) , nInputs < = 1 ? 0 : 0 . 001 * ( nTime1 - nTimeStart ) / ( nInputs - 1 ) , nTimeConnect * 0 . 000001 ) ; <nl> <nl> if ( block . vtx [ 0 ] . GetValueOut ( ) > GetBlockValue ( pindex - > nHeight , nFees ) ) <nl> return state . DoS ( 100 , <nl> bool ConnectBlock ( CBlock & block , CValidationState & state , CBlockIndex * pindex , C <nl> <nl> if ( ! control . Wait ( ) ) <nl> return state . DoS ( 100 , false ) ; <nl> - int64_t nTime2 = GetTimeMicros ( ) - nStart ; <nl> - if ( fBenchmark ) <nl> - LogPrintf ( " - Verify % u txins : % . 2fms ( % . 3fms / txin ) \ n " , nInputs - 1 , 0 . 001 * nTime2 , nInputs < = 1 ? 0 : 0 . 001 * nTime2 / ( nInputs - 1 ) ) ; <nl> + int64_t nTime2 = GetTimeMicros ( ) ; nTimeVerify + = nTime2 - nTimeStart ; <nl> + LogPrint ( " bench " , " - Verify % u txins : % . 2fms ( % . 3fms / txin ) [ % . 2fs ] \ n " , nInputs - 1 , 0 . 001 * ( nTime2 - nTimeStart ) , nInputs < = 1 ? 0 : 0 . 001 * ( nTime2 - nTimeStart ) / ( nInputs - 1 ) , nTimeVerify * 0 . 000001 ) ; <nl> <nl> if ( fJustCheck ) <nl> return true ; <nl> bool ConnectBlock ( CBlock & block , CValidationState & state , CBlockIndex * pindex , C <nl> ret = view . SetBestBlock ( pindex - > GetBlockHash ( ) ) ; <nl> assert ( ret ) ; <nl> <nl> + int64_t nTime3 = GetTimeMicros ( ) ; nTimeIndex + = nTime3 - nTime2 ; <nl> + LogPrint ( " bench " , " - Index writing : % . 2fms [ % . 2fs ] \ n " , 0 . 001 * ( nTime3 - nTime2 ) , nTimeIndex * 0 . 000001 ) ; <nl> + <nl> / / Watch for transactions paying to me <nl> BOOST_FOREACH ( const CTransaction & tx , block . vtx ) <nl> g_signals . SyncTransaction ( tx , & block ) ; <nl> bool ConnectBlock ( CBlock & block , CValidationState & state , CBlockIndex * pindex , C <nl> g_signals . UpdatedTransaction ( hashPrevBestCoinBase ) ; <nl> hashPrevBestCoinBase = block . vtx [ 0 ] . GetHash ( ) ; <nl> <nl> + int64_t nTime4 = GetTimeMicros ( ) ; nTimeCallbacks + = nTime4 - nTime3 ; <nl> + LogPrint ( " bench " , " - Callbacks : % . 2fms [ % . 2fs ] \ n " , 0 . 001 * ( nTime4 - nTime3 ) , nTimeCallbacks * 0 . 000001 ) ; <nl> + <nl> return true ; <nl> } <nl> <nl> bool static DisconnectTip ( CValidationState & state ) { <nl> return error ( " DisconnectTip ( ) : DisconnectBlock % s failed " , pindexDelete - > GetBlockHash ( ) . ToString ( ) ) ; <nl> assert ( view . Flush ( ) ) ; <nl> } <nl> - if ( fBenchmark ) <nl> - LogPrintf ( " - Disconnect : % . 2fms \ n " , ( GetTimeMicros ( ) - nStart ) * 0 . 001 ) ; <nl> + LogPrint ( " bench " , " - Disconnect block : % . 2fms \ n " , ( GetTimeMicros ( ) - nStart ) * 0 . 001 ) ; <nl> / / Write the chain state to disk , if necessary . <nl> if ( ! 
WriteChainState ( state ) ) <nl> return false ; <nl> bool static DisconnectTip ( CValidationState & state ) { <nl> return true ; <nl> } <nl> <nl> + static int64_t nTimeReadFromDisk = 0 ; <nl> + static int64_t nTimeConnectTotal = 0 ; <nl> + static int64_t nTimeFlush = 0 ; <nl> + static int64_t nTimeChainState = 0 ; <nl> + static int64_t nTimePostConnect = 0 ; <nl> + <nl> / / Connect a new block to chainActive . <nl> bool static ConnectTip ( CValidationState & state , CBlockIndex * pindexNew ) { <nl> assert ( pindexNew - > pprev = = chainActive . Tip ( ) ) ; <nl> mempool . check ( pcoinsTip ) ; <nl> / / Read block from disk . <nl> + int64_t nTime1 = GetTimeMicros ( ) ; <nl> CBlock block ; <nl> if ( ! ReadBlockFromDisk ( block , pindexNew ) ) <nl> return state . Abort ( _ ( " Failed to read block " ) ) ; <nl> / / Apply the block atomically to the chain state . <nl> - int64_t nStart = GetTimeMicros ( ) ; <nl> + int64_t nTime2 = GetTimeMicros ( ) ; nTimeReadFromDisk + = nTime2 - nTime1 ; <nl> + int64_t nTime3 ; <nl> + LogPrint ( " bench " , " - Load block from disk : % . 2fms [ % . 2fs ] \ n " , ( nTime2 - nTime1 ) * 0 . 001 , nTimeReadFromDisk * 0 . 000001 ) ; <nl> { <nl> CCoinsViewCache view ( * pcoinsTip , true ) ; <nl> CInv inv ( MSG_BLOCK , pindexNew - > GetBlockHash ( ) ) ; <nl> bool static ConnectTip ( CValidationState & state , CBlockIndex * pindexNew ) { <nl> return error ( " ConnectTip ( ) : ConnectBlock % s failed " , pindexNew - > GetBlockHash ( ) . ToString ( ) ) ; <nl> } <nl> mapBlockSource . erase ( inv . hash ) ; <nl> + nTime3 = GetTimeMicros ( ) ; nTimeConnectTotal + = nTime3 - nTime2 ; <nl> + LogPrint ( " bench " , " - Connect total : % . 2fms [ % . 2fs ] \ n " , ( nTime3 - nTime2 ) * 0 . 001 , nTimeConnectTotal * 0 . 000001 ) ; <nl> assert ( view . Flush ( ) ) ; <nl> } <nl> - if ( fBenchmark ) <nl> - LogPrintf ( " - Connect : % . 2fms \ n " , ( GetTimeMicros ( ) - nStart ) * 0 . 001 ) ; <nl> + int64_t nTime4 = GetTimeMicros ( ) ; nTimeFlush + = nTime4 - nTime3 ; <nl> + LogPrint ( " bench " , " - Flush : % . 2fms [ % . 2fs ] \ n " , ( nTime4 - nTime3 ) * 0 . 001 , nTimeFlush * 0 . 000001 ) ; <nl> / / Write the chain state to disk , if necessary . <nl> if ( ! WriteChainState ( state ) ) <nl> return false ; <nl> + int64_t nTime5 = GetTimeMicros ( ) ; nTimeChainState + = nTime5 - nTime4 ; <nl> + LogPrint ( " bench " , " - Writing chainstate : % . 2fms [ % . 2fs ] \ n " , ( nTime5 - nTime4 ) * 0 . 001 , nTimeChainState * 0 . 000001 ) ; <nl> / / Remove conflicting transactions from the mempool . <nl> list < CTransaction > txConflicted ; <nl> mempool . removeForBlock ( block . vtx , pindexNew - > nHeight , txConflicted ) ; <nl> bool static ConnectTip ( CValidationState & state , CBlockIndex * pindexNew ) { <nl> BOOST_FOREACH ( const CTransaction & tx , block . vtx ) { <nl> SyncWithWallets ( tx , & block ) ; <nl> } <nl> + int64_t nTime6 = GetTimeMicros ( ) ; nTimePostConnect + = nTime6 - nTime5 ; nTimeTotal + = nTime6 - nTime1 ; <nl> + LogPrint ( " bench " , " - Connect postprocess : % . 2fms [ % . 2fs ] \ n " , ( nTime6 - nTime5 ) * 0 . 001 , nTimePostConnect * 0 . 000001 ) ; <nl> + LogPrint ( " bench " , " - Connect block : % . 2fms [ % . 2fs ] \ n " , ( nTime6 - nTime1 ) * 0 . 001 , nTimeTotal * 0 . 000001 ) ; <nl> return true ; <nl> } <nl> <nl> mmm a / src / main . h <nl> ppp b / src / main . 
h <nl> extern CWaitableCriticalSection csBestBlock ; <nl> extern CConditionVariable cvBlockChange ; <nl> extern bool fImporting ; <nl> extern bool fReindex ; <nl> - extern bool fBenchmark ; <nl> extern int nScriptCheckThreads ; <nl> extern bool fTxIndex ; <nl> extern bool fIsBareMultisigStd ; <nl>
Merge pull request
bitcoin/bitcoin
efec4ec6c55562390908e5170d4451f5212aa538
2014-07-30T09:06:33Z
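A minimal C++ sketch of the benchmarking pattern this Bitcoin change moves to: per-phase timers accumulated into static totals and reported through a category-gated logger instead of a dedicated -benchmark flag. `logPrint` and `getTimeMicros` here are illustrative stand-ins, not Bitcoin Core's actual helpers.

```cpp
// Category-gated cumulative benchmarking, modeled on the pattern in the diff
// above. All names are stand-ins for illustration.
#include <chrono>
#include <cstdint>
#include <cstdio>
#include <set>
#include <string>
#include <thread>

static std::set<std::string> g_debugCategories = {"bench"};  // e.g. parsed from -debug=<category>

static int64_t getTimeMicros() {
    using namespace std::chrono;
    return duration_cast<microseconds>(steady_clock::now().time_since_epoch()).count();
}

// Print only when the category is enabled (analogous to LogPrint("bench", ...)).
template <typename... Args>
static void logPrint(const std::string& category, const char* fmt, Args... args) {
    if (g_debugCategories.count(category) == 0) return;
    std::printf(fmt, args...);
}

static int64_t nTimeConnect = 0;  // cumulative microseconds spent in "connect"

static void connectBlock(int txCount) {
    const int64_t nStart = getTimeMicros();
    std::this_thread::sleep_for(std::chrono::milliseconds(txCount));  // stand-in for real work
    const int64_t nDelta = getTimeMicros() - nStart;
    nTimeConnect += nDelta;
    logPrint("bench", "- Connect %d transactions: %.2fms (%.3fms/tx) [%.2fs]\n",
             txCount, 0.001 * nDelta, txCount ? 0.001 * nDelta / txCount : 0.0,
             nTimeConnect * 0.000001);
}

int main() {
    for (int i = 1; i <= 3; ++i) connectBlock(10 * i);
    return 0;
}
```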
mmm a / tensorflow / core / common_runtime / gpu / gpu_process_state . cc <nl> ppp b / tensorflow / core / common_runtime / gpu / gpu_process_state . cc <nl> Allocator * GPUProcessState : : GetGPUAllocator ( const GPUOptions & options , <nl> / / If true , checks for memory overwrites by writing <nl> / / distinctive patterns on both ends of allocated memory . <nl> if ( useCudaMemoryGuardAllocator ( ) ) { <nl> + LOG ( INFO ) < < " Using memory guard allocator for GPU . " ; <nl> gpu_allocator = new GPUDebugAllocator ( gpu_allocator , platform_gpu_id ) ; <nl> gpu_allocator = new GPUNanResetAllocator ( gpu_allocator , platform_gpu_id ) ; <nl> } else if ( useCudaMallocAllocator ( ) ) { <nl> + LOG ( INFO ) < < " Using CUDA malloc allocator for GPU . " ; <nl> / / If true , passes all allocation requests through to cudaMalloc <nl> / / useful for doing memory debugging with tools like cuda - memcheck <nl> / / * * WARNING * * probably will not work in a multi - gpu scenario <nl>
LOG the effect of TF_GPU_ALLOCATOR
tensorflow/tensorflow
73260e03aaa5c18a9bd9bc46d9fcb16ed091cbbd
2019-09-04T17:13:29Z
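The TensorFlow change only adds LOG(INFO) lines, but the underlying idea — log which allocator wrapper was actually selected so the effect of the environment variable is visible — can be sketched in a few lines of C++. The env-var name, value, and wrapper classes below are hypothetical stand-ins, not TensorFlow's real ones.

```cpp
// Hypothetical sketch: pick a GPU allocator wrapper from an env var and log
// the choice so the setting's effect shows up in the output.
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <memory>

struct Allocator {
    virtual ~Allocator() = default;
    virtual void* Allocate(std::size_t n) { return ::operator new(n); }
    virtual void Deallocate(void* p) { ::operator delete(p); }
};

// A wrapper that would add guard bytes / poisoning in a real debug allocator.
struct GuardAllocator : Allocator {
    explicit GuardAllocator(std::unique_ptr<Allocator> base) : base_(std::move(base)) {}
    void* Allocate(std::size_t n) override { return base_->Allocate(n); }
    void Deallocate(void* p) override { base_->Deallocate(p); }
    std::unique_ptr<Allocator> base_;
};

static bool EnvEquals(const char* name, const char* value) {
    const char* v = std::getenv(name);
    return v != nullptr && std::strcmp(v, value) == 0;
}

std::unique_ptr<Allocator> MakeGpuAllocator() {
    std::unique_ptr<Allocator> alloc = std::make_unique<Allocator>();
    if (EnvEquals("MY_GPU_ALLOCATOR", "memory_guard")) {  // hypothetical variable and value
        std::clog << "Using memory guard allocator for GPU.\n";
        return std::make_unique<GuardAllocator>(std::move(alloc));
    }
    std::clog << "Using default GPU allocator.\n";
    return alloc;
}

int main() {
    auto a = MakeGpuAllocator();
    void* p = a->Allocate(64);
    a->Deallocate(p);
    return 0;
}
```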
mmm a / hphp / runtime / vm / jit / func - prologues - arm . cpp <nl> ppp b / hphp / runtime / vm / jit / func - prologues - arm . cpp <nl> SrcKey emitPrologueWork ( Func * func , int nPassed ) { <nl> <nl> / / Resolve cases where the wrong number of args was passed . <nl> if ( nPassed > numNonVariadicParams ) { <nl> - void ( * helper ) ( ActRec * ) = JIT : : shuffleExtraArgs ; <nl> + void ( * helper ) ( ActRec * ) ; <nl> + if ( func - > attrs ( ) & AttrMayUseVV ) { <nl> + helper = func - > hasVariadicCaptureParam ( ) <nl> + ? JIT : : shuffleExtraArgsVariadicAndVV <nl> + : JIT : : shuffleExtraArgsMayUseVV ; <nl> + } else if ( func - > hasVariadicCaptureParam ( ) ) { <nl> + helper = JIT : : shuffleExtraArgsVariadic ; <nl> + } else { <nl> + helper = JIT : : shuffleExtraArgs ; <nl> + } <nl> a . Mov ( argReg ( 0 ) , rStashedAR ) ; <nl> emitCall ( a , CppCall ( helper ) ) ; <nl> / / We ' ll fix rVmSp below . <nl> mmm a / hphp / runtime / vm / jit / func - prologues - x64 . cpp <nl> ppp b / hphp / runtime / vm / jit / func - prologues - x64 . cpp <nl> SrcKey emitPrologueWork ( Func * func , int nPassed ) { <nl> / / came from emitMagicFuncPrologue . <nl> <nl> if ( nPassed > numNonVariadicParams ) { <nl> - / / Too many args ; a weird case , so just callout . Stash ar <nl> - / / somewhere callee - saved . <nl> + / / Too many args ; a weird case , so call out to an appropriate helper . <nl> + / / Stash ar somewhere callee - saved . <nl> if ( false ) { / / typecheck <nl> + JIT : : shuffleExtraArgsMayUseVV ( ( ActRec * ) nullptr ) ; <nl> + JIT : : shuffleExtraArgsVariadicAndVV ( ( ActRec * ) nullptr ) ; <nl> + JIT : : shuffleExtraArgsVariadic ( ( ActRec * ) nullptr ) ; <nl> JIT : : shuffleExtraArgs ( ( ActRec * ) nullptr ) ; <nl> } <nl> a . movq ( rStashedAR , argNumToRegName [ 0 ] ) ; <nl> - emitCall ( a , TCA ( JIT : : shuffleExtraArgs ) ) ; <nl> + <nl> + if ( func - > attrs ( ) & AttrMayUseVV ) { <nl> + emitCall ( a , func - > hasVariadicCaptureParam ( ) <nl> + ? TCA ( JIT : : shuffleExtraArgsVariadicAndVV ) <nl> + : TCA ( JIT : : shuffleExtraArgsMayUseVV ) ) ; <nl> + } else if ( func - > hasVariadicCaptureParam ( ) ) { <nl> + emitCall ( a , TCA ( JIT : : shuffleExtraArgsVariadic ) ) ; <nl> + } else { <nl> + emitCall ( a , TCA ( JIT : : shuffleExtraArgs ) ) ; <nl> + } <nl> / / We ' ll fix rVmSp below . <nl> } else if ( nPassed < numNonVariadicParams ) { <nl> TRACE ( 1 , " Only have % d of % d args ; getting dvFunclet \ n " , <nl> mmm a / hphp / runtime / vm / jit / translator - runtime . cpp <nl> ppp b / hphp / runtime / vm / jit / translator - runtime . cpp <nl> static void sync_regstate_to_caller ( ActRec * preLive ) { <nl> tl_regState = VMRegState : : CLEAN ; <nl> } <nl> <nl> - / / This function is the JIT version of bytecode . cpp ' s shuffleExtraStackArgs <nl> + # define SHUFFLE_EXTRA_ARGS_PRELUDE ( ) \ <nl> + assert ( ! ar - > hasInvName ( ) ) ; \ <nl> + sync_regstate_to_caller ( ar ) ; \ <nl> + const Func * f = ar - > m_func ; \ <nl> + int numParams = f - > numNonVariadicParams ( ) ; \ <nl> + int numArgs = ar - > numArgs ( ) ; \ <nl> + assert ( numArgs > numParams ) ; \ <nl> + int numExtra = numArgs - numParams ; \ <nl> + TRACE ( 1 , " shuffleExtraArgs : % d args , function % s takes only % d , ar % p \ n " , \ <nl> + numArgs , f - > name ( ) - > data ( ) , numParams , ar ) ; \ <nl> + auto tvArgs = reinterpret_cast < TypedValue * > ( ar ) - numArgs ; \ <nl> + / * end SHUFFLE_EXTRA_ARGS_PRELUDE * / <nl> + <nl> void shuffleExtraArgs ( ActRec * ar ) { <nl> - assert ( ! 
ar - > hasInvName ( ) ) ; <nl> - <nl> - sync_regstate_to_caller ( ar ) ; <nl> - const Func * f = ar - > m_func ; <nl> - int numParams = f - > numNonVariadicParams ( ) ; <nl> - int numArgs = ar - > numArgs ( ) ; <nl> - assert ( numArgs > numParams ) ; <nl> - int numExtra = numArgs - numParams ; <nl> - <nl> - TRACE ( 1 , " shuffleExtraArgs : % d args , function % s takes only % d , ar % p \ n " , <nl> - numArgs , f - > name ( ) - > data ( ) , numParams , ar ) ; <nl> - auto const takesVariadicParam = f - > hasVariadicCaptureParam ( ) ; <nl> - auto tvArgs = reinterpret_cast < TypedValue * > ( ar ) - numArgs ; <nl> - if ( f - > attrs ( ) & AttrMayUseVV ) { <nl> + SHUFFLE_EXTRA_ARGS_PRELUDE ( ) <nl> + assert ( ! f - > hasVariadicCaptureParam ( ) ) ; <nl> + assert ( ! ( f - > attrs ( ) & AttrMayUseVV ) ) ; <nl> + <nl> + { <nl> + / / Function is not marked as " MayUseVV " , so discard the extra arguments <nl> + for ( int i = 0 ; i < numExtra ; + + i ) { <nl> + tvRefcountedDecRef ( tvArgs ) ; <nl> + + + tvArgs ; <nl> + } <nl> + assert ( f - > numParams ( ) = = ( numArgs - numExtra ) ) ; <nl> + assert ( f - > numParams ( ) = = numParams ) ; <nl> + ar - > setNumArgs ( numParams ) ; <nl> + } <nl> + <nl> + / / Only go back to dirty in a non - exception case . ( Same reason as <nl> + / / above . ) <nl> + tl_regState = VMRegState : : DIRTY ; <nl> + } <nl> + <nl> + void shuffleExtraArgsMayUseVV ( ActRec * ar ) { <nl> + SHUFFLE_EXTRA_ARGS_PRELUDE ( ) <nl> + assert ( ! f - > hasVariadicCaptureParam ( ) ) ; <nl> + assert ( f - > attrs ( ) & AttrMayUseVV ) ; <nl> + <nl> + { <nl> assert ( ! ar - > hasExtraArgs ( ) ) ; <nl> ar - > setExtraArgs ( ExtraArgs : : allocateCopy ( tvArgs , numExtra ) ) ; <nl> - if ( takesVariadicParam ) { <nl> - auto varArgsArray = <nl> - Array : : attach ( MixedArray : : MakePacked ( numExtra , tvArgs ) ) ; <nl> - auto tvIncr = tvArgs ; uint32_t i = 0 ; <nl> - / / an incref is needed to compensate for discarding from the stack <nl> - for ( ; i < numExtra ; + + i , + + tvIncr ) { tvRefcountedIncRef ( tvIncr ) ; } <nl> - / / write into the last ( variadic ) param <nl> - auto tv = reinterpret_cast < TypedValue * > ( ar ) - numParams - 1 ; <nl> - tv - > m_type = KindOfArray ; <nl> - tv - > m_data . parr = varArgsArray . detach ( ) ; <nl> - assert ( tv - > m_data . parr - > hasExactlyOneRef ( ) ) ; <nl> - / / Before , for each arg : refcount = n + 1 ( stack ) <nl> - / / After , for each arg : refcount = n + 2 ( ExtraArgs , varArgsArray ) <nl> - } <nl> - } else if ( takesVariadicParam ) { <nl> + } <nl> + <nl> + / / Only go back to dirty in a non - exception case . ( Same reason as <nl> + / / above . ) <nl> + tl_regState = VMRegState : : DIRTY ; <nl> + } <nl> + <nl> + void shuffleExtraArgsVariadic ( ActRec * ar ) { <nl> + SHUFFLE_EXTRA_ARGS_PRELUDE ( ) <nl> + assert ( f - > hasVariadicCaptureParam ( ) ) ; <nl> + assert ( ! 
( f - > attrs ( ) & AttrMayUseVV ) ) ; <nl> + <nl> + { <nl> auto varArgsArray = Array : : attach ( MixedArray : : MakePacked ( numExtra , tvArgs ) ) ; <nl> / / write into the last ( variadic ) param <nl> auto tv = reinterpret_cast < TypedValue * > ( ar ) - numParams - 1 ; <nl> void shuffleExtraArgs ( ActRec * ar ) { <nl> assert ( f - > numParams ( ) = = ( numArgs - numExtra + 1 ) ) ; <nl> assert ( f - > numParams ( ) = = ( numParams + 1 ) ) ; <nl> ar - > setNumArgs ( numParams + 1 ) ; <nl> - } else { <nl> - / / Function is not marked as " MayUseVV " , so discard the extra arguments <nl> - for ( int i = 0 ; i < numExtra ; + + i ) { <nl> - tvRefcountedDecRef ( tvArgs ) ; <nl> - + + tvArgs ; <nl> - } <nl> - assert ( f - > numParams ( ) = = ( numArgs - numExtra ) ) ; <nl> - assert ( f - > numParams ( ) = = numParams ) ; <nl> - ar - > setNumArgs ( numParams ) ; <nl> } <nl> <nl> / / Only go back to dirty in a non - exception case . ( Same reason as <nl> void shuffleExtraArgs ( ActRec * ar ) { <nl> tl_regState = VMRegState : : DIRTY ; <nl> } <nl> <nl> + void shuffleExtraArgsVariadicAndVV ( ActRec * ar ) { <nl> + SHUFFLE_EXTRA_ARGS_PRELUDE ( ) <nl> + assert ( f - > hasVariadicCaptureParam ( ) ) ; <nl> + assert ( f - > attrs ( ) & AttrMayUseVV ) ; <nl> + <nl> + { <nl> + assert ( ! ar - > hasExtraArgs ( ) ) ; <nl> + ar - > setExtraArgs ( ExtraArgs : : allocateCopy ( tvArgs , numExtra ) ) ; <nl> + <nl> + auto varArgsArray = <nl> + Array : : attach ( MixedArray : : MakePacked ( numExtra , tvArgs ) ) ; <nl> + auto tvIncr = tvArgs ; uint32_t i = 0 ; <nl> + / / an incref is needed to compensate for discarding from the stack <nl> + for ( ; i < numExtra ; + + i , + + tvIncr ) { tvRefcountedIncRef ( tvIncr ) ; } <nl> + / / write into the last ( variadic ) param <nl> + auto tv = reinterpret_cast < TypedValue * > ( ar ) - numParams - 1 ; <nl> + tv - > m_type = KindOfArray ; <nl> + tv - > m_data . parr = varArgsArray . detach ( ) ; <nl> + assert ( tv - > m_data . parr - > hasExactlyOneRef ( ) ) ; <nl> + / / Before , for each arg : refcount = n + 1 ( stack ) <nl> + / / After , for each arg : refcount = n + 2 ( ExtraArgs , varArgsArray ) <nl> + } <nl> + <nl> + / / Only go back to dirty in a non - exception case . ( Same reason as <nl> + / / above . ) <nl> + tl_regState = VMRegState : : DIRTY ; <nl> + } <nl> + <nl> + # undef SHUFFLE_EXTRA_ARGS_PRELUDE <nl> + <nl> void raiseMissingArgument ( const char * name , int expected , <nl> int got , bool variadic ) { <nl> if ( expected = = 1 ) { <nl> mmm a / hphp / runtime / vm / jit / translator - runtime . h <nl> ppp b / hphp / runtime / vm / jit / translator - runtime . h <nl> ObjectData * colAddNewElemCHelper ( ObjectData * coll , TypedValue value ) ; <nl> ObjectData * colAddElemCHelper ( ObjectData * coll , TypedValue key , <nl> TypedValue value ) ; <nl> <nl> + / / These shuffle * functions are the JIT ' s version of bytecode . cpp ' s <nl> + / / shuffleExtraStackArgs <nl> void shuffleExtraArgs ( ActRec * ar ) ; <nl> + void shuffleExtraArgsMayUseVV ( ActRec * ar ) ; <nl> + void shuffleExtraArgsVariadic ( ActRec * ar ) ; <nl> + void shuffleExtraArgsVariadicAndVV ( ActRec * ar ) ; <nl> <nl> void raiseMissingArgument ( const char * name , int expected , <nl> int got , bool variadic ) ; <nl>
variadic functions : split " extra args " jit helpers
facebook/hhvm
bc53871994eff9352bf5a792fab08b0881d4d93a
2014-04-14T16:05:46Z
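A compact sketch of the refactoring above: one general-purpose helper is split into four specialized variants, and the JIT picks the right one once, when the prologue is emitted, based on two function attributes. The `Func` and `ActRec` types below are simplified stand-ins, not HHVM's.

```cpp
// Split-and-select pattern from the HHVM diff: each specialized helper can
// assume its flags and skip per-call branching.
#include <cstdio>

struct ActRec { int numArgs; };
struct Func {
    bool mayUseVV;
    bool hasVariadicCapture;
};

static void shuffleExtraArgs(ActRec* ar)              { std::printf("plain: drop %d extras\n", ar->numArgs); }
static void shuffleExtraArgsMayUseVV(ActRec* ar)      { std::printf("VV: stash %d extras\n", ar->numArgs); }
static void shuffleExtraArgsVariadic(ActRec* ar)      { std::printf("variadic: pack %d extras\n", ar->numArgs); }
static void shuffleExtraArgsVariadicAndVV(ActRec* ar) { std::printf("variadic+VV: stash and pack %d extras\n", ar->numArgs); }

using Helper = void (*)(ActRec*);

// The selection happens once, when the prologue is emitted, not on every call.
static Helper pickHelper(const Func& f) {
    if (f.mayUseVV) {
        return f.hasVariadicCapture ? shuffleExtraArgsVariadicAndVV
                                    : shuffleExtraArgsMayUseVV;
    }
    return f.hasVariadicCapture ? shuffleExtraArgsVariadic : shuffleExtraArgs;
}

int main() {
    Func funcs[] = {{false, false}, {true, false}, {false, true}, {true, true}};
    ActRec ar{3};
    for (const Func& f : funcs) pickHelper(f)(&ar);
    return 0;
}
```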
mmm a / cocos2dx / kazmath / src / ray2 . c <nl> ppp b / cocos2dx / kazmath / src / ray2 . c <nl> kmBool kmRay2IntersectTriangle ( const kmRay2 * ray , const kmVec2 * p1 , const kmVec2 <nl> if ( this_distance < distance ) { <nl> final_intersect . x = intersect . x ; <nl> final_intersect . y = intersect . y ; <nl> - distance = this_distance ; <nl> + / / distance = this_distance ; <nl> <nl> calculate_line_normal ( * p3 , * p1 , & normal ) ; <nl> } <nl>
Commented out unused debug variable to silence Static Analyzer Warning . ( Value never read . )
cocos2d/cocos2d-x
abe45e53b592ec9f3d1342c4ad93276ed1613e64
2012-08-20T23:16:00Z
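For context, a minimal example of the dead-store pattern the cocos2d-x change silences: the final write to a distance accumulator is never read again, so Clang's static analyzer reports "value never read". The function below is illustrative only, not the kazmath code.

```cpp
// Illustrative dead store: the last assignment to `distance` is never read
// again, so the analyzer flags it; commenting it out silences the warning.
#include <cmath>
#include <cstdio>

static double nearestToZero(double a, double b, double c) {
    double nearest = a;
    double distance = std::fabs(a);
    if (std::fabs(b) < distance) {
        nearest = b;
        distance = std::fabs(b);     // still read by the next comparison, so keep it
    }
    if (std::fabs(c) < distance) {
        nearest = c;
        // distance = std::fabs(c);  // never read afterwards: comment out (or delete) to silence the warning
    }
    return nearest;
}

int main() {
    std::printf("%g\n", nearestToZero(3.0, -1.5, 2.0));  // prints -1.5
    return 0;
}
```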
mmm a / docs / CHANGELOG . txt <nl> ppp b / docs / CHANGELOG . txt <nl> Other Changes : <nl> when enabled will have small overlap glitches with ( style . Alpha < 1 . 0 ) . <nl> - TabBar : fixed ScrollToBar request creating bouncing loop when tab is larger than available space . <nl> - TabBar : fixed single - tab not shrinking their width down . <nl> + - SliderScalar : Improved assert when using U32 or U64 types with a large v_max value . ( # 2765 ) [ @ loicmouton ] <nl> - ImDrawList : clarified the name of many parameters so reading the code is a little easier . ( # 2740 ) <nl> - Using offsetof ( ) when available in C + + 11 . Avoids Clang sanitizer complaining about old - style macros . ( # 94 ) <nl> - Backends : DX11 : Fixed GSGetShader ( ) call not passing an initialized instance count , <nl> mmm a / imgui . h <nl> ppp b / imgui . h <nl> struct ImDrawCmd <nl> ImDrawCallback UserCallback ; / / If ! = NULL , call the function instead of rendering the vertices . clip_rect and texture_id will be set normally . <nl> void * UserCallbackData ; / / The draw callback code can access this . <nl> <nl> - ImDrawCmd ( ) { ElemCount = 0 ; ClipRect . x = ClipRect . y = ClipRect . z = ClipRect . w = 0 . 0f ; TextureId = ( ImTextureID ) NULL ; VtxOffset = IdxOffset = 0 ; UserCallback = NULL ; UserCallbackData = NULL ; } <nl> + ImDrawCmd ( ) { ElemCount = 0 ; TextureId = ( ImTextureID ) NULL ; VtxOffset = IdxOffset = 0 ; UserCallback = NULL ; UserCallbackData = NULL ; } <nl> } ; <nl> <nl> / / Vertex index <nl> mmm a / imgui_widgets . cpp <nl> ppp b / imgui_widgets . cpp <nl> bool ImGui : : SliderBehavior ( const ImRect & bb , ImGuiID id , ImGuiDataType data_type <nl> IM_ASSERT ( * ( const ImS32 * ) v_min > = IM_S32_MIN / 2 & & * ( const ImS32 * ) v_max < = IM_S32_MAX / 2 ) ; <nl> return SliderBehaviorT < ImS32 , ImS32 , float > ( bb , id , data_type , ( ImS32 * ) v , * ( const ImS32 * ) v_min , * ( const ImS32 * ) v_max , format , power , flags , out_grab_bb ) ; <nl> case ImGuiDataType_U32 : <nl> - IM_ASSERT ( * ( const ImU32 * ) v_min < = IM_U32_MAX / 2 ) ; <nl> + IM_ASSERT ( * ( const ImU32 * ) v_max < = IM_U32_MAX / 2 ) ; <nl> return SliderBehaviorT < ImU32 , ImS32 , float > ( bb , id , data_type , ( ImU32 * ) v , * ( const ImU32 * ) v_min , * ( const ImU32 * ) v_max , format , power , flags , out_grab_bb ) ; <nl> case ImGuiDataType_S64 : <nl> IM_ASSERT ( * ( const ImS64 * ) v_min > = IM_S64_MIN / 2 & & * ( const ImS64 * ) v_max < = IM_S64_MAX / 2 ) ; <nl> return SliderBehaviorT < ImS64 , ImS64 , double > ( bb , id , data_type , ( ImS64 * ) v , * ( const ImS64 * ) v_min , * ( const ImS64 * ) v_max , format , power , flags , out_grab_bb ) ; <nl> case ImGuiDataType_U64 : <nl> - IM_ASSERT ( * ( const ImU64 * ) v_min < = IM_U64_MAX / 2 ) ; <nl> + IM_ASSERT ( * ( const ImU64 * ) v_max < = IM_U64_MAX / 2 ) ; <nl> return SliderBehaviorT < ImU64 , ImS64 , double > ( bb , id , data_type , ( ImU64 * ) v , * ( const ImU64 * ) v_min , * ( const ImU64 * ) v_max , format , power , flags , out_grab_bb ) ; <nl> case ImGuiDataType_Float : <nl> IM_ASSERT ( * ( const float * ) v_min > = - FLT_MAX / 2 . 0f & & * ( const float * ) v_max < = FLT_MAX / 2 . 0f ) ; <nl> bool ImGui : : InputTextEx ( const char * label , const char * hint , char * buf , int buf_ <nl> BeginGroup ( ) ; <nl> const ImGuiID id = window - > GetID ( label ) ; <nl> const ImVec2 label_size = CalcTextSize ( label , NULL , true ) ; <nl> - ImVec2 size = CalcItemSize ( size_arg , CalcItemWidth ( ) , ( is_multiline ? GetTextLineHeight ( ) * 8 . 
0f : label_size . y ) + style . FramePadding . y * 2 . 0f ) ; / / Arbitrary default of 8 lines high for multi - line <nl> + ImVec2 size = CalcItemSize ( size_arg , CalcItemWidth ( ) , ( is_multiline ? g . FontSize * 8 . 0f : label_size . y ) + style . FramePadding . y * 2 . 0f ) ; / / Arbitrary default of 8 lines high for multi - line <nl> const ImRect frame_bb ( window - > DC . CursorPos , window - > DC . CursorPos + size ) ; <nl> const ImRect total_bb ( frame_bb . Min , frame_bb . Max + ImVec2 ( label_size . x > 0 . 0f ? ( style . ItemInnerSpacing . x + label_size . x ) : 0 . 0f , 0 . 0f ) ) ; <nl> <nl>
SliderScalar : Improved assert when using U32 or U64 types with a large v_max value . ( )
ocornut/imgui
c8418015c22fad3479ebd5cd91f7658d56e47ad8
2019-08-28T13:19:10Z
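A short sketch of why the corrected assert bounds `v_max` rather than `v_min` for unsigned types: the slider math passes through a signed intermediate, so it is the upper bound that must fit in half the unsigned range. The function name and rounding below are illustrative, not ImGui's internals.

```cpp
// Why the upper bound matters: the range is handled through int32_t, so an
// unsigned v_max above half the type's range would overflow the intermediate.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <limits>

static uint32_t sliderRatioToValueU32(float ratio, uint32_t v_min, uint32_t v_max) {
    // Mirrors the fixed assertion: check v_max, not v_min.
    assert(v_max <= std::numeric_limits<uint32_t>::max() / 2);
    const int32_t range = static_cast<int32_t>(v_max) - static_cast<int32_t>(v_min);
    return v_min + static_cast<uint32_t>(range * ratio + 0.5f);
}

int main() {
    std::printf("%u\n", sliderRatioToValueU32(0.25f, 100u, 1100u));  // prints 350
    return 0;
}
```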
new file mode 100644 <nl> index 0000000000 . . b8c89a39dc <nl> mmm / dev / null <nl> ppp b / change / react - native - windows - 2019 - 10 - 15 - 17 - 00 - 10 - popup - shadow - fix . json <nl> <nl> + { <nl> + " type " : " prerelease " , <nl> + " comment " : " Extend flyout shadow fix to Popups " , <nl> + " packageName " : " react - native - windows " , <nl> + " email " : " kenander @ microsoft . com " , <nl> + " commit " : " 2e896420694fe6b89d3a0a46450b9ea6c887b058 " , <nl> + " date " : " 2019 - 10 - 16T00 : 00 : 10 . 032Z " , <nl> + " file " : " F : \ \ react - native - windows \ \ change \ \ react - native - windows - 2019 - 10 - 15 - 17 - 00 - 10 - popup - shadow - fix . json " <nl> + } <nl> \ No newline at end of file <nl> mmm a / vnext / ReactUWP / Utils / Helpers . cpp <nl> ppp b / vnext / ReactUWP / Utils / Helpers . cpp <nl> <nl> # include < Modules / NativeUIManager . h > <nl> # include " Helpers . h " <nl> <nl> + namespace winrt { <nl> + using namespace Windows : : UI : : Xaml : : Controls : : Primitives ; <nl> + using namespace Windows : : UI : : Xaml : : Media ; <nl> + } / / namespace winrt <nl> + <nl> namespace react { <nl> namespace uwp { <nl> <nl> ReactId getViewId ( <nl> return reactId ; <nl> } ; <nl> <nl> + std : : int32_t CountOpenPopups ( ) { <nl> + / / TODO : Use VisualTreeHelper : : GetOpenPopupsFromXamlRoot when running against <nl> + / / RS6 <nl> + winrt : : Windows : : Foundation : : Collections : : IVectorView < winrt : : Popup > popups = <nl> + winrt : : VisualTreeHelper : : GetOpenPopups ( winrt : : Window : : Current ( ) ) ; <nl> + return ( int32_t ) popups . Size ( ) ; <nl> + } <nl> + <nl> } / / namespace uwp <nl> } ; / / namespace react <nl> mmm a / vnext / ReactUWP / Utils / Helpers . h <nl> ppp b / vnext / ReactUWP / Utils / Helpers . h <nl> inline typename T asEnum ( folly : : dynamic const & obj ) { <nl> ReactId getViewId ( <nl> _In_ IReactInstance * instance , <nl> winrt : : FrameworkElement const & fe ) ; <nl> + std : : int32_t CountOpenPopups ( ) ; <nl> } / / namespace uwp <nl> } / / namespace react <nl> mmm a / vnext / ReactUWP / Views / FlyoutViewManager . cpp <nl> ppp b / vnext / ReactUWP / Views / FlyoutViewManager . cpp <nl> <nl> # include " ViewPanel . h " <nl> <nl> # include < Modules / NativeUIManager . h > <nl> + # include < Utils / Helpers . h > <nl> # include < Utils / PropertyHandlerUtils . h > <nl> # include < winrt / Windows . UI . Xaml . Controls . Primitives . h > <nl> <nl> class FlyoutShadowNode : public ShadowNodeBase { <nl> float m_verticalOffset = 0 ; <nl> bool m_isFlyoutShowOptionsSupported = false ; <nl> winrt : : FlyoutShowOptions m_showOptions = nullptr ; <nl> - static thread_local std : : int32_t s_cOpenFlyouts ; <nl> <nl> std : : unique_ptr < TouchEventHandler > m_touchEventHanadler ; <nl> std : : unique_ptr < PreviewKeyboardEventHandlerOnRoot > <nl> class FlyoutShadowNode : public ShadowNodeBase { <nl> winrt : : Flyout : : Opened_revoker m_flyoutOpenedRevoker { } ; <nl> } ; <nl> <nl> - thread_local std : : int32_t FlyoutShadowNode : : s_cOpenFlyouts = 0 ; <nl> - <nl> FlyoutShadowNode : : ~ FlyoutShadowNode ( ) { <nl> m_touchEventHanadler - > RemoveTouchHandlers ( ) ; <nl> m_previewKeyboardEventHandlerOnRoot - > unhook ( ) ; <nl> void FlyoutShadowNode : : createView ( ) { <nl> <nl> if ( ! m_updating & & instance ! = nullptr ) { <nl> if ( auto flyoutPresenter = GetFlyoutPresenter ( ) ) { <nl> - / / When multiple flyouts are overlapping , XAML ' s theme shadows <nl> - / / don ' t render properly . 
As a workaround we enable a z - index <nl> - / / translation based on an elevation derived from the count of <nl> - / / open flyouts . We apply this translation on open of the flyout . <nl> - / / ( Translation is only supported on RS5 + , eg . IUIElement9 ) <nl> + / / When multiple flyouts / popups are overlapping , XAML ' s theme <nl> + / / shadows don ' t render properly . As a workaround we enable a <nl> + / / z - index translation based on an elevation derived from the count <nl> + / / of open popups / flyouts . We apply this translation on open of the <nl> + / / flyout . ( Translation is only supported on RS5 + , eg . IUIElement9 ) <nl> if ( auto uiElement9 = GetView ( ) . try_as < winrt : : IUIElement9 > ( ) ) { <nl> - winrt : : Numerics : : float3 translation { <nl> - 0 , 0 , ( float ) 16 * s_cOpenFlyouts } ; <nl> - flyoutPresenter . Translation ( translation ) ; <nl> + auto numOpenPopups = CountOpenPopups ( ) ; <nl> + if ( numOpenPopups > 0 ) { <nl> + winrt : : Numerics : : float3 translation { <nl> + 0 , 0 , ( float ) 16 * numOpenPopups } ; <nl> + flyoutPresenter . Translation ( translation ) ; <nl> + } <nl> } <nl> <nl> flyoutPresenter . AllowFocusOnInteraction ( false ) ; <nl> void FlyoutShadowNode : : onDropViewInstance ( ) { <nl> if ( m_isOpen ) { <nl> m_isOpen = false ; <nl> m_flyout . Hide ( ) ; <nl> - s_cOpenFlyouts - = 1 ; <nl> - assert ( s_cOpenFlyouts > = 0 ) ; <nl> } <nl> } <nl> <nl> void FlyoutShadowNode : : updateProperties ( const folly : : dynamic & & props ) { <nl> <nl> if ( updateIsOpen ) { <nl> if ( m_isOpen ) { <nl> - s_cOpenFlyouts + = 1 ; <nl> AdjustDefaultFlyoutStyle ( 50000 , 50000 ) ; <nl> if ( m_isFlyoutShowOptionsSupported ) { <nl> m_flyout . ShowAt ( m_targetElement , m_showOptions ) ; <nl> void FlyoutShadowNode : : updateProperties ( const folly : : dynamic & & props ) { <nl> popup . IsLightDismissEnabled ( m_isLightDismissEnabled ) ; <nl> } else { <nl> m_flyout . Hide ( ) ; <nl> - s_cOpenFlyouts - = 1 ; <nl> - assert ( s_cOpenFlyouts > = 0 ) ; <nl> } <nl> } <nl> <nl> mmm a / vnext / ReactUWP / Views / PopupViewManager . cpp <nl> ppp b / vnext / ReactUWP / Views / PopupViewManager . cpp <nl> <nl> # include " TouchEventHandler . h " <nl> <nl> # include < Modules / NativeUIManager . h > <nl> + # include < Utils / Helpers . h > <nl> # include < Utils / ValueUtils . h > <nl> # include < winrt / Windows . UI . Core . h > <nl> <nl> void PopupShadowNode : : createView ( ) { <nl> m_popupOpenedRevoker = <nl> popup . Opened ( winrt : : auto_revoke , [ = ] ( auto & & , auto & & ) { <nl> auto instance = wkinstance . lock ( ) ; <nl> - if ( ! m_updating & & instance ! = nullptr ) <nl> + if ( ! m_updating & & instance ! = nullptr ) { <nl> SetAnchorPosition ( popup ) ; <nl> + <nl> + / / When multiple flyouts / popups are overlapping , XAML ' s theme shadows <nl> + / / don ' t render properly . As a workaround we enable a z - index <nl> + / / translation based on an elevation derived from the count of open <nl> + / / popups / flyouts . We apply this translation on open of the popup . <nl> + / / ( Translation is only supported on RS5 + , eg . IUIElement9 ) <nl> + if ( auto uiElement9 = GetView ( ) . try_as < winrt : : IUIElement9 > ( ) ) { <nl> + auto numOpenPopups = CountOpenPopups ( ) ; <nl> + if ( numOpenPopups > 0 ) { <nl> + winrt : : Numerics : : float3 translation { <nl> + 0 , 0 , ( float ) 16 * numOpenPopups } ; <nl> + popup . Translation ( translation ) ; <nl> + } <nl> + } <nl> + } <nl> } ) ; <nl> } <nl> <nl> mmm a / vnext / src / RNTester / FlyoutExample . 
windows . tsx <nl> ppp b / vnext / src / RNTester / FlyoutExample . windows . tsx <nl> <nl> <nl> import React = require ( ' react ' ) ; <nl> import { Button , CheckBox , Text , TextInput , View } from ' react - native ' ; <nl> - import { Flyout , Picker } from ' react - native - windows ' ; <nl> + import { Flyout , Picker , Popup } from ' react - native - windows ' ; <nl> import { Placement } from ' . . / Libraries / Components / Flyout / FlyoutProps ' ; <nl> <nl> interface IFlyoutExampleState { <nl> isFlyoutVisible : boolean ; <nl> isFlyoutTwoVisible : boolean ; <nl> + isPopupVisible : boolean ; <nl> buttonTitle : string ; <nl> isLightDismissEnabled : boolean ; <nl> isOverlayEnabled : boolean ; <nl> class FlyoutExample extends React . Component < { } , IFlyoutExampleState > { <nl> public state : IFlyoutExampleState = { <nl> isFlyoutVisible : false , <nl> isFlyoutTwoVisible : false , <nl> + isPopupVisible : false , <nl> buttonTitle : ' Open Flyout ' , <nl> isLightDismissEnabled : true , <nl> isOverlayEnabled : false , <nl> class FlyoutExample extends React . Component < { } , IFlyoutExampleState > { <nl> ref = { this . _setRefTwo } <nl> / > <nl> < / View > <nl> + < View <nl> + style = { { <nl> + width : 150 , <nl> + marginLeft : 75 , <nl> + marginTop : 10 , <nl> + } } > <nl> + < Button <nl> + onPress = { ( ) = > { <nl> + this . setState ( { isPopupVisible : true } ) ; <nl> + } } <nl> + title = { ' Open A Popup ' } <nl> + / > <nl> + < / View > <nl> < / View > <nl> < / Flyout > <nl> ) } <nl> class FlyoutExample extends React . Component < { } , IFlyoutExampleState > { <nl> < / View > <nl> < / Flyout > <nl> ) } <nl> + { this . state . isPopupVisible & & ( <nl> + < Popup <nl> + isOpen = { this . state . isPopupVisible } <nl> + isLightDismissEnabled = { this . state . isLightDismissEnabled } <nl> + target = { this . _anchorTwo } <nl> + onDismiss = { ( ) = > { <nl> + this . setState ( { isPopupVisible : false } ) ; <nl> + } } > <nl> + < View <nl> + style = { { backgroundColor : ' lightblue ' , width : 200 , height : 300 } } > <nl> + < Text > { lorumIpsum } < / Text > <nl> + < Button <nl> + onPress = { ( ) = > { <nl> + this . setState ( { isPopupVisible : false } ) ; <nl> + } } <nl> + title = " Close " <nl> + / > <nl> + < / View > <nl> + < / Popup > <nl> + ) } <nl> < / View > <nl> ) ; <nl> } <nl> class FlyoutExample extends React . Component < { } , IFlyoutExampleState > { <nl> _onFlyoutDismissed = ( _isOpen : boolean ) = > { <nl> this . setState ( { isFlyoutVisible : false } ) ; <nl> this . setState ( { isFlyoutTwoVisible : false } ) ; <nl> + this . setState ( { isPopupVisible : false } ) ; <nl> this . setState ( { buttonTitle : ' Open Flyout ' } ) ; <nl> } ; <nl> <nl>
( Forward port ) Extend overlapping Flyout shadow fix to Popups ( )
microsoft/react-native-windows
a4670ab127e8cc0bbf47b0b948341cdb16adf314
2019-10-16T02:14:20Z
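A sketch of the bookkeeping change above: rather than maintaining a manual thread-local counter of open flyouts, derive the shadow elevation from an authoritative count of open popups at the moment one opens. The popup registry below stands in for XAML's `VisualTreeHelper::GetOpenPopups`; the 16-unit step matches the diff.

```cpp
// Derive z-translation from an authoritative count instead of hand-maintained
// bookkeeping. The registry here is a stand-in for the framework query.
#include <cstdio>
#include <vector>

struct Popup { bool open = false; };

static std::vector<Popup*> g_popups;  // stand-in registry of live popups

static int countOpenPopups() {
    int n = 0;
    for (const Popup* p : g_popups) n += p->open ? 1 : 0;
    return n;
}

// 16 units of z-translation per already-open popup, as in the diff above.
static float elevationForNewPopup() {
    return 16.0f * static_cast<float>(countOpenPopups());
}

int main() {
    Popup a{true}, b{true}, c{false};
    g_popups = {&a, &b, &c};
    std::printf("translate new popup by z=%.0f\n", elevationForNewPopup());  // z=32
    return 0;
}
```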
mmm a / vs2008 / tesseract . vcproj <nl> ppp b / vs2008 / tesseract . vcproj <nl> <nl> < ? xml version = " 1 . 0 " encoding = " Windows - 1252 " ? > <nl> < VisualStudioProject <nl> ProjectType = " Visual C + + " <nl> - Version = " 9 , 00 " <nl> + Version = " 9 . 00 " <nl> Name = " tesseract " <nl> ProjectGUID = " { 47519557 - 3296 - 407A - BE51 - 8175C77B0868 } " <nl> RootNamespace = " tesseract " <nl> <nl> > <nl> < Tool <nl> Name = " VCPreBuildEventTool " <nl> - CommandLine = " xcopy / Y $ ( SolutionDir ) \ lib \ leptonlib . dll $ ( TargetDir ) " <nl> + CommandLine = " xcopy / Y & quot ; $ ( SolutionDir ) lib \ leptonlibd . dll & quot ; & quot ; $ ( TargetDir ) & quot ; " <nl> / > <nl> < Tool <nl> Name = " VCCustomBuildTool " <nl> <nl> > <nl> < Tool <nl> Name = " VCPreBuildEventTool " <nl> - CommandLine = " xcopy / Y $ ( SolutionDir ) \ lib \ leptonlibd . dll $ ( TargetDir ) " <nl> + CommandLine = " xcopy / Y & quot ; $ ( SolutionDir ) lib \ leptonlibd . dll & quot ; & quot ; $ ( TargetDir ) & quot ; " <nl> / > <nl> < Tool <nl> Name = " VCCustomBuildTool " <nl>
added quotes in xcopy
tesseract-ocr/tesseract
459ff6229338a58dad3aa39053020cc468cc43c4
2010-10-29T21:26:07Z
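The tesseract fix is pure quoting: paths that may contain spaces must be wrapped in double quotes before being spliced into an `xcopy` command line. A small C++ illustration of composing such a command, with made-up paths:

```cpp
// Quote each path before building the command string so paths with spaces
// survive intact. The paths below are invented for the example.
#include <iostream>
#include <string>

static std::string quote(const std::string& path) {
    return "\"" + path + "\"";
}

int main() {
    const std::string src = R"(C:\My Solution\lib\leptonlibd.dll)";
    const std::string dst = R"(C:\My Solution\Debug\)";
    const std::string cmd = "xcopy /Y " + quote(src) + " " + quote(dst);
    std::cout << cmd << "\n";  // xcopy /Y "C:\My Solution\lib\leptonlibd.dll" "C:\My Solution\Debug\"
    return 0;
}
```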
mmm a / scala - package / examples / src / main / scala / org / apache / mxnetexamples / gan / GanMnist . scala <nl> ppp b / scala - package / examples / src / main / scala / org / apache / mxnetexamples / gan / GanMnist . scala <nl> <nl> <nl> package org . apache . mxnetexamples . gan <nl> <nl> + import org . apache . mxnet . { Context , CustomMetric , DataBatch , IO , NDArray , Shape , Symbol , Xavier } <nl> + import org . apache . mxnet . optimizer . Adam <nl> import org . kohsuke . args4j . { CmdLineParser , Option } <nl> import org . slf4j . LoggerFactory <nl> + <nl> import scala . collection . JavaConverters . _ <nl> - import Viz . _ <nl> - import org . apache . mxnet . Context <nl> - import org . apache . mxnet . Shape <nl> - import org . apache . mxnet . IO <nl> - import org . apache . mxnet . NDArray <nl> - import org . apache . mxnet . CustomMetric <nl> - import org . apache . mxnet . Xavier <nl> - import org . apache . mxnet . optimizer . Adam <nl> - import org . apache . mxnet . DataBatch <nl> - import org . apache . mxnet . Symbol <nl> - import org . apache . mxnet . Shape <nl> <nl> - / * * <nl> - * @ author Depeng Liang <nl> - * / <nl> object GanMnist { <nl> <nl> private val logger = LoggerFactory . getLogger ( classOf [ GanMnist ] ) <nl> <nl> - / / a deconv layer that enlarges the feature map <nl> + / / a deconv layer that enlarges the feature map <nl> def deconv2D ( data : Symbol , iShape : Shape , oShape : Shape , <nl> - kShape : ( Int , Int ) , name : String , stride : ( Int , Int ) = ( 2 , 2 ) ) : Symbol = { <nl> - val targetShape = ( oShape ( oShape . length - 2 ) , oShape ( oShape . length - 1 ) ) <nl> - val net = Symbol . Deconvolution ( name ) ( ) ( Map ( <nl> - " data " - > data , <nl> - " kernel " - > s " $ kShape " , <nl> - " stride " - > s " $ stride " , <nl> - " target_shape " - > s " $ targetShape " , <nl> - " num_filter " - > oShape ( 0 ) , <nl> - " no_bias " - > true ) ) <nl> + kShape : ( Int , Int ) , name : String , stride : ( Int , Int ) = ( 2 , 2 ) ) : Symbol = { <nl> + val targetShape = Shape ( oShape ( oShape . length - 2 ) , oShape ( oShape . length - 1 ) ) <nl> + val net = Symbol . api . Deconvolution ( data = Some ( data ) , kernel = Shape ( kShape . _1 , kShape . _2 ) , <nl> + stride = Some ( Shape ( stride . _1 , stride . _2 ) ) , target_shape = Some ( targetShape ) , <nl> + num_filter = oShape ( 0 ) , no_bias = Some ( true ) , name = name ) <nl> net <nl> } <nl> <nl> def deconv2DBnRelu ( data : Symbol , prefix : String , iShape : Shape , <nl> - oShape : Shape , kShape : ( Int , Int ) , eps : Float = 1e - 5f + 1e - 12f ) : Symbol = { <nl> + oShape : Shape , kShape : ( Int , Int ) , eps : Float = 1e - 5f + 1e - 12f ) : Symbol = { <nl> var net = deconv2D ( data , iShape , oShape , kShape , name = s " $ { prefix } _deconv " ) <nl> - net = Symbol . BatchNorm ( s " $ { prefix } _bn " ) ( ) ( Map ( " data " - > net , " fix_gamma " - > true , " eps " - > eps ) ) <nl> - net = Symbol . Activation ( s " $ { prefix } _act " ) ( ) ( Map ( " data " - > net , " act_type " - > " relu " ) ) <nl> + net = Symbol . api . BatchNorm ( name = s " $ { prefix } _bn " , data = Some ( net ) , <nl> + fix_gamma = Some ( true ) , eps = Some ( eps ) ) <nl> + net = Symbol . api . 
Activation ( data = Some ( net ) , act_type = " relu " , name = s " $ { prefix } _act " ) <nl> net <nl> } <nl> <nl> def deconv2DAct ( data : Symbol , prefix : String , actType : String , <nl> - iShape : Shape , oShape : Shape , kShape : ( Int , Int ) ) : Symbol = { <nl> + iShape : Shape , oShape : Shape , kShape : ( Int , Int ) ) : Symbol = { <nl> var net = deconv2D ( data , iShape , oShape , kShape , name = s " $ { prefix } _deconv " ) <nl> - net = Symbol . Activation ( s " $ { prefix } _act " ) ( ) ( Map ( " data " - > net , " act_type " - > actType ) ) <nl> + net = Symbol . api . Activation ( data = Some ( net ) , act_type = " relu " , name = s " $ { prefix } _act " ) <nl> net <nl> } <nl> <nl> def makeDcganSym ( oShape : Shape , ngf : Int = 128 , finalAct : String = " sigmoid " , <nl> - eps : Float = 1e - 5f + 1e - 12f ) : ( Symbol , Symbol ) = { <nl> + eps : Float = 1e - 5f + 1e - 12f ) : ( Symbol , Symbol ) = { <nl> <nl> val code = Symbol . Variable ( " rand " ) <nl> - var net = Symbol . FullyConnected ( " g1 " ) ( ) ( Map ( " data " - > code , <nl> - " num_hidden " - > 4 * 4 * ngf * 4 , " no_bias " - > true ) ) <nl> - net = Symbol . Activation ( " gact1 " ) ( ) ( Map ( " data " - > net , " act_type " - > " relu " ) ) <nl> + var net = Symbol . api . FullyConnected ( data = Some ( code ) , num_hidden = 4 * 4 * ngf * 4 , <nl> + no_bias = Some ( true ) , name = " g1 " ) <nl> + net = Symbol . api . Activation ( data = Some ( net ) , act_type = " relu " , name = " gact1 " ) <nl> / / 4 x 4 <nl> - net = Symbol . Reshape ( ) ( ) ( Map ( " data " - > net , " shape " - > s " ( - 1 , $ { ngf * 4 } , 4 , 4 ) " ) ) <nl> + net = Symbol . api . Reshape ( data = Some ( net ) , shape = Some ( Shape ( - 1 , ngf * 4 , 4 , 4 ) ) ) <nl> / / 8 x 8 <nl> net = deconv2DBnRelu ( net , prefix = " g2 " , <nl> iShape = Shape ( ngf * 4 , 4 , 4 ) , oShape = Shape ( ngf * 2 , 8 , 8 ) , kShape = ( 3 , 3 ) ) <nl> object GanMnist { <nl> <nl> val data = Symbol . Variable ( " data " ) <nl> / / 28 x 28 <nl> - val conv1 = Symbol . Convolution ( " conv1 " ) ( ) ( Map ( " data " - > data , <nl> - " kernel " - > " ( 5 , 5 ) " , " num_filter " - > 20 ) ) <nl> - val tanh1 = Symbol . Activation ( ) ( ) ( Map ( " data " - > conv1 , " act_type " - > " tanh " ) ) <nl> - val pool1 = Symbol . Pooling ( ) ( ) ( Map ( " data " - > tanh1 , <nl> - " pool_type " - > " max " , " kernel " - > " ( 2 , 2 ) " , " stride " - > " ( 2 , 2 ) " ) ) <nl> + val conv1 = Symbol . api . Convolution ( data = Some ( data ) , kernel = Shape ( 5 , 5 ) , <nl> + num_filter = 20 , name = " conv1 " ) <nl> + val tanh1 = Symbol . api . Activation ( data = Some ( conv1 ) , act_type = " tanh " ) <nl> + val pool1 = Symbol . api . Pooling ( data = Some ( tanh1 ) , pool_type = Some ( " max " ) , <nl> + kernel = Some ( Shape ( 2 , 2 ) ) , stride = Some ( Shape ( 2 , 2 ) ) ) <nl> / / second conv <nl> - val conv2 = Symbol . Convolution ( " conv2 " ) ( ) ( Map ( " data " - > pool1 , <nl> - " kernel " - > " ( 5 , 5 ) " , " num_filter " - > 50 ) ) <nl> - val tanh2 = Symbol . Activation ( ) ( ) ( Map ( " data " - > conv2 , " act_type " - > " tanh " ) ) <nl> - val pool2 = Symbol . Pooling ( ) ( ) ( Map ( " data " - > tanh2 , " pool_type " - > " max " , <nl> - " kernel " - > " ( 2 , 2 ) " , " stride " - > " ( 2 , 2 ) " ) ) <nl> - var d5 = Symbol . Flatten ( ) ( ) ( Map ( " data " - > pool2 ) ) <nl> - d5 = Symbol . FullyConnected ( " fc1 " ) ( ) ( Map ( " data " - > d5 , " num_hidden " - > 500 ) ) <nl> - d5 = Symbol . 
Activation ( ) ( ) ( Map ( " data " - > d5 , " act_type " - > " tanh " ) ) <nl> - d5 = Symbol . FullyConnected ( " fc_dloss " ) ( ) ( Map ( " data " - > d5 , " num_hidden " - > 1 ) ) <nl> - val dloss = Symbol . LogisticRegressionOutput ( " dloss " ) ( ) ( Map ( " data " - > d5 ) ) <nl> + val conv2 = Symbol . api . Convolution ( data = Some ( pool1 ) , kernel = Shape ( 5 , 5 ) , <nl> + num_filter = 50 , name = " conv2 " ) <nl> + val tanh2 = Symbol . api . Activation ( data = Some ( conv2 ) , act_type = " tanh " ) <nl> + val pool2 = Symbol . api . Pooling ( data = Some ( tanh2 ) , pool_type = Some ( " max " ) , <nl> + kernel = Some ( Shape ( 2 , 2 ) ) , stride = Some ( Shape ( 2 , 2 ) ) ) <nl> + var d5 = Symbol . api . Flatten ( data = Some ( pool2 ) ) <nl> + d5 = Symbol . api . FullyConnected ( data = Some ( d5 ) , num_hidden = 500 , name = " fc1 " ) <nl> + d5 = Symbol . api . Activation ( data = Some ( d5 ) , act_type = " tanh " ) <nl> + d5 = Symbol . api . FullyConnected ( data = Some ( d5 ) , num_hidden = 1 , name = " fc_dloss " ) <nl> + val dloss = Symbol . api . LogisticRegressionOutput ( data = Some ( d5 ) , name = " dloss " ) <nl> <nl> ( gout , dloss ) <nl> } <nl> object GanMnist { <nl> labelArr . zip ( predArr ) . map { case ( l , p ) = > Math . abs ( l - p ) } . sum / label . shape ( 0 ) <nl> } <nl> <nl> + def runTraining ( dataPath : String , context : Context , <nl> + outputPath : String , numEpoch : Int ) : Float = { <nl> + val lr = 0 . 0005f <nl> + val beta1 = 0 . 5f <nl> + val batchSize = 100 <nl> + val randShape = Shape ( batchSize , 100 ) <nl> + val dataShape = Shape ( batchSize , 1 , 28 , 28 ) <nl> + <nl> + val ( symGen , symDec ) = <nl> + makeDcganSym ( oShape = dataShape , ngf = 32 , finalAct = " sigmoid " ) <nl> + <nl> + val gMod = new GANModule ( <nl> + symGen , <nl> + symDec , <nl> + context = context , <nl> + dataShape = dataShape , <nl> + codeShape = randShape ) <nl> + <nl> + gMod . initGParams ( new Xavier ( factorType = " in " , magnitude = 2 . 34f ) ) <nl> + gMod . initDParams ( new Xavier ( factorType = " in " , magnitude = 2 . 34f ) ) <nl> + <nl> + gMod . initOptimizer ( new Adam ( learningRate = lr , wd = 0f , beta1 = beta1 ) ) <nl> + <nl> + val params = Map ( <nl> + " image " - > s " $ dataPath / train - images - idx3 - ubyte " , <nl> + " label " - > s " $ dataPath / train - labels - idx1 - ubyte " , <nl> + " input_shape " - > s " ( 1 , 28 , 28 ) " , <nl> + " batch_size " - > s " $ batchSize " , <nl> + " shuffle " - > " True " <nl> + ) <nl> + <nl> + val mnistIter = IO . MNISTIter ( params ) <nl> + <nl> + val metricAcc = new CustomMetric ( ferr , " ferr " ) <nl> + <nl> + var t = 0 <nl> + var dataBatch : DataBatch = null <nl> + var acc = 0 . 0f <nl> + for ( epoch < - 0 until numEpoch ) { <nl> + mnistIter . reset ( ) <nl> + metricAcc . reset ( ) <nl> + t = 0 <nl> + while ( mnistIter . hasNext ) { <nl> + dataBatch = mnistIter . next ( ) <nl> + gMod . update ( dataBatch ) <nl> + gMod . dLabel . set ( 0f ) <nl> + metricAcc . update ( Array ( gMod . dLabel ) , gMod . outputsFake ) <nl> + gMod . dLabel . set ( 1f ) <nl> + metricAcc . update ( Array ( gMod . dLabel ) , gMod . outputsReal ) <nl> + <nl> + if ( t % 50 = = 0 ) { <nl> + val ( name , value ) = metricAcc . get <nl> + acc = value ( 0 ) <nl> + logger . info ( s " epoch : $ epoch , iter $ t , metric = $ { value . mkString ( " " ) } " ) <nl> + Viz . imSave ( " gout " , outputPath , gMod . tempOutG ( 0 ) , flip = true ) <nl> + val diff = gMod . tempDiffD <nl> + val arr = diff . toArray <nl> + val mean = arr . 
sum / arr . length <nl> + val std = { <nl> + val tmpA = arr . map ( a = > ( a - mean ) * ( a - mean ) ) <nl> + Math . sqrt ( tmpA . sum / tmpA . length ) . toFloat <nl> + } <nl> + diff . set ( ( diff - mean ) / std + 0 . 5f ) <nl> + Viz . imSave ( " diff " , outputPath , diff , flip = true ) <nl> + Viz . imSave ( " data " , outputPath , dataBatch . data ( 0 ) , flip = true ) <nl> + } <nl> + <nl> + t + = 1 <nl> + } <nl> + } <nl> + acc <nl> + } <nl> + <nl> def main ( args : Array [ String ] ) : Unit = { <nl> val anst = new GanMnist <nl> val parser : CmdLineParser = new CmdLineParser ( anst ) <nl> object GanMnist { <nl> parser . parseArgument ( args . toList . asJava ) <nl> <nl> val dataPath = if ( anst . mnistDataPath = = null ) System . getenv ( " MXNET_DATA_DIR " ) <nl> - else anst . mnistDataPath <nl> + else anst . mnistDataPath <nl> <nl> assert ( dataPath ! = null ) <nl> - <nl> - val lr = 0 . 0005f <nl> - val beta1 = 0 . 5f <nl> - val batchSize = 100 <nl> - val randShape = Shape ( batchSize , 100 ) <nl> - val numEpoch = 100 <nl> - val dataShape = Shape ( batchSize , 1 , 28 , 28 ) <nl> val context = if ( anst . gpu = = - 1 ) Context . cpu ( ) else Context . gpu ( anst . gpu ) <nl> <nl> - val ( symGen , symDec ) = <nl> - makeDcganSym ( oShape = dataShape , ngf = 32 , finalAct = " sigmoid " ) <nl> - <nl> - val gMod = new GANModule ( <nl> - symGen , <nl> - symDec , <nl> - context = context , <nl> - dataShape = dataShape , <nl> - codeShape = randShape ) <nl> - <nl> - gMod . initGParams ( new Xavier ( factorType = " in " , magnitude = 2 . 34f ) ) <nl> - gMod . initDParams ( new Xavier ( factorType = " in " , magnitude = 2 . 34f ) ) <nl> - <nl> - gMod . initOptimizer ( new Adam ( learningRate = lr , wd = 0f , beta1 = beta1 ) ) <nl> - <nl> - val params = Map ( <nl> - " image " - > s " $ { dataPath } / train - images - idx3 - ubyte " , <nl> - " label " - > s " $ { dataPath } / train - labels - idx1 - ubyte " , <nl> - " input_shape " - > s " ( 1 , 28 , 28 ) " , <nl> - " batch_size " - > s " $ batchSize " , <nl> - " shuffle " - > " True " <nl> - ) <nl> - <nl> - val mnistIter = IO . MNISTIter ( params ) <nl> - <nl> - val metricAcc = new CustomMetric ( ferr , " ferr " ) <nl> - <nl> - var t = 0 <nl> - var dataBatch : DataBatch = null <nl> - for ( epoch < - 0 until numEpoch ) { <nl> - mnistIter . reset ( ) <nl> - metricAcc . reset ( ) <nl> - t = 0 <nl> - while ( mnistIter . hasNext ) { <nl> - dataBatch = mnistIter . next ( ) <nl> - gMod . update ( dataBatch ) <nl> - gMod . dLabel . set ( 0f ) <nl> - metricAcc . update ( Array ( gMod . dLabel ) , gMod . outputsFake ) <nl> - gMod . dLabel . set ( 1f ) <nl> - metricAcc . update ( Array ( gMod . dLabel ) , gMod . outputsReal ) <nl> - <nl> - if ( t % 50 = = 0 ) { <nl> - val ( name , value ) = metricAcc . get <nl> - logger . info ( s " epoch : $ epoch , iter $ t , metric = $ value " ) <nl> - Viz . imSave ( " gout " , anst . outputPath , gMod . tempOutG ( 0 ) , flip = true ) <nl> - val diff = gMod . tempDiffD <nl> - val arr = diff . toArray <nl> - val mean = arr . sum / arr . length <nl> - val std = { <nl> - val tmpA = arr . map ( a = > ( a - mean ) * ( a - mean ) ) <nl> - Math . sqrt ( tmpA . sum / tmpA . length ) . toFloat <nl> - } <nl> - diff . set ( ( diff - mean ) / std + 0 . 5f ) <nl> - Viz . imSave ( " diff " , anst . outputPath , diff , flip = true ) <nl> - Viz . imSave ( " data " , anst . outputPath , dataBatch . data ( 0 ) , flip = true ) <nl> - } <nl> - <nl> - t + = 1 <nl> - } <nl> - } <nl> + runTraining ( dataPath , context , anst . 
outputPath , 100 ) <nl> } catch { <nl> case ex : Exception = > { <nl> logger . error ( ex . getMessage , ex ) <nl> mmm a / scala - package / examples / src / main / scala / org / apache / mxnetexamples / gan / Module . scala <nl> ppp b / scala - package / examples / src / main / scala / org / apache / mxnetexamples / gan / Module . scala <nl> import org . apache . mxnet . Initializer <nl> import org . apache . mxnet . DataBatch <nl> import org . apache . mxnet . Random <nl> <nl> - / * * <nl> - * @ author Depeng Liang <nl> - * / <nl> class GANModule ( <nl> symbolGenerator : Symbol , <nl> symbolEncoder : Symbol , <nl> new file mode 100644 <nl> index 00000000000 . . 40db092727c <nl> mmm / dev / null <nl> ppp b / scala - package / examples / src / main / scala / org / apache / mxnetexamples / gan / README . md <nl> <nl> + # GAN MNIST Example for Scala <nl> + This is the GAN MNIST Training Example implemented for Scala type - safe api <nl> + <nl> + This example is only for Illustration and not modeled to achieve the best accuracy . <nl> + # # Setup <nl> + # # # Download the source File <nl> + ` ` ` $ xslt <nl> + https : / / s3 . us - east - 2 . amazonaws . com / mxnet - scala / scala - example - ci / mnist / mnist . zip <nl> + ` ` ` <nl> + # # # Unzip the file <nl> + ` ` ` $ xslt <nl> + unzip mnist . zip <nl> + ` ` ` <nl> + # # # Arguement Configuration <nl> + Then you need to define the arguments that you would like to pass in the model : <nl> + ` ` ` $ xslt <nl> + - - mnist - data - path < location of your downloaded file > <nl> + ` ` ` <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . 12459fb1cc1 <nl> mmm / dev / null <nl> ppp b / scala - package / examples / src / test / scala / org / apache / mxnetexamples / gan / GanExampleSuite . scala <nl> <nl> + / * <nl> + * Licensed to the Apache Software Foundation ( ASF ) under one or more <nl> + * contributor license agreements . See the NOTICE file distributed with <nl> + * this work for additional information regarding copyright ownership . <nl> + * The ASF licenses this file to You under the Apache License , Version 2 . 0 <nl> + * ( the " License " ) ; you may not use this file except in compliance with <nl> + * the License . You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * / <nl> + <nl> + package org . apache . mxnetexamples . gan <nl> + <nl> + import java . io . File <nl> + import java . net . URL <nl> + <nl> + import org . apache . commons . io . FileUtils <nl> + import org . apache . mxnet . Context <nl> + import org . scalatest . { BeforeAndAfterAll , FunSuite } <nl> + import org . slf4j . LoggerFactory <nl> + <nl> + import scala . sys . process . Process <nl> + <nl> + class GanExampleSuite extends FunSuite with BeforeAndAfterAll { <nl> + private val logger = LoggerFactory . getLogger ( classOf [ GanExampleSuite ] ) <nl> + <nl> + test ( " Example CI : Test GAN MNIST " ) { <nl> + if ( System . getenv ( ) . containsKey ( " SCALA_TEST_ON_GPU " ) & & <nl> + System . getenv ( " SCALA_TEST_ON_GPU " ) . toInt = = 1 ) { <nl> + logger . 
info ( " Downloading mnist model " ) <nl> + val baseUrl = " https : / / s3 . us - east - 2 . amazonaws . com / mxnet - scala / scala - example - ci " <nl> + val tempDirPath = System . getProperty ( " java . io . tmpdir " ) <nl> + val modelDirPath = tempDirPath + File . separator + " mnist / " <nl> + logger . info ( " tempDirPath : % s " . format ( tempDirPath ) ) <nl> + val tmpFile = new File ( tempDirPath + " / mnist / mnist . zip " ) <nl> + if ( ! tmpFile . exists ( ) ) { <nl> + FileUtils . copyURLToFile ( new URL ( baseUrl + " / mnist / mnist . zip " ) , <nl> + tmpFile ) <nl> + } <nl> + / / TODO : Need to confirm with Windows <nl> + Process ( " unzip " + tempDirPath + " / mnist / mnist . zip - d " <nl> + + tempDirPath + " / mnist / " ) ! <nl> + <nl> + val context = Context . gpu ( ) <nl> + <nl> + val output = GanMnist . runTraining ( modelDirPath , context , modelDirPath , 5 ) <nl> + Process ( " rm - rf " + modelDirPath ) ! <nl> + <nl> + assert ( output > = 0 . 0f ) <nl> + } else { <nl> + logger . info ( " GPU test only , skipped . . . " ) <nl> + } <nl> + } <nl> + } <nl>
[ MXNET - 531 ] GAN MNIST Examples for the new Scala API ( )
apache/incubator-mxnet
92f0c512336db4219fdcd97fccf74dcdeb56dbf4
2018-07-04T05:19:10Z
mmm a / iOS / MMKV / MMKV / MMKV . mm <nl> ppp b / iOS / MMKV / MMKV / MMKV . mm <nl> - ( BOOL ) ensureMemorySize : ( size_t ) newSize { <nl> } <nl> <nl> / / make some room for placeholder <nl> - constexpr size_t ItemSizeHolderSize = 4 ; <nl> + constexpr uint32_t / * ItemSizeHolder = 0x00ffffff , * / ItemSizeHolderSize = 4 ; <nl> if ( m_dic . count = = 0 ) { <nl> newSize + = ItemSizeHolderSize ; <nl> } <nl> - if ( newSize > = m_output - > spaceLeft ( ) ) { <nl> + if ( newSize > = m_output - > spaceLeft ( ) | | m_dic . count = = 0 ) { <nl> / / try a full rewrite to make space <nl> static const int offset = pbFixed32Size ( 0 ) ; <nl> NSData * data = [ MiniPBCoder encodeDataWithObject : m_dic ] ; <nl> - ( BOOL ) setRawData : ( NSData * ) data forKey : ( NSString * ) key { <nl> return ret ; <nl> } <nl> <nl> - constexpr uint32_t ItemSizeHolder = 0x00ffffff , ItemSizeHolderSize = 4 ; <nl> - <nl> - ( BOOL ) appendData : ( NSData * ) data forKey : ( NSString * ) key { <nl> size_t keyLength = [ key lengthOfBytesUsingEncoding : NSUTF8StringEncoding ] ; <nl> auto size = keyLength + pbRawVarint32Size ( ( int32_t ) keyLength ) ; / / size needed to encode the key <nl> mmm a / iOS / MMKVDemo / MMKVDemo / ViewController . mm <nl> ppp b / iOS / MMKVDemo / MMKVDemo / ViewController . mm <nl> - ( void ) viewDidLoad { <nl> } <nl> <nl> [ self funcionalTest ] ; <nl> - / / [ self testReKey ] ; <nl> + [ self testReKey ] ; <nl> / / [ self testImportFromUserDefault ] ; <nl> / / [ self testCornerSize ] ; <nl> / / [ self testFastRemoveCornerSize ] ; <nl>
fix first insert error
Tencent/MMKV
82541c04c9c0b01db25146acee03ad620990d158
2019-06-10T03:56:29Z
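A sketch of the space-check fix above: when the store is empty, the 4-byte item-size placeholder has not been written yet, so the first insert must take the full-rewrite path even if the record would nominally fit. `AppendLog` below is a simplified stand-in, not MMKV's real file layout.

```cpp
// Simplified append-only store illustrating the "|| count == 0" fix: an empty
// store always rewrites so the placeholder is laid down before the first record.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

class AppendLog {
public:
    explicit AppendLog(size_t capacity) : buf_(capacity), used_(0), count_(0) {}

    bool append(size_t recordSize) {
        constexpr size_t kItemSizeHolderSize = 4;  // placeholder written on first insert
        size_t needed = recordSize;
        if (count_ == 0) needed += kItemSizeHolderSize;
        if (needed >= spaceLeft() || count_ == 0) {
            fullRewrite();  // stand-in: real code re-serializes the whole dictionary
        }
        used_ += needed;
        ++count_;
        return used_ <= buf_.size();
    }

private:
    size_t spaceLeft() const { return buf_.size() - used_; }
    void fullRewrite() { std::printf("full rewrite (count=%zu, used=%zu)\n", count_, used_); }

    std::vector<uint8_t> buf_;
    size_t used_;
    size_t count_;
};

int main() {
    AppendLog log(64);
    log.append(10);  // first insert: placeholder + rewrite path
    log.append(10);  // subsequent insert: plain append
    return 0;
}
```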
mmm a / src / ast / ast . h <nl> ppp b / src / ast / ast . h <nl> class AstNodeFactory final BASE_EMBEDDED { <nl> <nl> SloppyBlockFunctionStatement * NewSloppyBlockFunctionStatement ( <nl> Statement * statement , Scope * scope ) { <nl> - return new ( local_zone_ ) <nl> - SloppyBlockFunctionStatement ( local_zone_ , statement , scope ) ; <nl> + return new ( parser_zone_ ) <nl> + SloppyBlockFunctionStatement ( parser_zone_ , statement , scope ) ; <nl> } <nl> <nl> CaseClause * NewCaseClause ( <nl> mmm a / test / cctest / test - parsing . cc <nl> ppp b / test / cctest / test - parsing . cc <nl> TEST ( DiscardFunctionBody ) { <nl> / / See comments in ParseFunctionLiteral in parser . cc . <nl> const char * discard_sources [ ] = { <nl> " ( function f ( ) { function g ( ) { var a ; } } ) ( ) ; " , <nl> + " ( function f ( ) { function g ( ) { { function h ( ) { } } } } ) ( ) ; " , <nl> / * TODO ( conradw ) : In future it may be possible to apply this optimisation <nl> * to these productions . <nl> " ( function f ( ) { 0 , function g ( ) { var a ; } } ) ( ) ; " , <nl> " ( function f ( ) { 0 , { g ( ) { var a ; } } } ) ( ) ; " , <nl> " ( function f ( ) { 0 , class c { g ( ) { var a ; } } } ) ( ) ; " , * / <nl> - NULL <nl> - } ; <nl> + NULL } ; <nl> <nl> i : : Isolate * isolate = CcTest : : i_isolate ( ) ; <nl> i : : Factory * factory = isolate - > factory ( ) ; <nl> TEST ( DiscardFunctionBody ) { <nl> } else { <nl> / / TODO ( conradw ) : This path won ' t be hit until the other test cases can be <nl> / / uncommented . <nl> + UNREACHABLE ( ) ; <nl> CHECK_NOT_NULL ( inner - > body ( ) ) ; <nl> CHECK_GE ( 2 , inner - > body ( ) - > length ( ) ) ; <nl> i : : Expression * exp = inner - > body ( ) - > at ( 1 ) - > AsExpressionStatement ( ) - > <nl>
Fix sloppy block - scoped function hoisting with nested zones
v8/v8
eb9deba81545e294e83c6c96ce84d74b4fc9ae52
2016-01-08T16:16:46Z
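A toy illustration of the zone-lifetime bug behind the v8 fix: an AST node that must outlive a nested function's temporary zone has to be allocated from the longer-lived parser zone. The `Zone` class below is a deliberately tiny stand-in for V8's arena, not its API.

```cpp
// Allocating from the wrong arena: objects live exactly as long as their zone,
// so a node needed after the inner zone is discarded must come from the outer one.
#include <cstdio>
#include <deque>
#include <string>

class Zone {
public:
    const std::string* NewString(std::string s) {
        storage_.push_back(std::move(s));
        return &storage_.back();  // std::deque keeps references stable on push_back
    }
private:
    std::deque<std::string> storage_;
};

int main() {
    Zone parser_zone;                 // lives for the whole parse
    const std::string* hoisted = nullptr;

    {
        Zone local_zone;              // discarded when the inner function's body is thrown away
        // Wrong: hoisted = local_zone.NewString("function h() {}");  // would dangle after this block
        hoisted = parser_zone.NewString("function h() {}");           // survives the inner zone
    }

    std::printf("still valid: %s\n", hoisted->c_str());
    return 0;
}
```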
mmm a / cmake / modules / FindSSE . cmake <nl> ppp b / cmake / modules / FindSSE . cmake <nl> if ( CMAKE_SYSTEM_NAME MATCHES " Linux " ) <nl> string ( COMPARE EQUAL " sse2 " " $ { _SSE_THERE } " _SSE2_TRUE ) <nl> CHECK_CXX_ACCEPTS_FLAG ( " - msse2 " _SSE2_OK ) <nl> <nl> - # / proc / cpuinfo apparently omits sse3 : ( <nl> - string ( REGEX REPLACE " ^ . * [ ^ s ] ( sse3 ) . * $ " " \ \ 1 " _SSE_THERE $ { CPUINFO } ) <nl> - string ( COMPARE EQUAL " sse3 " " $ { _SSE_THERE } " _SSE3_TRUE ) <nl> - if ( NOT _SSE3_TRUE ) <nl> - string ( REGEX REPLACE " ^ . * ( T2300 ) . * $ " " \ \ 1 " _SSE_THERE $ { CPUINFO } ) <nl> - string ( COMPARE EQUAL " T2300 " " $ { _SSE_THERE } " _SSE3_TRUE ) <nl> - endif ( ) <nl> + # SSE3 is also known as the Prescott New Instructions ( PNI ) <nl> + # it ' s labeled as pni in / proc / cpuinfo <nl> + string ( REGEX REPLACE " ^ . * ( pni ) . * $ " " \ \ 1 " _SSE_THERE $ { CPUINFO } ) <nl> + string ( COMPARE EQUAL " pni " " $ { _SSE_THERE } " _SSE3_TRUE ) <nl> CHECK_CXX_ACCEPTS_FLAG ( " - msse3 " _SSE3_OK ) <nl> <nl> string ( REGEX REPLACE " ^ . * ( ssse3 ) . * $ " " \ \ 1 " _SSE_THERE $ { CPUINFO } ) <nl>
Update SSE3 check to match the others
xbmc/xbmc
6b46b3313eb5bbccc06adf5f89e27b9c2fe9f9ab
2017-02-07T03:38:45Z
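The CMake change encodes a Linux quirk: /proc/cpuinfo reports SSE3 as `pni` (Prescott New Instructions), not `sse3`. A small C++ check of the same rule, reading the flags line directly:

```cpp
// Check CPU feature flags the same way the CMake module now does: look for
// "pni" (SSE3) among the tokens of the /proc/cpuinfo "flags" line.
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

static bool cpuHasFlag(const std::string& flag) {
    std::ifstream cpuinfo("/proc/cpuinfo");
    std::string line;
    while (std::getline(cpuinfo, line)) {
        if (line.compare(0, 5, "flags") != 0) continue;
        std::istringstream tokens(line.substr(line.find(':') + 1));
        std::string tok;
        while (tokens >> tok) {
            if (tok == flag) return true;
        }
    }
    return false;  // also reached on non-Linux systems without /proc/cpuinfo
}

int main() {
    std::cout << "SSE3 (pni): " << (cpuHasFlag("pni") ? "yes" : "no") << "\n";
    std::cout << "SSSE3: " << (cpuHasFlag("ssse3") ? "yes" : "no") << "\n";
    return 0;
}
```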
mmm a / src / objects - printer . cc <nl> ppp b / src / objects - printer . cc <nl> void JSObject : : PrintProperties ( std : : ostream & os ) { / / NOLINT <nl> } <nl> } <nl> <nl> + namespace { <nl> + <nl> + template < class T > <nl> + double GetScalarElement ( T * array , int index ) { <nl> + return array - > get_scalar ( index ) ; <nl> + } <nl> + <nl> + double GetScalarElement ( FixedDoubleArray * array , int index ) { <nl> + if ( array - > is_the_hole ( index ) ) return bit_cast < double > ( kHoleNanInt64 ) ; <nl> + return array - > get_scalar ( index ) ; <nl> + } <nl> + <nl> + bool is_the_hole ( double maybe_hole ) { <nl> + return bit_cast < uint64_t > ( maybe_hole ) = = kHoleNanInt64 ; <nl> + } <nl> + <nl> + } / / namespace <nl> + <nl> template < class T , bool print_the_hole > <nl> static void DoPrintElements ( std : : ostream & os , Object * object ) { / / NOLINT <nl> T * array = T : : cast ( object ) ; <nl> if ( array - > length ( ) = = 0 ) return ; <nl> int previous_index = 0 ; <nl> - double previous_value = array - > get_scalar ( 0 ) ; <nl> + double previous_value = GetScalarElement ( array , 0 ) ; <nl> double value = 0 . 0 ; <nl> int i ; <nl> for ( i = 1 ; i < = array - > length ( ) ; i + + ) { <nl> - if ( i < array - > length ( ) ) value = array - > get_scalar ( i ) ; <nl> + if ( i < array - > length ( ) ) value = GetScalarElement ( array , i ) ; <nl> bool values_are_nan = std : : isnan ( previous_value ) & & std : : isnan ( value ) ; <nl> - if ( ( previous_value = = value | | values_are_nan ) & & i ! = array - > length ( ) ) { <nl> + if ( i ! = array - > length ( ) & & ( previous_value = = value | | values_are_nan ) & & <nl> + is_the_hole ( previous_value ) = = is_the_hole ( value ) ) { <nl> continue ; <nl> } <nl> os < < " \ n " ; <nl> static void DoPrintElements ( std : : ostream & os , Object * object ) { / / NOLINT <nl> ss < < ' - ' < < ( i - 1 ) ; <nl> } <nl> os < < std : : setw ( 12 ) < < ss . str ( ) < < " : " ; <nl> - if ( print_the_hole & & <nl> - FixedDoubleArray : : cast ( object ) - > is_the_hole ( previous_index ) ) { <nl> + if ( print_the_hole & & is_the_hole ( previous_value ) ) { <nl> os < < " < the_hole > " ; <nl> } else { <nl> os < < previous_value ; <nl> new file mode 100644 <nl> index 00000000000 . . b0e141d709b <nl> mmm / dev / null <nl> ppp b / test / mjsunit / debug - print . js <nl> <nl> + / / Copyright 2016 the V8 project authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + / / Flags : - - allow - natives - syntax <nl> + <nl> + / / Make sure printing different element kinds doesn ' t crash . <nl> + <nl> + var array ; <nl> + var obj = { } ; <nl> + <nl> + array = [ ] ; <nl> + % DebugPrint ( array ) ; <nl> + <nl> + / / FAST_SMI_ELEMENTS <nl> + array = [ 1 , 2 , 3 ] ; <nl> + % DebugPrint ( array ) ; <nl> + <nl> + / / FAST_HOLEY_SMI_ELEMENTS <nl> + array [ 10 ] = 100 ; <nl> + array [ 11 ] = 100 ; <nl> + % DebugPrint ( array ) ; <nl> + <nl> + / / FAST_ELEMENTS <nl> + array = [ 1 , obj , obj ] ; <nl> + % DebugPrint ( array ) ; <nl> + <nl> + / / FAST_HOLEY_ELEMENTS <nl> + array [ 100 ] = obj ; <nl> + array [ 101 ] = obj ; <nl> + % DebugPrint ( array ) ; <nl> + <nl> + / / FAST_DOUBLE_ELEMENTS <nl> + array = [ 1 . 1 , 2 . 2 , 3 . 3 , 3 . 3 , 3 . 3 , NaN ] ; <nl> + % DebugPrint ( array ) ; <nl> + array . push ( NaN ) ; <nl> + array . push ( NaN ) ; <nl> + % DebugPrint ( array ) ; <nl> + <nl> + / / FAST_HOLEY_DOUBLE_ELEMENTS <nl> + array [ 100 ] = 1 . 
2 ; <nl> + array [ 101 ] = 1 . 2 ; <nl> + % DebugPrint ( array ) ; <nl> + <nl> + / / DICTIONARY_ELEMENTS <nl> + % NormalizeElements ( array ) ; <nl> + % DebugPrint ( array ) ; <nl>
[ printing ] Fix DCHECK failure when printing FAST_HOLEY_DOUBLE_ELEMENTS
v8/v8
cd86053facda29f5ea2edb47c24f4f6d99f1520a
2016-09-08T18:31:42Z
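The fix above hinges on telling V8's array hole apart from an ordinary NaN: the hole is a reserved NaN bit pattern, so a plain floating-point comparison (where NaN != NaN) cannot detect it. A minimal Python sketch of that bit-level check, using a made-up stand-in value rather than V8's real kHoleNanInt64 constant:

    import struct

    # Stand-in hole marker: a quiet-NaN bit pattern with a distinctive payload.
    # The real constant (kHoleNanInt64) lives in V8's source; this value is an assumption.
    HOLE_NAN_BITS = 0x7FF8DEADBEEFDEAD

    def double_bits(x):
        # Reinterpret the 8 bytes of an IEEE-754 double as an unsigned 64-bit integer.
        return struct.unpack('<Q', struct.pack('<d', x))[0]

    def is_the_hole(x):
        # NaN != NaN, so the hole can only be recognized by its exact bit pattern,
        # which is what the patched is_the_hole(double) helper does.
        return double_bits(x) == HOLE_NAN_BITS

    the_hole = struct.unpack('<d', struct.pack('<Q', HOLE_NAN_BITS))[0]
    plain_nan = float('nan')

    print(is_the_hole(the_hole), is_the_hole(plain_nan))  # True False
    print(plain_nan == plain_nan)                         # False, hence the bit-level check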
mmm a / editor / plugins / particles_2d_editor_plugin . cpp <nl> ppp b / editor / plugins / particles_2d_editor_plugin . cpp <nl> void Particles2DEditorPlugin : : _menu_callback ( int p_idx ) { <nl> cpu_particles - > set_transform ( particles - > get_transform ( ) ) ; <nl> cpu_particles - > set_visible ( particles - > is_visible ( ) ) ; <nl> cpu_particles - > set_pause_mode ( particles - > get_pause_mode ( ) ) ; <nl> + cpu_particles - > set_z_index ( particles - > get_z_index ( ) ) ; <nl> <nl> EditorNode : : get_singleton ( ) - > get_scene_tree_dock ( ) - > replace_node ( particles , cpu_particles , false ) ; <nl> <nl>
Merge pull request from samH - FIT / particle_fix
godotengine/godot
cc76f3c71d6d645ff454d142237985421439f722
2019-02-12T21:05:14Z
mmm a / . gitmodules <nl> ppp b / . gitmodules <nl> <nl> path = contracts / libc + + / upstream <nl> url = https : / / github . com / EOSIO / libcxx . git <nl> branch = eosio <nl> - [ submodule " externals / binaryen " ] <nl> - path = externals / binaryen <nl> - url = https : / / github . com / EOSIO / binaryen <nl> - <nl> [ submodule " libraries / softfloat " ] <nl> path = libraries / softfloat <nl> url = https : / / github . com / eosio / berkeley - softfloat - 3 <nl> - [ submodule " externals / magic_get " ] <nl> - path = externals / magic_get <nl> - url = https : / / github . com / EOSIO / magic_get <nl> [ submodule " libraries / fc " ] <nl> path = libraries / fc <nl> url = https : / / github . com / EOSIO / fc <nl> mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> if ( ENABLE_COVERAGE_TESTING ) <nl> endif ( ) <nl> <nl> include ( utils ) <nl> - add_subdirectory ( externals ) <nl> <nl> if ( " $ { CORE_SYMBOL_NAME } " STREQUAL " " ) <nl> set ( CORE_SYMBOL_NAME " SYS " ) <nl> endif ( ) <nl> <nl> message ( STATUS " Using ' $ { EOSIO_ROOT_KEY } ' as public key for ' eosio ' account " ) <nl> <nl> - include ( wasm ) <nl> add_subdirectory ( libraries ) <nl> add_subdirectory ( plugins ) <nl> add_subdirectory ( programs ) <nl> configure_file ( $ { CMAKE_SOURCE_DIR } / libraries / fc / secp256k1 / upstream / COPYING <nl> $ { CMAKE_BINARY_DIR } / licenses / eosio / LICENSE . secp256k1 COPYONLY ) <nl> configure_file ( $ { CMAKE_SOURCE_DIR } / libraries / fc / src / network / LICENSE . go <nl> $ { CMAKE_BINARY_DIR } / licenses / eosio / LICENSE . go COPYONLY ) <nl> - configure_file ( $ { CMAKE_SOURCE_DIR } / externals / binaryen / LICENSE <nl> - $ { CMAKE_BINARY_DIR } / licenses / eosio / LICENSE . binaryen COPYONLY ) <nl> <nl> install ( FILES LICENSE DESTINATION $ { CMAKE_INSTALL_FULL_DATAROOTDIR } / licenses / eosio / ) <nl> install ( FILES libraries / wabt / LICENSE DESTINATION $ { CMAKE_INSTALL_FULL_DATAROOTDIR } / licenses / eosio / RENAME LICENSE . wabt ) <nl> install ( FILES libraries / softfloat / COPYING . txt DESTINATION $ { CMAKE_INSTALL_FULL_DATAROOTDIR } / licenses / eosio / RENAME LICENSE . softfloat ) <nl> install ( FILES libraries / wasm - jit / LICENSE DESTINATION $ { CMAKE_INSTALL_FULL_DATAROOTDIR } / licenses / eosio / RENAME LICENSE . wavm ) <nl> install ( FILES libraries / fc / secp256k1 / upstream / COPYING DESTINATION $ { CMAKE_INSTALL_FULL_DATAROOTDIR } / licenses / eosio / RENAME LICENSE . secp256k1 ) <nl> - install ( FILES externals / binaryen / LICENSE DESTINATION $ { CMAKE_INSTALL_FULL_DATAROOTDIR } / licenses / eosio / RENAME LICENSE . binaryen ) <nl> install ( FILES libraries / fc / src / network / LICENSE . go DESTINATION $ { CMAKE_INSTALL_FULL_DATAROOTDIR } / licenses / eosio / ) <nl> <nl> include ( package ) <nl> deleted file mode 100644 <nl> index a8475728e4 . . 0000000000 <nl> mmm a / CMakeModules / wasm . 
cmake <nl> ppp / dev / null <nl> <nl> - find_package ( Wasm ) <nl> - <nl> - if ( WASM_FOUND ) <nl> - message ( STATUS " Using WASM clang = > " $ { WASM_CLANG } ) <nl> - message ( STATUS " Using WASM llc = > " $ { WASM_LLC } ) <nl> - message ( STATUS " Using WASM llvm - link = > " $ { WASM_LLVM_LINK } ) <nl> - else ( ) <nl> - message ( FATAL_ERROR " No WASM compiler cound be found ( make sure WASM_ROOT is set ) " ) <nl> - return ( ) <nl> - endif ( ) <nl> - macro ( compile_wast ) <nl> - # read arguments include ones that we don ' t since arguments get forwared " as is " and we don ' t want to threat unknown argument names as values <nl> - cmake_parse_arguments ( ARG " NOWARNINGS " " TARGET ; DESTINATION_FOLDER " " SOURCE_FILES ; INCLUDE_FOLDERS ; SYSTEM_INCLUDE_FOLDERS ; LIBRARIES " $ { ARGN } ) <nl> - set ( target $ { ARG_TARGET } ) <nl> - <nl> - # NOTE : Setting SOURCE_FILE and looping over it to avoid cmake issue with compilation $ { target } . bc ' s rule colliding with <nl> - # linking $ { target } . bc ' s rule <nl> - if ( " $ { ARG_SOURCE_FILES } " STREQUAL " " ) <nl> - set ( SOURCE_FILES $ { target } . cpp ) <nl> - else ( ) <nl> - set ( SOURCE_FILES $ { ARG_SOURCE_FILES } ) <nl> - endif ( ) <nl> - set ( outfiles " " ) <nl> - foreach ( srcfile $ { SOURCE_FILES } ) <nl> - <nl> - get_filename_component ( outfile $ { srcfile } NAME ) <nl> - get_filename_component ( extension $ { srcfile } EXT ) <nl> - get_filename_component ( infile $ { srcfile } ABSOLUTE ) <nl> - <nl> - # - ffreestanding <nl> - # Assert that compilation targets a freestanding environment . <nl> - # This implies - fno - builtin . A freestanding environment is one in which the standard library may not exist , and program startup may not necessarily be at main . <nl> - # The most obvious example is an OS kernel . <nl> - <nl> - # - nostdlib <nl> - # Do not use the standard system startup files or libraries when linking . <nl> - # No startup files and only the libraries you specify are passed to the linker , and options specifying linkage of the system libraries , such as - static - libgcc or - shared - libgcc , are ignored . <nl> - # The compiler may generate calls to memcmp , memset , memcpy and memmove . <nl> - # These entries are usually resolved by entries in libc . These entry points should be supplied through some other mechanism when this option is specified . <nl> - <nl> - # - fno - threadsafe - statics <nl> - # Do not emit the extra code to use the routines specified in the C + + ABI for thread - safe initialization of local statics . <nl> - # You can use this option to reduce code size slightly in code that doesn ’ t need to be thread - safe . <nl> - <nl> - # - fno - rtti <nl> - # Disable generation of information about every class with virtual functions for use by the C + + run - time type identification features ( dynamic_cast and typeid ) . <nl> - <nl> - # - fno - exceptions <nl> - # Disable the generation of extra code needed to propagate exceptions <nl> - if ( " $ { extension } " STREQUAL " . c " ) <nl> - set ( STDFLAG - D_XOPEN_SOURCE = 700 ) <nl> - else ( ) <nl> - set ( STDFLAG " - - std = c + + 14 " ) <nl> - endif ( ) <nl> - <nl> - set ( WASM_COMMAND $ { WASM_CLANG } - emit - llvm - O3 $ { STDFLAG } - - target = wasm32 - ffreestanding <nl> - - nostdlib - nostdlibinc - DBOOST_DISABLE_ASSERTS - DBOOST_EXCEPTION_DISABLE - fno - threadsafe - statics - fno - rtti - fno - exceptions <nl> - - c $ { infile } - o $ { outfile } . 
bc <nl> - ) <nl> - if ( $ { ARG_NOWARNINGS } ) <nl> - list ( APPEND WASM_COMMAND - Wno - everything ) <nl> - else ( ) <nl> - list ( APPEND WASM_COMMAND - Weverything - Wno - c + + 98 - compat - Wno - old - style - cast - Wno - vla - Wno - vla - extension - Wno - c + + 98 - compat - pedantic <nl> - - Wno - missing - prototypes - Wno - missing - variable - declarations - Wno - packed - Wno - padded - Wno - c99 - extensions - Wno - documentation - unknown - command ) <nl> - endif ( ) <nl> - <nl> - foreach ( folder $ { ARG_INCLUDE_FOLDERS } ) <nl> - list ( APPEND WASM_COMMAND - I $ { folder } ) <nl> - endforeach ( ) <nl> - <nl> - if ( " $ { ARG_SYSTEM_INCLUDE_FOLDERS } " STREQUAL " " ) <nl> - set ( ARG_SYSTEM_INCLUDE_FOLDERS $ { DEFAULT_SYSTEM_INCLUDE_FOLDERS } ) <nl> - endif ( ) <nl> - foreach ( folder $ { ARG_SYSTEM_INCLUDE_FOLDERS } ) <nl> - list ( APPEND WASM_COMMAND - isystem $ { folder } ) <nl> - endforeach ( ) <nl> - <nl> - add_custom_command ( OUTPUT $ { outfile } . bc <nl> - DEPENDS $ { infile } <nl> - COMMAND $ { WASM_COMMAND } <nl> - IMPLICIT_DEPENDS CXX $ { infile } <nl> - COMMENT " Building LLVM bitcode $ { outfile } . bc " <nl> - WORKING_DIRECTORY $ { CMAKE_CURRENT_BINARY_DIR } <nl> - VERBATIM <nl> - ) <nl> - set_property ( DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES $ { outfile } . bc ) <nl> - list ( APPEND outfiles $ { outfile } . bc ) <nl> - <nl> - endforeach ( srcfile ) <nl> - <nl> - set_property ( DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES $ { target } . bc ) <nl> - <nl> - endmacro ( compile_wast ) <nl> - <nl> - macro ( add_wast_library ) <nl> - cmake_parse_arguments ( ARG " NOWARNINGS " " TARGET ; DESTINATION_FOLDER " " SOURCE_FILES ; INCLUDE_FOLDERS ; SYSTEM_INCLUDE_FOLDERS " $ { ARGN } ) <nl> - set ( target $ { ARG_TARGET } ) <nl> - compile_wast ( $ { ARGV } ) <nl> - <nl> - get_filename_component ( " $ { ARG_TARGET } _BC_FILENAME " " $ { ARG_DESTINATION_FOLDER } / $ { ARG_TARGET } . bc " ABSOLUTE CACHE ) <nl> - add_custom_target ( $ { target } ALL DEPENDS $ { $ { ARG_TARGET } _BC_FILENAME } ) <nl> - <nl> - add_custom_command ( OUTPUT $ { $ { ARG_TARGET } _BC_FILENAME } <nl> - DEPENDS $ { outfiles } <nl> - COMMAND $ { WASM_LLVM_LINK } - o $ { $ { ARG_TARGET } _BC_FILENAME } $ { outfiles } <nl> - COMMENT " Linking LLVM bitcode library $ { target } . bc " <nl> - WORKING_DIRECTORY $ { CMAKE_CURRENT_BINARY_DIR } <nl> - VERBATIM <nl> - ) <nl> - <nl> - endmacro ( add_wast_library ) <nl> - <nl> - macro ( add_wast_executable ) <nl> - cmake_parse_arguments ( ARG " NOWARNINGS " " TARGET ; DESTINATION_FOLDER ; MAX_MEMORY " " SOURCE_FILES ; INCLUDE_FOLDERS ; SYSTEM_INCLUDE_FOLDERS ; LIBRARIES " $ { ARGN } ) <nl> - set ( target $ { ARG_TARGET } ) <nl> - set ( DESTINATION_FOLDER $ { ARG_DESTINATION_FOLDER } ) <nl> - <nl> - compile_wast ( $ { ARGV } ) <nl> - <nl> - foreach ( lib $ { ARG_LIBRARIES } ) <nl> - list ( APPEND LIBRARIES $ { $ { lib } _BC_FILENAME } ) <nl> - endforeach ( ) <nl> - add_custom_command ( OUTPUT $ { target } . bc <nl> - DEPENDS $ { outfiles } $ { ARG_LIBRARIES } $ { LIBRARIES } <nl> - COMMAND $ { WASM_LLVM_LINK } - only - needed - o $ { target } . bc $ { outfiles } $ { LIBRARIES } <nl> - COMMENT " Linking LLVM bitcode executable $ { target } . bc " <nl> - WORKING_DIRECTORY $ { CMAKE_CURRENT_BINARY_DIR } <nl> - VERBATIM <nl> - ) <nl> - <nl> - set_property ( DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES $ { target } . bc ) <nl> - <nl> - add_custom_command ( OUTPUT $ { target } . s <nl> - DEPENDS $ { target } . 
bc <nl> - COMMAND $ { WASM_LLC } - thread - model = single - asm - verbose = false - o $ { target } . s $ { target } . bc <nl> - COMMENT " Generating textual assembly $ { target } . s " <nl> - WORKING_DIRECTORY $ { CMAKE_CURRENT_BINARY_DIR } <nl> - VERBATIM <nl> - ) <nl> - set_property ( DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES $ { target } . s ) <nl> - <nl> - if ( ARG_MAX_MEMORY ) <nl> - set ( MAX_MEMORY_PARAM " - m " $ { ARG_MAX_MEMORY } ) <nl> - endif ( ) <nl> - <nl> - add_custom_command ( OUTPUT $ { DESTINATION_FOLDER } / $ { target } . wast <nl> - DEPENDS $ { target } . s <nl> - COMMAND $ < TARGET_FILE : eosio - s2wasm > - o $ { DESTINATION_FOLDER } / $ { target } . wast - s 10240 $ { MAX_MEMORY_PARAM } $ { target } . s <nl> - COMMENT " Generating WAST $ { target } . wast " <nl> - WORKING_DIRECTORY $ { CMAKE_CURRENT_BINARY_DIR } <nl> - VERBATIM <nl> - ) <nl> - set_property ( DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES $ { target } . wast ) <nl> - <nl> - add_custom_command ( OUTPUT $ { DESTINATION_FOLDER } / $ { target } . wasm <nl> - DEPENDS $ { target } . wast <nl> - COMMAND $ < TARGET_FILE : eosio - wast2wasm > $ { DESTINATION_FOLDER } / $ { target } . wast $ { DESTINATION_FOLDER } / $ { target } . wasm - n <nl> - COMMENT " Generating WASM $ { target } . wasm " <nl> - WORKING_DIRECTORY $ { CMAKE_CURRENT_BINARY_DIR } <nl> - VERBATIM <nl> - ) <nl> - set_property ( DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES $ { target } . wasm ) <nl> - <nl> - STRING ( REPLACE " . " " _ " TARGET_VARIABLE " $ { target } " ) <nl> - <nl> - add_custom_command ( OUTPUT $ { DESTINATION_FOLDER } / $ { target } . wast . hpp <nl> - DEPENDS $ { DESTINATION_FOLDER } / $ { target } . wast <nl> - COMMAND echo " const char * const $ { TARGET_VARIABLE } _wast = R \ " = = = = = ( " > $ { DESTINATION_FOLDER } / $ { target } . wast . hpp <nl> - COMMAND cat $ { DESTINATION_FOLDER } / $ { target } . wast > > $ { DESTINATION_FOLDER } / $ { target } . wast . hpp <nl> - COMMAND echo " ) = = = = = \ " ; " > > $ { DESTINATION_FOLDER } / $ { target } . wast . hpp <nl> - COMMENT " Generating $ { target } . wast . hpp " <nl> - VERBATIM <nl> - ) <nl> - <nl> - if ( EXISTS $ { CMAKE_CURRENT_SOURCE_DIR } / $ { target } . abi ) <nl> - add_custom_command ( OUTPUT $ { DESTINATION_FOLDER } / $ { target } . abi . hpp <nl> - DEPENDS $ { DESTINATION_FOLDER } / $ { target } . abi <nl> - COMMAND echo " const char * const $ { TARGET_VARIABLE } _abi = R \ " = = = = = ( " > $ { DESTINATION_FOLDER } / $ { target } . abi . hpp <nl> - COMMAND cat $ { DESTINATION_FOLDER } / $ { target } . abi > > $ { DESTINATION_FOLDER } / $ { target } . abi . hpp <nl> - COMMAND echo " ) = = = = = \ " ; " > > $ { DESTINATION_FOLDER } / $ { target } . abi . hpp <nl> - COMMENT " Generating $ { target } . abi . hpp " <nl> - VERBATIM <nl> - ) <nl> - set_property ( DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES $ { target } . abi . hpp ) <nl> - set ( extra_target_dependency $ { DESTINATION_FOLDER } / $ { target } . abi . hpp ) <nl> - else ( ) <nl> - endif ( ) <nl> - <nl> - add_custom_target ( $ { target } ALL DEPENDS $ { DESTINATION_FOLDER } / $ { target } . wast . hpp $ { extra_target_dependency } $ { DESTINATION_FOLDER } / $ { target } . wasm ) <nl> - <nl> - set_property ( DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES $ { DESTINATION_FOLDER } / $ { target } . wast . 
hpp ) <nl> - <nl> - set_property ( TARGET $ { target } PROPERTY INCLUDE_DIRECTORIES $ { ARG_INCLUDE_FOLDERS } ) <nl> - <nl> - set ( extra_target_dependency ) <nl> - <nl> - # For CLion code insight <nl> - foreach ( folder $ { ARG_INCLUDE_FOLDERS } ) <nl> - include_directories ( $ { folder } ) <nl> - endforeach ( ) <nl> - include_directories ( $ { Boost_INCLUDE_DIR } ) <nl> - <nl> - if ( EXISTS $ { CMAKE_CURRENT_SOURCE_DIR } / $ { target } . hpp ) <nl> - set ( HEADER_FILE $ { CMAKE_CURRENT_SOURCE_DIR } / $ { target } . hpp ) <nl> - endif ( ) <nl> - file ( GLOB HEADER_FILES $ { ARG_INCLUDE_FOLDERS } / * . hpp $ { SYSTEM_INCLUDE_FOLDERS } / * . hpp ) <nl> - add_executable ( $ { target } . tmp EXCLUDE_FROM_ALL $ { SOURCE_FILES } $ { HEADER_FILE } $ { HEADER_FILES } ) <nl> - <nl> - add_test ( NAME " validate_ $ { target } _abi " <nl> - COMMAND $ { CMAKE_BINARY_DIR } / scripts / abi_is_json . py $ { ABI_FILES } ) <nl> - <nl> - endmacro ( add_wast_executable ) <nl> deleted file mode 100644 <nl> index 4fac352874 . . 0000000000 <nl> mmm a / externals / CMakeLists . txt <nl> ppp / dev / null <nl> @ @ - 1 + 0 , 0 @ @ <nl> - add_subdirectory ( binaryen ) <nl> deleted file mode 160000 <nl> index 16d641f62a . . 0000000000 <nl> mmm a / externals / binaryen <nl> ppp / dev / null <nl> @ @ - 1 + 0 , 0 @ @ <nl> - Subproject commit 16d641f62ab14df845c87a63efe4d991b508d19a <nl> deleted file mode 160000 <nl> index 89fda1da70 . . 0000000000 <nl> mmm a / externals / magic_get <nl> ppp / dev / null <nl> @ @ - 1 + 0 , 0 @ @ <nl> - Subproject commit 89fda1da702e6c76a22bfb6233e9e3d0641708ec <nl>
Remove binaryen & other vestigial pieces
EOSIO/eos
738dbe9f5a565b419a38524d1b9bd1e66f6ba5f7
2019-01-04T15:44:30Z
mmm a / src / google / protobuf / compiler / java / java_enum . cc <nl> ppp b / src / google / protobuf / compiler / java / java_enum . cc <nl> void EnumGenerator : : Generate ( io : : Printer * printer ) { <nl> " } \ n " <nl> " \ n " <nl> " public static $ classname $ valueOf ( int value ) { \ n " <nl> + " return forNumber ( value ) ; \ n " <nl> + " } \ n " <nl> + " \ n " <nl> + " public static $ classname $ forNumber ( int value ) { \ n " <nl> " switch ( value ) { \ n " , <nl> " classname " , descriptor_ - > name ( ) ) ; <nl> printer - > Indent ( ) ; <nl> void EnumGenerator : : Generate ( io : : Printer * printer ) { <nl> " $ classname $ > internalValueMap = \ n " <nl> " new com . google . protobuf . Internal . EnumLiteMap < $ classname $ > ( ) { \ n " <nl> " public $ classname $ findValueByNumber ( int number ) { \ n " <nl> - " return $ classname $ . valueOf ( number ) ; \ n " <nl> + " return $ classname $ . forNumber ( number ) ; \ n " <nl> " } \ n " <nl> " } ; \ n " <nl> " \ n " , <nl> mmm a / src / google / protobuf / compiler / java / java_enum_field . cc <nl> ppp b / src / google / protobuf / compiler / java / java_enum_field . cc <nl> GenerateMembers ( io : : Printer * printer ) const { <nl> WriteFieldDocComment ( printer , descriptor_ ) ; <nl> printer - > Print ( variables_ , <nl> " $ deprecation $ public $ type $ get $ capitalized_name $ ( ) { \ n " <nl> - " $ type $ result = $ type $ . valueOf ( $ name $ _ ) ; \ n " <nl> + " $ type $ result = $ type $ . forNumber ( $ name $ _ ) ; \ n " <nl> " return result = = null ? $ unknown $ : result ; \ n " <nl> " } \ n " ) ; <nl> } <nl> GenerateBuilderMembers ( io : : Printer * printer ) const { <nl> WriteFieldDocComment ( printer , descriptor_ ) ; <nl> printer - > Print ( variables_ , <nl> " $ deprecation $ public $ type $ get $ capitalized_name $ ( ) { \ n " <nl> - " $ type $ result = $ type $ . valueOf ( $ name $ _ ) ; \ n " <nl> + " $ type $ result = $ type $ . forNumber ( $ name $ _ ) ; \ n " <nl> " return result = = null ? $ unknown $ : result ; \ n " <nl> " } \ n " ) ; <nl> WriteFieldDocComment ( printer , descriptor_ ) ; <nl> GenerateParsingCode ( io : : Printer * printer ) const { <nl> } else { <nl> printer - > Print ( variables_ , <nl> " int rawValue = input . readEnum ( ) ; \ n " <nl> - " $ type $ value = $ type $ . valueOf ( rawValue ) ; \ n " <nl> + " $ type $ value = $ type $ . forNumber ( rawValue ) ; \ n " <nl> " if ( value = = null ) { \ n " ) ; <nl> if ( PreserveUnknownFields ( descriptor_ - > containing_type ( ) ) ) { <nl> printer - > Print ( variables_ , <nl> GenerateMembers ( io : : Printer * printer ) const { <nl> printer - > Print ( variables_ , <nl> " $ deprecation $ public $ type $ get $ capitalized_name $ ( ) { \ n " <nl> " if ( $ has_oneof_case_message $ ) { \ n " <nl> - " $ type $ result = $ type $ . valueOf ( ( java . lang . Integer ) $ oneof_name $ _ ) ; \ n " <nl> + " $ type $ result = $ type $ . forNumber ( ( java . lang . Integer ) $ oneof_name $ _ ) ; \ n " <nl> " return result = = null ? $ unknown $ : result ; \ n " <nl> " } \ n " <nl> " return $ default $ ; \ n " <nl> GenerateBuilderMembers ( io : : Printer * printer ) const { <nl> printer - > Print ( variables_ , <nl> " $ deprecation $ public $ type $ get $ capitalized_name $ ( ) { \ n " <nl> " if ( $ has_oneof_case_message $ ) { \ n " <nl> - " $ type $ result = $ type $ . valueOf ( ( java . lang . Integer ) $ oneof_name $ _ ) ; \ n " <nl> + " $ type $ result = $ type $ . forNumber ( ( java . lang . 
Integer ) $ oneof_name $ _ ) ; \ n " <nl> " return result = = null ? $ unknown $ : result ; \ n " <nl> " } \ n " <nl> " return $ default $ ; \ n " <nl> GenerateParsingCode ( io : : Printer * printer ) const { <nl> } else { <nl> printer - > Print ( variables_ , <nl> " int rawValue = input . readEnum ( ) ; \ n " <nl> - " $ type $ value = $ type $ . valueOf ( rawValue ) ; \ n " <nl> + " $ type $ value = $ type $ . forNumber ( rawValue ) ; \ n " <nl> " if ( value = = null ) { \ n " ) ; <nl> if ( PreserveUnknownFields ( descriptor_ - > containing_type ( ) ) ) { <nl> printer - > Print ( variables_ , <nl> GenerateMembers ( io : : Printer * printer ) const { <nl> " new com . google . protobuf . Internal . ListAdapter . Converter < \ n " <nl> " java . lang . Integer , $ type $ > ( ) { \ n " <nl> " public $ type $ convert ( java . lang . Integer from ) { \ n " <nl> - " $ type $ result = $ type $ . valueOf ( from ) ; \ n " <nl> + " $ type $ result = $ type $ . forNumber ( from ) ; \ n " <nl> " return result = = null ? $ unknown $ : result ; \ n " <nl> " } \ n " <nl> " } ; \ n " ) ; <nl> GenerateParsingCode ( io : : Printer * printer ) const { <nl> } else { <nl> printer - > Print ( variables_ , <nl> " int rawValue = input . readEnum ( ) ; \ n " <nl> - " $ type $ value = $ type $ . valueOf ( rawValue ) ; \ n " <nl> + " $ type $ value = $ type $ . forNumber ( rawValue ) ; \ n " <nl> " if ( value = = null ) { \ n " ) ; <nl> if ( PreserveUnknownFields ( descriptor_ - > containing_type ( ) ) ) { <nl> printer - > Print ( variables_ , <nl> mmm a / src / google / protobuf / compiler / java / java_enum_field_lite . cc <nl> ppp b / src / google / protobuf / compiler / java / java_enum_field_lite . cc <nl> GenerateMembers ( io : : Printer * printer ) const { <nl> WriteFieldDocComment ( printer , descriptor_ ) ; <nl> printer - > Print ( variables_ , <nl> " $ deprecation $ public $ type $ get $ capitalized_name $ ( ) { \ n " <nl> - " $ type $ result = $ type $ . valueOf ( $ name $ _ ) ; \ n " <nl> + " $ type $ result = $ type $ . forNumber ( $ name $ _ ) ; \ n " <nl> " return result = = null ? $ unknown $ : result ; \ n " <nl> " } \ n " ) ; <nl> <nl> GenerateParsingCode ( io : : Printer * printer ) const { <nl> } else { <nl> printer - > Print ( variables_ , <nl> " int rawValue = input . readEnum ( ) ; \ n " <nl> - " $ type $ value = $ type $ . valueOf ( rawValue ) ; \ n " <nl> + " $ type $ value = $ type $ . forNumber ( rawValue ) ; \ n " <nl> " if ( value = = null ) { \ n " ) ; <nl> if ( PreserveUnknownFields ( descriptor_ - > containing_type ( ) ) ) { <nl> printer - > Print ( variables_ , <nl> GenerateMembers ( io : : Printer * printer ) const { <nl> printer - > Print ( variables_ , <nl> " $ deprecation $ public $ type $ get $ capitalized_name $ ( ) { \ n " <nl> " if ( $ has_oneof_case_message $ ) { \ n " <nl> - " $ type $ result = $ type $ . valueOf ( ( java . lang . Integer ) $ oneof_name $ _ ) ; \ n " <nl> + " $ type $ result = $ type $ . forNumber ( ( java . lang . Integer ) $ oneof_name $ _ ) ; \ n " <nl> " return result = = null ? $ unknown $ : result ; \ n " <nl> " } \ n " <nl> " return $ default $ ; \ n " <nl> GenerateParsingCode ( io : : Printer * printer ) const { <nl> } else { <nl> printer - > Print ( variables_ , <nl> " int rawValue = input . readEnum ( ) ; \ n " <nl> - " $ type $ value = $ type $ . valueOf ( rawValue ) ; \ n " <nl> + " $ type $ value = $ type $ . 
forNumber ( rawValue ) ; \ n " <nl> " if ( value = = null ) { \ n " ) ; <nl> if ( PreserveUnknownFields ( descriptor_ - > containing_type ( ) ) ) { <nl> printer - > Print ( variables_ , <nl> GenerateMembers ( io : : Printer * printer ) const { <nl> " new com . google . protobuf . Internal . ListAdapter . Converter < \ n " <nl> " java . lang . Integer , $ type $ > ( ) { \ n " <nl> " public $ type $ convert ( java . lang . Integer from ) { \ n " <nl> - " $ type $ result = $ type $ . valueOf ( from ) ; \ n " <nl> + " $ type $ result = $ type $ . forNumber ( from ) ; \ n " <nl> " return result = = null ? $ unknown $ : result ; \ n " <nl> " } \ n " <nl> " } ; \ n " ) ; <nl> GenerateParsingCode ( io : : Printer * printer ) const { <nl> } else { <nl> printer - > Print ( variables_ , <nl> " int rawValue = input . readEnum ( ) ; \ n " <nl> - " $ type $ value = $ type $ . valueOf ( rawValue ) ; \ n " <nl> + " $ type $ value = $ type $ . forNumber ( rawValue ) ; \ n " <nl> " if ( value = = null ) { \ n " ) ; <nl> if ( PreserveUnknownFields ( descriptor_ - > containing_type ( ) ) ) { <nl> printer - > Print ( variables_ , <nl> mmm a / src / google / protobuf / compiler / java / java_enum_lite . cc <nl> ppp b / src / google / protobuf / compiler / java / java_enum_lite . cc <nl> void EnumLiteGenerator : : Generate ( io : : Printer * printer ) { <nl> " } \ n " <nl> " \ n " <nl> " public static $ classname $ valueOf ( int value ) { \ n " <nl> + " return forNumber ( value ) ; \ n " <nl> + " } \ n " <nl> + " \ n " <nl> + " public static $ classname $ forNumber ( int value ) { \ n " <nl> " switch ( value ) { \ n " , <nl> " classname " , descriptor_ - > name ( ) ) ; <nl> printer - > Indent ( ) ; <nl> void EnumLiteGenerator : : Generate ( io : : Printer * printer ) { <nl> " $ classname $ > internalValueMap = \ n " <nl> " new com . google . protobuf . Internal . EnumLiteMap < $ classname $ > ( ) { \ n " <nl> " public $ classname $ findValueByNumber ( int number ) { \ n " <nl> - " return $ classname $ . valueOf ( number ) ; \ n " <nl> + " return $ classname $ . forNumber ( number ) ; \ n " <nl> " } \ n " <nl> " } ; \ n " <nl> " \ n " , <nl> mmm a / src / google / protobuf / compiler / java / java_message . cc <nl> ppp b / src / google / protobuf / compiler / java / java_message . cc <nl> void ImmutableMessageGenerator : : Generate ( io : : Printer * printer ) { <nl> " } \ n " ) ; <nl> printer - > Print ( vars , <nl> " public static $ oneof_capitalized_name $ Case valueOf ( int value ) { \ n " <nl> + " return forNumber ( value ) ; \ n " <nl> + " } \ n " <nl> + " \ n " <nl> + " public static $ oneof_capitalized_name $ Case forNumber ( int value ) { \ n " <nl> " switch ( value ) { \ n " ) ; <nl> for ( int j = 0 ; j < descriptor_ - > oneof_decl ( i ) - > field_count ( ) ; j + + ) { <nl> const FieldDescriptor * field = descriptor_ - > oneof_decl ( i ) - > field ( j ) ; <nl> void ImmutableMessageGenerator : : Generate ( io : : Printer * printer ) { <nl> printer - > Print ( vars , <nl> " public $ oneof_capitalized_name $ Case \ n " <nl> " get $ oneof_capitalized_name $ Case ( ) { \ n " <nl> - " return $ oneof_capitalized_name $ Case . valueOf ( \ n " <nl> + " return $ oneof_capitalized_name $ Case . forNumber ( \ n " <nl> " $ oneof_name $ Case_ ) ; \ n " <nl> " } \ n " <nl> " \ n " ) ; <nl> mmm a / src / google / protobuf / compiler / java / java_message_builder . cc <nl> ppp b / src / google / protobuf / compiler / java / java_message_builder . 
cc <nl> Generate ( io : : Printer * printer ) { <nl> printer - > Print ( vars , <nl> " public $ oneof_capitalized_name $ Case \ n " <nl> " get $ oneof_capitalized_name $ Case ( ) { \ n " <nl> - " return $ oneof_capitalized_name $ Case . valueOf ( \ n " <nl> + " return $ oneof_capitalized_name $ Case . forNumber ( \ n " <nl> " $ oneof_name $ Case_ ) ; \ n " <nl> " } \ n " <nl> " \ n " <nl> mmm a / src / google / protobuf / compiler / java / java_message_lite . cc <nl> ppp b / src / google / protobuf / compiler / java / java_message_lite . cc <nl> void ImmutableMessageLiteGenerator : : Generate ( io : : Printer * printer ) { <nl> " } \ n " ) ; <nl> printer - > Print ( vars , <nl> " public static $ oneof_capitalized_name $ Case valueOf ( int value ) { \ n " <nl> + " return forNumber ( value ) ; \ n " <nl> + " } \ n " <nl> + " \ n " <nl> + " public static $ oneof_capitalized_name $ Case forNumber ( int value ) { \ n " <nl> " switch ( value ) { \ n " ) ; <nl> for ( int j = 0 ; j < descriptor_ - > oneof_decl ( i ) - > field_count ( ) ; j + + ) { <nl> const FieldDescriptor * field = descriptor_ - > oneof_decl ( i ) - > field ( j ) ; <nl> void ImmutableMessageLiteGenerator : : Generate ( io : : Printer * printer ) { <nl> printer - > Print ( vars , <nl> " public $ oneof_capitalized_name $ Case \ n " <nl> " get $ oneof_capitalized_name $ Case ( ) { \ n " <nl> - " return $ oneof_capitalized_name $ Case . valueOf ( \ n " <nl> + " return $ oneof_capitalized_name $ Case . forNumber ( \ n " <nl> " $ oneof_name $ Case_ ) ; \ n " <nl> " } \ n " <nl> " \ n " <nl> mmm a / src / google / protobuf / dynamic_message . cc <nl> ppp b / src / google / protobuf / dynamic_message . cc <nl> class DynamicMessage : public Message { <nl> DynamicMessage ( const TypeInfo * type_info ) ; <nl> ~ DynamicMessage ( ) ; <nl> <nl> + # ifndef _MSC_VER <nl> + void operator delete ( void * p ) { <nl> + : : operator delete ( p ) ; / / non - sized deallocation <nl> + } <nl> + # endif <nl> + <nl> / / Called on the prototype after construction to initialize message fields . <nl> void CrossLinkPrototypes ( ) ; <nl> <nl>
Merge pull request from anandolee / master
protocolbuffers/protobuf
09292d5759cbf3e82dcfae67e01ee31ed8906b5a
2016-04-07T18:05:05Z
mmm a / trunk / src / core / srs_core_client . cpp <nl> ppp b / trunk / src / core / srs_core_client . cpp <nl> using namespace std ; <nl> # include < srs_core_http . hpp > <nl> # include < srs_core_bandwidth . hpp > <nl> <nl> - # define SRS_PULSE_TIMEOUT_MS 100 <nl> - # define SRS_SEND_TIMEOUT_US 5000000L <nl> - # define SRS_RECV_TIMEOUT_US SRS_SEND_TIMEOUT_US <nl> - # define SRS_STREAM_BUSY_SLEEP_MS 2000 <nl> - <nl> SrsClient : : SrsClient ( SrsServer * srs_server , st_netfd_t client_stfd ) <nl> : SrsConnection ( srs_server , client_stfd ) <nl> { <nl> int SrsClient : : service_cycle ( ) <nl> srs_warn ( " stream % s is already publishing . ret = % d " , <nl> req - > get_stream_url ( ) . c_str ( ) , ret ) ; <nl> / / to delay request <nl> - st_usleep ( SRS_STREAM_BUSY_SLEEP_MS * 1000 ) ; <nl> + st_usleep ( SRS_STREAM_BUSY_SLEEP_US ) ; <nl> return ret ; <nl> } <nl> <nl> int SrsClient : : playing ( SrsSource * source ) <nl> SrsAutoFree ( SrsConsumer , consumer , false ) ; <nl> srs_verbose ( " consumer created success . " ) ; <nl> <nl> - rtmp - > set_recv_timeout ( SRS_PULSE_TIMEOUT_MS * 1000 ) ; <nl> + rtmp - > set_recv_timeout ( SRS_PULSE_TIMEOUT_US ) ; <nl> <nl> SrsPithyPrint pithy_print ( SRS_STAGE_PLAY_USER ) ; <nl> <nl> while ( true ) { <nl> - pithy_print . elapse ( SRS_PULSE_TIMEOUT_MS ) ; <nl> + pithy_print . elapse ( SRS_PULSE_TIMEOUT_US / 1000 ) ; <nl> <nl> / / switch to other st - threads . <nl> st_usleep ( 0 ) ; <nl> mmm a / trunk / src / core / srs_core_encoder . cpp <nl> ppp b / trunk / src / core / srs_core_encoder . cpp <nl> CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE . <nl> # include < srs_core_config . hpp > <nl> # include < srs_core_rtmp . hpp > <nl> # include < srs_core_pithy_print . hpp > <nl> + # include < srs_core_protocol . hpp > <nl> <nl> # ifdef SRS_FFMPEG <nl> <nl> - # define SRS_ENCODER_SLEEP_MS 2000 <nl> - <nl> # define SRS_ENCODER_COPY " copy " <nl> # define SRS_ENCODER_VCODEC " libx264 " <nl> # define SRS_ENCODER_ACODEC " libaacplus " <nl> void SrsFFMPEG : : stop ( ) <nl> <nl> SrsEncoder : : SrsEncoder ( ) <nl> { <nl> - pthread = new SrsThread ( this , SRS_ENCODER_SLEEP_MS ) ; <nl> + pthread = new SrsThread ( this , SRS_ENCODER_SLEEP_US ) ; <nl> pithy_print = new SrsPithyPrint ( SRS_STAGE_ENCODER ) ; <nl> } <nl> <nl> int SrsEncoder : : cycle ( ) <nl> <nl> / / pithy print <nl> encoder ( ) ; <nl> - pithy_print - > elapse ( SRS_ENCODER_SLEEP_MS ) ; <nl> + pithy_print - > elapse ( SRS_ENCODER_SLEEP_US / 1000 ) ; <nl> <nl> return ret ; <nl> } <nl> mmm a / trunk / src / core / srs_core_forward . cpp <nl> ppp b / trunk / src / core / srs_core_forward . cpp <nl> CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE . <nl> # include < srs_core_source . hpp > <nl> # include < srs_core_autofree . 
hpp > <nl> <nl> - # define SRS_PULSE_TIMEOUT_MS 100 <nl> - # define SRS_FORWARDER_SLEEP_MS 2000 <nl> - # define SRS_SEND_TIMEOUT_US 3000000L <nl> - # define SRS_RECV_TIMEOUT_US SRS_SEND_TIMEOUT_US <nl> - <nl> SrsForwarder : : SrsForwarder ( SrsSource * _source ) <nl> { <nl> source = _source ; <nl> SrsForwarder : : SrsForwarder ( SrsSource * _source ) <nl> stfd = NULL ; <nl> stream_id = 0 ; <nl> <nl> - pthread = new SrsThread ( this , SRS_FORWARDER_SLEEP_MS ) ; <nl> + pthread = new SrsThread ( this , SRS_FORWARDER_SLEEP_US ) ; <nl> queue = new SrsMessageQueue ( ) ; <nl> jitter = new SrsRtmpJitter ( ) ; <nl> } <nl> int SrsForwarder : : forward ( ) <nl> { <nl> int ret = ERROR_SUCCESS ; <nl> <nl> - client - > set_recv_timeout ( SRS_PULSE_TIMEOUT_MS * 1000 ) ; <nl> + client - > set_recv_timeout ( SRS_PULSE_TIMEOUT_US ) ; <nl> <nl> SrsPithyPrint pithy_print ( SRS_STAGE_FORWARDER ) ; <nl> <nl> int SrsForwarder : : forward ( ) <nl> SrsAutoFree ( SrsSharedPtrMessage * , msgs , true ) ; <nl> <nl> / / pithy print <nl> - pithy_print . elapse ( SRS_PULSE_TIMEOUT_MS ) ; <nl> + pithy_print . elapse ( SRS_PULSE_TIMEOUT_US / 1000 ) ; <nl> if ( pithy_print . can_print ( ) ) { <nl> srs_trace ( " - > time = % " PRId64 " , msgs = % d , obytes = % " PRId64 " , ibytes = % " PRId64 " , okbps = % d , ikbps = % d " , <nl> pithy_print . get_age ( ) , count , client - > get_send_bytes ( ) , client - > get_recv_bytes ( ) , client - > get_send_kbps ( ) , client - > get_recv_kbps ( ) ) ; <nl> mmm a / trunk / src / core / srs_core_protocol . cpp <nl> ppp b / trunk / src / core / srs_core_protocol . cpp <nl> messages . <nl> / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> - / / when got a messae header , increase recv timeout to got an entire message . <nl> - # define SRS_MIN_RECV_TIMEOUT_US 3000 <nl> <nl> SrsProtocol : : AckWindowSize : : AckWindowSize ( ) <nl> { <nl> mmm a / trunk / src / core / srs_core_protocol . hpp <nl> ppp b / trunk / src / core / srs_core_protocol . hpp <nl> CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE . <nl> # include < srs_core_log . hpp > <nl> # include < srs_core_error . hpp > <nl> <nl> + / / the following is the timeout for rtmp protocol , <nl> + / / to avoid death connection . <nl> + <nl> + / / when got a messae header , there must be some data , <nl> + / / increase recv timeout to got an entire message . <nl> + # define SRS_MIN_RECV_TIMEOUT_US 60 * 1000 * 1000L <nl> + <nl> + / / the timeout to wait for client control message , <nl> + / / if timeout , we generally ignore and send the data to client , <nl> + / / generally , it ' s the pulse time for data seding . <nl> + # define SRS_PULSE_TIMEOUT_US 200 * 1000L <nl> + <nl> + / / the timeout to wait client data , <nl> + / / if timeout , close the connection . <nl> + # define SRS_SEND_TIMEOUT_US 30 * 1000 * 1000L <nl> + <nl> + / / the timeout to send data to client , <nl> + / / if timeout , close the connection . 
<nl> + # define SRS_RECV_TIMEOUT_US 30 * 1000 * 1000L <nl> + <nl> + / / when stream is busy , for example , streaming is already <nl> + / / publishing , when a new client to request to publish , <nl> + / / sleep a while and close the connection . <nl> + # define SRS_STREAM_BUSY_SLEEP_US 3 * 1000 * 1000L <nl> + <nl> + / / when error , forwarder sleep for a while and retry . <nl> + # define SRS_FORWARDER_SLEEP_US 3 * 1000 * 1000L <nl> + <nl> + / / when error , encoder sleep for a while and retry . <nl> + # define SRS_ENCODER_SLEEP_US 3 * 1000 * 1000L <nl> + <nl> class SrsSocket ; <nl> class SrsBuffer ; <nl> class SrsPacket ; <nl> mmm a / trunk / src / core / srs_core_thread . cpp <nl> ppp b / trunk / src / core / srs_core_thread . cpp <nl> void ISrsThreadHandler : : on_leave_loop ( ) <nl> { <nl> } <nl> <nl> - SrsThread : : SrsThread ( ISrsThreadHandler * thread_handler , int64_t interval_ms ) <nl> + SrsThread : : SrsThread ( ISrsThreadHandler * thread_handler , int64_t interval_us ) <nl> { <nl> handler = thread_handler ; <nl> - cycle_interval_milliseconds = interval_ms ; <nl> + cycle_interval_us = interval_us ; <nl> <nl> tid = NULL ; <nl> loop = false ; <nl> void SrsThread : : thread_cycle ( ) <nl> break ; <nl> } <nl> <nl> - st_usleep ( cycle_interval_milliseconds * 1000 ) ; <nl> + st_usleep ( cycle_interval_us ) ; <nl> } <nl> <nl> handler - > on_leave_loop ( ) ; <nl> mmm a / trunk / src / core / srs_core_thread . hpp <nl> ppp b / trunk / src / core / srs_core_thread . hpp <nl> class SrsThread <nl> bool loop ; <nl> private : <nl> ISrsThreadHandler * handler ; <nl> - int64_t cycle_interval_milliseconds ; <nl> + int64_t cycle_interval_us ; <nl> public : <nl> / * * <nl> * initialize the thread . <nl> * @ param thread_handler , the cycle handler for the thread . <nl> - * @ param interval_ms , the sleep interval when cycle finished . <nl> + * @ param interval_us , the sleep interval when cycle finished . <nl> * / <nl> - SrsThread ( ISrsThreadHandler * thread_handler , int64_t interval_ms ) ; <nl> + SrsThread ( ISrsThreadHandler * thread_handler , int64_t interval_us ) ; <nl> virtual ~ SrsThread ( ) ; <nl> public : <nl> / * * <nl>
increase server timeouts , recv timeout from 5s to 30s
ossrs/srs
3dbb2132d322473b5ad87bc6fab48319b9fac0ff
2014-01-01T11:58:33Z
mmm a / hphp / runtime / vm / jit / vasm - xls . cpp <nl> ppp b / hphp / runtime / vm / jit / vasm - xls . cpp <nl> struct LiveDefVisitor { <nl> void def ( Vreg r ) { m_live . reset ( r ) ; } <nl> void def ( RegSet rs ) { rs . forEach ( [ & ] ( Vreg r ) { def ( r ) ; } ) ; } <nl> void def ( Vtuple defs ) { for ( auto r : m_tuples [ defs ] ) def ( r ) ; } <nl> - void defHint ( Vtuple def_tuple , Vtuple hint_tuple ) { <nl> - auto & defs = m_tuples [ def_tuple ] ; <nl> - for ( int i = 0 ; i < defs . size ( ) ; i + + ) { <nl> - def ( defs [ i ] ) ; <nl> - } <nl> - } <nl> + void defHint ( Vtuple def_tuple , Vtuple hint_tuple ) { def ( def_tuple ) ; } <nl> void defHint ( Vreg d , Vreg hint ) { def ( d ) ; } <nl> <nl> private : <nl> struct LiveUseVisitor { <nl> if ( m . index . isValid ( ) ) use ( m . index ) ; <nl> } <nl> template < class S , class H > void useHint ( S src , H hint ) { use ( src ) ; } <nl> - void useHint ( Vtuple src_tuple , Vtuple hint_tuple ) { <nl> - auto & uses = m_tuples [ src_tuple ] ; <nl> - for ( int i = 0 , n = uses . size ( ) ; i < n ; i + + ) { <nl> - use ( uses [ i ] ) ; <nl> - } <nl> - } <nl> + void useHint ( Vtuple src_tuple , Vtuple hint_tuple ) { use ( src_tuple ) ; } <nl> <nl> private : <nl> jit : : vector < VregList > & m_tuples ; <nl> struct DefVisitor { <nl> void def ( Vtuple defs ) { <nl> for ( auto r : m_tuples [ defs ] ) def ( r ) ; <nl> } <nl> - void defHint ( Vtuple def_tuple , Vtuple hint_tuple ) { def ( def_tuple ) ; } <nl> + void defHint ( Vtuple def_tuple , Vtuple hint_tuple ) { <nl> + auto & defs = m_tuples [ def_tuple ] ; <nl> + auto & hints = m_tuples [ hint_tuple ] ; <nl> + for ( int i = 0 ; i < defs . size ( ) ; i + + ) { <nl> + def ( defs [ i ] , VregKind : : Any , hints [ i ] ) ; <nl> + } <nl> + } <nl> template < class D , class H > void defHint ( D dst , H hint ) { <nl> def ( dst , dst . kind , hint , dst . bits = = 128 ) ; <nl> } <nl> struct UseVisitor { <nl> void use ( VcallArgsId id ) { <nl> always_assert ( false & & " vcall unsupported in vxls " ) ; <nl> } <nl> - void useHint ( Vtuple src_tuple , Vtuple hint_tuple ) { use ( src_tuple ) ; } <nl> + void useHint ( Vtuple src_tuple , Vtuple hint_tuple ) { <nl> + auto & uses = m_tuples [ src_tuple ] ; <nl> + auto & hints = m_tuples [ hint_tuple ] ; <nl> + for ( int i = 0 , n = uses . size ( ) ; i < n ; i + + ) { <nl> + useHint ( uses [ i ] , hints [ i ] ) ; <nl> + } <nl> + } <nl> void use ( RegSet regs ) { <nl> regs . forEach ( [ & ] ( Vreg r ) { use ( r ) ; } ) ; <nl> } <nl>
Fix XLS Vreg hinting
facebook/hhvm
6260190b92607347130582efe6ddd500dd8d118c
2015-05-07T17:00:39Z
mmm a / test / smoke_test . py <nl> ppp b / test / smoke_test . py <nl> <nl> " fsck " : True } , <nl> repeat = 1 , timeout = 60 ) <nl> <nl> + do_test_cloud ( " integration / rget . py " , <nl> + { " auto " : True , <nl> + " mode " : mode , <nl> + " no - valgrind " : not checker , <nl> + " protocol " : protocol } , timeout = 45 ) <nl> + <nl> # More advanced tests in various cores / slices configuration <nl> for ( cores , slices ) in [ ( 1 , 1 ) ] : <nl> do_test_cloud ( " integration / many_keys . py " , <nl>
Include rget test in smoke test
rethinkdb/rethinkdb
1ab6f57acdb48966811883323223325655d8f817
2011-02-11T03:14:25Z
mmm a / src / runtime / eval / parser / scanner . cpp <nl> ppp b / src / runtime / eval / parser / scanner . cpp <nl> ExpressionPtr & TokenPayload : : exp ( ) { <nl> if ( m_mode = = None ) { <nl> m_mode = SingleExpression ; <nl> } <nl> - ASSERT ( m_mode = SingleExpression ) ; <nl> + ASSERT ( m_mode = = SingleExpression ) ; <nl> return m_exp ; <nl> } <nl> <nl> StatementPtr & TokenPayload : : stmt ( ) { <nl> if ( m_mode = = None ) { <nl> m_mode = SingleStatement ; <nl> } <nl> - ASSERT ( m_mode = SingleStatement ) ; <nl> + ASSERT ( m_mode = = SingleStatement ) ; <nl> return m_stmt ; <nl> } <nl> <nl> NamePtr & TokenPayload : : name ( ) { <nl> if ( m_mode = = None ) { <nl> m_mode = SingleName ; <nl> } <nl> - ASSERT ( m_mode = SingleName ) ; <nl> + ASSERT ( m_mode = = SingleName ) ; <nl> return m_name ; <nl> } <nl> <nl>
Fix ASSERT to use = =
facebook/hhvm
4b63c156c84cb7836602e190a0a9ad7ebccb0203
2010-07-22T19:02:17Z
mmm a / test / IDE / complete_skipbody . swift <nl> ppp b / test / IDE / complete_skipbody . swift <nl> func test ( valueOptOpt : MyStruct ? ? ) { <nl> <nl> if ( value . x = = 1 ) { <nl> let unrelated2 = FORBIDDEN_Struct ( ) <nl> - _ = value . # ^ FUNCTIONBODY ^ # <nl> + switch value . x { <nl> + case let x where x < 2 : <nl> + let unrelated3 = FORBIDDEN_Struct ( ) <nl> + if x = = value . # ^ FUNCTIONBODY ^ # { } <nl> + default : <nl> + break <nl> + } <nl> } <nl> } <nl> <nl> let globalValue = globalValueOpt ! <nl> <nl> - let FORBIDDEN_localVar = 1 <nl> + let FORBIDDEN_globalVar = 1 <nl> <nl> switch glovalValue . x { <nl> case let x where x < 2 : <nl>
Update a test case
apple/swift
36123a7ed34fa968d1a264dc603b9938c6df9b4b
2020-06-01T19:36:39Z
mmm a / docs / howto / how_to_create_a_pull . md <nl> ppp b / docs / howto / how_to_create_a_pull . md <nl> git config user . email " XXX @ [ XXX . com ] " <nl> - Clone your fork ( Please replace " USERNAME " with your GitHub user name . ) <nl> <nl> ` ` ` <nl> - git clone git @ github . com : USERNAME / apollo . git <nl> + ( Use SSH ) git clone git @ github . com : USERNAME / apollo . git <nl> + ( Use HTTPS ) git clone https : / / github . com / USERNAME / apollo . git <nl> ` ` ` <nl> <nl> - Add Apollo repository as upstream <nl> <nl> ` ` ` <nl> - git remote add upstream git @ github . com : ApolloAuto / apollo . git <nl> + ( Use SSH ) git remote add upstream git @ github . com : ApolloAuto / apollo . git <nl> + ( Use HTTPS ) git remote add upstream https : / / github . com / ApolloAuto / apollo . git <nl> ` ` ` <nl> <nl> - Create a new branch , make changes and commit . <nl> mmm a / modules / common / adapters / adapter_gflags . cc <nl> ppp b / modules / common / adapters / adapter_gflags . cc <nl> DEFINE_string ( mobileye_topic , " / apollo / sensor / mobileye " , " mobileye topic name " ) ; <nl> DEFINE_string ( delphi_esr_topic , " / apollo / sensor / delphi_esr " , <nl> " delphi esr radar topic name " ) ; <nl> DEFINE_string ( conti_radar_topic , " / apollo / sensor / conti_radar " , <nl> - " delphi esr radar topic name " ) ; <nl> + " continental radar topic name " ) ; <nl> DEFINE_string ( ultrasonic_radar_topic , " / apollo / sensor / ultrasonic_radar " , <nl> " ultrasonic esr radar topic name " ) ; <nl> / / TODO ( Authors ) : Change the topic name <nl>
Document : update how_to_create_a_pull and adapter_gflags
ApolloAuto/apollo
f5ea45fa9abbf1498b1f4a057e71b3f05e8070d3
2018-05-29T07:02:11Z
mmm a / drivers / python / rethinkdb / query . py <nl> ppp b / drivers / python / rethinkdb / query . py <nl> def grouped_map_reduce ( self , group_mapping , value_mapping , reduction_base , reduc <nl> value_mapping = FunctionExpr ( value_mapping ) <nl> return JSONExpression ( internal . GroupedMapReduce ( self , group_mapping , value_mapping , reduction_base , reduction_func ) ) <nl> <nl> + def group_by ( self , * args ) : <nl> + attrs = args [ : - 1 ] <nl> + groupByObject = args [ - 1 ] <nl> + <nl> + if len ( attrs ) > 1 : <nl> + grouping = FunctionExpr ( lambda row : [ row [ attr ] for attr in attrs ] ) <nl> + else : <nl> + grouping = FunctionExpr ( lambda row : row [ attrs [ 0 ] ] ) <nl> + <nl> + mapping = groupByObject . get ( ' mapping ' , lambda row : row ) <nl> + <nl> + try : <nl> + reduction = FunctionExpr ( groupByObject [ ' reduction ' ] ) <nl> + except KeyError : <nl> + raise ValueError ( " Groupby requires a reduction to be specified " ) <nl> + try : <nl> + base = groupByObject [ ' base ' ] <nl> + except KeyError : <nl> + raise ValueError ( " Groupby requires a base for the reduction " ) <nl> + <nl> + gmr = self . grouped_map_reduce ( grouping , mapping , base , reduction ) <nl> + <nl> + try : <nl> + finalizer = groupByObject [ ' finalizer ' ] <nl> + gmr = gmr . map ( lambda group : group . extend ( { ' reduction ' : finalizer ( group [ ' reduction ' ] ) } ) ) <nl> + except KeyError : <nl> + pass <nl> + <nl> + return gmr <nl> + <nl> def distinct ( self ) : <nl> " " " Discards duplicate elements from an array . <nl> <nl> def grouped_map_reduce ( self , group_mapping , value_mapping , reduction_base , reduc <nl> value_mapping = FunctionExpr ( value_mapping ) <nl> return JSONExpression ( internal . GroupedMapReduce ( self , group_mapping , value_mapping , reduction_base , reduction_func ) ) <nl> <nl> + def group_by ( self , * args ) : <nl> + attrs = args [ : - 1 ] <nl> + groupByObject = args [ - 1 ] <nl> + <nl> + if len ( attrs ) > 1 : <nl> + grouping = FunctionExpr ( lambda row : [ row [ attr ] for attr in attrs ] ) <nl> + else : <nl> + grouping = FunctionExpr ( lambda row : row [ attrs [ 0 ] ] ) <nl> + <nl> + mapping = groupByObject . get ( ' mapping ' , lambda row : row ) <nl> + <nl> + try : <nl> + reduction = FunctionExpr ( groupByObject [ ' reduction ' ] ) <nl> + except KeyError : <nl> + raise ValueError ( " Groupby requires a reduction to be specified " ) <nl> + try : <nl> + base = groupByObject [ ' base ' ] <nl> + except KeyError : <nl> + raise ValueError ( " Groupby requires a base for the reduction " ) <nl> + <nl> + gmr = self . grouped_map_reduce ( grouping , mapping , base , reduction ) <nl> + <nl> + try : <nl> + finalizer = groupByObject [ ' finalizer ' ] <nl> + gmr = gmr . map ( lambda group : group . extend ( { ' reduction ' : finalizer ( group [ ' reduction ' ] ) } ) ) <nl> + except KeyError : <nl> + pass <nl> + <nl> + return gmr <nl> + <nl> def distinct ( self ) : <nl> " " " Discards duplicate elements from a stream . <nl> <nl> def __len__ ( self ) : <nl> " illegal to return anything other than an integer from ` __len__ ( ) ` " <nl> " in Python . 
) " ) <nl> <nl> + count = { <nl> + ' mapping ' : lambda row : 1 , <nl> + ' base ' : 0 , <nl> + ' reduction ' : lambda acc , val : acc + val <nl> + } <nl> + <nl> + def sum ( attr ) : <nl> + return { <nl> + ' mapping ' : lambda row : row [ attr ] , <nl> + ' base ' : 0 , <nl> + ' reduction ' : lambda acc , val : acc + val <nl> + } <nl> + <nl> + def average ( attr ) : <nl> + return { <nl> + ' mapping ' : lambda row : [ row [ attr ] , 1 ] , <nl> + ' base ' : [ 0 , 0 ] , <nl> + ' reduction ' : lambda acc , val : [ acc [ 0 ] + val [ 0 ] , acc [ 1 ] + val [ 1 ] ] , <nl> + ' finalizer ' : lambda res : res [ 0 ] / res [ 1 ] <nl> + } <nl> + <nl> def expr ( val ) : <nl> " " " Converts a python value to a ReQL : class : ` JSONExpression ` . <nl> <nl> def let ( * bindings ) : <nl> <nl> class FunctionExpr ( object ) : <nl> " " " TODO document me " " " <nl> + unique_counter = 0 <nl> + <nl> def __init__ ( self , body ) : <nl> + global unique_counter <nl> if isinstance ( body , types . FunctionType ) : <nl> - self . args = body . func_code . co_varnames <nl> + self . args = [ ' arg ' + str ( i ) + ' _ ' + str ( self . unique_counter ) <nl> + for i in range ( body . func_code . co_argcount ) ] <nl> + self . unique_counter + = 1 <nl> res = body ( * [ JSONExpression ( internal . Var ( arg ) ) for arg in self . args ] ) <nl> if not isinstance ( res , BaseQuery ) : <nl> res = expr ( res ) <nl>
python client , group by for
rethinkdb/rethinkdb
01b8e9dfe54971d3e117e3bc70f40a8eb131faaa
2012-09-27T22:52:12Z
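A rough usage sketch of the group_by helper and the count / sum / average group-by objects added in this diff, written against the old Python driver's query module; the table name, connection, and exact import path are assumptions, not part of the commit:

    from rethinkdb import query as r   # assumption: this is how the old driver exposes query.py

    # Hypothetical table of orders with 'category', 'region' and 'price' attributes;
    # assumes the driver exposes table() alongside the new helpers.
    orders = r.table('orders')

    # Count rows per category using the predefined `count` group-by object.
    per_category_counts = orders.group_by('category', r.count)

    # Average price per category; `average` supplies mapping, base, reduction and finalizer.
    per_category_avg = orders.group_by('category', r.average('price'))

    # Grouping on more than one attribute: every leading argument becomes part of the key.
    per_cat_and_region = orders.group_by('category', 'region', r.sum('price'))

    # conn = r.connect('localhost', 28015)      # hypothetical connection
    # print(per_category_counts.run(conn))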
mmm a / src / objective / rank_obj . cc <nl> ppp b / src / objective / rank_obj . cc <nl> class LambdaRankObjNDCG : public LambdaRankObj { <nl> for ( size_t i = 0 ; i < pairs . size ( ) ; + + i ) { <nl> unsigned pos_idx = pairs [ i ] . pos_index ; <nl> unsigned neg_idx = pairs [ i ] . neg_index ; <nl> - float pos_loginv = 1 . 0f / std : : log ( pos_idx + 2 . 0f ) ; <nl> - float neg_loginv = 1 . 0f / std : : log ( neg_idx + 2 . 0f ) ; <nl> + float pos_loginv = 1 . 0f / std : : log2 ( pos_idx + 2 . 0f ) ; <nl> + float neg_loginv = 1 . 0f / std : : log2 ( neg_idx + 2 . 0f ) ; <nl> int pos_label = static_cast < int > ( sorted_list [ pos_idx ] . label ) ; <nl> int neg_label = static_cast < int > ( sorted_list [ neg_idx ] . label ) ; <nl> bst_float original = <nl>
Fix issue introduced by correction to log2 ( )
dmlc/xgboost
1683e074615494320e13f38eca3f5317e295455d
2016-12-04T19:11:56Z
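For context on the one-line change above: the conventional NDCG position discount is 1 / log2(rank + 2); using the natural log instead inflates every discount, and hence every pairwise lambda weight built from them, by a constant 1 / ln 2 ≈ 1.4427. A small NumPy illustration of the two discount curves (plain NumPy, not xgboost code):

    import numpy as np

    positions = np.arange(6)                 # 0-based ranks, like pos_idx / neg_idx above
    natural = 1.0 / np.log(positions + 2.0)  # what the code computed before the fix
    base2   = 1.0 / np.log2(positions + 2.0) # conventional NDCG discount restored by the fix

    print(np.round(base2, 4))                # e.g. [1. 0.6309 0.5 0.4307 0.3869 0.3562]
    print(np.round(natural / base2, 4))      # constant ~1.4427 == 1/ln(2) at every position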
mmm a / docs / api / python / ndarray / ndarray . md <nl> ppp b / docs / api / python / ndarray / ndarray . md <nl> The ` ndarray ` package provides several classes : <nl> relu <nl> sigmoid <nl> erf <nl> + erfinv <nl> ` ` ` <nl> <nl> # # # More <nl> mmm a / docs / api / python / symbol / symbol . md <nl> ppp b / docs / api / python / symbol / symbol . md <nl> Composite multiple symbols into a new one by an operator . <nl> relu <nl> sigmoid <nl> erf <nl> + erfinv <nl> ` ` ` <nl> <nl> # # # More <nl> new file mode 100644 <nl> index 00000000000 . . 8d718ade656 <nl> mmm / dev / null <nl> ppp b / src / operator / contrib / erfinv - inl . h <nl> <nl> + / * <nl> + * Copyright ( c ) 2014 Indiana University <nl> + * All rights reserved . <nl> + * Written by Prof . Gary L . Pavlis , Dept . of Geol . Sci . , <nl> + * Indiana University , Bloomington , IN <nl> + * This software is licensed under the New BSD license : <nl> + * Redistribution and use in source and binary forms , <nl> + * with or without modification , are permitted provided <nl> + * that the following conditions are met : <nl> + * Redistributions of source code must retain the above <nl> + * copyright notice , this list of conditions and the <nl> + * following disclaimer . <nl> + * Redistributions in binary form must reproduce the <nl> + * above copyright notice , this list of conditions and <nl> + * the following disclaimer in the documentation and / or <nl> + * other materials provided with the distribution . <nl> + * Neither the name of Indiana University nor <nl> + * the names of its contributors may be used to endorse <nl> + * or promote products derived from this software without <nl> + * specific prior written permission . <nl> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND <nl> + * CONTRIBUTORS " AS IS " AND ANY EXPRESS OR IMPLIED <nl> + * WARRANTIES , INCLUDING , BUT NOT LIMITED TO , THE IMPLIED <nl> + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A <nl> + * PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL <nl> + * THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY <nl> + * DIRECT , INDIRECT , INCIDENTAL , SPECIAL , EXEMPLARY , OR <nl> + * CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT LIMITED TO , <nl> + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF <nl> + * USE , DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) <nl> + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY , WHETHER <nl> + * IN CONTRACT , STRICT LIABILITY , OR TORT ( INCLUDING <nl> + * NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE <nl> + * USE OF THIS SOFTWARE , EVEN IF ADVISED OF THE <nl> + * POSSIBILITY OF SUCH DAMAGE . <nl> + * / <nl> + / * <nl> + * The next function is taken from <nl> + * https : / / github . com / antelopeusersgroup / antelope_contrib / blob / master / lib / location / libgenloc / erfinv . c . <nl> + * Output was modified to be inf or - inf when input is 1 or - 1 . <nl> + * / <nl> + # ifndef MXNET_OPERATOR_CONTRIB_ERFINV_INL_H_ <nl> + # define MXNET_OPERATOR_CONTRIB_ERFINV_INL_H_ <nl> + <nl> + # define _USE_MATH_DEFINES <nl> + <nl> + # include < mxnet / base . h > <nl> + # include < limits > <nl> + # include " math . h " <nl> + <nl> + namespace mxnet { <nl> + namespace op { <nl> + namespace mshadow_op { <nl> + <nl> + / * ! \ brief inverse gauss error function * / <nl> + struct erfinv : public mxnet_op : : tunable { <nl> + template < typename DType > <nl> + MSHADOW_XINLINE static DType Map ( DType v ) { <nl> + / * Function to calculate inverse error function . 
Rational approximation <nl> + is used to generate an initial approximation , which is then improved to <nl> + full accuracy by two steps of Newton ' s method . Code is a direct <nl> + translation of the erfinv m file in matlab version 2 . 0 . <nl> + Author : Gary L . Pavlis , Indiana University <nl> + Date : February 1996 <nl> + * / <nl> + const double central_range = 0 . 7 ; <nl> + double y = static_cast < double > ( v ) ; <nl> + double y_fab = std : : fabs ( y ) ; <nl> + / * working variables * / <nl> + double x = 0 . 0 ; <nl> + double z , num , dem ; <nl> + / * coefficients in rational expansion * / <nl> + double a [ 4 ] = { 0 . 886226899 , - 1 . 645349621 , 0 . 914624893 , - 0 . 140543331 } ; <nl> + double b [ 4 ] = { - 2 . 118377725 , 1 . 442710462 , - 0 . 329097515 , 0 . 012229801 } ; <nl> + double c [ 4 ] = { - 1 . 970840454 , - 1 . 624906493 , 3 . 429567803 , 1 . 641345311 } ; <nl> + double d [ 2 ] = { 3 . 543889200 , 1 . 637067800 } ; <nl> + if ( y_fab > 1 . 0 ) { <nl> + / * This needs IEEE constant * / <nl> + return DType ( std : : numeric_limits < double > : : quiet_NaN ( ) ) ; <nl> + } else if ( y_fab = = 1 . 0 ) { <nl> + return DType ( ( std : : copysign ( 1 . 0 , y ) ) * std : : numeric_limits < double > : : infinity ( ) ) ; <nl> + } else if ( y_fab < = central_range ) { <nl> + z = y * y ; <nl> + num = ( ( ( a [ 3 ] * z + a [ 2 ] ) * z + a [ 1 ] ) * z + a [ 0 ] ) ; <nl> + dem = ( ( ( ( b [ 3 ] * z + b [ 2 ] ) * z + b [ 1 ] ) * z + b [ 0 ] ) * z + 1 . 0 ) ; <nl> + x = y * num / dem ; <nl> + } else { <nl> + z = std : : sqrt ( - std : : log ( ( 1 . 0 - y_fab ) / 2 . 0 ) ) ; <nl> + num = ( ( c [ 3 ] * z + c [ 2 ] ) * z + c [ 1 ] ) * z + c [ 0 ] ; <nl> + dem = ( d [ 1 ] * z + d [ 0 ] ) * z + 1 . 0 ; <nl> + x = ( std : : copysign ( 1 . 0 , y ) ) * num / dem ; <nl> + } <nl> + / * Two steps of Newton - Raphson correction * / <nl> + x = x - ( std : : erf ( x ) - y ) / ( ( 2 . 0 / std : : sqrt ( M_PI ) ) * std : : exp ( - x * x ) ) ; <nl> + x = x - ( std : : erf ( x ) - y ) / ( ( 2 . 0 / std : : sqrt ( M_PI ) ) * std : : exp ( - x * x ) ) ; <nl> + <nl> + return DType ( x ) ; <nl> + } <nl> + } ; <nl> + <nl> + } / / namespace mshadow_op <nl> + } / / namespace op <nl> + } / / namespace mxnet <nl> + <nl> + # endif / / MXNET_OPERATOR_CONTRIB_ERFINV_INL_H_ <nl> mmm a / src / operator / mshadow_op . h <nl> ppp b / src / operator / mshadow_op . h <nl> <nl> # include " math_functions - inl . h " <nl> # include " special_functions - inl . h " <nl> # include " . / operator_tune . h " <nl> + # include " . / contrib / erfinv - inl . h " <nl> <nl> # ifdef __CUDACC__ <nl> # include < cuda_fp16 . h > <nl> struct softrelu : public mxnet_op : : tunable { <nl> <nl> MXNET_UNARY_MATH_OP ( softrelu_grad , - math : : expm1 ( - a ) ) ; <nl> <nl> + MXNET_UNARY_MATH_OP ( erfinv_grad , 0 . 5 * math : : sqrt ( PI ) * math : : exp ( math : : sqr ( erfinv : : Map ( a ) ) ) ) ; <nl> + <nl> MXNET_UNARY_MATH_OP ( erf_grad , 2 . 0 / math : : sqrt ( PI ) * math : : exp ( - ( a * a ) ) ) ; <nl> <nl> MXNET_SIMPLE_UNARY_MATH_OP ( erf ) ; <nl> mmm a / src / operator / operator_tune . cc <nl> ppp b / src / operator / operator_tune . 
cc <nl> IMPLEMENT_UNARY_WORKLOAD_FWD ( mxnet : : op : : mshadow_op : : log2 ) ; / / NOLINT ( ) <nl> IMPLEMENT_UNARY_WORKLOAD_BWD ( mxnet : : op : : mshadow_op : : log2_grad ) ; / / NOLINT ( ) <nl> IMPLEMENT_UNARY_WORKLOAD_FWD ( mxnet : : op : : mshadow_op : : log10 ) ; / / NOLINT ( ) <nl> IMPLEMENT_UNARY_WORKLOAD_BWD ( mxnet : : op : : mshadow_op : : log10_grad ) ; / / NOLINT ( ) <nl> - IMPLEMENT_UNARY_WORKLOAD_FWD ( mxnet : : op : : mshadow_op : : sin ) ; / / NOLINT ( ) <nl> IMPLEMENT_UNARY_WORKLOAD_FWD ( mxnet : : op : : mshadow_op : : erf ) ; / / NOLINT ( ) <nl> IMPLEMENT_UNARY_WORKLOAD_BWD ( mxnet : : op : : mshadow_op : : erf_grad ) ; / / NOLINT ( ) <nl> + IMPLEMENT_UNARY_WORKLOAD_FWD ( mxnet : : op : : mshadow_op : : erfinv ) ; / / NOLINT ( ) <nl> + IMPLEMENT_UNARY_WORKLOAD_BWD ( mxnet : : op : : mshadow_op : : erfinv_grad ) ; / / NOLINT ( ) <nl> + IMPLEMENT_UNARY_WORKLOAD_FWD ( mxnet : : op : : mshadow_op : : sin ) ; / / NOLINT ( ) <nl> IMPLEMENT_UNARY_WORKLOAD_BWD ( mxnet : : op : : mshadow_op : : sin_grad ) ; / / NOLINT ( ) <nl> IMPLEMENT_UNARY_WORKLOAD_FWD ( mxnet : : op : : mshadow_op : : sinh ) ; / / NOLINT ( ) <nl> IMPLEMENT_UNARY_WORKLOAD_BWD ( mxnet : : op : : mshadow_op : : sinh_grad ) ; / / NOLINT ( ) <nl> mmm a / src / operator / tensor / elemwise_unary_op_basic . cc <nl> ppp b / src / operator / tensor / elemwise_unary_op_basic . cc <nl> MXNET_OPERATOR_REGISTER_BINARY ( _backward_erf ) <nl> . set_attr < FCompute > ( " FCompute < cpu > " , <nl> ElemwiseBinaryOp : : Compute < cpu , unary_bwd < mshadow_op : : erf_grad > > ) ; <nl> <nl> + / / erfinv <nl> + MXNET_OPERATOR_REGISTER_UNARY ( erfinv ) <nl> + . describe ( R " code ( Returns element - wise inverse gauss error function of the input . <nl> + <nl> + Example : : <nl> + <nl> + erfinv ( [ 0 , 0 . 5 . , - 1 . ] ) = [ 0 . , 0 . 4769 , - inf ] <nl> + <nl> + ) code " ADD_FILELINE ) <nl> + . set_attr < FCompute > ( " FCompute < cpu > " , UnaryOp : : Compute < cpu , mshadow_op : : erfinv > ) <nl> + . set_attr < nnvm : : FGradient > ( " FGradient " , ElemwiseGradUseIn { " _backward_erfinv " } ) ; <nl> + <nl> + MXNET_OPERATOR_REGISTER_BINARY ( _backward_erfinv ) <nl> + . set_attr < FCompute > ( " FCompute < cpu > " , <nl> + ElemwiseBinaryOp : : Compute < cpu , unary_bwd < mshadow_op : : erfinv_grad > > ) ; <nl> + <nl> / / rcbrt <nl> MXNET_OPERATOR_REGISTER_UNARY ( rcbrt ) <nl> . describe ( R " code ( Returns element - wise inverse cube - root value of the input . <nl> mmm a / src / operator / tensor / elemwise_unary_op_basic . cu <nl> ppp b / src / operator / tensor / elemwise_unary_op_basic . cu <nl> NNVM_REGISTER_OP ( _backward_erf ) <nl> . set_attr < FCompute > ( " FCompute < gpu > " , <nl> ElemwiseBinaryOp : : Compute < gpu , unary_bwd < mshadow_op : : erf_grad > > ) ; <nl> <nl> + / / erfinv <nl> + NNVM_REGISTER_OP ( erfinv ) <nl> + . set_attr < FCompute > ( " FCompute < gpu > " , UnaryOp : : Compute < gpu , mshadow_op : : erfinv > ) ; <nl> + <nl> + NNVM_REGISTER_OP ( _backward_erfinv ) <nl> + . set_attr < FCompute > ( " FCompute < gpu > " , <nl> + ElemwiseBinaryOp : : Compute < gpu , unary_bwd < mshadow_op : : erfinv_grad > > ) ; <nl> + <nl> / / copy <nl> NNVM_REGISTER_OP ( _copy ) <nl> . set_attr < FCompute > ( " FCompute < gpu > " , UnaryOp : : IdentityCompute < gpu > ) <nl> mmm a / tests / nightly / apache_rat_license_check / rat - excludes <nl> ppp b / tests / nightly / apache_rat_license_check / rat - excludes <nl> _mask . pyx <nl> coco . py <nl> base . pyi <nl> special_functions - inl . h <nl> + erfinv - inl . 
h <nl> im2col . cuh <nl> im2col . h <nl> pool . h <nl> deformable_im2col . h <nl> REQUIRE <nl> include / * <nl> . * . iml <nl> - . * . json . ref <nl> \ No newline at end of file <nl> + . * . json . ref <nl> mmm a / tests / python / unittest / test_operator . py <nl> ppp b / tests / python / unittest / test_operator . py <nl> def test_special_functions_using_scipy ( ) : <nl> <nl> # erf <nl> mathematical_core ( " erf " , lambda x : mx . sym . erf ( x ) , lambda x : scipy_special . erf ( x ) , <nl> - lambda x : 2 . 0 / math . sqrt ( math . pi ) * math . exp ( - ( x * * 2 ) ) , 0 . 5 , 0 . 5 ) <nl> + lambda x : 2 . 0 / math . sqrt ( math . pi ) * np . exp ( - ( x * * 2 ) ) , 0 . 5 , 0 . 5 ) <nl> + <nl> + # erfinv <nl> + mathematical_core ( " erfinv " , lambda x : mx . sym . erfinv ( x ) , lambda x : scipy_special . erfinv ( x ) , <nl> + lambda x : 0 . 5 * math . sqrt ( math . pi ) * np . exp ( scipy_special . erfinv ( x ) * * 2 ) , 0 . 5 , 0 . 5 ) <nl> <nl> <nl> def rounding ( name , forward_mxnet_call , forward_numpy_call , data_init = 5 . , grad_init = 2 . ) : <nl> mmm a / tools / license_header . py <nl> ppp b / tools / license_header . py <nl> <nl> ' src / operator / nn / im2col . cuh ' , <nl> <nl> # Licenses in headers <nl> + ' src / operator / contrib / erfinv - inl . h ' , <nl> ' docs / _static / searchtools_custom . js ' , <nl> ' docs / _static / js / clipboard . js ' , <nl> ' docs / _static / js / clipboard . min . js ' , <nl>
Add erfinv operator for calculating inverse error function ( )
apache/incubator-mxnet
b86ccf1be704f5f97e085f3fb21e781bceac884d
2019-01-22T16:36:31Z
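The record above registers `erfinv` together with its gradient kernel `erfinv_grad`. A minimal sketch of how the new operator can be exercised from the Python frontend (assuming an MXNet build that includes this change, plus NumPy and SciPy); it mirrors the check added in `test_operator.py` and is illustrative rather than code taken from the commit:

```python
# Illustrative check of the new erfinv operator against SciPy; not part of the commit.
import numpy as np
from mxnet import nd, autograd
from scipy import special as scipy_special

x_np = np.array([-0.9, -0.5, 0.0, 0.3, 0.7])

# Forward pass: compare against the SciPy reference implementation.
x = nd.array(x_np)
np.testing.assert_allclose(nd.erfinv(x).asnumpy(),
                           scipy_special.erfinv(x_np), rtol=1e-4)

# Backward pass: d/dx erfinv(x) = 0.5 * sqrt(pi) * exp(erfinv(x)**2),
# which is the erfinv_grad kernel added to mshadow_op.h.
x.attach_grad()
with autograd.record():
    y = nd.erfinv(x)
y.backward()
expected_grad = 0.5 * np.sqrt(np.pi) * np.exp(scipy_special.erfinv(x_np) ** 2)
np.testing.assert_allclose(x.grad.asnumpy(), expected_grad, rtol=1e-4)
```

The tolerance is loose because `nd.array` defaults to float32 while the SciPy reference is computed in float64.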
mmm a / src / python / grpcio / grpc / _cython / _cygrpc / aio / call . pyx . pxi <nl> ppp b / src / python / grpcio / grpc / _cython / _cygrpc / aio / call . pyx . pxi <nl> cdef class _AioCall ( GrpcCallWrapper ) : <nl> self , <nl> self . _loop <nl> ) <nl> - if received_message : <nl> + if received_message is not None : <nl> return received_message <nl> else : <nl> return EOF <nl> mmm a / src / python / grpcio / grpc / _cython / _cygrpc / aio / callback_common . pyx . pxi <nl> ppp b / src / python / grpcio / grpc / _cython / _cygrpc / aio / callback_common . pyx . pxi <nl> async def _receive_message ( GrpcCallWrapper grpc_call_wrapper , <nl> # <nl> # Since they all indicates finish , they are better be merged . <nl> _LOGGER . debug ( ' Failed to receive any message from Core ' ) <nl> + # NOTE ( lidiz ) The returned message might be an empty bytess ( aka . b ' ' ) . <nl> + # Please explicitly check if it is None or falsified string object ! <nl> return receive_op . message ( ) <nl> <nl> <nl> mmm a / src / python / grpcio_tests / tests_aio / unit / _test_server . py <nl> ppp b / src / python / grpcio_tests / tests_aio / unit / _test_server . py <nl> async def StreamingOutputCall ( <nl> await asyncio . sleep ( <nl> datetime . timedelta ( microseconds = response_parameters . <nl> interval_us ) . total_seconds ( ) ) <nl> - yield messages_pb2 . StreamingOutputCallResponse ( <nl> - payload = messages_pb2 . Payload ( type = request . response_type , <nl> - body = b ' \ x00 ' * <nl> - response_parameters . size ) ) <nl> + if response_parameters . size ! = 0 : <nl> + yield messages_pb2 . StreamingOutputCallResponse ( <nl> + payload = messages_pb2 . Payload ( type = request . response_type , <nl> + body = b ' \ x00 ' * <nl> + response_parameters . size ) ) <nl> + else : <nl> + yield messages_pb2 . StreamingOutputCallResponse ( ) <nl> <nl> # Next methods are extra ones that are registred programatically <nl> # when the sever is instantiated . They are not being provided by <nl> async def FullDuplexCall ( self , request_async_iterator , context ) : <nl> await asyncio . sleep ( <nl> datetime . timedelta ( microseconds = response_parameters . <nl> interval_us ) . total_seconds ( ) ) <nl> - yield messages_pb2 . StreamingOutputCallResponse ( <nl> - payload = messages_pb2 . Payload ( type = request . payload . type , <nl> - body = b ' \ x00 ' * <nl> - response_parameters . size ) ) <nl> + if response_parameters . size ! = 0 : <nl> + yield messages_pb2 . StreamingOutputCallResponse ( <nl> + payload = messages_pb2 . Payload ( type = request . payload . type , <nl> + body = b ' \ x00 ' * <nl> + response_parameters . size ) ) <nl> + else : <nl> + yield messages_pb2 . StreamingOutputCallResponse ( ) <nl> <nl> <nl> def _create_extra_generic_handler ( servicer : TestServiceServicer ) : <nl> mmm a / src / python / grpcio_tests / tests_aio / unit / call_test . py <nl> ppp b / src / python / grpcio_tests / tests_aio / unit / call_test . py <nl> async def test_time_remaining ( self ) : <nl> <nl> self . assertEqual ( grpc . StatusCode . OK , await call . code ( ) ) <nl> <nl> + async def test_empty_responses ( self ) : <nl> + # Prepares the request <nl> + request = messages_pb2 . StreamingOutputCallRequest ( ) <nl> + for _ in range ( _NUM_STREAM_RESPONSES ) : <nl> + request . response_parameters . append ( <nl> + messages_pb2 . ResponseParameters ( ) ) <nl> + <nl> + # Invokes the actual RPC <nl> + call = self . _stub . 
StreamingOutputCall ( request ) <nl> + <nl> + for _ in range ( _NUM_STREAM_RESPONSES ) : <nl> + response = await call . read ( ) <nl> + self . assertIs ( type ( response ) , <nl> + messages_pb2 . StreamingOutputCallResponse ) <nl> + self . assertEqual ( b ' ' , response . SerializeToString ( ) ) <nl> + <nl> + self . assertEqual ( grpc . StatusCode . OK , await call . code ( ) ) <nl> + <nl> <nl> class TestStreamUnaryCall ( _MulticallableTestMixin , AioTestBase ) : <nl> <nl> async def test_timeout ( self ) : <nl> _STREAM_OUTPUT_REQUEST_ONE_RESPONSE = messages_pb2 . StreamingOutputCallRequest ( ) <nl> _STREAM_OUTPUT_REQUEST_ONE_RESPONSE . response_parameters . append ( <nl> messages_pb2 . ResponseParameters ( size = _RESPONSE_PAYLOAD_SIZE ) ) <nl> + _STREAM_OUTPUT_REQUEST_ONE_EMPTY_RESPONSE = messages_pb2 . StreamingOutputCallRequest ( <nl> + ) <nl> + _STREAM_OUTPUT_REQUEST_ONE_EMPTY_RESPONSE . response_parameters . append ( <nl> + messages_pb2 . ResponseParameters ( ) ) <nl> <nl> <nl> class TestStreamStreamCall ( _MulticallableTestMixin , AioTestBase ) : <nl> async def test_normal_iterable_requests ( self ) : <nl> <nl> self . assertEqual ( await call . code ( ) , grpc . StatusCode . OK ) <nl> <nl> + async def test_empty_ping_pong ( self ) : <nl> + call = self . _stub . FullDuplexCall ( ) <nl> + for _ in range ( _NUM_STREAM_RESPONSES ) : <nl> + await call . write ( _STREAM_OUTPUT_REQUEST_ONE_EMPTY_RESPONSE ) <nl> + response = await call . read ( ) <nl> + self . assertEqual ( b ' ' , response . SerializeToString ( ) ) <nl> + await call . done_writing ( ) <nl> + self . assertEqual ( await call . code ( ) , grpc . StatusCode . OK ) <nl> + <nl> <nl> if __name__ = = ' __main__ ' : <nl> logging . basicConfig ( level = logging . DEBUG ) <nl>
Fix the empty response handling in streaming RPC
grpc/grpc
2996c03114726f9607819c1419e38478c355eb7b
2020-11-20T20:16:21Z
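The fix above makes `call.read()` deliver an empty but valid message instead of treating `b''` as end-of-stream. A short client-side sketch of the resulting contract, written against `grpc.aio`; the channel target is a placeholder and the stub import path is assumed from the gRPC test tree, so treat it as an illustration rather than code from the change:

```python
# Illustrative client loop: only the EOF sentinel ends the stream; an empty
# response (b'') is delivered as a normal message.  Target/port are placeholders.
import asyncio
import grpc

from src.proto.grpc.testing import messages_pb2, test_pb2_grpc  # assumed test protos


async def drain_stream(target: str = 'localhost:50051') -> int:
    received = 0
    async with grpc.aio.insecure_channel(target) as channel:
        stub = test_pb2_grpc.TestServiceStub(channel)
        request = messages_pb2.StreamingOutputCallRequest()
        # size == 0 asks the test server for an empty StreamingOutputCallResponse.
        request.response_parameters.append(messages_pb2.ResponseParameters())
        call = stub.StreamingOutputCall(request)
        while True:
            response = await call.read()
            if response is grpc.aio.EOF:  # end-of-stream, not just a falsy message
                break
            received += 1
    return received


if __name__ == '__main__':
    print(asyncio.run(drain_stream()))
```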
mmm a / folly / experimental / pushmi / forwards . h <nl> ppp b / folly / experimental / pushmi / forwards . h <nl> template < PUSHMI_TYPE_CONSTRAINT ( SemiMovable ) . . . TN > <nl> class single_sender ; <nl> <nl> template < PUSHMI_TYPE_CONSTRAINT ( SemiMovable ) . . . TN > <nl> - class many_sender ; <nl> + class sender ; <nl> <nl> template < PUSHMI_TYPE_CONSTRAINT ( SemiMovable ) . . . TN > <nl> class flow_single_sender ; <nl> <nl> template < PUSHMI_TYPE_CONSTRAINT ( SemiMovable ) . . . TN > <nl> - class flow_many_sender ; <nl> + class flow_sender ; <nl> <nl> template < class E = std : : exception_ptr , class . . . VN > <nl> class any_receiver ; <nl> template < class E = std : : exception_ptr , class . . . VN > <nl> class any_single_sender ; <nl> <nl> template < class E = std : : exception_ptr , class . . . VN > <nl> - class any_many_sender ; <nl> + class any_sender ; <nl> <nl> template < class PE = std : : exception_ptr , class E = PE , class . . . VN > <nl> class any_flow_single_sender ; <nl> template < <nl> class PV = std : : ptrdiff_t , <nl> class E = PE , <nl> class . . . VN > <nl> - class any_flow_many_sender ; <nl> + class any_flow_sender ; <nl> <nl> template < class E = std : : exception_ptr > <nl> class any_executor ; <nl> template < > <nl> struct construct_deduced < single_sender > ; <nl> <nl> template < > <nl> - struct construct_deduced < many_sender > ; <nl> + struct construct_deduced < sender > ; <nl> <nl> template < > <nl> struct construct_deduced < flow_single_sender > ; <nl> <nl> template < > <nl> - struct construct_deduced < flow_many_sender > ; <nl> + struct construct_deduced < flow_sender > ; <nl> <nl> } / / namespace pushmi <nl> } / / namespace folly <nl> mmm a / folly / experimental / pushmi / o / extension_operators . h <nl> ppp b / folly / experimental / pushmi / o / extension_operators . h <nl> <nl> # include < folly / experimental / pushmi / executor / executor . h > <nl> # include < folly / experimental / pushmi / executor / inline . h > <nl> # include < folly / experimental / pushmi / executor / trampoline . h > <nl> - # include < folly / experimental / pushmi / sender / flow_many_sender . h > <nl> + # include < folly / experimental / pushmi / sender / flow_sender . h > <nl> # include < folly / experimental / pushmi / receiver / flow_receiver . h > <nl> # include < folly / experimental / pushmi / sender / flow_single_sender . h > <nl> # include < folly / experimental / pushmi / forwards . h > <nl> - # include < folly / experimental / pushmi / sender / many_sender . h > <nl> + # include < folly / experimental / pushmi / sender / sender . h > <nl> # include < folly / experimental / pushmi / piping . h > <nl> # include < folly / experimental / pushmi / properties . h > <nl> # include < folly / experimental / pushmi / receiver / receiver . h > <nl> struct make_sender ; <nl> template < > <nl> struct make_sender < single_sender_tag > : construct_deduced < single_sender > { } ; <nl> template < > <nl> - struct make_sender < sender_tag > : construct_deduced < many_sender > { } ; <nl> + struct make_sender < sender_tag > : construct_deduced < sender > { } ; <nl> template < > <nl> struct make_sender < flow_single_sender_tag > <nl> : construct_deduced < flow_single_sender > { } ; <nl> template < > <nl> struct make_sender < flow_sender_tag > <nl> - : construct_deduced < flow_many_sender > { } ; <nl> + : construct_deduced < flow_sender > { } ; <nl> <nl> PUSHMI_INLINE_VAR constexpr struct sender_from_fn { <nl> PUSHMI_TEMPLATE ( class In , class . . . 
FN ) <nl> mmm a / folly / experimental / pushmi / o / from . h <nl> ppp b / folly / experimental / pushmi / o / from . h <nl> <nl> * / <nl> # pragma once <nl> <nl> - # include < folly / experimental / pushmi / sender / flow_many_sender . h > <nl> - # include < folly / experimental / pushmi / sender / many_sender . h > <nl> + # include < folly / experimental / pushmi / sender / flow_sender . h > <nl> + # include < folly / experimental / pushmi / sender / sender . h > <nl> # include < folly / experimental / pushmi / sender / properties . h > <nl> # include < folly / experimental / pushmi / o / extension_operators . h > <nl> # include < folly / experimental / pushmi / o / submit . h > <nl> similarity index 75 % <nl> rename from folly / experimental / pushmi / sender / flow_many_sender . h <nl> rename to folly / experimental / pushmi / sender / flow_sender . h <nl> mmm a / folly / experimental / pushmi / sender / flow_many_sender . h <nl> ppp b / folly / experimental / pushmi / sender / flow_sender . h <nl> namespace folly { <nl> namespace pushmi { <nl> <nl> template < class PE , class PV , class E , class . . . VN > <nl> - class any_flow_many_sender <nl> + class any_flow_sender <nl> : public flow_sender_tag : : with_values < VN . . . > : : template with_error < E > { <nl> using insitu_t = void * [ 2 ] ; <nl> union data { <nl> class any_flow_many_sender <nl> static constexpr vtable const noop_ { } ; <nl> vtable const * vptr_ = & noop_ ; <nl> template < class Wrapped > <nl> - any_flow_many_sender ( Wrapped obj , std : : false_type ) : any_flow_many_sender ( ) { <nl> + any_flow_sender ( Wrapped obj , std : : false_type ) : any_flow_sender ( ) { <nl> struct s { <nl> static void op ( data & src , data * dst ) { <nl> if ( dst ) <nl> class any_flow_many_sender <nl> vptr_ = & vtbl ; <nl> } <nl> template < class Wrapped > <nl> - any_flow_many_sender ( Wrapped obj , std : : true_type ) noexcept <nl> - : any_flow_many_sender ( ) { <nl> + any_flow_sender ( Wrapped obj , std : : true_type ) noexcept <nl> + : any_flow_sender ( ) { <nl> struct s { <nl> static void op ( data & src , data * dst ) { <nl> if ( dst ) <nl> class any_flow_many_sender <nl> } <nl> template < class T , class U = std : : decay_t < T > > <nl> using wrapped_t = <nl> - std : : enable_if_t < ! std : : is_same < U , any_flow_many_sender > : : value , U > ; <nl> + std : : enable_if_t < ! std : : is_same < U , any_flow_sender > : : value , U > ; <nl> public : <nl> - any_flow_many_sender ( ) = default ; <nl> - any_flow_many_sender ( any_flow_many_sender & & that ) noexcept <nl> - : any_flow_many_sender ( ) { <nl> + any_flow_sender ( ) = default ; <nl> + any_flow_sender ( any_flow_sender & & that ) noexcept <nl> + : any_flow_sender ( ) { <nl> that . vptr_ - > op_ ( that . data_ , & data_ ) ; <nl> std : : swap ( that . 
vptr_ , vptr_ ) ; <nl> } <nl> PUSHMI_TEMPLATE ( class Wrapped ) <nl> ( requires FlowSender < wrapped_t < Wrapped > > ) <nl> - explicit any_flow_many_sender ( Wrapped obj ) noexcept ( insitu < Wrapped > ( ) ) <nl> - : any_flow_many_sender { std : : move ( obj ) , bool_ < insitu < Wrapped > ( ) > { } } { } <nl> - ~ any_flow_many_sender ( ) { <nl> + explicit any_flow_sender ( Wrapped obj ) noexcept ( insitu < Wrapped > ( ) ) <nl> + : any_flow_sender { std : : move ( obj ) , bool_ < insitu < Wrapped > ( ) > { } } { } <nl> + ~ any_flow_sender ( ) { <nl> vptr_ - > op_ ( data_ , nullptr ) ; <nl> } <nl> - any_flow_many_sender & operator = ( any_flow_many_sender & & that ) noexcept { <nl> - this - > ~ any_flow_many_sender ( ) ; <nl> - new ( ( void * ) this ) any_flow_many_sender ( std : : move ( that ) ) ; <nl> + any_flow_sender & operator = ( any_flow_sender & & that ) noexcept { <nl> + this - > ~ any_flow_sender ( ) ; <nl> + new ( ( void * ) this ) any_flow_sender ( std : : move ( that ) ) ; <nl> return * this ; <nl> } <nl> PUSHMI_TEMPLATE ( class Out ) <nl> class any_flow_many_sender <nl> <nl> / / Class static definitions : <nl> template < class PE , class PV , class E , class . . . VN > <nl> - constexpr typename any_flow_many_sender < PE , PV , E , VN . . . > : : vtable const <nl> - any_flow_many_sender < PE , PV , E , VN . . . > : : noop_ ; <nl> + constexpr typename any_flow_sender < PE , PV , E , VN . . . > : : vtable const <nl> + any_flow_sender < PE , PV , E , VN . . . > : : noop_ ; <nl> <nl> template < class SF > <nl> - class flow_many_sender < SF > { <nl> + class flow_sender < SF > { <nl> SF sf_ ; <nl> <nl> public : <nl> using sender_category = flow_sender_tag ; <nl> <nl> - constexpr flow_many_sender ( ) = default ; <nl> - constexpr explicit flow_many_sender ( SF sf ) <nl> + constexpr flow_sender ( ) = default ; <nl> + constexpr explicit flow_sender ( SF sf ) <nl> : sf_ ( std : : move ( sf ) ) { } <nl> <nl> PUSHMI_TEMPLATE ( class Out ) <nl> class flow_many_sender < SF > { <nl> } ; <nl> <nl> template < PUSHMI_TYPE_CONSTRAINT ( FlowSender ) Data , class DSF > <nl> - class flow_many_sender < Data , DSF > { <nl> + class flow_sender < Data , DSF > { <nl> Data data_ ; <nl> DSF sf_ ; <nl> <nl> class flow_many_sender < Data , DSF > { <nl> FlowSender < Data > , <nl> " Data must be a flow sender " ) ; <nl> <nl> - constexpr flow_many_sender ( ) = default ; <nl> - constexpr explicit flow_many_sender ( Data data ) <nl> + constexpr flow_sender ( ) = default ; <nl> + constexpr explicit flow_sender ( Data data ) <nl> : data_ ( std : : move ( data ) ) { } <nl> - constexpr flow_many_sender ( Data data , DSF sf ) <nl> + constexpr flow_sender ( Data data , DSF sf ) <nl> : data_ ( std : : move ( data ) ) , sf_ ( std : : move ( sf ) ) { } <nl> <nl> PUSHMI_TEMPLATE ( class Out ) <nl> class flow_many_sender < Data , DSF > { <nl> } ; <nl> <nl> template < > <nl> - class flow_many_sender < > <nl> - : public flow_many_sender < ignoreSF > { <nl> + class flow_sender < > <nl> + : public flow_sender < ignoreSF > { <nl> public : <nl> - flow_many_sender ( ) = default ; <nl> + flow_sender ( ) = default ; <nl> } ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / make_flow_many_sender <nl> + / / make_flow_sender <nl> PUSHMI_INLINE_VAR constexpr struct make_flow_many_sender_fn { <nl> inline auto operator ( ) ( ) const { <nl> - return flow_many_sender < ignoreSF > { } ; <nl> + return flow_sender 
< ignoreSF > { } ; <nl> } <nl> PUSHMI_TEMPLATE ( class SF ) <nl> ( requires True < > PUSHMI_BROKEN_SUBSUMPTION ( & & not Sender < SF > ) ) <nl> auto operator ( ) ( SF sf ) const { <nl> - return flow_many_sender < SF > { std : : move ( sf ) } ; <nl> + return flow_sender < SF > { std : : move ( sf ) } ; <nl> } <nl> PUSHMI_TEMPLATE ( class Data ) <nl> ( requires True < > & & FlowSender < Data > ) <nl> auto operator ( ) ( Data d ) const { <nl> - return flow_many_sender < Data , passDSF > { std : : move ( d ) } ; <nl> + return flow_sender < Data , passDSF > { std : : move ( d ) } ; <nl> } <nl> PUSHMI_TEMPLATE ( class Data , class DSF ) <nl> ( requires FlowSender < Data > ) <nl> auto operator ( ) ( Data d , DSF sf ) const { <nl> - return flow_many_sender < Data , DSF > { std : : move ( d ) , std : : move ( sf ) } ; <nl> + return flow_sender < Data , DSF > { std : : move ( d ) , std : : move ( sf ) } ; <nl> } <nl> - } const make_flow_many_sender { } ; <nl> + } const make_flow_sender { } ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / deduction guides <nl> # if __cpp_deduction_guides > = 201703 & & PUSHMI_NOT_ON_WINDOWS <nl> - flow_many_sender ( ) - > flow_many_sender < ignoreSF > ; <nl> + flow_sender ( ) - > flow_sender < ignoreSF > ; <nl> <nl> PUSHMI_TEMPLATE ( class SF ) <nl> ( requires True < > PUSHMI_BROKEN_SUBSUMPTION ( & & not Sender < SF > ) ) <nl> - flow_many_sender ( SF ) - > flow_many_sender < SF > ; <nl> + flow_sender ( SF ) - > flow_sender < SF > ; <nl> <nl> PUSHMI_TEMPLATE ( class Data ) <nl> ( requires True < > & & FlowSender < Data > ) <nl> - flow_many_sender ( Data ) - > flow_many_sender < Data , passDSF > ; <nl> + flow_sender ( Data ) - > flow_sender < Data , passDSF > ; <nl> <nl> PUSHMI_TEMPLATE ( class Data , class DSF ) <nl> ( requires FlowSender < Data > ) <nl> - flow_many_sender ( Data , DSF ) - > flow_many_sender < Data , DSF > ; <nl> + flow_sender ( Data , DSF ) - > flow_sender < Data , DSF > ; <nl> # endif <nl> <nl> template < > <nl> - struct construct_deduced < flow_many_sender > <nl> + struct construct_deduced < flow_sender > <nl> : make_flow_many_sender_fn { } ; <nl> <nl> / / / / TODO constrain me <nl> / / template < class V , class E = std : : exception_ptr , Sender Wrapped > <nl> / / auto erase_cast ( Wrapped w ) { <nl> - / / return flow_many_sender < V , E > { std : : move ( w ) } ; <nl> + / / return flow_sender < V , E > { std : : move ( w ) } ; <nl> / / } <nl> <nl> } / / namespace pushmi <nl> similarity index 76 % <nl> rename from folly / experimental / pushmi / sender / many_sender . h <nl> rename to folly / experimental / pushmi / sender / sender . h <nl> mmm a / folly / experimental / pushmi / sender / many_sender . h <nl> ppp b / folly / experimental / pushmi / sender / sender . h <nl> namespace folly { <nl> namespace pushmi { <nl> <nl> template < class E , class . . . VN > <nl> - class any_many_sender <nl> + class any_sender <nl> : public sender_tag : : with_values < VN . . . 
> : : template with_error < E > { <nl> using insitu_t = void * [ 2 ] ; <nl> union data { <nl> class any_many_sender <nl> static constexpr vtable const noop_ { } ; <nl> vtable const * vptr_ = & noop_ ; <nl> template < class Wrapped > <nl> - any_many_sender ( Wrapped obj , std : : false_type ) : any_many_sender ( ) { <nl> + any_sender ( Wrapped obj , std : : false_type ) : any_sender ( ) { <nl> struct s { <nl> static void op ( data & src , data * dst ) { <nl> if ( dst ) <nl> class any_many_sender <nl> vptr_ = & vtbl ; <nl> } <nl> template < class Wrapped > <nl> - any_many_sender ( Wrapped obj , std : : true_type ) noexcept : any_many_sender ( ) { <nl> + any_sender ( Wrapped obj , std : : true_type ) noexcept : any_sender ( ) { <nl> struct s { <nl> static void op ( data & src , data * dst ) { <nl> if ( dst ) <nl> class any_many_sender <nl> } <nl> template < class T , class U = std : : decay_t < T > > <nl> using wrapped_t = <nl> - std : : enable_if_t < ! std : : is_same < U , any_many_sender > : : value , U > ; <nl> + std : : enable_if_t < ! std : : is_same < U , any_sender > : : value , U > ; <nl> <nl> public : <nl> - any_many_sender ( ) = default ; <nl> - any_many_sender ( any_many_sender & & that ) noexcept : any_many_sender ( ) { <nl> + any_sender ( ) = default ; <nl> + any_sender ( any_sender & & that ) noexcept : any_sender ( ) { <nl> that . vptr_ - > op_ ( that . data_ , & data_ ) ; <nl> std : : swap ( that . vptr_ , vptr_ ) ; <nl> } <nl> <nl> PUSHMI_TEMPLATE ( class Wrapped ) <nl> ( requires SenderTo < wrapped_t < Wrapped > , any_receiver < E , VN . . . > > ) / / <nl> - explicit any_many_sender ( Wrapped obj ) noexcept ( insitu < Wrapped > ( ) ) <nl> - : any_many_sender { std : : move ( obj ) , bool_ < insitu < Wrapped > ( ) > { } } { } <nl> - ~ any_many_sender ( ) { <nl> + explicit any_sender ( Wrapped obj ) noexcept ( insitu < Wrapped > ( ) ) <nl> + : any_sender { std : : move ( obj ) , bool_ < insitu < Wrapped > ( ) > { } } { } <nl> + ~ any_sender ( ) { <nl> vptr_ - > op_ ( data_ , nullptr ) ; <nl> } <nl> - any_many_sender & operator = ( any_many_sender & & that ) noexcept { <nl> - this - > ~ any_many_sender ( ) ; <nl> - new ( ( void * ) this ) any_many_sender ( std : : move ( that ) ) ; <nl> + any_sender & operator = ( any_sender & & that ) noexcept { <nl> + this - > ~ any_sender ( ) ; <nl> + new ( ( void * ) this ) any_sender ( std : : move ( that ) ) ; <nl> return * this ; <nl> } <nl> PUSHMI_TEMPLATE ( class Out ) <nl> class any_many_sender <nl> <nl> / / Class static definitions : <nl> template < class E , class . . . VN > <nl> - constexpr typename any_many_sender < E , VN . . . > : : vtable const <nl> - any_many_sender < E , VN . . . > : : noop_ ; <nl> + constexpr typename any_sender < E , VN . . . > : : vtable const <nl> + any_sender < E , VN . . . 
> : : noop_ ; <nl> <nl> template < class SF > <nl> - class many_sender < SF > : public sender_tag { <nl> + class sender < SF > : public sender_tag { <nl> SF sf_ ; <nl> <nl> public : <nl> - constexpr many_sender ( ) = default ; <nl> - constexpr explicit many_sender ( SF sf ) : sf_ ( std : : move ( sf ) ) { } <nl> + constexpr sender ( ) = default ; <nl> + constexpr explicit sender ( SF sf ) : sf_ ( std : : move ( sf ) ) { } <nl> <nl> PUSHMI_TEMPLATE ( class Out ) <nl> ( requires PUSHMI_EXP ( <nl> class many_sender < SF > : public sender_tag { <nl> } ; <nl> <nl> template < PUSHMI_TYPE_CONSTRAINT ( Sender ) Data , class DSF > <nl> - class many_sender < Data , DSF > { <nl> + class sender < Data , DSF > { <nl> Data data_ ; <nl> DSF sf_ ; <nl> <nl> class many_sender < Data , DSF > { <nl> Sender < Data > , <nl> " Data must be a sender " ) ; <nl> <nl> - constexpr many_sender ( ) = default ; <nl> - constexpr explicit many_sender ( Data data ) : data_ ( std : : move ( data ) ) { } <nl> - constexpr many_sender ( Data data , DSF sf ) <nl> + constexpr sender ( ) = default ; <nl> + constexpr explicit sender ( Data data ) : data_ ( std : : move ( data ) ) { } <nl> + constexpr sender ( Data data , DSF sf ) <nl> : data_ ( std : : move ( data ) ) , sf_ ( std : : move ( sf ) ) { } <nl> <nl> PUSHMI_TEMPLATE ( class Out ) <nl> class many_sender < Data , DSF > { <nl> } ; <nl> <nl> template < > <nl> - class many_sender < > : public many_sender < ignoreSF > { <nl> + class sender < > : public sender < ignoreSF > { <nl> public : <nl> - many_sender ( ) = default ; <nl> + sender ( ) = default ; <nl> } ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / make_many_sender <nl> + / / make_sender <nl> PUSHMI_INLINE_VAR constexpr struct make_many_sender_fn { <nl> inline auto operator ( ) ( ) const { <nl> - return many_sender < ignoreSF > { } ; <nl> + return sender < ignoreSF > { } ; <nl> } <nl> PUSHMI_TEMPLATE ( class SF ) <nl> ( requires True < > PUSHMI_BROKEN_SUBSUMPTION ( & & not Sender < SF > ) ) / / <nl> auto <nl> operator ( ) ( SF sf ) const { <nl> - return many_sender < SF > { std : : move ( sf ) } ; <nl> + return sender < SF > { std : : move ( sf ) } ; <nl> } <nl> PUSHMI_TEMPLATE ( class Data ) <nl> ( requires True < > & & Sender < Data > ) / / <nl> auto <nl> operator ( ) ( Data d ) const { <nl> - return many_sender < Data , passDSF > { std : : move ( d ) } ; <nl> + return sender < Data , passDSF > { std : : move ( d ) } ; <nl> } <nl> PUSHMI_TEMPLATE ( class Data , class DSF ) <nl> ( requires Sender < Data > ) / / <nl> auto <nl> operator ( ) ( Data d , DSF sf ) const { <nl> - return many_sender < Data , DSF > { std : : move ( d ) , std : : move ( sf ) } ; <nl> + return sender < Data , DSF > { std : : move ( d ) , std : : move ( sf ) } ; <nl> } <nl> - } const make_many_sender { } ; <nl> + } const make_sender { } ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / deduction guides <nl> # if __cpp_deduction_guides > = 201703 & & PUSHMI_NOT_ON_WINDOWS <nl> - many_sender ( ) - > many_sender < ignoreSF > ; <nl> + sender ( ) - > sender < ignoreSF > ; <nl> <nl> PUSHMI_TEMPLATE ( class SF ) <nl> ( requires True < > PUSHMI_BROKEN_SUBSUMPTION ( & & not Sender < SF > ) ) / / <nl> - many_sender ( SF ) <nl> - - > many_sender < SF > ; <nl> + sender ( SF ) <nl> + - > sender < 
SF > ; <nl> <nl> PUSHMI_TEMPLATE ( class Data ) <nl> ( requires True < > & & Sender < Data > ) / / <nl> - many_sender ( Data ) <nl> - - > many_sender < Data , passDSF > ; <nl> + sender ( Data ) <nl> + - > sender < Data , passDSF > ; <nl> <nl> PUSHMI_TEMPLATE ( class Data , class DSF ) <nl> ( requires Sender < Data > ) / / <nl> - many_sender ( Data , DSF ) <nl> - - > many_sender < Data , DSF > ; <nl> + sender ( Data , DSF ) <nl> + - > sender < Data , DSF > ; <nl> # endif <nl> <nl> template < > <nl> - struct construct_deduced < many_sender > : make_many_sender_fn { } ; <nl> + struct construct_deduced < sender > : make_many_sender_fn { } ; <nl> <nl> } / / namespace pushmi <nl> } / / namespace folly <nl> mmm a / folly / experimental / pushmi / test / CompileTest . cpp <nl> ppp b / folly / experimental / pushmi / test / CompileTest . cpp <nl> void single_sender_test ( ) { <nl> } <nl> <nl> void many_sender_test ( ) { <nl> - auto in0 = mi : : MAKE ( many_sender ) ( ) ; <nl> + auto in0 = mi : : MAKE ( sender ) ( ) ; <nl> static_assert ( mi : : Sender < decltype ( in0 ) > , " in0 not a sender " ) ; <nl> - auto in1 = mi : : MAKE ( many_sender ) ( mi : : ignoreSF { } ) ; <nl> + auto in1 = mi : : MAKE ( sender ) ( mi : : ignoreSF { } ) ; <nl> static_assert ( mi : : Sender < decltype ( in1 ) > , " in1 not a sender " ) ; <nl> - auto in2 = mi : : MAKE ( many_sender ) ( <nl> + auto in2 = mi : : MAKE ( sender ) ( <nl> [ & ] ( auto out ) { <nl> in0 . submit ( mi : : MAKE ( receiver ) ( <nl> std : : move ( out ) , <nl> void many_sender_test ( ) { <nl> out0 , mi : : on_value ( [ ] ( auto d , int v ) { mi : : set_value ( d , v ) ; } ) ) ; <nl> in2 . submit ( out1 ) ; <nl> <nl> - auto any0 = mi : : any_many_sender < std : : exception_ptr , int > ( in0 ) ; <nl> + auto any0 = mi : : any_sender < std : : exception_ptr , int > ( in0 ) ; <nl> } <nl> <nl> void flow_receiver_1_test ( ) { <nl> void flow_single_sender_test ( ) { <nl> } <nl> <nl> void flow_many_sender_test ( ) { <nl> - auto in0 = mi : : MAKE ( flow_many_sender ) ( ) ; <nl> + auto in0 = mi : : MAKE ( flow_sender ) ( ) ; <nl> static_assert ( mi : : Sender < decltype ( in0 ) > , " in0 not a sender " ) ; <nl> static_assert ( mi : : FlowSender < decltype ( in0 ) > , " in0 not flow " ) ; <nl> - auto in1 = mi : : MAKE ( flow_many_sender ) ( mi : : ignoreSF { } ) ; <nl> + auto in1 = mi : : MAKE ( flow_sender ) ( mi : : ignoreSF { } ) ; <nl> static_assert ( mi : : Sender < decltype ( in1 ) > , " in1 not a sender " ) ; <nl> static_assert ( mi : : FlowSender < decltype ( in1 ) > , " in1 not flow " ) ; <nl> - auto in2 = mi : : MAKE ( flow_many_sender ) ( <nl> + auto in2 = mi : : MAKE ( flow_sender ) ( <nl> [ & ] ( auto out ) { <nl> in0 . submit ( mi : : MAKE ( flow_receiver ) ( <nl> std : : move ( out ) , <nl> void flow_many_sender_test ( ) { <nl> out0 , mi : : on_value ( [ ] ( auto d , int v ) { mi : : set_value ( d , v ) ; } ) ) ; <nl> in2 . submit ( out1 ) ; <nl> <nl> - auto any0 = mi : : any_flow_many_sender < <nl> + auto any0 = mi : : any_flow_sender < <nl> std : : exception_ptr , <nl> std : : ptrdiff_t , <nl> std : : exception_ptr , <nl> mmm a / folly / experimental / pushmi / test / FlowManyTest . cpp <nl> ppp b / folly / experimental / pushmi / test / FlowManyTest . cpp <nl> <nl> # include < chrono > <nl> using namespace std : : literals ; <nl> <nl> - # include < folly / experimental / pushmi / sender / flow_many_sender . h > <nl> + # include < folly / experimental / pushmi / sender / flow_sender . 
h > <nl> # include < folly / experimental / pushmi / o / for_each . h > <nl> # include < folly / experimental / pushmi / o / from . h > <nl> # include < folly / experimental / pushmi / o / submit . h > <nl> using namespace testing ; <nl> class ImmediateFlowManySender : public Test { <nl> protected : <nl> auto make_producer ( ) { <nl> - return mi : : MAKE ( flow_many_sender ) ( [ & ] ( auto out ) { <nl> + return mi : : MAKE ( flow_sender ) ( [ & ] ( auto out ) { <nl> using Out = decltype ( out ) ; <nl> struct Data : mi : : receiver < > { <nl> explicit Data ( Out out_ ) : out ( std : : move ( out_ ) ) , stop ( false ) { } <nl> class ImmediateFlowManySender : public Test { <nl> TEST ( AnyFlowManySender , Construct ) { <nl> std : : array < int , 3 > arr { { 0 , 9 , 99 } } ; <nl> auto m = folly : : pushmi : : operators : : flow_from ( arr ) ; <nl> - auto any_m = folly : : pushmi : : any_flow_many_sender < <nl> + auto any_m = folly : : pushmi : : any_flow_sender < <nl> std : : exception_ptr , <nl> std : : ptrdiff_t , <nl> std : : exception_ptr , <nl> class ConcurrentFlowManySender : public Test { <nl> } <nl> <nl> void cancellation_test ( std : : chrono : : system_clock : : time_point at ) { <nl> - auto f = mi : : MAKE ( flow_many_sender ) ( [ & ] ( auto out ) { <nl> + auto f = mi : : MAKE ( flow_sender ) ( [ & ] ( auto out ) { <nl> using Out = decltype ( out ) ; <nl> <nl> / / boolean cancellation <nl>
rename many_sender to simply sender
facebook/folly
fbc1a619a6286cbcf6314f0203e6d51d232a0c89
2019-04-05T20:31:48Z
similarity index 100 % <nl> rename from modules / perception / camera / app / BUILD_ <nl> rename to modules / perception / camera / app / BUILD <nl> similarity index 100 % <nl> rename from modules / perception / camera / app / proto / BUILD_ <nl> rename to modules / perception / camera / app / proto / BUILD <nl> mmm a / modules / perception / camera / app / traffic_light_camera_perception . cc <nl> ppp b / modules / perception / camera / app / traffic_light_camera_perception . cc <nl> bool TrafficLightCameraPerception : : Perception ( <nl> AERROR < < " tl failed to detect . " ; <nl> return false ; <nl> } <nl> - const auto traffic_light_detect_time = PERF_BLOCK_END_WITH_INDICATOR ( <nl> - frame - > data_provider - > sensor_name ( ) , " traffic_light_detect " ) ; <nl> <nl> TrafficLightDetectorOptions recognizer_options ; <nl> if ( ! recognizer_ - > Detect ( recognizer_options , frame ) ) { <nl> AERROR < < " tl failed to recognize . " ; <nl> return false ; <nl> } <nl> - const auto traffic_light_recognize_time = PERF_BLOCK_END_WITH_INDICATOR ( <nl> - frame - > data_provider - > sensor_name ( ) , " traffic_light_recognize " ) ; <nl> <nl> TrafficLightTrackerOptions tracker_options ; <nl> if ( ! tracker_ - > Track ( tracker_options , frame ) ) { <nl> AERROR < < " tl failed to track . " ; <nl> return false ; <nl> } <nl> - const auto traffic_light_track_time = PERF_BLOCK_END_WITH_INDICATOR ( <nl> - frame - > data_provider - > sensor_name ( ) , " traffic_light_track " ) ; <nl> - AINFO < < " TrafficLightsPerception perf_info . " <nl> - < < " number_of_lights : " < < frame - > traffic_lights . size ( ) <nl> - < < " traffic_light_detect_time : " < < traffic_light_detect_time < < " ms . " <nl> - < < " traffic_light_recognize_time : " < < traffic_light_recognize_time <nl> - < < " ms . " <nl> - < < " traffic_light_track_time : " < < traffic_light_track_time < < " ms . " ; <nl> return true ; <nl> } <nl> <nl> mmm a / modules / perception / camera / common / camera_frame . h <nl> ppp b / modules / perception / camera / common / camera_frame . h <nl> struct CameraFrame { <nl> DataProvider * data_provider = nullptr ; <nl> / / calibration service <nl> BaseCalibrationService * calibration_service = nullptr ; <nl> - / / camera intrinsics <nl> - Eigen : : Matrix3f camera_k_matrix = Eigen : : Matrix3f : : Identity ( ) ; <nl> - / / narrow to obstacle projected_matrix <nl> - Eigen : : Matrix3d project_matrix = Eigen : : Matrix3d : : Identity ( ) ; <nl> - / / camera to world pose <nl> - Eigen : : Affine3d camera2world_pose = Eigen : : Affine3d : : Identity ( ) ; <nl> / / hdmap struct <nl> base : : HdmapStructPtr hdmap_struct = nullptr ; <nl> / / tracker proposed objects <nl> struct CameraFrame { <nl> std : : shared_ptr < base : : Blob < float > > lane_detected_blob = nullptr ; <nl> / / detected traffic lights <nl> std : : vector < base : : TrafficLightPtr > traffic_lights ; <nl> - <nl> - void Reset ( ) { } <nl> - <nl> + / / camera intrinsics <nl> + Eigen : : Matrix3f camera_k_matrix = Eigen : : Matrix3f : : Identity ( ) ; <nl> + / / narrow to obstacle projected_matrix <nl> + Eigen : : Matrix3d project_matrix = Eigen : : Matrix3d : : Identity ( ) ; <nl> + / / camera to world pose <nl> + Eigen : : Affine3d camera2world_pose = Eigen : : Affine3d : : Identity ( ) ; <nl> EIGEN_MAKE_ALIGNED_OPERATOR_NEW <nl> } EIGEN_ALIGN16 ; / / struct CameraFrame <nl> <nl> mmm a / modules / perception / camera / lib / traffic_light / detector / detection / detection . 
cc <nl> ppp b / modules / perception / camera / lib / traffic_light / detector / detection / detection . cc <nl> bool TrafficLightDetection : : Init ( <nl> int resize_height = detection_param_ . min_crop_size ( ) ; <nl> int resize_width = detection_param_ . min_crop_size ( ) ; <nl> max_batch_size_ = detection_param_ . max_batch_size ( ) ; <nl> - param_blob_length_ = 4 ; <nl> + param_blob_length_ = 3 ; <nl> <nl> CHECK_GT ( resize_height , 0 ) ; <nl> CHECK_GT ( resize_width , 0 ) ; <nl> bool TrafficLightDetection : : Init ( <nl> param_data [ offset + 0 ] = static_cast < float > ( resize_width ) ; <nl> param_data [ offset + 1 ] = static_cast < float > ( resize_height ) ; <nl> param_data [ offset + 2 ] = 1 ; <nl> - param_data [ offset + 3 ] = 1 ; <nl> } <nl> <nl> switch ( detection_param_ . crop_method ( ) ) { <nl> bool TrafficLightDetection : : Inference ( <nl> static_cast < int > ( detection_param_ . min_crop_size ( ) ) , <nl> static_cast < int > ( detection_param_ . min_crop_size ( ) ) , <nl> 3 ) ; <nl> - param_blob_ - > Reshape ( static_cast < int > ( batch_num ) , 4 , 1 , 1 ) ; <nl> + param_blob_ - > Reshape ( static_cast < int > ( batch_num ) , 1 , 3 , 1 ) ; <nl> float * param_data = param_blob_ - > mutable_cpu_data ( ) ; <nl> for ( size_t i = 0 ; i < batch_num ; + + i ) { <nl> auto offset = i * param_blob_length_ ; <nl> bool TrafficLightDetection : : Inference ( <nl> param_data [ offset + 1 ] = <nl> static_cast < float > ( detection_param_ . min_crop_size ( ) ) ; <nl> param_data [ offset + 2 ] = 1 ; <nl> - param_data [ offset + 3 ] = 1 ; <nl> } <nl> <nl> AINFO < < " reshape inputblob " < < input_img_blob - > shape_string ( ) ; <nl> bool TrafficLightDetection : : SelectOutputBoxes ( <nl> float x2 = result_data [ 3 ] ; <nl> float y2 = result_data [ 4 ] ; <nl> std : : vector < float > score { result_data [ 5 ] , result_data [ 6 ] , result_data [ 7 ] , <nl> - result_data [ 8 ] } ; <nl> + result_data [ 8 ] } ; <nl> for ( int i = 0 ; i < 9 ; + + i ) { <nl> ADEBUG < < " result_data " < < result_data [ i ] ; <nl> } <nl> bool TrafficLightDetection : : SelectOutputBoxes ( <nl> tmp - > region . detect_score = * biggest ; <nl> <nl> if ( OutOfValidRegion ( tmp - > region . detection_roi , <nl> - crop_box_list . at ( img_id ) . width , <nl> - crop_box_list . at ( img_id ) . height ) | | <nl> + crop_box_list . at ( img_id ) . width , <nl> + crop_box_list . at ( img_id ) . height ) | | <nl> tmp - > region . detection_roi . Area ( ) < = 0 ) { <nl> AINFO < < " Invalid width or height or x or y : " <nl> < < tmp - > region . detection_roi . width < < " | " <nl> mmm a / modules / perception / camera / lib / traffic_light / detector / recognition / classify . cc <nl> ppp b / modules / perception / camera / lib / traffic_light / detector / recognition / classify . cc <nl> void ClassifyBySimple : : Perform ( const CameraFrame * frame , <nl> <nl> data_provider_image_option_ . crop_roi = light - > region . detection_roi ; <nl> data_provider_image_option_ . do_crop = true ; <nl> - data_provider_image_option_ . target_color = base : : Color : : BGR ; <nl> + data_provider_image_option_ . target_color = base : : Color : : RGB ; <nl> frame - > data_provider - > GetImage ( data_provider_image_option_ , image_ . get ( ) ) ; <nl> <nl> AINFO < < " get img done " ; <nl> mmm a / modules / perception / camera / lib / traffic_light / preprocessor / BUILD <nl> ppp b / modules / perception / camera / lib / traffic_light / preprocessor / BUILD <nl> cc_library ( <nl> hdrs = [ " pose . 
h " ] , <nl> deps = [ <nl> " / / cyber " , <nl> + " / / modules / common / util : eigen_defs " , <nl> " / / modules / perception / base " , <nl> ] , <nl> ) <nl> mmm a / modules / perception / camera / lib / traffic_light / preprocessor / pose . h <nl> ppp b / modules / perception / camera / lib / traffic_light / preprocessor / pose . h <nl> <nl> <nl> # include " Eigen / Core " <nl> <nl> + # include " modules / common / util / eigen_defs . h " <nl> # include " modules / perception / base / image_8u . h " <nl> <nl> namespace apollo { <nl> class CarPose { <nl> double getTimestamp ( ) const { return timestamp_ ; } <nl> <nl> Eigen : : Matrix4d pose_ ; / / car ( novatel ) to world pose <nl> - std : : map < std : : string , Eigen : : Matrix4d > c2w_poses_ ; / / camera to world poses <nl> + / / camera to world poses <nl> + apollo : : common : : EigenMap < std : : string , Eigen : : Matrix4d > c2w_poses_ ; <nl> double timestamp_ ; <nl> <nl> private : <nl> mmm a / modules / perception / inference / libtorch / torch_det . cc <nl> ppp b / modules / perception / inference / libtorch / torch_det . cc <nl> void TorchDet : : Infer ( ) { <nl> 1 ) ; <nl> blobs_ [ output_names_ [ 0 ] ] - > Reshape ( { static_cast < int > ( result . size ( 0 ) ) , <nl> static_cast < int > ( result . size ( 1 ) ) , 1 , 1 } ) ; <nl> + if ( result . size ( 0 ) = = 0 ) { <nl> + result = torch : : zeros ( { 1 , 9 } , torch : : kFloat ) . to ( device ) ; <nl> + } <nl> blobs_ [ output_names_ [ 0 ] ] - > data ( ) - > set_gpu_data ( result . data_ptr ( ) ) ; <nl> } <nl> <nl> mmm a / modules / perception / onboard / component / BUILD <nl> ppp b / modules / perception / onboard / component / BUILD <nl> load ( " @ rules_cc / / cc : defs . bzl " , " cc_binary " , " cc_library " ) <nl> load ( " / / tools : cpplint . bzl " , " cpplint " ) <nl> <nl> package ( default_visibility = [ " / / visibility : public " ] ) <nl> + PERCEPTION_COPTS = [ ' - DMODULE_NAME = \ \ " perception \ \ " ' ] <nl> + <nl> + cc_binary ( <nl> + name = " libperception_component_camera . so " , <nl> + linkshared = True , <nl> + linkstatic = False , <nl> + deps = [ " : perception_component_inner_camera " ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " perception_component_inner_camera " , <nl> + srcs = [ <nl> + " trafficlights_perception_component . cc " , <nl> + ] , <nl> + hdrs = [ <nl> + " trafficlights_perception_component . 
h " , <nl> + ] , <nl> + copts = PERCEPTION_COPTS , <nl> + deps = [ <nl> + " / / cyber " , <nl> + " / / modules / common / math " , <nl> + " / / modules / common / util : time_util " , <nl> + " / / modules / drivers / proto : sensor_image_cc_proto " , <nl> + " / / modules / map / proto : map_cc_proto " , <nl> + " / / modules / perception / base " , <nl> + " / / modules / perception / camera / app : traffic_light_camera_perception " , <nl> + " / / modules / perception / camera / common " , <nl> + " / / modules / perception / camera / lib / traffic_light / preprocessor : tl_preprocessor " , <nl> + " / / modules / perception / common / sensor_manager " , <nl> + " / / modules / perception / map / hdmap : hdmap_input " , <nl> + " / / modules / perception / onboard / common_flags " , <nl> + " / / modules / perception / onboard / proto : trafficlights_perception_component_cc_proto " , <nl> + " / / modules / perception / onboard / transform_wrapper " , <nl> + " / / modules / perception / proto : traffic_light_detection_cc_proto " , <nl> + " / / modules / transform / proto : transform_cc_proto " , <nl> + " / / modules / v2x / common : v2x_proxy_gflags " , <nl> + " / / modules / v2x / proto : v2x_traffic_light_cc_proto " , <nl> + " @ eigen " , <nl> + " @ opencv / / : highgui " , <nl> + ] , <nl> + alwayslink = True , <nl> + ) <nl> <nl> cc_binary ( <nl> name = " libperception_component_lidar . so " , <nl> mmm a / modules / perception / onboard / component / trafficlights_perception_component . cc <nl> ppp b / modules / perception / onboard / component / trafficlights_perception_component . cc <nl> <nl> # include < boost / algorithm / string . hpp > <nl> # include < opencv2 / highgui / highgui . hpp > <nl> # include < opencv2 / imgproc / imgproc . hpp > <nl> + # include < opencv2 / opencv . hpp > <nl> <nl> # include " absl / strings / str_cat . h " <nl> <nl> <nl> namespace apollo { <nl> namespace perception { <nl> namespace onboard { <nl> + <nl> using TLCamID = apollo : : perception : : TrafficLightDetection : : CameraID ; <nl> using apollo : : cyber : : common : : GetAbsolutePath ; <nl> + using apollo : : cyber : : Clock ; <nl> using apollo : : perception : : common : : SensorManager ; <nl> <nl> class TLInfo { <nl> static int GetGpuId ( <nl> } <nl> <nl> bool TrafficLightsPerceptionComponent : : Init ( ) { <nl> + frame_ . 
reset ( new camera : : CameraFrame ) ; <nl> writer_ = node_ - > CreateWriter < apollo : : perception : : TrafficLightDetection > ( <nl> " / apollo / perception / traffic_light " ) ; <nl> <nl> void TrafficLightsPerceptionComponent : : OnReceiveImage ( <nl> const std : : shared_ptr < apollo : : drivers : : Image > msg , <nl> const std : : string & camera_name ) { <nl> std : : lock_guard < std : : mutex > lck ( mutex_ ) ; <nl> - double receive_img_timestamp = apollo : : common : : time : : Clock : : NowInSeconds ( ) ; <nl> + double receive_img_timestamp = Clock : : NowInSeconds ( ) ; <nl> double image_msg_ts = msg - > measurement_time ( ) ; <nl> image_msg_ts + = image_timestamp_offset_ ; <nl> last_sub_camera_image_ts_ [ camera_name ] = image_msg_ts ; <nl> <nl> { <nl> - const double cur_time = apollo : : common : : time : : Clock : : NowInSeconds ( ) ; <nl> + const double cur_time = Clock : : NowInSeconds ( ) ; <nl> const double start_latency = ( cur_time - msg - > measurement_time ( ) ) * 1e3 ; <nl> AINFO < < " FRAME_STATISTICS : TrafficLights : Start : msg_time [ " <nl> < < FORMAT_TIMESTAMP ( msg - > measurement_time ( ) ) < < " ] : cur_time [ " <nl> void TrafficLightsPerceptionComponent : : OnReceiveImage ( <nl> } <nl> <nl> const std : : string perf_indicator = " trafficlights " ; <nl> - PERF_BLOCK_START ( ) ; <nl> if ( ! CheckCameraImageStatus ( image_msg_ts , check_image_status_interval_thresh_ , <nl> camera_name ) ) { <nl> AERROR < < " CheckCameraImageStatus failed " ; <nl> return ; <nl> } <nl> - const auto check_camera_status_time = <nl> - PERF_BLOCK_END_WITH_INDICATOR ( perf_indicator , " CheckCameraImageStatus " ) ; <nl> <nl> camera : : TLPreprocessorOption preprocess_option ; <nl> preprocess_option . image_borders_size = & image_border_sizes_ ; <nl> <nl> / / query pose and signals , add cached camera selection by lights ' projections <nl> - if ( ! UpdateCameraSelection ( image_msg_ts , preprocess_option , & frame_ ) ) { <nl> + if ( ! UpdateCameraSelection ( image_msg_ts , preprocess_option , frame_ . get ( ) ) ) { <nl> AWARN < < " add_cached_camera_selection failed , ts : " < < image_msg_ts ; <nl> } <nl> - const auto update_camera_selection_time = <nl> - PERF_BLOCK_END_WITH_INDICATOR ( perf_indicator , " UpdateCameraSelection " ) ; <nl> <nl> / / skipping frame according to last proc image timestamp <nl> if ( last_proc_image_ts_ > 0 . 0 & & <nl> void TrafficLightsPerceptionComponent : : OnReceiveImage ( <nl> < < " , receive_img_timestamp : " < < receive_img_timestamp <nl> < < " , _last_proc_image_ts : " < < last_proc_image_ts_ <nl> < < " , _proc_interval_seconds : " < < proc_interval_seconds_ ; <nl> - / / SendSimulationMsg ( ) ; <nl> return ; <nl> } <nl> / / sync image with cached projections <nl> bool sync_image_ok = <nl> preprocessor_ - > SyncInformation ( image_msg_ts , camera_name ) ; <nl> - const auto sync_information_time = <nl> - PERF_BLOCK_END_WITH_INDICATOR ( perf_indicator , " SyncInformation " ) ; <nl> <nl> if ( ! sync_image_ok ) { <nl> AINFO < < " PreprocessComponent not publish image , ts : " < < image_msg_ts <nl> void TrafficLightsPerceptionComponent : : OnReceiveImage ( <nl> / / Fill camera frame <nl> camera : : DataProvider : : ImageOptions image_options ; <nl> image_options . target_color = base : : Color : : RGB ; <nl> - frame_ . data_provider = data_providers_map_ . at ( camera_name ) . get ( ) ; <nl> - frame_ . data_provider - > FillImageData ( <nl> + frame_ - > data_provider = data_providers_map_ . at ( camera_name ) . 
get ( ) ; <nl> + frame_ - > data_provider - > FillImageData ( <nl> image_height_ , image_width_ , <nl> reinterpret_cast < const uint8_t * > ( msg - > data ( ) . data ( ) ) , msg - > encoding ( ) ) ; <nl> - frame_ . timestamp = image_msg_ts ; <nl> - const auto fill_image_data_time = <nl> - PERF_BLOCK_END_WITH_INDICATOR ( perf_indicator , " FillImageData " ) ; <nl> - <nl> + frame_ - > timestamp = image_msg_ts ; <nl> / / caros monitor - - image system time diff <nl> const auto & diff_image_sys_ts = image_msg_ts - receive_img_timestamp ; <nl> if ( fabs ( diff_image_sys_ts ) > image_sys_ts_diff_threshold_ ) { <nl> void TrafficLightsPerceptionComponent : : OnReceiveImage ( <nl> } <nl> <nl> if ( ! VerifyLightsProjection ( image_msg_ts , preprocess_option , camera_name , <nl> - & frame_ ) ) { <nl> + frame_ . get ( ) ) ) { <nl> AINFO < < " VerifyLightsProjection on image failed , ts : " < < image_msg_ts <nl> < < " , camera_name : " < < camera_name <nl> < < " last_query_tf_ts_ : " < < last_query_tf_ts_ <nl> void TrafficLightsPerceptionComponent : : OnReceiveImage ( <nl> < < " reset last_query_tf_ts_ to - 1 " ; <nl> last_query_tf_ts_ = - 1 . 0 ; <nl> } <nl> - const auto verify_lights_projection_time = <nl> - PERF_BLOCK_END_WITH_INDICATOR ( perf_indicator , " VerifyLightsProjection " ) ; <nl> - last_proc_image_ts_ = apollo : : common : : time : : Clock : : NowInSeconds ( ) ; <nl> + last_proc_image_ts_ = Clock : : NowInSeconds ( ) ; <nl> <nl> AINFO < < " start proc . " ; <nl> - traffic_light_pipeline_ - > Perception ( camera_perception_options_ , & frame_ ) ; <nl> + traffic_light_pipeline_ - > Perception ( camera_perception_options_ , frame_ . get ( ) ) ; <nl> <nl> - const auto traffic_lights_perception_time = <nl> - PERF_BLOCK_END_WITH_INDICATOR ( perf_indicator , " TrafficLightsPerception " ) ; <nl> - for ( auto light : frame_ . traffic_lights ) { <nl> + for ( auto light : frame_ - > traffic_lights ) { <nl> AINFO < < " after tl pipeline " < < light - > id < < " color " <nl> < < static_cast < int > ( light - > status . color ) ; <nl> } <nl> <nl> - SyncV2XTrafficLights ( & frame_ ) ; <nl> + SyncV2XTrafficLights ( frame_ . get ( ) ) ; <nl> <nl> std : : shared_ptr < TrafficLightDetection > out_msg ( new TrafficLightDetection ) ; <nl> - if ( ! TransformOutputMessage ( & frame_ , camera_name , & out_msg ) ) { <nl> + if ( ! TransformOutputMessage ( frame_ . get ( ) , camera_name , & out_msg ) ) { <nl> AERROR < < " transform_output_message failed , msg_time : " <nl> < < FORMAT_TIMESTAMP ( msg - > measurement_time ( ) ) ; <nl> return ; <nl> void TrafficLightsPerceptionComponent : : OnReceiveImage ( <nl> <nl> / / SendSimulationMsg ( ) ; <nl> <nl> - const auto send_message_time = <nl> - PERF_BLOCK_END_WITH_INDICATOR ( perf_indicator , " SendMessage " ) ; <nl> - <nl> - const auto total_time = static_cast < int64_t > ( <nl> - ( apollo : : common : : time : : Clock : : NowInSeconds ( ) - receive_img_timestamp ) * <nl> - 1e3 ) ; <nl> - AINFO < < " TrafficLightsPerception perf_info . " <nl> - < < " number_of_lights : " < < frame_ . traffic_lights . size ( ) <nl> - < < " check_camera_status_time : " < < check_camera_status_time < < " ms . " <nl> - < < " update_camera_selection_time : " < < update_camera_selection_time <nl> - < < " ms . " <nl> - < < " sync_information_time : " < < sync_information_time < < " ms . " <nl> - < < " fill_image_data_time : " < < fill_image_data_time < < " ms . " <nl> - < < " verify_lights_projection_time : " < < verify_lights_projection_time <nl> - < < " ms . 
" <nl> - < < " traffic_lights_perception_time : " < < traffic_lights_perception_time <nl> - < < " ms . " <nl> - < < " send_message_time : " < < send_message_time < < " ms . " <nl> - < < " total : " < < total_time < < " ms . " ; <nl> AINFO < < out_msg - > DebugString ( ) ; <nl> { <nl> - const double end_timestamp = apollo : : common : : time : : Clock : : NowInSeconds ( ) ; <nl> + const double end_timestamp = Clock : : NowInSeconds ( ) ; <nl> const double end_latency = ( end_timestamp - msg - > measurement_time ( ) ) * 1e3 ; <nl> AINFO < < " FRAME_STATISTICS : TrafficLights : End : msg_time [ " <nl> < < FORMAT_TIMESTAMP ( msg - > measurement_time ( ) ) < < " ] : cur_time [ " <nl> bool TrafficLightsPerceptionComponent : : UpdateCameraSelection ( <nl> double timestamp , const camera : : TLPreprocessorOption & option , <nl> camera : : CameraFrame * frame ) { <nl> PERF_FUNCTION ( ) ; <nl> - const double current_ts = apollo : : common : : time : : Clock : : NowInSeconds ( ) ; <nl> + const double current_ts = Clock : : NowInSeconds ( ) ; <nl> if ( last_query_tf_ts_ > 0 . 0 & & <nl> current_ts - last_query_tf_ts_ < query_tf_interval_seconds_ ) { <nl> AINFO < < " skip current tf msg , img_ts : " < < timestamp <nl> bool TrafficLightsPerceptionComponent : : TransformOutputMessage ( <nl> <nl> auto & lights = frame - > traffic_lights ; <nl> auto * header = ( * out_msg ) - > mutable_header ( ) ; <nl> - double publish_time = apollo : : common : : time : : Clock : : NowInSeconds ( ) ; <nl> + double publish_time = Clock : : NowInSeconds ( ) ; <nl> header - > set_timestamp_sec ( publish_time ) ; / / message publishing time <nl> AINFO < < " set header time sec : " < < frame - > timestamp ; <nl> <nl> void TrafficLightsPerceptionComponent : : Visualize ( <nl> cv : : imshow ( " Traffic Light " , output_image ) ; <nl> cv : : imwrite ( absl : : StrCat ( " / apollo / debug_vis / " , frame . timestamp , " . jpg " ) , <nl> output_image ) ; <nl> - cvWaitKey ( 30 ) ; <nl> + cv : : waitKey ( 30 ) ; <nl> } <nl> <nl> void TrafficLightsPerceptionComponent : : SyncV2XTrafficLights ( <nl> void TrafficLightsPerceptionComponent : : SyncV2XTrafficLights ( <nl> auto sync_single_light = [ & ] ( base : : TrafficLightPtr light ) { <nl> for ( auto itr = v2x_msg_buffer_ . rbegin ( ) ; itr ! = v2x_msg_buffer_ . rend ( ) ; <nl> + + itr ) { <nl> - double v2x_timestamp = ( * itr ) . header ( ) . timestamp_sec ( ) ; <nl> + double v2x_timestamp = itr - > header ( ) . timestamp_sec ( ) ; <nl> / / find close enough v2x msg <nl> if ( std : : fabs ( camera_frame_timestamp - v2x_timestamp ) < <nl> v2x_sync_interval_seconds_ ) { <nl> const int v2x_lights_num = <nl> - ( * itr ) . current_lane_trafficlight ( ) . single_traffic_light_size ( ) ; <nl> - const auto & v2x_lights = ( * itr ) . current_lane_trafficlight ( ) ; <nl> + itr - > road_traffic_light ( 0 ) . single_traffic_light_size ( ) ; <nl> + const auto & v2x_lights = itr - > road_traffic_light ( 0 ) ; <nl> for ( int i = 0 ; i < v2x_lights_num ; + + i ) { <nl> const auto & v2x_light = v2x_lights . single_traffic_light ( i ) ; <nl> / / check signal id <nl> mmm a / modules / perception / onboard / component / trafficlights_perception_component . h <nl> ppp b / modules / perception / onboard / component / trafficlights_perception_component . 
h <nl> class TrafficLightsPerceptionComponent : public apollo : : cyber : : Component < > { <nl> data_providers_map_ ; <nl> <nl> / / image <nl> - camera : : CameraFrame frame_ ; <nl> + std : : shared_ptr < camera : : CameraFrame > frame_ ; <nl> <nl> / / proc <nl> camera : : CameraPerceptionInitOptions camera_perception_init_options_ ; <nl> mmm a / modules / perception / production / dag / dag_streaming_perception_trafficlights . dag <nl> ppp b / modules / perception / production / dag / dag_streaming_perception_trafficlights . dag <nl> <nl> module_config { <nl> - # module_library : " / apollo / bazel - bin / modules / perception / onboard / component / libperception_component . so " <nl> module_library : " / apollo / bazel - bin / modules / perception / onboard / component / libperception_component_camera . so " <nl> - # components { <nl> - # comname : " TrafficLightsPerceptionComponent " <nl> - # comclass : " TrafficLightsPerceptionComponent " <nl> - # confpath : " conf / perception / camera / trafficlights_perception_component . config " <nl> - # sender : { <nl> - # outchannelnames : [ " / perception / traffic_light_status " , " / perception / traffic_light_simulation " ] <nl> - # } <nl> - # } <nl> - components { <nl> + <nl> + components { <nl> class_name : " TrafficLightsPerceptionComponent " <nl> config { <nl> name : " TrafficLightsComponent " <nl> module_config { <nl> flag_file_path : " / apollo / modules / perception / production / conf / perception / perception_common . flag " <nl> } <nl> } <nl> - <nl> - # channels { <nl> - # channelname : " / apollo / sensor / camera / front_12mm " <nl> - # msgtype : " adu . common . sensor . Image " <nl> - # } <nl> - # channels { <nl> - # channelname : " / apollo / sensor / camera / front_6mm " <nl> - # msgtype : " adu . common . sensor . Image " <nl> - # } <nl> - # channels { <nl> - # channelname : " / sensor / camera / obstacle / image_wide " <nl> - # msgtype : " adu . common . sensor . Image " <nl> - # } <nl> - # channels { <nl> - # channelname : " / sensor / camera / obstacle / image_narrow " <nl> - # msgtype : " adu . common . sensor . Image " <nl> - # } <nl> - # channels { <nl> - # channelname : " / perception / traffic_light_status " <nl> - # msgtype : " adu . common . traffic_light . TrafficLightDetection " <nl> - # } <nl> - # channels { <nl> - # channelname : " / perception / traffic_light_simulation " <nl> - # msgtype : " adu . common . traffic_light . TrafficLightDetection " <nl> - # } <nl> } <nl> mmm a / modules / perception / production / data / perception / camera / models / traffic_light_detection / detection . pt <nl> ppp b / modules / perception / production / data / perception / camera / models / traffic_light_detection / detection . pt <nl> output_blob_name : " bboxes " <nl> model_name : " . / " <nl> model_type : " TorchDet " <nl> proto_file : " deploy . prototxt " <nl> - weight_file : " faster_rcnn_model . ts " <nl> + weight_file : " faster_rcnn_model / faster_rcnn_model . ts " <nl> max_batch_size : 1 <nl>
Perception : reactivate traffic light detection component ( )
ApolloAuto/apollo
4ee34b427b860c3fc267ff328cb996027008b11e
2020-09-18T09:01:22Z
mmm a / dbms / include / DB / Common / AutoArray . h <nl> ppp b / dbms / include / DB / Common / AutoArray . h <nl> class AutoArray <nl> { <nl> setEmpty ( ) ; <nl> } <nl> - <nl> + <nl> / * * If dont_init_elems = true is specified , default constructors will not be called for the elements . <nl> * In that case , you must insert all elements using the place function and placement new , <nl> * because destructors will be called for them later . <nl> class AutoArray <nl> { <nl> init ( size_ , dont_init_elems ) ; <nl> } <nl> + <nl> + / * * Initializes all elements with the copy constructor , taking value as the parameter . <nl> + * / <nl> + AutoArray ( size_t size_ , const T & value ) <nl> + { <nl> + init ( size_ , true ) ; <nl> + <nl> + for ( size_t i = 0 ; i < size_ ; + + i ) <nl> + { <nl> + new ( place ( i ) ) T ( value ) ; <nl> + } <nl> + } <nl> <nl> / * * resize removes all existing elements . <nl> * / <nl>
dbms : improved AutoArray compatibility with vector ( this should also fix a bug ) [ # CONV - 6318 ] .
ClickHouse/ClickHouse
62c85998deaacdc59f82d5f11c9339338fddbda3
2013-01-09T10:49:57Z
mmm a / tools / docdump / makedocs . py <nl> ppp b / tools / docdump / makedocs . py <nl> def langavailable ( ) : <nl> <nl> _ = gettext . gettext <nl> if args . language ! = " none " : <nl> - logging . info ( " Language changed to : " + args . language ) <nl> lang = gettext . translation ( domain = " makedocs " , <nl> localedir = " locales " , <nl> languages = [ args . language ] ) <nl>
Minor changes : remove the language - change log message from makedocs . py
godotengine/godot
0fb91ef95b7887eae1a8a7741f3e20e66c2b4998
2015-10-07T19:15:28Z
mmm a / Documentation / current_iteration . md <nl> ppp b / Documentation / current_iteration . md <nl> <nl> # # Highlights of this Release <nl> - Better ONNX support . <nl> - Improved C # API . <nl> - - OpenCV is not required to install CNTK but to use Tensorboard Image feature . <nl> + - OpenCV is not required to install CNTK , it is only required forTensorboard Image feature and image reader . <nl> <nl> # # API <nl> # # # C # API <nl> <nl> - Internally , data marshalling is done more efficiently than Release 2 . 2 . Use of chatty FloatVector has been avoided during training and evaluation . <nl> # # # C + + <nl> - Exported “ PreorderTraverse ” C + + API : use to search the graph based on the provided criteria . <nl> + # # # Python and C + + <nl> + - Add custom attributes to primitive function , which would be serialized / deserialized when model save / load . <nl> + - Some usage : <nl> + ` ` ` python <nl> + func = C . plus ( a , b ) <nl> + func . custom_attributes = { ' test ' : ' abc ' , ' dict ' : { ' a ' : 1 , ' b ' : 2 } , ' list ' : [ 1 , 2 , 3 ] } <nl> + func . custom_attributes [ ' test2 ' ] = ' def ' <nl> + ` ` ` <nl> <nl> # # Operators <nl> # # # Group convolution <nl> <nl> <nl> # # Performance <nl> # # # Convolution with free static axes support <nl> - - We have improved the training performance for models that use convolution operation with free static axes support . For certain models , we see training speed up of more than x5 . <nl> + - We have improved the training performance for models that use convolution operation with free static axes support . For certain models , we see training speed up of more than x5 . <nl> + # # # Validation Performance <nl> + - Improve validation performance and remove a lot of unneeded validation check . <nl> <nl> # # ONNX <nl> - Improved ONNX support in CNTK . <nl> <nl> - Cover most vision model such as Resnet , Inception and VGG ( Only model saved in V2 CNTK format ) . <nl> - Fix a lot of bugs . <nl> <nl> + # # Dependencies <nl> + # # # Removed OpenCV dependency from CNTK core . <nl> + - CNTK 2 . 2 requires you to install OpenCV to use CNTK but it is optional for CNTK 2 . 3 <nl> + - You need to install OpenCV only if you are planning to use ImageReader or TensorBoard ’ s Image feature . <nl> + # # # Upgraded ImageIO to 2 . 2 . 0 <nl> + - Anaconda doesn ’ t support Python 3 . 4 . CNTK will also remove Python 3 . 4 support in future releases . <nl> + <nl> # # Deprecated <nl> # # # Support for Python 3 . 4 will be removed from CNTK releases later than v2 . 3 . <nl>
More updates for the 2 . 3 release .
microsoft/CNTK
3fc5638accea3e349419f0bf52d088e3ead0c393
2017-11-21T02:34:57Z
mmm a / test / test_jit . py <nl> ppp b / test / test_jit . py <nl> def create_module ( * args , * * kwargs ) : <nl> call_args_str = ' , ' . join ( actuals ) <nl> call = " self . submodule ( { } ) " . format ( call_args_str ) <nl> script = script_method_template . format ( method_args , call ) <nl> - print ( script ) <nl> <nl> # Create module to use the script method <nl> class TheModule ( torch . jit . ScriptModule ) : <nl>
Removes debug spew in test_jit . py ( )
pytorch/pytorch
955a01562dd67d84dd9b22800fc6604fab0a40ff
2018-10-30T01:25:30Z
mmm a / tools / run_tests / run_node . bat <nl> ppp b / tools / run_tests / run_node . bat <nl> <nl> <nl> set JUNIT_REPORT_PATH = src \ node \ reports . xml <nl> set JUNIT_REPORT_STACK = 1 <nl> - . \ node_modules \ . bin \ mocha . cmd - - reporter mocha - jenkins - reporter src \ node \ test <nl> \ No newline at end of file <nl> + . \ node_modules \ . bin \ mocha . cmd - - reporter mocha - jenkins - reporter - - timeout 8000 src \ node \ test <nl> \ No newline at end of file <nl> mmm a / tools / run_tests / run_node . sh <nl> ppp b / tools / run_tests / run_node . sh <nl> cd $ ( dirname $ 0 ) / . . / . . <nl> <nl> root = ` pwd ` <nl> <nl> + test_directory = ' src / node / test ' <nl> + timeout = 8000 <nl> + <nl> if [ " $ CONFIG " = " gcov " ] <nl> then <nl> . / node_modules / . bin / istanbul cover - - dir reports / node_coverage \ <nl> - - x * * / interop / * . / node_modules / . bin / _mocha - - - - timeout 8000 src / node / test <nl> + - x * * / interop / * . / node_modules / . bin / _mocha - - - - timeout $ timeout $ test_directory <nl> cd build <nl> gcov Release / obj . target / grpc / ext / * . o <nl> lcov - - base - directory . - - directory . - c - o coverage . info <nl> then <nl> echo ' < html > < head > < meta http - equiv = " refresh " content = " 0 ; URL = lcov - report / index . html " > < / head > < / html > ' > \ <nl> . . / reports / node_coverage / index . html <nl> else <nl> - JUNIT_REPORT_PATH = src / node / reports . xml JUNIT_REPORT_STACK = 1 . / node_modules / . bin / mocha - - reporter mocha - jenkins - reporter src / node / test <nl> + JUNIT_REPORT_PATH = src / node / reports . xml JUNIT_REPORT_STACK = 1 \ <nl> + . / node_modules / . bin / mocha - - timeout $ timeout \ <nl> + - - reporter mocha - jenkins - reporter $ test_directory <nl> fi <nl> mmm a / tools / run_tests / run_tests . py <nl> ppp b / tools / run_tests / run_tests . py <nl> class RubyLanguage ( object ) : <nl> <nl> def test_specs ( self , config , args ) : <nl> return [ config . job_spec ( [ ' tools / run_tests / run_ruby . sh ' ] , None , <nl> + timeout_seconds = 10 * 60 , <nl> environ = _FORCE_ENVIRON_FOR_WRAPPERS ) ] <nl> <nl> def pre_build_steps ( self ) : <nl> return [ [ ' tools / run_tests / pre_build_ruby . sh ' ] ] <nl> <nl> def make_targets ( self , test_regex ) : <nl> - return [ ' static_c ' ] <nl> + return [ ] <nl> <nl> def make_options ( self ) : <nl> return [ ] <nl> def _build_and_run ( <nl> if BuildAndRunError . POST_TEST in errors : <nl> exit_code | = 4 <nl> sys . exit ( exit_code ) <nl> - <nl>
Increase Node ' s per - test timeout and Ruby ' s overall test timeout
grpc/grpc
7d243df88f359263f4c1ca82a472e49199bd52fe
2016-02-18T17:58:05Z
mmm a / include / swift / AST / Diagnostics . def <nl> ppp b / include / swift / AST / Diagnostics . def <nl> ERROR ( self_assignment_var , tce_sema , none , <nl> ERROR ( self_assignment_prop , tce_sema , none , <nl> " assigning a property to itself " , ( ) ) <nl> <nl> + WARNING ( unreachable_code_after_return , tce_sema , none , <nl> + " expression following ' return ' will never be executed " , ( ) ) <nl> + <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> / / Type Check Statements <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> mmm a / include / swift / AST / Stmt . h <nl> ppp b / include / swift / AST / Stmt . h <nl> class ReturnStmt : public Stmt { <nl> SourceRange getSourceRange ( ) const ; <nl> SourceLoc getReturnLoc ( ) const { return ReturnLoc ; } <nl> <nl> - bool hasResult ( ) { return Result ! = 0 ; } <nl> + bool hasResult ( ) const { return Result ! = 0 ; } <nl> Expr * getResult ( ) const { <nl> assert ( Result & & " ReturnStmt doesn ' t have a result " ) ; <nl> return Result ; <nl> mmm a / lib / Sema / MiscDiagnostics . cpp <nl> ppp b / lib / Sema / MiscDiagnostics . cpp <nl> <nl> <nl> # include " MiscDiagnostics . h " <nl> # include " TypeChecker . h " <nl> + # include " swift / Basic / SourceManager . h " <nl> <nl> using namespace swift ; <nl> <nl> static void diagSelfAssignment ( TypeChecker & TC , const Expr * E ) { <nl> } <nl> } <nl> <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - = = = / / <nl> + / / Diagnose unreachable code . <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - = = = / / <nl> + <nl> + / / / Issue a warning on code containing retrun expression on a differnt line than <nl> + / / / the return keyword and both have the same indentation : <nl> + / / / . . . <nl> + / / / return <nl> + / / / foo ( ) <nl> + static void diagUnreachableCode ( TypeChecker & TC , const Stmt * S ) { <nl> + auto * RS = dyn_cast < ReturnStmt > ( S ) ; <nl> + if ( ! RS ) <nl> + return ; <nl> + if ( ! RS - > hasResult ( ) ) <nl> + return ; <nl> + <nl> + auto RetExpr = RS - > getResult ( ) ; <nl> + auto RSLoc = RS - > getStartLoc ( ) ; <nl> + auto RetExprLoc = RetExpr - > getStartLoc ( ) ; <nl> + if ( RSLoc . isInvalid ( ) | | RetExprLoc . isInvalid ( ) | | ( RSLoc = = RetExprLoc ) ) <nl> + return ; <nl> + SourceManager & SM = TC . Context . SourceMgr ; <nl> + if ( SM . getLineAndColumn ( RSLoc ) . second = = <nl> + SM . getLineAndColumn ( RetExprLoc ) . second ) { <nl> + TC . diagnose ( RetExpr - > getStartLoc ( ) , diag : : unreachable_code_after_return ) ; <nl> + return ; <nl> + } <nl> + return ; <nl> + <nl> + } <nl> + <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - = = = / / <nl> / / High - level entry points . <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - = = = / / <nl> void swift : : performExprDiagnostics ( TypeChecker & TC , const Expr * E ) { <nl> diagSelfAssignment ( TC , E ) ; <nl> } <nl> <nl> + void swift : : performStmtDiagnostics ( TypeChecker & TC , const Stmt * S ) { <nl> + return diagUnreachableCode ( TC , S ) ; <nl> + } <nl> + <nl> mmm a / lib / Sema / MiscDiagnostics . h <nl> ppp b / lib / Sema / MiscDiagnostics . h <nl> <nl> <nl> namespace swift { <nl> class Expr ; <nl> + class Stmt ; <nl> class TypeChecker ; <nl> <nl> - / / / Emit diagnostics for a given expression . <nl> + / / / \ brief Emit diagnostics for a given expression . 
<nl> void performExprDiagnostics ( TypeChecker & TC , const Expr * E ) ; <nl> + / / / \ brief Emit diagnostics for a given statement . <nl> + void performStmtDiagnostics ( TypeChecker & TC , const Stmt * S ) ; <nl> <nl> } / / namespace swift <nl> <nl> mmm a / lib / Sema / TypeCheckStmt . cpp <nl> ppp b / lib / Sema / TypeCheckStmt . cpp <nl> <nl> / / <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> <nl> - # include " swift / Subsystems . h " <nl> # include " TypeChecker . h " <nl> + # include " MiscDiagnostics . h " <nl> + # include " swift / Subsystems . h " <nl> # include " swift / Basic / Optional . h " <nl> # include " swift / AST / ASTWalker . h " <nl> # include " swift / AST / ASTVisitor . h " <nl> class StmtChecker : public StmtVisitor < StmtChecker , Stmt * > { <nl> StmtTy * S2 = cast_or_null < StmtTy > ( visit ( S ) ) ; <nl> if ( S2 = = 0 ) return true ; <nl> S = S2 ; <nl> + performStmtDiagnostics ( TC , S ) ; <nl> return false ; <nl> } <nl> <nl> new file mode 100644 <nl> index 000000000000 . . b702c06113d7 <nl> mmm / dev / null <nl> ppp b / test / Sema / diag_unreachable_after_return . swift <nl> <nl> + / / RUN : % swift % s - verify <nl> + <nl> + / / Warn when the indentation is the same . <nl> + def f_returns_void ( ) { } <nl> + def unreachable_returns_void ( ) { <nl> + return <nl> + f_returns_void ( ) / / expected - warning { { expression following ' return ' will never be executed } } <nl> + } <nl> + <nl> + def f_returns_Int ( ) { } <nl> + def unreachable_returns_Int ( ) { <nl> + return <nl> + f_returns_Int ( ) / / expected - warning { { expression following ' return ' will never be executed } } <nl> + } <nl> + <nl> + / / Do not warn when the indentation is differnt . <nl> + def reachable_returns_void ( ) { <nl> + return <nl> + f_returns_void ( ) / / no - warning <nl> + } <nl> + <nl> + def reachable_returns_Int ( ) { <nl> + return <nl> + f_returns_Int ( ) / / no - warning <nl> + } <nl> + <nl>
Issue a warning when the returned expression is on the next line after the return keyword .
apple/swift
c8ed1954f29e00e30be5b55dffb5ecb467a2f7fb
2013-11-06T02:00:13Z
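The warning above fires only when `return` and the expression following it start on different lines but in the same column. A rough Python sketch of that column heuristic follows; the function below is invented for illustration, while the real check works on the Swift AST and compares SourceManager getLineAndColumn columns.

```python
# Rough sketch (not part of the commit above) of the same-column heuristic:
# warn when a bare `return` is followed, on the next line, by an expression
# that starts at the same indentation level.
def _indent(s: str) -> int:
    return len(s) - len(s.lstrip())

def warn_unreachable_after_return(source: str):
    warnings = []
    lines = source.splitlines()
    for i, line in enumerate(lines[:-1]):
        if line.strip() != "return":
            continue
        nxt = lines[i + 1]
        if nxt.strip() and _indent(line) == _indent(nxt):
            warnings.append((i + 2, "expression following 'return' will never be executed"))
    return warnings

sample = """def f() {
    return
    g()
}
def h() {
    return
        g()
}"""
print(warn_unreachable_after_return(sample))
# -> [(3, "expression following 'return' will never be executed")]
# The second function gets no warning: the deeper indentation suggests g() is
# the intended return value, matching the no-warning cases in the new test.
```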
mmm a / scene / 2d / line_builder . cpp <nl> ppp b / scene / 2d / line_builder . cpp <nl> void LineBuilder : : build ( ) { <nl> } <nl> } else { <nl> / / No intersection : fallback <nl> + if ( current_joint_mode = = Line2D : : LINE_JOINT_SHARP ) { <nl> + / / There is no fallback implementation for LINE_JOINT_SHARP so switch to the LINE_JOINT_BEVEL <nl> + current_joint_mode = Line2D : : LINE_JOINT_BEVEL ; <nl> + } <nl> pos_up1 = corner_pos_up ; <nl> pos_down1 = corner_pos_down ; <nl> } <nl>
Merge pull request from BlackCatter / line2d - fix
godotengine/godot
03b8168c2e6c7a116f58aebb0f1ff790b2ec20c6
2019-01-02T18:39:31Z
mmm a / torch / nn / init . py <nl> ppp b / torch / nn / init . py <nl> def uniform_ ( tensor , a = 0 . , b = 1 . ) : <nl> def normal_ ( tensor , mean = 0 . , std = 1 . ) : <nl> # type : ( Tensor , float , float ) - > Tensor <nl> r " " " Fills the input Tensor with values drawn from the normal <nl> - distribution : math : ` \ mathcal { N } ( \ text { mean } , \ text { std } ) ` . <nl> + distribution : math : ` \ mathcal { N } ( \ text { mean } , \ text { std } ^ 2 ) ` . <nl> <nl> Args : <nl> tensor : an n - dimensional ` torch . Tensor ` <nl> def xavier_normal_ ( tensor , gain = 1 . ) : <nl> described in ` Understanding the difficulty of training deep feedforward <nl> neural networks ` - Glorot , X . & Bengio , Y . ( 2010 ) , using a normal <nl> distribution . The resulting tensor will have values sampled from <nl> - : math : ` \ mathcal { N } ( 0 , \ text { std } ) ` where <nl> + : math : ` \ mathcal { N } ( 0 , \ text { std } ^ 2 ) ` where <nl> <nl> . . math : : <nl> \ text { std } = \ text { gain } \ times \ sqrt { \ frac { 2 } { \ text { fan \ _in } + \ text { fan \ _out } } } <nl> def kaiming_normal_ ( tensor , a = 0 , mode = ' fan_in ' , nonlinearity = ' leaky_relu ' ) : <nl> described in ` Delving deep into rectifiers : Surpassing human - level <nl> performance on ImageNet classification ` - He , K . et al . ( 2015 ) , using a <nl> normal distribution . The resulting tensor will have values sampled from <nl> - : math : ` \ mathcal { N } ( 0 , \ text { std } ) ` where <nl> + : math : ` \ mathcal { N } ( 0 , \ text { std } ^ 2 ) ` where <nl> <nl> . . math : : <nl> \ text { std } = \ sqrt { \ frac { 2 } { ( 1 + a ^ 2 ) \ times \ text { fan \ _in } } } <nl>
Fix latex formula error about * normal ( )
pytorch/pytorch
ae18f8e7617af601c63e460976caa57ab5215090
2019-06-06T15:47:42Z
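The docstring fix above is purely notational: the `std` argument of these init functions is a standard deviation, so samples are drawn from N(mean, std^2). A quick check against the public torch.nn.init.normal_ API (a minimal sketch, not part of the commit) makes the corrected formula concrete:

```python
# The `std` argument of torch.nn.init.normal_ is a standard deviation, so the
# samples follow N(mean, std**2); the empirical moments confirm it.
import torch

torch.manual_seed(0)
t = torch.empty(1_000_000)
torch.nn.init.normal_(t, mean=2.0, std=3.0)

print(t.mean().item())   # ~2.0
print(t.std().item())    # ~3.0 -> variance ~9.0, i.e. N(2, 3**2)
```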
mmm a / test / test_dataloader . py <nl> ppp b / test / test_dataloader . py <nl> def sample_stat ( sampler , num_samples ) : <nl> <nl> self . assertRaises ( ValueError , lambda : RandomSampler ( self . dataset , num_samples = 0 ) ) <nl> <nl> + def test_duplicating_data_with_drop_last ( self ) : <nl> + <nl> + from torch . utils . data . distributed import DistributedSampler <nl> + <nl> + num_processes = 4 <nl> + num_batches = 9 <nl> + data_set = torch . IntTensor ( range ( num_batches ) ) <nl> + scanned_data = torch . IntTensor ( [ ] ) <nl> + for i in range ( num_processes ) : <nl> + s = DistributedSampler ( data_set , num_processes , i ) <nl> + d_loader = DataLoader ( data_set , batch_size = int ( num_batches / num_processes ) , drop_last = True , sampler = s ) <nl> + for k , data in enumerate ( d_loader ) : <nl> + scanned_data = torch . cat ( ( scanned_data , data ) , 0 ) <nl> + <nl> + self . assertEqual ( scanned_data . size ( ) , scanned_data . unique ( ) . size ( ) ) <nl> + <nl> @ unittest . skipIf ( NO_MULTIPROCESSING_SPAWN , " Disabled for environments that \ <nl> don ' t support multiprocessing with spawn start method " ) <nl> def test_batch_sampler ( self ) : <nl> mmm a / torch / utils / data / distributed . py <nl> ppp b / torch / utils / data / distributed . py <nl> def __iter__ ( self ) : <nl> assert len ( indices ) = = self . total_size <nl> <nl> # subsample <nl> - offset = self . num_samples * self . rank <nl> - indices = indices [ offset : offset + self . num_samples ] <nl> + indices = indices [ self . rank : self . total_size : self . num_replicas ] <nl> assert len ( indices ) = = self . num_samples <nl> <nl> return iter ( indices ) <nl>
Fix DistributedSampler to interleave indices across replicas so drop_last no longer duplicates data ( )
pytorch/pytorch
a3fb004b1829880547dd7b3e2cd9d16af657b869
2018-10-09T18:23:50Z
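The fix above replaces contiguous-block subsampling with the strided slice indices[rank : total_size : num_replicas]. A minimal sketch in plain Python (every name below is invented for illustration; it mirrors the 9-sample / 4-replica setup of the new test and omits shuffling) shows why the old slicing re-read padded indices once each worker's DataLoader dropped its incomplete last batch with drop_last=True:

```python
# Toy reconstruction of the bug: 9 samples, 4 replicas, per-replica batch size 2.
import math

data = list(range(9))
num_replicas = 4
num_samples = math.ceil(len(data) / num_replicas)   # 3 per replica
total_size = num_samples * num_replicas             # 12
indices = data + data[: total_size - len(data)]     # pad by repeating the head

def kept_after_drop_last(per_rank_slices, batch_size=2):
    # Each rank's DataLoader with drop_last=True keeps only full batches.
    kept = []
    for sl in per_rank_slices:
        full = (len(sl) // batch_size) * batch_size
        kept += sl[:full]
    return kept

old = kept_after_drop_last(
    [indices[r * num_samples : (r + 1) * num_samples] for r in range(num_replicas)])
new = kept_after_drop_last(
    [indices[r : total_size : num_replicas] for r in range(num_replicas)])

print(sorted(old))  # [0, 0, 1, 1, 3, 4, 6, 7] -> padded copies are re-read
print(sorted(new))  # [0, 1, 2, 3, 4, 5, 6, 7] -> padding lands in the dropped tails
```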
mmm a / hphp / runtime / vm / bytecode . cpp <nl> ppp b / hphp / runtime / vm / bytecode . cpp <nl> OPTBLD_INLINE void iopClsRefGetTS ( clsref_slot slot ) { <nl> auto const mangledTypeName = <nl> makeStaticString ( mangleReifiedGenericsName ( reified_types ) ) ; <nl> bool didAdd = false ; <nl> - addToReifiedGenericsTable ( mangledTypeName , reified_types , didAdd ) ; <nl> + reified_types = <nl> + addToReifiedGenericsTable ( mangledTypeName , reified_types , didAdd ) ; <nl> mangledName = mangleReifiedName ( name , mangledTypeName ) ; <nl> } <nl> auto tv = make_tv < KindOfString > ( mangledName ) ; <nl>
Fix a bug with clsrefgetts
facebook/hhvm
48d317c7741e9c5109163c7f8b41ed813375b3ec
2019-06-04T17:05:27Z
mmm a / Marlin / MarlinConfig . h <nl> ppp b / Marlin / MarlinConfig . h <nl> <nl> # endif <nl> # include " Arduino . h " <nl> # include " Conditionals_post . h " <nl> + # include " SanityCheck . h " <nl> <nl> # endif / / MARLIN_CONFIG_H <nl> deleted file mode 100644 <nl> index f3ef238b75e . . 00000000000 <nl> mmm a / Marlin / SanityCheck . cpp <nl> ppp / dev / null <nl> <nl> - / * * <nl> - * Marlin 3D Printer Firmware <nl> - * Copyright ( C ) 2016 MarlinFirmware [ https : / / github . com / MarlinFirmware / Marlin ] <nl> - * <nl> - * Based on Sprinter and grbl . <nl> - * Copyright ( C ) 2011 Camiel Gubbels / Erik van der Zalm <nl> - * <nl> - * This program is free software : you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation , either version 3 of the License , or <nl> - * ( at your option ) any later version . <nl> - * <nl> - * This program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - / * * <nl> - * SanityCheck . cpp <nl> - * <nl> - * Test configuration values for errors at compile - time . <nl> - * This is done only once , here , to speed up compilation time . <nl> - * / <nl> - # include " MarlinConfig . h " <nl> - <nl> - / * * <nl> - * Due to the high number of issues related with old versions of Arduino IDE <nl> - * we are now warning our users to update their toolkits . In a future Marlin <nl> - * release we will stop supporting old IDE versions and will require user <nl> - * action to proceed with compilation in such environments . <nl> - * / <nl> - # if ! defined ( ARDUINO ) | | ARDUINO < 10600 <nl> - # error " Versions of Arduino IDE prior to 1 . 6 . 0 are no longer supported , please update your toolkit . " <nl> - # endif <nl> - <nl> - / * * <nl> - * We try our best to include sanity checks for all the changes configuration <nl> - * directives because people have a tendency to use outdated config files with <nl> - * the bleding edge source code , but sometimes this is not enough . This check <nl> - * will force a minimum config file revision , otherwise Marlin will not build . <nl> - * / <nl> - # if ! defined ( CONFIGURATION_H_VERSION ) | | CONFIGURATION_H_VERSION < REQUIRED_CONFIGURATION_H_VERSION <nl> - # error " You are using an old Configuration . h file , update it before building Marlin . " <nl> - # endif <nl> - <nl> - # if ! defined ( CONFIGURATION_ADV_H_VERSION ) | | CONFIGURATION_ADV_H_VERSION < REQUIRED_CONFIGURATION_ADV_H_VERSION <nl> - # error " You are using an old Configuration_adv . h file , update it before building Marlin . " <nl> - # endif <nl> - <nl> - / * * <nl> - * Marlin release , version and default string <nl> - * / <nl> - # ifndef SHORT_BUILD_VERSION <nl> - # error " SHORT_BUILD_VERSION must be specified . " <nl> - # elif ! defined ( DETAILED_BUILD_VERSION ) <nl> - # error " BUILD_VERSION must be specified . " <nl> - # elif ! defined ( STRING_DISTRIBUTION_DATE ) <nl> - # error " STRING_DISTRIBUTION_DATE must be specified . " <nl> - # elif ! defined ( PROTOCOL_VERSION ) <nl> - # error " PROTOCOL_VERSION must be specified . " <nl> - # elif ! 
defined ( MACHINE_NAME ) <nl> - # error " MACHINE_NAME must be specified . " <nl> - # elif ! defined ( SOURCE_CODE_URL ) <nl> - # error " SOURCE_CODE_URL must be specified . " <nl> - # elif ! defined ( DEFAULT_MACHINE_UUID ) <nl> - # error " DEFAULT_MACHINE_UUID must be specified . " <nl> - # elif ! defined ( WEBSITE_URL ) <nl> - # error " WEBSITE_URL must be specified . " <nl> - # endif <nl> - <nl> - / * * <nl> - * Dual Stepper Drivers <nl> - * / <nl> - # if ENABLED ( X_DUAL_STEPPER_DRIVERS ) & & ENABLED ( DUAL_X_CARRIAGE ) <nl> - # error " DUAL_X_CARRIAGE is not compatible with X_DUAL_STEPPER_DRIVERS . " <nl> - # elif ENABLED ( X_DUAL_STEPPER_DRIVERS ) & & ( ! HAS_X2_ENABLE | | ! HAS_X2_STEP | | ! HAS_X2_DIR ) <nl> - # error " X_DUAL_STEPPER_DRIVERS requires X2 pins ( and an extra E plug ) . " <nl> - # elif ENABLED ( Y_DUAL_STEPPER_DRIVERS ) & & ( ! HAS_Y2_ENABLE | | ! HAS_Y2_STEP | | ! HAS_Y2_DIR ) <nl> - # error " Y_DUAL_STEPPER_DRIVERS requires Y2 pins ( and an extra E plug ) . " <nl> - # elif ENABLED ( Z_DUAL_STEPPER_DRIVERS ) & & ( ! HAS_Z2_ENABLE | | ! HAS_Z2_STEP | | ! HAS_Z2_DIR ) <nl> - # error " Z_DUAL_STEPPER_DRIVERS requires Z2 pins ( and an extra E plug ) . " <nl> - # endif <nl> - <nl> - / * * <nl> - * Progress Bar <nl> - * / <nl> - # if ENABLED ( LCD_PROGRESS_BAR ) <nl> - # if DISABLED ( SDSUPPORT ) <nl> - # error " LCD_PROGRESS_BAR requires SDSUPPORT . " <nl> - # endif <nl> - # if ENABLED ( DOGLCD ) <nl> - # error " LCD_PROGRESS_BAR does not apply to graphical displays . " <nl> - # endif <nl> - # if ENABLED ( FILAMENT_LCD_DISPLAY ) <nl> - # error " LCD_PROGRESS_BAR and FILAMENT_LCD_DISPLAY are not fully compatible . Comment out this line to use both . " <nl> - # endif <nl> - # endif <nl> - <nl> - / * * <nl> - * Babystepping <nl> - * / <nl> - # if ENABLED ( BABYSTEPPING ) <nl> - # if DISABLED ( ULTRA_LCD ) <nl> - # error " BABYSTEPPING requires an LCD controller . " <nl> - # endif <nl> - # if ENABLED ( SCARA ) <nl> - # error " BABYSTEPPING is not implemented for SCARA yet . " <nl> - # endif <nl> - # if ENABLED ( DELTA ) & & ENABLED ( BABYSTEP_XY ) <nl> - # error " BABYSTEPPING only implemented for Z axis on deltabots . " <nl> - # endif <nl> - # endif <nl> - <nl> - / * * <nl> - * Filament Runout needs a pin and either SD Support or Auto print start detection <nl> - * / <nl> - # if ENABLED ( FILAMENT_RUNOUT_SENSOR ) <nl> - # if ! HAS_FIL_RUNOUT <nl> - # error " FILAMENT_RUNOUT_SENSOR requires FIL_RUNOUT_PIN . " <nl> - # elif DISABLED ( SDSUPPORT ) & & DISABLED ( PRINTJOB_TIMER_AUTOSTART ) <nl> - # error " FILAMENT_RUNOUT_SENSOR requires SDSUPPORT or PRINTJOB_TIMER_AUTOSTART . " <nl> - # endif <nl> - # endif <nl> - <nl> - / * * <nl> - * Filament Change with Extruder Runout Prevention <nl> - * / <nl> - # if ENABLED ( FILAMENT_CHANGE_FEATURE ) & & ENABLED ( EXTRUDER_RUNOUT_PREVENT ) <nl> - # error " EXTRUDER_RUNOUT_PREVENT is incompatible with FILAMENT_CHANGE_FEATURE . " <nl> - # endif <nl> - <nl> - / * * <nl> - * Individual axis homing is useless for DELTAS <nl> - * / <nl> - # if ENABLED ( INDIVIDUAL_AXIS_HOMING_MENU ) & & ENABLED ( DELTA ) <nl> - # error " INDIVIDUAL_AXIS_HOMING_MENU is incompatible with DELTA kinematics . " <nl> - # endif <nl> - <nl> - / * * <nl> - * Options only for EXTRUDERS > 1 <nl> - * / <nl> - # if EXTRUDERS > 1 <nl> - <nl> - # if EXTRUDERS > 4 <nl> - # error " The maximum number of EXTRUDERS in Marlin is 4 . " <nl> - # endif <nl> - <nl> - # if ENABLED ( TEMP_SENSOR_1_AS_REDUNDANT ) <nl> - # error " EXTRUDERS must be 1 with TEMP_SENSOR_1_AS_REDUNDANT . 
" <nl> - # endif <nl> - <nl> - # if ENABLED ( HEATERS_PARALLEL ) <nl> - # error " EXTRUDERS must be 1 with HEATERS_PARALLEL . " <nl> - # endif <nl> - <nl> - # elif ENABLED ( SINGLENOZZLE ) <nl> - # error " SINGLENOZZLE requires 2 or more EXTRUDERS . " <nl> - # endif <nl> - <nl> - / * * <nl> - * Only one type of extruder allowed <nl> - * / <nl> - # if ( ENABLED ( SWITCHING_EXTRUDER ) & & ( ENABLED ( SINGLENOZZLE ) | | ENABLED ( MIXING_EXTRUDER ) ) ) \ <nl> - | | ( ENABLED ( SINGLENOZZLE ) & & ENABLED ( MIXING_EXTRUDER ) ) <nl> - # error " Please define only one type of extruder : SINGLENOZZLE , SWITCHING_EXTRUDER , or MIXING_EXTRUDER . " <nl> - # endif <nl> - <nl> - / * * <nl> - * Single Stepper Dual Extruder with switching servo <nl> - * / <nl> - # if ENABLED ( SWITCHING_EXTRUDER ) <nl> - # if ENABLED ( DUAL_X_CARRIAGE ) <nl> - # error " SWITCHING_EXTRUDER and DUAL_X_CARRIAGE are incompatible . " <nl> - # elif EXTRUDERS ! = 2 <nl> - # error " SWITCHING_EXTRUDER requires exactly 2 EXTRUDERS . " <nl> - # elif NUM_SERVOS < 1 <nl> - # error " SWITCHING_EXTRUDER requires NUM_SERVOS > = 1 . " <nl> - # endif <nl> - # endif <nl> - <nl> - / * * <nl> - * Mixing Extruder requirements <nl> - * / <nl> - # if ENABLED ( MIXING_EXTRUDER ) <nl> - # if EXTRUDERS > 1 <nl> - # error " MIXING_EXTRUDER currently only supports one extruder . " <nl> - # endif <nl> - # if MIXING_STEPPERS < 2 <nl> - # error " You must set MIXING_STEPPERS > = 2 for a mixing extruder . " <nl> - # endif <nl> - # if ENABLED ( FILAMENT_SENSOR ) <nl> - # error " MIXING_EXTRUDER is incompatible with FILAMENT_SENSOR . Comment out this line to use it anyway . " <nl> - # endif <nl> - # endif <nl> - <nl> - / * * <nl> - * Limited number of servos <nl> - * / <nl> - # if defined ( NUM_SERVOS ) & & NUM_SERVOS > 0 <nl> - # if NUM_SERVOS > 4 <nl> - # error " The maximum number of SERVOS in Marlin is 4 . " <nl> - # elif HAS_Z_SERVO_ENDSTOP & & Z_ENDSTOP_SERVO_NR > = NUM_SERVOS <nl> - # error " Z_ENDSTOP_SERVO_NR must be smaller than NUM_SERVOS . " <nl> - # endif <nl> - # endif <nl> - <nl> - / * * <nl> - * Servo deactivation depends on servo endstops <nl> - * / <nl> - # if ENABLED ( DEACTIVATE_SERVOS_AFTER_MOVE ) & & ! HAS_Z_SERVO_ENDSTOP <nl> - # error " Z_ENDSTOP_SERVO_NR is required for DEACTIVATE_SERVOS_AFTER_MOVE . " <nl> - # endif <nl> - <nl> - / * * <nl> - * Required LCD language <nl> - * / <nl> - # if DISABLED ( DOGLCD ) & & ENABLED ( ULTRA_LCD ) & & ! defined ( DISPLAY_CHARSET_HD44780 ) <nl> - # error " You must set DISPLAY_CHARSET_HD44780 to JAPANESE , WESTERN or CYRILLIC for your LCD controller . " <nl> - # endif <nl> - <nl> - / * * <nl> - * Bed Heating Options - PID vs Limit Switching <nl> - * / <nl> - # if ENABLED ( PIDTEMPBED ) & & ENABLED ( BED_LIMIT_SWITCHING ) <nl> - # error " To use BED_LIMIT_SWITCHING you must disable PIDTEMPBED . " <nl> - # endif <nl> - <nl> - / * * <nl> - * Mesh Bed Leveling <nl> - * / <nl> - # if ENABLED ( MESH_BED_LEVELING ) <nl> - # if ENABLED ( DELTA ) <nl> - # error " MESH_BED_LEVELING does not yet support DELTA printers . " <nl> - # elif ENABLED ( AUTO_BED_LEVELING_FEATURE ) <nl> - # error " Select AUTO_BED_LEVELING_FEATURE or MESH_BED_LEVELING , not both . " <nl> - # elif MESH_NUM_X_POINTS > 9 | | MESH_NUM_Y_POINTS > 9 <nl> - # error " MESH_NUM_X_POINTS and MESH_NUM_Y_POINTS must be less than 10 . " <nl> - # endif <nl> - # elif ENABLED ( MANUAL_BED_LEVELING ) <nl> - # error " MESH_BED_LEVELING is required for MANUAL_BED_LEVELING . 
" <nl> - # endif <nl> - <nl> - / * * <nl> - * Probes <nl> - * / <nl> - <nl> - # if PROBE_SELECTED <nl> - <nl> - # if ENABLED ( Z_PROBE_SLED ) & & ENABLED ( DELTA ) <nl> - # error " You cannot use Z_PROBE_SLED with DELTA . " <nl> - # endif <nl> - <nl> - / * * <nl> - * NUM_SERVOS is required for a Z servo probe <nl> - * / <nl> - # if HAS_Z_SERVO_ENDSTOP <nl> - # ifndef NUM_SERVOS <nl> - # error " You must set NUM_SERVOS for a Z servo probe ( Z_ENDSTOP_SERVO_NR ) . " <nl> - # elif Z_ENDSTOP_SERVO_NR > = NUM_SERVOS <nl> - # error " Z_ENDSTOP_SERVO_NR must be less than NUM_SERVOS . " <nl> - # endif <nl> - # endif <nl> - <nl> - / * * <nl> - * A probe needs a pin <nl> - * / <nl> - # if ! PROBE_PIN_CONFIGURED <nl> - # error " A probe needs a pin ! Use Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN or Z_MIN_PROBE_PIN . " <nl> - # endif <nl> - <nl> - / * * <nl> - * Z_MIN_PIN and Z_MIN_PROBE_PIN can ' t co - exist when Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN <nl> - * / <nl> - # if HAS_Z_MIN & & HAS_Z_MIN_PROBE_PIN & & ENABLED ( Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN ) <nl> - # error " A probe cannot have more than one pin ! Use Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN or Z_MIN_PROBE_PIN . " <nl> - # endif <nl> - <nl> - / * * <nl> - * Make sure the plug is enabled if it ' s used <nl> - * / <nl> - # if ENABLED ( Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN ) & & DISABLED ( USE_ZMIN_PLUG ) <nl> - # error " You must enable USE_ZMIN_PLUG if any probe or endstop is connected to the ZMIN plug . " <nl> - # endif <nl> - <nl> - / * * <nl> - * Only allow one probe option to be defined <nl> - * / <nl> - # if ( ENABLED ( FIX_MOUNTED_PROBE ) & & ( ENABLED ( Z_PROBE_ALLEN_KEY ) | | HAS_Z_SERVO_ENDSTOP | | ENABLED ( Z_PROBE_SLED ) ) ) \ <nl> - | | ( ENABLED ( Z_PROBE_ALLEN_KEY ) & & ( HAS_Z_SERVO_ENDSTOP | | ENABLED ( Z_PROBE_SLED ) ) ) \ <nl> - | | ( HAS_Z_SERVO_ENDSTOP & & ENABLED ( Z_PROBE_SLED ) ) <nl> - # error " Please define only one type of probe : Z Servo , Z_PROBE_ALLEN_KEY , Z_PROBE_SLED , or FIX_MOUNTED_PROBE . " <nl> - # endif <nl> - <nl> - / * * <nl> - * Don ' t allow nonsense probe - pin settings <nl> - * / <nl> - # if ENABLED ( Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN ) & & ENABLED ( Z_MIN_PROBE_ENDSTOP ) <nl> - # error " You can ' t enable both Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN and Z_MIN_PROBE_ENDSTOP . " <nl> - # elif ENABLED ( Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN ) & & ENABLED ( DISABLE_Z_MIN_PROBE_ENDSTOP ) <nl> - # error " Don ' t enable DISABLE_Z_MIN_PROBE_ENDSTOP with Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN . " <nl> - # elif ENABLED ( DISABLE_Z_MIN_PROBE_ENDSTOP ) & & DISABLED ( Z_MIN_PROBE_ENDSTOP ) <nl> - # error " DISABLE_Z_MIN_PROBE_ENDSTOP requires Z_MIN_PROBE_ENDSTOP to be set . " <nl> - # endif <nl> - <nl> - / * * <nl> - * Require a Z probe pin if Z_MIN_PROBE_ENDSTOP is enabled . <nl> - * / <nl> - # if ENABLED ( Z_MIN_PROBE_ENDSTOP ) <nl> - # if ! HAS_Z_MIN_PROBE_PIN <nl> - # error " Z_MIN_PROBE_ENDSTOP requires a Z_MIN_PROBE_PIN in your board ' s pins_XXXX . h file . " <nl> - # endif <nl> - / / Forcing Servo definitions can break some hall effect sensor setups . Leaving these here for further comment . <nl> - / / # ifndef NUM_SERVOS <nl> - / / # error " You must have NUM_SERVOS defined and there must be at least 1 configured to use Z_MIN_PROBE_ENDSTOP . " <nl> - / / # endif <nl> - / / # if defined ( NUM_SERVOS ) & & NUM_SERVOS < 1 <nl> - / / # error " You must have at least 1 servo defined for NUM_SERVOS to use Z_MIN_PROBE_ENDSTOP . 
" <nl> - / / # endif <nl> - / / # if Z_ENDSTOP_SERVO_NR < 0 <nl> - / / # error " You must have Z_ENDSTOP_SERVO_NR set to at least 0 or above to use Z_MIN_PROBE_ENDSTOP . " <nl> - / / # endif <nl> - / / # ifndef Z_SERVO_ANGLES <nl> - / / # error " You must have Z_SERVO_ANGLES defined for Z Extend and Retract to use Z_MIN_PROBE_ENDSTOP . " <nl> - / / # endif <nl> - # endif <nl> - <nl> - / * * <nl> - * Make sure Z raise values are set <nl> - * / <nl> - # if defined ( Z_RAISE_BEFORE_PROBING ) | | defined ( Z_RAISE_AFTER_PROBING ) <nl> - # error " Z_RAISE_ ( BEFORE | AFTER ) _PROBING are deprecated . Use Z_RAISE_PROBE_DEPLOY_STOW instead . " <nl> - # elif ! defined ( Z_RAISE_PROBE_DEPLOY_STOW ) <nl> - # error " You must set Z_RAISE_PROBE_DEPLOY_STOW in your configuration . " <nl> - # elif ! defined ( Z_RAISE_BETWEEN_PROBINGS ) <nl> - # error " You must set Z_RAISE_BETWEEN_PROBINGS in your configuration . " <nl> - # elif Z_RAISE_PROBE_DEPLOY_STOW < 0 <nl> - # error " Probes need Z_RAISE_PROBE_DEPLOY_STOW > = 0 . " <nl> - # elif Z_RAISE_BETWEEN_PROBINGS < 0 <nl> - # error " Probes need Z_RAISE_BETWEEN_PROBINGS > = 0 . " <nl> - # endif <nl> - <nl> - # else <nl> - <nl> - / * * <nl> - * Require some kind of probe for bed leveling and probe testing <nl> - * / <nl> - # if ENABLED ( AUTO_BED_LEVELING_FEATURE ) <nl> - # error " AUTO_BED_LEVELING_FEATURE requires a probe ! Define a Z Servo , Z_PROBE_ALLEN_KEY , Z_PROBE_SLED , or FIX_MOUNTED_PROBE . " <nl> - # elif ENABLED ( Z_MIN_PROBE_REPEATABILITY_TEST ) <nl> - # error " Z_MIN_PROBE_REPEATABILITY_TEST requires a probe ! Define a Z Servo , Z_PROBE_ALLEN_KEY , Z_PROBE_SLED , or FIX_MOUNTED_PROBE . " <nl> - # endif <nl> - <nl> - # endif <nl> - <nl> - / * * <nl> - * Make sure Z_SAFE_HOMING point is reachable <nl> - * / <nl> - # if ENABLED ( Z_SAFE_HOMING ) <nl> - # if Z_SAFE_HOMING_X_POINT < MIN_PROBE_X | | Z_SAFE_HOMING_X_POINT > MAX_PROBE_X <nl> - # if HAS_BED_PROBE <nl> - # error " Z_SAFE_HOMING_X_POINT can ' t be reached by the Z probe . " <nl> - # else <nl> - # error " Z_SAFE_HOMING_X_POINT can ' t be reached by the nozzle . " <nl> - # endif <nl> - # elif Z_SAFE_HOMING_Y_POINT < MIN_PROBE_Y | | Z_SAFE_HOMING_Y_POINT > MAX_PROBE_Y <nl> - # if HAS_BED_PROBE <nl> - # error " Z_SAFE_HOMING_Y_POINT can ' t be reached by the Z probe . " <nl> - # else <nl> - # error " Z_SAFE_HOMING_Y_POINT can ' t be reached by the nozzle . " <nl> - # endif <nl> - # endif <nl> - # endif / / Z_SAFE_HOMING <nl> - <nl> - / * * <nl> - * Auto Bed Leveling <nl> - * / <nl> - # if ENABLED ( AUTO_BED_LEVELING_FEATURE ) <nl> - <nl> - / * * <nl> - * Delta has limited bed leveling options <nl> - * / <nl> - # if ENABLED ( DELTA ) & & DISABLED ( AUTO_BED_LEVELING_GRID ) <nl> - # error " You must use AUTO_BED_LEVELING_GRID for DELTA bed leveling . " <nl> - # endif <nl> - <nl> - / * * <nl> - * Require a Z min pin <nl> - * / <nl> - # if ! PIN_EXISTS ( Z_MIN ) <nl> - # if ! PIN_EXISTS ( Z_MIN_PROBE ) | | ( DISABLED ( Z_MIN_PROBE_ENDSTOP ) | | ENABLED ( DISABLE_Z_MIN_PROBE_ENDSTOP ) ) / / It ' s possible for someone to set a pin for the Z probe , but not enable it . <nl> - # error " AUTO_BED_LEVELING_FEATURE requires a Z_MIN or Z_PROBE endstop . Z_MIN_PIN or Z_MIN_PROBE_PIN must point to a valid hardware pin . 
" <nl> - # endif <nl> - # endif <nl> - <nl> - / * * <nl> - * Check if Probe_Offset * Grid Points is greater than Probing Range <nl> - * / <nl> - # if ENABLED ( AUTO_BED_LEVELING_GRID ) <nl> - # ifndef DELTA_PROBEABLE_RADIUS <nl> - / / Be sure points are in the right order <nl> - # if LEFT_PROBE_BED_POSITION > RIGHT_PROBE_BED_POSITION <nl> - # error " LEFT_PROBE_BED_POSITION must be less than RIGHT_PROBE_BED_POSITION . " <nl> - # elif FRONT_PROBE_BED_POSITION > BACK_PROBE_BED_POSITION <nl> - # error " FRONT_PROBE_BED_POSITION must be less than BACK_PROBE_BED_POSITION . " <nl> - # endif <nl> - / / Make sure probing points are reachable <nl> - # if LEFT_PROBE_BED_POSITION < MIN_PROBE_X <nl> - # error " The given LEFT_PROBE_BED_POSITION can ' t be reached by the Z probe . " <nl> - # elif RIGHT_PROBE_BED_POSITION > MAX_PROBE_X <nl> - # error " The given RIGHT_PROBE_BED_POSITION can ' t be reached by the Z probe . " <nl> - # elif FRONT_PROBE_BED_POSITION < MIN_PROBE_Y <nl> - # error " The given FRONT_PROBE_BED_POSITION can ' t be reached by the Z probe . " <nl> - # elif BACK_PROBE_BED_POSITION > MAX_PROBE_Y <nl> - # error " The given BACK_PROBE_BED_POSITION can ' t be reached by the Z probe . " <nl> - # endif <nl> - # endif <nl> - # else / / ! AUTO_BED_LEVELING_GRID <nl> - <nl> - / / Check the triangulation points <nl> - # if ABL_PROBE_PT_1_X < MIN_PROBE_X | | ABL_PROBE_PT_1_X > MAX_PROBE_X <nl> - # error " The given ABL_PROBE_PT_1_X can ' t be reached by the Z probe . " <nl> - # elif ABL_PROBE_PT_2_X < MIN_PROBE_X | | ABL_PROBE_PT_2_X > MAX_PROBE_X <nl> - # error " The given ABL_PROBE_PT_2_X can ' t be reached by the Z probe . " <nl> - # elif ABL_PROBE_PT_3_X < MIN_PROBE_X | | ABL_PROBE_PT_3_X > MAX_PROBE_X <nl> - # error " The given ABL_PROBE_PT_3_X can ' t be reached by the Z probe . " <nl> - # elif ABL_PROBE_PT_1_Y < MIN_PROBE_Y | | ABL_PROBE_PT_1_Y > MAX_PROBE_Y <nl> - # error " The given ABL_PROBE_PT_1_Y can ' t be reached by the Z probe . " <nl> - # elif ABL_PROBE_PT_2_Y < MIN_PROBE_Y | | ABL_PROBE_PT_2_Y > MAX_PROBE_Y <nl> - # error " The given ABL_PROBE_PT_2_Y can ' t be reached by the Z probe . " <nl> - # elif ABL_PROBE_PT_3_Y < MIN_PROBE_Y | | ABL_PROBE_PT_3_Y > MAX_PROBE_Y <nl> - # error " The given ABL_PROBE_PT_3_Y can ' t be reached by the Z probe . " <nl> - # endif <nl> - <nl> - # endif / / ! AUTO_BED_LEVELING_GRID <nl> - <nl> - # endif / / AUTO_BED_LEVELING_FEATURE <nl> - <nl> - / * * <nl> - * Advance Extrusion <nl> - * / <nl> - # if ENABLED ( ADVANCE ) & & ENABLED ( LIN_ADVANCE ) <nl> - # error " You can enable ADVANCE or LIN_ADVANCE , but not both . " <nl> - # endif <nl> - <nl> - / * * <nl> - * Filament Width Sensor <nl> - * / <nl> - # if ENABLED ( FILAMENT_WIDTH_SENSOR ) & & ! HAS_FILAMENT_WIDTH_SENSOR <nl> - # error " FILAMENT_WIDTH_SENSOR requires a FILWIDTH_PIN to be defined . " <nl> - # endif <nl> - <nl> - / * * <nl> - * ULTIPANEL encoder <nl> - * / <nl> - # if ENABLED ( ULTIPANEL ) & & DISABLED ( NEWPANEL ) & & DISABLED ( SR_LCD_2W_NL ) & & ! defined ( SHIFT_CLK ) <nl> - # error " ULTIPANEL requires some kind of encoder . " <nl> - # endif <nl> - <nl> - # if ENCODER_PULSES_PER_STEP < 0 <nl> - # error " ENCODER_PULSES_PER_STEP should not be negative , use REVERSE_MENU_DIRECTION instead . " <nl> - # endif <nl> - <nl> - / * * <nl> - * SAV_3DGLCD display options <nl> - * / <nl> - # if ENABLED ( U8GLIB_SSD1306 ) & & ENABLED ( U8GLIB_SH1106 ) <nl> - # error " Only enable one SAV_3DGLCD display type : U8GLIB_SSD1306 or U8GLIB_SH1106 . 
" <nl> - # endif <nl> - <nl> - / * * <nl> - * Don ' t set more than one kinematic type <nl> - * / <nl> - # if ( ENABLED ( DELTA ) & & ( ENABLED ( SCARA ) | | ENABLED ( COREXY ) | | ENABLED ( COREXZ ) | | ENABLED ( COREYZ ) ) ) \ <nl> - | | ( ENABLED ( SCARA ) & & ( ENABLED ( COREXY ) | | ENABLED ( COREXZ ) | | ENABLED ( COREYZ ) ) ) \ <nl> - | | ( ENABLED ( COREXY ) & & ( ENABLED ( COREXZ ) | | ENABLED ( COREYZ ) ) ) \ <nl> - | | ( ENABLED ( COREXZ ) & & ENABLED ( COREYZ ) ) <nl> - # error " Please enable only one of DELTA , SCARA , COREXY , COREXZ , or COREYZ . " <nl> - # endif <nl> - <nl> - / * * <nl> - * Allen Key <nl> - * Deploying the Allen Key probe uses big moves in z direction . Too dangerous for an unhomed z - axis . <nl> - * / <nl> - # if ENABLED ( Z_PROBE_ALLEN_KEY ) & & ( Z_HOME_DIR < 0 ) & & ENABLED ( Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN ) <nl> - # error " You can ' t home to a z min endstop with a Z_PROBE_ALLEN_KEY " <nl> - # endif <nl> - <nl> - / * * <nl> - * Dual X Carriage requirements <nl> - * / <nl> - # if ENABLED ( DUAL_X_CARRIAGE ) <nl> - # if EXTRUDERS = = 1 <nl> - # error " DUAL_X_CARRIAGE requires 2 ( or more ) extruders . " <nl> - # elif ENABLED ( COREXY ) | | ENABLED ( COREXZ ) <nl> - # error " DUAL_X_CARRIAGE cannot be used with COREXY or COREXZ . " <nl> - # elif ! HAS_X2_ENABLE | | ! HAS_X2_STEP | | ! HAS_X2_DIR <nl> - # error " DUAL_X_CARRIAGE requires X2 stepper pins to be defined . " <nl> - # elif ! HAS_X_MAX <nl> - # error " DUAL_X_CARRIAGE requires USE_XMAX_PLUG and an X Max Endstop . " <nl> - # elif ! defined ( X2_HOME_POS ) | | ! defined ( X2_MIN_POS ) | | ! defined ( X2_MAX_POS ) <nl> - # error " DUAL_X_CARRIAGE requires X2_HOME_POS , X2_MIN_POS , and X2_MAX_POS . " <nl> - # elif X_HOME_DIR ! = - 1 | | X2_HOME_DIR ! = 1 <nl> - # error " DUAL_X_CARRIAGE requires X_HOME_DIR - 1 and X2_HOME_DIR 1 . " <nl> - # endif <nl> - # endif / / DUAL_X_CARRIAGE <nl> - <nl> - / * * <nl> - * Make sure auto fan pins don ' t conflict with the fan pin <nl> - * / <nl> - # if HAS_AUTO_FAN <nl> - # if HAS_FAN0 <nl> - # if EXTRUDER_0_AUTO_FAN_PIN = = FAN_PIN <nl> - # error " You cannot set EXTRUDER_0_AUTO_FAN_PIN equal to FAN_PIN . " <nl> - # elif EXTRUDER_1_AUTO_FAN_PIN = = FAN_PIN <nl> - # error " You cannot set EXTRUDER_1_AUTO_FAN_PIN equal to FAN_PIN . " <nl> - # elif EXTRUDER_2_AUTO_FAN_PIN = = FAN_PIN <nl> - # error " You cannot set EXTRUDER_2_AUTO_FAN_PIN equal to FAN_PIN . " <nl> - # elif EXTRUDER_3_AUTO_FAN_PIN = = FAN_PIN <nl> - # error " You cannot set EXTRUDER_3_AUTO_FAN_PIN equal to FAN_PIN . " <nl> - # endif <nl> - # endif <nl> - # endif <nl> - <nl> - # if HAS_FAN0 & & CONTROLLERFAN_PIN = = FAN_PIN <nl> - # error " You cannot set CONTROLLERFAN_PIN equal to FAN_PIN . " <nl> - # endif <nl> - <nl> - # if HAS_CONTROLLERFAN <nl> - # if EXTRUDER_0_AUTO_FAN_PIN = = CONTROLLERFAN_PIN <nl> - # error " You cannot set EXTRUDER_0_AUTO_FAN_PIN equal to CONTROLLERFAN_PIN . " <nl> - # elif EXTRUDER_1_AUTO_FAN_PIN = = CONTROLLERFAN_PIN <nl> - # error " You cannot set EXTRUDER_1_AUTO_FAN_PIN equal to CONTROLLERFAN_PIN . " <nl> - # elif EXTRUDER_2_AUTO_FAN_PIN = = CONTROLLERFAN_PIN <nl> - # error " You cannot set EXTRUDER_2_AUTO_FAN_PIN equal to CONTROLLERFAN_PIN . " <nl> - # elif EXTRUDER_3_AUTO_FAN_PIN = = CONTROLLERFAN_PIN <nl> - # error " You cannot set EXTRUDER_3_AUTO_FAN_PIN equal to CONTROLLERFAN_PIN . " <nl> - # endif <nl> - # endif <nl> - <nl> - / * * <nl> - * Test Heater , Temp Sensor , and Extruder Pins ; Sensor Type must also be set . <nl> - * / <nl> - # if ! 
HAS_HEATER_0 <nl> - # error " HEATER_0_PIN not defined for this board . " <nl> - # elif ! PIN_EXISTS ( TEMP_0 ) <nl> - # error " TEMP_0_PIN not defined for this board . " <nl> - # elif ! PIN_EXISTS ( E0_STEP ) | | ! PIN_EXISTS ( E0_DIR ) | | ! PIN_EXISTS ( E0_ENABLE ) <nl> - # error " E0_STEP_PIN , E0_DIR_PIN , or E0_ENABLE_PIN not defined for this board . " <nl> - # elif TEMP_SENSOR_0 = = 0 <nl> - # error " TEMP_SENSOR_0 is required . " <nl> - # endif <nl> - <nl> - # if HOTENDS > 1 | | ENABLED ( HEATERS_PARALLEL ) <nl> - # if ! HAS_HEATER_1 <nl> - # error " HEATER_1_PIN not defined for this board . " <nl> - # endif <nl> - # endif <nl> - <nl> - # if HOTENDS > 1 <nl> - # if TEMP_SENSOR_1 = = 0 <nl> - # error " TEMP_SENSOR_1 is required with 2 or more HOTENDS . " <nl> - # elif ! PIN_EXISTS ( TEMP_1 ) <nl> - # error " TEMP_1_PIN not defined for this board . " <nl> - # endif <nl> - # if HOTENDS > 2 <nl> - # if TEMP_SENSOR_2 = = 0 <nl> - # error " TEMP_SENSOR_2 is required with 3 or more HOTENDS . " <nl> - # elif ! HAS_HEATER_2 <nl> - # error " HEATER_2_PIN not defined for this board . " <nl> - # elif ! PIN_EXISTS ( TEMP_2 ) <nl> - # error " TEMP_2_PIN not defined for this board . " <nl> - # endif <nl> - # if HOTENDS > 3 <nl> - # if TEMP_SENSOR_3 = = 0 <nl> - # error " TEMP_SENSOR_3 is required with 4 HOTENDS . " <nl> - # elif ! HAS_HEATER_3 <nl> - # error " HEATER_3_PIN not defined for this board . " <nl> - # elif ! PIN_EXISTS ( TEMP_3 ) <nl> - # error " TEMP_3_PIN not defined for this board . " <nl> - # endif <nl> - # elif TEMP_SENSOR_3 ! = 0 <nl> - # error " TEMP_SENSOR_3 shouldn ' t be set with only 3 extruders . " <nl> - # endif <nl> - # elif TEMP_SENSOR_2 ! = 0 <nl> - # error " TEMP_SENSOR_2 shouldn ' t be set with only 2 extruders . " <nl> - # elif TEMP_SENSOR_3 ! = 0 <nl> - # error " TEMP_SENSOR_3 shouldn ' t be set with only 2 extruders . " <nl> - # endif <nl> - # elif TEMP_SENSOR_1 ! = 0 & & DISABLED ( TEMP_SENSOR_1_AS_REDUNDANT ) <nl> - # error " TEMP_SENSOR_1 shouldn ' t be set with only 1 extruder . " <nl> - # elif TEMP_SENSOR_2 ! = 0 <nl> - # error " TEMP_SENSOR_2 shouldn ' t be set with only 1 extruder . " <nl> - # elif TEMP_SENSOR_3 ! = 0 <nl> - # error " TEMP_SENSOR_3 shouldn ' t be set with only 1 extruder . " <nl> - # endif <nl> - <nl> - # if ENABLED ( TEMP_SENSOR_1_AS_REDUNDANT ) & & TEMP_SENSOR_1 = = 0 <nl> - # error " TEMP_SENSOR_1 is required with TEMP_SENSOR_1_AS_REDUNDANT . " <nl> - # endif <nl> - <nl> - / * * <nl> - * Basic 2 - nozzle duplication mode <nl> - * / <nl> - # if ENABLED ( DUAL_NOZZLE_DUPLICATION_MODE ) <nl> - # if HOTENDS ! = 2 <nl> - # error " DUAL_NOZZLE_DUPLICATION_MODE requires exactly 2 hotends . " <nl> - # elif ENABLED ( DUAL_X_CARRIAGE ) <nl> - # error " DUAL_NOZZLE_DUPLICATION_MODE is incompatible with DUAL_X_CARRIAGE . " <nl> - # elif ENABLED ( SINGLENOZZLE ) <nl> - # error " DUAL_NOZZLE_DUPLICATION_MODE is incompatible with SINGLENOZZLE . " <nl> - # elif ENABLED ( MIXING_EXTRUDER ) <nl> - # error " DUAL_NOZZLE_DUPLICATION_MODE is incompatible with MIXING_EXTRUDER . " <nl> - # elif ENABLED ( SWITCHING_EXTRUDER ) <nl> - # error " DUAL_NOZZLE_DUPLICATION_MODE is incompatible with SWITCHING_EXTRUDER . " <nl> - # endif <nl> - # endif <nl> - <nl> - / * * <nl> - * Test Extruder Pins <nl> - * / <nl> - # if EXTRUDERS > 3 <nl> - # if ! PIN_EXISTS ( E3_STEP ) | | ! PIN_EXISTS ( E3_DIR ) | | ! PIN_EXISTS ( E3_ENABLE ) <nl> - # error " E3_STEP_PIN , E3_DIR_PIN , or E3_ENABLE_PIN not defined for this board . 
" <nl> - # endif <nl> - # elif EXTRUDERS > 2 <nl> - # if ! PIN_EXISTS ( E2_STEP ) | | ! PIN_EXISTS ( E2_DIR ) | | ! PIN_EXISTS ( E2_ENABLE ) <nl> - # error " E2_STEP_PIN , E2_DIR_PIN , or E2_ENABLE_PIN not defined for this board . " <nl> - # endif <nl> - # elif EXTRUDERS > 1 <nl> - # if ! PIN_EXISTS ( E1_STEP ) | | ! PIN_EXISTS ( E1_DIR ) | | ! PIN_EXISTS ( E1_ENABLE ) <nl> - # error " E1_STEP_PIN , E1_DIR_PIN , or E1_ENABLE_PIN not defined for this board . " <nl> - # endif <nl> - # endif <nl> - <nl> - / * * <nl> - * Endstops <nl> - * / <nl> - # if DISABLED ( USE_XMIN_PLUG ) & & DISABLED ( USE_XMAX_PLUG ) & & ! ( ENABLED ( Z_DUAL_ENDSTOPS ) & & Z2_USE_ENDSTOP > = _XMAX_ & & Z2_USE_ENDSTOP < = _XMIN_ ) <nl> - # error " You must enable USE_XMIN_PLUG or USE_XMAX_PLUG " <nl> - # elif DISABLED ( USE_YMIN_PLUG ) & & DISABLED ( USE_YMAX_PLUG ) & & ! ( ENABLED ( Z_DUAL_ENDSTOPS ) & & Z2_USE_ENDSTOP > = _YMAX_ & & Z2_USE_ENDSTOP < = _YMIN_ ) <nl> - # error " You must enable USE_YMIN_PLUG or USE_YMAX_PLUG " <nl> - # elif DISABLED ( USE_ZMIN_PLUG ) & & DISABLED ( USE_ZMAX_PLUG ) & & ! ( ENABLED ( Z_DUAL_ENDSTOPS ) & & Z2_USE_ENDSTOP > = _ZMAX_ & & Z2_USE_ENDSTOP < = _ZMIN_ ) <nl> - # error " You must enable USE_ZMIN_PLUG or USE_ZMAX_PLUG " <nl> - # elif ENABLED ( Z_DUAL_ENDSTOPS ) & & ! Z2_USE_ENDSTOP <nl> - # error " You must set Z2_USE_ENDSTOP with Z_DUAL_ENDSTOPS " <nl> - # endif <nl> - <nl> - / * * <nl> - * emergency - command parser <nl> - * / <nl> - # if ENABLED ( EMERGENCY_PARSER ) & & ENABLED ( USBCON ) <nl> - # error " EMERGENCY_PARSER does not work on boards with AT90USB processors ( USBCON ) . " <nl> - # endif <nl> - <nl> - / * * <nl> - * Warnings for old configurations <nl> - * / <nl> - # if WATCH_TEMP_PERIOD > 500 <nl> - # error " WATCH_TEMP_PERIOD now uses seconds instead of milliseconds . " <nl> - # elif DISABLED ( THERMAL_PROTECTION_HOTENDS ) & & ( defined ( WATCH_TEMP_PERIOD ) | | defined ( THERMAL_PROTECTION_PERIOD ) ) <nl> - # error " Thermal Runaway Protection for hotends is now enabled with THERMAL_PROTECTION_HOTENDS . " <nl> - # elif DISABLED ( THERMAL_PROTECTION_BED ) & & defined ( THERMAL_PROTECTION_BED_PERIOD ) <nl> - # error " Thermal Runaway Protection for the bed is now enabled with THERMAL_PROTECTION_BED . " <nl> - # elif ENABLED ( COREXZ ) & & ENABLED ( Z_LATE_ENABLE ) <nl> - # error " Z_LATE_ENABLE can ' t be used with COREXZ . " <nl> - # elif defined ( X_HOME_RETRACT_MM ) <nl> - # error " [ XYZ ] _HOME_RETRACT_MM settings have been renamed [ XYZ ] _HOME_BUMP_MM . " <nl> - # elif defined ( BEEPER ) <nl> - # error " BEEPER is now BEEPER_PIN . Please update your pins definitions . " <nl> - # elif defined ( SDCARDDETECT ) <nl> - # error " SDCARDDETECT is now SD_DETECT_PIN . Please update your pins definitions . " <nl> - # elif defined ( SDCARDDETECTINVERTED ) <nl> - # error " SDCARDDETECTINVERTED is now SD_DETECT_INVERTED . Please update your configuration . " <nl> - # elif defined ( BTENABLED ) <nl> - # error " BTENABLED is now BLUETOOTH . Please update your configuration . " <nl> - # elif defined ( CUSTOM_MENDEL_NAME ) <nl> - # error " CUSTOM_MENDEL_NAME is now CUSTOM_MACHINE_NAME . Please update your configuration . " <nl> - # elif defined ( HAS_AUTOMATIC_VERSIONING ) <nl> - # error " HAS_AUTOMATIC_VERSIONING is now USE_AUTOMATIC_VERSIONING . Please update your configuration . " <nl> - # elif defined ( ENABLE_AUTO_BED_LEVELING ) <nl> - # error " ENABLE_AUTO_BED_LEVELING is now AUTO_BED_LEVELING_FEATURE . Please update your configuration . 
" <nl> - # elif defined ( SDSLOW ) <nl> - # error " SDSLOW deprecated . Set SPI_SPEED to SPI_HALF_SPEED instead . " <nl> - # elif defined ( SDEXTRASLOW ) <nl> - # error " SDEXTRASLOW deprecated . Set SPI_SPEED to SPI_QUARTER_SPEED instead . " <nl> - # elif defined ( Z_RAISE_BEFORE_HOMING ) <nl> - # error " Z_RAISE_BEFORE_HOMING is deprecated . Use MIN_Z_HEIGHT_FOR_HOMING instead . " <nl> - # elif defined ( FILAMENT_SENSOR ) <nl> - # error " FILAMENT_SENSOR is deprecated . Use FILAMENT_WIDTH_SENSOR instead . " <nl> - # elif defined ( DISABLE_MAX_ENDSTOPS ) | | defined ( DISABLE_MIN_ENDSTOPS ) <nl> - # error " DISABLE_MAX_ENDSTOPS and DISABLE_MIN_ENDSTOPS deprecated . Use individual USE_ * _PLUG options instead . " <nl> - # elif ENABLED ( Z_DUAL_ENDSTOPS ) & & ! defined ( Z2_USE_ENDSTOP ) <nl> - # error " Z_DUAL_ENDSTOPS settings are simplified . Just set Z2_USE_ENDSTOP to the endstop you want to repurpose for Z2 " <nl> - # elif defined ( LANGUAGE_INCLUDE ) <nl> - # error " LANGUAGE_INCLUDE has been replaced by LCD_LANGUAGE . Please update your configuration . " <nl> - # elif defined ( EXTRUDER_OFFSET_X ) | | defined ( EXTRUDER_OFFSET_Y ) <nl> - # error " EXTRUDER_OFFSET_ [ XY ] is deprecated . Use HOTEND_OFFSET_ [ XY ] instead . " <nl> - # elif defined ( PID_PARAMS_PER_EXTRUDER ) <nl> - # error " PID_PARAMS_PER_EXTRUDER is deprecated . Use PID_PARAMS_PER_HOTEND instead . " <nl> - # elif defined ( EXTRUDER_WATTS ) <nl> - # error " EXTRUDER_WATTS is deprecated . Use HOTEND_WATTS instead . " <nl> - # elif defined ( SERVO_ENDSTOP_ANGLES ) <nl> - # error " SERVO_ENDSTOP_ANGLES is deprecated . Use Z_SERVO_ANGLES instead . " <nl> - # elif defined ( X_ENDSTOP_SERVO_NR ) | | defined ( Y_ENDSTOP_SERVO_NR ) <nl> - # error " X_ENDSTOP_SERVO_NR and Y_ENDSTOP_SERVO_NR are deprecated and should be removed . " <nl> - # elif defined ( XY_TRAVEL_SPEED ) <nl> - # error " XY_TRAVEL_SPEED is deprecated . Use XY_PROBE_SPEED instead . " <nl> - # elif defined ( PROBE_SERVO_DEACTIVATION_DELAY ) <nl> - # error " PROBE_SERVO_DEACTIVATION_DELAY is deprecated . Use DEACTIVATE_SERVOS_AFTER_MOVE instead . " <nl> - # elif defined ( SERVO_DEACTIVATION_DELAY ) <nl> - # error " SERVO_DEACTIVATION_DELAY is deprecated . Use SERVO_DELAY instead . " <nl> - # elif ENABLED ( FILAMENTCHANGEENABLE ) <nl> - # error " FILAMENTCHANGEENABLE is now FILAMENT_CHANGE_FEATURE . Please update your configuration . " <nl> - # elif defined ( PLA_PREHEAT_HOTEND_TEMP ) <nl> - # error " PLA_PREHEAT_HOTEND_TEMP is now PREHEAT_1_TEMP_HOTEND . Please update your configuration . " <nl> - # elif defined ( PLA_PREHEAT_HPB_TEMP ) <nl> - # error " PLA_PREHEAT_HPB_TEMP is now PREHEAT_1_TEMP_BED . Please update your configuration . " <nl> - # elif defined ( PLA_PREHEAT_FAN_SPEED ) <nl> - # error " PLA_PREHEAT_FAN_SPEED is now PREHEAT_1_FAN_SPEED . Please update your configuration . " <nl> - # elif defined ( ABS_PREHEAT_HOTEND_TEMP ) <nl> - # error " ABS_PREHEAT_HOTEND_TEMP is now PREHEAT_2_TEMP_HOTEND . Please update your configuration . " <nl> - # elif defined ( ABS_PREHEAT_HPB_TEMP ) <nl> - # error " ABS_PREHEAT_HPB_TEMP is now PREHEAT_2_TEMP_BED . Please update your configuration . " <nl> - # elif defined ( ABS_PREHEAT_FAN_SPEED ) <nl> - # error " ABS_PREHEAT_FAN_SPEED is now PREHEAT_2_FAN_SPEED . Please update your configuration . " <nl> - # elif defined ( ENDSTOPS_ONLY_FOR_HOMING ) <nl> - # error " ENDSTOPS_ONLY_FOR_HOMING is deprecated . Use ( disable ) ENDSTOPS_ALWAYS_ON_DEFAULT instead . 
" <nl> - # elif defined ( HOMING_FEEDRATE ) <nl> - # error " HOMING_FEEDRATE is deprecated . Set individual rates with HOMING_FEEDRATE_ ( XY | Z | E ) instead . " <nl> - # endif <nl> mmm a / Marlin / SanityCheck . h <nl> ppp b / Marlin / SanityCheck . h <nl> <nl> <nl> / * * <nl> * SanityCheck . h <nl> - * OBSOLETE : Moved to SanityCheck . cpp <nl> + * <nl> + * Test configuration values for errors at compile - time . <nl> + * / <nl> + <nl> + / * * <nl> + * Due to the high number of issues related with old versions of Arduino IDE <nl> + * we now prevent Marlin from compiling with older toolkits . <nl> + * / <nl> + # if ! defined ( ARDUINO ) | | ARDUINO < 10600 <nl> + # error " Versions of Arduino IDE prior to 1 . 6 . 0 are no longer supported , please update your toolkit . " <nl> + # endif <nl> + <nl> + / * * <nl> + * We try our best to include sanity checks for all the changes configuration <nl> + * directives because people have a tendency to use outdated config files with <nl> + * the bleding edge source code , but sometimes this is not enough . This check <nl> + * will force a minimum config file revision , otherwise Marlin will not build . <nl> + * / <nl> + # if ! defined ( CONFIGURATION_H_VERSION ) | | CONFIGURATION_H_VERSION < REQUIRED_CONFIGURATION_H_VERSION <nl> + # error " You are using an old Configuration . h file , update it before building Marlin . " <nl> + # endif <nl> + <nl> + # if ! defined ( CONFIGURATION_ADV_H_VERSION ) | | CONFIGURATION_ADV_H_VERSION < REQUIRED_CONFIGURATION_ADV_H_VERSION <nl> + # error " You are using an old Configuration_adv . h file , update it before building Marlin . " <nl> + # endif <nl> + <nl> + / * * <nl> + * Marlin release , version and default string <nl> + * / <nl> + # ifndef SHORT_BUILD_VERSION <nl> + # error " SHORT_BUILD_VERSION must be specified . " <nl> + # elif ! defined ( DETAILED_BUILD_VERSION ) <nl> + # error " BUILD_VERSION must be specified . " <nl> + # elif ! defined ( STRING_DISTRIBUTION_DATE ) <nl> + # error " STRING_DISTRIBUTION_DATE must be specified . " <nl> + # elif ! defined ( PROTOCOL_VERSION ) <nl> + # error " PROTOCOL_VERSION must be specified . " <nl> + # elif ! defined ( MACHINE_NAME ) <nl> + # error " MACHINE_NAME must be specified . " <nl> + # elif ! defined ( SOURCE_CODE_URL ) <nl> + # error " SOURCE_CODE_URL must be specified . " <nl> + # elif ! defined ( DEFAULT_MACHINE_UUID ) <nl> + # error " DEFAULT_MACHINE_UUID must be specified . " <nl> + # elif ! defined ( WEBSITE_URL ) <nl> + # error " WEBSITE_URL must be specified . " <nl> + # endif <nl> + <nl> + / * * <nl> + * Dual Stepper Drivers <nl> + * / <nl> + # if ENABLED ( X_DUAL_STEPPER_DRIVERS ) & & ENABLED ( DUAL_X_CARRIAGE ) <nl> + # error " DUAL_X_CARRIAGE is not compatible with X_DUAL_STEPPER_DRIVERS . " <nl> + # elif ENABLED ( X_DUAL_STEPPER_DRIVERS ) & & ( ! HAS_X2_ENABLE | | ! HAS_X2_STEP | | ! HAS_X2_DIR ) <nl> + # error " X_DUAL_STEPPER_DRIVERS requires X2 pins ( and an extra E plug ) . " <nl> + # elif ENABLED ( Y_DUAL_STEPPER_DRIVERS ) & & ( ! HAS_Y2_ENABLE | | ! HAS_Y2_STEP | | ! HAS_Y2_DIR ) <nl> + # error " Y_DUAL_STEPPER_DRIVERS requires Y2 pins ( and an extra E plug ) . " <nl> + # elif ENABLED ( Z_DUAL_STEPPER_DRIVERS ) & & ( ! HAS_Z2_ENABLE | | ! HAS_Z2_STEP | | ! HAS_Z2_DIR ) <nl> + # error " Z_DUAL_STEPPER_DRIVERS requires Z2 pins ( and an extra E plug ) . 
" <nl> + # endif <nl> + <nl> + / * * <nl> + * Progress Bar <nl> + * / <nl> + # if ENABLED ( LCD_PROGRESS_BAR ) <nl> + # if DISABLED ( SDSUPPORT ) <nl> + # error " LCD_PROGRESS_BAR requires SDSUPPORT . " <nl> + # endif <nl> + # if ENABLED ( DOGLCD ) <nl> + # error " LCD_PROGRESS_BAR does not apply to graphical displays . " <nl> + # endif <nl> + # if ENABLED ( FILAMENT_LCD_DISPLAY ) <nl> + # error " LCD_PROGRESS_BAR and FILAMENT_LCD_DISPLAY are not fully compatible . Comment out this line to use both . " <nl> + # endif <nl> + # endif <nl> + <nl> + / * * <nl> + * Babystepping <nl> + * / <nl> + # if ENABLED ( BABYSTEPPING ) <nl> + # if DISABLED ( ULTRA_LCD ) <nl> + # error " BABYSTEPPING requires an LCD controller . " <nl> + # endif <nl> + # if ENABLED ( SCARA ) <nl> + # error " BABYSTEPPING is not implemented for SCARA yet . " <nl> + # endif <nl> + # if ENABLED ( DELTA ) & & ENABLED ( BABYSTEP_XY ) <nl> + # error " BABYSTEPPING only implemented for Z axis on deltabots . " <nl> + # endif <nl> + # endif <nl> + <nl> + / * * <nl> + * Filament Runout needs a pin and either SD Support or Auto print start detection <nl> + * / <nl> + # if ENABLED ( FILAMENT_RUNOUT_SENSOR ) <nl> + # if ! HAS_FIL_RUNOUT <nl> + # error " FILAMENT_RUNOUT_SENSOR requires FIL_RUNOUT_PIN . " <nl> + # elif DISABLED ( SDSUPPORT ) & & DISABLED ( PRINTJOB_TIMER_AUTOSTART ) <nl> + # error " FILAMENT_RUNOUT_SENSOR requires SDSUPPORT or PRINTJOB_TIMER_AUTOSTART . " <nl> + # endif <nl> + # endif <nl> + <nl> + / * * <nl> + * Filament Change with Extruder Runout Prevention <nl> + * / <nl> + # if ENABLED ( FILAMENT_CHANGE_FEATURE ) & & ENABLED ( EXTRUDER_RUNOUT_PREVENT ) <nl> + # error " EXTRUDER_RUNOUT_PREVENT is incompatible with FILAMENT_CHANGE_FEATURE . " <nl> + # endif <nl> + <nl> + / * * <nl> + * Individual axis homing is useless for DELTAS <nl> + * / <nl> + # if ENABLED ( INDIVIDUAL_AXIS_HOMING_MENU ) & & ENABLED ( DELTA ) <nl> + # error " INDIVIDUAL_AXIS_HOMING_MENU is incompatible with DELTA kinematics . " <nl> + # endif <nl> + <nl> + / * * <nl> + * Options only for EXTRUDERS > 1 <nl> + * / <nl> + # if EXTRUDERS > 1 <nl> + <nl> + # if EXTRUDERS > 4 <nl> + # error " The maximum number of EXTRUDERS in Marlin is 4 . " <nl> + # endif <nl> + <nl> + # if ENABLED ( TEMP_SENSOR_1_AS_REDUNDANT ) <nl> + # error " EXTRUDERS must be 1 with TEMP_SENSOR_1_AS_REDUNDANT . " <nl> + # endif <nl> + <nl> + # if ENABLED ( HEATERS_PARALLEL ) <nl> + # error " EXTRUDERS must be 1 with HEATERS_PARALLEL . " <nl> + # endif <nl> + <nl> + # elif ENABLED ( SINGLENOZZLE ) <nl> + # error " SINGLENOZZLE requires 2 or more EXTRUDERS . " <nl> + # endif <nl> + <nl> + / * * <nl> + * Only one type of extruder allowed <nl> + * / <nl> + # if ( ENABLED ( SWITCHING_EXTRUDER ) & & ( ENABLED ( SINGLENOZZLE ) | | ENABLED ( MIXING_EXTRUDER ) ) ) \ <nl> + | | ( ENABLED ( SINGLENOZZLE ) & & ENABLED ( MIXING_EXTRUDER ) ) <nl> + # error " Please define only one type of extruder : SINGLENOZZLE , SWITCHING_EXTRUDER , or MIXING_EXTRUDER . " <nl> + # endif <nl> + <nl> + / * * <nl> + * Single Stepper Dual Extruder with switching servo <nl> + * / <nl> + # if ENABLED ( SWITCHING_EXTRUDER ) <nl> + # if ENABLED ( DUAL_X_CARRIAGE ) <nl> + # error " SWITCHING_EXTRUDER and DUAL_X_CARRIAGE are incompatible . " <nl> + # elif EXTRUDERS ! = 2 <nl> + # error " SWITCHING_EXTRUDER requires exactly 2 EXTRUDERS . " <nl> + # elif NUM_SERVOS < 1 <nl> + # error " SWITCHING_EXTRUDER requires NUM_SERVOS > = 1 . 
" <nl> + # endif <nl> + # endif <nl> + <nl> + / * * <nl> + * Mixing Extruder requirements <nl> + * / <nl> + # if ENABLED ( MIXING_EXTRUDER ) <nl> + # if EXTRUDERS > 1 <nl> + # error " MIXING_EXTRUDER currently only supports one extruder . " <nl> + # endif <nl> + # if MIXING_STEPPERS < 2 <nl> + # error " You must set MIXING_STEPPERS > = 2 for a mixing extruder . " <nl> + # endif <nl> + # if ENABLED ( FILAMENT_SENSOR ) <nl> + # error " MIXING_EXTRUDER is incompatible with FILAMENT_SENSOR . Comment out this line to use it anyway . " <nl> + # endif <nl> + # endif <nl> + <nl> + / * * <nl> + * Limited number of servos <nl> + * / <nl> + # if defined ( NUM_SERVOS ) & & NUM_SERVOS > 0 <nl> + # if NUM_SERVOS > 4 <nl> + # error " The maximum number of SERVOS in Marlin is 4 . " <nl> + # elif HAS_Z_SERVO_ENDSTOP & & Z_ENDSTOP_SERVO_NR > = NUM_SERVOS <nl> + # error " Z_ENDSTOP_SERVO_NR must be smaller than NUM_SERVOS . " <nl> + # endif <nl> + # endif <nl> + <nl> + / * * <nl> + * Servo deactivation depends on servo endstops <nl> + * / <nl> + # if ENABLED ( DEACTIVATE_SERVOS_AFTER_MOVE ) & & ! HAS_Z_SERVO_ENDSTOP <nl> + # error " Z_ENDSTOP_SERVO_NR is required for DEACTIVATE_SERVOS_AFTER_MOVE . " <nl> + # endif <nl> + <nl> + / * * <nl> + * Required LCD language <nl> + * / <nl> + # if DISABLED ( DOGLCD ) & & ENABLED ( ULTRA_LCD ) & & ! defined ( DISPLAY_CHARSET_HD44780 ) <nl> + # error " You must set DISPLAY_CHARSET_HD44780 to JAPANESE , WESTERN or CYRILLIC for your LCD controller . " <nl> + # endif <nl> + <nl> + / * * <nl> + * Bed Heating Options - PID vs Limit Switching <nl> + * / <nl> + # if ENABLED ( PIDTEMPBED ) & & ENABLED ( BED_LIMIT_SWITCHING ) <nl> + # error " To use BED_LIMIT_SWITCHING you must disable PIDTEMPBED . " <nl> + # endif <nl> + <nl> + / * * <nl> + * Mesh Bed Leveling <nl> + * / <nl> + # if ENABLED ( MESH_BED_LEVELING ) <nl> + # if ENABLED ( DELTA ) <nl> + # error " MESH_BED_LEVELING does not yet support DELTA printers . " <nl> + # elif ENABLED ( AUTO_BED_LEVELING_FEATURE ) <nl> + # error " Select AUTO_BED_LEVELING_FEATURE or MESH_BED_LEVELING , not both . " <nl> + # elif MESH_NUM_X_POINTS > 9 | | MESH_NUM_Y_POINTS > 9 <nl> + # error " MESH_NUM_X_POINTS and MESH_NUM_Y_POINTS must be less than 10 . " <nl> + # endif <nl> + # elif ENABLED ( MANUAL_BED_LEVELING ) <nl> + # error " MESH_BED_LEVELING is required for MANUAL_BED_LEVELING . " <nl> + # endif <nl> + <nl> + / * * <nl> + * Probes <nl> + * / <nl> + <nl> + # if PROBE_SELECTED <nl> + <nl> + # if ENABLED ( Z_PROBE_SLED ) & & ENABLED ( DELTA ) <nl> + # error " You cannot use Z_PROBE_SLED with DELTA . " <nl> + # endif <nl> + <nl> + / * * <nl> + * NUM_SERVOS is required for a Z servo probe <nl> + * / <nl> + # if HAS_Z_SERVO_ENDSTOP <nl> + # ifndef NUM_SERVOS <nl> + # error " You must set NUM_SERVOS for a Z servo probe ( Z_ENDSTOP_SERVO_NR ) . " <nl> + # elif Z_ENDSTOP_SERVO_NR > = NUM_SERVOS <nl> + # error " Z_ENDSTOP_SERVO_NR must be less than NUM_SERVOS . " <nl> + # endif <nl> + # endif <nl> + <nl> + / * * <nl> + * A probe needs a pin <nl> + * / <nl> + # if ! PROBE_PIN_CONFIGURED <nl> + # error " A probe needs a pin ! Use Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN or Z_MIN_PROBE_PIN . " <nl> + # endif <nl> + <nl> + / * * <nl> + * Z_MIN_PIN and Z_MIN_PROBE_PIN can ' t co - exist when Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN <nl> + * / <nl> + # if HAS_Z_MIN & & HAS_Z_MIN_PROBE_PIN & & ENABLED ( Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN ) <nl> + # error " A probe cannot have more than one pin ! Use Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN or Z_MIN_PROBE_PIN . 
" <nl> + # endif <nl> + <nl> + / * * <nl> + * Make sure the plug is enabled if it ' s used <nl> + * / <nl> + # if ENABLED ( Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN ) & & DISABLED ( USE_ZMIN_PLUG ) <nl> + # error " You must enable USE_ZMIN_PLUG if any probe or endstop is connected to the ZMIN plug . " <nl> + # endif <nl> + <nl> + / * * <nl> + * Only allow one probe option to be defined <nl> + * / <nl> + # if ( ENABLED ( FIX_MOUNTED_PROBE ) & & ( ENABLED ( Z_PROBE_ALLEN_KEY ) | | HAS_Z_SERVO_ENDSTOP | | ENABLED ( Z_PROBE_SLED ) ) ) \ <nl> + | | ( ENABLED ( Z_PROBE_ALLEN_KEY ) & & ( HAS_Z_SERVO_ENDSTOP | | ENABLED ( Z_PROBE_SLED ) ) ) \ <nl> + | | ( HAS_Z_SERVO_ENDSTOP & & ENABLED ( Z_PROBE_SLED ) ) <nl> + # error " Please define only one type of probe : Z Servo , Z_PROBE_ALLEN_KEY , Z_PROBE_SLED , or FIX_MOUNTED_PROBE . " <nl> + # endif <nl> + <nl> + / * * <nl> + * Don ' t allow nonsense probe - pin settings <nl> + * / <nl> + # if ENABLED ( Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN ) & & ENABLED ( Z_MIN_PROBE_ENDSTOP ) <nl> + # error " You can ' t enable both Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN and Z_MIN_PROBE_ENDSTOP . " <nl> + # elif ENABLED ( Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN ) & & ENABLED ( DISABLE_Z_MIN_PROBE_ENDSTOP ) <nl> + # error " Don ' t enable DISABLE_Z_MIN_PROBE_ENDSTOP with Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN . " <nl> + # elif ENABLED ( DISABLE_Z_MIN_PROBE_ENDSTOP ) & & DISABLED ( Z_MIN_PROBE_ENDSTOP ) <nl> + # error " DISABLE_Z_MIN_PROBE_ENDSTOP requires Z_MIN_PROBE_ENDSTOP to be set . " <nl> + # endif <nl> + <nl> + / * * <nl> + * Require a Z probe pin if Z_MIN_PROBE_ENDSTOP is enabled . <nl> + * / <nl> + # if ENABLED ( Z_MIN_PROBE_ENDSTOP ) <nl> + # if ! HAS_Z_MIN_PROBE_PIN <nl> + # error " Z_MIN_PROBE_ENDSTOP requires a Z_MIN_PROBE_PIN in your board ' s pins_XXXX . h file . " <nl> + # endif <nl> + / / Forcing Servo definitions can break some hall effect sensor setups . Leaving these here for further comment . <nl> + / / # ifndef NUM_SERVOS <nl> + / / # error " You must have NUM_SERVOS defined and there must be at least 1 configured to use Z_MIN_PROBE_ENDSTOP . " <nl> + / / # endif <nl> + / / # if defined ( NUM_SERVOS ) & & NUM_SERVOS < 1 <nl> + / / # error " You must have at least 1 servo defined for NUM_SERVOS to use Z_MIN_PROBE_ENDSTOP . " <nl> + / / # endif <nl> + / / # if Z_ENDSTOP_SERVO_NR < 0 <nl> + / / # error " You must have Z_ENDSTOP_SERVO_NR set to at least 0 or above to use Z_MIN_PROBE_ENDSTOP . " <nl> + / / # endif <nl> + / / # ifndef Z_SERVO_ANGLES <nl> + / / # error " You must have Z_SERVO_ANGLES defined for Z Extend and Retract to use Z_MIN_PROBE_ENDSTOP . " <nl> + / / # endif <nl> + # endif <nl> + <nl> + / * * <nl> + * Make sure Z raise values are set <nl> + * / <nl> + # if defined ( Z_RAISE_BEFORE_PROBING ) | | defined ( Z_RAISE_AFTER_PROBING ) <nl> + # error " Z_RAISE_ ( BEFORE | AFTER ) _PROBING are deprecated . Use Z_RAISE_PROBE_DEPLOY_STOW instead . " <nl> + # elif ! defined ( Z_RAISE_PROBE_DEPLOY_STOW ) <nl> + # error " You must set Z_RAISE_PROBE_DEPLOY_STOW in your configuration . " <nl> + # elif ! defined ( Z_RAISE_BETWEEN_PROBINGS ) <nl> + # error " You must set Z_RAISE_BETWEEN_PROBINGS in your configuration . " <nl> + # elif Z_RAISE_PROBE_DEPLOY_STOW < 0 <nl> + # error " Probes need Z_RAISE_PROBE_DEPLOY_STOW > = 0 . " <nl> + # elif Z_RAISE_BETWEEN_PROBINGS < 0 <nl> + # error " Probes need Z_RAISE_BETWEEN_PROBINGS > = 0 . 
" <nl> + # endif <nl> + <nl> + # else <nl> + <nl> + / * * <nl> + * Require some kind of probe for bed leveling and probe testing <nl> + * / <nl> + # if ENABLED ( AUTO_BED_LEVELING_FEATURE ) <nl> + # error " AUTO_BED_LEVELING_FEATURE requires a probe ! Define a Z Servo , Z_PROBE_ALLEN_KEY , Z_PROBE_SLED , or FIX_MOUNTED_PROBE . " <nl> + # elif ENABLED ( Z_MIN_PROBE_REPEATABILITY_TEST ) <nl> + # error " Z_MIN_PROBE_REPEATABILITY_TEST requires a probe ! Define a Z Servo , Z_PROBE_ALLEN_KEY , Z_PROBE_SLED , or FIX_MOUNTED_PROBE . " <nl> + # endif <nl> + <nl> + # endif <nl> + <nl> + / * * <nl> + * Make sure Z_SAFE_HOMING point is reachable <nl> + * / <nl> + # if ENABLED ( Z_SAFE_HOMING ) <nl> + # if Z_SAFE_HOMING_X_POINT < MIN_PROBE_X | | Z_SAFE_HOMING_X_POINT > MAX_PROBE_X <nl> + # if HAS_BED_PROBE <nl> + # error " Z_SAFE_HOMING_X_POINT can ' t be reached by the Z probe . " <nl> + # else <nl> + # error " Z_SAFE_HOMING_X_POINT can ' t be reached by the nozzle . " <nl> + # endif <nl> + # elif Z_SAFE_HOMING_Y_POINT < MIN_PROBE_Y | | Z_SAFE_HOMING_Y_POINT > MAX_PROBE_Y <nl> + # if HAS_BED_PROBE <nl> + # error " Z_SAFE_HOMING_Y_POINT can ' t be reached by the Z probe . " <nl> + # else <nl> + # error " Z_SAFE_HOMING_Y_POINT can ' t be reached by the nozzle . " <nl> + # endif <nl> + # endif <nl> + # endif / / Z_SAFE_HOMING <nl> + <nl> + / * * <nl> + * Auto Bed Leveling <nl> + * / <nl> + # if ENABLED ( AUTO_BED_LEVELING_FEATURE ) <nl> + <nl> + / * * <nl> + * Delta has limited bed leveling options <nl> + * / <nl> + # if ENABLED ( DELTA ) & & DISABLED ( AUTO_BED_LEVELING_GRID ) <nl> + # error " You must use AUTO_BED_LEVELING_GRID for DELTA bed leveling . " <nl> + # endif <nl> + <nl> + / * * <nl> + * Require a Z min pin <nl> + * / <nl> + # if ! PIN_EXISTS ( Z_MIN ) <nl> + # if ! PIN_EXISTS ( Z_MIN_PROBE ) | | ( DISABLED ( Z_MIN_PROBE_ENDSTOP ) | | ENABLED ( DISABLE_Z_MIN_PROBE_ENDSTOP ) ) / / It ' s possible for someone to set a pin for the Z probe , but not enable it . <nl> + # error " AUTO_BED_LEVELING_FEATURE requires a Z_MIN or Z_PROBE endstop . Z_MIN_PIN or Z_MIN_PROBE_PIN must point to a valid hardware pin . " <nl> + # endif <nl> + # endif <nl> + <nl> + / * * <nl> + * Check if Probe_Offset * Grid Points is greater than Probing Range <nl> + * / <nl> + # if ENABLED ( AUTO_BED_LEVELING_GRID ) <nl> + # ifndef DELTA_PROBEABLE_RADIUS <nl> + / / Be sure points are in the right order <nl> + # if LEFT_PROBE_BED_POSITION > RIGHT_PROBE_BED_POSITION <nl> + # error " LEFT_PROBE_BED_POSITION must be less than RIGHT_PROBE_BED_POSITION . " <nl> + # elif FRONT_PROBE_BED_POSITION > BACK_PROBE_BED_POSITION <nl> + # error " FRONT_PROBE_BED_POSITION must be less than BACK_PROBE_BED_POSITION . " <nl> + # endif <nl> + / / Make sure probing points are reachable <nl> + # if LEFT_PROBE_BED_POSITION < MIN_PROBE_X <nl> + # error " The given LEFT_PROBE_BED_POSITION can ' t be reached by the Z probe . " <nl> + # elif RIGHT_PROBE_BED_POSITION > MAX_PROBE_X <nl> + # error " The given RIGHT_PROBE_BED_POSITION can ' t be reached by the Z probe . " <nl> + # elif FRONT_PROBE_BED_POSITION < MIN_PROBE_Y <nl> + # error " The given FRONT_PROBE_BED_POSITION can ' t be reached by the Z probe . " <nl> + # elif BACK_PROBE_BED_POSITION > MAX_PROBE_Y <nl> + # error " The given BACK_PROBE_BED_POSITION can ' t be reached by the Z probe . " <nl> + # endif <nl> + # endif <nl> + # else / / ! 
AUTO_BED_LEVELING_GRID <nl> + <nl> + / / Check the triangulation points <nl> + # if ABL_PROBE_PT_1_X < MIN_PROBE_X | | ABL_PROBE_PT_1_X > MAX_PROBE_X <nl> + # error " The given ABL_PROBE_PT_1_X can ' t be reached by the Z probe . " <nl> + # elif ABL_PROBE_PT_2_X < MIN_PROBE_X | | ABL_PROBE_PT_2_X > MAX_PROBE_X <nl> + # error " The given ABL_PROBE_PT_2_X can ' t be reached by the Z probe . " <nl> + # elif ABL_PROBE_PT_3_X < MIN_PROBE_X | | ABL_PROBE_PT_3_X > MAX_PROBE_X <nl> + # error " The given ABL_PROBE_PT_3_X can ' t be reached by the Z probe . " <nl> + # elif ABL_PROBE_PT_1_Y < MIN_PROBE_Y | | ABL_PROBE_PT_1_Y > MAX_PROBE_Y <nl> + # error " The given ABL_PROBE_PT_1_Y can ' t be reached by the Z probe . " <nl> + # elif ABL_PROBE_PT_2_Y < MIN_PROBE_Y | | ABL_PROBE_PT_2_Y > MAX_PROBE_Y <nl> + # error " The given ABL_PROBE_PT_2_Y can ' t be reached by the Z probe . " <nl> + # elif ABL_PROBE_PT_3_Y < MIN_PROBE_Y | | ABL_PROBE_PT_3_Y > MAX_PROBE_Y <nl> + # error " The given ABL_PROBE_PT_3_Y can ' t be reached by the Z probe . " <nl> + # endif <nl> + <nl> + # endif / / ! AUTO_BED_LEVELING_GRID <nl> + <nl> + # endif / / AUTO_BED_LEVELING_FEATURE <nl> + <nl> + / * * <nl> + * Advance Extrusion <nl> + * / <nl> + # if ENABLED ( ADVANCE ) & & ENABLED ( LIN_ADVANCE ) <nl> + # error " You can enable ADVANCE or LIN_ADVANCE , but not both . " <nl> + # endif <nl> + <nl> + / * * <nl> + * Filament Width Sensor <nl> + * / <nl> + # if ENABLED ( FILAMENT_WIDTH_SENSOR ) & & ! HAS_FILAMENT_WIDTH_SENSOR <nl> + # error " FILAMENT_WIDTH_SENSOR requires a FILWIDTH_PIN to be defined . " <nl> + # endif <nl> + <nl> + / * * <nl> + * ULTIPANEL encoder <nl> + * / <nl> + # if ENABLED ( ULTIPANEL ) & & DISABLED ( NEWPANEL ) & & DISABLED ( SR_LCD_2W_NL ) & & ! defined ( SHIFT_CLK ) <nl> + # error " ULTIPANEL requires some kind of encoder . " <nl> + # endif <nl> + <nl> + # if ENCODER_PULSES_PER_STEP < 0 <nl> + # error " ENCODER_PULSES_PER_STEP should not be negative , use REVERSE_MENU_DIRECTION instead . " <nl> + # endif <nl> + <nl> + / * * <nl> + * SAV_3DGLCD display options <nl> + * / <nl> + # if ENABLED ( U8GLIB_SSD1306 ) & & ENABLED ( U8GLIB_SH1106 ) <nl> + # error " Only enable one SAV_3DGLCD display type : U8GLIB_SSD1306 or U8GLIB_SH1106 . " <nl> + # endif <nl> + <nl> + / * * <nl> + * Don ' t set more than one kinematic type <nl> + * / <nl> + # if ( ENABLED ( DELTA ) & & ( ENABLED ( SCARA ) | | ENABLED ( COREXY ) | | ENABLED ( COREXZ ) | | ENABLED ( COREYZ ) ) ) \ <nl> + | | ( ENABLED ( SCARA ) & & ( ENABLED ( COREXY ) | | ENABLED ( COREXZ ) | | ENABLED ( COREYZ ) ) ) \ <nl> + | | ( ENABLED ( COREXY ) & & ( ENABLED ( COREXZ ) | | ENABLED ( COREYZ ) ) ) \ <nl> + | | ( ENABLED ( COREXZ ) & & ENABLED ( COREYZ ) ) <nl> + # error " Please enable only one of DELTA , SCARA , COREXY , COREXZ , or COREYZ . " <nl> + # endif <nl> + <nl> + / * * <nl> + * Allen Key <nl> + * Deploying the Allen Key probe uses big moves in z direction . Too dangerous for an unhomed z - axis . <nl> + * / <nl> + # if ENABLED ( Z_PROBE_ALLEN_KEY ) & & ( Z_HOME_DIR < 0 ) & & ENABLED ( Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN ) <nl> + # error " You can ' t home to a z min endstop with a Z_PROBE_ALLEN_KEY " <nl> + # endif <nl> + <nl> + / * * <nl> + * Dual X Carriage requirements <nl> + * / <nl> + # if ENABLED ( DUAL_X_CARRIAGE ) <nl> + # if EXTRUDERS = = 1 <nl> + # error " DUAL_X_CARRIAGE requires 2 ( or more ) extruders . " <nl> + # elif ENABLED ( COREXY ) | | ENABLED ( COREXZ ) <nl> + # error " DUAL_X_CARRIAGE cannot be used with COREXY or COREXZ . 
" <nl> + # elif ! HAS_X2_ENABLE | | ! HAS_X2_STEP | | ! HAS_X2_DIR <nl> + # error " DUAL_X_CARRIAGE requires X2 stepper pins to be defined . " <nl> + # elif ! HAS_X_MAX <nl> + # error " DUAL_X_CARRIAGE requires USE_XMAX_PLUG and an X Max Endstop . " <nl> + # elif ! defined ( X2_HOME_POS ) | | ! defined ( X2_MIN_POS ) | | ! defined ( X2_MAX_POS ) <nl> + # error " DUAL_X_CARRIAGE requires X2_HOME_POS , X2_MIN_POS , and X2_MAX_POS . " <nl> + # elif X_HOME_DIR ! = - 1 | | X2_HOME_DIR ! = 1 <nl> + # error " DUAL_X_CARRIAGE requires X_HOME_DIR - 1 and X2_HOME_DIR 1 . " <nl> + # endif <nl> + # endif / / DUAL_X_CARRIAGE <nl> + <nl> + / * * <nl> + * Make sure auto fan pins don ' t conflict with the fan pin <nl> + * / <nl> + # if HAS_AUTO_FAN <nl> + # if HAS_FAN0 <nl> + # if EXTRUDER_0_AUTO_FAN_PIN = = FAN_PIN <nl> + # error " You cannot set EXTRUDER_0_AUTO_FAN_PIN equal to FAN_PIN . " <nl> + # elif EXTRUDER_1_AUTO_FAN_PIN = = FAN_PIN <nl> + # error " You cannot set EXTRUDER_1_AUTO_FAN_PIN equal to FAN_PIN . " <nl> + # elif EXTRUDER_2_AUTO_FAN_PIN = = FAN_PIN <nl> + # error " You cannot set EXTRUDER_2_AUTO_FAN_PIN equal to FAN_PIN . " <nl> + # elif EXTRUDER_3_AUTO_FAN_PIN = = FAN_PIN <nl> + # error " You cannot set EXTRUDER_3_AUTO_FAN_PIN equal to FAN_PIN . " <nl> + # endif <nl> + # endif <nl> + # endif <nl> + <nl> + # if HAS_FAN0 & & CONTROLLERFAN_PIN = = FAN_PIN <nl> + # error " You cannot set CONTROLLERFAN_PIN equal to FAN_PIN . " <nl> + # endif <nl> + <nl> + # if HAS_CONTROLLERFAN <nl> + # if EXTRUDER_0_AUTO_FAN_PIN = = CONTROLLERFAN_PIN <nl> + # error " You cannot set EXTRUDER_0_AUTO_FAN_PIN equal to CONTROLLERFAN_PIN . " <nl> + # elif EXTRUDER_1_AUTO_FAN_PIN = = CONTROLLERFAN_PIN <nl> + # error " You cannot set EXTRUDER_1_AUTO_FAN_PIN equal to CONTROLLERFAN_PIN . " <nl> + # elif EXTRUDER_2_AUTO_FAN_PIN = = CONTROLLERFAN_PIN <nl> + # error " You cannot set EXTRUDER_2_AUTO_FAN_PIN equal to CONTROLLERFAN_PIN . " <nl> + # elif EXTRUDER_3_AUTO_FAN_PIN = = CONTROLLERFAN_PIN <nl> + # error " You cannot set EXTRUDER_3_AUTO_FAN_PIN equal to CONTROLLERFAN_PIN . " <nl> + # endif <nl> + # endif <nl> + <nl> + / * * <nl> + * Test Heater , Temp Sensor , and Extruder Pins ; Sensor Type must also be set . <nl> + * / <nl> + # if ! HAS_HEATER_0 <nl> + # error " HEATER_0_PIN not defined for this board . " <nl> + # elif ! PIN_EXISTS ( TEMP_0 ) <nl> + # error " TEMP_0_PIN not defined for this board . " <nl> + # elif ! PIN_EXISTS ( E0_STEP ) | | ! PIN_EXISTS ( E0_DIR ) | | ! PIN_EXISTS ( E0_ENABLE ) <nl> + # error " E0_STEP_PIN , E0_DIR_PIN , or E0_ENABLE_PIN not defined for this board . " <nl> + # elif TEMP_SENSOR_0 = = 0 <nl> + # error " TEMP_SENSOR_0 is required . " <nl> + # endif <nl> + <nl> + # if HOTENDS > 1 | | ENABLED ( HEATERS_PARALLEL ) <nl> + # if ! HAS_HEATER_1 <nl> + # error " HEATER_1_PIN not defined for this board . " <nl> + # endif <nl> + # endif <nl> + <nl> + # if HOTENDS > 1 <nl> + # if TEMP_SENSOR_1 = = 0 <nl> + # error " TEMP_SENSOR_1 is required with 2 or more HOTENDS . " <nl> + # elif ! PIN_EXISTS ( TEMP_1 ) <nl> + # error " TEMP_1_PIN not defined for this board . " <nl> + # endif <nl> + # if HOTENDS > 2 <nl> + # if TEMP_SENSOR_2 = = 0 <nl> + # error " TEMP_SENSOR_2 is required with 3 or more HOTENDS . " <nl> + # elif ! HAS_HEATER_2 <nl> + # error " HEATER_2_PIN not defined for this board . " <nl> + # elif ! PIN_EXISTS ( TEMP_2 ) <nl> + # error " TEMP_2_PIN not defined for this board . 
" <nl> + # endif <nl> + # if HOTENDS > 3 <nl> + # if TEMP_SENSOR_3 = = 0 <nl> + # error " TEMP_SENSOR_3 is required with 4 HOTENDS . " <nl> + # elif ! HAS_HEATER_3 <nl> + # error " HEATER_3_PIN not defined for this board . " <nl> + # elif ! PIN_EXISTS ( TEMP_3 ) <nl> + # error " TEMP_3_PIN not defined for this board . " <nl> + # endif <nl> + # elif TEMP_SENSOR_3 ! = 0 <nl> + # error " TEMP_SENSOR_3 shouldn ' t be set with only 3 extruders . " <nl> + # endif <nl> + # elif TEMP_SENSOR_2 ! = 0 <nl> + # error " TEMP_SENSOR_2 shouldn ' t be set with only 2 extruders . " <nl> + # elif TEMP_SENSOR_3 ! = 0 <nl> + # error " TEMP_SENSOR_3 shouldn ' t be set with only 2 extruders . " <nl> + # endif <nl> + # elif TEMP_SENSOR_1 ! = 0 & & DISABLED ( TEMP_SENSOR_1_AS_REDUNDANT ) <nl> + # error " TEMP_SENSOR_1 shouldn ' t be set with only 1 extruder . " <nl> + # elif TEMP_SENSOR_2 ! = 0 <nl> + # error " TEMP_SENSOR_2 shouldn ' t be set with only 1 extruder . " <nl> + # elif TEMP_SENSOR_3 ! = 0 <nl> + # error " TEMP_SENSOR_3 shouldn ' t be set with only 1 extruder . " <nl> + # endif <nl> + <nl> + # if ENABLED ( TEMP_SENSOR_1_AS_REDUNDANT ) & & TEMP_SENSOR_1 = = 0 <nl> + # error " TEMP_SENSOR_1 is required with TEMP_SENSOR_1_AS_REDUNDANT . " <nl> + # endif <nl> + <nl> + / * * <nl> + * Basic 2 - nozzle duplication mode <nl> + * / <nl> + # if ENABLED ( DUAL_NOZZLE_DUPLICATION_MODE ) <nl> + # if HOTENDS ! = 2 <nl> + # error " DUAL_NOZZLE_DUPLICATION_MODE requires exactly 2 hotends . " <nl> + # elif ENABLED ( DUAL_X_CARRIAGE ) <nl> + # error " DUAL_NOZZLE_DUPLICATION_MODE is incompatible with DUAL_X_CARRIAGE . " <nl> + # elif ENABLED ( SINGLENOZZLE ) <nl> + # error " DUAL_NOZZLE_DUPLICATION_MODE is incompatible with SINGLENOZZLE . " <nl> + # elif ENABLED ( MIXING_EXTRUDER ) <nl> + # error " DUAL_NOZZLE_DUPLICATION_MODE is incompatible with MIXING_EXTRUDER . " <nl> + # elif ENABLED ( SWITCHING_EXTRUDER ) <nl> + # error " DUAL_NOZZLE_DUPLICATION_MODE is incompatible with SWITCHING_EXTRUDER . " <nl> + # endif <nl> + # endif <nl> + <nl> + / * * <nl> + * Test Extruder Pins <nl> + * / <nl> + # if EXTRUDERS > 3 <nl> + # if ! PIN_EXISTS ( E3_STEP ) | | ! PIN_EXISTS ( E3_DIR ) | | ! PIN_EXISTS ( E3_ENABLE ) <nl> + # error " E3_STEP_PIN , E3_DIR_PIN , or E3_ENABLE_PIN not defined for this board . " <nl> + # endif <nl> + # elif EXTRUDERS > 2 <nl> + # if ! PIN_EXISTS ( E2_STEP ) | | ! PIN_EXISTS ( E2_DIR ) | | ! PIN_EXISTS ( E2_ENABLE ) <nl> + # error " E2_STEP_PIN , E2_DIR_PIN , or E2_ENABLE_PIN not defined for this board . " <nl> + # endif <nl> + # elif EXTRUDERS > 1 <nl> + # if ! PIN_EXISTS ( E1_STEP ) | | ! PIN_EXISTS ( E1_DIR ) | | ! PIN_EXISTS ( E1_ENABLE ) <nl> + # error " E1_STEP_PIN , E1_DIR_PIN , or E1_ENABLE_PIN not defined for this board . " <nl> + # endif <nl> + # endif <nl> + <nl> + / * * <nl> + * Endstops <nl> + * / <nl> + # if DISABLED ( USE_XMIN_PLUG ) & & DISABLED ( USE_XMAX_PLUG ) & & ! ( ENABLED ( Z_DUAL_ENDSTOPS ) & & Z2_USE_ENDSTOP > = _XMAX_ & & Z2_USE_ENDSTOP < = _XMIN_ ) <nl> + # error " You must enable USE_XMIN_PLUG or USE_XMAX_PLUG " <nl> + # elif DISABLED ( USE_YMIN_PLUG ) & & DISABLED ( USE_YMAX_PLUG ) & & ! ( ENABLED ( Z_DUAL_ENDSTOPS ) & & Z2_USE_ENDSTOP > = _YMAX_ & & Z2_USE_ENDSTOP < = _YMIN_ ) <nl> + # error " You must enable USE_YMIN_PLUG or USE_YMAX_PLUG " <nl> + # elif DISABLED ( USE_ZMIN_PLUG ) & & DISABLED ( USE_ZMAX_PLUG ) & & ! 
( ENABLED ( Z_DUAL_ENDSTOPS ) & & Z2_USE_ENDSTOP > = _ZMAX_ & & Z2_USE_ENDSTOP < = _ZMIN_ ) <nl> + # error " You must enable USE_ZMIN_PLUG or USE_ZMAX_PLUG " <nl> + # elif ENABLED ( Z_DUAL_ENDSTOPS ) & & ! Z2_USE_ENDSTOP <nl> + # error " You must set Z2_USE_ENDSTOP with Z_DUAL_ENDSTOPS " <nl> + # endif <nl> + <nl> + / * * <nl> + * emergency - command parser <nl> + * / <nl> + # if ENABLED ( EMERGENCY_PARSER ) & & ENABLED ( USBCON ) <nl> + # error " EMERGENCY_PARSER does not work on boards with AT90USB processors ( USBCON ) . " <nl> + # endif <nl> + <nl> + / * * <nl> + * Warnings for old configurations <nl> * / <nl> - # error " Please remove all # include lines from your Configuration . h and Configuration_adv . h files ! " <nl> + # if WATCH_TEMP_PERIOD > 500 <nl> + # error " WATCH_TEMP_PERIOD now uses seconds instead of milliseconds . " <nl> + # elif DISABLED ( THERMAL_PROTECTION_HOTENDS ) & & ( defined ( WATCH_TEMP_PERIOD ) | | defined ( THERMAL_PROTECTION_PERIOD ) ) <nl> + # error " Thermal Runaway Protection for hotends is now enabled with THERMAL_PROTECTION_HOTENDS . " <nl> + # elif DISABLED ( THERMAL_PROTECTION_BED ) & & defined ( THERMAL_PROTECTION_BED_PERIOD ) <nl> + # error " Thermal Runaway Protection for the bed is now enabled with THERMAL_PROTECTION_BED . " <nl> + # elif ENABLED ( COREXZ ) & & ENABLED ( Z_LATE_ENABLE ) <nl> + # error " Z_LATE_ENABLE can ' t be used with COREXZ . " <nl> + # elif defined ( X_HOME_RETRACT_MM ) <nl> + # error " [ XYZ ] _HOME_RETRACT_MM settings have been renamed [ XYZ ] _HOME_BUMP_MM . " <nl> + # elif defined ( BEEPER ) <nl> + # error " BEEPER is now BEEPER_PIN . Please update your pins definitions . " <nl> + # elif defined ( SDCARDDETECT ) <nl> + # error " SDCARDDETECT is now SD_DETECT_PIN . Please update your pins definitions . " <nl> + # elif defined ( SDCARDDETECTINVERTED ) <nl> + # error " SDCARDDETECTINVERTED is now SD_DETECT_INVERTED . Please update your configuration . " <nl> + # elif defined ( BTENABLED ) <nl> + # error " BTENABLED is now BLUETOOTH . Please update your configuration . " <nl> + # elif defined ( CUSTOM_MENDEL_NAME ) <nl> + # error " CUSTOM_MENDEL_NAME is now CUSTOM_MACHINE_NAME . Please update your configuration . " <nl> + # elif defined ( HAS_AUTOMATIC_VERSIONING ) <nl> + # error " HAS_AUTOMATIC_VERSIONING is now USE_AUTOMATIC_VERSIONING . Please update your configuration . " <nl> + # elif defined ( ENABLE_AUTO_BED_LEVELING ) <nl> + # error " ENABLE_AUTO_BED_LEVELING is now AUTO_BED_LEVELING_FEATURE . Please update your configuration . " <nl> + # elif defined ( SDSLOW ) <nl> + # error " SDSLOW deprecated . Set SPI_SPEED to SPI_HALF_SPEED instead . " <nl> + # elif defined ( SDEXTRASLOW ) <nl> + # error " SDEXTRASLOW deprecated . Set SPI_SPEED to SPI_QUARTER_SPEED instead . " <nl> + # elif defined ( Z_RAISE_BEFORE_HOMING ) <nl> + # error " Z_RAISE_BEFORE_HOMING is deprecated . Use MIN_Z_HEIGHT_FOR_HOMING instead . " <nl> + # elif defined ( FILAMENT_SENSOR ) <nl> + # error " FILAMENT_SENSOR is deprecated . Use FILAMENT_WIDTH_SENSOR instead . " <nl> + # elif defined ( DISABLE_MAX_ENDSTOPS ) | | defined ( DISABLE_MIN_ENDSTOPS ) <nl> + # error " DISABLE_MAX_ENDSTOPS and DISABLE_MIN_ENDSTOPS deprecated . Use individual USE_ * _PLUG options instead . " <nl> + # elif ENABLED ( Z_DUAL_ENDSTOPS ) & & ! defined ( Z2_USE_ENDSTOP ) <nl> + # error " Z_DUAL_ENDSTOPS settings are simplified . 
Just set Z2_USE_ENDSTOP to the endstop you want to repurpose for Z2 " <nl> + # elif defined ( LANGUAGE_INCLUDE ) <nl> + # error " LANGUAGE_INCLUDE has been replaced by LCD_LANGUAGE . Please update your configuration . " <nl> + # elif defined ( EXTRUDER_OFFSET_X ) | | defined ( EXTRUDER_OFFSET_Y ) <nl> + # error " EXTRUDER_OFFSET_ [ XY ] is deprecated . Use HOTEND_OFFSET_ [ XY ] instead . " <nl> + # elif defined ( PID_PARAMS_PER_EXTRUDER ) <nl> + # error " PID_PARAMS_PER_EXTRUDER is deprecated . Use PID_PARAMS_PER_HOTEND instead . " <nl> + # elif defined ( EXTRUDER_WATTS ) <nl> + # error " EXTRUDER_WATTS is deprecated . Use HOTEND_WATTS instead . " <nl> + # elif defined ( SERVO_ENDSTOP_ANGLES ) <nl> + # error " SERVO_ENDSTOP_ANGLES is deprecated . Use Z_SERVO_ANGLES instead . " <nl> + # elif defined ( X_ENDSTOP_SERVO_NR ) | | defined ( Y_ENDSTOP_SERVO_NR ) <nl> + # error " X_ENDSTOP_SERVO_NR and Y_ENDSTOP_SERVO_NR are deprecated and should be removed . " <nl> + # elif defined ( XY_TRAVEL_SPEED ) <nl> + # error " XY_TRAVEL_SPEED is deprecated . Use XY_PROBE_SPEED instead . " <nl> + # elif defined ( PROBE_SERVO_DEACTIVATION_DELAY ) <nl> + # error " PROBE_SERVO_DEACTIVATION_DELAY is deprecated . Use DEACTIVATE_SERVOS_AFTER_MOVE instead . " <nl> + # elif defined ( SERVO_DEACTIVATION_DELAY ) <nl> + # error " SERVO_DEACTIVATION_DELAY is deprecated . Use SERVO_DELAY instead . " <nl> + # elif ENABLED ( FILAMENTCHANGEENABLE ) <nl> + # error " FILAMENTCHANGEENABLE is now FILAMENT_CHANGE_FEATURE . Please update your configuration . " <nl> + # elif defined ( PLA_PREHEAT_HOTEND_TEMP ) <nl> + # error " PLA_PREHEAT_HOTEND_TEMP is now PREHEAT_1_TEMP_HOTEND . Please update your configuration . " <nl> + # elif defined ( PLA_PREHEAT_HPB_TEMP ) <nl> + # error " PLA_PREHEAT_HPB_TEMP is now PREHEAT_1_TEMP_BED . Please update your configuration . " <nl> + # elif defined ( PLA_PREHEAT_FAN_SPEED ) <nl> + # error " PLA_PREHEAT_FAN_SPEED is now PREHEAT_1_FAN_SPEED . Please update your configuration . " <nl> + # elif defined ( ABS_PREHEAT_HOTEND_TEMP ) <nl> + # error " ABS_PREHEAT_HOTEND_TEMP is now PREHEAT_2_TEMP_HOTEND . Please update your configuration . " <nl> + # elif defined ( ABS_PREHEAT_HPB_TEMP ) <nl> + # error " ABS_PREHEAT_HPB_TEMP is now PREHEAT_2_TEMP_BED . Please update your configuration . " <nl> + # elif defined ( ABS_PREHEAT_FAN_SPEED ) <nl> + # error " ABS_PREHEAT_FAN_SPEED is now PREHEAT_2_FAN_SPEED . Please update your configuration . " <nl> + # elif defined ( ENDSTOPS_ONLY_FOR_HOMING ) <nl> + # error " ENDSTOPS_ONLY_FOR_HOMING is deprecated . Use ( disable ) ENDSTOPS_ALWAYS_ON_DEFAULT instead . " <nl> + # elif defined ( HOMING_FEEDRATE ) <nl> + # error " HOMING_FEEDRATE is deprecated . Set individual rates with HOMING_FEEDRATE_ ( XY | Z | E ) instead . " <nl> + # endif <nl>
Merge pull request from thinkyhead / rc_arduino_160_minimum
MarlinFirmware/Marlin
5d8d18d03dc388a426d2a68d638eb0b71ace1630
2016-07-29T00:37:10Z
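The Marlin record above moves all configuration validation into SanityCheck.h as preprocessor #error checks (minimum toolchain version, config-file revision gates, mutually exclusive features). The snippet below is a minimal, self-contained sketch of that pattern, not Marlin source: the macro names CONFIG_VERSION, REQUIRED_CONFIG_VERSION, FEATURE_A and FEATURE_B are hypothetical stand-ins, and plain defined() is used in place of Marlin's ENABLED()/DISABLED() helpers.

```cpp
// Illustrative sketch only -- not Marlin code. Same idea as SanityCheck.h:
// refuse to build when the config file is too old or when two mutually
// exclusive options are both enabled, and say why in the error message.
#define CONFIG_VERSION 10100           // hypothetical stand-in for CONFIGURATION_H_VERSION
#define REQUIRED_CONFIG_VERSION 10100  // hypothetical stand-in for the required revision

#define FEATURE_A                      // hypothetical feature switches
// #define FEATURE_B

#if !defined(CONFIG_VERSION) || CONFIG_VERSION < REQUIRED_CONFIG_VERSION
  #error "Configuration file is too old, update it before building."
#endif

#if defined(FEATURE_A) && defined(FEATURE_B)
  #error "FEATURE_A and FEATURE_B are mutually exclusive, enable only one."
#endif

int main() { return 0; }  // reached only if every check above passed at compile time
```

Because the checks run in the preprocessor, a stale or contradictory configuration fails the build with a readable message instead of producing firmware that misbehaves at runtime.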
mmm a / CocosDenshion / android / Android . mk <nl> ppp b / CocosDenshion / android / Android . mk <nl> LOCAL_MODULE_FILENAME : = libcocosdenshion <nl> LOCAL_SRC_FILES : = SimpleAudioEngine . cpp \ <nl> ccdandroidUtils . cpp \ <nl> jni / SimpleAudioEngineJni . cpp \ <nl> + jni / cddandroidAndroidJavaEngine . cpp \ <nl> opensl / OpenSLEngine . cpp \ <nl> - opensl / SimpleAudioEngineOpenSL . cpp <nl> + opensl / SimpleAudioEngineOpenSL . cpp \ <nl> + opensl / cddandroidOpenSLEngine . cpp <nl> <nl> LOCAL_EXPORT_C_INCLUDES : = $ ( LOCAL_PATH ) / . . / include <nl> <nl> deleted file mode 100644 <nl> index 824c5fe80abe . . 000000000000 <nl> mmm a / CocosDenshion / android / SimpleAudioEngine . cpp <nl> ppp / dev / null <nl> <nl> - / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> - Copyright ( c ) 2010 cocos2d - x . org <nl> - <nl> - http : / / www . cocos2d - x . org <nl> - <nl> - Permission is hereby granted , free of charge , to any person obtaining a copy <nl> - of this software and associated documentation files ( the " Software " ) , to deal <nl> - in the Software without restriction , including without limitation the rights <nl> - to use , copy , modify , merge , publish , distribute , sublicense , and / or sell <nl> - copies of the Software , and to permit persons to whom the Software is <nl> - furnished to do so , subject to the following conditions : <nl> - <nl> - The above copyright notice and this permission notice shall be included in <nl> - all copies or substantial portions of the Software . <nl> - <nl> - THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR <nl> - IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY , <nl> - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL THE <nl> - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER <nl> - LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , <nl> - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN <nl> - THE SOFTWARE . <nl> - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> - <nl> - # include " SimpleAudioEngine . h " <nl> - # include " jni / SimpleAudioEngineJni . h " <nl> - # include " opensl / SimpleAudioEngineOpenSL . h " <nl> - <nl> - # include " cocos2d . h " <nl> - # include < cstring > <nl> - # include < android / log . h > <nl> - # include < jni / JniHelper . h > <nl> - # include < jni . h > <nl> - <nl> - # define I9100_MODEL " GT - I9100 " <nl> - # define LOG_TAG " Device Model " <nl> - # define LOGD ( . . . 
) __android_log_print ( ANDROID_LOG_DEBUG , LOG_TAG , __VA_ARGS__ ) <nl> - <nl> - static bool s_bI9100 = false ; <nl> - <nl> - USING_NS_CC ; <nl> - / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> - * jni <nl> - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> - # define CLASS_NAME " org / cocos2dx / lib / Cocos2dxHelper " <nl> - # define METHOD_NAME " getDeviceModel " <nl> - <nl> - namespace CocosDenshion { <nl> - <nl> - static std : : string getFullPathWithoutAssetsPrefix ( const char * pszFilename ) <nl> - { <nl> - / / Changing file path to full path <nl> - std : : string fullPath = FileUtils : : getInstance ( ) - > fullPathForFilename ( pszFilename ) ; <nl> - / / Removing ` assets ` since it isn ' t needed for the API of playing sound . <nl> - size_t pos = fullPath . find ( " assets / " ) ; <nl> - if ( pos = = 0 ) <nl> - { <nl> - fullPath = fullPath . substr ( strlen ( " assets / " ) ) ; <nl> - } <nl> - return fullPath ; <nl> - } <nl> - <nl> - static SimpleAudioEngine * s_pEngine = 0 ; <nl> - <nl> - SimpleAudioEngine : : SimpleAudioEngine ( ) <nl> - { <nl> - JniMethodInfo methodInfo ; <nl> - jstring jstr ; <nl> - if ( JniHelper : : getStaticMethodInfo ( methodInfo , CLASS_NAME , METHOD_NAME , " ( ) Ljava / lang / String ; " ) ) <nl> - { <nl> - jstr = ( jstring ) methodInfo . env - > CallStaticObjectMethod ( methodInfo . classID , methodInfo . methodID ) ; <nl> - } <nl> - methodInfo . env - > DeleteLocalRef ( methodInfo . classID ) ; <nl> - <nl> - const char * deviceModel = methodInfo . env - > GetStringUTFChars ( jstr , NULL ) ; <nl> - <nl> - LOGD ( " % s " , deviceModel ) ; <nl> - <nl> - if ( strcmp ( I9100_MODEL , deviceModel ) = = 0 ) <nl> - { <nl> - LOGD ( " i9100 model \ nSwitch to OpenSLES " ) ; <nl> - s_bI9100 = true ; <nl> - } <nl> - <nl> - methodInfo . env - > ReleaseStringUTFChars ( jstr , deviceModel ) ; <nl> - methodInfo . env - > DeleteLocalRef ( jstr ) ; <nl> - } <nl> - <nl> - SimpleAudioEngine : : ~ SimpleAudioEngine ( ) <nl> - { <nl> - if ( s_bI9100 ) <nl> - { <nl> - SimpleAudioEngineOpenSL : : sharedEngine ( ) - > end ( ) ; <nl> - } <nl> - } <nl> - <nl> - SimpleAudioEngine * SimpleAudioEngine : : getInstance ( ) <nl> - { <nl> - if ( ! s_pEngine ) <nl> - { <nl> - s_pEngine = new SimpleAudioEngine ( ) ; <nl> - } <nl> - <nl> - return s_pEngine ; <nl> - } <nl> - <nl> - void SimpleAudioEngine : : end ( ) <nl> - { <nl> - if ( s_bI9100 ) <nl> - { <nl> - SimpleAudioEngineOpenSL : : sharedEngine ( ) - > end ( ) ; <nl> - } <nl> - else <nl> - { <nl> - endJNI ( ) ; <nl> - } <nl> - } <nl> - <nl> - void SimpleAudioEngine : : preloadBackgroundMusic ( const char * pszFilePath ) <nl> - { <nl> - std : : string fullPath = getFullPathWithoutAssetsPrefix ( pszFilePath ) ; <nl> - preloadBackgroundMusicJNI ( fullPath . c_str ( ) ) ; <nl> - } <nl> - <nl> - void SimpleAudioEngine : : playBackgroundMusic ( const char * pszFilePath , bool bLoop ) <nl> - { <nl> - std : : string fullPath = getFullPathWithoutAssetsPrefix ( pszFilePath ) ; <nl> - playBackgroundMusicJNI ( fullPath . 
c_str ( ) , bLoop ) ; <nl> - } <nl> - <nl> - void SimpleAudioEngine : : stopBackgroundMusic ( bool bReleaseData ) <nl> - { <nl> - stopBackgroundMusicJNI ( ) ; <nl> - } <nl> - <nl> - void SimpleAudioEngine : : pauseBackgroundMusic ( ) <nl> - { <nl> - pauseBackgroundMusicJNI ( ) ; <nl> - } <nl> - <nl> - void SimpleAudioEngine : : resumeBackgroundMusic ( ) <nl> - { <nl> - resumeBackgroundMusicJNI ( ) ; <nl> - } <nl> - <nl> - void SimpleAudioEngine : : rewindBackgroundMusic ( ) <nl> - { <nl> - rewindBackgroundMusicJNI ( ) ; <nl> - } <nl> - <nl> - bool SimpleAudioEngine : : willPlayBackgroundMusic ( ) <nl> - { <nl> - return true ; <nl> - } <nl> - <nl> - bool SimpleAudioEngine : : isBackgroundMusicPlaying ( ) <nl> - { <nl> - return isBackgroundMusicPlayingJNI ( ) ; <nl> - } <nl> - <nl> - float SimpleAudioEngine : : getBackgroundMusicVolume ( ) <nl> - { <nl> - return getBackgroundMusicVolumeJNI ( ) ; <nl> - } <nl> - <nl> - void SimpleAudioEngine : : setBackgroundMusicVolume ( float volume ) <nl> - { <nl> - setBackgroundMusicVolumeJNI ( volume ) ; <nl> - } <nl> - <nl> - float SimpleAudioEngine : : getEffectsVolume ( ) <nl> - { <nl> - if ( s_bI9100 ) <nl> - { <nl> - return SimpleAudioEngineOpenSL : : sharedEngine ( ) - > getEffectsVolume ( ) ; <nl> - } <nl> - else <nl> - { <nl> - return getEffectsVolumeJNI ( ) ; <nl> - } <nl> - } <nl> - <nl> - void SimpleAudioEngine : : setEffectsVolume ( float volume ) <nl> - { <nl> - if ( s_bI9100 ) <nl> - { <nl> - SimpleAudioEngineOpenSL : : sharedEngine ( ) - > setEffectsVolume ( volume ) ; <nl> - } <nl> - else <nl> - { <nl> - setEffectsVolumeJNI ( volume ) ; <nl> - } <nl> - } <nl> - <nl> - unsigned int SimpleAudioEngine : : playEffect ( const char * pszFilePath , bool bLoop , <nl> - float pitch , float pan , float gain ) <nl> - { <nl> - std : : string fullPath = getFullPathWithoutAssetsPrefix ( pszFilePath ) ; <nl> - if ( s_bI9100 ) <nl> - { <nl> - return SimpleAudioEngineOpenSL : : sharedEngine ( ) - > playEffect ( fullPath . c_str ( ) , bLoop , pitch , pan , gain ) ; <nl> - } <nl> - else <nl> - { <nl> - return playEffectJNI ( fullPath . c_str ( ) , bLoop , pitch , pan , gain ) ; <nl> - } <nl> - } <nl> - <nl> - void SimpleAudioEngine : : stopEffect ( unsigned int nSoundId ) <nl> - { <nl> - if ( s_bI9100 ) <nl> - { <nl> - SimpleAudioEngineOpenSL : : sharedEngine ( ) - > stopEffect ( nSoundId ) ; <nl> - } <nl> - else <nl> - { <nl> - stopEffectJNI ( nSoundId ) ; <nl> - } <nl> - } <nl> - <nl> - void SimpleAudioEngine : : preloadEffect ( const char * pszFilePath ) <nl> - { <nl> - std : : string fullPath = getFullPathWithoutAssetsPrefix ( pszFilePath ) ; <nl> - <nl> - if ( s_bI9100 ) <nl> - { <nl> - SimpleAudioEngineOpenSL : : sharedEngine ( ) - > preloadEffect ( fullPath . c_str ( ) ) ; <nl> - } <nl> - else <nl> - { <nl> - preloadEffectJNI ( fullPath . c_str ( ) ) ; <nl> - } <nl> - } <nl> - <nl> - void SimpleAudioEngine : : unloadEffect ( const char * pszFilePath ) <nl> - { <nl> - std : : string fullPath = getFullPathWithoutAssetsPrefix ( pszFilePath ) ; <nl> - <nl> - if ( s_bI9100 ) <nl> - { <nl> - SimpleAudioEngineOpenSL : : sharedEngine ( ) - > unloadEffect ( fullPath . c_str ( ) ) ; <nl> - } <nl> - else <nl> - { <nl> - unloadEffectJNI ( fullPath . 
c_str ( ) ) ; <nl> - } <nl> - } <nl> - <nl> - void SimpleAudioEngine : : pauseEffect ( unsigned int nSoundId ) <nl> - { <nl> - if ( s_bI9100 ) <nl> - { <nl> - SimpleAudioEngineOpenSL : : sharedEngine ( ) - > pauseEffect ( nSoundId ) ; <nl> - } <nl> - else <nl> - { <nl> - pauseEffectJNI ( nSoundId ) ; <nl> - } <nl> - } <nl> - <nl> - void SimpleAudioEngine : : pauseAllEffects ( ) <nl> - { <nl> - if ( s_bI9100 ) <nl> - { <nl> - SimpleAudioEngineOpenSL : : sharedEngine ( ) - > pauseAllEffects ( ) ; <nl> - } <nl> - else <nl> - { <nl> - pauseAllEffectsJNI ( ) ; <nl> - } <nl> - } <nl> - <nl> - void SimpleAudioEngine : : resumeEffect ( unsigned int nSoundId ) <nl> - { <nl> - if ( s_bI9100 ) <nl> - { <nl> - SimpleAudioEngineOpenSL : : sharedEngine ( ) - > resumeEffect ( nSoundId ) ; <nl> - } <nl> - else <nl> - { <nl> - resumeEffectJNI ( nSoundId ) ; <nl> - } <nl> - } <nl> - <nl> - void SimpleAudioEngine : : resumeAllEffects ( ) <nl> - { <nl> - if ( s_bI9100 ) <nl> - { <nl> - SimpleAudioEngineOpenSL : : sharedEngine ( ) - > resumeAllEffects ( ) ; <nl> - } <nl> - else <nl> - { <nl> - resumeAllEffectsJNI ( ) ; <nl> - } <nl> - } <nl> - <nl> - void SimpleAudioEngine : : stopAllEffects ( ) <nl> - { <nl> - if ( s_bI9100 ) <nl> - { <nl> - SimpleAudioEngineOpenSL : : sharedEngine ( ) - > stopAllEffects ( ) ; <nl> - } <nl> - else <nl> - { <nl> - stopAllEffectsJNI ( ) ; <nl> - } <nl> - } <nl> - <nl> - } <nl> mmm a / CocosDenshion / android / ccdandroidUtils . cpp <nl> ppp b / CocosDenshion / android / ccdandroidUtils . cpp <nl> namespace CocosDenshion { <nl> methodInfo . env - > DeleteLocalRef ( methodInfo . classID ) ; <nl> <nl> const char * deviceModel = methodInfo . env - > GetStringUTFChars ( jstr , NULL ) ; <nl> - LOGD ( deviceModel ) ; <nl> + LOGD ( " % s " , deviceModel ) ; <nl> <nl> if ( strcmp ( I9100_MODEL , deviceModel ) = = 0 ) { <nl> LOGD ( " i9100 model \ nSwitch to OpenSLES " ) ; <nl> new file mode 100644 <nl> index 000000000000 . . 04d962b2e3a1 <nl> mmm / dev / null <nl> ppp b / CocosDenshion / android / cddSimpleAudioEngine . cpp <nl> <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + Copyright ( c ) 2010 cocos2d - x . org <nl> + <nl> + http : / / www . cocos2d - x . org <nl> + <nl> + Permission is hereby granted , free of charge , to any person obtaining a copy <nl> + of this software and associated documentation files ( the " Software " ) , to deal <nl> + in the Software without restriction , including without limitation the rights <nl> + to use , copy , modify , merge , publish , distribute , sublicense , and / or sell <nl> + copies of the Software , and to permit persons to whom the Software is <nl> + furnished to do so , subject to the following conditions : <nl> + <nl> + The above copyright notice and this permission notice shall be included in <nl> + all copies or substantial portions of the Software . <nl> + <nl> + THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR <nl> + IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY , <nl> + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL THE <nl> + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER <nl> + LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , <nl> + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN <nl> + THE SOFTWARE . 
<nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + # include " SimpleAudioEngine . h " <nl> + # include " jni / cddandroidAndroidJavaEngine . h " <nl> + # include " opensl / cddandroidOpenSLEngine . h " <nl> + # include " ccdandroidUtils . h " <nl> + <nl> + namespace CocosDenshion { <nl> + <nl> + static SimpleAudioEngine * s_pEngine = 0 ; <nl> + <nl> + SimpleAudioEngine * SimpleAudioEngine : : sharedEngine ( ) { <nl> + if ( ! s_pEngine ) { <nl> + if ( CocosDenshion : : android : : is_buggy_device ( ) ) { <nl> + s_pEngine = new CocosDenshion : : android : : AndroidJavaEngine ( ) ; <nl> + } else { <nl> + s_pEngine = new CocosDenshion : : android : : OpenSLEngine ( ) ; <nl> + } <nl> + } <nl> + <nl> + return s_pEngine ; <nl> + } <nl> + <nl> + SimpleAudioEngine : : SimpleAudioEngine ( ) { <nl> + } <nl> + <nl> + SimpleAudioEngine : : ~ SimpleAudioEngine ( ) { <nl> + } <nl> + <nl> + } <nl> new file mode 100644 <nl> index 000000000000 . . d7b12405517a <nl> mmm / dev / null <nl> ppp b / CocosDenshion / android / jni / cddandroidAndroidJavaEngine . cpp <nl> <nl> + # include " cddandroidAndroidJavaEngine . h " <nl> + <nl> + namespace CocosDenshion { <nl> + namespace android { <nl> + AndroidJavaEngine : : ~ AndroidJavaEngine ( ) { <nl> + } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 000000000000 . . 57c3789b6c65 <nl> mmm / dev / null <nl> ppp b / CocosDenshion / android / jni / cddandroidAndroidJavaEngine . h <nl> <nl> + # ifndef __CDDANDRIODANDROIDJAVAENGINE_H__ <nl> + # define __CDDANDRIODANDROIDJAVAENGINE_H__ <nl> + <nl> + # include " SimpleAudioEngine . h " <nl> + <nl> + namespace CocosDenshion { <nl> + namespace android { <nl> + class AndroidJavaEngine : public SimpleAudioEngine { <nl> + ~ AndroidJavaEngine ( ) ; <nl> + <nl> + void preloadBackgroundMusic ( const char * pszFilePath ) ; <nl> + void playBackgroundMusic ( const char * pszFilePath , bool bLoop ) ; <nl> + void stopBackgroundMusic ( bool bReleaseData ) ; <nl> + void pauseBackgroundMusic ( ) ; <nl> + void resumeBackgroundMusic ( ) ; <nl> + void rewindBackgroundMusic ( ) ; <nl> + bool willPlayBackgroundMusic ( ) ; <nl> + bool isBackgroundMusicPlaying ( ) ; <nl> + float getBackgroundMusicVolume ( ) ; <nl> + void setBackgroundMusicVolume ( float volume ) ; <nl> + float getEffectsVolume ( ) ; <nl> + void setEffectsVolume ( float volume ) ; <nl> + unsigned int playEffect ( const char * pszFilePath , bool bLoop ) ; <nl> + void pauseEffect ( unsigned int nSoundId ) ; <nl> + void pauseAllEffects ( ) ; <nl> + void resumeEffect ( unsigned int nSoundId ) ; <nl> + void resumeAllEffects ( ) ; <nl> + void stopEffect ( unsigned int nSoundId ) ; <nl> + void stopAllEffects ( ) ; <nl> + void preloadEffect ( const char * pszFilePath ) ; <nl> + void unloadEffect ( const char * pszFilePath ) ; <nl> + } ; <nl> + } <nl> + } <nl> + <nl> + # endif / / __CDDANDRIODANDROIDJAVAENGINE_H__ <nl> new file mode 100644 <nl> index 000000000000 . . d87e7e93b358 <nl> mmm / dev / null <nl> ppp b / CocosDenshion / android / opensl / cddandroidOpenSLEngine . cpp <nl> <nl> + # include " cddandroidOpenSLEngine . h " <nl> + <nl> + namespace CocosDenshion { <nl> + namespace android { <nl> + OpenSLEngine : : ~ OpenSLEngine ( ) { <nl> + } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 000000000000 . . 4d0f05289193 <nl> mmm / dev / null <nl> ppp b / CocosDenshion / android / opensl / cddandroidOpenSLEngine . 
h <nl> <nl> + # ifndef __CDDANDROIDOPENSLENGINE_H__ <nl> + # define __CDDANDROIDOPENSLENGINE_H__ <nl> + <nl> + # include " SimpleAudioEngine . h " <nl> + <nl> + namespace CocosDenshion { <nl> + namespace android { <nl> + class OpenSLEngine : public SimpleAudioEngine { <nl> + ~ OpenSLEngine ( ) ; <nl> + <nl> + void preloadBackgroundMusic ( const char * pszFilePath ) ; <nl> + void playBackgroundMusic ( const char * pszFilePath , bool bLoop ) ; <nl> + void stopBackgroundMusic ( bool bReleaseData ) ; <nl> + void pauseBackgroundMusic ( ) ; <nl> + void resumeBackgroundMusic ( ) ; <nl> + void rewindBackgroundMusic ( ) ; <nl> + bool willPlayBackgroundMusic ( ) ; <nl> + bool isBackgroundMusicPlaying ( ) ; <nl> + float getBackgroundMusicVolume ( ) ; <nl> + void setBackgroundMusicVolume ( float volume ) ; <nl> + float getEffectsVolume ( ) ; <nl> + void setEffectsVolume ( float volume ) ; <nl> + unsigned int playEffect ( const char * pszFilePath , bool bLoop ) ; <nl> + void pauseEffect ( unsigned int nSoundId ) ; <nl> + void pauseAllEffects ( ) ; <nl> + void resumeEffect ( unsigned int nSoundId ) ; <nl> + void resumeAllEffects ( ) ; <nl> + void stopEffect ( unsigned int nSoundId ) ; <nl> + void stopAllEffects ( ) ; <nl> + void preloadEffect ( const char * pszFilePath ) ; <nl> + void unloadEffect ( const char * pszFilePath ) ; <nl> + } ; <nl> + } <nl> + } <nl> + <nl> + # endif / / __CDDANDROIDOPENSLENGINE_H__ <nl>
Subclasses for Java and OpenSL variants of SimpleAudioEngine . SimpleAudioEngine factory chooses between the two .
cocos2d/cocos2d-x
8c047b7217ad87a91a8f9ef784afbde49f52ed34
2013-08-05T09:05:43Z
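The CocosDenshion record above replaces per-call `if (s_bI9100)` branching with two SimpleAudioEngine subclasses and a factory that picks one backend a single time. The sketch below mirrors only that shape; the class and function names are hypothetical rather than the CocosDenshion API, and the device check is stubbed out.

```cpp
// Minimal sketch of the "factory instead of a runtime flag" refactor above.
// Names are illustrative; the real classes live in cddandroidAndroidJavaEngine
// and cddandroidOpenSLEngine.
#include <memory>

class AudioBackend {
public:
    virtual ~AudioBackend() = default;
    virtual unsigned int playEffect(const char* path, bool loop) = 0;
};

class JavaBackend : public AudioBackend {       // would forward to the JNI helpers
    unsigned int playEffect(const char*, bool) override { return 0; }
};

class OpenSLBackend : public AudioBackend {     // would forward to OpenSL ES
    unsigned int playEffect(const char*, bool) override { return 0; }
};

bool isBuggyDevice() {
    // Stand-in for the JNI device-model check (the GT-I9100 test shown in
    // ccdandroidUtils.cpp); always false in this sketch.
    return false;
}

AudioBackend& sharedEngine() {
    // The backend is chosen exactly once; callers never branch on the device,
    // matching how the diff routes sharedEngine() through is_buggy_device().
    static std::unique_ptr<AudioBackend> engine =
        isBuggyDevice() ? std::unique_ptr<AudioBackend>(new JavaBackend())
                        : std::unique_ptr<AudioBackend>(new OpenSLBackend());
    return *engine;
}

int main() {
    sharedEngine().playEffect("effect.wav", /*loop=*/false);
    return 0;
}
```

The trade-off is the same as in the diff: each backend now carries the full engine surface, but the scattered per-call device checks disappear.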
mmm a / swoole_mysql . c <nl> ppp b / swoole_mysql . c <nl> static void swoole_mysql_onConnect ( mysql_client * client TSRMLS_DC ) <nl> { <nl> zend_update_property_stringl ( swoole_mysql_class_entry_ptr , zobject , ZEND_STRL ( " connect_error " ) , client - > connector . error_msg , client - > connector . error_length TSRMLS_CC ) ; <nl> zend_update_property_long ( swoole_mysql_class_entry_ptr , zobject , ZEND_STRL ( " connect_errno " ) , client - > connector . error_code TSRMLS_CC ) ; <nl> - <nl> ZVAL_BOOL ( result , 0 ) ; <nl> + / / close <nl> + sw_zend_call_method_with_0_params ( & zobject , swoole_mysql_class_entry_ptr , NULL , " close " , & retval ) ; <nl> + if ( retval ) <nl> + { <nl> + sw_zval_ptr_dtor ( & retval ) ; <nl> + } <nl> } <nl> else <nl> { <nl>
fixed: close the client connection when the async MySQL connect fails
swoole/swoole-src
230fb53f8d984418ac58d698a95484d5995e3181
2016-12-16T08:13:56Z
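The swoole record above makes the failed-connect path record the error and then explicitly call the close method so the client is torn down immediately. Below is a hedged, generic C++ sketch of that "report, then clean up" shape; it is not Swoole's PHP-extension code, the types are invented for illustration, and the ordering relative to the user callback is simplified.

```cpp
// Generic sketch of the pattern in the fix above: on a failed connect, record
// the error for the caller, release the connection state right away, then
// notify the callback, so no half-open client object lingers.
#include <functional>
#include <string>

class MiniClient {
public:
    std::function<void(bool ok)> onConnect;   // user-supplied callback

    void handleConnectResult(bool ok, int err, const std::string& msg) {
        if (!ok) {
            connectErrno = err;               // analogous to connect_errno
            connectError = msg;               // analogous to connect_error
            close();                          // the step added by the fix
            if (onConnect) onConnect(false);
            return;
        }
        connected = true;
        if (onConnect) onConnect(true);
    }

    void close() {
        connected = false;                    // release socket, buffers, ...
    }

    int connectErrno = 0;
    std::string connectError;
    bool connected = false;
};
```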
mmm a / cpp / CMakeLists . txt <nl> ppp b / cpp / CMakeLists . txt <nl> <nl> <nl> # Author : Philippe Liard <nl> <nl> - cmake_minimum_required ( VERSION 3 . 1 ) <nl> + cmake_minimum_required ( VERSION 2 . 8 ) <nl> <nl> project ( libphonenumber ) <nl> set ( libphonenumber_VERSION_MAJOR 7 ) <nl> find_required_program ( JAVA java <nl> if ( APPLE ) <nl> FIND_LIBRARY ( COREFOUNDATION_LIB CoreFoundation ) <nl> FIND_LIBRARY ( FOUNDATION_LIB Foundation ) <nl> + set ( CMAKE_MACOSX_RPATH " OFF " ) <nl> set ( CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS <nl> " $ { CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS } - undefined dynamic_lookup " ) <nl> endif ( ) <nl> mmm a / cpp / README <nl> ppp b / cpp / README <nl> Quickstart : <nl> - In recent Debian - based distributions , it should be sufficent to run : <nl> $ sudo apt - get install \ <nl> cmake cmake - curses - gui libprotobuf - dev libgtest - dev libre2 - dev \ <nl> - libicu - dev libboost - dev libboost - thread - dev libboost - system - dev <nl> + libicu - dev libboost - dev libboost - thread - dev libboost - system - dev \ <nl> + protobuf - compiler <nl> <nl> If any of these packages fails to install correctly , follow the instructions <nl> in the appropriate section below . <nl> Requirements : <nl> recent Debian - based GNU / Linux distributions ) . <nl> <nl> You can check which version is available : <nl> - $ apt - cache show libprotobuf - dev <nl> + $ apt - cache show libprotobuf - dev <nl> Package : libprotobuf - dev <nl> Source : protobuf <nl> Version : 2 . 5 . 0 - 9ubuntu1 < - - This must be > = 2 . 4 . 0 <nl> Building and testing the library <nl> $ make <nl> $ . / libphonenumber_test <nl> <nl> + <nl> + Manually installing the library on Mac <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + <nl> + You can easily install dependencies on Mac using a package manager . In these <nl> + instructions we use Homebrew ( http : / / brew . sh ) . <nl> + <nl> + Install Homebrew package manager and use it to install dependencies : <nl> + $ / usr / bin / ruby - e " $ ( curl - fsSL \ <nl> + https : / / raw . githubusercontent . com / Homebrew / install / master / install ) " <nl> + $ brew install boost cmake icu4c pkg - config protobuf wget <nl> + <nl> + See https : / / github . com / Homebrew / homebrew / issues / 14099 - homebrew does not have <nl> + gtest . We don ' t need to install gtest , we only copy sources . For example : <nl> + $ mkdir ~ / googletest_clone <nl> + $ cd ~ / googletest_clone <nl> + $ git clone https : / / github . com / google / googletest . git <nl> + <nl> + Get the libphonenumber source . For example : <nl> + $ mkdir ~ / libphonenumber_clone <nl> + $ cd ~ / libphonenumber_clone <nl> + $ git clone https : / / github . com / googlei18n / libphonenumber . git <nl> + <nl> + Build and test the library : <nl> + $ cd libphonenumber / cpp <nl> + $ mkdir build <nl> + $ cd build <nl> + Replace XXX in the commands below with the appropriate version number : <nl> + $ cmake \ <nl> + - DGTEST_SOURCE_DIR = ~ / googletest_clone / googletest / googletest / \ <nl> + - DGTEST_INCLUDE_DIR = ~ / googletest_clone / googletest / googletest / include / \ <nl> + - DICU_UC_INCLUDE_DIR = / usr / local / Cellar / icu4c / XXX / include / \ <nl> + - DICU_UC_LIB = / usr / local / Cellar / icu4c / XXX / lib / libicuuc . dylib \ <nl> + - DICU_I18N_INCLUDE_DIR = / usr / local / Cellar / icu4c / XXX / include / \ <nl> + - DICU_I18N_LIB = / usr / local / Cellar / icu4c / XXX / lib / libicui18n . dylib \ <nl> + - DUSE_STD_MAP = ON \ <nl> + . . <nl> + $ make <nl> + $ . 
/ libphonenumber_test <nl> + <nl> + Optional : Deleting & uninstalling everything again : <nl> + $ cd <nl> + $ rm - rf ~ / libphonenumber_clone ~ / googletest_clone <nl> + <nl> + openssl is a dependency of wget and installed with it by Homebrew . If you had <nl> + openssl before installing wget don ' t uninstall here . <nl> + $ brew uninstall boost cmake icu4c openssl pkg - config protobuf wget <nl> + <nl> + $ / usr / bin / ruby - e " $ ( curl - fsSL \ <nl> + https : / / raw . githubusercontent . com / Homebrew / install / master / uninstall ) " <nl> + <nl> + Homebrew will have changed permissions at installation . See output of previous <nl> + command for how to change them back , for example : <nl> + $ sudo chmod 0755 / usr / local <nl> + $ sudo chgrp wheel / usr / local <nl> + <nl> + <nl> Troubleshooting CMake via ccmake <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> Follow these instructions if the build steps above don ' t work for you . <nl> Troubleshooting CMake via ccmake <nl> $ cmake . . <nl> $ make <nl> <nl> + <nl> Building the library on Windows ( Visual Studio ) <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> The library was tested with Visual Studio 2010 . <nl> C : / Program Files / libphonenumber / . <nl> Note that this path can be set by overriding the CMAKE_INSTALL_PREFIX variable <nl> with cmake - gui . <nl> <nl> + <nl> Supported build parameters <nl> mmmmmmmmmmmmmmmmmmmmmmmm - - <nl> Build parameters can be specified invoking CMake with ' - DKEY = VALUE ' or using a <nl> mmm a / debian / control <nl> ppp b / debian / control <nl> Priority : optional <nl> Maintainer : Debian Java Maintainers < pkg - java - maintainers @ lists . alioth . debian . org > <nl> Uploaders : Daniel Pocock < daniel @ pocock . pro > , Fredrik Roubert < roubert @ google . com > <nl> Build - Depends : cdbs , <nl> - cmake ( > = 3 . 1 ) , <nl> + cmake ( > = 2 . 8 ) , <nl> debhelper ( > = 9 ) , <nl> default - jdk | java - sdk , <nl> libboost - dev ( > = 1 . 40 ) , <nl> mmm a / java / pending_code_changes . txt <nl> ppp b / java / pending_code_changes . txt <nl> <nl> Code changes : <nl> - Added java / pending_code_changes . txt for contributors to track code changes <nl> between releases . <nl> + - Reduced minimum required version of cmake to 2 . 8 . <nl> + - Added cmake installation instructions for Mac . <nl> - Added getExampleNumberForType that doesn ' t take in a region , and <nl> getInvalidExampleNumber <nl> - Improvements to javadoc for parse method <nl>
Merge pull request from googlei18n / cmake_back_to_2 . 8
google/libphonenumber
053e3c0ad044e76146b7f95b3cdb621d6d16bfc1
2016-04-01T09:21:21Z
mmm a / doc / specs / js - framework - apis . md <nl> ppp b / doc / specs / js - framework - apis . md <nl> <nl> # JS Framework APIs <nl> <nl> + # # Intro about JS Runtime <nl> + <nl> There APIs is designed for JS Framework and Native Engine working together . <nl> <nl> - # # Called from native and implemented by JS Framework <nl> + Considering the limitation of mobile phone resource , * Weex runs only one JS runtime * to handle all Weex instances . So it need a multi - instance management layer in JavaScript . These JS Framework APIs are just designed to finish the management job . <nl> + <nl> + * First , each Weex instance have a lifecycle , from ` createInstance ` to ` destroyInstance ` . During this period , we can import some extra data by ` refreshInstance ` . <nl> + * To communicate with Native Engine , we have a couple of APIs : ` callNative ` and ` callJS ` . They are used to call each other by some commands and messages . <nl> + * And when JS runtime start at the beginning of the app launching , we need something initialized and configured . So we supply some APIs like ` registerComponents ` , ` registerModules ` . <nl> + * The last API is just for debugging , we supply an API named ` getRoot ` to return the whole virtual - DOM data for developers . <nl> + <nl> + # # Called by native and supplied from JS Framework <nl> <nl> # # # ` createInstance ( instanceId , code , options , data ) ` <nl> <nl> Example : <nl> <nl> ` ` ` <nl> createInstance ( ' x ' , ' define ( . . . ) ; define ( . . . ) ; define ( . . . ) ; bootstrap ( . . . ) ' ) <nl> + createInstance ( ' x ' , ' . . . ' , { bundleUrl , debug , . . . } , { a : 1 , b : 2 } } ) <nl> ` ` ` <nl> <nl> # # # ` destroyInstance ( instanceId ) ` <nl> refreshInstance ( ' x ' , { a : 100 , b : 200 } ) <nl> <nl> Register all native components <nl> <nl> - * ` components ` : A map that the keys are component types and the values are config of each type of component . * Currently it supports any attrubite of node by defualt . For example , the ` append ` which forces the appending way ( ` tree ` or ` node ` ) when first rendering , but will be overwritten by given the attribute on element in source code . * <nl> + * ` components ` : A map of which keys are component types and values are force options part of each type of component . * Currently it supports ` append ` attribute which forces the appending mechanism ( ` tree ` or ` node ` ) when first time rendering . * <nl> <nl> Example : <nl> <nl> ` ` ` <nl> registerComponents ( { <nl> container : { } , <nl> - text : { <nl> - style : { <nl> - color : ' red ' <nl> - } <nl> - } , <nl> + text : { } , <nl> image : { } , <nl> slider : { append : ' tree ' } , <nl> list : { } , <nl> registerComponents ( { <nl> <nl> Register the name , methods and args format of each module <nl> <nl> - * ` modules ` : A map that collects all module definitions . Each module definition is a list which has several API definitions . And each API definition has a ` name ` string and a ` args ` array which contains a list of each parameter ' s type . <nl> + * ` modules ` : A map that collects all native module definitions . Each module definition is an array which has several API definitions . Each API definition has a ` name ` string and an ` args ` array which contains a list of each parameter ' s type . 
<nl> <nl> - * * NOTE : if the parameter ' s type is ` node ` or ` function ` , then it will automatically transfer to a string of ` node reference ` or ` function id ` * * <nl> + * * NOTE : the ` node ` type data will actually return its ` ref ` property . And the ` function ` type data will actually return a unique function id referring to it . * * <nl> <nl> Example : <nl> <nl> registerModules ( { <nl> Fire events or callbacks to an existed Weex instance from Native Engine <nl> <nl> * ` tasks [ ] ` : A task list . Each task has a ` method = " fireEvent | callback " ` property and a list of ` args ` . <nl> - - In ` fireEvent ` method , the ` args ` is ` ref ` of the target , event ` type ` and event ` data ` in order . <nl> + - In ` fireEvent ` method , the ` args ` is ` ref ` of the target , event ` type ` , event ` data ` and ` domChanges ` description in order . * * Note : if some event make virtual - DOM data changed ( e . g . value changed in ` < input > ` or current index changed in ` < slider > ` ) , the changing of the target element will be passed as ` domChanges ` . * * <nl> - In ` callback ` method , the ` args ` is ` funcId ` of a handler , ` data ` and ` ifKeepAlive ` which describes whether this callback handler should be keeping called . ( Each callback handler is matched with a ` funcId ` when the original call happens . ) <nl> <nl> Example : <nl>
Merge pull request from alibaba / doc - feature - jsfm
apache/incubator-weex
a17a614531aa5ec33fc89eaef495f7eb5947d3fc
2016-06-01T03:59:04Z
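The js-framework-apis doc changed above describes the native/JS contract only in prose. As a reading aid, here is a minimal TypeScript sketch of those call shapes; every type name and signature below is my own guess from the wording of the diff, not taken from the actual Weex source.

```typescript
// Hypothetical shapes for the APIs named in the doc diff above.
// Everything here is inferred from the prose; real Weex signatures may differ.

type InstanceId = string;

interface CreateOptions {
  bundleUrl?: string;
  debug?: boolean;
  [key: string]: unknown;
}

// Force options per component type, e.g. { append: 'tree' } for <slider>.
type ComponentConfig = { append?: 'tree' | 'node' };

// Each module API declares its name and the types of its parameters;
// 'node' and 'function' arguments travel as a ref string / function id.
interface ModuleMethod {
  name: string;
  args: Array<'string' | 'number' | 'object' | 'node' | 'function'>;
}

// APIs called by native and supplied by the JS Framework.
interface JsFramework {
  createInstance(id: InstanceId, code: string, options?: CreateOptions, data?: object): void;
  destroyInstance(id: InstanceId): void;
  refreshInstance(id: InstanceId, data: object): void;
  registerComponents(components: Record<string, ComponentConfig>): void;
  registerModules(modules: Record<string, ModuleMethod[]>): void;
  callJS(id: InstanceId, tasks: Array<{ method: 'fireEvent' | 'callback'; args: unknown[] }>): void;
  getRoot(id: InstanceId): object; // virtual-DOM snapshot, for debugging only
}
```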
mmm a / docs / en / operations / system - tables / asynchronous_metric_log . md <nl> ppp b / docs / en / operations / system - tables / asynchronous_metric_log . md <nl> SELECT * FROM system . asynchronous_metric_log LIMIT 10 <nl> * * See Also * * <nl> <nl> - [ system . asynchronous \ _metrics ] ( . . / system - tables / asynchronous_metrics . md ) — Contains metrics that are calculated periodically in the background . <nl> - - [ system . metric_log ] ( . . / system - tables / metric_log ) — Contains history of metrics values from tables ` system . metrics ` and ` system . events ` , periodically flushed to disk . <nl> + - [ system . metric_log ] ( . . / system - tables / metric_log . md ) — Contains history of metrics values from tables ` system . metrics ` and ` system . events ` , periodically flushed to disk . <nl>
Update asynchronous_metric_log . md
ClickHouse/ClickHouse
81636eb69e6b84ce9a9dbd17f0f233d70b55ae37
2020-07-26T21:43:03Z
mmm a / test / IRGen / objc_generic_class_stub . swift <nl> ppp b / test / IRGen / objc_generic_class_stub . swift <nl> <nl> <nl> / / REQUIRES : objc_interop <nl> <nl> - / / REQUIRES : rdar71476765 <nl> - <nl> import Foundation <nl> <nl> public class GenericNSObjectSubclass < T > : NSObject { } <nl> public class ConcreteNSObjectSubclass : GenericNSObjectSubclass < Int > { } <nl> / / Note the stub here is internal ; it ' s only purpose is to appear in the stub list <nl> / / so that it can be realized by objc_copyClassList ( ) : <nl> <nl> - / / CHECK - LABEL : @ " $ s23objc_generic_class_stub24ConcreteNSObjectSubclassCMt " = internal global % objc_full_class_stub { i64 0 , i64 1 , % objc_class * ( % objc_class * , i8 * ) * @ " $ s23objc_generic_class_stub24ConcreteNSObjectSubclassCMU " } <nl> + / / CHECK - LABEL : @ " $ s23objc_generic_class_stub24ConcreteNSObjectSubclassCMt " = internal global % objc_full_class_stub { { { i32 | i64 } } 0 , { { i32 | i64 } } 1 , % objc_class * ( % objc_class * , i8 * ) * @ " $ s23objc_generic_class_stub24ConcreteNSObjectSubclassCMU " } <nl> <nl> / / CHECK - LABEL : @ objc_class_stubs = internal global { { . * } } @ " $ s23objc_generic_class_stub24ConcreteNSObjectSubclassCMt " { { . * } } , section " __DATA , __objc_stublist , regular , no_dead_strip " <nl>
Merge remote - tracking branch ' origin / main ' into next
apple/swift
8629ce68c7c2fc20ba32b791adc6e1ebc17d0511
2020-11-18T03:24:08Z
mmm a / docs / Filament . md . html <nl> ppp b / docs / Filament . md . html <nl> <nl> <nl> # # # # # Simplifying the BRDF integration # # # # # <nl> <nl> - Since there is no closed - form or an easy way to compute the $ \ Lout $ integral , we use a simplified <nl> - equation instead , $ \ hat { I } $ , whereby we assume that $ v = n $ , that is the view direction $ v $ is always <nl> + Since there is no closed - form solution or an easy way to compute the $ \ Lout $ integral , we use a simplified <nl> + equation instead : $ \ hat { I } $ , whereby we assume that $ v = n $ , that is the view direction $ v $ is always <nl> equal to the surface normal $ n $ . Clearly , this assumption will break all view - dependant effects of <nl> the convolution , such as the increased blur in reflections closer to the viewer <nl> ( a . k . a . stretchy reflections ) . <nl> <nl> Such a simplification would also have a severe impact on constant environments , such as the white <nl> furnace , because it would affect the magnitude of the the constant ( i . e . DC ) term of the result . We <nl> - can at least correct for that by using a scale factor , $ K $ , in our simplified integral , which , <nl> - when chosen properly will make sure the average irradiance stay correct . <nl> + can at least correct for that by using a scale factor , $ K $ , in our simplified integral , which <nl> + will make sure the average irradiance stay correct when chosen properly . <nl> <nl> - $ I $ is our original integral , i . e . : $ I ( g ) = \ int_ \ Omega g ( l ) \ left < \ NoL \ right > \ partial l $ <nl> - $ \ hat { I } $ is the simplified integral where $ v = n $ <nl> <nl> - $ \ tilde { I } $ is our final approximation of $ I $ , $ \ tilde { I } = \ hat { I } \ times K $ <nl> <nl> <nl> - Mathematically , <nl> + Because $ I $ is an integral multiplications can be distributed over it . i . e . : $ I ( g ( ) f ( ) ) = I ( g ( ) ) I ( f ( ) ) $ . 
<nl> + <nl> + Armed with that , <nl> <nl> $ $ \ begin { equation } <nl> I ( f ( \ Theta ) \ Lt ) \ approx \ tilde { I } ( f ( \ Theta ) \ Lt ) \ \ <nl> <nl> & = I ( f ( \ Theta ) \ Lt ) <nl> \ end { align * } $ $ <nl> <nl> - Finally , we can show that the scale factor $ K $ satisfies our average irradiance requirement , <nl> - plugging $ \ Lt = \ bar { \ Lt } + ( \ Lt - \ bar { \ Lt } ) $ into $ \ tilde { I } $ , we get : <nl> + Finally , we can show that the scale factor $ K $ satisfies our average irradiance ( $ \ bar { \ Lt } $ ) <nl> + requirement by plugging $ \ Lt = \ bar { \ Lt } + ( \ Lt - \ bar { \ Lt } ) = \ bar { \ Lt } + \ Delta \ Lt $ into $ \ tilde { I } $ : <nl> <nl> $ $ \ begin { align * } <nl> - \ tilde { I } ( f ( \ Theta ) \ Lt ) & = \ tilde { I } [ f ( \ Theta ) \ times ( \ bar { \ Lt } + ( \ Lt - \ bar { \ Lt } ) ) ] \ \ <nl> - & = K \ times \ hat { I } [ f ( \ Theta ) \ times ( \ bar { \ Lt } + ( \ Lt - \ bar { \ Lt } ) ) ] \ \ <nl> - & = K \ times [ \ hat { I } ( f ( \ Theta ) \ bar { \ Lt } ) + \ hat { I } ( f ( \ Theta ) \ times ( \ Lt - \ bar { \ Lt } ) ) ] \ \ <nl> - & = K \ times \ hat { I } ( f ( \ Theta ) \ bar { \ Lt } ) + K \ times \ hat { I } ( f ( \ Theta ) \ times ( \ Lt - \ bar { \ Lt } ) ) \ \ <nl> - & = \ tilde { I } ( f ( \ Theta ) \ bar { \ Lt } ) + \ tilde { I } ( f ( \ Theta ) \ times ( \ Lt - \ bar { \ Lt } ) ) \ \ <nl> - & = I ( f ( \ Theta ) \ bar { \ Lt } ) + \ tilde { I } ( f ( \ Theta ) \ times ( \ Lt - \ bar { \ Lt } ) ) <nl> + \ tilde { I } ( f ( \ Theta ) \ Lt ) & = \ tilde { I } \ left [ f \ left ( \ Theta \ right ) \ left ( \ bar { \ Lt } + \ Delta \ Lt \ right ) \ right ] \ \ <nl> + & = K \ times \ hat { I } \ left [ f \ left ( \ Theta \ right ) \ left ( \ bar { \ Lt } + \ Delta \ Lt \ right ) \ right ] \ \ <nl> + & = K \ times \ left [ \ hat { I } \ left ( f \ left ( \ Theta \ right ) \ bar { \ Lt } \ right ) + \ hat { I } \ left ( f \ left ( \ Theta \ right ) \ Delta \ Lt \ right ) \ right ] \ \ <nl> + & = K \ times \ hat { I } \ left ( f \ left ( \ Theta \ right ) \ bar { \ Lt } \ right ) + K \ times \ hat { I } \ left ( f \ left ( \ Theta \ right ) \ Delta \ Lt \ right ) \ \ <nl> + & = \ tilde { I } \ left ( f \ left ( \ Theta \ right ) \ bar { \ Lt } \ right ) + \ tilde { I } \ left ( f \ left ( \ Theta \ right ) \ Delta \ Lt \ right ) \ \ <nl> + & = I \ left ( f \ left ( \ Theta \ right ) \ bar { \ Lt } \ right ) + \ tilde { I } \ left ( f \ left ( \ Theta \ right ) \ Delta \ Lt \ right ) <nl> \ end { align * } $ $ <nl> <nl> The above result shows that the average irradiance is computed correctly , i . e . : $ I ( f ( \ Theta ) \ bar { \ Lt } ) $ . <nl> <nl> A way to think about this approximation is that it splits the radiance $ \ Lt $ in two parts , <nl> - the average $ \ bar { \ Lt } $ and the detla from the average $ \ Lt - \ bar { \ Lt } $ and computes the correct <nl> - integration on the average part and adds the simplified integration on the delta part : <nl> + the average $ \ bar { \ Lt } $ and the delta from the average $ \ Lt - \ bar { \ Lt } $ and computes the correct <nl> + integration of the average part then adds the simplified integration of the delta part : <nl> <nl> $ $ \ begin { equation } <nl> approximation ( \ Lt ) = correct ( \ bar { \ Lt } ) + simplified ( \ Lt - \ bar { \ Lt } ) <nl> <nl> \ end { equation } $ $ <nl> <nl> <nl> - All three of these equations can be easily pre - calculated and stored in look - up tables , we will show <nl> - how below . 
<nl> + All three of these equations can be easily pre - calculated and stored in look - up tables , as explained <nl> + below . <nl> <nl> <nl> <nl>
fix @ romainguy comments and improve equations
google/filament
cb886090156478082d6c00ac26f5d9cfebc76da1
2018-09-18T21:45:47Z
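For readers following the Filament derivation edited above, a compact LaTeX restatement of the split may help. It only restates what the diff already derives, assuming the simplified integral behaves additively and scales with constants; the closed form shown for $K$ is my own reading of the last step, given purely as an illustration.

```latex
% With \Lt = \bar{\Lt} + \Delta\Lt and \tilde{I} = K \, \hat{I}:
\tilde{I}\big(f(\Theta)\,\Lt\big)
  = K\,\hat{I}\big(f(\Theta)\,\bar{\Lt}\big) + K\,\hat{I}\big(f(\Theta)\,\Delta\Lt\big)
  = I\big(f(\Theta)\,\bar{\Lt}\big) + \tilde{I}\big(f(\Theta)\,\Delta\Lt\big),
\qquad
K = \frac{I\big(f(\Theta)\big)}{\hat{I}\big(f(\Theta)\big)}.
```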
mmm a / utils / list - versions / version_date . tsv <nl> ppp b / utils / list - versions / version_date . tsv <nl> v20 . 10 . 5 . 10 - stable 2020 - 11 - 20 <nl> v20 . 10 . 4 . 1 - stable 2020 - 11 - 13 <nl> v20 . 10 . 3 . 30 - stable 2020 - 10 - 29 <nl> v20 . 10 . 2 . 20 - stable 2020 - 10 - 23 <nl> + v20 . 9 . 7 . 11 - stable 2020 - 12 - 07 <nl> v20 . 9 . 6 . 14 - stable 2020 - 11 - 20 <nl> v20 . 9 . 5 . 5 - stable 2020 - 11 - 13 <nl> v20 . 9 . 4 . 76 - stable 2020 - 10 - 29 <nl>
Update version_date . tsv after release 20 . 9 . 7 . 11
ClickHouse/ClickHouse
f2b63f0702690834151c1cb740fe04fdd39cf4c2
2020-12-06T21:11:19Z
mmm a / benchmark / CMakeLists . txt <nl> ppp b / benchmark / CMakeLists . txt <nl> set ( SWIFT_BENCH_MODULES <nl> single - source / DictTest3 <nl> single - source / DropLast <nl> single - source / ErrorHandling <nl> + single - source / ExistentialPerformance <nl> single - source / Fibonacci <nl> single - source / GlobalClass <nl> single - source / Hanoi <nl> new file mode 100644 <nl> index 000000000000 . . f4f968d83414 <nl> mmm / dev / null <nl> ppp b / benchmark / single - source / ExistentialPerformance . swift <nl> <nl> + protocol Existential { <nl> + init ( ) <nl> + func doIt ( ) - > Bool <nl> + func reallyDoIt ( ) - > Bool <nl> + mutating func mutateIt ( ) - > Bool <nl> + } <nl> + <nl> + struct IntValueBuffer0 : Existential { <nl> + func doIt ( ) - > Bool { <nl> + return true <nl> + } <nl> + func reallyDoIt ( ) - > Bool { <nl> + return true <nl> + } <nl> + mutating func mutateIt ( ) - > Bool { <nl> + return true <nl> + } <nl> + } <nl> + <nl> + func next ( _ x : inout Int , upto mod : Int ) { <nl> + x = ( x + 1 ) % ( mod + 1 ) <nl> + } <nl> + <nl> + struct IntValueBuffer1 : Existential { <nl> + var f0 : Int = 0 <nl> + <nl> + func doIt ( ) - > Bool { <nl> + return f0 = = 0 <nl> + } <nl> + func reallyDoIt ( ) - > Bool { <nl> + return true <nl> + } <nl> + mutating func mutateIt ( ) - > Bool { <nl> + next ( & f0 , upto : 1 ) <nl> + return true <nl> + } <nl> + } <nl> + <nl> + struct IntValueBuffer2 : Existential { <nl> + var f0 : Int = 0 <nl> + var f1 : Int = 3 <nl> + <nl> + func doIt ( ) - > Bool { <nl> + return f0 = = 0 <nl> + } <nl> + func reallyDoIt ( ) - > Bool { <nl> + return f0 = = 0 & & f1 = = 3 <nl> + } <nl> + mutating func mutateIt ( ) - > Bool { <nl> + next ( & f0 , upto : 1 ) <nl> + next ( & f1 , upto : 3 ) <nl> + return true <nl> + } <nl> + } <nl> + <nl> + struct IntValueBuffer3 : Existential { <nl> + var f0 : Int = 0 <nl> + var f1 : Int = 3 <nl> + var f2 : Int = 7 <nl> + <nl> + func doIt ( ) - > Bool { <nl> + return f0 = = 0 <nl> + } <nl> + func reallyDoIt ( ) - > Bool { <nl> + return f0 = = 0 & & f1 = = 3 & & f2 = = 7 <nl> + } <nl> + <nl> + mutating func mutateIt ( ) - > Bool { <nl> + next ( & f0 , upto : 1 ) <nl> + next ( & f1 , upto : 3 ) <nl> + next ( & f2 , upto : 7 ) <nl> + return true <nl> + } <nl> + } <nl> + <nl> + struct IntValueBuffer4 : Existential { <nl> + var f0 : Int = 0 <nl> + var f1 : Int = 3 <nl> + var f2 : Int = 7 <nl> + var f3 : Int = 13 <nl> + <nl> + func doIt ( ) - > Bool { <nl> + return f0 = = 0 <nl> + } <nl> + func reallyDoIt ( ) - > Bool { <nl> + return f0 = = 0 & & f1 = = 3 & & f2 = = 7 & & f3 = = 13 <nl> + } <nl> + <nl> + mutating func mutateIt ( ) - > Bool { <nl> + next ( & f0 , upto : 1 ) <nl> + next ( & f1 , upto : 3 ) <nl> + next ( & f2 , upto : 7 ) <nl> + next ( & f3 , upto : 13 ) <nl> + return true <nl> + } <nl> + } <nl> + <nl> + class Klazz { <nl> + var f0 : Int = 0 <nl> + var f1 : Int = 3 <nl> + <nl> + func doIt ( ) - > Bool { <nl> + return f0 = = 0 <nl> + } <nl> + func reallyDoIt ( ) - > Bool { <nl> + return f0 = = 0 & & f1 = = 3 <nl> + } <nl> + <nl> + func mutateIt ( ) - > Bool { <nl> + next ( & f0 , upto : 1 ) <nl> + next ( & f1 , upto : 3 ) <nl> + return true <nl> + } <nl> + } <nl> + <nl> + struct ClassValueBuffer1 : Existential { <nl> + var f0 : Klazz = Klazz ( ) <nl> + <nl> + func doIt ( ) - > Bool { <nl> + return f0 . doIt ( ) <nl> + } <nl> + func reallyDoIt ( ) - > Bool { <nl> + return f0 . reallyDoIt ( ) <nl> + } <nl> + <nl> + mutating func mutateIt ( ) - > Bool { <nl> + return f0 . 
mutateIt ( ) <nl> + } <nl> + } <nl> + <nl> + struct ClassValueBuffer2 : Existential { <nl> + var f0 : Klazz = Klazz ( ) <nl> + var f1 : Klazz = Klazz ( ) <nl> + <nl> + func doIt ( ) - > Bool { <nl> + return f0 . doIt ( ) <nl> + } <nl> + func reallyDoIt ( ) - > Bool { <nl> + return f0 . reallyDoIt ( ) <nl> + } <nl> + <nl> + mutating func mutateIt ( ) - > Bool { <nl> + return f0 . mutateIt ( ) <nl> + } <nl> + } <nl> + <nl> + struct ClassValueBuffer3 : Existential { <nl> + var f0 : Klazz = Klazz ( ) <nl> + var f1 : Klazz = Klazz ( ) <nl> + var f2 : Klazz = Klazz ( ) <nl> + <nl> + func doIt ( ) - > Bool { <nl> + return f0 . doIt ( ) <nl> + } <nl> + func reallyDoIt ( ) - > Bool { <nl> + return f0 . reallyDoIt ( ) <nl> + } <nl> + <nl> + mutating func mutateIt ( ) - > Bool { <nl> + return f0 . mutateIt ( ) <nl> + } <nl> + } <nl> + <nl> + struct ClassValueBuffer4 : Existential { <nl> + var f0 : Klazz = Klazz ( ) <nl> + var f1 : Klazz = Klazz ( ) <nl> + var f2 : Klazz = Klazz ( ) <nl> + var f3 : Int = 0 <nl> + <nl> + func doIt ( ) - > Bool { <nl> + return f0 . doIt ( ) <nl> + } <nl> + <nl> + func reallyDoIt ( ) - > Bool { <nl> + return f0 . reallyDoIt ( ) <nl> + } <nl> + <nl> + mutating func mutateIt ( ) - > Bool { <nl> + return f0 . mutateIt ( ) <nl> + } <nl> + } <nl> + <nl> + <nl> + @ inline ( never ) <nl> + func initExistential < T : Existential > ( withType : T . Type ) - > Existential { <nl> + return T ( ) <nl> + } <nl> + <nl> + @ inline ( never ) <nl> + func initExistentialArray < T : Existential > ( withType : T . Type , count c : Int ) - > [ Existential ] { <nl> + return [ T ] ( repeating : T ( ) , count : c ) <nl> + } <nl> + <nl> + @ inline ( never ) <nl> + func passExistentialTwiceOneMethodCall ( _ e0 : Existential , _ e1 : Existential ) - > Bool { <nl> + return e0 . doIt ( ) & & e1 . doIt ( ) <nl> + } <nl> + <nl> + @ inline ( never ) <nl> + func passExistentialTwiceTwoMethodCalls ( _ e0 : Existential , _ e1 : Existential ) - > Bool { <nl> + return e0 . doIt ( ) & & e1 . doIt ( ) & & e0 . reallyDoIt ( ) & & e1 . reallyDoIt ( ) <nl> + } <nl> + <nl> + func runTestOneMethodCall < T : Existential > ( withType : T . Type , numberOfTimes N : Int ) { <nl> + let existential = initExistential ( withType : T . self ) <nl> + for _ in 0 . . < N { <nl> + for _ in 0 . . < 5_000_000 { <nl> + if ! existential . doIt ( ) { <nl> + fatalError ( " expected true " ) <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + func runTestTwoMethodCalls < T : Existential > ( withType : T . Type , numberOfTimes N : Int ) { <nl> + let existential = initExistential ( withType : T . self ) <nl> + for _ in 0 . . < N { <nl> + for _ in 0 . . < 5_000_000 { <nl> + if ! existential . doIt ( ) | | ! existential . reallyDoIt ( ) { <nl> + fatalError ( " expected true " ) <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + func runTestPassExistentialOneMethodCall < T : Existential > ( withType : T . Type , numberOfTimes N : Int ) { <nl> + let existential = initExistential ( withType : T . self ) <nl> + let existential2 = initExistential ( withType : T . self ) <nl> + for _ in 0 . . < N { <nl> + for _ in 0 . . < 5_000_000 { <nl> + if ! passExistentialTwiceOneMethodCall ( existential , existential2 ) { <nl> + fatalError ( " expected true " ) <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + func runTestPassExistentialTwoMethodCalls < T : Existential > ( withType : T . Type , numberOfTimes N : Int ) { <nl> + let existential = initExistential ( withType : T . self ) <nl> + let existential2 = initExistential ( withType : T . 
self ) <nl> + for _ in 0 . . < N { <nl> + for _ in 0 . . < 5_000_000 { <nl> + if ! passExistentialTwiceTwoMethodCalls ( existential , existential2 ) { <nl> + fatalError ( " expected true " ) <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + func runTestMutating < T : Existential > ( withType : T . Type , numberOfTimes N : Int ) { <nl> + var existential = initExistential ( withType : T . self ) <nl> + for _ in 0 . . < N { <nl> + for _ in 0 . . < 5_000_000 { <nl> + if ! existential . mutateIt ( ) { <nl> + fatalError ( " expected true " ) <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + func runTestMutatingAndNonMutating < T : Existential > ( withType : T . Type , numberOfTimes N : Int ) { <nl> + var existential = initExistential ( withType : T . self ) <nl> + for _ in 0 . . < N { <nl> + for _ in 0 . . < 5_000_000 { <nl> + let _ = existential . doIt ( ) <nl> + if ! existential . mutateIt ( ) { <nl> + fatalError ( " expected true " ) <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + func runTestArrayOneMethodCall < T : Existential > ( withType : T . Type , numberOfTimes N : Int ) { <nl> + let existentialArray = initExistentialArray ( withType : T . self , count : 128 ) <nl> + for _ in 0 . . < N { <nl> + for _ in 0 . . < 5_000 { <nl> + for elt in existentialArray { <nl> + if ! elt . doIt ( ) { <nl> + fatalError ( " expected true " ) <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + func runTestArrayTwoMethodCalls < T : Existential > ( withType : T . Type , numberOfTimes N : Int ) { <nl> + let existentialArray = initExistentialArray ( withType : T . self , count : 128 ) <nl> + for _ in 0 . . < N { <nl> + for _ in 0 . . < 5_000 { <nl> + for elt in existentialArray { <nl> + if ! elt . doIt ( ) | | ! elt . reallyDoIt ( ) { <nl> + fatalError ( " expected true " ) <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + func runTestArrayMutating < T : Existential > ( withType : T . Type , numberOfTimes N : Int ) { <nl> + var existentialArray = initExistentialArray ( withType : T . self , count : 128 ) <nl> + for _ in 0 . . < N { <nl> + for _ in 0 . . < 5_000 { <nl> + for i in 0 . . < existentialArray . count { <nl> + if ! existentialArray [ i ] . mutateIt ( ) { <nl> + fatalError ( " expected true " ) <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + func runTestArrayShift < T : Existential > ( withType : T . Type , numberOfTimes N : Int ) { <nl> + var existentialArray = initExistentialArray ( withType : T . self , count : 128 ) <nl> + for _ in 0 . . < N { <nl> + for _ in 0 . . < 5_000 { <nl> + for i in 0 . . < existentialArray . count - 1 { <nl> + swap ( & existentialArray [ i ] , & existentialArray [ i + 1 ] ) <nl> + } <nl> + } <nl> + } <nl> + } <nl> + func runTestArrayConditionalShift < T : Existential > ( withType : T . Type , numberOfTimes N : Int ) { <nl> + var existentialArray = initExistentialArray ( withType : T . self , count : 128 ) <nl> + for _ in 0 . . < N { <nl> + for _ in 0 . . < 5_000 { <nl> + for i in 0 . . < existentialArray . count - 1 { <nl> + let curr = existentialArray [ i ] <nl> + if curr . doIt ( ) { <nl> + existentialArray [ i ] = existentialArray [ i + 1 ] <nl> + existentialArray [ i + 1 ] = curr <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + / / TestOneMethodCall . <nl> + public func runTestOneMethodCall_IntValueBuffer0 ( _ N : Int ) { <nl> + runTestOneMethodCall ( withType : IntValueBuffer0 . 
self , numberOfTimes : N ) <nl> + } <nl> + public func runTestOneMethodCall_IntValueBuffer1 ( _ N : Int ) { <nl> + runTestOneMethodCall ( withType : IntValueBuffer1 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestOneMethodCall_IntValueBuffer2 ( _ N : Int ) { <nl> + runTestOneMethodCall ( withType : IntValueBuffer2 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestOneMethodCall_IntValueBuffer3 ( _ N : Int ) { <nl> + runTestOneMethodCall ( withType : IntValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestOneMethodCall_IntValueBuffer4 ( _ N : Int ) { <nl> + runTestOneMethodCall ( withType : IntValueBuffer4 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestOneMethodCall_ClassValueBuffer1 ( _ N : Int ) { <nl> + runTestOneMethodCall ( withType : ClassValueBuffer1 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestOneMethodCall_ClassValueBuffer2 ( _ N : Int ) { <nl> + runTestOneMethodCall ( withType : ClassValueBuffer2 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestOneMethodCall_ClassValueBuffer3 ( _ N : Int ) { <nl> + runTestOneMethodCall ( withType : ClassValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestOneMethodCall_ClassValueBuffer4 ( _ N : Int ) { <nl> + runTestOneMethodCall ( withType : ClassValueBuffer4 . self , numberOfTimes : N ) <nl> + } <nl> + <nl> + / / TestTwoMethodCalls . <nl> + public func runTestTwoMethodCalls_IntValueBuffer0 ( _ N : Int ) { <nl> + runTestTwoMethodCalls ( withType : IntValueBuffer0 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestTwoMethodCalls_IntValueBuffer1 ( _ N : Int ) { <nl> + runTestTwoMethodCalls ( withType : IntValueBuffer1 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestTwoMethodCalls_IntValueBuffer2 ( _ N : Int ) { <nl> + runTestTwoMethodCalls ( withType : IntValueBuffer2 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestTwoMethodCalls_IntValueBuffer3 ( _ N : Int ) { <nl> + runTestTwoMethodCalls ( withType : IntValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestTwoMethodCalls_IntValueBuffer4 ( _ N : Int ) { <nl> + runTestTwoMethodCalls ( withType : IntValueBuffer4 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestTwoMethodCalls_ClassValueBuffer1 ( _ N : Int ) { <nl> + runTestTwoMethodCalls ( withType : ClassValueBuffer1 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestTwoMethodCalls_ClassValueBuffer2 ( _ N : Int ) { <nl> + runTestTwoMethodCalls ( withType : ClassValueBuffer2 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestTwoMethodCalls_ClassValueBuffer3 ( _ N : Int ) { <nl> + runTestTwoMethodCalls ( withType : ClassValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestTwoMethodCalls_ClassValueBuffer4 ( _ N : Int ) { <nl> + runTestTwoMethodCalls ( withType : ClassValueBuffer4 . self , numberOfTimes : N ) <nl> + } <nl> + <nl> + / / TestPassExistentialOneMethodCall . <nl> + public func runTestPassExistentialOneMethodCall_IntValueBuffer0 ( _ N : Int ) { <nl> + runTestPassExistentialOneMethodCall ( withType : IntValueBuffer0 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestPassExistentialOneMethodCall_IntValueBuffer1 ( _ N : Int ) { <nl> + runTestPassExistentialOneMethodCall ( withType : IntValueBuffer1 . 
self , numberOfTimes : N ) <nl> + } <nl> + public func runTestPassExistentialOneMethodCall_IntValueBuffer2 ( _ N : Int ) { <nl> + runTestPassExistentialOneMethodCall ( withType : IntValueBuffer2 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestPassExistentialOneMethodCall_IntValueBuffer3 ( _ N : Int ) { <nl> + runTestPassExistentialOneMethodCall ( withType : IntValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestPassExistentialOneMethodCall_IntValueBuffer4 ( _ N : Int ) { <nl> + runTestPassExistentialOneMethodCall ( withType : IntValueBuffer4 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestPassExistentialOneMethodCall_ClassValueBuffer1 ( _ N : Int ) { <nl> + runTestPassExistentialOneMethodCall ( withType : ClassValueBuffer1 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestPassExistentialOneMethodCall_ClassValueBuffer2 ( _ N : Int ) { <nl> + runTestPassExistentialOneMethodCall ( withType : ClassValueBuffer2 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestPassExistentialOneMethodCall_ClassValueBuffer3 ( _ N : Int ) { <nl> + runTestPassExistentialOneMethodCall ( withType : ClassValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestPassExistentialOneMethodCall_ClassValueBuffer4 ( _ N : Int ) { <nl> + runTestPassExistentialOneMethodCall ( withType : ClassValueBuffer4 . self , numberOfTimes : N ) <nl> + } <nl> + <nl> + / / TestPassExistentialTwoMethodCalls . <nl> + public func runTestPassExistentialTwoMethodCalls_IntValueBuffer0 ( _ N : Int ) { <nl> + runTestPassExistentialTwoMethodCalls ( withType : IntValueBuffer0 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestPassExistentialTwoMethodCalls_IntValueBuffer1 ( _ N : Int ) { <nl> + runTestPassExistentialTwoMethodCalls ( withType : IntValueBuffer1 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestPassExistentialTwoMethodCalls_IntValueBuffer2 ( _ N : Int ) { <nl> + runTestPassExistentialTwoMethodCalls ( withType : IntValueBuffer2 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestPassExistentialTwoMethodCalls_IntValueBuffer3 ( _ N : Int ) { <nl> + runTestPassExistentialTwoMethodCalls ( withType : IntValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestPassExistentialTwoMethodCalls_IntValueBuffer4 ( _ N : Int ) { <nl> + runTestPassExistentialTwoMethodCalls ( withType : IntValueBuffer4 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestPassExistentialTwoMethodCalls_ClassValueBuffer1 ( _ N : Int ) { <nl> + runTestPassExistentialTwoMethodCalls ( withType : ClassValueBuffer1 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestPassExistentialTwoMethodCalls_ClassValueBuffer2 ( _ N : Int ) { <nl> + runTestPassExistentialTwoMethodCalls ( withType : ClassValueBuffer2 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestPassExistentialTwoMethodCalls_ClassValueBuffer3 ( _ N : Int ) { <nl> + runTestPassExistentialTwoMethodCalls ( withType : ClassValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestPassExistentialTwoMethodCalls_ClassValueBuffer4 ( _ N : Int ) { <nl> + runTestPassExistentialTwoMethodCalls ( withType : ClassValueBuffer4 . self , numberOfTimes : N ) <nl> + } <nl> + <nl> + / / TestMutating . <nl> + public func runTestMutating_IntValueBuffer0 ( _ N : Int ) { <nl> + runTestMutating ( withType : IntValueBuffer0 . 
self , numberOfTimes : N ) <nl> + } <nl> + public func runTestMutating_IntValueBuffer1 ( _ N : Int ) { <nl> + runTestMutating ( withType : IntValueBuffer1 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestMutating_IntValueBuffer2 ( _ N : Int ) { <nl> + runTestMutating ( withType : IntValueBuffer2 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestMutating_IntValueBuffer3 ( _ N : Int ) { <nl> + runTestMutating ( withType : IntValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestMutating_IntValueBuffer4 ( _ N : Int ) { <nl> + runTestMutating ( withType : IntValueBuffer4 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestMutating_ClassValueBuffer1 ( _ N : Int ) { <nl> + runTestMutating ( withType : ClassValueBuffer1 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestMutating_ClassValueBuffer2 ( _ N : Int ) { <nl> + runTestMutating ( withType : ClassValueBuffer2 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestMutating_ClassValueBuffer3 ( _ N : Int ) { <nl> + runTestMutating ( withType : ClassValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestMutating_ClassValueBuffer4 ( _ N : Int ) { <nl> + runTestMutating ( withType : ClassValueBuffer4 . self , numberOfTimes : N ) <nl> + } <nl> + <nl> + / / TestMutatingAndNonMutating . <nl> + public func runTestMutatingAndNonMutating_IntValueBuffer0 ( _ N : Int ) { <nl> + runTestMutatingAndNonMutating ( withType : IntValueBuffer0 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestMutatingAndNonMutating_IntValueBuffer1 ( _ N : Int ) { <nl> + runTestMutatingAndNonMutating ( withType : IntValueBuffer1 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestMutatingAndNonMutating_IntValueBuffer2 ( _ N : Int ) { <nl> + runTestMutatingAndNonMutating ( withType : IntValueBuffer2 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestMutatingAndNonMutating_IntValueBuffer3 ( _ N : Int ) { <nl> + runTestMutatingAndNonMutating ( withType : IntValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestMutatingAndNonMutating_IntValueBuffer4 ( _ N : Int ) { <nl> + runTestMutatingAndNonMutating ( withType : IntValueBuffer4 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestMutatingAndNonMutating_ClassValueBuffer1 ( _ N : Int ) { <nl> + runTestMutatingAndNonMutating ( withType : ClassValueBuffer1 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestMutatingAndNonMutating_ClassValueBuffer2 ( _ N : Int ) { <nl> + runTestMutatingAndNonMutating ( withType : ClassValueBuffer2 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestMutatingAndNonMutating_ClassValueBuffer3 ( _ N : Int ) { <nl> + runTestMutatingAndNonMutating ( withType : ClassValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestMutatingAndNonMutating_ClassValueBuffer4 ( _ N : Int ) { <nl> + runTestMutatingAndNonMutating ( withType : ClassValueBuffer4 . self , numberOfTimes : N ) <nl> + } <nl> + <nl> + / / TestArrayOneMethodCall . <nl> + public func runTestArrayOneMethodCall_IntValueBuffer0 ( _ N : Int ) { <nl> + runTestArrayOneMethodCall ( withType : IntValueBuffer0 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayOneMethodCall_IntValueBuffer1 ( _ N : Int ) { <nl> + runTestArrayOneMethodCall ( withType : IntValueBuffer1 . 
self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayOneMethodCall_IntValueBuffer2 ( _ N : Int ) { <nl> + runTestArrayOneMethodCall ( withType : IntValueBuffer2 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayOneMethodCall_IntValueBuffer3 ( _ N : Int ) { <nl> + runTestArrayOneMethodCall ( withType : IntValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayOneMethodCall_IntValueBuffer4 ( _ N : Int ) { <nl> + runTestArrayOneMethodCall ( withType : IntValueBuffer4 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayOneMethodCall_ClassValueBuffer1 ( _ N : Int ) { <nl> + runTestArrayOneMethodCall ( withType : ClassValueBuffer1 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayOneMethodCall_ClassValueBuffer2 ( _ N : Int ) { <nl> + runTestArrayOneMethodCall ( withType : ClassValueBuffer2 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayOneMethodCall_ClassValueBuffer3 ( _ N : Int ) { <nl> + runTestArrayOneMethodCall ( withType : ClassValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayOneMethodCall_ClassValueBuffer4 ( _ N : Int ) { <nl> + runTestArrayOneMethodCall ( withType : ClassValueBuffer4 . self , numberOfTimes : N ) <nl> + } <nl> + <nl> + / / TestArrayTwoMethodCalls . <nl> + public func runTestArrayTwoMethodCalls_IntValueBuffer0 ( _ N : Int ) { <nl> + runTestArrayTwoMethodCalls ( withType : IntValueBuffer0 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayTwoMethodCalls_IntValueBuffer1 ( _ N : Int ) { <nl> + runTestArrayTwoMethodCalls ( withType : IntValueBuffer1 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayTwoMethodCalls_IntValueBuffer2 ( _ N : Int ) { <nl> + runTestArrayTwoMethodCalls ( withType : IntValueBuffer2 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayTwoMethodCalls_IntValueBuffer3 ( _ N : Int ) { <nl> + runTestArrayTwoMethodCalls ( withType : IntValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayTwoMethodCalls_IntValueBuffer4 ( _ N : Int ) { <nl> + runTestArrayTwoMethodCalls ( withType : IntValueBuffer4 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayTwoMethodCalls_ClassValueBuffer1 ( _ N : Int ) { <nl> + runTestArrayTwoMethodCalls ( withType : ClassValueBuffer1 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayTwoMethodCalls_ClassValueBuffer2 ( _ N : Int ) { <nl> + runTestArrayTwoMethodCalls ( withType : ClassValueBuffer2 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayTwoMethodCalls_ClassValueBuffer3 ( _ N : Int ) { <nl> + runTestArrayTwoMethodCalls ( withType : ClassValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayTwoMethodCalls_ClassValueBuffer4 ( _ N : Int ) { <nl> + runTestArrayTwoMethodCalls ( withType : ClassValueBuffer4 . self , numberOfTimes : N ) <nl> + } <nl> + <nl> + / / TestArrayMutating . <nl> + public func runTestArrayMutating_IntValueBuffer0 ( _ N : Int ) { <nl> + runTestArrayMutating ( withType : IntValueBuffer0 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayMutating_IntValueBuffer1 ( _ N : Int ) { <nl> + runTestArrayMutating ( withType : IntValueBuffer1 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayMutating_IntValueBuffer2 ( _ N : Int ) { <nl> + runTestArrayMutating ( withType : IntValueBuffer2 . 
self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayMutating_IntValueBuffer3 ( _ N : Int ) { <nl> + runTestArrayMutating ( withType : IntValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayMutating_IntValueBuffer4 ( _ N : Int ) { <nl> + runTestArrayMutating ( withType : IntValueBuffer4 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayMutating_ClassValueBuffer1 ( _ N : Int ) { <nl> + runTestArrayMutating ( withType : ClassValueBuffer1 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayMutating_ClassValueBuffer2 ( _ N : Int ) { <nl> + runTestArrayMutating ( withType : ClassValueBuffer2 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayMutating_ClassValueBuffer3 ( _ N : Int ) { <nl> + runTestArrayMutating ( withType : ClassValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayMutating_ClassValueBuffer4 ( _ N : Int ) { <nl> + runTestArrayMutating ( withType : ClassValueBuffer4 . self , numberOfTimes : N ) <nl> + } <nl> + <nl> + / / TestArrayShift . <nl> + public func runTestArrayShift_IntValueBuffer0 ( _ N : Int ) { <nl> + runTestArrayShift ( withType : IntValueBuffer0 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayShift_IntValueBuffer1 ( _ N : Int ) { <nl> + runTestArrayShift ( withType : IntValueBuffer1 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayShift_IntValueBuffer2 ( _ N : Int ) { <nl> + runTestArrayShift ( withType : IntValueBuffer2 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayShift_IntValueBuffer3 ( _ N : Int ) { <nl> + runTestArrayShift ( withType : IntValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayShift_IntValueBuffer4 ( _ N : Int ) { <nl> + runTestArrayShift ( withType : IntValueBuffer4 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayShift_ClassValueBuffer1 ( _ N : Int ) { <nl> + runTestArrayShift ( withType : ClassValueBuffer1 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayShift_ClassValueBuffer2 ( _ N : Int ) { <nl> + runTestArrayShift ( withType : ClassValueBuffer2 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayShift_ClassValueBuffer3 ( _ N : Int ) { <nl> + runTestArrayShift ( withType : ClassValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayShift_ClassValueBuffer4 ( _ N : Int ) { <nl> + runTestArrayShift ( withType : ClassValueBuffer4 . self , numberOfTimes : N ) <nl> + } <nl> + <nl> + / / TestArrayConditionalShift . <nl> + public func runTestArrayConditionalShift_IntValueBuffer0 ( _ N : Int ) { <nl> + runTestArrayConditionalShift ( withType : IntValueBuffer0 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayConditionalShift_IntValueBuffer1 ( _ N : Int ) { <nl> + runTestArrayConditionalShift ( withType : IntValueBuffer1 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayConditionalShift_IntValueBuffer2 ( _ N : Int ) { <nl> + runTestArrayConditionalShift ( withType : IntValueBuffer2 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayConditionalShift_IntValueBuffer3 ( _ N : Int ) { <nl> + runTestArrayConditionalShift ( withType : IntValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayConditionalShift_IntValueBuffer4 ( _ N : Int ) { <nl> + runTestArrayConditionalShift ( withType : IntValueBuffer4 . 
self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayConditionalShift_ClassValueBuffer1 ( _ N : Int ) { <nl> + runTestArrayConditionalShift ( withType : ClassValueBuffer1 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayConditionalShift_ClassValueBuffer2 ( _ N : Int ) { <nl> + runTestArrayConditionalShift ( withType : ClassValueBuffer2 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayConditionalShift_ClassValueBuffer3 ( _ N : Int ) { <nl> + runTestArrayConditionalShift ( withType : ClassValueBuffer3 . self , numberOfTimes : N ) <nl> + } <nl> + public func runTestArrayConditionalShift_ClassValueBuffer4 ( _ N : Int ) { <nl> + runTestArrayConditionalShift ( withType : ClassValueBuffer4 . self , numberOfTimes : N ) <nl> + } <nl> mmm a / benchmark / utils / main . swift <nl> ppp b / benchmark / utils / main . swift <nl> import DictTest <nl> import DictTest2 <nl> import DictTest3 <nl> import DropLast <nl> + import ExistentialPerformance <nl> import ErrorHandling <nl> import Fibonacci <nl> import GlobalClass <nl> precommitTests = [ <nl> " DropLastArray " : run_DropLastArray , <nl> " DropLastCountableRange " : run_DropLastCountableRange , <nl> " DropLastSequence " : run_DropLastSequence , <nl> + " ExistentialTestOneMethodCall_IntValueBuffer0 " : runTestOneMethodCall_IntValueBuffer0 , <nl> + " ExistentialTestOneMethodCall_IntValueBuffer1 " : runTestOneMethodCall_IntValueBuffer1 , <nl> + " ExistentialTestOneMethodCall_IntValueBuffer2 " : runTestOneMethodCall_IntValueBuffer2 , <nl> + " ExistentialTestOneMethodCall_IntValueBuffer3 " : runTestOneMethodCall_IntValueBuffer3 , <nl> + " ExistentialTestOneMethodCall_IntValueBuffer4 " : runTestOneMethodCall_IntValueBuffer4 , <nl> + " ExistentialTestOneMethodCall_ClassValueBuffer1 " : runTestOneMethodCall_ClassValueBuffer1 , <nl> + " ExistentialTestOneMethodCall_ClassValueBuffer2 " : runTestOneMethodCall_ClassValueBuffer2 , <nl> + " ExistentialTestOneMethodCall_ClassValueBuffer3 " : runTestOneMethodCall_ClassValueBuffer3 , <nl> + " ExistentialTestOneMethodCall_ClassValueBuffer4 " : runTestOneMethodCall_ClassValueBuffer4 , <nl> + " ExistentialTestTwoMethodCalls_IntValueBuffer0 " : runTestTwoMethodCalls_IntValueBuffer0 , <nl> + " ExistentialTestTwoMethodCalls_IntValueBuffer1 " : runTestTwoMethodCalls_IntValueBuffer1 , <nl> + " ExistentialTestTwoMethodCalls_IntValueBuffer2 " : runTestTwoMethodCalls_IntValueBuffer2 , <nl> + " ExistentialTestTwoMethodCalls_IntValueBuffer3 " : runTestTwoMethodCalls_IntValueBuffer3 , <nl> + " ExistentialTestTwoMethodCalls_IntValueBuffer4 " : runTestTwoMethodCalls_IntValueBuffer4 , <nl> + " ExistentialTestTwoMethodCalls_ClassValueBuffer1 " : runTestTwoMethodCalls_ClassValueBuffer1 , <nl> + " ExistentialTestTwoMethodCalls_ClassValueBuffer2 " : runTestTwoMethodCalls_ClassValueBuffer2 , <nl> + " ExistentialTestTwoMethodCalls_ClassValueBuffer3 " : runTestTwoMethodCalls_ClassValueBuffer3 , <nl> + " ExistentialTestTwoMethodCalls_ClassValueBuffer4 " : runTestTwoMethodCalls_ClassValueBuffer4 , <nl> + " ExistentialTestPassExistentialOneMethodCall_IntValueBuffer0 " : runTestPassExistentialOneMethodCall_IntValueBuffer0 , <nl> + " ExistentialTestPassExistentialOneMethodCall_IntValueBuffer1 " : runTestPassExistentialOneMethodCall_IntValueBuffer1 , <nl> + " ExistentialTestPassExistentialOneMethodCall_IntValueBuffer2 " : runTestPassExistentialOneMethodCall_IntValueBuffer2 , <nl> + " ExistentialTestPassExistentialOneMethodCall_IntValueBuffer3 " : 
runTestPassExistentialOneMethodCall_IntValueBuffer3 , <nl> + " ExistentialTestPassExistentialOneMethodCall_IntValueBuffer4 " : runTestPassExistentialOneMethodCall_IntValueBuffer4 , <nl> + " ExistentialTestPassExistentialOneMethodCall_ClassValueBuffer1 " : runTestPassExistentialOneMethodCall_ClassValueBuffer1 , <nl> + " ExistentialTestPassExistentialOneMethodCall_ClassValueBuffer2 " : runTestPassExistentialOneMethodCall_ClassValueBuffer2 , <nl> + " ExistentialTestPassExistentialOneMethodCall_ClassValueBuffer3 " : runTestPassExistentialOneMethodCall_ClassValueBuffer3 , <nl> + " ExistentialTestPassExistentialOneMethodCall_ClassValueBuffer4 " : runTestPassExistentialOneMethodCall_ClassValueBuffer4 , <nl> + " ExistentialTestPassExistentialTwoMethodCalls_IntValueBuffer0 " : runTestPassExistentialTwoMethodCalls_IntValueBuffer0 , <nl> + " ExistentialTestPassExistentialTwoMethodCalls_IntValueBuffer1 " : runTestPassExistentialTwoMethodCalls_IntValueBuffer1 , <nl> + " ExistentialTestPassExistentialTwoMethodCalls_IntValueBuffer2 " : runTestPassExistentialTwoMethodCalls_IntValueBuffer2 , <nl> + " ExistentialTestPassExistentialTwoMethodCalls_IntValueBuffer3 " : runTestPassExistentialTwoMethodCalls_IntValueBuffer3 , <nl> + " ExistentialTestPassExistentialTwoMethodCalls_IntValueBuffer4 " : runTestPassExistentialTwoMethodCalls_IntValueBuffer4 , <nl> + " ExistentialTestPassExistentialTwoMethodCalls_ClassValueBuffer1 " : runTestPassExistentialTwoMethodCalls_ClassValueBuffer1 , <nl> + " ExistentialTestPassExistentialTwoMethodCalls_ClassValueBuffer2 " : runTestPassExistentialTwoMethodCalls_ClassValueBuffer2 , <nl> + " ExistentialTestPassExistentialTwoMethodCalls_ClassValueBuffer3 " : runTestPassExistentialTwoMethodCalls_ClassValueBuffer3 , <nl> + " ExistentialTestPassExistentialTwoMethodCalls_ClassValueBuffer4 " : runTestPassExistentialTwoMethodCalls_ClassValueBuffer4 , <nl> + " ExistentialTestMutating_IntValueBuffer0 " : runTestMutating_IntValueBuffer0 , <nl> + " ExistentialTestMutating_IntValueBuffer1 " : runTestMutating_IntValueBuffer1 , <nl> + " ExistentialTestMutating_IntValueBuffer2 " : runTestMutating_IntValueBuffer2 , <nl> + " ExistentialTestMutating_IntValueBuffer3 " : runTestMutating_IntValueBuffer3 , <nl> + " ExistentialTestMutating_IntValueBuffer4 " : runTestMutating_IntValueBuffer4 , <nl> + " ExistentialTestMutating_ClassValueBuffer1 " : runTestMutating_ClassValueBuffer1 , <nl> + " ExistentialTestMutating_ClassValueBuffer2 " : runTestMutating_ClassValueBuffer2 , <nl> + " ExistentialTestMutating_ClassValueBuffer3 " : runTestMutating_ClassValueBuffer3 , <nl> + " ExistentialTestMutating_ClassValueBuffer4 " : runTestMutating_ClassValueBuffer4 , <nl> + " ExistentialTestMutatingAndNonMutating_IntValueBuffer0 " : runTestMutatingAndNonMutating_IntValueBuffer0 , <nl> + " ExistentialTestMutatingAndNonMutating_IntValueBuffer1 " : runTestMutatingAndNonMutating_IntValueBuffer1 , <nl> + " ExistentialTestMutatingAndNonMutating_IntValueBuffer2 " : runTestMutatingAndNonMutating_IntValueBuffer2 , <nl> + " ExistentialTestMutatingAndNonMutating_IntValueBuffer3 " : runTestMutatingAndNonMutating_IntValueBuffer3 , <nl> + " ExistentialTestMutatingAndNonMutating_IntValueBuffer4 " : runTestMutatingAndNonMutating_IntValueBuffer4 , <nl> + " ExistentialTestMutatingAndNonMutating_ClassValueBuffer1 " : runTestMutatingAndNonMutating_ClassValueBuffer1 , <nl> + " ExistentialTestMutatingAndNonMutating_ClassValueBuffer2 " : runTestMutatingAndNonMutating_ClassValueBuffer2 , <nl> + " 
ExistentialTestMutatingAndNonMutating_ClassValueBuffer3 " : runTestMutatingAndNonMutating_ClassValueBuffer3 , <nl> + " ExistentialTestMutatingAndNonMutating_ClassValueBuffer4 " : runTestMutatingAndNonMutating_ClassValueBuffer4 , <nl> + " ExistentialTestArrayOneMethodCall_IntValueBuffer0 " : runTestArrayOneMethodCall_IntValueBuffer0 , <nl> + " ExistentialTestArrayOneMethodCall_IntValueBuffer1 " : runTestArrayOneMethodCall_IntValueBuffer1 , <nl> + " ExistentialTestArrayOneMethodCall_IntValueBuffer2 " : runTestArrayOneMethodCall_IntValueBuffer2 , <nl> + " ExistentialTestArrayOneMethodCall_IntValueBuffer3 " : runTestArrayOneMethodCall_IntValueBuffer3 , <nl> + " ExistentialTestArrayOneMethodCall_IntValueBuffer4 " : runTestArrayOneMethodCall_IntValueBuffer4 , <nl> + " ExistentialTestArrayOneMethodCall_ClassValueBuffer1 " : runTestArrayOneMethodCall_ClassValueBuffer1 , <nl> + " ExistentialTestArrayOneMethodCall_ClassValueBuffer2 " : runTestArrayOneMethodCall_ClassValueBuffer2 , <nl> + " ExistentialTestArrayOneMethodCall_ClassValueBuffer3 " : runTestArrayOneMethodCall_ClassValueBuffer3 , <nl> + " ExistentialTestArrayOneMethodCall_ClassValueBuffer4 " : runTestArrayOneMethodCall_ClassValueBuffer4 , <nl> + " ExistentialTestArrayTwoMethodCalls_IntValueBuffer0 " : runTestArrayTwoMethodCalls_IntValueBuffer0 , <nl> + " ExistentialTestArrayTwoMethodCalls_IntValueBuffer1 " : runTestArrayTwoMethodCalls_IntValueBuffer1 , <nl> + " ExistentialTestArrayTwoMethodCalls_IntValueBuffer2 " : runTestArrayTwoMethodCalls_IntValueBuffer2 , <nl> + " ExistentialTestArrayTwoMethodCalls_IntValueBuffer3 " : runTestArrayTwoMethodCalls_IntValueBuffer3 , <nl> + " ExistentialTestArrayTwoMethodCalls_IntValueBuffer4 " : runTestArrayTwoMethodCalls_IntValueBuffer4 , <nl> + " ExistentialTestArrayTwoMethodCalls_ClassValueBuffer1 " : runTestArrayTwoMethodCalls_ClassValueBuffer1 , <nl> + " ExistentialTestArrayTwoMethodCalls_ClassValueBuffer2 " : runTestArrayTwoMethodCalls_ClassValueBuffer2 , <nl> + " ExistentialTestArrayTwoMethodCalls_ClassValueBuffer3 " : runTestArrayTwoMethodCalls_ClassValueBuffer3 , <nl> + " ExistentialTestArrayTwoMethodCalls_ClassValueBuffer4 " : runTestArrayTwoMethodCalls_ClassValueBuffer4 , <nl> + " ExistentialTestArrayMutating_IntValueBuffer0 " : runTestArrayMutating_IntValueBuffer0 , <nl> + " ExistentialTestArrayMutating_IntValueBuffer1 " : runTestArrayMutating_IntValueBuffer1 , <nl> + " ExistentialTestArrayMutating_IntValueBuffer2 " : runTestArrayMutating_IntValueBuffer2 , <nl> + " ExistentialTestArrayMutating_IntValueBuffer3 " : runTestArrayMutating_IntValueBuffer3 , <nl> + " ExistentialTestArrayMutating_IntValueBuffer4 " : runTestArrayMutating_IntValueBuffer4 , <nl> + " ExistentialTestArrayMutating_ClassValueBuffer1 " : runTestArrayMutating_ClassValueBuffer1 , <nl> + " ExistentialTestArrayMutating_ClassValueBuffer2 " : runTestArrayMutating_ClassValueBuffer2 , <nl> + " ExistentialTestArrayMutating_ClassValueBuffer3 " : runTestArrayMutating_ClassValueBuffer3 , <nl> + " ExistentialTestArrayMutating_ClassValueBuffer4 " : runTestArrayMutating_ClassValueBuffer4 , <nl> + " ExistentialTestArrayShift_IntValueBuffer0 " : runTestArrayShift_IntValueBuffer0 , <nl> + " ExistentialTestArrayShift_IntValueBuffer1 " : runTestArrayShift_IntValueBuffer1 , <nl> + " ExistentialTestArrayShift_IntValueBuffer2 " : runTestArrayShift_IntValueBuffer2 , <nl> + " ExistentialTestArrayShift_IntValueBuffer3 " : runTestArrayShift_IntValueBuffer3 , <nl> + " ExistentialTestArrayShift_IntValueBuffer4 " : runTestArrayShift_IntValueBuffer4 , <nl> 
+ " ExistentialTestArrayShift_ClassValueBuffer1 " : runTestArrayShift_ClassValueBuffer1 , <nl> + " ExistentialTestArrayShift_ClassValueBuffer2 " : runTestArrayShift_ClassValueBuffer2 , <nl> + " ExistentialTestArrayShift_ClassValueBuffer3 " : runTestArrayShift_ClassValueBuffer3 , <nl> + " ExistentialTestArrayShift_ClassValueBuffer4 " : runTestArrayShift_ClassValueBuffer4 , <nl> + " ExistentialTestArrayConditionalShift_IntValueBuffer0 " : runTestArrayConditionalShift_IntValueBuffer0 , <nl> + " ExistentialTestArrayConditionalShift_IntValueBuffer1 " : runTestArrayConditionalShift_IntValueBuffer1 , <nl> + " ExistentialTestArrayConditionalShift_IntValueBuffer2 " : runTestArrayConditionalShift_IntValueBuffer2 , <nl> + " ExistentialTestArrayConditionalShift_IntValueBuffer3 " : runTestArrayConditionalShift_IntValueBuffer3 , <nl> + " ExistentialTestArrayConditionalShift_IntValueBuffer4 " : runTestArrayConditionalShift_IntValueBuffer4 , <nl> + " ExistentialTestArrayConditionalShift_ClassValueBuffer1 " : runTestArrayConditionalShift_ClassValueBuffer1 , <nl> + " ExistentialTestArrayConditionalShift_ClassValueBuffer2 " : runTestArrayConditionalShift_ClassValueBuffer2 , <nl> + " ExistentialTestArrayConditionalShift_ClassValueBuffer3 " : runTestArrayConditionalShift_ClassValueBuffer3 , <nl> + " ExistentialTestArrayConditionalShift_ClassValueBuffer4 " : runTestArrayConditionalShift_ClassValueBuffer4 , <nl> " ErrorHandling " : run_ErrorHandling , <nl> " GlobalClass " : run_GlobalClass , <nl> " Hanoi " : run_Hanoi , <nl>
Add existential performance benchmarks
apple/swift
f5cf0aa5d60817124ec21c8b91070dd51090910f
2017-03-27T18:32:47Z
mmm a / src / mips / macro - assembler - mips . cc <nl> ppp b / src / mips / macro - assembler - mips . cc <nl> void MacroAssembler : : Ext ( Register rt , <nl> / / Move rs to rt and shift it left then right to get the <nl> / / desired bitfield on the right side and zeroes on the left . <nl> int shift_left = 32 - ( pos + size ) ; <nl> - if ( shift_left > 0 ) { <nl> - sll ( rt , rs , shift_left ) ; <nl> - } <nl> + sll ( rt , rs , shift_left ) ; / / Acts as a move if shift_left = = 0 . <nl> <nl> int shift_right = 32 - size ; <nl> if ( shift_right > 0 ) { <nl>
Landing : MIPS : Fixed a bug in a special case of MacroAssembler : : Ext .
v8/v8
a2deb8af0bb577a5666b11e44d9fa6ee2500cf5b
2011-08-29T07:13:06Z
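The v8 fix above hinges on `sll` with a zero shift amount acting as a plain move, so the instruction must be emitted even when `shift_left == 0`. A tiny TypeScript sketch of the intended `Ext` arithmetic (extract `size` bits starting at `pos` from a 32-bit value) makes that edge case visible; it models only the bit manipulation, not the assembler itself.

```typescript
// Model of the shift-left / shift-right bitfield extraction performed by
// MacroAssembler::Ext on MIPS. JS bitwise ops are 32-bit, which matches here.
function ext32(rs: number, pos: number, size: number): number {
  const shiftLeft = 32 - (pos + size);
  // In the macro assembler this sll must be emitted even when shiftLeft is 0,
  // because it is also the instruction that copies rs into rt.
  let rt = (rs << shiftLeft) >>> 0;
  const shiftRight = 32 - size;
  if (shiftRight > 0) {
    rt = rt >>> shiftRight; // srl: drop the bits below the field
  }
  return rt >>> 0;
}

// Example: extracting the top 8 bits of 0xAABBCCDD (pos = 24, size = 8)
// hits the shiftLeft === 0 case and yields 0xAA.
// ext32(0xaabbccdd, 24, 8) === 0xaa
```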
mmm a / tensorflow / compiler / xla / service / batchnorm_expander_test . cc <nl> ppp b / tensorflow / compiler / xla / service / batchnorm_expander_test . cc <nl> TEST_F ( BatchNormExpanderTest , BatchNormTraining ) { <nl> <nl> HloComputation : : Builder builder ( TestName ( ) ) ; <nl> HloInstruction * param0 = builder . AddInstruction ( <nl> - HloInstruction : : CreateParameter ( 0 , input_shape , " activiation " ) ) ; <nl> + HloInstruction : : CreateParameter ( 0 , input_shape , " activation " ) ) ; <nl> <nl> HloInstruction * param1 = builder . AddInstruction ( <nl> HloInstruction : : CreateParameter ( 1 , scale_shape , " scale " ) ) ; <nl>
Update batchnorm_expander_test . cc
tensorflow/tensorflow
7d14b0e3beeea2b59d17145309918c41c6e5505c
2019-02-26T08:20:14Z
mmm a / 3rdParty / Makefile . v8 - windows <nl> ppp b / 3rdParty / Makefile . v8 - windows <nl> install_bits : <nl> mkdir - p . . / WindowsLibraries / $ ( BITS ) / include / unicode <nl> for i in ` find $ ( V8 ) / build - name $ ( PDBNAME ) | grep $ ( BITS ) ` ; do \ <nl> LIBNAME = ` echo $ $ i | sed ' s ; . * / \ ( . * \ ) / $ ( PDBNAME ) ; \ 1 ; ' ` ; \ <nl> - BUILD = ` echo $ $ i | sed ' s ; $ ( V8 ) / build / \ ( . * \ ) / obj / $ $ { LIBNAME } / $ ( PDBNAME ) ; \ 1 ; ' ` ; \ <nl> + BUILD = ` echo $ $ i | sed " s ; $ ( V8 ) / build / \ ( . * \ ) / obj / $ $ { LIBNAME } / $ ( PDBNAME ) ; \ 1 ; " ` ; \ <nl> cp $ $ i $ ( V8 ) / build / $ $ { BUILD } / lib / $ $ { LIBNAME } . pdb ; \ <nl> done <nl> <nl>
Fix windows install : variables aren ' t replaced in single quote strings .
arangodb/arangodb
af3002bdcbd1db53b95ae64fc61d2539db1f06f6
2016-01-18T14:36:08Z
mmm a / test / mozilla / testcfg . py <nl> ppp b / test / mozilla / testcfg . py <nl> def IsFailureOutput ( self , output ) : <nl> def GetCommand ( self ) : <nl> result = self . context . GetVmCommand ( self , self . mode ) + \ <nl> [ ' - - expose - gc ' , join ( self . root , ' mozilla - shell - emulation . js ' ) ] <nl> + result + = [ ' - - es5_readonly ' ] # Temporary hack until we can remove flag <nl> result + = self . framework <nl> result . append ( self . filename ) <nl> return result <nl> mmm a / test / test262 / testcfg . py <nl> ppp b / test / test262 / testcfg . py <nl> def IsFailureOutput ( self , output ) : <nl> <nl> def GetCommand ( self ) : <nl> result = self . context . GetVmCommand ( self , self . mode ) <nl> + result + = [ ' - - es5_readonly ' ] # Temporary hack until we can remove flag <nl> result + = self . framework <nl> result . append ( self . filename ) <nl> return result <nl>
Temporarily hack test frameworks to use the temporary flag . : (
v8/v8
c54adffc2d0cb1102b86d49b7b639e40e695177f
2012-06-05T13:38:32Z
mmm a / tests / runner . py <nl> ppp b / tests / runner . py <nl> def test_sdl_audio_mix ( self ) : <nl> shutil . copyfile ( path_from_root ( ' tests ' , ' sounds ' , ' the_entertainer . ogg ' ) , os . path . join ( self . get_dir ( ) , ' music . ogg ' ) ) <nl> open ( os . path . join ( self . get_dir ( ) , ' sdl_audio_mix . c ' ) , ' w ' ) . write ( self . with_report_result ( open ( path_from_root ( ' tests ' , ' sdl_audio_mix . c ' ) ) . read ( ) ) ) <nl> <nl> - # use closure to check for a possible bug with closure minifying away newer Audio ( ) attributes <nl> Popen ( [ PYTHON , EMCC , ' - O2 ' , ' - - minify ' , ' 0 ' , os . path . join ( self . get_dir ( ) , ' sdl_audio_mix . c ' ) , ' - - preload - file ' , ' sound . ogg ' , ' - - preload - file ' , ' music . ogg ' , ' - o ' , ' page . html ' ] ) . communicate ( ) <nl> self . run_browser ( ' page . html ' , ' ' , ' / report_result ? 1 ' ) <nl> <nl> def test_sdl_audio_quickload ( self ) : <nl> open ( os . path . join ( self . get_dir ( ) , ' sdl_audio_quickload . c ' ) , ' w ' ) . write ( self . with_report_result ( open ( path_from_root ( ' tests ' , ' sdl_audio_quickload . c ' ) ) . read ( ) ) ) <nl> <nl> - # use closure to check for a possible bug with closure minifying away newer Audio ( ) attributes <nl> Popen ( [ PYTHON , EMCC , ' - O2 ' , ' - - minify ' , ' 0 ' , os . path . join ( self . get_dir ( ) , ' sdl_audio_quickload . c ' ) , ' - o ' , ' page . html ' , ' - s ' , ' EXPORTED_FUNCTIONS = [ " _main " , " _play " ] ' ] ) . communicate ( ) <nl> self . run_browser ( ' page . html ' , ' ' , ' / report_result ? 1 ' ) <nl> <nl> def test_sdl_gl_read ( self ) : <nl> self . run_browser ( ' something . html ' , ' . ' , ' / report_result ? 1 ' ) <nl> <nl> def test_sdl_ogl ( self ) : <nl> - # SDL , OpenGL , textures , immediate mode . Closure for more coverage <nl> shutil . copyfile ( path_from_root ( ' tests ' , ' screenshot . png ' ) , os . path . join ( self . get_dir ( ) , ' screenshot . png ' ) ) <nl> self . reftest ( path_from_root ( ' tests ' , ' screenshot - gray - purple . png ' ) ) <nl> Popen ( [ PYTHON , EMCC , path_from_root ( ' tests ' , ' sdl_ogl . c ' ) , ' - O2 ' , ' - - minify ' , ' 0 ' , ' - o ' , ' something . html ' , ' - - pre - js ' , ' reftest . js ' , ' - - preload - file ' , ' screenshot . png ' , ' - s ' , ' GL_TESTING = 1 ' ] ) . communicate ( ) <nl> self . run_browser ( ' something . html ' , ' You should see an image with gray at the top . ' , ' / report_result ? 0 ' ) <nl> <nl> def test_sdl_ogl_defaultmatrixmode ( self ) : <nl> - # SDL , OpenGL , textures , immediate mode . Closure for more coverage <nl> shutil . copyfile ( path_from_root ( ' tests ' , ' screenshot . png ' ) , os . path . join ( self . get_dir ( ) , ' screenshot . png ' ) ) <nl> self . reftest ( path_from_root ( ' tests ' , ' screenshot - gray - purple . png ' ) ) <nl> Popen ( [ PYTHON , EMCC , path_from_root ( ' tests ' , ' sdl_ogl_defaultMatrixMode . c ' ) , ' - - minify ' , ' 0 ' , ' - o ' , ' something . html ' , ' - - pre - js ' , ' reftest . js ' , ' - - preload - file ' , ' screenshot . png ' , ' - s ' , ' GL_TESTING = 1 ' ] ) . communicate ( ) <nl> def test_sdl_ogl_p ( self ) : <nl> self . run_browser ( ' something . html ' , ' You should see an image with gray at the top . ' , ' / report_result ? 0 ' ) <nl> <nl> def test_sdl_fog_simple ( self ) : <nl> - # SDL , OpenGL , textures , fog , immediate mode . Closure for more coverage <nl> shutil . copyfile ( path_from_root ( ' tests ' , ' screenshot . png ' ) , os . path . 
join ( self . get_dir ( ) , ' screenshot . png ' ) ) <nl> self . reftest ( path_from_root ( ' tests ' , ' screenshot - fog - simple . png ' ) ) <nl> Popen ( [ PYTHON , EMCC , path_from_root ( ' tests ' , ' sdl_fog_simple . c ' ) , ' - O2 ' , ' - - minify ' , ' 0 ' , ' - o ' , ' something . html ' , ' - - pre - js ' , ' reftest . js ' , ' - - preload - file ' , ' screenshot . png ' , ' - s ' , ' GL_TESTING = 1 ' ] ) . communicate ( ) <nl> self . run_browser ( ' something . html ' , ' You should see an image with fog . ' , ' / report_result ? 0 ' ) <nl> <nl> def test_sdl_fog_negative ( self ) : <nl> - # SDL , OpenGL , textures , fog , immediate mode . Closure for more coverage <nl> shutil . copyfile ( path_from_root ( ' tests ' , ' screenshot . png ' ) , os . path . join ( self . get_dir ( ) , ' screenshot . png ' ) ) <nl> self . reftest ( path_from_root ( ' tests ' , ' screenshot - fog - negative . png ' ) ) <nl> Popen ( [ PYTHON , EMCC , path_from_root ( ' tests ' , ' sdl_fog_negative . c ' ) , ' - o ' , ' something . html ' , ' - - pre - js ' , ' reftest . js ' , ' - - preload - file ' , ' screenshot . png ' , ' - s ' , ' GL_TESTING = 1 ' ] ) . communicate ( ) <nl> self . run_browser ( ' something . html ' , ' You should see an image with fog . ' , ' / report_result ? 0 ' ) <nl> <nl> def test_sdl_fog_density ( self ) : <nl> - # SDL , OpenGL , textures , fog , immediate mode . Closure for more coverage <nl> shutil . copyfile ( path_from_root ( ' tests ' , ' screenshot . png ' ) , os . path . join ( self . get_dir ( ) , ' screenshot . png ' ) ) <nl> self . reftest ( path_from_root ( ' tests ' , ' screenshot - fog - density . png ' ) ) <nl> Popen ( [ PYTHON , EMCC , path_from_root ( ' tests ' , ' sdl_fog_density . c ' ) , ' - o ' , ' something . html ' , ' - - pre - js ' , ' reftest . js ' , ' - - preload - file ' , ' screenshot . png ' , ' - s ' , ' GL_TESTING = 1 ' ] ) . communicate ( ) <nl> self . run_browser ( ' something . html ' , ' You should see an image with fog . ' , ' / report_result ? 0 ' ) <nl> <nl> def test_sdl_fog_exp2 ( self ) : <nl> - # SDL , OpenGL , textures , fog , immediate mode . Closure for more coverage <nl> shutil . copyfile ( path_from_root ( ' tests ' , ' screenshot . png ' ) , os . path . join ( self . get_dir ( ) , ' screenshot . png ' ) ) <nl> self . reftest ( path_from_root ( ' tests ' , ' screenshot - fog - exp2 . png ' ) ) <nl> Popen ( [ PYTHON , EMCC , path_from_root ( ' tests ' , ' sdl_fog_exp2 . c ' ) , ' - o ' , ' something . html ' , ' - - pre - js ' , ' reftest . js ' , ' - - preload - file ' , ' screenshot . png ' , ' - s ' , ' GL_TESTING = 1 ' ] ) . communicate ( ) <nl> self . run_browser ( ' something . html ' , ' You should see an image with fog . ' , ' / report_result ? 0 ' ) <nl> <nl> def test_sdl_fog_linear ( self ) : <nl> - # SDL , OpenGL , textures , fog , immediate mode . Closure for more coverage <nl> shutil . copyfile ( path_from_root ( ' tests ' , ' screenshot . png ' ) , os . path . join ( self . get_dir ( ) , ' screenshot . png ' ) ) <nl> self . reftest ( path_from_root ( ' tests ' , ' screenshot - fog - linear . png ' ) ) <nl> Popen ( [ PYTHON , EMCC , path_from_root ( ' tests ' , ' sdl_fog_linear . c ' ) , ' - o ' , ' something . html ' , ' - - pre - js ' , ' reftest . js ' , ' - - preload - file ' , ' screenshot . png ' , ' - s ' , ' GL_TESTING = 1 ' ] ) . communicate ( ) <nl> def test_glshaderinfo ( self ) : <nl> self . btest ( ' glshaderinfo . cpp ' , ' 1 ' ) <nl> <nl> def test_sdlglshader ( self ) : <nl> - self . 
btest ( ' sdlglshader . c ' , reference = ' sdlglshader . png ' , args = [ ' - - closure ' , ' 1 ' ] ) <nl> + self . btest ( ' sdlglshader . c ' , reference = ' sdlglshader . png ' , args = [ ' - O2 ' , ' - - closure ' , ' 1 ' ] ) <nl> <nl> def test_gl_ps ( self ) : <nl> # pointers and a shader <nl> def test_cubegeom ( self ) : <nl> self . btest ( ' cubegeom . c ' , args = [ ' - O2 ' , ' - g ' ] , expected = [ ' 188641320 ' , ' 1522377227 ' , ' - 1054007155 ' , ' - 1111866053 ' ] ) <nl> <nl> def test_cubegeom_glew ( self ) : <nl> - self . btest ( ' cubegeom_glew . c ' , expected = [ ' 188641320 ' , ' 1522377227 ' , ' - 1054007155 ' , ' - 1111866053 ' ] ) <nl> + self . btest ( ' cubegeom_glew . c ' , args = [ ' - O2 ' , ' - - closure ' , ' 1 ' ] , expected = [ ' 188641320 ' , ' 1522377227 ' , ' - 1054007155 ' , ' - 1111866053 ' ] ) <nl> <nl> def test_cubegeom_color ( self ) : <nl> self . btest ( ' cubegeom_color . c ' , expected = [ ' 588472350 ' , ' - 687660609 ' , ' - 818120875 ' ] ) <nl>
add browser testing for - O2 - - closure 1
emscripten-core/emscripten
7c02f322ae31e401f87fe4bf922020c951c59c15
2013-05-17T17:45:41Z
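The emscripten record above adds `-O2 --closure 1` coverage to two browser tests and drops stale "use closure" comments from several others. As a rough illustration only (not part of the commit), the sketch below shows the same flag plumbing around a subprocess call in plain Python; the emcc path and source file are placeholders.

```python
# Illustrative only: mirrors the Popen([PYTHON, EMCC, ...]) pattern used by the tests
# in the diff above. Paths are placeholders; the flags are the ones added in the commit.
import subprocess
import sys

def build_browser_page(emcc, source, out_html, optimize=True, closure=True, extra=()):
    """Compile `source` with emcc, optionally at -O2 with Closure, into `out_html`."""
    cmd = [sys.executable, emcc, source, '-o', out_html]
    if optimize:
        cmd.append('-O2')
    if closure:
        cmd.extend(['--closure', '1'])
    cmd.extend(extra)
    subprocess.check_call(cmd)

# e.g. build_browser_page('emcc', 'sdlglshader.c', 'page.html', extra=['-s', 'GL_TESTING=1'])
```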
mmm a / lib / IRGen / GenProto . cpp <nl> ppp b / lib / IRGen / GenProto . cpp <nl> static void getArgAsLocalSelfTypeMetadata ( IRGenFunction & IGF , <nl> CanType abstractType ) ; <nl> <nl> / / / Build a value witness that initializes an array front - to - back . <nl> - static void emitInitializeArrayFrontToBack ( IRGenFunction & IGF , <nl> + static void emitInitializeArrayFrontToBackWitness ( IRGenFunction & IGF , <nl> llvm : : Function : : arg_iterator argv , <nl> CanType abstractType , <nl> SILType concreteType , <nl> const TypeInfo & type , <nl> - void ( * emitInitializeElement ) ( IRGenFunction & , <nl> - SILType , <nl> - const TypeInfo & , <nl> - Address , <nl> - Address ) ) { <nl> - auto & IGM = IGF . IGM ; <nl> - <nl> + IsTake_t take ) { <nl> Address destArray = getArgAs ( IGF , argv , type , " dest " ) ; <nl> Address srcArray = getArgAs ( IGF , argv , type , " src " ) ; <nl> llvm : : Value * count = getArg ( argv , " count " ) ; <nl> getArgAsLocalSelfTypeMetadata ( IGF , argv , abstractType ) ; <nl> <nl> - auto entry = IGF . Builder . GetInsertBlock ( ) ; <nl> - auto iter = IGF . createBasicBlock ( " iter " ) ; <nl> - auto loop = IGF . createBasicBlock ( " loop " ) ; <nl> - auto exit = IGF . createBasicBlock ( " exit " ) ; <nl> - IGF . Builder . CreateBr ( iter ) ; <nl> - IGF . Builder . emitBlock ( iter ) ; <nl> - <nl> - auto counter = IGF . Builder . CreatePHI ( IGM . SizeTy , 2 ) ; <nl> - counter - > addIncoming ( count , entry ) ; <nl> - auto destVal = IGF . Builder . CreatePHI ( destArray . getType ( ) , 2 ) ; <nl> - destVal - > addIncoming ( destArray . getAddress ( ) , entry ) ; <nl> - auto srcVal = IGF . Builder . CreatePHI ( srcArray . getType ( ) , 2 ) ; <nl> - srcVal - > addIncoming ( srcArray . getAddress ( ) , entry ) ; <nl> - Address dest ( destVal , destArray . getAlignment ( ) ) ; <nl> - Address src ( srcVal , srcArray . getAlignment ( ) ) ; <nl> - <nl> - auto done = IGF . Builder . CreateICmpEQ ( counter , <nl> - llvm : : ConstantInt : : get ( IGM . SizeTy , 0 ) ) ; <nl> - IGF . Builder . CreateCondBr ( done , exit , loop ) ; <nl> - <nl> - IGF . Builder . emitBlock ( loop ) ; <nl> - emitInitializeElement ( IGF , concreteType , type , dest , src ) ; <nl> - <nl> - auto nextCounter = IGF . Builder . CreateSub ( counter , <nl> - llvm : : ConstantInt : : get ( IGM . SizeTy , 1 ) ) ; <nl> - auto nextDest = type . indexArray ( IGF , dest , <nl> - llvm : : ConstantInt : : get ( IGM . SizeTy , 1 ) , <nl> - concreteType ) ; <nl> - auto nextSrc = type . indexArray ( IGF , src , <nl> - llvm : : ConstantInt : : get ( IGM . SizeTy , 1 ) , <nl> - concreteType ) ; <nl> - auto loopEnd = IGF . Builder . GetInsertBlock ( ) ; <nl> - counter - > addIncoming ( nextCounter , loopEnd ) ; <nl> - destVal - > addIncoming ( nextDest . getAddress ( ) , loopEnd ) ; <nl> - srcVal - > addIncoming ( nextSrc . getAddress ( ) , loopEnd ) ; <nl> - IGF . Builder . CreateBr ( iter ) ; <nl> - <nl> - IGF . Builder . emitBlock ( exit ) ; <nl> + emitInitializeArrayFrontToBack ( IGF , type , destArray , srcArray , count , <nl> + concreteType , take ) ; <nl> + <nl> destArray = IGF . Builder . CreateBitCast ( destArray , IGF . IGM . OpaquePtrTy ) ; <nl> IGF . Builder . CreateRet ( destArray . getAddress ( ) ) ; <nl> } <nl> <nl> / / / Build a value witness that initializes an array back - to - front . 
<nl> - static void emitInitializeArrayBackToFront ( IRGenFunction & IGF , <nl> + static void emitInitializeArrayBackToFrontWitness ( IRGenFunction & IGF , <nl> llvm : : Function : : arg_iterator argv , <nl> CanType abstractType , <nl> SILType concreteType , <nl> const TypeInfo & type , <nl> - void ( * emitInitializeElement ) ( IRGenFunction & , <nl> - SILType , <nl> - const TypeInfo & , <nl> - Address , <nl> - Address ) ) <nl> - { <nl> - auto & IGM = IGF . IGM ; <nl> - <nl> + IsTake_t take ) { <nl> Address destArray = getArgAs ( IGF , argv , type , " dest " ) ; <nl> Address srcArray = getArgAs ( IGF , argv , type , " src " ) ; <nl> llvm : : Value * count = getArg ( argv , " count " ) ; <nl> getArgAsLocalSelfTypeMetadata ( IGF , argv , abstractType ) ; <nl> <nl> - auto destEnd = type . indexArray ( IGF , destArray , count , concreteType ) ; <nl> - auto srcEnd = type . indexArray ( IGF , destArray , count , concreteType ) ; <nl> - <nl> - auto entry = IGF . Builder . GetInsertBlock ( ) ; <nl> - auto iter = IGF . createBasicBlock ( " iter " ) ; <nl> - auto loop = IGF . createBasicBlock ( " loop " ) ; <nl> - auto exit = IGF . createBasicBlock ( " exit " ) ; <nl> - IGF . Builder . CreateBr ( iter ) ; <nl> - IGF . Builder . emitBlock ( iter ) ; <nl> - <nl> - auto counter = IGF . Builder . CreatePHI ( IGM . SizeTy , 2 ) ; <nl> - counter - > addIncoming ( count , entry ) ; <nl> - auto destVal = IGF . Builder . CreatePHI ( destEnd . getType ( ) , 2 ) ; <nl> - destVal - > addIncoming ( destArray . getAddress ( ) , entry ) ; <nl> - auto srcVal = IGF . Builder . CreatePHI ( srcEnd . getType ( ) , 2 ) ; <nl> - srcVal - > addIncoming ( srcArray . getAddress ( ) , entry ) ; <nl> - Address dest ( destVal , destArray . getAlignment ( ) ) ; <nl> - Address src ( srcVal , srcArray . getAlignment ( ) ) ; <nl> - <nl> - auto done = IGF . Builder . CreateICmpEQ ( counter , <nl> - llvm : : ConstantInt : : get ( IGM . SizeTy , 0 ) ) ; <nl> - IGF . Builder . CreateCondBr ( done , exit , loop ) ; <nl> - <nl> - IGF . Builder . emitBlock ( loop ) ; <nl> - auto prevDest = type . indexArray ( IGF , dest , <nl> - llvm : : ConstantInt : : getSigned ( IGM . SizeTy , - 1 ) , <nl> - concreteType ) ; <nl> - auto prevSrc = type . indexArray ( IGF , src , <nl> - llvm : : ConstantInt : : getSigned ( IGM . SizeTy , - 1 ) , <nl> - concreteType ) ; <nl> - <nl> - emitInitializeElement ( IGF , concreteType , type , prevDest , prevSrc ) ; <nl> - <nl> - auto nextCounter = IGF . Builder . CreateSub ( counter , <nl> - llvm : : ConstantInt : : get ( IGM . SizeTy , 1 ) ) ; <nl> - auto loopEnd = IGF . Builder . GetInsertBlock ( ) ; <nl> - counter - > addIncoming ( nextCounter , loopEnd ) ; <nl> - destVal - > addIncoming ( prevDest . getAddress ( ) , loopEnd ) ; <nl> - srcVal - > addIncoming ( prevSrc . getAddress ( ) , loopEnd ) ; <nl> - IGF . Builder . CreateBr ( iter ) ; <nl> - <nl> - IGF . Builder . emitBlock ( exit ) ; <nl> + emitInitializeArrayBackToFront ( IGF , type , destArray , srcArray , count , <nl> + concreteType , take ) ; <nl> + <nl> destArray = IGF . Builder . CreateBitCast ( destArray , IGF . IGM . OpaquePtrTy ) ; <nl> IGF . Builder . CreateRet ( destArray . 
getAddress ( ) ) ; <nl> } <nl> static void buildValueWitnessFunction ( IRGenModule & IGM , <nl> } <nl> <nl> case ValueWitness : : InitializeArrayWithCopy : { <nl> - emitInitializeArrayFrontToBack ( IGF , argv , abstractType , concreteType , <nl> - type , emitInitializeWithCopy ) ; <nl> + emitInitializeArrayFrontToBackWitness ( IGF , argv , abstractType , concreteType , <nl> + type , IsNotTake ) ; <nl> return ; <nl> } <nl> <nl> static void buildValueWitnessFunction ( IRGenModule & IGM , <nl> } <nl> <nl> case ValueWitness : : InitializeArrayWithTakeFrontToBack : { <nl> - emitInitializeArrayFrontToBack ( IGF , argv , abstractType , concreteType , <nl> - type , emitInitializeWithTake ) ; <nl> + emitInitializeArrayFrontToBackWitness ( IGF , argv , abstractType , concreteType , <nl> + type , IsTake ) ; <nl> return ; <nl> } <nl> <nl> case ValueWitness : : InitializeArrayWithTakeBackToFront : { <nl> - emitInitializeArrayBackToFront ( IGF , argv , abstractType , concreteType , <nl> - type , emitInitializeWithTake ) ; <nl> + emitInitializeArrayBackToFrontWitness ( IGF , argv , abstractType , concreteType , <nl> + type , IsTake ) ; <nl> return ; <nl> } <nl> <nl> mmm a / lib / IRGen / GenType . cpp <nl> ppp b / lib / IRGen / GenType . cpp <nl> void TypeInfo : : destroyArray ( IRGenFunction & IGF , Address array , <nl> } <nl> <nl> / / / Build a value witness that initializes an array front - to - back . <nl> - static void emitInitializeArrayFrontToBack ( IRGenFunction & IGF , <nl> + void irgen : : emitInitializeArrayFrontToBack ( IRGenFunction & IGF , <nl> const TypeInfo & type , <nl> Address destArray , <nl> Address srcArray , <nl> llvm : : Value * count , <nl> SILType T , <nl> - void ( TypeInfo : : * emitInitializeElement ) ( IRGenFunction & , <nl> - Address , Address , <nl> - SILType ) const ) { <nl> + IsTake_t take ) { <nl> auto & IGM = IGF . IGM ; <nl> <nl> auto entry = IGF . Builder . GetInsertBlock ( ) ; <nl> static void emitInitializeArrayFrontToBack ( IRGenFunction & IGF , <nl> IGF . Builder . CreateCondBr ( done , exit , loop ) ; <nl> <nl> IGF . Builder . emitBlock ( loop ) ; <nl> - ( type . * emitInitializeElement ) ( IGF , dest , src , T ) ; <nl> - <nl> + if ( take ) <nl> + type . initializeWithTake ( IGF , dest , src , T ) ; <nl> + else <nl> + type . initializeWithCopy ( IGF , dest , src , T ) ; <nl> + <nl> auto nextCounter = IGF . Builder . CreateSub ( counter , <nl> llvm : : ConstantInt : : get ( IGM . SizeTy , 1 ) ) ; <nl> auto nextDest = type . indexArray ( IGF , dest , <nl> static void emitInitializeArrayFrontToBack ( IRGenFunction & IGF , <nl> } <nl> <nl> / / / Build a value witness that initializes an array back - to - front . <nl> - static void emitInitializeArrayBackToFront ( IRGenFunction & IGF , <nl> + void irgen : : emitInitializeArrayBackToFront ( IRGenFunction & IGF , <nl> const TypeInfo & type , <nl> Address destArray , <nl> Address srcArray , <nl> llvm : : Value * count , <nl> SILType T , <nl> - void ( TypeInfo : : * emitInitializeElement ) ( IRGenFunction & , <nl> - Address , Address , <nl> - SILType ) const ) { <nl> + IsTake_t take ) { <nl> auto & IGM = IGF . IGM ; <nl> <nl> auto destEnd = type . indexArray ( IGF , destArray , count , T ) ; <nl> - auto srcEnd = type . indexArray ( IGF , destArray , count , T ) ; <nl> + auto srcEnd = type . indexArray ( IGF , srcArray , count , T ) ; <nl> <nl> auto entry = IGF . Builder . GetInsertBlock ( ) ; <nl> auto iter = IGF . 
createBasicBlock ( " iter " ) ; <nl> static void emitInitializeArrayBackToFront ( IRGenFunction & IGF , <nl> auto counter = IGF . Builder . CreatePHI ( IGM . SizeTy , 2 ) ; <nl> counter - > addIncoming ( count , entry ) ; <nl> auto destVal = IGF . Builder . CreatePHI ( destEnd . getType ( ) , 2 ) ; <nl> - destVal - > addIncoming ( destArray . getAddress ( ) , entry ) ; <nl> + destVal - > addIncoming ( destEnd . getAddress ( ) , entry ) ; <nl> auto srcVal = IGF . Builder . CreatePHI ( srcEnd . getType ( ) , 2 ) ; <nl> - srcVal - > addIncoming ( srcArray . getAddress ( ) , entry ) ; <nl> + srcVal - > addIncoming ( srcEnd . getAddress ( ) , entry ) ; <nl> Address dest ( destVal , destArray . getAlignment ( ) ) ; <nl> Address src ( srcVal , srcArray . getAlignment ( ) ) ; <nl> <nl> static void emitInitializeArrayBackToFront ( IRGenFunction & IGF , <nl> llvm : : ConstantInt : : getSigned ( IGM . SizeTy , - 1 ) , T ) ; <nl> auto prevSrc = type . indexArray ( IGF , src , <nl> llvm : : ConstantInt : : getSigned ( IGM . SizeTy , - 1 ) , T ) ; <nl> - <nl> - ( type . * emitInitializeElement ) ( IGF , prevDest , prevSrc , T ) ; <nl> + <nl> + if ( take ) <nl> + type . initializeWithTake ( IGF , prevDest , prevSrc , T ) ; <nl> + else <nl> + type . initializeWithCopy ( IGF , prevDest , prevSrc , T ) ; <nl> <nl> auto nextCounter = IGF . Builder . CreateSub ( counter , <nl> llvm : : ConstantInt : : get ( IGM . SizeTy , 1 ) ) ; <nl> void TypeInfo : : initializeArrayWithCopy ( IRGenFunction & IGF , <nl> return ; <nl> } <nl> <nl> - emitInitializeArrayFrontToBack ( IGF , * this , dest , src , count , T , <nl> - & TypeInfo : : initializeWithCopy ) ; <nl> + emitInitializeArrayFrontToBack ( IGF , * this , dest , src , count , T , IsNotTake ) ; <nl> } <nl> <nl> void TypeInfo : : initializeArrayWithTakeFrontToBack ( IRGenFunction & IGF , <nl> const { <nl> return ; <nl> } <nl> <nl> - emitInitializeArrayFrontToBack ( IGF , * this , dest , src , count , T , <nl> - & TypeInfo : : initializeWithTake ) ; <nl> + emitInitializeArrayFrontToBack ( IGF , * this , dest , src , count , T , IsTake ) ; <nl> } <nl> <nl> void TypeInfo : : initializeArrayWithTakeBackToFront ( IRGenFunction & IGF , <nl> const { <nl> return ; <nl> } <nl> <nl> - emitInitializeArrayBackToFront ( IGF , * this , dest , src , count , T , <nl> - & TypeInfo : : initializeWithTake ) ; <nl> + emitInitializeArrayBackToFront ( IGF , * this , dest , src , count , T , IsTake ) ; <nl> } <nl> <nl> ExplosionSchema TypeInfo : : getSchema ( ) const { <nl> mmm a / lib / IRGen / GenType . h <nl> ppp b / lib / IRGen / GenType . h <nl> namespace swift { <nl> class EnumDecl ; <nl> class UnownedStorageType ; <nl> class WeakStorageType ; <nl> - <nl> + enum IsTake_t : bool ; <nl> + <nl> namespace irgen { <nl> class Alignment ; <nl> class ProtocolInfo ; <nl> namespace irgen { <nl> class TypeInfo ; <nl> class UnownedTypeInfo ; <nl> class WeakTypeInfo ; <nl> - <nl> + <nl> / / / Either a type or a forward - declaration . <nl> typedef llvm : : PointerUnion < const TypeInfo * , llvm : : Type * > TypeCacheEntry ; <nl> <nl> class GenericContextScope { <nl> / / / Generate code to verify that static type assumptions agree with the runtime . <nl> void emitTypeLayoutVerifier ( IRGenFunction & IGF , <nl> ArrayRef < CanType > formalTypes ) ; <nl> + <nl> + / / / Build a value witness that initializes an array front - to - back . 
<nl> + void emitInitializeArrayFrontToBack ( IRGenFunction & IGF , <nl> + const TypeInfo & type , <nl> + Address destArray , <nl> + Address srcArray , <nl> + llvm : : Value * count , <nl> + SILType T , <nl> + IsTake_t take ) ; <nl> + <nl> + / / / Build a value witness that initializes an array back - to - front . <nl> + void emitInitializeArrayBackToFront ( IRGenFunction & IGF , <nl> + const TypeInfo & type , <nl> + Address destArray , <nl> + Address srcArray , <nl> + llvm : : Value * count , <nl> + SILType T , <nl> + IsTake_t take ) ; <nl> <nl> } / / end namespace irgen <nl> } / / end namespace swift <nl> mmm a / test / IRGen / array_value_witnesses . sil <nl> ppp b / test / IRGen / array_value_witnesses . sil <nl> <nl> - / / RUN : % target - swift - frontend % s - emit - ir | FileCheck % s <nl> + / / RUN : % target - swift - frontend % s - emit - ir | FileCheck % s - - check - prefix = CHECK - - check - prefix = CHECK - % target - ptrsize <nl> <nl> - / / REQUIRES : CPU = x86_64 <nl> <nl> import Swift <nl> <nl> struct SomeWeak { <nl> weak var y : C ? <nl> } <nl> <nl> - / / CHECK : @ _TWVV21array_value_witnesses3POD = <nl> - / / CHECK - NOT : @ _TwXxV21array_value_witnesses3POD <nl> - / / CHECK : @ __swift_noop_void_return <nl> - / / CHECK - NOT : @ _TwCcV21array_value_witnesses3POD <nl> - / / CHECK : @ __swift_memcpy_array8_8 <nl> - / / CHECK - NOT : @ _TwTtV21array_value_witnesses3POD <nl> - / / CHECK : @ __swift_memmove_array8_8 <nl> - / / CHECK - NOT : @ _TwtTV21array_value_witnesses3POD <nl> - / / CHECK : @ __swift_memmove_array8_8 <nl> - <nl> - / / CHECK : @ _TWVV21array_value_witnesses14BitwiseTakable = <nl> - / / CHECK : @ _TwXxV21array_value_witnesses14BitwiseTakable <nl> - / / CHECK : @ _TwCcV21array_value_witnesses14BitwiseTakable <nl> - / / CHECK - NOT : @ _TwTtV21array_value_witnesses14BitwiseTakable <nl> - / / CHECK : @ __swift_memmove_array8_8 <nl> - / / CHECK - NOT : @ _TwtTV21array_value_witnesses14BitwiseTakable <nl> - / / CHECK : @ __swift_memmove_array8_8 <nl> - <nl> - / / CHECK : @ _TWVV21array_value_witnesses8SomeWeak = <nl> - / / CHECK : @ _TwXxV21array_value_witnesses8SomeWeak <nl> - / / CHECK : @ _TwCcV21array_value_witnesses8SomeWeak <nl> - / / CHECK : @ _TwTtV21array_value_witnesses8SomeWeak <nl> - / / CHECK : @ _TwtTV21array_value_witnesses8SomeWeak <nl> + / / CHECK - LABEL : @ _TWVV21array_value_witnesses3POD = <nl> + / / CHECK - NOT : @ _TwXxV21array_value_witnesses3POD <nl> + / / CHECK : @ __swift_noop_void_return <nl> + / / CHECK - NOT : @ _TwCcV21array_value_witnesses3POD <nl> + / / CHECK - 32 : @ __swift_memcpy_array4_4 <nl> + / / CHECK - 64 : @ __swift_memcpy_array8_8 <nl> + / / CHECK - NOT : @ _TwTtV21array_value_witnesses3POD <nl> + / / CHECK - 32 : @ __swift_memmove_array4_4 <nl> + / / CHECK - 64 : @ __swift_memmove_array8_8 <nl> + / / CHECK - NOT : @ _TwtTV21array_value_witnesses3POD <nl> + / / CHECK - 32 : @ __swift_memmove_array4_4 <nl> + / / CHECK - 32 : @ __swift_memmove_array8_8 <nl> <nl> + / / CHECK - LABEL : @ _TWVV21array_value_witnesses14BitwiseTakable = <nl> + / / CHECK : @ _TwXxV21array_value_witnesses14BitwiseTakable <nl> + / / CHECK : @ _TwCcV21array_value_witnesses14BitwiseTakable <nl> + / / CHECK - NOT : @ _TwTtV21array_value_witnesses14BitwiseTakable <nl> + / / CHECK - 32 : @ __swift_memmove_array4_4 <nl> + / / CHECK - 64 : @ __swift_memmove_array8_8 <nl> + / / CHECK - NOT : @ _TwtTV21array_value_witnesses14BitwiseTakable <nl> + / / CHECK - 32 : @ __swift_memmove_array4_4 <nl> + / / CHECK - 64 : @ __swift_memmove_array8_8 <nl> <nl> + / 
/ CHECK - LABEL : @ _TWVV21array_value_witnesses8SomeWeak = <nl> + / / CHECK : @ _TwXxV21array_value_witnesses8SomeWeak <nl> + / / CHECK : @ _TwCcV21array_value_witnesses8SomeWeak <nl> + / / CHECK : @ _TwTtV21array_value_witnesses8SomeWeak <nl> + / / CHECK : @ _TwtTV21array_value_witnesses8SomeWeak <nl> + <nl> + / / CHECK - LABEL : define linkonce_odr hidden void @ _TwXxV21array_value_witnesses8SomeWeak <nl> + / / CHECK : ( % swift . opaque * [ [ ARRAY_PTR : % . * ] ] , [ [ WORD : i [ 0 - 9 ] + ] ] [ [ COUNT : % . * ] ] , % swift . type * % Self ) { <nl> + / / CHECK : [ [ BEGIN : % . * ] ] = bitcast % swift . opaque * [ [ ARRAY_PTR ] ] to [ [ SOMEWEAK : % V21array_value_witnesses8SomeWeak ] ] * <nl> + / / CHECK : br label % iter <nl> + / / CHECK : iter : <nl> + / / CHECK : [ [ I : % . * ] ] = phi [ [ WORD ] ] [ [ [ COUNT ] ] , % entry ] , [ [ [ DEC : % . * ] ] , % loop ] <nl> + / / CHECK : [ [ CURRENT : % . * ] ] = phi [ [ SOMEWEAK ] ] * [ [ [ BEGIN ] ] , % entry ] , [ [ [ NEXT : % . * ] ] , % loop ] <nl> + / / CHECK : [ [ DONE : % . * ] ] = icmp eq [ [ WORD ] ] [ [ I ] ] , 0 <nl> + / / CHECK : br i1 [ [ DONE ] ] , label % exit , label % loop <nl> + / / CHECK : loop : <nl> + / / CHECK : [ [ WEAK : % . * ] ] = getelementptr inbounds [ [ SOMEWEAK ] ] , [ [ SOMEWEAK ] ] * [ [ CURRENT ] ] , i32 0 , i32 1 <nl> + / / CHECK : call void @ swift_weakDestroy ( % swift . weak * [ [ WEAK ] ] ) <nl> + / / CHECK : [ [ DEC ] ] = sub [ [ WORD ] ] [ [ I ] ] , 1 <nl> + / / CHECK : [ [ NEXT ] ] = getelementptr inbounds [ [ SOMEWEAK ] ] , [ [ SOMEWEAK ] ] * [ [ CURRENT ] ] , [ [ WORD ] ] 1 <nl> + / / CHECK : br label % iter <nl> + / / CHECK : exit : <nl> + / / CHECK : ret <nl> + <nl> + / / CHECK - LABEL : define linkonce_odr hidden % swift . opaque * @ _TwCcV21array_value_witnesses8SomeWeak <nl> + / / CHECK : ( % swift . opaque * [ [ DEST_PTR : % . * ] ] , % swift . opaque * [ [ SRC_PTR : % . * ] ] , [ [ WORD : i [ 0 - 9 ] + ] ] [ [ COUNT : % . * ] ] , % swift . type * % Self ) { <nl> + / / CHECK : [ [ DEST_BEGIN : % . * ] ] = bitcast % swift . opaque * [ [ DEST_PTR ] ] to [ [ SOMEWEAK ] ] * <nl> + / / CHECK : [ [ SRC_BEGIN : % . * ] ] = bitcast % swift . opaque * [ [ SRC_PTR ] ] to [ [ SOMEWEAK ] ] * <nl> + / / CHECK : br label % iter <nl> + / / CHECK : iter : <nl> + / / CHECK : [ [ I : % . * ] ] = phi [ [ WORD ] ] [ [ [ COUNT ] ] , % entry ] , [ [ [ DEC : % . * ] ] , % loop ] <nl> + / / CHECK : [ [ DEST_CURRENT : % . * ] ] = phi [ [ SOMEWEAK ] ] * [ [ [ DEST_BEGIN ] ] , % entry ] , [ [ [ DEST_NEXT : % . * ] ] , % loop ] <nl> + / / CHECK : [ [ SRC_CURRENT : % . * ] ] = phi [ [ SOMEWEAK ] ] * [ [ [ SRC_BEGIN ] ] , % entry ] , [ [ [ SRC_NEXT : % . * ] ] , % loop ] <nl> + / / CHECK : [ [ DONE : % . * ] ] = icmp eq [ [ WORD ] ] [ [ I ] ] , 0 <nl> + / / CHECK : br i1 [ [ DONE ] ] , label % exit , label % loop <nl> + / / CHECK : loop : <nl> + / / CHECK : [ [ DEST_WEAK : % . * ] ] = getelementptr inbounds [ [ SOMEWEAK ] ] , [ [ SOMEWEAK ] ] * [ [ DEST_CURRENT ] ] , i32 0 , i32 1 <nl> + / / CHECK : [ [ SRC_WEAK : % . * ] ] = getelementptr inbounds [ [ SOMEWEAK ] ] , [ [ SOMEWEAK ] ] * [ [ SRC_CURRENT ] ] , i32 0 , i32 1 <nl> + / / CHECK : call void @ swift_weakCopyInit ( % swift . weak * [ [ DEST_WEAK ] ] , % swift . 
weak * [ [ SRC_WEAK ] ] ) <nl> + / / CHECK : [ [ DEC ] ] = sub [ [ WORD ] ] [ [ I ] ] , 1 <nl> + / / CHECK : [ [ DEST_NEXT ] ] = getelementptr inbounds [ [ SOMEWEAK ] ] , [ [ SOMEWEAK ] ] * [ [ DEST_CURRENT ] ] , [ [ WORD ] ] 1 <nl> + / / CHECK : [ [ SRC_NEXT ] ] = getelementptr inbounds [ [ SOMEWEAK ] ] , [ [ SOMEWEAK ] ] * [ [ SRC_CURRENT ] ] , [ [ WORD ] ] 1 <nl> + / / CHECK : br label % iter <nl> + / / CHECK : exit : <nl> + / / CHECK : ret <nl> + <nl> + / / CHECK - LABEL : define linkonce_odr hidden % swift . opaque * @ _TwTtV21array_value_witnesses8SomeWeak <nl> + / / CHECK : ( % swift . opaque * [ [ DEST_PTR : % . * ] ] , % swift . opaque * [ [ SRC_PTR : % . * ] ] , [ [ WORD : i [ 0 - 9 ] + ] ] [ [ COUNT : % . * ] ] , % swift . type * % Self ) { <nl> + / / CHECK : [ [ DEST_BEGIN : % . * ] ] = bitcast % swift . opaque * [ [ DEST_PTR ] ] to [ [ SOMEWEAK ] ] * <nl> + / / CHECK : [ [ SRC_BEGIN : % . * ] ] = bitcast % swift . opaque * [ [ SRC_PTR ] ] to [ [ SOMEWEAK ] ] * <nl> + / / CHECK : br label % iter <nl> + / / CHECK : iter : <nl> + / / CHECK : [ [ I : % . * ] ] = phi [ [ WORD ] ] [ [ [ COUNT ] ] , % entry ] , [ [ [ DEC : % . * ] ] , % loop ] <nl> + / / CHECK : [ [ DEST_CURRENT : % . * ] ] = phi [ [ SOMEWEAK ] ] * [ [ [ DEST_BEGIN ] ] , % entry ] , [ [ [ DEST_NEXT : % . * ] ] , % loop ] <nl> + / / CHECK : [ [ SRC_CURRENT : % . * ] ] = phi [ [ SOMEWEAK ] ] * [ [ [ SRC_BEGIN ] ] , % entry ] , [ [ [ SRC_NEXT : % . * ] ] , % loop ] <nl> + / / CHECK : [ [ DONE : % . * ] ] = icmp eq [ [ WORD ] ] [ [ I ] ] , 0 <nl> + / / CHECK : br i1 [ [ DONE ] ] , label % exit , label % loop <nl> + / / CHECK : loop : <nl> + / / CHECK : [ [ DEST_WEAK : % . * ] ] = getelementptr inbounds [ [ SOMEWEAK ] ] , [ [ SOMEWEAK ] ] * [ [ DEST_CURRENT ] ] , i32 0 , i32 1 <nl> + / / CHECK : [ [ SRC_WEAK : % . * ] ] = getelementptr inbounds [ [ SOMEWEAK ] ] , [ [ SOMEWEAK ] ] * [ [ SRC_CURRENT ] ] , i32 0 , i32 1 <nl> + / / CHECK : call void @ swift_weakTakeInit ( % swift . weak * [ [ DEST_WEAK ] ] , % swift . weak * [ [ SRC_WEAK ] ] ) <nl> + / / CHECK : [ [ DEC ] ] = sub [ [ WORD ] ] [ [ I ] ] , 1 <nl> + / / CHECK : [ [ DEST_NEXT ] ] = getelementptr inbounds [ [ SOMEWEAK ] ] , [ [ SOMEWEAK ] ] * [ [ DEST_CURRENT ] ] , [ [ WORD ] ] 1 <nl> + / / CHECK : [ [ SRC_NEXT ] ] = getelementptr inbounds [ [ SOMEWEAK ] ] , [ [ SOMEWEAK ] ] * [ [ SRC_CURRENT ] ] , [ [ WORD ] ] 1 <nl> + / / CHECK : br label % iter <nl> + / / CHECK : exit : <nl> + / / CHECK : ret <nl> + <nl> + / / CHECK - LABEL : define linkonce_odr hidden % swift . opaque * @ _TwtTV21array_value_witnesses8SomeWeak <nl> + / / CHECK : ( % swift . opaque * [ [ DEST_PTR : % . * ] ] , % swift . opaque * [ [ SRC_PTR : % . * ] ] , [ [ WORD : i [ 0 - 9 ] + ] ] [ [ COUNT : % . * ] ] , % swift . type * % Self ) { <nl> + / / CHECK : [ [ DEST_BEGIN : % . * ] ] = bitcast % swift . opaque * [ [ DEST_PTR ] ] to [ [ SOMEWEAK ] ] * <nl> + / / CHECK : [ [ SRC_BEGIN : % . * ] ] = bitcast % swift . opaque * [ [ SRC_PTR ] ] to [ [ SOMEWEAK ] ] * <nl> + / / CHECK : [ [ DEST_END : % . * ] ] = getelementptr inbounds [ [ SOMEWEAK ] ] , [ [ SOMEWEAK ] ] * [ [ DEST_BEGIN ] ] , [ [ WORD ] ] [ [ COUNT ] ] <nl> + / / CHECK : [ [ SRC_END : % . * ] ] = getelementptr inbounds [ [ SOMEWEAK ] ] , [ [ SOMEWEAK ] ] * [ [ SRC_BEGIN ] ] , [ [ WORD ] ] [ [ COUNT ] ] <nl> + / / CHECK : br label % iter <nl> + / / CHECK : iter : <nl> + / / CHECK : [ [ I : % . * ] ] = phi [ [ WORD ] ] [ [ [ COUNT ] ] , % entry ] , [ [ [ DEC : % . * ] ] , % loop ] <nl> + / / CHECK : [ [ DEST_PREV : % . 
* ] ] = phi [ [ SOMEWEAK ] ] * [ [ [ DEST_END ] ] , % entry ] , [ [ [ DEST_CURRENT : % . * ] ] , % loop ] <nl> + / / CHECK : [ [ SRC_PREV : % . * ] ] = phi [ [ SOMEWEAK ] ] * [ [ [ SRC_END ] ] , % entry ] , [ [ [ SRC_CURRENT : % . * ] ] , % loop ] <nl> + / / CHECK : [ [ DONE : % . * ] ] = icmp eq [ [ WORD ] ] [ [ I ] ] , 0 <nl> + / / CHECK : br i1 [ [ DONE ] ] , label % exit , label % loop <nl> + / / CHECK : loop : <nl> + / / CHECK : [ [ DEST_CURRENT ] ] = getelementptr inbounds [ [ SOMEWEAK ] ] , [ [ SOMEWEAK ] ] * [ [ DEST_PREV ] ] , [ [ WORD ] ] - 1 <nl> + / / CHECK : [ [ SRC_CURRENT ] ] = getelementptr inbounds [ [ SOMEWEAK ] ] , [ [ SOMEWEAK ] ] * [ [ SRC_PREV ] ] , [ [ WORD ] ] - 1 <nl> + / / CHECK : [ [ DEST_WEAK : % . * ] ] = getelementptr inbounds [ [ SOMEWEAK ] ] , [ [ SOMEWEAK ] ] * [ [ DEST_CURRENT ] ] , i32 0 , i32 1 <nl> + / / CHECK : [ [ SRC_WEAK : % . * ] ] = getelementptr inbounds [ [ SOMEWEAK ] ] , [ [ SOMEWEAK ] ] * [ [ SRC_CURRENT ] ] , i32 0 , i32 1 <nl> + / / CHECK : call void @ swift_weakTakeInit ( % swift . weak * [ [ DEST_WEAK ] ] , % swift . weak * [ [ SRC_WEAK ] ] ) <nl> + / / CHECK : [ [ DEC ] ] = sub [ [ WORD ] ] [ [ I ] ] , 1 <nl> + / / CHECK : br label % iter <nl> + / / CHECK : exit : <nl> + / / CHECK : ret <nl>
IRGen : Fix up InitializeArrayBackToFront witness emission .
apple/swift
ce5a81ec2e5c15347e8c7d2b7770643d56e9f6e9
2015-04-03T02:13:00Z
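The Swift IRGen record above folds two near-duplicate IR loops into shared `emitInitializeArrayFrontToBack` / `emitInitializeArrayBackToFront` helpers keyed on an `IsTake_t` flag, and fixes the back-to-front witness to index from `srcArray` and start its PHIs at `destEnd`/`srcEnd`. A loose Python analogue (not Swift or LLVM IR) of the two traversal orders, and of why the backward walk exists, looks like this:

```python
# Loose analogue of the two array-initialization loop shapes in the commit above.
# back_to_front=True walks from the last element down, which is what makes an
# overlapping move with dest > src safe; `take` stands in for IsTake (move) versus
# IsNotTake (copy) of each element.
def init_array(buf, dest, src, count, back_to_front=False, take=False):
    indices = reversed(range(count)) if back_to_front else range(count)
    for i in indices:
        buf[dest + i] = buf[src + i]
        if take:
            buf[src + i] = None  # a "take" consumes the source slot

data = list("abcdef") + [None] * 2
init_array(data, dest=2, src=0, count=6, back_to_front=True)  # overlapping shift right
print(data)  # ['a', 'b', 'a', 'b', 'c', 'd', 'e', 'f']
```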
mmm a / brightray / browser / inspectable_web_contents_impl . cc <nl> ppp b / brightray / browser / inspectable_web_contents_impl . cc <nl> <nl> # include " content / public / browser / devtools_client_host . h " <nl> # include " content / public / browser / devtools_http_handler . h " <nl> # include " content / public / browser / devtools_manager . h " <nl> + # include " content / public / browser / web_contents_view . h " <nl> <nl> namespace brightray { <nl> <nl> void InspectableWebContentsImpl : : ChangeAttachedWindowHeight ( unsigned height ) { <nl> } <nl> <nl> void InspectableWebContentsImpl : : CloseWindow ( ) { <nl> + view_ - > CloseDevTools ( ) ; <nl> + devtools_web_contents_ . reset ( ) ; <nl> + web_contents_ - > GetView ( ) - > Focus ( ) ; <nl> } <nl> <nl> void InspectableWebContentsImpl : : MoveWindow ( int x , int y ) { <nl> void InspectableWebContentsImpl : : WebContentsDestroyed ( content : : WebContents * ) { <nl> Observe ( nullptr ) ; <nl> agent_host_ = nullptr ; <nl> frontend_host_ . reset ( ) ; <nl> - devtools_web_contents_ . reset ( ) ; <nl> } <nl> <nl> void InspectableWebContentsImpl : : HandleKeyboardEvent ( content : : WebContents * source , const content : : NativeWebKeyboardEvent & event ) { <nl> mmm a / brightray / browser / inspectable_web_contents_view . h <nl> ppp b / brightray / browser / inspectable_web_contents_view . h <nl> class InspectableWebContentsView { <nl> virtual gfx : : NativeView GetNativeView ( ) const = 0 ; <nl> <nl> virtual void ShowDevTools ( ) = 0 ; <nl> + virtual void CloseDevTools ( ) = 0 ; <nl> } ; <nl> <nl> } <nl> mmm a / brightray / browser / inspectable_web_contents_view_mac . h <nl> ppp b / brightray / browser / inspectable_web_contents_view_mac . h <nl> class InspectableWebContentsViewMac : public InspectableWebContentsView { <nl> <nl> virtual gfx : : NativeView GetNativeView ( ) const OVERRIDE ; <nl> virtual void ShowDevTools ( ) OVERRIDE ; <nl> + virtual void CloseDevTools ( ) OVERRIDE ; <nl> <nl> InspectableWebContentsImpl * inspectable_web_contents ( ) { return inspectable_web_contents_ ; } <nl> <nl> mmm a / brightray / browser / inspectable_web_contents_view_mac . mm <nl> ppp b / brightray / browser / inspectable_web_contents_view_mac . mm <nl> <nl> [ view_ setDevToolsVisible : YES ] ; <nl> } <nl> <nl> + void InspectableWebContentsViewMac : : CloseDevTools ( ) { <nl> + [ view_ setDevToolsVisible : NO ] ; <nl> + } <nl> + <nl> } <nl>
Make the devtools close button work
electron/electron
001d0197de17c07fbde127100b2a78e81871262d
2013-03-27T15:20:21Z
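Structurally, the brightray record above adds a `CloseDevTools` entry point to the platform view interface, implements it on macOS, and has the frontend's `CloseWindow` callback invoke it, drop the devtools contents, and refocus the inspected page. A minimal Python sketch of that shape, using hypothetical class and method names rather than the actual C++/Objective-C++ API:

```python
# Hypothetical stand-ins for the view/impl pair in the diff above, just to show the
# delegation the commit wires up: close request -> view hides the devtools pane ->
# devtools contents dropped -> focus returns to the page.
class DevToolsView:
    def __init__(self):
        self.devtools_visible = False
    def show_devtools(self):
        self.devtools_visible = True
    def close_devtools(self):
        self.devtools_visible = False

class InspectableContents:
    def __init__(self, view):
        self.view = view
        self.devtools_contents = object()  # placeholder for the devtools WebContents
        self.page_focused = False
    def close_window(self):                # wired to the frontend's close button
        self.view.close_devtools()
        self.devtools_contents = None
        self.page_focused = True

contents = InspectableContents(DevToolsView())
contents.view.show_devtools()
contents.close_window()
assert not contents.view.devtools_visible and contents.page_focused
```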
mmm a / src / php / tests / generated_code / AbstractGeneratedCodeTest . php <nl> ppp b / src / php / tests / generated_code / AbstractGeneratedCodeTest . php <nl> abstract class AbstractGeneratedCodeTest extends PHPUnit_Framework_TestCase { <nl> protected static $ client ; <nl> protected static $ timeout ; <nl> <nl> + public function testWaitForNotReady ( ) { <nl> + $ this - > assertFalse ( self : : $ client - > waitForReady ( 1 ) ) ; <nl> + } <nl> + <nl> + public function testWaitForReady ( ) { <nl> + $ this - > assertTrue ( self : : $ client - > waitForReady ( 250000 ) ) ; <nl> + } <nl> + <nl> public function testSimpleRequest ( ) { <nl> $ div_arg = new math \ DivArgs ( ) ; <nl> $ div_arg - > setDividend ( 7 ) ; <nl>
php : add tests for waitForReady
grpc/grpc
4c0fcda20c33b78d77fddee18cf7a07b6da65fe7
2015-08-20T21:54:34Z
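The two PHP tests above pin down `waitForReady`'s timeout semantics: a very short timeout (1, presumably microseconds) is expected to return false, while a generous one (250000) is expected to return true. A rough Python analogue of that pair of assertions, using a hypothetical client whose `wait_for_ready(timeout_us)` mimics the generated PHP client's `waitForReady`:

```python
# FakeClient and wait_for_ready(timeout_us) are hypothetical stand-ins for the PHP
# client's waitForReady; the point is the asymmetric pair of timeout assertions.
import time
import unittest

class FakeClient:
    def __init__(self, ready_after_s=0.01):
        self._ready_at = time.monotonic() + ready_after_s
    def wait_for_ready(self, timeout_us):
        deadline = time.monotonic() + timeout_us / 1e6
        while time.monotonic() < deadline and time.monotonic() < self._ready_at:
            time.sleep(0.001)
        return time.monotonic() >= self._ready_at

class WaitForReadyTest(unittest.TestCase):
    def setUp(self):
        self.client = FakeClient()            # "channel" becomes ready after ~10 ms
    def test_wait_for_not_ready(self):
        self.assertFalse(self.client.wait_for_ready(1))       # 1 microsecond: too short
    def test_wait_for_ready(self):
        self.assertTrue(self.client.wait_for_ready(250000))   # 250 ms: plenty of time

if __name__ == '__main__':
    unittest.main()
```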
mmm a / taichi / gui / win32 . cpp <nl> ppp b / taichi / gui / win32 . cpp <nl> std : : map < HWND , taichi : : GUI * > gui_from_hwnd ; <nl> <nl> static std : : string lookup_keysym ( WPARAM wParam , LPARAM lParam ) <nl> { <nl> - / * * * TODO : lParam has modifier info according to MSDN ( ? ) * * * / <nl> - int key = wParam ; <nl> - if ( isascii ( key ) ) <nl> - return std : : string ( 1 , key ) ; <nl> - switch ( key ) { <nl> - case VK_RETURN : <nl> - return " Return " ; <nl> - case VK_F1 : <nl> - return " F1 " ; <nl> - case VK_LSHIFT : <nl> - return " Shift_L " ; <nl> - / * * * TODO : win32 keyboard WIP , add more cases , match XKeysymToString ( ) * * * / <nl> - default : <nl> - return std : : format ( " Vk { } " , key ) ; <nl> - } <nl> + return std : : string ( " VirtualKey " ) ; <nl> } <nl> <nl> LRESULT CALLBACK WindowProc ( HWND hwnd , <nl>
gui / win32 . cpp test 2
taichi-dev/taichi
939dc29d6e7f9cc2b33cfdf6de11f7bb1dd6462e
2020-02-13T06:34:15Z
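The taichi record above temporarily stubs `lookup_keysym` to return "VirtualKey", discarding the partial Win32 virtual-key mapping (ASCII passthrough, VK_RETURN → "Return", VK_F1 → "F1", VK_LSHIFT → "Shift_L", fallback "Vk<code>"). For reference, a small Python sketch of what that removed mapping was doing; the VK_* values are the standard Win32 constants, and restricting passthrough to printable ASCII is an assumption here (the C++ used `isascii`, which also admits control characters).

```python
# Python rendering of the mapping the removed C++ switch performed. Only the entries
# present in the deleted code are listed; a full port would need to match
# XKeysymToString() for every key, as the removed TODO noted.
VK_RETURN, VK_F1, VK_LSHIFT = 0x0D, 0x70, 0xA0   # standard Win32 virtual-key codes

SPECIAL_KEYS = {
    VK_RETURN: "Return",
    VK_F1: "F1",
    VK_LSHIFT: "Shift_L",
}

def lookup_keysym(vk_code):
    if 0x20 <= vk_code < 0x7F:                   # printable ASCII passes through
        return chr(vk_code)
    return SPECIAL_KEYS.get(vk_code, "Vk{}".format(vk_code))

print(lookup_keysym(ord('A')), lookup_keysym(VK_RETURN), lookup_keysym(0xA5))
# -> A Return Vk165
```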
mmm a / drivers / python / rethinkdb / _backup . py <nl> ppp b / drivers / python / rethinkdb / _backup . py <nl> <nl> from __future__ import print_function <nl> <nl> from copy import deepcopy <nl> - import socket , sys , string , re <nl> + import socket , sys , string , re , getpass <nl> <nl> try : <nl> import rethinkdb as r <nl> def ssl_option ( str ) : <nl> else : <nl> return { " ca_certs " : str } <nl> <nl> + def get_password ( interactive , filename ) : <nl> + password = " " <nl> + if filename is not None : <nl> + password_file = open ( filename ) <nl> + password = password_file . read ( ) . rstrip ( ' \ n ' ) <nl> + password_file . close ( ) <nl> + elif interactive : <nl> + password = getpass . getpass ( " Password for ` admin ` : " ) <nl> + return password <nl> + <nl> # This function is used to wrap rethinkdb calls to recover from connection errors <nl> # The first argument to the function is an output parameter indicating if progress <nl> # has been made since the last call . This is passed as an array so it works as an <nl> mmm a / drivers / python / rethinkdb / _dump . py <nl> ppp b / drivers / python / rethinkdb / _dump . py <nl> <nl> from . _backup import * <nl> <nl> info = " ' rethinkdb dump ' creates an archive of data from a RethinkDB cluster " <nl> - usage = " rethinkdb dump [ - c HOST : PORT ] [ - a AUTH_KEY ] [ - f FILE ] [ - - clients NUM ] [ - e ( DB | DB . TABLE ) ] . . . " <nl> + usage = " rethinkdb dump [ - c HOST : PORT ] [ - p ] [ - - password - file FILENAME ] [ - - tls - cert FILENAME ] [ - f FILE ] [ - - clients NUM ] [ - e ( DB | DB . TABLE ) ] . . . " <nl> <nl> def print_dump_help ( ) : <nl> print ( info ) <nl> def print_dump_help ( ) : <nl> print ( " - h [ - - help ] print this help " ) <nl> print ( " - c [ - - connect ] HOST : PORT host and client port of a rethinkdb node to connect " ) <nl> print ( " to ( defaults to localhost : 28015 ) " ) <nl> - print ( " - a [ - - auth ] AUTH_KEY authorization key for rethinkdb clients " ) <nl> + print ( " - - tls - cert FILENAME certificate file to use for TLS encryption . " ) <nl> + print ( " - p [ - - password ] interactively prompt for a password required to connect . " ) <nl> + print ( " - - password - file FILENAME read password required to connect from file . " ) <nl> print ( " - f [ - - file ] FILE file to write archive to ( defaults to " ) <nl> print ( " rethinkdb_dump_DATE_TIME . tar . gz ) ; " ) <nl> print ( " if FILE is - , use standard output ( note that " ) <nl> def print_dump_help ( ) : <nl> def parse_options ( ) : <nl> parser = OptionParser ( add_help_option = False , usage = usage ) <nl> parser . add_option ( " - c " , " - - connect " , dest = " host " , metavar = " host : port " , default = " localhost : 28015 " , type = " string " ) <nl> - parser . add_option ( " - a " , " - - auth " , dest = " auth_key " , metavar = " key " , default = " " , type = " string " ) <nl> parser . add_option ( " - f " , " - - file " , dest = " out_file " , metavar = " file " , default = None , type = " string " ) <nl> parser . add_option ( " - e " , " - - export " , dest = " tables " , metavar = " ( db | db . table ) " , default = [ ] , action = " append " , type = " string " ) <nl> <nl> def parse_options ( ) : <nl> parser . add_option ( " - q " , " - - quiet " , dest = " quiet " , default = False , action = " store_true " ) <nl> parser . add_option ( " - - debug " , dest = " debug " , default = False , action = " store_true " ) <nl> parser . 
add_option ( " - h " , " - - help " , dest = " help " , default = False , action = " store_true " ) <nl> + parser . add_option ( " - p " , " - - password " , dest = " password " , default = False , action = " store_true " ) <nl> + parser . add_option ( " - - password - file " , dest = " password_file " , default = None , type = " string " ) <nl> ( options , args ) = parser . parse_args ( ) <nl> <nl> # Check validity of arguments <nl> def parse_options ( ) : <nl> raise RuntimeError ( " Error : Temporary directory inaccessible : % s " % res [ " temp_dir " ] ) <nl> <nl> res [ " tables " ] = options . tables <nl> - res [ " auth_key " ] = options . auth_key <nl> res [ " quiet " ] = True if res [ " out_file " ] is sys . stdout else options . quiet <nl> res [ " debug " ] = options . debug <nl> + res [ " password " ] = options . password <nl> + res [ " password - file " ] = options . password_file <nl> return res <nl> <nl> def do_export ( temp_dir , options ) : <nl> def do_export ( temp_dir , options ) : <nl> export_args = [ " rethinkdb - export " ] <nl> export_args . extend ( [ " - - connect " , " % s : % s " % ( options [ " host " ] , options [ " port " ] ) ] ) <nl> export_args . extend ( [ " - - directory " , os . path . join ( temp_dir , options [ " temp_filename " ] ) ] ) <nl> - export_args . extend ( [ " - - auth " , options [ " auth_key " ] ] ) <nl> + if options [ " password " ] : <nl> + export_args . append ( " - - password " ) <nl> + if options [ " password - file " ] : <nl> + export_args . extend ( [ " - - password - file " , options [ " password - file " ] ] ) <nl> export_args . extend ( [ " - - clients " , str ( options [ " clients " ] ) ] ) <nl> export_args . extend ( [ " - - tls - cert " , options [ " tls_cert " ] ] ) <nl> for table in options [ " tables " ] : <nl> mmm a / drivers / python / rethinkdb / _export . py <nl> ppp b / drivers / python / rethinkdb / _export . py <nl> <nl> import multiprocessing , subprocess , re , ctypes , numbers <nl> from optparse import OptionParser <nl> from . _backup import * <nl> + <nl> import rethinkdb as r <nl> <nl> try : <nl> <nl> <nl> info = " ' rethinkdb export ` exports data from a RethinkDB cluster into a directory " <nl> usage = " \ <nl> - rethinkdb export [ - c HOST : PORT ] [ - a AUTH_KEY ] [ - d DIR ] [ - e ( DB | DB . TABLE ) ] . . . \ n \ <nl> + rethinkdb export [ - c HOST : PORT ] [ - p ] [ - - password - file FILENAME ] [ - - tls - cert filename ] [ - d DIR ] [ - e ( DB | DB . TABLE ) ] . . . \ n \ <nl> [ - - format ( csv | json | ndjson ) ] [ - - fields FIELD , FIELD . . . ] [ - - delimiter CHARACTER ] \ n \ <nl> [ - - clients NUM ] " <nl> <nl> def print_export_help ( ) : <nl> print ( " - h [ - - help ] print this help " ) <nl> print ( " - c [ - - connect ] HOST : PORT host and client port of a rethinkdb node to connect " ) <nl> print ( " to ( defaults to localhost : 28015 ) " ) <nl> - print ( " - a [ - - auth ] AUTH_KEY authorization key for rethinkdb clients " ) <nl> + print ( " - - tls - cert FILENAME certificate file to use for TLS encryption . " ) <nl> + print ( " - p [ - - password ] interactively prompt for a password required to connect . " ) <nl> + print ( " - - password - file FILENAME read password required to connect from file . " ) <nl> print ( " - d [ - - directory ] DIR directory to output to ( defaults to " ) <nl> print ( " rethinkdb_export_DATE_TIME ) " ) <nl> print ( " - - format ( csv | json | ndjson ) format to write ( defaults to json . 
" ) <nl> def print_export_help ( ) : <nl> def parse_options ( ) : <nl> parser = OptionParser ( add_help_option = False , usage = usage ) <nl> parser . add_option ( " - c " , " - - connect " , dest = " host " , metavar = " HOST : PORT " , default = " localhost : 28015 " , type = " string " ) <nl> - parser . add_option ( " - a " , " - - auth " , dest = " auth_key " , metavar = " AUTHKEY " , default = " " , type = " string " ) <nl> parser . add_option ( " - - format " , dest = " format " , metavar = " json | csv | ndjson " , default = " json " , type = " string " ) <nl> parser . add_option ( " - d " , " - - directory " , dest = " directory " , metavar = " DIRECTORY " , default = None , type = " string " ) <nl> parser . add_option ( " - e " , " - - export " , dest = " tables " , metavar = " DB | DB . TABLE " , default = [ ] , action = " append " , type = " string " ) <nl> def parse_options ( ) : <nl> parser . add_option ( " - h " , " - - help " , dest = " help " , default = False , action = " store_true " ) <nl> parser . add_option ( " - q " , " - - quiet " , dest = " quiet " , default = False , action = " store_true " ) <nl> parser . add_option ( " - - debug " , dest = " debug " , default = False , action = " store_true " ) <nl> + parser . add_option ( " - p " , " - - password " , dest = " password " , default = False , action = " store_true " ) <nl> + parser . add_option ( " - - password - file " , dest = " password_file " , default = None , type = " string " ) <nl> ( options , args ) = parser . parse_args ( ) <nl> <nl> # Check validity of arguments <nl> def parse_options ( ) : <nl> raise RuntimeError ( " Error : invalid number of clients ( % d ) , must be greater than zero " % options . clients ) <nl> res [ " clients " ] = options . clients <nl> <nl> - res [ " auth_key " ] = options . auth_key <nl> res [ " quiet " ] = options . quiet <nl> res [ " debug " ] = options . debug <nl> + <nl> + res [ " password " ] = options . password <nl> + res [ " password - file " ] = options . password_file <nl> return res <nl> <nl> # This is called through rdb_call_wrapper and may be called multiple times if <nl> def launch_writer ( format , directory , db , table , fields , delimiter , task_queue , e <nl> else : <nl> raise RuntimeError ( " unknown format type : % s " % format ) <nl> <nl> + < < < < < < < HEAD <nl> def get_all_table_sizes ( host , port , auth_key , db_table_set , ssl_op ) : <nl> def get_table_size ( progress , conn , db , table ) : <nl> return r . db ( db ) . table ( table ) . info ( ) [ ' doc_count_estimates ' ] . sum ( ) . run ( conn ) <nl> <nl> conn_fn = lambda : r . connect ( host , port , ssl = ssl_op , auth_key = auth_key ) <nl> + = = = = = = = <nl> + def get_all_table_sizes ( host , port , db_table_set , admin_password ) : <nl> + def get_table_size ( progress , conn , db , table ) : <nl> + return r . db ( db ) . table ( table ) . info ( ) [ ' doc_count_estimates ' ] . sum ( ) . run ( conn ) <nl> + <nl> + conn_fn = lambda : r . 
connect ( host , <nl> + port , <nl> + user = " admin " , <nl> + password = admin_password ) <nl> + > > > > > > > nighelles / 5464 <nl> <nl> ret = dict ( ) <nl> for pair in db_table_set : <nl> def get_table_size ( progress , conn , db , table ) : <nl> <nl> return ret <nl> <nl> + < < < < < < < HEAD <nl> def export_table ( host , port , auth_key , db , table , directory , fields , delimiter , format , <nl> error_queue , progress_info , sindex_counter , exit_event , ssl_op ) : <nl> + = = = = = = = <nl> + def export_table ( host , port , db , table , directory , fields , delimiter , format , <nl> + error_queue , progress_info , sindex_counter , exit_event , admin_password ) : <nl> + > > > > > > > nighelles / 5464 <nl> writer = None <nl> <nl> try : <nl> # This will open at least one connection for each rdb_call_wrapper , which is <nl> # a little wasteful , but shouldn ' t be a big performance hit <nl> + < < < < < < < HEAD <nl> conn_fn = lambda : r . connect ( host , port , ssl = ssl_op , auth_key = auth_key ) <nl> + = = = = = = = <nl> + conn_fn = lambda : r . connect ( host , <nl> + port , <nl> + user = " admin " , <nl> + password = admin_password ) <nl> + > > > > > > > nighelles / 5464 <nl> table_info = rdb_call_wrapper ( conn_fn , " info " , write_table_metadata , db , table , directory ) <nl> sindex_counter . value + = len ( table_info [ " indexes " ] ) <nl> <nl> def update_progress ( progress_info , options ) : <nl> if not options [ " quiet " ] : <nl> print_progress ( float ( rows_done ) / total_rows ) <nl> <nl> - def run_clients ( options , db_table_set ) : <nl> + def run_clients ( options , db_table_set , admin_password ) : <nl> # Spawn one client for each db . table <nl> exit_event = multiprocessing . Event ( ) <nl> processes = [ ] <nl> def run_clients ( options , db_table_set ) : <nl> errors = [ ] <nl> <nl> try : <nl> + < < < < < < < HEAD <nl> sizes = get_all_table_sizes ( options [ " host " ] , options [ " port " ] , options [ " auth_key " ] , db_table_set , options [ " tls_cert " ] ) <nl> + = = = = = = = <nl> + sizes = get_all_table_sizes ( options [ " host " ] , options [ " port " ] , db_table_set , admin_password ) <nl> + > > > > > > > nighelles / 5464 <nl> <nl> progress_info = [ ] <nl> <nl> def run_clients ( options , db_table_set ) : <nl> multiprocessing . Value ( ctypes . c_longlong , sizes [ ( db , table ) ] ) ) ) <nl> arg_lists . append ( ( options [ " host " ] , <nl> options [ " port " ] , <nl> - options [ " auth_key " ] , <nl> db , table , <nl> options [ " directory_partial " ] , <nl> options [ " fields " ] , <nl> def run_clients ( options , db_table_set ) : <nl> progress_info [ - 1 ] , <nl> sindex_counter , <nl> exit_event , <nl> + < < < < < < < HEAD <nl> options [ " tls_cert " ] ) ) <nl> + = = = = = = = <nl> + admin_password ) ) <nl> + > > > > > > > nighelles / 5464 <nl> <nl> <nl> # Wait for all tables to finish <nl> def main ( ) : <nl> return 1 <nl> <nl> try : <nl> + < < < < < < < HEAD <nl> conn_fn = lambda : r . connect ( options [ " host " ] , options [ " port " ] , ssl = options [ " tls_cert " ] , auth_key = options [ " auth_key " ] ) <nl> + = = = = = = = <nl> + admin_password = get_password ( options [ " password " ] , options [ " password - file " ] ) <nl> + conn_fn = lambda : r . 
connect ( options [ " host " ] , <nl> + options [ " port " ] , <nl> + user = " admin " , <nl> + password = admin_password ) <nl> + > > > > > > > nighelles / 5464 <nl> # Make sure this isn ' t a pre - ` reql_admin ` cluster - which could result in data loss <nl> # if the user has a database named ' rethinkdb ' <nl> rdb_call_wrapper ( conn_fn , " version check " , check_minimum_version , ( 1 , 16 , 0 ) ) <nl> def main ( ) : <nl> <nl> prepare_directories ( options [ " directory " ] , options [ " directory_partial " ] , db_table_set ) <nl> start_time = time . time ( ) <nl> - run_clients ( options , db_table_set ) <nl> + run_clients ( options , db_table_set , admin_password ) <nl> finalize_directory ( options [ " directory " ] , options [ " directory_partial " ] ) <nl> except RuntimeError as ex : <nl> print ( ex , file = sys . stderr ) <nl> mmm a / drivers / python / rethinkdb / _import . py <nl> ppp b / drivers / python / rethinkdb / _import . py <nl> <nl> rethinkdb import - d DIR [ - c HOST : PORT ] [ - a AUTH_KEY ] [ - - force ] \ n \ <nl> [ - i ( DB | DB . TABLE ) ] [ - - clients NUM ] \ n \ <nl> [ - - shards NUM_SHARDS ] [ - - replicas NUM_REPLICAS ] \ n \ <nl> - rethinkdb import - f FILE - - table DB . TABLE [ - c HOST : PORT ] [ - a AUTH_KEY ] \ n \ <nl> + rethinkdb import - f FILE - - table DB . TABLE [ - c HOST : PORT ] [ - - tls - cert FILENAME ] [ - p ] [ - - password - file FILENAME ] \ n \ <nl> [ - - force ] [ - - clients NUM ] [ - - format ( csv | json ) ] [ - - pkey PRIMARY_KEY ] \ n \ <nl> [ - - shards NUM_SHARDS ] [ - - replicas NUM_REPLICAS ] \ n \ <nl> [ - - delimiter CHARACTER ] [ - - custom - header FIELD , FIELD . . . [ - - no - header ] ] " <nl> def print_import_help ( ) : <nl> print ( " - h [ - - help ] print this help " ) <nl> print ( " - c [ - - connect ] HOST : PORT host and client port of a rethinkdb node to connect " ) <nl> print ( " to ( defaults to localhost : 28015 ) " ) <nl> - print ( " - a [ - - auth ] AUTH_KEY authorization key for rethinkdb clients " ) <nl> + print ( " - - tls - cert FILENAME certificate file to use for TLS encryption . " ) <nl> + print ( " - p [ - - password ] interactively prompt for a password required to connect . " ) <nl> + print ( " - - password - file FILENAME read password required to connect from file . " ) <nl> print ( " - - clients NUM_CLIENTS the number of client connections to use ( defaults " ) <nl> print ( " to 8 ) " ) <nl> print ( " - - hard - durability use hard durability writes ( slower , but less memory " ) <nl> def print_import_help ( ) : <nl> def parse_options ( ) : <nl> parser = OptionParser ( add_help_option = False , usage = usage ) <nl> parser . add_option ( " - c " , " - - connect " , dest = " host " , metavar = " HOST : PORT " , default = " localhost : 28015 " , type = " string " ) <nl> - parser . add_option ( " - a " , " - - auth " , dest = " auth_key " , metavar = " AUTHKEY " , default = " " , type = " string " ) <nl> parser . add_option ( " - - fields " , dest = " fields " , metavar = " FIELD , FIELD . . . " , default = None , type = " string " ) <nl> parser . add_option ( " - - clients " , dest = " clients " , metavar = " NUM_CLIENTS " , default = 8 , type = " int " ) <nl> parser . add_option ( " - - hard - durability " , dest = " hard " , action = " store_true " , default = False ) <nl> def parse_options ( ) : <nl> parser . add_option ( " - - no - header " , dest = " no_header " , action = " store_true " , default = False ) <nl> parser . 
add_option ( " - - custom - header " , dest = " custom_header " , metavar = " FIELD , FIELD . . . " , default = None , type = " string " ) <nl> parser . add_option ( " - h " , " - - help " , dest = " help " , default = False , action = " store_true " ) <nl> + parser . add_option ( " - p " , " - - password " , dest = " password " , default = False , action = " store_true " ) <nl> + parser . add_option ( " - - password - file " , dest = " password_file " , default = None , type = " string " ) <nl> ( options , args ) = parser . parse_args ( ) <nl> <nl> # Check validity of arguments <nl> def parse_options ( ) : <nl> raise RuntimeError ( " Error : - - client option too low , must have at least one client connection " ) <nl> <nl> res [ " tls_cert " ] = ssl_option ( options . tls_cert ) <nl> - <nl> - res [ " auth_key " ] = options . auth_key <nl> res [ " clients " ] = options . clients <nl> res [ " durability " ] = " hard " if options . hard else " soft " <nl> res [ " force " ] = options . force <nl> def parse_options ( ) : <nl> else : <nl> raise RuntimeError ( " Error : Must specify one of - - directory or - - file to import " ) <nl> <nl> + res [ " password " ] = get_password ( options . password , options . password_file ) <nl> return res <nl> <nl> # This is called through rdb_call_wrapper so reattempts can be tried as long as progress <nl> def import_from_queue ( progress , conn , task_queue , error_queue , replace_conflicts <nl> task = task_queue . get ( ) <nl> <nl> # This is run for each client requested , and accepts tasks from the reader processes <nl> + < < < < < < < HEAD <nl> def client_process ( host , port , auth_key , task_queue , error_queue , rows_written , replace_conflicts , durability , ssl_op ) : <nl> try : <nl> conn_fn = lambda : r . connect ( host , port , ssl = ssl_op , auth_key = auth_key ) <nl> + = = = = = = = <nl> + def client_process ( host , port , task_queue , error_queue , rows_written , replace_conflicts , durability , admin_password ) : <nl> + try : <nl> + conn_fn = lambda : r . connect ( host , <nl> + port , <nl> + user = " admin " , <nl> + password = admin_password ) <nl> + > > > > > > > nighelles / 5464 <nl> write_count = [ 0 ] <nl> rdb_call_wrapper ( conn_fn , " import " , import_from_queue , task_queue , error_queue , replace_conflicts , durability , write_count ) <nl> except : <nl> def table_reader ( options , file_info , task_queue , error_queue , warning_queue , pro <nl> create_args = dict ( options [ " create_args " ] ) <nl> create_args [ " primary_key " ] = file_info [ " info " ] [ " primary_key " ] <nl> <nl> + < < < < < < < HEAD <nl> conn_fn = lambda : r . connect ( options [ " host " ] , options [ " port " ] , ssl = options [ " tls_cert " ] , auth_key = options [ " auth_key " ] ) <nl> + = = = = = = = <nl> + conn_fn = lambda : r . connect ( options [ " host " ] , <nl> + options [ " port " ] , <nl> + user = " admin " , <nl> + password = options [ " password " ] ) <nl> + > > > > > > > nighelles / 5464 <nl> try : <nl> rdb_call_wrapper ( conn_fn , " create table " , create_table , db , table , create_args , <nl> file_info [ " info " ] [ " indexes " ] if options [ " create_sindexes " ] else [ ] ) <nl> def spawn_import_clients ( options , files_info ) : <nl> client_procs . append ( multiprocessing . 
Process ( target = client_process , <nl> args = ( options [ " host " ] , <nl> options [ " port " ] , <nl> - options [ " auth_key " ] , <nl> task_queue , <nl> error_queue , <nl> rows_written , <nl> options [ " force " ] , <nl> options [ " durability " ] , <nl> + < < < < < < < HEAD <nl> options [ " tls_cert " ] ) ) ) <nl> + = = = = = = = <nl> + options [ " password " ] ) ) ) <nl> + > > > > > > > nighelles / 5464 <nl> client_procs [ - 1 ] . start ( ) <nl> <nl> for file_info in files_info : <nl> def import_directory ( options ) : <nl> <nl> db_tables . add ( ( file_info [ " db " ] , file_info [ " table " ] ) ) <nl> <nl> + < < < < < < < HEAD <nl> conn_fn = lambda : r . connect ( options [ " host " ] , options [ " port " ] , ssl = options [ " tls_cert " ] , auth_key = options [ " auth_key " ] ) <nl> + = = = = = = = <nl> + conn_fn = lambda : r . connect ( options [ " host " ] , <nl> + options [ " port " ] , <nl> + user = " admin " , <nl> + password = options [ " password " ] ) <nl> + > > > > > > > nighelles / 5464 <nl> # Make sure this isn ' t a pre - ` reql_admin ` cluster - which could result in data loss <nl> # if the user has a database named ' rethinkdb ' <nl> rdb_call_wrapper ( conn_fn , " version check " , check_minimum_version , ( 1 , 16 , 0 ) ) <nl> def import_file ( options ) : <nl> table = options [ " import_db_table " ] [ 1 ] <nl> <nl> # Ensure that the database and table exist with the right primary key <nl> + < < < < < < < HEAD <nl> conn_fn = lambda : r . connect ( options [ " host " ] , options [ " port " ] , ssl = options [ " tls_cert " ] , auth_key = options [ " auth_key " ] ) <nl> + = = = = = = = <nl> + conn_fn = lambda : r . connect ( options [ " host " ] , <nl> + options [ " port " ] , <nl> + user = " admin " , <nl> + password = options [ " password " ] ) <nl> + > > > > > > > nighelles / 5464 <nl> # Make sure this isn ' t a pre - ` reql_admin ` cluster - which could result in data loss <nl> # if the user has a database named ' rethinkdb ' <nl> rdb_call_wrapper ( conn_fn , " version check " , check_minimum_version , ( 1 , 16 , 0 ) ) <nl> mmm a / drivers / python / rethinkdb / _index_rebuild . py <nl> ppp b / drivers / python / rethinkdb / _index_rebuild . py <nl> <nl> info = " ' rethinkdb index - rebuild ' recreates outdated secondary indexes in a cluster . \ n " + \ <nl> " This should be used after upgrading to a newer version of rethinkdb . There \ n " + \ <nl> " will be a notification in the web UI if any secondary indexes are out - of - date . " <nl> - usage = " rethinkdb index - rebuild [ - c HOST : PORT ] [ - a AUTH_KEY ] [ - n NUM ] [ - r ( DB | DB . TABLE ) ] . . . " <nl> + usage = " rethinkdb index - rebuild [ - c HOST : PORT ] [ - n NUM ] [ - r ( DB | DB . TABLE ) ] [ - - tls - cert FILENAME ] [ - p ] [ - - password - file FILENAME ] . . . " <nl> <nl> # Prefix used for indexes that are being rebuilt <nl> temp_index_prefix = ' $ reql_temp_index $ _ ' <nl> def print_restore_help ( ) : <nl> print ( " - h [ - - help ] print this help " ) <nl> print ( " - c [ - - connect ] HOST : PORT host and client port of a rethinkdb node to connect " ) <nl> print ( " to ( defaults to localhost : 28015 ) " ) <nl> - print ( " - a [ - - auth ] AUTH_KEY authorization key for rethinkdb clients " ) <nl> + print ( " - - tls - cert FILENAME certificate file to use for TLS encryption . " ) <nl> + print ( " - p [ - - password ] interactively prompt for a password required to connect . " ) <nl> + print ( " - - password - file FILENAME read password required to connect from file . 
" ) <nl> print ( " - r [ - - rebuild ] ( DB | DB . TABLE ) the databases or tables to rebuild indexes on " ) <nl> print ( " ( defaults to all databases and tables ) " ) <nl> print ( " - n NUM the number of concurrent indexes to rebuild " ) <nl> def print_restore_help ( ) : <nl> def parse_options ( ) : <nl> parser = OptionParser ( add_help_option = False , usage = usage ) <nl> parser . add_option ( " - c " , " - - connect " , dest = " host " , metavar = " HOST : PORT " , default = " localhost : 28015 " , type = " string " ) <nl> - parser . add_option ( " - a " , " - - auth " , dest = " auth_key " , metavar = " KEY " , default = " " , type = " string " ) <nl> parser . add_option ( " - r " , " - - rebuild " , dest = " tables " , metavar = " DB | DB . TABLE " , default = [ ] , action = " append " , type = " string " ) <nl> parser . add_option ( " - - tls - cert " , dest = " tls_cert " , metavar = " TLS_CERT " , default = " " , type = " string " ) <nl> <nl> parser . add_option ( " - n " , dest = " concurrent " , metavar = " NUM " , default = 1 , type = " int " ) <nl> parser . add_option ( " - - debug " , dest = " debug " , default = False , action = " store_true " ) <nl> parser . add_option ( " - h " , " - - help " , dest = " help " , default = False , action = " store_true " ) <nl> + parser . add_option ( " - p " , " - - password " , dest = " password " , default = False , action = " store_true " ) <nl> + parser . add_option ( " - - password - file " , dest = " password_file " , default = None , type = " string " ) <nl> + <nl> ( options , args ) = parser . parse_args ( ) <nl> <nl> if options . help : <nl> def parse_options ( ) : <nl> # Verify valid - - import options <nl> res [ " tables " ] = parse_db_table_options ( options . tables ) <nl> <nl> - res [ " auth_key " ] = options . auth_key <nl> res [ " concurrent " ] = options . concurrent <nl> res [ " debug " ] = options . debug <nl> + <nl> + res [ " password " ] = get_password ( options . password , options . password_file ) <nl> return res <nl> <nl> def print_progress ( ratio ) : <nl> def print_progress ( ratio ) : <nl> <nl> def do_connect ( options ) : <nl> try : <nl> - return r . connect ( options [ ' host ' ] , options [ ' port ' ] , ssl = options [ ' tls_cert ' ] , auth_key = options [ ' auth_key ' ] ) <nl> + return r . connect ( options [ ' host ' ] , <nl> + options [ ' port ' ] , <nl> + ssl = options [ ' tls_cert ' ] <nl> + user = " admin " , <nl> + password = options [ " password " ] ) <nl> except ( r . ReqlError , r . ReqlDriverError ) as ex : <nl> raise RuntimeError ( " Error when connecting : % s " % ex . message ) <nl> <nl> mmm a / drivers / python / rethinkdb / _restore . py <nl> ppp b / drivers / python / rethinkdb / _restore . py <nl> <nl> from . _backup import * <nl> <nl> info = " ' rethinkdb restore ' loads data into a RethinkDB cluster from an archive " <nl> - usage = " rethinkdb restore FILE [ - c HOST : PORT ] [ - a AUTH_KEY ] [ - - clients NUM ] [ - - shards NUM_SHARDS ] [ - - replicas NUM_REPLICAS ] [ - - force ] [ - i ( DB | DB . TABLE ) ] . . . " <nl> + usage = " rethinkdb restore FILE [ - c HOST : PORT ] [ - - tls - cert FILENAME ] [ - p ] [ - - password - file FILENAME ] [ - - clients NUM ] [ - - shards NUM_SHARDS ] [ - - replicas NUM_REPLICAS ] [ - - force ] [ - i ( DB | DB . TABLE ) ] . . . 
" <nl> <nl> def print_restore_help ( ) : <nl> print ( info ) <nl> def print_restore_help ( ) : <nl> print ( " - h [ - - help ] print this help " ) <nl> print ( " - c [ - - connect ] HOST : PORT host and client port of a rethinkdb node to connect " ) <nl> print ( " to ( defaults to localhost : 28015 ) " ) <nl> - print ( " - a [ - - auth ] AUTH_KEY authorization key for rethinkdb clients " ) <nl> + print ( " - - tls - cert FILENAME certificate file to use for TLS encryption . " ) <nl> + print ( " - p [ - - password ] interactively prompt for a password required to connect . " ) <nl> + print ( " - - password - file FILENAME read password required to connect from file . " ) <nl> print ( " - i [ - - import ] ( DB | DB . TABLE ) limit restore to the given database or table ( may " ) <nl> print ( " be specified multiple times ) " ) <nl> print ( " - - clients NUM_CLIENTS the number of client connections to use ( defaults " ) <nl> def print_restore_help ( ) : <nl> def parse_options ( ) : <nl> parser = OptionParser ( add_help_option = False , usage = usage ) <nl> parser . add_option ( " - c " , " - - connect " , dest = " host " , metavar = " HOST : PORT " , default = " localhost : 28015 " , type = " string " ) <nl> - parser . add_option ( " - a " , " - - auth " , dest = " auth_key " , metavar = " KEY " , default = " " , type = " string " ) <nl> parser . add_option ( " - i " , " - - import " , dest = " tables " , metavar = " DB | DB . TABLE " , default = [ ] , action = " append " , type = " string " ) <nl> <nl> parser . add_option ( " - - shards " , dest = " shards " , metavar = " NUM_SHARDS " , default = 0 , type = " int " ) <nl> def parse_options ( ) : <nl> parser . add_option ( " - q " , " - - quiet " , dest = " quiet " , default = False , action = " store_true " ) <nl> parser . add_option ( " - - debug " , dest = " debug " , default = False , action = " store_true " ) <nl> parser . add_option ( " - h " , " - - help " , dest = " help " , default = False , action = " store_true " ) <nl> + parser . add_option ( " - p " , " - - password " , dest = " password " , default = False , action = " store_true " ) <nl> + parser . add_option ( " - - password - file " , dest = " password_file " , default = None , type = " string " ) <nl> ( options , args ) = parser . parse_args ( ) <nl> <nl> if options . help : <nl> def parse_options ( ) : <nl> if not os . access ( res [ " temp_dir " ] , os . W_OK ) : <nl> raise RuntimeError ( " Error : Temporary directory inaccessible : % s " % res [ " temp_dir " ] ) <nl> <nl> - res [ " auth_key " ] = options . auth_key <nl> res [ " clients " ] = options . clients <nl> res [ " shards " ] = options . shards <nl> res [ " replicas " ] = options . replicas <nl> def parse_options ( ) : <nl> res [ " debug " ] = options . debug <nl> <nl> res [ " tls_cert " ] = options . tls_cert <nl> + res [ " password " ] = options . password <nl> + res [ " password - file " ] = options . password_file <nl> return res <nl> <nl> def do_unzip ( temp_dir , options ) : <nl> def do_import ( temp_dir , options ) : <nl> import_args = [ " rethinkdb - import " ] <nl> import_args . extend ( [ " - - connect " , " % s : % s " % ( options [ " host " ] , options [ " port " ] ) ] ) <nl> import_args . extend ( [ " - - directory " , temp_dir ] ) <nl> - import_args . extend ( [ " - - auth " , options [ " auth_key " ] ] ) <nl> + if options [ " password " ] : <nl> + import_args . append ( " - - password " ) <nl> + if options [ " password - file " ] : <nl> + import_args . 
extend ( [ " - - password - file " , options [ " password - file " ] ] ) <nl> import_args . extend ( [ " - - clients " , str ( options [ " clients " ] ) ] ) <nl> import_args . extend ( [ " - - shards " , str ( options [ " shards " ] ) ] ) <nl> import_args . extend ( [ " - - replicas " , str ( options [ " replicas " ] ) ] ) <nl>
Add username and password to rethinkdb dump , etc .
rethinkdb/rethinkdb
7d85054bca1ba77481583d1e1200de0bd3bc6156
2016-04-05T22:35:38Z
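The rethinkdb hunks above replace the old - a / - - auth AUTH_KEY flow with a - p / - - password prompt, a - - password - file option, and a connection made as the admin user. As a rough illustration of that pattern only — the body below is a sketch, not the actual RethinkDB driver code; only the helper name get_password is taken from the diff, while the prompt text and fallback behavior are assumptions — a stdlib-only version of the password lookup might look like this:

    import getpass

    def get_password(interactive, password_file):
        # Sketch of the get_password helper referenced in the diff above:
        # prefer an explicit password file, otherwise prompt when -p was given,
        # and fall back to an empty password.
        if password_file is not None:
            with open(password_file) as f:
                return f.readline().rstrip("\n")
        if interactive:
            return getpass.getpass("Password for user admin: ")
        return ""

The value returned this way would then be handed to the driver much as the conn_fn and do_connect hunks do, i.e. r.connect(host, port, user="admin", password=password), optionally together with the ssl / tls_cert argument kept from the previous flow.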
mmm a / dbms / tests / performance / group_array_moving_sum . xml <nl> ppp b / dbms / tests / performance / group_array_moving_sum . xml <nl> <nl> < / stop_conditions > <nl> <nl> <nl> - < create_query > CREATE TABLE moving_sum_1m ( k UInt64 , v UInt64 ) ENGINE = MergeTree ORDER BY k < / create_query > <nl> < create_query > CREATE TABLE moving_sum_10m ( k UInt64 , v UInt64 ) ENGINE = MergeTree ORDER BY k < / create_query > <nl> + < create_query > CREATE TABLE moving_sum_100m ( k UInt64 , v UInt64 ) ENGINE = MergeTree ORDER BY k < / create_query > <nl> <nl> - < fill_query > INSERT INTO moving_sum_1m SELECT number % 100 , rand ( ) from numbers ( 1000000 ) < / fill_query > <nl> < fill_query > INSERT INTO moving_sum_10m SELECT number % 100 , rand ( ) from numbers ( 10000000 ) < / fill_query > <nl> - <nl> - < query tag = ' MovingSumSize10_1M ' > SELECT k , groupArrayMovingSum ( 10 ) ( v ) FROM moving_sum_1m GROUP BY k FORMAT Null < / query > <nl> - < query tag = ' MovingSumSize10WithKey_1M ' > SELECT k , groupArrayMovingSum ( 10 ) ( v ) FROM moving_sum_1m WHERE k in ( 49 , 50 , 51 ) GROUP BY k FORMAT Null < / query > <nl> - < query tag = ' MovingSumSize1000_1M ' > SELECT k , groupArrayMovingSum ( 1000 ) ( v ) FROM moving_sum_1m GROUP BY k FORMAT Null < / query > <nl> - < query tag = ' MovingSumSize1000WithKey_1M ' > SELECT k , groupArrayMovingSum ( 1000 ) ( v ) FROM moving_sum_1m WHERE k in ( 49 , 50 , 51 ) GROUP BY k FORMAT Null < / query > <nl> - < query tag = ' MovingSumSize10000_1M ' > SELECT k , groupArrayMovingSum ( 10000 ) ( v ) FROM moving_sum_1m GROUP BY k FORMAT Null < / query > <nl> - < query tag = ' MovingSumSize10000WithKey_1M ' > SELECT k , groupArrayMovingSum ( 10000 ) ( v ) FROM moving_sum_1m WHERE k in ( 49 , 50 , 51 ) GROUP BY k FORMAT Null < / query > <nl> - <nl> - < query tag = ' MovingSumSize10_10M ' > SELECT k , groupArrayMovingSum ( 10 ) ( v ) FROM moving_sum_10m GROUP BY k FORMAT Null < / query > <nl> - < query tag = ' MovingSumSize10WithKey_10M ' > SELECT k , groupArrayMovingSum ( 10 ) ( v ) FROM moving_sum_10m WHERE k in ( 49 , 50 , 51 ) GROUP BY k FORMAT Null < / query > <nl> - < query tag = ' MovingSumSize1000_10M ' > SELECT k , groupArrayMovingSum ( 1000 ) ( v ) FROM moving_sum_10m GROUP BY k FORMAT Null < / query > <nl> - < query tag = ' MovingSumSize1000WithKey_10M ' > SELECT k , groupArrayMovingSum ( 1000 ) ( v ) FROM moving_sum_10m WHERE k in ( 49 , 50 , 51 ) GROUP BY k FORMAT Null < / query > <nl> - < query tag = ' MovingSumSize10000_10M ' > SELECT k , groupArrayMovingSum ( 10000 ) ( v ) FROM moving_sum_10m GROUP BY k FORMAT Null < / query > <nl> - < query tag = ' MovingSumSize10000WithKey_10M ' > SELECT k , groupArrayMovingSum ( 10000 ) ( v ) FROM moving_sum_10m WHERE k in ( 49 , 50 , 51 ) GROUP BY k FORMAT Null < / query > <nl> - <nl> + < fill_query > INSERT INTO moving_sum_100m SELECT number % 100 , rand ( ) from numbers ( 100000000 ) < / fill_query > <nl> + <nl> + < query tag = ' MovingSumSize10_1M ' > SELECT k , groupArrayMovingSum ( 10 ) ( v ) FROM moving_sum_10m GROUP BY k FORMAT Null < / query > <nl> + < query tag = ' MovingSumSize10WithKey_1M ' > SELECT k , groupArrayMovingSum ( 10 ) ( v ) FROM moving_sum_10m WHERE k in ( 49 , 50 , 51 ) GROUP BY k FORMAT Null < / query > <nl> + < query tag = ' MovingSumSize1000_1M ' > SELECT k , groupArrayMovingSum ( 1000 ) ( v ) FROM moving_sum_10m GROUP BY k FORMAT Null < / query > <nl> + < query tag = ' MovingSumSize1000WithKey_1M ' > SELECT k , groupArrayMovingSum ( 1000 ) ( v ) FROM moving_sum_10m WHERE k in ( 49 , 
50 , 51 ) GROUP BY k FORMAT Null < / query > <nl> + < query tag = ' MovingSumSize10000_1M ' > SELECT k , groupArrayMovingSum ( 10000 ) ( v ) FROM moving_sum_10m GROUP BY k FORMAT Null < / query > <nl> + < query tag = ' MovingSumSize10000WithKey_1M ' > SELECT k , groupArrayMovingSum ( 10000 ) ( v ) FROM moving_sum_10m WHERE k in ( 49 , 50 , 51 ) GROUP BY k FORMAT Null < / query > <nl> + <nl> + < query tag = ' MovingSumSize10_10M ' > SELECT k , groupArrayMovingSum ( 10 ) ( v ) FROM moving_sum_100m GROUP BY k FORMAT Null < / query > <nl> + < query tag = ' MovingSumSize10WithKey_10M ' > SELECT k , groupArrayMovingSum ( 10 ) ( v ) FROM moving_sum_100m WHERE k in ( 49 , 50 , 51 ) GROUP BY k FORMAT Null < / query > <nl> + < query tag = ' MovingSumSize1000_10M ' > SELECT k , groupArrayMovingSum ( 1000 ) ( v ) FROM moving_sum_100m GROUP BY k FORMAT Null < / query > <nl> + < query tag = ' MovingSumSize1000WithKey_10M ' > SELECT k , groupArrayMovingSum ( 1000 ) ( v ) FROM moving_sum_100m WHERE k in ( 49 , 50 , 51 ) GROUP BY k FORMAT Null < / query > <nl> + < query tag = ' MovingSumSize10000_10M ' > SELECT k , groupArrayMovingSum ( 10000 ) ( v ) FROM moving_sum_100m GROUP BY k FORMAT Null < / query > <nl> + < query tag = ' MovingSumSize10000WithKey_10M ' > SELECT k , groupArrayMovingSum ( 10000 ) ( v ) FROM moving_sum_100m WHERE k in ( 49 , 50 , 51 ) GROUP BY k FORMAT Null < / query > <nl> + <nl> + < drop_query > DROP TABLE IF EXISTS moving_sum_100m < / drop_query > <nl> < drop_query > DROP TABLE IF EXISTS moving_sum_10m < / drop_query > <nl> - < drop_query > DROP TABLE IF EXISTS moving_sum_1m < / drop_query > <nl> < / test > <nl>
Update group_array_moving_sum . xml
ClickHouse/ClickHouse
3b54c5eae794b2494624a4b812d7d91dd5c32537
2020-03-02T20:36:44Z
mmm a / tensorflow / api_template . __init__ . py <nl> ppp b / tensorflow / api_template . __init__ . py <nl> <nl> from tensorflow . python . util . lazy_loader import LazyLoader as _LazyLoader <nl> <nl> # Make sure code inside the TensorFlow codebase can use tf2 . enabled ( ) at import . <nl> + _os . environ [ ' TF2_BEHAVIOR ' ] = ' 1 ' <nl> from tensorflow . python import tf2 as _tf2 <nl> _tf2 . enable ( ) <nl> <nl> mmm a / tensorflow / tools / api / tests / module_test . py <nl> ppp b / tensorflow / tools / api / tests / module_test . py <nl> def testSummaryMerged ( self ) : <nl> tf . compat . v1 . summary . FileWriter <nl> # pylint : enable = pointless - statement <nl> <nl> + def testInternalKerasImport ( self ) : <nl> + # pylint : disable = g - import - not - at - top <nl> + from tensorflow . python . keras import layers <nl> + normalization_parent = layers . Normalization . __module__ . split ( ' . ' ) [ - 1 ] <nl> + if tf . _major_api_version = = 2 : <nl> + self . assertEqual ( ' normalization ' , normalization_parent ) <nl> + self . assertTrue ( layers . BatchNormalization . _USE_V2_BEHAVIOR ) <nl> + else : <nl> + self . assertEqual ( ' normalization_v1 ' , normalization_parent ) <nl> + self . assertFalse ( layers . BatchNormalization . _USE_V2_BEHAVIOR ) <nl> + # pylint : enable = g - import - not - at - top <nl> + <nl> <nl> if __name__ = = ' __main__ ' : <nl> test . main ( ) <nl>
Set tf2_behavior to 1 to enable V2 for early loading cases
tensorflow/tensorflow
410852dbd24899e22f0020f9fdc9757f527dda55
2020-03-26T14:57:17Z
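The api_template change above relies on ordering: TF2_BEHAVIOR is written into the environment before tensorflow.python.tf2 is imported, so any code that consults the flag at import time already sees it. A minimal sketch of that ordering, assuming a TensorFlow build that ships the internal tensorflow.python.tf2 module used in the diff:

    import os

    # Set the flag before importing anything from TensorFlow; setting it after
    # the import would be too late for code that reads it while modules load.
    os.environ["TF2_BEHAVIOR"] = "1"

    from tensorflow.python import tf2  # internal module, as in the template above
    tf2.enable()
    print(tf2.enabled())  # reports True once V2 behavior is enabled

The added testInternalKerasImport case then checks the visible effect of that flag: under the V2 API the Keras normalization layer is expected to come from the V2 module and to have _USE_V2_BEHAVIOR set, and from the V1 module otherwise.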