Dataset columns (name: type, length range):
  diff: string, 41 to 2.03M
  msg:  string, 1 to 1.5k
  repo: string, 5 to 40
  sha:  string, 40 to 40
  time: string, 20 to 20
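The diff field in each record below is stored as a single tokenized string: line breaks are written as ` <nl> `, and the file headers `--- a/...` and `+++ b/...` appear as `mmm a / ...` and `ppp b / ...`. Below is a minimal sketch of turning one such field back into a readable unified diff; the token conventions are taken from the rows themselves, while the helper name and the truncated sample record are illustrative only, and intra-line spacing around punctuation is left untouched.

```python
# Minimal sketch, assuming the tokenization visible in the records below:
# " <nl> " separates diff lines, while "mmm a / " and "ppp b / " stand in for
# "--- a/" and "+++ b/". Spacing inside paths and around punctuation is not repaired.

def detokenize_diff(diff_field: str) -> str:
    lines = diff_field.split(" <nl> ")
    restored = []
    for line in lines:
        line = line.replace("mmm a / ", "--- a/").replace("ppp b / ", "+++ b/")
        restored.append(line.rstrip())
    return "\n".join(restored)

# Truncated example record (illustrative, not a complete row from the dataset).
sample = ("mmm a / db / dbcommands_generic . cpp <nl> "
          "ppp b / db / dbcommands_generic . cpp <nl> "
          "+ result . appendNumber ( \" maxBsonObjectSize \" , MaxBSONObjectSize ) ; <nl> "
          "return true ; <nl> }")
print(detokenize_diff(sample))
```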
mmm a / db / dbcommands_generic . cpp <nl> ppp b / db / dbcommands_generic . cpp <nl> namespace mongo { <nl> false <nl> # endif <nl> ) ; <nl> + result . appendNumber ( " maxBsonObjectSize " , MaxBSONObjectSize ) ; <nl> return true ; <nl> } <nl> } cmdBuildInfo ; <nl>
report max bson obj size
mongodb/mongo
265c172b3a2c04c132b36b9554af74cb5e1e7c46
2010-10-07T03:45:31Z
mmm a / modules / nonfree / src / precomp . hpp <nl> ppp b / modules / nonfree / src / precomp . hpp <nl> <nl> # include " opencv2 / imgproc / imgproc . hpp " <nl> # include " opencv2 / core / internal . hpp " <nl> <nl> - # if defined ( HAVE_OPENCV_GPU ) & & ! defined ( DYNAMIC_CUDA_SUPPORT ) <nl> - # include " opencv2 / nonfree / gpu . hpp " <nl> + # include " opencv2 / nonfree / gpu . hpp " <nl> <nl> - # if defined ( HAVE_CUDA ) <nl> - # include " opencv2 / gpu / stream_accessor . hpp " <nl> - # include " opencv2 / gpu / device / common . hpp " <nl> - <nl> - static inline void throw_nogpu ( ) { CV_Error ( CV_StsNotImplemented , " The called functionality is disabled for current build or platform " ) ; } <nl> - # else <nl> - static inline void throw_nogpu ( ) { CV_Error ( CV_GpuNotSupported , " The library is compiled without GPU support " ) ; } <nl> - # endif <nl> + # if defined ( HAVE_CUDA ) & & defined ( HAVE_OPENCV_GPU ) & & ! defined ( DYNAMIC_CUDA_SUPPORT ) <nl> + # include " opencv2 / gpu / stream_accessor . hpp " <nl> + # include " opencv2 / gpu / device / common . hpp " <nl> + static inline void throw_nogpu ( ) { CV_Error ( CV_StsNotImplemented , " The called functionality is disabled for current build or platform " ) ; } <nl> + # else <nl> + static inline void throw_nogpu ( ) { CV_Error ( CV_GpuNotSupported , " The library is compiled without GPU support " ) ; } <nl> # endif <nl> <nl> # ifdef HAVE_OPENCV_OCL <nl>
fix nonfree module compilation without CUDA
opencv/opencv
3e1f74f2cafc5c38d0e64928149edb87d9b28d28
2014-02-07T09:40:37Z
mmm a / tests / life . c <nl> ppp b / tests / life . c <nl> void game ( int w , int h , int i ) <nl> i - - ; <nl> nudge ( univ , w , h ) ; / / keep it interesting for benchmark <nl> } else { <nl> + # if ! __EMSCRIPTEN__ <nl> usleep ( 20000 ) ; <nl> + # endif <nl> show ( univ , w , h ) ; <nl> } <nl> } <nl>
fix life compilation warning
emscripten-core/emscripten
3a89259c2451d8a8b3d5524705f8cf2b09fc2a0c
2014-05-20T17:20:10Z
mmm a / tensorflow / python / distribute / BUILD <nl> ppp b / tensorflow / python / distribute / BUILD <nl> py_test ( <nl> srcs = [ " multi_process_runner_test . py " ] , <nl> python_version = " PY3 " , <nl> shard_count = 12 , <nl> + tags = [ <nl> + " noasan " , <nl> + " nomsan " , <nl> + " notsan " , <nl> + ] , # b / 175904958 <nl> deps = [ <nl> " : multi_process_runner " , <nl> " : multi_worker_test_base " , <nl>
disable failing tsan test
tensorflow/tensorflow
fcef8dd0789a469f61b66b171729a04e3b9f84ac
2020-12-18T00:33:37Z
mmm a / src / allegro / include / allegro / debug . h <nl> ppp b / src / allegro / include / allegro / debug . h <nl> AL_FUNC ( void , register_trace_handler , ( AL_METHOD ( int , handler , ( AL_CONST char * m <nl> <nl> <nl> # ifdef DEBUGMODE <nl> + # ifndef ASSERT <nl> # define ASSERT ( condition ) { if ( ! ( condition ) ) al_assert ( __FILE__ , __LINE__ ) ; } <nl> + # endif <nl> + <nl> + # ifndef TRACE <nl> # define TRACE al_trace <nl> + # endif <nl> # else <nl> + # ifndef ASSERT <nl> # define ASSERT ( condition ) <nl> + # endif <nl> + <nl> + # ifndef TRACE <nl> # define TRACE 1 ? ( void ) 0 : al_trace <nl> + # endif TRACE <nl> # endif <nl> <nl> # ifdef __cplusplus <nl>
Don't redefine ASSERT/TRACE in allegro/debug.h
aseprite/aseprite
35be72833b46d8a5a16ceff9b0ae3c0fa8224423
2015-02-15T01:09:07Z
mmm a / tensorflow / compiler / xla / service / cpu / dot_op_emitter . cc <nl> ppp b / tensorflow / compiler / xla / service / cpu / dot_op_emitter . cc <nl> DotOpEmitter : : MatMultDims DotOpEmitter : : GetMatMultDims ( ) const { <nl> absl : : optional < int64 > ProfitableToMakeDotOperandColumnMajor ( <nl> const HloInstruction & hlo ) { <nl> if ( hlo . opcode ( ) = = HloOpcode : : kDot & & hlo . shape ( ) . dimensions_size ( ) < = 1 ) { <nl> - if ( hlo . dot_dimension_numbers ( ) . rhs_contracting_dimensions ( 0 ) = = 0 ) { <nl> - return 1 ; <nl> + if ( hlo . operand ( 0 ) - > shape ( ) . rank ( ) ! = 1 | | <nl> + hlo . dot_dimension_numbers ( ) . rhs_contracting_dimensions ( 0 ) ! = 0 ) { <nl> + return { } ; <nl> } <nl> - return { } ; <nl> + <nl> + / / Don ' t bother if the other operand is tiny , switching to column major <nl> + / / wouldn ' t use tiling . <nl> + constexpr int kColumnMajorThresholdInBytes = 32 ; <nl> + int64 lhs_size = <nl> + ShapeUtil : : ByteSizeOfPrimitiveType ( hlo . shape ( ) . element_type ( ) ) * <nl> + ShapeUtil : : ElementsIn ( hlo . operand ( 0 ) - > shape ( ) ) ; <nl> + if ( lhs_size < kColumnMajorThresholdInBytes ) { <nl> + return { } ; <nl> + } <nl> + <nl> + return 1 ; <nl> } <nl> <nl> if ( hlo . IsOutputFusion ( ) ) { <nl>
[XLA:CPU] Don't switch vector-matrix dots to column major if the vector is tiny
tensorflow/tensorflow
7881830752c700eee5abdb6ca5bacd90ea6fa462
2019-06-14T17:54:34Z
mmm a / dbms / include / DB / Columns / ColumnAggregateFunction . h <nl> ppp b / dbms / include / DB / Columns / ColumnAggregateFunction . h <nl> class ColumnAggregateFunction : public ColumnVectorBase < AggregateDataPtr > <nl> func = func_ ; <nl> } <nl> <nl> - AggregateFunctionPtr getAggregateFunction ( ) <nl> - { <nl> - return func ; <nl> - } <nl> + AggregateFunctionPtr getAggregateFunction ( ) { return func ; } <nl> + AggregateFunctionPtr getAggregateFunction ( ) const { return func ; } <nl> <nl> / / / Захватить владение ареной . <nl> void addArena ( ArenaPtr arena_ ) <nl> mmm a / dbms / include / DB / DataStreams / TotalsHavingBlockInputStream . h <nl> ppp b / dbms / include / DB / DataStreams / TotalsHavingBlockInputStream . h <nl> class TotalsHavingBlockInputStream : public IProfilingBlockInputStream <nl> return res . str ( ) ; <nl> } <nl> <nl> - const Block & getTotals ( ) <nl> - { <nl> - if ( totals & & expression ) <nl> - expression - > execute ( totals ) ; <nl> - <nl> - return totals ; <nl> - } <nl> + const Block & getTotals ( ) ; <nl> <nl> protected : <nl> Block readImpl ( ) ; <nl> mmm a / dbms / src / DataStreams / TotalsHavingBlockInputStream . cpp <nl> ppp b / dbms / src / DataStreams / TotalsHavingBlockInputStream . cpp <nl> static void finalize ( Block & block ) <nl> } <nl> } <nl> <nl> + <nl> + const Block & TotalsHavingBlockInputStream : : getTotals ( ) <nl> + { <nl> + if ( ! totals ) <nl> + { <nl> + / * * Если totals_mode = = AFTER_HAVING_AUTO , нужно решить , добавлять ли в TOTALS агрегаты для строк , <nl> + * не прошедших max_rows_to_group_by . <nl> + * / <nl> + if ( overflow_aggregates & & static_cast < float > ( passed_keys ) / total_keys > = auto_include_threshold ) <nl> + addToTotals ( current_totals , overflow_aggregates , nullptr ) ; <nl> + <nl> + finalize ( current_totals ) ; <nl> + totals = current_totals ; <nl> + } <nl> + <nl> + if ( totals & & expression ) <nl> + expression - > execute ( totals ) ; <nl> + <nl> + return totals ; <nl> + } <nl> + <nl> + <nl> Block TotalsHavingBlockInputStream : : readImpl ( ) <nl> { <nl> Block finalized ; <nl> Block TotalsHavingBlockInputStream : : readImpl ( ) <nl> block = children [ 0 ] - > read ( ) ; <nl> <nl> if ( ! block ) <nl> - { <nl> - / * * Если totals_mode = = AFTER_HAVING_AUTO , нужно решить , добавлять ли в TOTALS агрегаты для строк , <nl> - * не прошедших max_rows_to_group_by . <nl> - * / <nl> - if ( overflow_aggregates & & static_cast < float > ( passed_keys ) / total_keys > = auto_include_threshold ) <nl> - addToTotals ( current_totals , overflow_aggregates , nullptr ) ; <nl> - finalize ( current_totals ) ; <nl> - totals = current_totals ; <nl> return finalized ; <nl> - } <nl> <nl> finalized = block ; <nl> finalize ( finalized ) ; <nl> Block TotalsHavingBlockInputStream : : readImpl ( ) <nl> <nl> if ( filter_column_name . empty ( ) | | totals_mode = = TotalsMode : : BEFORE_HAVING ) <nl> { <nl> - / * * Включая особую нулевую строку , если overflow_row = true . <nl> - * Предполагается , что если totals_mode = AFTER_HAVING_EXCLUSIVE , нам эту строку не дадут . <nl> + / * * Включая особую нулевую строку , если overflow_row = = true . <nl> + * Предполагается , что если totals_mode = = AFTER_HAVING_EXCLUSIVE , нам эту строку не дадут . 
<nl> * / <nl> addToTotals ( current_totals , block , nullptr ) ; <nl> } <nl> Block TotalsHavingBlockInputStream : : readImpl ( ) <nl> } <nl> } <nl> <nl> - void TotalsHavingBlockInputStream : : addToTotals ( Block & totals , Block & block , const IColumn : : Filter * filter , <nl> - size_t rows ) <nl> + void TotalsHavingBlockInputStream : : addToTotals ( Block & totals , Block & block , const IColumn : : Filter * filter , size_t rows ) <nl> { <nl> bool init = ! totals ; <nl> <nl> void TotalsHavingBlockInputStream : : addToTotals ( Block & totals , Block & block , co <nl> <nl> for ( size_t i = 0 ; i < block . columns ( ) ; + + i ) <nl> { <nl> - ColumnWithNameAndType & current = block . getByPosition ( i ) ; <nl> - ColumnAggregateFunction * column = <nl> - dynamic_cast < ColumnAggregateFunction * > ( & * current . column ) ; <nl> + const ColumnWithNameAndType & current = block . getByPosition ( i ) ; <nl> + const ColumnAggregateFunction * column = dynamic_cast < const ColumnAggregateFunction * > ( & * current . column ) ; <nl> <nl> if ( ! column ) <nl> { <nl> void TotalsHavingBlockInputStream : : addToTotals ( Block & totals , Block & block , co <nl> data = target - > getData ( ) [ 0 ] ; <nl> } <nl> <nl> - ColumnAggregateFunction : : Container_t & vec = column - > getData ( ) ; <nl> + const ColumnAggregateFunction : : Container_t & vec = column - > getData ( ) ; <nl> size_t size = std : : min ( vec . size ( ) , rows ) ; <nl> <nl> if ( filter ) <nl> { <nl> for ( size_t j = 0 ; j < size ; + + j ) <nl> - { <nl> if ( ( * filter ) [ j ] ) <nl> function - > merge ( data , vec [ j ] ) ; <nl> - } <nl> } <nl> else <nl> { <nl> for ( size_t j = 0 ; j < size ; + + j ) <nl> - { <nl> function - > merge ( data , vec [ j ] ) ; <nl> - } <nl> } <nl> } <nl> } <nl>
dbms: fixed WITH TOTALS and LIMIT/DISTINCT [#METR-10705].
ClickHouse/ClickHouse
554d329064f7c2fbdd3099c472a874199ed62e6a
2014-04-21T16:09:04Z
mmm a / xbmc / ApplicationMessenger . cpp <nl> ppp b / xbmc / ApplicationMessenger . cpp <nl> <nl> # include " guilib / TextureManager . h " <nl> # include " PlayListPlayer . h " <nl> # include " Util . h " <nl> - # include " SectionLoader . h " <nl> # ifdef HAS_PYTHON <nl> # include " interfaces / python / XBPython . h " <nl> # endif <nl> case TMSG_POWERDOWN : <nl> # ifdef HAS_HTTPAPI <nl> if ( ! m_pXbmcHttp ) <nl> { <nl> - CSectionLoader : : Load ( " LIBHTTP " ) ; <nl> m_pXbmcHttp = new CXbmcHttp ( ) ; <nl> } <nl> switch ( m_pXbmcHttp - > xbmcCommand ( pMsg - > strParam ) ) <nl> mmm a / xbmc / SectionLoader . cpp <nl> ppp b / xbmc / SectionLoader . cpp <nl> CSectionLoader : : ~ CSectionLoader ( void ) <nl> UnloadAll ( ) ; <nl> } <nl> <nl> - bool CSectionLoader : : IsLoaded ( const CStdString & strSection ) <nl> - { <nl> - CSingleLock lock ( g_sectionLoader . m_critSection ) ; <nl> - <nl> - for ( int i = 0 ; i < ( int ) g_sectionLoader . m_vecLoadedSections . size ( ) ; + + i ) <nl> - { <nl> - CSection & section = g_sectionLoader . m_vecLoadedSections [ i ] ; <nl> - if ( section . m_strSectionName = = strSection & & section . m_lReferenceCount > 0 ) return true ; <nl> - } <nl> - return false ; <nl> - } <nl> - <nl> - bool CSectionLoader : : Load ( const CStdString & strSection ) <nl> - { <nl> - CSingleLock lock ( g_sectionLoader . m_critSection ) ; <nl> - <nl> - for ( int i = 0 ; i < ( int ) g_sectionLoader . m_vecLoadedSections . size ( ) ; + + i ) <nl> - { <nl> - CSection & section = g_sectionLoader . m_vecLoadedSections [ i ] ; <nl> - if ( section . m_strSectionName = = strSection ) <nl> - { <nl> - <nl> - # ifdef LOGALL <nl> - CLog : : Log ( LOGDEBUG , " SECTION : LoadSection ( % s ) count : % i \ n " , strSection . c_str ( ) , section . m_lReferenceCount ) ; <nl> - # endif <nl> - <nl> - section . m_lReferenceCount + + ; <nl> - return true ; <nl> - } <nl> - } <nl> - <nl> - # ifdef HAS_SECTIONS <nl> - if ( NULL = = XLoadSection ( strSection . c_str ( ) ) ) <nl> - { <nl> - CLog : : Log ( LOGDEBUG , " SECTION : LoadSection ( % s ) load failed ! ! \ n " , strSection . c_str ( ) ) ; <nl> - return false ; <nl> - } <nl> - HANDLE hHandle = XGetSectionHandle ( strSection . c_str ( ) ) ; <nl> - <nl> - CLog : : Log ( LOGDEBUG , " SECTION : Section % s loaded count : 1 size : % i \ n " , strSection . c_str ( ) , XGetSectionSize ( hHandle ) ) ; <nl> - # endif <nl> - <nl> - CSection newSection ; <nl> - newSection . m_strSectionName = strSection ; <nl> - newSection . m_lReferenceCount = 1 ; <nl> - g_sectionLoader . m_vecLoadedSections . push_back ( newSection ) ; <nl> - return true ; <nl> - } <nl> - <nl> - void CSectionLoader : : Unload ( const CStdString & strSection ) <nl> - { <nl> - CSingleLock lock ( g_sectionLoader . m_critSection ) ; <nl> - if ( ! CSectionLoader : : IsLoaded ( strSection ) ) return ; <nl> - <nl> - ivecLoadedSections i ; <nl> - i = g_sectionLoader . m_vecLoadedSections . begin ( ) ; <nl> - while ( i ! = g_sectionLoader . m_vecLoadedSections . end ( ) ) <nl> - { <nl> - CSection & section = * i ; <nl> - if ( section . m_strSectionName = = strSection ) <nl> - { <nl> - # ifdef LOGALL <nl> - CLog : : Log ( LOGDEBUG , " SECTION : FreeSection ( % s ) count : % i \ n " , strSection . c_str ( ) , section . m_lReferenceCount ) ; <nl> - # endif <nl> - section . m_lReferenceCount - - ; <nl> - if ( 0 = = section . m_lReferenceCount ) <nl> - { <nl> - section . 
m_unloadDelayStartTick = XbmcThreads : : SystemClockMillis ( ) ; <nl> - return ; <nl> - } <nl> - } <nl> - + + i ; <nl> - } <nl> - } <nl> - <nl> LibraryLoader * CSectionLoader : : LoadDLL ( const CStdString & dllname , bool bDelayUnload / * = true * / , bool bLoadSymbols / * = false * / ) <nl> { <nl> CSingleLock lock ( g_sectionLoader . m_critSection ) ; <nl> void CSectionLoader : : UnloadDelayed ( ) <nl> { <nl> CSingleLock lock ( g_sectionLoader . m_critSection ) ; <nl> <nl> - ivecLoadedSections i = g_sectionLoader . m_vecLoadedSections . begin ( ) ; <nl> - while ( i ! = g_sectionLoader . m_vecLoadedSections . end ( ) ) <nl> - { <nl> - CSection & section = * i ; <nl> - if ( section . m_lReferenceCount = = 0 & & XbmcThreads : : SystemClockMillis ( ) - section . m_unloadDelayStartTick > UNLOAD_DELAY ) <nl> - { <nl> - CLog : : Log ( LOGDEBUG , " SECTION : UnloadDelayed ( SECTION : % s ) " , section . m_strSectionName . c_str ( ) ) ; <nl> - # ifdef HAS_SECTIONS <nl> - XFreeSection ( section . m_strSectionName . c_str ( ) ) ; <nl> - # endif <nl> - i = g_sectionLoader . m_vecLoadedSections . erase ( i ) ; <nl> - continue ; <nl> - } <nl> - i + + ; <nl> - } <nl> - <nl> / / check if we can unload any unreferenced dlls <nl> for ( int i = 0 ; i < ( int ) g_sectionLoader . m_vecLoadedDLLs . size ( ) ; + + i ) <nl> { <nl> void CSectionLoader : : UnloadDelayed ( ) <nl> <nl> void CSectionLoader : : UnloadAll ( ) <nl> { <nl> - ivecLoadedSections i ; <nl> - i = g_sectionLoader . m_vecLoadedSections . begin ( ) ; <nl> - while ( i ! = g_sectionLoader . m_vecLoadedSections . end ( ) ) <nl> - { <nl> - CSection & section = * i ; <nl> - / / g_sectionLoader . m_vecLoadedSections . erase ( i ) ; <nl> - CLog : : Log ( LOGDEBUG , " SECTION : UnloadAll ( SECTION : % s ) " , section . m_strSectionName . c_str ( ) ) ; <nl> - # ifdef HAS_SECTIONS <nl> - XFreeSection ( section . m_strSectionName . c_str ( ) ) ; <nl> - # endif <nl> - i = g_sectionLoader . m_vecLoadedSections . erase ( i ) ; <nl> - } <nl> - <nl> / / delete the dll ' s <nl> CSingleLock lock ( g_sectionLoader . m_critSection ) ; <nl> vector < CDll > : : iterator it = g_sectionLoader . m_vecLoadedDLLs . begin ( ) ; <nl> mmm a / xbmc / SectionLoader . h <nl> ppp b / xbmc / SectionLoader . h <nl> class LibraryLoader ; <nl> class CSectionLoader <nl> { <nl> public : <nl> - class CSection <nl> - { <nl> - public : <nl> - CStdString m_strSectionName ; <nl> - long m_lReferenceCount ; <nl> - unsigned int m_unloadDelayStartTick ; <nl> - } ; <nl> class CDll <nl> { <nl> public : <nl> class CSectionLoader <nl> CSectionLoader ( void ) ; <nl> virtual ~ CSectionLoader ( void ) ; <nl> <nl> - static bool IsLoaded ( const CStdString & strSection ) ; <nl> - static bool Load ( const CStdString & strSection ) ; <nl> - static void Unload ( const CStdString & strSection ) ; <nl> static LibraryLoader * LoadDLL ( const CStdString & strSection , bool bDelayUnload = true , bool bLoadSymbols = false ) ; <nl> static void UnloadDLL ( const CStdString & strSection ) ; <nl> static void UnloadDelayed ( ) ; <nl> protected : <nl> - std : : vector < CSection > m_vecLoadedSections ; <nl> - typedef std : : vector < CSection > : : iterator ivecLoadedSections ; <nl> std : : vector < CDll > m_vecLoadedDLLs ; <nl> CCriticalSection m_critSection ; <nl> <nl> mmm a / xbmc / filesystem / DAAPDirectory . cpp <nl> ppp b / xbmc / filesystem / DAAPDirectory . cpp <nl> <nl> # include " DAAPDirectory . h " <nl> # include " music / tags / MusicInfoTag . h " <nl> # include " FileItem . 
h " <nl> - # include " SectionLoader . h " <nl> # include " utils / log . h " <nl> # include " utils / URIUtils . h " <nl> <nl> const char unknownArtistAlbum [ ] = " Unknown " ; <nl> <nl> CDAAPDirectory : : CDAAPDirectory ( void ) <nl> { <nl> - CSectionLoader : : Load ( " LIBXDAAP " ) ; <nl> / / m_currLevel holds where we are in the playlist / artist / album / songs hierarchy ( 0 , 1 , 2 , 3 ) <nl> m_currLevel = - 1 ; <nl> m_thisHost = NULL ; <nl> CDAAPDirectory : : ~ CDAAPDirectory ( void ) <nl> m_artisthead = NULL ; <nl> <nl> m_currentSongItems = NULL ; <nl> - CSectionLoader : : Unload ( " LIBXDAAP " ) ; <nl> } <nl> <nl> bool CDAAPDirectory : : GetDirectory ( const CStdString & strPath , CFileItemList & items ) <nl> mmm a / xbmc / filesystem / DAAPFile . cpp <nl> ppp b / xbmc / filesystem / DAAPFile . cpp <nl> <nl> * / <nl> <nl> # include " DAAPFile . h " <nl> - # include " SectionLoader . h " <nl> # include " threads / SingleLock . h " <nl> # include " utils / log . h " <nl> # include < sys / stat . h > <nl> void CDaapClient : : Release ( ) <nl> <nl> DAAP_SClientHost * CDaapClient : : GetHost ( const CStdString & strHost ) <nl> { <nl> - / / We need this section from now on <nl> - if ( ! CSectionLoader : : IsLoaded ( " LIBXDAAP " ) ) CSectionLoader : : Load ( " LIBXDAAP " ) ; <nl> try <nl> { <nl> <nl> mmm a / xbmc / filesystem / RTVDirectory . cpp <nl> ppp b / xbmc / filesystem / RTVDirectory . cpp <nl> <nl> <nl> # include " RTVDirectory . h " <nl> # include " utils / URIUtils . h " <nl> - # include " SectionLoader . h " <nl> # include " URL . h " <nl> # include " tinyXML / tinyxml . h " <nl> # include " FileItem . h " <nl> extern " C " <nl> <nl> CRTVDirectory : : CRTVDirectory ( void ) <nl> { <nl> - CSectionLoader : : Load ( " LIBRTV " ) ; <nl> } <nl> <nl> CRTVDirectory : : ~ CRTVDirectory ( void ) <nl> { <nl> - CSectionLoader : : Unload ( " LIBRTV " ) ; <nl> } <nl> <nl> / / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> mmm a / xbmc / filesystem / RTVFile . cpp <nl> ppp b / xbmc / filesystem / RTVFile . cpp <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> # include " RTVFile . h " <nl> - # include " SectionLoader . h " <nl> # include " URL . h " <nl> # include " utils / log . h " <nl> # include < errno . h > <nl> using namespace XFILE ; <nl> <nl> CRTVFile : : CRTVFile ( ) <nl> { <nl> - CSectionLoader : : Load ( " LIBRTV " ) ; <nl> m_filePos = 0 ; <nl> m_fileSize = 0 ; <nl> m_bOpened = false ; <nl> CRTVFile : : CRTVFile ( ) <nl> CRTVFile : : ~ CRTVFile ( ) <nl> { <nl> Close ( ) ; <nl> - CSectionLoader : : Unload ( " LIBRTV " ) ; <nl> } <nl> <nl> / / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> mmm a / xbmc / utils / LangCodeExpander . cpp <nl> ppp b / xbmc / utils / LangCodeExpander . cpp <nl> <nl> * / <nl> <nl> # include " LangCodeExpander . h " <nl> - # include " SectionLoader . h " <nl> # include " tinyXML / tinyxml . h " <nl> # include " utils / log . h " <nl> <nl> bool CLangCodeExpander : : LookupInDb ( CStdString & desc , const CStdString & code ) <nl> sCode . TrimRight ( ) ; <nl> if ( sCode . 
length ( ) = = 2 ) <nl> { <nl> - CSectionLoader : : Load ( " LCODE " ) ; <nl> longcode = MAKECODE ( ' \ 0 ' , ' \ 0 ' , sCode [ 0 ] , sCode [ 1 ] ) ; <nl> for ( unsigned int i = 0 ; i < sizeof ( g_iso639_1 ) / sizeof ( LCENTRY ) ; i + + ) <nl> { <nl> if ( g_iso639_1 [ i ] . code = = longcode ) <nl> { <nl> desc = g_iso639_1 [ i ] . name ; <nl> - CSectionLoader : : Unload ( " LCODE " ) ; <nl> return true ; <nl> } <nl> } <nl> - CSectionLoader : : Unload ( " LCODE " ) ; <nl> } <nl> else if ( code . length ( ) = = 3 ) <nl> { <nl> - CSectionLoader : : Load ( " LCODE " ) ; <nl> longcode = MAKECODE ( ' \ 0 ' , sCode [ 0 ] , sCode [ 1 ] , sCode [ 2 ] ) ; <nl> for ( unsigned int i = 0 ; i < sizeof ( g_iso639_2 ) / sizeof ( LCENTRY ) ; i + + ) <nl> { <nl> if ( g_iso639_2 [ i ] . code = = longcode ) <nl> { <nl> desc = g_iso639_2 [ i ] . name ; <nl> - CSectionLoader : : Unload ( " LCODE " ) ; <nl> return true ; <nl> } <nl> } <nl> - CSectionLoader : : Unload ( " LCODE " ) ; <nl> } <nl> return false ; <nl> } <nl>
changed: get rid of old EXE section cruft
xbmc/xbmc
d66af942eb5c401e575024a3c51d55ddfd0392b3
2012-04-30T21:17:14Z
mmm a / platforms / ios / build_framework . py <nl> ppp b / platforms / ios / build_framework . py <nl> def getCMakeArgs ( self , arch , target ) : <nl> <nl> b = iOSBuilder ( args . opencv , args . contrib , args . dynamic , args . bitcodedisabled , args . without , <nl> [ <nl> - ( [ " armv7 " , " arm64 " ] , " iPhoneOS " ) , <nl> + ( [ " armv7s " , " arm64 " ] , " iPhoneOS " ) , <nl> ] if os . environ . get ( ' BUILD_PRECOMMIT ' , None ) else <nl> [ <nl> ( [ " armv7 " , " armv7s " , " arm64 " ] , " iPhoneOS " ) , <nl> mmm a / platforms / ios / cmake / Modules / Platform / iOS . cmake <nl> ppp b / platforms / ios / cmake / Modules / Platform / iOS . cmake <nl> set ( CMAKE_C_FLAGS " $ { CMAKE_C_FLAGS } $ { no_warn } " ) <nl> set ( CMAKE_CXX_FLAGS " $ { CMAKE_CXX_FLAGS } - stdlib = libc + + - fvisibility = hidden - fvisibility - inlines - hidden $ { no_warn } " ) <nl> <nl> set ( CMAKE_CXX_FLAGS_RELEASE " - DNDEBUG - O3 - ffast - math " ) <nl> - if ( NOT IOS_ARCH STREQUAL " armv7 " ) <nl> + if ( NOT IOS_ARCH STREQUAL " armv7 " AND NOT IOS_ARCH STREQUAL " armv7s " ) <nl> set ( CMAKE_CXX_FLAGS_RELEASE " $ { CMAKE_CXX_FLAGS_RELEASE } - fomit - frame - pointer " ) <nl> endif ( ) <nl> <nl>
Merge pull request from alalek:ios_fix_build_warnings
opencv/opencv
ef04ca9e0fdf0962b45ab0cdc5b5f1ceee21cac8
2017-12-06T16:26:51Z
mmm a / tensorflow / contrib / learn / python / learn / learn_io / numpy_io . py <nl> ppp b / tensorflow / contrib / learn / python / learn / learn_io / numpy_io . py <nl> <nl> from tensorflow . python . util . deprecation import deprecated <nl> <nl> <nl> - @ deprecated ( None , ' Use tf . estimator . inputs . numpy_input_fn . ' ) <nl> + @ deprecated ( None , ' Use tf . compat . v1 . estimator . inputs . numpy_input_fn . ' ) <nl> def numpy_input_fn ( x , <nl> y = None , <nl> batch_size = 128 , <nl> mmm a / tensorflow / contrib / learn / python / learn / learn_io / pandas_io . py <nl> ppp b / tensorflow / contrib / learn / python / learn / learn_io / pandas_io . py <nl> <nl> } <nl> <nl> <nl> - @ deprecated ( None , ' Please use tf . estimator . inputs . pandas_input_fn ' ) <nl> + @ deprecated ( None , ' Please use tf . compat . v1 . estimator . inputs . pandas_input_fn ' ) <nl> def pandas_input_fn ( x , <nl> y = None , <nl> batch_size = 128 , <nl>
Replace `tf.estimator.inputs` with `tf.compat.v1.estimator.inputs`
tensorflow/tensorflow
e55a58c9bf6adf07dce074813b54fc5b16c5be1d
2018-11-28T19:46:35Z
mmm a / doc / environment_variables . md <nl> ppp b / doc / environment_variables . md <nl> gRPC environment variables <nl> gRPC C core based implementations ( those contained in this repository ) expose <nl> some configuration as environment variables that can be set . <nl> <nl> + * http_proxy <nl> + The URI of the proxy to use for HTTP CONNECT support . Does not currently <nl> + support username or password information in the URI . <nl> + <nl> * GRPC_ABORT_ON_LEAKS <nl> A debugging aid to cause a call to abort ( ) when gRPC objects are leaked past <nl> grpc_shutdown ( ) . Set to 1 to cause the abort , if unset or 0 it does not <nl>
Document use of ${http_proxy} environment variable.
grpc/grpc
43b8930b19738b7edb51bb58c0f7b8cb41c1cc78
2017-06-15T18:41:28Z
mmm a / third_party / remote_config / remote_platform_configure . bzl <nl> ppp b / third_party / remote_config / remote_platform_configure . bzl <nl> def _remote_platform_configure_impl ( repository_ctx ) : <nl> platform = " linux " <nl> <nl> cpu = " x86_64 " <nl> - if " MACHTYPE " in repository_ctx . os . environ : <nl> - machine_type = repository_ctx . os . environ [ " MACHTYPE " ] <nl> - if ( machine_type . startswith ( " ppc " ) or <nl> - machine_type . startswith ( " powerpc " ) ) : <nl> - cpu = " ppc " <nl> - elif machine_type . startswith ( " s390x " ) : <nl> - cpu = " s390x " <nl> + machine_type = repository_ctx . execute ( [ " bash " , " - c " , " echo $ MACHTYPE " ] ) . stdout <nl> + if ( machine_type . startswith ( " ppc " ) or <nl> + machine_type . startswith ( " powerpc " ) ) : <nl> + cpu = " ppc " <nl> + elif machine_type . startswith ( " s390x " ) : <nl> + cpu = " s390x " <nl> <nl> exec_properties = repository_ctx . attr . platform_exec_properties <nl> <nl>
Get MACHTYPE via bash call instead of relying on the environment.
tensorflow/tensorflow
79f688158813bfba4ee9689e25d5da101b39a413
2020-05-04T12:05:50Z
mmm a / runautoconf <nl> ppp b / runautoconf <nl> aclocal - I config | | bail_out <nl> # mmm Step 2 : <nl> <nl> echo " Running libtoolize " <nl> - libtoolize - f - c | | bail_out <nl> - libtoolize - - automake | | bail_out <nl> + libtoolize - f - c | | glibtoolize - f - c | | bail_out <nl> + libtoolize - - automake | | glibtoolize - - automake | | bail_out <nl> <nl> # mmm Step 3 : Generate config . h . in from : <nl> # . configure . ac ( look for AM_CONFIG_HEADER tag or AC_CONFIG_HEADER tag ) <nl>
macports installs libtoolize as glibtoolize. A cleaner solution can wait for some later date.
tesseract-ocr/tesseract
e7d0029b65adcfc20f90819981be8c421abb0762
2011-08-18T22:02:29Z
mmm a / cocos / scripting / js - bindings / auto / api / jsb_cocos2dx_3d_auto_api . js <nl> ppp b / cocos / scripting / js - bindings / auto / api / jsb_cocos2dx_3d_auto_api . js <nl> blendfunc <nl> { <nl> } , <nl> <nl> + / * * <nl> + * @ method setForce2DQueue <nl> + * @ param { bool } arg0 <nl> + * / <nl> + setForce2DQueue : function ( <nl> + bool <nl> + ) <nl> + { <nl> + } , <nl> + <nl> / * * <nl> * @ method getPrimitiveType <nl> * @ return { unsigned int } <nl> blendfunc <nl> { <nl> } , <nl> <nl> + / * * <nl> + * @ method setForce2DQueue <nl> + * @ param { bool } arg0 <nl> + * / <nl> + setForce2DQueue : function ( <nl> + bool <nl> + ) <nl> + { <nl> + } , <nl> + <nl> / * * <nl> * @ method removeAttachNode <nl> * @ param { String } arg0 <nl> mmm a / cocos / scripting / js - bindings / auto / jsb_cocos2dx_3d_auto . cpp <nl> ppp b / cocos / scripting / js - bindings / auto / jsb_cocos2dx_3d_auto . cpp <nl> bool js_cocos2dx_3d_Mesh_setBlendFunc ( JSContext * cx , uint32_t argc , jsval * vp ) <nl> JS_ReportError ( cx , " js_cocos2dx_3d_Mesh_setBlendFunc : wrong number of arguments : % d , was expecting % d " , argc , 1 ) ; <nl> return false ; <nl> } <nl> + bool js_cocos2dx_3d_Mesh_setForce2DQueue ( JSContext * cx , uint32_t argc , jsval * vp ) <nl> + { <nl> + JS : : CallArgs args = JS : : CallArgsFromVp ( argc , vp ) ; <nl> + bool ok = true ; <nl> + JS : : RootedObject obj ( cx , args . thisv ( ) . toObjectOrNull ( ) ) ; <nl> + js_proxy_t * proxy = jsb_get_js_proxy ( obj ) ; <nl> + cocos2d : : Mesh * cobj = ( cocos2d : : Mesh * ) ( proxy ? proxy - > ptr : NULL ) ; <nl> + JSB_PRECONDITION2 ( cobj , cx , false , " js_cocos2dx_3d_Mesh_setForce2DQueue : Invalid Native Object " ) ; <nl> + if ( argc = = 1 ) { <nl> + bool arg0 ; <nl> + arg0 = JS : : ToBoolean ( args . get ( 0 ) ) ; <nl> + JSB_PRECONDITION2 ( ok , cx , false , " js_cocos2dx_3d_Mesh_setForce2DQueue : Error processing arguments " ) ; <nl> + cobj - > setForce2DQueue ( arg0 ) ; <nl> + args . rval ( ) . 
setUndefined ( ) ; <nl> + return true ; <nl> + } <nl> + <nl> + JS_ReportError ( cx , " js_cocos2dx_3d_Mesh_setForce2DQueue : wrong number of arguments : % d , was expecting % d " , argc , 1 ) ; <nl> + return false ; <nl> + } <nl> bool js_cocos2dx_3d_Mesh_getPrimitiveType ( JSContext * cx , uint32_t argc , jsval * vp ) <nl> { <nl> JS : : CallArgs args = JS : : CallArgsFromVp ( argc , vp ) ; <nl> void js_register_cocos2dx_3d_Mesh ( JSContext * cx , JS : : HandleObject global ) { <nl> JS_FN ( " setMeshIndexData " , js_cocos2dx_3d_Mesh_setMeshIndexData , 1 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> JS_FN ( " getMeshVertexAttribCount " , js_cocos2dx_3d_Mesh_getMeshVertexAttribCount , 0 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> JS_FN ( " setBlendFunc " , js_cocos2dx_3d_Mesh_setBlendFunc , 1 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> + JS_FN ( " setForce2DQueue " , js_cocos2dx_3d_Mesh_setForce2DQueue , 1 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> JS_FN ( " getPrimitiveType " , js_cocos2dx_3d_Mesh_getPrimitiveType , 0 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> JS_FN ( " setSkin " , js_cocos2dx_3d_Mesh_setSkin , 1 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> JS_FN ( " isVisible " , js_cocos2dx_3d_Mesh_isVisible , 0 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> bool js_cocos2dx_3d_Sprite3D_setBlendFunc ( JSContext * cx , uint32_t argc , jsval * v <nl> JS_ReportError ( cx , " js_cocos2dx_3d_Sprite3D_setBlendFunc : wrong number of arguments : % d , was expecting % d " , argc , 1 ) ; <nl> return false ; <nl> } <nl> + bool js_cocos2dx_3d_Sprite3D_setForce2DQueue ( JSContext * cx , uint32_t argc , jsval * vp ) <nl> + { <nl> + JS : : CallArgs args = JS : : CallArgsFromVp ( argc , vp ) ; <nl> + bool ok = true ; <nl> + JS : : RootedObject obj ( cx , args . thisv ( ) . toObjectOrNull ( ) ) ; <nl> + js_proxy_t * proxy = jsb_get_js_proxy ( obj ) ; <nl> + cocos2d : : Sprite3D * cobj = ( cocos2d : : Sprite3D * ) ( proxy ? proxy - > ptr : NULL ) ; <nl> + JSB_PRECONDITION2 ( cobj , cx , false , " js_cocos2dx_3d_Sprite3D_setForce2DQueue : Invalid Native Object " ) ; <nl> + if ( argc = = 1 ) { <nl> + bool arg0 ; <nl> + arg0 = JS : : ToBoolean ( args . get ( 0 ) ) ; <nl> + JSB_PRECONDITION2 ( ok , cx , false , " js_cocos2dx_3d_Sprite3D_setForce2DQueue : Error processing arguments " ) ; <nl> + cobj - > setForce2DQueue ( arg0 ) ; <nl> + args . rval ( ) . 
setUndefined ( ) ; <nl> + return true ; <nl> + } <nl> + <nl> + JS_ReportError ( cx , " js_cocos2dx_3d_Sprite3D_setForce2DQueue : wrong number of arguments : % d , was expecting % d " , argc , 1 ) ; <nl> + return false ; <nl> + } <nl> bool js_cocos2dx_3d_Sprite3D_removeAttachNode ( JSContext * cx , uint32_t argc , jsval * vp ) <nl> { <nl> JS : : CallArgs args = JS : : CallArgsFromVp ( argc , vp ) ; <nl> void js_register_cocos2dx_3d_Sprite3D ( JSContext * cx , JS : : HandleObject global ) { <nl> JS_FN ( " getAttachNode " , js_cocos2dx_3d_Sprite3D_getAttachNode , 1 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> JS_FN ( " initWithFile " , js_cocos2dx_3d_Sprite3D_initWithFile , 1 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> JS_FN ( " setBlendFunc " , js_cocos2dx_3d_Sprite3D_setBlendFunc , 1 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> + JS_FN ( " setForce2DQueue " , js_cocos2dx_3d_Sprite3D_setForce2DQueue , 1 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> JS_FN ( " removeAttachNode " , js_cocos2dx_3d_Sprite3D_removeAttachNode , 1 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> JS_FN ( " getSkeleton " , js_cocos2dx_3d_Sprite3D_getSkeleton , 0 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> JS_FN ( " setForceDepthWrite " , js_cocos2dx_3d_Sprite3D_setForceDepthWrite , 1 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> mmm a / cocos / scripting / js - bindings / auto / jsb_cocos2dx_3d_auto . hpp <nl> ppp b / cocos / scripting / js - bindings / auto / jsb_cocos2dx_3d_auto . hpp <nl> bool js_cocos2dx_3d_Mesh_getIndexCount ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> bool js_cocos2dx_3d_Mesh_setMeshIndexData ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> bool js_cocos2dx_3d_Mesh_getMeshVertexAttribCount ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> bool js_cocos2dx_3d_Mesh_setBlendFunc ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> + bool js_cocos2dx_3d_Mesh_setForce2DQueue ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> bool js_cocos2dx_3d_Mesh_getPrimitiveType ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> bool js_cocos2dx_3d_Mesh_setSkin ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> bool js_cocos2dx_3d_Mesh_isVisible ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> bool js_cocos2dx_3d_Sprite3D_initFrom ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> bool js_cocos2dx_3d_Sprite3D_getAttachNode ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> bool js_cocos2dx_3d_Sprite3D_initWithFile ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> bool js_cocos2dx_3d_Sprite3D_setBlendFunc ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> + bool js_cocos2dx_3d_Sprite3D_setForce2DQueue ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> bool js_cocos2dx_3d_Sprite3D_removeAttachNode ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> bool js_cocos2dx_3d_Sprite3D_getSkeleton ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> bool js_cocos2dx_3d_Sprite3D_setForceDepthWrite ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> mmm a / cocos / scripting / lua - bindings / auto / api / Mesh . lua <nl> ppp b / cocos / scripting / lua - bindings / auto / api / Mesh . lua <nl> <nl> - - @ param # cc . BlendFunc blendFunc <nl> - - @ return Mesh # Mesh self ( return value : cc . 
Mesh ) <nl> <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + - - force set this Sprite3D to 2D render queue <nl> + - - @ function [ parent = # Mesh ] setForce2DQueue <nl> + - - @ param self <nl> + - - @ param # bool force2D <nl> + - - @ return Mesh # Mesh self ( return value : cc . Mesh ) <nl> + <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - - skin setter <nl> - - @ function [ parent = # Mesh ] setSkin <nl> mmm a / cocos / scripting / lua - bindings / auto / api / Sprite3D . lua <nl> ppp b / cocos / scripting / lua - bindings / auto / api / Sprite3D . lua <nl> <nl> - - @ param # cc . BlendFunc blendFunc <nl> - - @ return Sprite3D # Sprite3D self ( return value : cc . Sprite3D ) <nl> <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + - - force set this Sprite3D to 2D render queue <nl> + - - @ function [ parent = # Sprite3D ] setForce2DQueue <nl> + - - @ param self <nl> + - - @ param # bool force2D <nl> + - - @ return Sprite3D # Sprite3D self ( return value : cc . Sprite3D ) <nl> + <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - - remove attach node <nl> - - @ function [ parent = # Sprite3D ] removeAttachNode <nl> mmm a / cocos / scripting / lua - bindings / auto / lua_cocos2dx_3d_auto . cpp <nl> ppp b / cocos / scripting / lua - bindings / auto / lua_cocos2dx_3d_auto . cpp <nl> int lua_cocos2dx_3d_Mesh_setBlendFunc ( lua_State * tolua_S ) <nl> <nl> return 0 ; <nl> } <nl> + int lua_cocos2dx_3d_Mesh_setForce2DQueue ( lua_State * tolua_S ) <nl> + { <nl> + int argc = 0 ; <nl> + cocos2d : : Mesh * cobj = nullptr ; <nl> + bool ok = true ; <nl> + <nl> + # if COCOS2D_DEBUG > = 1 <nl> + tolua_Error tolua_err ; <nl> + # endif <nl> + <nl> + <nl> + # if COCOS2D_DEBUG > = 1 <nl> + if ( ! tolua_isusertype ( tolua_S , 1 , " cc . Mesh " , 0 , & tolua_err ) ) goto tolua_lerror ; <nl> + # endif <nl> + <nl> + cobj = ( cocos2d : : Mesh * ) tolua_tousertype ( tolua_S , 1 , 0 ) ; <nl> + <nl> + # if COCOS2D_DEBUG > = 1 <nl> + if ( ! cobj ) <nl> + { <nl> + tolua_error ( tolua_S , " invalid ' cobj ' in function ' lua_cocos2dx_3d_Mesh_setForce2DQueue ' " , nullptr ) ; <nl> + return 0 ; <nl> + } <nl> + # endif <nl> + <nl> + argc = lua_gettop ( tolua_S ) - 1 ; <nl> + if ( argc = = 1 ) <nl> + { <nl> + bool arg0 ; <nl> + <nl> + ok & = luaval_to_boolean ( tolua_S , 2 , & arg0 , " cc . Mesh : setForce2DQueue " ) ; <nl> + if ( ! ok ) <nl> + { <nl> + tolua_error ( tolua_S , " invalid arguments in function ' lua_cocos2dx_3d_Mesh_setForce2DQueue ' " , nullptr ) ; <nl> + return 0 ; <nl> + } <nl> + cobj - > setForce2DQueue ( arg0 ) ; <nl> + lua_settop ( tolua_S , 1 ) ; <nl> + return 1 ; <nl> + } <nl> + luaL_error ( tolua_S , " % s has wrong number of arguments : % d , was expecting % d \ n " , " cc . Mesh : setForce2DQueue " , argc , 1 ) ; <nl> + return 0 ; <nl> + <nl> + # if COCOS2D_DEBUG > = 1 <nl> + tolua_lerror : <nl> + tolua_error ( tolua_S , " # ferror in function ' lua_cocos2dx_3d_Mesh_setForce2DQueue ' . 
" , & tolua_err ) ; <nl> + # endif <nl> + <nl> + return 0 ; <nl> + } <nl> int lua_cocos2dx_3d_Mesh_setSkin ( lua_State * tolua_S ) <nl> { <nl> int argc = 0 ; <nl> int lua_register_cocos2dx_3d_Mesh ( lua_State * tolua_S ) <nl> tolua_function ( tolua_S , " setMeshIndexData " , lua_cocos2dx_3d_Mesh_setMeshIndexData ) ; <nl> tolua_function ( tolua_S , " getMeshVertexAttribCount " , lua_cocos2dx_3d_Mesh_getMeshVertexAttribCount ) ; <nl> tolua_function ( tolua_S , " setBlendFunc " , lua_cocos2dx_3d_Mesh_setBlendFunc ) ; <nl> + tolua_function ( tolua_S , " setForce2DQueue " , lua_cocos2dx_3d_Mesh_setForce2DQueue ) ; <nl> tolua_function ( tolua_S , " setSkin " , lua_cocos2dx_3d_Mesh_setSkin ) ; <nl> tolua_function ( tolua_S , " isVisible " , lua_cocos2dx_3d_Mesh_isVisible ) ; <nl> tolua_function ( tolua_S , " setGLProgramState " , lua_cocos2dx_3d_Mesh_setGLProgramState ) ; <nl> int lua_cocos2dx_3d_Sprite3D_setBlendFunc ( lua_State * tolua_S ) <nl> <nl> return 0 ; <nl> } <nl> + int lua_cocos2dx_3d_Sprite3D_setForce2DQueue ( lua_State * tolua_S ) <nl> + { <nl> + int argc = 0 ; <nl> + cocos2d : : Sprite3D * cobj = nullptr ; <nl> + bool ok = true ; <nl> + <nl> + # if COCOS2D_DEBUG > = 1 <nl> + tolua_Error tolua_err ; <nl> + # endif <nl> + <nl> + <nl> + # if COCOS2D_DEBUG > = 1 <nl> + if ( ! tolua_isusertype ( tolua_S , 1 , " cc . Sprite3D " , 0 , & tolua_err ) ) goto tolua_lerror ; <nl> + # endif <nl> + <nl> + cobj = ( cocos2d : : Sprite3D * ) tolua_tousertype ( tolua_S , 1 , 0 ) ; <nl> + <nl> + # if COCOS2D_DEBUG > = 1 <nl> + if ( ! cobj ) <nl> + { <nl> + tolua_error ( tolua_S , " invalid ' cobj ' in function ' lua_cocos2dx_3d_Sprite3D_setForce2DQueue ' " , nullptr ) ; <nl> + return 0 ; <nl> + } <nl> + # endif <nl> + <nl> + argc = lua_gettop ( tolua_S ) - 1 ; <nl> + if ( argc = = 1 ) <nl> + { <nl> + bool arg0 ; <nl> + <nl> + ok & = luaval_to_boolean ( tolua_S , 2 , & arg0 , " cc . Sprite3D : setForce2DQueue " ) ; <nl> + if ( ! ok ) <nl> + { <nl> + tolua_error ( tolua_S , " invalid arguments in function ' lua_cocos2dx_3d_Sprite3D_setForce2DQueue ' " , nullptr ) ; <nl> + return 0 ; <nl> + } <nl> + cobj - > setForce2DQueue ( arg0 ) ; <nl> + lua_settop ( tolua_S , 1 ) ; <nl> + return 1 ; <nl> + } <nl> + luaL_error ( tolua_S , " % s has wrong number of arguments : % d , was expecting % d \ n " , " cc . Sprite3D : setForce2DQueue " , argc , 1 ) ; <nl> + return 0 ; <nl> + <nl> + # if COCOS2D_DEBUG > = 1 <nl> + tolua_lerror : <nl> + tolua_error ( tolua_S , " # ferror in function ' lua_cocos2dx_3d_Sprite3D_setForce2DQueue ' . " , & tolua_err ) ; <nl> + # endif <nl> + <nl> + return 0 ; <nl> + } <nl> int lua_cocos2dx_3d_Sprite3D_removeAttachNode ( lua_State * tolua_S ) <nl> { <nl> int argc = 0 ; <nl> int lua_register_cocos2dx_3d_Sprite3D ( lua_State * tolua_S ) <nl> tolua_function ( tolua_S , " setLightMask " , lua_cocos2dx_3d_Sprite3D_setLightMask ) ; <nl> tolua_function ( tolua_S , " getAttachNode " , lua_cocos2dx_3d_Sprite3D_getAttachNode ) ; <nl> tolua_function ( tolua_S , " setBlendFunc " , lua_cocos2dx_3d_Sprite3D_setBlendFunc ) ; <nl> + tolua_function ( tolua_S , " setForce2DQueue " , lua_cocos2dx_3d_Sprite3D_setForce2DQueue ) ; <nl> tolua_function ( tolua_S , " removeAttachNode " , lua_cocos2dx_3d_Sprite3D_removeAttachNode ) ; <nl> tolua_function ( tolua_S , " getSkeleton " , lua_cocos2dx_3d_Sprite3D_getSkeleton ) ; <nl> tolua_function ( tolua_S , " setForceDepthWrite " , lua_cocos2dx_3d_Sprite3D_setForceDepthWrite ) ; <nl> mmm a / cocos / scripting / lua - bindings / auto / lua_cocos2dx_3d_auto . 
hpp <nl> ppp b / cocos / scripting / lua - bindings / auto / lua_cocos2dx_3d_auto . hpp <nl> int register_all_cocos2dx_3d ( lua_State * tolua_S ) ; <nl> <nl> <nl> <nl> + <nl> + <nl> <nl> <nl> <nl>
[ci skip] [AUTO]: updating luabinding & jsbinding automatically
cocos2d/cocos2d-x
bf3c97053425d64e078f60ed793b2a94d60c6d31
2015-07-01T14:12:18Z
mmm a / arangod / Agency / Agent . cpp <nl> ppp b / arangod / Agency / Agent . cpp <nl> priv_rpc_ret_t Agent : : sendAppendEntriesRPC ( <nl> } <nl> <nl> <nl> + <nl> + bool Agent : : activateSingle ( ) { <nl> + MUTEX_LOCKER ( mutexLocker , _ioLock ) ; <nl> + if ( _config . active . empty ( ) ) { <nl> + _config . active . push_back ( _config . id ) ; <nl> + return _state . persistActiveAgents ( _config . id ) ; <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + <nl> / / / Load persistent state <nl> bool Agent : : load ( ) { <nl> <nl> bool Agent : : load ( ) { <nl> if ( size ( ) > 1 ) { <nl> inception ( ) ; <nl> } else { <nl> - MUTEX_LOCKER ( mutexLocker , _cfgLock ) ; <nl> - _config . active . push_back ( _config . id ) ; <nl> + MUTEX_LOCKER ( mutexLocker , _cfgLock ) ; <nl> + activateSingle ( ) ; <nl> } <nl> <nl> LOG_TOPIC ( DEBUG , Logger : : AGENCY ) < < " Reassembling spearhead and read stores . " ; <nl> void Agent : : gossip ( ) { <nl> arangodb : : ClusterComm : : instance ( ) - > asyncRequest ( <nl> " 1 " , 1 , agent . second , GeneralRequest : : RequestType : : POST , path , <nl> std : : make_shared < std : : string > ( word - > toString ( ) ) , headerFields , <nl> - std : : make_shared < GossipCallback > ( ) , 1 . 0 , true ) ; <nl> + std : : make_shared < GossipCallback > ( this , agent . first ) , 1 . 0 , true ) ; <nl> } <nl> <nl> } <nl> inline static query_t getResponse ( <nl> bool Agent : : persistedAgents ( ) { <nl> <nl> std : : vector < std : : string > active ; <nl> + std : : map < std : : string , std : : string > pool ; <nl> + query_t activeBuilder = nullptr ; <nl> { <nl> MUTEX_LOCKER ( mutexLocker , _cfgLock ) ; <nl> active = _config . active ; <nl> + pool = _config . pool ; <nl> + activeBuilder = _config . activeToBuilder ( ) ; <nl> + } <nl> + <nl> + if ( active . empty ( ) ) { <nl> + return false ; <nl> } <nl> <nl> / / 1 min timeout <nl> bool Agent : : persistedAgents ( ) { <nl> <nl> auto const & it = find ( active . begin ( ) , active . end ( ) , _config . id ) ; <nl> auto start = std : : chrono : : system_clock : : now ( ) ; <nl> + std : : string const path = " / _api / agency / activeAgents " ; <nl> <nl> - if ( it ! = active . end ( ) ) { <nl> + if ( it ! = active . end ( ) ) { / / We used to be active agent <nl> <nl> { <nl> MUTEX_LOCKER ( mutexLocker , _cfgLock ) ; <nl> _serveActiveAgent = true ; / / Serve / _api / agency / activeAgents <nl> } <nl> + <nl> + std : : map < std : : string , bool > consens ; <nl> <nl> while ( true ) { <nl> <nl> + / / contact others and count how many succeeded <nl> + for ( auto const & agent : active ) { <nl> + / * std : : unique_ptr < ClusterCommResult > res = <nl> + ClusterComm : : instance ( ) - > syncRequest ( <nl> + " 1 " , 1 , pool . at ( agent ) , GeneralRequest : : RequestType : : POST , path , <nl> + std : : make_shared < std : : string > ( word - > toString ( ) ) , headerFields , <nl> + std : : make_shared < GossipCallback > ( ) , 1 . 0 , true , 0 . 
5 ) ; <nl> + if ( res - > status = = CL_COMM_SENT ) { <nl> + if ( res - > result - > getHttpReturnCode ( ) = = 200 ) { <nl> + consens [ agent ] = true ; <nl> + } else if ( res - > result - > getHttpReturnCode ( ) = = 428 ) { <nl> + LOG_TOPIC ( DEBUG , Logger : : AGENCY ) < < <nl> + " Local information on agency is rejected by " < < agent ; <nl> + } else if ( res - > result - > getHttpReturnCode ( ) = = 409 ) { <nl> + LOG_TOPIC ( DEBUG , Logger : : AGENCY ) < < <nl> + " I am no longer active agent according to " < < agent ; <nl> + } <nl> + } * / <nl> + 0 ; <nl> + } <nl> + <nl> + / / Collect what we know . Act accordingly . <nl> + <nl> + / / timeout : clear list of active and give up <nl> if ( std : : chrono : : system_clock : : now ( ) - start > timeout ) { <nl> { <nl> MUTEX_LOCKER ( mutexLocker , _cfgLock ) ; <nl> _config . active . clear ( ) ; <nl> } <nl> - return false ; <nl> - } <nl> + <nl> + } <nl> + <nl> } <nl> <nl> { <nl> mmm a / arangod / Agency / Agent . h <nl> ppp b / arangod / Agency / Agent . h <nl> class Agent : public arangodb : : Thread { <nl> <nl> <nl> private : <nl> + <nl> + / / / @ brief Activate this agent in single agent mode . <nl> + bool activateSingle ( ) ; <nl> + <nl> + / / / @ brief Assignment of persisted state <nl> Agent & operator = ( VPackSlice const & ) ; <nl> <nl> / / / @ brief Get current term <nl> class Agent : public arangodb : : Thread { <nl> / / std : : vector < index_t > _confirmed ; <nl> std : : map < std : : string , index_t > _confirmed ; <nl> std : : map < std : : string , index_t > _lastHighest ; <nl> + <nl> std : : map < std : : string , TimePoint > _lastSent ; <nl> arangodb : : Mutex _ioLock ; / * * < @ brief Read / Write lock * / <nl> mutable arangodb : : Mutex _cfgLock ; / * * < @ brief configuration gossip lock * / <nl> class Agent : public arangodb : : Thread { <nl> <nl> / / / @ brief Next compaction after <nl> arangodb : : consensus : : index_t _nextCompationAfter ; <nl> + <nl> + std : : map < std : : string , bool > _gossipTmp ; <nl> + <nl> } ; <nl> <nl> } <nl> mmm a / arangod / Agency / AgentCallback . h <nl> ppp b / arangod / Agency / AgentCallback . h <nl> namespace consensus { <nl> class Agent ; <nl> <nl> class AgentCallback : public arangodb : : ClusterCommCallback { <nl> - public : <nl> + public : <nl> AgentCallback ( ) ; <nl> <nl> AgentCallback ( Agent * , arangodb : : consensus : : id_t , index_t ) ; <nl> class AgentCallback : public arangodb : : ClusterCommCallback { <nl> <nl> void shutdown ( ) ; <nl> <nl> - private : <nl> + private : <nl> Agent * _agent ; <nl> index_t _last ; <nl> arangodb : : consensus : : id_t _slaveID ; <nl> mmm a / arangod / Agency / AgentConfiguration . h <nl> ppp b / arangod / Agency / AgentConfiguration . 
h <nl> struct config_t { <nl> / / / @ brief pool size <nl> inline size_t pSize ( ) const { return poolSize ; } <nl> <nl> + <nl> + query_t const activeToBuilder ( ) { <nl> + query_t ret = std : : make_shared < arangodb : : velocypack : : Builder > ( ) ; <nl> + ret - > openArray ( ) ; <nl> + for ( auto const & i : active ) { <nl> + ret - > add ( VPackValue ( i ) ) ; <nl> + } <nl> + ret - > close ( ) ; <nl> + return ret ; <nl> + } <nl> + <nl> <nl> / / / @ brief override this configuration with prevailing opinion ( startup ) <nl> void override ( VPackSlice const & conf ) { <nl> struct config_t { <nl> ret - > add ( supervisionStr , VPackValue ( supervision ) ) ; <nl> ret - > add ( supervisionFrequencyStr , VPackValue ( supervisionFrequency ) ) ; <nl> ret - > add ( compactionStepSizeStr , VPackValue ( compactionStepSize ) ) ; <nl> - ret - > add ( " _key " , VPackValue ( " 0 " ) ) ; <nl> ret - > close ( ) ; <nl> return ret ; <nl> } <nl> <nl> - bool setId ( arangodb : : consensus : : id_t const & i ) { <nl> + bool setId ( arangodb : : consensus : : id_t const & i ) { <nl> if ( id . empty ( ) ) { <nl> id = i ; <nl> pool [ id ] = endpoint ; / / Register my endpoint with it <nl> struct config_t { <nl> <nl> <nl> / / / @ brief merge from persisted configuration <nl> - bool merge ( VPackSlice const & conf ) { <nl> + bool merge ( VPackSlice const & conf ) { <nl> <nl> LOG ( WARN ) < < conf . typeName ( ) ; <nl> <nl> struct config_t { <nl> LOG_TOPIC ( DEBUG , Logger : : AGENCY ) < < ss . str ( ) ; <nl> <nl> LOG ( WARN ) < < __FILE__ < < " : " < < __LINE__ ; <nl> - ss . clear ( ) ; <nl> + ss . str ( " " ) ; ss . clear ( ) ; <nl> ss < < " Agency pool size : " ; <nl> if ( poolSize = = 0 ) { / / Command line beats persistence <nl> if ( conf . hasKey ( poolSizeStr ) ) { <nl> struct config_t { <nl> LOG_TOPIC ( DEBUG , Logger : : AGENCY ) < < ss . str ( ) ; <nl> <nl> LOG ( WARN ) < < __FILE__ < < " : " < < __LINE__ ; <nl> - ss . clear ( ) ; <nl> + ss . str ( " " ) ; ss . clear ( ) ; <nl> ss < < " Agent pool : " ; <nl> if ( conf . hasKey ( poolStr ) ) { / / Persistence only <nl> LOG_TOPIC ( DEBUG , Logger : : AGENCY ) < < " Found agent pool in persistence : " ; <nl> - for ( auto const & peer : VPackArrayIterator ( conf . get ( poolStr ) ) ) { <nl> - auto key = peer . get ( idStr ) . copyString ( ) ; <nl> - auto value = peer . get ( endpointStr ) . copyString ( ) ; <nl> - pool [ key ] = value ; <nl> + for ( auto const & peer : VPackObjectIterator ( conf . get ( poolStr ) ) ) { <nl> + pool [ peer . key . copyString ( ) ] = peer . value . copyString ( ) ; <nl> } <nl> ss < < conf . get ( poolStr ) . toJson ( ) < < " ( persisted ) " ; <nl> } else { <nl> struct config_t { <nl> LOG_TOPIC ( DEBUG , Logger : : AGENCY ) < < ss . str ( ) ; <nl> <nl> LOG ( WARN ) < < __FILE__ < < " : " < < __LINE__ ; <nl> - ss . clear ( ) ; <nl> + ss . str ( " " ) ; ss . clear ( ) ; <nl> ss < < " Active agents : " ; <nl> if ( conf . hasKey ( activeStr ) ) { / / Persistence only ? <nl> for ( auto const & a : VPackArrayIterator ( conf . get ( activeStr ) ) ) { <nl> struct config_t { <nl> LOG_TOPIC ( DEBUG , Logger : : AGENCY ) < < ss . str ( ) ; <nl> <nl> LOG ( WARN ) < < __FILE__ < < " : " < < __LINE__ ; <nl> - ss . clear ( ) ; <nl> + ss . str ( " " ) ; ss . clear ( ) ; <nl> ss < < " Min RAFT interval : " ; <nl> if ( minPing = = 0 ) { / / Command line beats persistence <nl> if ( conf . hasKey ( minPingStr ) ) { <nl> struct config_t { <nl> LOG_TOPIC ( DEBUG , Logger : : AGENCY ) < < ss . 
str ( ) ; <nl> <nl> LOG ( WARN ) < < __FILE__ < < " : " < < __LINE__ ; <nl> - ss . clear ( ) ; <nl> + ss . str ( " " ) ; ss . clear ( ) ; <nl> ss < < " Max RAFT interval : " ; <nl> if ( maxPing = = 0 ) { / / Command line beats persistence <nl> if ( conf . hasKey ( maxPingStr ) ) { <nl> struct config_t { <nl> LOG_TOPIC ( DEBUG , Logger : : AGENCY ) < < ss . str ( ) ; <nl> <nl> LOG ( WARN ) < < __FILE__ < < " : " < < __LINE__ ; <nl> - ss . clear ( ) ; <nl> + ss . str ( " " ) ; ss . clear ( ) ; <nl> ss < < " Supervision : " ; <nl> if ( supervision = = false ) { / / Command line beats persistence <nl> if ( conf . hasKey ( supervisionStr ) ) { <nl> struct config_t { <nl> } <nl> LOG_TOPIC ( DEBUG , Logger : : AGENCY ) < < ss . str ( ) ; <nl> <nl> - ss . clear ( ) ; <nl> + ss . str ( " " ) ; ss . clear ( ) ; <nl> ss < < " Supervision interval [ s ] : " ; <nl> if ( supervisionFrequency = = 0 ) { / / Command line beats persistence <nl> if ( conf . hasKey ( supervisionFrequencyStr ) ) { <nl> struct config_t { <nl> LOG_TOPIC ( DEBUG , Logger : : AGENCY ) < < ss . str ( ) ; <nl> <nl> LOG ( WARN ) < < __FILE__ < < " : " < < __LINE__ ; <nl> - ss . clear ( ) ; <nl> + ss . str ( " " ) ; ss . clear ( ) ; <nl> ss < < " Compaction step size : " ; <nl> if ( compactionStepSize = = 0 ) { / / Command line beats persistence <nl> if ( conf . hasKey ( compactionStepSizeStr ) ) { <nl> mmm a / arangod / Agency / Constituent . cpp <nl> ppp b / arangod / Agency / Constituent . cpp <nl> Constituent : : Constituent ( ) <nl> _leaderID ( NO_LEADER ) , <nl> _role ( FOLLOWER ) , <nl> _agent ( nullptr ) , <nl> - _votedFor ( NO_LEADER ) <nl> - { } <nl> + _votedFor ( NO_LEADER ) { } <nl> <nl> <nl> / / / Shutdown if not already <nl> mmm a / arangod / Agency / GossipCallback . cpp <nl> ppp b / arangod / Agency / GossipCallback . cpp <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> # include " GossipCallback . h " <nl> + # include " Agent . h " <nl> <nl> using namespace arangodb : : consensus ; <nl> using namespace arangodb : : velocypack ; <nl> <nl> - GossipCallback : : GossipCallback ( ) { } <nl> + GossipCallback : : GossipCallback ( Agent * agent , arangodb : : consensus : : id_t peerId ) <nl> + : _agent ( agent ) , _peerId ( peerId ) { } <nl> <nl> bool GossipCallback : : operator ( ) ( arangodb : : ClusterCommResult * res ) { <nl> + / / _agent - > gossipCallback ( _agent , _peerId ) ; <nl> return true ; <nl> } <nl> mmm a / arangod / Agency / GossipCallback . h <nl> ppp b / arangod / Agency / GossipCallback . h <nl> <nl> # ifndef ARANGOD_CONSENSUS_GOSSIP_CALLBACK_H <nl> # define ARANGOD_CONSENSUS_GOSSIP_CALLBACK_H 1 <nl> <nl> + # include " Agency / AgencyCommon . h " <nl> # include " Cluster / ClusterComm . h " <nl> <nl> namespace arangodb { <nl> namespace consensus { <nl> <nl> - class GossipCallback : public arangodb : : ClusterCommCallback { <nl> - public : <nl> - GossipCallback ( ) ; <nl> + class Agent ; <nl> <nl> + class GossipCallback : public arangodb : : ClusterCommCallback { <nl> + public : <nl> + GossipCallback ( Agent * , arangodb : : consensus : : id_t ) ; <nl> + <nl> virtual bool operator ( ) ( arangodb : : ClusterCommResult * ) override final ; <nl> - <nl> + <nl> void shutdown ( ) ; <nl> + <nl> + private : <nl> + <nl> + Agent * _agent ; <nl> + arangodb : : consensus : : id_t _peerId ; <nl> <nl> - private : <nl> } ; <nl> } <nl> } / / namespace <nl> mmm a / arangod / Agency / State . 
cpp <nl> ppp b / arangod / Agency / State . cpp <nl> bool State : : loadCompacted ( ) { <nl> <nl> / / / Load persisted configuration <nl> bool State : : loadOrPersistConfiguration ( ) { <nl> - <nl> + <nl> LOG ( WARN ) < < __FILE__ < < " " < < __LINE__ ; <nl> - <nl> + <nl> auto bindVars = std : : make_shared < VPackBuilder > ( ) ; <nl> bindVars - > openObject ( ) ; <nl> bindVars - > close ( ) ; <nl> <nl> std : : string const aql ( <nl> - std : : string ( " FOR c in configuration FILTER c . _key = = \ " 0 \ " RETURN c " ) ) ; <nl> + std : : string ( " FOR c in configuration FILTER c . _key = = \ " 0 \ " RETURN c . cfg " ) ) ; <nl> <nl> arangodb : : aql : : Query query ( false , _vocbase , aql . c_str ( ) , aql . size ( ) , bindVars , <nl> nullptr , arangodb : : aql : : PART_MAIN ) ; <nl> - <nl> + <nl> auto queryResult = query . execute ( QueryRegistryFeature : : QUERY_REGISTRY ) ; <nl> - <nl> + <nl> if ( queryResult . code ! = TRI_ERROR_NO_ERROR ) { <nl> THROW_ARANGO_EXCEPTION_MESSAGE ( queryResult . code , queryResult . details ) ; <nl> } <nl> - <nl> + <nl> VPackSlice result = queryResult . result - > slice ( ) ; <nl> - <nl> + <nl> LOG ( WARN ) < < __FILE__ < < " : " < < __LINE__ ; <nl> <nl> if ( result . isArray ( ) & & result . length ( ) ) { / / We already have a persisted conf <nl> - <nl> + <nl> try { <nl> + LOG ( WARN ) < < __FILE__ < < " : " < < __LINE__ ; <nl> + LOG ( WARN ) < < result [ 0 ] . toJson ( ) ; <nl> LOG ( WARN ) < < __FILE__ < < " : " < < __LINE__ ; <nl> _agent - > mergeConfiguration ( result [ 0 ] ) ; <nl> } catch ( std : : exception const & e ) { <nl> LOG_TOPIC ( ERR , Logger : : AGENCY ) <nl> - < < " Failed to merge persisted configuration into runtime configuration : " <nl> + < < " Failed to merge persisted configuration into runtime configuration : " <nl> < < e . what ( ) ; <nl> FATAL_ERROR_EXIT ( ) ; <nl> } <nl> <nl> } else { / / Fresh start <nl> <nl> - LOG ( WARN ) < < __FILE__ < < " : " < < __LINE__ ; <nl> + LOG ( WARN ) < < __FILE__ < < " : " < < __LINE__ ; <nl> LOG_TOPIC ( DEBUG , Logger : : AGENCY ) < < " New agency ! " ; <nl> - <nl> + <nl> TRI_ASSERT ( _agent ! = nullptr ) ; <nl> _agent - > id ( to_string ( boost : : uuids : : random_generator ( ) ( ) ) ) ; <nl> <nl> - LOG ( WARN ) < < __FILE__ < < " : " < < __LINE__ ; <nl> + LOG ( WARN ) < < __FILE__ < < " : " < < __LINE__ ; <nl> auto transactionContext = <nl> std : : make_shared < StandaloneTransactionContext > ( _vocbase ) ; <nl> SingleCollectionTransaction trx ( transactionContext , " configuration " , <nl> TRI_TRANSACTION_WRITE ) ; <nl> <nl> - LOG ( WARN ) < < __FILE__ < < " : " < < __LINE__ ; <nl> + LOG ( WARN ) < < __FILE__ < < " : " < < __LINE__ ; <nl> int res = trx . begin ( ) ; <nl> OperationResult result ; <nl> - <nl> + <nl> if ( res ! = TRI_ERROR_NO_ERROR ) { <nl> THROW_ARANGO_EXCEPTION ( res ) ; <nl> } <nl> <nl> + <nl> + Builder doc ; <nl> + doc . openObject ( ) ; <nl> + doc . add ( " _key " , VPackValue ( " 0 " ) ) ; <nl> + doc . add ( " cfg " , _agent - > config ( ) . toBuilder ( ) - > slice ( ) ) ; <nl> + doc . close ( ) ; <nl> try { <nl> LOG ( WARN ) < < __FILE__ < < " : " < < __LINE__ ; <nl> result = trx . insert ( <nl> - " configuration " , _agent - > config ( ) . toBuilder ( ) - > slice ( ) , _options ) ; <nl> + " configuration " , doc . slice ( ) , _options ) ; <nl> } catch ( std : : exception const & e ) { <nl> LOG_TOPIC ( ERR , Logger : : AGENCY ) <nl> < < " Failed to persist configuration entry : " < < e . 
what ( ) ; <nl> bool State : : removeObsolete ( arangodb : : consensus : : index_t cind ) { <nl> bindVars , nullptr , arangodb : : aql : : PART_MAIN ) ; <nl> <nl> auto queryResult = query . execute ( QueryRegistryFeature : : QUERY_REGISTRY ) ; <nl> - <nl> - if ( queryResult . code ! = TRI_ERROR_NO_ERROR ) { <nl> - THROW_ARANGO_EXCEPTION_MESSAGE ( queryResult . code , queryResult . details ) ; <nl> - } <nl> - <nl> if ( queryResult . code ! = TRI_ERROR_NO_ERROR ) { <nl> THROW_ARANGO_EXCEPTION_MESSAGE ( queryResult . code , queryResult . details ) ; <nl> } <nl> + <nl> } <nl> return true ; <nl> } <nl> bool State : : persistReadDB ( arangodb : : consensus : : index_t cind ) { <nl> LOG_TOPIC ( ERR , Logger : : AGENCY ) < < " Failed to persist read DB for compaction ! " ; <nl> return false ; <nl> } <nl> + <nl> + bool State : : persistActiveAgents ( arangodb : : consensus : : id_t const & id ) { <nl> + return persistActiveAgents ( std : : vector < arangodb : : consensus : : id_t > ( 1 , id ) ) ; <nl> + } <nl> + <nl> + bool State : : persistActiveAgents ( <nl> + std : : vector < arangodb : : consensus : : id_t > const & ids ) { <nl> + <nl> + TRI_ASSERT ( ! ids . empty ( ) ) ; <nl> + <nl> + auto bindVars = std : : make_shared < VPackBuilder > ( ) ; <nl> + bindVars - > openObject ( ) ; <nl> + bindVars - > close ( ) ; <nl> + <nl> + std : : stringstream aql ; <nl> + aql < < " FOR c IN configuration UPDATE { _key : c . _key } WITH { cfg : { active : [ " ; <nl> + for ( size_t i = 0 ; i < ids . size ( ) - 1 ; + + i ) { <nl> + aql < < " \ " " < < ids . at ( i ) < < " \ " , " ; <nl> + } <nl> + aql < < " \ " " < < ids . back ( ) < < " \ " ] } } IN configuration ' ) " ; <nl> + <nl> + arangodb : : aql : : Query query ( <nl> + false , _vocbase , aql . str ( ) . c_str ( ) , aql . str ( ) . size ( ) , bindVars , nullptr , <nl> + arangodb : : aql : : PART_MAIN ) ; <nl> + <nl> + auto queryResult = query . execute ( QueryRegistryFeature : : QUERY_REGISTRY ) ; <nl> + <nl> + if ( queryResult . code ! = TRI_ERROR_NO_ERROR ) { <nl> + THROW_ARANGO_EXCEPTION_MESSAGE ( queryResult . code , queryResult . details ) ; <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> + <nl> mmm a / arangod / Agency / State . h <nl> ppp b / arangod / Agency / State . h <nl> class State { <nl> <nl> size_t removeConflicts ( query_t const & ) ; <nl> <nl> + bool persistActiveAgents ( arangodb : : consensus : : id_t const & ) ; <nl> + bool persistActiveAgents ( std : : vector < arangodb : : consensus : : id_t > const & ) ; <nl> + <nl> + <nl> <nl> private : <nl> <nl> mmm a / js / client / modules / @ arangodb / testing . js <nl> ppp b / js / client / modules / @ arangodb / testing . js <nl> function startInstanceAgency ( instanceInfo , protocol , options , <nl> } ) ; <nl> l . push ( ' - - agency . endpoint ' ) ; <nl> l . push ( ' tcp : / / 127 . 0 . 0 . 1 : ' + port ) ; <nl> - / / l . push ( ' - - agency . notify ' ) ; <nl> - l . push ( ' true ' ) ; <nl> <nl> instanceArgs [ ' flatCommands ' ] = l ; <nl> } <nl>
single agent with uuid after fresh start
arangodb/arangodb
131291c2f976ab0a8436861cf8be18702f3aca7e
2016-08-15T15:53:01Z
mmm a / arangod / RestHandler / RestVocbaseBaseHandler . cpp <nl> ppp b / arangod / RestHandler / RestVocbaseBaseHandler . cpp <nl> TRI_voc_rid_t RestVocbaseBaseHandler : : extractRevision ( char const * header , <nl> etag = _request - > value ( parameter , found ) ; <nl> <nl> if ( found ) { <nl> - TRI_voc_rid_t rid ; <nl> + TRI_voc_rid_t rid = 0 ; <nl> <nl> try { <nl> rid = StringUtils : : uint64_check ( etag ) ; <nl>
fix compiler warning
arangodb/arangodb
3380360a0c15e4d8f63d36056a1f0d94014dae2f
2016-02-23T12:23:02Z
mmm a / stdlib / public / runtime / Lazy . h <nl> ppp b / stdlib / public / runtime / Lazy . h <nl> template < class T > class Lazy { <nl> # endif <nl> return Value ; <nl> } <nl> + <nl> + / / / Get the value , assuming it must have already been initialized by this <nl> + / / / point . <nl> + T & unsafeGetAlreadyInitialized ( ) { <nl> + return Value ; <nl> + } <nl> <nl> private : <nl> static void lazyInitCallback ( void * Argument ) { <nl> mmm a / stdlib / public / runtime / Metadata . cpp <nl> ppp b / stdlib / public / runtime / Metadata . cpp <nl> <nl> # include < condition_variable > <nl> # include < new > <nl> # include < cctype > <nl> + # include < sys / mman . h > <nl> # include < pthread . h > <nl> # include " llvm / ADT / DenseMap . h " <nl> # include " llvm / ADT / Hashing . h " <nl> <nl> using namespace swift ; <nl> using namespace metadataimpl ; <nl> <nl> + void * MetadataAllocator : : alloc ( size_t size ) { <nl> + static const uintptr_t pagesizeMask = getpagesize ( ) - 1 ; <nl> + <nl> + char * end = next + size ; <nl> + <nl> + / / Allocate a new page if we need one . <nl> + if ( LLVM_UNLIKELY ( ( ( uintptr_t ) next & ~ pagesizeMask ) <nl> + ! = ( ( ( uintptr_t ) end & ~ pagesizeMask ) ) ) ) { <nl> + next = ( char * ) <nl> + mmap ( nullptr , pagesizeMask + 1 , PROT_READ | PROT_WRITE , <nl> + MAP_ANON | MAP_PRIVATE , - 1 , 0 ) ; <nl> + <nl> + if ( next = = MAP_FAILED ) <nl> + crash ( " unable to allocate memory for metadata cache " ) ; <nl> + end = next + size ; <nl> + } <nl> + <nl> + char * addr = next ; <nl> + next = end ; <nl> + return addr ; <nl> + } <nl> + <nl> namespace { <nl> struct GenericCacheEntry ; <nl> <nl> static GenericMetadataCache & getCache ( GenericMetadata * metadata ) { <nl> return lazyCache - > get ( ) ; <nl> } <nl> <nl> + / / / Fetch the metadata cache for a generic metadata structure , <nl> + / / / in a context where it must have already been initialized . <nl> + static GenericMetadataCache & unsafeGetInitializedCache ( GenericMetadata * metadata ) { <nl> + / / Keep this assert even if you change the representation above . <nl> + static_assert ( sizeof ( LazyGenericMetadataCache ) < = <nl> + sizeof ( GenericMetadata : : PrivateData ) , <nl> + " metadata cache is larger than the allowed space " ) ; <nl> + <nl> + auto lazyCache = <nl> + reinterpret_cast < LazyGenericMetadataCache * > ( metadata - > PrivateData ) ; <nl> + return lazyCache - > unsafeGetAlreadyInitialized ( ) ; <nl> + } <nl> + <nl> ClassMetadata * <nl> swift : : swift_allocateGenericClassMetadata ( GenericMetadata * pattern , <nl> const void * arguments , <nl> swift : : swift_allocateGenericClassMetadata ( GenericMetadata * pattern , <nl> assert ( metadataSize = = pattern - > MetadataSize + extraPrefixSize ) ; <nl> assert ( prefixSize = = pattern - > AddressPoint + extraPrefixSize ) ; <nl> <nl> - char * bytes = GenericCacheEntry : : allocate ( argumentsAsArray , <nl> - numGenericArguments , <nl> - metadataSize ) - > getData < char > ( ) ; <nl> + char * bytes = GenericCacheEntry : : allocate ( <nl> + unsafeGetInitializedCache ( pattern ) . getAllocator ( ) , <nl> + argumentsAsArray , <nl> + numGenericArguments , <nl> + metadataSize ) - > getData < char > ( ) ; <nl> <nl> / / Copy any extra prefix bytes in from the superclass . 
<nl> if ( extraPrefixSize ) { <nl> swift : : swift_allocateGenericValueMetadata ( GenericMetadata * pattern , <nl> size_t numGenericArguments = pattern - > NumKeyArguments ; <nl> <nl> char * bytes = <nl> - GenericCacheEntry : : allocate ( argumentsAsArray , numGenericArguments , <nl> - pattern - > MetadataSize ) - > getData < char > ( ) ; <nl> + GenericCacheEntry : : allocate ( <nl> + unsafeGetInitializedCache ( pattern ) . getAllocator ( ) , <nl> + argumentsAsArray , numGenericArguments , <nl> + pattern - > MetadataSize ) - > getData < char > ( ) ; <nl> <nl> / / Copy in the metadata template . <nl> memcpy ( bytes , pattern - > getMetadataTemplate ( ) , pattern - > MetadataSize ) ; <nl> swift : : swift_getObjCClassMetadata ( const ClassMetadata * theClass ) { <nl> auto entry = ObjCClassWrappers . findOrAdd ( args , numGenericArgs , <nl> [ & ] ( ) - > ObjCClassCacheEntry * { <nl> / / Create a new entry for the cache . <nl> - auto entry = ObjCClassCacheEntry : : allocate ( args , numGenericArgs , 0 ) ; <nl> + auto entry = ObjCClassCacheEntry : : allocate ( ObjCClassWrappers . getAllocator ( ) , <nl> + args , numGenericArgs , 0 ) ; <nl> <nl> auto metadata = entry - > getData ( ) ; <nl> metadata - > setKind ( MetadataKind : : ObjCClassWrapper ) ; <nl> swift : : swift_getFunctionTypeMetadata ( const void * flagsArgsAndResult [ ] ) { <nl> [ & ] ( ) - > FunctionCacheEntry * { <nl> / / Create a new entry for the cache . <nl> auto entry = FunctionCacheEntry : : allocate ( <nl> + FunctionTypes . getAllocator ( ) , <nl> flagsArgsAndResult , <nl> numKeyArguments , <nl> numArguments * sizeof ( FunctionTypeMetadata : : Argument ) ) ; <nl> swift : : swift_getTupleTypeMetadata ( size_t numElements , <nl> <nl> / / Allocate the tuple cache entry , which includes space for both the <nl> / / metadata and a value - witness table . <nl> - auto entry = TupleCacheEntry : : allocate ( genericArgs , numElements , <nl> + auto entry = TupleCacheEntry : : allocate ( TupleTypes . getAllocator ( ) , <nl> + genericArgs , numElements , <nl> numElements * sizeof ( Element ) ) ; <nl> <nl> auto witnesses = & entry - > Witnesses ; <nl> static uint32_t getLog2AlignmentFromMask ( size_t alignMask ) { <nl> / / / Initialize the field offset vector for a dependent - layout class , using the <nl> / / / " Universal " layout strategy . <nl> void swift : : swift_initClassMetadata_UniversalStrategy ( ClassMetadata * self , <nl> - const ClassMetadata * super , <nl> - size_t numFields , <nl> - const ClassFieldLayout * fieldLayouts , <nl> - size_t * fieldOffsets ) { <nl> + const ClassMetadata * super , <nl> + size_t numFields , <nl> + const ClassFieldLayout * fieldLayouts , <nl> + size_t * fieldOffsets ) { <nl> / / Start layout by appending to a standard heap object header . <nl> size_t size , alignMask ; <nl> <nl> void swift : : swift_initClassMetadata_UniversalStrategy ( ClassMetadata * self , <nl> / / even if Swift doesn ' t , because of SwiftObject . ) <nl> rodata - > InstanceStart = size ; <nl> <nl> + auto & allocator = unsafeGetInitializedCache ( <nl> + self - > getDescription ( ) - > GenericMetadataPattern ) <nl> + . getAllocator ( ) ; <nl> + <nl> / / Always clone the ivar descriptors . 
<nl> if ( numFields ) { <nl> const ClassIvarList * dependentIvars = rodata - > IvarList ; <nl> void swift : : swift_initClassMetadata_UniversalStrategy ( ClassMetadata * self , <nl> <nl> auto ivarListSize = sizeof ( ClassIvarList ) + <nl> numFields * sizeof ( ClassIvarEntry ) ; <nl> - auto ivars = ( ClassIvarList * ) permanentAlloc ( ivarListSize ) ; <nl> + auto ivars = ( ClassIvarList * ) allocator . alloc ( ivarListSize ) ; <nl> memcpy ( ivars , dependentIvars , ivarListSize ) ; <nl> rodata - > IvarList = ivars ; <nl> <nl> swift : : swift_getMetatypeMetadata ( const Metadata * instanceMetadata ) { <nl> auto entry = MetatypeTypes . findOrAdd ( args , numGenericArgs , <nl> [ & ] ( ) - > MetatypeCacheEntry * { <nl> / / Create a new entry for the cache . <nl> - auto entry = MetatypeCacheEntry : : allocate ( args , numGenericArgs , 0 ) ; <nl> + auto entry = MetatypeCacheEntry : : allocate ( MetatypeTypes . getAllocator ( ) , <nl> + args , numGenericArgs , 0 ) ; <nl> <nl> auto metadata = entry - > getData ( ) ; <nl> metadata - > setKind ( MetadataKind : : Metatype ) ; <nl> swift : : swift_getExistentialMetatypeMetadata ( const Metadata * instanceMetadata ) { <nl> [ & ] ( ) - > ExistentialMetatypeCacheEntry * { <nl> / / Create a new entry for the cache . <nl> auto entry = <nl> - ExistentialMetatypeCacheEntry : : allocate ( args , numGenericArgs , 0 ) ; <nl> + ExistentialMetatypeCacheEntry : : allocate ( <nl> + ExistentialMetatypeTypes . getAllocator ( ) , <nl> + args , numGenericArgs , 0 ) ; <nl> <nl> ExistentialTypeFlags flags ; <nl> if ( instanceMetadata - > getKind ( ) = = MetadataKind : : Existential ) { <nl> swift : : swift_getExistentialTypeMetadata ( size_t numProtocols , <nl> auto entry = ExistentialTypes . findOrAdd ( protocolArgs , numProtocols , <nl> [ & ] ( ) - > ExistentialCacheEntry * { <nl> / / Create a new entry for the cache . <nl> - auto entry = ExistentialCacheEntry : : allocate ( protocolArgs , numProtocols , <nl> + auto entry = ExistentialCacheEntry : : allocate ( ExistentialTypes . getAllocator ( ) , <nl> + protocolArgs , numProtocols , <nl> sizeof ( const ProtocolDescriptor * ) * numProtocols ) ; <nl> auto metadata = entry - > getData ( ) ; <nl> <nl> mmm a / stdlib / public / runtime / MetadataCache . h <nl> ppp b / stdlib / public / runtime / MetadataCache . h <nl> <nl> # define SWIFT_DEBUG_RUNTIME 0 <nl> # endif <nl> <nl> - static void * permanentAlloc ( size_t size ) { return malloc ( size ) ; } <nl> + / / / A bump pointer for metadata allocations . Since metadata is ( currently ) <nl> + / / / never released , it does not support deallocation . This allocator by itself <nl> + / / / is not thread - safe ; in concurrent uses , allocations must be guarded by <nl> + / / / a lock , such as the per - metadata - cache lock used to guard metadata <nl> + / / / instantiations . All allocations are pointer - aligned . <nl> + class MetadataAllocator { <nl> + / / / Address of the next available space . The allocator grabs a page at a time , <nl> + / / / so the need for a new page can be determined by page alignment . <nl> + / / / <nl> + / / / Initializing to - 1 instead of nullptr ensures that the first allocation <nl> + / / / triggers a page allocation since it will always span a " page " boundary . <nl> + char * next = ( char * ) ( ~ ( uintptr_t ) 0U ) ; <nl> + <nl> + public : <nl> + MetadataAllocator ( ) = default ; <nl> + <nl> + / / Don ' t copy or move , please . 
<nl> + MetadataAllocator ( const MetadataAllocator & ) = delete ; <nl> + MetadataAllocator ( MetadataAllocator & & ) = delete ; <nl> + MetadataAllocator & operator = ( const MetadataAllocator & ) = delete ; <nl> + MetadataAllocator & operator = ( MetadataAllocator & & ) = delete ; <nl> + <nl> + void * alloc ( size_t size ) ; <nl> + } ; <nl> <nl> / / A wrapper around a pointer to a metadata cache entry that provides <nl> / / DenseMap semantics that compare values in the key vector for the metadata <nl> class alignas ( void * ) CacheEntry : public Header { <nl> <nl> public : <nl> template < typename . . . ImplArgs > <nl> - static Impl * allocate ( const void * const * arguments , <nl> + static Impl * allocate ( MetadataAllocator & allocator , <nl> + const void * const * arguments , <nl> size_t numArguments , size_t payloadSize ) { <nl> - void * buffer = permanentAlloc ( sizeof ( Impl ) + <nl> - numArguments * sizeof ( void * ) + <nl> - payloadSize ) ; <nl> + void * buffer = allocator . alloc ( sizeof ( Impl ) + <nl> + numArguments * sizeof ( void * ) + <nl> + payloadSize ) ; <nl> void * resultPtr = ( char * ) buffer + numArguments * sizeof ( void * ) ; <nl> auto result = new ( resultPtr ) Impl ( numArguments ) ; <nl> <nl> template < class Entry > class MetadataCache { <nl> <nl> / / / Synchronization of metadata creation . <nl> std : : mutex * Lock ; <nl> - <nl> + <nl> / / / The head of a linked list connecting all the metadata cache entries . <nl> / / / TODO : Remove this when LLDB is able to understand the final data <nl> / / / structure for the metadata cache . <nl> const Entry * Head ; <nl> <nl> + / / / Allocator for entries of this cache . <nl> + MetadataAllocator Allocator ; <nl> + <nl> public : <nl> MetadataCache ( ) : Map ( new MDMapTy ( ) ) , Lock ( new std : : mutex ( ) ) { } <nl> ~ MetadataCache ( ) { delete Map ; delete Lock ; } <nl> template < class Entry > class MetadataCache { <nl> MetadataCache ( const MetadataCache & other ) = delete ; <nl> MetadataCache & operator = ( const MetadataCache & other ) = delete ; <nl> <nl> + / / / Get the allocator for metadata in this cache . <nl> + / / / The allocator can only be safely used while the cache is locked during <nl> + / / / an addMetadataEntry call . <nl> + MetadataAllocator & getAllocator ( ) { return Allocator ; } <nl> + <nl> / / / Call entryBuilder ( ) and add the generated metadata to the cache . <nl> / / / \ p key is the key used by the cache and \ p Bucket is the cache <nl> / / / entry to place the new metadata entry . <nl>
Runtime : Use a per - cache bump allocator to instantiate metadata .
apple/swift
b4436bfc137a3b473fc767413f1cfbc21e4dd811
2015-04-28T04:14:06Z
mmm a / infra / testing / builders . pyl <nl> ppp b / infra / testing / builders . pyl <nl> <nl> # Copyright 2018 The V8 project authors . All rights reserved . <nl> # Use of this source code is governed by a BSD - style license that can be <nl> # found in the LICENSE file . <nl> + # <nl> + # Please keep builder names , builder configs and test definitions sorted . <nl> + # Builder names should be sorted alphabetically . Builder configs should have <nl> + # keys sorted in the alphabetical order except ' tests ' key , which should always <nl> + # come last . Test definitions must have keys in the following order , but omit <nl> + # optional fields : <nl> + # * name ( required ) <nl> + # * variant <nl> + # * test_args <nl> + # * shards <nl> + # * suffix <nl> + # * swarming_dimensions <nl> + # * swarming_task_attrs <nl> + # <nl> + # Please also format test definitions as a single line with ' , ' separating <nl> + # fields , e . g . <nl> + # <nl> + # { ' name ' : ' v8testing ' , ' variant ' : ' extra ' , ' shards ' : 2 } <nl> + # <nl> + # After formatting test definitions this way , please sort them alphabetically by <nl> + # test name . For all variants of the test with the same name , the <nl> + # least - qualified test ( no variant , no test args ) should come first . You may <nl> + # also deviate from the alphabetical order if necessary and group tests <nl> + # differently , but in this case please add a comment before each group and <nl> + # continue to sort tests using the rules above within each group . <nl> <nl> { <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> <nl> { ' name ' : ' mozilla ' } , <nl> { ' name ' : ' mozilla ' , ' variant ' : ' extra ' } , <nl> { ' name ' : ' test262 ' } , <nl> - { ' name ' : ' test262_variants ' , ' shards ' : 3 , ' variant ' : ' extra ' } , <nl> + { ' name ' : ' test262_variants ' , ' variant ' : ' extra ' , ' shards ' : 3 } , <nl> { ' name ' : ' v8testing ' , ' shards ' : 3 } , <nl> - { ' name ' : ' v8testing ' , ' shards ' : 2 , ' variant ' : ' extra ' } , <nl> + { ' name ' : ' v8testing ' , ' variant ' : ' extra ' , ' shards ' : 2 } , <nl> ] , <nl> } , <nl> ' v8_linux_gc_stress_dbg ' : { <nl> <nl> { ' name ' : ' mozilla ' } , <nl> { ' name ' : ' mozilla ' , ' variant ' : ' extra ' } , <nl> { ' name ' : ' test262_variants ' , ' shards ' : 2 } , <nl> - { ' name ' : ' test262_variants ' , ' shards ' : 2 , ' variant ' : ' extra ' } , <nl> + { ' name ' : ' test262_variants ' , ' variant ' : ' extra ' , ' shards ' : 2 } , <nl> { ' name ' : ' v8testing ' } , <nl> { ' name ' : ' v8testing ' , ' variant ' : ' extra ' } , <nl> ] , <nl> <nl> ' tests ' : [ <nl> { ' name ' : ' mozilla ' , ' variant ' : ' default ' } , <nl> { ' name ' : ' test262 ' , ' variant ' : ' default ' } , <nl> - { ' name ' : ' v8testing ' , ' shards ' : 2 , ' variant ' : ' default ' } , <nl> + { ' name ' : ' v8testing ' , ' variant ' : ' default ' , ' shards ' : 2 } , <nl> ] , <nl> } , <nl> ' v8_linux_rel_ng_triggered ' : { <nl> <nl> { ' name ' : ' mozilla ' , ' variant ' : ' extra ' } , <nl> { ' name ' : ' optimize_for_size ' } , <nl> { ' name ' : ' test262_variants ' , ' shards ' : 4 } , <nl> - { ' name ' : ' test262_variants ' , ' shards ' : 2 , ' variant ' : ' extra ' } , <nl> + { ' name ' : ' test262_variants ' , ' variant ' : ' extra ' , ' shards ' : 2 } , <nl> { ' name ' : ' v8testing ' } , <nl> { ' name ' : ' v8testing ' , ' variant ' : ' extra ' } , <nl> ] , <nl> 
<nl> { ' name ' : ' mozilla ' } , <nl> { ' name ' : ' test262 ' } , <nl> { ' name ' : ' v8testing ' , ' shards ' : 7 } , <nl> - { ' name ' : ' v8testing ' , ' shards ' : 3 , ' variant ' : ' extra ' } , <nl> + { ' name ' : ' v8testing ' , ' variant ' : ' extra ' , ' shards ' : 3 } , <nl> ] , <nl> } , <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> <nl> ' tests ' : [ <nl> { ' name ' : ' test262_variants ' , ' shards ' : 7 } , <nl> { ' name ' : ' v8testing ' , ' shards ' : 3 } , <nl> - { ' name ' : ' v8testing ' , ' shards ' : 2 , ' variant ' : ' extra ' } , <nl> + { ' name ' : ' v8testing ' , ' variant ' : ' extra ' , ' shards ' : 2 } , <nl> { ' name ' : ' v8testing ' , ' variant ' : ' slow_path ' } , <nl> ] , <nl> } , <nl> <nl> { ' name ' : ' mozilla ' } , <nl> { ' name ' : ' mozilla ' , ' variant ' : ' extra ' } , <nl> { ' name ' : ' test262 ' } , <nl> - { ' name ' : ' test262_variants ' , ' shards ' : 3 , ' variant ' : ' extra ' } , <nl> + { ' name ' : ' test262_variants ' , ' variant ' : ' extra ' , ' shards ' : 3 } , <nl> { ' name ' : ' v8testing ' , ' shards ' : 3 } , <nl> - { ' name ' : ' v8testing ' , ' shards ' : 2 , ' variant ' : ' extra ' } , <nl> + { ' name ' : ' v8testing ' , ' variant ' : ' extra ' , ' shards ' : 2 } , <nl> ] , <nl> } , <nl> ' v8_linux64_fyi_rel_ng_triggered ' : { <nl> <nl> { ' name ' : ' mjsunit ' , ' variant ' : ' stress_sampling ' } , <nl> { ' name ' : ' webkit ' , ' variant ' : ' stress_sampling ' } , <nl> # Infra staging . <nl> - { ' name ' : ' test262_variants ' , ' shards ' : 2 , ' variant ' : ' infra_staging ' } , <nl> - { ' name ' : ' v8testing ' , ' shards ' : 2 , ' variant ' : ' infra_staging ' } , <nl> + { ' name ' : ' test262_variants ' , ' variant ' : ' infra_staging ' , ' shards ' : 2 } , <nl> + { ' name ' : ' v8testing ' , ' variant ' : ' infra_staging ' , ' shards ' : 2 } , <nl> ] , <nl> } , <nl> ' v8_linux64_rel_ng_triggered ' : { <nl> <nl> } , <nl> ' tests ' : [ <nl> # TODO ( machenbach ) : Add benchmarks . <nl> - { ' name ' : ' mjsunit_sp_frame_access ' } , <nl> # TODO ( machenbach ) : Add mozilla tests . 
<nl> + { ' name ' : ' mjsunit_sp_frame_access ' } , <nl> { ' name ' : ' optimize_for_size ' } , <nl> { ' name ' : ' test262_variants ' , ' shards ' : 4 } , <nl> - { ' name ' : ' test262_variants ' , ' shards ' : 2 , ' variant ' : ' extra ' } , <nl> + { ' name ' : ' test262_variants ' , ' variant ' : ' extra ' , ' shards ' : 2 } , <nl> { ' name ' : ' v8initializers ' } , <nl> { ' name ' : ' v8testing ' } , <nl> { ' name ' : ' v8testing ' , ' variant ' : ' extra ' } , <nl> <nl> { ' name ' : ' mozilla ' } , <nl> { ' name ' : ' test262 ' } , <nl> { ' name ' : ' v8testing ' , ' shards ' : 7 } , <nl> - { ' name ' : ' v8testing ' , ' shards ' : 3 , ' variant ' : ' extra ' } , <nl> + { ' name ' : ' v8testing ' , ' variant ' : ' extra ' , ' shards ' : 3 } , <nl> ] , <nl> } , <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> <nl> ' os ' : ' Windows - 7 - SP1 ' , <nl> } , <nl> ' tests ' : [ <nl> - { ' name ' : ' v8testing ' , ' shards ' : 3 , ' variant ' : ' default ' } , <nl> + { ' name ' : ' v8testing ' , ' variant ' : ' default ' , ' shards ' : 3 } , <nl> ] , <nl> } , <nl> ' v8_win_rel_ng_triggered ' : { <nl> <nl> { ' name ' : ' mozilla ' } , <nl> { ' name ' : ' test262 ' } , <nl> { ' name ' : ' v8testing ' , ' shards ' : 3 } , <nl> - { ' name ' : ' v8testing ' , ' shards ' : 2 , ' variant ' : ' extra ' } , <nl> + { ' name ' : ' v8testing ' , ' variant ' : ' extra ' , ' shards ' : 2 } , <nl> ] , <nl> } , <nl> ' v8_mac64_rel_ng_triggered ' : { <nl> <nl> # Linux32 <nl> ' V8 Linux - debug ' : { <nl> ' tests ' : [ <nl> + { ' name ' : ' benchmarks ' , ' variant ' : ' code_serializer ' , ' shards ' : 1 } , <nl> { ' name ' : ' d8testing ' , ' variant ' : ' code_serializer ' , ' shards ' : 1 } , <nl> { ' name ' : ' mozilla ' , ' variant ' : ' code_serializer ' , ' shards ' : 1 } , <nl> { ' name ' : ' test262_variants ' , ' variant ' : ' code_serializer ' , ' shards ' : 1 } , <nl> - { ' name ' : ' benchmarks ' , ' variant ' : ' code_serializer ' , ' shards ' : 1 } , <nl> ] , <nl> } , <nl> ' V8 Linux - gc stress ' : { <nl> <nl> } , <nl> ' V8 Linux64 - debug - fyi ' : { <nl> ' tests ' : [ <nl> - { ' name ' : ' v8testing ' , ' variant ' : ' infra_staging ' , ' shards ' : 2 } , <nl> + # Infra staging . <nl> { ' name ' : ' test262_variants ' , ' variant ' : ' infra_staging ' , ' shards ' : 3 } , <nl> + { ' name ' : ' v8testing ' , ' variant ' : ' infra_staging ' , ' shards ' : 2 } , <nl> + # Stress sampling . <nl> { ' name ' : ' mjsunit ' , ' variant ' : ' stress_sampling ' , ' shards ' : 1 } , <nl> { ' name ' : ' webkit ' , ' variant ' : ' stress_sampling ' , ' shards ' : 1 } , <nl> ] , <nl> } , <nl> ' V8 Linux64 - fyi ' : { <nl> ' tests ' : [ <nl> - { ' name ' : ' v8testing ' , ' variant ' : ' infra_staging ' , ' shards ' : 1 } , <nl> - { ' name ' : ' test262_variants ' , ' variant ' : ' infra_staging ' , ' shards ' : 2 } , <nl> { ' name ' : ' mjsunit ' , ' variant ' : ' stress_sampling ' , ' shards ' : 1 } , <nl> + { ' name ' : ' test262_variants ' , ' variant ' : ' infra_staging ' , ' shards ' : 2 } , <nl> + { ' name ' : ' v8testing ' , ' variant ' : ' infra_staging ' , ' shards ' : 1 } , <nl> { ' name ' : ' webkit ' , ' variant ' : ' stress_sampling ' , ' shards ' : 1 } , <nl> ] , <nl> } , <nl>
[ tools ] Describe sorting rules and format the file according to them
v8/v8
0e37130bce25df6893086b3b33cfe88c66b1ca02
2018-05-15T08:34:23Z
mmm a / dbms / src / Storages / System / StorageSystemStackTrace . cpp <nl> ppp b / dbms / src / Storages / System / StorageSystemStackTrace . cpp <nl> <nl> # include < signal . h > <nl> + # include < poll . h > <nl> <nl> # include < mutex > <nl> - # include < condition_variable > <nl> # include < filesystem > <nl> <nl> # include < ext / scope_guard . h > <nl> <nl> # include < DataTypes / DataTypeString . h > <nl> # include < DataTypes / DataTypesNumber . h > <nl> # include < DataTypes / DataTypeArray . h > <nl> - # include < DataStreams / OneBlockInputStream . h > <nl> # include < IO / ReadHelpers . h > <nl> + # include < Common / PipeFDs . h > <nl> + # include < common / getThreadNumber . h > <nl> <nl> <nl> namespace DB <nl> namespace DB <nl> namespace ErrorCodes <nl> { <nl> extern const int CANNOT_SIGQUEUE ; <nl> + extern const int CANNOT_MANIPULATE_SIGSET ; <nl> + extern const int CANNOT_SET_SIGNAL_HANDLER ; <nl> + extern const int CANNOT_READ_FROM_FILE_DESCRIPTOR ; <nl> + extern const int LOGICAL_ERROR ; <nl> } <nl> <nl> <nl> - NamesAndTypesList StorageSystemStackTrace : : getNamesAndTypes ( ) <nl> - { <nl> - return <nl> - { <nl> - { " thread_number " , std : : make_shared < DataTypeUInt32 > ( ) } , <nl> - { " query_id " , std : : make_shared < DataTypeString > ( ) } , <nl> - { " trace " , std : : make_shared < DataTypeArray > ( std : : make_shared < DataTypeUInt64 > ( ) ) } <nl> - } ; <nl> - } <nl> - <nl> namespace <nl> { <nl> - struct State <nl> - { <nl> - std : : mutex mutex ; <nl> - std : : condition_variable condvar ; <nl> + const pid_t expected_pid = getpid ( ) ; <nl> + const int sig = SIGRTMIN ; <nl> + UInt32 thread_number { 0 } ; <nl> + std : : optional < StackTrace > stack_trace ; <nl> + LazyPipeFDs notification_pipe ; <nl> <nl> - size_t total_threads ; <nl> - size_t threads_processed ; <nl> - std : : exception_ptr exception ; <nl> - MutableColumns * columns_to_fill ; <nl> + void signalHandler ( int , siginfo_t * info , void * context ) <nl> + { <nl> + / / / In case malicious user is sending signals manually ( for unknown reason ) . <nl> + / / / If we don ' t check - it may break our synchronization . <nl> + if ( info - > si_pid ! = expected_pid ) <nl> + return ; <nl> <nl> - State ( ) { reset ( ) ; } <nl> + / / / All these methods are signal - safe . <nl> + const ucontext_t signal_context = * reinterpret_cast < ucontext_t * > ( context ) ; <nl> + stack_trace . emplace ( signal_context ) ; <nl> + thread_number = getThreadNumber ( ) ; <nl> <nl> - void reset ( MutableColumns * columns_to_fill_ = nullptr ) <nl> - { <nl> - total_threads = 0 ; <nl> - threads_processed = 0 ; <nl> - exception = std : : exception_ptr ( ) ; <nl> - columns_to_fill = columns_to_fill_ ; <nl> - } <nl> + char buf = 0 ; <nl> + / / / We cannot do anything if write failed . <nl> + ( void ) : : write ( notification_pipe . fds_rw [ 1 ] , & buf , 1 ) ; <nl> + } <nl> <nl> - operator bool ( ) <nl> + / / / Wait for data in pipe . <nl> + bool wait ( int timeout_ms ) <nl> + { <nl> + while ( true ) <nl> { <nl> - return columns_to_fill ! = nullptr ; <nl> + int fd = notification_pipe . fds_rw [ 0 ] ; <nl> + pollfd poll_fd { fd , POLLIN , 0 } ; <nl> + <nl> + int poll_res = poll ( & poll_fd , 1 , timeout_ms ) ; <nl> + if ( poll_res < 0 ) <nl> + { <nl> + if ( errno = = EINTR ) <nl> + { <nl> + - - timeout_ms ; / / / Quite a hacky way to update timeout . Just to make sure we avoid infinite waiting . 
<nl> + if ( timeout_ms = = 0 ) <nl> + return false ; <nl> + continue ; <nl> + } <nl> + <nl> + throwFromErrno ( " Cannot poll pipe " , ErrorCodes : : CANNOT_READ_FROM_FILE_DESCRIPTOR ) ; <nl> + } <nl> + if ( poll_res = = 0 ) <nl> + return false ; <nl> + <nl> + char buf = 0 ; <nl> + ssize_t read_res = : : read ( fd , & buf , 1 ) ; <nl> + if ( read_res = = 1 ) <nl> + return true ; <nl> + <nl> + if ( read_res < 0 ) <nl> + { <nl> + if ( errno = = EINTR ) <nl> + continue ; <nl> + <nl> + throwFromErrno ( " Cannot read from pipe " , ErrorCodes : : CANNOT_READ_FROM_FILE_DESCRIPTOR ) ; <nl> + } <nl> + <nl> + throw Exception ( " Logical error : read for one byte returned more than one byte " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> } <nl> - } ; <nl> - <nl> - State state ; <nl> - <nl> - void callback ( const siginfo_t & , const StackTrace & stack_trace , UInt32 thread_number ) <nl> - { <nl> - std : : lock_guard lock ( state . mutex ) ; <nl> + } <nl> + } <nl> <nl> - std : : cerr < < thread_number < < " ! \ n " ; <nl> <nl> - if ( ! state ) <nl> - return ; <nl> + StorageSystemStackTrace : : StorageSystemStackTrace ( const String & name ) <nl> + : IStorageSystemOneBlock < StorageSystemStackTrace > ( name ) <nl> + { <nl> + notification_pipe . open ( ) ; <nl> <nl> - try <nl> - { <nl> - size_t stack_trace_size = stack_trace . getSize ( ) ; <nl> - size_t stack_trace_offset = stack_trace . getOffset ( ) ; <nl> + / / / Setup signal handler . <nl> <nl> - Array arr ; <nl> - arr . reserve ( stack_trace_size - stack_trace_offset ) ; <nl> - for ( size_t i = stack_trace_offset ; i < stack_trace_size ; + + i ) <nl> - arr . emplace_back ( reinterpret_cast < intptr_t > ( stack_trace . getFrames ( ) [ i ] ) ) ; <nl> + struct sigaction sa { } ; <nl> + sa . sa_sigaction = signalHandler ; <nl> + sa . sa_flags = SA_SIGINFO ; <nl> <nl> - std : : cerr < < thread_number < < " ! ! \ n " ; <nl> + if ( sigemptyset ( & sa . sa_mask ) ) <nl> + throwFromErrno ( " Cannot set signal handler . " , ErrorCodes : : CANNOT_MANIPULATE_SIGSET ) ; <nl> <nl> - state . columns_to_fill - > at ( 0 ) - > insert ( thread_number ) ; <nl> - state . columns_to_fill - > at ( 1 ) - > insertDefault ( ) ; <nl> - state . columns_to_fill - > at ( 2 ) - > insert ( arr ) ; <nl> + if ( sigaddset ( & sa . sa_mask , sig ) ) <nl> + throwFromErrno ( " Cannot set signal handler . " , ErrorCodes : : CANNOT_MANIPULATE_SIGSET ) ; <nl> <nl> - std : : cerr < < thread_number < < " ! ! ! \ n " ; <nl> + if ( sigaction ( sig , & sa , nullptr ) ) <nl> + throwFromErrno ( " Cannot set signal handler . " , ErrorCodes : : CANNOT_SET_SIGNAL_HANDLER ) ; <nl> + } <nl> <nl> - + + state . threads_processed ; <nl> <nl> - std : : cerr < < state . threads_processed < < " , " < < state . total_threads < < " ! ! ! ! \ n " ; <nl> - if ( state . threads_processed > = state . total_threads ) <nl> - state . condvar . notify_one ( ) ; <nl> - } <nl> - catch ( . . . ) <nl> - { <nl> - state . reset ( ) ; <nl> - state . exception = std : : current_exception ( ) ; <nl> - state . condvar . 
notify_one ( ) ; <nl> - } <nl> - } <nl> + NamesAndTypesList StorageSystemStackTrace : : getNamesAndTypes ( ) <nl> + { <nl> + return <nl> + { <nl> + { " thread_number " , std : : make_shared < DataTypeUInt32 > ( ) } , <nl> + { " query_id " , std : : make_shared < DataTypeString > ( ) } , <nl> + { " trace " , std : : make_shared < DataTypeArray > ( std : : make_shared < DataTypeUInt64 > ( ) ) } <nl> + } ; <nl> } <nl> <nl> + <nl> void StorageSystemStackTrace : : fillData ( MutableColumns & res_columns , const Context & , const SelectQueryInfo & ) const <nl> { <nl> - std : : unique_lock lock ( state . mutex ) ; <nl> + / / / It shouldn ' t be possible to do concurrent reads from this table . <nl> + std : : lock_guard lock ( mutex ) ; <nl> + <nl> + / / / Send a signal to every thread and wait for result . <nl> + / / / We must wait for every thread one by one sequentially , <nl> + / / / because there is a limit on number of queued signals in OS and otherwise signals may get lost . <nl> + / / / Also , non - RT signals are not delivered if previous signal is handled right now ( by default ; but we use RT signals ) . <nl> <nl> - state . reset ( & res_columns ) ; <nl> - SCOPE_EXIT ( { state . reset ( ) ; } ) ; <nl> + / / / Obviously , results for different threads may be out of sync . <nl> <nl> - std : : cerr < < state . columns_to_fill - > size ( ) < < " \ n " ; <nl> + / / / There is no better way to enumerate threads in a process other than looking into procfs . <nl> <nl> - / / / Send a signal to every thread <nl> std : : filesystem : : directory_iterator end ; <nl> for ( std : : filesystem : : directory_iterator it ( " / proc / self / task " ) ; it ! = end ; + + it ) <nl> { <nl> - sigval sig_value ; <nl> - sig_value . sival_ptr = reinterpret_cast < void * > ( & callback ) ; <nl> + sigval sig_value { } ; <nl> pid_t tid = parse < pid_t > ( it - > path ( ) . filename ( ) ) ; <nl> - if ( 0 = = : : sigqueue ( tid , SIGTSTP , sig_value ) ) <nl> + <nl> + std : : cerr < < " Requested : " < < tid < < " \ n " ; <nl> + <nl> + if ( 0 ! = : : sigqueue ( tid , sig , sig_value ) ) <nl> { <nl> - + + state . total_threads ; <nl> + / / / The thread may has been already finished . <nl> + if ( ESRCH = = errno ) <nl> + continue ; <nl> + <nl> + throwFromErrno ( " Cannot send signal with sigqueue " , ErrorCodes : : CANNOT_SIGQUEUE ) ; <nl> + } <nl> + <nl> + / / / Just in case we will wait for pipe with timeout . In case signal didn ' t get processed . <nl> + if ( wait ( 100 ) ) <nl> + { <nl> + size_t stack_trace_size = stack_trace - > getSize ( ) ; <nl> + size_t stack_trace_offset = stack_trace - > getOffset ( ) ; <nl> + <nl> + Array arr ; <nl> + arr . reserve ( stack_trace_size - stack_trace_offset ) ; <nl> + for ( size_t i = stack_trace_offset ; i < stack_trace_size ; + + i ) <nl> + arr . emplace_back ( reinterpret_cast < intptr_t > ( stack_trace - > getFrames ( ) [ i ] ) ) ; <nl> + <nl> + std : : cerr < < tid < < " , " < < thread_number < < " ! ! \ n " ; <nl> + <nl> + res_columns [ 0 ] - > insert ( thread_number ) ; <nl> + res_columns [ 1 ] - > insertDefault ( ) ; <nl> + res_columns [ 2 ] - > insert ( arr ) ; <nl> } <nl> else <nl> { <nl> - / / / The thread may have been already finished . <nl> - if ( ESRCH ! = errno ) <nl> - throwFromErrno ( " Cannot send signal with sigqueue " , ErrorCodes : : CANNOT_SIGQUEUE ) ; <nl> + / / / Cannot obtain a stack trace . But create a record in result nevertheless . 
<nl> + <nl> + res_columns [ 0 ] - > insert ( tid ) ; <nl> + res_columns [ 1 ] - > insertDefault ( ) ; <nl> + res_columns [ 2 ] - > insertDefault ( ) ; <nl> } <nl> } <nl> - <nl> - std : : cerr < < state . threads_processed < < " , " < < state . total_threads < < " sent \ n " ; <nl> - <nl> - / / / Timeout one second for the case the signal pipe will be full and messages will be dropped . <nl> - state . condvar . wait_for ( lock , std : : chrono : : seconds ( 1 ) , [ ] { return state . threads_processed > = state . total_threads | | state . exception ; } ) ; <nl> - if ( state . exception ) <nl> - std : : rethrow_exception ( state . exception ) ; <nl> } <nl> <nl> } <nl> mmm a / dbms / src / Storages / System / StorageSystemStackTrace . h <nl> ppp b / dbms / src / Storages / System / StorageSystemStackTrace . h <nl> <nl> # pragma once <nl> <nl> + # include < mutex > <nl> # include < ext / shared_ptr_helper . h > <nl> # include < Storages / System / IStorageSystemOneBlock . h > <nl> <nl> class StorageSystemStackTrace : public ext : : shared_ptr_helper < StorageSystemStack <nl> friend struct ext : : shared_ptr_helper < StorageSystemStackTrace > ; <nl> public : <nl> String getName ( ) const override { return " SystemStackTrace " ; } <nl> - <nl> static NamesAndTypesList getNamesAndTypes ( ) ; <nl> <nl> + StorageSystemStackTrace ( const String & name ) ; <nl> + <nl> protected : <nl> using IStorageSystemOneBlock : : IStorageSystemOneBlock ; <nl> - <nl> void fillData ( MutableColumns & res_columns , const Context & context , const SelectQueryInfo & query_info ) const override ; <nl> + <nl> + mutable std : : mutex mutex ; <nl> } ; <nl> <nl> } <nl> mmm a / libs / libdaemon / src / BaseDaemon . cpp <nl> ppp b / libs / libdaemon / src / BaseDaemon . cpp <nl> static void terminateRequestedSignalHandler ( int sig , siginfo_t * info , void * co <nl> static void signalHandler ( int sig , siginfo_t * info , void * context ) <nl> { <nl> char buf [ buf_size ] ; <nl> - std : : cerr < < " Size of buffer : " < < buf_size < < " \ n " ; <nl> DB : : WriteBufferFromFileDescriptorDiscardOnFailure out ( signal_pipe . fds_rw [ 1 ] , buf_size , buf ) ; <nl> <nl> const ucontext_t signal_context = * reinterpret_cast < ucontext_t * > ( context ) ; <nl> class SignalListener : public Poco : : Runnable <nl> DB : : readPODBinary ( stack_trace , in ) ; <nl> DB : : readBinary ( thread_num , in ) ; <nl> <nl> - if ( sig = = SIGTSTP & & info . si_value . sival_ptr ) <nl> - { <nl> - / / / TSTP signal with value is used to make a custom callback from this thread . <nl> - try <nl> - { <nl> - reinterpret_cast < SignalCallback * > ( info . si_value . sival_ptr ) ( info , stack_trace , thread_num ) ; <nl> - continue ; <nl> - } <nl> - catch ( . . . ) <nl> - { <nl> - / / / Failed to process , will use ' onFault ' function . <nl> - } <nl> - } <nl> - <nl> / / / This allows to receive more signals if failure happens inside onFault function . <nl> / / / Example : segfault while symbolizing stack trace . <nl> std : : thread ( [ = ] { onFault ( sig , info , context , stack_trace , thread_num ) ; } ) . detach ( ) ; <nl>
Simplification
ClickHouse/ClickHouse
9c868c910a5e7c46178e247393787ff96dc48b3f
2019-12-22T20:17:16Z
mmm a / stdlib / public / core / ArrayBuffer . swift <nl> ppp b / stdlib / public / core / ArrayBuffer . swift <nl> extension _ArrayBuffer { <nl> <nl> / / / Convert to an NSArray . <nl> / / / <nl> - / / / O ( 1 ) if the element type is bridged verbatim , O ( N ) otherwise . <nl> + / / / O ( 1 ) if the element type is bridged verbatim , O ( * n * ) otherwise . <nl> internal func _asCocoaArray ( ) - > _NSArrayCore { <nl> return _fastPath ( _isNative ) ? _native . _asCocoaArray ( ) : _nonNative <nl> } <nl> mmm a / stdlib / public / core / ArrayType . swift <nl> ppp b / stdlib / public / core / ArrayType . swift <nl> internal protocol _ArrayProtocol <nl> / / / <nl> / / / - returns : The removed element . <nl> / / / <nl> - / / / - Complexity : Worst case O ( N ) . <nl> + / / / - Complexity : Worst case O ( * n * ) . <nl> / / / <nl> / / / - Precondition : ` count > index ` . <nl> @ discardableResult <nl> mmm a / stdlib / public / core / Arrays . swift . gyb <nl> ppp b / stdlib / public / core / Arrays . swift . gyb <nl> if True : <nl> / / / <nl> / / / Arrays are one of the most commonly used data types in an app . You use <nl> / / / arrays to organize your app ' s data . Specifically , you use the ` Array ` type <nl> - / / / to hold elements of a single type , the array ' s ` Element ` type . An array ' s <nl> - / / / elements can be anything from an integer to a string to a class . <nl> + / / / to hold elements of a single type , the array ' s ` Element ` type . An array <nl> + / / / can store any kind of elementsmmmfrom integers to strings to classes . <nl> / / / <nl> / / / Swift makes it easy to create arrays in your code using an array literal : <nl> / / / simply surround a comma separated list of values with square brackets . <nl> if True : <nl> / / / var students = [ " Ben " , " Ivy " , " Jordell " ] <nl> / / / <nl> / / / To add single elements to the end of an array , use the ` append ( _ : ) ` method . <nl> - / / / Add multiple elements at once by passing another array or a sequence of <nl> - / / / any kind to the ` append ( contentsOf : ) ` method . <nl> + / / / Add multiple elements at the same time by passing another array or a <nl> + / / / sequence of any kind to the ` append ( contentsOf : ) ` method . <nl> / / / <nl> / / / students . append ( " Maxime " ) <nl> / / / students . append ( contentsOf : [ " Shakia " , " William " ] ) <nl> if True : <nl> / / / You can add new elements in the middle of an array by using the <nl> / / / ` insert ( _ : at : ) ` method for single elements and by using <nl> / / / ` insert ( contentsOf : at : ) ` to insert multiple elements from another <nl> - / / / collection or array literal . The elements at that index and later are <nl> - / / / shifted back to make room . <nl> + / / / collection or array literal . The elements at that index and later indices <nl> + / / / are shifted back to make room . <nl> / / / <nl> / / / students . insert ( " Liam " , at : 3 ) <nl> / / / / / [ " Ben " , " Ivy " , " Jordell " , " Liam " , " Maxime " , " Shakia " , " William " ] <nl> if True : <nl> / / / students . removeLast ( ) <nl> / / / / / [ " Ivy " , " Jordell " , " Liam " , " Maxime " , " Shakia " ] <nl> / / / <nl> - / / / You can replace an existing element with a new value by assigning to the <nl> - / / / subscript . <nl> + / / / You can replace an existing element with a new value by assigning the new <nl> + / / / value to the subscript . <nl> / / / <nl> / / / if let i = students . 
index ( of : " Maxime " ) { <nl> / / / students [ i ] = " Max " <nl> if True : <nl> / / / <nl> / / / Bridging from ` Array ` to ` NSArray ` takes O ( 1 ) time and O ( 1 ) space if the <nl> / / / array ' s elements are already instances of a class or an ` @ objc ` protocol ; <nl> - / / / otherwise , it takes O ( n ) time and space . <nl> + / / / otherwise , it takes O ( * n * ) time and space . <nl> / / / <nl> / / / Bridging from ` NSArray ` to ` Array ` first calls the ` copy ( with : ) ` <nl> / / / ( ` - copyWithZone : ` in Objective - C ) method on the array to get an immutable <nl> mmm a / stdlib / public / core / Collection . swift <nl> ppp b / stdlib / public / core / Collection . swift <nl> public struct IndexingIterator < <nl> / / / able to calculate its ` count ` property in O ( 1 ) time . Conversely , because a <nl> / / / forward or bidirectional collection must traverse the entire collection to <nl> / / / count the number of contained elements , accessing its ` count ` property is <nl> - / / / an O ( N ) operation . <nl> + / / / an O ( * n * ) operation . <nl> public protocol Collection : Indexable , Sequence { <nl> / / / A type that can represent the number of steps between a pair of <nl> / / / indices . <nl> public protocol Collection : Indexable , Sequence { <nl> / / / or ` Optional ( nil ) ` if an element was determined to be missing ; <nl> / / / otherwise , ` nil ` . <nl> / / / <nl> - / / / - Complexity : O ( N ) . <nl> + / / / - Complexity : O ( * n * ) <nl> func _customIndexOfEquatableElement ( _ element : Iterator . Element ) - > Index ? ? <nl> <nl> / / / The first element of the collection . <nl> extension Collection { <nl> / / / Customization point for ` Collection . index ( of : ) ` . <nl> / / / <nl> / / / Define this method if the collection can find an element in less than <nl> - / / / O ( N ) by exploiting collection - specific knowledge . <nl> + / / / O ( * n * ) by exploiting collection - specific knowledge . <nl> / / / <nl> / / / - Returns : ` nil ` if a linear search should be attempted instead , <nl> / / / ` Optional ( nil ) ` if the element was not found , or <nl> mmm a / stdlib / public / core / ContiguousArrayBuffer . swift <nl> ppp b / stdlib / public / core / ContiguousArrayBuffer . swift <nl> internal struct _ContiguousArrayBuffer < Element > : _ArrayBufferProtocol { <nl> / / / <nl> / / / - Precondition : ` U ` is a class or ` @ objc ` existential . <nl> / / / <nl> - / / / - Complexity : O ( N ) . <nl> + / / / - Complexity : O ( * n * ) <nl> func storesOnlyElementsOfType < U > ( <nl> _ : U . Type <nl> ) - > Bool { <nl> mmm a / stdlib / public / core / Equatable . swift <nl> ppp b / stdlib / public / core / Equatable . swift <nl> public func ! = < T : Equatable > ( lhs : T , rhs : T ) - > Bool { <nl> / / / same value . For value equality , see the equal - to operator ( ` = = ` ) and the <nl> / / / ` Equatable ` protocol . <nl> / / / <nl> - / / / The following example defines an ` IntegerRef ` type ; an integer type with <nl> + / / / The following example defines an ` IntegerRef ` type , an integer type with <nl> / / / reference semantics . <nl> / / / <nl> / / / class IntegerRef : Equatable { <nl> public func ! 
= < T : Equatable > ( lhs : T , rhs : T ) - > Bool { <nl> / / / / / Prints " true " <nl> / / / <nl> / / / The identical - to operator ( ` = = = ` ) returns ` false ` when comparing two <nl> - / / / references to different objects instances , even if the two instances have <nl> + / / / references to different object instances , even if the two instances have <nl> / / / the same value . <nl> / / / <nl> / / / let c = IntegerRef ( 10 ) <nl> mmm a / stdlib / public / core / ExistentialCollection . swift . gyb <nl> ppp b / stdlib / public / core / ExistentialCollection . swift . gyb <nl> public struct $ { Self } < Element > <nl> <nl> / / / The number of elements . <nl> / / / <nl> - / / / - Complexity : $ { ' O ( 1 ) ' if Traversal = = ' RandomAccess ' else ' O ( N ) ' } <nl> + / / / - Complexity : $ { ' O ( 1 ) ' if Traversal = = ' RandomAccess ' else ' O ( * n * ) ' } <nl> public var count : IntMax { <nl> return _box . _count <nl> } <nl> mmm a / stdlib / public / core / Filter . swift . gyb <nl> ppp b / stdlib / public / core / Filter . swift . gyb <nl> public struct $ { Self } < <nl> / / / <nl> / / / In an empty collection , ` startIndex = = endIndex ` . <nl> / / / <nl> - / / / - Complexity : O ( N ) , where N is the ratio between unfiltered and <nl> + / / / - Complexity : O ( * n * ) , where * n * is the ratio between unfiltered and <nl> / / / filtered collection counts . <nl> public var startIndex : Index { <nl> var index = _base . startIndex <nl> mmm a / stdlib / public / core / HashedCollections . swift . gyb <nl> ppp b / stdlib / public / core / HashedCollections . swift . gyb <nl> internal struct _UnmanagedAnyObjectArray { <nl> / / / set ' s ` Element ` type is neither a class nor an ` @ objc ` protocol , any <nl> / / / required bridging of elements occurs at the first access of each element , <nl> / / / so the first operation that uses the contents of the set ( for example , a <nl> - / / / membership test ) can take O ( N ) . <nl> + / / / membership test ) can take O ( * n * ) . <nl> / / / <nl> / / / Bridging from ` NSSet ` to ` Set ` first calls the ` copy ( with : ) ` method <nl> / / / ( ` - copyWithZone : ` in Objective - C ) on the set to get an immutable copy and <nl> public struct Set < Element : Hashable > : <nl> / / / <nl> / / / In the following example , the ` bothNeighborsAndEmployees ` set is made up <nl> / / / of the elements that are in * both * the ` employees ` and ` neighbors ` sets . <nl> - / / / Elements that are only in one or the other are left out of the result of <nl> + / / / Elements that are in only one or the other are left out of the result of <nl> / / / the intersection . <nl> / / / <nl> / / / let employees : Set = [ " Alicia " , " Bethany " , " Chris " , " Diana " , " Eric " ] <nl> public struct Dictionary < Key : Hashable , Value > : <nl> / / / the dictionary ; otherwise , ` nil ` . <nl> @ inline ( __always ) <nl> public func index ( forKey key : Key ) - > Index ? { <nl> - / / Complexity : amortized O ( 1 ) for native storage , O ( N ) when wrapping an <nl> + / / Complexity : amortized O ( 1 ) for native storage , O ( * n * ) when wrapping an <nl> / / NSDictionary . <nl> return _variantStorage . index ( forKey : key ) <nl> } <nl> extension Set { <nl> / / / <nl> / / / In the following example , the ` bothNeighborsAndEmployees ` set is made up <nl> / / / of the elements that are in * both * the ` employees ` and ` neighbors ` sets . 
<nl> - / / / Elements that are only in one or the other are left out of the result of <nl> + / / / Elements that are in only one or the other are left out of the result of <nl> / / / the intersection . <nl> / / / <nl> / / / let employees : Set = [ " Alicia " , " Bethany " , " Chris " , " Diana " , " Eric " ] <nl> mmm a / stdlib / public / core / LazyCollection . swift . gyb <nl> ppp b / stdlib / public / core / LazyCollection . swift . gyb <nl> extension $ { Self } : Sequence { <nl> / / / Returns a value less than or equal to the number of elements in <nl> / / / ` self ` , * * nondestructively * * . <nl> / / / <nl> - / / / - Complexity : O ( N ) . <nl> + / / / - Complexity : O ( * n * ) <nl> public var underestimatedCount : Int { return _base . underestimatedCount } <nl> <nl> public func _copyToContiguousArray ( ) <nl> extension $ { Self } : $ { TraversalCollection } { <nl> / / / Returns the number of elements . <nl> / / / <nl> / / / - Complexity : O ( 1 ) if ` Self ` conforms to ` RandomAccessCollection ` ; <nl> - / / / O ( N ) otherwise . <nl> + / / / O ( * n * ) otherwise . <nl> public var count : Base . IndexDistance { <nl> return _base . count <nl> } <nl> extension $ { Self } : $ { TraversalCollection } { <nl> / / / Returns ` Optional ( Optional ( index ) ) ` if an element was found ; <nl> / / / ` nil ` otherwise . <nl> / / / <nl> - / / / - Complexity : O ( N ) . <nl> + / / / - Complexity : O ( * n * ) <nl> public func _customIndexOfEquatableElement ( <nl> _ element : Base . Iterator . Element <nl> ) - > Index ? ? { <nl> mmm a / stdlib / public / core / LazySequence . swift <nl> ppp b / stdlib / public / core / LazySequence . swift <nl> <nl> / / / / / / <nl> / / / / / / ( 1 . . < 6 ) . scan ( 0 , + ) / / [ 0 , 1 , 3 , 6 , 10 , 15 ] <nl> / / / / / / <nl> - / / / / / / - Complexity : O ( N ) <nl> + / / / / / / - Complexity : O ( n ) <nl> / / / func scan < ResultElement > ( <nl> / / / _ initial : ResultElement , <nl> / / / _ nextPartialResult : ( ResultElement , Iterator . Element ) - > ResultElement <nl> mmm a / stdlib / public / core / Map . swift . gyb <nl> ppp b / stdlib / public / core / Map . swift . gyb <nl> public struct LazyMapSequence < Base : Sequence , Element > <nl> / / / Returns a value less than or equal to the number of elements in <nl> / / / ` self ` , * * nondestructively * * . <nl> / / / <nl> - / / / - Complexity : O ( N ) . <nl> + / / / - Complexity : O ( * n * ) <nl> public var underestimatedCount : Int { <nl> return _base . underestimatedCount <nl> } <nl> public struct $ { Self } < <nl> / / / The number of elements in the collection . <nl> / / / <nl> / / / - Complexity : O ( 1 ) if ` Index ` conforms to ` RandomAccessIndex ` ; <nl> - / / / O ( N ) otherwise . <nl> + / / / O ( * n * ) otherwise . <nl> public var count : Base . IndexDistance { <nl> return _base . count <nl> } <nl> mmm a / stdlib / public / core / MutableCollection . swift <nl> ppp b / stdlib / public / core / MutableCollection . swift <nl> public protocol MutableCollection : MutableIndexable , Collection { <nl> / / / collection match ` belongsInSecondPartition ` , the returned index is <nl> / / / equal to the collection ' s ` endIndex ` . <nl> / / / <nl> - / / / - Complexity : O ( n ) <nl> + / / / - Complexity : O ( * n * ) <nl> mutating func partition ( <nl> by belongsInSecondPartition : ( Iterator . Element ) throws - > Bool <nl> ) rethrows - > Index <nl> mmm a / stdlib / public / core / OptionSet . swift <nl> ppp b / stdlib / public / core / OptionSet . 
swift <nl> <nl> / / / = = = = = = = = = = = = = = = = = = = = = = = = <nl> / / / <nl> / / / When you need to create an instance of an option set , assign one of the <nl> - / / / type ' s static members to your variable or constant . Alternately , to create <nl> - / / / an option set instance with multiple members , assign an array literal with <nl> - / / / multiple static members of the option set . To create an empty instance , <nl> - / / / assign an empty array literal to your variable . <nl> + / / / type ' s static members to your variable or constant . Alternatively , to <nl> + / / / create an option set instance with multiple members , assign an array <nl> + / / / literal with multiple static members of the option set . To create an empty <nl> + / / / instance , assign an empty array literal to your variable . <nl> / / / <nl> / / / let singleOption : ShippingOptions = . priority <nl> / / / let multipleOptions : ShippingOptions = [ . nextDay , . secondDay , . priority ] <nl> extension OptionSet where Element = = Self { <nl> / / / Adds the given element to the option set if it is not already a member . <nl> / / / <nl> / / / In the following example , the ` . secondDay ` shipping option is added to <nl> - / / / the ` freeOptions ` option set if ` purchasePrice ` is greating than 50 . For <nl> + / / / the ` freeOptions ` option set if ` purchasePrice ` is greater than 50 . 0 . For <nl> / / / the ` ShippingOptions ` declaration , see the ` OptionSet ` protocol <nl> / / / discussion . <nl> / / / <nl> extension OptionSet where Element = = Self { <nl> return result <nl> } <nl> <nl> - / / / Removes the given element and all elements subsumed by the given element . <nl> + / / / Removes the given element and all elements subsumed by it . <nl> / / / <nl> / / / In the following example , the ` . priority ` shipping option is removed from <nl> / / / the ` options ` option set . Attempting to remove the same shipping option <nl> extension OptionSet where Element = = Self { <nl> / / / / / Prints " true " <nl> / / / <nl> / / / - Parameter member : The element of the set to remove . <nl> - / / / - Returns : The intersection of ` [ member ] ` and the set if the intersection <nl> - / / / was nonempty ; otherwise , ` nil ` . <nl> + / / / - Returns : The intersection of ` [ member ] ` and the set , if the <nl> + / / / intersection was nonempty ; otherwise , ` nil ` . <nl> @ discardableResult <nl> public mutating func remove ( _ member : Element ) - > Element ? { <nl> let r = isSuperset ( of : member ) ? Optional ( member ) : nil <nl> mmm a / stdlib / public / core / RangeReplaceableCollection . swift . gyb <nl> ppp b / stdlib / public / core / RangeReplaceableCollection . swift . gyb <nl> public protocol RangeReplaceableIndexable : Indexable { <nl> <nl> / / / Inserts a new element into the collection at the specified position . <nl> / / / <nl> - / / / The new element is inserted before the element currently at the <nl> - / / / specified index . If you pass the collection ' s ` endIndex ` property as <nl> - / / / the ` index ` parameter , the new element is appended to the <nl> - / / / collection . <nl> + / / / The new element is inserted before the element currently at the specified <nl> + / / / index . If you pass the collection ' s ` endIndex ` property as the ` index ` <nl> + / / / parameter , the new element is appended to the collection . <nl> / / / <nl> / / / var numbers = [ 1 , 2 , 3 , 4 , 5 ] <nl> / / / numbers . 
insert ( 100 , at : 3 ) <nl> public protocol RangeReplaceableIndexable : Indexable { <nl> / / / Calling this method may invalidate any existing indices for use with this <nl> / / / collection . <nl> / / / <nl> - / / / - Parameter newElement : The new element to insert into the collection . <nl> - / / / - Parameter i : The position at which to insert the new element . <nl> - / / / ` index ` must be a valid index into the collection . <nl> + / / / - Parameters : <nl> + / / / - newElement : The new element to insert into the collection . <nl> + / / / - i : The position at which to insert the new element . ` index ` must be a <nl> + / / / valid index into the collection . <nl> / / / <nl> / / / - Complexity : O ( * n * ) , where * n * is the length of the collection . <nl> mutating func insert ( _ newElement : _Element , at i : Index ) <nl> public protocol RangeReplaceableIndexable : Indexable { <nl> / / / Calling this method may invalidate any existing indices for use with this <nl> / / / collection . <nl> / / / <nl> - / / / - Parameter newElements : The new elements to insert into the collection . <nl> - / / / - Parameter i : The position at which to insert the new elements . ` index ` <nl> - / / / must be a valid index of the collection . <nl> + / / / - Parameters : <nl> + / / / - newElements : The new elements to insert into the collection . <nl> + / / / - i : The position at which to insert the new elements . ` index ` must be <nl> + / / / a valid index of the collection . <nl> / / / <nl> / / / - Complexity : O ( * m * ) , where * m * is the combined length of the collection <nl> / / / and ` newElements ` . If ` i ` is equal to the collection ' s ` endIndex ` <nl> public protocol RangeReplaceableCollection <nl> where S . Iterator . Element = = Iterator . Element <nl> * / <nl> <nl> - / / / Adds the elements of a sequence to the end of the collection . <nl> + / / / Adds the elements of a sequence or collection to the end of this <nl> + / / / collection . <nl> / / / <nl> - / / / Use this method to append the elements of a sequence or collection to the <nl> - / / / end of this collection . The collection being appended to allocates any <nl> - / / / additional necessary storage to hold the new elements . <nl> + / / / The collection being appended to allocates any additional necessary <nl> + / / / storage to hold the new elements . <nl> / / / <nl> / / / The following example appends the elements of a ` Range < Int > ` instance to <nl> / / / an array of integers : <nl> extension RangeReplaceableCollection { <nl> insert ( newElement , at : endIndex ) <nl> } <nl> <nl> - / / / Adds the elements of a sequence to the end of the collection . <nl> + / / / Adds the elements of a sequence or collection to the end of this <nl> + / / / collection . <nl> / / / <nl> - / / / Use this method to append the elements of a sequence or collection to the <nl> - / / / end of this collection . The collection being appended to allocates any <nl> - / / / additional necessary storage to hold the new elements . <nl> + / / / The collection being appended to allocates any additional necessary <nl> + / / / storage to hold the new elements . <nl> / / / <nl> / / / The following example appends the elements of a ` Range < Int > ` instance to <nl> / / / an array of integers : <nl> mmm a / stdlib / public / core / Sequence . swift <nl> ppp b / stdlib / public / core / Sequence . swift <nl> public protocol IteratorProtocol { <nl> <nl> / / / A type that provides sequential , iterated access to its elements . 
<nl> / / / <nl> - / / / Sequences are lists of values that let you step over their values <nl> - / / / one at a time . The most common way to iterate over the elements of a <nl> - / / / sequence is to use a ` for ` - ` in ` loop : <nl> + / / / A sequence is a list of values that you can step through one at a time . The <nl> + / / / most common way to iterate over the elements of a sequence is to use a <nl> + / / / ` for ` - ` in ` loop : <nl> / / / <nl> / / / let oneTwoThree = 1 . . . 3 <nl> / / / for number in oneTwoThree { <nl> public protocol IteratorProtocol { <nl> / / / } <nl> / / / / / Prints " Whew , no mosquitos ! " <nl> / / / <nl> - / / / Repeated Access <nl> + / / / Repeated Access <nl> / / / = = = = = = = = = = = = = = = <nl> / / / <nl> / / / The ` Sequence ` protocol makes no requirement on conforming types regarding <nl> - / / / whether they will be destructively " consumed " by iteration . As a <nl> + / / / whether they will be destructively consumed by iteration . As a <nl> / / / consequence , don ' t assume that multiple ` for ` - ` in ` loops on a sequence <nl> - / / / will either " resume " iteration or restart from the beginning : <nl> + / / / will either resume iteration or restart from the beginning : <nl> / / / <nl> / / / for element in sequence { <nl> / / / if . . . some condition { break } <nl> / / / } <nl> - / / / <nl> + / / / <nl> / / / for element in sequence { <nl> / / / / / No defined behavior <nl> / / / } <nl> / / / <nl> - / / / In this case , you cannot assume that a sequence will either be <nl> - / / / " consumable " and will resume iteration , or that a sequence is a <nl> - / / / collection and will restart iteration from the first element . A <nl> - / / / conforming sequence that is not a collection is allowed to produce an <nl> - / / / arbitrary sequence of elements in the second ` for ` - ` in ` loop . <nl> + / / / In this case , you cannot assume either that a sequence will be consumable <nl> + / / / and will resume iteration , or that a sequence is a collection and will <nl> + / / / restart iteration from the first element . A conforming sequence that is <nl> + / / / not a collection is allowed to produce an arbitrary sequence of elements <nl> + / / / in the second ` for ` - ` in ` loop . <nl> / / / <nl> - / / / To establish that a type you ' ve created supports nondestructive <nl> - / / / iteration , add conformance to the ` Collection ` protocol . <nl> + / / / To establish that a type you ' ve created supports nondestructive iteration , <nl> + / / / add conformance to the ` Collection ` protocol . <nl> / / / <nl> - / / / Conforming to the Sequence Protocol <nl> + / / / Conforming to the Sequence Protocol <nl> / / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> / / / <nl> / / / Making your own custom types conform to ` Sequence ` enables many useful <nl> / / / operations , like ` for ` - ` in ` looping and the ` contains ` method , without <nl> - / / / much effort . To add ` Sequence ` conformance to your own custom type , add <nl> - / / / a ` makeIterator ( ) ` method that returns an iterator . <nl> + / / / much effort . To add ` Sequence ` conformance to your own custom type , add a <nl> + / / / ` makeIterator ( ) ` method that returns an iterator . 
<nl> / / / <nl> / / / Alternatively , if your type can act as its own iterator , implementing the <nl> / / / requirements of the ` IteratorProtocol ` protocol and declaring conformance <nl> public protocol IteratorProtocol { <nl> / / / <nl> / / / struct Countdown : Sequence , IteratorProtocol { <nl> / / / var count : Int <nl> - / / / <nl> + / / / <nl> / / / mutating func next ( ) - > Int ? { <nl> / / / if count = = 0 { <nl> / / / return nil <nl> public protocol IteratorProtocol { <nl> / / / } <nl> / / / } <nl> / / / } <nl> - / / / <nl> + / / / <nl> / / / let threeToGo = Countdown ( count : 3 ) <nl> / / / for i in threeToGo { <nl> / / / print ( i ) <nl> public protocol IteratorProtocol { <nl> / / / / / Prints " 2 " <nl> / / / / / Prints " 1 " <nl> / / / <nl> - / / / Expected Performance <nl> + / / / Expected Performance <nl> / / / = = = = = = = = = = = = = = = = = = = = <nl> / / / <nl> / / / A sequence should provide its iterator in O ( 1 ) . The ` Sequence ` protocol <nl> extension Sequence { <nl> / / / Returns a value less than or equal to the number of elements in <nl> / / / the sequence , nondestructively . <nl> / / / <nl> - / / / - Complexity : O ( N ) . <nl> + / / / - Complexity : O ( * n * ) <nl> public var underestimatedCount : Int { <nl> return 0 <nl> } <nl> mmm a / stdlib / public / core / SequenceAlgorithms . swift . gyb <nl> ppp b / stdlib / public / core / SequenceAlgorithms . swift . gyb <nl> extension Sequence { <nl> / / / These counters can only be used as indices in instances of zero - based , <nl> / / / integer - indexed collections , such as ` Array ` and ` ContiguousArray ` . For <nl> / / / other collections the counters may be out of range or of the wrong type <nl> - / / / to use as an index . To iterate over the elements of collection with its <nl> + / / / to use as an index . To iterate over the elements of a collection with its <nl> / / / indices , use the ` zip ( _ : _ : ) ` function . <nl> / / / <nl> / / / This example iterates over the indices and elements of a set , building a <nl> $ { equivalenceExplanation } <nl> extension Sequence $ { " " if preds else " where Iterator . Element : Equatable " } { <nl> <nl> % if preds : <nl> - / / / Returns a Boolean value indicating whether the sequence and a given <nl> + / / / Returns a Boolean value indicating whether this sequence and another <nl> / / / sequence contain equivalent elements , using the given predicate as the <nl> / / / equivalence test . <nl> / / / <nl> $ { equivalenceExplanation } <nl> / / / <nl> / / / - SeeAlso : ` elementsEqual ( _ : ) ` <nl> % else : <nl> - / / / Returns a Boolean value indicating whether the sequence and a given <nl> + / / / Returns a Boolean value indicating whether this sequence and another <nl> / / / sequence contain the same elements in the same order . <nl> / / / <nl> / / / At least one of the sequences must be finite . <nl> / / / <nl> / / / This example tests whether one countable range shares the same elements <nl> - / / / as another countable range and as an array with the same elements . <nl> + / / / as another countable range and an array . <nl> / / / <nl> / / / let a = 1 . . . 3 <nl> / / / let b = 1 . . . 10 <nl> extension Sequence $ { " " if preds else " where Iterator . 
Element : Comparable " } { <nl> <nl> % if preds : <nl> / / / Returns a Boolean value indicating whether the sequence precedes another <nl> - / / / sequence in a lexicographical ( " dictionary " ) ordering , using the given <nl> + / / / sequence in a lexicographical ( dictionary ) ordering , using the given <nl> / / / predicate to compare elements . <nl> / / / <nl> $ { orderingExplanation } <nl> $ { orderingExplanation } <nl> / / / - SeeAlso : ` lexicographicallyPrecedes ( _ : ) ` <nl> % else : <nl> / / / Returns a Boolean value indicating whether the sequence precedes another <nl> - / / / sequence in a lexicographical ( " dictionary " ) ordering , using the <nl> + / / / sequence in a lexicographical ( dictionary ) ordering , using the <nl> / / / less - than operator ( ` < ` ) to compare elements . <nl> / / / <nl> / / / This example uses the ` lexicographicallyPrecedes ` method to test which <nl> $ { orderingExplanation } <nl> / / / <nl> / / / - Note : This method implements the mathematical notion of lexicographical <nl> / / / ordering , which has no connection to Unicode . If you are sorting <nl> - / / / strings to present to the end user , you should use ` String ` APIs that <nl> + / / / strings to present to the end user , use ` String ` APIs that <nl> / / / perform localized comparison . <nl> / / / - SeeAlso : ` lexicographicallyPrecedes ( _ : by : ) ` <nl> % end <nl> extension Sequence { <nl> / / / <nl> / / / The sequence must be finite . <nl> / / / <nl> - / / / - Complexity : O ( N ) , where N is the length of the sequence . <nl> + / / / - Complexity : O ( * n * ) , where * n * is the length of the sequence . <nl> / / / <nl> / / / - Returns : An array containing the elements of this sequence in <nl> / / / reverse order . <nl> mmm a / stdlib / public / core / SetAlgebra . swift <nl> ppp b / stdlib / public / core / SetAlgebra . swift <nl> public protocol SetAlgebra : Equatable , ExpressibleByArrayLiteral { <nl> / / / <nl> / / / In the following example , the ` bothNeighborsAndEmployees ` set is made up <nl> / / / of the elements that are in * both * the ` employees ` and ` neighbors ` sets . <nl> - / / / Elements that are only in one or the other are left out of the result of <nl> + / / / Elements that are in only one or the other are left out of the result of <nl> / / / the intersection . <nl> / / / <nl> / / / let employees : Set = [ " Alicia " , " Bethany " , " Chris " , " Diana " , " Eric " ] <nl> mmm a / stdlib / public / core / String . swift <nl> ppp b / stdlib / public / core / String . swift <nl> extension String { <nl> / / / <nl> / / / - Returns : A lowercase copy of the string . <nl> / / / <nl> - / / / - Complexity : O ( n ) <nl> + / / / - Complexity : O ( * n * ) <nl> public func lowercased ( ) - > String { <nl> if self . _core . isASCII { <nl> let count = self . _core . count <nl> extension String { <nl> / / / <nl> / / / - Returns : An uppercase copy of the string . <nl> / / / <nl> - / / / - Complexity : O ( n ) <nl> + / / / - Complexity : O ( * n * ) <nl> public func uppercased ( ) - > String { <nl> if self . _core . isASCII { <nl> let count = self . _core . count <nl> mmm a / stdlib / public / core / StringCore . swift <nl> ppp b / stdlib / public / core / StringCore . swift <nl> public struct _StringCore { <nl> / / / Returns ` true ` iff the contents of this string can be <nl> / / / represented as pure ASCII . <nl> / / / <nl> - / / / - Complexity : O ( N ) in the worst case . <nl> + / / / - Complexity : O ( * n * ) in the worst case . 
<nl> func isRepresentableAsASCII ( ) - > Bool { <nl> if _slowPath ( ! hasContiguousStorage ) { <nl> return false <nl> extension _StringCore : RangeReplaceableCollection { <nl> / / / Replace the elements within ` bounds ` with ` newElements ` . <nl> / / / <nl> / / / - Complexity : O ( ` bounds . count ` ) if ` bounds . upperBound <nl> - / / / = = self . endIndex ` and ` newElements . isEmpty ` , O ( N ) otherwise . <nl> + / / / = = self . endIndex ` and ` newElements . isEmpty ` , O ( * n * ) otherwise . <nl> public mutating func replaceSubrange < C > ( <nl> _ bounds : Range < Int > , <nl> with newElements : C <nl> mmm a / stdlib / public / core / Unicode . swift <nl> ppp b / stdlib / public / core / Unicode . swift <nl> public protocol UnicodeCodec { <nl> / / / Searches for the first occurrence of a ` CodeUnit ` that is equal to 0 . <nl> / / / <nl> / / / Is an equivalent of ` strlen ` for C - strings . <nl> - / / / - Complexity : O ( n ) <nl> + / / / <nl> + / / / - Complexity : O ( * n * ) <nl> static func _nullCodeUnitOffset ( in input : UnsafePointer < CodeUnit > ) - > Int <nl> } <nl> <nl> mmm a / test / SourceKit / DocSupport / doc_clang_module . swift . response <nl> ppp b / test / SourceKit / DocSupport / doc_clang_module . swift . response <nl> var FooSubUnnamedEnumeratorA1 : Int { get } <nl> key . name : " insert ( _ : ) " , <nl> key . usr : " s : FesRxs9OptionSetxzwx7ElementrS_6insertFwxS0_T8insertedSb17memberAfterInsertwxS0__ : : SYNTHESIZED : : c : @ E @ FooRuncingOptions " , <nl> key . original_usr : " s : FesRxs9OptionSetxzwx7ElementrS_6insertFwxS0_T8insertedSb17memberAfterInsertwxS0__ " , <nl> - key . doc . full_as_xml : " < Function > < Name > insert ( _ : ) < / Name > < USR > s : FesRxs9OptionSetxzwx7ElementrS_6insertFwxS0_T8insertedSb17memberAfterInsertwxS0__ < / USR > < Declaration > mutating func insert ( _ newMember : Self ) - & gt ; ( inserted : Bool , memberAfterInsert : Self ) < / Declaration > < Abstract > < Para > Adds the given element to the option set if it is not already a member . < / Para > < / Abstract > < Parameters > < Parameter > < Name > newMember < / Name > < Direction isExplicit = \ " 0 \ " > in < / Direction > < Discussion > < Para > The element to insert . < / Para > < / Discussion > < / Parameter > < / Parameters > < ResultDiscussion > < Para > < codeVoice > ( true , newMember ) < / codeVoice > if < codeVoice > newMember < / codeVoice > was not contained in < codeVoice > self < / codeVoice > . Otherwise , returns < codeVoice > ( false , oldMember ) < / codeVoice > , where < codeVoice > oldMember < / codeVoice > is the member of the set equal to < codeVoice > newMember < / codeVoice > . < / Para > < / ResultDiscussion > < Discussion > < Para > In the following example , the < codeVoice > . secondDay < / codeVoice > shipping option is added to the < codeVoice > freeOptions < / codeVoice > option set if < codeVoice > purchasePrice < / codeVoice > is greating than 50 . For the < codeVoice > ShippingOptions < / codeVoice > declaration , see the < codeVoice > OptionSet < / codeVoice > protocol discussion . < / Para > < CodeListing language = \ " swift \ " > < zCodeLineNumbered > < ! [ CDATA [ let purchasePrice = 87 . 55 ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ var freeOptions : ShippingOptions = [ . standard , . priority ] ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ if purchasePrice > 50 { ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! 
[ CDATA [ freeOptions . insert ( . secondDay ) ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ } ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ print ( freeOptions . contains ( . secondDay ) ) ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ / / Prints \ " true \ " ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < / zCodeLineNumbered > < / CodeListing > < / Discussion > < / Function > " , <nl> + key . doc . full_as_xml : " < Function > < Name > insert ( _ : ) < / Name > < USR > s : FesRxs9OptionSetxzwx7ElementrS_6insertFwxS0_T8insertedSb17memberAfterInsertwxS0__ < / USR > < Declaration > mutating func insert ( _ newMember : Self ) - & gt ; ( inserted : Bool , memberAfterInsert : Self ) < / Declaration > < Abstract > < Para > Adds the given element to the option set if it is not already a member . < / Para > < / Abstract > < Parameters > < Parameter > < Name > newMember < / Name > < Direction isExplicit = \ " 0 \ " > in < / Direction > < Discussion > < Para > The element to insert . < / Para > < / Discussion > < / Parameter > < / Parameters > < ResultDiscussion > < Para > < codeVoice > ( true , newMember ) < / codeVoice > if < codeVoice > newMember < / codeVoice > was not contained in < codeVoice > self < / codeVoice > . Otherwise , returns < codeVoice > ( false , oldMember ) < / codeVoice > , where < codeVoice > oldMember < / codeVoice > is the member of the set equal to < codeVoice > newMember < / codeVoice > . < / Para > < / ResultDiscussion > < Discussion > < Para > In the following example , the < codeVoice > . secondDay < / codeVoice > shipping option is added to the < codeVoice > freeOptions < / codeVoice > option set if < codeVoice > purchasePrice < / codeVoice > is greater than 50 . 0 . For the < codeVoice > ShippingOptions < / codeVoice > declaration , see the < codeVoice > OptionSet < / codeVoice > protocol discussion . < / Para > < CodeListing language = \ " swift \ " > < zCodeLineNumbered > < ! [ CDATA [ let purchasePrice = 87 . 55 ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ var freeOptions : ShippingOptions = [ . standard , . priority ] ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ if purchasePrice > 50 { ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ freeOptions . insert ( . secondDay ) ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ } ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ print ( freeOptions . contains ( . secondDay ) ) ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ / / Prints \ " true \ " ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < / zCodeLineNumbered > < / CodeListing > < / Discussion > < / Function > " , <nl> key . offset : 1962 , <nl> key . length : 110 , <nl> key . fully_annotated_decl : " < decl . function . method . instance > < syntaxtype . attribute . builtin > < syntaxtype . attribute . name > @ discardableResult < / syntaxtype . attribute . name > < / syntaxtype . attribute . builtin > < syntaxtype . keyword > mutating < / syntaxtype . keyword > < syntaxtype . keyword > func < / syntaxtype . keyword > < decl . name > insert < / decl . name > ( < decl . var . parameter > < decl . var . parameter . argument_label > _ < / decl . var . parameter . argument_label > < decl . var . parameter . name > newMember < / decl . var . parameter . name > : < decl . var . parameter . type > < ref . 
struct usr = \ " c : @ E @ FooRuncingOptions \ " > FooRuncingOptions < / ref . struct > < / decl . var . parameter . type > < / decl . var . parameter > ) - & gt ; < decl . function . returntype > < tuple > ( < tuple . element > < tuple . element . argument_label > inserted < / tuple . element . argument_label > : < tuple . element . type > < ref . struct usr = \ " s : Sb \ " > Bool < / ref . struct > < / tuple . element . type > < / tuple . element > , < tuple . element > < tuple . element . argument_label > memberAfterInsert < / tuple . element . argument_label > : < tuple . element . type > < ref . struct usr = \ " c : @ E @ FooRuncingOptions \ " > FooRuncingOptions < / ref . struct > < / tuple . element . type > < / tuple . element > ) < / tuple > < / decl . function . returntype > < / decl . function . method . instance > " , <nl> var FooSubUnnamedEnumeratorA1 : Int { get } <nl> key . name : " remove ( _ : ) " , <nl> key . usr : " s : FesRxs9OptionSetxzwx7ElementrS_6removeFwxS0_GSqwxS0__ : : SYNTHESIZED : : c : @ E @ FooRuncingOptions " , <nl> key . original_usr : " s : FesRxs9OptionSetxzwx7ElementrS_6removeFwxS0_GSqwxS0__ " , <nl> - key . doc . full_as_xml : " < Function > < Name > remove ( _ : ) < / Name > < USR > s : FesRxs9OptionSetxzwx7ElementrS_6removeFwxS0_GSqwxS0__ < / USR > < Declaration > mutating func remove ( _ member : Self ) - & gt ; Self ? < / Declaration > < Abstract > < Para > Removes the given element and all elements subsumed by the given element . < / Para > < / Abstract > < Parameters > < Parameter > < Name > member < / Name > < Direction isExplicit = \ " 0 \ " > in < / Direction > < Discussion > < Para > The element of the set to remove . < / Para > < / Discussion > < / Parameter > < / Parameters > < ResultDiscussion > < Para > The intersection of < codeVoice > [ member ] < / codeVoice > and the set if the intersection was nonempty ; otherwise , < codeVoice > nil < / codeVoice > . < / Para > < / ResultDiscussion > < Discussion > < Para > In the following example , the < codeVoice > . priority < / codeVoice > shipping option is removed from the < codeVoice > options < / codeVoice > option set . Attempting to remove the same shipping option a second time results in < codeVoice > nil < / codeVoice > , because < codeVoice > options < / codeVoice > no longer contains < codeVoice > . priority < / codeVoice > as a member . < / Para > < CodeListing language = \ " swift \ " > < zCodeLineNumbered > < ! [ CDATA [ var options : ShippingOptions = [ . secondDay , . priority ] ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ let priorityOption = options . remove ( . priority ) ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ print ( priorityOption = = . priority ) ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ / / Prints \ " true \ " ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ print ( options . remove ( . priority ) ) ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ / / Prints \ " nil \ " ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < / zCodeLineNumbered > < / CodeListing > < Para > In the next example , the < codeVoice > . express < / codeVoice > element is passed to < codeVoice > remove ( _ : ) < / codeVoice > . Although < codeVoice > . express < / codeVoice > is not a member of < codeVoice > options < / codeVoice > , < codeVoice > . express < / codeVoice > subsumes the remaining < codeVoice > . 
secondDay < / codeVoice > element of the option set . Therefore , < codeVoice > options < / codeVoice > is emptied and the intersection between < codeVoice > . express < / codeVoice > and < codeVoice > options < / codeVoice > is returned . < / Para > < CodeListing language = \ " swift \ " > < zCodeLineNumbered > < ! [ CDATA [ let expressOption = options . remove ( . express ) ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ print ( expressOption = = . express ) ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ / / Prints \ " false \ " ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ print ( expressOption = = . secondDay ) ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ / / Prints \ " true \ " ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < / zCodeLineNumbered > < / CodeListing > < / Discussion > < / Function > " , <nl> + key . doc . full_as_xml : " < Function > < Name > remove ( _ : ) < / Name > < USR > s : FesRxs9OptionSetxzwx7ElementrS_6removeFwxS0_GSqwxS0__ < / USR > < Declaration > mutating func remove ( _ member : Self ) - & gt ; Self ? < / Declaration > < Abstract > < Para > Removes the given element and all elements subsumed by it . < / Para > < / Abstract > < Parameters > < Parameter > < Name > member < / Name > < Direction isExplicit = \ " 0 \ " > in < / Direction > < Discussion > < Para > The element of the set to remove . < / Para > < / Discussion > < / Parameter > < / Parameters > < ResultDiscussion > < Para > The intersection of < codeVoice > [ member ] < / codeVoice > and the set , if the intersection was nonempty ; otherwise , < codeVoice > nil < / codeVoice > . < / Para > < / ResultDiscussion > < Discussion > < Para > In the following example , the < codeVoice > . priority < / codeVoice > shipping option is removed from the < codeVoice > options < / codeVoice > option set . Attempting to remove the same shipping option a second time results in < codeVoice > nil < / codeVoice > , because < codeVoice > options < / codeVoice > no longer contains < codeVoice > . priority < / codeVoice > as a member . < / Para > < CodeListing language = \ " swift \ " > < zCodeLineNumbered > < ! [ CDATA [ var options : ShippingOptions = [ . secondDay , . priority ] ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ let priorityOption = options . remove ( . priority ) ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ print ( priorityOption = = . priority ) ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ / / Prints \ " true \ " ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ print ( options . remove ( . priority ) ) ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ / / Prints \ " nil \ " ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < / zCodeLineNumbered > < / CodeListing > < Para > In the next example , the < codeVoice > . express < / codeVoice > element is passed to < codeVoice > remove ( _ : ) < / codeVoice > . Although < codeVoice > . express < / codeVoice > is not a member of < codeVoice > options < / codeVoice > , < codeVoice > . express < / codeVoice > subsumes the remaining < codeVoice > . secondDay < / codeVoice > element of the option set . Therefore , < codeVoice > options < / codeVoice > is emptied and the intersection between < codeVoice > . express < / codeVoice > and < codeVoice > options < / codeVoice > is returned . 
< / Para > < CodeListing language = \ " swift \ " > < zCodeLineNumbered > < ! [ CDATA [ let expressOption = options . remove ( . express ) ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ print ( expressOption = = . express ) ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ / / Prints \ " false \ " ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ print ( expressOption = = . secondDay ) ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < ! [ CDATA [ / / Prints \ " true \ " ] ] > < / zCodeLineNumbered > < zCodeLineNumbered > < / zCodeLineNumbered > < / CodeListing > < / Discussion > < / Function > " , <nl> key . offset : 2078 , <nl> key . length : 71 , <nl> key . fully_annotated_decl : " < decl . function . method . instance > < syntaxtype . attribute . builtin > < syntaxtype . attribute . name > @ discardableResult < / syntaxtype . attribute . name > < / syntaxtype . attribute . builtin > < syntaxtype . keyword > mutating < / syntaxtype . keyword > < syntaxtype . keyword > func < / syntaxtype . keyword > < decl . name > remove < / decl . name > ( < decl . var . parameter > < decl . var . parameter . argument_label > _ < / decl . var . parameter . argument_label > < decl . var . parameter . name > member < / decl . var . parameter . name > : < decl . var . parameter . type > < ref . struct usr = \ " c : @ E @ FooRuncingOptions \ " > FooRuncingOptions < / ref . struct > < / decl . var . parameter . type > < / decl . var . parameter > ) - & gt ; < decl . function . returntype > < ref . struct usr = \ " c : @ E @ FooRuncingOptions \ " > FooRuncingOptions < / ref . struct > ? < / decl . function . returntype > < / decl . function . method . instance > " , <nl>
Merge pull request from natecook1000 / nc - fixes - 04
apple/swift
b5d9ffb258cdd13c91d9840945a7f211cd09c013
2016-08-13T04:48:48Z
mmm a / tensorflow / python / eager / backprop . py <nl> ppp b / tensorflow / python / eager / backprop . py <nl> def _gradient_function ( op_name , attr_tuple , num_inputs , inputs , outputs , <nl> pywrap_tensorflow . TFE_Py_RegisterGradientFunction ( _gradient_function ) <nl> <nl> <nl> - def _must_record_gradient ( ) : <nl> - return not pywrap_tensorflow . TFE_Py_TapeSetIsEmpty ( ) <nl> - <nl> - <nl> def _record_gradient ( op_name , inputs , attrs , results , name ) : <nl> return pywrap_tensorflow . TFE_Py_RecordGradient ( op_name , inputs , attrs , <nl> results , name ) <nl> <nl> <nl> - execute . must_record_gradient = _must_record_gradient <nl> execute . record_gradient = _record_gradient <nl> <nl> <nl> mmm a / tensorflow / python / eager / execute . py <nl> ppp b / tensorflow / python / eager / execute . py <nl> def execute_with_callbacks ( op_name , num_outputs , inputs , attrs , ctx , name = None ) : <nl> execute = quick_execute <nl> <nl> <nl> - def must_record_gradient ( ) : <nl> - " " " Import backprop if you want gradients recorded . " " " <nl> - return False <nl> - <nl> - <nl> def record_gradient ( unused_op_name , unused_inputs , unused_attrs , unused_results , <nl> unused_name ) : <nl> " " " Import backprop if you want gradients recorded . " " " <nl> mmm a / tensorflow / python / framework / python_op_gen . cc <nl> ppp b / tensorflow / python / framework / python_op_gen . cc <nl> class GenEagerPythonOp : public python_op_gen_internal : : GenPythonOp { <nl> string Code ( ) override ; <nl> <nl> protected : <nl> - void HandleGraphMode ( const string & function_setup , <nl> - const std : : vector < string > & output_sizes ) ; <nl> + void HandleGraphMode ( const string & function_setup ) ; <nl> <nl> string GetEagerNotAllowedError ( ) ; <nl> void ExpectListArg ( const string & indentation , const string & arg_name , <nl> string GenEagerPythonOp : : Code ( ) { <nl> return prelude_ + result_ ; <nl> } <nl> <nl> - void GenEagerPythonOp : : HandleGraphMode ( <nl> - const string & function_setup , const std : : vector < string > & output_sizes ) { <nl> + void GenEagerPythonOp : : HandleGraphMode ( const string & function_setup ) { <nl> strings : : StrAppend ( & result_ , " # Add nodes to the TensorFlow graph . \ n " ) ; <nl> strings : : StrAppend ( & result_ , function_setup ) ; <nl> if ( api_def_ . visibility ( ) = = ApiDef : : VISIBLE ) { <nl> void GenEagerPythonOp : : HandleGraphMode ( <nl> " if not _result : \ n " <nl> " return _op \ n " ) ; <nl> } <nl> + strings : : StrAppend ( & result_ , " _inputs_flat = _op . inputs \ n " ) ; <nl> <nl> / / Compute graph - mode attrs . <nl> if ( op_def_ . attr_size ( ) > 0 ) { <nl> void GenEagerPythonOp : : HandleGraphMode ( <nl> } <nl> } <nl> strings : : StrAppend ( & attr_values , " ) " ) ; <nl> - <nl> - strings : : StrAppend ( & result_ , " if _execute . must_record_gradient ( ) : \ n " ) ; <nl> - strings : : StrAppend ( & result_ , " _inputs_flat = _op . inputs \ n " ) ; <nl> - strings : : StrAppend ( & result_ , <nl> - WordWrap ( " _attrs = ( " , attr_values , kRightMargin ) , <nl> - " \ n " ) ; <nl> - strings : : StrAppend ( & result_ , " _execute . record_gradient ( \ n " , <nl> - " \ " " , op_def_ . name ( ) , <nl> - " \ " , _inputs_flat , _attrs , _result , name ) \ n " ) ; <nl> - } <nl> - if ( num_outs_ = = 1 & & ! output_sizes [ 0 ] . empty ( ) ) { <nl> - / / Single list result . <nl> - } else if ( num_outs_ = = 1 ) { <nl> - / / Execute returns a single - element list which we need to destructure . 
<nl> - strings : : StrAppend ( & result_ , " " , " _result , = _result \ n " ) ; <nl> + strings : : StrAppend ( <nl> + & result_ , WordWrap ( " _attrs = ( " , attr_values , kRightMargin ) , " \ n " ) ; <nl> } else { <nl> - / / Have multiple outputs , so we will need to reformat the return <nl> - / / value of execute ( ) to be a list with one entry per op output <nl> - / / ( that entry will be a list of tensors if that output is of list <nl> - / / type ) . <nl> - / / For list outputs , convert the right subrange of _result into a list . <nl> - Unflatten ( " " , output_sizes , " _result " , & result_ ) ; <nl> - / / Convert to a named tuple . <nl> - strings : : StrAppend ( & result_ , " _result = _ " , op_def_ . name ( ) , <nl> - " Output . _make ( _result ) \ n " ) ; <nl> + strings : : StrAppend ( & result_ , " _attrs = None \ n " ) ; <nl> } <nl> - strings : : StrAppend ( & result_ , " return _result \ n \ n " ) ; <nl> } else { <nl> strings : : StrAppend ( & result_ , " return _op \ n " ) ; <nl> } <nl> void GenEagerPythonOp : : AddEagerFunctionTeardown ( <nl> bool execute_record_gradient ) { <nl> if ( num_outs_ > 0 ) { <nl> if ( execute_record_gradient ) { <nl> - strings : : StrAppend ( & result_ , indentation , <nl> - " if _execute . must_record_gradient ( ) : \ n " ) ; <nl> - strings : : StrAppend ( & result_ , indentation , " _execute . record_gradient ( \ n " , <nl> - " \ " " , op_def_ . name ( ) , <nl> + strings : : StrAppend ( & result_ , indentation , " _execute . record_gradient ( \ n " , <nl> + " \ " " , op_def_ . name ( ) , <nl> " \ " , _inputs_flat , _attrs , _result , name ) \ n " ) ; <nl> } <nl> if ( num_outs_ = = 1 & & ! output_sizes [ 0 ] . empty ( ) ) { <nl> bool GenEagerPythonOp : : AddEagerFastPathAndGraphCode ( <nl> result_ = function_setup ; <nl> return false ; <nl> } <nl> - HandleGraphMode ( function_setup , output_sizes ) ; <nl> + HandleGraphMode ( function_setup ) ; <nl> + AddEagerFunctionTeardown ( " " , output_sizes , <nl> + true / * execute_record_gradient * / ) ; <nl> <nl> AddRawOpExport ( parameters ) ; <nl> strings : : StrAppend ( & result_ , " \ n \ n " ) ; <nl>
Automated rollback of commit f76521a785f9589cec0bb5f51008832f7b7c13a9
tensorflow/tensorflow
efca0e17cfe1292363ca698ce950be8f7e161a0f
2019-08-26T23:07:30Z
mmm a / stdlib / public / core / HeapBuffer . swift <nl> ppp b / stdlib / public / core / HeapBuffer . swift <nl> typealias _HeapObject = SwiftShims . HeapObject <nl> <nl> @ warn_unused_result <nl> @ _silgen_name ( " swift_bufferAllocate " ) <nl> - func _swift_bufferAllocate ( <nl> - bufferType : AnyClass , _ size : Int , _ alignMask : Int ) - > AnyObject <nl> + internal func _swift_bufferAllocate ( <nl> + bufferType type : AnyClass , <nl> + size : Int , <nl> + alignmentMask : Int <nl> + ) - > AnyObject <nl> <nl> / / / A class containing an ivar " value " of type Value , and <nl> / / / containing storage for an array of Element whose size is <nl> func _swift_bufferAllocate ( <nl> / / / either in a derived class , or it can be in some manager object <nl> / / / that owns the _HeapBuffer . <nl> public / / @ testable ( test / Prototypes / MutableIndexableDict . swift ) <nl> - class _HeapBufferStorage < Value , Element > : NonObjectiveCBase { <nl> + class _HeapBufferStorage < Value , Element > : NonObjectiveCBase { <nl> public override init ( ) { } <nl> <nl> / / / The type used to actually manage instances of <nl> struct _HeapBuffer < Value , Element > : Equatable { <nl> <nl> / / _storage is passed inout to _isUnique . Although its value <nl> / / is unchanged , it must appear mutable to the optimizer . <nl> - var _storage : Builtin . NativeObject ? <nl> + internal var _storage : Builtin . NativeObject ? <nl> <nl> public / / @ testable <nl> var storage : AnyObject ? { <nl> struct _HeapBuffer < Value , Element > : Equatable { <nl> } <nl> <nl> @ warn_unused_result <nl> - static func _valueOffset ( ) - > Int { <nl> + internal static func _valueOffset ( ) - > Int { <nl> return _roundUp ( <nl> sizeof ( _HeapObject . self ) , <nl> toAlignment : alignof ( Value . self ) ) <nl> } <nl> <nl> @ warn_unused_result <nl> - static func _elementOffset ( ) - > Int { <nl> + internal static func _elementOffset ( ) - > Int { <nl> return _roundUp ( <nl> _valueOffset ( ) + sizeof ( Value . self ) , <nl> toAlignment : alignof ( Element . self ) ) <nl> } <nl> <nl> @ warn_unused_result <nl> - static func _requiredAlignMask ( ) - > Int { <nl> + internal static func _requiredAlignMask ( ) - > Int { <nl> / / We can ' t use max here because it can allocate an array . <nl> let heapAlign = alignof ( _HeapObject . self ) & - 1 <nl> let valueAlign = alignof ( Value . self ) & - 1 <nl> struct _HeapBuffer < Value , Element > : Equatable { <nl> : ( heapAlign < elementAlign ? elementAlign : heapAlign ) ) <nl> } <nl> <nl> - var _address : UnsafeMutablePointer < Int8 > { <nl> + internal var _address : UnsafeMutablePointer < Int8 > { <nl> return UnsafeMutablePointer ( <nl> Builtin . bridgeToRawPointer ( self . _nativeObject ) ) <nl> } <nl> <nl> - var _value : UnsafeMutablePointer < Value > { <nl> + internal var _value : UnsafeMutablePointer < Value > { <nl> return UnsafeMutablePointer ( <nl> _HeapBuffer . _valueOffset ( ) + _address ) <nl> } <nl> struct _HeapBuffer < Value , Element > : Equatable { <nl> } <nl> <nl> @ warn_unused_result <nl> - func _allocatedSize ( ) - > Int { <nl> + internal func _allocatedSize ( ) - > Int { <nl> return _swift_stdlib_malloc_size ( _address ) <nl> } <nl> <nl> @ warn_unused_result <nl> - func _allocatedAlignMask ( ) - > Int { <nl> + internal func _allocatedAlignMask ( ) - > Int { <nl> return _HeapBuffer . 
_requiredAlignMask ( ) <nl> } <nl> <nl> @ warn_unused_result <nl> - func _allocatedSizeAndAlignMask ( ) - > ( Int , Int ) { <nl> + internal func _allocatedSizeAndAlignMask ( ) - > ( Int , Int ) { <nl> return ( _allocatedSize ( ) , _allocatedAlignMask ( ) ) <nl> } <nl> <nl> / / / Returns the actual number of ` Elements ` we can possibly store . <nl> @ warn_unused_result <nl> - func _capacity ( ) - > Int { <nl> + internal func _capacity ( ) - > Int { <nl> return ( _allocatedSize ( ) - _HeapBuffer . _elementOffset ( ) ) <nl> / strideof ( Element . self ) <nl> } <nl> <nl> - init ( ) { <nl> + internal init ( ) { <nl> self . _storage = nil <nl> } <nl> <nl> struct _HeapBuffer < Value , Element > : Equatable { <nl> self . _storage = Builtin . castToNativeObject ( storage ) <nl> } <nl> <nl> - init ( _ storage : AnyObject ) { <nl> + internal init ( _ storage : AnyObject ) { <nl> _sanityCheck ( <nl> _usesNativeSwiftReferenceCounting ( storage . dynamicType ) , <nl> " HeapBuffer manages only native objects " <nl> struct _HeapBuffer < Value , Element > : Equatable { <nl> self . _storage = Builtin . castToNativeObject ( storage ) <nl> } <nl> <nl> - init < T : AnyObject > ( _ storage : T ? ) { <nl> + internal init < T : AnyObject > ( _ storage : T ? ) { <nl> self = storage . map { _HeapBuffer ( $ 0 ) } ? ? _HeapBuffer ( ) <nl> } <nl> <nl> - init ( nativeStorage : Builtin . NativeObject ? ) { <nl> + internal init ( nativeStorage : Builtin . NativeObject ? ) { <nl> self . _storage = nativeStorage <nl> } <nl> <nl> struct _HeapBuffer < Value , Element > : Equatable { <nl> let alignMask = _HeapBuffer . _requiredAlignMask ( ) <nl> <nl> let object : AnyObject = _swift_bufferAllocate ( <nl> - storageClass , totalSize , alignMask ) <nl> + bufferType : storageClass , <nl> + size : totalSize , <nl> + alignmentMask : alignMask ) <nl> self . _storage = Builtin . castToNativeObject ( object ) <nl> self . _value . initializePointee ( initializer ) <nl> } <nl> <nl> public / / @ testable <nl> - var value : Value { <nl> + var value : Value { <nl> unsafeAddress { <nl> return UnsafePointer ( _value ) <nl> } <nl> struct _HeapBuffer < Value , Element > : Equatable { <nl> } <nl> <nl> / / / ` true ` if storage is non - ` nil ` . <nl> - var hasStorage : Bool { <nl> + internal var hasStorage : Bool { <nl> return _storage ! = nil <nl> } <nl> <nl> - subscript ( i : Int ) - > Element { <nl> + internal subscript ( i : Int ) - > Element { <nl> unsafeAddress { <nl> return UnsafePointer ( baseAddress + i ) <nl> } <nl> struct _HeapBuffer < Value , Element > : Equatable { <nl> } <nl> } <nl> <nl> - var _nativeObject : Builtin . NativeObject { <nl> + internal var _nativeObject : Builtin . NativeObject { <nl> return _storage ! <nl> } <nl> <nl> @ warn_unused_result <nl> - static func fromNativeObject ( x : Builtin . NativeObject ) - > _HeapBuffer { <nl> + internal static func fromNativeObject ( x : Builtin . NativeObject ) - > _HeapBuffer { <nl> return _HeapBuffer ( nativeStorage : x ) <nl> } <nl> <nl> mmm a / stdlib / public / core / ManagedBuffer . swift <nl> ppp b / stdlib / public / core / ManagedBuffer . swift <nl> public struct ManagedBufferPointer < Value , Element > : Equatable { <nl> + minimumCapacity * strideof ( Element . self ) <nl> <nl> let newBuffer : AnyObject = _swift_bufferAllocate ( <nl> - _uncheckedBufferClass , totalSize , _My . _alignmentMask ) <nl> + bufferType : _uncheckedBufferClass , <nl> + size : totalSize , <nl> + alignmentMask : _My . _alignmentMask ) <nl> <nl> self . _nativeBuffer = Builtin . 
castToNativeObject ( newBuffer ) <nl> } <nl>
stdlib : fix coding style
apple/swift
80fa72ad590455ff95ab3b9da2d4a920aa74e009
2016-02-17T22:50:16Z
mmm a / modules / core / test / ocl / test_arithm . cpp <nl> ppp b / modules / core / test / ocl / test_arithm . cpp <nl> OCL_TEST_P ( UMatDot , Mat ) <nl> OCL_OFF ( const double cpuRes = src1_roi . dot ( src2_roi ) ) ; <nl> OCL_ON ( const double gpuRes = usrc1_roi . dot ( usrc2_roi ) ) ; <nl> <nl> - EXPECT_PRED3 ( relativeError , cpuRes , gpuRes , 1e - 6 ) ; <nl> + EXPECT_PRED3 ( relativeError , cpuRes , gpuRes , 1e - 5 ) ; <nl> } <nl> } <nl> <nl> OCL_TEST_P ( ReduceAvg , Mat ) <nl> OCL_OFF ( cv : : reduce ( src_roi , dst_roi , dim , CV_REDUCE_AVG , dtype ) ) ; <nl> OCL_ON ( cv : : reduce ( usrc_roi , udst_roi , dim , CV_REDUCE_AVG , dtype ) ) ; <nl> <nl> - double eps = ddepth < = CV_32S ? 1 : 5e - 6 ; <nl> + double eps = ddepth < = CV_32S ? 1 : 6e - 6 ; <nl> OCL_EXPECT_MATS_NEAR ( dst , eps ) ; <nl> } <nl> } <nl>
Increase epsilon for OpenCL version UMatDot and ReduceAvg accuracy tests
opencv/opencv
20260946913666ca5952bbf9f5152b27dbf3517f
2014-08-02T09:21:34Z
mmm a / tensorflow / python / eager / def_function . py <nl> ppp b / tensorflow / python / eager / def_function . py <nl> def g ( x ) : <nl> l . append ( i ) # Caution ! Doesn ' t work . <nl> ` ` ` <nl> <nl> + Note that unlike other TensorFlow operations , we don ' t convert python <nl> + numerical inputs to tensors . <nl> + <nl> _Referencing ` tf . Variable ` s_ <nl> <nl> The Python function ` func ` may reference stateful objects ( such as <nl>
Minor tf . function documentation update .
tensorflow/tensorflow
5c738693ad5d714eeba413b53a0f037f12fc2c82
2019-02-08T20:06:40Z
mmm a / android / filament - android / src / main / java / com / google / android / filament / Skybox . java <nl> ppp b / android / filament - android / src / main / java / com / google / android / filament / Skybox . java <nl> <nl> <nl> <nl> / * * <nl> - * Use { @ link # Builder } to construct a < code > Skybox < / code > object instance . <nl> + * Use < code > Builder < / code > to construct a < code > Skybox < / code > object instance . <nl> * / <nl> public static class Builder { <nl> @ SuppressWarnings ( { " FieldCanBeLocal " , " UnusedDeclaration " } ) / / Keep to finalize native resources <nl> <nl> private final long mNativeBuilder ; <nl> <nl> / * * <nl> - * Use { @ link # Builder } to construct a < code > Skybox < / code > object instance . <nl> + * Use < code > Builder < / code > to construct a < code > Skybox < / code > object instance . <nl> * / <nl> public Builder ( ) { <nl> mNativeBuilder = nCreateBuilder ( ) ; <nl> mmm a / android / filament - android / src / main / java / com / google / android / filament / Texture . java <nl> ppp b / android / filament - android / src / main / java / com / google / android / filament / Texture . java <nl> public InternalFormat getFormat ( ) { <nl> * @ exception BufferOverflowException if the specified parameters would result in reading <nl> * outside of < code > buffer < / code > . <nl> * <nl> - * @ see Builder # sampler ( Sampler ) <nl> + * @ see Builder # sampler <nl> * @ see PixelBufferDescriptor <nl> * / <nl> public void setImage ( @ NonNull Engine engine , <nl> public void setImage ( @ NonNull Engine engine , <nl> * @ exception BufferOverflowException if the specified parameters would result in reading <nl> * outside of < code > buffer < / code > . <nl> * <nl> - * @ see Builder # sampler ( Sampler ) <nl> + * @ see Builder # sampler <nl> * @ see PixelBufferDescriptor <nl> * / <nl> public void setImage ( @ NonNull Engine engine , <nl> public void setImage ( @ NonNull Engine engine , <nl> * @ exception BufferOverflowException if the specified parameters would result in reading <nl> * outside of < code > buffer < / code > . <nl> * <nl> - * @ see Builder # sampler ( Sampler ) <nl> + * @ see Builder # sampler <nl> * @ see PixelBufferDescriptor <nl> * / <nl> public void setImage ( @ NonNull Engine engine , @ IntRange ( from = 0 ) int level , <nl> public void setImage ( @ NonNull Engine engine , @ IntRange ( from = 0 ) int level , <nl> * < li > < code > kCVPixelFormatType_420YpCbCr8BiPlanarFullRange < / code > < / li > <nl> * < / ul > < / p > <nl> * <nl> - * @ see Builder # sampler ( Sampler ) <nl> + * @ see Builder # sampler <nl> * / <nl> public void setExternalImage ( @ NonNull Engine engine , long eglImage ) { <nl> nSetExternalImage ( getNativeObject ( ) , engine . getNativeObject ( ) , eglImage ) ; <nl> public void setExternalImage ( @ NonNull Engine engine , long eglImage ) { <nl> * { @ link Sampler # SAMPLER_EXTERNAL SAMPLER_EXTERNAL } <nl> * <nl> * @ see Stream <nl> - * @ see Builder # sampler ( Sampler ) <nl> + * @ see Builder # sampler <nl> * <nl> * / <nl> public void setExternalStream ( @ NonNull Engine engine , @ NonNull Stream stream ) { <nl>
Fix javadoc warnings .
google/filament
82e8104539fa7752c86081ae731f22d720a8fc67
2019-10-04T15:47:55Z
mmm a / Documentation / Books / Manual / Deployment / Docker . mdpp <nl> ppp b / Documentation / Books / Manual / Deployment / Docker . mdpp <nl> <nl> <nl> ! SUBSECTION Networking <nl> <nl> - A bit of extra care has to be invested in how docker isolates its network . By default it fully isolates the network and by doing so an endpoint like ` - - server . endpoint tcp : / / 0 . 0 . 0 . 0 : 8529 ` will only bind to all interfaces inside the docker container which does not include any external interface on the host machine . This may be sufficient if you just want to access it locally but in case you want to expose it to the outside you must facilitate dockers port forwarding using the ` - p ` command line option . Be sure to check the [ official docker documentation ] ( https : / / docs . docker . com / engine / reference / run / ) . <nl> + A bit of extra care has to be invested in how Docker isolates its network . By default it fully isolates the network and by doing so an endpoint like ` - - server . endpoint tcp : / / 0 . 0 . 0 . 0 : 8529 ` will only bind to all interfaces inside the Docker container which does not include any external interface on the host machine . This may be sufficient if you just want to access it locally but in case you want to expose it to the outside you must facilitate Dockers port forwarding using the ` - p ` command line option . Be sure to check the [ official Docker documentation ] ( https : / / docs . docker . com / engine / reference / run / ) . <nl> <nl> To simply make arangodb available on all host interfaces on port 8529 : <nl> <nl> ` docker run - p 8529 : 8529 - e ARANGO_NO_AUTH = 1 arangodb ` <nl> <nl> - Another possibility is to start docker via network mode ` host ` . This is possible but generally not recommended . To do it anyway check the docker documentation for details . <nl> + Another possibility is to start Docker via network mode ` host ` . This is possible but generally not recommended . To do it anyway check the Docker documentation for details . <nl> <nl> ! SUBSUBSECTION Docker and Cluster tasks <nl> <nl> - To start the cluster via docker is basically the same as starting [ locally ] ( Local . md ) . However just like with the single networking image we will face networking issues . You can simply use the ` - p ` flag to make the individual task available on the host machine or you could use dockers [ links ] ( https : / / docs . docker . com / engine / reference / run / ) to enable task intercommunication . <nl> + To start the cluster via Docker is basically the same as starting [ locally ] ( Local . md ) . However just like with the single networking image we will face networking issues . You can simply use the ` - p ` flag to make the individual task available on the host machine or you could use Docker ' s [ links ] ( https : / / docs . docker . com / engine / reference / run / ) to enable task intercommunication . <nl> <nl> Please note that there are some flags that specify how ArangoDB can reach a task from the outside . These are very important and built for this exact usecase . An example configuration might look like this : <nl> <nl> Please note that there are some flags that specify how ArangoDB can reach a task <nl> docker run - e ARANGO_NO_AUTH = 1 - p 192 . 168 . 1 . 1 : 10000 : 8529 arangodb arangod - - server . endpoint tcp : / / 0 . 0 . 0 . 0 : 8529 - - cluster . my - address tcp : / / 192 . 168 . 1 . 1 : 10000 - - cluster . my - local - info db1 - - cluster . my - role PRIMARY - - cluster . 
agency - endpoint tcp : / / 192 . 168 . 1 . 1 : 5001 <nl> ` ` ` <nl> <nl> - This will start a primary DB server within a docker container with an isolated network . Within the docker container it will bind on all interfaces ( this will be 127 . 0 . 0 . 1 : 8529 and some internal docker ip on port 8529 ) . By supplying ` - p 192 . 168 . 1 . 1 : 10000 : 8529 ` we are establishing a port forwarding from our local IP ( 192 . 168 . 1 . 1 in this example ) to port 8529 inside the container . Within the command we are telling arangod how it can be reached from the outside ` - - cluster . my - address tcp : / / 192 . 168 . 1 . 1 : 10000 ` . This information will be forwarded to the agency so that the other tasks in your cluster can see how this particular DBServer may be reached . <nl> \ No newline at end of file <nl> + This will start a primary DB server within a Docker container with an isolated network . Within the Docker container it will bind on all interfaces ( this will be 127 . 0 . 0 . 1 : 8529 and some internal Docker ip on port 8529 ) . By supplying ` - p 192 . 168 . 1 . 1 : 10000 : 8529 ` we are establishing a port forwarding from our local IP ( 192 . 168 . 1 . 1 in this example ) to port 8529 inside the container . Within the command we are telling arangod how it can be reached from the outside ` - - cluster . my - address tcp : / / 192 . 168 . 1 . 1 : 10000 ` . This information will be forwarded to the agency so that the other tasks in your cluster can see how this particular DBServer may be reached . <nl> mmm a / Documentation / Books / Manual / Deployment / Local . mdpp <nl> ppp b / Documentation / Books / Manual / Deployment / Local . mdpp <nl> <nl> ! SECTION Launching a local ArangoDB cluster for testing <nl> <nl> - An ArangoDB cluster consists of several running tasks which form the cluster . ArangoDB itself won ' t start or monitor any of these tasks . So it will need some kind of supervisor which is monitoring and starting these tasks . For production usage we recommend using Mesos as the cluster supervisor . <nl> + An ArangoDB cluster consists of several running tasks which form the cluster . ArangoDB itself won ' t start or monitor any of these tasks . So it will need some kind of supervisor which is monitoring and starting these tasks . For production usage we recommend using Apache Mesos as the cluster supervisor . <nl> <nl> However starting a cluster locally is possible and a very easy method to get a first impression of what an ArangoDB cluster looks like . <nl> <nl> - The easiest way to start a local cluster for testing purposes is to run ` scripts / startLocalCluster . sh ` . This will start 1 Agency , 2 DBServers and 1 Coordinator . To stop the cluster issue ` scripts / stopLocalCluster . sh ` . <nl> + The easiest way to start a local cluster for testing purposes is to run ` scripts / startLocalCluster . sh ` from a clone of the [ source repository ] ( https : / / github . com / ArangoDB / ArangoDB ) after compiling ArangoDB from source ( see instructions in the file ` README_maintainers . md ` in the repository . This will start 1 Agency , 2 DBServers and 1 Coordinator . To stop the cluster issue ` scripts / stopLocalCluster . sh ` . <nl> <nl> - This section will now discuss the required parameters for every role in an ArangoDB cluster . Be sure to read the [ Architecture ] ( . . / Scalability / README . md ) documentation to get a basic understanding of the underlying architecture and the involved roles in an ArangoDB cluster . 
<nl> + This section will discuss the required parameters for every role in an ArangoDB cluster . Be sure to read the [ Architecture ] ( . . / Scalability / README . md ) documentation to get a basic understanding of the underlying architecture and the involved roles in an ArangoDB cluster . <nl> <nl> In the following sections we will go through the relevant options per role . <nl> <nl> ! SUBSECTION Agency <nl> <nl> - The bare minimum to start an agency is to provide the id . The id has to be ` 0 ` for a single instance . <nl> + The bare minimum to start an agent is to provide the id . The id has to be ` 0 ` for a single instance . <nl> <nl> - To start up the agency in its fault tolerant mode set the ` - - agency . size ` to ` 3 ` and start the agents with increasing ids starting from ` 0 ` . Furthermore you should provide different ` - - server . endpoint ` to every agent . One of the agents must also have ` - - agency . notify true ` to bootstrap the agency . This agent must also provide a list of all endpoints via ` - - agency . endpoint ` . <nl> + To start up the agency in its fault tolerant mode set the ` - - agency . size ` to ` 3 ` and start the agents with increasing ids starting from ` 0 ` . Furthermore you should provide different ` - - server . endpoint ` values to every agent . One of the agents must also have ` - - agency . notify true ` to bootstrap the agency . This agent must also get a list of all endpoints via ` - - agency . endpoint ` . <nl> <nl> - So in summary this might be what your startup might look like : <nl> + So in summary this is what your startup might look like : <nl> <nl> ` ` ` <nl> build / bin / arangod - - server . endpoint tcp : / / 127 . 0 . 0 . 1 : 5001 - - server . authentication false - - agency . id 0 - - agency . size 3 agency1 & <nl> build / bin / arangod - - server . endpoint tcp : / / 127 . 0 . 0 . 1 : 5003 - - server . authentication <nl> <nl> ! SUBSECTION Coordinators and DBServers <nl> <nl> - These two roles share a common set of relevant options . First you should specify the role using ` - - cluster . my - role ` . This can either be ` PRIMARY ` ( a database server ) or ` COORDINATOR ` . Both also need some unique information with which they will register in the agency . This could for example be some combination of ` " $ HOSTNAME_ $ PORT " ` or whatever you have at hand . However it must be unique and provided as ` - - cluster . my - local - info ` . Furthermore provide the external address of the task via ` - - cluster . my - address ` . <nl> + These two roles share a common set of relevant options . First you should specify the role using ` - - cluster . my - role ` . This can either be ` PRIMARY ` ( a database server ) or ` COORDINATOR ` . Both also need some unique information with which they will register in the agency . This could for example be some combination of host name and port or whatever you have at hand . However it must be unique for each instance and be provided as value for ` - - cluster . my - local - info ` . Furthermore provide the external endpoint ( IP and port ) of the task via ` - - cluster . my - address ` . <nl> <nl> The following is a full - example of what it might look like : <nl> <nl> Upon registering with the agency during startup the cluster will assign an ID to <nl> <nl> Secondaries need a bit more work . Secondaries need to have some primary assigned . To do that there is a special route . To register a Secondary you must first find out the Server - ID of the primary server . 
Then generate your own ID for the secondary ( put it into " newSecondary " ) you are about to start and call one of the coordinators like this : <nl> <nl> - ` curl - f - X PUT - - data " { \ " primary \ " : \ " DBServer1 \ " , \ " oldSecondary \ " : \ " none \ " , \ " newSecondary \ " : \ " Secondary1 \ " } " - H " Content - Type : application / json " : 8530 / _admin / cluster / replaceSecondary ` <nl> + curl - f - X PUT - - data ' { " primary " : " DBServer1 " , " oldSecondary " : " none " , " newSecondary " : " Secondary1 " } ' - H " Content - Type : application / json " http : / / localhost : 8530 / _admin / cluster / replaceSecondary <nl> <nl> If that call was successful you can start the secondary . Instead of providing ` - - cluster . my - local - info ` you should now provide the Id in the curl call above via ` - - cluster . my - id ` . You can omit the ` - - cluster . my - role ` in this case . The secondary will find out from the agency about its role . <nl> <nl> To sum it up : <nl> <nl> - ` ` ` <nl> - curl - f - X PUT - - data " { \ " primary \ " : \ " DBServer1 \ " , \ " oldSecondary \ " : \ " none \ " , \ " newSecondary \ " : \ " Secondary1 \ " } " - H " Content - Type : application / json " : 8530 / _admin / cluster / replaceSecondary & & arangod - - server . authentication = false - - server . endpoint tcp : / / 0 . 0 . 0 . 0 : 8629 - - cluster . my - id Secondary1 - - cluster . agency - endpoint tcp : / / 127 . 0 . 0 . 1 : 5001 secondary1 & <nl> - curl - f - X PUT - - data " { \ " primary \ " : \ " DBServer2 \ " , \ " oldSecondary \ " : \ " none \ " , \ " newSecondary \ " : \ " Secondary2 \ " } " - H " Content - Type : application / json " : 8530 / _admin / cluster / replaceSecondary & & arangod - - server . authentication = false - - server . endpoint tcp : / / 0 . 0 . 0 . 0 : 8630 - - cluster . my - id Secondary2 - - cluster . agency - endpoint tcp : / / 127 . 0 . 0 . 1 : 5001 secondary2 & <nl> - ` ` ` <nl> \ No newline at end of file <nl> + curl - f - X PUT - - data ' { " primary " : " DBServer1 " , " oldSecondary " : " none " , " newSecondary " : " Secondary1 " } ' - H " Content - Type : application / json " http : / / localhost : 8530 / _admin / cluster / replaceSecondary & & arangod - - server . authentication = false - - server . endpoint tcp : / / 0 . 0 . 0 . 0 : 8629 - - cluster . my - id Secondary1 - - cluster . agency - endpoint tcp : / / 127 . 0 . 0 . 1 : 5001 secondary1 & <nl> + curl - f - X PUT - - data ' { " primary " : " DBServer2 " , " oldSecondary " : " none " , " newSecondary " : " Secondary2 " } ' - H " Content - Type : application / json " http : / / localhost : 8530 / _admin / cluster / replaceSecondary & & arangod - - server . authentication = false - - server . endpoint tcp : / / 0 . 0 . 0 . 0 : 8630 - - cluster . my - id Secondary2 - - cluster . agency - endpoint tcp : / / 127 . 0 . 0 . 1 : 5001 secondary2 & <nl> mmm a / Documentation / Books / Manual / Deployment / Mesos . mdpp <nl> ppp b / Documentation / Books / Manual / Deployment / Mesos . mdpp <nl> <nl> ! SECTION Distributed deployment using Apache Mesos <nl> <nl> - ArangoDB has a sophisticated yet easy to use cluster mode . To leverage the full cluster feature set ( monitoring , scaling and failover ) you have to run ArangoDB on some kind of cluster management system . Currently ArangoDB relies on Apache Mesos in that matter . Mesos is a cluster operating system which powers some of the worlds biggest datacenters running several thousands of nodes . 
<nl> + ArangoDB has a sophisticated and yet easy way to use cluster mode . To leverage the full cluster feature set ( monitoring , scaling , automatic failover and automatic replacement of failed nodes ) you have to run ArangoDB on some kind of cluster management system . Currently ArangoDB relies on Apache Mesos in that matter . Mesos is a cluster operating system which powers some of the worlds biggest datacenters running several thousands of nodes . <nl> <nl> ! SUBSECTION DC / OS <nl> <nl> DC / OS comes with its own package management . Packages can be installed from the <nl> 4 . Locate arangodb and hit " Install Package " <nl> 5 . Press " Install Package " <nl> <nl> - 2 . Installing via DC / OS Commandline <nl> + 2 . Installing via the DC / OS command line <nl> <nl> 1 . Install the [ dcos cli ] ( https : / / docs . mesosphere . com / usage / cli / ) <nl> 2 . Open a terminal and issue ` dcos install arangodb ` <nl> <nl> Both options are essentially doing the same in the background . Both are starting ArangoDB with its default options set . <nl> <nl> - To review the default options using the webinterface simply click " Advanced Installation " in the webinterface . There you will find a list of options including some explanation . <nl> + To review the default options using the web interface simply click " Advanced Installation " in the web interface . There you will find a list of options including some explanation . <nl> <nl> To review the default options using the CLI first type ` dcos package describe - - config arangodb ` . This will give you a flat list of default settings . <nl> <nl> - To get an explanation of the various command line options please check the latest options here ( choose the most recent number and have a look at config . json ) : <nl> + To get an explanation of the various command line options please check the latest options here ( choose the most recent number and have a look at ` config . json ` ) : <nl> <nl> https : / / github . com / mesosphere / universe / tree / version - 3 . x / repo / packages / A / arangodb <nl> <nl> - After installing DC / OS will start deploying the ArangoDB cluster on the DC / OS cluster . You can watch ArangoDB starting on the " Services " tab in the webinterface . Once it is listed as healthy click the link next to it and you should see the ArangoDB webinterface . <nl> + After installation DC / OS will start deploying the ArangoDB cluster on the DC / OS cluster . You can watch ArangoDB starting on the " Services " tab in the web interface . Once it is listed as healthy click the link next to it and you should see the ArangoDB web interface . <nl> <nl> ! SUBSUBSECTION ArangoDB Mesos framework <nl> <nl> - As soon as ArangoDB was deployed Mesos will keep your cluster running . The webinterface has many monitoring facilities so be sure to make yourself familiar with the DC / OS webinterface . As a fault tolerant system Mesos will take care of most failure scenarios automatically . Mesos does that by running ArangoDB as a so called " framework " . This framework has been specifically built to keep ArangoDB running in a healthy condition on the Mesos cluster . From time to time a task might fail . The ArangoDB framework will then take care of rescheduling the failed task . As it knows about the very specifics of each cluster task and its role it will automatically take care of most failure scenarios . <nl> + As soon as ArangoDB was deployed Mesos will keep your cluster running . 
The web interface has many monitoring facilities so be sure to make yourself familiar with the DC / OS web interface . As a fault tolerant system Mesos will take care of most failure scenarios automatically . Mesos does that by running ArangoDB as a so called " framework " . This framework has been specifically built to keep ArangoDB running in a healthy condition on the Mesos cluster . From time to time a task might fail . The ArangoDB framework will then take care of rescheduling the failed task . As it knows about the very specifics of each cluster task and its role it will automatically take care of most failure scenarios . <nl> <nl> - To inspect what the framework is doing go to ` http : / / webinterface - url / mesos ` in your browser . Locate the task " arangodb " and inspect stderr in the " Sandbox " . This can be of interest for example when a slave got lost and the framework is rescheduling the task . <nl> + To inspect what the framework is doing go to ` http : / / web - interface - url / mesos ` in your browser . Locate the task " arangodb " and inspect stderr in the " Sandbox " . This can be of interest for example when a slave got lost and the framework is rescheduling the task . <nl> <nl> ! SUBSUBSECTION Using ArangoDB <nl> <nl> To change the settings of your ArangoDB Cluster access the ArangoDB UI and hit " <nl> <nl> After changing the settings the ArangoDB framework will take care of the rest . Scaling your cluster up is generally a straightforward operation as Mesos will simply launch another task and be done with it . Scaling down is a bit more complicated as the data first has to be moved to some other place so that will naturally take somewhat longer . <nl> <nl> - Please note that scaling operations might not always work . For example if the underlying Mesos cluster is completely saturated with its running tasks scaling up will not be possible . Scaling down might also fail due to not being able to move all shards of a DBServer to a new destination because of size limitations . Be sure to check the output of the ArangoDB framework . <nl> + Please note that scaling operations might not always work . For example if the underlying Mesos cluster is completely saturated with its running tasks scaling up will not be possible . Scaling down might also fail due to the cluster not being able to move all shards of a DBServer to a new destination because of size limitations . Be sure to check the output of the ArangoDB framework . <nl> <nl> ! SUBSUBSECTION Deinstallation <nl> <nl> Should you forget to cleanup the state you can do so later by using the [ arangod <nl> <nl> The cleanup framework will announce itself as a normal ArangoDB . Mesos will recognize this and offer all persistent volumes it still has for ArangoDB to this framework . The cleanup framework will then properly free the persistent volumes . Finally it will clean up any state left in zookeeper ( the central configuration manager in a Mesos cluster ) . <nl> <nl> - To deploy the cleanup framework , follow the instructions in the github repository . After deploying watch the output in the sandbox of the Mesos webinterface . After a while there shouldn ' t be any persistent resource offers anymore as everything was cleaned up . After that you can delete the cleanup framework again via marathon . <nl> + To deploy the cleanup framework , follow the instructions in the github repository . After deployment watch the output in the sandbox of the Mesos web interface . 
After a while there shouldn ' t be any persistent resource offers anymore as everything was cleaned up . After that you can delete the cleanup framework again via Marathon . <nl> <nl> ! SUBSECTION Apache Mesos and Marathon <nl> <nl> - You can also install ArangoDB on a bare Apache Mesos cluster provided that marathon is running on it . <nl> + You can also install ArangoDB on a bare Apache Mesos cluster provided that Marathon is running on it . <nl> <nl> Doing so has the following downsides : <nl> <nl> 1 . Manual Mesos cluster setup <nl> 1 . You need to implement your own service discovery <nl> 1 . You are missing the dcos cli <nl> - 1 . Install and deinstall are tedious <nl> - 1 . You need to setup some kind of proxy tunnel to access arangodb from the outside <nl> + 1 . Installation and deinstallation are tedious <nl> + 1 . You need to setup some kind of proxy tunnel to access ArangoDB from the outside <nl> 1 . Sparse monitoring capabilities <nl> <nl> However these are things which do not influence ArangoDB itself and operating your cluster like this is fully supported . <nl> To install ArangoDB via marathon you need a proper config file : <nl> } <nl> ` ` ` <nl> <nl> - Carefully review the settings ( especially the IPs and the resources ) . Then you can deploy to marathon : <nl> + Carefully review the settings ( especially the IPs and the resources ) . Then you can deploy to Marathon : <nl> <nl> ` curl - X POST - H " Content - Type : application / json " http : / / url - of - marathon / v2 / apps - d @ arangodb3 . json ` <nl> <nl> - Alternatively use the webinterface of marathon to deploy ArangoDB . <nl> + Alternatively use the web interface of Marathon to deploy ArangoDB . <nl> <nl> ! SUBSUBSECTION Deinstallation via Marathon <nl> <nl> As with DC / OS you first need to properly cleanup any state leftovers . <nl> <nl> - The easiest is to simply delete arangodb and then deploy the cleanup - framework ( see section arangodb - cleanup - framework ) . <nl> + The easiest is to simply delete ArangoDB and then deploy the cleanup - framework ( see section arangodb - cleanup - framework ) . <nl> <nl> ! SUBSECTION Configuration options <nl> <nl> - The Arangodb Mesos framework has a ton of different options which are listed and described here : https : / / github . com / arangodb / arangodb - mesos - framework / tree / 3 . 0 <nl> \ No newline at end of file <nl> + The Arangodb Mesos framework has a ton of different options which are listed and described here : https : / / github . com / arangodb / arangodb - mesos - framework / tree / 3 . 0 <nl> mmm a / Documentation / Books / Manual / Deployment / Single . mdpp <nl> ppp b / Documentation / Books / Manual / Deployment / Single . mdpp <nl> Please note that ArangoDB will only work on 64bit . <nl> <nl> ! SUBSECTION Docker <nl> <nl> - The most simple way to deploy ArangoDB is using [ Docker ] ( https : / / docker . io / ) . To get a general understanding of docker have a look at their excellent documentation . <nl> + The simplest way to deploy ArangoDB is using [ Docker ] ( https : / / docker . io / ) . To get a general understanding of Docker have a look at [ their excellent documentation ] ( https : / / docs . docker . com / ) . <nl> <nl> ! SUBSUBSECTION Authentication <nl> <nl> - To start the official docker container you will have to decide on an authentication method . Otherwise the container won ' t start . <nl> + To start the official Docker container you will have to decide on an authentication method . Otherwise the container won ' t start . 
<nl> <nl> - Provide one of the arguments to docker as an environment variable . <nl> + Provide one of the arguments to Docker as an environment variable . <nl> <nl> There are three options : <nl> <nl> 1 . ARANGO_NO_AUTH = 1 <nl> <nl> - Disable authentication completely . Useful for local testing or for operating in a trusted network ( without a public interface ) . <nl> + Disable authentication completely . Useful for local testing or for operating in a trusted network ( without a public interface ) . <nl> <nl> 2 . ARANGO_ROOT_PASSWORD = password <nl> <nl> - Start ArangoDB with the given password for root <nl> + Start ArangoDB with the given password for root <nl> <nl> 3 . ARANGO_RANDOM_ROOT_PASSWORD = 1 <nl> <nl> - Let ArangoDB generate a random root password <nl> + Let ArangoDB generate a random root password <nl> <nl> - To get quickly going : <nl> + To get going quickly : <nl> <nl> ` docker run - e ARANGO_RANDOM_ROOT_PASSWORD = 1 arangodb ` <nl> <nl> - For an in depth guide about docker and arangodb please check the official documentation : https : / / hub . docker . com / r / _ / arangodb / <nl> \ No newline at end of file <nl> + For an in depth guide about Docker and ArangoDB please check the official documentation : https : / / hub . docker . com / r / _ / arangodb / <nl>
Minor corrections of typos in Deployment chapter .
arangodb/arangodb
2762d4704d27c78d2e6d97d3411fdd7036a4b409
2016-06-17T05:20:30Z
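The documentation corrected in the commit above describes registering a new secondary with the cluster by issuing a PUT to a coordinator's `/_admin/cluster/replaceSecondary` route before starting the secondary with the chosen ID. Purely as an illustration (not part of the commit), the same registration step could be scripted; the endpoint, port 8530, and the payload fields `primary`, `oldSecondary`, and `newSecondary` come from the documentation text, while the coordinator URL, helper name, and use of the `requests` library are assumptions for this sketch.

```python
# Hypothetical sketch: register a new secondary with an ArangoDB coordinator
# before starting the arangod process, mirroring the curl call in the docs.
# The route and payload keys come from the documentation above; the coordinator
# address and the `requests` dependency are assumptions.
import requests

def register_secondary(coordinator="http://localhost:8530",
                       primary="DBServer1", new_secondary="Secondary1"):
    payload = {
        "primary": primary,             # server ID of the primary to shadow
        "oldSecondary": "none",         # no secondary assigned yet
        "newSecondary": new_secondary,  # ID we pick for the new secondary
    }
    resp = requests.put(coordinator + "/_admin/cluster/replaceSecondary",
                        json=payload,
                        headers={"Content-Type": "application/json"})
    resp.raise_for_status()  # mirrors curl -f: fail on HTTP errors
    return new_secondary     # supply this as --cluster.my-id when starting arangod

if __name__ == "__main__":
    print(register_secondary())
```

As in the documentation, the registered ID would then be passed to the secondary via `--cluster.my-id` instead of `--cluster.my-local-info`.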
mmm a / system / lib / libc . symbols <nl> ppp b / system / lib / libc . symbols <nl> <nl> U getgrent <nl> T getgrouplist <nl> T getgroups <nl> - T gethostbyname <nl> - T gethostbyname2 <nl> - T gethostbyname2_r <nl> - T gethostbyname_r <nl> T gethostent <nl> T gethostid <nl> T gethostname <nl> mmm a / tools / system_libs . py <nl> ppp b / tools / system_libs . py <nl> def create_libc ( libname ) : <nl> ' dlmalloc . c ' , <nl> ] <nl> musl_srcdir = shared . path_from_root ( ' system ' , ' lib ' , ' libc ' , ' musl ' , ' src ' ) <nl> - blacklist = set ( [ ' ipc ' , ' passwd ' , ' thread ' , ' signal ' , ' sched ' , ' ipc ' , ' time ' , ' linux ' , ' aio ' , ' exit ' , ' legacy ' , ' mq ' , ' process ' , ' search ' , ' setjmp ' , ' env ' , ' ldso ' , ' conf ' ] + [ ' memcpy . c ' , ' memset . c ' , ' memmove . c ' , ' getaddrinfo . c ' , ' getnameinfo . c ' , ' inet_addr . c ' , ' res_query . c ' , ' gai_strerror . c ' , ' proto . c ' , ' gethostbyaddr . c ' , ' gethostbyaddr_r . c ' ] ) <nl> + blacklist = set ( [ ' ipc ' , ' passwd ' , ' thread ' , ' signal ' , ' sched ' , ' ipc ' , ' time ' , ' linux ' , ' aio ' , ' exit ' , ' legacy ' , ' mq ' , ' process ' , ' search ' , ' setjmp ' , ' env ' , ' ldso ' , ' conf ' ] + [ ' memcpy . c ' , ' memset . c ' , ' memmove . c ' , ' getaddrinfo . c ' , ' getnameinfo . c ' , ' inet_addr . c ' , ' res_query . c ' , ' gai_strerror . c ' , ' proto . c ' , ' gethostbyaddr . c ' , ' gethostbyaddr_r . c ' , ' gethostbyname . c ' , ' gethostbyname2_r . c ' , ' gethostbyname_r . c ' , ' gethostbyname2 . c ' ] ) <nl> # TODO : decide which math ( abs , cos , log , pow , etc . ) methods to use from Math , and which to keep from musl <nl> for dirpath , dirnames , filenames in os . walk ( musl_srcdir ) : <nl> for f in filenames : <nl>
don ' t build musl gethost * methods
emscripten-core/emscripten
e9cad3e446875729b23ed954eef48885de287559
2015-06-09T21:08:14Z
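The commit above removes the `gethostbyname*` symbols from `libc.symbols` and instead excludes the corresponding musl source files through the blacklist consulted while walking the musl source tree in `tools/system_libs.py`. As an illustration of that filtering pattern only (a simplified sketch, not the actual emscripten implementation), the selection could look like this:

```python
# Minimal sketch of blacklist-based source selection over a musl-style tree,
# assuming blacklisted directory names and file names are simply skipped.
# This mirrors the os.walk() pattern visible in system_libs.py above but is
# not the real emscripten code.
import os

BLACKLIST = {
    # whole subdirectories to skip
    "thread", "signal", "sched", "time", "linux",
    # individual files now excluded, as in the commit above
    "gethostbyname.c", "gethostbyname2.c",
    "gethostbyname_r.c", "gethostbyname2_r.c",
}

def collect_sources(musl_srcdir):
    sources = []
    for dirpath, dirnames, filenames in os.walk(musl_srcdir):
        # prune blacklisted directories in place so os.walk skips them
        dirnames[:] = [d for d in dirnames if d not in BLACKLIST]
        for f in filenames:
            if f.endswith(".c") and f not in BLACKLIST:
                sources.append(os.path.join(dirpath, f))
    return sources
```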
new file mode 100755 <nl> index 000000000000 . . 33705148501d <nl> mmm / dev / null <nl> ppp b / utils / analyzeDylibSize . py <nl> <nl> + # ! / usr / bin / env python <nl> + import subprocess <nl> + import sys <nl> + <nl> + if len ( sys . argv ) < 2 : <nl> + print " Usage : % s file . dylib " % sys . argv [ 0 ] <nl> + sys . exit ( 0 ) <nl> + <nl> + class BinaryFunction : <nl> + " " " <nl> + This class represents a disassembled binary function . <nl> + " " " <nl> + def __init__ ( self , symbolName ) : <nl> + self . name = symbolName <nl> + self . start = None <nl> + self . end = None <nl> + <nl> + def addLine ( self , opcodeLine ) : <nl> + " " " <nl> + Parse a single disassembly line that is not the header . <nl> + Example input : <nl> + 0000000000002b0a callq 0x1aef4c <nl> + " " " <nl> + # Ignore the second part of the multi - line instruction . <nl> + if not opcodeLine . startswith ( " 000 " ) : return <nl> + # Normalize the spaces in the line . <nl> + opcodeLine = opcodeLine . replace ( " \ t " , " " ) <nl> + # Find the address of the instruction . <nl> + addr = int ( opcodeLine . split ( " " ) [ 0 ] , 16 ) <nl> + # Initialize the start address if we haven ' t done this before . <nl> + if not self . start : self . start = addr <nl> + # notice that we don ' t have the size of the last instruction <nl> + # and assume it is zero . <nl> + self . end = addr <nl> + <nl> + def getSize ( self ) : <nl> + " " " <nl> + Return the size of the function in the binary . <nl> + " " " <nl> + if not self . end : return 0 <nl> + return self . end - self . start <nl> + <nl> + def __str__ ( self ) : return self . name + " , " + str ( self . getSize ( ) ) <nl> + <nl> + # Disassemble the dylib . <nl> + content = subprocess . check_output ( [ " otool " , " - v " , " - t " , sys . argv [ 1 ] ] ) . split ( " \ n " ) <nl> + <nl> + # parse the disassembled test : <nl> + Funcs = [ ] <nl> + CurrF = None <nl> + for line in content : <nl> + # Parse the function header . <nl> + if line . endswith ( " : " ) : <nl> + CurrF = BinaryFunction ( line [ : - 1 ] ) <nl> + Funcs . append ( CurrF ) <nl> + continue <nl> + # Parse the instructions . <nl> + CurrF . addLine ( line ) <nl> + <nl> + Prefix = { <nl> + # Cpp <nl> + " __Z " : " CPP " , <nl> + " _swift " : " CPP " , <nl> + " __swift " : " CPP " , <nl> + <nl> + # Objective - C <nl> + " + [ " : " CPP " , <nl> + " - [ " : " CPP " , <nl> + <nl> + # Swift <nl> + " __TP " : " Partial Apply " , <nl> + " __TTW " : " Protocol Witness " , <nl> + " __Tw " : " Value Witness " , <nl> + " __TM " : " Type Metadata " , <nl> + " __TF " : " Swift Function " , <nl> + " __TTS " : " Specialization " , <nl> + <nl> + # Default <nl> + " " : " Unknown " , <nl> + } <nl> + <nl> + # initialize an empty score board for the prefixes . <nl> + ScoreBoard = { } <nl> + for key in Prefix : <nl> + ScoreBoard [ Prefix [ key ] ] = ( 0 , 0 ) <nl> + <nl> + # Sort the functions by size ( in case we want to print them ) <nl> + SortedFunctions = sorted ( Funcs , key = lambda x : x . getSize ( ) ) <nl> + <nl> + # Calculate the size of each kind of function . <nl> + for BinFunc in SortedFunctions : <nl> + for prefix in reversed ( sorted ( Prefix ) ) : <nl> + if BinFunc . name . startswith ( prefix ) : <nl> + count , size = ScoreBoard [ Prefix [ prefix ] ] <nl> + ScoreBoard [ Prefix [ prefix ] ] = ( count + 1 , size + BinFunc . getSize ( ) ) <nl> + break <nl> + <nl> + # Print the different prefixes . 
<nl> + print " Function Kind , Count , Size , " <nl> + for Entry in reversed ( sorted ( ScoreBoard ) ) : <nl> + print Entry , " , " , ScoreBoard [ Entry ] [ 0 ] , " , " , ScoreBoard [ Entry ] [ 1 ] , " , " <nl> + <nl> + <nl>
Add a script to analyze the content of the swift dylib .
apple/swift
4089886f3004b89742d3217b1188dd7d84d760cf
2015-02-04T21:34:55Z
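The script added in the commit above disassembles a dylib with `otool -v -t`, records the first and last instruction address seen for each symbol, and buckets the resulting sizes by mangled-name prefix. To show the size bookkeeping in isolation, here is a toy demonstration with fabricated addresses rather than real otool output (written with a Python 3 `print`, whereas the script itself targets Python 2):

```python
# Toy demonstration of the size computation used by analyzeDylibSize.py,
# using made-up disassembly lines. The size is the last seen instruction
# address minus the first one, so the final instruction's own length is not
# counted, exactly as the script itself notes.
lines = [
    "0000000000002b0a\tpushq\t%rbp",
    "0000000000002b0b\tmovq\t%rsp, %rbp",
    "0000000000002b12\tretq",
]

start = end = None
for line in lines:
    if not line.startswith("000"):
        continue  # skip continuation lines, as the script does
    addr = int(line.replace("\t", " ").split(" ")[0], 16)
    if start is None:
        start = addr
    end = addr

print("approximate size:", end - start)  # -> 8 (0x2b12 - 0x2b0a)
```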
mmm a / include / swift / AST / DiagnosticsSIL . def <nl> ppp b / include / swift / AST / DiagnosticsSIL . def <nl> ERROR ( inout_argument_alias , none , <nl> NOTE ( previous_inout_alias , none , <nl> " previous aliasing argument " , ( ) ) <nl> <nl> - ERROR ( exclusivity_access_required , none , <nl> - " % select { initialization | read | modification | deinitialization } 0 requires " <nl> - " % select { exclusive | shared } 1 access " , ( unsigned , unsigned ) ) <nl> + / / This is temporarily a warning during staging to make it easier to evaluate . <nl> + / / The intent is to change it to an error before turning it on by default . <nl> + WARNING ( exclusivity_access_required , none , <nl> + " % select { initialization | read | modification | deinitialization } 0 requires " <nl> + " % select { exclusive | shared } 1 access " , ( unsigned , unsigned ) ) <nl> NOTE ( exclusivity_conflicting_access , none , <nl> " conflicting % select { initialization | read | modification | deinitialization } 0 " <nl> " requires % select { exclusive | shared } 1 access " , ( unsigned , unsigned ) ) <nl> mmm a / test / SILOptimizer / exclusivity_static_diagnostics . sil <nl> ppp b / test / SILOptimizer / exclusivity_static_diagnostics . sil <nl> bb0 ( % 0 : $ Int ) : <nl> store % 0 to [ trivial ] % 3 : $ * Int <nl> % 4 = function_ref @ takesTwoInouts : $ @ convention ( thin ) ( @ inout Int , @ inout Int ) - > ( ) <nl> % 5 = begin_access [ modify ] [ unknown ] % 3 : $ * Int / / expected - note { { conflicting modification requires exclusive access } } <nl> - % 6 = begin_access [ modify ] [ unknown ] % 3 : $ * Int / / expected - error { { modification requires exclusive access } } <nl> + % 6 = begin_access [ modify ] [ unknown ] % 3 : $ * Int / / expected - warning { { modification requires exclusive access } } <nl> % 7 = apply % 4 ( % 5 , % 6 ) : $ @ convention ( thin ) ( @ inout Int , @ inout Int ) - > ( ) <nl> end_access % 6 : $ * Int <nl> end_access % 5 : $ * Int <nl> bb0 ( % 0 : $ Int ) : <nl> % 4 = function_ref @ takesTwoInouts : $ @ convention ( thin ) ( @ inout Int , @ inout Int ) - > ( ) <nl> % 5 = begin_access [ modify ] [ unknown ] % 3 : $ * Int / / expected - note { { conflicting modification requires exclusive access } } <nl> % 6 = begin_access [ modify ] [ unknown ] % 5 : $ * Int <nl> - % 7 = begin_access [ modify ] [ unknown ] % 3 : $ * Int / / expected - error { { modification requires exclusive access } } <nl> + % 7 = begin_access [ modify ] [ unknown ] % 3 : $ * Int / / expected - warning { { modification requires exclusive access } } <nl> % 8 = apply % 4 ( % 5 , % 6 ) : $ @ convention ( thin ) ( @ inout Int , @ inout Int ) - > ( ) <nl> end_access % 7 : $ * Int <nl> end_access % 6 : $ * Int <nl> bb0 ( % 0 : $ Int , % 1 : $ Builtin . Int1 ) : <nl> bb1 : <nl> / / Make sure we don ' t diagnose twice . 
<nl> % 4 = begin_access [ modify ] [ unknown ] % 3 : $ * Int / / expected - note { { conflicting modification requires exclusive access } } <nl> - % 5 = begin_access [ modify ] [ unknown ] % 3 : $ * Int / / expected - error { { modification requires exclusive access } } <nl> + % 5 = begin_access [ modify ] [ unknown ] % 3 : $ * Int / / expected - warning { { modification requires exclusive access } } <nl> end_access % 5 : $ * Int <nl> end_access % 4 : $ * Int <nl> cond_br % 1 , bb1 , bb2 <nl> bb0 ( % 0 : $ Int ) : <nl> % 2 = project_box % 1 : $ { var Int } , 0 <nl> store % 0 to [ trivial ] % 2 : $ * Int <nl> % 4 = begin_access [ read ] [ unknown ] % 2 : $ * Int / / expected - note { { conflicting read requires shared access } } <nl> - % 5 = begin_access [ modify ] [ unknown ] % 2 : $ * Int / / expected - error { { modification requires exclusive access } } <nl> + % 5 = begin_access [ modify ] [ unknown ] % 2 : $ * Int / / expected - warning { { modification requires exclusive access } } <nl> end_access % 5 : $ * Int <nl> end_access % 4 : $ * Int <nl> destroy_value % 1 : $ { var Int } <nl> bb0 ( % 0 : $ Int ) : <nl> % 2 = project_box % 1 : $ { var Int } , 0 <nl> store % 0 to [ trivial ] % 2 : $ * Int <nl> % 4 = begin_access [ modify ] [ unknown ] % 2 : $ * Int / / expected - note { { conflicting modification requires exclusive access } } <nl> - % 5 = begin_access [ read ] [ unknown ] % 2 : $ * Int / / expected - error { { read requires shared access } } <nl> + % 5 = begin_access [ read ] [ unknown ] % 2 : $ * Int / / expected - warning { { read requires shared access } } <nl> end_access % 5 : $ * Int <nl> end_access % 4 : $ * Int <nl> destroy_value % 1 : $ { var Int } <nl> bb0 ( % 0 : $ Int ) : <nl> % 4 = copy_value % 2 : $ { var Int } <nl> % 5 = project_box % 4 : $ { var Int } , 0 <nl> % 6 = begin_access [ modify ] [ unknown ] % 3 : $ * Int / / expected - note { { conflicting modification requires exclusive access } } <nl> - % 7 = begin_access [ modify ] [ unknown ] % 5 : $ * Int / / expected - error { { modification requires exclusive access } } <nl> + % 7 = begin_access [ modify ] [ unknown ] % 5 : $ * Int / / expected - warning { { modification requires exclusive access } } <nl> end_access % 7 : $ * Int <nl> end_access % 6 : $ * Int <nl> destroy_value % 2 : $ { var Int } <nl> bb0 ( % 0 : $ Int ) : <nl> % 1 = global_addr @ global1 : $ * Int <nl> % 2 = global_addr @ global1 : $ * Int <nl> % 3 = begin_access [ modify ] [ unknown ] % 1 : $ * Int / / expected - note { { conflicting modification requires exclusive access } } <nl> - % 4 = begin_access [ modify ] [ unknown ] % 2 : $ * Int / / expected - error { { modification requires exclusive access } } <nl> + % 4 = begin_access [ modify ] [ unknown ] % 2 : $ * Int / / expected - warning { { modification requires exclusive access } } <nl> end_access % 4 : $ * Int <nl> end_access % 3 : $ * Int <nl> % 5 = tuple ( ) <nl> bb0 ( % 0 : $ Int ) : <nl> store % 0 to [ trivial ] % 3 : $ * Int <nl> % 4 = function_ref @ takesTwoInouts : $ @ convention ( thin ) ( @ inout Int , @ inout Int ) - > ( ) <nl> % 5 = begin_access [ read ] [ unknown ] % 3 : $ * Int / / expected - note { { conflicting read requires shared access } } <nl> - % 6 = begin_access [ modify ] [ unknown ] % 3 : $ * Int / / expected - error { { modification requires exclusive access } } <nl> + % 6 = begin_access [ modify ] [ unknown ] % 3 : $ * Int / / expected - warning { { modification requires exclusive access } } <nl> % 7 = begin_access [ read ] [ unknown ] % 3 : $ * Int / / no - error 
<nl> % 8 = apply % 4 ( % 5 , % 6 ) : $ @ convention ( thin ) ( @ inout Int , @ inout Int ) - > ( ) <nl> end_access % 7 : $ * Int <nl> bb0 ( % 0 : $ Int ) : <nl> store % 0 to [ trivial ] % 3 : $ * Int <nl> % 4 = function_ref @ takesTwoInouts : $ @ convention ( thin ) ( @ inout Int , @ inout Int ) - > ( ) <nl> % 5 = begin_access [ modify ] [ unknown ] % 3 : $ * Int / / expected - note { { conflicting modification requires exclusive access } } <nl> - % 6 = begin_access [ modify ] [ unknown ] % 3 : $ * Int / / expected - error { { modification requires exclusive access } } <nl> + % 6 = begin_access [ modify ] [ unknown ] % 3 : $ * Int / / expected - warning { { modification requires exclusive access } } <nl> % 7 = begin_access [ modify ] [ unknown ] % 3 : $ * Int / / no - error <nl> % 8 = apply % 4 ( % 5 , % 6 ) : $ @ convention ( thin ) ( @ inout Int , @ inout Int ) - > ( ) <nl> end_access % 7 : $ * Int <nl> bb0 ( % 0 : $ Int ) : <nl> % 5 = begin_access [ modify ] [ unknown ] % 3 : $ * Int / / no - note <nl> end_access % 5 : $ * Int <nl> % 6 = begin_access [ modify ] [ unknown ] % 3 : $ * Int / / expected - note { { conflicting modification requires exclusive access } } <nl> - % 7 = begin_access [ modify ] [ unknown ] % 3 : $ * Int / / expected - error { { modification requires exclusive access } } <nl> + % 7 = begin_access [ modify ] [ unknown ] % 3 : $ * Int / / expected - warning { { modification requires exclusive access } } <nl> % 8 = apply % 4 ( % 5 , % 6 ) : $ @ convention ( thin ) ( @ inout Int , @ inout Int ) - > ( ) <nl> end_access % 7 : $ * Int <nl> end_access % 6 : $ * Int <nl>
Merge pull request from devincoughlin / static - enforcement - warning
apple/swift
c1b92dbf77bb1fc0364ecf4304e9da81efaff76a
2017-04-14T23:58:12Z
mmm a / cocos2dx / actions / CCActionInterval . cpp <nl> ppp b / cocos2dx / actions / CCActionInterval . cpp <nl> bool Animate : : initWithAnimation ( Animation * pAnimation ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( pFrames , pObj ) <nl> { <nl> - AnimationFrame * frame = ( AnimationFrame * ) pObj ; <nl> + AnimationFrame * frame = static_cast < AnimationFrame * > ( pObj ) ; <nl> float value = ( accumUnitsOfTime * newUnitOfTimeValue ) / singleDuration ; <nl> accumUnitsOfTime + = frame - > getDelayUnits ( ) ; <nl> _splitTimes - > push_back ( value ) ; <nl> mmm a / cocos2dx / base_nodes / CCNode . cpp <nl> ppp b / cocos2dx / base_nodes / CCNode . cpp <nl> Node : : ~ Node ( void ) <nl> Object * child ; <nl> CCARRAY_FOREACH ( _children , child ) <nl> { <nl> - Node * pChild = ( Node * ) child ; <nl> + Node * pChild = static_cast < Node * > ( child ) ; <nl> if ( pChild ) <nl> { <nl> pChild - > _parent = NULL ; <nl> Node * Node : : getChildByTag ( int aTag ) <nl> Object * child ; <nl> CCARRAY_FOREACH ( _children , child ) <nl> { <nl> - Node * pNode = ( Node * ) child ; <nl> + Node * pNode = static_cast < Node * > ( child ) ; <nl> if ( pNode & & pNode - > _tag = = aTag ) <nl> return pNode ; <nl> } <nl> void Node : : removeAllChildrenWithCleanup ( bool cleanup ) <nl> Object * child ; <nl> CCARRAY_FOREACH ( _children , child ) <nl> { <nl> - Node * pNode = ( Node * ) child ; <nl> + Node * pNode = static_cast < Node * > ( child ) ; <nl> if ( pNode ) <nl> { <nl> / / IMPORTANT : <nl> mmm a / cocos2dx / cocoa / CCArray . h <nl> ppp b / cocos2dx / cocoa / CCArray . h <nl> I found that it ' s not work in C + + . So it keep what it ' s look like in version 1 . 0 <nl> do { \ <nl> if ( pArray & & pArray - > count ( ) > 0 ) \ <nl> { \ <nl> - Object * child ; \ <nl> + Object * child ; \ <nl> CCARRAY_FOREACH ( pArray , child ) \ <nl> { \ <nl> - elementType pNode = ( elementType ) child ; \ <nl> + elementType pNode = static_cast < elementType > ( child ) ; \ <nl> if ( pNode ) \ <nl> { \ <nl> pNode - > func ( ) ; \ <nl> while ( false ) <nl> do { \ <nl> if ( pArray & & pArray - > count ( ) > 0 ) \ <nl> { \ <nl> - Object * child = NULL ; \ <nl> + Object * child ; \ <nl> CCARRAY_FOREACH ( pArray , child ) \ <nl> { \ <nl> - elementType pNode = ( elementType ) child ; \ <nl> + elementType pNode = static_cast < elementType > ( child ) ; \ <nl> if ( pNode ) \ <nl> { \ <nl> pNode - > func ( pObject ) ; \ <nl> mmm a / cocos2dx / cocoa / CCAutoreleasePool . cpp <nl> ppp b / cocos2dx / cocoa / CCAutoreleasePool . cpp <nl> void PoolManager : : finalize ( ) <nl> { <nl> if ( ! pObj ) <nl> break ; <nl> - AutoreleasePool * pPool = ( AutoreleasePool * ) pObj ; <nl> + AutoreleasePool * pPool = static_cast < AutoreleasePool * > ( pObj ) ; <nl> pPool - > clear ( ) ; <nl> } <nl> } <nl> mmm a / cocos2dx / cocoa / CCDictionary . h <nl> ppp b / cocos2dx / cocoa / CCDictionary . h <nl> class Dictionary ; <nl> * { <nl> * const char * key = pElement - > getStrKey ( ) ; <nl> * / / You certainly know the type of value , so we assume that it ' s a Sprite . <nl> - * Sprite * pSprite = ( Sprite * ) pElement - > getObject ( ) ; <nl> + * Sprite * pSprite = static_cast < Sprite * > ( pElement - > getObject ( ) ) ; <nl> * / / . . . . . . <nl> * } <nl> * @ endcode <nl> class CC_DLL DictElement <nl> * It ' s also safe to remove elements while traversing . 
<nl> * / <nl> # define CCDICT_FOREACH ( __dict__ , __el__ ) \ <nl> - DictElement * pTmp # # __dict__ # # __el__ = NULL ; \ <nl> + DictElement * pTmp # # __dict__ # # __el__ = nullptr ; \ <nl> if ( __dict__ ) \ <nl> HASH_ITER ( hh , ( __dict__ ) - > _elements , __el__ , pTmp # # __dict__ # # __el__ ) <nl> <nl> mmm a / cocos2dx / keypad_dispatcher / CCKeypadDispatcher . cpp <nl> ppp b / cocos2dx / keypad_dispatcher / CCKeypadDispatcher . cpp <nl> void KeypadDispatcher : : forceRemoveDelegate ( KeypadDelegate * pDelegate ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( _delegates , pObj ) <nl> { <nl> - pHandler = ( KeypadHandler * ) pObj ; <nl> + pHandler = static_cast < KeypadHandler * > ( pObj ) ; <nl> if ( pHandler & & pHandler - > getDelegate ( ) = = pDelegate ) <nl> { <nl> _delegates - > removeObject ( pHandler ) ; <nl> bool KeypadDispatcher : : dispatchKeypadMSG ( ccKeypadMSGType nMsgType ) <nl> { <nl> CC_BREAK_IF ( ! pObj ) ; <nl> <nl> - pHandler = ( KeypadHandler * ) pObj ; <nl> + pHandler = static_cast < KeypadHandler * > ( pObj ) ; <nl> pDelegate = pHandler - > getDelegate ( ) ; <nl> <nl> switch ( nMsgType ) <nl> mmm a / cocos2dx / menu_nodes / CCMenu . cpp <nl> ppp b / cocos2dx / menu_nodes / CCMenu . cpp <nl> static std : : vector < unsigned int > ccarray_to_std_vector ( Array * pArray ) <nl> Object * pObj ; <nl> CCARRAY_FOREACH ( pArray , pObj ) <nl> { <nl> - Integer * pInteger = ( Integer * ) pObj ; <nl> + Integer * pInteger = static_cast < Integer * > ( pObj ) ; <nl> ret . push_back ( ( unsigned int ) pInteger - > getValue ( ) ) ; <nl> } <nl> return ret ; <nl> bool Menu : : initWithArray ( Array * pArrayOfItems ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( pArrayOfItems , pObj ) <nl> { <nl> - MenuItem * item = ( MenuItem * ) pObj ; <nl> + MenuItem * item = static_cast < MenuItem * > ( pObj ) ; <nl> this - > addChild ( item , z ) ; <nl> z + + ; <nl> } <nl> mmm a / cocos2dx / menu_nodes / CCMenuItem . cpp <nl> ppp b / cocos2dx / menu_nodes / CCMenuItem . cpp <nl> void MenuItemToggle : : setEnabled ( bool enabled ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( _subItems , pObj ) <nl> { <nl> - MenuItem * pItem = ( MenuItem * ) pObj ; <nl> + MenuItem * pItem = static_cast < MenuItem * > ( pObj ) ; <nl> pItem - > setEnabled ( enabled ) ; <nl> } <nl> } <nl> mmm a / cocos2dx / misc_nodes / CCRenderTexture . cpp <nl> ppp b / cocos2dx / misc_nodes / CCRenderTexture . cpp <nl> void RenderTexture : : draw ( ) <nl> Object * pElement ; <nl> CCARRAY_FOREACH ( _children , pElement ) <nl> { <nl> - Node * pChild = ( Node * ) pElement ; <nl> + Node * pChild = static_cast < Node * > ( pElement ) ; <nl> <nl> if ( pChild ! = _sprite ) <nl> { <nl> mmm a / cocos2dx / particle_nodes / CCParticleBatchNode . cpp <nl> ppp b / cocos2dx / particle_nodes / CCParticleBatchNode . cpp <nl> void ParticleBatchNode : : updateAllAtlasIndexes ( ) <nl> <nl> CCARRAY_FOREACH ( _children , pObj ) <nl> { <nl> - ParticleSystem * child = ( ParticleSystem * ) pObj ; <nl> + ParticleSystem * child = static_cast < ParticleSystem * > ( pObj ) ; <nl> child - > setAtlasIndex ( index ) ; <nl> index + = child - > getTotalParticles ( ) ; <nl> } <nl> mmm a / cocos2dx / sprite_nodes / CCAnimation . cpp <nl> ppp b / cocos2dx / sprite_nodes / CCAnimation . cpp <nl> bool Animation : : initWithSpriteFrames ( Array * pFrames , float delay / * = 0 . 
0f * / ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( pFrames , pObj ) <nl> { <nl> - SpriteFrame * frame = ( SpriteFrame * ) pObj ; <nl> + SpriteFrame * frame = static_cast < SpriteFrame * > ( pObj ) ; <nl> AnimationFrame * animFrame = new AnimationFrame ( ) ; <nl> animFrame - > initWithSpriteFrame ( frame , 1 , NULL ) ; <nl> _frames - > addObject ( animFrame ) ; <nl> bool Animation : : initWithAnimationFrames ( Array * arrayOfAnimationFrames , float del <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( _frames , pObj ) <nl> { <nl> - AnimationFrame * animFrame = ( AnimationFrame * ) pObj ; <nl> + AnimationFrame * animFrame = static_cast < AnimationFrame * > ( pObj ) ; <nl> _totalDelayUnits + = animFrame - > getDelayUnits ( ) ; <nl> } <nl> return true ; <nl> mmm a / cocos2dx / sprite_nodes / CCAnimationCache . cpp <nl> ppp b / cocos2dx / sprite_nodes / CCAnimationCache . cpp <nl> void AnimationCache : : parseVersion1 ( Dictionary * animations ) <nl> DictElement * pElement = NULL ; <nl> CCDICT_FOREACH ( animations , pElement ) <nl> { <nl> - Dictionary * animationDict = ( Dictionary * ) pElement - > getObject ( ) ; <nl> - Array * frameNames = ( Array * ) animationDict - > objectForKey ( " frames " ) ; <nl> + Dictionary * animationDict = static_cast < Dictionary * > ( pElement - > getObject ( ) ) ; <nl> + Array * frameNames = static_cast < Array * > ( animationDict - > objectForKey ( " frames " ) ) ; <nl> float delay = animationDict - > valueForKey ( " delay " ) - > floatValue ( ) ; <nl> Animation * animation = NULL ; <nl> <nl> void AnimationCache : : parseVersion1 ( Dictionary * animations ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( frameNames , pObj ) <nl> { <nl> - const char * frameName = ( ( String * ) pObj ) - > getCString ( ) ; <nl> + const char * frameName = static_cast < String * > ( pObj ) - > getCString ( ) ; <nl> SpriteFrame * spriteFrame = frameCache - > spriteFrameByName ( frameName ) ; <nl> <nl> if ( ! spriteFrame ) { <nl> void AnimationCache : : parseVersion2 ( Dictionary * animations ) <nl> CCDICT_FOREACH ( animations , pElement ) <nl> { <nl> const char * name = pElement - > getStrKey ( ) ; <nl> - Dictionary * animationDict = ( Dictionary * ) pElement - > getObject ( ) ; <nl> + Dictionary * animationDict = static_cast < Dictionary * > ( pElement - > getObject ( ) ) ; <nl> <nl> const String * loops = animationDict - > valueForKey ( " loops " ) ; <nl> bool restoreOriginalFrame = animationDict - > valueForKey ( " restoreOriginalFrame " ) - > boolValue ( ) ; <nl> <nl> - Array * frameArray = ( Array * ) animationDict - > objectForKey ( " frames " ) ; <nl> + Array * frameArray = static_cast < Array * > ( animationDict - > objectForKey ( " frames " ) ) ; <nl> <nl> if ( frameArray = = NULL ) { <nl> CCLOG ( " cocos2d : AnimationCache : Animation ' % s ' found in dictionary without any frames - cannot add to animation cache . 
" , name ) ; <nl> void AnimationCache : : parseVersion2 ( Dictionary * animations ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( frameArray , pObj ) <nl> { <nl> - Dictionary * entry = ( Dictionary * ) ( pObj ) ; <nl> + Dictionary * entry = static_cast < Dictionary * > ( pObj ) ; <nl> <nl> const char * spriteFrameName = entry - > valueForKey ( " spriteframe " ) - > getCString ( ) ; <nl> SpriteFrame * spriteFrame = frameCache - > spriteFrameByName ( spriteFrameName ) ; <nl> void AnimationCache : : addAnimationsWithDictionary ( Dictionary * dictionary ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( spritesheets , pObj ) <nl> { <nl> - String * name = ( String * ) ( pObj ) ; <nl> + String * name = static_cast < String * > ( pObj ) ; <nl> SpriteFrameCache : : sharedSpriteFrameCache ( ) - > addSpriteFramesWithFile ( name - > getCString ( ) ) ; <nl> } <nl> } <nl> mmm a / cocos2dx / sprite_nodes / CCSpriteBatchNode . cpp <nl> ppp b / cocos2dx / sprite_nodes / CCSpriteBatchNode . cpp <nl> void SpriteBatchNode : : sortAllChildren ( ) <nl> / / and at the same time reorder descendants and the quads to the right index <nl> CCARRAY_FOREACH ( _children , pObj ) <nl> { <nl> - Sprite * pChild = ( Sprite * ) pObj ; <nl> + Sprite * pChild = static_cast < Sprite * > ( pObj ) ; <nl> updateAtlasIndex ( pChild , & index ) ; <nl> } <nl> } <nl> void SpriteBatchNode : : updateAtlasIndex ( Sprite * sprite , int * curIndex ) <nl> { <nl> bool needNewIndex = true ; <nl> <nl> - if ( ( ( Sprite * ) ( pArray - > data - > arr [ 0 ] ) ) - > getZOrder ( ) > = 0 ) <nl> + if ( static_cast < Sprite * > ( pArray - > data - > arr [ 0 ] ) - > getZOrder ( ) > = 0 ) <nl> { <nl> / / all children are in front of the parent <nl> oldIndex = sprite - > getAtlasIndex ( ) ; <nl> void SpriteBatchNode : : updateAtlasIndex ( Sprite * sprite , int * curIndex ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( pArray , pObj ) <nl> { <nl> - Sprite * child = ( Sprite * ) pObj ; <nl> + Sprite * child = static_cast < Sprite * > ( pObj ) ; <nl> if ( needNewIndex & & child - > getZOrder ( ) > = 0 ) <nl> { <nl> oldIndex = sprite - > getAtlasIndex ( ) ; <nl> unsigned int SpriteBatchNode : : rebuildIndexInOrder ( Sprite * pobParent , unsigned in <nl> Object * pObject = NULL ; <nl> CCARRAY_FOREACH ( pChildren , pObject ) <nl> { <nl> - Sprite * pChild = ( Sprite * ) pObject ; <nl> + Sprite * pChild = static_cast < Sprite * > ( pObject ) ; <nl> if ( pChild & & ( pChild - > getZOrder ( ) < 0 ) ) <nl> { <nl> uIndex = rebuildIndexInOrder ( pChild , uIndex ) ; <nl> unsigned int SpriteBatchNode : : rebuildIndexInOrder ( Sprite * pobParent , unsigned in <nl> Object * pObject = NULL ; <nl> CCARRAY_FOREACH ( pChildren , pObject ) <nl> { <nl> - Sprite * pChild = ( Sprite * ) pObject ; <nl> + Sprite * pChild = static_cast < Sprite * > ( pObject ) ; <nl> if ( pChild & & ( pChild - > getZOrder ( ) > = 0 ) ) <nl> { <nl> uIndex = rebuildIndexInOrder ( pChild , uIndex ) ; <nl> void SpriteBatchNode : : insertChild ( Sprite * pSprite , unsigned int uIndex ) <nl> / / update indices <nl> unsigned int i = uIndex + 1 ; <nl> <nl> - Sprite * pChild = NULL ; <nl> + Sprite * pChild = nullptr ; <nl> for ( ; i < descendantsData - > num ; i + + ) { <nl> - pChild = ( Sprite * ) descendantsData - > arr [ i ] ; <nl> + pChild = static_cast < Sprite * > ( descendantsData - > arr [ i ] ) ; <nl> pChild - > setAtlasIndex ( pChild - > getAtlasIndex ( ) + 1 ) ; <nl> } <nl> <nl> / / add children recursively <nl> - Object * pObj = NULL ; <nl> + Object * pObj = nullptr ; <nl> 
CCARRAY_FOREACH ( pSprite - > getChildren ( ) , pObj ) <nl> { <nl> - pChild = ( Sprite * ) pObj ; <nl> + pChild = static_cast < Sprite * > ( pObj ) ; <nl> unsigned int idx = atlasIndexForChild ( pChild , pChild - > getZOrder ( ) ) ; <nl> insertChild ( pChild , idx ) ; <nl> } <nl> void SpriteBatchNode : : appendChild ( Sprite * sprite ) <nl> <nl> / / add children recursively <nl> <nl> - Object * pObj = NULL ; <nl> + Object * pObj = nullptr ; <nl> CCARRAY_FOREACH ( sprite - > getChildren ( ) , pObj ) <nl> { <nl> - Sprite * child = ( Sprite * ) pObj ; <nl> + Sprite * child = static_cast < Sprite * > ( pObj ) ; <nl> appendChild ( child ) ; <nl> } <nl> } <nl> void SpriteBatchNode : : removeSpriteFromAtlas ( Sprite * pobSprite ) <nl> Object * pObject = NULL ; <nl> CCARRAY_FOREACH ( pChildren , pObject ) <nl> { <nl> - Sprite * pChild = ( Sprite * ) pObject ; <nl> + Sprite * pChild = static_cast < Sprite * > ( pObject ) ; <nl> if ( pChild ) <nl> { <nl> removeSpriteFromAtlas ( pChild ) ; <nl> SpriteBatchNode * SpriteBatchNode : : addSpriteWithoutQuad ( Sprite * child , unsigned i <nl> Object * pObject = NULL ; <nl> CCARRAY_FOREACH ( _descendants , pObject ) <nl> { <nl> - Sprite * pChild = ( Sprite * ) pObject ; <nl> + Sprite * pChild = static_cast < Sprite * > ( pObject ) ; <nl> if ( pChild & & ( pChild - > getAtlasIndex ( ) > = z ) ) <nl> { <nl> + + i ; <nl> mmm a / cocos2dx / sprite_nodes / CCSpriteFrameCache . cpp <nl> ppp b / cocos2dx / sprite_nodes / CCSpriteFrameCache . cpp <nl> void SpriteFrameCache : : addSpriteFramesWithDictionary ( Dictionary * dictionary , Tex <nl> DictElement * pElement = NULL ; <nl> CCDICT_FOREACH ( framesDict , pElement ) <nl> { <nl> - Dictionary * frameDict = ( Dictionary * ) pElement - > getObject ( ) ; <nl> + Dictionary * frameDict = static_cast < Dictionary * > ( pElement - > getObject ( ) ) ; <nl> std : : string spriteFrameName = pElement - > getStrKey ( ) ; <nl> - SpriteFrame * spriteFrame = ( SpriteFrame * ) _spriteFrames - > objectForKey ( spriteFrameName ) ; <nl> + SpriteFrame * spriteFrame = static_cast < SpriteFrame * > ( _spriteFrames - > objectForKey ( spriteFrameName ) ) ; <nl> if ( spriteFrame ) <nl> { <nl> continue ; <nl> void SpriteFrameCache : : addSpriteFramesWithDictionary ( Dictionary * dictionary , Tex <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( aliases , pObj ) <nl> { <nl> - std : : string oneAlias = ( ( String * ) pObj ) - > getCString ( ) ; <nl> + std : : string oneAlias = static_cast < String * > ( pObj ) - > getCString ( ) ; <nl> if ( _spriteFramesAliases - > objectForKey ( oneAlias . c_str ( ) ) ) <nl> { <nl> CCLOGWARN ( " cocos2d : WARNING : an alias with name % s already exists " , oneAlias . 
c_str ( ) ) ; <nl> void SpriteFrameCache : : removeUnusedSpriteFrames ( void ) <nl> DictElement * pElement = NULL ; <nl> CCDICT_FOREACH ( _spriteFrames , pElement ) <nl> { <nl> - SpriteFrame * spriteFrame = ( SpriteFrame * ) pElement - > getObject ( ) ; <nl> + SpriteFrame * spriteFrame = static_cast < SpriteFrame * > ( pElement - > getObject ( ) ) ; <nl> if ( spriteFrame - > retainCount ( ) = = 1 ) <nl> { <nl> CCLOG ( " cocos2d : SpriteFrameCache : removing unused frame : % s " , pElement - > getStrKey ( ) ) ; <nl> void SpriteFrameCache : : removeSpriteFramesFromFile ( const char * plist ) <nl> <nl> void SpriteFrameCache : : removeSpriteFramesFromDictionary ( Dictionary * dictionary ) <nl> { <nl> - Dictionary * framesDict = ( Dictionary * ) dictionary - > objectForKey ( " frames " ) ; <nl> + Dictionary * framesDict = static_cast < Dictionary * > ( dictionary - > objectForKey ( " frames " ) ) ; <nl> Array * keysToRemove = Array : : create ( ) ; <nl> <nl> DictElement * pElement = NULL ; <nl> void SpriteFrameCache : : removeSpriteFramesFromTexture ( Texture2D * texture ) <nl> CCDICT_FOREACH ( _spriteFrames , pElement ) <nl> { <nl> string key = pElement - > getStrKey ( ) ; <nl> - SpriteFrame * frame = ( SpriteFrame * ) _spriteFrames - > objectForKey ( key . c_str ( ) ) ; <nl> + SpriteFrame * frame = static_cast < SpriteFrame * > ( _spriteFrames - > objectForKey ( key . c_str ( ) ) ) ; <nl> if ( frame & & ( frame - > getTexture ( ) = = texture ) ) <nl> { <nl> keysToRemove - > addObject ( String : : create ( pElement - > getStrKey ( ) ) ) ; <nl> mmm a / cocos2dx / support / CCNotificationCenter . cpp <nl> ppp b / cocos2dx / support / CCNotificationCenter . cpp <nl> void NotificationCenter : : removeObserver ( Object * target , const char * name ) <nl> Object * obj = NULL ; <nl> CCARRAY_FOREACH ( _observers , obj ) <nl> { <nl> - NotificationObserver * observer = ( NotificationObserver * ) obj ; <nl> + NotificationObserver * observer = static_cast < NotificationObserver * > ( obj ) ; <nl> if ( ! observer ) <nl> continue ; <nl> <nl> int NotificationCenter : : removeAllObservers ( Object * target ) <nl> <nl> CCARRAY_FOREACH ( _observers , obj ) <nl> { <nl> - NotificationObserver * observer = ( NotificationObserver * ) obj ; <nl> + NotificationObserver * observer = static_cast < NotificationObserver * > ( obj ) ; <nl> if ( ! observer ) <nl> continue ; <nl> <nl> void NotificationCenter : : unregisterScriptObserver ( Object * target , const char * nam <nl> Object * obj = NULL ; <nl> CCARRAY_FOREACH ( _observers , obj ) <nl> { <nl> - NotificationObserver * observer = ( NotificationObserver * ) obj ; <nl> + NotificationObserver * observer = static_cast < NotificationObserver * > ( obj ) ; <nl> if ( ! observer ) <nl> continue ; <nl> <nl> void NotificationCenter : : postNotification ( const char * name , Object * object ) <nl> Object * obj = NULL ; <nl> CCARRAY_FOREACH ( ObserversCopy , obj ) <nl> { <nl> - NotificationObserver * observer = ( NotificationObserver * ) obj ; <nl> + NotificationObserver * observer = static_cast < NotificationObserver * > ( obj ) ; <nl> if ( ! 
observer ) <nl> continue ; <nl> <nl> int NotificationCenter : : getObserverHandlerByName ( const char * name ) <nl> Object * obj = NULL ; <nl> CCARRAY_FOREACH ( _observers , obj ) <nl> { <nl> - NotificationObserver * observer = ( NotificationObserver * ) obj ; <nl> + NotificationObserver * observer = static_cast < NotificationObserver * > ( obj ) ; <nl> if ( NULL = = observer ) <nl> continue ; <nl> <nl> mmm a / cocos2dx / support / CCProfiling . cpp <nl> ppp b / cocos2dx / support / CCProfiling . cpp <nl> void Profiler : : displayTimers ( ) <nl> DictElement * pElement = NULL ; <nl> CCDICT_FOREACH ( _activeTimers , pElement ) <nl> { <nl> - ProfilingTimer * timer = ( ProfilingTimer * ) pElement - > getObject ( ) ; <nl> + ProfilingTimer * timer = static_cast < ProfilingTimer * > ( pElement - > getObject ( ) ) ; <nl> CCLog ( " % s " , timer - > description ( ) ) ; <nl> } <nl> } <nl> mmm a / cocos2dx / textures / CCTextureCache . cpp <nl> ppp b / cocos2dx / textures / CCTextureCache . cpp <nl> void TextureCache : : removeUnusedTextures ( ) <nl> CCDICT_FOREACH ( _textures , pElement ) <nl> { <nl> CCLOG ( " cocos2d : TextureCache : texture : % s " , pElement - > getStrKey ( ) ) ; <nl> - Texture2D * value = ( Texture2D * ) pElement - > getObject ( ) ; <nl> + Texture2D * value = static_cast < Texture2D * > ( pElement - > getObject ( ) ) ; <nl> if ( value - > retainCount ( ) = = 1 ) <nl> { <nl> CCLOG ( " cocos2d : TextureCache : removing unused texture : % s " , pElement - > getStrKey ( ) ) ; <nl> void TextureCache : : removeUnusedTextures ( ) <nl> CCDICT_FOREACH ( _textures , pElement ) <nl> { <nl> CCLOG ( " cocos2d : TextureCache : texture : % s " , pElement - > getStrKey ( ) ) ; <nl> - Texture2D * value = ( Texture2D * ) pElement - > getObject ( ) ; <nl> + Texture2D * value = static_cast < Texture2D * > ( pElement - > getObject ( ) ) ; <nl> if ( value - > retainCount ( ) = = 1 ) <nl> { <nl> elementToRemove . push_back ( pElement ) ; <nl> void TextureCache : : dumpCachedTextureInfo ( ) <nl> DictElement * pElement = NULL ; <nl> CCDICT_FOREACH ( _textures , pElement ) <nl> { <nl> - Texture2D * tex = ( Texture2D * ) pElement - > getObject ( ) ; <nl> + Texture2D * tex = static_cast < Texture2D * > ( pElement - > getObject ( ) ) ; <nl> unsigned int bpp = tex - > bitsPerPixelForFormat ( ) ; <nl> / / Each texture takes up width * height * bytesPerPixel bytes . <nl> unsigned int bytes = tex - > getPixelsWide ( ) * tex - > getPixelsHigh ( ) * bpp / 8 ; <nl> mmm a / cocos2dx / tilemap_parallax_nodes / CCTMXLayer . cpp <nl> ppp b / cocos2dx / tilemap_parallax_nodes / CCTMXLayer . 
cpp <nl> Sprite * TMXLayer : : insertTileForGID ( unsigned int gid , const Point & pos ) <nl> / / update possible children <nl> if ( _children & & _children - > count ( ) > 0 ) <nl> { <nl> - Object * pObject = NULL ; <nl> + Object * pObject = nullptr ; <nl> CCARRAY_FOREACH ( _children , pObject ) <nl> { <nl> - Sprite * pChild = ( Sprite * ) pObject ; <nl> + Sprite * pChild = static_cast < Sprite * > ( pObject ) ; <nl> if ( pChild ) <nl> { <nl> unsigned int ai = pChild - > getAtlasIndex ( ) ; <nl> void TMXLayer : : removeTileAt ( const Point & pos ) <nl> / / update possible children <nl> if ( _children & & _children - > count ( ) > 0 ) <nl> { <nl> - Object * pObject = NULL ; <nl> + Object * pObject = nullptr ; <nl> CCARRAY_FOREACH ( _children , pObject ) <nl> { <nl> - Sprite * pChild = ( Sprite * ) pObject ; <nl> + Sprite * pChild = static_cast < Sprite * > ( pObject ) ; <nl> if ( pChild ) <nl> { <nl> unsigned int ai = pChild - > getAtlasIndex ( ) ; <nl> mmm a / cocos2dx / tilemap_parallax_nodes / CCTMXObjectGroup . cpp <nl> ppp b / cocos2dx / tilemap_parallax_nodes / CCTMXObjectGroup . cpp <nl> Dictionary * TMXObjectGroup : : objectNamed ( const char * objectName ) <nl> { <nl> if ( _objects & & _objects - > count ( ) > 0 ) <nl> { <nl> - Object * pObj = NULL ; <nl> + Object * pObj = nullptr ; <nl> CCARRAY_FOREACH ( _objects , pObj ) <nl> { <nl> - Dictionary * pDict = ( Dictionary * ) pObj ; <nl> + Dictionary * pDict = static_cast < Dictionary * > ( pObj ) ; <nl> String * name = ( String * ) pDict - > objectForKey ( " name " ) ; <nl> if ( name & & name - > _string = = objectName ) <nl> { <nl> mmm a / cocos2dx / tilemap_parallax_nodes / CCTMXTiledMap . cpp <nl> ppp b / cocos2dx / tilemap_parallax_nodes / CCTMXTiledMap . cpp <nl> TMXTilesetInfo * TMXTiledMap : : tilesetForLayer ( TMXLayerInfo * layerInfo , TMXMapInf <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH_REVERSE ( tilesets , pObj ) <nl> { <nl> - tileset = ( TMXTilesetInfo * ) pObj ; <nl> + tileset = static_cast < TMXTilesetInfo * > ( pObj ) ; <nl> if ( tileset ) <nl> { <nl> for ( unsigned int y = 0 ; y < size . height ; y + + ) <nl> void TMXTiledMap : : buildWithMapInfo ( TMXMapInfo * mapInfo ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( layers , pObj ) <nl> { <nl> - layerInfo = ( TMXLayerInfo * ) pObj ; <nl> + layerInfo = static_cast < TMXLayerInfo * > ( pObj ) ; <nl> if ( layerInfo & & layerInfo - > _visible ) <nl> { <nl> TMXLayer * child = parseLayer ( layerInfo , mapInfo ) ; <nl> TMXObjectGroup * TMXTiledMap : : objectGroupNamed ( const char * groupName ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( _objectGroups , pObj ) <nl> { <nl> - objectGroup = ( TMXObjectGroup * ) ( pObj ) ; <nl> + objectGroup = static_cast < TMXObjectGroup * > ( pObj ) ; <nl> if ( objectGroup & & objectGroup - > getGroupName ( ) = = sGroupName ) <nl> { <nl> return objectGroup ; <nl> mmm a / cocos2dx / touch_dispatcher / CCTouchDispatcher . cpp <nl> ppp b / cocos2dx / touch_dispatcher / CCTouchDispatcher . 
cpp <nl> void TouchDispatcher : : forceAddHandler ( TouchHandler * pHandler , Array * pArray ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( pArray , pObj ) <nl> { <nl> - TouchHandler * h = ( TouchHandler * ) pObj ; <nl> + TouchHandler * h = static_cast < TouchHandler * > ( pObj ) ; <nl> if ( h ) <nl> { <nl> if ( h - > getPriority ( ) < pHandler - > getPriority ( ) ) <nl> void TouchDispatcher : : forceRemoveDelegate ( TouchDelegate * pDelegate ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( _standardHandlers , pObj ) <nl> { <nl> - pHandler = ( TouchHandler * ) pObj ; <nl> + pHandler = static_cast < TouchHandler * > ( pObj ) ; <nl> if ( pHandler & & pHandler - > getDelegate ( ) = = pDelegate ) <nl> { <nl> _standardHandlers - > removeObject ( pHandler ) ; <nl> void TouchDispatcher : : forceRemoveDelegate ( TouchDelegate * pDelegate ) <nl> / / remove handler from _targetedHandlers <nl> CCARRAY_FOREACH ( _targetedHandlers , pObj ) <nl> { <nl> - pHandler = ( TouchHandler * ) pObj ; <nl> + pHandler = static_cast < TouchHandler * > ( pObj ) ; <nl> if ( pHandler & & pHandler - > getDelegate ( ) = = pDelegate ) <nl> { <nl> _targetedHandlers - > removeObject ( pHandler ) ; <nl> TouchHandler * TouchDispatcher : : findHandler ( TouchDelegate * pDelegate ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( _targetedHandlers , pObj ) <nl> { <nl> - TouchHandler * pHandler = ( TouchHandler * ) pObj ; <nl> + TouchHandler * pHandler = static_cast < TouchHandler * > ( pObj ) ; <nl> if ( pHandler - > getDelegate ( ) = = pDelegate ) <nl> { <nl> return pHandler ; <nl> TouchHandler * TouchDispatcher : : findHandler ( TouchDelegate * pDelegate ) <nl> <nl> CCARRAY_FOREACH ( _standardHandlers , pObj ) <nl> { <nl> - TouchHandler * pHandler = ( TouchHandler * ) pObj ; <nl> + TouchHandler * pHandler = static_cast < TouchHandler * > ( pObj ) ; <nl> if ( pHandler - > getDelegate ( ) = = pDelegate ) <nl> { <nl> return pHandler ; <nl> TouchHandler * TouchDispatcher : : findHandler ( Array * pArray , TouchDelegate * pDelega <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( pArray , pObj ) <nl> { <nl> - TouchHandler * pHandle = ( TouchHandler * ) pObj ; <nl> + TouchHandler * pHandle = static_cast < TouchHandler * > ( pObj ) ; <nl> if ( pHandle - > getDelegate ( ) = = pDelegate ) <nl> { <nl> return pHandle ; <nl> void TouchDispatcher : : touches ( Set * pTouches , Event * pEvent , unsigned int uIndex ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( _targetedHandlers , pObj ) <nl> { <nl> - pHandler = ( TargetedTouchHandler * ) ( pObj ) ; <nl> + pHandler = static_cast < TargetedTouchHandler * > ( pObj ) ; <nl> <nl> if ( ! pHandler ) <nl> { <nl> void TouchDispatcher : : touches ( Set * pTouches , Event * pEvent , unsigned int uIndex ) <nl> if ( uStandardHandlersCount > 0 & & pMutableTouches - > count ( ) > 0 ) <nl> { <nl> StandardTouchHandler * pHandler = NULL ; <nl> - Object * pObj = NULL ; <nl> + Object * pObj = nullptr ; <nl> CCARRAY_FOREACH ( _standardHandlers , pObj ) <nl> { <nl> - pHandler = ( StandardTouchHandler * ) ( pObj ) ; <nl> + pHandler = static_cast < StandardTouchHandler * > ( pObj ) ; <nl> <nl> if ( ! 
pHandler ) <nl> { <nl> void TouchDispatcher : : touches ( Set * pTouches , Event * pEvent , unsigned int uIndex ) <nl> { <nl> _toAdd = false ; <nl> TouchHandler * pHandler = NULL ; <nl> - Object * pObj = NULL ; <nl> + Object * pObj = nullptr ; <nl> CCARRAY_FOREACH ( _handlersToAdd , pObj ) <nl> { <nl> - pHandler = ( TouchHandler * ) pObj ; <nl> + pHandler = static_cast < TouchHandler * > ( pObj ) ; <nl> if ( ! pHandler ) <nl> { <nl> break ; <nl> mmm a / extensions / CCArmature / CCArmature . cpp <nl> ppp b / extensions / CCArmature / CCArmature . cpp <nl> void Armature : : update ( float dt ) <nl> Object * object = NULL ; <nl> CCARRAY_FOREACH ( _topBoneList , object ) <nl> { <nl> - ( ( Bone * ) object ) - > update ( dt ) ; <nl> + static_cast < Bone * > ( object ) - > update ( dt ) ; <nl> } <nl> } <nl> <nl> void Armature : : draw ( ) <nl> Object * object = NULL ; <nl> CCARRAY_FOREACH ( _children , object ) <nl> { <nl> - Bone * bone = ( Bone * ) object ; <nl> + Bone * bone = static_cast < Bone * > ( object ) ; <nl> <nl> DisplayManager * displayManager = bone - > getDisplayManager ( ) ; <nl> Node * node = displayManager - > getDisplayRenderNode ( ) ; <nl> Rect Armature : : boundingBox ( ) <nl> Object * object = NULL ; <nl> CCARRAY_FOREACH ( _children , object ) <nl> { <nl> - Bone * bone = ( Bone * ) object ; <nl> + Bone * bone = static_cast < Bone * > ( object ) ; <nl> Rect r = bone - > getDisplayManager ( ) - > getBoundingBox ( ) ; <nl> <nl> if ( first ) <nl> mmm a / extensions / CCArmature / CCBone . cpp <nl> ppp b / extensions / CCArmature / CCBone . cpp <nl> void Bone : : update ( float delta ) <nl> Object * object = NULL ; <nl> CCARRAY_FOREACH ( _children , object ) <nl> { <nl> - Bone * childBone = ( Bone * ) object ; <nl> + Bone * childBone = static_cast < Bone * > ( object ) ; <nl> childBone - > update ( delta ) ; <nl> } <nl> <nl> void Bone : : removeChildBone ( Bone * bone , bool recursion ) <nl> Object * _object = NULL ; <nl> CCARRAY_FOREACH ( _ccbones , _object ) <nl> { <nl> - Bone * _ccBone = ( Bone * ) _object ; <nl> + Bone * _ccBone = static_cast < Bone * > ( _object ) ; <nl> bone - > removeChildBone ( _ccBone , recursion ) ; <nl> } <nl> } <nl> mmm a / extensions / CCArmature / animation / CCArmatureAnimation . cpp <nl> ppp b / extensions / CCArmature / animation / CCArmatureAnimation . cpp <nl> void ArmatureAnimation : : pause ( ) <nl> Object * object = NULL ; <nl> CCARRAY_FOREACH ( _tweenList , object ) <nl> { <nl> - ( ( Tween * ) object ) - > pause ( ) ; <nl> + static_cast < Tween * > ( object ) - > pause ( ) ; <nl> } <nl> ProcessBase : : pause ( ) ; <nl> } <nl> void ArmatureAnimation : : resume ( ) <nl> Object * object = NULL ; <nl> CCARRAY_FOREACH ( _tweenList , object ) <nl> { <nl> - ( ( Tween * ) object ) - > resume ( ) ; <nl> + static_cast < Tween * > ( object ) - > resume ( ) ; <nl> } <nl> ProcessBase : : resume ( ) ; <nl> } <nl> void ArmatureAnimation : : stop ( ) <nl> Object * object = NULL ; <nl> CCARRAY_FOREACH ( _tweenList , object ) <nl> { <nl> - ( ( Tween * ) object ) - > stop ( ) ; <nl> + static_cast < Tween * > ( object ) - > stop ( ) ; <nl> } <nl> _tweenList - > removeAllObjects ( ) ; <nl> ProcessBase : : stop ( ) ; <nl> void ArmatureAnimation : : update ( float dt ) <nl> Object * object = NULL ; <nl> CCARRAY_FOREACH ( _tweenList , object ) <nl> { <nl> - ( ( Tween * ) object ) - > update ( dt ) ; <nl> + static_cast < Tween * > ( object ) - > update ( dt ) ; <nl> } <nl> } <nl> <nl> mmm a / extensions / CCArmature / display / CCDisplayManager . 
cpp <nl> ppp b / extensions / CCArmature / display / CCDisplayManager . cpp <nl> void DisplayManager : : initDisplayList ( BoneData * boneData ) <nl> Array * displayDataList = & boneData - > displayDataList ; <nl> CCARRAY_FOREACH ( displayDataList , object ) <nl> { <nl> - DisplayData * displayData = ( DisplayData * ) object ; <nl> + DisplayData * displayData = static_cast < DisplayData * > ( object ) ; <nl> <nl> DecorativeDisplay * decoDisplay = DecorativeDisplay : : create ( ) ; <nl> decoDisplay - > setDisplayData ( displayData ) ; <nl> mmm a / extensions / CCArmature / physics / CCColliderDetector . cpp <nl> ppp b / extensions / CCArmature / physics / CCColliderDetector . cpp <nl> ColliderDetector : : ~ ColliderDetector ( ) <nl> Object * object = NULL ; <nl> CCARRAY_FOREACH ( _colliderBodyList , object ) <nl> { <nl> - ColliderBody * colliderBody = ( ColliderBody * ) object ; <nl> + ColliderBody * colliderBody = static_cast < ColliderBody * > ( object ) ; <nl> <nl> b2Body * body = colliderBody - > getB2Body ( ) ; <nl> PhysicsWorld : : sharedPhysicsWorld ( ) - > getNoGravityWorld ( ) - > DestroyBody ( body ) ; <nl> void ColliderDetector : : addContourData ( ContourData * contourData ) <nl> int i = 0 ; <nl> CCARRAY_FOREACH ( array , object ) <nl> { <nl> - ContourVertex2F * v = ( ContourVertex2F * ) object ; <nl> + ContourVertex2F * v = static_cast < ContourVertex2F * > ( object ) ; <nl> b2bv [ i ] . Set ( v - > x / PT_RATIO , v - > y / PT_RATIO ) ; <nl> i + + ; <nl> } <nl> void ColliderDetector : : addContourDataList ( Array * contourDataList ) <nl> Object * object = NULL ; <nl> CCARRAY_FOREACH ( contourDataList , object ) <nl> { <nl> - addContourData ( ( ContourData * ) object ) ; <nl> + addContourData ( static_cast < ContourData * > ( object ) ) ; <nl> } <nl> } <nl> <nl> void ColliderDetector : : setColliderFilter ( b2Filter & filter ) <nl> Object * object = NULL ; <nl> CCARRAY_FOREACH ( _colliderBodyList , object ) <nl> { <nl> - ColliderBody * colliderBody = ( ColliderBody * ) object ; <nl> + ColliderBody * colliderBody = static_cast < ColliderBody * > ( object ) ; <nl> colliderBody - > getB2Body ( ) - > GetFixtureList ( ) - > SetFilterData ( filter ) ; <nl> } <nl> } <nl> void ColliderDetector : : setActive ( bool active ) <nl> Object * object = NULL ; <nl> CCARRAY_FOREACH ( _colliderBodyList , object ) <nl> { <nl> - ColliderBody * colliderBody = ( ColliderBody * ) object ; <nl> + ColliderBody * colliderBody = static_cast < ColliderBody * > ( object ) ; <nl> colliderBody - > getB2Body ( ) - > SetActive ( active ) ; <nl> } <nl> } <nl> void ColliderDetector : : updateTransform ( AffineTransform & t ) <nl> Object * object = NULL ; <nl> CCARRAY_FOREACH ( _colliderBodyList , object ) <nl> { <nl> - ColliderBody * colliderBody = ( ColliderBody * ) object ; <nl> + ColliderBody * colliderBody = static_cast < ColliderBody * > ( object ) ; <nl> <nl> ContourData * contourData = colliderBody - > getContourData ( ) ; <nl> b2Body * body = colliderBody - > getB2Body ( ) ; <nl> void ColliderDetector : : updateTransform ( AffineTransform & t ) <nl> int i = 0 ; <nl> CCARRAY_FOREACH ( array , object ) <nl> { <nl> - ContourVertex2F * cv = ( ContourVertex2F * ) object ; <nl> + ContourVertex2F * cv = static_cast < ContourVertex2F * > ( object ) ; <nl> b2Vec2 & bv = shape - > m_vertices [ i ] ; <nl> <nl> helpPoint . setPoint ( cv - > x , cv - > y ) ; <nl> mmm a / extensions / CCArmature / utils / CCSpriteFrameCacheHelper . cpp <nl> ppp b / extensions / CCArmature / utils / CCSpriteFrameCacheHelper . 
cpp <nl> void SpriteFrameCacheHelper : : addSpriteFrameFromDict ( Dictionary * dictionary , Text <nl> DictElement * pElement = NULL ; <nl> CCDICT_FOREACH ( framesDict , pElement ) <nl> { <nl> - Dictionary * frameDict = ( Dictionary * ) pElement - > getObject ( ) ; <nl> + Dictionary * frameDict = static_cast < Dictionary * > ( pElement - > getObject ( ) ) ; <nl> std : : string spriteFrameName = pElement - > getStrKey ( ) ; <nl> <nl> _display2ImageMap [ spriteFrameName ] = imagePath ; <nl> mmm a / extensions / CCBReader / CCBAnimationManager . cpp <nl> ppp b / extensions / CCBReader / CCBAnimationManager . cpp <nl> int CCBAnimationManager : : getSequenceId ( const char * pSequenceName ) <nl> string seqName ( pSequenceName ) ; <nl> CCARRAY_FOREACH ( mSequences , pElement ) <nl> { <nl> - CCBSequence * seq = ( CCBSequence * ) pElement ; <nl> + CCBSequence * seq = static_cast < CCBSequence * > ( pElement ) ; <nl> if ( seqName . compare ( seq - > getName ( ) ) = = 0 ) <nl> { <nl> return seq - > getSequenceId ( ) ; <nl> CCBSequence * CCBAnimationManager : : getSequence ( int nSequenceId ) <nl> Object * pElement = NULL ; <nl> CCARRAY_FOREACH ( mSequences , pElement ) <nl> { <nl> - CCBSequence * seq = ( CCBSequence * ) pElement ; <nl> + CCBSequence * seq = static_cast < CCBSequence * > ( pElement ) ; <nl> if ( seq - > getSequenceId ( ) = = nSequenceId ) <nl> { <nl> return seq ; <nl> void CCBAnimationManager : : runAnimationsForSequenceIdTweenDuration ( int nSeqId , fl <nl> DictElement * pElement = NULL ; <nl> CCDICT_FOREACH ( mNodeSequences , pElement ) <nl> { <nl> - Node * node = ( Node * ) pElement - > getIntKey ( ) ; <nl> + Node * node = reinterpret_cast < Node * > ( pElement - > getIntKey ( ) ) ; <nl> node - > stopAllActions ( ) ; <nl> <nl> / / Refer to CCBReader : : readKeyframe ( ) for the real type of value <nl> void CCBAnimationManager : : runAnimationsForSequenceIdTweenDuration ( int nSeqId , fl <nl> CCDICT_FOREACH ( seqNodeProps , pElement1 ) <nl> { <nl> const char * propName = pElement1 - > getStrKey ( ) ; <nl> - CCBSequenceProperty * seqProp = ( CCBSequenceProperty * ) seqNodeProps - > objectForKey ( propName ) ; <nl> + CCBSequenceProperty * seqProp = static_cast < CCBSequenceProperty * > ( seqNodeProps - > objectForKey ( propName ) ) ; <nl> seqNodePropNames . insert ( propName ) ; <nl> <nl> setFirstFrame ( node , seqProp , fTweenDuration ) ; <nl> mmm a / extensions / CCBReader / CCBReader . cpp <nl> ppp b / extensions / CCBReader / CCBReader . 
cpp <nl> Node * CCBReader : : readNodeGraphFromData ( Data * pData , Object * pOwner , const Size & <nl> CCDICT_FOREACH ( animationManagers , pElement ) <nl> { <nl> Node * pNode = ( Node * ) pElement - > getIntKey ( ) ; <nl> - CCBAnimationManager * manager = ( CCBAnimationManager * ) animationManagers - > objectForKey ( ( intptr_t ) pNode ) ; <nl> + CCBAnimationManager * manager = static_cast < CCBAnimationManager * > ( animationManagers - > objectForKey ( ( intptr_t ) pNode ) ) ; <nl> pNode - > setUserObject ( manager ) ; <nl> <nl> if ( jsControlled ) <nl> void CCBReader : : cleanUpNodeGraph ( Node * pNode ) <nl> Object * pChild = NULL ; <nl> CCARRAY_FOREACH ( pNode - > getChildren ( ) , pChild ) <nl> { <nl> - cleanUpNodeGraph ( ( Node * ) pChild ) ; <nl> + cleanUpNodeGraph ( static_cast < Node * > ( pChild ) ) ; <nl> } <nl> } <nl> <nl> Node * CCBReader : : readNodeGraph ( Node * pParent ) { <nl> DictElement * pElement ; <nl> CCDICT_FOREACH ( pCustomPropeties , pElement ) <nl> { <nl> - customAssigned = targetAsCCBMemberVariableAssigner - > onAssignCCBCustomProperty ( target , pElement - > getStrKey ( ) , ( CCBValue * ) pElement - > getObject ( ) ) ; <nl> + customAssigned = targetAsCCBMemberVariableAssigner - > onAssignCCBCustomProperty ( target , pElement - > getStrKey ( ) , static_cast < CCBValue * > ( pElement - > getObject ( ) ) ) ; <nl> <nl> if ( ! customAssigned & & this - > mCCBMemberVariableAssigner ! = NULL ) <nl> { <nl> - customAssigned = this - > mCCBMemberVariableAssigner - > onAssignCCBCustomProperty ( target , pElement - > getStrKey ( ) , ( CCBValue * ) pElement - > getObject ( ) ) ; <nl> + customAssigned = this - > mCCBMemberVariableAssigner - > onAssignCCBCustomProperty ( target , pElement - > getStrKey ( ) , static_cast < CCBValue * > ( pElement - > getObject ( ) ) ) ; <nl> } <nl> } <nl> } <nl> mmm a / extensions / CCBReader / CCNodeLoader . cpp <nl> ppp b / extensions / CCBReader / CCNodeLoader . cpp <nl> void NodeLoader : : parseProperties ( Node * pNode , Node * pParent , CCBReader * pCCBR <nl> bool bFound = false ; <nl> CCARRAY_FOREACH ( extraPropsNames , pObj ) <nl> { <nl> - String * pStr = ( String * ) pObj ; <nl> + String * pStr = static_cast < String * > ( pObj ) ; <nl> if ( 0 = = pStr - > compare ( propertyName . c_str ( ) ) ) <nl> { <nl> bFound = true ; <nl> void NodeLoader : : parseProperties ( Node * pNode , Node * pParent , CCBReader * pCCBR <nl> } <nl> else if ( isExtraProp & & pNode = = pCCBReader - > getAnimationManager ( ) - > getRootNode ( ) ) <nl> { <nl> - Array * extraPropsNames = ( Array * ) pNode - > getUserObject ( ) ; <nl> + Array * extraPropsNames = static_cast < Array * > ( pNode - > getUserObject ( ) ) ; <nl> if ( ! extraPropsNames ) <nl> { <nl> extraPropsNames = Array : : create ( ) ; <nl> mmm a / extensions / GUI / CCControlExtension / CCControl . cpp <nl> ppp b / extensions / GUI / CCControlExtension / CCControl . 
cpp <nl> void Control : : sendActionsForControlEvents ( ControlEvent controlEvents ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( invocationList , pObj ) <nl> { <nl> - Invocation * invocation = ( Invocation * ) pObj ; <nl> + Invocation * invocation = static_cast < Invocation * > ( pObj ) ; <nl> invocation - > invoke ( this ) ; <nl> } <nl> / / Call ScriptFunc <nl> void Control : : removeTargetWithActionForControlEvent ( Object * target , SEL_CCContro <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( eventInvocationList , pObj ) <nl> { <nl> - Invocation * invocation = ( Invocation * ) pObj ; <nl> + Invocation * invocation = static_cast < Invocation * > ( pObj ) ; <nl> bool shouldBeRemoved = true ; <nl> if ( target ) <nl> { <nl> mmm a / extensions / GUI / CCControlExtension / CCControlButton . cpp <nl> ppp b / extensions / GUI / CCControlExtension / CCControlButton . cpp <nl> void ControlButton : : setPreferredSize ( Size size ) <nl> DictElement * item = NULL ; <nl> CCDICT_FOREACH ( _backgroundSpriteDispatchTable , item ) <nl> { <nl> - Scale9Sprite * sprite = ( Scale9Sprite * ) item - > getObject ( ) ; <nl> + Scale9Sprite * sprite = static_cast < Scale9Sprite * > ( item - > getObject ( ) ) ; <nl> sprite - > setPreferredSize ( size ) ; <nl> } <nl> } <nl> void ControlButton : : setOpacity ( GLubyte opacity ) <nl> DictElement * item = NULL ; <nl> CCDICT_FOREACH ( _backgroundSpriteDispatchTable , item ) <nl> { <nl> - Scale9Sprite * sprite = ( Scale9Sprite * ) item - > getObject ( ) ; <nl> + Scale9Sprite * sprite = static_cast < Scale9Sprite * > ( item - > getObject ( ) ) ; <nl> sprite - > setOpacity ( opacity ) ; <nl> } <nl> } <nl> void ControlButton : : setColor ( const Color3B & color ) <nl> DictElement * item = NULL ; <nl> CCDICT_FOREACH ( _backgroundSpriteDispatchTable , item ) <nl> { <nl> - Scale9Sprite * sprite = ( Scale9Sprite * ) item - > getObject ( ) ; <nl> + Scale9Sprite * sprite = static_cast < Scale9Sprite * > ( item - > getObject ( ) ) ; <nl> sprite - > setColor ( color ) ; <nl> } <nl> } <nl> mmm a / extensions / GUI / CCScrollView / CCScrollView . cpp <nl> ppp b / extensions / GUI / CCScrollView / CCScrollView . cpp <nl> void ScrollView : : pause ( Object * sender ) <nl> <nl> CCARRAY_FOREACH ( pChildren , pObj ) <nl> { <nl> - Node * pChild = ( Node * ) pObj ; <nl> + Node * pChild = static_cast < Node * > ( pObj ) ; <nl> pChild - > pauseSchedulerAndActions ( ) ; <nl> } <nl> } <nl> void ScrollView : : resume ( Object * sender ) <nl> <nl> CCARRAY_FOREACH ( pChildren , pObj ) <nl> { <nl> - Node * pChild = ( Node * ) pObj ; <nl> + Node * pChild = static_cast < Node * > ( pObj ) ; <nl> pChild - > resumeSchedulerAndActions ( ) ; <nl> } <nl> <nl> mmm a / extensions / GUI / CCScrollView / CCTableView . cpp <nl> ppp b / extensions / GUI / CCScrollView / CCTableView . cpp <nl> void TableView : : reloadData ( ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( _cellsUsed , pObj ) <nl> { <nl> - TableViewCell * cell = ( TableViewCell * ) pObj ; <nl> + TableViewCell * cell = static_cast < TableViewCell * > ( pObj ) ; <nl> <nl> if ( _tableViewDelegate ! 
= NULL ) { <nl> _tableViewDelegate - > tableCellWillRecycle ( this , cell ) ; <nl> void TableView : : scrollViewDidScroll ( ScrollView * view ) <nl> int i = 0 ; <nl> CCARRAY_FOREACH ( _cellsUsed , pObj ) <nl> { <nl> - TableViewCell * pCell = ( TableViewCell * ) pObj ; <nl> + TableViewCell * pCell = static_cast < TableViewCell * > ( pObj ) ; <nl> CCLog ( " cells Used index % d , value = % d " , i , pCell - > getIdx ( ) ) ; <nl> i + + ; <nl> } <nl> void TableView : : scrollViewDidScroll ( ScrollView * view ) <nl> i = 0 ; <nl> CCARRAY_FOREACH ( _cellsFreed , pObj ) <nl> { <nl> - TableViewCell * pCell = ( TableViewCell * ) pObj ; <nl> + TableViewCell * pCell = static_cast < TableViewCell * > ( pObj ) ; <nl> CCLog ( " cells freed index % d , value = % d " , i , pCell - > getIdx ( ) ) ; <nl> i + + ; <nl> } <nl> mmm a / samples / Cpp / TestCpp / Classes / BugsTest / Bug - 422 . cpp <nl> ppp b / samples / Cpp / TestCpp / Classes / BugsTest / Bug - 422 . cpp <nl> void Bug422Layer : : check ( Node * t ) <nl> CCARRAY_FOREACH ( array , pChild ) <nl> { <nl> CC_BREAK_IF ( ! pChild ) ; <nl> - Node * pNode = ( Node * ) pChild ; <nl> + Node * pNode = static_cast < Node * > ( pChild ) ; <nl> CCLog ( " % p , rc : % d " , pNode , pNode - > retainCount ( ) ) ; <nl> check ( pNode ) ; <nl> } <nl> mmm a / samples / Cpp / TestCpp / Classes / ExtensionsTest / ComponentsTest / ProjectileController . cpp <nl> ppp b / samples / Cpp / TestCpp / Classes / ExtensionsTest / ComponentsTest / ProjectileController . cpp <nl> void ProjectileController : : update ( float delta ) <nl> CCARRAY_FOREACH ( targetsToDelete , jt ) <nl> { <nl> Sprite * target = dynamic_cast < Sprite * > ( jt ) ; <nl> - ( ( EnemyController * ) ( target - > getComponent ( " EnemyController " ) ) ) - > die ( ) ; <nl> + static_cast < EnemyController * > ( target - > getComponent ( " EnemyController " ) ) - > die ( ) ; <nl> } <nl> <nl> bool isDied = targetsToDelete - > count ( ) ; <nl> mmm a / samples / Cpp / TestCpp / Classes / ExtensionsTest / ControlExtensionTest / CCControlButtonTest / CCControlButtonTest . cpp <nl> ppp b / samples / Cpp / TestCpp / Classes / ExtensionsTest / ControlExtensionTest / CCControlButtonTest / CCControlButtonTest . cpp <nl> bool ControlButtonTest_HelloVariableSize : : init ( ) <nl> int i = 0 ; <nl> CCARRAY_FOREACH ( stringArray , pObj ) <nl> { <nl> - String * title = ( String * ) pObj ; <nl> + String * title = static_cast < String * > ( pObj ) ; <nl> / / Creates a button with this string as title <nl> ControlButton * button = standardButtonWithTitle ( title - > getCString ( ) ) ; <nl> if ( i = = 0 ) <nl> mmm a / samples / Cpp / TestCpp / Classes / LayerTest / LayerTest . cpp <nl> ppp b / samples / Cpp / TestCpp / Classes / LayerTest / LayerTest . cpp <nl> static void setEnableRecursiveCascading ( Node * node , bool enable ) <nl> Array * children = node - > getChildren ( ) ; <nl> CCARRAY_FOREACH ( children , obj ) <nl> { <nl> - Node * child = ( Node * ) obj ; <nl> + Node * child = static_cast < Node * > ( obj ) ; <nl> setEnableRecursiveCascading ( child , enable ) ; <nl> } <nl> } <nl> mmm a / samples / Cpp / TestCpp / Classes / PerformanceTest / PerformanceNodeChildrenTest . cpp <nl> ppp b / samples / Cpp / TestCpp / Classes / PerformanceTest / PerformanceNodeChildrenTest . 
cpp <nl> void IterateSpriteSheetFastEnum : : update ( float dt ) <nl> <nl> CCARRAY_FOREACH ( pChildren , pObject ) <nl> { <nl> - Sprite * pSprite = ( Sprite * ) pObject ; <nl> + Sprite * pSprite = static_cast < Sprite * > ( pObject ) ; <nl> pSprite - > setVisible ( false ) ; <nl> } <nl> <nl> void IterateSpriteSheetCArray : : update ( float dt ) <nl> <nl> CCARRAY_FOREACH ( pChildren , pObject ) <nl> { <nl> - Sprite * pSprite = ( Sprite * ) pObject ; <nl> + Sprite * pSprite = static_cast < Sprite * > ( pObject ) ; <nl> pSprite - > setVisible ( false ) ; <nl> } <nl> <nl> mmm a / samples / Cpp / TestCpp / Classes / TileMapTest / TileMapTest . cpp <nl> ppp b / samples / Cpp / TestCpp / Classes / TileMapTest / TileMapTest . cpp <nl> TMXOrthoTest : : TMXOrthoTest ( ) <nl> Object * pObject = NULL ; <nl> CCARRAY_FOREACH ( pChildrenArray , pObject ) <nl> { <nl> - child = ( SpriteBatchNode * ) pObject ; <nl> + child = static_cast < SpriteBatchNode * > ( pObject ) ; <nl> <nl> if ( ! child ) <nl> break ; <nl> TMXOrthoTest2 : : TMXOrthoTest2 ( ) <nl> Object * pObject = NULL ; <nl> CCARRAY_FOREACH ( pChildrenArray , pObject ) <nl> { <nl> - child = ( SpriteBatchNode * ) pObject ; <nl> + child = static_cast < SpriteBatchNode * > ( pObject ) ; <nl> <nl> if ( ! child ) <nl> break ; <nl> TMXOrthoTest3 : : TMXOrthoTest3 ( ) <nl> Object * pObject = NULL ; <nl> CCARRAY_FOREACH ( pChildrenArray , pObject ) <nl> { <nl> - child = ( SpriteBatchNode * ) pObject ; <nl> + child = static_cast < SpriteBatchNode * > ( pObject ) ; <nl> <nl> if ( ! child ) <nl> break ; <nl> TMXOrthoTest4 : : TMXOrthoTest4 ( ) <nl> Object * pObject = NULL ; <nl> CCARRAY_FOREACH ( pChildrenArray , pObject ) <nl> { <nl> - child = ( SpriteBatchNode * ) pObject ; <nl> + child = static_cast < SpriteBatchNode * > ( pObject ) ; <nl> <nl> if ( ! child ) <nl> break ; <nl> TMXUncompressedTest : : TMXUncompressedTest ( ) <nl> Object * pObject = NULL ; <nl> CCARRAY_FOREACH ( pChildrenArray , pObject ) <nl> { <nl> - layer = ( TMXLayer * ) pObject ; <nl> + layer = static_cast < TMXLayer * > ( pObject ) ; <nl> <nl> if ( ! layer ) <nl> break ; <nl> TMXOrthoObjectsTest : : TMXOrthoObjectsTest ( ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( objects , pObj ) <nl> { <nl> - dict = ( Dictionary * ) pObj ; / / dynamic_cast < StringToStringDictionary * > ( * it ) ; <nl> + dict = static_cast < Dictionary * > ( pObj ) ; <nl> <nl> if ( ! dict ) <nl> break ; <nl> void TMXOrthoObjectsTest : : draw ( ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( objects , pObj ) <nl> { <nl> - dict = ( Dictionary * ) pObj ; / / dynamic_cast < StringToStringDictionary * > ( * it ) ; <nl> + dict = static_cast < Dictionary * > ( pObj ) ; <nl> <nl> if ( ! dict ) <nl> break ; <nl> TMXIsoObjectsTest : : TMXIsoObjectsTest ( ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( objects , pObj ) <nl> { <nl> - dict = ( Dictionary * ) pObj ; <nl> + dict = static_cast < Dictionary * > ( pObj ) ; <nl> <nl> if ( ! dict ) <nl> break ; <nl> void TMXIsoObjectsTest : : draw ( ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( objects , pObj ) <nl> { <nl> - dict = ( Dictionary * ) pObj ; / / dynamic_cast < StringToStringDictionary * > ( * it ) ; <nl> + dict = static_cast < Dictionary * > ( pObj ) ; <nl> <nl> if ( ! 
dict ) <nl> break ; <nl> const char * key = " x " ; <nl> - int x = ( ( String * ) dict - > objectForKey ( key ) ) - > intValue ( ) ; / / dynamic_cast < NSNumber * > ( dict - > objectForKey ( " x " ) ) - > getNumber ( ) ; <nl> + int x = static_cast < String * > ( dict - > objectForKey ( key ) ) - > intValue ( ) ; <nl> key = " y " ; <nl> - int y = ( ( String * ) dict - > objectForKey ( key ) ) - > intValue ( ) ; / / dynamic_cast < NSNumber * > ( dict - > objectForKey ( " y " ) ) - > getNumber ( ) ; <nl> + int y = static_cast < String * > ( dict - > objectForKey ( key ) ) - > intValue ( ) ; <nl> key = " width " ; <nl> - int width = ( ( String * ) dict - > objectForKey ( key ) ) - > intValue ( ) ; / / dynamic_cast < NSNumber * > ( dict - > objectForKey ( " width " ) ) - > getNumber ( ) ; <nl> + int width = static_cast < String * > ( dict - > objectForKey ( key ) ) - > intValue ( ) ; <nl> key = " height " ; <nl> - int height = ( ( String * ) dict - > objectForKey ( key ) ) - > intValue ( ) ; / / dynamic_cast < NSNumber * > ( dict - > objectForKey ( " height " ) ) - > getNumber ( ) ; <nl> + int height = static_cast < String * > ( dict - > objectForKey ( key ) ) - > intValue ( ) ; <nl> <nl> glLineWidth ( 3 ) ; <nl> <nl> TMXOrthoFlipTest : : TMXOrthoFlipTest ( ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( map - > getChildren ( ) , pObj ) <nl> { <nl> - SpriteBatchNode * child = ( SpriteBatchNode * ) pObj ; <nl> + SpriteBatchNode * child = static_cast < SpriteBatchNode * > ( pObj ) ; <nl> child - > getTexture ( ) - > setAntiAliasTexParameters ( ) ; <nl> } <nl> <nl> TMXOrthoFlipRunTimeTest : : TMXOrthoFlipRunTimeTest ( ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( map - > getChildren ( ) , pObj ) <nl> { <nl> - SpriteBatchNode * child = ( SpriteBatchNode * ) pObj ; <nl> + SpriteBatchNode * child = static_cast < SpriteBatchNode * > ( pObj ) ; <nl> child - > getTexture ( ) - > setAntiAliasTexParameters ( ) ; <nl> } <nl> <nl> TMXOrthoFromXMLTest : : TMXOrthoFromXMLTest ( ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( map - > getChildren ( ) , pObj ) <nl> { <nl> - SpriteBatchNode * child = ( SpriteBatchNode * ) pObj ; <nl> + SpriteBatchNode * child = static_cast < SpriteBatchNode * > ( pObj ) ; <nl> child - > getTexture ( ) - > setAntiAliasTexParameters ( ) ; <nl> } <nl> <nl> TMXBug987 : : TMXBug987 ( ) <nl> Object * pObject = NULL ; <nl> CCARRAY_FOREACH ( childs , pObject ) <nl> { <nl> - pNode = ( TMXLayer * ) pObject ; <nl> + pNode = static_cast < TMXLayer * > ( pObject ) ; <nl> CC_BREAK_IF ( ! pNode ) ; <nl> pNode - > getTexture ( ) - > setAntiAliasTexParameters ( ) ; <nl> } <nl> void TMXGIDObjectsTest : : draw ( ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( array , pObj ) <nl> { <nl> - dict = ( Dictionary * ) pObj ; <nl> + dict = static_cast < Dictionary * > ( pObj ) ; <nl> if ( ! dict ) <nl> { <nl> break ; <nl> mmm a / samples / Cpp / TestCpp / Classes / TouchesTest / TouchesTest . cpp <nl> ppp b / samples / Cpp / TestCpp / Classes / TouchesTest / TouchesTest . cpp <nl> PongLayer : : PongLayer ( ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( _paddles , pObj ) <nl> { <nl> - paddle = ( Paddle * ) ( pObj ) ; <nl> + paddle = static_cast < Paddle * > ( pObj ) ; <nl> <nl> if ( ! paddle ) <nl> break ; <nl> void PongLayer : : doStep ( float delta ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( _paddles , pObj ) <nl> { <nl> - paddle = ( Paddle * ) ( pObj ) ; <nl> + paddle = static_cast < Paddle * > ( pObj ) ; <nl> <nl> if ( ! 
paddle ) <nl> break ; <nl> mmm a / scripting / javascript / bindings / cocos2d_specifics . cpp . REMOVED . git - id <nl> ppp b / scripting / javascript / bindings / cocos2d_specifics . cpp . REMOVED . git - id <nl> @ @ - 1 + 1 @ @ <nl> - ef40a853a2bf6590fbd9af75f8b318844caf4b64 <nl> \ No newline at end of file <nl> + 283cec6d4443dbb957d240238c35201064da699f <nl> \ No newline at end of file <nl> mmm a / scripting / lua / cocos2dx_support / Lua_web_socket . cpp <nl> ppp b / scripting / lua / cocos2dx_support / Lua_web_socket . cpp <nl> static int tolua_Cocos2d_WebSocket_createByProtocolArray00 ( lua_State * tolua_S ) <nl> Object * pObj = NULL ; <nl> CCARRAY_FOREACH ( protocolArray , pObj ) <nl> { <nl> - String * pStr = ( String * ) pObj ; <nl> + String * pStr = static_cast < String * > ( pObj ) ; <nl> if ( NULL ! = pStr ) { <nl> protocols . push_back ( pStr - > getCString ( ) ) ; <nl> } <nl>
Merge pull request from dumganhar / iss2387 - static_cast - love
cocos2d/cocos2d-x
9741d748b55b41d26268a2625bf839ac63b4b723
2013-07-09T06:46:17Z
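The cocos2d-x record above is a sweep that replaces C-style casts with static_cast<> inside CCARRAY_FOREACH loops. As a rough illustration of why that substitution helps, here is a minimal self-contained C++ sketch; Base and Derived are hypothetical stand-ins for the engine's Object and handler classes, not cocos2d types.

#include <iostream>
#include <vector>

struct Base { virtual ~Base() = default; };
struct Derived : Base {
    void hello() const { std::cout << "derived\n"; }
};

int main() {
    std::vector<Base*> items;
    items.push_back(new Derived());
    items.push_back(new Derived());
    for (Base* obj : items) {
        // (Derived*)obj would also compile here, but a C-style cast keeps
        // compiling even if Base and Derived drift into unrelated types.
        // static_cast is checked against the class hierarchy at compile time.
        Derived* d = static_cast<Derived*>(obj);
        d->hello();
    }
    for (Base* obj : items) delete obj;
    return 0;
}

For a valid downcast the behaviour is unchanged; the gain is that an invalid cast becomes a compile error instead of silently degrading to a reinterpret_cast.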
mmm a / Source / SGDLib / SimpleDistGradAggregator . h <nl> ppp b / Source / SGDLib / SimpleDistGradAggregator . h <nl> class SimpleDistGradAggregator : public IDistGradAggregator < ElemType > <nl> offset + = gradients [ i ] - > GetNumElements ( ) ; <nl> } <nl> <nl> - / / Initiate transfer of the bufferred data to the CPU if needed <nl> - if ( ShouldCopyDataToCPU ( deviceId ) ) <nl> - { <nl> - size_t gpuDataTransfersIdx = 0 ; <nl> - Matrix < ElemType > * gpuCopyBuffer = m_aggregationBuffer . get ( ) ; <nl> - for ( size_t i : m_gradientIndexToAggregate ) <nl> - { <nl> - if ( i ! = - 1 ) <nl> - { <nl> - gpuCopyBuffer = gradients [ i ] ; <nl> - } <nl> - else <nl> - { <nl> - / / i = = - 1 , first element is for packed gradients , which should not be with AsyncAggregation <nl> - assert ( m_useAsyncAggregation = = false ) ; <nl> - } <nl> - m_gpuDataTransferers [ gpuDataTransfersIdx ] - > CopyGPUToCPUAsync ( gpuCopyBuffer - > Data ( ) , gpuCopyBuffer - > GetNumElements ( ) , m_intermediateCPUBuffers [ gpuDataTransfersIdx ] . get ( ) ) ; <nl> - gpuDataTransfersIdx + + ; <nl> - } <nl> - } <nl> - <nl> / / Initiate receive of the header on the main node <nl> std : : vector < MPI_Request > recvHeaderRequests ( NumProc ( ) - 1 ) ; <nl> if ( m_mpi - > IsMainNode ( ) ) <nl> class SimpleDistGradAggregator : public IDistGradAggregator < ElemType > <nl> if ( ! m_mpi - > IsMainNode ( ) ) <nl> m_mpi - > Isend ( headerCPU , headerCPU - > Size ( ) , MPI_CHAR , m_mpi - > MainNodeRank ( ) , numGradMatrices , & sendHeaderRequest ) | | MpiFail ( " MPI_Isend " ) ; <nl> <nl> - / / Perform async allreduce on the gradient data <nl> + <nl> + / / New aggregation pipeline for non - GDR , perform sync allreduce on the gradient data <nl> + / / For CPU , still use async allreduce <nl> std : : vector < MPI_Request > allReduceRequests ; <nl> - if ( ! m_nccl . IsSupported ( ) ) <nl> + size_t gpuToCpuIndex = 0 ; <nl> + size_t cpuToGpuIndex = 0 ; <nl> + size_t allReduceIndex = 0 ; <nl> + size_t numGradientIndex = m_gradientIndexToAggregate . size ( ) ; <nl> + if ( numGradientIndex > 0 ) <nl> { <nl> - size_t allReduceIndex = 0 ; <nl> - ElemType * reductionBuffer ; <nl> - for ( size_t i : m_gradientIndexToAggregate ) <nl> + / / non - GDR & & GPU <nl> + if ( ( m_mpi - > UseGpuGdr ( ) = = 0 ) & & ( deviceId ! = CPUDEVICE ) ) <nl> { <nl> - allReduceRequests . push_back ( MPI_Request ( ) ) ; <nl> - reductionBuffer = ( i = = - 1 ) ? m_aggregationBuffer - > Data ( ) : gradients [ i ] - > Data ( ) ; <nl> - if ( m_mpi - > UseGpuGdr ( ) = = 0 & & deviceId ! = CPUDEVICE ) <nl> + Matrix < ElemType > * gpuCopyBuffer = m_aggregationBuffer . get ( ) ; <nl> + <nl> + ElemType * reductionBuffer ; <nl> + / / currentGradientIndex will load the index from m_gradientIndexToAggregate <nl> + size_t currentGradientIndex = m_gradientIndexToAggregate [ 0 ] ; <nl> + size_t nextGradientIndex = - 2 ; <nl> + / / Get the first Gradient , and do async D - to - H copy <nl> + if ( currentGradientIndex ! = - 1 ) <nl> { <nl> - m_gpuDataTransferers [ allReduceIndex ] - > WaitForCopyGPUToCPUAsync ( ) ; <nl> - reductionBuffer = m_intermediateCPUBuffers [ allReduceIndex ] . get ( ) ; <nl> + gpuCopyBuffer = gradients [ currentGradientIndex ] ; <nl> } <nl> - <nl> - if ( m_mpi - > UseGpuGdr ( ) = = 0 ) <nl> + else <nl> { <nl> - m_mpi - > Iallreduce ( MPI_IN_PLACE , reductionBuffer , ( i = = - 1 ) ? m_aggregationBuffer - > GetNumElements ( ) : gradients [ i ] - > GetNumElements ( ) , <nl> - MPIWrapper : : GetDataType ( reductionBuffer ) , MPI_SUM , & allReduceRequests . 
back ( ) ) | | MpiFail ( " MPI_Iallreduce " ) ; <nl> - allReduceIndex + + ; <nl> + / / currentGradientIndex = = - 1 , first element is for packed gradients , which should not be with AsyncAggregation <nl> + assert ( m_useAsyncAggregation = = false ) ; <nl> } <nl> - / / TODO : Remove this when MPI_Iallreduce with CUDA - aware is supported <nl> - else <nl> + / / First sync_g_to_c_copy <nl> + / / TODO : we need a CopyGPUToCPUSync <nl> + cudaMemcpy ( m_intermediateCPUBuffers [ gpuToCpuIndex ] . get ( ) , gpuCopyBuffer - > Data ( ) , gpuCopyBuffer - > GetNumElements ( ) * sizeof ( ElemType ) , cudaMemcpyDeviceToHost ) ; <nl> + gpuToCpuIndex + + ; <nl> + <nl> + for ( size_t i = 1 ; i < = numGradientIndex ; i + + ) <nl> { <nl> - m_mpi - > AllReduce ( reductionBuffer , ( i = = - 1 ) ? m_aggregationBuffer - > GetNumElements ( ) : gradients [ i ] - > GetNumElements ( ) ) ; <nl> + / / Get next gradient <nl> + if ( i < numGradientIndex ) <nl> + { <nl> + nextGradientIndex = m_gradientIndexToAggregate [ i ] ; <nl> + if ( nextGradientIndex ! = - 1 ) <nl> + { <nl> + gpuCopyBuffer = gradients [ nextGradientIndex ] ; <nl> + } <nl> + else <nl> + { <nl> + / / currentGradientIndex = = - 1 , first element is for packed gradients , which should not be with AsyncAggregation <nl> + assert ( m_useAsyncAggregation = = false ) ; <nl> + } <nl> + / / Async D - to - H copy ( next gradient ) <nl> + m_gpuDataTransferers [ gpuToCpuIndex ] - > CopyGPUToCPUAsync ( gpuCopyBuffer - > Data ( ) , gpuCopyBuffer - > GetNumElements ( ) , m_intermediateCPUBuffers [ gpuToCpuIndex ] . get ( ) ) ; <nl> + } <nl> + / / Wait for previous copy <nl> + m_gpuDataTransferers [ allReduceIndex ] - > WaitForCopyGPUToCPUAsync ( ) ; <nl> + <nl> + / / Allreduce <nl> + reductionBuffer = m_intermediateCPUBuffers [ allReduceIndex ] . get ( ) ; <nl> + m_mpi - > AllReduce ( reductionBuffer , ( currentGradientIndex = = - 1 ) ? m_aggregationBuffer - > GetNumElements ( ) : gradients [ currentGradientIndex ] - > GetNumElements ( ) ) ; <nl> + <nl> + / / Create async H - to - G copy <nl> + cpuToGpuIndex = allReduceIndex ; <nl> + m_gpuDataTransferers [ cpuToGpuIndex ] - > CopyCPUToGPUAsync ( m_intermediateCPUBuffers [ cpuToGpuIndex ] . get ( ) , <nl> + ( currentGradientIndex = = - 1 ) ? m_aggregationBuffer - > GetNumElements ( ) : gradients [ currentGradientIndex ] - > GetNumElements ( ) , <nl> + ( currentGradientIndex = = - 1 ) ? m_aggregationBuffer - > Data ( ) : gradients [ currentGradientIndex ] - > Data ( ) ) ; <nl> + allReduceIndex = gpuToCpuIndex ; <nl> + gpuToCpuIndex + + ; <nl> + currentGradientIndex = nextGradientIndex ; <nl> } <nl> } <nl> - } <nl> - else <nl> - { <nl> - std : : vector < Matrix < ElemType > * > ncclReduceGradients ; <nl> - for ( size_t i : m_gradientIndexToAggregate ) <nl> + / / non - NCCL <nl> + else if ( ! m_nccl . IsSupported ( ) ) <nl> { <nl> - ncclReduceGradients . push_back ( ( i = = - 1 ) ? m_aggregationBuffer . get ( ) : gradients [ i ] ) ; <nl> + ElemType * reductionBuffer ; <nl> + for ( size_t i : m_gradientIndexToAggregate ) <nl> + { <nl> + allReduceRequests . push_back ( MPI_Request ( ) ) ; <nl> + reductionBuffer = ( i = = - 1 ) ? m_aggregationBuffer - > Data ( ) : gradients [ i ] - > Data ( ) ; <nl> + / / CPU <nl> + if ( m_mpi - > UseGpuGdr ( ) = = 0 ) <nl> + { <nl> + m_mpi - > Iallreduce ( MPI_IN_PLACE , reductionBuffer , ( i = = - 1 ) ? m_aggregationBuffer - > GetNumElements ( ) : gradients [ i ] - > GetNumElements ( ) , <nl> + MPIWrapper : : GetDataType ( reductionBuffer ) , MPI_SUM , & allReduceRequests . 
back ( ) ) | | MpiFail ( " MPI_Iallreduce " ) ; <nl> + allReduceIndex + + ; <nl> + } <nl> + / / GDR & & GPU <nl> + else if ( deviceId ! = CPUDEVICE ) <nl> + { <nl> + m_mpi - > AllReduce ( reductionBuffer , ( i = = - 1 ) ? m_aggregationBuffer - > GetNumElements ( ) : gradients [ i ] - > GetNumElements ( ) ) ; <nl> + } <nl> + } <nl> + } <nl> + else if ( m_nccl . IsSupported ( ) ) <nl> + { <nl> + std : : vector < Matrix < ElemType > * > ncclReduceGradients ; <nl> + for ( size_t i : m_gradientIndexToAggregate ) <nl> + { <nl> + ncclReduceGradients . push_back ( ( i = = - 1 ) ? m_aggregationBuffer . get ( ) : gradients [ i ] ) ; <nl> + } <nl> + m_nccl . AllReduce ( ncclReduceGradients ) ; <nl> } <nl> - m_nccl . AllReduce ( ncclReduceGradients ) ; <nl> } <nl> <nl> / / On the main node wait for the headers to arrive and aggregate <nl> class SimpleDistGradAggregator : public IDistGradAggregator < ElemType > <nl> { <nl> m_nccl . Sync ( ) ; <nl> } <nl> - / / TODO : Remove this when MPI_Iallreduce with CUDA - aware is supported <nl> + / / Non - GDR & & GPU <nl> + else if ( ( m_mpi - > UseGpuGdr ( ) = = 0 ) & & ( deviceId ! = CPUDEVICE ) ) <nl> + { <nl> + / / Wait for async CPU - to - GPU copy ( non - GDR ) <nl> + for ( size_t i = 0 ; i < allReduceIndex ; i + + ) <nl> + m_gpuDataTransferers [ i ] - > WaitForCopyCPUToGPUAsync ( ) ; <nl> + } <nl> + / / CPU <nl> else if ( m_mpi - > UseGpuGdr ( ) = = 0 ) <nl> { <nl> - / / Wait for the allreduce operations to finish and initiate transfer back to the GPU if needed <nl> - size_t gpuDataTransfersIdx = 0 ; / / Index of allReduceRequest for each un - packed gradient <nl> - for ( size_t i : m_gradientIndexToAggregate ) <nl> - { <nl> - m_mpi - > Wait ( & allReduceRequests [ gpuDataTransfersIdx ] , MPI_STATUSES_IGNORE ) | | MpiFail ( " MPI_Wait " ) ; <nl> - if ( deviceId ! = CPUDEVICE ) <nl> - { <nl> - m_gpuDataTransferers [ gpuDataTransfersIdx ] - > CopyCPUToGPUAsync ( m_intermediateCPUBuffers [ gpuDataTransfersIdx ] . get ( ) , <nl> - ( i = = - 1 ) ? m_aggregationBuffer - > GetNumElements ( ) : gradients [ i ] - > GetNumElements ( ) , <nl> - ( i = = - 1 ) ? m_aggregationBuffer - > Data ( ) : gradients [ i ] - > Data ( ) ) ; <nl> - } <nl> - gpuDataTransfersIdx + + ; <nl> - } <nl> - <nl> - / / Wait for copy data from CPU to GPU , if not running on CPU and not NCCL enabled <nl> - if ( deviceId ! = CPUDEVICE ) <nl> + / / Wait for the Iallreduce operations to finish <nl> + for ( size_t i = 0 ; i < allReduceIndex ; i + + ) <nl> { <nl> - for ( size_t i = 0 ; i < m_gradientIndexToAggregate . size ( ) ; i + + ) <nl> - m_gpuDataTransferers [ i ] - > WaitForCopyCPUToGPUAsync ( ) ; <nl> + m_mpi - > Wait ( & allReduceRequests [ i ] , MPI_STATUSES_IGNORE ) | | MpiFail ( " MPI_Wait " ) ; <nl> } <nl> } <nl> <nl>
optimize AggregateGradientsImpl ( async pipeline ) for non - GDR & GPU scenario
microsoft/CNTK
afc8deb9335bc328cc18aaa2dc540576072c30ee
2017-09-27T02:46:16Z
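The CNTK record above rewrites the aggregation so that, on the non-GDR GPU path, gradient exchange is pipelined: the device-to-host copy of the next gradient is issued asynchronously, the gradient already on the host is reduced with a synchronous AllReduce, and the reduced buffer is copied back to the device asynchronously. The following is only a schematic sketch of that double-buffered overlap, written in plain C++ with std::async standing in for the CUDA transfers and the MPI reduction; copy_to_host, all_reduce and copy_to_device are invented placeholders, not CNTK, CUDA or MPI calls.

#include <cstddef>
#include <future>
#include <iostream>
#include <vector>

// Placeholders for the real transfers and reduction.
void copy_to_host(int g)   { std::cout << "D2H copy of gradient "  << g << "\n"; }
void all_reduce(int g)     { std::cout << "AllReduce on gradient " << g << "\n"; }
void copy_to_device(int g) { std::cout << "H2D copy of gradient "  << g << "\n"; }

int main() {
    std::vector<int> gradients = {0, 1, 2, 3};
    // Prefetch: start copying the first gradient before entering the loop.
    std::future<void> pending = std::async(std::launch::async, copy_to_host, gradients[0]);
    for (std::size_t i = 0; i < gradients.size(); ++i) {
        pending.wait();                        // previous D2H copy has finished
        if (i + 1 < gradients.size())          // overlap: start copying the next gradient
            pending = std::async(std::launch::async, copy_to_host, gradients[i + 1]);
        all_reduce(gradients[i]);              // reduce the gradient already on the host
        copy_to_device(gradients[i]);          // send the reduced values back to the device
    }
    return 0;
}

The point of the structure is that the reduction of gradient i runs while gradient i+1 is still in flight to the host, which is the same overlap the commit achieves with CopyGPUToCPUAsync, a blocking AllReduce and CopyCPUToGPUAsync.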
mmm a / src / AddRecordDialog . cpp <nl> ppp b / src / AddRecordDialog . cpp <nl> class NullLineEdit : public QLineEdit { <nl> bool m_isNull ; <nl> <nl> public : <nl> - NullLineEdit ( QWidget * parent = nullptr ) : QLineEdit ( parent ) , m_isNull ( true ) { } <nl> + explicit NullLineEdit ( QWidget * parent = nullptr ) : QLineEdit ( parent ) , m_isNull ( true ) { } <nl> <nl> bool isNull ( ) { return m_isNull ; } <nl> void setNull ( bool value ) { <nl> class NullLineEdit : public QLineEdit { <nl> / / Styled Item Delegate for non - editable columns ( all except Value ) <nl> class NoEditDelegate : public QStyledItemDelegate { <nl> public : <nl> - NoEditDelegate ( QObject * parent = nullptr ) : QStyledItemDelegate ( parent ) { } <nl> + explicit NoEditDelegate ( QObject * parent = nullptr ) : QStyledItemDelegate ( parent ) { } <nl> QWidget * createEditor ( QWidget * / * parent * / , const QStyleOptionViewItem & / * option * / , const QModelIndex & / * index * / ) const override { <nl> return nullptr ; <nl> } <nl> class NoEditDelegate : public QStyledItemDelegate { <nl> class EditDelegate : public QStyledItemDelegate { <nl> <nl> public : <nl> - EditDelegate ( QObject * parent = nullptr ) : QStyledItemDelegate ( parent ) { } <nl> + explicit EditDelegate ( QObject * parent = nullptr ) : QStyledItemDelegate ( parent ) { } <nl> QWidget * createEditor ( QWidget * parent , const QStyleOptionViewItem & / * option * / , const QModelIndex & / * index * / ) const override { <nl> return new NullLineEdit ( parent ) ; <nl> } <nl> void AddRecordDialog : : populateFields ( ) <nl> ui - > treeWidget - > setItemDelegateForColumn ( kValue , new EditDelegate ( this ) ) ; <nl> <nl> sqlb : : FieldVector fields ; <nl> - QVector < sqlb : : ConstraintPtr > fks ; <nl> + std : : vector < sqlb : : ConstraintPtr > fks ; <nl> QStringList pk ; <nl> <nl> / / Initialize fields , fks and pk differently depending on whether it ' s a table or a view . <nl> void AddRecordDialog : : populateFields ( ) <nl> sqlb : : TablePtr m_table = pdb . getObjectByName < sqlb : : Table > ( curTable ) ; <nl> fields = m_table - > fields ; <nl> for ( const sqlb : : Field & f : fields ) <nl> - fks . append ( m_table - > constraint ( { f . name ( ) } , sqlb : : Constraint : : ForeignKeyConstraintType ) ) ; <nl> + fks . push_back ( m_table - > constraint ( { f . name ( ) } , sqlb : : Constraint : : ForeignKeyConstraintType ) ) ; <nl> pk = m_table - > primaryKey ( ) ; <nl> } else { <nl> sqlb : : ViewPtr m_view = pdb . getObjectByName < sqlb : : View > ( curTable ) ; <nl> fields = m_view - > fields ; <nl> - fks . fill ( sqlb : : ConstraintPtr ( nullptr ) , fields . size ( ) ) ; <nl> + fks . resize ( fields . size ( ) , sqlb : : ConstraintPtr ( nullptr ) ) ; <nl> for ( const auto & col : pseudo_pk ) <nl> pk < < QString : : fromStdString ( col ) ; <nl> } <nl> mmm a / src / CipherSettings . cpp <nl> ppp b / src / CipherSettings . cpp <nl> <nl> # include " CipherSettings . h " <nl> <nl> + CipherSettings : : CipherSettings ( ) <nl> + : keyFormat ( Passphrase ) , <nl> + pageSize ( 0 ) , <nl> + kdfIterations ( 0 ) <nl> + { <nl> + } <nl> + <nl> CipherSettings : : KeyFormats CipherSettings : : getKeyFormat ( ) const <nl> { <nl> return keyFormat ; <nl> mmm a / src / CipherSettings . h <nl> ppp b / src / CipherSettings . h <nl> <nl> class CipherSettings <nl> { <nl> public : <nl> + CipherSettings ( ) ; <nl> + <nl> enum KeyFormats <nl> { <nl> Passphrase , <nl> mmm a / src / CondFormat . cpp <nl> ppp b / src / CondFormat . 
cpp <nl> <nl> <nl> CondFormat : : CondFormat ( const QString & filter , const QColor & foreground , const QColor & background , const QString & encoding ) <nl> : m_filter ( filter ) , <nl> - m_fgColor ( foreground ) , <nl> - m_bgColor ( background ) <nl> + m_bgColor ( background ) , <nl> + m_fgColor ( foreground ) <nl> { <nl> if ( ! filter . isEmpty ( ) ) <nl> m_sqlCondition = filterToSqlCondition ( filter , encoding ) ; <nl> mmm a / src / CondFormat . h <nl> ppp b / src / CondFormat . h <nl> <nl> class CondFormat <nl> { <nl> public : <nl> - CondFormat ( ) { } ; <nl> + CondFormat ( ) { } <nl> explicit CondFormat ( const QString & filter , const QColor & foreground , const QColor & background , const QString & encoding = QString ( ) ) ; <nl> <nl> static QString filterToSqlCondition ( const QString & value , const QString & encoding = QString ( ) ) ; <nl> class CondFormat <nl> QColor m_fgColor ; <nl> <nl> public : <nl> - QString sqlCondition ( ) const { return m_sqlCondition ; } ; <nl> - QString filter ( ) const { return m_filter ; } ; <nl> - QColor backgroundColor ( ) const { return m_bgColor ; } ; <nl> - QColor foregroundColor ( ) const { return m_fgColor ; } ; <nl> + QString sqlCondition ( ) const { return m_sqlCondition ; } <nl> + QString filter ( ) const { return m_filter ; } <nl> + QColor backgroundColor ( ) const { return m_bgColor ; } <nl> + QColor foregroundColor ( ) const { return m_fgColor ; } <nl> <nl> } ; <nl> <nl> mmm a / src / Data . cpp <nl> ppp b / src / Data . cpp <nl> QByteArray removeBom ( QByteArray & data ) <nl> } <nl> } <nl> <nl> - QStringList toStringList ( const QList < QByteArray > list ) { <nl> + QStringList toStringList ( const QList < QByteArray > & list ) { <nl> QStringList strings ; <nl> for ( const QByteArray & item : list ) { <nl> strings . append ( QString : : fromUtf8 ( item ) ) ; <nl> mmm a / src / Data . h <nl> ppp b / src / Data . h <nl> bool startsWithBom ( const QByteArray & data ) ; <nl> / / with a BOM an empty byte array is returned and the original data is not modified . <nl> QByteArray removeBom ( QByteArray & data ) ; <nl> <nl> - QStringList toStringList ( const QList < QByteArray > list ) ; <nl> + QStringList toStringList ( const QList < QByteArray > & list ) ; <nl> <nl> QByteArray encodeString ( const QByteArray & str , const QString & encoding ) ; <nl> <nl> mmm a / src / EditIndexDialog . cpp <nl> ppp b / src / EditIndexDialog . cpp <nl> void EditIndexDialog : : updateColumnLists ( ) <nl> if ( ! table ) <nl> return ; <nl> sqlb : : FieldInfoList tableFields = table - > fieldInformation ( ) ; <nl> - ui - > tableTableColumns - > setRowCount ( tableFields . size ( ) ) ; <nl> + ui - > tableTableColumns - > setRowCount ( static_cast < int > ( tableFields . size ( ) ) ) ; <nl> int tableRows = 0 ; <nl> for ( size_t i = 0 ; i < tableFields . size ( ) ; + + i ) <nl> { <nl> void EditIndexDialog : : updateColumnLists ( ) <nl> / / table ) and to preserve the order of the index columns <nl> auto indexFields = index . fields ; <nl> ui - > tableIndexColumns - > blockSignals ( true ) ; <nl> - ui - > tableIndexColumns - > setRowCount ( indexFields . size ( ) ) ; <nl> + ui - > tableIndexColumns - > setRowCount ( static_cast < int > ( indexFields . size ( ) ) ) ; <nl> for ( size_t i = 0 ; i < indexFields . size ( ) ; + + i ) <nl> { <nl> / / Put the name of the field in the first column <nl> mmm a / src / EditTableDialog . cpp <nl> ppp b / src / EditTableDialog . cpp <nl> void EditTableDialog : : populateConstraints ( ) <nl> const auto & constraints = m_table . 
allConstraints ( ) ; <nl> <nl> ui - > tableConstraints - > blockSignals ( true ) ; <nl> - ui - > tableConstraints - > setRowCount ( constraints . size ( ) ) ; <nl> + ui - > tableConstraints - > setRowCount ( static_cast < int > ( constraints . size ( ) ) ) ; <nl> int row = 0 ; <nl> for ( const auto & pair : constraints ) <nl> { <nl> void EditTableDialog : : addField ( ) <nl> / / Find an unused name for the field by starting with ' Fieldx ' where x is the number of fields + 1 . <nl> / / If this name happens to exist already , increase x by one until we find an unused name . <nl> { <nl> - unsigned int field_number = ui - > treeWidget - > topLevelItemCount ( ) ; <nl> + int field_number = ui - > treeWidget - > topLevelItemCount ( ) ; <nl> QString field_name ; <nl> do <nl> { <nl> mmm a / src / ExportDataDialog . cpp <nl> ppp b / src / ExportDataDialog . cpp <nl> bool ExportDataDialog : : exportQuery ( const QString & sQuery , const QString & sFilena <nl> return exportQueryCsv ( sQuery , sFilename ) ; <nl> case ExportFormatJson : <nl> return exportQueryJson ( sQuery , sFilename ) ; <nl> - default : <nl> - return false ; <nl> } <nl> + <nl> + return false ; <nl> } <nl> <nl> bool ExportDataDialog : : exportQueryCsv ( const QString & sQuery , const QString & sFilename ) <nl> mmm a / src / FileDialog . h <nl> ppp b / src / FileDialog . h <nl> class FileDialog : public QFileDialog <nl> public : <nl> static QString getOpenFileName ( const FileDialogTypes dialogType , QWidget * parent = nullptr , const QString & caption = QString ( ) , <nl> const QString & filter = QString ( ) , QString * selectedFilter = nullptr , <nl> - Options options = 0 ) ; <nl> + Options options = Options ( ) ) ; <nl> static QStringList getOpenFileNames ( const FileDialogTypes dialogType , QWidget * parent = nullptr , const QString & caption = QString ( ) , <nl> const QString & filter = QString ( ) , QString * selectedFilter = nullptr , <nl> - Options options = 0 ) ; <nl> + Options options = Options ( ) ) ; <nl> static QString getSaveFileName ( const FileDialogTypes dialogType , QWidget * parent = nullptr , const QString & caption = QString ( ) , <nl> const QString & filter = QString ( ) , const QString & defaultFileName = QString ( ) , QString * selectedFilter = nullptr , <nl> - Options options = 0 ) ; <nl> + Options options = Options ( ) ) ; <nl> static QString getExistingDirectory ( const FileDialogTypes dialogType , QWidget * parent = nullptr , const QString & caption = QString ( ) , <nl> - Options options = 0 ) ; <nl> + Options options = Options ( ) ) ; <nl> <nl> static QString getSqlDatabaseFileFilter ( ) ; <nl> <nl> mmm a / src / ForeignKeyEditorDelegate . cpp <nl> ppp b / src / ForeignKeyEditorDelegate . cpp <nl> void ForeignKeyEditorDelegate : : setEditorData ( QWidget * editor , const QModelIndex & <nl> { <nl> ForeignKeyEditor * fkEditor = static_cast < ForeignKeyEditor * > ( editor ) ; <nl> <nl> - int column = index . row ( ) ; / / weird ? I know right <nl> + size_t column = static_cast < size_t > ( index . row ( ) ) ; / / weird ? I know right <nl> const sqlb : : Field & field = m_table . fields . at ( column ) ; <nl> auto fk = std : : dynamic_pointer_cast < sqlb : : ForeignKeyClause > ( m_table . constraint ( { field . 
name ( ) } , sqlb : : Constraint : : ForeignKeyConstraintType ) ) ; <nl> if ( fk ) { <nl> void ForeignKeyEditorDelegate : : setModelData ( QWidget * editor , QAbstractItemModel * <nl> ForeignKeyEditor * fkEditor = static_cast < ForeignKeyEditor * > ( editor ) ; <nl> QString sql = fkEditor - > getSql ( ) ; <nl> <nl> - int column = index . row ( ) ; <nl> + size_t column = static_cast < size_t > ( index . row ( ) ) ; <nl> const sqlb : : Field & field = m_table . fields . at ( column ) ; <nl> if ( sql . isEmpty ( ) ) { <nl> / / Remove the foreign key <nl> mmm a / src / ImportCsvDialog . cpp <nl> ppp b / src / ImportCsvDialog . cpp <nl> ImportCsvDialog : : ImportCsvDialog ( const QStringList & filenames , DBBrowserDB * db , <nl> <nl> / / Create a list of all available encodings and create an auto completion list from them <nl> QStringList encodingList ; <nl> - for ( const QString & enc : QTextCodec : : availableCodecs ( ) ) <nl> + for ( const QByteArray & enc : QTextCodec : : availableCodecs ( ) ) <nl> encodingList . push_back ( enc ) ; <nl> encodingCompleter = new QCompleter ( encodingList , this ) ; <nl> encodingCompleter - > setCaseSensitivity ( Qt : : CaseInsensitive ) ; <nl> class CSVImportProgress : public CSVProgress <nl> m_pProgressDlg - > setWindowModality ( Qt : : ApplicationModal ) ; <nl> } <nl> <nl> - ~ CSVImportProgress ( ) <nl> + ~ CSVImportProgress ( ) override <nl> { <nl> delete m_pProgressDlg ; <nl> } <nl> <nl> - void start ( ) <nl> + void start ( ) override <nl> { <nl> m_pProgressDlg - > show ( ) ; <nl> } <nl> <nl> - bool update ( unsigned long long pos ) <nl> + bool update ( unsigned long long pos ) override <nl> { <nl> m_pProgressDlg - > setValue ( static_cast < int > ( ( static_cast < float > ( pos ) / static_cast < float > ( totalFileSize ) ) * 10000 . 0f ) ) ; <nl> qApp - > processEvents ( ) ; <nl> class CSVImportProgress : public CSVProgress <nl> return ! m_pProgressDlg - > wasCanceled ( ) ; <nl> } <nl> <nl> - void end ( ) <nl> + void end ( ) override <nl> { <nl> m_pProgressDlg - > hide ( ) ; <nl> } <nl> void ImportCsvDialog : : updatePreview ( ) <nl> <nl> / / Reset preview widget <nl> ui - > tablePreview - > clear ( ) ; <nl> - ui - > tablePreview - > setColumnCount ( fieldList . size ( ) ) ; <nl> + ui - > tablePreview - > setColumnCount ( static_cast < int > ( fieldList . size ( ) ) ) ; <nl> <nl> / / Exit if there are no lines to preview at all <nl> if ( fieldList . size ( ) = = 0 ) <nl> mmm a / src / MainWindow . cpp <nl> ppp b / src / MainWindow . cpp <nl> MainWindow : : MainWindow ( QWidget * parent ) <nl> remoteDock ( new RemoteDock ( this ) ) , <nl> findReplaceDialog ( new FindReplaceDialog ( this ) ) , <nl> gotoValidator ( new QIntValidator ( 0 , 0 , this ) ) , <nl> - isProjectModified ( false ) , <nl> - execute_sql_worker ( nullptr ) <nl> + execute_sql_worker ( nullptr ) , <nl> + isProjectModified ( false ) <nl> { <nl> ui - > setupUi ( this ) ; <nl> init ( ) ; <nl> void MainWindow : : executeQuery ( ) <nl> / / Log the query and the result message . <nl> / / The query takes the last placeholder as it may itself contain the sequence ' % ' + number . <nl> QString query = editor - > text ( from_position , to_position ) ; <nl> - QString log_message = QString ( " - - " + tr ( " At line % 1 : " ) + " \ n % 3 \ n - - " + tr ( " Result : % 2 " ) ) . arg ( execute_from_line + 1 ) . arg ( status_message ) . arg ( query . trimmed ( ) ) ; <nl> + QString log_message = " - - " + tr ( " At line % 1 : " ) . arg ( execute_from_line + 1 ) + " \ n " + query . 
trimmed ( ) + " \ n - - " + tr ( " Result : % 1 " ) . arg ( status_message ) ; <nl> db . logSQL ( log_message , kLogMsg_User ) ; <nl> <nl> - log_message = QString ( tr ( " Result : % 2 " ) + " \ n " + tr ( " At line % 1 : " ) + " \ n % 3 " ) . arg ( execute_from_line + 1 ) . arg ( status_message ) . arg ( query . trimmed ( ) ) ; <nl> + log_message = tr ( " Result : % 2 " ) . arg ( status_message ) + " \ n " + tr ( " At line % 1 : " ) . arg ( execute_from_line + 1 ) + " \ n " + query . trimmed ( ) ; <nl> / / Update the execution area <nl> sqlWidget - > finishExecution ( log_message , ok ) ; <nl> } ; <nl> bool MainWindow : : loadProject ( QString filename , bool readOnly ) <nl> if ( xml . name ( ) = = " sql " ) <nl> { <nl> / / SQL editor tab <nl> - unsigned int index = openSqlTab ( ) ; <nl> + int index = openSqlTab ( ) ; <nl> ui - > tabSqlAreas - > setTabText ( index , xml . attributes ( ) . value ( " name " ) . toString ( ) ) ; <nl> SqlTextEdit * sqlEditor = qobject_cast < SqlExecutionArea * > ( ui - > tabSqlAreas - > widget ( index ) ) - > getEditor ( ) ; <nl> sqlEditor - > setText ( xml . readElementText ( ) ) ; <nl> void MainWindow : : editDataColumnDisplayFormat ( ) <nl> int field_number = sender ( ) - > property ( " clicked_column " ) . toInt ( ) ; <nl> QString field_name ; <nl> if ( db . getObjectByName ( current_table ) - > type ( ) = = sqlb : : Object : : Table ) <nl> - field_name = db . getObjectByName < sqlb : : Table > ( current_table ) - > fields . at ( field_number - 1 ) . name ( ) ; <nl> + field_name = db . getObjectByName < sqlb : : Table > ( current_table ) - > fields . at ( field_number - 1 ) . name ( ) ; <nl> else <nl> - field_name = db . getObjectByName < sqlb : : View > ( current_table ) - > fieldNames ( ) . at ( field_number - 1 ) ; <nl> + field_name = db . getObjectByName < sqlb : : View > ( current_table ) - > fieldNames ( ) . at ( field_number - 1 ) ; <nl> / / Get the current display format of the field <nl> QString current_displayformat = browseTableSettings [ current_table ] . displayFormats [ field_number ] ; <nl> <nl> void MainWindow : : runSqlNewTab ( const QString & query , const QString & title ) <nl> if ( ui - > mainTab - > indexOf ( ui - > query ) = = - 1 ) <nl> ui - > mainTab - > addTab ( ui - > query , ui - > query - > accessibleName ( ) ) ; <nl> ui - > mainTab - > setCurrentWidget ( ui - > query ) ; <nl> - unsigned int index = openSqlTab ( ) ; <nl> + int index = openSqlTab ( ) ; <nl> ui - > tabSqlAreas - > setTabText ( index , title ) ; <nl> qobject_cast < SqlExecutionArea * > ( ui - > tabSqlAreas - > widget ( index ) ) - > getEditor ( ) - > setText ( query ) ; <nl> executeQuery ( ) ; <nl> mmm a / src / PlotDock . cpp <nl> ppp b / src / PlotDock . cpp <nl> <nl> <nl> # include < QPrinter > <nl> # include < QPrintPreviewDialog > <nl> + # include < QRandomGenerator > <nl> <nl> PlotDock : : PlotDock ( QWidget * parent ) <nl> : QDialog ( parent ) , <nl> void PlotDock : : on_treePlotColumns_itemDoubleClicked ( QTreeWidgetItem * item , int c <nl> / / On double click open the colordialog <nl> QColorDialog colordialog ( this ) ; <nl> QColor curbkcolor = item - > backgroundColor ( column ) ; <nl> - QColor precolor = ! curbkcolor . isValid ( ) ? static_cast < Qt : : GlobalColor > ( qrand ( ) % 13 + 5 ) : curbkcolor ; <nl> + QColor precolor = ! curbkcolor . isValid ( ) ? static_cast < Qt : : GlobalColor > ( QRandomGenerator : : global ( ) - > bounded ( 5 , 13 ) ) : curbkcolor ; <nl> QColor color = colordialog . 
getColor ( precolor , this , tr ( " Choose an axis color " ) ) ; <nl> if ( color . isValid ( ) ) <nl> { <nl> mmm a / src / RowLoader . cpp <nl> ppp b / src / RowLoader . cpp <nl> RowLoader : : RowLoader ( <nl> { <nl> } <nl> <nl> - void RowLoader : : setQuery ( QString new_query , QString newCountQuery ) <nl> + void RowLoader : : setQuery ( const QString & new_query , const QString & newCountQuery ) <nl> { <nl> std : : lock_guard < std : : mutex > lk ( m ) ; <nl> query = new_query ; <nl> mmm a / src / RowLoader . h <nl> ppp b / src / RowLoader . h <nl> class RowLoader : public QThread <nl> Cache & cache_data <nl> ) ; <nl> <nl> - void setQuery ( QString new_query , QString newCountQuery = QString ( ) ) ; <nl> + void setQuery ( const QString & new_query , const QString & newCountQuery = QString ( ) ) ; <nl> <nl> void triggerRowCountDetermination ( int token ) ; <nl> <nl> mmm a / src / RunSql . cpp <nl> ppp b / src / RunSql . cpp <nl> bool RunSql : : executeNextStatement ( ) <nl> acquireDbAccess ( ) ; <nl> sqlite3_stmt * vm ; <nl> int sql3status = sqlite3_prepare_v2 ( pDb . get ( ) , tail , tail_length , & vm , & tail ) ; <nl> - QString queryPart = QString : : fromUtf8 ( qbegin , tail - qbegin ) ; <nl> + QString queryPart = QString : : fromUtf8 ( qbegin , static_cast < int > ( tail - qbegin ) ) ; <nl> int tail_length_before = tail_length ; <nl> tail_length - = ( tail - qbegin ) ; <nl> int end_of_current_statement_position = execute_current_position + tail_length_before - tail_length ; <nl> mmm a / src / Settings . cpp <nl> ppp b / src / Settings . cpp <nl> QColor Settings : : getDefaultColorValue ( const QString & group , const QString & name , <nl> return QColor ( Qt : : lightGray ) . name ( ) ; <nl> if ( name = = " bin_bg_colour " ) <nl> return QPalette ( ) . color ( QPalette : : Active , QPalette : : Base ) . name ( ) ; <nl> + break ; <nl> case DarkStyle : <nl> if ( name = = " null_fg_colour " ) <nl> return QColor ( " # 787878 " ) ; <nl> QColor Settings : : getDefaultColorValue ( const QString & group , const QString & name , <nl> return QColor ( " # 787878 " ) ; <nl> if ( name = = " bin_bg_colour " ) <nl> return QColor ( " # 19232D " ) ; <nl> + break ; <nl> } <nl> } <nl> <nl> mmm a / src / sql / Query . cpp <nl> ppp b / src / sql / Query . cpp <nl> <nl> namespace sqlb <nl> { <nl> <nl> - Query : : Query ( ) <nl> - { <nl> - } <nl> - <nl> void Query : : clear ( ) <nl> { <nl> m_table . clear ( ) ; <nl> std : : string Query : : buildQuery ( bool withRowid ) const <nl> std : : string order_by ; <nl> for ( const auto & sorted_column : m_sort ) <nl> { <nl> - if ( sorted_column . column < static_cast < int > ( m_column_names . size ( ) ) ) <nl> + if ( sorted_column . column < m_column_names . size ( ) ) <nl> order_by + = sqlb : : escapeIdentifier ( m_column_names . at ( sorted_column . column ) ) + " " <nl> + ( sorted_column . direction = = sqlb : : Ascending ? " ASC " : " DESC " ) + " , " ; <nl> } <nl> mmm a / src / sql / Query . h <nl> ppp b / src / sql / Query . h <nl> enum SortDirection <nl> <nl> struct SortedColumn <nl> { <nl> - SortedColumn ( int column_ , SortDirection direction_ ) : <nl> + SortedColumn ( size_t column_ , SortDirection direction_ ) : <nl> column ( column_ ) , <nl> direction ( direction_ ) <nl> { } <nl> struct SortedColumn <nl> return column = = rhs . column & & direction = = rhs . 
direction ; <nl> } <nl> <nl> - int column ; <nl> + size_t column ; <nl> SortDirection direction ; <nl> } ; <nl> <nl> struct SelectedColumn <nl> class Query <nl> { <nl> public : <nl> - Query ( ) ; <nl> - Query ( const sqlb : : ObjectIdentifier & table ) : <nl> + Query ( ) { } <nl> + explicit Query ( const sqlb : : ObjectIdentifier & table ) : <nl> m_table ( table ) <nl> { } <nl> <nl> class Query <nl> const std : : vector < SelectedColumn > & selectedColumns ( ) const { return m_selected_columns ; } <nl> std : : vector < SelectedColumn > & selectedColumns ( ) { return m_selected_columns ; } <nl> <nl> - const std : : unordered_map < int , std : : string > & where ( ) const { return m_where ; } <nl> - std : : unordered_map < int , std : : string > & where ( ) { return m_where ; } <nl> + const std : : unordered_map < size_t , std : : string > & where ( ) const { return m_where ; } <nl> + std : : unordered_map < size_t , std : : string > & where ( ) { return m_where ; } <nl> <nl> const std : : vector < SortedColumn > & orderBy ( ) const { return m_sort ; } <nl> std : : vector < SortedColumn > & orderBy ( ) { return m_sort ; } <nl> class Query <nl> sqlb : : ObjectIdentifier m_table ; <nl> std : : vector < std : : string > m_rowid_columns ; <nl> std : : vector < SelectedColumn > m_selected_columns ; <nl> - std : : unordered_map < int , std : : string > m_where ; <nl> + std : : unordered_map < size_t , std : : string > m_where ; <nl> std : : vector < SortedColumn > m_sort ; <nl> <nl> std : : vector < SelectedColumn > : : iterator findSelectedColumnByName ( const std : : string & name ) ; <nl> mmm a / src / sql / sqlitetypes . cpp <nl> ppp b / src / sql / sqlitetypes . cpp <nl> std : : string escapeIdentifier ( std : : string id ) <nl> return escapeIdentifier ( QString : : fromStdString ( id ) ) . toStdString ( ) ; <nl> } <nl> <nl> - QStringList escapeIdentifier ( const QStringList & ids ) <nl> + QStringList escapeIdentifier ( QStringList ids ) <nl> { <nl> - QStringList ret ; <nl> - for ( const QString & id : ids ) <nl> - ret . push_back ( escapeIdentifier ( id ) ) ; <nl> - return ret ; <nl> + std : : transform ( ids . begin ( ) , ids . end ( ) , ids . begin ( ) , [ ] ( const QString & id ) { <nl> + return escapeIdentifier ( id ) ; <nl> + } ) ; <nl> + return ids ; <nl> } <nl> <nl> / * * <nl> class SetLocaleToC <nl> { <nl> public : <nl> SetLocaleToC ( ) <nl> + : oldLocale ( std : : setlocale ( LC_CTYPE , nullptr ) ) / / Query current locale and save it <nl> { <nl> - / / Query current locale and save it <nl> - oldLocale = std : : setlocale ( LC_CTYPE , nullptr ) ; <nl> - <nl> / / Set locale for standard library functions <nl> std : : setlocale ( LC_CTYPE , " C . UTF - 8 " ) ; <nl> } <nl> Table & Table : : operator = ( const Table & rhs ) <nl> <nl> / / Make copies of the fields and the constraints . This is necessary in order to avoid any unwanted changes to the application ' s main database <nl> / / schema representation just by modifying a reference to the fields or constraints and thinking it operates on a copy . <nl> - for ( const Field & f : rhs . fields ) <nl> - fields . push_back ( f ) ; <nl> + std : : copy ( rhs . fields . begin ( ) , rhs . fields . end ( ) , std : : back_inserter ( fields ) ) ; <nl> m_constraints = rhs . m_constraints ; <nl> <nl> return * this ; <nl> FieldInfoList Table : : fieldInformation ( ) const <nl> <nl> bool Table : : hasAutoIncrement ( ) const <nl> { <nl> - for ( const Field & f : fields ) { <nl> - if ( f . 
autoIncrement ( ) ) <nl> - return true ; <nl> - } <nl> - return false ; <nl> + return std : : any_of ( fields . begin ( ) , fields . end ( ) , [ ] ( const Field & f ) { return f . autoIncrement ( ) ; } ) ; <nl> } <nl> <nl> TablePtr Table : : parseSQL ( const QString & sSQL ) <nl> QString Table : : sql ( const QString & schema , bool ifNotExists ) const <nl> return sql + " ; " ; <nl> } <nl> <nl> - void Table : : addConstraint ( QStringList fields , ConstraintPtr constraint ) <nl> + void Table : : addConstraint ( const QStringList & fields , ConstraintPtr constraint ) <nl> { <nl> m_constraints . insert ( { fields , constraint } ) ; <nl> } <nl> <nl> - void Table : : setConstraint ( QStringList fields , ConstraintPtr constraint ) <nl> + void Table : : setConstraint ( const QStringList & fields , ConstraintPtr constraint ) <nl> { <nl> / / Delete any old constraints of this type for these fields <nl> removeConstraints ( fields , constraint - > type ( ) ) ; <nl> void Table : : setConstraint ( QStringList fields , ConstraintPtr constraint ) <nl> addConstraint ( fields , constraint ) ; <nl> } <nl> <nl> - void Table : : removeConstraints ( QStringList fields , Constraint : : ConstraintTypes type ) <nl> + void Table : : removeConstraints ( const QStringList & fields , Constraint : : ConstraintTypes type ) <nl> { <nl> for ( auto it = m_constraints . begin ( ) ; it ! = m_constraints . end ( ) ; ) <nl> { <nl> void Table : : removeConstraints ( QStringList fields , Constraint : : ConstraintTypes ty <nl> } <nl> } <nl> <nl> - ConstraintPtr Table : : constraint ( QStringList fields , Constraint : : ConstraintTypes type ) const <nl> + ConstraintPtr Table : : constraint ( const QStringList & fields , Constraint : : ConstraintTypes type ) const <nl> { <nl> auto list = constraints ( fields , type ) ; <nl> if ( list . size ( ) ) <nl> ConstraintPtr Table : : constraint ( QStringList fields , Constraint : : ConstraintTypes <nl> return ConstraintPtr ( nullptr ) ; <nl> } <nl> <nl> - std : : vector < ConstraintPtr > Table : : constraints ( QStringList fields , Constraint : : ConstraintTypes type ) const <nl> + std : : vector < ConstraintPtr > Table : : constraints ( const QStringList & fields , Constraint : : ConstraintTypes type ) const <nl> { <nl> ConstraintMap : : const_iterator begin , end ; <nl> if ( fields . empty ( ) ) <nl> Index & Index : : operator = ( const Index & rhs ) <nl> m_whereExpr = rhs . m_whereExpr ; <nl> <nl> / / Make copies of the column <nl> - for ( const IndexedColumn & c : rhs . fields ) <nl> - fields . push_back ( c ) ; <nl> + std : : copy ( rhs . fields . begin ( ) , rhs . fields . end ( ) , std : : back_inserter ( fields ) ) ; <nl> <nl> return * this ; <nl> } <nl> mmm a / src / sql / sqlitetypes . h <nl> ppp b / src / sql / sqlitetypes . h <nl> void setIdentifierQuoting ( escapeQuoting toQuoting ) ; <nl> <nl> QString escapeIdentifier ( QString id ) ; <nl> std : : string escapeIdentifier ( std : : string id ) ; <nl> - QStringList escapeIdentifier ( const QStringList & ids ) ; <nl> + QStringList escapeIdentifier ( QStringList ids ) ; <nl> <nl> class ObjectIdentifier <nl> { <nl> class Table : public Object <nl> <nl> FieldInfoList fieldInformation ( ) const override ; <nl> <nl> - void addConstraint ( QStringList fields , ConstraintPtr constraint ) ; <nl> - void setConstraint ( QStringList fields , ConstraintPtr constraint ) ; <nl> - void removeConstraints ( QStringList fields = QStringList ( ) , Constraint : : ConstraintTypes type = Constraint : : NoType ) ; / / ! 
Only removes the first constraint , if any <nl> - ConstraintPtr constraint ( QStringList fields = QStringList ( ) , Constraint : : ConstraintTypes type = Constraint : : NoType ) const ; / / ! Only returns the first constraint , if any <nl> - std : : vector < ConstraintPtr > constraints ( QStringList fields = QStringList ( ) , Constraint : : ConstraintTypes type = Constraint : : NoType ) const ; <nl> + void addConstraint ( const QStringList & fields , ConstraintPtr constraint ) ; <nl> + void setConstraint ( const QStringList & fields , ConstraintPtr constraint ) ; <nl> + void removeConstraints ( const QStringList & fields = QStringList ( ) , Constraint : : ConstraintTypes type = Constraint : : NoType ) ; / / ! Only removes the first constraint , if any <nl> + ConstraintPtr constraint ( const QStringList & fields = QStringList ( ) , Constraint : : ConstraintTypes type = Constraint : : NoType ) const ; / / ! Only returns the first constraint , if any <nl> + std : : vector < ConstraintPtr > constraints ( const QStringList & fields = QStringList ( ) , Constraint : : ConstraintTypes type = Constraint : : NoType ) const ; <nl> ConstraintMap allConstraints ( ) const { return m_constraints ; } <nl> void setConstraints ( const ConstraintMap & constraints ) ; <nl> QStringList & primaryKeyRef ( ) ; <nl> mmm a / src / sqlitedb . cpp <nl> ppp b / src / sqlitedb . cpp <nl> std : : function < Ret ( Params . . . ) > Callback < Ret ( Params . . . ) > : : func ; <nl> int collCompare ( void * / * pArg * / , int sizeA , const void * sA , int sizeB , const void * sB ) <nl> { <nl> if ( sizeA = = sizeB ) <nl> - return memcmp ( sA , sB , sizeA ) ; <nl> + return memcmp ( sA , sB , static_cast < size_t > ( sizeA ) ) ; <nl> return sizeA - sizeB ; <nl> } <nl> <nl> bool DBBrowserDB : : close ( ) <nl> return true ; <nl> } <nl> <nl> - DBBrowserDB : : db_pointer_type DBBrowserDB : : get ( QString user , bool force_wait ) <nl> + DBBrowserDB : : db_pointer_type DBBrowserDB : : get ( const QString & user , bool force_wait ) <nl> { <nl> if ( ! _db ) <nl> return nullptr ; <nl> bool DBBrowserDB : : dump ( const QString & filePath , <nl> QApplication : : setOverrideCursor ( Qt : : WaitCursor ) ; <nl> <nl> / / Count the total number of all records in all tables for the progress dialog <nl> - size_t numRecordsTotal = 0 , numRecordsCurrent = 0 ; <nl> + size_t numRecordsTotal = 0 ; <nl> objectMap objMap = schemata [ " main " ] ; / / We only always export the main database , not the attached databases <nl> QList < sqlb : : ObjectPtr > tables = objMap . values ( " table " ) ; <nl> for ( QMutableListIterator < sqlb : : ObjectPtr > it ( tables ) ; it . hasNext ( ) ; ) <nl> bool DBBrowserDB : : dump ( const QString & filePath , <nl> } <nl> <nl> QProgressDialog progress ( tr ( " Exporting database to SQL file . . . " ) , <nl> - tr ( " Cancel " ) , 0 , numRecordsTotal ) ; <nl> + tr ( " Cancel " ) , 0 , static_cast < int > ( numRecordsTotal ) ) ; <nl> progress . setWindowModality ( Qt : : ApplicationModal ) ; <nl> progress . show ( ) ; <nl> qApp - > processEvents ( ) ; <nl> bool DBBrowserDB : : dump ( const QString & filePath , <nl> { <nl> int columns = sqlite3_column_count ( stmt ) ; <nl> size_t counter = 0 ; <nl> + size_t numRecordsCurrent = 0 ; <nl> qApp - > processEvents ( ) ; <nl> while ( sqlite3_step ( stmt ) = = SQLITE_ROW ) <nl> { <nl> bool DBBrowserDB : : dump ( const QString & filePath , <nl> stream < < ' , ' ; <nl> } <nl> <nl> - progress . setValue ( + + numRecordsCurrent ) ; <nl> + progress . 
setValue ( static_cast < int > ( + + numRecordsCurrent ) ) ; <nl> if ( counter % 5000 = = 0 ) <nl> qApp - > processEvents ( ) ; <nl> counter + + ; <nl> bool DBBrowserDB : : executeMultiSQL ( QByteArray query , bool dirty , bool log ) <nl> } <nl> <nl> / / Execute next statement <nl> - res = sqlite3_prepare_v2 ( _db , tail , tail_end - tail + 1 , & vm , & tail ) ; <nl> + res = sqlite3_prepare_v2 ( _db , tail , static_cast < int > ( tail_end - tail + 1 ) , & vm , & tail ) ; <nl> if ( res = = SQLITE_OK ) <nl> { <nl> switch ( sqlite3_step ( vm ) ) <nl> mmm a / src / sqlitedb . h <nl> ppp b / src / sqlitedb . h <nl> class DBBrowserDB : public QObject <nl> / / / custom unique_ptr deleter releases database for further use by others <nl> struct DatabaseReleaser <nl> { <nl> - DatabaseReleaser ( DBBrowserDB * pParent_ = nullptr ) : pParent ( pParent_ ) { } <nl> + explicit DatabaseReleaser ( DBBrowserDB * pParent_ = nullptr ) : pParent ( pParent_ ) { } <nl> <nl> DBBrowserDB * pParent ; <nl> <nl> class DBBrowserDB : public QObject <nl> \ returns a unique_ptr containing the SQLite database handle , or <nl> nullptr in case no database is open . <nl> * * / <nl> - db_pointer_type get ( QString user , bool force_wait = false ) ; <nl> + db_pointer_type get ( const QString & user , bool force_wait = false ) ; <nl> <nl> bool setSavepoint ( const QString & pointname = " RESTOREPOINT " ) ; <nl> bool releaseSavepoint ( const QString & pointname = " RESTOREPOINT " ) ; <nl> mmm a / src / sqlitetablemodel . cpp <nl> ppp b / src / sqlitetablemodel . cpp <nl> void SqliteTableModel : : setQuery ( const QString & sQuery , const QString & sCountQuer <nl> <nl> int SqliteTableModel : : rowCount ( const QModelIndex & ) const <nl> { <nl> - return m_currentRowCount ; <nl> + return static_cast < int > ( m_currentRowCount ) ; <nl> } <nl> <nl> int SqliteTableModel : : columnCount ( const QModelIndex & ) const <nl> int SqliteTableModel : : columnCount ( const QModelIndex & ) const <nl> return m_headers . size ( ) ; <nl> } <nl> <nl> - int SqliteTableModel : : filterCount ( ) const <nl> + size_t SqliteTableModel : : filterCount ( ) const <nl> { <nl> return m_query . where ( ) . size ( ) ; <nl> } <nl> mmm a / src / sqlitetablemodel . h <nl> ppp b / src / sqlitetablemodel . h <nl> class SqliteTableModel : public QAbstractTableModel <nl> int rowCount ( const QModelIndex & parent = QModelIndex ( ) ) const override ; <nl> <nl> int columnCount ( const QModelIndex & parent = QModelIndex ( ) ) const override ; <nl> - int filterCount ( ) const ; <nl> + size_t filterCount ( ) const ; <nl> QVariant headerData ( int section , Qt : : Orientation orientation , int role = Qt : : DisplayRole ) const override ; <nl> QVariant data ( const QModelIndex & index , int role = Qt : : DisplayRole ) const override ; <nl> bool setData ( const QModelIndex & index , const QVariant & value , int role = Qt : : EditRole ) override ; <nl> class SqliteTableModel : public QAbstractTableModel <nl> void setQuery ( const sqlb : : Query & query ) ; <nl> <nl> void setChunkSize ( size_t chunksize ) ; <nl> - size_t chunkSize ( ) { return m_chunkSize ; } ; <nl> + size_t chunkSize ( ) { return m_chunkSize ; } <nl> void sort ( int column , Qt : : SortOrder order = Qt : : AscendingOrder ) override ; <nl> void sort ( const std : : vector < sqlb : : SortedColumn > & columns ) ; <nl> sqlb : : ObjectIdentifier currentTableName ( ) const { return m_query . 
table ( ) ; } <nl> class SqliteTableModel : public QAbstractTableModel <nl> void addCondFormat ( int column , const CondFormat & condFormat ) ; <nl> void setCondFormats ( int column , const QVector < CondFormat > & condFormats ) ; <nl> <nl> - DBBrowserDB & db ( ) { return m_db ; } ; <nl> + DBBrowserDB & db ( ) { return m_db ; } <nl> <nl> public slots : <nl> void updateFilter ( int column , const QString & value ) ; <nl> mmm a / src / tests / TestImport . cpp <nl> ppp b / src / tests / TestImport . cpp <nl> void TestImport : : csvImport_data ( ) <nl> result . append ( QVector < QByteArray > ( ) < < " g " < < " h " < < " i " ) ; <nl> QTest : : newRow ( " commas_noquotes " ) < < " a , b , c \ nd , e , f \ ng , h , i \ n " <nl> < < ' , ' <nl> - < < ( char ) 0 <nl> + < < static_cast < char > ( 0 ) <nl> < < " UTF - 8 " <nl> < < 3 <nl> < < result ; <nl> QTest : : newRow ( " semicolons_noquotes " ) < < " a ; b ; c \ nd ; e ; f \ ng ; h ; i \ n " <nl> < < ' ; ' <nl> - < < ( char ) 0 <nl> + < < static_cast < char > ( 0 ) <nl> < < " UTF - 8 " <nl> < < 3 <nl> < < result ; <nl> void TestImport : : csvImport_data ( ) <nl> < < result ; <nl> QTest : : newRow ( " windowslinebreaks " ) < < " a , b , c \ r \ nd , e , f \ r \ ng , h , i \ r \ n " <nl> < < ' , ' <nl> - < < ( char ) 0 <nl> + < < static_cast < char > ( 0 ) <nl> < < " UTF - 8 " <nl> < < 3 <nl> < < result ; <nl> QTest : : newRow ( " oldmaclinebreaks " ) < < " a , b , c \ rd , e , f \ rg , h , i \ r " <nl> < < ' , ' <nl> - < < ( char ) 0 <nl> + < < static_cast < char > ( 0 ) <nl> < < " UTF - 8 " <nl> < < 3 <nl> < < result ; <nl> void TestImport : : csvImport_data ( ) <nl> result . append ( QVector < QByteArray > ( ) < < " " < < " " < < " f " ) ; <nl> QTest : : newRow ( " emptyvalues " ) < < " a , b , \ nc , \ nd , , e \ n \ n , , f " <nl> < < ' , ' <nl> - < < ( char ) 0 <nl> + < < static_cast < char > ( 0 ) <nl> < < " UTF - 8 " <nl> < < 3 <nl> < < result ; <nl> void TestImport : : csvImport_data ( ) <nl> result . append ( QVector < QByteArray > ( ) < < " a " < < " b " < < " c " ) ; <nl> QTest : : newRow ( " oneline " ) < < " a , b , c " <nl> < < ' , ' <nl> - < < ( char ) 0 <nl> + < < static_cast < char > ( 0 ) <nl> < < " UTF - 8 " <nl> < < 3 <nl> < < result ; <nl> void TestImport : : csvImport_data ( ) <nl> QString csv = QString : : fromUtf8 ( " \ xC2 \ xAE " ) + " , " + QString : : fromUtf8 ( " \ xC9 \ x85 " ) + " , " + QString : : fromUtf8 ( " \ xC6 \ x89 " ) + " \ n " ; <nl> QTest : : newRow ( " utf8chars " ) < < csv <nl> < < ' , ' <nl> - < < ( char ) 0 <nl> + < < static_cast < char > ( 0 ) <nl> < < " UTF - 8 " <nl> < < 3 <nl> < < result ; <nl> void TestImport : : csvImport_data ( ) <nl> QString csv2 = QString : : fromUtf8 ( " \ u4E18 " ) + " , " + QString : : fromUtf8 ( " \ u4E26 " ) + " , " + QString : : fromUtf8 ( " \ u4E4B " ) + " \ n " ; <nl> QTest : : newRow ( " utf16chars " ) < < csv2 <nl> < < ' , ' <nl> - < < ( char ) 0 <nl> + < < static_cast < char > ( 0 ) <nl> < < " UTF - 16 " <nl> < < 3 <nl> < < result ; <nl>
Fix some warnings and other code style changes
sqlitebrowser/sqlitebrowser
f59a2453a2b36ddae00a1831b266079e1ea17001
2019-04-26T12:48:24Z
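A note on the refactor recorded above: it repeatedly swaps hand-written loops for <algorithm> calls (std::any_of in Table::hasAutoIncrement, std::transform in escapeIdentifier) and passes QString parameters by const reference to avoid copies. Below is a minimal C++ sketch of the std::any_of idiom in isolation; the Field struct and its autoIncrement() accessor are simplified stand-ins for the project's real sqlb::Field class, not its actual definition.

#include <algorithm>
#include <iostream>
#include <vector>

// Simplified stand-in for sqlb::Field: only the flag the predicate inspects.
struct Field {
    bool auto_increment = false;
    bool autoIncrement() const { return auto_increment; }
};

// Loop-free equivalent of the refactored Table::hasAutoIncrement():
// true if any field carries the AUTOINCREMENT flag.
bool hasAutoIncrement(const std::vector<Field>& fields) {
    return std::any_of(fields.begin(), fields.end(),
                       [](const Field& f) { return f.autoIncrement(); });
}

int main() {
    std::vector<Field> fields{Field{false}, Field{true}, Field{false}};
    std::cout << std::boolalpha << hasAutoIncrement(fields) << '\n';  // prints: true
    return 0;
}

The std::transform version of escapeIdentifier follows the same reasoning: the algorithm states the intent (map every element) and removes the manually managed result list.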
mmm a / torch / _tensor_str . py <nl> ppp b / torch / _tensor_str . py <nl> def _str ( self ) : <nl> strt = _tensor_str ( self ) <nl> <nl> size_str = ' x ' . join ( str ( size ) for size in self . size ( ) ) <nl> - strt + = ' [ { } of size { } ] \ n ' . format ( torch . typename ( self ) , size_str ) <nl> + device_str = ' ' if not self . is_cuda else ' ( GPU { } ) ' . format ( self . get_device ( ) ) <nl> + strt + = ' [ { } of size { } { } ] \ n ' . format ( torch . typename ( self ) , <nl> + size_str , device_str ) <nl> return ' \ n ' + strt <nl> <nl>
Print GPU id for CUDA tensors
pytorch/pytorch
645c913e4fc246c43fa8359f27ac203aec18c510
2016-10-29T22:16:06Z
mmm a / include / swift / SILOptimizer / Analysis / SideEffectAnalysis . h <nl> ppp b / include / swift / SILOptimizer / Analysis / SideEffectAnalysis . h <nl> class FunctionSideEffects { <nl> / / / instructions are considered as side effects . <nl> MemoryBehavior getMemBehavior ( RetainObserveKind ScanKind ) const ; <nl> <nl> + / / / Gets the memory behavior for an argument . <nl> + / / / <nl> + / / / This is derived from the combined argument and the global effects . <nl> + / / / Also the argument type and convention are considered . <nl> + MemoryBehavior getArgumentBehavior ( FullApplySite applySite , <nl> + unsigned argIdx ) ; <nl> + <nl> / / / Get the global effects for the function . These are effects which cannot <nl> / / / be associated to a specific parameter , e . g . writes to global variables <nl> / / / or writes to unknown pointers . <nl> mmm a / lib / SILOptimizer / Analysis / MemoryBehavior . cpp <nl> ppp b / lib / SILOptimizer / Analysis / MemoryBehavior . cpp <nl> class MemoryBehaviorVisitor <nl> MemBehavior visitCopyAddrInst ( CopyAddrInst * CAI ) ; <nl> MemBehavior visitApplyInst ( ApplyInst * AI ) ; <nl> MemBehavior visitTryApplyInst ( TryApplyInst * AI ) ; <nl> + MemBehavior visitBeginApplyInst ( BeginApplyInst * AI ) ; <nl> + MemBehavior visitEndApplyInst ( EndApplyInst * EAI ) ; <nl> + MemBehavior visitAbortApplyInst ( AbortApplyInst * AAI ) ; <nl> + MemBehavior getApplyBehavior ( FullApplySite AS ) ; <nl> MemBehavior visitBuiltinInst ( BuiltinInst * BI ) ; <nl> MemBehavior visitStrongReleaseInst ( StrongReleaseInst * BI ) ; <nl> MemBehavior visitReleaseValueInst ( ReleaseValueInst * BI ) ; <nl> MemBehavior MemoryBehaviorVisitor : : visitBuiltinInst ( BuiltinInst * BI ) { <nl> } <nl> <nl> MemBehavior MemoryBehaviorVisitor : : visitTryApplyInst ( TryApplyInst * AI ) { <nl> - MemBehavior Behavior = MemBehavior : : MayHaveSideEffects ; <nl> - / / Ask escape analysis . <nl> - if ( ! EA - > canEscapeTo ( V , AI ) ) <nl> - Behavior = MemBehavior : : None ; <nl> - <nl> - / / Otherwise be conservative and return that we may have side effects . <nl> - LLVM_DEBUG ( llvm : : dbgs ( ) < < " Found tryapply , returning " < < Behavior < < ' \ n ' ) ; <nl> - return Behavior ; <nl> + return getApplyBehavior ( AI ) ; <nl> } <nl> <nl> MemBehavior MemoryBehaviorVisitor : : visitApplyInst ( ApplyInst * AI ) { <nl> + return getApplyBehavior ( AI ) ; <nl> + } <nl> <nl> - FunctionSideEffects ApplyEffects ; <nl> - SEA - > getCalleeEffects ( ApplyEffects , AI ) ; <nl> + MemBehavior MemoryBehaviorVisitor : : visitBeginApplyInst ( BeginApplyInst * AI ) { <nl> + return getApplyBehavior ( AI ) ; <nl> + } <nl> + <nl> + MemBehavior MemoryBehaviorVisitor : : visitEndApplyInst ( EndApplyInst * EAI ) { <nl> + return getApplyBehavior ( EAI - > getBeginApply ( ) ) ; <nl> + } <nl> + <nl> + MemBehavior MemoryBehaviorVisitor : : visitAbortApplyInst ( AbortApplyInst * AAI ) { <nl> + return getApplyBehavior ( AAI - > getBeginApply ( ) ) ; <nl> + } <nl> + <nl> + / / / Returns true if the \ p address may have any users which let the address <nl> + / / / escape in an unusual way , e . g . with an address_to_pointer instruction . <nl> + static bool hasEscapingUses ( SILValue address , int & numChecks ) { <nl> + for ( Operand * use : address - > getUses ( ) ) { <nl> + SILInstruction * user = use - > getUser ( ) ; <nl> + <nl> + / / Avoid quadratic complexity in corner cases . A limit of 24 is more than <nl> + / / enough in most cases . 
<nl> + if ( + + numChecks > 24 ) <nl> + return true ; <nl> + <nl> + switch ( user - > getKind ( ) ) { <nl> + case SILInstructionKind : : DebugValueAddrInst : <nl> + case SILInstructionKind : : FixLifetimeInst : <nl> + case SILInstructionKind : : LoadInst : <nl> + case SILInstructionKind : : StoreInst : <nl> + case SILInstructionKind : : CopyAddrInst : <nl> + case SILInstructionKind : : DestroyAddrInst : <nl> + case SILInstructionKind : : DeallocStackInst : <nl> + / / Those instructions have no result and cannot escape the address . <nl> + break ; <nl> + case SILInstructionKind : : ApplyInst : <nl> + case SILInstructionKind : : TryApplyInst : <nl> + case SILInstructionKind : : BeginApplyInst : <nl> + / / Apply instructions can not let an address escape either . It ' s not <nl> + / / possible that an address , passed as an indirect parameter , escapes <nl> + / / the function in any way ( which is not unsafe and undefined behavior ) . <nl> + break ; <nl> + case SILInstructionKind : : OpenExistentialAddrInst : <nl> + case SILInstructionKind : : UncheckedTakeEnumDataAddrInst : <nl> + case SILInstructionKind : : StructElementAddrInst : <nl> + case SILInstructionKind : : TupleElementAddrInst : <nl> + case SILInstructionKind : : UncheckedAddrCastInst : <nl> + / / Check the uses of address projections . <nl> + if ( hasEscapingUses ( cast < SingleValueInstruction > ( user ) , numChecks ) ) <nl> + return true ; <nl> + break ; <nl> + case SILInstructionKind : : AddressToPointerInst : <nl> + / / This is _the_ instruction which can let an address escape . <nl> + return true ; <nl> + default : <nl> + / / To be conservative , also bail for anything we don ' t handle here . <nl> + return true ; <nl> + } <nl> + } <nl> + return false ; <nl> + } <nl> <nl> - MemBehavior Behavior = MemBehavior : : None ; <nl> + MemBehavior MemoryBehaviorVisitor : : getApplyBehavior ( FullApplySite AS ) { <nl> <nl> - / / We can ignore mayTrap ( ) . <nl> - bool any_in_guaranteed_params = false ; <nl> - for ( auto op : enumerate ( AI - > getArgumentOperands ( ) ) ) { <nl> - if ( op . value ( ) . get ( ) = = V & & <nl> - AI - > getSubstCalleeConv ( ) . getSILArgumentConvention ( op . index ( ) ) = = swift : : SILArgumentConvention : : Indirect_In_Guaranteed ) { <nl> - any_in_guaranteed_params = true ; <nl> - break ; <nl> + / / Do a quick check first : if V is directly passed to an in_guaranteed <nl> + / / argument , we know that the function cannot write to it . <nl> + for ( Operand & argOp : AS . getArgumentOperands ( ) ) { <nl> + if ( argOp . get ( ) = = V & & <nl> + AS . getArgumentConvention ( argOp ) = = <nl> + swift : : SILArgumentConvention : : Indirect_In_Guaranteed ) { <nl> + return MemBehavior : : MayRead ; <nl> } <nl> } <nl> <nl> - if ( any_in_guaranteed_params ) { <nl> - / / one the parameters in the function call is @ in_guaranteed of V , ie . the <nl> - / / callee isn ' t allowed to modify it . <nl> - Behavior = MemBehavior : : MayRead ; <nl> - } else { <nl> - auto & GlobalEffects = ApplyEffects . getGlobalEffects ( ) ; <nl> - Behavior = GlobalEffects . getMemBehavior ( RetainObserveKind : : IgnoreRetains ) ; <nl> - <nl> - / / Check all parameter effects . <nl> - for ( unsigned Idx = 0 , End = AI - > getNumArguments ( ) ; <nl> - Idx < End & & Behavior < MemBehavior : : MayHaveSideEffects ; + + Idx ) { <nl> - auto & ArgEffect = ApplyEffects . getParameterEffects ( ) [ Idx ] ; <nl> - auto ArgBehavior = ArgEffect . getMemBehavior ( RetainObserveKind : : IgnoreRetains ) ; <nl> - if ( ArgEffect . 
mayRelease ( ) ) { <nl> - Behavior = MemBehavior : : MayHaveSideEffects ; <nl> - break ; <nl> - } <nl> - auto NewBehavior = combineMemoryBehavior ( Behavior , ArgBehavior ) ; <nl> - if ( NewBehavior ! = Behavior ) { <nl> - SILValue Arg = AI - > getArgument ( Idx ) ; <nl> - / / We only consider the argument effects if the argument aliases V . <nl> - if ( ! Arg - > getType ( ) . isAddress ( ) | | mayAlias ( Arg ) ) <nl> - Behavior = NewBehavior ; <nl> - } <nl> + SILValue object = getUnderlyingObject ( V ) ; <nl> + int numUsesChecked = 0 ; <nl> + <nl> + / / For exclusive / local addresses we can do a quick and good check with alias <nl> + / / analysis . For everything else we use escape analysis ( see below ) . <nl> + / / TODO : The check for not - escaping can probably done easier with the upcoming <nl> + / / API of AccessStorage . <nl> + bool nonEscapingAddress = <nl> + ( isa < AllocStackInst > ( object ) | | isExclusiveArgument ( object ) ) & & <nl> + ! hasEscapingUses ( object , numUsesChecked ) ; <nl> + <nl> + FunctionSideEffects applyEffects ; <nl> + SEA - > getCalleeEffects ( applyEffects , AS ) ; <nl> + <nl> + MemBehavior behavior = MemBehavior : : None ; <nl> + MemBehavior globalBehavior = applyEffects . getGlobalEffects ( ) . getMemBehavior ( <nl> + RetainObserveKind : : IgnoreRetains ) ; <nl> + <nl> + / / If it ' s a non - escaping address , we don ' t care about the " global " effects <nl> + / / of the called function . <nl> + if ( ! nonEscapingAddress ) <nl> + behavior = globalBehavior ; <nl> + <nl> + / / Check all parameter effects . <nl> + for ( unsigned argIdx = 0 , end = AS . getNumArguments ( ) ; <nl> + argIdx < end & & behavior < MemBehavior : : MayHaveSideEffects ; <nl> + + + argIdx ) { <nl> + SILValue arg = AS . getArgument ( argIdx ) ; <nl> + <nl> + / / In case the argument is not an address , alias analysis will always report <nl> + / / a no - alias . Therefore we have to treat non - address arguments <nl> + / / conservatively here . For example V could be a ref_element_addr of a <nl> + / / reference argument . In this case V clearly " aliases " the argument , but <nl> + / / this is not reported by alias analysis . <nl> + if ( ( ! nonEscapingAddress & & ! arg - > getType ( ) . isAddress ( ) ) | | <nl> + mayAlias ( arg ) ) { <nl> + MemBehavior argBehavior = applyEffects . getArgumentBehavior ( AS , argIdx ) ; <nl> + behavior = combineMemoryBehavior ( behavior , argBehavior ) ; <nl> } <nl> } <nl> <nl> - if ( Behavior > MemBehavior : : None ) { <nl> - if ( Behavior > MemBehavior : : MayRead & & isLetValue ( ) ) <nl> - Behavior = MemBehavior : : MayRead ; <nl> + if ( behavior > MemBehavior : : None ) { <nl> + if ( behavior > MemBehavior : : MayRead & & isLetValue ( ) ) <nl> + behavior = MemBehavior : : MayRead ; <nl> <nl> / / Ask escape analysis . <nl> - if ( ! EA - > canEscapeTo ( V , AI ) ) <nl> - Behavior = MemBehavior : : None ; <nl> + if ( ! nonEscapingAddress & & ! EA - > canEscapeTo ( V , AS ) ) <nl> + behavior = MemBehavior : : None ; <nl> } <nl> - LLVM_DEBUG ( llvm : : dbgs ( ) < < " Found apply , returning " < < Behavior < < ' \ n ' ) ; <nl> - return Behavior ; <nl> + LLVM_DEBUG ( llvm : : dbgs ( ) < < " Found apply , returning " < < behavior < < ' \ n ' ) ; <nl> + <nl> + return behavior ; <nl> } <nl> <nl> MemBehavior <nl> mmm a / lib / SILOptimizer / Analysis / SideEffectAnalysis . cpp <nl> ppp b / lib / SILOptimizer / Analysis / SideEffectAnalysis . 
cpp <nl> FunctionSideEffects : : getMemBehavior ( RetainObserveKind ScanKind ) const { <nl> return Behavior ; <nl> } <nl> <nl> + MemoryBehavior <nl> + FunctionSideEffects : : getArgumentBehavior ( FullApplySite applySite , <nl> + unsigned argIdx ) { <nl> + / / Rule out trivial non - address argument types . <nl> + SILType argType = applySite . getArgument ( argIdx ) - > getType ( ) ; <nl> + if ( ! argType . isAddress ( ) & & argType . isTrivial ( * applySite . getFunction ( ) ) ) <nl> + return MemoryBehavior : : None ; <nl> + <nl> + / / The overall argument effect is the combination of the argument and the <nl> + / / global effects . <nl> + MemoryBehavior behavior = <nl> + GlobalEffects . getMemBehavior ( RetainObserveKind : : IgnoreRetains ) ; <nl> + MemoryBehavior argBehavior = <nl> + ParamEffects [ argIdx ] . getMemBehavior ( RetainObserveKind : : IgnoreRetains ) ; <nl> + <nl> + behavior = combineMemoryBehavior ( behavior , argBehavior ) ; <nl> + <nl> + if ( behavior > MemoryBehavior : : MayRead & & <nl> + applySite . getArgumentConvention ( applySite . getArgumentRef ( argIdx ) ) = = <nl> + SILArgumentConvention : : Indirect_In_Guaranteed ) { <nl> + / / Even if side - effect analysis doesn ' t know anything about the called <nl> + / / called function , the in_guaranteed convention guarantees that the <nl> + / / argument is never written to . <nl> + return MemoryBehavior : : MayRead ; <nl> + } <nl> + return behavior ; <nl> + } <nl> + <nl> bool FunctionSideEffects : : mergeFrom ( const FunctionSideEffects & RHS ) { <nl> bool Changed = mergeFlags ( RHS ) ; <nl> Changed | = GlobalEffects . mergeFrom ( RHS . GlobalEffects ) ; <nl> mmm a / lib / SILOptimizer / UtilityPasses / MemBehaviorDumper . cpp <nl> ppp b / lib / SILOptimizer / UtilityPasses / MemBehaviorDumper . cpp <nl> class MemBehaviorDumper : public SILModuleTransform { <nl> / / selected types of instructions . <nl> static bool shouldTestInstruction ( SILInstruction * I ) { <nl> / / Only consider function calls . <nl> - if ( ( EnableDumpAll & & I - > mayReadOrWriteMemory ( ) ) | | FullApplySite : : isa ( I ) ) <nl> + if ( ( EnableDumpAll & & I - > mayReadOrWriteMemory ( ) ) | | <nl> + FullApplySite : : isa ( I ) | | <nl> + isa < EndApplyInst > ( I ) | | <nl> + isa < AbortApplyInst > ( I ) ) <nl> return true ; <nl> <nl> return false ; <nl> mmm a / test / SILOptimizer / mem - behavior . sil <nl> ppp b / test / SILOptimizer / mem - behavior . sil <nl> class X { <nl> } <nl> <nl> sil @ unknown_func : $ @ convention ( thin ) ( Int32 , @ in Int32 ) - > ( ) <nl> - <nl> + sil @ single_indirect_arg : $ @ convention ( thin ) ( @ in Int32 ) - > Int32 <nl> + sil @ single_indirect_arg_and_error : $ @ convention ( thin ) ( @ in Int32 ) - > ( Int32 , @ error Error ) <nl> + sil @ single_indirect_arg_coroutine : $ @ yield_once @ convention ( thin ) ( @ in Int32 ) - > @ yields Int32 <nl> + sil @ indirect_arg_and_ptr : $ @ convention ( thin ) ( @ in Int32 , Builtin . RawPointer ) - > Int32 <nl> + sil @ single_reference : $ @ convention ( thin ) ( @ guaranteed X ) - > Int32 <nl> sil @ nouser_func : $ @ convention ( thin ) ( ) - > ( ) <nl> <nl> sil @ store_to_int : $ @ convention ( thin ) ( Int32 , @ inout Int32 ) - > ( ) { <nl> bb0 ( % 0 : $ X ) : <nl> return % 6 : $ ( ) <nl> } <nl> <nl> + / / CHECK - LABEL : @ allocstack_and_copyaddr <nl> + / / CHECK : PAIR # 0 . 
<nl> + / / CHECK - NEXT : % 4 = apply % 3 ( % 0 ) : $ @ convention ( thin ) ( @ in Int32 ) - > Int32 <nl> + / / CHECK - NEXT : % 0 = argument of bb0 : $ * Int32 <nl> + / / CHECK - NEXT : r = 1 , w = 1 <nl> + / / CHECK : PAIR # 1 . <nl> + / / CHECK - NEXT : % 4 = apply % 3 ( % 0 ) : $ @ convention ( thin ) ( @ in Int32 ) - > Int32 <nl> + / / CHECK - NEXT : % 1 = alloc_stack $ Int32 <nl> + / / CHECK - NEXT : r = 0 , w = 0 <nl> + sil @ allocstack_and_copyaddr : $ @ convention ( thin ) ( @ in Int32 ) - > Int32 { <nl> + bb0 ( % 0 : $ * Int32 ) : <nl> + % 1 = alloc_stack $ Int32 <nl> + copy_addr % 0 to % 1 : $ * Int32 <nl> + % 3 = function_ref @ single_indirect_arg : $ @ convention ( thin ) ( @ in Int32 ) - > Int32 <nl> + % 4 = apply % 3 ( % 0 ) : $ @ convention ( thin ) ( @ in Int32 ) - > Int32 <nl> + dealloc_stack % 1 : $ * Int32 <nl> + return % 4 : $ Int32 <nl> + } <nl> + <nl> + / / CHECK - LABEL : @ inout_and_copyaddr <nl> + / / CHECK : PAIR # 0 . <nl> + / / CHECK - NEXT : % 4 = apply % 3 ( % 0 ) : $ @ convention ( thin ) ( @ in Int32 ) - > Int32 <nl> + / / CHECK - NEXT : % 0 = argument of bb0 : $ * Int32 <nl> + / / CHECK - NEXT : r = 1 , w = 1 <nl> + / / CHECK : PAIR # 1 . <nl> + / / CHECK - NEXT : % 4 = apply % 3 ( % 0 ) : $ @ convention ( thin ) ( @ in Int32 ) - > Int32 <nl> + / / CHECK - NEXT : % 1 = argument of bb0 : $ * Int32 <nl> + / / CHECK - NEXT : r = 0 , w = 0 <nl> + sil @ inout_and_copyaddr : $ @ convention ( thin ) ( @ in Int32 , @ inout Int32 ) - > Int32 { <nl> + bb0 ( % 0 : $ * Int32 , % 1 : $ * Int32 ) : <nl> + copy_addr % 0 to % 1 : $ * Int32 <nl> + % 3 = function_ref @ single_indirect_arg : $ @ convention ( thin ) ( @ in Int32 ) - > Int32 <nl> + % 4 = apply % 3 ( % 0 ) : $ @ convention ( thin ) ( @ in Int32 ) - > Int32 <nl> + return % 4 : $ Int32 <nl> + } <nl> + <nl> + / / CHECK - LABEL : @ esacping_allocstack_and_copyaddr <nl> + / / CHECK : PAIR # 0 . <nl> + / / CHECK - NEXT : % 5 = apply % 4 ( % 0 , % 3 ) : $ @ convention ( thin ) ( @ in Int32 , Builtin . RawPointer ) - > Int32 <nl> + / / CHECK - NEXT : % 0 = argument of bb0 : $ * Int32 <nl> + / / CHECK - NEXT : r = 1 , w = 1 <nl> + / / CHECK : PAIR # 1 . <nl> + / / CHECK - NEXT : % 5 = apply % 4 ( % 0 , % 3 ) : $ @ convention ( thin ) ( @ in Int32 , Builtin . RawPointer ) - > Int32 <nl> + / / CHECK - NEXT : % 1 = alloc_stack $ Int32 <nl> + / / CHECK - NEXT : r = 1 , w = 1 <nl> + / / CHECK : PAIR # 2 . <nl> + / / CHECK - NEXT : % 5 = apply % 4 ( % 0 , % 3 ) : $ @ convention ( thin ) ( @ in Int32 , Builtin . RawPointer ) - > Int32 <nl> + / / CHECK - NEXT : % 3 = address_to_pointer % 1 : $ * Int32 to $ Builtin . RawPointer <nl> + / / CHECK - NEXT : r = 1 , w = 1 <nl> + sil @ esacping_allocstack_and_copyaddr : $ @ convention ( thin ) ( @ in Int32 ) - > Int32 { <nl> + bb0 ( % 0 : $ * Int32 ) : <nl> + % 1 = alloc_stack $ Int32 <nl> + copy_addr % 0 to % 1 : $ * Int32 <nl> + % 3 = address_to_pointer % 1 : $ * Int32 to $ Builtin . RawPointer <nl> + % 4 = function_ref @ indirect_arg_and_ptr : $ @ convention ( thin ) ( @ in Int32 , Builtin . RawPointer ) - > Int32 <nl> + % 5 = apply % 4 ( % 0 , % 3 ) : $ @ convention ( thin ) ( @ in Int32 , Builtin . RawPointer ) - > Int32 <nl> + dealloc_stack % 1 : $ * Int32 <nl> + return % 5 : $ Int32 <nl> + } <nl> + <nl> + <nl> + / / CHECK - LABEL : @ tryapply_allocstack_and_copyaddr <nl> + / / CHECK : PAIR # 0 . 
<nl> + / / CHECK - NEXT : try_apply % 3 ( % 0 ) : $ @ convention ( thin ) ( @ in Int32 ) - > ( Int32 , @ error Error ) , normal bb1 , error bb2 <nl> + / / CHECK - NEXT : % 0 = argument of bb0 : $ * Int32 <nl> + / / CHECK - NEXT : r = 1 , w = 1 <nl> + / / CHECK : PAIR # 1 . <nl> + / / CHECK - NEXT : try_apply % 3 ( % 0 ) : $ @ convention ( thin ) ( @ in Int32 ) - > ( Int32 , @ error Error ) , normal bb1 , error bb2 <nl> + / / CHECK - NEXT : % 1 = alloc_stack $ Int32 <nl> + / / CHECK - NEXT : r = 0 , w = 0 <nl> + sil @ tryapply_allocstack_and_copyaddr : $ @ convention ( thin ) ( @ in Int32 ) - > Int32 { <nl> + bb0 ( % 0 : $ * Int32 ) : <nl> + % 1 = alloc_stack $ Int32 <nl> + copy_addr % 0 to % 1 : $ * Int32 <nl> + % 3 = function_ref @ single_indirect_arg_and_error : $ @ convention ( thin ) ( @ in Int32 ) - > ( Int32 , @ error Error ) <nl> + try_apply % 3 ( % 0 ) : $ @ convention ( thin ) ( @ in Int32 ) - > ( Int32 , @ error Error ) , normal bb1 , error bb2 <nl> + bb1 ( % 5 : $ Int32 ) : <nl> + dealloc_stack % 1 : $ * Int32 <nl> + return % 5 : $ Int32 <nl> + bb2 ( % 8 : $ Error ) : <nl> + unreachable <nl> + } <nl> + <nl> + <nl> + / / CHECK - LABEL : @ beginapply_allocstack_and_copyaddr <nl> + / / CHECK : PAIR # 0 . <nl> + / / CHECK - NEXT : ( % 4 , % 5 ) = begin_apply % 3 ( % 0 ) : $ @ yield_once @ convention ( thin ) ( @ in Int32 ) - > @ yields Int32 <nl> + / / CHECK - NEXT : % 0 = argument of bb0 : $ * Int32 <nl> + / / CHECK - NEXT : r = 1 , w = 1 <nl> + / / CHECK : PAIR # 1 . <nl> + / / CHECK - NEXT : ( % 4 , % 5 ) = begin_apply % 3 ( % 0 ) : $ @ yield_once @ convention ( thin ) ( @ in Int32 ) - > @ yields Int32 <nl> + / / CHECK - NEXT : % 1 = alloc_stack $ Int32 <nl> + / / CHECK - NEXT : r = 0 , w = 0 <nl> + / / CHECK : PAIR # 3 . <nl> + / / CHECK - NEXT : end_apply % 5 <nl> + / / CHECK - NEXT : % 0 = argument of bb0 : $ * Int32 <nl> + / / CHECK - NEXT : r = 1 , w = 1 <nl> + / / CHECK : PAIR # 4 . <nl> + / / CHECK - NEXT : end_apply % 5 <nl> + / / CHECK - NEXT : % 1 = alloc_stack $ Int32 <nl> + / / CHECK - NEXT : r = 0 , w = 0 <nl> + / / CHECK : PAIR # 8 . <nl> + / / CHECK - NEXT : abort_apply % 5 <nl> + / / CHECK - NEXT : % 0 = argument of bb0 : $ * Int32 <nl> + / / CHECK - NEXT : r = 1 , w = 1 <nl> + / / CHECK : PAIR # 9 . <nl> + / / CHECK - NEXT : abort_apply % 5 <nl> + / / CHECK - NEXT : % 1 = alloc_stack $ Int32 <nl> + / / CHECK - NEXT : r = 0 , w = 0 <nl> + sil @ beginapply_allocstack_and_copyaddr : $ @ convention ( thin ) ( @ in Int32 ) - > Int32 { <nl> + bb0 ( % 0 : $ * Int32 ) : <nl> + % 1 = alloc_stack $ Int32 <nl> + copy_addr % 0 to % 1 : $ * Int32 <nl> + % 3 = function_ref @ single_indirect_arg_coroutine : $ @ yield_once @ convention ( thin ) ( @ in Int32 ) - > @ yields Int32 <nl> + ( % 4 , % 5 ) = begin_apply % 3 ( % 0 ) : $ @ yield_once @ convention ( thin ) ( @ in Int32 ) - > @ yields Int32 <nl> + cond_br undef , bb1 , bb2 <nl> + bb1 : <nl> + end_apply % 5 <nl> + dealloc_stack % 1 : $ * Int32 <nl> + return % 4 : $ Int32 <nl> + bb2 : <nl> + abort_apply % 5 <nl> + unreachable <nl> + } <nl> + <nl> + / / CHECK - LABEL : @ refelementaddr_and_reference <nl> + / / CHECK : PAIR # 1 . <nl> + / / CHECK - NEXT : % 3 = apply % 2 ( % 0 ) : $ @ convention ( thin ) ( @ guaranteed X ) - > Int32 <nl> + / / CHECK - NEXT : % 1 = ref_element_addr % 0 : $ X , # X . a <nl> + / / CHECK - NEXT : r = 1 , w = 1 <nl> + sil @ refelementaddr_and_reference : $ @ convention ( thin ) ( @ guaranteed X ) - > Int32 { <nl> + bb0 ( % 0 : $ X ) : <nl> + % 1 = ref_element_addr % 0 : $ X , # X . 
a <nl> + % 2 = function_ref @ single_reference : $ @ convention ( thin ) ( @ guaranteed X ) - > Int32 <nl> + % 3 = apply % 2 ( % 0 ) : $ @ convention ( thin ) ( @ guaranteed X ) - > Int32 <nl> + return % 3 : $ Int32 <nl> + } <nl> + <nl> sil @ load_from_in : $ @ convention ( thin ) ( @ in X ) - > ( ) { <nl> bb0 ( % 0 : $ * X ) : <nl> % 1 = load % 0 : $ * X <nl> bb0 : <nl> return % r : $ ( ) <nl> } <nl> <nl> + / / CHECK - LABEL : @ non_overlapping_struct_fields <nl> + / / CHECK : PAIR # 0 . <nl> + / / CHECK - NEXT : % 4 = apply % 3 ( % 1 ) : $ @ convention ( thin ) ( @ in Int32 ) - > Int32 <nl> + / / CHECK - NEXT : % 0 = argument of bb0 : $ * TwoInts <nl> + / / CHECK - NEXT : r = 1 , w = 1 <nl> + / / CHECK : PAIR # 1 . <nl> + / / CHECK - NEXT : % 4 = apply % 3 ( % 1 ) : $ @ convention ( thin ) ( @ in Int32 ) - > Int32 <nl> + / / CHECK - NEXT : % 1 = struct_element_addr % 0 : $ * TwoInts , # TwoInts . i1 <nl> + / / CHECK - NEXT : r = 1 , w = 1 <nl> + / / CHECK : PAIR # 2 . <nl> + / / CHECK - NEXT : % 4 = apply % 3 ( % 1 ) : $ @ convention ( thin ) ( @ in Int32 ) - > Int32 <nl> + / / CHECK - NEXT : % 2 = struct_element_addr % 0 : $ * TwoInts , # TwoInts . i2 <nl> + / / CHECK - NEXT : r = 0 , w = 0 <nl> + sil @ non_overlapping_struct_fields : $ @ convention ( thin ) ( @ in TwoInts ) - > Int32 { <nl> + bb0 ( % 0 : $ * TwoInts ) : <nl> + % 1 = struct_element_addr % 0 : $ * TwoInts , # TwoInts . i1 <nl> + % 2 = struct_element_addr % 0 : $ * TwoInts , # TwoInts . i2 <nl> + % 3 = function_ref @ single_indirect_arg : $ @ convention ( thin ) ( @ in Int32 ) - > Int32 <nl> + % 4 = apply % 3 ( % 1 ) : $ @ convention ( thin ) ( @ in Int32 ) - > Int32 <nl> + return % 4 : $ Int32 <nl> + } <nl> + <nl> sil @ copy_ints : $ @ convention ( thin ) ( @ inout Int32 , @ inout Int32 ) - > ( ) { <nl> bb0 ( % 0 : $ * Int32 , % 1 : $ * Int32 ) : <nl> % 2 = load % 0 : $ * Int32 <nl> mmm a / test / SILOptimizer / specialize_opaque_type_archetypes . swift <nl> ppp b / test / SILOptimizer / specialize_opaque_type_archetypes . swift <nl> func nonResilient ( ) - > some ExternalP2 { <nl> <nl> / / CHECK - LABEL : sil @ $ s1A019usePairResilientNonC0yyF : $ @ convention ( thin ) ( ) - > ( ) <nl> / / CHECK : alloc_stack $ Pair < MyInt64 , @ _opaqueReturnTypeOf ( " $ s9External217externalResilientQryF " , 0 ) <nl> - / / CHECK : cond_fail <nl> / / CHECK : [ [ USEP : % . * ] ] = function_ref @ $ s1A4usePyyxAA1PRzlFs5Int64V_Tg5 <nl> / / CHECK : [ [ FIRST_MYVALUE3 : % . * ] ] = struct $ Int64 <nl> / / CHECK : apply [ [ USEP ] ] ( [ [ FIRST_MYVALUE3 ] ] ) <nl>
SILOptimizer : improve MemBehavior for apply instructions .
apple/swift
d4a6bd39b686307028b850d25a2a2fbc337160a3
2020-10-09T18:54:58Z
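The core of the MemBehavior patch above is how a call's effect on a specific address is computed: the callee's global effects and the per-argument effects are merged, and an @in_guaranteed indirect argument is capped at "may read" because the callee is not allowed to write through it. The sketch below models that merge in plain C++ as a max over an ordered enum; the ordering and the combine-as-max rule are a simplified reading of the patch, not the actual SILOptimizer implementation, and the names are borrowed only for readability.

#include <algorithm>
#include <iostream>

// Behaviors ordered from weakest to strongest, so merging two of them can be
// modeled as taking the stronger (larger) value.
enum class MemBehavior { None, MayRead, MayWrite, MayReadWrite, MayHaveSideEffects };

MemBehavior combine(MemBehavior a, MemBehavior b) { return std::max(a, b); }

// Per-argument behavior in the spirit of FunctionSideEffects::getArgumentBehavior:
// merge global and argument effects, then cap @in_guaranteed arguments at MayRead,
// since that convention forbids the callee from writing through the address.
MemBehavior argumentBehavior(MemBehavior globalEffects, MemBehavior argEffects,
                             bool isInGuaranteed) {
    MemBehavior behavior = combine(globalEffects, argEffects);
    if (isInGuaranteed && behavior > MemBehavior::MayRead)
        return MemBehavior::MayRead;
    return behavior;
}

int main() {
    auto b = argumentBehavior(MemBehavior::MayHaveSideEffects,
                              MemBehavior::MayWrite, /*isInGuaranteed=*/true);
    std::cout << (b == MemBehavior::MayRead) << '\n';  // prints: 1
    return 0;
}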
mmm a / stdlib / public / core / Integers . swift . gyb <nl> ppp b / stdlib / public / core / Integers . swift . gyb <nl> from SwiftIntTypes import all_integer_types , int_max_bits , should_define_truncat <nl> from SwiftFloatingPointTypes import getFtoIBounds <nl> <nl> from string import maketrans , capitalize <nl> + from itertools import chain <nl> <nl> # Number of bits in the Builtin . Word type <nl> word_bits = int ( CMAKE_SIZEOF_VOID_P ) * 8 <nl> public func numericCast < T : BinaryInteger , U : BinaryInteger > ( _ x : T ) - > U { <nl> return U ( x ) <nl> } <nl> <nl> + <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + / / = = = mmm DoubleWidth mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm = = = / / <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + <nl> + public struct DoubleWidth < T : FixedWidthInteger > <nl> + : FixedWidthInteger , _ExpressibleByBuiltinIntegerLiteral <nl> + where <nl> + T . Magnitude : FixedWidthInteger , <nl> + T . Magnitude . Magnitude = = T . Magnitude { <nl> + <nl> + internal typealias High = T <nl> + internal typealias Low = T . Magnitude <nl> + <nl> + internal var _storage : ( high : T , low : T . Magnitude ) <nl> + <nl> + internal init ( _ _value : ( High , Low ) ) { <nl> + self . _storage = ( high : _value . 0 , low : _value . 1 ) <nl> + } <nl> + <nl> + / / arithmetic <nl> + / / <nl> + public init ( ) { <nl> + self . init ( ( High ( ) , Low ( ) ) ) <nl> + } <nl> + <nl> + / / integer <nl> + / / <nl> + public var magnitude : DoubleWidth < Low > { <nl> + if T . isSigned & & _storage . high < 0 { <nl> + return ( DoubleWidth < T > ( ) - self ) . magnitude <nl> + } <nl> + return DoubleWidth < Low > ( ( <nl> + _storage . high . magnitude , _storage . low . magnitude ) ) <nl> + } <nl> + <nl> + public func isEqual ( to rhs : DoubleWidth < T > ) - > Bool { <nl> + return ( _storage . high = = rhs . _storage . high ) & & <nl> + ( _storage . low = = rhs . _storage . low ) <nl> + } <nl> + <nl> + public func isLess ( than rhs : DoubleWidth < T > ) - > Bool { <nl> + if _storage . high < rhs . _storage . high { <nl> + return true <nl> + } <nl> + if ( _storage . high > rhs . _storage . high ) { <nl> + return false <nl> + } <nl> + return _storage . low < rhs . _storage . low <nl> + } <nl> + <nl> + public init < T : BinaryInteger > ( _ source : T ) { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + public init ? < T : BinaryInteger > ( exactly source : T ) { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + public init < T : BinaryInteger > ( extendingOrTruncating source : T ) { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + public init < T : FloatingPoint > ( _ source : T ) { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + public init ? < T : FloatingPoint > ( exactly source : T ) { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + public func word ( at n : Int ) - > UInt { <nl> + if T . bitWidth < $ { word_bits } | | T . bitWidth % $ { word_bits } ! = 0 { <nl> + fatalError ( " word ( at : ) is not supported on this type " ) <nl> + } <nl> + / / TODO : move to Int128 just like init ( _builtinIntegerLiteral : ) ? <nl> + let wordsInT = T . bitWidth / $ { word_bits } <nl> + return ( n < wordsInT ) ? <nl> + _storage . low . word ( at : n ) : <nl> + _storage . high . word ( at : n - wordsInT ) <nl> + } <nl> + <nl> + public static var isSigned : Bool { <nl> + return T . isSigned <nl> + } <nl> + <nl> + public var bitWidth : Int { <nl> + return 2 * T . 
bitWidth <nl> + } <nl> + <nl> + / / fixed width <nl> + / / <nl> + public static var max : DoubleWidth < T > { <nl> + return self . init ( ( High . max , Low . max ) ) <nl> + } <nl> + <nl> + public static var min : DoubleWidth < T > { <nl> + return self . init ( ( High . min , Low . min ) ) <nl> + } <nl> + <nl> + public static var bitWidth : Int { return 2 * T . bitWidth } <nl> + <nl> + % # This covers + and - <nl> + % for x in binaryArithmetic [ ' Arithmetic ' ] [ : 2 ] : <nl> + % highAffectedByLowOverflow = ' T . max ' if x . operator = = ' + ' else ' T . min ' <nl> + public func $ { x . name } WithOverflow ( _ rhs : DoubleWidth < T > ) <nl> + - > ( partialValue : DoubleWidth < T > , overflow : ArithmeticOverflow ) { <nl> + let ( low , lowOverflow ) = <nl> + _storage . low . $ { x . name } WithOverflow ( rhs . _storage . low ) <nl> + let ( high , highOverflow ) = <nl> + _storage . high . $ { x . name } WithOverflow ( rhs . _storage . high ) <nl> + let isLowOverflow = lowOverflow = = . overflow <nl> + let result = ( high $ { x . operator } ( isLowOverflow ? 1 : 0 ) , low ) <nl> + let overflow = ArithmeticOverflow ( <nl> + highOverflow = = . overflow | | <nl> + high = = $ { highAffectedByLowOverflow } & & isLowOverflow <nl> + ) <nl> + return ( partialValue : DoubleWidth < T > ( result ) , <nl> + overflow : overflow ) <nl> + } <nl> + % end <nl> + <nl> + <nl> + public func multipliedWithOverflow ( <nl> + by rhs : DoubleWidth < T > <nl> + ) - > ( partialValue : DoubleWidth < T > , overflow : ArithmeticOverflow ) { <nl> + let isNegative = ( self < DoubleWidth < T > ( ) ) ! = ( rhs < DoubleWidth < T > ( ) ) <nl> + <nl> + func mul ( _ x : Low , _ y : Low , _ carry : Low ) - > ( partial : Low , carry : Low ) { <nl> + let pair = Low . doubleWidthMultiply ( x , y ) <nl> + let t = DoubleWidth < Low > ( pair ) + DoubleWidth < Low > ( ( 0 , carry ) ) <nl> + return ( partial : t . _storage . low , carry : t . _storage . high ) <nl> + } <nl> + <nl> + var high : Low = 0 <nl> + var low : Low = 0 <nl> + <nl> + func mkResult ( _ isOverflow : Bool ) <nl> + - > ( partialValue : DoubleWidth < T > , overflow : ArithmeticOverflow ) { <nl> + <nl> + / / TODO : High ( ) cast fails <nl> + let result = DoubleWidth < T > ( ( High ( high ) , low ) ) <nl> + if isNegative { <nl> + return DoubleWidth < T > ( ) . subtractingWithOverflow ( result ) <nl> + } <nl> + return ( partialValue : result , overflow : ArithmeticOverflow ( isOverflow ) ) <nl> + } <nl> + <nl> + var carry : Low = 0 <nl> + <nl> + let lhs = self . magnitude <nl> + let rhs = rhs . magnitude <nl> + <nl> + / / TODO : gyb me ! <nl> + let a = mul ( rhs . _storage . low , lhs . _storage . low , carry ) <nl> + low + = a . partial <nl> + carry = a . carry <nl> + / * _log ( " ( II ) 1 ( \ ( high ) , \ ( low ) ) carry : \ ( carry ) " ) * / <nl> + <nl> + let b = mul ( rhs . _storage . low , lhs . _storage . high , carry ) <nl> + high + = b . partial <nl> + carry = b . carry <nl> + / * _log ( " ( II ) 2 ( \ ( high ) , \ ( low ) ) carry : \ ( carry ) " ) * / <nl> + <nl> + if carry ! = 0 { <nl> + / * _log ( " ( EE ) overflow " ) * / <nl> + return mkResult ( true ) <nl> + } <nl> + <nl> + let c = mul ( rhs . _storage . high , lhs . _storage . low , carry ) <nl> + low + = c . partial <nl> + carry = c . carry <nl> + / * _log ( " ( II ) 3 ( \ ( high ) , \ ( low ) ) carry : \ ( carry ) " ) * / <nl> + <nl> + let d = mul ( rhs . _storage . high , lhs . _storage . high , carry ) <nl> + high + = d . partial <nl> + carry = d . 
carry <nl> + / * _log ( " ( II ) 4 ( \ ( high ) , \ ( low ) ) carry : \ ( carry ) " ) * / <nl> + <nl> + / * if ( carry > 0 ) { _log ( " ( EE ) overflow " ) } * / <nl> + return mkResult ( carry > 0 ) <nl> + } <nl> + <nl> + public func dividedWithOverflow ( by other : DoubleWidth < T > ) <nl> + - > ( partialValue : DoubleWidth < T > , overflow : ArithmeticOverflow ) { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + public static func doubleWidthMultiply ( <nl> + _ lhs : DoubleWidth < T > , _ rhs : DoubleWidth < T > <nl> + ) - > ( high : DoubleWidth < T > , low : DoubleWidth < T > . Magnitude ) { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + public static func doubleWidthDivide ( <nl> + _ lhs : ( high : DoubleWidth < T > , low : DoubleWidth < T > . Magnitude ) , <nl> + _ rhs : DoubleWidth < T > <nl> + ) - > ( quotient : DoubleWidth < T > , remainder : DoubleWidth < T > ) { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + % for x in binaryBitwise + maskingShifts : <nl> + public static func $ { x . operator } = ( <nl> + lhs : inout DoubleWidth < T > , rhs : DoubleWidth < T > <nl> + ) { <nl> + fatalError ( ) <nl> + } <nl> + % end <nl> + <nl> + % for x in chain ( * binaryArithmetic . values ( ) ) : <nl> + public static func $ { x . operator } = ( <nl> + lhs : inout DoubleWidth < T > , rhs : DoubleWidth < T > <nl> + ) { <nl> + fatalError ( ) <nl> + } <nl> + % end <nl> + <nl> + public init ( _truncatingBits bits : UInt ) { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + / / other <nl> + / / <nl> + public init ( _builtinIntegerLiteral x : _MaxBuiltinIntegerType ) { <nl> + fatalError ( " Method must be overridden " ) <nl> + } <nl> + <nl> + public var description : String { <nl> + return " ( \ ( _storage . high ) , \ ( _storage . low ) ) " <nl> + } <nl> + <nl> + public var leadingZeros : Int { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + public var trailingZeros : Int { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + public var popcount : Int { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + public var hashValue : Int { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + public init ( littleEndian : DoubleWidth < T > ) { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + public init ( bigEndian : DoubleWidth < T > ) { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + public var littleEndian : DoubleWidth < T > { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + public var bigEndian : DoubleWidth < T > { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + public var byteSwapped : DoubleWidth < T > { <nl> + fatalError ( ) <nl> + } <nl> + } <nl> + <nl> / / FIXME ( integers ) : switch to using ` FixedWidthInteger . unsafeAdding ` <nl> internal func _unsafePlus ( _ lhs : Int , _ rhs : Int ) - > Int { <nl> # if INTERNAL_CHECKS_ENABLED <nl>
Adding DoubleWidth < >
apple/swift
156b171722020e957eab02de522ce6dd00b647d1
2017-01-05T22:21:05Z
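The interesting part of the DoubleWidth commit above is multipliedWithOverflow, which assembles the full-width product from four half-width partial products plus carry propagation. The C++ sketch below shows the same schoolbook scheme on concrete types, splitting 64-bit operands into 32-bit limbs and returning the 128-bit result as a (high, low) pair; the function name echoes the Swift API, but the code is an independent illustration, not a translation of the gyb source.

#include <cassert>
#include <cstdint>
#include <utility>

// Schoolbook double-width multiplication: split each operand into 32-bit
// halves, form the four partial products, and recombine them so that carries
// out of the low 64 bits flow into the high word.
std::pair<uint64_t, uint64_t> doubleWidthMultiply(uint64_t a, uint64_t b) {
    const uint64_t mask = 0xffffffffULL;
    const uint64_t aLo = a & mask, aHi = a >> 32;
    const uint64_t bLo = b & mask, bHi = b >> 32;

    const uint64_t p0 = aLo * bLo;  // low  x low
    const uint64_t p1 = aLo * bHi;  // low  x high
    const uint64_t p2 = aHi * bLo;  // high x low
    const uint64_t p3 = aHi * bHi;  // high x high

    // Middle column: the carry out of the low 64 bits accumulates here.
    const uint64_t mid = (p0 >> 32) + (p1 & mask) + (p2 & mask);
    const uint64_t low = (mid << 32) | (p0 & mask);
    const uint64_t high = p3 + (p1 >> 32) + (p2 >> 32) + (mid >> 32);
    return {high, low};
}

int main() {
    // (2^64 - 1) * 2 = 2^65 - 2, i.e. high = 1, low = 2^64 - 2.
    const auto r = doubleWidthMultiply(~0ULL, 2);
    assert(r.first == 1 && r.second == ~0ULL - 1);
    return 0;
}

A signed version, like the Swift code, would additionally multiply the magnitudes and negate the result when exactly one operand is negative.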
mmm a / ports / jsonnet / 001 - enable - msvc . patch <nl> ppp b / ports / jsonnet / 001 - enable - msvc . patch <nl> <nl> - + mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> - set ( GLOBAL_OUTPUT_PATH_SUFFIX " " CACHE STRING <nl> - <nl> + <nl> + cmake_minimum_required ( VERSION 2 . 8 . 7 ) <nl> project ( jsonnet C CXX ) <nl> <nl> + add_definitions ( - D_CRT_SECURE_NO_WARNINGS ) <nl> + <nl> - # Discourage in - source builds because they overwrite the hand - written Makefile . <nl> - # Use ` cmake . - B < dir > ` or the CMake GUI to do an out - of - source build . <nl> - if ( $ { CMAKE_SOURCE_DIR } STREQUAL $ { CMAKE_BINARY_DIR } AND <nl> - elseif ( BUILD_TESTS AND USE_SYSTEM_GTEST ) <nl> + include ( ExternalProject ) <nl> + include ( GNUInstallDirs ) <nl> + <nl> + else ( ) <nl> endif ( ) <nl> <nl> # Compiler flags . <nl> if ( $ { CMAKE_CXX_COMPILER_ID } MATCHES " Clang " OR <nl> $ { CMAKE_CXX_COMPILER_ID } STREQUAL " GNU " ) <nl> set ( OPT " - O3 " ) <nl> - else ( ) <nl> + else ( ) <nl> # TODO : Windows support . <nl> message ( FATAL_ERROR " Compiler $ { CMAKE_CXX_COMPILER_ID } not supported " ) <nl> endif ( ) <nl> # Look for libraries in global output path . <nl> link_directories ( $ { GLOBAL_OUTPUT_PATH } ) <nl> - + mmm a / cmd / CMakeLists . txt <nl> ppp b / cmd / CMakeLists . txt <nl> if ( BUILD_JSONNETFMT OR BUILD_TESTS ) <nl> - add_dependencies ( jsonnetfmt libjsonnet_static ) <nl> - target_link_libraries ( jsonnetfmt libjsonnet_static ) <nl> + add_dependencies ( jsonnetfmt libjsonnet_for_binaries ) <nl> + target_link_libraries ( jsonnetfmt libjsonnet_for_binaries ) <nl> <nl> - install ( TARGETS jsonnetfmt DESTINATION " $ { CMAKE_INSTALL_BINDIR } " ) <nl> + install ( TARGETS jsonnetfmt DESTINATION tools / jsonnet ) <nl> endif ( ) <nl> - + mmm a / core / CMakeLists . txt <nl> ppp b / core / CMakeLists . txt <nl> set ( LIBJSONNET_SOURCE <nl> - add_library ( libjsonnet SHARED $ { LIBJSONNET_HEADERS } $ { LIBJSONNET_SOURCE } ) <nl> + add_library ( libjsonnet $ { LIBJSONNET_HEADERS } $ { LIBJSONNET_SOURCE } ) <nl> add_dependencies ( libjsonnet md5 stdlib ) <nl> - target_link_libraries ( libjsonnet md5 ) <nl> + target_link_libraries ( libjsonnet md5 nlohmann_json : : nlohmann_json ) <nl> <nl> - set_target_properties ( libjsonnet PROPERTIES OUTPUT_NAME jsonnet <nl> + set_target_properties ( libjsonnet PROPERTIES OUTPUT_NAME jsonnet <nl> + PUBLIC_HEADER " $ { LIB_HEADER } " ) <nl> install ( TARGETS libjsonnet <nl> LIBRARY DESTINATION " $ { CMAKE_INSTALL_LIBDIR } " <nl> - ARCHIVE DESTINATION " $ { CMAKE_INSTALL_LIBDIR } " <nl> - + RUNTIME DESTINATION " $ { CMAKE_INSTALL_BINDIR } " <nl> + - ARCHIVE DESTINATION " $ { CMAKE_INSTALL_LIBDIR } " <nl> + + ARCHIVE DESTINATION " $ { CMAKE_INSTALL_BINDIR } " <nl> PUBLIC_HEADER DESTINATION " $ { CMAKE_INSTALL_INCLUDEDIR } " ) <nl> <nl> - # Static library for jsonnet command - line tool . <nl> - add_library ( libjsonnet_static STATIC $ { LIBJSONNET_SOURCE } ) <nl> - add_dependencies ( libjsonnet_static md5 stdlib ) <nl> - target_link_libraries ( libjsonnet_static md5 ) <nl> - - set_target_properties ( libjsonnet_static PROPERTIES OUTPUT_NAME jsonnet ) <nl> - install ( TARGETS libjsonnet_static DESTINATION " $ { CMAKE_INSTALL_LIBDIR } " ) <nl> - <nl> - # Tests <nl> + if ( BUILD_STATIC_LIBS ) <nl> - + mmm a / stdlib / CMakeLists . txt <nl> ppp b / stdlib / CMakeLists . txt <nl> <nl> # Custom command that will only build stdlib when it changes . <nl> add_custom_command ( <nl> OUTPUT $ { PROJECT_SOURCE_DIR } / core / std . jsonnet . 
h <nl> - add_custom_command ( <nl> - $ { PROJECT_SOURCE_DIR } / stdlib / std . jsonnet <nl> - $ { PROJECT_SOURCE_DIR } / core / std . jsonnet . h <nl> - DEPENDS to_c_array std . jsonnet ) <nl> - + endif ( ) <nl> - <nl> + add_custom_command ( <nl> # Standard library build target that libjsonnet can depend on . <nl> add_custom_target ( stdlib ALL <nl> + DEPENDS $ { PROJECT_SOURCE_DIR } / core / std . jsonnet . h ) <nl> + + endif ( ) <nl> + \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . cccdc9439cc <nl> mmm / dev / null <nl> ppp b / ports / jsonnet / 002 - fix - dependency - and - install . patch <nl> <nl> pppmmm a / core / CMakeLists . txt <nl> ppp + b / core / CMakeLists . txt <nl> + set ( LIBJSONNET_SOURCE <nl> + string_utils . cpp <nl> + vm . cpp ) <nl> + <nl> + + if ( BUILD_SHARED_BINARIES ) <nl> + add_library ( libjsonnet $ { LIBJSONNET_HEADERS } $ { LIBJSONNET_SOURCE } ) <nl> + add_dependencies ( libjsonnet md5 stdlib ) <nl> + target_link_libraries ( libjsonnet md5 nlohmann_json : : nlohmann_json ) <nl> + install ( TARGETS libjsonnet <nl> + LIBRARY DESTINATION " $ { CMAKE_INSTALL_LIBDIR } " <nl> + ARCHIVE DESTINATION " $ { CMAKE_INSTALL_BINDIR } " <nl> + PUBLIC_HEADER DESTINATION " $ { CMAKE_INSTALL_INCLUDEDIR } " ) <nl> + + endif ( ) <nl> + <nl> + if ( BUILD_STATIC_LIBS ) <nl> + # Static library for jsonnet command - line tool . <nl> + if ( BUILD_TESTS ) <nl> + add_test ( jsonnet_test_snippet <nl> + $ { GLOBAL_OUTPUT_PATH } / jsonnet - e $ { TEST_SNIPPET } ) <nl> + endif ( ) <nl> + + <nl> + + <nl> + + install ( FILES $ { LIBJSONNET_HEADERS } DESTINATION include ) <nl> + \ No newline at end of file <nl> pppmmm a / cpp / CMakeLists . txt <nl> ppp + b / cpp / CMakeLists . txt <nl> + set ( LIBJSONNETPP_SOURCE <nl> + libjsonnet + + . cpp <nl> + ) <nl> + <nl> + + if ( BUILD_SHARED_BINARIES ) <nl> + add_library ( libjsonnet + + SHARED $ { LIBJSONNETPP_HEADERS } $ { LIBJSONNETPP_SOURCE } ) <nl> + - add_dependencies ( libjsonnet + + jsonnet ) <nl> + - # target_link_libraries ( libjsonnet libjsonnet ) <nl> + + target_link_libraries ( libjsonnet + + libjsonnet ) <nl> + <nl> + # CMake prepends CMAKE_SHARED_LIBRARY_PREFIX to shared libraries , so without <nl> + # this step the output would be | liblibjsonnet | . <nl> + install ( TARGETS libjsonnet + + <nl> + LIBRARY DESTINATION " $ { CMAKE_INSTALL_LIBDIR } " <nl> + ARCHIVE DESTINATION " $ { CMAKE_INSTALL_LIBDIR } " <nl> + PUBLIC_HEADER DESTINATION " $ { CMAKE_INSTALL_INCLUDEDIR } " ) <nl> + + endif ( ) <nl> + <nl> + if ( BUILD_STATIC_LIBS ) <nl> + # Static library for jsonnet command - line tool . <nl> + else ( ) <nl> + add_library ( libjsonnet + + _for_binaries ALIAS libjsonnet + + _static ) <nl> + endif ( ) <nl> + <nl> + + install ( FILES $ { LIBJSONNETPP_HEADERS } DESTINATION include ) <nl> + + <nl> + # Tests <nl> + function ( add_test_executablepp test_name ) <nl> + if ( EXISTS $ { CMAKE_CURRENT_LIST_DIR } / $ { test_name } . cpp ) <nl> mmm a / ports / jsonnet / CONTROL <nl> ppp b / ports / jsonnet / CONTROL <nl> <nl> Source : jsonnet <nl> - Version : 0 . 13 . 0 <nl> + Version : 0 . 14 . 0 <nl> Homepage : https : / / github . com / google / jsonnet <nl> Description : Jsonnet - The data templating language <nl> mmm a / ports / jsonnet / portfile . cmake <nl> ppp b / ports / jsonnet / portfile . 
cmake <nl> <nl> - include ( vcpkg_common_functions ) <nl> - <nl> - if ( VCPKG_CMAKE_SYSTEM_NAME STREQUAL " WindowsStore " OR NOT VCPKG_CMAKE_SYSTEM_NAME ) <nl> + if ( VCPKG_TARGET_IS_WINDOWS ) <nl> vcpkg_check_linkage ( ONLY_STATIC_LIBRARY ) <nl> endif ( ) <nl> <nl> vcpkg_from_github ( <nl> OUT_SOURCE_PATH SOURCE_PATH <nl> REPO google / jsonnet <nl> - REF v0 . 13 . 0 <nl> - SHA512 d19e5398763e37b79b0ef02368f6bd6215d2df234b5ff7a6d98e2306a0d47290600061c9f868c0c262570b4f0ee9eee6c309bcc93937b12f6c14f8d12339a7d5 <nl> + REF 552d8ec6f6b973a6357b83eb9bacd707366d28f0 # v0 . 14 . 0 <nl> + SHA512 a4a9c6285155addbc5b7ef1a0c02b99b4d941bfc8e6536eaf029bff77c9c303a5c36f654ca8ab6b9757d2710c100c3e4a05f310269d82b0385ae55ea6ead14ef <nl> HEAD_REF master <nl> PATCHES <nl> - 001 - enable - msvc . patch <nl> + 001 - enable - msvc . patch <nl> + 002 - fix - dependency - and - install . patch <nl> ) <nl> <nl> if ( VCPKG_TARGET_IS_WINDOWS ) <nl> else ( ) <nl> ) <nl> endif ( ) <nl> <nl> + if ( VCPKG_LIBRARY_LINKAGE STREQUAL dynamic ) <nl> + set ( BUILD_SHARED ON ) <nl> + set ( BUILD_STATIC OFF ) <nl> + else ( ) <nl> + set ( BUILD_SHARED OFF ) <nl> + set ( BUILD_STATIC ON ) <nl> + endif ( ) <nl> + <nl> vcpkg_configure_cmake ( <nl> SOURCE_PATH $ { SOURCE_PATH } <nl> PREFER_NINJA <nl> - OPTIONS - DBUILD_JSONNET = OFF - DBUILD_JSONNETFMT = OFF - DBUILD_TESTS = OFF <nl> + OPTIONS <nl> + - DBUILD_SHARED_BINARIES = $ { BUILD_SHARED } <nl> + - DBUILD_STATIC_LIBS = $ { BUILD_STATIC } <nl> + - DBUILD_JSONNET = OFF <nl> + - DBUILD_JSONNETFMT = OFF <nl> + - DBUILD_TESTS = OFF <nl> ) <nl> <nl> vcpkg_install_cmake ( ) <nl> vcpkg_copy_pdbs ( ) <nl> vcpkg_copy_tool_dependencies ( $ { CURRENT_PACKAGES_DIR } / tools / jsonnet ) <nl> <nl> - file ( INSTALL $ { SOURCE_PATH } / LICENSE DESTINATION $ { CURRENT_PACKAGES_DIR } / share / jsonnet RENAME copyright ) <nl> file ( REMOVE_RECURSE $ { CURRENT_PACKAGES_DIR } / debug / include ) <nl> + <nl> + file ( INSTALL $ { SOURCE_PATH } / LICENSE DESTINATION $ { CURRENT_PACKAGES_DIR } / share / $ { PORT } RENAME copyright ) <nl>
[ jsonnet ] Upgrade to 0 . 14 . 0 . ( )
microsoft/vcpkg
b6f6619408c14980baa0495b45dd287cc26d460f
2019-11-01T23:51:08Z
mmm a / tensorflow / core / profiler / g3doc / profile_model_architecture . md <nl> ppp b / tensorflow / core / profiler / g3doc / profile_model_architecture . md <nl> sys . stdout . write ( ' total_params : % d \ n ' % param_stats . total_parameters ) <nl> <nl> For an operation to have float operation statistics : <nl> <nl> - * It must have ` RegisterStatistics ( ' flops ' ) ` defined in TensorFlow . tfprof <nl> - use the definition to calculate float operations . Contributes are welcome . <nl> - <nl> - * It must have known " shape " information for RegisterStatistics ( ' flops ' ) <nl> - to calculate the statistics . It is suggested to pass in ` - run_meta_path ` if <nl> - shape is only known during runtime . tfprof can fill in the missing shape with <nl> - the runtime shape information from RunMetadata . <nl> - Hence , it is suggested to use ` - account_displayed_op_only ` <nl> - option so that you know the statistics are only for the operations printed out . <nl> - <nl> - * If no RunMetadata provided , tfprof count float_ops of each graph node once , <nl> - even if it is defined in tf . while_loop . This is because tfprof doesn ' t know <nl> - how many times are run statically . If RunMetadata provided , tfprof calculate <nl> - float_ops as float_ops * run_count . <nl> - <nl> - <nl> + * It must have ` RegisterStatistics ( ' flops ' ) ` defined in TensorFlow . tfprof <nl> + uses the definition to calculate float operations . Contributions are <nl> + welcomed . <nl> + <nl> + * It must have known " shape " information for RegisterStatistics ( ' flops ' ) to <nl> + calculate the statistics . It is suggested to pass in ` - run_meta_path ` if <nl> + shape is only known during runtime . tfprof can fill in the missing shape <nl> + with the runtime shape information from RunMetadata . Hence , it is suggested <nl> + to use ` - account_displayed_op_only ` option so that you know the statistics <nl> + are only for the operations printed out . <nl> + <nl> + * If no RunMetadata is provided , tfprof counts float_ops of each graph node <nl> + once , even if it is defined in a tf . while_loop . This is because tfprof <nl> + doesn ' t know statically how many times each graph node is run . If <nl> + RunMetadata is provided , tfprof calculates float_ops as float_ops * <nl> + run_count . <nl> <nl> ` ` ` python <nl> # To profile float opertions in commandline , you need to pass - - graph_path <nl>
Fix typos in " Profile Model Float Operations " documentation .
tensorflow/tensorflow
e6225d9835f63729a9006f10ca9e50068381663d
2018-04-05T16:21:13Z
mmm a / src / mongo / shell / dbshell . cpp <nl> ppp b / src / mongo / shell / dbshell . cpp <nl> enum ShellExitCode : int { <nl> Scope * shellMainScope ; <nl> } <nl> <nl> + bool isSessionTimedOut ( ) { <nl> + static Date_t previousCommandTime = Date_t : : now ( ) ; <nl> + if ( shellGlobalParams . idleSessionTimeout > Seconds ( 0 ) ) { <nl> + const Date_t now = Date_t : : now ( ) ; <nl> + <nl> + if ( now > ( previousCommandTime + shellGlobalParams . idleSessionTimeout ) ) { <nl> + return true ; <nl> + } <nl> + previousCommandTime = now ; <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> void generateCompletions ( const std : : string & prefix , std : : vector < std : : string > & all ) { <nl> if ( prefix . find ( ' " ' ) ! = std : : string : : npos ) <nl> return ; <nl> int _main ( int argc , char * argv [ ] , char * * envp ) { <nl> free ( line ) ; <nl> break ; <nl> } <nl> + <nl> + / / Support idle session lifetime limits <nl> + if ( isSessionTimedOut ( ) ) { <nl> + std : : cout < < " Idle Connection Timeout : Shell session has expired " < < std : : endl ; <nl> + if ( line ) <nl> + free ( line ) ; <nl> + break ; <nl> + } <nl> + <nl> if ( code = = " cls " ) { <nl> free ( line ) ; <nl> linenoiseClearScreen ( ) ; <nl> mmm a / src / mongo / shell / shell_options . cpp <nl> ppp b / src / mongo / shell / shell_options . cpp <nl> Status storeMongoShellOptions ( const moe : : Environment & params , <nl> shellGlobalParams . jsHeapLimitMB = jsHeapLimitMB ; <nl> } <nl> <nl> + if ( params . count ( " idleSessionTimeout " ) ) { <nl> + shellGlobalParams . idleSessionTimeout = Seconds ( params [ " idleSessionTimeout " ] . as < int > ( ) ) ; <nl> + } <nl> + <nl> if ( shellGlobalParams . url = = " * " ) { <nl> StringBuilder sb ; <nl> sb < < " ERROR : " <nl> mmm a / src / mongo / shell / shell_options . h <nl> ppp b / src / mongo / shell / shell_options . h <nl> struct ShellGlobalParams { <nl> <nl> int jsHeapLimitMB = 0 ; <nl> bool nokillop = false ; <nl> + Seconds idleSessionTimeout = Seconds { 0 } ; <nl> } ; <nl> <nl> extern ShellGlobalParams shellGlobalParams ; <nl> mmm a / src / mongo / shell / shell_options . idl <nl> ppp b / src / mongo / shell / shell_options . idl <nl> configs : <nl> description : " Remote host name to use for purpose of GSSAPI / Kerberos authentication " <nl> arg_vartype : String <nl> <nl> + " idleSessionTimeout " : <nl> + description : " Terminate the Shell session if it ' s been idle for this many seconds " <nl> + arg_vartype : Int <nl> + default : 0 <nl> + validator : { gte : 0 } <nl>
SERVER - 33749 Add idleSessionTimeout flag
mongodb/mongo
9ad917886db091a5b7fde2434eea9095c7145986
2019-07-09T14:19:51Z
new file mode 100644 <nl> index 00000000000 . . 87af44f7361 <nl> mmm / dev / null <nl> ppp b / etc / README . md <nl> <nl> + The roots . pem file is periodically generated from : <nl> + https : / / hg . mozilla . org / mozilla - central / raw - file / tip / security / nss / lib / ckfw / builtins / certdata . txt <nl> + using <nl> + https : / / github . com / agl / extract - nss - root - certs <nl>
Merge pull request from jboeuf / readme_roots
grpc/grpc
edd2e50fcb2adca3da5cb16fecfb7e96b61eff51
2016-09-15T18:33:04Z
mmm a / test / DebugInfo / ErrorVar . swift <nl> ppp b / test / DebugInfo / ErrorVar . swift <nl> enum MyError : Error { <nl> / / thrown error we create a shadow stack location holding the address of the <nl> / / location that holds the pointer to the error instead . <nl> func simple ( _ placeholder : Int64 ) throws - > ( ) { <nl> - / / CHECK : define { { . * } } void @ _T06Errors6simpleys5Int64VKF ( i64 , % swift . refcounted * swiftself , % swift . error * * ) <nl> + / / CHECK : define { { . * } } void @ _T08ErrorVar6simpleys5Int64VKF ( i64 , % swift . refcounted * swiftself , % swift . error * * ) <nl> / / CHECK : call void @ llvm . dbg . declare <nl> / / CHECK : call void @ llvm . dbg . declare ( { { . * } } , metadata ! [ [ ERROR : [ 0 - 9 ] + ] ] , metadata ! [ [ DEREF : [ 0 - 9 ] + ] ] ) <nl> / / CHECK : ! [ [ ERROR ] ] = ! DILocalVariable ( name : " $ error " , arg : 2 , <nl>
Merge remote - tracking branch ' origin / master ' into master - next
apple/swift
acdb6fdfaaf686aa38d045951f46b3a7b1459078
2017-04-28T01:48:32Z
mmm a / src / node / src / credentials . js <nl> ppp b / src / node / src / credentials . js <nl> exports . createFromMetadataGenerator = function ( metadata_generator ) { <nl> exports . createFromGoogleCredential = function ( google_credential ) { <nl> return exports . createFromMetadataGenerator ( function ( auth_context , callback ) { <nl> var service_url = auth_context . service_url ; <nl> - console . log ( ' Service URL : ' , service_url ) ; <nl> google_credential . getRequestMetadata ( service_url , function ( err , header ) { <nl> if ( err ) { <nl> console . log ( ' Auth error : ' , err ) ; <nl> exports . createFromGoogleCredential = function ( google_credential ) { <nl> } <nl> var metadata = new Metadata ( ) ; <nl> metadata . add ( ' authorization ' , header . Authorization ) ; <nl> - console . log ( header . Authorization ) ; <nl> callback ( null , metadata ) ; <nl> } ) ; <nl> } ) ; <nl>
Remove some debug logging from 0 . 13 - branch node
grpc/grpc
6c621069f0fbcfbcd614c6dcff2389db1eb10d60
2016-04-01T16:33:55Z
mmm a / lib / SILOptimizer / Utils / Generics . cpp <nl> ppp b / lib / SILOptimizer / Utils / Generics . cpp <nl> void swift : : trySpecializeApplyOfGeneric ( <nl> if ( shouldNotSpecialize ( RefF , F ) ) <nl> return ; <nl> <nl> + / / If our callee has ownership , do not specialize for now . This should only <nl> + / / occur with transparent referenced functions . <nl> + / / <nl> + / / FIXME : Support this . <nl> + if ( RefF - > hasOwnership ( ) ) { <nl> + assert ( RefF - > isTransparent ( ) ) ; <nl> + return ; <nl> + } <nl> + <nl> / / If the caller and callee are both fragile , preserve the fragility when <nl> / / cloning the callee . Otherwise , strip it off so that we can optimize <nl> / / the body more . <nl> new file mode 100644 <nl> index 000000000000 . . 39e82c58ed35 <nl> mmm / dev / null <nl> ppp b / test / SILOptimizer / specialize_ossa . sil <nl> <nl> + / / RUN : % target - sil - opt - enable - sil - verify - all - generic - specializer % / s | % FileCheck % s <nl> + <nl> + sil_stage canonical <nl> + <nl> + import Builtin <nl> + import Swift <nl> + <nl> + sil [ ossa ] [ transparent ] @ ossaTransparentCallee : $ @ convention ( thin ) < T > ( @ in T ) - > ( ) { <nl> + bb0 ( % 0 : $ * T ) : <nl> + destroy_addr % 0 : $ * T <nl> + % 9999 = tuple ( ) <nl> + return % 9999 : $ ( ) <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil @ caller : $ @ convention ( thin ) ( @ owned Builtin . NativeObject ) - > ( ) { <nl> + / / CHECK : [ [ FUNC : % . * ] ] = function_ref @ ossaTransparentCallee : $ @ convention ( thin ) < τ_0_0 > ( @ in τ_0_0 ) - > ( ) <nl> + / / CHECK : apply [ [ FUNC ] ] < Builtin . NativeObject > ( <nl> + / / CHECK : } / / end sil function ' caller ' <nl> + sil @ caller : $ @ convention ( thin ) ( @ owned Builtin . NativeObject ) - > ( ) { <nl> + bb0 ( % 0 : $ Builtin . NativeObject ) : <nl> + % 1 = function_ref @ ossaTransparentCallee : $ @ convention ( thin ) < τ_0_0 > ( @ in τ_0_0 ) - > ( ) <nl> + % 2 = alloc_stack $ Builtin . NativeObject <nl> + store % 0 to % 2 : $ * Builtin . NativeObject <nl> + apply % 1 < Builtin . NativeObject > ( % 2 ) : $ @ convention ( thin ) < τ_0_0 > ( @ in τ_0_0 ) - > ( ) <nl> + dealloc_stack % 2 : $ * Builtin . NativeObject <nl> + % 9999 = tuple ( ) <nl> + return % 9999 : $ ( ) <nl> + } <nl> \ No newline at end of file <nl>
Merge remote - tracking branch ' origin / master ' into master - next
apple/swift
fdd36e10d68c42aaac91377bc888f33df855ea55
2019-02-12T04:49:17Z
mmm a / folly / experimental / Gen - inl . h <nl> ppp b / folly / experimental / Gen - inl . h <nl> class GenImpl : public FBounded < Self > { <nl> <nl> / * * <nl> * apply ( ) - Send all values produced by this generator to given <nl> - * handler until the handler returns false . Returns true until the handler <nl> - * returns false . GOTCHA : It should return true even if it completes ( without <nl> - * the handler returning false ) , as ' Chain ' uses the return value of apply <nl> - * to determine if it should process the second object in its chain . <nl> + * handler until the handler returns false . Returns false if and only if the <nl> + * handler returns false . Note : It should return true even if it completes <nl> + * ( without the handler returning false ) , as ' Chain ' uses the return value of <nl> + * apply to determine if it should process the second object in its chain . <nl> * / <nl> template < class Handler > <nl> bool apply ( Handler & & handler ) const ; <nl>
Gen apply comment
facebook/folly
b297ef2d39823ef74e1be98c6f9329e5173af928
2013-05-23T21:33:22Z
new file mode 100644 <nl> index 0000000000 . . 49748fb42d <nl> mmm / dev / null <nl> ppp b / Math / factorial . cpp <nl> <nl> + # include < iostream > <nl> + using namespace std ; <nl> + <nl> + int main ( ) <nl> + { <nl> + int n ; <nl> + cout < < " Enter number = " ; <nl> + cin > > n ; <nl> + int num = 1 ; <nl> + for ( int i = 1 ; i < n ; i + + ) <nl> + { <nl> + num * = i ; <nl> + } <nl> + cout < < " Factorial of number is = " ; <nl> + cout < < num < < endl ; <nl> + <nl> + return 0 ; <nl> + } <nl> new file mode 100644 <nl> index 0000000000 . . 14203b0d6f <nl> Binary files / dev / null and b / Math / factorial . exe differ <nl> new file mode 100644 <nl> index 0000000000 . . 23b949f7a6 <nl> Binary files / dev / null and b / Math / factorial . o differ <nl>
I added algorithm to find factorial of a number
TheAlgorithms/C-Plus-Plus
61664e9639bfa05ca7c1646224ed3b663876eadf
2019-10-23T14:44:43Z
mmm a / src / runtime / base / file / file . cpp <nl> ppp b / src / runtime / base / file / file . cpp <nl> bool File : : lock ( int operation ) { <nl> bool File : : lock ( int operation , bool & wouldblock / * = false * / ) { <nl> ASSERT ( m_fd > = 0 ) ; <nl> <nl> - if ( ( operation & 3 ) = = 0 ) { <nl> - throw_invalid_argument ( " operation : % d " , operation ) ; <nl> - return false ; <nl> - } <nl> - <nl> wouldblock = false ; <nl> if ( flock ( m_fd , operation ) ) { <nl> if ( errno = = EWOULDBLOCK ) { <nl> mmm a / src / runtime / ext / ext_file . cpp <nl> ppp b / src / runtime / ext / ext_file . cpp <nl> bool f_ftruncate ( CObjRef handle , int64 size ) { <nl> return CHECK_ERROR ( f - > truncate ( size ) ) ; <nl> } <nl> <nl> + static int flock_values [ ] = { LOCK_SH , LOCK_EX , LOCK_UN } ; <nl> + <nl> bool f_flock ( CObjRef handle , int operation , Variant wouldblock / * = null * / ) { <nl> CHECK_HANDLE ( handle , f ) ; <nl> bool block = false ; <nl> - bool ret = f - > lock ( operation , block ) ; <nl> + int act ; <nl> + <nl> + act = operation & 3 ; <nl> + if ( act < 1 | | act > 3 ) { <nl> + throw_invalid_argument ( " operation : % d " , operation ) ; <nl> + return false ; <nl> + } <nl> + act = flock_values [ act - 1 ] | ( operation & 4 ? LOCK_NB : 0 ) ; <nl> + bool ret = f - > lock ( act , block ) ; <nl> wouldblock = block ; <nl> return ret ; <nl> } <nl> mmm a / src / test / test_code_run . cpp <nl> ppp b / src / test / test_code_run . cpp <nl> bool TestCodeRun : : TestFile ( ) { <nl> " fseek ( $ dst , 0 ) ; " <nl> " $ str = stream_get_contents ( $ dst ) ; " <nl> " echo $ str ; " ) ; <nl> + MVCR ( " < ? php " <nl> + " define ( ' FILENAME ' , ' / tmp / flock_file . dat ' ) ; " <nl> + " function flock_or_die ( $ filename , $ resource , $ flock_op ) { " <nl> + " $ r = flock ( $ resource , $ flock_op ) ; " <nl> + " var_dump ( $ r ) ; " <nl> + " } " <nl> + " $ resource = fopen ( FILENAME , ' w ' ) ; " <nl> + " flock_or_die ( FILENAME , $ resource , LOCK_EX ) ; " <nl> + " flock_or_die ( FILENAME , $ resource , LOCK_UN ) ; " <nl> + " unlink ( FILENAME ) ; " ) ; <nl> return true ; <nl> } <nl> <nl>
[ Fix ] Fix flock bug
facebook/hhvm
3767465049d455652e70c4c75cbdad58078ec54d
2011-03-14T11:56:22Z
mmm a / cocos / network / HttpRequest . h <nl> ppp b / cocos / network / HttpRequest . h <nl> class HttpRequest : public cocos2d : : Object <nl> / * * Get the request data pointer back * / <nl> inline char * getRequestData ( ) <nl> { <nl> - if ( _requestData . size ( ) ! = 0 ) <nl> - return & ( _requestData . front ( ) ) ; <nl> + if ( _requestData . size ( ) ! = 0 ) <nl> + return & ( _requestData . front ( ) ) ; <nl> <nl> return nullptr ; <nl> } <nl>
: Add XMLHttpRequest lua binding and corresponding test sample
cocos2d/cocos2d-x
663b27d1e76afc89a92ef0b04991430f22c40618
2013-10-29T10:06:02Z
mmm a / tensorflow / compiler / mlir / lite / quantization / quantization_driver . cc <nl> ppp b / tensorflow / compiler / mlir / lite / quantization / quantization_driver . cc <nl> struct RequantizeState { <nl> class QuantizationDriver { <nl> public : <nl> explicit QuantizationDriver ( FuncOp fn , bool is_signed , <nl> + bool disable_per_channel , <nl> OpQuantSpecGetter op_quant_spec_getter ) <nl> : fn_ ( fn ) , <nl> builder_ ( fn . getBody ( ) ) , <nl> is_signed_ ( is_signed ) , <nl> + disable_per_channel_ ( disable_per_channel ) , <nl> op_quant_spec_getter_ ( op_quant_spec_getter ) { } <nl> <nl> / / The entry point of the quantization parameters propagation . <nl> class QuantizationDriver { <nl> FuncOp fn_ ; <nl> OpBuilder builder_ ; <nl> bool is_signed_ ; <nl> + bool disable_per_channel_ ; <nl> <nl> / / We should distinguish weights and bias constants . Biases are specified by <nl> / / the quantization spec or are the operands of ops with same scale spec . The <nl> bool QuantizationDriver : : SetConstantResultParams ( Operation * op ) { <nl> Type final_type ; <nl> auto it = optimized_weights_ . find ( op ) ; <nl> if ( it ! = optimized_weights_ . end ( ) ) { <nl> - if ( it - > second ! = - 1 & & is_signed_ ) { <nl> + / / When ` disable_per_channel_ ` is false , per - channel symmetric quantization <nl> + / / parameters are created from the weights when the ops support per - channel <nl> + / / quantization . Otherwise , uses per - tensor asymmetric quantization with <nl> + / / narrow range . <nl> + if ( it - > second ! = - 1 & & is_signed_ & & ! disable_per_channel_ ) { <nl> / / per - axis quantization weight <nl> final_type = GetUniformQuantizedPerAxisTypeForWeight ( <nl> attr , it - > second , / * symmetric = * / true , / * num_bits = * / 8 , is_signed_ , <nl> void QuantizationDriver : : Run ( ) { <nl> } <nl> <nl> void ApplyQuantizationParamsPropagation ( <nl> - mlir : : FuncOp func , bool is_signed , OpQuantSpecGetter op_quant_spec_getter ) { <nl> - QuantizationDriver ( func , is_signed , op_quant_spec_getter ) . Run ( ) ; <nl> + mlir : : FuncOp func , bool is_signed , bool disable_per_channel , <nl> + OpQuantSpecGetter op_quant_spec_getter ) { <nl> + QuantizationDriver ( func , is_signed , disable_per_channel , op_quant_spec_getter ) <nl> + . Run ( ) ; <nl> } <nl> <nl> } / / namespace TFL <nl> mmm a / tensorflow / compiler / mlir / lite / quantization / quantization_utils . h <nl> ppp b / tensorflow / compiler / mlir / lite / quantization / quantization_utils . h <nl> quant : : QuantizedType GetUniformQuantizedTypeForBias ( <nl> / / the quantization specification of the ops . This methods assumes the initial <nl> / / quantization parameters are stored as adjacent quantize and dequantize ops <nl> / / and the propagation results are materialized by inserting pairs of quantize <nl> - / / and dequantize ops to this function . <nl> + / / and dequantize ops to this function . Set ` disable_per_channel ` to true to not <nl> + / / use per channel quantization even the op supports it . <nl> void ApplyQuantizationParamsPropagation ( mlir : : FuncOp func , bool is_signed , <nl> + bool disable_per_channel , <nl> OpQuantSpecGetter op_quant_spec_getter ) ; <nl> <nl> / / The function might contain more stats ops than required , and it will <nl> mmm a / tensorflow / compiler / mlir / lite / tests / prepare - quantize - signed . mlir <nl> ppp b / tensorflow / compiler / mlir / lite / tests / prepare - quantize - signed . 
mlir <nl> <nl> / / RUN : tf - opt % s - tfl - prepare - quantize - tfl - test - quantize - signed | FileCheck % s <nl> + / / RUN : tf - opt % s - tfl - prepare - quantize - tfl - test - quantize - signed - tfl - disable - per - channel | FileCheck - - check - prefix = PerTensor % s <nl> <nl> / / CHECK - LABEL : uint8_to_int8 <nl> func @ uint8_to_int8 ( % arg0 : tensor < 2x2xf32 > ) - > tensor < 2x2xf32 > { <nl> func @ prepareConv2DSplat ( % arg0 : tensor < 1x5x5x3xf32 > ) - > tensor < 1x5x5x3xf32 > { <nl> / / CHECK - SAME : { 1 . 000000e + 00 , 1 . 000000e + 00 , 1 . 000000e + 00 } <nl> / / CHECK : % [ [ dq : . * ] ] = " tfl . dequantize " ( % [ [ q ] ] ) <nl> / / CHECK : % [ [ conv : . * ] ] = " tfl . conv_2d " ( % arg0 , % [ [ dq ] ] <nl> + <nl> + / / PerTensor : % [ [ cst : . * ] ] = constant dense < 1 . 270000e + 02 > : tensor < 3x3x3x3xf32 > <nl> + / / PerTensor : % [ [ q : . * ] ] = " tfl . quantize " ( % [ [ cst ] ] ) { qtype = tensor < 3x3x3x3x ! quant . uniform < i8 < - 127 : 127 > : f32 , 5 . 000000e - 01 : - 127 > > } <nl> + / / PerTensor : % [ [ dq : . * ] ] = " tfl . dequantize " ( % [ [ q ] ] ) <nl> + / / PerTensor : % [ [ conv : . * ] ] = " tfl . conv_2d " ( % arg0 , % [ [ dq ] ] <nl> } <nl> <nl> / / CHECK - LABEL : prepareConv2D <nl> func @ prepareConv2D ( % arg0 : tensor < 1x5x5x1xf32 > ) - > tensor < 1x5x5x3xf32 > { <nl> / / CHECK - SAME : { 0 . 0078740157480314959 , 1 . 000000e + 00 , 1 . 000000e + 00 } > > } <nl> / / CHECK : % [ [ dq : . * ] ] = " tfl . dequantize " ( % [ [ q ] ] ) <nl> / / CHECK : % [ [ conv : . * ] ] = " tfl . conv_2d " ( % arg0 , % [ [ dq ] ] <nl> + <nl> + / / PerTensor : % [ [ cst : . * ] ] = constant dense < [ { { \ [ \ [ \ [ } } 0 . 000000e + 00 ] ] ] , [ { { \ [ \ [ } } 1 . 270000e + 02 ] ] ] , [ { { \ [ \ [ } } - 1 . 270000e + 02 ] ] ] ] > <nl> + / / PerTensor : % [ [ q : . * ] ] = " tfl . quantize " ( % [ [ cst ] ] ) { qtype = tensor < 3x1x1x1x ! quant . uniform < i8 < - 127 : 127 > : f32 , <nl> + / / PerTensor : % [ [ dq : . * ] ] = " tfl . dequantize " ( % [ [ q ] ] ) <nl> + / / PerTensor : % [ [ conv : . * ] ] = " tfl . conv_2d " ( % arg0 , % [ [ dq ] ] <nl> } <nl> <nl> / / CHECK - LABEL : prepareDepthwiseConv2D <nl> func @ prepareDepthwiseConv2D ( % arg0 : tensor < 1x224x224x3xf32 > ) - > tensor < 1x112x112 <nl> / / CHECK - SAME : { 1 . 000000e + 00 , 1 . 000000e + 00 , 1 . 000000e + 00 } <nl> / / CHECK : % [ [ dq : . * ] ] = " tfl . dequantize " ( % [ [ q ] ] ) <nl> / / CHECK : % [ [ conv : . * ] ] = " tfl . depthwise_conv_2d " ( % arg0 , % [ [ dq ] ] <nl> + <nl> + / / PerTensor : % [ [ cst : . * ] ] = constant dense < 1 . 270000e + 02 > : tensor < 32x3x3x3xf32 > <nl> + / / PerTensor : % [ [ q : . * ] ] = " tfl . quantize " ( % [ [ cst ] ] ) { qtype = tensor < 32x3x3x3x ! quant . uniform < i8 < - 127 : 127 > : f32 , <nl> + / / PerTensor : % [ [ dq : . * ] ] = " tfl . dequantize " ( % [ [ q ] ] ) <nl> + / / PerTensor : % [ [ conv : . * ] ] = " tfl . depthwise_conv_2d " ( % arg0 , % [ [ dq ] ] <nl> } <nl> mmm a / tensorflow / compiler / mlir / lite / transforms / prepare_quantize . cc <nl> ppp b / tensorflow / compiler / mlir / lite / transforms / prepare_quantize . cc <nl> static llvm : : cl : : opt < bool > quantize_signed ( <nl> llvm : : cl : : desc ( " signed inference type . 
Only used in tests " ) , <nl> llvm : : cl : : init ( false ) ) ; <nl> <nl> + / / NOLINTNEXTLINE <nl> + static llvm : : cl : : opt < bool > disable_per_channel ( <nl> + " tfl - disable - per - channel " , llvm : : cl : : value_desc ( " bool " ) , <nl> + llvm : : cl : : desc ( " Whether disable per - channel quantized weights . " ) , <nl> + llvm : : cl : : init ( false ) ) ; <nl> + <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> / / The prepare - quantize Pass . <nl> / / <nl> void PrepareQuantizePass : : runOnFunction ( ) { <nl> <nl> / / Finally , the quantization parameters can be propagated to the rest of the <nl> / / values ( tensors ) . <nl> - ApplyQuantizationParamsPropagation ( func , is_signed , GetOpQuantSpec ) ; <nl> + ApplyQuantizationParamsPropagation ( func , is_signed , disable_per_channel , <nl> + GetOpQuantSpec ) ; <nl> } <nl> <nl> } / / namespace <nl>
Create a flag to disable per - channel quantized weight even the ops have the support
tensorflow/tensorflow
0a793b9dfd60ebfacda33cb52f0a12f089d41428
2019-11-20T00:23:16Z
mmm a / tests / lua - tests / project / Classes / lua_test_bindings . cpp <nl> ppp b / tests / lua - tests / project / Classes / lua_test_bindings . cpp <nl> int lua_cocos2dx_DrawNode3D_drawCube ( lua_State * L ) <nl> { <nl> lua_pushnumber ( L , i + 1 ) ; <nl> lua_gettable ( L , 2 ) ; <nl> - # ifndef TOLUA_RELEASE <nl> + # if COCOS2D_DEBUG > = 1 <nl> if ( ! tolua_istable ( L , - 1 , 0 , & tolua_err ) ) <nl> { <nl> lua_pop ( L , 1 ) ; <nl> int lua_cocos2dx_DrawNode3D_drawCube ( lua_State * L ) <nl> # endif <nl> ok & = luaval_to_vec3 ( L , lua_gettop ( L ) , & vec3 ) ; <nl> <nl> - # ifndef TOLUA_RELEASE <nl> + # if COCOS2D_DEBUG > = 1 <nl> if ( ! ok ) <nl> { <nl> lua_pop ( L , 1 ) ; <nl>
Fix the lua - tests compile error in the release condition
cocos2d/cocos2d-x
c4d3914230cfd2347479738d7701de8afb7ecec9
2014-10-17T06:16:17Z
mmm a / modules / ml / include / opencv2 / ml . hpp <nl> ppp b / modules / ml / include / opencv2 / ml . hpp <nl> class CV_EXPORTS_W TrainData <nl> / * * @ brief Returns vector of symbolic names captured in loadFromCSV ( ) * / <nl> CV_WRAP void getNames ( std : : vector < String > & names ) const ; <nl> <nl> - CV_WRAP static Mat getSubVector ( const Mat & vec , const Mat & idx ) ; <nl> + / * * @ brief Extract from 1D vector elements specified by passed indexes . <nl> + @ param vec input vector ( supported types : CV_32S , CV_32F , CV_64F ) <nl> + @ param idx 1D index vector <nl> + * / <nl> + static CV_WRAP Mat getSubVector ( const Mat & vec , const Mat & idx ) ; <nl> + <nl> + / * * @ brief Extract from matrix rows / cols specified by passed indexes . <nl> + @ param matrix input matrix ( supported types : CV_32S , CV_32F , CV_64F ) <nl> + @ param idx 1D index vector <nl> + @ param layout specifies to extract rows ( cv : : ml : : ROW_SAMPLES ) or to extract columns ( cv : : ml : : COL_SAMPLES ) <nl> + * / <nl> + static CV_WRAP Mat getSubMatrix ( const Mat & matrix , const Mat & idx , int layout ) ; <nl> <nl> / * * @ brief Reads the dataset from a . csv file and returns the ready - to - use training data . <nl> <nl> mmm a / modules / ml / src / data . cpp <nl> ppp b / modules / ml / src / data . cpp <nl> <nl> # include < algorithm > <nl> # include < iterator > <nl> <nl> + # include < opencv2 / core / utils / logger . hpp > <nl> + <nl> namespace cv { namespace ml { <nl> <nl> static const float MISSED_VAL = TrainData : : missingValue ( ) ; <nl> Mat TrainData : : getTestSamples ( ) const <nl> { <nl> Mat idx = getTestSampleIdx ( ) ; <nl> Mat samples = getSamples ( ) ; <nl> - return idx . empty ( ) ? Mat ( ) : getSubVector ( samples , idx ) ; <nl> + return idx . empty ( ) ? Mat ( ) : getSubMatrix ( samples , idx , getLayout ( ) ) ; <nl> } <nl> <nl> Mat TrainData : : getSubVector ( const Mat & vec , const Mat & idx ) <nl> { <nl> - if ( idx . empty ( ) ) <nl> - return vec ; <nl> - int i , j , n = idx . checkVector ( 1 , CV_32S ) ; <nl> - int type = vec . type ( ) ; <nl> - CV_Assert ( type = = CV_32S | | type = = CV_32F | | type = = CV_64F ) ; <nl> - int dims = 1 , m ; <nl> - <nl> - if ( vec . cols = = 1 | | vec . rows = = 1 ) <nl> + if ( ! ( vec . cols = = 1 | | vec . rows = = 1 ) ) <nl> + CV_LOG_WARNING ( NULL , " ' getSubVector ( const Mat & vec , const Mat & idx ) ' call with non - 1D input is deprecated . It is not designed to work with 2D matrixes ( especially with ' cv : : ml : : COL_SAMPLE ' layout ) . " ) ; <nl> + return getSubMatrix ( vec , idx , vec . rows = = 1 ? cv : : ml : : COL_SAMPLE : cv : : ml : : ROW_SAMPLE ) ; <nl> + } <nl> + <nl> + template < typename T > <nl> + Mat getSubMatrixImpl ( const Mat & m , const Mat & idx , int layout ) <nl> + { <nl> + int nidx = idx . checkVector ( 1 , CV_32S ) ; <nl> + int dims = m . cols , nsamples = m . rows ; <nl> + <nl> + Mat subm ; <nl> + if ( layout = = COL_SAMPLE ) <nl> { <nl> - dims = 1 ; <nl> - m = vec . cols + vec . rows - 1 ; <nl> + std : : swap ( dims , nsamples ) ; <nl> + subm . create ( dims , nidx , m . type ( ) ) ; <nl> } <nl> else <nl> { <nl> - dims = vec . cols ; <nl> - m = vec . rows ; <nl> + subm . create ( nidx , dims , m . type ( ) ) ; <nl> } <nl> <nl> - Mat subvec ; <nl> - <nl> - if ( vec . cols = = m ) <nl> - subvec . create ( dims , n , type ) ; <nl> - else <nl> - subvec . 
create ( n , dims , type ) ; <nl> - if ( type = = CV_32S ) <nl> - for ( i = 0 ; i < n ; i + + ) <nl> + for ( int i = 0 ; i < nidx ; i + + ) <nl> + { <nl> + int k = idx . at < int > ( i ) ; CV_CheckGE ( k , 0 , " Bad idx " ) ; CV_CheckLT ( k , nsamples , " Bad idx or layout " ) ; <nl> + if ( dims = = 1 ) <nl> { <nl> - int k = idx . at < int > ( i ) ; <nl> - CV_Assert ( 0 < = k & & k < m ) ; <nl> - if ( dims = = 1 ) <nl> - subvec . at < int > ( i ) = vec . at < int > ( k ) ; <nl> - else <nl> - for ( j = 0 ; j < dims ; j + + ) <nl> - subvec . at < int > ( i , j ) = vec . at < int > ( k , j ) ; <nl> + subm . at < T > ( i ) = m . at < T > ( k ) ; / / at ( ) has " transparent " access for 1D col - based / row - based vectors . <nl> } <nl> - else if ( type = = CV_32F ) <nl> - for ( i = 0 ; i < n ; i + + ) <nl> + else if ( layout = = COL_SAMPLE ) <nl> { <nl> - int k = idx . at < int > ( i ) ; <nl> - CV_Assert ( 0 < = k & & k < m ) ; <nl> - if ( dims = = 1 ) <nl> - subvec . at < float > ( i ) = vec . at < float > ( k ) ; <nl> - else <nl> - for ( j = 0 ; j < dims ; j + + ) <nl> - subvec . at < float > ( i , j ) = vec . at < float > ( k , j ) ; <nl> + for ( int j = 0 ; j < dims ; j + + ) <nl> + subm . at < T > ( j , i ) = m . at < T > ( j , k ) ; <nl> } <nl> - else <nl> - for ( i = 0 ; i < n ; i + + ) <nl> + else <nl> { <nl> - int k = idx . at < int > ( i ) ; <nl> - CV_Assert ( 0 < = k & & k < m ) ; <nl> - if ( dims = = 1 ) <nl> - subvec . at < double > ( i ) = vec . at < double > ( k ) ; <nl> - else <nl> - for ( j = 0 ; j < dims ; j + + ) <nl> - subvec . at < double > ( i , j ) = vec . at < double > ( k , j ) ; <nl> + for ( int j = 0 ; j < dims ; j + + ) <nl> + subm . at < T > ( i , j ) = m . at < T > ( k , j ) ; <nl> } <nl> - return subvec ; <nl> + } <nl> + return subm ; <nl> + } <nl> + <nl> + Mat TrainData : : getSubMatrix ( const Mat & m , const Mat & idx , int layout ) <nl> + { <nl> + if ( idx . empty ( ) ) <nl> + return m ; <nl> + int type = m . type ( ) ; <nl> + CV_CheckType ( type , type = = CV_32S | | type = = CV_32F | | type = = CV_64F , " " ) ; <nl> + if ( type = = CV_32S | | type = = CV_32F ) / / 32 - bit <nl> + return getSubMatrixImpl < int > ( m , idx , layout ) ; <nl> + if ( type = = CV_64F ) / / 64 - bit <nl> + return getSubMatrixImpl < double > ( m , idx , layout ) ; <nl> + CV_Error ( Error : : StsInternal , " " ) ; <nl> } <nl> <nl> class TrainDataImpl CV_FINAL : public TrainData <nl> class TrainDataImpl CV_FINAL : public TrainData <nl> } <nl> Mat getTrainSampleWeights ( ) const CV_OVERRIDE <nl> { <nl> - return getSubVector ( sampleWeights , getTrainSampleIdx ( ) ) ; <nl> + return getSubVector ( sampleWeights , getTrainSampleIdx ( ) ) ; / / 1D - vector <nl> } <nl> Mat getTestSampleWeights ( ) const CV_OVERRIDE <nl> { <nl> Mat idx = getTestSampleIdx ( ) ; <nl> - return idx . empty ( ) ? Mat ( ) : getSubVector ( sampleWeights , idx ) ; <nl> + return idx . empty ( ) ? 
Mat ( ) : getSubVector ( sampleWeights , idx ) ; / / 1D - vector <nl> } <nl> Mat getTrainResponses ( ) const CV_OVERRIDE <nl> { <nl> - return getSubVector ( responses , getTrainSampleIdx ( ) ) ; <nl> + return getSubMatrix ( responses , getTrainSampleIdx ( ) , cv : : ml : : ROW_SAMPLE ) ; / / col - based responses are transposed in setData ( ) <nl> } <nl> Mat getTrainNormCatResponses ( ) const CV_OVERRIDE <nl> { <nl> - return getSubVector ( normCatResponses , getTrainSampleIdx ( ) ) ; <nl> + return getSubMatrix ( normCatResponses , getTrainSampleIdx ( ) , cv : : ml : : ROW_SAMPLE ) ; / / like ' responses ' <nl> } <nl> Mat getTestResponses ( ) const CV_OVERRIDE <nl> { <nl> Mat idx = getTestSampleIdx ( ) ; <nl> - return idx . empty ( ) ? Mat ( ) : getSubVector ( responses , idx ) ; <nl> + return idx . empty ( ) ? Mat ( ) : getSubMatrix ( responses , idx , cv : : ml : : ROW_SAMPLE ) ; / / col - based responses are transposed in setData ( ) <nl> } <nl> Mat getTestNormCatResponses ( ) const CV_OVERRIDE <nl> { <nl> Mat idx = getTestSampleIdx ( ) ; <nl> - return idx . empty ( ) ? Mat ( ) : getSubVector ( normCatResponses , idx ) ; <nl> + return idx . empty ( ) ? Mat ( ) : getSubMatrix ( normCatResponses , idx , cv : : ml : : ROW_SAMPLE ) ; / / like ' responses ' <nl> } <nl> Mat getNormCatResponses ( ) const CV_OVERRIDE { return normCatResponses ; } <nl> Mat getClassLabels ( ) const CV_OVERRIDE { return classLabels ; } <nl> mmm a / modules / ml / test / test_mltests2 . cpp <nl> ppp b / modules / ml / test / test_mltests2 . cpp <nl> void CV_MLBaseTest : : load ( const char * filename ) <nl> CV_Error ( CV_StsNotImplemented , " invalid stat model name " ) ; <nl> } <nl> <nl> + <nl> + <nl> + TEST ( TrainDataGet , layout_ROW_SAMPLE ) / / Details : # 12236 <nl> + { <nl> + cv : : Mat test = cv : : Mat : : ones ( 150 , 30 , CV_32FC1 ) * 2 ; <nl> + test . col ( 3 ) + = Scalar : : all ( 3 ) ; <nl> + cv : : Mat labels = cv : : Mat : : ones ( 150 , 3 , CV_32SC1 ) * 5 ; <nl> + labels . col ( 1 ) + = 1 ; <nl> + cv : : Ptr < cv : : ml : : TrainData > train_data = cv : : ml : : TrainData : : create ( test , cv : : ml : : ROW_SAMPLE , labels ) ; <nl> + train_data - > setTrainTestSplitRatio ( 0 . 9 ) ; <nl> + <nl> + Mat tidx = train_data - > getTestSampleIdx ( ) ; <nl> + EXPECT_EQ ( ( size_t ) 15 , tidx . total ( ) ) ; <nl> + <nl> + Mat tresp = train_data - > getTestResponses ( ) ; <nl> + EXPECT_EQ ( 15 , tresp . rows ) ; <nl> + EXPECT_EQ ( labels . cols , tresp . cols ) ; <nl> + EXPECT_EQ ( 5 , tresp . at < int > ( 0 , 0 ) ) < < tresp ; <nl> + EXPECT_EQ ( 6 , tresp . at < int > ( 0 , 1 ) ) < < tresp ; <nl> + EXPECT_EQ ( 6 , tresp . at < int > ( 14 , 1 ) ) < < tresp ; <nl> + EXPECT_EQ ( 5 , tresp . at < int > ( 14 , 2 ) ) < < tresp ; <nl> + <nl> + Mat tsamples = train_data - > getTestSamples ( ) ; <nl> + EXPECT_EQ ( 15 , tsamples . rows ) ; <nl> + EXPECT_EQ ( test . cols , tsamples . cols ) ; <nl> + EXPECT_EQ ( 2 , tsamples . at < float > ( 0 , 0 ) ) < < tsamples ; <nl> + EXPECT_EQ ( 5 , tsamples . at < float > ( 0 , 3 ) ) < < tsamples ; <nl> + EXPECT_EQ ( 2 , tsamples . at < float > ( 14 , test . cols - 1 ) ) < < tsamples ; <nl> + EXPECT_EQ ( 5 , tsamples . at < float > ( 14 , 3 ) ) < < tsamples ; <nl> + } <nl> + <nl> + TEST ( TrainDataGet , layout_COL_SAMPLE ) / / Details : # 12236 <nl> + { <nl> + cv : : Mat test = cv : : Mat : : ones ( 30 , 150 , CV_32FC1 ) * 3 ; <nl> + test . row ( 3 ) + = Scalar : : all ( 3 ) ; <nl> + cv : : Mat labels = cv : : Mat : : ones ( 3 , 150 , CV_32SC1 ) * 5 ; <nl> + labels . 
row ( 1 ) + = 1 ; <nl> + cv : : Ptr < cv : : ml : : TrainData > train_data = cv : : ml : : TrainData : : create ( test , cv : : ml : : COL_SAMPLE , labels ) ; <nl> + train_data - > setTrainTestSplitRatio ( 0 . 9 ) ; <nl> + <nl> + Mat tidx = train_data - > getTestSampleIdx ( ) ; <nl> + EXPECT_EQ ( ( size_t ) 15 , tidx . total ( ) ) ; <nl> + <nl> + Mat tresp = train_data - > getTestResponses ( ) ; / / always row - based , transposed <nl> + EXPECT_EQ ( 15 , tresp . rows ) ; <nl> + EXPECT_EQ ( labels . rows , tresp . cols ) ; <nl> + EXPECT_EQ ( 5 , tresp . at < int > ( 0 , 0 ) ) < < tresp ; <nl> + EXPECT_EQ ( 6 , tresp . at < int > ( 0 , 1 ) ) < < tresp ; <nl> + EXPECT_EQ ( 6 , tresp . at < int > ( 14 , 1 ) ) < < tresp ; <nl> + EXPECT_EQ ( 5 , tresp . at < int > ( 14 , 2 ) ) < < tresp ; <nl> + <nl> + <nl> + Mat tsamples = train_data - > getTestSamples ( ) ; <nl> + EXPECT_EQ ( 15 , tsamples . cols ) ; <nl> + EXPECT_EQ ( test . rows , tsamples . rows ) ; <nl> + EXPECT_EQ ( 3 , tsamples . at < float > ( 0 , 0 ) ) < < tsamples ; <nl> + EXPECT_EQ ( 6 , tsamples . at < float > ( 3 , 0 ) ) < < tsamples ; <nl> + EXPECT_EQ ( 6 , tsamples . at < float > ( 3 , 14 ) ) < < tsamples ; <nl> + EXPECT_EQ ( 3 , tsamples . at < float > ( test . rows - 1 , 14 ) ) < < tsamples ; <nl> + } <nl> + <nl> + <nl> + <nl> } / / namespace <nl> / * End of file . * / <nl>
Merge pull request from alalek : fix_12236
opencv/opencv
c6f5b013eca54c38f4d2bb886ba18a462a901706
2018-08-20T13:53:27Z
mmm a / arangod / Indexes / CapConstraint . cpp <nl> ppp b / arangod / Indexes / CapConstraint . cpp <nl> triagens : : basics : : Json CapConstraint : : toJson ( TRI_memory_zone_t * zone , <nl> <nl> triagens : : basics : : Json CapConstraint : : toJsonFigures ( TRI_memory_zone_t * zone ) const { <nl> triagens : : basics : : Json json ( triagens : : basics : : Json : : Object ) ; <nl> + json ( " memory " , triagens : : basics : : Json ( static_cast < double > ( memory ( ) ) ) ) ; <nl> <nl> return json ; <nl> } <nl> mmm a / arangod / Indexes / EdgeIndex . cpp <nl> ppp b / arangod / Indexes / EdgeIndex . cpp <nl> triagens : : basics : : Json EdgeIndex : : toJson ( TRI_memory_zone_t * zone , <nl> triagens : : basics : : Json EdgeIndex : : toJsonFigures ( TRI_memory_zone_t * zone ) const { <nl> triagens : : basics : : Json json ( triagens : : basics : : Json : : Object ) ; <nl> <nl> + json ( " memory " , triagens : : basics : : Json ( static_cast < double > ( memory ( ) ) ) ) ; <nl> json ( " buckets " , triagens : : basics : : Json ( static_cast < double > ( _numBuckets ) ) ) ; <nl> <nl> return json ; <nl> mmm a / arangod / Indexes / FulltextIndex . cpp <nl> ppp b / arangod / Indexes / FulltextIndex . cpp <nl> triagens : : basics : : Json FulltextIndex : : toJson ( TRI_memory_zone_t * zone , <nl> <nl> triagens : : basics : : Json FulltextIndex : : toJsonFigures ( TRI_memory_zone_t * zone ) const { <nl> triagens : : basics : : Json json ( triagens : : basics : : Json : : Object ) ; <nl> + json ( " memory " , triagens : : basics : : Json ( static_cast < double > ( memory ( ) ) ) ) ; <nl> <nl> return json ; <nl> } <nl> mmm a / arangod / Indexes / GeoIndex2 . cpp <nl> ppp b / arangod / Indexes / GeoIndex2 . cpp <nl> triagens : : basics : : Json GeoIndex2 : : toJson ( TRI_memory_zone_t * zone , <nl> <nl> triagens : : basics : : Json GeoIndex2 : : toJsonFigures ( TRI_memory_zone_t * zone ) const { <nl> triagens : : basics : : Json json ( triagens : : basics : : Json : : Object ) ; <nl> + json ( " memory " , triagens : : basics : : Json ( static_cast < double > ( memory ( ) ) ) ) ; <nl> <nl> return json ; <nl> } <nl> mmm a / arangod / Indexes / HashIndex . cpp <nl> ppp b / arangod / Indexes / HashIndex . cpp <nl> triagens : : basics : : Json HashIndex : : toJson ( TRI_memory_zone_t * zone , <nl> triagens : : basics : : Json HashIndex : : toJsonFigures ( TRI_memory_zone_t * zone ) const { <nl> triagens : : basics : : Json json ( zone , triagens : : basics : : Json : : Object ) ; <nl> json ( " memory " , triagens : : basics : : Json ( static_cast < double > ( memory ( ) ) ) ) ; <nl> + <nl> if ( _unique ) { <nl> _uniqueArray . _hashArray - > appendToJson ( zone , json ) ; <nl> } <nl> mmm a / arangod / Indexes / PrimaryIndex . cpp <nl> ppp b / arangod / Indexes / PrimaryIndex . cpp <nl> triagens : : basics : : Json PrimaryIndex : : toJson ( TRI_memory_zone_t * zone , <nl> triagens : : basics : : Json PrimaryIndex : : toJsonFigures ( TRI_memory_zone_t * zone ) const { <nl> triagens : : basics : : Json json ( zone , triagens : : basics : : Json : : Object ) ; <nl> <nl> + json ( " memory " , triagens : : basics : : Json ( static_cast < double > ( memory ( ) ) ) ) ; <nl> json ( " nrAlloc " , triagens : : basics : : Json ( static_cast < double > ( _primaryIndex . _nrAlloc ) ) ) ; <nl> json ( " nrUsed " , triagens : : basics : : Json ( static_cast < double > ( _primaryIndex . _nrUsed ) ) ) ; <nl> <nl>
return memory usage for all indexes
arangodb/arangodb
473ed2c418e743323ce4d032bf038826ef796e72
2015-08-26T15:13:41Z
mmm a / tests / swoole_websocket_server / websocket_server_active_close . phpt <nl> ppp b / tests / swoole_websocket_server / websocket_server_active_close . phpt <nl> $ pm - > childFunc = function ( ) use ( $ pm ) { <nl> $ pm - > wakeup ( ) ; <nl> } ) ; <nl> $ serv - > on ( ' Message ' , function ( $ serv , $ frame ) { <nl> - if ( $ frame - > data = = ' shutdown ' ) { <nl> - $ serv - > disconnect ( $ frame - > fd , 4000 , ' shutdown received ' ) ; <nl> + if ( $ frame - > opcode = = 0x08 ) { <nl> + echo " { $ frame - > close_code } \ n " ; <nl> + echo " { $ frame - > data } \ n " ; <nl> + } else { <nl> + if ( $ frame - > data = = ' shutdown ' ) { <nl> + $ serv - > disconnect ( $ frame - > fd , 4000 , ' shutdown received ' ) ; <nl> + } <nl> } <nl> } ) ; <nl> - $ serv - > on ( ' WebsocketClose ' , function ( swoole_websocket_server $ serv , $ fd , $ from_id , $ code , $ reason ) use ( $ pm ) { <nl> - echo " { $ from_id } \ n " ; <nl> - echo " { $ code } \ n " ; <nl> - echo " { $ reason } \ n " ; <nl> - } ) ; <nl> $ serv - > start ( ) ; <nl> } ; <nl> $ pm - > childFirst ( ) ; <nl> $ pm - > run ( ) ; <nl> ? > <nl> - - EXPECT - - <nl> - - 1 <nl> 4000 <nl> shutdown received <nl>
Merge pull request from Inokinoki / unit - test
swoole/swoole-src
7610f5d1a28f4e5e091dcf444ab090fb96fefb0a
2018-08-22T18:21:54Z
mmm a / modules / dnn / src / tensorflow / tf_importer . cpp <nl> ppp b / modules / dnn / src / tensorflow / tf_importer . cpp <nl> Implementation of Tensorflow models parser <nl> # include < fstream > <nl> # include < algorithm > <nl> # include < string > <nl> + # include < queue > <nl> # include " tf_graph_simplifier . hpp " <nl> # endif <nl> <nl> static void addConstNodes ( tensorflow : : GraphDef & net , std : : map < String , int > & cons <nl> } <nl> } <nl> <nl> - / / If all inputs of specific layer have the same data layout we can say that <nl> - / / this layer ' s output has this data layout too . Returns DATA_LAYOUT_UNKNOWN otherwise . <nl> - static int predictOutputDataLayout ( const tensorflow : : NodeDef & layer , const std : : map < String , int > & data_layouts ) <nl> + static int getDataLayout ( const tensorflow : : NodeDef & layer ) <nl> { <nl> if ( hasLayerAttr ( layer , " data_format " ) ) <nl> { <nl> static int predictOutputDataLayout ( const tensorflow : : NodeDef & layer , const std : : <nl> else <nl> CV_Error ( Error : : StsParseError , " Unknown data_format value : " + format ) ; <nl> } <nl> + return DATA_LAYOUT_UNKNOWN ; <nl> + } <nl> + <nl> + static inline std : : string getNodeName ( const std : : string & tensorName ) <nl> + { <nl> + return tensorName . substr ( 0 , tensorName . rfind ( ' : ' ) ) ; <nl> + } <nl> + <nl> + / / If all inputs of specific layer have the same data layout we can say that <nl> + / / this layer ' s output has this data layout too . Returns DATA_LAYOUT_UNKNOWN otherwise . <nl> + static int predictOutputDataLayout ( const tensorflow : : GraphDef & net , <nl> + const tensorflow : : NodeDef & layer , <nl> + const std : : map < String , int > & data_layouts ) <nl> + { <nl> + int layout = getDataLayout ( layer ) ; <nl> + if ( layout ! = DATA_LAYOUT_UNKNOWN ) <nl> + return layout ; <nl> <nl> / / Determine layout by layer ' s inputs <nl> - int layout = DATA_LAYOUT_UNKNOWN ; <nl> std : : map < String , int > : : const_iterator it ; <nl> for ( int i = 0 , n = layer . input_size ( ) ; i < n ; + + i ) <nl> { <nl> - it = data_layouts . find ( layer . input ( i ) . substr ( 0 , layer . input ( i ) . rfind ( ' : ' ) ) ) ; <nl> + it = data_layouts . find ( getNodeName ( layer . input ( i ) ) ) ; <nl> if ( it ! = data_layouts . end ( ) ) <nl> { <nl> - if ( it - > second = = DATA_LAYOUT_UNKNOWN ) <nl> - return DATA_LAYOUT_UNKNOWN ; <nl> - else if ( it - > second ! = layout ) <nl> + if ( layout ! = DATA_LAYOUT_UNKNOWN ) <nl> { <nl> - if ( layout = = DATA_LAYOUT_UNKNOWN ) <nl> - layout = it - > second ; <nl> - else <nl> + if ( it - > second ! = layout & & it - > second ! = DATA_LAYOUT_UNKNOWN ) <nl> return DATA_LAYOUT_UNKNOWN ; <nl> } <nl> + else <nl> + layout = it - > second ; <nl> } <nl> } <nl> - return layout ; <nl> + <nl> + if ( layout ! = DATA_LAYOUT_UNKNOWN ) <nl> + return layout ; <nl> + <nl> + / / Determine layout by layer ' s consumers recursively . <nl> + it = data_layouts . find ( layer . name ( ) ) ; <nl> + CV_Assert ( it ! = data_layouts . end ( ) ) ; <nl> + return it - > second ; <nl> } <nl> <nl> void TFImporter : : populateNet ( Net dstNet ) <nl> void TFImporter : : populateNet ( Net dstNet ) <nl> int layersSize = net . node_size ( ) ; <nl> <nl> std : : map < String , int > data_layouts ; <nl> + / / Pre - fill data layouts where they are set explicitly . <nl> + / / Assuming that nodes are in topological order <nl> + for ( int i = net . node_size ( ) - 1 ; i > = 0 ; - - i ) <nl> + { <nl> + const tensorflow : : NodeDef & layer = net . 
node ( i ) ; <nl> + std : : string name = layer . name ( ) ; <nl> + <nl> + int layout = getDataLayout ( layer ) ; <nl> + std : : map < String , int > : : iterator it = data_layouts . find ( name ) ; <nl> + if ( it ! = data_layouts . end ( ) ) <nl> + { <nl> + if ( layout ! = DATA_LAYOUT_UNKNOWN ) <nl> + { <nl> + if ( it - > second = = DATA_LAYOUT_UNKNOWN ) <nl> + it - > second = layout ; <nl> + else if ( it - > second ! = layout ) <nl> + { <nl> + it - > second = DATA_LAYOUT_UNKNOWN ; <nl> + layout = DATA_LAYOUT_UNKNOWN ; <nl> + } <nl> + } <nl> + else <nl> + layout = it - > second ; <nl> + } <nl> + else <nl> + data_layouts [ name ] = layout ; <nl> + <nl> + / / Specify input layers to have the same data layout . <nl> + for ( int j = 0 ; j < layer . input_size ( ) ; + + j ) <nl> + { <nl> + name = getNodeName ( layer . input ( j ) ) ; <nl> + it = data_layouts . find ( name ) ; <nl> + if ( it ! = data_layouts . end ( ) ) <nl> + { <nl> + if ( layout ! = DATA_LAYOUT_UNKNOWN ) <nl> + { <nl> + if ( it - > second = = DATA_LAYOUT_UNKNOWN ) <nl> + it - > second = layout ; <nl> + else if ( it - > second ! = layout ) <nl> + it - > second = DATA_LAYOUT_UNKNOWN ; <nl> + } <nl> + } <nl> + else <nl> + data_layouts [ name ] = layout ; <nl> + } <nl> + } <nl> <nl> / / find all Const layers for params <nl> std : : map < String , int > value_id ; <nl> void TFImporter : : populateNet ( Net dstNet ) <nl> if ( layers_to_ignore . find ( name ) ! = layers_to_ignore . end ( ) ) <nl> continue ; <nl> <nl> - data_layouts [ name ] = predictOutputDataLayout ( layer , data_layouts ) ; <nl> + int predictedLayout = predictOutputDataLayout ( net , layer , data_layouts ) ; <nl> + data_layouts [ name ] = predictedLayout ; <nl> <nl> if ( type = = " Conv2D " | | type = = " SpaceToBatchND " | | type = = " DepthwiseConv2dNative " ) <nl> { <nl> void TFImporter : : populateNet ( Net dstNet ) <nl> <nl> / / one input only <nl> connect ( layer_id , dstNet , inpId , id , 0 ) ; <nl> + data_layouts [ name ] = DATA_LAYOUT_UNKNOWN ; <nl> } <nl> else if ( type = = " Flatten " | | type = = " Squeeze " ) <nl> { <nl> void TFImporter : : populateNet ( Net dstNet ) <nl> { <nl> int axisId = ( type = = " Concat " ? 0 : layer . input_size ( ) - 1 ) ; <nl> int axis = getConstBlob ( layer , value_id , axisId ) . int_val ( ) . Get ( 0 ) ; <nl> - layerParams . set ( " axis " , 0 < = axis & & axis < 4 ? toNCHW ( axis ) : axis ) ; <nl> + <nl> + if ( data_layouts [ name ] = = DATA_LAYOUT_NHWC ) <nl> + axis = toNCHW ( axis ) ; <nl> + layerParams . set ( " axis " , axis ) ; <nl> <nl> int id = dstNet . addLayer ( name , " Concat " , layerParams ) ; <nl> layer_id [ name ] = id ; <nl> mmm a / modules / dnn / test / test_tf_importer . cpp <nl> ppp b / modules / dnn / test / test_tf_importer . cpp <nl> TEST_P ( Test_TensorFlow_layers , eltwise_add_mul ) <nl> runTensorFlowNet ( " eltwise_add_mul " , GetParam ( ) ) ; <nl> } <nl> <nl> - TEST_P ( Test_TensorFlow_layers , pad_and_concat ) <nl> + TEST_P ( Test_TensorFlow_layers , concat ) <nl> { <nl> runTensorFlowNet ( " pad_and_concat " , GetParam ( ) ) ; <nl> + runTensorFlowNet ( " concat_axis_1 " , GetParam ( ) ) ; <nl> } <nl> <nl> TEST_P ( Test_TensorFlow_layers , batch_norm ) <nl>
Use layers consumers to predict data layout
opencv/opencv
715f40a48d9212e0836f34cc577138f3a8a4cf44
2018-06-25T15:25:40Z
mmm a / tensorflow / compiler / xla / service / gpu / gemm_rewriter . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / gemm_rewriter . cc <nl> static double GetScalarConstantAsDouble ( const Literal & literal ) { <nl> case F64 : <nl> return literal . Get < double > ( { } ) ; <nl> default : <nl> - LOG ( FATAL ) < < " Unsupported type . " ; <nl> + LOG ( FATAL ) < < " Unsupported type : " < < literal . shape ( ) ; <nl> } <nl> } <nl> <nl> class GemmRewriterVisitor : public DfsHloVisitorWithDefault { <nl> m : : Broadcast ( m : : ConstantScalar ( & alpha ) ) ) ) ) { <nl> TF_ASSIGN_OR_RETURN ( auto config , <nl> existing_gemm - > backend_config < GemmBackendConfig > ( ) ) ; <nl> - if ( config . beta ( ) = = 0 . 0 & & existing_gemm - > user_count ( ) = = 1 ) { <nl> + if ( config . beta ( ) = = 0 . 0 & & existing_gemm - > user_count ( ) = = 1 & & <nl> + ShapeUtil : : ElementIsFloating ( alpha - > literal ( ) . shape ( ) ) ) { <nl> double prev_alpha = config . alpha ( ) ; <nl> config . set_alpha ( GetScalarConstantAsDouble ( alpha - > literal ( ) ) * <nl> prev_alpha ) ; <nl>
[ XLA GPU ] Fix GEMM rewrite to only fold floating - point - like constant
tensorflow/tensorflow
25bb5f166a6e9ad374c8ae7f1ec646ffa49a0203
2019-06-25T01:46:41Z
mmm a / tensorflow / c / c_test_util . cc <nl> ppp b / tensorflow / c / c_test_util . cc <nl> static void Int32Deallocator ( void * data , size_t , void * arg ) { <nl> delete [ ] static_cast < int32_t * > ( data ) ; <nl> } <nl> <nl> + static void DoubleDeallocator ( void * data , size_t , void * arg ) { <nl> + delete [ ] static_cast < double * > ( data ) ; <nl> + } <nl> + <nl> TF_Tensor * Int8Tensor ( const int64_t * dims , int num_dims , const char * values ) { <nl> int64_t num_values = 1 ; <nl> for ( int i = 0 ; i < num_dims ; + + i ) { <nl> TF_Tensor * Int32Tensor ( int32_t v ) { <nl> & Int32Deallocator , nullptr ) ; <nl> } <nl> <nl> + TF_Tensor * DoubleTensor ( double v ) { <nl> + const int num_bytes = sizeof ( double ) ; <nl> + double * values = new double [ 1 ] ; <nl> + values [ 0 ] = v ; <nl> + return TF_NewTensor ( TF_DOUBLE , nullptr , 0 , values , num_bytes , <nl> + & DoubleDeallocator , nullptr ) ; <nl> + } <nl> + <nl> / / All the * Helper methods are used as a workaround for the restrictions that <nl> / / one cannot call ASSERT_ * methods in non - void - returning functions ( when <nl> / / exceptions are disabled during compilation ) <nl> TF_Operation * ScalarConst ( int32_t v , TF_Graph * graph , TF_Status * s , <nl> return Const ( tensor . get ( ) , graph , s , name ) ; <nl> } <nl> <nl> + TF_Operation * ScalarConst ( double v , TF_Graph * graph , TF_Status * s , <nl> + const char * name ) { <nl> + unique_tensor_ptr tensor ( DoubleTensor ( v ) , TF_DeleteTensor ) ; <nl> + return Const ( tensor . get ( ) , graph , s , name ) ; <nl> + } <nl> + <nl> void AddHelper ( TF_Operation * l , TF_Operation * r , TF_Graph * graph , TF_Status * s , <nl> const char * name , TF_Operation * * op , bool check ) { <nl> TF_OperationDescription * desc = TF_NewOperation ( graph , " AddN " , name ) ; <nl> mmm a / tensorflow / c / c_test_util . h <nl> ppp b / tensorflow / c / c_test_util . h <nl> TF_Tensor * Int32Tensor ( const std : : vector < int32_t > & values ) ; <nl> <nl> TF_Tensor * Int32Tensor ( int32_t v ) ; <nl> <nl> + TF_Tensor * DoubleTensor ( double v ) ; <nl> + <nl> TF_Operation * Placeholder ( TF_Graph * graph , TF_Status * s , <nl> const char * name = " feed " ) ; <nl> <nl> TF_Operation * Const ( TF_Tensor * t , TF_Graph * graph , TF_Status * s , <nl> TF_Operation * ScalarConst ( int32_t v , TF_Graph * graph , TF_Status * s , <nl> const char * name = " scalar " ) ; <nl> <nl> + TF_Operation * ScalarConst ( double v , TF_Graph * graph , TF_Status * s , <nl> + const char * name = " scalar " ) ; <nl> + <nl> TF_Operation * Add ( TF_Operation * l , TF_Operation * r , TF_Graph * graph , <nl> TF_Status * s , const char * name = " add " ) ; <nl> <nl> mmm a / tensorflow / c / while_loop_test . cc <nl> ppp b / tensorflow / c / while_loop_test . 
cc <nl> TEST_F ( CApiWhileLoopTest , NestedLoop ) { <nl> ExpectOutputValue ( 1 , 3 ) ; <nl> } <nl> <nl> - TEST_F ( CApiWhileLoopTest , BadCondOutput ) { <nl> + TEST_F ( CApiWhileLoopTest , UnsetCondOutput ) { <nl> Init ( 1 ) ; <nl> params_ - > body_outputs [ 0 ] = params_ - > body_inputs [ 0 ] ; <nl> ExpectError ( TF_INVALID_ARGUMENT , <nl> " TF_WhileParams ` cond_output ` field isn ' t set " ) ; <nl> } <nl> <nl> - TEST_F ( CApiWhileLoopTest , BadBodyOutput ) { <nl> + TEST_F ( CApiWhileLoopTest , WrongCondOutputType ) { <nl> + Init ( 1 ) ; <nl> + params_ - > cond_output = params_ - > cond_inputs [ 0 ] ; <nl> + params_ - > body_outputs [ 0 ] = params_ - > body_inputs [ 0 ] ; <nl> + ExpectError ( TF_INVALID_ARGUMENT , <nl> + " BuildWhileLoop : ' cond ' argument must return a boolean output , " <nl> + " got int32 " ) ; <nl> + } <nl> + <nl> + TEST_F ( CApiWhileLoopTest , InvalidCondOutputNode ) { <nl> + Init ( 1 ) ; <nl> + / / Try to reuse node from parent graph <nl> + params_ - > cond_output = inputs_ [ 0 ] ; <nl> + params_ - > body_outputs [ 0 ] = params_ - > body_inputs [ 0 ] ; <nl> + / / TODO ( skyewm ) : this error message could be more informative . Add explicit <nl> + / / checks for this case in the while loop implementation ? <nl> + ExpectError ( TF_INVALID_ARGUMENT , <nl> + " Requested return node ' p0 ' not found in graph def " ) ; <nl> + } <nl> + <nl> + TEST_F ( CApiWhileLoopTest , InvalidCondOutputIndex ) { <nl> + Init ( 1 ) ; <nl> + CreateCondGraph ( ) ; <nl> + params_ - > cond_output . index = 100 ; <nl> + params_ - > body_outputs [ 0 ] = params_ - > body_inputs [ 0 ] ; <nl> + ExpectError ( TF_INVALID_ARGUMENT , <nl> + " Invalid return output 100 of node ' less_than ' , which has 1 " <nl> + " output ( s ) " ) ; <nl> + } <nl> + <nl> + / / TODO ( skyewm ) : test bad cond output shape <nl> + <nl> + TEST_F ( CApiWhileLoopTest , UnsetBodyOutput ) { <nl> Init ( 1 ) ; <nl> CreateCondGraph ( ) ; <nl> ExpectError ( TF_INVALID_ARGUMENT , <nl> " TF_WhileParams ` body_outputs [ 0 ] ` field isn ' t set " ) ; <nl> } <nl> <nl> + / / TODO ( skyewm ) : enable this when it works ( currently doesn ' t error ) <nl> + / / TEST_F ( CApiWhileLoopTest , WrongBodyOutputType ) { <nl> + / / Init ( 1 ) ; <nl> + / / CreateCondGraph ( ) ; <nl> + / / TF_Operation * double_scalar = <nl> + / / ScalarConst ( 1 . 0 , params_ - > body_graph , s_ , " double_scalar " ) ; <nl> + / / params_ - > body_outputs [ 0 ] = { double_scalar , 0 } ; <nl> + / / ExpectError ( TF_INVALID_ARGUMENT , " bad body output type " ) ; <nl> + / / } <nl> + <nl> + TEST_F ( CApiWhileLoopTest , InvalidBodyOutputNode ) { <nl> + Init ( 1 ) ; <nl> + CreateCondGraph ( ) ; <nl> + / / Try to reuse node from parent graph <nl> + params_ - > body_outputs [ 0 ] = inputs_ [ 0 ] ; <nl> + / / TODO ( skyewm ) : this error message could be more informative . Add explicit <nl> + / / checks for this case in the while loop implementation ? <nl> + ExpectError ( TF_INVALID_ARGUMENT , <nl> + " Requested return node ' p0 ' not found in graph def " ) ; <nl> + } <nl> + <nl> + / / TODO ( skyewm ) : enable this when it works ( currently segfaults ! ) <nl> + / / TEST_F ( CApiWhileLoopTest , InvalidBodyOutputIndex ) { <nl> + / / Init ( 1 ) ; <nl> + / / CreateCondGraph ( ) ; <nl> + / / params_ - > body_outputs [ 0 ] = params_ - > body_inputs [ 0 ] ; <nl> + / / params_ - > body_outputs [ 0 ] . 
index = 100 ; <nl> + / / ExpectError ( TF_INVALID_ARGUMENT , <nl> + / / " Invalid return output 100 of node ' less_than ' , which has 1 " <nl> + / / " output ( s ) " ) ; <nl> + / / } <nl> + <nl> + / / TODO ( skyewm ) : test bad body output shape <nl> + <nl> TEST_F ( CApiWhileLoopTest , NullName ) { <nl> Init ( 1 ) ; <nl> CreateCondGraph ( ) ; <nl> mmm a / tensorflow / cc / BUILD <nl> ppp b / tensorflow / cc / BUILD <nl> cc_library_with_android_deps ( <nl> ] , <nl> ) <nl> <nl> + tf_cc_test ( <nl> + name = " ops_while_loop_test " , <nl> + size = " small " , <nl> + srcs = [ " ops / while_loop_test . cc " ] , <nl> + deps = [ <nl> + " : cc_ops " , <nl> + " : client_session " , <nl> + " : testutil " , <nl> + " : while_loop " , <nl> + " / / tensorflow / core : test " , <nl> + " / / tensorflow / core : test_main " , <nl> + " / / tensorflow / core : testlib " , <nl> + ] , <nl> + ) <nl> + <nl> cc_library ( <nl> name = " grad_op_registry " , <nl> srcs = [ " framework / grad_op_registry . cc " ] , <nl> mmm a / tensorflow / cc / ops / while_loop . cc <nl> ppp b / tensorflow / cc / ops / while_loop . cc <nl> Status CreateCond ( const Scope & scope , const CondGraphBuilderFn & cond , <nl> scope . NewSubScope ( " cond " ) . WithControlDependencies ( inputs [ 0 ] ) ; <nl> Output raw_cond_out ; <nl> TF_RETURN_IF_ERROR ( cond ( cond_scope , inputs , & raw_cond_out ) ) ; <nl> + <nl> + TF_RETURN_IF_ERROR ( scope . graph ( ) - > IsValidOutputTensor ( raw_cond_out . node ( ) , <nl> + raw_cond_out . index ( ) ) ) ; <nl> if ( raw_cond_out . type ( ) ! = DT_BOOL ) { <nl> return errors : : InvalidArgument ( <nl> " BuildWhileLoop : ' cond ' argument must return a boolean output , got " , <nl> DataTypeString ( raw_cond_out . type ( ) ) ) ; <nl> } <nl> + / / TODO ( skyewm ) : check that raw_cond_out is scalar <nl> + <nl> * output = LoopCond ( scope , raw_cond_out ) . output ; <nl> return Status : : OK ( ) ; <nl> } <nl> Status CreateBody ( const Scope & scope , const BodyGraphBuilderFn & body , <nl> Scope body_scope = <nl> scope . NewSubScope ( " body " ) . WithControlDependencies ( inputs [ 0 ] ) ; <nl> TF_RETURN_IF_ERROR ( body ( body_scope , inputs , outputs ) ) ; <nl> + <nl> const size_t num_loop_vars = inputs . size ( ) ; <nl> if ( outputs - > size ( ) ! = num_loop_vars ) { <nl> return errors : : InvalidArgument ( <nl> " BuildWhileLoop : ' body ' argument expected to return " , num_loop_vars , <nl> - " outputs , got " , outputs - > size ( ) ) ; <nl> + " output ( s ) , got " , outputs - > size ( ) ) ; <nl> + } <nl> + for ( const Output & output : * outputs ) { <nl> + TF_RETURN_IF_ERROR ( <nl> + scope . graph ( ) - > IsValidOutputTensor ( output . node ( ) , output . index ( ) ) ) ; <nl> + / / TODO ( skyewm ) : check output types / shapes <nl> } <nl> - / / TODO ( skyewm ) : check output types / shapes <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> new file mode 100644 <nl> index 0000000000000 . . 77028b5c41d5e <nl> mmm / dev / null <nl> ppp b / tensorflow / cc / ops / while_loop_test . cc <nl> <nl> + / * Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 
0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / cc / ops / while_loop . h " <nl> + # include " tensorflow / cc / client / client_session . h " <nl> + # include " tensorflow / cc / ops / standard_ops . h " <nl> + # include " tensorflow / core / framework / tensor_testutil . h " <nl> + # include " tensorflow / core / lib / core / status_test_util . h " <nl> + # include " tensorflow / core / platform / test . h " <nl> + <nl> + namespace tensorflow { <nl> + <nl> + namespace { <nl> + <nl> + class WhileLoopTest : public : : testing : : Test { <nl> + protected : <nl> + WhileLoopTest ( ) : scope_ ( Scope : : NewRootScope ( ) ) { } <nl> + <nl> + void Init ( int num_inputs , DataType dtype = DT_INT32 ) { <nl> + for ( int i = 0 ; i < num_inputs ; + + i ) { <nl> + inputs_ . push_back ( ops : : Placeholder ( scope_ , dtype ) ) ; <nl> + } <nl> + } <nl> + <nl> + void CreateLoop ( const ops : : CondGraphBuilderFn & cond , <nl> + const ops : : BodyGraphBuilderFn & body , <nl> + error : : Code error_code = error : : OK , <nl> + const string & error_msg = " " ) { <nl> + Status s = ops : : BuildWhileLoop ( scope_ , inputs_ , cond , body , " test_loop " , <nl> + & outputs_ ) ; <nl> + EXPECT_EQ ( s . code ( ) , error_code ) ; <nl> + EXPECT_EQ ( s . error_message ( ) , error_msg ) ; <nl> + } <nl> + <nl> + template < typename T > <nl> + void Run ( const std : : vector < Input : : Initializer > & input_values , <nl> + const std : : vector < T > & expected_output_values ) { <nl> + ClientSession session ( scope_ ) ; <nl> + <nl> + DCHECK_EQ ( input_values . size ( ) , inputs_ . size ( ) ) ; <nl> + ClientSession : : FeedType feeds ; <nl> + for ( int i = 0 ; i < inputs_ . size ( ) ; + + i ) { <nl> + feeds . emplace ( inputs_ [ i ] , input_values [ i ] ) ; <nl> + } <nl> + <nl> + std : : vector < Tensor > out_tensors ; <nl> + TF_ASSERT_OK ( session . Run ( feeds , outputs_ , & out_tensors ) ) ; <nl> + ASSERT_EQ ( out_tensors . size ( ) , outputs_ . size ( ) ) ; <nl> + <nl> + DCHECK_EQ ( expected_output_values . size ( ) , out_tensors . size ( ) ) ; <nl> + for ( int i = 0 ; i < out_tensors . size ( ) ; + + i ) { <nl> + test : : ExpectTensorEqual < T > ( <nl> + out_tensors [ i ] , test : : AsTensor < T > ( { expected_output_values [ i ] } , { } ) ) ; <nl> + } <nl> + } <nl> + <nl> + Scope scope_ ; <nl> + std : : vector < Output > inputs_ ; <nl> + std : : vector < Output > outputs_ ; <nl> + } ; <nl> + <nl> + Status LessThanTenCond ( const Scope & s , const std : : vector < Output > & inputs , <nl> + Output * output ) { <nl> + * output = ops : : Less ( s , inputs [ 0 ] , 10 ) ; <nl> + return s . status ( ) ; <nl> + } <nl> + <nl> + Status AddOneBody ( const Scope & s , const std : : vector < Output > & inputs , <nl> + std : : vector < Output > * outputs ) { <nl> + outputs - > push_back ( ops : : Add ( s , inputs [ 0 ] , 1 ) ) ; <nl> + return s . 
status ( ) ; <nl> + } <nl> + <nl> + TEST_F ( WhileLoopTest , Basic ) { <nl> + / / Create loop : while ( i < 10 ) i + = 1 <nl> + Init ( 1 ) ; <nl> + CreateLoop ( LessThanTenCond , AddOneBody ) ; <nl> + Run < int > ( { 1 } , { 10 } ) ; <nl> + Run < int > ( { 11 } , { 11 } ) ; <nl> + } <nl> + <nl> + TEST_F ( WhileLoopTest , WrongCondOutputType ) { <nl> + Init ( 1 ) ; <nl> + CreateLoop ( <nl> + [ ] ( const Scope & s , const std : : vector < Output > & inputs , Output * output ) { <nl> + * output = ops : : Placeholder ( s , DT_FLOAT ) ; <nl> + return s . status ( ) ; <nl> + } , <nl> + AddOneBody , error : : INVALID_ARGUMENT , <nl> + " BuildWhileLoop : ' cond ' argument must return a boolean output , got " <nl> + " float " ) ; <nl> + } <nl> + <nl> + / / TODO ( skyewm ) : test bad cond output shape <nl> + <nl> + TEST_F ( WhileLoopTest , NullCondOutputNode ) { <nl> + Init ( 1 ) ; <nl> + / / TODO ( skyewm ) : improve error message <nl> + CreateLoop ( <nl> + [ ] ( const Scope & s , const std : : vector < Output > & inputs , Output * output ) { <nl> + * output = { nullptr , 0 } ; <nl> + return s . status ( ) ; <nl> + } , <nl> + AddOneBody , error : : INVALID_ARGUMENT , " Node is null " ) ; <nl> + } <nl> + <nl> + TEST_F ( WhileLoopTest , InvalidCondOutputIndex ) { <nl> + Init ( 1 ) ; <nl> + CreateLoop ( <nl> + [ ] ( const Scope & s , const std : : vector < Output > & inputs , Output * output ) { <nl> + auto less = ops : : Less ( s , inputs [ 0 ] , 10 ) ; <nl> + * output = { less . node ( ) , 100 } ; <nl> + return s . status ( ) ; <nl> + } , <nl> + AddOneBody , error : : INVALID_ARGUMENT , <nl> + " Node ' cond / Less ' ( type : ' Less ' , num of outputs : 1 ) does not have output " <nl> + " 100 " ) ; <nl> + } <nl> + <nl> + TEST_F ( WhileLoopTest , UnsetCondOutput ) { <nl> + Init ( 1 ) ; <nl> + CreateLoop ( [ ] ( const Scope & s , const std : : vector < Output > & inputs , <nl> + Output * output ) { return s . status ( ) ; } , <nl> + AddOneBody , error : : INVALID_ARGUMENT , " Node is null " ) ; <nl> + } <nl> + <nl> + / / TODO ( skyewm ) : test bad body output type <nl> + / / TODO ( skyewm ) : test bad body output shape <nl> + <nl> + TEST_F ( WhileLoopTest , NullBodyOutputNode ) { <nl> + Init ( 1 ) ; <nl> + / / TODO ( skyewm ) : improve error message <nl> + CreateLoop ( LessThanTenCond , <nl> + [ ] ( const Scope & s , const std : : vector < Output > & inputs , <nl> + std : : vector < Output > * outputs ) { <nl> + outputs - > push_back ( { nullptr , 0 } ) ; <nl> + return s . status ( ) ; <nl> + } , <nl> + error : : INVALID_ARGUMENT , " Node is null " ) ; <nl> + } <nl> + <nl> + TEST_F ( WhileLoopTest , InvalidBodyOutputIndex ) { <nl> + Init ( 1 ) ; <nl> + CreateLoop ( LessThanTenCond , <nl> + [ ] ( const Scope & s , const std : : vector < Output > & inputs , <nl> + std : : vector < Output > * outputs ) { <nl> + auto add = ops : : Add ( s , inputs [ 0 ] , 1 ) ; <nl> + outputs - > emplace_back ( add . node ( ) , 100 ) ; <nl> + return s . status ( ) ; <nl> + } , <nl> + error : : INVALID_ARGUMENT , <nl> + " Node ' body / Add ' ( type : ' Add ' , num of outputs : 1 ) does not have " <nl> + " output 100 " ) ; <nl> + } <nl> + <nl> + TEST_F ( WhileLoopTest , UnsetBodyOutputs ) { <nl> + Init ( 1 ) ; <nl> + CreateLoop ( <nl> + LessThanTenCond , <nl> + [ ] ( const Scope & s , const std : : vector < Output > & inputs , <nl> + std : : vector < Output > * outputs ) { return s . 
status ( ) ; } , <nl> + error : : INVALID_ARGUMENT , <nl> + " BuildWhileLoop : ' body ' argument expected to return 1 output ( s ) , got 0 " ) ; <nl> + } <nl> + <nl> + } / / namespace <nl> + } / / namespace tensorflow <nl> mmm a / tensorflow / core / graph / graph_constructor . cc <nl> ppp b / tensorflow / core / graph / graph_constructor . cc <nl> Status GraphConstructor : : PopulateReturnTensors ( ) { <nl> id . second ! = Graph : : kControlSlot ) { <nl> return errors : : InvalidArgument ( " Invalid return output " , id . second , <nl> " of node ' " , id . first , " ' , which has " , <nl> - num_outputs , " outputs " ) ; <nl> + num_outputs , " output ( s ) " ) ; <nl> } <nl> return_tensors_ - > push_back ( { iter - > second . node , id . second } ) ; <nl> } else { <nl> mmm a / tensorflow / core / graph / graph_constructor_test . cc <nl> ppp b / tensorflow / core / graph / graph_constructor_test . cc <nl> TEST_F ( GraphConstructorTest , ImportGraphDef_ReturnTensorsErrors ) { <nl> opts . return_tensors . push_back ( { " new_input " , 2 } ) ; <nl> ExpectError ( " node { name : ' new_input ' op : ' TestInput ' } " , opts , <nl> { " Invalid return output 2 of node ' new_input ' , which has 2 " <nl> - " outputs " } , <nl> + " output ( s ) " } , <nl> nullptr , & return_tensors ) ; <nl> } <nl> <nl>
More C + + while loop validation
tensorflow/tensorflow
83ba41e0a38d211fcdb5e3b4e212ef296dc96490
2017-09-07T23:06:27Z
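The test fixture in this commit drives ops::BuildWhileLoop through CreateLoop and Run; as a quick standalone illustration of the same client-facing pattern, a minimal program might look like the sketch below (the main() scaffolding and TF_CHECK_OK error handling are assumptions of a typical cc client program, not part of the commit itself).

#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/cc/ops/while_loop.h"

using namespace tensorflow;

int main() {
  Scope scope = Scope::NewRootScope();
  std::vector<Output> inputs = {ops::Placeholder(scope, DT_INT32)};
  std::vector<Output> outputs;
  // Build: while (i < 10) i += 1, mirroring LessThanTenCond / AddOneBody above.
  TF_CHECK_OK(ops::BuildWhileLoop(
      scope, inputs,
      [](const Scope& s, const std::vector<Output>& in, Output* out) {
        *out = ops::Less(s, in[0], 10);  // loop condition
        return s.status();
      },
      [](const Scope& s, const std::vector<Output>& in,
         std::vector<Output>* out) {
        out->push_back(ops::Add(s, in[0], 1));  // loop body
        return s.status();
      },
      "while_loop", &outputs));
  ClientSession session(scope);
  std::vector<Tensor> results;
  TF_CHECK_OK(session.Run({{inputs[0], 1}}, outputs, &results));
  // Feeding 1 yields a scalar 10 in results[0], as in the Basic test case.
  return 0;
}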
mmm a / include / swift / SILOptimizer / Analysis / ArraySemantic . h <nl> ppp b / include / swift / SILOptimizer / Analysis / ArraySemantic . h <nl> enum class ArrayCallKind { <nl> / / a function , and it has a self parameter , make sure that it is defined <nl> / / before this comment . <nl> kArrayInit , <nl> - kArrayUninitialized <nl> + kArrayUninitialized , <nl> + kArrayUninitializedIntrinsic <nl> } ; <nl> <nl> / / / Wrapper around array semantic calls . <nl> class ArraySemanticsCall { <nl> / / / Can this function be inlined by the early inliner . <nl> bool canInlineEarly ( ) const ; <nl> <nl> + / / / If this is a call to ArrayUninitialized ( or <nl> + / / / ArrayUninitializedInstrinsic ) , identify the instructions that store <nl> + / / / elements into the array indices . For every index , add the store <nl> + / / / instruction that stores to that index to \ p ElementStoreMap . <nl> + / / / <nl> + / / / \ returns true iff this is an " array . uninitialized " semantic call , and the <nl> + / / / stores into the array indices are identified and the \ p ElementStoreMap is <nl> + / / / populated . <nl> + / / / <nl> + / / / Note that this function does not support array initializations that use <nl> + / / / copy_addr , which implies that arrays with address - only types would not <nl> + / / / be recognized by this function as yet . <nl> + bool mapInitializationStores ( <nl> + llvm : : DenseMap < uint64_t , StoreInst * > & ElementStoreMap ) ; <nl> + <nl> protected : <nl> / / / Validate the signature of this call . <nl> bool isValidSignature ( ) ; <nl> mmm a / lib / SILOptimizer / Analysis / ArraySemantic . cpp <nl> ppp b / lib / SILOptimizer / Analysis / ArraySemantic . cpp <nl> ArrayCallKind swift : : ArraySemanticsCall : : getKind ( ) const { <nl> ArrayCallKind : : kArrayPropsIsNativeTypeChecked ) <nl> . StartsWith ( " array . init " , ArrayCallKind : : kArrayInit ) <nl> . Case ( " array . uninitialized " , ArrayCallKind : : kArrayUninitialized ) <nl> + . Case ( " array . uninitialized_intrinsic " , ArrayCallKind : : kArrayUninitializedIntrinsic ) <nl> . Case ( " array . check_subscript " , ArrayCallKind : : kCheckSubscript ) <nl> . Case ( " array . check_index " , ArrayCallKind : : kCheckIndex ) <nl> . Case ( " array . get_count " , ArrayCallKind : : kGetCount ) <nl> bool swift : : ArraySemanticsCall : : canInlineEarly ( ) const { <nl> case ArrayCallKind : : kAppendContentsOf : <nl> case ArrayCallKind : : kReserveCapacityForAppend : <nl> case ArrayCallKind : : kAppendElement : <nl> + case ArrayCallKind : : kArrayUninitializedIntrinsic : <nl> / / append ( Element ) calls other semantics functions . Therefore it ' s <nl> / / important that it ' s inlined by the early inliner ( which is before all <nl> / / the array optimizations ) . Also , this semantics is only used to lookup <nl> / / Array . append ( Element ) , so inlining it does not prevent any other <nl> / / optimization . <nl> + / / <nl> + / / Early inlining array . uninitialized_intrinsic semantic call helps in <nl> + / / stack promotion . <nl> return true ; <nl> } <nl> } <nl> SILValue swift : : ArraySemanticsCall : : getInitializationCount ( ) const { <nl> return SILValue ( ) ; <nl> } <nl> <nl> - SILValue swift : : ArraySemanticsCall : : getArrayValue ( ) const { <nl> - if ( getKind ( ) = = ArrayCallKind : : kArrayUninitialized ) { <nl> - TupleExtractInst * ArrayDef = nullptr ; <nl> - for ( auto * Op : SemanticsCall - > getUses ( ) ) { <nl> - auto * TupleElt = dyn_cast < TupleExtractInst > ( Op - > getUser ( ) ) ; <nl> - if ( ! 
TupleElt ) <nl> - return SILValue ( ) ; <nl> - switch ( TupleElt - > getFieldNo ( ) ) { <nl> - default : <nl> - return SILValue ( ) ; <nl> - case 0 : { <nl> - / / Should only have one tuple extract after CSE . <nl> - if ( ArrayDef ) <nl> - return SILValue ( ) ; <nl> - ArrayDef = TupleElt ; <nl> - break ; <nl> - } <nl> - case 1 : / * Ignore the storage address * / break ; <nl> - } <nl> + / / / Given an array semantic call \ c arrayCall , if it is an " array . uninitialized " <nl> + / / / initializer , which returns a two - element tuple , return the element of the <nl> + / / / tuple at \ c tupleElementIndex . Return a null SILValue if the <nl> + / / / array call is not an " array . uninitialized " initializer or if the extraction <nl> + / / / of the result tuple fails . <nl> + static SILValue getArrayUninitializedInitResult ( ArraySemanticsCall arrayCall , <nl> + unsigned tupleElementIndex ) { <nl> + assert ( tupleElementIndex < = 1 & & " tupleElementIndex must be 0 or 1 " ) ; <nl> + ArrayCallKind arrayCallKind = arrayCall . getKind ( ) ; <nl> + if ( arrayCallKind ! = ArrayCallKind : : kArrayUninitialized & & <nl> + arrayCallKind ! = ArrayCallKind : : kArrayUninitializedIntrinsic ) <nl> + return SILValue ( ) ; <nl> + <nl> + / / In OSSA , the call result will be extracted through a destructure_tuple <nl> + / / instruction . <nl> + ApplyInst * callInst = arrayCall ; <nl> + if ( callInst - > getFunction ( ) - > hasOwnership ( ) ) { <nl> + Operand * singleUse = callInst - > getSingleUse ( ) ; <nl> + if ( ! singleUse ) <nl> + return SILValue ( ) ; <nl> + if ( DestructureTupleInst * destructTuple = <nl> + dyn_cast < DestructureTupleInst > ( singleUse - > getUser ( ) ) ) { <nl> + return destructTuple - > getResult ( tupleElementIndex ) ; <nl> } <nl> - return SILValue ( ArrayDef ) ; <nl> + return SILValue ( ) ; <nl> + } <nl> + <nl> + / / In non - OSSA , look for a tuple_extract instruction of the call result with <nl> + / / the requested tupleElementIndex . <nl> + TupleExtractInst * tupleExtractInst = nullptr ; <nl> + for ( auto * op : callInst - > getUses ( ) ) { <nl> + auto * tupleElt = dyn_cast < TupleExtractInst > ( op - > getUser ( ) ) ; <nl> + if ( ! tupleElt ) <nl> + return SILValue ( ) ; <nl> + if ( tupleElt - > getFieldNo ( ) ! = tupleElementIndex ) <nl> + continue ; <nl> + tupleExtractInst = tupleElt ; <nl> + break ; <nl> } <nl> + return SILValue ( tupleExtractInst ) ; <nl> + } <nl> <nl> - if ( getKind ( ) = = ArrayCallKind : : kArrayInit ) <nl> + SILValue swift : : ArraySemanticsCall : : getArrayValue ( ) const { <nl> + ArrayCallKind arrayCallKind = getKind ( ) ; <nl> + if ( arrayCallKind = = ArrayCallKind : : kArrayInit ) <nl> return SILValue ( SemanticsCall ) ; <nl> - <nl> - return SILValue ( ) ; <nl> + return getArrayUninitializedInitResult ( * this , 0 ) ; <nl> } <nl> <nl> SILValue swift : : ArraySemanticsCall : : getArrayElementStoragePointer ( ) const { <nl> - if ( getKind ( ) = = ArrayCallKind : : kArrayUninitialized ) { <nl> - TupleExtractInst * ArrayElementStorage = nullptr ; <nl> - for ( auto * Op : SemanticsCall - > getUses ( ) ) { <nl> - auto * TupleElt = dyn_cast < TupleExtractInst > ( Op - > getUser ( ) ) ; <nl> - if ( ! TupleElt ) <nl> - return SILValue ( ) ; <nl> - switch ( TupleElt - > getFieldNo ( ) ) { <nl> - default : <nl> - return SILValue ( ) ; <nl> - case 0 : { <nl> - / / Ignore the array value . <nl> - break ; <nl> - } <nl> - case 1 : <nl> - / / Should only have one tuple extract after CSE . 
<nl> - if ( ArrayElementStorage ) <nl> - return SILValue ( ) ; <nl> - ArrayElementStorage = TupleElt ; <nl> - break ; <nl> - } <nl> - } <nl> - return SILValue ( ArrayElementStorage ) ; <nl> - } <nl> - <nl> - return SILValue ( ) ; <nl> + return getArrayUninitializedInitResult ( * this , 1 ) ; <nl> } <nl> <nl> bool swift : : ArraySemanticsCall : : replaceByValue ( SILValue V ) { <nl> bool swift : : ArraySemanticsCall : : replaceByAppendingValues ( <nl> <nl> return true ; <nl> } <nl> + <nl> + bool swift : : ArraySemanticsCall : : mapInitializationStores ( <nl> + llvm : : DenseMap < uint64_t , StoreInst * > & ElementValueMap ) { <nl> + if ( getKind ( ) ! = ArrayCallKind : : kArrayUninitialized & & <nl> + getKind ( ) ! = ArrayCallKind : : kArrayUninitializedIntrinsic ) <nl> + return false ; <nl> + SILValue ElementBuffer = getArrayElementStoragePointer ( ) ; <nl> + if ( ! ElementBuffer ) <nl> + return false ; <nl> + <nl> + / / Match initialization stores into ElementBuffer . E . g . <nl> + / / % 83 = struct_extract % element_buffer : $ UnsafeMutablePointer < Int > <nl> + / / % 84 = pointer_to_address % 83 : $ Builtin . RawPointer to strict $ * Int <nl> + / / store % 85 to % 84 : $ * Int <nl> + / / % 87 = integer_literal $ Builtin . Word , 1 <nl> + / / % 88 = index_addr % 84 : $ * Int , % 87 : $ Builtin . Word <nl> + / / store % some_value to % 88 : $ * Int <nl> + <nl> + / / If this an ArrayUinitializedIntrinsic then the ElementBuffer is a <nl> + / / builtin . RawPointer . Otherwise , it is an UnsafeMutablePointer , which would <nl> + / / be struct - extracted to obtain a builtin . RawPointer . <nl> + SILValue UnsafeMutablePointerExtract = <nl> + ( getKind ( ) = = ArrayCallKind : : kArrayUninitialized ) <nl> + ? dyn_cast_or_null < StructExtractInst > ( <nl> + getSingleNonDebugUser ( ElementBuffer ) ) <nl> + : ElementBuffer ; <nl> + if ( ! UnsafeMutablePointerExtract ) <nl> + return false ; <nl> + <nl> + auto * PointerToAddress = dyn_cast_or_null < PointerToAddressInst > ( <nl> + getSingleNonDebugUser ( UnsafeMutablePointerExtract ) ) ; <nl> + if ( ! PointerToAddress ) <nl> + return false ; <nl> + <nl> + / / Match the stores . We can have either a store directly to the address or <nl> + / / to an index_addr projection . <nl> + for ( auto * Op : PointerToAddress - > getUses ( ) ) { <nl> + auto * Inst = Op - > getUser ( ) ; <nl> + <nl> + / / Store to the base . <nl> + auto * SI = dyn_cast < StoreInst > ( Inst ) ; <nl> + if ( SI & & SI - > getDest ( ) = = PointerToAddress ) { <nl> + / / We have already seen an entry for this index bail . <nl> + if ( ElementValueMap . count ( 0 ) ) <nl> + return false ; <nl> + ElementValueMap [ 0 ] = SI ; <nl> + continue ; <nl> + } else if ( SI ) <nl> + return false ; <nl> + <nl> + / / Store to an index_addr projection . <nl> + auto * IndexAddr = dyn_cast < IndexAddrInst > ( Inst ) ; <nl> + if ( ! IndexAddr ) <nl> + return false ; <nl> + SI = dyn_cast_or_null < StoreInst > ( getSingleNonDebugUser ( IndexAddr ) ) ; <nl> + if ( ! SI | | SI - > getDest ( ) ! = IndexAddr ) <nl> + return false ; <nl> + auto * Index = dyn_cast < IntegerLiteralInst > ( IndexAddr - > getIndex ( ) ) ; <nl> + if ( ! Index ) <nl> + return false ; <nl> + auto IndexVal = Index - > getValue ( ) ; <nl> + / / Let ' s not blow up our map . <nl> + if ( IndexVal . getActiveBits ( ) > 16 ) <nl> + return false ; <nl> + / / Already saw an entry . <nl> + if ( ElementValueMap . count ( IndexVal . getZExtValue ( ) ) ) <nl> + return false ; <nl> + <nl> + ElementValueMap [ IndexVal . 
getZExtValue ( ) ] = SI ; <nl> + } <nl> + return ! ElementValueMap . empty ( ) ; <nl> + } <nl> mmm a / lib / SILOptimizer / LoopTransforms / COWArrayOpt . cpp <nl> ppp b / lib / SILOptimizer / LoopTransforms / COWArrayOpt . cpp <nl> static bool isNonMutatingArraySemanticCall ( SILInstruction * Inst ) { <nl> case ArrayCallKind : : kWithUnsafeMutableBufferPointer : <nl> case ArrayCallKind : : kArrayInit : <nl> case ArrayCallKind : : kArrayUninitialized : <nl> + case ArrayCallKind : : kArrayUninitializedIntrinsic : <nl> case ArrayCallKind : : kAppendContentsOf : <nl> case ArrayCallKind : : kAppendElement : <nl> return false ; <nl> bool COWArrayOpt : : hasLoopOnlyDestructorSafeArrayOperations ( ) { <nl> auto Kind = Sem . getKind ( ) ; <nl> / / Safe because they create new arrays . <nl> if ( Kind = = ArrayCallKind : : kArrayInit | | <nl> - Kind = = ArrayCallKind : : kArrayUninitialized ) <nl> + Kind = = ArrayCallKind : : kArrayUninitialized | | <nl> + Kind = = ArrayCallKind : : kArrayUninitializedIntrinsic ) <nl> continue ; <nl> / / All array types must be the same . This is a stronger guaranteed than <nl> / / we actually need . The requirement is that we can ' t create another <nl> mmm a / lib / SILOptimizer / Transforms / ArrayElementValuePropagation . cpp <nl> ppp b / lib / SILOptimizer / Transforms / ArrayElementValuePropagation . cpp <nl> class ArrayAllocation { <nl> / / / A map of Array indices to element values <nl> llvm : : DenseMap < uint64_t , SILValue > ElementValueMap ; <nl> <nl> - bool mapInitializationStores ( SILValue ElementBuffer ) ; <nl> + bool mapInitializationStores ( ArraySemanticsCall arrayUninitCall ) ; <nl> bool recursivelyCollectUses ( ValueBase * Def ) ; <nl> bool replacementsAreValid ( ) ; <nl> <nl> class ArrayAllocation { <nl> } ; <nl> <nl> / / / Map the indices of array element initialization stores to their values . <nl> - bool ArrayAllocation : : mapInitializationStores ( SILValue ElementBuffer ) { <nl> - assert ( ElementBuffer & & <nl> - " Must have identified an array element storage pointer " ) ; <nl> - <nl> - / / Match initialization stores . <nl> - / / % 83 = struct_extract % element_buffer : $ UnsafeMutablePointer < Int > <nl> - / / % 84 = pointer_to_address % 83 : $ Builtin . RawPointer to strict $ * Int <nl> - / / store % 85 to % 84 : $ * Int <nl> - / / % 87 = integer_literal $ Builtin . Word , 1 <nl> - / / % 88 = index_addr % 84 : $ * Int , % 87 : $ Builtin . Word <nl> - / / store % some_value to % 88 : $ * Int <nl> - <nl> - auto * UnsafeMutablePointerExtract = <nl> - dyn_cast_or_null < StructExtractInst > ( getSingleNonDebugUser ( ElementBuffer ) ) ; <nl> - if ( ! UnsafeMutablePointerExtract ) <nl> + bool ArrayAllocation : : mapInitializationStores ( <nl> + ArraySemanticsCall arrayUninitCall ) { <nl> + llvm : : DenseMap < uint64_t , StoreInst * > elementStoreMap ; <nl> + if ( ! arrayUninitCall . mapInitializationStores ( elementStoreMap ) ) <nl> return false ; <nl> - auto * PointerToAddress = dyn_cast_or_null < PointerToAddressInst > ( <nl> - getSingleNonDebugUser ( UnsafeMutablePointerExtract ) ) ; <nl> - if ( ! PointerToAddress ) <nl> - return false ; <nl> - <nl> - / / Match the stores . We can have either a store directly to the address or <nl> - / / to an index_addr projection . <nl> - for ( auto * Op : PointerToAddress - > getUses ( ) ) { <nl> - auto * Inst = Op - > getUser ( ) ; <nl> - <nl> - / / Store to the base . 
<nl> - auto * SI = dyn_cast < StoreInst > ( Inst ) ; <nl> - if ( SI & & SI - > getDest ( ) = = PointerToAddress ) { <nl> - / / We have already seen an entry for this index bail . <nl> - if ( ElementValueMap . count ( 0 ) ) <nl> - return false ; <nl> - ElementValueMap [ 0 ] = SI - > getSrc ( ) ; <nl> - continue ; <nl> - } else if ( SI ) <nl> - return false ; <nl> - <nl> - / / Store an index_addr projection . <nl> - auto * IndexAddr = dyn_cast < IndexAddrInst > ( Inst ) ; <nl> - if ( ! IndexAddr ) <nl> - return false ; <nl> - SI = dyn_cast_or_null < StoreInst > ( getSingleNonDebugUser ( IndexAddr ) ) ; <nl> - if ( ! SI | | SI - > getDest ( ) ! = IndexAddr ) <nl> - return false ; <nl> - auto * Index = dyn_cast < IntegerLiteralInst > ( IndexAddr - > getIndex ( ) ) ; <nl> - if ( ! Index ) <nl> - return false ; <nl> - auto IndexVal = Index - > getValue ( ) ; <nl> - / / Let ' s not blow up our map . <nl> - if ( IndexVal . getActiveBits ( ) > 16 ) <nl> - return false ; <nl> - / / Already saw an entry . <nl> - if ( ElementValueMap . count ( IndexVal . getZExtValue ( ) ) ) <nl> - return false ; <nl> - <nl> - ElementValueMap [ IndexVal . getZExtValue ( ) ] = SI - > getSrc ( ) ; <nl> - } <nl> - return ! ElementValueMap . empty ( ) ; <nl> + / / Extract the SIL values of the array elements from the stores . <nl> + ElementValueMap . grow ( elementStoreMap . size ( ) ) ; <nl> + for ( auto keyValue : elementStoreMap ) <nl> + ElementValueMap [ keyValue . getFirst ( ) ] = keyValue . getSecond ( ) - > getSrc ( ) ; <nl> + return true ; <nl> } <nl> <nl> bool ArrayAllocation : : replacementsAreValid ( ) { <nl> bool ArrayAllocation : : analyze ( ApplyInst * Alloc ) { <nl> if ( ! ArrayValue ) <nl> return false ; <nl> <nl> - SILValue ElementBuffer = Uninitialized . getArrayElementStoragePointer ( ) ; <nl> - if ( ! ElementBuffer ) <nl> - return false ; <nl> - <nl> / / Figure out all stores to the array . <nl> - if ( ! mapInitializationStores ( ElementBuffer ) ) <nl> + if ( ! mapInitializationStores ( Uninitialized ) ) <nl> return false ; <nl> <nl> / / Check if the array value was stored or has escaped . <nl> mmm a / lib / SILOptimizer / Transforms / DeadObjectElimination . cpp <nl> ppp b / lib / SILOptimizer / Transforms / DeadObjectElimination . cpp <nl> static bool removeAndReleaseArray ( SingleValueInstruction * NewArrayValue , <nl> / / / side effect ? <nl> static bool isAllocatingApply ( SILInstruction * Inst ) { <nl> ArraySemanticsCall ArrayAlloc ( Inst ) ; <nl> - return ArrayAlloc . getKind ( ) = = ArrayCallKind : : kArrayUninitialized ; <nl> + return ArrayAlloc . getKind ( ) = = ArrayCallKind : : kArrayUninitialized | | <nl> + ArrayAlloc . getKind ( ) = = ArrayCallKind : : kArrayUninitializedIntrinsic ; <nl> } <nl> <nl> namespace { <nl> static bool getDeadInstsAfterInitializerRemoved ( <nl> bool DeadObjectElimination : : processAllocApply ( ApplyInst * AI , <nl> DeadEndBlocks & DEBlocks ) { <nl> / / Currently only handle array . uninitialized <nl> - if ( ArraySemanticsCall ( AI ) . getKind ( ) ! = ArrayCallKind : : kArrayUninitialized ) <nl> + if ( ArraySemanticsCall ( AI ) . getKind ( ) ! = ArrayCallKind : : kArrayUninitialized & & <nl> + ArraySemanticsCall ( AI ) . getKind ( ) ! = <nl> + ArrayCallKind : : kArrayUninitializedIntrinsic ) <nl> return false ; <nl> <nl> llvm : : SmallVector < SILInstruction * , 8 > instsDeadAfterInitializerRemoved ; <nl> mmm a / test / SILOptimizer / dead_array_elim . sil <nl> ppp b / test / SILOptimizer / dead_array_elim . 
sil <nl> class TrivialDestructor { <nl> / / Remove a dead array . <nl> / / rdar : / / 20980377 Add dead array elimination to DeadObjectElimination <nl> / / Swift . _allocateUninitializedArray < A > ( Builtin . Word ) - > ( Swift . Array < A > , Builtin . RawPointer ) <nl> - sil [ _semantics " array . uninitialized " ] @ allocArray : $ @ convention ( thin ) < τ_0_0 > ( Builtin . Word ) - > @ owned ( Array < τ_0_0 > , Builtin . RawPointer ) <nl> + sil [ _semantics " array . uninitialized_intrinsic " ] @ allocArray : $ @ convention ( thin ) < τ_0_0 > ( Builtin . Word ) - > @ owned ( Array < τ_0_0 > , Builtin . RawPointer ) <nl> <nl> sil [ _semantics " array . uninitialized " ] @ adoptStorageSpecialiedForInt : $ @ convention ( method ) ( @ guaranteed _ContiguousArrayStorage < Int > , Builtin . Word , @ thin Array < Int > . Type ) - > ( @ owned Array < Int > , UnsafeMutablePointer < Int > ) <nl> <nl> new file mode 100644 <nl> index 000000000000 . . fdc389e8441c <nl> mmm / dev / null <nl> ppp b / test / SILOptimizer / dead_array_elim . swift <nl> <nl> + / / RUN : % target - swift - frontend - O - emit - sil - primary - file % s | % FileCheck % s <nl> + <nl> + / / These tests check whether DeadObjectElimination pass runs as a part of the <nl> + / / optimization pipeline and eliminates dead array literals in Swift code . <nl> + / / Note that DeadObjectElimination pass relies on @ _semantics annotations on <nl> + / / the array initializer that is used by the compiler to create array literals . <nl> + / / This test would fail if in case the initializer used by the compiler to <nl> + / / initialize array literals doesn ' t match the one expected by the pass . <nl> + <nl> + / / CHECK - LABEL : sil hidden @ $ s15dead_array_elim24testDeadArrayEliminationyyF <nl> + func testDeadArrayElimination ( ) { <nl> + _ = [ 1 , 2 , 3 ] <nl> + / / CHECK : bb0 : <nl> + / / CHECK - NEXT : % { { . * } } = tuple ( ) <nl> + / / CHECK - NEXT : return % { { . * } } : $ ( ) <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden @ $ s15dead_array_elim29testEmptyDeadArrayEliminationyyF <nl> + func testEmptyDeadArrayElimination ( ) { <nl> + _ = [ ] <nl> + / / CHECK : bb0 : <nl> + / / CHECK - NEXT : % { { . * } } = tuple ( ) <nl> + / / CHECK - NEXT : return % { { . * } } : $ ( ) <nl> + } <nl> + <nl> + / / The use case tested by the following test , where a _fixLifetime call is <nl> + / / invoked on an array , appears when new os log APIs are used . <nl> + / / CHECK - LABEL : sil hidden @ $ s15dead_array_elim35testDeadArrayElimWithFixLifetimeUseyyF <nl> + func testDeadArrayElimWithFixLifetimeUse ( ) { <nl> + let a : [ Int ] = [ ] <nl> + _fixLifetime ( a ) <nl> + / / CHECK : bb0 : <nl> + / / CHECK - NEXT : % { { . * } } = tuple ( ) <nl> + / / CHECK - NEXT : return % { { . * } } : $ ( ) <nl> + } <nl> + <nl> + / / FIXME : DeadObjectElimination doesn ' t optimize this yet . <nl> + func testDeadArrayElimWithAddressOnlyValues < T > ( x : T , y : T ) { <nl> + _ = [ x , y ] <nl> + } <nl>
[ SIL Optimization ] Make ArraySemantics . cpp aware of " array . uninitialized_intrinsic "
apple/swift
a6bed21d9e99b1692160d42dcbf11d5324a36329
2020-02-05T22:28:34Z
mmm a / src / wallet / db . cpp <nl> ppp b / src / wallet / db . cpp <nl> void BerkeleyEnvironment : : Close ( ) <nl> } <nl> } <nl> <nl> + FILE * error_file = nullptr ; <nl> + dbenv - > get_errfile ( & error_file ) ; <nl> + <nl> int ret = dbenv - > close ( 0 ) ; <nl> if ( ret ! = 0 ) <nl> LogPrintf ( " BerkeleyEnvironment : : Close : Error % d closing database environment : % s \ n " , ret , DbEnv : : strerror ( ret ) ) ; <nl> if ( ! fMockDb ) <nl> DbEnv ( ( u_int32_t ) 0 ) . remove ( strPath . c_str ( ) , 0 ) ; <nl> + <nl> + if ( error_file ) fclose ( error_file ) ; <nl> } <nl> <nl> void BerkeleyEnvironment : : Reset ( ) <nl>
wallet : Close dbenv error file db . log
bitcoin/bitcoin
8602a1e6aeca65911e4d4c821d3575147ba32a7e
2019-02-04T12:22:55Z
mmm a / stdlib / public / runtime / KnownMetadata . cpp <nl> ppp b / stdlib / public / runtime / KnownMetadata . cpp <nl> namespace pointer_types { <nl> } <nl> <nl> namespace { <nl> - template < typename T > <nl> - constexpr size_t getAlignment ( ) { return alignof ( T ) ; } <nl> + template < typename T > <nl> + struct BuiltinType { <nl> + static constexpr const size_t Alignment = alignof ( T ) ; <nl> + } ; <nl> <nl> - # define SET_FIXED_ALIGNMENT ( Type , Align ) \ <nl> - template < > constexpr size_t getAlignment < Type > ( ) { return Align ; } <nl> + # define SET_FIXED_ALIGNMENT ( Type , Align ) \ <nl> + template < > \ <nl> + struct BuiltinType < Type > { \ <nl> + static constexpr const size_t Alignment = Align ; \ <nl> + } ; <nl> <nl> SET_FIXED_ALIGNMENT ( uint8_t , 1 ) <nl> SET_FIXED_ALIGNMENT ( uint16_t , 2 ) <nl> template < > constexpr size_t getAlignment < Type > ( ) { return Align ; } <nl> <nl> # undef SET_FIXED_ALIGNMENT <nl> <nl> - template < typename T , unsigned N > <nl> - struct SwiftVecT { <nl> - typedef T type __attribute__ ( ( ext_vector_type ( N ) ) ) ; <nl> + template < typename T , unsigned N > <nl> + struct SIMDVector { <nl> + using Type = T __attribute__ ( ( __ext_vector_type__ ( N ) ) ) ; <nl> } ; <nl> <nl> - template < typename T , unsigned N > <nl> - using SwiftVec = typename SwiftVecT < T , N > : : type ; <nl> + template < > <nl> + struct SIMDVector < float , 3 > { <nl> + using Type = float __attribute__ ( ( __ext_vector_type__ ( 4 ) ) ) ; <nl> + } ; <nl> + <nl> + template < > <nl> + struct SIMDVector < double , 3 > { <nl> + using Type = double __attribute__ ( ( __ext_vector_type__ ( 4 ) ) ) ; <nl> + } ; <nl> + <nl> + template < typename T , unsigned N > <nl> + using SIMDVectorType = typename SIMDVector < T , N > : : Type ; <nl> } <nl> <nl> - # define BUILTIN_TYPE ( Symbol , Name ) \ <nl> - const ValueWitnessTable swift : : VALUE_WITNESS_SYM ( Symbol ) = \ <nl> - ValueWitnessTableForBox < NativeBox < ctypes : : Symbol , \ <nl> - getAlignment < ctypes : : Symbol > ( ) > > : : table ; <nl> + # define BUILTIN_TYPE ( Symbol , Name ) \ <nl> + const ValueWitnessTable swift : : VALUE_WITNESS_SYM ( Symbol ) = \ <nl> + ValueWitnessTableForBox < NativeBox < ctypes : : Symbol , \ <nl> + BuiltinType < ctypes : : Symbol > : : Alignment > > : : table ; <nl> + <nl> # define BUILTIN_POINTER_TYPE ( Symbol , Name ) \ <nl> - const ExtraInhabitantsValueWitnessTable swift : : VALUE_WITNESS_SYM ( Symbol ) = \ <nl> + const ExtraInhabitantsValueWitnessTable swift : : VALUE_WITNESS_SYM ( Symbol ) = \ <nl> ValueWitnessTableForBox < pointer_types : : Symbol > : : table ; <nl> - # define BUILTIN_VECTOR_TYPE ( ElementSymbol , _ , Width ) \ <nl> - const ValueWitnessTable \ <nl> - swift : : VALUE_WITNESS_SYM ( VECTOR_BUILTIN_SYMBOL_NAME ( ElementSymbol , Width ) ) = \ <nl> - ValueWitnessTableForBox < NativeBox < SwiftVec < ctypes : : ElementSymbol , \ <nl> - Width > > > : : table ; <nl> + <nl> + # define BUILTIN_VECTOR_TYPE ( ElementSymbol , _ , Width ) \ <nl> + const ValueWitnessTable \ <nl> + swift : : VALUE_WITNESS_SYM ( VECTOR_BUILTIN_SYMBOL_NAME ( ElementSymbol , Width ) ) = \ <nl> + ValueWitnessTableForBox < NativeBox < SIMDVectorType < ctypes : : ElementSymbol , \ <nl> + Width > > > : : table ; <nl> + <nl> # include " swift / Runtime / BuiltinTypes . def " <nl> <nl> / / / The value - witness table for pointer - aligned unmanaged pointer types . <nl>
Merge pull request from compnerd / metadata
apple/swift
9f7ee563cb7ae6f31ba0cb18007e3498a6875438
2018-10-29T21:47:16Z
mmm a / android / sdk / src / main / java / com / taobao / weex / dom / action / MoveElementAction . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / dom / action / MoveElementAction . java <nl> public void executeRender ( RenderActionContext context ) { <nl> WXVContainer oldParent = component . getParent ( ) ; <nl> oldParent . remove ( component , false ) ; <nl> ( ( WXVContainer ) newParent ) . addChild ( component , mNewIndex ) ; <nl> + if ( ! component . isVirtualComponent ( ) ) { <nl> + ( ( WXVContainer ) newParent ) . addSubView ( component . getHostView ( ) , mNewIndex ) ; <nl> + } <nl> } <nl> } <nl> mmm a / android / sdk / src / main / java / com / taobao / weex / ui / component / WXScroller . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / ui / component / WXScroller . java <nl> private boolean shouldReport ( int x , int y ) { <nl> * Intercept refresh view and loading view <nl> * / <nl> @ Override <nl> - protected void addSubView ( View child , int index ) { <nl> + public void addSubView ( View child , int index ) { <nl> if ( child = = null | | getRealView ( ) = = null ) { <nl> return ; <nl> } <nl> mmm a / android / sdk / src / main / java / com / taobao / weex / ui / component / WXSlider . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / ui / component / WXSlider . java <nl> public ViewGroup getRealView ( ) { <nl> } <nl> <nl> @ Override <nl> - protected void addSubView ( View view , int index ) { <nl> + public void addSubView ( View view , int index ) { <nl> if ( view = = null | | mAdapter = = null ) { <nl> return ; <nl> } <nl> mmm a / android / sdk / src / main / java / com / taobao / weex / ui / component / WXSliderNeighbor . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / ui / component / WXSliderNeighbor . java <nl> ZoomTransformer createTransformer ( ) { <nl> } <nl> <nl> @ Override <nl> - protected void addSubView ( View view , final int index ) { <nl> + public void addSubView ( View view , final int index ) { <nl> if ( view = = null | | mAdapter = = null ) { <nl> return ; <nl> } <nl> mmm a / android / sdk / src / main / java / com / taobao / weex / ui / component / WXVContainer . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / ui / component / WXVContainer . java <nl> <nl> <nl> import android . content . Context ; <nl> import android . content . Intent ; <nl> + import android . support . annotation . RestrictTo ; <nl> + import android . support . annotation . RestrictTo . Scope ; <nl> + import android . util . Pair ; <nl> import android . support . annotation . Nullable ; <nl> import android . util . Pair ; <nl> import android . view . Menu ; <nl> public void createChildViewAt ( int index ) { <nl> } <nl> } <nl> <nl> - protected void addSubView ( View child , int index ) { <nl> + @ RestrictTo ( Scope . LIBRARY ) <nl> + public void addSubView ( View child , int index ) { <nl> if ( child = = null | | getRealView ( ) = = null ) { <nl> return ; <nl> } <nl> mmm a / android / sdk / src / main / java / com / taobao / weex / ui / component / list / BasicListComponent . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / ui / component / list / BasicListComponent . java <nl> private void relocateAppearanceHelper ( ) { <nl> * com . taobao . weex . ui . view . listview . 
WXRecyclerView } <nl> * / <nl> @ Override <nl> - protected void addSubView ( View child , int index ) { <nl> + public void addSubView ( View child , int index ) { <nl> <nl> } <nl> <nl> mmm a / android / sdk / src / main / java / com / taobao / weex / ui / component / list / template / WXRecyclerTemplateList . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / ui / component / list / template / WXRecyclerTemplateList . java <nl> public void addChild ( WXComponent child , int index ) { <nl> * com . taobao . weex . ui . view . listview . WXRecyclerView } <nl> * / <nl> @ Override <nl> - protected void addSubView ( View child , int index ) { <nl> + public void addSubView ( View child , int index ) { <nl> <nl> } <nl> <nl>
* [ Android ] Fix that moveElement doesn ' t work when parent is not a list
apache/incubator-weex
f4fefab661c39f4eeb419587b1f7a6380c703311
2017-10-18T08:55:23Z
mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> include_directories ( <nl> $ { CMAKE_CURRENT_SOURCE_DIR } / cocos / physics <nl> $ { CMAKE_CURRENT_SOURCE_DIR } / cocos / editor - support <nl> $ { CMAKE_CURRENT_SOURCE_DIR } / cocos / math / kazmath <nl> + $ { CMAKE_CURRENT_SOURCE_DIR } / cocos / scripting / lua - bindings / auto <nl> + $ { CMAKE_CURRENT_SOURCE_DIR } / cocos / scripting / lua - bindings / manual <nl> $ { CMAKE_CURRENT_SOURCE_DIR } / extensions <nl> $ { CMAKE_CURRENT_SOURCE_DIR } / external <nl> $ { CMAKE_CURRENT_SOURCE_DIR } / external / tinyxml2 <nl> mmm a / tests / cpp - empty - test / proj . linux / main . cpp <nl> ppp b / tests / cpp - empty - test / proj . linux / main . cpp <nl> int main ( int argc , char * * argv ) <nl> { <nl> / / create the application instance <nl> AppDelegate app ; <nl> - EGLView eglView ; <nl> - eglView . init ( " HelloCpp " , 900 , 640 ) ; <nl> return Application : : getInstance ( ) - > run ( ) ; <nl> } <nl> mmm a / tests / lua - empty - test / project / CMakeLists . txt <nl> ppp b / tests / lua - empty - test / project / CMakeLists . txt <nl> set_target_properties ( $ { APP_NAME } PROPERTIES <nl> <nl> pre_build ( $ { APP_NAME } <nl> COMMAND $ { CMAKE_COMMAND } - E remove_directory $ { APP_BIN_DIR } / Resources <nl> - COMMAND $ { CMAKE_COMMAND } - E copy_directory $ { CMAKE_CURRENT_SOURCE_DIR } / Resources $ { APP_BIN_DIR } / Resources <nl> - COMMAND $ { CMAKE_COMMAND } - E copy_directory $ { CMAKE_SOURCE_DIR } / cocos / scripting / lua / script $ { APP_BIN_DIR } / Resources <nl> + COMMAND $ { CMAKE_COMMAND } - E copy_directory $ { CMAKE_CURRENT_SOURCE_DIR } / . . / res $ { APP_BIN_DIR } / Resources / res <nl> + COMMAND $ { CMAKE_COMMAND } - E copy_directory $ { CMAKE_CURRENT_SOURCE_DIR } / . . / src $ { APP_BIN_DIR } / Resources / src <nl> + COMMAND $ { CMAKE_COMMAND } - E copy_directory $ { CMAKE_SOURCE_DIR } / cocos / scripting / lua - bindings / script $ { APP_BIN_DIR } / Resources <nl> ) <nl> mmm a / tests / lua - empty - test / project / proj . linux / main . cpp <nl> ppp b / tests / lua - empty - test / project / proj . linux / main . cpp <nl> int main ( int argc , char * * argv ) <nl> { <nl> / / create the application instance <nl> AppDelegate app ; <nl> - EGLView eglView ; <nl> - eglView . init ( " HelloLua " , 900 , 640 ) ; <nl> return Application : : getInstance ( ) - > run ( ) ; <nl> } <nl> mmm a / tests / lua - tests / project / CMakeLists . txt <nl> ppp b / tests / lua - tests / project / CMakeLists . txt <nl> set_target_properties ( $ { APP_NAME } PROPERTIES <nl> <nl> pre_build ( $ { APP_NAME } <nl> COMMAND $ { CMAKE_COMMAND } - E remove_directory $ { APP_BIN_DIR } / Resources <nl> + COMMAND $ { CMAKE_COMMAND } - E copy_directory $ { CMAKE_CURRENT_SOURCE_DIR } / . . / res $ { APP_BIN_DIR } / Resources / res <nl> COMMAND $ { CMAKE_COMMAND } - E copy_directory $ { CMAKE_CURRENT_SOURCE_DIR } / . . / src $ { APP_BIN_DIR } / Resources / src <nl> - COMMAND $ { CMAKE_COMMAND } - E copy_directory $ { CMAKE_CURRENT_SOURCE_DIR } / . . / res $ { APP_BIN_DIR } / Resources <nl> COMMAND $ { CMAKE_COMMAND } - E copy_directory $ { CMAKE_SOURCE_DIR } / cocos / scripting / lua - bindings / script $ { APP_BIN_DIR } / Resources <nl> - COMMAND $ { CMAKE_COMMAND } - E copy_directory $ { CMAKE_SOURCE_DIR } / samples / cpp - tests / Resources $ { APP_BIN_DIR } / Resources <nl> + COMMAND $ { CMAKE_COMMAND } - E copy_directory $ { CMAKE_SOURCE_DIR } / tests / cpp - tests / Resources $ { APP_BIN_DIR } / Resources / res <nl> ) <nl> <nl>
updates CMakeLists . txt
cocos2d/cocos2d-x
79e979ac7107bdf87b5b53441e733d635f46194a
2014-03-11T11:10:37Z
mmm a / Code / CryEngine / CryAction / ActorSystem . cpp <nl> ppp b / Code / CryEngine / CryAction / ActorSystem . cpp <nl> void CActorSystem : : Scan ( const char * folderName ) <nl> const char * fileExtension = PathUtil : : GetExt ( fd . name ) ; <nl> if ( stricmp ( fileExtension , " xml " ) ) <nl> { <nl> - if ( stricmp ( fileExtension , " binxml " ) ) <nl> - GameWarning ( " ActorSystem : File ' % s ' does not have ' xml ' extension , skipping . " , fd . name ) ; <nl> - <nl> continue ; <nl> } <nl> <nl> mmm a / Code / CryEngine / CryAction / ItemSystem . cpp <nl> ppp b / Code / CryEngine / CryAction / ItemSystem . cpp <nl> void CItemSystem : : Scan ( const char * folderName ) <nl> const char * fileExtension = PathUtil : : GetExt ( fd . name ) ; <nl> if ( stricmp ( fileExtension , " xml " ) ) <nl> { <nl> - if ( stricmp ( fileExtension , " binxml " ) ) <nl> - GameWarning ( " ItemSystem : File ' % s ' does not have ' xml ' extension , skipping . " , fd . name ) ; <nl> - <nl> continue ; <nl> } <nl> <nl>
! XB ( CRYENGINE ) ( CE - 16140 ) Warnings when reloading item scripts in GameSDK
CRYTEK/CRYENGINE
660f478915cc580cf18145583404581a63a6cd79
2018-03-21T09:25:46Z
mmm a / jstests / replsets / sessions_collection_auto_healing . js <nl> ppp b / jstests / replsets / sessions_collection_auto_healing . js <nl> TestData . disableImplicitSessions = true ; <nl> <nl> var replTest = new ReplSetTest ( { <nl> name : ' refresh ' , <nl> - nodes : [ { rsConfig : { votes : 1 , priority : 1 } } , { rsConfig : { votes : 0 , priority : 0 } } ] <nl> + nodes : [ <nl> + { / * primary * / } , <nl> + { / * secondary * / rsConfig : { priority : 0 } } , <nl> + { / * arbiter * / rsConfig : { arbiterOnly : true } } <nl> + ] <nl> } ) ; <nl> var nodes = replTest . startSet ( ) ; <nl> <nl> replTest . awaitSecondaryNodes ( ) ; <nl> var secondary = replTest . getSecondary ( ) ; <nl> var secondaryAdmin = secondary . getDB ( " admin " ) ; <nl> <nl> + let arbiter = replTest . getArbiter ( ) ; <nl> + <nl> + const refreshErrorMsgRegex = <nl> + new RegExp ( " Failed to refresh session cache , will try again at the next refresh interval " ) ; <nl> + <nl> / / Get the current value of the TTL index so that we can verify it ' s being properly applied . <nl> let res = assert . commandWorked ( <nl> primary . adminCommand ( { getParameter : 1 , localLogicalSessionTimeoutMinutes : 1 } ) ) ; <nl> let timeoutMinutes = res . localLogicalSessionTimeoutMinutes ; <nl> replTest . awaitReplication ( ) ; <nl> validateSessionsCollection ( secondary , false , false , timeoutMinutes ) ; <nl> } <nl> + <nl> + / / Test that a refresh on an arbiter does not create the sessions collection . <nl> + { <nl> + validateSessionsCollection ( primary , false , false , timeoutMinutes ) ; <nl> + <nl> + assert . commandWorked ( arbiter . adminCommand ( { clearLog : ' global ' } ) ) ; <nl> + assert . commandWorked ( arbiter . adminCommand ( { refreshLogicalSessionCacheNow : 1 } ) ) ; <nl> + <nl> + validateSessionsCollection ( primary , false , false , timeoutMinutes ) ; <nl> + <nl> + if ( ! jsTest . options ( ) . useRandomBinVersionsWithinReplicaSet ) { <nl> + / / Verify that the arbiter did not try to set up the session collection or refresh . <nl> + assert . eq ( false , checkLog . checkContainsOnce ( arbiter , refreshErrorMsgRegex ) ) ; <nl> + } <nl> + } <nl> + <nl> / / Test that a refresh on the primary creates the sessions collection . <nl> { <nl> validateSessionsCollection ( primary , false , false , timeoutMinutes ) ; <nl> let timeoutMinutes = res . localLogicalSessionTimeoutMinutes ; <nl> validateSessionsCollection ( primary , true , false , timeoutMinutes ) ; <nl> } <nl> <nl> + / / Test that a refresh on an arbiter will not create the TTL index on the sessions collection . <nl> + { <nl> + validateSessionsCollection ( primary , true , false , timeoutMinutes ) ; <nl> + <nl> + assert . commandWorked ( arbiter . adminCommand ( { refreshLogicalSessionCacheNow : 1 } ) ) ; <nl> + <nl> + validateSessionsCollection ( primary , true , false , timeoutMinutes ) ; <nl> + } <nl> + <nl> / / Test that a refresh on the primary will create the TTL index on the sessions collection . <nl> { <nl> validateSessionsCollection ( primary , true , false , timeoutMinutes ) ; <nl> new file mode 100644 <nl> index 000000000000 . . ec6e60bbf402 <nl> mmm / dev / null <nl> ppp b / jstests / replsets / sessions_collection_reaping . js <nl> <nl> + ( function ( ) { <nl> + " use strict " ; <nl> + <nl> + / / This test makes assertions about the number of sessions , which are not compatible with <nl> + / / implicit sessions . <nl> + TestData . 
disableImplicitSessions = true ; <nl> + <nl> + let replTest = new ReplSetTest ( { <nl> + name : ' reaping ' , <nl> + nodes : [ <nl> + { / * primary * / } , <nl> + { / * secondary * / rsConfig : { priority : 0 } } , <nl> + { / * arbiter * / rsConfig : { arbiterOnly : true } } <nl> + ] , <nl> + nodeOptions : { setParameter : { TransactionRecordMinimumLifetimeMinutes : 0 } } <nl> + } ) ; <nl> + let nodes = replTest . startSet ( ) ; <nl> + <nl> + replTest . initiate ( ) ; <nl> + let primary = replTest . getPrimary ( ) ; <nl> + let sessionsCollOnPrimary = primary . getDB ( " config " ) . system . sessions ; <nl> + let transactionsCollOnPrimary = primary . getDB ( " config " ) . transactions ; <nl> + <nl> + replTest . awaitSecondaryNodes ( ) ; <nl> + let secondary = replTest . getSecondary ( ) ; <nl> + let arbiter = replTest . getArbiter ( ) ; <nl> + <nl> + const dbName = jsTestName ( ) ; <nl> + const collName = " test " ; <nl> + const reapErrorMsgRegex = <nl> + new RegExp ( " Sessions collection is not set up . * waiting until next sessions reap interval " ) ; <nl> + <nl> + / / Set up a session with a retryable write . <nl> + let session = primary . startSession ( { retryWrites : 1 } ) ; <nl> + assert . commandWorked ( session . getDatabase ( dbName ) [ collName ] . save ( { x : 1 } ) ) ; <nl> + assert . commandWorked ( primary . adminCommand ( { refreshLogicalSessionCacheNow : 1 } ) ) ; <nl> + assert . eq ( 1 , sessionsCollOnPrimary . count ( ) ) ; <nl> + assert . eq ( 1 , transactionsCollOnPrimary . count ( ) ) ; <nl> + <nl> + / / Remove the session doc so the session gets reaped when reapLogicalSessionCacheNow is run . <nl> + assert . commandWorked ( sessionsCollOnPrimary . remove ( { } ) ) ; <nl> + <nl> + / / Test that a reap on a secondary does not lead to the on - disk state reaping of the session <nl> + / / since the session does not exist in the secondary ' s session catalog . <nl> + { <nl> + assert . commandWorked ( secondary . adminCommand ( { clearLog : ' global ' } ) ) ; <nl> + assert . commandWorked ( secondary . adminCommand ( { reapLogicalSessionCacheNow : 1 } ) ) ; <nl> + <nl> + assert . eq ( 1 , transactionsCollOnPrimary . count ( ) ) ; <nl> + assert . eq ( false , checkLog . checkContainsOnce ( secondary , reapErrorMsgRegex ) ) ; <nl> + } <nl> + <nl> + / / Test that a reap on an arbiter does not lead to reaping of the session . <nl> + { <nl> + assert . commandWorked ( arbiter . adminCommand ( { clearLog : ' global ' } ) ) ; <nl> + assert . commandWorked ( arbiter . adminCommand ( { reapLogicalSessionCacheNow : 1 } ) ) ; <nl> + <nl> + assert . eq ( 1 , transactionsCollOnPrimary . count ( ) ) ; <nl> + <nl> + if ( ! jsTest . options ( ) . useRandomBinVersionsWithinReplicaSet ) { <nl> + / / Verify that the arbiter did not try to reap the session . <nl> + assert . eq ( false , checkLog . checkContainsOnce ( arbiter , reapErrorMsgRegex ) ) ; <nl> + } <nl> + } <nl> + <nl> + / / Test that a reap on the primary works as expected . <nl> + { <nl> + assert . commandWorked ( primary . adminCommand ( { clearLog : ' global ' } ) ) ; <nl> + assert . commandWorked ( primary . adminCommand ( { reapLogicalSessionCacheNow : 1 } ) ) ; <nl> + <nl> + assert . eq ( 0 , transactionsCollOnPrimary . count ( ) ) ; <nl> + } <nl> + <nl> + replTest . stopSet ( ) ; <nl> + } ) ( ) ; <nl> mmm a / src / mongo / db / logical_session_cache_impl . cpp <nl> ppp b / src / mongo / db / logical_session_cache_impl . cpp <nl> <nl> # include " mongo / db / logical_session_id . 
h " <nl> # include " mongo / db / logical_session_id_helpers . h " <nl> # include " mongo / db / operation_context . h " <nl> + # include " mongo / db / repl / replication_coordinator . h " <nl> # include " mongo / db / s / operation_sharding_state . h " <nl> # include " mongo / db / service_context . h " <nl> # include " mongo / logv2 / log . h " <nl> void LogicalSessionCacheImpl : : _periodicReap ( Client * client ) { <nl> } <nl> <nl> Status LogicalSessionCacheImpl : : _reap ( Client * client ) { <nl> + boost : : optional < ServiceContext : : UniqueOperationContext > uniqueCtx ; <nl> + auto * const opCtx = [ & ] { <nl> + if ( client - > getOperationContext ( ) ) { <nl> + return client - > getOperationContext ( ) ; <nl> + } <nl> + <nl> + uniqueCtx . emplace ( client - > makeOperationContext ( ) ) ; <nl> + return uniqueCtx - > get ( ) ; <nl> + } ( ) ; <nl> + <nl> + const auto replCoord = repl : : ReplicationCoordinator : : get ( opCtx ) ; <nl> + if ( replCoord & & replCoord - > getMemberState ( ) . arbiter ( ) ) { <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> / / Take the lock to update some stats . <nl> { <nl> stdx : : lock_guard < Latch > lk ( _mutex ) ; <nl> Status LogicalSessionCacheImpl : : _reap ( Client * client ) { <nl> _stats . setTransactionReaperJobCount ( _stats . getTransactionReaperJobCount ( ) + 1 ) ; <nl> } <nl> <nl> - boost : : optional < ServiceContext : : UniqueOperationContext > uniqueCtx ; <nl> - auto * const opCtx = [ & ] { <nl> - if ( client - > getOperationContext ( ) ) { <nl> - return client - > getOperationContext ( ) ; <nl> - } <nl> - <nl> - uniqueCtx . emplace ( client - > makeOperationContext ( ) ) ; <nl> - return uniqueCtx - > get ( ) ; <nl> - } ( ) ; <nl> - <nl> int numReaped = 0 ; <nl> <nl> try { <nl> Status LogicalSessionCacheImpl : : _reap ( Client * client ) { <nl> } <nl> <nl> void LogicalSessionCacheImpl : : _refresh ( Client * client ) { <nl> + / / get or make an opCtx <nl> + boost : : optional < ServiceContext : : UniqueOperationContext > uniqueCtx ; <nl> + auto * const opCtx = [ & client , & uniqueCtx ] { <nl> + if ( client - > getOperationContext ( ) ) { <nl> + return client - > getOperationContext ( ) ; <nl> + } <nl> + <nl> + uniqueCtx . emplace ( client - > makeOperationContext ( ) ) ; <nl> + return uniqueCtx - > get ( ) ; <nl> + } ( ) ; <nl> + <nl> + const auto replCoord = repl : : ReplicationCoordinator : : get ( opCtx ) ; <nl> + if ( replCoord & & replCoord - > getMemberState ( ) . arbiter ( ) ) { <nl> + return ; <nl> + } <nl> + <nl> / / Stats for serverStatus : <nl> { <nl> stdx : : lock_guard < Latch > lk ( _mutex ) ; <nl> void LogicalSessionCacheImpl : : _refresh ( Client * client ) { <nl> _stats . setLastSessionsCollectionJobDurationMillis ( millis . count ( ) ) ; <nl> } ) ; <nl> <nl> - / / get or make an opCtx <nl> - boost : : optional < ServiceContext : : UniqueOperationContext > uniqueCtx ; <nl> - auto * const opCtx = [ & client , & uniqueCtx ] { <nl> - if ( client - > getOperationContext ( ) ) { <nl> - return client - > getOperationContext ( ) ; <nl> - } <nl> - <nl> - uniqueCtx . emplace ( client - > makeOperationContext ( ) ) ; <nl> - return uniqueCtx - > get ( ) ; <nl> - } ( ) ; <nl> - <nl> ON_BLOCK_EXIT ( [ & opCtx ] { clearShardingOperationFailedStatus ( opCtx ) ; } ) ; <nl> <nl> try { <nl>
SERVER - 40441 Make arbiters not try to setup the sessions collection or check if it exists in LogicalSessionCache refresh / reap thread
mongodb/mongo
35c86a14e2ad2ef8d14fe7ea3ea02951f30e646a
2020-05-20T01:24:31Z
mmm a / mars / build_windows . py <nl> ppp b / mars / build_windows . py <nl> <nl> BUILD_OUT_PATH = ' cmake_build ' <nl> WIN_LIBS_INSTALL_PATH = BUILD_OUT_PATH + " / Windows . out / " <nl> WIN_RESULT_DIR = WIN_LIBS_INSTALL_PATH + ' win / ' <nl> - WIN_BUILD_CMD = ' cmake . . & & cmake - - build . - - target install - - config Release ' <nl> + WIN_BUILD_CMD = ' cmake . . & & cmake - - build . - - target install - - config % s ' <nl> WIN_GEN_PROJECT_CMD = ' cmake . . ' <nl> <nl> <nl> - def build_windows ( incremental , tag = ' ' ) : <nl> + def build_windows ( incremental , tag = ' ' , config = ' Release ' ) : <nl> before_time = time . time ( ) <nl> gen_mars_revision_file ( ' comm ' , tag ) <nl> <nl> - clean ( BUILD_OUT_PATH , incremental ) <nl> + clean_windows ( BUILD_OUT_PATH , incremental ) <nl> os . chdir ( BUILD_OUT_PATH ) <nl> <nl> - print ( " build cmd : " + WIN_BUILD_CMD ) <nl> - ret = os . system ( WIN_BUILD_CMD ) <nl> + print ( " build cmd : " + WIN_BUILD_CMD % config ) <nl> + ret = os . system ( WIN_BUILD_CMD % config ) <nl> os . chdir ( SCRIPT_PATH ) <nl> <nl> if 0 ! = ret : <nl> def build_windows ( incremental , tag = ' ' ) : <nl> <nl> merge_win_static_libs ( glob . glob ( WIN_LIBS_INSTALL_PATH + ' * . lib ' ) , WIN_RESULT_DIR + ' mars . lib ' ) <nl> copy_file_mapping ( COMM_COPY_HEADER_FILES , ' . . / . . / ' , WIN_RESULT_DIR ) <nl> + <nl> + sub_folders = [ " app " , " baseevent " , " comm " , " boost " , " xlog " , " sdt " , " stn " ] <nl> + copy_windows_pdb ( BUILD_OUT_PATH , sub_folders , config , WIN_LIBS_INSTALL_PATH ) <nl> <nl> print ( ' = = = = = = = = = = = = = = = = = = Output = = = = = = = = = = = = = = = = = = = = = = = = ' ) <nl> print ( " libs : % s " % ( WIN_RESULT_DIR ) ) <nl> + print ( " pdb files : % s " % ( WIN_LIBS_INSTALL_PATH ) ) <nl> <nl> after_time = time . time ( ) <nl> print ( " use time : % d s " % ( int ( after_time - before_time ) ) ) <nl> def gen_win_project ( tag = ' ' ) : <nl> <nl> gen_mars_revision_file ( ' comm ' , tag ) <nl> <nl> - clean ( BUILD_OUT_PATH ) <nl> + clean_windows ( BUILD_OUT_PATH , False ) <nl> os . chdir ( BUILD_OUT_PATH ) <nl> ret = os . system ( WIN_GEN_PROJECT_CMD ) <nl> os . chdir ( SCRIPT_PATH ) <nl> def main ( ) : <nl> return <nl> <nl> while True : <nl> - if len ( sys . argv ) > = 2 : <nl> - build_windows ( False , sys . argv [ 1 ] ) <nl> + if len ( sys . argv ) > = 3 : <nl> + build_windows ( False , sys . argv [ 1 ] , sys . argv [ 2 ] ) <nl> break <nl> else : <nl> - num = raw_input ( ' Enter menu : \ n1 . Clean & & build . \ n2 . Build incrementally . \ n3 . Gen project file . \ n4 . Exit \ n ' ) <nl> + num = raw_input ( ' Enter menu ( or usage : python build_windows . py < tag > < Debug / Release > ) : \ n1 . Clean & & build Release . \ n2 . Build Release incrementally . \ n3 . Clean & & build Debug . \ n4 . Build Debug incrementally . \ n5 . Gen project file . \ n6 . Exit \ n ' ) <nl> if num = = ' 1 ' : <nl> - build_windows ( False ) <nl> + build_windows ( False , config = ' Release ' ) <nl> break <nl> elif num = = ' 2 ' : <nl> - build_windows ( True ) <nl> + build_windows ( True , config = ' Release ' ) <nl> break <nl> elif num = = ' 3 ' : <nl> - gen_win_project ( ) <nl> + build_windows ( False , config = ' Debug ' ) <nl> break <nl> elif num = = ' 4 ' : <nl> + build_windows ( True , config = ' Debug ' ) <nl> + break <nl> + elif num = = ' 5 ' : <nl> + gen_win_project ( ) <nl> + break <nl> + elif num = = ' 6 ' : <nl> break <nl> else : <nl> build_windows ( False ) <nl> mmm a / mars / mars_utils . 
py <nl> ppp b / mars / mars_utils . py <nl> def clean ( path , incremental = False ) : <nl> if not os . path . exists ( path ) : <nl> os . makedirs ( path ) <nl> <nl> + def clean_windows ( path , incremental ) : <nl> + if not os . path . exists ( path ) : <nl> + os . makedirs ( path ) <nl> + return <nl> + <nl> + if incremental : <nl> + return ; <nl> + <nl> + try : <nl> + if os . path . exists ( path ) : <nl> + shutil . rmtree ( path ) <nl> + if not os . path . exists ( path ) : <nl> + os . makedirs ( path ) <nl> + except Exception : <nl> + pass <nl> + <nl> + def copy_windows_pdb ( cmake_out , sub_folder , config , dst_folder ) : <nl> + for sf in sub_folder : <nl> + src_file = " % s / % s / " % ( cmake_out , sf ) <nl> + dirs = glob . glob ( src_file + " * . dir " ) <nl> + if len ( dirs ) ! = 1 : <nl> + print ( " Warning : % s path error . " % src_file ) <nl> + continue <nl> + <nl> + src_file = " % s / % s " % ( dirs [ 0 ] , config ) <nl> + pdbs = glob . glob ( src_file + " / * . pdb " ) <nl> + if len ( pdbs ) ! = 1 : <nl> + print ( " Warning : % s path error . " % src_file ) <nl> + continue <nl> + pdb = pdbs [ 0 ] <nl> + if os . path . isfile ( pdb ) : <nl> + shutil . copy ( pdb , dst_folder ) <nl> + else : <nl> + print ( " % s not exists " % pdb ) <nl> <nl> def copy_file ( src , dst ) : <nl> if not os . path . isfile ( src ) : <nl>
add debug option for windows
Tencent/mars
bbfb175c0d26ebc26c2a081ccf5648eca8ad03f9
2018-12-28T08:27:20Z
mmm a / src / bthread / key . cpp <nl> ppp b / src / bthread / key . cpp <nl> void return_keytable ( bthread_keytable_pool_t * pool , KeyTable * kt ) { <nl> pool - > free_keytables = kt ; <nl> } <nl> <nl> - static void cleanup_pthread ( ) { <nl> - KeyTable * kt = tls_bls . keytable ; <nl> - / / TODO ( zhujiashun ) : thread local storage not works in macos using clang <nl> + static void cleanup_pthread ( void * arg ) { <nl> + KeyTable * kt = static_cast < KeyTable * > ( arg ) ; <nl> if ( kt ) { <nl> delete kt ; <nl> / / After deletion : tls may be set during deletion . <nl> int bthread_setspecific ( bthread_key_t key , void * data ) { <nl> } <nl> if ( ! bthread : : tls_ever_created_keytable ) { <nl> bthread : : tls_ever_created_keytable = true ; <nl> - CHECK_EQ ( 0 , butil : : thread_atexit ( bthread : : cleanup_pthread ) ) ; <nl> + CHECK_EQ ( 0 , butil : : thread_atexit ( bthread : : cleanup_pthread , kt ) ) ; <nl> } <nl> } <nl> return kt - > set_data ( key , data ) ; <nl>
fix the problem that tls data cannot be read in destructor which is specified by pthread_key_create
apache/incubator-brpc
72c5135401c6420c77c8a614d95f10082809e27d
2018-04-02T10:42:49Z
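The fix above hands the KeyTable pointer to the exit callback instead of having the callback read tls_bls.keytable, since reading thread-local storage during thread teardown was unreliable on macOS/clang per the removed TODO. A minimal standalone sketch of the same idea using plain pthread_key_create (rather than bRPC's butil::thread_atexit, whose two-argument form is taken from the diff) could look like this:

#include <pthread.h>
#include <cstdio>

struct KeyTable { int id; };

static pthread_key_t g_key;

// The destructor receives the value bound to the key for the exiting thread,
// so no thread_local variable has to be read during teardown.
static void cleanup_keytable(void* arg) {
  delete static_cast<KeyTable*>(arg);
  std::puts("keytable destroyed");
}

static void* worker(void*) {
  pthread_setspecific(g_key, new KeyTable{42});
  return nullptr;  // cleanup_keytable() runs with that KeyTable* at thread exit
}

int main() {
  pthread_key_create(&g_key, cleanup_keytable);
  pthread_t t;
  pthread_create(&t, nullptr, worker, nullptr);
  pthread_join(t, nullptr);
  pthread_key_delete(&g_key);
  return 0;
}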
mmm a / xbmc / interfaces / json - rpc / PlayerOperations . cpp <nl> ppp b / xbmc / interfaces / json - rpc / PlayerOperations . cpp <nl> void CPlayerOperations : : SendSlideshowAction ( int actionID ) <nl> CApplicationMessenger : : GetInstance ( ) . SendMsg ( TMSG_GUI_ACTION , WINDOW_SLIDESHOW , - 1 , static_cast < void * > ( new CAction ( actionID ) ) ) ; <nl> } <nl> <nl> + void AppendAudioStreamFlagsAsBooleans ( CVariant & list , StreamFlags & flags ) <nl> + { <nl> + list [ " is_default " ] = ( ( flags & StreamFlags : : FLAG_DEFAULT ) ! = 0 ) ; <nl> + list [ " is_original " ] = ( ( flags & StreamFlags : : FLAG_ORIGINAL ) ! = 0 ) ; <nl> + list [ " is_impaired " ] = ( ( flags & StreamFlags : : FLAG_VISUAL_IMPAIRED ) ! = 0 ) ; <nl> + } <nl> + <nl> + void AppendSubtitleStreamFlagsAsBooleans ( CVariant & list , StreamFlags & flags ) <nl> + { <nl> + list [ " is_default " ] = ( ( flags & StreamFlags : : FLAG_DEFAULT ) ! = 0 ) ; <nl> + list [ " is_forced " ] = ( ( flags & StreamFlags : : FLAG_FORCED ) ! = 0 ) ; <nl> + list [ " is_impaired " ] = ( ( flags & StreamFlags : : FLAG_HEARING_IMPAIRED ) ! = 0 ) ; <nl> + } <nl> + <nl> JSONRPC_STATUS CPlayerOperations : : GetPropertyValue ( PlayerType player , const std : : string & property , CVariant & result ) <nl> { <nl> if ( player = = None ) <nl> JSONRPC_STATUS CPlayerOperations : : GetPropertyValue ( PlayerType player , const std : <nl> result [ " codec " ] = info . codecName ; <nl> result [ " bitrate " ] = info . bitrate ; <nl> result [ " channels " ] = info . channels ; <nl> + AppendAudioStreamFlagsAsBooleans ( result , info . flags ) ; <nl> } <nl> } <nl> else <nl> JSONRPC_STATUS CPlayerOperations : : GetPropertyValue ( PlayerType player , const std : <nl> audioStream [ " codec " ] = info . codecName ; <nl> audioStream [ " bitrate " ] = info . bitrate ; <nl> audioStream [ " channels " ] = info . channels ; <nl> + AppendAudioStreamFlagsAsBooleans ( audioStream , info . flags ) ; <nl> <nl> result . append ( audioStream ) ; <nl> } <nl> JSONRPC_STATUS CPlayerOperations : : GetPropertyValue ( PlayerType player , const std : <nl> result [ " index " ] = index ; <nl> result [ " name " ] = info . name ; <nl> result [ " language " ] = info . language ; <nl> + AppendSubtitleStreamFlagsAsBooleans ( result , info . flags ) ; <nl> } <nl> } <nl> else <nl> JSONRPC_STATUS CPlayerOperations : : GetPropertyValue ( PlayerType player , const std : <nl> subtitle [ " index " ] = index ; <nl> subtitle [ " name " ] = info . name ; <nl> subtitle [ " language " ] = info . language ; <nl> + AppendSubtitleStreamFlagsAsBooleans ( subtitle , info . flags ) ; <nl> <nl> result . append ( subtitle ) ; <nl> } <nl> mmm a / xbmc / interfaces / json - rpc / schema / types . json <nl> ppp b / xbmc / interfaces / json - rpc / schema / types . json <nl> <nl> " language " : { " type " : " string " , " required " : true } , <nl> " codec " : { " type " : " string " , " required " : true } , <nl> " bitrate " : { " type " : " integer " , " required " : true } , <nl> - " channels " : { " type " : " integer " , " required " : true } <nl> + " channels " : { " type " : " integer " , " required " : true } , <nl> + " is_default " : { " type " : " boolean " , " required " : true } , <nl> + " is_original " : { " type " : " boolean " , " required " : true } , <nl> + " is_impaired " : { " type " : " boolean " , " required " : true } <nl> } <nl> } , <nl> " Player . Video . 
Stream " : { <nl> <nl> " properties " : { <nl> " index " : { " type " : " integer " , " minimum " : 0 , " required " : true } , <nl> " name " : { " type " : " string " , " required " : true } , <nl> - " language " : { " type " : " string " , " required " : true } <nl> + " language " : { " type " : " string " , " required " : true } , <nl> + " is_default " : { " type " : " boolean " , " required " : true } , <nl> + " is_forced " : { " type " : " boolean " , " required " : true } , <nl> + " is_impaired " : { " type " : " boolean " , " required " : true } <nl> } <nl> } , <nl> " Player . Property . Name " : { <nl> mmm a / xbmc / interfaces / json - rpc / schema / version . txt <nl> ppp b / xbmc / interfaces / json - rpc / schema / version . txt <nl> @ @ - 1 + 1 @ @ <nl> - JSONRPC_VERSION 10 . 6 . 2 <nl> + JSONRPC_VERSION 10 . 7 . 0 <nl>
json-rpc: add flags properties to GetPropertyValue concerning
xbmc/xbmc
a503b028b0f11c84e92d4af81b95b6b06f98fc55
2019-12-05T15:20:20Z
mmm a / src / builtins / array - filter . tq <nl> ppp b / src / builtins / array - filter . tq <nl> namespace array { <nl> thisArg : Object , a : JSArray ) labels Bailout ( Smi , Smi ) { <nl> let k : Smi = 0 ; <nl> let to : Smi = 0 ; <nl> - const fastOWitness : FastJSArrayWitness = <nl> - MakeWitness ( Cast < FastJSArray > ( o ) otherwise goto Bailout ( k , to ) ) ; <nl> - const fastAWitness : FastJSArrayWitness = <nl> - MakeWitness ( Cast < FastJSArray > ( a ) otherwise goto Bailout ( k , to ) ) ; <nl> + let fastO = <nl> + FastJSArrayWitness { Cast < FastJSArray > ( o ) otherwise goto Bailout ( k , to ) } ; <nl> + let fastA = <nl> + FastJSArrayWitness { Cast < FastJSArray > ( a ) otherwise goto Bailout ( k , to ) } ; <nl> <nl> / / Build a fast loop over the smi array . <nl> for ( ; k < len ; k + + ) { <nl> - let fastO : FastJSArray = <nl> - Testify ( fastOWitness ) otherwise goto Bailout ( k , to ) ; <nl> - <nl> / / Ensure that we haven ' t walked beyond a possibly updated length . <nl> - if ( k > = fastO . length ) goto Bailout ( k , to ) ; <nl> + if ( k > = fastO . Get ( ) . length ) goto Bailout ( k , to ) ; <nl> <nl> try { <nl> - const value : Object = <nl> - LoadElementNoHole < FixedArrayType > ( fastO , k ) otherwise FoundHole ; <nl> + const value : Object = LoadElementNoHole < FixedArrayType > ( fastO . Get ( ) , k ) <nl> + otherwise FoundHole ; <nl> const result : Object = <nl> - Call ( context , callbackfn , thisArg , value , k , fastO ) ; <nl> + Call ( context , callbackfn , thisArg , value , k , fastO . Get ( ) ) ; <nl> if ( ToBoolean ( result ) ) { <nl> try { <nl> / / Since the call to { callbackfn } is observable , we can ' t <nl> / / use the Bailout label until we ' ve successfully stored . <nl> / / Hence the { SlowStore } label . <nl> - const fastA : FastJSArray = <nl> - Testify ( fastAWitness ) otherwise SlowStore ; <nl> - if ( fastA . length ! = to ) goto SlowStore ; <nl> - BuildAppendJSArray ( kind , fastA , value ) <nl> + fastA . Recheck ( ) otherwise SlowStore ; <nl> + if ( fastA . Get ( ) . length ! = to ) goto SlowStore ; <nl> + BuildAppendJSArray ( kind , fastA . Get ( ) , value ) <nl> otherwise SlowStore ; <nl> } <nl> label SlowStore { <nl> namespace array { <nl> } <nl> to = to + 1 ; <nl> } <nl> + fastO . Recheck ( ) otherwise goto Bailout ( k + 1 , to ) ; <nl> } <nl> label FoundHole { } <nl> } <nl> mmm a / src / builtins / array - foreach . tq <nl> ppp b / src / builtins / array - foreach . tq <nl> namespace array { <nl> o : JSArray , len : Smi , callbackfn : Callable , thisArg : Object ) labels <nl> Bailout ( Smi ) { <nl> let k : Smi = 0 ; <nl> - const fastOWitness : FastJSArrayWitness = <nl> - MakeWitness ( Cast < FastJSArray > ( o ) otherwise goto Bailout ( k ) ) ; <nl> + let fastO = <nl> + FastJSArrayWitness { Cast < FastJSArray > ( o ) otherwise goto Bailout ( k ) } ; <nl> <nl> / / Build a fast loop over the smi array . <nl> for ( ; k < len ; k + + ) { <nl> - let fastO : FastJSArray = Testify ( fastOWitness ) otherwise goto Bailout ( k ) ; <nl> - <nl> / / Ensure that we haven ' t walked beyond a possibly updated length . <nl> - if ( k > = fastO . length ) goto Bailout ( k ) ; <nl> + if ( k > = fastO . Get ( ) . length ) goto Bailout ( k ) ; <nl> <nl> try { <nl> - const value : Object = <nl> - LoadElementNoHole < FixedArrayType > ( fastO , k ) otherwise FoundHole ; <nl> - Call ( context , callbackfn , thisArg , value , k , fastO ) ; <nl> + const value : Object = LoadElementNoHole < FixedArrayType > ( fastO . 
Get ( ) , k ) <nl> + otherwise FoundHole ; <nl> + Call ( context , callbackfn , thisArg , value , k , fastO . Get ( ) ) ; <nl> + fastO . Recheck ( ) otherwise goto Bailout ( k + 1 ) ; <nl> } <nl> label FoundHole { } <nl> } <nl> mmm a / src / builtins / array - map . tq <nl> ppp b / src / builtins / array - map . tq <nl> namespace array { <nl> vector : Vector ) : Vector labels Bailout ( Vector , Smi ) { <nl> let k : Smi = 0 ; <nl> let v : Vector = vector ; <nl> - const fastOWitness : FastJSArrayWitness = <nl> - MakeWitness ( Cast < FastJSArray > ( o ) otherwise goto Bailout ( v , k ) ) ; <nl> + let fastO = <nl> + FastJSArrayWitness { Cast < FastJSArray > ( o ) otherwise goto Bailout ( v , k ) } ; <nl> <nl> / / Build a fast loop over the smi array . <nl> / / 7 . Repeat , while k < len . <nl> - for ( ; k < len ; k = k + 1 ) { <nl> - let fastO : FastJSArray = <nl> - Testify ( fastOWitness ) otherwise goto Bailout ( v , k ) ; <nl> - <nl> + for ( ; k < len ; k + + ) { <nl> / / Ensure that we haven ' t walked beyond a possibly updated length . <nl> - if ( k > = fastO . length ) goto Bailout ( v , k ) ; <nl> + if ( k > = fastO . Get ( ) . length ) goto Bailout ( v , k ) ; <nl> <nl> try { <nl> - const value : Object = <nl> - LoadElementNoHole < FixedArrayType > ( fastO , k ) otherwise FoundHole ; <nl> + const value : Object = LoadElementNoHole < FixedArrayType > ( fastO . Get ( ) , k ) <nl> + otherwise FoundHole ; <nl> const result : Object = <nl> - Call ( context , callbackfn , thisArg , value , k , fastO ) ; <nl> + Call ( context , callbackfn , thisArg , value , k , fastO . Get ( ) ) ; <nl> v . StoreResult ( k , result ) ; <nl> + fastO . Recheck ( ) otherwise goto Bailout ( v , k + 1 ) ; <nl> } <nl> label FoundHole { <nl> / / Our output array must necessarily be holey because of holes in <nl> mmm a / src / builtins / base . tq <nl> ppp b / src / builtins / base . tq <nl> CastHeapObject < FastJSArray > ( implicit context : Context ) ( o : HeapObject ) : <nl> } <nl> <nl> struct FastJSArrayWitness { <nl> - array : HeapObject ; <nl> - map : Map ; <nl> - } <nl> + constructor ( array : FastJSArray ) { <nl> + this . stable = this . unstable = array ; <nl> + this . map = array . map ; <nl> + } <nl> <nl> - macro MakeWitness ( array : FastJSArray ) : FastJSArrayWitness { <nl> - return FastJSArrayWitness { array , array . map } ; <nl> - } <nl> + Get ( ) : FastJSArray { <nl> + return this . unstable ; <nl> + } <nl> <nl> - macro Testify ( witness : FastJSArrayWitness ) : FastJSArray labels CastError { <nl> - if ( witness . array . map ! = witness . map ) goto CastError ; <nl> - / / We don ' t need to check elements kind or whether the prototype <nl> - / / has changed away from the default JSArray prototype , because <nl> - / / if the map remains the same then those properties hold . <nl> - / / <nl> - / / However , we have to make sure there are no elements in the <nl> - / / prototype chain . <nl> - if ( IsNoElementsProtectorCellInvalid ( ) ) goto CastError ; <nl> - return % RawObjectCast < FastJSArray > ( witness . array ) ; <nl> + Recheck ( ) labels CastError { <nl> + if ( this . stable . map ! = this . map ) goto CastError ; <nl> + / / We don ' t need to check elements kind or whether the prototype <nl> + / / has changed away from the default JSArray prototype , because <nl> + / / if the map remains the same then those properties hold . <nl> + / / <nl> + / / However , we have to make sure there are no elements in the <nl> + / / prototype chain . 
<nl> + if ( IsNoElementsProtectorCellInvalid ( ) ) goto CastError ; <nl> + this . unstable = % RawObjectCast < FastJSArray > ( this . stable ) ; <nl> + } <nl> + <nl> + stable : JSArray ; <nl> + unstable : FastJSArray ; <nl> + map : Map ; <nl> } <nl> <nl> CastHeapObject < FastJSArrayForCopy > ( implicit context : Context ) ( o : HeapObject ) : <nl>
[torque] improve witness struct
v8/v8
b2ce346d4720f9c50838531ad86003f2886de83b
2019-01-24T14:31:33Z
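
The Torque change above folds the MakeWitness/Testify macros into a FastJSArrayWitness struct with Get() and Recheck(). A rough C++ analogue of that witness idea — invented types, not V8 internals — is to hold a stable handle plus a cached shape tag, serve fast-path access through Get(), and revalidate the tag after any observable call before trusting the fast path again.

// Hedged C++ analogue of the witness pattern above; invented types, not V8 code.
#include <stdexcept>
#include <vector>

struct Array {
    std::vector<int> elems;
    int shape_tag = 0;                 // stands in for the hidden-class / map pointer
};

class ArrayWitness {
public:
    explicit ArrayWitness(Array& a) : array_(&a), tag_(a.shape_tag) {}

    Array& Get() { return *array_; }   // fast-path view, assumed still valid

    // Call after anything observable (e.g. a user callback) that could have
    // changed the array's shape; throws where Torque would jump to a label.
    void Recheck() const {
        if (array_->shape_tag != tag_) throw std::runtime_error("bailout");
    }

private:
    Array* array_;
    int tag_;
};

int main() {
    Array a;
    ArrayWitness w(a);
    w.Get().elems.push_back(1);        // fast-path access
    w.Recheck();                       // revalidate before the next fast-path use
    return 0;
}
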
mmm a / src / DataStreams / ya . make <nl> ppp b / src / DataStreams / ya . make <nl> LIBRARY ( ) <nl> <nl> PEERDIR ( <nl> clickhouse / src / Common <nl> + contrib / libs / poco / MongoDB <nl> ) <nl> <nl> NO_COMPILER_WARNINGS ( ) <nl> mmm a / src / Storages / ya . make <nl> ppp b / src / Storages / ya . make <nl> LIBRARY ( ) <nl> PEERDIR ( <nl> clickhouse / src / Common <nl> contrib / libs / sparsehash <nl> + contrib / libs / poco / MongoDB <nl> ) <nl> <nl> SRCS ( <nl>
Trying to fix ya.make
ClickHouse/ClickHouse
6b3092290ddeb5e18aed863ba999eba755d72c43
2020-06-26T16:33:51Z
mmm a / src / library . js <nl> ppp b / src / library . js <nl> var Library = { <nl> } <nl> return me . ret ; <nl> } , <nl> + <nl> + / / errno . h <nl> + <nl> + __errno_location : function ( ) { <nl> + var me = arguments . callee ; <nl> + if ( ! me . ret ) { <nl> + me . ret = Pointer_make ( [ 0 ] , 0 , ALLOC_STATIC ) ; <nl> + } <nl> + return me . ret ; <nl> + } , <nl> } ; <nl> <nl> load ( ' library_sdl . js ' ) ; <nl>
__errno_location
emscripten-core/emscripten
660398be701fa73c13d2828379404018d4292e0e
2010-12-10T04:51:52Z
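
The emscripten entry above supplies __errno_location, which returns a pointer to static storage. On glibc-style C libraries the errno macro commonly expands to a dereference of that function, which is why a libc port has to provide it. A small sketch of the relationship, using a stand-in name (my_errno_location) so it does not collide with the real symbol:

// Sketch of the errno / __errno_location relationship (illustrative, not libc source).
#include <cstdio>

static int errno_storage = 0;          // a real libc keeps one of these per thread

int* my_errno_location() {             // stand-in for the __errno_location the patch adds
    return &errno_storage;
}

// Many C libraries define the errno macro roughly as
//   #define errno (*__errno_location())
// so assigning to errno writes through exactly this pointer.

int main() {
    *my_errno_location() = 2;          // what a failing syscall wrapper would do
    std::printf("errno is now %d\n", *my_errno_location());
    return 0;
}
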
mmm a / test / cpp / tensorexpr / CMakeLists . txt <nl> ppp b / test / cpp / tensorexpr / CMakeLists . txt <nl> add_executable ( test_tensorexpr <nl> target_link_libraries ( test_tensorexpr PRIVATE torch gtest ) <nl> target_include_directories ( test_tensorexpr PRIVATE $ { ATen_CPU_INCLUDE } ) <nl> <nl> + add_executable ( tutorial_tensorexpr $ { TENSOREXPR_TEST_ROOT } / tutorial . cpp ) <nl> + target_link_libraries ( tutorial_tensorexpr PRIVATE torch ) <nl> + target_include_directories ( tutorial_tensorexpr PRIVATE $ { ATen_CPU_INCLUDE } ) <nl> + <nl> + <nl> if ( USE_CUDA ) <nl> target_link_libraries ( test_tensorexpr PRIVATE <nl> $ { CUDA_LIBRARIES } <nl> $ { CUDA_NVRTC_LIB } <nl> $ { CUDA_CUDA_LIB } <nl> $ { TORCH_CUDA_LIBRARIES } ) <nl> - <nl> target_compile_definitions ( test_tensorexpr PRIVATE USE_CUDA ) <nl> + <nl> + target_link_libraries ( tutorial_tensorexpr PRIVATE <nl> + $ { CUDA_LIBRARIES } <nl> + $ { CUDA_NVRTC_LIB } <nl> + $ { CUDA_CUDA_LIB } <nl> + $ { TORCH_CUDA_LIBRARIES } ) <nl> + target_compile_definitions ( tutorial_tensorexpr PRIVATE USE_CUDA ) <nl> elseif ( USE_ROCM ) <nl> target_link_libraries ( test_tensorexpr PRIVATE <nl> $ { ROCM_HIPRTC_LIB } <nl> $ { PYTORCH_HIP_HCC_LIBRARIES } <nl> $ { TORCH_CUDA_LIBRARIES } ) <nl> - <nl> target_link_libraries ( test_tensorexpr PRIVATE caffe2_gpu ) <nl> - <nl> target_compile_definitions ( test_tensorexpr PRIVATE USE_ROCM ) <nl> + <nl> + target_link_libraries ( tutorial_tensorexpr PRIVATE <nl> + $ { ROCM_HIPRTC_LIB } <nl> + $ { PYTORCH_HIP_HCC_LIBRARIES } <nl> + $ { TORCH_CUDA_LIBRARIES } ) <nl> + target_link_libraries ( tutorial_tensorexpr PRIVATE caffe2_gpu ) <nl> + target_compile_definitions ( tutorial_tensorexpr PRIVATE USE_ROCM ) <nl> endif ( ) <nl> <nl> if ( INSTALL_TEST ) <nl> install ( TARGETS test_tensorexpr DESTINATION bin ) <nl> + install ( TARGETS tutorial_tensorexpr DESTINATION bin ) <nl> # Install PDB files for MSVC builds <nl> if ( MSVC AND BUILD_SHARED_LIBS ) <nl> install ( FILES $ < TARGET_PDB_FILE : test_tensorexpr > DESTINATION bin OPTIONAL ) <nl> + install ( FILES $ < TARGET_PDB_FILE : tutorial_tensorexpr > DESTINATION bin OPTIONAL ) <nl> endif ( ) <nl> endif ( ) <nl> new file mode 100644 <nl> index 000000000000 . . f0bcfc4c2485 <nl> mmm / dev / null <nl> ppp b / test / cpp / tensorexpr / tutorial . cpp <nl> <nl> + / / * * * Tensor Expressions * * * <nl> + / / <nl> + / / This tutorial covers basics of NNC ' s tensor expressions , shows basic APIs to <nl> + / / work with them , and outlines how they are used in the overall TorchScript <nl> + / / compilation pipeline . This doc is permanently a " work in progress " since NNC <nl> + / / is under active development and things change fast . <nl> + / / <nl> + / / This Tutorial ' s code is compiled in the standard pytorch build , and the <nl> + / / executable can be found in ` build / bin / tutorial_tensorexpr ` . <nl> + / / <nl> + / / * * * What is NNC * * * <nl> + / / <nl> + / / NNC stands for Neural Net Compiler . It is a component of TorchScript JIT <nl> + / / and it performs on - the - fly code generation for kernels , which are often a <nl> + / / combination of multiple aten ( torch ) operators . <nl> + / / <nl> + / / When the JIT interpreter executes a torchscript model , it automatically <nl> + / / extracts subgraphs from the torchscript IR graph for which specialized code <nl> + / / can be JIT generated . 
This usually improves performance as the ' combined ' <nl> + / / kernel created from the subgraph could avoid unnecessary memory traffic that <nl> + / / is unavoidable when the subgraph is interpreted as - is , operator by operator . <nl> + / / This optimization is often referred to as ' fusion ' . Relatedly , the process of <nl> + / / finding and extracting subgraphs suitable for NNC code generation is done by <nl> + / / a JIT pass called ' fuser ' . <nl> + / / <nl> + / / * * * What is TE * * * <nl> + / / <nl> + / / TE stands for Tensor Expressions . TE is a commonly used approach for <nl> + / / compiling kernels performing tensor ( ~ matrix ) computation . The idea behind it <nl> + / / is that operators are represented as a mathematical formula describing what <nl> + / / computation they do ( as TEs ) and then the TE engine can perform mathematical <nl> + / / simplification and other optimizations using those formulas and eventually <nl> + / / generate executable code that would produce the same results as the original <nl> + / / sequence of operators , but more efficiently . <nl> + / / <nl> + / / NNC ' s design and implementation of TE was heavily inspired by Halide and TVM <nl> + / / projects . <nl> + # include < iostream > <nl> + # include < string > <nl> + <nl> + # include < torch / csrc / jit / tensorexpr / eval . h > <nl> + # include < torch / csrc / jit / tensorexpr / expr . h > <nl> + # include < torch / csrc / jit / tensorexpr / ir . h > <nl> + # include < torch / csrc / jit / tensorexpr / ir_printer . h > <nl> + # include < torch / csrc / jit / tensorexpr / loopnest . h > <nl> + # include < torch / csrc / jit / tensorexpr / stmt . h > <nl> + # include < torch / csrc / jit / tensorexpr / tensor . h > <nl> + <nl> + using namespace torch : : jit : : tensorexpr ; <nl> + <nl> + int main ( int argc , char * argv [ ] ) { <nl> + / / Memory management for tensor expressions is currently done with memory <nl> + / / arenas . That is , whenever an object is created it registers itself in an <nl> + / / arena and the object is kept alive as long as the arena is alive . When the <nl> + / / arena gets destructed , it deletes all objects registered in it . <nl> + / / <nl> + / / The easiest way to set up a memory arena is to use ` KernelScope ` class - it <nl> + / / is a resource guard that creates a new arena on construction and restores <nl> + / / the previously set arena on destruction . <nl> + / / <nl> + / / We will create a kernel scope here , and thus we ' ll set up a mem arena for <nl> + / / the entire tutorial . <nl> + KernelScope kernel_scope ; <nl> + <nl> + std : : cout < < " * * * Structure of tensor expressions * * * " < < std : : endl ; <nl> + { <nl> + / / A tensor expression is a tree of expressions . Each expression has a type , <nl> + / / and that type defines what sub - expressions it the current expression has . <nl> + / / For instance , an expression of type ' Mul ' would have a type ' kMul ' and <nl> + / / two subexpressions : LHS and RHS . Each of these two sub - expressions could <nl> + / / also be a ' Mul ' or some other expression . <nl> + / / <nl> + / / Let ' s construct a simple TE : <nl> + Expr * lhs = new IntImm ( 5 ) ; <nl> + Expr * rhs = new Var ( " x " , kInt ) ; <nl> + Expr * mul = new Mul ( lhs , rhs ) ; <nl> + std : : cout < < " Tensor expression : " < < * mul < < std : : endl ; <nl> + / / Prints : Tensor expression : 5 * x <nl> + <nl> + / / Here we created an expression representing a 5 * x computation , where x is <nl> + / / an int variable . 
<nl> + <nl> + / / Another , probably a more convenient , way to construct tensor expressions <nl> + / / is to use so called expression handles ( as opposed to raw expressions <nl> + / / like we did in the previous example ) . Expression handles overload common <nl> + / / operations and allow us to express the same semantics in a more natural <nl> + / / way : <nl> + ExprHandle l = 1 ; <nl> + ExprHandle r = Var : : make ( " x " , kInt ) ; <nl> + ExprHandle m = l * r ; <nl> + std : : cout < < " Tensor expression : " < < * m . node ( ) < < std : : endl ; <nl> + / / Prints : Tensor expression : 1 * x <nl> + <nl> + / / In a similar fashion we could construct arbitrarily complex expressions <nl> + / / using mathematical and logical operations , casts between various data <nl> + / / types , and a bunch of intrinsics . <nl> + ExprHandle a = Var : : make ( " a " , kInt ) ; <nl> + ExprHandle b = Var : : make ( " b " , kFloat ) ; <nl> + ExprHandle c = Var : : make ( " c " , kFloat ) ; <nl> + ExprHandle x = ExprHandle ( 5 ) * a + b / ( sigmoid ( c ) - 3 . 0f ) ; <nl> + std : : cout < < " Tensor expression : " < < * x . node ( ) < < std : : endl ; <nl> + / / Prints : Tensor expression : float ( 5 * a ) + b / ( ( sigmoid ( c ) ) - 3 . f ) <nl> + <nl> + / / An ultimate purpose of tensor expressions is to optimize tensor <nl> + / / computations , and in order to represent accesses to tensors data , there <nl> + / / is a special kind of expression - a load . <nl> + / / To construct a load we need two pieces : the base and the indices . The <nl> + / / base of a load is a Buf expression , which could be thought of as a <nl> + / / placeholder similar to Var , but with dimensions info . <nl> + / / <nl> + / / Let ' s construct a simple load : <nl> + BufHandle A ( " A " , { ExprHandle ( 64 ) , ExprHandle ( 32 ) } , kInt ) ; <nl> + ExprHandle i = Var : : make ( " i " , kInt ) , j = Var : : make ( " j " , kInt ) ; <nl> + ExprHandle load = Load : : make ( A . dtype ( ) , A , { i , j } , / * mask = * / 1 ) ; <nl> + std : : cout < < " Tensor expression : " < < * load . node ( ) < < std : : endl ; <nl> + / / Prints : Tensor expression : A [ i , j ] <nl> + } <nl> + <nl> + std : : cout < < " * * * Tensors , Functions , and Placeholders * * * " < < std : : endl ; <nl> + { <nl> + / / A tensor computation is represented by objects of Tensor class and <nl> + / / consists of the following pieces : <nl> + / / - domain , which is specified by a Buf expression <nl> + / / - an expression ( or several expressions if we want to perform several <nl> + / / independent computations over the same domain ) for its elements , as a <nl> + / / function of indices <nl> + / / <nl> + / / We use Function objects to represent this . Let ' s build one . <nl> + / / <nl> + / / First , we need to specify the domain , or dimensions in which the <nl> + / / computation would be performed . Let ' s create a 64x32 domain : <nl> + std : : vector < const Expr * > dims = { <nl> + new IntImm ( 64 ) , new IntImm ( 32 ) } ; / / IntImm stands for Integer Immediate <nl> + / / and represents an integer constant <nl> + <nl> + / / Next we need to create Function arguments . The arguments of a Function <nl> + / / are Vars , and they play role of placeholders . The computation that the <nl> + / / function would describe would use these arguments . 
<nl> + const Var * i = new Var ( " i " , kInt ) ; <nl> + const Var * j = new Var ( " j " , kInt ) ; <nl> + std : : vector < const Var * > args = { i , j } ; <nl> + <nl> + / / Now we can define the function computations using these arguments . Let ' s <nl> + / / create two computations , the first would add the arguments of the <nl> + / / function , the second would multiply them . <nl> + Expr * func_body1 = new Mul ( i , j ) ; <nl> + Expr * func_body2 = new Add ( i , j ) ; <nl> + <nl> + / / Finally , we pass all these pieces together to Function constructor : <nl> + Function * func = <nl> + new Function ( { " X " , " Y " } , dims , args , { func_body1 , func_body2 } ) ; <nl> + / / Under the hood function constructor would create separate ` Buf ` <nl> + / / expressions for each computation ( which can be accessed via <nl> + / / ` func - > func_var ( idx ) ` ) with the names specified by the first parameter of <nl> + / / the constructor call . In our example two ` Buf ` variables will be created <nl> + / / with names ' X ' and ' Y ' , each of them would signify a domain of 64x32 . <nl> + <nl> + / / We can now print out our function : <nl> + std : : cout < < " Tensor function : " < < * func < < std : : endl ; <nl> + / / Prints : <nl> + / / Tensor function : Function F ( i [ 64 ] , j [ 32 ] ) { <nl> + / / X = i * j <nl> + / / Y = i + j <nl> + / / } <nl> + <nl> + / / A Tensor refers to an individual computation defined by a Function . For <nl> + / / instance , we could create a following tensor given the function above : <nl> + int output_idx = 0 ; / / Used to index the computation <nl> + Tensor * X = new Tensor ( func , output_idx ) ; <nl> + std : : cout < < " Tensor computation : " < < * X < < std : : endl ; <nl> + / / Prints : Tensor computation : Tensor X ( i [ 64 ] , j [ 32 ] ) = i * j <nl> + <nl> + / / Similarly to how we provide a more convenient way of using handles for <nl> + / / constructing Exprs , Tensors also have a more convenient API for <nl> + / / construction . It is based on Compute functions , which take a name : <nl> + / / dimensions , and a lambda specifying the computation body : <nl> + Tensor * Z = Compute ( <nl> + " Z " , <nl> + { { 64 , " i " } , { 32 , " j " } } , <nl> + [ ] ( const VarHandle & i , const VarHandle & j ) { return i / j ; } ) ; <nl> + std : : cout < < " Tensor computation : " < < * Z < < std : : endl ; <nl> + / / Prints : Tensor computation : Tensor Z ( i [ 64 ] , j [ 32 ] ) = i / j <nl> + <nl> + / / Tensors might access other tensors and external placeholders in their <nl> + / / expressions . It can be done like so : <nl> + Placeholder P ( " P " , kFloat , { 64 , 32 } ) ; <nl> + Tensor * R = Compute ( <nl> + " R " , <nl> + { { 64 , " i " } , { 32 , " j " } } , <nl> + [ & ] ( const VarHandle & i , const VarHandle & j ) { <nl> + return Z - > call ( i , j ) * P . load ( i , j ) ; <nl> + } ) ; <nl> + std : : cout < < " Tensor computation : " < < * R < < std : : endl ; <nl> + / / Prints : Tensor computation : Tensor R ( i [ 64 ] , j [ 32 ] ) = Z ( i , j ) * P [ i , j ] <nl> + <nl> + / / Placeholders could be thought of as external tensors , i . e . tensors for <nl> + / / which we don ' t have the element expression . In other words , for ` Tensor ` <nl> + / / we know an expression specifying how its elements can be computed ( a <nl> + / / mathematical formula ) . For external tensors , or placeholders , we don ' t <nl> + / / have such an expression . 
They need to be considered as coming to us as <nl> + / / inputs from outside - we can only load data from them . <nl> + / / <nl> + / / Also note that we use ' call ' to construct an access to an element of a <nl> + / / Tensor and we use ' load ' for accessing elements of an external tensor <nl> + / / through its Placeholder . This is an implementation detail and could be <nl> + / / changed in future . <nl> + / / <nl> + / / Why do we have Functions and Tensors and what is the relationship between <nl> + / / them ? Functions are used to represent several computations performed over <nl> + / / the same domain . Tensors refer to individual computations of a Function . <nl> + / / <nl> + / / Also note that currently a lot of code only supports single - output <nl> + / / Functions , in which case they become almost identical to Tensors . This <nl> + / / probably will be changed in future . <nl> + <nl> + / / TODO : Show how reductions are represented and constructed <nl> + } <nl> + <nl> + std : : cout < < " * * * Loopnests and Statements * * * " < < std : : endl ; <nl> + { <nl> + / / Creating a tensor expression is the first step to generate an executable <nl> + / / code for it . A next step is to represent it as a loop nest and apply <nl> + / / various loop transformations in order to get an optimal implementation . <nl> + / / In Halide ' s or TVM ' s terms the first step was to define the algorithm of <nl> + / / computation ( what to compute ? ) and now we are getting to the schedule of <nl> + / / the computation ( how to compute ? ) . <nl> + / / <nl> + / / Let ' s create a simple tensor expression and construct a loop nest for it . <nl> + Placeholder A ( " A " , kFloat , { 64 , 32 } ) ; <nl> + Placeholder B ( " B " , kFloat , { 64 , 32 } ) ; <nl> + Tensor * X = Compute ( <nl> + " X " , <nl> + { { 64 , " i " } , { 32 , " j " } } , <nl> + [ & ] ( const VarHandle & i , const VarHandle & j ) { <nl> + return A . load ( i , j ) + B . load ( i , j ) ; <nl> + } ) ; <nl> + Tensor * Y = Compute ( <nl> + " Y " , <nl> + { { 64 , " i " } , { 32 , " j " } } , <nl> + [ & ] ( const VarHandle & i , const VarHandle & j ) { <nl> + return sigmoid ( X - > call ( i , j ) ) ; <nl> + } ) ; <nl> + std : : cout < < " Tensor computation X : " < < * X <nl> + < < " Tensor computation Y : " < < * Y < < std : : endl ; <nl> + / / Prints : <nl> + / / Tensor computation X : Tensor X ( i [ 64 ] , j [ 32 ] ) = ( A [ i , j ] ) + ( B [ i , j ] ) <nl> + / / Tensor computation Y : Tensor Y ( i [ 64 ] , j [ 32 ] ) = sigmoid ( X ( i , j ) ) <nl> + <nl> + / / Creating a loop nest is as quite simple , we just need to specify what are <nl> + / / the output tensors in our computation and LoopNest object will <nl> + / / automatically pull all tensor dependencies : <nl> + LoopNest loopnest ( { Y } ) ; <nl> + <nl> + / / An IR used in LoopNest is based on tensor statements , represented by <nl> + / / ` Stmt ` class . Statements are used to specify the loop nest structure , and <nl> + / / to take a sneak peek at them , let ' s print out what we got right after <nl> + / / creating our LoopNest object : <nl> + std : : cout < < * loopnest . 
root_stmt ( ) < < std : : endl ; <nl> + / / Prints : <nl> + / / { <nl> + / / for ( int i = 0 ; i < 64 ; i + + ) { <nl> + / / for ( int j = 0 ; j < 32 ; j + + ) { <nl> + / / X [ i , j ] = ( A [ i , j ] ) + ( B [ i , j ] ) ; <nl> + / / } <nl> + / / } <nl> + / / for ( int i_1 = 0 ; i_1 < 64 ; i_1 + + ) { <nl> + / / for ( int j_1 = 0 ; j_1 < 32 ; j_1 + + ) { <nl> + / / Y [ i_1 , j_1 ] = sigmoid ( X ( i_1 , j_1 ) ) ; <nl> + / / } <nl> + / / } <nl> + / / } <nl> + <nl> + / / To introduce statements let ' s first look at their three main types ( in <nl> + / / fact , there are more than 3 types , but the other types would be easy to <nl> + / / understand once the overall structure is clear ) : <nl> + / / 1 ) Block <nl> + / / 2 ) For <nl> + / / 3 ) Store <nl> + / / <nl> + / / A ` Block ` statement is simply a list of other statements . <nl> + / / A ` For ` is a statement representing one axis of computation . It contains <nl> + / / an index variable ( Var ) , boundaries of the axis ( start and end - both are <nl> + / / ` Expr ` s ) , and a ` Block ` statement body . <nl> + / / A ` Store ` represents an assignment to a tensor element . It contains a Buf <nl> + / / representing the target tensor , a list of expressions for indices of the <nl> + / / element , and the value to be stored , which is an arbitrary expression . <nl> + <nl> + / / Once we ' ve constructed the loop nest , we can apply various tranformations <nl> + / / to it . To begin with , let ' s inline computation of X into computation of Y <nl> + / / and see what happens to our statements . <nl> + loopnest . computeInline ( loopnest . getLoopBodyFor ( X ) ) ; <nl> + std : : cout < < * loopnest . root_stmt ( ) < < std : : endl ; <nl> + / / Prints : <nl> + / / { <nl> + / / for ( int i = 0 ; i < 64 ; i + + ) { <nl> + / / for ( int j = 0 ; j < 32 ; j + + ) { <nl> + / / Y [ i , j ] = sigmoid ( ( A [ i , j ] ) + ( B [ i , j ] ) ) ; <nl> + / / } <nl> + / / } <nl> + / / } <nl> + / / <nl> + / / As you can see , the first two loops have disappeared and the expression <nl> + / / for X [ i , j ] has been inserted into the Y [ i , j ] computation . <nl> + <nl> + / / Loop transformations can be composed , so we can do something else with <nl> + / / our loop nest now . Let ' s split the inner loop with a factor of 9 , for <nl> + / / instance . <nl> + std : : vector < For * > loops = loopnest . getLoopStmtsFor ( Y ) ; <nl> + For * j_outer ; <nl> + For * j_inner ; <nl> + For * j_tail ; <nl> + int split_factor = 9 ; <nl> + loopnest . splitWithTail ( <nl> + loops [ 1 ] , / / loops [ 0 ] is the outer loop , loops [ 1 ] is inner <nl> + split_factor , <nl> + & j_outer , / / These are handles that we would be using for <nl> + & j_inner , / / further transformations <nl> + & j_tail ) ; <nl> + std : : cout < < * loopnest . root_stmt ( ) < < std : : endl ; <nl> + / / Prints : <nl> + / / { <nl> + / / for ( int i = 0 ; i < 64 ; i + + ) { <nl> + / / for ( int j_outer = 0 ; j_outer < ( 32 - 0 ) / 9 ; j_outer + + ) { <nl> + / / for ( int j_inner = 0 ; j_inner < 9 ; j_inner + + ) { <nl> + / / Y [ i , j_outer * 9 + j_inner ] = sigmoid ( ( A [ i , j_outer * 9 + . . . <nl> + / / } <nl> + / / } <nl> + / / for ( int j_tail = 0 ; j_tail < ( 32 - 0 ) % 9 ; j_tail + + ) { <nl> + / / Y [ i , j_tail + ( ( 32 - 0 ) / 9 ) * 9 ] = sigmoid ( ( A [ i , j_tail + . . . 
<nl> + / / } <nl> + / / } <nl> + / / } <nl> + <nl> + / / TODO : List all available transformations <nl> + / / TODO : Show how statements can be constructed manually <nl> + } <nl> + <nl> + std : : cout < < " * * * Codegen * * * " < < std : : endl ; <nl> + { <nl> + / / An ultimate goal of tensor expressions is to be provide a mechanism to <nl> + / / execute a given computation in the fastest possible way . So far we ' ve <nl> + / / looked at how we could describe what computation we ' re interested in , but <nl> + / / we haven ' t looked at how to actually execute it . So far all we ' ve been <nl> + / / dealing with was just symbols with no actual data associated , in this <nl> + / / section we would look at how we can bridge that gap . <nl> + <nl> + / / Let ' s start by constructing a simple computation for us to work with : <nl> + Placeholder A ( " A " , kInt , { 64 , 32 } ) ; <nl> + Placeholder B ( " B " , kInt , { 64 , 32 } ) ; <nl> + Tensor * X = Compute ( <nl> + " X " , <nl> + { { 64 , " i " } , { 32 , " j " } } , <nl> + [ & ] ( const VarHandle & i , const VarHandle & j ) { <nl> + return A . load ( i , j ) + B . load ( i , j ) ; <nl> + } ) ; <nl> + <nl> + / / And let ' s lower it to a loop nest , as we did in the previous section : <nl> + LoopNest loopnest ( { X } ) ; <nl> + std : : cout < < * loopnest . root_stmt ( ) < < std : : endl ; <nl> + / / Prints : <nl> + / / { <nl> + / / for ( int i = 0 ; i < 64 ; i + + ) { <nl> + / / for ( int j = 0 ; j < 32 ; j + + ) { <nl> + / / X [ i , j ] = ( A [ i , j ] ) + ( B [ i , j ] ) ; <nl> + / / } <nl> + / / } <nl> + <nl> + / / Now imagine that we have two actual tensors 64x32 that we want sum <nl> + / / together , how do we pass those tensors to the computation and how do we <nl> + / / carry it out ? <nl> + / / <nl> + / / Codegen object is aimed at providing exactly that functionality . Codegen <nl> + / / is an abstract class and concrete codegens are derived from it . <nl> + / / Currently , we have three codegens : <nl> + / / 1 ) Simple Evaluator , <nl> + / / 2 ) LLVM Codegen for CPU , <nl> + / / 3 ) CUDA Codegen . <nl> + / / In this example we will be using Simple Evaluator , since it ' s available <nl> + / / everywhere . <nl> + <nl> + / / To create a codegen , we need to provide the statement - it specifies the <nl> + / / computation we want to perform - and a list of placeholders and tensors <nl> + / / used in the computation . The latter part is crucial since that ' s the only <nl> + / / way the codegen could use to correlate symbols in the statement to actual <nl> + / / data arrays that we will be passing when we will actually be performing <nl> + / / the computation . <nl> + / / <nl> + / / Let ' s create a Simple IR Evaluator codegen for our computation : <nl> + SimpleIREvaluator ir_eval ( loopnest . root_stmt ( ) , { A , B , X } ) ; <nl> + <nl> + / / We are using the simplest codegen and in it almost no work is done at the <nl> + / / construction step . Real codegens such as CUDA and LLVM perform <nl> + / / compilation during that stage so that when we ' re about to run the <nl> + / / computation everything is ready . 
<nl> + <nl> + / / Let ' s now create some inputs and run our computation with them : <nl> + std : : vector < int > data_A ( 64 * 32 , 3 ) ; / / This will be the input A <nl> + std : : vector < int > data_B ( 64 * 32 , 5 ) ; / / This will be the input B <nl> + std : : vector < int > data_X ( 64 * 32 , 0 ) ; / / This will be used for the result <nl> + <nl> + / / Now let ' s invoke our codegen to perform the computation on our data . We <nl> + / / need to provide as many arguments as how many placeholders and tensors we <nl> + / / passed at the codegen construction time . A position in these lists would <nl> + / / define how real data arrays from the latter call ( these arguments are <nl> + / / referred to as ' CallArg ' s in our codebase ) correspond to symbols <nl> + / / ( placeholders and tensors ) used in the tensor expressions we constructed <nl> + / / ( these are referred to as ' BufferArg ' ) . <nl> + / / Thus , we will provide three arguments : data_A , data_B , and data_X . data_A <nl> + / / contains data for the placeholder A , data_B - for the placeholder B , and <nl> + / / data_X would be used for contents of tensor X . <nl> + ir_eval ( data_A , data_B , data_X ) ; <nl> + <nl> + / / Let ' s print one of the elements from each array to verify that the <nl> + / / computation did happen : <nl> + std : : cout < < " A [ 10 ] = " < < data_A [ 10 ] < < std : : endl <nl> + < < " B [ 10 ] = " < < data_B [ 10 ] < < std : : endl <nl> + < < " X [ 10 ] = A [ 10 ] + B [ 10 ] = " < < data_X [ 10 ] < < std : : endl ; <nl> + / / Prints : <nl> + / / A [ 10 ] = 3 <nl> + / / B [ 10 ] = 5 <nl> + / / X [ 10 ] = A [ 10 ] + B [ 10 ] = 8 <nl> + } <nl> + <nl> + / / TODO : Show how TorchScript IR is translated to TE <nl> + return 0 ; <nl> + } <nl>
[TensorExpr] Add a tensor expressions tutorial. ()
pytorch/pytorch
75fc2635798b9e93cc09b359294e5b86a77a369a
2020-10-01T02:35:58Z
mmm a / python / caffe / _caffe . cpp <nl> ppp b / python / caffe / _caffe . cpp <nl> <nl> / / caffe : : Caffe functions so that one could easily call it from Python . <nl> / / Note that for Python , we will simply use float as the data type . <nl> <nl> + # include < Python . h > / / NOLINT ( build / include_alpha ) <nl> + <nl> + # include < boost / make_shared . hpp > <nl> # include < boost / python / suite / indexing / vector_indexing_suite . hpp > <nl> - # include < boost / make_shared . hpp > / / NOLINT ( build / include_alpha ) <nl> <nl> / / these need to be included after boost on OS X <nl> # include < string > / / NOLINT ( build / include_order ) <nl> mmm a / python / caffe / _caffe . hpp <nl> ppp b / python / caffe / _caffe . hpp <nl> <nl> <nl> # define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION <nl> <nl> + # include < Python . h > / / NOLINT ( build / include_alpha ) <nl> + <nl> # include < boost / python . hpp > <nl> # include < boost / shared_ptr . hpp > <nl> # include < numpy / arrayobject . h > <nl>
[fix] include Python.h instead of re-ordering for pycaffe build on OS X
BVLC/caffe
aee2efb0019b07cf9323a96e8f0fba469bead369
2014-10-03T20:53:52Z
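
The caffe fix above includes Python.h ahead of the Boost headers instead of reordering everything else. CPython's headers may define pre-processor macros (for example feature-test macros) that change how later standard headers behave, which is why Python.h is conventionally included first. A minimal sketch of the convention (illustrative, not caffe code; needs the Python development headers to build):

// Include-order convention shown by the patch above (illustrative, not caffe code).
// Python.h can define macros that affect the standard headers, so it goes first.
#include <Python.h>                    // first

#include <string>                      // standard (and third-party) headers afterwards

int main() {
    Py_Initialize();                   // trivial use so the sketch is a full program
    Py_Finalize();
    return 0;
}
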
mmm a / test / SILOptimizer / definite_init_markuninitialized_derivedself . sil <nl> ppp b / test / SILOptimizer / definite_init_markuninitialized_derivedself . sil <nl> bb0 ( % 0 : @ owned $ DerivedClassWithIVars ) : <nl> % 14 = function_ref @ superinit : $ @ convention ( method ) ( @ owned RootClassWithIVars ) - > @ owned RootClassWithIVars <nl> % 15 = apply % 14 ( % 13 ) : $ @ convention ( method ) ( @ owned RootClassWithIVars ) - > @ owned RootClassWithIVars / / expected - error { { property ' self . a ' not initialized at super . init call } } <nl> % 16 = unchecked_ref_cast % 15 : $ RootClassWithIVars to $ DerivedClassWithIVars <nl> - assign % 16 to % 3 : $ * DerivedClassWithIVars <nl> + store % 16 to [ init ] % 3 : $ * DerivedClassWithIVars <nl> <nl> % 18 = load [ copy ] % 3 : $ * DerivedClassWithIVars <nl> destroy_value % 1 : $ < τ_0_0 > { var τ_0_0 } < DerivedClassWithIVars > <nl> bb0 ( % 0 : @ owned $ DerivedClassWithIVars , % i : @ trivial $ Int ) : <nl> / / Call super . init . <nl> % 15 = apply % 14 ( % 13 ) : $ @ convention ( method ) ( @ owned RootClassWithIVars ) - > @ owned RootClassWithIVars <nl> % 16 = unchecked_ref_cast % 15 : $ RootClassWithIVars to $ DerivedClassWithIVars <nl> - assign % 16 to % 3 : $ * DerivedClassWithIVars <nl> + store % 16 to [ init ] % 3 : $ * DerivedClassWithIVars <nl> % 18 = load [ copy ] % 3 : $ * DerivedClassWithIVars <nl> destroy_value % 1 : $ < τ_0_0 > { var τ_0_0 } < DerivedClassWithIVars > <nl> return % 18 : $ DerivedClassWithIVars <nl>
DI: Tweak definite_init_markuninitialized_derivedself.sil test to match what SILGen actually emits
apple/swift
d5a21fa1e056de3aa7401c12b054cf5a1927ac72
2017-10-15T06:52:08Z
mmm a / modules / video / src / simpleflow . cpp <nl> ppp b / modules / video / src / simpleflow . cpp <nl> static void crossBilateralFilter ( const Mat & image , <nl> <nl> multiply ( weights , confidence_extended ( window_rows , window_cols ) , weights ) ; <nl> multiply ( weights , weights_space , weights ) ; <nl> - float weights_sum = sum ( weights ) [ 0 ] ; <nl> + float weights_sum = ( float ) sum ( weights ) [ 0 ] ; <nl> <nl> for ( int ch = 0 ; ch < 2 ; + + ch ) { <nl> multiply ( weights , image_extended_channels [ ch ] ( window_rows , window_cols ) , weighted_sum ) ; <nl> - float total_sum = sum ( weighted_sum ) [ 0 ] ; <nl> + float total_sum = ( float ) sum ( weighted_sum ) [ 0 ] ; <nl> <nl> dst . at < Vec2f > ( row , col ) [ ch ] = ( flag & & fabs ( weights_sum ) < 1e - 9 ) <nl> ? image . at < float > ( row , col ) <nl> static void calcConfidence ( const Mat & prev , <nl> for ( int r0 = 0 ; r0 < rows ; + + r0 ) { <nl> for ( int c0 = 0 ; c0 < cols ; + + c0 ) { <nl> Vec2f flow_at_point = flow . at < Vec2f > ( r0 , c0 ) ; <nl> - int u0 = floor ( flow_at_point [ 0 ] + 0 . 5 ) ; <nl> + int u0 = cvRound ( flow_at_point [ 0 ] ) ; <nl> if ( r0 + u0 < 0 ) { u0 = - r0 ; } <nl> if ( r0 + u0 > = rows ) { u0 = rows - 1 - r0 ; } <nl> - int v0 = floor ( flow_at_point [ 1 ] + 0 . 5 ) ; <nl> + int v0 = cvRound ( flow_at_point [ 1 ] ) ; <nl> if ( c0 + v0 < 0 ) { v0 = - c0 ; } <nl> if ( c0 + v0 > = cols ) { v0 = cols - 1 - c0 ; } <nl> <nl> CV_EXPORTS_W void calcOpticalFlowSF ( Mat & from , <nl> <nl> removeOcclusions ( flow , <nl> flow_inv , <nl> - occ_thr , <nl> + ( float ) occ_thr , <nl> confidence ) ; <nl> <nl> removeOcclusions ( flow_inv , <nl> flow , <nl> - occ_thr , <nl> + ( float ) occ_thr , <nl> confidence_inv ) ; <nl> <nl> Mat speed_up = Mat : : zeros ( curr_from . size ( ) , CV_8U ) ; <nl> CV_EXPORTS_W void calcOpticalFlowSF ( Mat & from , <nl> flow , <nl> averaging_radius , <nl> max_flow , <nl> - sigma_dist , <nl> - sigma_color ) ; <nl> + ( float ) sigma_dist , <nl> + ( float ) sigma_color ) ; <nl> <nl> calcConfidence ( curr_to , curr_from , flow_inv , confidence_inv , max_flow ) ; <nl> calcOpticalFlowSingleScaleSF ( curr_to_extended , <nl> CV_EXPORTS_W void calcOpticalFlowSF ( Mat & from , <nl> flow_inv , <nl> averaging_radius , <nl> max_flow , <nl> - sigma_dist , <nl> - sigma_color ) ; <nl> + ( float ) sigma_dist , <nl> + ( float ) sigma_color ) ; <nl> <nl> extrapolateFlow ( flow , speed_up ) ; <nl> extrapolateFlow ( flow_inv , speed_up_inv ) ; <nl> CV_EXPORTS_W void calcOpticalFlowSF ( Mat & from , <nl> } <nl> <nl> crossBilateralFilter ( flow , curr_from , confidence , flow , <nl> - postprocess_window , sigma_color_fix , sigma_dist_fix ) ; <nl> + postprocess_window , ( float ) sigma_color_fix , ( float ) sigma_dist_fix ) ; <nl> <nl> GaussianBlur ( flow , flow , Size ( 3 , 3 ) , 5 ) ; <nl> <nl>
Small fixes
opencv/opencv
9d1aa37e214485e18793df09ad473dc76d8c548c
2012-09-05T10:19:44Z
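
The OpenCV change above makes the double-to-float narrowing explicit (cv::sum yields double-precision scalars) and replaces floor(x + 0.5) with cvRound. A standalone sketch of the two idioms, with std::lround standing in for cvRound (the exact tie-breaking behaviour of cvRound may differ):

// Standalone sketch of the idioms touched by the patch above (not OpenCV code).
#include <cmath>
#include <cstdio>

int main() {
    double scalar_sum = 123.456;               // cv::sum(...)[0] yields a double
    float weights_sum = (float)scalar_sum;     // explicit cast: narrowing is intentional

    double flow = 2.7;
    int old_way = (int)std::floor(flow + 0.5); // the replaced floor(x + 0.5) idiom
    long new_way = std::lround(flow);          // library rounding, akin to cvRound
    std::printf("%f %d %ld\n", weights_sum, old_way, new_way);
    return 0;
}
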
mmm a / vnpy / app / option_master / ui / widget . py <nl> ppp b / vnpy / app / option_master / ui / widget . py <nl> <nl> from vnpy . trader . constant import Direction , Offset , OrderType <nl> from vnpy . trader . object import OrderRequest , ContractData , TickData <nl> from vnpy . trader . event import EVENT_TICK <nl> + from vnpy . trader . utility import get_digits <nl> <nl> from . . base import APP_NAME , EVENT_OPTION_NEW_PORTFOLIO <nl> from . . engine import OptionEngine , PRICING_MODELS <nl> def __init__ ( self , option_engine : OptionEngine , portfolio_name : str ) : <nl> self . event_engine : EventEngine = option_engine . event_engine <nl> <nl> self . contracts : Dict [ str , ContractData ] = { } <nl> - self . vt_symbol = " " <nl> + self . vt_symbol : str = " " <nl> + self . price_digits : int = 0 <nl> <nl> self . init_ui ( ) <nl> self . init_contracts ( ) <nl> + self . connect_signal ( ) <nl> <nl> def init_ui ( self ) - > None : <nl> " " " " " " <nl> def _update_symbol ( self ) - > None : <nl> <nl> vt_symbol = contract . vt_symbol <nl> self . vt_symbol = vt_symbol <nl> + self . price_digits = get_digits ( contract . pricetick ) <nl> <nl> tick = self . main_engine . get_tick ( vt_symbol ) <nl> if tick : <nl> self . update_tick ( tick ) <nl> <nl> - self . event_engine . unregister ( EVENT_TICK + vt_symbol , self . process_tick_event ) <nl> + print ( EVENT_TICK + vt_symbol ) <nl> + self . event_engine . register ( EVENT_TICK + vt_symbol , self . process_tick_event ) <nl> <nl> def create_label ( <nl> self , <nl> def create_label ( <nl> def process_tick_event ( self , event : Event ) - > None : <nl> " " " " " " <nl> tick = event . data <nl> - <nl> if tick . vt_symbol ! = self . vt_symbol : <nl> return <nl> - <nl> self . signal_tick . emit ( tick ) <nl> - <nl> + <nl> def update_tick ( self , tick : TickData ) - > None : <nl> " " " " " " <nl> - self . lp_label . setText ( str ( tick . last_price ) ) <nl> - self . bp1_label . setText ( str ( tick . bid_price_1 ) ) <nl> + price_digits = self . price_digits <nl> + <nl> + self . lp_label . setText ( f " { tick . last_price : . { price_digits } f } " ) <nl> + self . bp1_label . setText ( f " { tick . bid_price_1 : . { price_digits } f } " ) <nl> self . bv1_label . setText ( str ( tick . bid_volume_1 ) ) <nl> - self . ap1_label . setText ( str ( tick . ask_price_1 ) ) <nl> + self . ap1_label . setText ( f " { tick . ask_price_1 : . { price_digits } f } " ) <nl> self . av1_label . setText ( str ( tick . ask_volume_1 ) ) <nl> <nl> if tick . pre_close : <nl> def update_tick ( self , tick : TickData ) - > None : <nl> self . return_label . setText ( f " { r : . 2f } % " ) <nl> <nl> if tick . bid_price_2 : <nl> - self . bp2_label . setText ( str ( tick . bid_price_2 ) ) <nl> + self . bp2_label . setText ( f " { tick . bid_price_2 : . { price_digits } f } " ) <nl> self . bv2_label . setText ( str ( tick . bid_volume_2 ) ) <nl> - self . ap2_label . setText ( str ( tick . ask_price_2 ) ) <nl> + self . ap2_label . setText ( f " { tick . ask_price_2 : . { price_digits } f } " ) <nl> self . av2_label . setText ( str ( tick . ask_volume_2 ) ) <nl> <nl> - self . bp3_label . setText ( str ( tick . bid_price_3 ) ) <nl> + self . bp3_label . setText ( f " { tick . bid_price_3 : . { price_digits } f } " ) <nl> self . bv3_label . setText ( str ( tick . bid_volume_3 ) ) <nl> - self . ap3_label . setText ( str ( tick . ask_price_3 ) ) <nl> + self . ap3_label . setText ( f " { tick . ask_price_3 : . { price_digits } f } " ) <nl> self . av3_label . 
setText ( str ( tick . ask_volume_3 ) ) <nl> <nl> - self . bp4_label . setText ( str ( tick . bid_price_4 ) ) <nl> + self . bp4_label . setText ( f " { tick . bid_price_4 : . { price_digits } f } " ) <nl> self . bv4_label . setText ( str ( tick . bid_volume_4 ) ) <nl> - self . ap4_label . setText ( str ( tick . ask_price_4 ) ) <nl> + self . ap4_label . setText ( f " { tick . ask_price_4 : . { price_digits } f } " ) <nl> self . av4_label . setText ( str ( tick . ask_volume_4 ) ) <nl> <nl> - self . bp5_label . setText ( str ( tick . bid_price_5 ) ) <nl> + self . bp5_label . setText ( f " { tick . bid_price_5 : . { price_digits } f } " ) <nl> self . bv5_label . setText ( str ( tick . bid_volume_5 ) ) <nl> - self . ap5_label . setText ( str ( tick . ask_price_5 ) ) <nl> + self . ap5_label . setText ( f " { tick . ask_price_5 : . { price_digits } f } " ) <nl> self . av5_label . setText ( str ( tick . ask_volume_5 ) ) <nl> <nl> def clear_data ( self ) - > None : <nl>
[Mod] display price data rounded to pricetick
vnpy/vnpy
1805eb29cce67a6e1383b51e862fe2475c38f277
2020-05-13T13:44:08Z
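
The vnpy change above derives the number of decimal places from the contract's price tick (get_digits) and formats quotes to that precision instead of using str(). The same idea in a small C++ sketch; the digit-counting helper below is an invented stand-in for get_digits, not vnpy code:

// Illustrative C++ analogue of formatting quotes to the instrument's price tick.
#include <cmath>
#include <cstdio>

// Decimal places needed for the tick, e.g. 0.5 -> 1, 0.001 -> 3
// (invented helper standing in for vnpy's get_digits).
static int price_digits(double pricetick) {
    int digits = 0;
    while (digits < 10 && std::fabs(pricetick - std::round(pricetick)) > 1e-9) {
        pricetick *= 10.0;
        ++digits;
    }
    return digits;
}

int main() {
    double pricetick = 0.001;
    double last_price = 3612.4;
    std::printf("%.*f\n", price_digits(pricetick), last_price);  // prints 3612.400
    return 0;
}
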
mmm a / data / gui . xml <nl> ppp b / data / gui . xml <nl> <nl> < item command = " LayerVisibility " text = " & amp ; Visible " / > <nl> < separator / > <nl> < item command = " NewLayer " text = " & amp ; New Layer " / > <nl> - < item command = " NewLayerGroup " text = " New & amp ; Group " / > <nl> + < item command = " NewGroup " text = " New & amp ; Group " / > <nl> < item command = " RemoveLayer " text = " & amp ; Remove Layer " / > <nl> < item command = " BackgroundFromLayer " text = " & amp ; Background from Layer " / > <nl> < item command = " LayerFromBackground " text = " & amp ; Layer from Background " / > <nl> mmm a / src / app / CMakeLists . txt <nl> ppp b / src / app / CMakeLists . txt <nl> add_library ( app - lib <nl> commands / cmd_new_file . cpp <nl> commands / cmd_new_frame . cpp <nl> commands / cmd_new_frame_tag . cpp <nl> + commands / cmd_new_group . cpp <nl> commands / cmd_new_layer . cpp <nl> - commands / cmd_new_layer_set . cpp <nl> commands / cmd_new_sprite_from_selection . cpp <nl> commands / cmd_onionskin . cpp <nl> commands / cmd_open_file . cpp <nl> similarity index 77 % <nl> rename from src / app / commands / cmd_new_layer_set . cpp <nl> rename to src / app / commands / cmd_new_group . cpp <nl> mmm a / src / app / commands / cmd_new_layer_set . cpp <nl> ppp b / src / app / commands / cmd_new_group . cpp <nl> namespace app { <nl> <nl> using namespace ui ; <nl> <nl> - class NewLayerGroupCommand : public Command { <nl> + class NewGroupCommand : public Command { <nl> public : <nl> - NewLayerGroupCommand ( ) ; <nl> - Command * clone ( ) const override { return new NewLayerGroupCommand ( * this ) ; } <nl> + NewGroupCommand ( ) ; <nl> + Command * clone ( ) const override { return new NewGroupCommand ( * this ) ; } <nl> <nl> protected : <nl> bool onEnabled ( Context * context ) override ; <nl> void onExecute ( Context * context ) override ; <nl> } ; <nl> <nl> - NewLayerGroupCommand : : NewLayerGroupCommand ( ) <nl> - : Command ( " NewLayerGroup " , <nl> + NewGroupCommand : : NewGroupCommand ( ) <nl> + : Command ( " NewGroup " , <nl> " New Layer Group " , <nl> CmdRecordableFlag ) <nl> { <nl> } <nl> <nl> - bool NewLayerGroupCommand : : onEnabled ( Context * context ) <nl> + bool NewGroupCommand : : onEnabled ( Context * context ) <nl> { <nl> return context - > checkFlags ( ContextFlags : : ActiveDocumentIsWritable | <nl> ContextFlags : : HasActiveSprite ) ; <nl> } <nl> <nl> - void NewLayerGroupCommand : : onExecute ( Context * context ) <nl> + void NewGroupCommand : : onExecute ( Context * context ) <nl> { <nl> ContextWriter writer ( context ) ; <nl> Document * document ( writer . document ( ) ) ; <nl> void NewLayerGroupCommand : : onExecute ( Context * context ) <nl> update_screen_for_document ( document ) ; <nl> <nl> StatusBar : : instance ( ) - > invalidate ( ) ; <nl> - StatusBar : : instance ( ) - > showTip ( 1000 , " Layer ` % s ' created " , name . c_str ( ) ) ; <nl> + StatusBar : : instance ( ) - > showTip ( 1000 , " Group ` % s ' created " , name . c_str ( ) ) ; <nl> } <nl> <nl> - Command * CommandFactory : : createNewLayerGroupCommand ( ) <nl> + Command * CommandFactory : : createNewGroupCommand ( ) <nl> { <nl> - return new NewLayerGroupCommand ; <nl> + return new NewGroupCommand ; <nl> } <nl> <nl> } / / namespace app <nl> mmm a / src / app / commands / commands_list . h <nl> ppp b / src / app / commands / commands_list . 
h <nl> FOR_EACH_COMMAND ( NewBrush ) <nl> FOR_EACH_COMMAND ( NewFile ) <nl> FOR_EACH_COMMAND ( NewFrame ) <nl> FOR_EACH_COMMAND ( NewFrameTag ) <nl> + FOR_EACH_COMMAND ( NewGroup ) <nl> FOR_EACH_COMMAND ( NewLayer ) <nl> - FOR_EACH_COMMAND ( NewLayerGroup ) <nl> FOR_EACH_COMMAND ( NewSpriteFromSelection ) <nl> FOR_EACH_COMMAND ( OpenFile ) <nl> FOR_EACH_COMMAND ( OpenInFolder ) <nl>
Rename NewLayerGroup command -> NewGroup command
aseprite/aseprite
33d7f6509f9e80115c9b2a73f8153739e8bba149
2016-06-08T18:20:11Z
mmm a / platformio . ini <nl> ppp b / platformio . ini <nl> lib_deps = <nl> SlowSoftI2CMaster = https : / / github . com / mikeshub / SlowSoftI2CMaster / archive / master . zip <nl> <nl> [ common_stm32f1 ] <nl> - platform = ststm32 <nl> + platform = ststm32 @ < 6 . 2 . 0 <nl> build_flags = ! python Marlin / src / HAL / STM32F1 / build_flags . py <nl> $ { common . build_flags } - std = gnu + + 14 - DHAVE_SW_SERIAL <nl> build_unflags = - std = gnu + + 11 <nl> lib_deps = $ { common_stm32f1 . lib_deps } <nl> # STM32F4 with STM32GENERIC <nl> # <nl> [ env : STM32F4 ] <nl> - platform = ststm32 <nl> + platform = ststm32 @ < 6 . 2 . 0 <nl> board = disco_f407vg <nl> build_flags = $ { common . build_flags } - DUSE_STM32GENERIC - DSTM32GENERIC - DSTM32F4 - DMENU_USB_SERIAL - DMENU_SERIAL = SerialUSB - DHAL_IWDG_MODULE_ENABLED <nl> lib_ignore = Adafruit NeoPixel , TMCStepper <nl> src_filter = $ { common . default_src_filter } + < src / HAL / STM32_F4_F7 > - < src / HAL / ST <nl> # STM32F7 with STM32GENERIC <nl> # <nl> [ env : STM32F7 ] <nl> - platform = ststm32 <nl> + platform = ststm32 @ < 6 . 2 . 0 <nl> board = remram_v1 <nl> build_flags = $ { common . build_flags } - DUSE_STM32GENERIC - DSTM32GENERIC - DSTM32F7 - DMENU_USB_SERIAL - DMENU_SERIAL = SerialUSB - DHAL_IWDG_MODULE_ENABLED <nl> lib_ignore = Adafruit NeoPixel , TMCStepper <nl> src_filter = $ { common . default_src_filter } + < src / HAL / STM32_F4_F7 > - < src / HAL / ST <nl> # ARMED ( STM32 ) <nl> # <nl> [ env : ARMED ] <nl> - platform = ststm32 <nl> + platform = ststm32 @ < 6 . 2 . 0 <nl> platform_packages = framework - arduinoststm32 @ $ { common . arduinoststm32_ver } <nl> board = armed_v1 <nl> build_flags = $ { common . build_flags } <nl> lib_ignore = $ { common_stm32f1 . lib_ignore } <nl> # Malyan M200 v2 ( STM32F070RB ) <nl> # <nl> [ env : STM32F070RB_malyan ] <nl> - platform = ststm32 <nl> + platform = ststm32 @ < 6 . 2 . 0 <nl> platform_packages = framework - arduinoststm32 @ $ { common . arduinoststm32_ver } <nl> board = malyanM200v2 <nl> build_flags = - DSTM32F0xx - DUSBCON - DUSBD_VID = 0x0483 ' - DUSB_MANUFACTURER = " Unknown " ' ' - DUSB_PRODUCT = " ARMED_V1 " ' - DUSBD_USE_CDC - DHAL_PCD_MODULE_ENABLED <nl> lib_ignore = LiquidCrystal , LiquidTWI2 , Adafruit NeoPixel , TMCStepper , U8glib - H <nl> # Malyan M300 ( STM32F070CB ) <nl> # <nl> [ env : malyan_M300 ] <nl> - platform = ststm32 @ > = 6 . 1 . 0 <nl> + platform = ststm32 @ > = 6 . 1 . 0 , < 6 . 2 . 0 <nl> board = malyanm300_f070cb <nl> build_flags = $ { common . build_flags } <nl> - DUSBCON - DUSBD_VID = 0x0483 " - DUSB_MANUFACTURER = \ " Unknown \ " " " - DUSB_PRODUCT = \ " MALYAN_M300 \ " " <nl> build_unflags = $ { common_stm32f1 . build_unflags } <nl> # ' STEVAL - 3DP001V1 ' STM32F401VE board - https : / / www . st . com / en / evaluation - tools / steval - 3dp001v1 . html <nl> # <nl> [ env : STM32F401VE_STEVAL ] <nl> - platform = ststm32 <nl> + platform = ststm32 @ < 6 . 2 . 0 <nl> platform_packages = framework - arduinoststm32 @ $ { common . arduinoststm32_ver } <nl> board = STEVAL_STM32F401VE <nl> build_flags = $ { common . build_flags } <nl> src_filter = $ { common . default_src_filter } + < src / HAL / STM32 > <nl> # FLYF407ZG <nl> # <nl> [ env : FLYF407ZG ] <nl> - platform = ststm32 <nl> + platform = ststm32 @ < 6 . 2 . 0 <nl> platform_packages = framework - arduinoststm32 @ $ { common . arduinoststm32_ver } <nl> board = FLYF407ZG <nl> build_flags = $ { common . build_flags } <nl> src_filter = $ { common . 
default_src_filter } + < src / HAL / STM32 > <nl> # FYSETC S6 ( STM32F446VET6 ARM Cortex - M4 ) <nl> # <nl> [ env : FYSETC_S6 ] <nl> - platform = ststm32 <nl> + platform = ststm32 @ < 6 . 2 . 0 <nl> platform_packages = <nl> tool - stm32duino <nl> framework - arduinoststm32 @ $ { common . arduinoststm32_ver } <nl> upload_command = dfu - util - a 0 - s 0x08010000 : leave - D " $ SOURCE " <nl> # Shield - https : / / github . com / jmz52 / Hardware <nl> # <nl> [ env : STM32F407VE_black ] <nl> - platform = ststm32 <nl> + platform = ststm32 @ < 6 . 2 . 0 <nl> platform_packages = framework - arduinoststm32 @ $ { common . arduinoststm32_ver } <nl> board = blackSTM32F407VET6 <nl> build_flags = $ { common . build_flags } <nl> src_filter = $ { common . default_src_filter } + < src / HAL / STM32 > <nl> # BigTreeTech SKR Pro ( STM32F407ZGT6 ARM Cortex - M4 ) <nl> # <nl> [ env : BIGTREE_SKR_PRO ] <nl> - platform = ststm32 <nl> + platform = ststm32 @ < 6 . 2 . 0 <nl> platform_packages = framework - arduinoststm32 @ $ { common . arduinoststm32_ver } <nl> board = BigTree_SKR_Pro <nl> build_flags = $ { common . build_flags } <nl> debug_init_break = <nl> # Bigtreetech GTR V1 . 0 ( STM32F407IGT6 ARM Cortex - M4 ) <nl> # <nl> [ env : BIGTREE_GTR_V1_0 ] <nl> - platform = ststm32 @ > = 5 . 7 . 0 <nl> + platform = ststm32 @ > = 5 . 7 . 0 , < 6 . 2 . 0 <nl> platform_packages = framework - arduinoststm32 @ $ { common . arduinoststm32_ver } <nl> board = BigTree_GTR_v1 <nl> extra_scripts = pre : buildroot / share / PlatformIO / scripts / generic_create_variant . py <nl> debug_tool = jlink <nl> # <nl> # RUMBA32 <nl> # <nl> - [ env : rumba32_f446ve ] <nl> - platform = ststm32 <nl> + <nl> + [ common_rumba32 ] <nl> + platform = ststm32 @ < 6 . 2 . 0 <nl> platform_packages = framework - arduinoststm32 @ $ { common . arduinoststm32_ver } <nl> board = rumba32_f446ve <nl> - build_flags = $ { common . build_flags } <nl> - - DSTM32F4xx <nl> - - DARDUINO_RUMBA32_F446VE <nl> - - DARDUINO_ARCH_STM32 <nl> - " - DBOARD_NAME = \ " RUMBA32_F446VE \ " " <nl> - - DSTM32F446xx <nl> - - DUSBCON <nl> - - DUSBD_VID = 0x0483 <nl> - " - DUSB_MANUFACTURER = \ " Unknown \ " " <nl> - " - DUSB_PRODUCT = \ " RUMBA32_F446VE \ " " <nl> + build_flags = $ { common . build_flags } - Os - IMarlin / src / HAL / STM32 <nl> + - DSTM32F4xx - DARDUINO_RUMBA32_F446VE - DARDUINO_ARCH_STM32 " - DBOARD_NAME = \ " RUMBA32_F446VE \ " " <nl> + - DSTM32F446xx - DUSBCON " - DUSB_MANUFACTURER = \ " Unknown \ " " " - DUSB_PRODUCT = \ " RUMBA32_F446VE \ " " <nl> - DHAL_PCD_MODULE_ENABLED <nl> - DUSBD_USE_CDC <nl> - DDISABLE_GENERIC_SERIALUSB <nl> - DHAL_UART_MODULE_ENABLED <nl> - - Os <nl> - - IMarlin / src / HAL / STM32 <nl> lib_ignore = Adafruit NeoPixel , SoftwareSerial <nl> src_filter = $ { common . default_src_filter } + < src / HAL / STM32 > <nl> - monitor_speed = 500000 <nl> upload_protocol = dfu <nl> <nl> + [ env : rumba32_f446ve ] <nl> + platform = $ { common_rumba32 . platform } <nl> + extends = common_rumba32 <nl> + build_flags = $ { common_rumba32 . build_flags } - DUSBD_VID = 0x0483 <nl> + monitor_speed = 500000 <nl> + <nl> # <nl> # MKS RUMBA32 ( adds TMC2208 / 2209 UART interface and AUX - 1 ) <nl> # <nl> [ env : rumba32_mks ] <nl> - platform = ststm32 <nl> - platform_packages = framework - arduinoststm32 @ $ { common . arduinoststm32_ver } <nl> - board = rumba32_f446ve <nl> - build_flags = $ { common . 
build_flags } <nl> - - DSTM32F4xx - DARDUINO_RUMBA32_F446VE - DARDUINO_ARCH_STM32 " - DBOARD_NAME = \ " RUMBA32_F446VE \ " " <nl> - - DSTM32F446xx - DUSBCON - DUSBD_VID = 0x8000 <nl> - " - DUSB_MANUFACTURER = \ " Unknown \ " " <nl> - " - DUSB_PRODUCT = \ " RUMBA32_F446VE \ " " <nl> - - DHAL_PCD_MODULE_ENABLED <nl> - - DUSBD_USE_CDC <nl> - - DDISABLE_GENERIC_SERIALUSB <nl> - - DHAL_UART_MODULE_ENABLED <nl> - - Os <nl> - - IMarlin / src / HAL / STM32 <nl> - lib_ignore = Adafruit NeoPixel , SoftwareSerial <nl> - src_filter = $ { common . default_src_filter } + < src / HAL / STM32 > <nl> - upload_protocol = dfu <nl> + platform = $ { common_rumba32 . platform } <nl> + extends = common_rumba32 <nl> + build_flags = $ { common_rumba32 . build_flags } - DUSBD_VID = 0x8000 <nl> <nl> # <nl> # Just print the dependency tree <nl>
Support STM32 platform up to 6.1.x
MarlinFirmware/Marlin
89704ce7f92831d9729bd8b348f932486cee8717
2020-06-08T00:50:22Z
mmm a / xbmc / network / AirTunesServer . cpp <nl> ppp b / xbmc / network / AirTunesServer . cpp <nl> ao_device * CAirTunesServer : : AudioOutputFunctions : : ao_open_live ( int driver_id , ao <nl> header . durationMs = 0 ; <nl> <nl> if ( device - > pipe - > Write ( & header , sizeof ( header ) ) = = 0 ) <nl> + { <nl> + delete device - > pipe ; <nl> + delete device ; <nl> return 0 ; <nl> - <nl> + } <nl> + <nl> ThreadMessage tMsg = { TMSG_MEDIA_STOP } ; <nl> CApplicationMessenger : : Get ( ) . SendMessage ( tMsg , true ) ; <nl> <nl> mmm a / xbmc / visualizations / Vortex / VortexVis / Core / Renderer . cpp <nl> ppp b / xbmc / visualizations / Vortex / VortexVis / Core / Renderer . cpp <nl> void Renderer : : Sphere ( int del_uhol_x , int del_uhol_y , float size ) <nl> / / g_device - > DrawPrimitive ( D3DPT_TRIANGLESTRIP , i * 2 * ( del_y + 1 ) , 2 * del_y ) ; <nl> m_pD3DDevice - > DrawPrimitiveUP ( D3DPT_TRIANGLESTRIP , 2 * del_uhol_y , & v [ i * 2 * ( del_uhol_y + 1 ) ] , sizeof ( PosColNormalUVVertex ) ) ; <nl> <nl> - delete v ; <nl> + delete [ ] v ; <nl> / / pd - > DrawPrimitive ( D3DPT_TRIANGLESTRIP , i * 2 * ( del_y + 1 ) , 2 * del_y ) ; <nl> } <nl> <nl> mmm a / xbmc / windowing / egl / EGLNativeTypeRaspberryPI . cpp <nl> ppp b / xbmc / windowing / egl / EGLNativeTypeRaspberryPI . cpp <nl> void CEGLNativeTypeRaspberryPI : : GetSupportedModes ( HDMI_RES_GROUP_T group , std : : v <nl> } <nl> } <nl> if ( supported_modes ) <nl> - delete supported_modes ; <nl> + delete [ ] supported_modes ; <nl> } <nl> <nl> void CEGLNativeTypeRaspberryPI : : TvServiceCallback ( uint32_t reason , uint32_t param1 , uint32_t param2 ) <nl>
Merge pull request from asmaloney / mem_problems
xbmc/xbmc
f631f0fcc4393f719ec8b7f80fd00166538cf6cf
2013-02-12T14:44:40Z
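The xbmc commit above fixes two classic manual-memory-management bugs: an error path in AirTunesServer that returned without freeing the device and pipe it had just allocated, and scalar delete applied to new[]-allocated arrays in Renderer and EGLNativeTypeRaspberryPI. The sketch below is not xbmc code; it is a minimal, self-contained illustration of both failure modes and the corresponding fixes, using hypothetical Pipe/Device types.

```cpp
#include <cstddef>
#include <cstdio>

// Minimal stand-ins (hypothetical, not xbmc classes) so the sketch compiles.
struct Pipe {
  std::size_t Write(const void* data, std::size_t len) { (void)data; return len; }
};
struct Device {
  Pipe* pipe = nullptr;
};

// Failure mode 1 (AirTunesServer): an error path that returns without freeing
// the objects allocated a few lines earlier. The fix is to release them first.
Device* OpenDeviceFixed() {
  Device* device = new Device;
  device->pipe = new Pipe;

  int header = 0;
  if (device->pipe->Write(&header, sizeof(header)) == 0) {
    delete device->pipe;  // added cleanup, mirroring the commit
    delete device;
    return nullptr;
  }
  return device;
}

// Failure mode 2 (Renderer, EGLNativeTypeRaspberryPI): scalar delete applied to
// an array allocation. new[] must be paired with delete[]; mixing them is UB.
void UseScratchBuffer(std::size_t count) {
  float* v = new float[count];
  // ... fill the buffer, hand it to a draw call ...
  delete[] v;  // was "delete v" before the fix
}

int main() {
  if (Device* d = OpenDeviceFixed()) {
    delete d->pipe;
    delete d;
  }
  UseScratchBuffer(16);
  std::puts("ok");
  return 0;
}
```

In modern C++ both mistakes disappear if the allocations are held in std::unique_ptr<Pipe> / std::vector<float>, since cleanup then happens on every exit path automatically; the sketch keeps raw new/delete only to match what the patched code actually does.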
mmm a / tools / run_tests / artifacts / artifact_targets . py <nl> ppp b / tools / run_tests / artifacts / artifact_targets . py <nl> def build_jobspec ( self ) : <nl> self . name , <nl> ' tools / dockerfile / grpc_artifact_linux_ % s ' % self . arch , <nl> ' tools / run_tests / artifacts / build_artifact_csharp . sh ' , <nl> - environ = { ' CMAKE_ARCH_OPTION ' : cmake_arch_option } <nl> - ) <nl> + environ = { <nl> + ' CMAKE_ARCH_OPTION ' : cmake_arch_option <nl> + } ) <nl> else : <nl> cmake_arch_option = ' ' # x64 is the default architecture <nl> if self . arch = = ' x86 ' : <nl>
yapf code
grpc/grpc
b4b24dc13d8300ea50fdaa828c1bafb8b9338717
2018-10-04T09:59:23Z
mmm a / src / init . cpp <nl> ppp b / src / init . cpp <nl> std : : string HelpMessage ( ) <nl> " - banscore = < n > " + _ ( " Threshold for disconnecting misbehaving peers ( default : 100 ) " ) + " \ n " + <nl> " - bantime = < n > " + _ ( " Number of seconds to keep misbehaving peers from reconnecting ( default : 86400 ) " ) + " \ n " + <nl> " - maxreceivebuffer = < n > " + _ ( " Maximum per - connection receive buffer , < n > * 1000 bytes ( default : 5000 ) " ) + " \ n " + <nl> - " - maxsendbuffer = < n > " + _ ( " Maximum per - connection send buffer , < n > * 1000 bytes ( default : 5000 ) " ) + " \ n " + <nl> + " - maxsendbuffer = < n > " + _ ( " Maximum per - connection send buffer , < n > * 1000 bytes ( default : 1000 ) " ) + " \ n " + <nl> # ifdef USE_UPNP <nl> # if USE_UPNP <nl> " - upnp " + _ ( " Use UPnP to map the listening port ( default : 1 when listening ) " ) + " \ n " + <nl> mmm a / src / net . cpp <nl> ppp b / src / net . cpp <nl> void ThreadSocketHandler2 ( void * parg ) <nl> pnode - > CloseSocketDisconnect ( ) ; <nl> } <nl> } <nl> - if ( vSend . size ( ) > SendBufferSize ( ) ) <nl> - printf ( " socket send buffer full warning ( % d bytes ) \ n " , vSend . size ( ) ) ; <nl> } <nl> } <nl> } <nl> mmm a / src / net . h <nl> ppp b / src / net . h <nl> extern int nBestHeight ; <nl> <nl> <nl> inline unsigned int ReceiveBufferSize ( ) { return 1000 * GetArg ( " - maxreceivebuffer " , 5 * 1000 ) ; } <nl> - inline unsigned int SendBufferSize ( ) { return 1000 * GetArg ( " - maxsendbuffer " , 5 * 1000 ) ; } <nl> + inline unsigned int SendBufferSize ( ) { return 1000 * GetArg ( " - maxsendbuffer " , 1 * 1000 ) ; } <nl> <nl> void AddOneShot ( std : : string strDest ) ; <nl> bool RecvLine ( SOCKET hSocket , std : : string & strLine ) ; <nl>
Merge pull request from TheBlueMatt / diffsendbuffer
bitcoin/bitcoin
da1103f4f8288cffc9ea475254fdeb258f04de77
2012-07-02T01:17:34Z
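The bitcoin commit above lowers the default for -maxsendbuffer from 5000 to 1000 units of 1000 bytes each (roughly 5 MB down to about 1 MB per connection) and drops the "socket send buffer full warning" log line. Below is a minimal, self-contained sketch of how that kind of "<n> * 1000 bytes" option resolves to a byte count; SendBufferSize/ReceiveBufferSize mirror the one-liners in the diff, while GetArg and the argument map are simplified stand-ins for the real argument handling, not Bitcoin's actual implementation.

```cpp
#include <cstdio>
#include <map>
#include <string>

// Stand-in for the real GetArg(): returns the configured value for `name` if
// the user supplied it, otherwise `defaultValue`. (The map would normally be
// filled from argv / the config file; that parsing is omitted here.)
static std::map<std::string, long> g_args;

static long GetArg(const std::string& name, long defaultValue) {
  auto it = g_args.find(name);
  return it != g_args.end() ? it->second : defaultValue;
}

// Mirrors the pattern in net.h: the option value is a multiplier of 1000 bytes,
// and the commit changes the send-side default from 5 * 1000 to 1 * 1000.
static unsigned int ReceiveBufferSize() { return 1000 * GetArg("-maxreceivebuffer", 5 * 1000); }
static unsigned int SendBufferSize()    { return 1000 * GetArg("-maxsendbuffer",    1 * 1000); }

int main() {
  std::printf("default send buffer:    %u bytes\n", SendBufferSize());     // 1000000
  g_args["-maxsendbuffer"] = 256;  // as if the user passed -maxsendbuffer=256
  std::printf("overridden send buffer: %u bytes\n", SendBufferSize());     // 256000
  std::printf("default recv buffer:    %u bytes\n", ReceiveBufferSize());  // 5000000
  return 0;
}
```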
mmm a / include / swift / AST / Expr . h <nl> ppp b / include / swift / AST / Expr . h <nl> class alignas ( 8 ) Expr { <nl> / / / typically this will have an ErrorType . <nl> class ErrorExpr : public Expr { <nl> SourceRange Range ; <nl> + Expr * OriginalExpr ; <nl> public : <nl> - ErrorExpr ( SourceRange Range , Type Ty = Type ( ) ) <nl> - : Expr ( ExprKind : : Error , / * Implicit = * / true , Ty ) , Range ( Range ) { } <nl> + ErrorExpr ( SourceRange Range , Type Ty = Type ( ) , Expr * OriginalExpr = nullptr ) <nl> + : Expr ( ExprKind : : Error , / * Implicit = * / true , Ty ) , Range ( Range ) , <nl> + OriginalExpr ( OriginalExpr ) { } <nl> <nl> SourceRange getSourceRange ( ) const { return Range ; } <nl> + Expr * getOriginalExpr ( ) const { return OriginalExpr ; } <nl> <nl> static bool classof ( const Expr * E ) { <nl> return E - > getKind ( ) = = ExprKind : : Error ; <nl> mmm a / include / swift / AST / Stmt . h <nl> ppp b / include / swift / AST / Stmt . h <nl> class alignas ( 8 ) PoundAvailableInfo final : <nl> friend TrailingObjects ; <nl> <nl> SourceLoc PoundLoc ; <nl> + SourceLoc LParenLoc ; <nl> SourceLoc RParenLoc ; <nl> <nl> / / The number of queries tail allocated after this object . <nl> class alignas ( 8 ) PoundAvailableInfo final : <nl> / / / This is filled in by Sema . <nl> VersionRange VariantAvailableRange ; <nl> <nl> - PoundAvailableInfo ( SourceLoc PoundLoc , ArrayRef < AvailabilitySpec * > queries , <nl> - SourceLoc RParenLoc ) <nl> - : PoundLoc ( PoundLoc ) , RParenLoc ( RParenLoc ) , NumQueries ( queries . size ( ) ) , <nl> - AvailableRange ( VersionRange : : empty ( ) ) , <nl> + PoundAvailableInfo ( SourceLoc PoundLoc , SourceLoc LParenLoc , <nl> + ArrayRef < AvailabilitySpec * > queries , SourceLoc RParenLoc ) <nl> + : PoundLoc ( PoundLoc ) , LParenLoc ( LParenLoc ) , RParenLoc ( RParenLoc ) , <nl> + NumQueries ( queries . size ( ) ) , AvailableRange ( VersionRange : : empty ( ) ) , <nl> VariantAvailableRange ( VersionRange : : empty ( ) ) { <nl> std : : uninitialized_copy ( queries . begin ( ) , queries . 
end ( ) , <nl> getTrailingObjects < AvailabilitySpec * > ( ) ) ; <nl> class alignas ( 8 ) PoundAvailableInfo final : <nl> <nl> public : <nl> static PoundAvailableInfo * create ( ASTContext & ctx , SourceLoc PoundLoc , <nl> + SourceLoc LParenLoc , <nl> ArrayRef < AvailabilitySpec * > queries , <nl> SourceLoc RParenLoc ) ; <nl> <nl> class alignas ( 8 ) PoundAvailableInfo final : <nl> NumQueries ) ; <nl> } <nl> <nl> + SourceLoc getLParenLoc ( ) const { return LParenLoc ; } <nl> + SourceLoc getRParenLoc ( ) const { return RParenLoc ; } <nl> + <nl> SourceLoc getStartLoc ( ) const { return PoundLoc ; } <nl> SourceLoc getEndLoc ( ) const ; <nl> SourceLoc getLoc ( ) const { return PoundLoc ; } <nl> class WhileStmt : public LabeledConditionalStmt { <nl> <nl> SourceLoc getStartLoc ( ) const { return getLabelLocOrKeywordLoc ( WhileLoc ) ; } <nl> SourceLoc getEndLoc ( ) const { return Body - > getEndLoc ( ) ; } <nl> + SourceLoc getWhileLoc ( ) const { return WhileLoc ; } <nl> <nl> Stmt * getBody ( ) const { return Body ; } <nl> void setBody ( Stmt * s ) { Body = s ; } <nl> class RepeatWhileStmt : public LabeledStmt { <nl> getDefaultImplicitFlag ( implicit , RepeatLoc ) , <nl> LabelInfo ) , <nl> RepeatLoc ( RepeatLoc ) , WhileLoc ( WhileLoc ) , Body ( Body ) , Cond ( Cond ) { } <nl> - <nl> + <nl> SourceLoc getStartLoc ( ) const { return getLabelLocOrKeywordLoc ( RepeatLoc ) ; } <nl> SourceLoc getEndLoc ( ) const ; <nl> + SourceLoc getRepeatLoc ( ) const { return RepeatLoc ; } <nl> <nl> Stmt * getBody ( ) const { return Body ; } <nl> void setBody ( Stmt * s ) { Body = s ; } <nl> class ForEachStmt : public LabeledStmt { <nl> Pattern * Pat ; <nl> SourceLoc InLoc ; <nl> Expr * Sequence ; <nl> + SourceLoc WhereLoc ; <nl> Expr * WhereExpr = nullptr ; <nl> BraceStmt * Body ; <nl> <nl> class ForEachStmt : public LabeledStmt { <nl> <nl> public : <nl> ForEachStmt ( LabeledStmtInfo LabelInfo , SourceLoc ForLoc , Pattern * Pat , <nl> - SourceLoc InLoc , Expr * Sequence , Expr * WhereExpr , BraceStmt * Body , <nl> - Optional < bool > implicit = None ) <nl> + SourceLoc InLoc , Expr * Sequence , SourceLoc WhereLoc , <nl> + Expr * WhereExpr , BraceStmt * Body , Optional < bool > implicit = None ) <nl> : LabeledStmt ( StmtKind : : ForEach , getDefaultImplicitFlag ( implicit , ForLoc ) , <nl> LabelInfo ) , <nl> ForLoc ( ForLoc ) , Pat ( nullptr ) , InLoc ( InLoc ) , Sequence ( Sequence ) , <nl> - WhereExpr ( WhereExpr ) , Body ( Body ) { <nl> + WhereLoc ( WhereLoc ) , WhereExpr ( WhereExpr ) , Body ( Body ) { <nl> setPattern ( Pat ) ; <nl> } <nl> <nl> class ForEachStmt : public LabeledStmt { <nl> <nl> / / / getInLoc - Retrieve the location of the ' in ' keyword . <nl> SourceLoc getInLoc ( ) const { return InLoc ; } <nl> + <nl> + / / / getWhereLoc - Retrieve the location of the ' where ' keyword . <nl> + SourceLoc getWhereLoc ( ) const { return WhereLoc ; } <nl> <nl> / / / getPattern - Retrieve the pattern describing the iteration variables . <nl> / / / These variables will only be visible within the body of the loop . <nl> mmm a / lib / AST / ASTWalker . cpp <nl> ppp b / lib / AST / ASTWalker . cpp <nl> class Traversal : public ASTVisitor < Traversal , Expr * , Stmt * , <nl> return false ; <nl> } <nl> <nl> + bool visitParamDecl ( ParamDecl * P ) { <nl> + / / Don ' t walk into the type if the decl is implicit , or if the type is <nl> + / / implicit . <nl> + if ( ! 
P - > isImplicit ( ) ) { <nl> + if ( auto * repr = P - > getTypeRepr ( ) ) { <nl> + if ( doIt ( repr ) ) { <nl> + return true ; <nl> + } <nl> + } <nl> + } <nl> + if ( auto * E = P - > getStructuralDefaultExpr ( ) ) { <nl> + auto res = doIt ( E ) ; <nl> + if ( ! res ) return true ; <nl> + P - > setDefaultExpr ( res , / * isTypeChecked * / ( bool ) res - > getType ( ) ) ; <nl> + } <nl> + <nl> + if ( ! Walker . shouldWalkAccessorsTheOldWay ( ) ) { <nl> + for ( auto * AD : P - > getAllAccessors ( ) ) <nl> + if ( doIt ( AD ) ) <nl> + return true ; <nl> + } <nl> + <nl> + return false ; <nl> + } <nl> + <nl> bool visitSubscriptDecl ( SubscriptDecl * SD ) { <nl> bool WalkGenerics = visitGenericParamListIfNeeded ( SD ) ; <nl> <nl> class Traversal : public ASTVisitor < Traversal , Expr * , Stmt * , <nl> / / Walk each parameter ' s decl and typeloc and default value . <nl> if ( doIt ( P ) ) <nl> return true ; <nl> - <nl> - / / Don ' t walk into the type if the decl is implicit , or if the type is <nl> - / / implicit . <nl> - if ( ! P - > isImplicit ( ) ) { <nl> - if ( auto * repr = P - > getTypeRepr ( ) ) { <nl> - if ( doIt ( repr ) ) { <nl> - return true ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - if ( auto * E = P - > getStructuralDefaultExpr ( ) ) { <nl> - auto res = doIt ( E ) ; <nl> - if ( ! res ) return true ; <nl> - P - > setDefaultExpr ( res , / * isTypeChecked * / ( bool ) res - > getType ( ) ) ; <nl> - } <nl> } <nl> <nl> return Walker . walkToParameterListPost ( PL ) ; <nl> mmm a / lib / AST / Stmt . cpp <nl> ppp b / lib / AST / Stmt . cpp <nl> bool CatchStmt : : isSyntacticallyExhaustive ( ) const { <nl> <nl> PoundAvailableInfo * PoundAvailableInfo : : create ( ASTContext & ctx , <nl> SourceLoc PoundLoc , <nl> + SourceLoc LParenLoc , <nl> ArrayRef < AvailabilitySpec * > queries , <nl> SourceLoc RParenLoc ) { <nl> unsigned size = totalSizeToAlloc < AvailabilitySpec * > ( queries . size ( ) ) ; <nl> void * Buffer = ctx . Allocate ( size , alignof ( PoundAvailableInfo ) ) ; <nl> - return : : new ( Buffer ) PoundAvailableInfo ( PoundLoc , queries , RParenLoc ) ; <nl> + return : : new ( Buffer ) PoundAvailableInfo ( PoundLoc , LParenLoc , queries , <nl> + RParenLoc ) ; <nl> } <nl> <nl> SourceLoc PoundAvailableInfo : : getEndLoc ( ) const { <nl> if ( RParenLoc . isInvalid ( ) ) { <nl> if ( NumQueries = = 0 ) { <nl> - return PoundLoc ; <nl> + if ( LParenLoc . isInvalid ( ) ) <nl> + return PoundLoc ; <nl> + return LParenLoc ; <nl> } <nl> return getQueries ( ) [ NumQueries - 1 ] - > getSourceRange ( ) . End ; <nl> } <nl> mmm a / lib / IDE / Formatting . cpp <nl> ppp b / lib / IDE / Formatting . cpp <nl> <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> <nl> # include " swift / AST / ASTWalker . h " <nl> + # include " swift / AST / TypeRepr . h " <nl> # include " swift / IDE / SourceEntityWalker . h " <nl> # include " swift / Parse / Parser . h " <nl> # include " swift / Frontend / Frontend . h " <nl> using namespace ide ; <nl> <nl> namespace { <nl> <nl> - struct SiblingAlignInfo { <nl> - SourceLoc Loc ; <nl> - bool ExtraIndent ; <nl> - } ; <nl> + using StringBuilder = llvm : : SmallString < 64 > ; <nl> <nl> - struct TokenInfo { <nl> - / / The tokens appearing at the start of lines , from the first line to the <nl> - / / line under indentation . <nl> - std : : vector < const Token * > LineStarts ; <nl> - const Token * getLineStarter ( unsigned idx = 0 ) const { <nl> - auto it = LineStarts . rbegin ( ) + idx ; <nl> - return it < LineStarts . 
rend ( ) ? * it : nullptr ; <nl> - } <nl> - operator bool ( ) { return LineStarts . size ( ) > 1 ; } <nl> - bool isRBraceDotsPattern ( ) const { <nl> - for ( auto It = LineStarts . rbegin ( ) , end = LineStarts . rend ( ) ; <nl> - It + 1 < end ; + + It ) { <nl> - auto * CurrentLine = * It ; <nl> - auto * PreviousLine = * ( It + 1 ) ; <nl> - if ( CurrentLine - > getKind ( ) = = tok : : period & & <nl> - PreviousLine - > getKind ( ) = = tok : : period ) { <nl> - / / If the previous line starts with dot too , move further up . <nl> - continue ; <nl> - } else if ( CurrentLine - > getKind ( ) = = tok : : period & & <nl> - PreviousLine - > getKind ( ) = = tok : : r_brace & & <nl> - PreviousLine + 1 = = CurrentLine ) { <nl> - / / Check if the previous line starts with ' } ' and the period of the <nl> - / / current line is immediately after the ' } ' <nl> - return true ; <nl> - } else { <nl> - return false ; <nl> - } <nl> - } <nl> - return false ; <nl> + static bool isOnSameLine ( SourceManager & SM , SourceLoc L , SourceLoc R ) { <nl> + return Lexer : : getLocForStartOfLine ( SM , L ) = = <nl> + Lexer : : getLocForStartOfLine ( SM , R ) ; <nl> + } <nl> + <nl> + static void widenOrSet ( SourceRange & First , SourceRange Second ) { <nl> + if ( Second . isInvalid ( ) ) <nl> + return ; <nl> + if ( First . isValid ( ) ) { <nl> + First . widen ( Second ) ; <nl> + } else { <nl> + First = Second ; <nl> } <nl> - } ; <nl> + } <nl> <nl> - using StringBuilder = llvm : : SmallString < 64 > ; <nl> + / / / \ returns true if \ c Loc is the location of the first non - comment token on <nl> + / / / its line . <nl> + static bool isFirstTokenOnLine ( SourceManager & SM , SourceLoc Loc ) { <nl> + assert ( Loc . isValid ( ) ) ; <nl> + SourceLoc LineStart = Lexer : : getLocForStartOfLine ( SM , Loc ) ; <nl> + CommentRetentionMode SkipComments = CommentRetentionMode : : None ; <nl> + Token First = Lexer : : getTokenAtLocation ( SM , LineStart , SkipComments ) ; <nl> + return First . getLoc ( ) = = Loc ; <nl> + } <nl> + <nl> + / / / \ returns the location of the first non - whitespace character on the line <nl> + / / / containing \ c Loc . <nl> + static SourceLoc <nl> + getLocForContentStartOnSameLine ( SourceManager & SM , SourceLoc Loc ) { <nl> + assert ( Loc . isValid ( ) ) ; <nl> + SourceLoc LineStart = Lexer : : getLocForStartOfLine ( SM , Loc ) ; <nl> + StringRef Indentation = Lexer : : getIndentationForLine ( SM , LineStart ) ; <nl> + return LineStart . getAdvancedLoc ( Indentation . size ( ) ) ; <nl> + } <nl> + <nl> + / / / \ returns the first token after the token at \ c Loc . <nl> + static Optional < Token > <nl> + getTokenAfter ( SourceManager & SM , SourceLoc Loc , bool SkipComments = true ) { <nl> + assert ( Loc . isValid ( ) ) ; <nl> + CommentRetentionMode Mode = SkipComments <nl> + ? CommentRetentionMode : : None <nl> + : CommentRetentionMode : : ReturnAsTokens ; <nl> + assert ( Lexer : : getTokenAtLocation ( SM , Loc , Mode ) . getLoc ( ) = = Loc ) ; <nl> + SourceLoc End = Lexer : : getLocForEndOfToken ( SM , Loc ) ; <nl> + Token Next = Lexer : : getTokenAtLocation ( SM , End , Mode ) ; <nl> + if ( Next . getKind ( ) = = tok : : NUM_TOKENS ) <nl> + return None ; <nl> + return Next ; <nl> + } <nl> + <nl> + / / / \ returns the last token of the given kind in the open range between \ c From <nl> + / / / and \ c To . 
<nl> + static Optional < Token > <nl> + getLastTokenOfKindInOpenRange ( SourceManager & SM , tok Kind , <nl> + SourceLoc From , SourceLoc To ) { <nl> + Optional < Token > Match ; <nl> + while ( auto Next = getTokenAfter ( SM , From ) ) { <nl> + if ( ! Next | | ! SM . isBeforeInBuffer ( Next - > getLoc ( ) , To ) ) <nl> + break ; <nl> + if ( Next - > getKind ( ) = = Kind ) <nl> + Match = Next ; <nl> + From = Next - > getLoc ( ) ; <nl> + } <nl> + return Match ; <nl> + } <nl> + <nl> + / / / \ returns true if the token at \ c Loc is one of the given \ c Kinds . <nl> + static bool locIsKind ( SourceManager & SM , SourceLoc Loc , ArrayRef < tok > Kinds ) { <nl> + Token Tok = Lexer : : getTokenAtLocation ( SM , Loc ) ; <nl> + return Tok . getLoc ( ) = = Loc & & <nl> + std : : find ( Kinds . begin ( ) , Kinds . end ( ) , Tok . getKind ( ) ) ! = Kinds . end ( ) ; <nl> + } <nl> + <nl> + / / / \ returns the given \ c Loc if there is a token at that location that is one <nl> + / / / of the given \ c Kinds and an invalid \ c SourceLocation otherwise . <nl> + static SourceLoc getLocIfKind ( SourceManager & SM , SourceLoc Loc , <nl> + ArrayRef < tok > Kinds ) { <nl> + if ( ! locIsKind ( SM , Loc , Kinds ) ) <nl> + return SourceLoc ( ) ; <nl> + return Loc ; <nl> + } <nl> <nl> - static SourceLoc getVarDeclInitEnd ( VarDecl * VD ) { <nl> - return VD - > getBracesRange ( ) . isValid ( ) <nl> - ? VD - > getBracesRange ( ) . End <nl> - : VD - > getParentInitializer ( ) & & <nl> - VD - > getParentInitializer ( ) - > getEndLoc ( ) . isValid ( ) <nl> - ? VD - > getParentInitializer ( ) - > getEndLoc ( ) <nl> - : SourceLoc ( ) ; <nl> + / / / \ returns the given \ c Loc if there is a token at that location that is <nl> + / / / spelled with the given \ c Text and an invalid \ c SourceLocation otherwise . <nl> + static SourceLoc <nl> + getLocIfTokenTextMatches ( SourceManager & SM , SourceLoc Loc , StringRef Text ) { <nl> + Token Tok = Lexer : : getTokenAtLocation ( SM , Loc ) ; <nl> + if ( Tok . getLoc ( ) ! = Loc | | Tok . getKind ( ) = = tok : : NUM_TOKENS | | <nl> + Tok . getRawText ( ) ! = Text ) <nl> + return SourceLoc ( ) ; <nl> + return Loc ; <nl> } <nl> <nl> + / / / \ returns true if the given \ c VarDecl has grouping accessor braces containing <nl> + / / / one or more explicit accessors . <nl> + static bool hasExplicitAccessors ( VarDecl * VD ) { <nl> + auto Getter = VD - > getParsedAccessor ( AccessorKind : : Get ) ; <nl> + SourceRange Braces = VD - > getBracesRange ( ) ; <nl> + return Braces . isValid ( ) & & ( ! Getter | | <nl> + Getter - > getAccessorKeywordLoc ( ) . isValid ( ) ) ; <nl> + } <nl> + <nl> + <nl> + / / / An indentation context of the target location <nl> + struct IndentContext { <nl> + enum ContextKind { Exact , LineStart } ; <nl> + <nl> + / / / The location to indent relative to . <nl> + SourceLoc ContextLoc ; <nl> + <nl> + / / / Indicates whether to indent relative to the extact column of ContextLoc <nl> + / / / ( Exact ) or to the start of the content of the line it appears on ( LineStart ) . <nl> + ContextKind Kind ; <nl> + <nl> + / / / The number of levels to indent by . <nl> + unsigned IndentLevel ; <nl> + <nl> + IndentContext ( SourceLoc Context , bool AddsIndent , <nl> + ContextKind Kind = LineStart ) <nl> + : ContextLoc ( Context ) , Kind ( Kind ) , IndentLevel ( AddsIndent ? 1 : 0 ) { <nl> + assert ( Context . isValid ( ) ) ; <nl> + } <nl> + } ; <nl> + <nl> + <nl> + / / / A helper class used to optionally override the ContextLoc and Kind of an <nl> + / / / IndentContext . 
<nl> + class ContextOverride { <nl> + struct Override { <nl> + / / / The overriding ContextLoc . <nl> + SourceLoc ContextLoc ; <nl> + / / / The overriding Kind . <nl> + IndentContext : : ContextKind Kind ; <nl> + / / / The location after which this override takes effect . <nl> + SourceLoc ApplicableFrom ; <nl> + } ; <nl> + <nl> + / / / The current override , if set . <nl> + Optional < Override > Value ; <nl> + <nl> + public : <nl> + / / / Clears this override . <nl> + void clear ( ) { Value = None ; } <nl> + <nl> + / / / Sets this override to make an IndentContext indent relative to the exact <nl> + / / / column of AlignLoc if the IndentContext ' s ContextLoc is > = AlignLoc and <nl> + / / / on the same line . <nl> + void setExact ( SourceManager & SM , SourceLoc AlignLoc ) { <nl> + Value = { AlignLoc , IndentContext : : Exact , AlignLoc } ; <nl> + } <nl> + <nl> + / / / Sets this override to propagate the given ContextLoc and Kind along to any <nl> + / / / IndentContext with a ContextLoc > = L and on the same line . If this <nl> + / / / override ' s existing value applies to the provided ContextLoc , its <nl> + / / / ContextLoc and Kind are propagated instead . <nl> + / / / <nl> + / / / This propagation is necessary for cases like the trailing closure of ' bar ' <nl> + / / / in the example below . It ' s direct ContextLoc is ' bar ' , but we want <nl> + / / / it to be ' foo ' ( the ContextLoc of its parent tuple expression ) : <nl> + / / / <nl> + / / / \ code <nl> + / / / foo ( a : 1 , <nl> + / / / b : 2 ) ( 45 , bar ( c : 1 , <nl> + / / / d : 2 ) { <nl> + / / / fatalError ( ) <nl> + / / / } ) <nl> + / / / \ endcode <nl> + SourceLoc propagateContext ( SourceManager & SM , SourceLoc ContextLoc , <nl> + IndentContext : : ContextKind Kind , <nl> + SourceLoc L , SourceLoc R ) { <nl> + / / If the range ends on the same line as it starts , we know up front that <nl> + / / no child range can span multiple lines , so there ' s no need to propagate <nl> + / / ContextLoc via this override . <nl> + if ( R . isValid ( ) & & isOnSameLine ( SM , L , R ) ) <nl> + return ContextLoc ; <nl> + <nl> + / / Similarly if the ContextLoc and L are on the same line , there ' s no need <nl> + / / to propagate . Overrides applicable to ContextLoc will already apply <nl> + / / to child ranges on the same line as L . <nl> + if ( isOnSameLine ( SM , ContextLoc , L ) ) <nl> + return ContextLoc ; <nl> + <nl> + applyIfNeeded ( SM , ContextLoc , Kind ) ; <nl> + Value = { ContextLoc , Kind , L } ; <nl> + return ContextLoc ; <nl> + } <nl> + <nl> + / / / Applies the overriding ContextLoc and Kind to the given IndentContext if it <nl> + / / / starts after ApplicableFrom and on the same line . <nl> + void applyIfNeeded ( SourceManager & SM , IndentContext & Ctx ) { <nl> + / / Exactly aligned indent contexts should always set a matching exact <nl> + / / alignment context override so child braces / parens / brackets are indented <nl> + / / correctly . If the given innermost indent context is Exact and the <nl> + / / override doesn ' t match its Kind and ContextLoc , something is wrong . <nl> + assert ( ( Ctx . Kind ! = IndentContext : : Exact | | <nl> + ( Value & & Value - > Kind = = IndentContext : : Exact & & <nl> + Value - > ContextLoc = = Ctx . ContextLoc ) ) & & <nl> + " didn ' t set override ctx when exact innermost context was set ? " ) ; <nl> + <nl> + applyIfNeeded ( SM , Ctx . ContextLoc , Ctx . 
Kind ) ; <nl> + } <nl> + <nl> + / / / Applies the overriding ContextLoc and Kind to the given Override if its <nl> + / / / ContextLoc starts after ApplicableFrom and on the same line . <nl> + void applyIfNeeded ( SourceManager & SM , SourceLoc & ContextLoc , <nl> + IndentContext : : ContextKind & Kind ) { <nl> + if ( ! isApplicableTo ( SM , ContextLoc ) ) <nl> + return ; <nl> + ContextLoc = Value - > ContextLoc ; <nl> + Kind = Value - > Kind ; <nl> + } <nl> + <nl> + private : <nl> + bool isApplicableTo ( SourceManager & SM , SourceLoc Loc ) const { <nl> + return Value & & isOnSameLine ( SM , Loc , Value - > ApplicableFrom ) & & <nl> + ! SM . isBeforeInBuffer ( Loc , Value - > ApplicableFrom ) ; <nl> + } <nl> + } ; <nl> + <nl> + <nl> class FormatContext { <nl> SourceManager & SM ; <nl> - std : : vector < swift : : ASTWalker : : ParentTy > & Stack ; <nl> - std : : vector < swift : : ASTWalker : : ParentTy > : : reverse_iterator Cursor ; <nl> - swift : : ASTWalker : : ParentTy Start ; <nl> - swift : : ASTWalker : : ParentTy End ; <nl> + Optional < IndentContext > InnermostCtx ; <nl> bool InDocCommentBlock ; <nl> bool InCommentLine ; <nl> bool InStringLiteral ; <nl> - SiblingAlignInfo SiblingInfo ; <nl> <nl> public : <nl> FormatContext ( SourceManager & SM , <nl> - std : : vector < swift : : ASTWalker : : ParentTy > & Stack , <nl> - swift : : ASTWalker : : ParentTy Start = swift : : ASTWalker : : ParentTy ( ) , <nl> - swift : : ASTWalker : : ParentTy End = swift : : ASTWalker : : ParentTy ( ) , <nl> + Optional < IndentContext > IndentCtx , <nl> bool InDocCommentBlock = false , <nl> bool InCommentLine = false , <nl> - bool InStringLiteral = false , <nl> - SiblingAlignInfo SiblingInfo = SiblingAlignInfo ( ) ) <nl> - : SM ( SM ) , Stack ( Stack ) , Cursor ( Stack . rbegin ( ) ) , Start ( Start ) , End ( End ) , <nl> - InDocCommentBlock ( InDocCommentBlock ) , InCommentLine ( InCommentLine ) , <nl> - InStringLiteral ( InStringLiteral ) , <nl> - SiblingInfo ( SiblingInfo ) { } <nl> - <nl> - FormatContext parent ( ) { <nl> - assert ( Cursor ! = Stack . rend ( ) ) ; <nl> - FormatContext Parent ( * this ) ; <nl> - + + Parent . Cursor ; <nl> - return Parent ; <nl> - } <nl> + bool InStringLiteral = false ) <nl> + : SM ( SM ) , InnermostCtx ( IndentCtx ) , InDocCommentBlock ( InDocCommentBlock ) , <nl> + InCommentLine ( InCommentLine ) , InStringLiteral ( InStringLiteral ) { } <nl> <nl> bool IsInDocCommentBlock ( ) { <nl> return InDocCommentBlock ; <nl> class FormatContext { <nl> return InStringLiteral ; <nl> } <nl> <nl> - bool isSwitchControlStmt ( unsigned LineIndex , StringRef Text ) { <nl> - if ( ! isSwitchContext ( ) ) <nl> + void padToExactColumn ( StringBuilder & Builder , <nl> + const CodeFormatOptions & FmtOptions ) { <nl> + assert ( isExact ( ) & & " Context is not exact ? " ) ; <nl> + SourceLoc AlignLoc = InnermostCtx - > ContextLoc ; <nl> + CharSourceRange Range ( SM , Lexer : : getLocForStartOfLine ( SM , AlignLoc ) , <nl> + AlignLoc ) ; <nl> + unsigned SpaceLength = 0 ; <nl> + unsigned TabLength = 0 ; <nl> + <nl> + / / Calculating space length <nl> + for ( auto C : Range . str ( ) ) <nl> + SpaceLength + = C = = ' \ t ' ? FmtOptions . TabWidth : 1 ; <nl> + SpaceLength + = InnermostCtx - > IndentLevel * FmtOptions . TabWidth ; <nl> + <nl> + / / If we ' re indenting past the exact column , round down to the next tab . <nl> + if ( InnermostCtx - > IndentLevel ) <nl> + SpaceLength - = SpaceLength % FmtOptions . 
TabWidth ; <nl> + <nl> + / / If we are using tabs , calculating the number of tabs and spaces we need <nl> + / / to insert . <nl> + if ( FmtOptions . UseTabs ) { <nl> + TabLength = SpaceLength / FmtOptions . TabWidth ; <nl> + SpaceLength = SpaceLength % FmtOptions . TabWidth ; <nl> + } <nl> + Builder . append ( TabLength , ' \ t ' ) ; <nl> + Builder . append ( SpaceLength , ' ' ) ; <nl> + } <nl> + <nl> + bool isExact ( ) { <nl> + return InnermostCtx . hasValue ( ) & & <nl> + InnermostCtx - > Kind = = IndentContext : : Exact ; <nl> + } <nl> + <nl> + std : : pair < unsigned , unsigned > indentLineAndColumn ( ) { <nl> + if ( InnermostCtx ) <nl> + return SM . getLineAndColumn ( InnermostCtx - > ContextLoc ) ; <nl> + return std : : make_pair ( 0 , 0 ) ; <nl> + } <nl> + <nl> + bool shouldAddIndentForLine ( ) const { <nl> + return InnermostCtx . hasValue ( ) & & InnermostCtx - > IndentLevel > 0 ; <nl> + } <nl> + <nl> + unsigned numIndentLevels ( ) const { <nl> + if ( InnermostCtx ) <nl> + return InnermostCtx - > IndentLevel ; <nl> + return 0 ; <nl> + } <nl> + } ; <nl> + <nl> + <nl> + / / / Recursively strips any trailing arguments , subscripts , generic <nl> + / / / specializations , or optional bindings from the given expression . <nl> + static Expr * getContextExprOf ( SourceManager & SM , Expr * E ) { <nl> + assert ( E ) ; <nl> + if ( auto * USE = dyn_cast < UnresolvedSpecializeExpr > ( E ) ) { <nl> + if ( auto * Sub = USE - > getSubExpr ( ) ) <nl> + return getContextExprOf ( SM , Sub ) ; <nl> + } else if ( auto * CE = dyn_cast < CallExpr > ( E ) ) { <nl> + if ( auto * Fn = CE - > getFn ( ) ) <nl> + return getContextExprOf ( SM , Fn ) ; <nl> + } else if ( auto * SE = dyn_cast < SubscriptExpr > ( E ) ) { <nl> + if ( auto * B = SE - > getBase ( ) ) <nl> + return getContextExprOf ( SM , B ) ; <nl> + } else if ( auto * OBE = dyn_cast < BindOptionalExpr > ( E ) ) { <nl> + if ( auto * B = OBE - > getSubExpr ( ) ) <nl> + return getContextExprOf ( SM , B ) ; <nl> + } else if ( auto * PUE = dyn_cast < PostfixUnaryExpr > ( E ) ) { <nl> + if ( auto * B = PUE - > getArg ( ) ) <nl> + return getContextExprOf ( SM , B ) ; <nl> + } <nl> + return E ; <nl> + } <nl> + <nl> + / / / Finds the ContextLoc to use for the argument of the given SubscriptExpr , <nl> + / / / ApplyExpr , or UnresolvedSpecializeExpr . This is needed as the ContextLoc to <nl> + / / / align their arguments with ( including trailing closures ) may be neither the <nl> + / / / start or end of their function or base expression , as in the SubscriptExpr <nl> + / / / in the example below , where ' select ' is the desired ContextLoc to use . <nl> + / / / <nl> + / / / \ code <nl> + / / / Base ( ) <nl> + / / / . select ( x : 10 <nl> + / / / y : 20 ) [ 10 ] { <nl> + / / / print ( $ 0 ) <nl> + / / / } <nl> + / / / . count <nl> + / / / \ endcode <nl> + static SourceLoc getContextLocForArgs ( SourceManager & SM , Expr * E ) { <nl> + assert ( isa < SubscriptExpr > ( E ) | | isa < CallExpr > ( E ) | | isa < UnresolvedSpecializeExpr > ( E ) ) ; <nl> + Expr * Base = getContextExprOf ( SM , E ) ; <nl> + if ( auto * UDE = dyn_cast < UnresolvedDotExpr > ( Base ) ) <nl> + return UDE - > getDotLoc ( ) ; <nl> + if ( auto * UDRE = dyn_cast < UnresolvedDeclRefExpr > ( Base ) ) <nl> + return UDRE - > getLoc ( ) ; <nl> + return Base - > getStartLoc ( ) ; <nl> + } <nl> + <nl> + / / / This is a helper class intended to report every pair of matching parens , <nl> + / / / braces , angle brackets , and square brackets in a given AST node , along with their ContextLoc . 
<nl> + class RangeWalker : protected ASTWalker { <nl> + protected : <nl> + SourceManager & SM ; <nl> + <nl> + public : <nl> + explicit RangeWalker ( SourceManager & SM ) : SM ( SM ) { } <nl> + <nl> + / / / \ returns true to continue walking . <nl> + virtual bool handleRange ( SourceLoc L , SourceLoc R , SourceLoc ContextLoc ) = 0 ; <nl> + <nl> + private : <nl> + bool handleBraces ( SourceLoc L , SourceLoc R , SourceLoc ContextLoc ) { <nl> + L = getLocIfKind ( SM , L , tok : : l_brace ) ; <nl> + R = getLocIfKind ( SM , R , tok : : r_brace ) ; <nl> + return L . isInvalid ( ) | | handleRange ( L , R , ContextLoc ) ; <nl> + } <nl> + <nl> + bool handleBraces ( SourceRange Braces , SourceLoc ContextLoc ) { <nl> + return handleBraces ( Braces . Start , Braces . End , ContextLoc ) ; <nl> + } <nl> + <nl> + bool handleParens ( SourceLoc L , SourceLoc R , SourceLoc ContextLoc ) { <nl> + L = getLocIfKind ( SM , L , tok : : l_paren ) ; <nl> + R = getLocIfKind ( SM , R , tok : : r_paren ) ; <nl> + return L . isInvalid ( ) | | handleRange ( L , R , ContextLoc ) ; <nl> + } <nl> + <nl> + bool handleSquares ( SourceLoc L , SourceLoc R , SourceLoc ContextLoc ) { <nl> + L = getLocIfKind ( SM , L , tok : : l_square ) ; <nl> + R = getLocIfKind ( SM , R , tok : : r_square ) ; <nl> + return L . isInvalid ( ) | | handleRange ( L , R , ContextLoc ) ; <nl> + } <nl> + <nl> + bool handleAngles ( SourceLoc L , SourceLoc R , SourceLoc ContextLoc ) { <nl> + L = getLocIfTokenTextMatches ( SM , L , " < " ) ; <nl> + R = getLocIfTokenTextMatches ( SM , R , " > " ) ; <nl> + return L . isInvalid ( ) | | handleRange ( L , R , ContextLoc ) ; <nl> + } <nl> + <nl> + bool handleBraceStmt ( Stmt * S , SourceLoc ContextLoc ) { <nl> + if ( auto * BS = dyn_cast_or_null < BraceStmt > ( S ) ) <nl> + return handleBraces ( { BS - > getLBraceLoc ( ) , BS - > getRBraceLoc ( ) } , ContextLoc ) ; <nl> + return true ; <nl> + } <nl> + <nl> + bool walkCustomAttributes ( Decl * D ) { <nl> + / / CustomAttrs of non - param VarDecls are handled when this method is called <nl> + / / on their containing PatternBindingDecls ( below ) . <nl> + if ( isa < VarDecl > ( D ) & & ! isa < ParamDecl > ( D ) ) <nl> + return true ; <nl> + <nl> + if ( auto * PBD = dyn_cast < PatternBindingDecl > ( D ) ) { <nl> + if ( auto * SingleVar = PBD - > getSingleVar ( ) ) { <nl> + D = SingleVar ; <nl> + } else { <nl> + return true ; <nl> + } <nl> + } <nl> + for ( auto * customAttr : D - > getAttrs ( ) . getAttributes < CustomAttr , true > ( ) ) { <nl> + if ( auto * Repr = customAttr - > getTypeLoc ( ) . getTypeRepr ( ) ) { <nl> + if ( ! Repr - > walk ( * this ) ) <nl> + return false ; <nl> + } <nl> + if ( auto * Arg = customAttr - > getArg ( ) ) { <nl> + if ( ! Arg - > walk ( * this ) ) <nl> + return false ; <nl> + } <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + bool walkToDeclPre ( Decl * D ) override { <nl> + bool Continue = true , Stop = false ; <nl> + <nl> + if ( ! walkCustomAttributes ( D ) ) <nl> + return Stop ; <nl> + <nl> + if ( D - > isImplicit ( ) ) <nl> + return Continue ; <nl> + <nl> + / / Walk into inactive config regions . <nl> + if ( auto * ICD = dyn_cast < IfConfigDecl > ( D ) ) { <nl> + for ( auto Clause : ICD - > getClauses ( ) ) { <nl> + for ( auto Member : Clause . Elements ) <nl> + Member . 
walk ( * this ) ; <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> + SourceLoc ContextLoc = D - > getStartLoc ( ) ; <nl> + <nl> + if ( auto * GC = D - > getAsGenericContext ( ) ) { <nl> + / / Asking for generic parameters on decls where they are computed , rather <nl> + / / than explicitly defined will trigger an assertion when semantic queries <nl> + / / and name lookup are disabled . <nl> + bool SafeToAskForGenerics = ! isa < ExtensionDecl > ( D ) & & <nl> + ! isa < ProtocolDecl > ( D ) ; <nl> + if ( SafeToAskForGenerics ) { <nl> + if ( auto * GP = GC - > getGenericParams ( ) ) { <nl> + if ( ! handleAngles ( GP - > getLAngleLoc ( ) , GP - > getRAngleLoc ( ) , ContextLoc ) ) <nl> + return Stop ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + if ( auto * NTD = dyn_cast < NominalTypeDecl > ( D ) ) { <nl> + if ( ! handleBraces ( NTD - > getBraces ( ) , ContextLoc ) ) <nl> + return Stop ; <nl> + } else if ( auto * ED = dyn_cast < ExtensionDecl > ( D ) ) { <nl> + if ( ! handleBraces ( ED - > getBraces ( ) , ContextLoc ) ) <nl> + return Stop ; <nl> + } else if ( auto * VD = dyn_cast < VarDecl > ( D ) ) { <nl> + if ( ! handleBraces ( VD - > getBracesRange ( ) , VD - > getNameLoc ( ) ) ) <nl> + return Stop ; <nl> + } else if ( auto * AFD = dyn_cast < AbstractFunctionDecl > ( D ) ) { <nl> + if ( auto * PL = AFD - > getParameters ( ) ) { <nl> + if ( ! handleParens ( PL - > getLParenLoc ( ) , PL - > getRParenLoc ( ) , ContextLoc ) ) <nl> + return Stop ; <nl> + } <nl> + } else if ( auto * SD = dyn_cast < SubscriptDecl > ( D ) ) { <nl> + if ( ! handleBraces ( SD - > getBracesRange ( ) , ContextLoc ) ) <nl> + return Stop ; <nl> + if ( auto * PL = SD - > getIndices ( ) ) { <nl> + if ( ! handleParens ( PL - > getLParenLoc ( ) , PL - > getRParenLoc ( ) , ContextLoc ) ) <nl> + return Stop ; <nl> + } <nl> + } else if ( auto * PGD = dyn_cast < PrecedenceGroupDecl > ( D ) ) { <nl> + SourceRange Braces ( PGD - > getLBraceLoc ( ) , PGD - > getRBraceLoc ( ) ) ; <nl> + if ( ! handleBraces ( Braces , ContextLoc ) ) <nl> + return Stop ; <nl> + } else if ( auto * PDD = dyn_cast < PoundDiagnosticDecl > ( D ) ) { <nl> + / / TODO : add paren locations to PoundDiagnosticDecl <nl> + } <nl> + <nl> + return Continue ; <nl> + } <nl> + <nl> + std : : pair < bool , Stmt * > walkToStmtPre ( Stmt * S ) override { <nl> + std : : pair < bool , Stmt * > Continue = { true , S } , Stop = { false , nullptr } ; <nl> + <nl> + if ( S - > isImplicit ( ) ) <nl> + return Continue ; <nl> + <nl> + if ( auto * LCS = dyn_cast < LabeledConditionalStmt > ( S ) ) { <nl> + for ( auto & Elem : LCS - > getCond ( ) ) { <nl> + if ( Elem . getKind ( ) = = StmtConditionElement : : CK_Availability ) { <nl> + PoundAvailableInfo * PA = Elem . getAvailability ( ) ; <nl> + if ( ! handleParens ( PA - > getLParenLoc ( ) , PA - > getRParenLoc ( ) , <nl> + PA - > getStartLoc ( ) ) ) <nl> + return Stop ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + SourceLoc ContextLoc = S - > getStartLoc ( ) ; <nl> + if ( auto * BS = dyn_cast < BraceStmt > ( S ) ) { <nl> + if ( ! handleBraceStmt ( BS , ContextLoc ) ) <nl> + return Stop ; <nl> + } else if ( auto * IS = dyn_cast < IfStmt > ( S ) ) { <nl> + if ( ! handleBraceStmt ( IS - > getThenStmt ( ) , IS - > getIfLoc ( ) ) ) <nl> + return Stop ; <nl> + } else if ( auto * GS = dyn_cast < GuardStmt > ( S ) ) { <nl> + if ( ! handleBraceStmt ( GS - > getBody ( ) , GS - > getGuardLoc ( ) ) ) <nl> + return Stop ; <nl> + } else if ( auto * FS = dyn_cast < ForEachStmt > ( S ) ) { <nl> + if ( ! 
handleBraceStmt ( FS - > getBody ( ) , FS - > getForLoc ( ) ) ) <nl> + return Stop ; <nl> + } else if ( auto * SS = dyn_cast < SwitchStmt > ( S ) ) { <nl> + SourceRange Braces ( SS - > getLBraceLoc ( ) , SS - > getRBraceLoc ( ) ) ; <nl> + if ( ! handleBraces ( Braces , SS - > getSwitchLoc ( ) ) ) <nl> + return Stop ; <nl> + } else if ( auto * DS = dyn_cast < DoStmt > ( S ) ) { <nl> + if ( ! handleBraceStmt ( DS - > getBody ( ) , DS - > getDoLoc ( ) ) ) <nl> + return Stop ; <nl> + } else if ( auto * DCS = dyn_cast < DoCatchStmt > ( S ) ) { <nl> + if ( ! handleBraceStmt ( DCS - > getBody ( ) , DCS - > getDoLoc ( ) ) ) <nl> + return Stop ; <nl> + } else if ( auto * CS = dyn_cast < CatchStmt > ( S ) ) { <nl> + if ( ! handleBraceStmt ( CS - > getBody ( ) , CS - > getCatchLoc ( ) ) ) <nl> + return Stop ; <nl> + } else if ( auto * RWS = dyn_cast < RepeatWhileStmt > ( S ) ) { <nl> + if ( ! handleBraceStmt ( RWS - > getBody ( ) , RWS - > getRepeatLoc ( ) ) ) <nl> + return Stop ; <nl> + } else if ( auto * WS = dyn_cast < WhileStmt > ( S ) ) { <nl> + if ( ! handleBraceStmt ( WS - > getBody ( ) , WS - > getWhileLoc ( ) ) ) <nl> + return Stop ; <nl> + } else if ( auto * PAS = dyn_cast < PoundAssertStmt > ( S ) ) { <nl> + / / TODO : add paren locations to PoundAssertStmt <nl> + } <nl> + <nl> + return Continue ; <nl> + } <nl> + <nl> + std : : pair < bool , Expr * > walkToExprPre ( Expr * E ) override { <nl> + std : : pair < bool , Expr * > Stop = { false , nullptr } , Continue = { true , E } ; <nl> + <nl> + / / Walk through error expressions . <nl> + if ( auto * EE = dyn_cast < ErrorExpr > ( E ) ) { <nl> + if ( auto * OE = EE - > getOriginalExpr ( ) ) { <nl> + llvm : : SaveAndRestore < ASTWalker : : ParentTy > ( Parent , EE ) ; <nl> + OE - > walk ( * this ) ; <nl> + } <nl> + return Continue ; <nl> + } <nl> + <nl> + if ( E - > isImplicit ( ) ) <nl> + return Continue ; <nl> + <nl> + SourceLoc ContextLoc = E - > getStartLoc ( ) ; <nl> + if ( auto * PE = dyn_cast < ParenExpr > ( E ) ) { <nl> + SourceLoc L = getLocIfKind ( SM , PE - > getLParenLoc ( ) , <nl> + { tok : : l_paren , tok : : l_square } ) ; <nl> + SourceLoc R = getLocIfKind ( SM , PE - > getRParenLoc ( ) , <nl> + { tok : : r_paren , tok : : r_square } ) ; <nl> + if ( L . isValid ( ) & & ! handleRange ( L , R , ContextLoc ) ) <nl> + return Stop ; <nl> + } else if ( auto * TE = dyn_cast < TupleExpr > ( E ) ) { <nl> + SourceLoc L = getLocIfKind ( SM , TE - > getLParenLoc ( ) , <nl> + { tok : : l_paren , tok : : l_square } ) ; <nl> + SourceLoc R = getLocIfKind ( SM , TE - > getRParenLoc ( ) , <nl> + { tok : : r_paren , tok : : r_square } ) ; <nl> + if ( L . isValid ( ) & & ! handleRange ( L , R , ContextLoc ) ) <nl> + return Stop ; <nl> + } else if ( auto * CE = dyn_cast < CollectionExpr > ( E ) ) { <nl> + if ( ! handleSquares ( CE - > getLBracketLoc ( ) , CE - > getRBracketLoc ( ) , <nl> + ContextLoc ) ) <nl> + return Stop ; <nl> + } else if ( auto * CE = dyn_cast < ClosureExpr > ( E ) ) { <nl> + if ( ! handleBraceStmt ( CE - > getBody ( ) , ContextLoc ) ) <nl> + return Stop ; <nl> + SourceRange Capture = CE - > getBracketRange ( ) ; <nl> + if ( ! handleSquares ( Capture . Start , Capture . End , Capture . Start ) ) <nl> + return Stop ; <nl> + if ( auto * PL = CE - > getParameters ( ) ) { <nl> + if ( ! 
handleParens ( PL - > getLParenLoc ( ) , PL - > getRParenLoc ( ) , <nl> + PL - > getStartLoc ( ) ) ) <nl> + return Stop ; <nl> + } <nl> + } else if ( isa < CallExpr > ( E ) | | isa < SubscriptExpr > ( E ) ) { <nl> + SourceLoc ContextLoc = getContextLocForArgs ( SM , E ) ; <nl> + Expr * Arg ; <nl> + if ( auto * CE = dyn_cast < CallExpr > ( E ) ) { <nl> + Arg = CE - > getArg ( ) ; <nl> + } else { <nl> + Arg = cast < SubscriptExpr > ( E ) - > getIndex ( ) ; <nl> + } <nl> + ClosureExpr * TC = nullptr ; <nl> + CaptureListExpr * TCL = nullptr ; <nl> + if ( auto * PE = dyn_cast_or_null < ParenExpr > ( Arg ) ) { <nl> + if ( isa < SubscriptExpr > ( E ) ) { <nl> + if ( ! handleSquares ( PE - > getLParenLoc ( ) , PE - > getRParenLoc ( ) , ContextLoc ) ) <nl> + return Stop ; <nl> + } else { <nl> + if ( ! handleParens ( PE - > getLParenLoc ( ) , PE - > getRParenLoc ( ) , ContextLoc ) ) <nl> + return Stop ; <nl> + } <nl> + if ( PE - > hasTrailingClosure ( ) ) { <nl> + if ( auto * Last = PE - > getSubExpr ( ) ) { <nl> + TC = dyn_cast < ClosureExpr > ( Last ) ; <nl> + TCL = dyn_cast < CaptureListExpr > ( Last ) ; <nl> + } <nl> + } <nl> + } else if ( auto * TE = dyn_cast_or_null < TupleExpr > ( Arg ) ) { <nl> + if ( isa < SubscriptExpr > ( E ) ) { <nl> + if ( ! handleSquares ( TE - > getLParenLoc ( ) , TE - > getRParenLoc ( ) , ContextLoc ) ) <nl> + return Stop ; <nl> + } else { <nl> + if ( ! handleParens ( TE - > getLParenLoc ( ) , TE - > getRParenLoc ( ) , ContextLoc ) ) <nl> + return Stop ; <nl> + } <nl> + if ( TE - > hasTrailingClosure ( ) ) { <nl> + if ( auto * Last = TE - > getElements ( ) . back ( ) ) { <nl> + TC = dyn_cast < ClosureExpr > ( Last ) ; <nl> + TCL = dyn_cast < CaptureListExpr > ( Last ) ; <nl> + } <nl> + } <nl> + } <nl> + if ( TC & & ! handleBraceStmt ( TC - > getBody ( ) , ContextLoc ) ) <nl> + return Stop ; <nl> + if ( TCL & & ! handleBraceStmt ( TCL - > getClosureBody ( ) - > getBody ( ) , ContextLoc ) ) <nl> + return Stop ; <nl> + } <nl> + return Continue ; <nl> + } <nl> + <nl> + std : : pair < bool , Pattern * > walkToPatternPre ( Pattern * P ) override { <nl> + std : : pair < bool , Pattern * > Continue = { true , P } , Stop = { false , nullptr } ; <nl> + <nl> + if ( P - > isImplicit ( ) ) <nl> + return Continue ; <nl> + <nl> + if ( isa < TuplePattern > ( P ) | | isa < ParenPattern > ( P ) ) { <nl> + if ( ! handleParens ( P - > getStartLoc ( ) , P - > getEndLoc ( ) , P - > getStartLoc ( ) ) ) <nl> + return Stop ; <nl> + } <nl> + <nl> + return Continue ; <nl> + } <nl> + <nl> + bool walkToTypeReprPre ( TypeRepr * T ) override { <nl> + bool Continue = true , Stop = false ; <nl> + <nl> + if ( auto * TT = dyn_cast < TupleTypeRepr > ( T ) ) { <nl> + SourceRange Parens = TT - > getParens ( ) ; <nl> + if ( ! handleParens ( Parens . Start , Parens . End , Parens . Start ) ) <nl> + return Stop ; <nl> + } else if ( isa < ArrayTypeRepr > ( T ) | | isa < DictionaryTypeRepr > ( T ) ) { <nl> + if ( ! handleSquares ( T - > getStartLoc ( ) , T - > getEndLoc ( ) , T - > getStartLoc ( ) ) ) <nl> + return Stop ; <nl> + } else if ( auto * GI = dyn_cast < GenericIdentTypeRepr > ( T ) ) { <nl> + SourceLoc ContextLoc = GI - > getNameLoc ( ) . getBaseNameLoc ( ) ; <nl> + SourceRange Brackets = GI - > getAngleBrackets ( ) ; <nl> + if ( ! handleAngles ( Brackets . Start , Brackets . 
End , ContextLoc ) ) <nl> + return Stop ; <nl> + } <nl> + <nl> + return Continue ; <nl> + } <nl> + <nl> + bool shouldWalkIntoGenericParams ( ) override { return true ; } <nl> + } ; <nl> + <nl> + / / / Indicates whether a range is an open or closed range . <nl> + enum class RangeKind { Closed , Open } ; <nl> + <nl> + / / / A helper class that determines whether a given node , or subrage of a node <nl> + / / / should indent or not when it spans multiple lines . <nl> + class OutdentChecker : protected RangeWalker { <nl> + SourceRange CheckRange ; / / / < The source range to consider . <nl> + RangeKind CheckRangeKind ; / / / < Whether \ c CheckRange is open or closed . <nl> + bool IsOutdenting = false ; / / / < Tracks whether a seen range prevents indenting . <nl> + llvm : : DenseMap < SourceLoc , ContextOverride > LineStartToOverride ; <nl> + <nl> + explicit OutdentChecker ( SourceManager & SM , <nl> + SourceRange CheckRange , RangeKind CheckRangeKind ) <nl> + : RangeWalker ( SM ) , CheckRange ( CheckRange ) , CheckRangeKind ( CheckRangeKind ) { <nl> + assert ( CheckRange . isValid ( ) ) ; <nl> + } <nl> + <nl> + bool handleRange ( SourceLoc L , SourceLoc R , SourceLoc ContextLoc ) override { <nl> + assert ( L . isValid ( ) & & ContextLoc . isValid ( ) ) ; <nl> + <nl> + / / Ignore parens / braces / brackets outside of the open / closed check range . <nl> + if ( ! isInCheckRange ( L , R ) ) <nl> + return true ; <nl> + <nl> + / / The CheckRange is made outdenting by any parens / braces / brackets with : <nl> + / / 1 ) a ContextLoc starts on the same line as the start of CheckRange , and <nl> + / / 2 ) either : <nl> + / / a ) an R token that starts its containing line , or <nl> + / / b ) an L token that isn ' t the ContextLoc and starts its containing line . <nl> + / / <nl> + / / E . g . for an open CheckRange covering the contents of an array literal <nl> + / / with various line bresk positions : <nl> + / / <nl> + / / / / This doesn ' t outdent because : <nl> + / / [ / / The array brackets are outside the open CheckRange so ignored . <nl> + / / ( 1 , ( 2 , 3 ) ) , / / both parens fail conditions 1 and 2 . <nl> + / / ( 4 , ( 5 , 6 ) ) / / both parens fail conditions 1 and 2 . <nl> + / / ] <nl> + / / <nl> + / / / / This doesn ' t outdent because : <nl> + / / [ ( 1 , ( 2 , 3 ) ) , / / both parens fail condition 2 . <nl> + / / ( / / these parens fail condition 1 . <nl> + / / 4 , ( 5 , 6 ) / / these parens fail conditions 1 and 2 . <nl> + / / ) ] <nl> + / / <nl> + / / This outdents because : <nl> + / / [ ( / / These parens meet conditions 1 and 2a . <nl> + / / 1 , ( 2 , 3 ) <nl> + / / ) , ( <nl> + / / 4 , ( 5 , 6 ) <nl> + / / ) ] <nl> + / / <nl> + / / This outdents because : <nl> + / / [ ( 1 , ( / / The inner parens meet conditions 1 and 2a . <nl> + / / 2 , 3 <nl> + / / ) , ( 4 , ( 5 , 6 ) ] <nl> + / / <nl> + / / This outdents because : <nl> + / / [ ( 1 , ( 2 , 3 ) , ( / / The inner parens meet conditions 1 and 2a . <nl> + / / 4 , ( 5 , 6 ) <nl> + / / ) ] <nl> + / / <nl> + / / For a closed CheckRange covering the variable declaration below : <nl> + / / <nl> + / / This doesn ' t outdent because : <nl> + / / var x = foo ( 1 ) { / / The parens and braces fail condition 2 <nl> + / / return 42 } <nl> + / / <nl> + / / This outdents because : <nl> + / / var x = foo ( 1 ) { / / These braces meet conditions 1 and 2a . 
<nl> + / / return 42 <nl> + / / } <nl> + / / <nl> + / / This outdents because : <nl> + / / var x = foo ( 1 ) <nl> + / / { / / These braces meet conditions 1 and 2b ( their ContextLoc is ' foo ' ) . <nl> + / / return 42 <nl> + / / } <nl> + / / <nl> + / / And for a closed CheckRange covering the call expression below : <nl> + / / <nl> + / / This doesn ' t outdent because : <nl> + / / foo ( 1 , / / These parens fail condition 2 . <nl> + / / 2 , 3 ) { return 42 } / / These braces fail condition 1 and 2 . <nl> + / / <nl> + / / This outdents because : <nl> + / / foo ( 1 , 2 , 3 ) <nl> + / / { / / These braces meet conditions 1 and 2b ( their ContextLoc is ' foo ' ) . <nl> + / / return 42 <nl> + / / } <nl> + <nl> + / / The above conditions are not sufficient to handle cases like the below , <nl> + / / which we would like to be considered outdenting : <nl> + / / foo ( a : 1 , <nl> + / / b : 2 ) [ x : bar { / / These braces fail condition 1 . <nl> + / / return 42 <nl> + / / } ] <nl> + / / To handle them , we propagate the ContextLoc of each parent range down to <nl> + / / any child ranges that start on the same line as the parent . The braces <nl> + / / above then ' inherit ' the ContextLoc of their parent brackets ( ' foo ' ) , and <nl> + / / pass condition 1 . <nl> + ContextLoc = propagateContextLocs ( ContextLoc , L , R ) ; <nl> + <nl> + / / Ignore parens / braces / brackets that fail Condition 1 . <nl> + if ( ! isOnSameLine ( SM , ContextLoc , CheckRange . Start ) ) <nl> + return true ; <nl> + <nl> + / / Ignore parens / braces / brackets that can ' t meet Condition 2 . <nl> + if ( R . isValid ( ) & & isOnSameLine ( SM , ContextLoc , R ) ) <nl> + return true ; <nl> + <nl> + / / Check condition 2b . <nl> + if ( ContextLoc ! = L & & isFirstTokenOnLine ( SM , L ) ) { <nl> + IsOutdenting = true ; <nl> + } else if ( R . isValid ( ) ) { <nl> + / / Check condition 2a . <nl> + SourceLoc LineStart = Lexer : : getLocForStartOfLine ( SM , R ) ; <nl> + Token First = Lexer : : getTokenAtLocation ( SM , LineStart , <nl> + CommentRetentionMode : : None ) ; <nl> + IsOutdenting | = First . getLoc ( ) = = R ; <nl> + } <nl> + <nl> + / / We only need to continue checking if it ' s not already outdenting . <nl> + return ! IsOutdenting ; <nl> + } <nl> + <nl> + SourceLoc propagateContextLocs ( SourceLoc ContextLoc , SourceLoc L , SourceLoc R ) { <nl> + bool HasSeparateContext = ! isOnSameLine ( SM , L , ContextLoc ) ; <nl> + <nl> + / / Update ContextLoc for the currently active override on its line . <nl> + ContextOverride & Upstream = getOverrideForLineContaining ( ContextLoc ) ; <nl> + IndentContext : : ContextKind Kind = IndentContext : : LineStart ; <nl> + Upstream . applyIfNeeded ( SM , ContextLoc , Kind ) ; <nl> + <nl> + / / If the original ContextLoc and L were on the same line , there ' s no need <nl> + / / to propagate anything . Child ranges later on the same line will pick up <nl> + / / whatever override we picked up above anyway , and if there wasn ' t <nl> + / / one , their normal ContextLoc should already be correct . <nl> + if ( ! HasSeparateContext ) <nl> + return ContextLoc ; <nl> + <nl> + / / Set an override to propagate the context loc onto the line of L . <nl> + ContextOverride & Downstream = getOverrideForLineContaining ( L ) ; <nl> + ContextLoc = Downstream . 
propagateContext ( SM , ContextLoc , <nl> + Kind , L , R ) ; <nl> + return ContextLoc ; <nl> + } <nl> + <nl> + bool isInCheckRange ( SourceLoc L , SourceLoc R ) const { <nl> + switch ( CheckRangeKind ) { <nl> + case RangeKind : : Open : <nl> + return SM . isBeforeInBuffer ( CheckRange . Start , L ) & & <nl> + ( R . isInvalid ( ) | | SM . isBeforeInBuffer ( R , CheckRange . End ) ) ; <nl> + case RangeKind : : Closed : <nl> + return ! SM . isBeforeInBuffer ( L , CheckRange . Start ) & & <nl> + ( R . isInvalid ( ) | | ! SM . isBeforeInBuffer ( CheckRange . End , R ) ) ; <nl> + } <nl> + } <nl> + <nl> + public : <nl> + / / / Checks if a source range shouldn ' t indent when it crosses multiple lines . <nl> + / / / <nl> + / / / \ param SM <nl> + / / / The SourceManager managing the given source range . <nl> + / / / \ param Range <nl> + / / / The range to check . <nl> + / / / \ param WalkableParent <nl> + / / / A parent AST node that when walked covers all relevant nodes in the <nl> + / / / given source range . <nl> + / / / \ param RangeKind <nl> + / / / Whether the given range to check is closed ( the default ) or open . <nl> + template < typename T > <nl> + static bool hasOutdent ( SourceManager & SM , SourceRange Range , T * WalkableParent , <nl> + RangeKind RangeKind = RangeKind : : Closed ) { <nl> + assert ( Range . isValid ( ) ) ; <nl> + if ( isOnSameLine ( SM , Range . Start , Range . End ) ) <nl> return false ; <nl> - StringRef LineText = swift : : ide : : getTextForLine ( LineIndex , Text , / * Trim * / true ) ; <nl> - return LineText . startswith ( " break " ) | | LineText . startswith ( " continue " ) | | <nl> - LineText . startswith ( " return " ) | | LineText . startswith ( " fallthrough " ) ; <nl> + OutdentChecker Checker ( SM , Range , RangeKind ) ; <nl> + WalkableParent - > walk ( Checker ) ; <nl> + return Checker . IsOutdenting ; <nl> + } <nl> + <nl> + / / / Checks if an AST node shouldn ' t indent when it crosses multiple lines . <nl> + / / / <nl> + / / / \ param SM <nl> + / / / The SourceManager managing the given source range . <nl> + / / / \ param WalkableNode <nl> + / / / The AST node to check . <nl> + / / / \ param RangeKind <nl> + / / / Whether to check the source range of \ c WalkableNode as a closed ( the <nl> + / / / default ) or open range . <nl> + template < typename T > <nl> + static bool hasOutdent ( SourceManager & SM , T * WalkableNode , <nl> + RangeKind RangeKind = RangeKind : : Closed ) { <nl> + return hasOutdent ( SM , WalkableNode - > getSourceRange ( ) , WalkableNode , <nl> + RangeKind ) ; <nl> } <nl> <nl> - void padToSiblingColumn ( StringBuilder & Builder , <nl> - const CodeFormatOptions & FmtOptions ) { <nl> - assert ( SiblingInfo . Loc . isValid ( ) & & " No sibling to align with . " ) ; <nl> - CharSourceRange Range ( SM , Lexer : : getLocForStartOfLine ( SM , SiblingInfo . Loc ) , <nl> - SiblingInfo . Loc ) ; <nl> - unsigned SpaceLength = 0 ; <nl> - unsigned TabLength = 0 ; <nl> + private : <nl> + ContextOverride & getOverrideForLineContaining ( SourceLoc Loc ) { <nl> + SourceLoc LineStart = Lexer : : getLocForStartOfLine ( SM , Loc ) ; <nl> + auto Ret = LineStartToOverride . insert ( { LineStart , ContextOverride ( ) } ) ; <nl> + return Ret . first - > getSecond ( ) ; <nl> + } <nl> + } ; <nl> + <nl> + <nl> + / / / A helper class for aligning list elements and their bounding tokens . <nl> + class ListAligner { <nl> + SourceManager & SM ; <nl> + SourceLoc TargetLoc ; / / / < The indent location . 
<nl> + SourceLoc ContextLoc ; / / / < The owning indent context ' s location . <nl> + SourceLoc IntroducerLoc ; / / / < The opening token before the first list element . <nl> + SourceLoc CloseLoc ; / / / < The token that closes the list ( optional ) . <nl> + bool AllowsTrailingSeparator ; / / / < Whether a final trailing comma is legal . <nl> + <nl> + SourceLoc AlignLoc ; <nl> + SourceLoc LastEndLoc ; <nl> + bool HasOutdent = false ; <nl> + <nl> + public : <nl> + <nl> + / / / Constructs a new \ c ListAligner <nl> + / / / <nl> + / / / \ param SM <nl> + / / / The source manager to use . <nl> + / / / \ param TargetLoc <nl> + / / / The indent target location . <nl> + / / / \ param ContextLoc <nl> + / / / The location list items should indent relative to . <nl> + / / / \ param IntroducerLoc <nl> + / / / The location of the token before the first item in the list , e . g . ' ( ' , <nl> + / / / or \ c case . <nl> + / / / \ param CloseLoc <nl> + / / / The location of the closing token of the list , e . g . ' ) ' , if present . <nl> + / / / \ param AllowsTrailingSeparator <nl> + / / / Whether a trailing comma is legal , or indicates an incomplete list . <nl> + ListAligner ( SourceManager & SM , SourceLoc TargetLoc , SourceLoc ContextLoc , <nl> + SourceLoc IntroducerLoc , SourceLoc CloseLoc = SourceLoc ( ) , <nl> + bool AllowsTrailingSeparator = false ) <nl> + : SM ( SM ) , TargetLoc ( TargetLoc ) , ContextLoc ( ContextLoc ) , <nl> + IntroducerLoc ( IntroducerLoc ) , CloseLoc ( CloseLoc ) , <nl> + AllowsTrailingSeparator ( AllowsTrailingSeparator ) { } <nl> + <nl> + / / / Update the alignment for a list element . <nl> + / / / <nl> + / / / \ param Start <nl> + / / / The start location of the element . <nl> + / / / \ param End <nl> + / / / The end location of the element <nl> + / / / \ param WalkableParent <nl> + / / / An AST node that is , or contains the element , and is walkable . <nl> + template < typename T > <nl> + void updateAlignment ( SourceLoc Start , SourceLoc End , T * WalkableParent ) { <nl> + updateAlignment ( SourceRange ( Start , End ) , WalkableParent ) ; <nl> + } <nl> + <nl> + / / / Update the alignment for a list element . <nl> + / / / <nl> + / / / \ param Range <nl> + / / / The source range of the element . <nl> + / / / \ param WalkableParent <nl> + / / / An AST node that is , or contains the element , and is walkable . <nl> + template < typename T > <nl> + void updateAlignment ( SourceRange Range , T * WalkableParent ) { <nl> + assert ( Range . isValid ( ) ) ; <nl> + LastEndLoc = Range . End ; <nl> + <nl> + HasOutdent | = isOnSameLine ( SM , IntroducerLoc , Range . Start ) & & <nl> + OutdentChecker : : hasOutdent ( SM , Range , WalkableParent ) ; <nl> + <nl> + if ( HasOutdent | | ! SM . isBeforeInBuffer ( Range . Start , TargetLoc ) ) <nl> + return ; <nl> + if ( AlignLoc . isValid ( ) ) { <nl> + if ( isOnSameLine ( SM , Range . Start , AlignLoc ) | | <nl> + ! isFirstTokenOnLine ( SM , Range . Start ) ) <nl> + return ; <nl> + AlignLoc = getLocForContentStartOnSameLine ( SM , Range . Start ) ; <nl> + } else if ( isOnSameLine ( SM , IntroducerLoc , Range . Start ) ) { <nl> + AlignLoc = Range . Start ; <nl> + } <nl> + } <nl> + <nl> + / / / Returns the list ' s IndentContext and sets an exact alignment override if <nl> + / / / needed . <nl> + / / / <nl> + / / / \ note This should only be called after calling \ c updateAlignment on every <nl> + / / / element range . 
<nl> + / / / \ param Override <nl> + / / / An optional ContextOverride object to set <nl> + IndentContext <nl> + getContextAndSetAlignment ( ContextOverride & Override ) { <nl> + assert ( CloseLoc . isInvalid ( ) | | ! SM . isBeforeInBuffer ( CloseLoc , TargetLoc ) ) ; <nl> + bool ShouldIndent = ! HasOutdent & & TargetLoc ! = IntroducerLoc ; <nl> + if ( ShouldIndent & & isTargetTrailing ( ) ) { <nl> + if ( TargetLoc = = CloseLoc ) { <nl> + ShouldIndent = ! AllowsTrailingSeparator & & hasTrailingComma ( ) ; <nl> + } else if ( CloseLoc . isInvalid ( ) ) { <nl> + ShouldIndent = isEmpty ( ) | | hasTrailingComma ( ) ; <nl> + } <nl> + } <nl> + if ( ShouldIndent & & AlignLoc . isValid ( ) ) { <nl> + setAlignmentIfNeeded ( Override ) ; <nl> + return IndentContext { AlignLoc , false , IndentContext : : Exact } ; <nl> + } <nl> + return IndentContext { ContextLoc , ShouldIndent } ; <nl> + } <nl> + <nl> + / / / Sets an exact alignment override for child indent contexts , if needed . <nl> + / / / <nl> + / / / This should be called before returning an \ c IndentContext for a subrange <nl> + / / / of the list . <nl> + void setAlignmentIfNeeded ( ContextOverride & Override ) { <nl> + if ( HasOutdent | | AlignLoc . isInvalid ( ) ) <nl> + return ; <nl> + Override . setExact ( SM , AlignLoc ) ; <nl> + } <nl> + <nl> + private : <nl> + bool hasTrailingComma ( ) const { <nl> + if ( LastEndLoc . isInvalid ( ) ) <nl> + return false ; <nl> + if ( locIsKind ( SM , LastEndLoc , tok : : comma ) ) <nl> + return true ; <nl> + Optional < Token > AfterLast = getTokenAfter ( SM , LastEndLoc ) ; <nl> + return AfterLast & & AfterLast - > is ( tok : : comma ) ; <nl> + } <nl> + <nl> + bool isTargetTrailing ( ) const { <nl> + return isEmpty ( ) | | SM . isBeforeInBuffer ( LastEndLoc , TargetLoc ) ; <nl> + } <nl> + <nl> + bool isEmpty ( ) const { return LastEndLoc . isInvalid ( ) ; } <nl> + } ; <nl> + <nl> + <nl> + / / / Represents an indent target that immediately follows a node being walked by <nl> + / / / a \ c FormatWalker instance . <nl> + struct Trailing { <nl> + Optional < Token > Token ; <nl> + <nl> + / / / Whether the trailing target is on an empty line . <nl> + bool isEmpty ( ) const { return ! Token . hasValue ( ) ; } <nl> + <nl> + / / / Whether the trailing target is a token with one of the given kinds . <nl> + bool hasKind ( ArrayRef < tok > Kinds ) const { <nl> + if ( Token ) { <nl> + tok Kind = Token - > getKind ( ) ; <nl> + return std : : find ( Kinds . begin ( ) , Kinds . end ( ) , Kind ) ! = Kinds . end ( ) ; <nl> + } <nl> + return false ; <nl> + } <nl> + } ; <nl> + <nl> + <nl> + / / / Walks an AST Node to determine the \ c FormatContext of the target indent location . <nl> + / / / <nl> + / / / It only walks into nodes whose source range overlaps , or immediately <nl> + / / / precedes the target indent location . <nl> + class FormatWalker : public ASTWalker { <nl> + SourceFile & SF ; <nl> + SourceManager & SM ; <nl> + CodeFormatOptions & FmtOptions ; <nl> + ArrayRef < Token > TokenList ; <nl> + <nl> + SourceLoc TargetLocation ; <nl> + SourceLoc TargetLineLoc ; <nl> + llvm : : SmallPtrSet < void * , 16 > NodesToSkip ; <nl> + ArrayRef < Token > : : iterator CurrentTokIt ; <nl> + <nl> + / / / The innermost indent context of the target location . <nl> + Optional < IndentContext > InnermostCtx ; <nl> + / / / A conditionally applicable indent context override . <nl> + ContextOverride CtxOverride ; <nl> + / / / Whether the target location appears within a doc comment block . 
<nl> + bool InDocCommentBlock = false ; <nl> + / / / Whether the target location appears within a line comment . <nl> + bool InCommentLine = false ; <nl> + / / / Whether the target location appears within a string literal . <nl> + bool InStringLiteral = false ; <nl> + <nl> + public : <nl> + explicit FormatWalker ( SourceFile & SF , SourceManager & SM , CodeFormatOptions & Options ) <nl> + : SF ( SF ) , SM ( SM ) , FmtOptions ( Options ) , TokenList ( SF . getAllTokens ( ) ) , <nl> + CurrentTokIt ( TokenList . begin ( ) ) { } <nl> + <nl> + / / / Compute the \ c FormatContext of the given source location . <nl> + / / / <nl> + / / / \ note The given location should point to the content start of its line . <nl> + FormatContext walkToLocation ( SourceLoc Loc ) { <nl> + InnermostCtx = None ; <nl> + CtxOverride . clear ( ) ; <nl> + TargetLocation = Loc ; <nl> + TargetLineLoc = Lexer : : getLocForStartOfLine ( SM , TargetLocation ) ; <nl> + InDocCommentBlock = InCommentLine = InStringLiteral = false ; <nl> + NodesToSkip . clear ( ) ; <nl> + CurrentTokIt = TokenList . begin ( ) ; <nl> + <nl> + SF . walk ( * this ) ; <nl> + scanTokensUntil ( SourceLoc ( ) ) ; <nl> + <nl> + if ( InnermostCtx ) <nl> + CtxOverride . applyIfNeeded ( SM , * InnermostCtx ) ; <nl> + <nl> + return FormatContext ( SM , InnermostCtx , InDocCommentBlock , <nl> + InCommentLine , InStringLiteral ) ; <nl> + } <nl> + <nl> + <nl> + # pragma mark ASTWalker overrides and helpers <nl> + <nl> + private : <nl> + bool walkCustomAttributes ( Decl * D ) { <nl> + / / CustomAttrs of non - param VarDecls are handled when this method is called <nl> + / / on their containing PatternBindingDecls ( below ) . <nl> + if ( isa < VarDecl > ( D ) & & ! isa < ParamDecl > ( D ) ) <nl> + return true ; <nl> + <nl> + if ( auto * PBD = dyn_cast < PatternBindingDecl > ( D ) ) { <nl> + if ( auto * SingleVar = PBD - > getSingleVar ( ) ) { <nl> + D = SingleVar ; <nl> + } else { <nl> + return true ; <nl> + } <nl> + } <nl> + for ( auto * customAttr : D - > getAttrs ( ) . getAttributes < CustomAttr , true > ( ) ) { <nl> + if ( auto * Repr = customAttr - > getTypeLoc ( ) . getTypeRepr ( ) ) { <nl> + if ( ! Repr - > walk ( * this ) ) <nl> + return false ; <nl> + } <nl> + if ( auto * Arg = customAttr - > getArg ( ) ) { <nl> + if ( ! Arg - > walk ( * this ) ) <nl> + return false ; <nl> + } <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + bool walkToDeclPre ( Decl * D ) override { <nl> + if ( ! walkCustomAttributes ( D ) ) <nl> + return false ; <nl> + <nl> + auto Action = HandlePre ( D , D - > isImplicit ( ) ) ; <nl> + if ( Action . shouldGenerateIndentContext ( ) ) { <nl> + if ( auto IndentCtx = getIndentContextFrom ( D , Action . Trailing ) ) <nl> + InnermostCtx = IndentCtx ; <nl> + } <nl> + <nl> + / / Walk accessors via their pattern binding decl . They aren ' t walked via <nl> + / / their VarDecls due to their non - overlapping range , so they ' d be skipped <nl> + / / otherwise . <nl> + if ( auto * PBD = dyn_cast < PatternBindingDecl > ( D ) ) { <nl> + if ( Action . shouldVisitChildren ( ) ) { <nl> + for ( auto I : range ( PBD - > getNumPatternEntries ( ) ) ) { <nl> + auto * P = PBD - > getPattern ( I ) ; <nl> + if ( ! P ) <nl> + continue ; <nl> + bool Cancelled = false ; <nl> + P - > forEachVariable ( [ & ] ( VarDecl * VD ) { <nl> + if ( Cancelled | | VD - > getBracesRange ( ) . 
isInvalid ( ) ) <nl> + return ; <nl> + for ( auto * AD : VD - > getAllAccessors ( ) ) { <nl> + if ( AD - > walk ( * this ) ) { <nl> + Cancelled = true ; <nl> + break ; <nl> + } <nl> + } <nl> + } ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / / Walk into inactive config regions . <nl> + if ( auto * ICD = dyn_cast < IfConfigDecl > ( D ) ) { <nl> + if ( Action . shouldVisitChildren ( ) ) { <nl> + for ( auto Clause : ICD - > getClauses ( ) ) { <nl> + for ( auto Member : Clause . Elements ) <nl> + Member . walk ( * this ) ; <nl> + } <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> + return Action . shouldVisitChildren ( ) ; <nl> + } <nl> + <nl> + std : : pair < bool , Stmt * > walkToStmtPre ( Stmt * S ) override { <nl> + auto Action = HandlePre ( S , S - > isImplicit ( ) ) ; <nl> + if ( Action . shouldGenerateIndentContext ( ) ) { <nl> + if ( auto IndentCtx = getIndentContextFrom ( S , Action . Trailing ) ) <nl> + InnermostCtx = IndentCtx ; <nl> + } <nl> + return { Action . shouldVisitChildren ( ) , S } ; <nl> + } <nl> + <nl> + std : : pair < bool , Expr * > walkToExprPre ( Expr * E ) override { <nl> + if ( E - > getKind ( ) = = ExprKind : : StringLiteral & & <nl> + SM . isBeforeInBuffer ( E - > getStartLoc ( ) , TargetLocation ) & & <nl> + SM . isBeforeInBuffer ( TargetLocation , <nl> + Lexer : : getLocForEndOfToken ( SM , E - > getEndLoc ( ) ) ) ) { <nl> + InStringLiteral = true ; <nl> + } <nl> + <nl> + / / Create a default indent context for all top - level expressions <nl> + if ( isStatementListItem ( ) ) { <nl> + SourceRange Range = E - > getSourceRange ( ) ; <nl> + if ( Range . isValid ( ) & & isTargetContext ( Range ) ) { <nl> + InnermostCtx = IndentContext { <nl> + E - > getStartLoc ( ) , <nl> + ! OutdentChecker : : hasOutdent ( SM , E ) <nl> + } ; <nl> + } <nl> + } <nl> + <nl> + auto Action = HandlePre ( E , E - > isImplicit ( ) ) ; <nl> + if ( Action . shouldGenerateIndentContext ( ) ) { <nl> + if ( auto IndentCtx = getIndentContextFrom ( E , Action . Trailing ) ) <nl> + InnermostCtx = IndentCtx ; <nl> + } <nl> + <nl> + / / Don ' t visit the child expressions of interpolated strings directly - <nl> + / / visit only the argument of each appendInterpolation call instead , and <nl> + / / update InStringLiteral for each segment . <nl> + if ( auto * ISL = dyn_cast < InterpolatedStringLiteralExpr > ( E ) ) { <nl> + if ( Action . shouldVisitChildren ( ) ) { <nl> + llvm : : SaveAndRestore < ASTWalker : : ParentTy > ( Parent , ISL ) ; <nl> + SourceLoc PrevStringStart = ISL - > getStartLoc ( ) ; <nl> + ISL - > forEachSegment ( SF . getASTContext ( ) , <nl> + [ & ] ( bool IsInterpolation , CallExpr * CE ) { <nl> + if ( auto * Arg = CE - > getArg ( ) ) { <nl> + if ( IsInterpolation ) { <nl> + / / Handle the preceding string segment . <nl> + CharSourceRange StringRange ( SM , PrevStringStart , CE - > getStartLoc ( ) ) ; <nl> + if ( StringRange . contains ( TargetLocation ) ) { <nl> + InStringLiteral = true ; <nl> + return ; <nl> + } <nl> + / / Walk into the interpolation segment . <nl> + Arg - > walk ( * this ) ; <nl> + } else { <nl> + PrevStringStart = CE - > getStartLoc ( ) ; <nl> + } <nl> + } <nl> + } ) ; <nl> + / / Handle the trailing string segment . <nl> + SourceLoc End = Lexer : : getLocForEndOfToken ( SM , ISL - > getStartLoc ( ) ) ; <nl> + CharSourceRange StringRange ( SM , PrevStringStart , End ) ; <nl> + if ( StringRange . 
contains ( TargetLocation ) ) <nl> + InStringLiteral = true ; <nl> + <nl> + return { false , E } ; <nl> + } <nl> + } <nl> + <nl> + / / Walk through error expressions . <nl> + if ( auto * EE = dyn_cast < ErrorExpr > ( E ) ) { <nl> + if ( Action . shouldVisitChildren ( ) ) { <nl> + if ( auto * OE = EE - > getOriginalExpr ( ) ) { <nl> + llvm : : SaveAndRestore < ASTWalker : : ParentTy > ( Parent , EE ) ; <nl> + OE - > walk ( * this ) ; <nl> + } <nl> + return { false , E } ; <nl> + } <nl> + } <nl> + <nl> + return { Action . shouldVisitChildren ( ) , E } ; <nl> + } <nl> + <nl> + std : : pair < bool , Pattern * > walkToPatternPre ( Pattern * P ) override { <nl> + auto Action = HandlePre ( P , P - > isImplicit ( ) ) ; <nl> + if ( Action . shouldGenerateIndentContext ( ) ) { <nl> + if ( auto IndentCtx = getIndentContextFrom ( P , Action . Trailing ) ) <nl> + InnermostCtx = IndentCtx ; <nl> + } <nl> + return { Action . shouldVisitChildren ( ) , P } ; <nl> + } <nl> + <nl> + bool walkToTypeReprPre ( TypeRepr * T ) override { <nl> + auto Action = HandlePre ( T , false ) ; <nl> + if ( Action . shouldGenerateIndentContext ( ) ) { <nl> + if ( auto IndentCtx = getIndentContextFrom ( T , Action . Trailing ) ) <nl> + InnermostCtx = IndentCtx ; <nl> + } <nl> + return Action . shouldVisitChildren ( ) ; <nl> + } <nl> + <nl> + bool walkToDeclPost ( Decl * D ) override { return HandlePost ( D ) ; } <nl> + bool walkToTypeReprPost ( TypeRepr * T ) override { return HandlePost ( T ) ; } <nl> + <nl> + Stmt * walkToStmtPost ( Stmt * S ) override { <nl> + return HandlePost ( S ) ? S : nullptr ; <nl> + } <nl> + <nl> + Expr * walkToExprPost ( Expr * E ) override { <nl> + return HandlePost ( E ) ? E : nullptr ; <nl> + } <nl> + <nl> + Pattern * walkToPatternPost ( Pattern * P ) override { <nl> + return HandlePost ( P ) ? P : nullptr ; <nl> + } <nl> + <nl> + bool shouldWalkIntoGenericParams ( ) override { return true ; } <nl> + <nl> + <nl> + # pragma mark Visitation helpers <nl> + <nl> + struct VisitAction { <nl> + enum : unsigned { Skip , VisitChildren , GetContext } action ; <nl> + Optional < Trailing > Trailing ; <nl> + <nl> + bool shouldVisitChildren ( ) const { return action > = VisitChildren ; } <nl> + bool shouldGenerateIndentContext ( ) const { return action > = GetContext ; } <nl> + } ; <nl> + <nl> + template < class T > <nl> + VisitAction HandlePre ( T * Node , bool IsImplicit ) { <nl> + SourceLoc Start = Node - > getStartLoc ( ) , End = Node - > getEndLoc ( ) ; <nl> + <nl> + if ( Start . isInvalid ( ) ) <nl> + return { VisitAction : : VisitChildren , None } ; <nl> + <nl> + Optional < Trailing > Trailing = checkForTrailingTarget ( End ) ; <nl> + scanTokensUntil ( Start ) ; <nl> + <nl> + if ( ! isTargetContext ( Start , End ) & & ! Trailing ) <nl> + return { VisitAction : : Skip , None } ; <nl> + if ( ! NodesToSkip . count ( Node ) & & ! IsImplicit ) <nl> + return { VisitAction : : GetContext , Trailing } ; <nl> + return { VisitAction : : VisitChildren , Trailing } ; <nl> + } <nl> + <nl> + template < typename T > <nl> + bool HandlePost ( T * Node ) { <nl> + return ! SM . isBeforeInBuffer ( TargetLocation , Node - > getStartLoc ( ) ) ; <nl> + } <nl> + <nl> + void scanTokensUntil ( SourceLoc Loc ) { <nl> + if ( InDocCommentBlock | | InCommentLine ) <nl> + return ; <nl> + for ( auto Invalid = Loc . isInvalid ( ) ; CurrentTokIt ! = TokenList . end ( ) & & <nl> + ( Invalid | | SM . 
isBeforeInBuffer ( CurrentTokIt - > getLoc ( ) , Loc ) ) ; <nl> + CurrentTokIt + + ) { <nl> + if ( CurrentTokIt - > getKind ( ) = = tok : : comment ) { <nl> + CharSourceRange CommentRange = CurrentTokIt - > getRange ( ) ; <nl> + SourceLoc StartLineLoc = Lexer : : getLocForStartOfLine ( <nl> + SM , CommentRange . getStart ( ) ) ; <nl> + <nl> + / / The - 1 is needed in case the past - the - end position is a newline <nl> + / / character . In that case getLocForStartOfLine returns the start of <nl> + / / the next line . <nl> + SourceLoc EndLineLoc = Lexer : : getLocForStartOfLine ( <nl> + SM , CommentRange . getEnd ( ) . getAdvancedLoc ( - 1 ) ) ; <nl> + auto TokenStr = CurrentTokIt - > getRange ( ) . str ( ) ; <nl> + InDocCommentBlock | = SM . isBeforeInBuffer ( StartLineLoc , TargetLineLoc ) & & ! SM . isBeforeInBuffer ( EndLineLoc , TargetLineLoc ) & & <nl> + TokenStr . startswith ( " / * " ) ; <nl> + InCommentLine | = StartLineLoc = = TargetLineLoc & & <nl> + TokenStr . startswith ( " / / " ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + Optional < Trailing > <nl> + checkForTrailingTarget ( SourceLoc End ) { <nl> + if ( ! SM . isBeforeInBuffer ( End , TargetLocation ) ) <nl> + return None ; <nl> + auto Next = getTokenAfter ( SM , End , / * SkipComments = * / false ) ; <nl> + if ( Next & & Next - > getLoc ( ) = = TargetLocation ) <nl> + return Trailing { Next } ; <nl> + if ( ! Next | | ! SM . isBeforeInBuffer ( Next - > getLoc ( ) , TargetLocation ) ) <nl> + return Trailing { None } ; <nl> + return None ; <nl> + } <nl> + <nl> + / / / When visiting an expression , returns true if it ' s a statement level <nl> + / / / expression . <nl> + bool isStatementListItem ( ) { <nl> + if ( auto * S = Parent . getAsStmt ( ) ) { <nl> + if ( auto * RS = dyn_cast < ReturnStmt > ( S ) ) <nl> + return RS - > isImplicit ( ) ; <nl> + return isa < BraceStmt > ( S ) ; <nl> + } <nl> + if ( auto * E = Parent . getAsExpr ( ) ) { <nl> + return isa < ClosureExpr > ( E ) ; <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> + / / / Checks whether the given range is an indent context of the target location . <nl> + / / / <nl> + / / / \ return \ c Start < \ c TargetLocation < = \ c End . <nl> + bool isTargetContext ( SourceLoc Start , SourceLoc End ) const { <nl> + assert ( Start . isValid ( ) ) ; <nl> + / / Start < Target < = End <nl> + return SM . isBeforeInBuffer ( Start , TargetLocation ) & & <nl> + ( End . isInvalid ( ) | | ! SM . isBeforeInBuffer ( End , TargetLocation ) ) ; <nl> + } <nl> + <nl> + / / / Checks whether the given range is an indent context of the target location . <nl> + / / / <nl> + / / / \ return \ c Range . Start < \ c TargetLocation < = \ c Range . End . <nl> + bool isTargetContext ( SourceRange Range ) const { <nl> + return isTargetContext ( Range . Start , Range . End ) ; <nl> + } <nl> + <nl> + / / / Checks whether the given range overlaps the target location . <nl> + / / / <nl> + / / / \ return \ c Start < = \ c TargetLocation < = \ c End <nl> + bool overlapsTarget ( SourceLoc Start , SourceLoc End ) const { <nl> + assert ( Start . isValid ( ) ) ; <nl> + return ! SM . isBeforeInBuffer ( TargetLocation , Start ) & & <nl> + ( End . isInvalid ( ) | | ! SM . isBeforeInBuffer ( End , TargetLocation ) ) ; <nl> + } <nl> + <nl> + / / / Checks whether the given range overlaps the target location . <nl> + / / / <nl> + / / / \ return \ c Range . Start < = \ c TargetLocation < = \ c Range . End <nl> + bool overlapsTarget ( SourceRange Range ) const { <nl> + assert ( Range . 
isValid ( ) ) ; <nl> + return overlapsTarget ( Range . Start , Range . End ) ; <nl> + } <nl> + <nl> + / / / Checks whether the given range contains the target location . <nl> + / / / <nl> + / / / \ return \ c Start < \ c TargetLocation < \ c End <nl> + bool containsTarget ( SourceLoc Start , SourceLoc End ) const { <nl> + assert ( Start . isValid ( ) ) ; <nl> + return SM . isBeforeInBuffer ( Start , TargetLocation ) & & <nl> + ( End . isInvalid ( ) | | SM . isBeforeInBuffer ( TargetLocation , End ) ) ; <nl> + } <nl> + <nl> + / / / Checks whether the given range contains the target location . <nl> + / / / <nl> + / / / \ return \ c Range . Start < \ c TargetLocation < \ c Range . End <nl> + bool containsTarget ( SourceRange Range ) const { <nl> + assert ( Range . isValid ( ) ) ; <nl> + return containsTarget ( Range . Start , Range . End ) ; <nl> + } <nl> + <nl> + # pragma mark Declaration indent contexts <nl> + <nl> + Optional < IndentContext > <nl> + getIndentContextFrom ( Decl * D , Optional < Trailing > TrailingTarget ) { <nl> + <nl> + if ( auto * AFD = dyn_cast < AbstractFunctionDecl > ( D ) ) { <nl> + SourceLoc ContextLoc = AFD - > getStartLoc ( ) ; <nl> + / / If this is a getter without a ' get ' loc , the context loc is the start <nl> + / / of its storage . <nl> + if ( auto * AD = dyn_cast < AccessorDecl > ( AFD ) ) { <nl> + if ( AD - > isGetter ( ) & & AD - > getAccessorKeywordLoc ( ) . isInvalid ( ) ) { <nl> + auto * ASD = AD - > getStorage ( ) ; <nl> + if ( auto * VD = dyn_cast_or_null < VarDecl > ( ASD ) ) { <nl> + ContextLoc = VD - > getStartLoc ( ) ; <nl> + } else if ( auto * SD = dyn_cast_or_null < SubscriptDecl > ( ASD ) ) { <nl> + ContextLoc = SD - > getStartLoc ( ) ; <nl> + } <nl> + } <nl> + } <nl> + if ( auto Ctx = getIndentContextFrom ( AFD - > getBody ( ) , ContextLoc ) ) <nl> + return Ctx ; <nl> + if ( auto Ctx = getIndentContextFrom ( AFD - > getParameters ( ) , ContextLoc ) ) <nl> + return Ctx ; <nl> + if ( auto Ctx = getIndentContextFrom ( AFD - > getGenericParams ( ) , ContextLoc , D ) ) <nl> + return Ctx ; <nl> + <nl> + if ( TrailingTarget ) <nl> + return None ; <nl> + return IndentContext { ContextLoc , false } ; <nl> + } <nl> + <nl> + if ( auto * NTD = dyn_cast < NominalTypeDecl > ( D ) ) { <nl> + SourceLoc ContextLoc = NTD - > getStartLoc ( ) ; <nl> + <nl> + if ( auto Ctx = getIndentContextFromInherits ( NTD - > getInherited ( ) , ContextLoc ) ) <nl> + return Ctx ; <nl> + if ( auto Ctx = getIndentContextFromBraces ( NTD - > getBraces ( ) , ContextLoc , NTD ) ) <nl> + return Ctx ; <nl> + if ( auto Ctx = getIndentContextFrom ( NTD - > getGenericParams ( ) , ContextLoc , D ) ) <nl> + return Ctx ; <nl> + if ( auto Ctx = getIndentContextFrom ( NTD - > getTrailingWhereClause ( ) , ContextLoc , D ) ) <nl> + return Ctx ; <nl> + <nl> + if ( TrailingTarget ) <nl> + return None ; <nl> + return IndentContext { ContextLoc , false } ; <nl> + } <nl> + <nl> + if ( auto * ED = dyn_cast < ExtensionDecl > ( D ) ) { <nl> + SourceLoc ContextLoc = ED - > getStartLoc ( ) ; <nl> + <nl> + if ( auto Ctx = getIndentContextFromInherits ( ED - > getInherited ( ) , ContextLoc ) ) <nl> + return Ctx ; <nl> + if ( auto Ctx = getIndentContextFromBraces ( ED - > getBraces ( ) , ContextLoc , ED ) ) <nl> + return Ctx ; <nl> + if ( auto Ctx = getIndentContextFrom ( ED - > getTrailingWhereClause ( ) , ContextLoc , D ) ) <nl> + return Ctx ; <nl> + <nl> + if ( TrailingTarget ) <nl> + return None ; <nl> + return IndentContext { ContextLoc , false } ; <nl> + } <nl> + <nl> + if ( auto * SD = dyn_cast < 
SubscriptDecl > ( D ) ) { <nl> + SourceLoc ContextLoc = SD - > getStartLoc ( ) ; <nl> + <nl> + if ( auto Ctx = getIndentContextFromBraces ( SD - > getBracesRange ( ) , ContextLoc , SD ) ) <nl> + return Ctx ; <nl> + if ( auto Ctx = getIndentContextFrom ( SD - > getIndices ( ) , ContextLoc ) ) <nl> + return Ctx ; <nl> + if ( auto Ctx = getIndentContextFrom ( SD - > getGenericParams ( ) , ContextLoc , D ) ) <nl> + return Ctx ; <nl> + <nl> + if ( TrailingTarget ) <nl> + return None ; <nl> + return IndentContext { ContextLoc , false } ; <nl> + } <nl> + <nl> + if ( auto * PGD = dyn_cast < PrecedenceGroupDecl > ( D ) ) { <nl> + SourceLoc ContextLoc = PGD - > getStartLoc ( ) ; <nl> + SourceLoc L = PGD - > getLBraceLoc ( ) , R = PGD - > getRBraceLoc ( ) ; <nl> + <nl> + if ( auto Ctx = getIndentContextFromBraces ( L , R , ContextLoc , PGD ) ) <nl> + return Ctx ; <nl> + <nl> + if ( TrailingTarget ) <nl> + return None ; <nl> + return IndentContext { PGD - > getStartLoc ( ) , false } ; <nl> + } <nl> + <nl> + if ( auto * PBD = dyn_cast < PatternBindingDecl > ( D ) ) { <nl> + SourceLoc ContextLoc = PBD - > getStartLoc ( ) , IntroducerLoc = PBD - > getLoc ( ) ; <nl> + <nl> + ListAligner Aligner ( SM , TargetLocation , ContextLoc , IntroducerLoc ) ; <nl> + for ( auto I : range ( PBD - > getNumPatternEntries ( ) ) ) { <nl> + SourceRange EntryRange = PBD - > getEqualLoc ( I ) ; <nl> + VarDecl * SingleVar = nullptr ; <nl> + <nl> + if ( auto * E = PBD - > getOriginalInit ( I ) ) <nl> + widenOrSet ( EntryRange , E - > getSourceRange ( ) ) ; <nl> + if ( auto * P = PBD - > getPattern ( I ) ) { <nl> + widenOrSet ( EntryRange , P - > getSourceRange ( ) ) ; <nl> + if ( ( SingleVar = P - > getSingleVar ( ) ) ) <nl> + widenOrSet ( EntryRange , SingleVar - > getBracesRange ( ) ) ; <nl> + } <nl> + assert ( EntryRange . isValid ( ) ) ; <nl> + Aligner . updateAlignment ( EntryRange , PBD ) ; <nl> + <nl> + / / If the var has explicit accessors , the braces are an indent context . <nl> + if ( SingleVar & & hasExplicitAccessors ( SingleVar ) ) { <nl> + SourceRange Braces = SingleVar - > getBracesRange ( ) ; <nl> + if ( auto Ctx = getIndentContextFromBraces ( Braces , EntryRange . Start , PBD ) ) { <nl> + Aligner . setAlignmentIfNeeded ( CtxOverride ) ; <nl> + return Ctx ; <nl> + } <nl> + } <nl> <nl> - / / Calculating space length <nl> - for ( auto C : Range . str ( ) ) { <nl> - if ( C = = ' \ t ' ) <nl> - SpaceLength + = FmtOptions . TabWidth ; <nl> - else <nl> - SpaceLength + = 1 ; <nl> - } <nl> + / / The pattern entry as whole is also an indent context . <nl> + if ( isTargetContext ( EntryRange ) ) { <nl> + Aligner . setAlignmentIfNeeded ( CtxOverride ) ; <nl> + return IndentContext { <nl> + EntryRange . Start , <nl> + ! OutdentChecker : : hasOutdent ( SM , EntryRange , PBD ) <nl> + } ; <nl> + } <nl> + } <nl> <nl> - / / If we are using tabs , calculating the number of tabs and spaces we need <nl> - / / to insert . <nl> - if ( FmtOptions . UseTabs ) { <nl> - TabLength = SpaceLength / FmtOptions . TabWidth ; <nl> - SpaceLength = SpaceLength % FmtOptions . TabWidth ; <nl> + if ( TrailingTarget ) <nl> + return None ; <nl> + return Aligner . getContextAndSetAlignment ( CtxOverride ) ; <nl> } <nl> - Builder . append ( TabLength , ' \ t ' ) ; <nl> - Builder . append ( SpaceLength , ' ' ) ; <nl> - } <nl> <nl> - bool HasSibling ( ) { <nl> - return SiblingInfo . Loc . isValid ( ) ; <nl> - } <nl> + / / None of the below declarations can claim trailing targets . 
<nl> + if ( TrailingTarget ) <nl> + return None ; <nl> <nl> - bool needExtraIndentationForSibling ( ) { <nl> - return SiblingInfo . ExtraIndent ; <nl> - } <nl> + if ( auto * TAD = dyn_cast < TypeAliasDecl > ( D ) ) { <nl> + SourceLoc ContextLoc = TAD - > getStartLoc ( ) ; <nl> <nl> - std : : pair < unsigned , unsigned > lineAndColumn ( ) { <nl> - if ( Cursor = = Stack . rend ( ) ) <nl> - return std : : make_pair ( 0 , 0 ) ; <nl> + if ( auto Ctx = getIndentContextFrom ( TAD - > getGenericParams ( ) , ContextLoc , <nl> + D ) ) { <nl> + return Ctx ; <nl> + } <nl> <nl> - if ( Stmt * S = Cursor - > getAsStmt ( ) ) { <nl> - SourceLoc SL = S - > getStartLoc ( ) ; <nl> - return SM . getLineAndColumn ( SL ) ; <nl> + return IndentContext { ContextLoc , ! OutdentChecker : : hasOutdent ( SM , D ) } ; <nl> } <nl> - if ( Decl * D = Cursor - > getAsDecl ( ) ) { <nl> - SourceLoc SL = D - > getStartLoc ( ) ; <nl> <nl> - / / FIXME : put the attributes into forward source order so we don ' t need <nl> - / / to iterate through them . <nl> - for ( auto * Attr : D - > getAttrs ( ) ) { <nl> - SourceLoc AttrLoc = Attr - > getRangeWithAt ( ) . Start ; <nl> - if ( AttrLoc . isValid ( ) & & SM . isBeforeInBuffer ( AttrLoc , SL ) ) <nl> - SL = AttrLoc ; <nl> + if ( auto * ATD = dyn_cast < AssociatedTypeDecl > ( D ) ) { <nl> + SourceLoc ContextLoc = ATD - > getStartLoc ( ) ; <nl> + <nl> + if ( auto Ctx = getIndentContextFromInherits ( ATD - > getInherited ( ) , <nl> + ContextLoc ) ) { <nl> + return Ctx ; <nl> + } <nl> + if ( auto Ctx = getIndentContextFrom ( ATD - > getTrailingWhereClause ( ) , <nl> + ContextLoc , D ) ) { <nl> + return Ctx ; <nl> } <nl> <nl> - return SM . getLineAndColumn ( SL ) ; <nl> + return IndentContext { ContextLoc , ! OutdentChecker : : hasOutdent ( SM , D ) } ; <nl> } <nl> - if ( Expr * E = Cursor - > getAsExpr ( ) ) { <nl> - SourceLoc SL = E - > getStartLoc ( ) ; <nl> - return SM . getLineAndColumn ( SL ) ; <nl> + <nl> + if ( auto * PDD = dyn_cast < PoundDiagnosticDecl > ( D ) ) { <nl> + SourceLoc ContextLoc = PDD - > getStartLoc ( ) ; <nl> + / / FIXME : add paren source locations to the AST Node . <nl> + if ( auto * SLE = PDD - > getMessage ( ) ) { <nl> + SourceRange MessageRange = SLE - > getSourceRange ( ) ; <nl> + if ( MessageRange . isValid ( ) & & overlapsTarget ( MessageRange ) ) <nl> + return IndentContext { ContextLoc , true } ; <nl> + } <nl> + return IndentContext { ContextLoc , ! OutdentChecker : : hasOutdent ( SM , D ) } ; <nl> } <nl> <nl> - return std : : make_pair ( 0 , 0 ) ; <nl> - } <nl> + if ( auto * ICD = dyn_cast < IfConfigDecl > ( D ) ) { <nl> + for ( auto & Clause : ICD - > getClauses ( ) ) { <nl> + if ( Clause . Loc = = TargetLocation ) <nl> + break ; <nl> + if ( auto * Cond = Clause . Cond ) { <nl> + SourceRange CondRange = Cond - > getSourceRange ( ) ; <nl> + if ( CondRange . isValid ( ) & & overlapsTarget ( CondRange ) ) <nl> + return IndentContext { Clause . Loc , true } ; <nl> + } <nl> + } <nl> + return IndentContext { ICD - > getStartLoc ( ) , false } ; <nl> + } <nl> <nl> - template < class T > <nl> - bool isStmtContext ( ) { <nl> - if ( Cursor = = Stack . 
rend ( ) ) <nl> - return false ; <nl> - Stmt * ContextStmt = Cursor - > getAsStmt ( ) ; <nl> - return ContextStmt & & isa < T > ( ContextStmt ) ; <nl> + switch ( D - > getKind ( ) ) { <nl> + case DeclKind : : InfixOperator : <nl> + case DeclKind : : PostfixOperator : <nl> + case DeclKind : : PrefixOperator : <nl> + case DeclKind : : Import : <nl> + case DeclKind : : Param : <nl> + return IndentContext { <nl> + D - > getStartLoc ( ) , <nl> + ! OutdentChecker : : hasOutdent ( SM , D ) <nl> + } ; <nl> + default : <nl> + return None ; <nl> + } <nl> } <nl> <nl> - bool isBraceContext ( ) { <nl> - return isStmtContext < BraceStmt > ( ) ; <nl> + Optional < IndentContext > <nl> + getIndentContextFromWhereClause ( ArrayRef < RequirementRepr > Requirements , <nl> + SourceRange Range , SourceLoc ContextLoc , <nl> + Decl * WalkableParent ) { <nl> + if ( Range . isInvalid ( ) | | ! isTargetContext ( Range ) ) <nl> + return None ; <nl> + <nl> + ListAligner Aligner ( SM , TargetLocation , ContextLoc , Range . Start ) ; <nl> + for ( auto & Req : Requirements ) { <nl> + SourceRange ReqRange = Req . getSourceRange ( ) ; <nl> + if ( ReqRange . isInvalid ( ) ) <nl> + continue ; <nl> + Aligner . updateAlignment ( ReqRange , WalkableParent ) ; <nl> + if ( isTargetContext ( ReqRange ) ) { <nl> + Aligner . setAlignmentIfNeeded ( CtxOverride ) ; <nl> + return IndentContext { <nl> + ReqRange . Start , <nl> + ! OutdentChecker : : hasOutdent ( SM , ReqRange , WalkableParent ) <nl> + } ; <nl> + } <nl> + } <nl> + return Aligner . getContextAndSetAlignment ( CtxOverride ) ; <nl> } <nl> <nl> - bool isImplicitBraceContext ( ) { <nl> - / / If we ' re directly at the top , it ' s implicit . <nl> - if ( Cursor = = Stack . rend ( ) ) <nl> - return true ; <nl> - <nl> - if ( ! isBraceContext ( ) ) <nl> - return false ; <nl> - auto Parent = parent ( ) ; <nl> - / / If the parent is directly at the top , it ' s implicit . <nl> - if ( Parent . Cursor = = Stack . rend ( ) ) <nl> - return true ; <nl> + Optional < IndentContext > <nl> + getIndentContextFrom ( TrailingWhereClause * TWC , SourceLoc ContextLoc , <nl> + Decl * WalkableParent ) { <nl> + if ( ! TWC ) <nl> + return None ; <nl> + return getIndentContextFromWhereClause ( TWC - > getRequirements ( ) , <nl> + TWC - > getSourceRange ( ) , <nl> + ContextLoc , WalkableParent ) ; <nl> + } <nl> <nl> - / / If we ' re within a case body , it ' s implicit . <nl> - / / For example : <nl> - / / case . . . : <nl> - / / case body is implicitly wrapped in a brace statement <nl> - if ( Parent . isCaseContext ( ) ) <nl> - return true ; <nl> + Optional < IndentContext > <nl> + getIndentContextFrom ( GenericParamList * GP , SourceLoc ContextLoc , <nl> + Decl * WalkableParent ) { <nl> + if ( ! GP ) <nl> + return None ; <nl> + <nl> + SourceLoc L = GP - > getLAngleLoc ( ) ; <nl> + SourceLoc R = getLocIfTokenTextMatches ( SM , GP - > getRAngleLoc ( ) , " > " ) ; <nl> + <nl> + if ( L . isValid ( ) & & overlapsTarget ( L , R ) ) { <nl> + ListAligner Aligner ( SM , TargetLocation , ContextLoc , L , R ) ; <nl> + for ( auto * P : GP - > getParams ( ) ) { <nl> + SourceRange ParamRange = P - > getSourceRange ( ) ; <nl> + Aligner . updateAlignment ( ParamRange , WalkableParent ) ; <nl> + if ( isTargetContext ( ParamRange ) ) { <nl> + Aligner . setAlignmentIfNeeded ( CtxOverride ) ; <nl> + return IndentContext { <nl> + ParamRange . Start , <nl> + ! OutdentChecker : : hasOutdent ( SM , P ) <nl> + } ; <nl> + } <nl> + } <nl> + return Aligner . 
getContextAndSetAlignment ( CtxOverride ) ; <nl> + } <nl> <nl> - return false ; <nl> + SourceRange TrailingRange = GP - > getTrailingWhereClauseSourceRange ( ) ; <nl> + if ( auto Ctx = getIndentContextFromWhereClause ( GP - > getRequirements ( ) , <nl> + TrailingRange , ContextLoc , <nl> + WalkableParent ) ) <nl> + return Ctx ; <nl> + return None ; <nl> } <nl> <nl> - bool isCaseContext ( ) { <nl> - return isStmtContext < CaseStmt > ( ) ; <nl> + Optional < IndentContext > <nl> + getIndentContextFrom ( ParameterList * PL , SourceLoc ContextLoc = SourceLoc ( ) ) { <nl> + if ( ! PL ) <nl> + return None ; <nl> + <nl> + SourceRange Range = PL - > getSourceRange ( ) ; <nl> + if ( Range . isInvalid ( ) | | locIsKind ( SM , Range . Start , tok : : l_brace ) | | <nl> + ! isTargetContext ( Range ) ) <nl> + return None ; <nl> + <nl> + SourceLoc L = getLocIfKind ( SM , PL - > getLParenLoc ( ) , tok : : l_paren ) ; <nl> + SourceLoc R = getLocIfKind ( SM , PL - > getRParenLoc ( ) , tok : : r_paren ) ; <nl> + if ( L . isInvalid ( ) ) <nl> + L = Range . Start ; <nl> + if ( ContextLoc . isInvalid ( ) ) <nl> + ContextLoc = Range . Start ; <nl> + ListAligner Aligner ( SM , TargetLocation , ContextLoc , L , R ) ; <nl> + for ( auto * PD : * PL ) <nl> + Aligner . updateAlignment ( PD - > getSourceRange ( ) , PD ) ; <nl> + return Aligner . getContextAndSetAlignment ( CtxOverride ) ; <nl> } <nl> <nl> - bool isSwitchContext ( ) { <nl> - return isStmtContext < SwitchStmt > ( ) ; <nl> + template < typename T > <nl> + Optional < IndentContext > <nl> + getIndentContextFromBraces ( SourceLoc L , SourceLoc End , SourceLoc ContextLoc , <nl> + T * WalkableParent ) { <nl> + SourceLoc R = getLocIfKind ( SM , End , tok : : r_brace ) ; <nl> + if ( L . isInvalid ( ) | | ! overlapsTarget ( L , R ) ) <nl> + return None ; <nl> + return IndentContext { <nl> + ContextLoc , <nl> + containsTarget ( L , R ) & & <nl> + ! OutdentChecker : : hasOutdent ( SM , SourceRange ( L , End ) , WalkableParent , <nl> + RangeKind : : Open ) <nl> + } ; <nl> } <nl> <nl> - std : : pair < unsigned , unsigned > indentLineAndColumn ( ) { <nl> - if ( Cursor = = Stack . rend ( ) ) <nl> - return std : : make_pair ( 0 , 0 ) ; <nl> - <nl> - / / Get the line and indent position for this context . <nl> - auto LineAndColumn = lineAndColumn ( ) ; <nl> - auto SavedCursor = Cursor ; <nl> - <nl> - / / Walk up the context stack to find the topmost applicable context . <nl> - while ( + + Cursor ! = Stack . rend ( ) ) { <nl> - auto ParentLineAndColumn = lineAndColumn ( ) ; <nl> - <nl> - if ( ParentLineAndColumn . second = = 0 ) <nl> - break ; <nl> - <nl> - if ( ParentLineAndColumn . first ! = LineAndColumn . first ) { <nl> - / / The start line is not the same , see if this is at the ' else ' clause . <nl> - if ( auto * If = dyn_cast_or_null < IfStmt > ( Cursor - > getAsStmt ( ) ) ) { <nl> - SourceLoc ElseLoc = If - > getElseLoc ( ) ; <nl> - / / If we ' re at ' else ' , take the indent of ' if ' and continue . <nl> - if ( ElseLoc . isValid ( ) & & <nl> - LineAndColumn . first = = SM . getLineAndColumn ( ElseLoc ) . first ) { <nl> - LineAndColumn = ParentLineAndColumn ; <nl> - continue ; <nl> - } <nl> - / / If we are at conditions , take the indent of ' if ' and continue . <nl> - for ( auto Cond : If - > getCond ( ) ) { <nl> - if ( LineAndColumn . first = = SM . getLineNumber ( Cond . 
getEndLoc ( ) ) ) { <nl> - LineAndColumn = ParentLineAndColumn ; <nl> - continue ; <nl> - } <nl> - } <nl> - } <nl> + template < typename T > <nl> + Optional < IndentContext > <nl> + getIndentContextFromBraces ( SourceRange Braces , SourceLoc ContextLoc , <nl> + T * WalkableParent ) { <nl> + return getIndentContextFromBraces ( Braces . Start , Braces . End , ContextLoc , <nl> + WalkableParent ) ; <nl> + } <nl> <nl> - / / No extra indentation level for getters without explicit names . <nl> - / / e . g . <nl> - / / public var someValue : Int { <nl> - / / return 0 ; < - No indentation added because of the getter . <nl> - / / } <nl> - if ( auto VD = dyn_cast_or_null < VarDecl > ( Cursor - > getAsDecl ( ) ) ) { <nl> - if ( auto Getter = VD - > getParsedAccessor ( AccessorKind : : Get ) ) { <nl> - if ( Getter - > getAccessorKeywordLoc ( ) . isInvalid ( ) ) { <nl> - LineAndColumn = ParentLineAndColumn ; <nl> - continue ; <nl> - } <nl> - } <nl> - } <nl> + Optional < IndentContext > <nl> + getIndentContextFromInherits ( ArrayRef < TypeLoc > Inherits , <nl> + SourceLoc ContextLoc ) { <nl> + if ( Inherits . empty ( ) ) <nl> + return None ; <nl> <nl> - / / Align with Func start instead of with param decls . <nl> - if ( auto * FD = dyn_cast_or_null < AbstractFunctionDecl > ( Cursor - > getAsDecl ( ) ) ) { <nl> - if ( LineAndColumn . first < = SM . getLineNumber ( FD - > getSignatureSourceRange ( ) . End ) ) { <nl> - LineAndColumn = ParentLineAndColumn ; <nl> - continue ; <nl> - } <nl> - } <nl> + SourceRange Range = Inherits . front ( ) . getSourceRange ( ) ; <nl> + Range . widen ( Inherits . back ( ) . getSourceRange ( ) ) ; <nl> <nl> - / / Break out if the line is no longer the same . <nl> - break ; <nl> - } <nl> + if ( Range . isInvalid ( ) | | ! overlapsTarget ( Range ) ) <nl> + return None ; <nl> <nl> - LineAndColumn . second = ParentLineAndColumn . second ; <nl> - } <nl> + / / FIXME : Add the colon location to the AST . <nl> + auto ColonLoc = getLastTokenOfKindInOpenRange ( SM , tok : : colon , ContextLoc , <nl> + Range . Start ) ; <nl> + assert ( ColonLoc . hasValue ( ) & & " inherits list without leading colon ? " ) ; <nl> + Range . widen ( ColonLoc - > getLoc ( ) ) ; <nl> <nl> - Cursor = SavedCursor ; <nl> - return LineAndColumn ; <nl> + ListAligner Aligner ( SM , TargetLocation , ContextLoc , ColonLoc - > getLoc ( ) ) ; <nl> + for ( auto TL : Inherits ) <nl> + Aligner . updateAlignment ( TL . getSourceRange ( ) , TL . getTypeRepr ( ) ) ; <nl> + return Aligner . getContextAndSetAlignment ( CtxOverride ) ; <nl> } <nl> <nl> - bool exprEndAtLine ( Expr * E , unsigned Line ) { <nl> - return E - > getEndLoc ( ) . isValid ( ) & & SM . getLineNumber ( E - > getEndLoc ( ) ) = = Line ; <nl> - } ; <nl> + # pragma mark Statement indent contexts <nl> <nl> - bool shouldAddIndentForLine ( unsigned Line , TokenInfo TInfo , <nl> - const CodeFormatOptions & FmtOptions ) { <nl> - if ( Cursor = = Stack . rend ( ) ) <nl> - return false ; <nl> + Optional < IndentContext > <nl> + getIndentContextFrom ( Stmt * S , Optional < Trailing > TrailingTarget ) { <nl> <nl> - if ( TInfo ) { <nl> - if ( TInfo . getLineStarter ( ) - > getKind ( ) = = tok : : l_brace & & <nl> - isKeywordPossibleDeclStart ( * TInfo . getLineStarter ( 1 ) ) & & <nl> - TInfo . getLineStarter ( 1 ) - > isKeyword ( ) ) <nl> - return false ; <nl> - / / VStack { <nl> - / / . . . <nl> - / / } <nl> - / / . onAppear { < mmm - No indentation here . <nl> - / / . onAppear1 { < mmm - No indentation here . <nl> - if ( TInfo . 
isRBraceDotsPattern ( ) ) { <nl> - return false ; <nl> - } <nl> - } <nl> - <nl> - / / Handle switch / case , indent unless at a case label . <nl> - if ( auto * Case = dyn_cast_or_null < CaseStmt > ( Cursor - > getAsStmt ( ) ) ) { <nl> - auto LabelItems = Case - > getCaseLabelItems ( ) ; <nl> - SourceLoc Loc ; <nl> - if ( ! LabelItems . empty ( ) ) <nl> - Loc = LabelItems . back ( ) . getPattern ( ) - > getLoc ( ) ; <nl> - if ( Loc . isValid ( ) ) <nl> - return Line > SM . getLineAndColumn ( Loc ) . first ; <nl> - return true ; <nl> + if ( auto * BS = dyn_cast < BraceStmt > ( S ) ) <nl> + return getIndentContextFrom ( BS ) ; <nl> + <nl> + if ( auto * SS = dyn_cast < SwitchStmt > ( S ) ) { <nl> + SourceLoc ContextLoc = SS - > getSwitchLoc ( ) ; <nl> + if ( ! SM . isBeforeInBuffer ( ContextLoc , TargetLocation ) ) <nl> + return None ; <nl> + <nl> + if ( auto * E = SS - > getSubjectExpr ( ) ) { <nl> + SourceRange Range = E - > getSourceRange ( ) ; <nl> + widenOrSet ( Range , ContextLoc ) ; <nl> + if ( isTargetContext ( Range ) ) { <nl> + return IndentContext { <nl> + ContextLoc , <nl> + ! OutdentChecker : : hasOutdent ( SM , Range , E ) <nl> + } ; <nl> + } <nl> + } <nl> + SourceLoc L = SS - > getLBraceLoc ( ) , R = SS - > getRBraceLoc ( ) ; <nl> + if ( FmtOptions . IndentSwitchCase ) { <nl> + if ( auto Ctx = getIndentContextFromBraces ( L , R , ContextLoc , SS ) ) <nl> + return Ctx ; <nl> + } <nl> + <nl> + if ( TrailingTarget ) <nl> + return None ; <nl> + return IndentContext { ContextLoc , false } ; <nl> } <nl> - if ( isSwitchContext ( ) ) { <nl> - / / If we ' re at the start of a case label , don ' t add indent . <nl> - / / For example : <nl> - / / switch . . . { <nl> - / / case xyz : < - - No indent here , should be at same level as switch . <nl> - Stmt * AtStmtStart = Start . getAsStmt ( ) ; <nl> - if ( AtStmtStart & & isa < CaseStmt > ( AtStmtStart ) ) <nl> - return FmtOptions . IndentSwitchCase ; <nl> - <nl> - / / If we ' re at the open brace of the switch , don ' t add an indent . <nl> - / / For example : <nl> - / / switch . . . <nl> - / / { < - - No indent here , open brace should be at same level as switch . <nl> - auto * S = cast < SwitchStmt > ( Cursor - > getAsStmt ( ) ) ; <nl> - if ( SM . getLineAndColumn ( S - > getLBraceLoc ( ) ) . first = = Line ) <nl> - return false ; <nl> - if ( IsInCommentLine ( ) ) { <nl> - for ( auto Case : S - > getCases ( ) ) { <nl> - / / switch . . . <nl> - / / { <nl> - / / / / case comment < - - No indent here . <nl> - / / case 0 : <nl> - if ( SM . getLineAndColumn ( Case - > swift : : Stmt : : getStartLoc ( ) ) . first = = Line + 1 ) <nl> - return FmtOptions . IndentSwitchCase ; <nl> + <nl> + if ( auto * CS = dyn_cast < CaseStmt > ( S ) ) { <nl> + if ( TrailingTarget & & ! TrailingTarget - > isEmpty ( ) ) <nl> + return None ; <nl> + <nl> + SourceLoc CaseLoc = CS - > getLoc ( ) ; <nl> + if ( ! SM . isBeforeInBuffer ( CaseLoc , TargetLocation ) ) <nl> + return None ; <nl> + <nl> + SourceRange LabelItemsRange = CS - > getLabelItemsRange ( ) ; <nl> + SourceLoc ColonLoc = getLocIfKind ( SM , LabelItemsRange . End , tok : : colon ) ; <nl> + <nl> + if ( isTargetContext ( CaseLoc , ColonLoc ) ) { <nl> + ListAligner Aligner ( SM , TargetLocation , CaseLoc , CaseLoc , ColonLoc ) ; <nl> + for ( auto & Elem : CS - > getCaseLabelItems ( ) ) { <nl> + SourceRange ElemRange = Elem . getSourceRange ( ) ; <nl> + Aligner . updateAlignment ( ElemRange , CS ) ; <nl> + if ( isTargetContext ( ElemRange ) ) { <nl> + Aligner . 
setAlignmentIfNeeded ( CtxOverride ) ; <nl> + return IndentContext { <nl> + ElemRange . Start , <nl> + ! OutdentChecker : : hasOutdent ( SM , ElemRange , CS ) <nl> + } ; <nl> + } <nl> } <nl> + return Aligner . getContextAndSetAlignment ( CtxOverride ) ; <nl> } <nl> + if ( ColonLoc . isValid ( ) & & isTargetContext ( ColonLoc , SourceLoc ( ) ) ) { <nl> + SourceRange ColonToEnd = SourceRange ( ColonLoc , CS - > getEndLoc ( ) ) ; <nl> + return IndentContext { <nl> + CaseLoc , <nl> + ! OutdentChecker : : hasOutdent ( SM , ColonToEnd , CS ) <nl> + } ; <nl> + } <nl> + return IndentContext { CaseLoc , false } ; <nl> } <nl> <nl> - / / If we ' re within an implicit brace context , don ' t add indent . <nl> - if ( isImplicitBraceContext ( ) ) <nl> - return false ; <nl> + if ( auto * DS = dyn_cast < DoStmt > ( S ) ) { <nl> + if ( ! SM . isBeforeInBuffer ( DS - > getDoLoc ( ) , TargetLocation ) ) <nl> + return None ; <nl> <nl> - / / If we ' re at the open brace of a no - name getter , don ' t add an indent . <nl> - / / For example : <nl> - / / public var someValue : Int <nl> - / / { < - We add no indentation here . <nl> - / / return 0 <nl> - / / } <nl> - if ( auto FD = dyn_cast_or_null < AccessorDecl > ( Start . getAsDecl ( ) ) ) { <nl> - if ( FD - > isGetter ( ) & & FD - > getAccessorKeywordLoc ( ) . isInvalid ( ) ) { <nl> - if ( SM . getLineNumber ( FD - > getBodySourceRange ( ) . Start ) = = Line ) <nl> - return false ; <nl> + if ( auto * BS = dyn_cast < BraceStmt > ( DS - > getBody ( ) ) ) { <nl> + if ( auto Ctx = getIndentContextFrom ( BS , DS - > getStartLoc ( ) ) ) <nl> + return Ctx ; <nl> } <nl> + if ( TrailingTarget ) <nl> + return None ; <nl> + return IndentContext { DS - > getStartLoc ( ) , false } ; <nl> } <nl> <nl> - / / func foo ( a : Int , <nl> - / / b : Int <nl> - / / ) { } < - Avoid adding indentation here <nl> - SourceLoc SignatureEnd ; <nl> - if ( auto * AFD = dyn_cast_or_null < AbstractFunctionDecl > ( Cursor - > getAsDecl ( ) ) ) { <nl> - SignatureEnd = AFD - > getSignatureSourceRange ( ) . End ; <nl> - } else if ( auto * SD = dyn_cast_or_null < SubscriptDecl > ( Cursor - > getAsDecl ( ) ) ) { <nl> - SignatureEnd = SD - > getSignatureSourceRange ( ) . End ; <nl> + if ( auto * CS = dyn_cast < CatchStmt > ( S ) ) { <nl> + if ( auto * BS = dyn_cast < BraceStmt > ( CS - > getBody ( ) ) ) { <nl> + if ( auto Ctx = getIndentContextFrom ( BS , CS - > getStartLoc ( ) ) ) <nl> + return Ctx ; <nl> + } <nl> + if ( TrailingTarget ) <nl> + return None ; <nl> + return IndentContext { CS - > getStartLoc ( ) , true } ; <nl> } <nl> - if ( SignatureEnd . isValid ( ) & & TInfo & & <nl> - TInfo . getLineStarter ( ) - > getLoc ( ) = = SignatureEnd ) { <nl> - return false ; <nl> + <nl> + if ( auto * IS = dyn_cast < IfStmt > ( S ) ) { <nl> + SourceLoc ContextLoc = IS - > getIfLoc ( ) ; <nl> + if ( ! SM . isBeforeInBuffer ( ContextLoc , TargetLocation ) ) <nl> + return None ; <nl> + <nl> + if ( auto Ctx = getIndentContextFrom ( IS - > getCond ( ) , ContextLoc , IS ) ) <nl> + return Ctx ; <nl> + if ( auto * BS = dyn_cast_or_null < BraceStmt > ( IS - > getThenStmt ( ) ) ) { <nl> + if ( auto Ctx = getIndentContextFrom ( BS , IS - > getStartLoc ( ) ) ) <nl> + return Ctx ; <nl> + } <nl> + if ( TrailingTarget ) <nl> + return None ; <nl> + return IndentContext { ContextLoc , false } ; <nl> } <nl> <nl> - / / If we ' re at the beginning of a brace on a separate line in the context <nl> - / / of anything other than BraceStmt , don ' t add an indent . 
<nl> - / / For example : <nl> - / / func foo ( ) <nl> - / / { < - - No indent here , open brace should be at same level as func . <nl> - Stmt * AtStmtStart = Start . getAsStmt ( ) ; <nl> - if ( AtStmtStart & & isa < BraceStmt > ( AtStmtStart ) & & ! isBraceContext ( ) ) <nl> - return false ; <nl> + if ( auto * GS = dyn_cast < GuardStmt > ( S ) ) { <nl> + SourceLoc ContextLoc = GS - > getGuardLoc ( ) ; <nl> + if ( ! SM . isBeforeInBuffer ( ContextLoc , TargetLocation ) ) <nl> + return None ; <nl> <nl> - / / If we ' re at the end of a brace on a separate line in the context <nl> - / / of anything other than BraceStmt , don ' t add an indent . <nl> - / / For example : <nl> - if ( Stmt * AtStmtEnd = End . getAsStmt ( ) ) { <nl> - if ( ! isBraceContext ( ) ) { <nl> - / / func foo ( ) { <nl> - / / } < - - No indent here , close brace should be at same level as func . <nl> - if ( isa < BraceStmt > ( AtStmtEnd ) ) <nl> - return false ; <nl> - / / do { <nl> - / / } <nl> - / / catch { <nl> - / / } < - - No indent here , close brace should be at same level as do . <nl> - / / catch { <nl> - / / } <nl> - if ( isa < CatchStmt > ( AtStmtEnd ) ) <nl> - return false ; <nl> + if ( auto Ctx = getIndentContextFrom ( GS - > getCond ( ) , ContextLoc , GS ) ) <nl> + return Ctx ; <nl> + if ( auto * BS = dyn_cast_or_null < BraceStmt > ( GS - > getBody ( ) ) ) { <nl> + if ( auto Ctx = getIndentContextFrom ( BS , GS - > getStartLoc ( ) ) ) <nl> + return Ctx ; <nl> } <nl> + <nl> + if ( TrailingTarget ) <nl> + return None ; <nl> + return IndentContext { GS - > getGuardLoc ( ) , false } ; <nl> } <nl> <nl> - / / If we ' re at the open brace of a NominalTypeDecl or ExtensionDecl , <nl> - / / don ' t add an indent . <nl> - / / For example : <nl> - / / class Foo <nl> - / / { < - - No indent here , open brace should be at same level as class . <nl> - auto * NTD = dyn_cast_or_null < NominalTypeDecl > ( Cursor - > getAsDecl ( ) ) ; <nl> - if ( NTD & & SM . getLineAndColumn ( NTD - > getBraces ( ) . Start ) . first = = Line ) <nl> - return false ; <nl> - auto * ETD = dyn_cast_or_null < ExtensionDecl > ( Cursor - > getAsDecl ( ) ) ; <nl> - if ( ETD & & SM . getLineAndColumn ( ETD - > getBraces ( ) . Start ) . first = = Line ) <nl> - return false ; <nl> + if ( auto * RWS = dyn_cast < RepeatWhileStmt > ( S ) ) { <nl> + SourceLoc ContextLoc = RWS - > getRepeatLoc ( ) ; <nl> + if ( ! SM . isBeforeInBuffer ( ContextLoc , TargetLocation ) ) <nl> + return None ; <nl> <nl> - / / If we are at the start of a trailing closure , do not add indentation . <nl> - / / For example : <nl> - / / foo ( 1 ) <nl> - / / { < - - No indent here . <nl> - auto * TE = dyn_cast_or_null < TupleExpr > ( Cursor - > getAsExpr ( ) ) ; <nl> - if ( TE & & TE - > hasTrailingClosure ( ) & & <nl> - SM . getLineNumber ( TE - > getElements ( ) . back ( ) - > getStartLoc ( ) ) = = Line ) { <nl> - return false ; <nl> + if ( auto * E = RWS - > getCond ( ) ) { <nl> + if ( overlapsTarget ( E - > getSourceRange ( ) ) ) <nl> + return IndentContext { RWS - > getRepeatLoc ( ) , true } ; <nl> + } <nl> + <nl> + if ( auto * BS = dyn_cast_or_null < BraceStmt > ( RWS - > getBody ( ) ) ) { <nl> + if ( auto Ctx = getIndentContextFrom ( BS , ContextLoc ) ) <nl> + return Ctx ; <nl> + } <nl> + if ( TrailingTarget ) <nl> + return None ; <nl> + return IndentContext { RWS - > getRepeatLoc ( ) , false } ; <nl> } <nl> <nl> - / / If we ' re in an IfStmt and at the ' else ' , don ' t add an indent . 
<nl> - IfStmt * If = dyn_cast_or_null < IfStmt > ( Cursor - > getAsStmt ( ) ) ; <nl> - if ( If & & If - > getElseLoc ( ) . isValid ( ) & & <nl> - SM . getLineAndColumn ( If - > getElseLoc ( ) ) . first = = Line ) <nl> - return false ; <nl> + if ( auto * WS = dyn_cast < WhileStmt > ( S ) ) { <nl> + SourceLoc ContextLoc = WS - > getWhileLoc ( ) ; <nl> + if ( ! SM . isBeforeInBuffer ( ContextLoc , TargetLocation ) ) <nl> + return None ; <nl> <nl> - / / If we ' re in a DoCatchStmt and at a ' catch ' , don ' t add an indent . <nl> - if ( auto * DoCatchS = dyn_cast_or_null < DoCatchStmt > ( Cursor - > getAsStmt ( ) ) ) { <nl> - for ( CatchStmt * CatchS : DoCatchS - > getCatches ( ) ) { <nl> - SourceLoc Loc = CatchS - > getCatchLoc ( ) ; <nl> - if ( Loc . isValid ( ) & & SM . getLineAndColumn ( Loc ) . first = = Line ) <nl> - return false ; <nl> + if ( auto Ctx = getIndentContextFrom ( WS - > getCond ( ) , ContextLoc , WS ) ) <nl> + return Ctx ; <nl> + <nl> + if ( auto * BS = dyn_cast_or_null < BraceStmt > ( WS - > getBody ( ) ) ) { <nl> + if ( auto Ctx = getIndentContextFrom ( BS , ContextLoc ) ) <nl> + return Ctx ; <nl> } <nl> + if ( TrailingTarget ) <nl> + return None ; <nl> + return IndentContext { ContextLoc , false } ; <nl> } <nl> <nl> - / / If we ' re at the end of a closure , paren or tuple expr , and the context <nl> - / / is a paren / tuple expr ending with that sub expression , and it ends on the <nl> - / / same line , don ' t add an indent . <nl> - / / For example : <nl> - / / foo ( x , { <nl> - / / } ) < - - No indent here , the paren expr for the call ends on the same line . <nl> - Expr * AtExprEnd = End . getAsExpr ( ) ; <nl> - if ( AtExprEnd & & ( isa < ClosureExpr > ( AtExprEnd ) | | <nl> - isa < ParenExpr > ( AtExprEnd ) | | <nl> - isa < TupleExpr > ( AtExprEnd ) | | <nl> - isa < CaptureListExpr > ( AtExprEnd ) ) ) { <nl> + if ( auto * FS = dyn_cast < ForEachStmt > ( S ) ) { <nl> + SourceLoc ContextLoc = FS - > getStartLoc ( ) ; <nl> + SourceLoc ForLoc = FS - > getForLoc ( ) ; <nl> + if ( ! SM . isBeforeInBuffer ( ForLoc , TargetLocation ) ) <nl> + return None ; <nl> <nl> - if ( auto * Paren = dyn_cast_or_null < ParenExpr > ( Cursor - > getAsExpr ( ) ) ) { <nl> - auto * SubExpr = Paren - > getSubExpr ( ) ; <nl> - if ( SubExpr & & SubExpr = = AtExprEnd & & <nl> - SM . getLineAndColumn ( Paren - > getEndLoc ( ) ) . first = = Line ) <nl> - return false ; <nl> - } else if ( auto * Tuple = dyn_cast_or_null < TupleExpr > ( Cursor - > getAsExpr ( ) ) ) { <nl> - auto SubExprs = Tuple - > getElements ( ) ; <nl> - if ( ! SubExprs . empty ( ) & & SubExprs . back ( ) = = AtExprEnd & & <nl> - SM . getLineAndColumn ( Tuple - > getEndLoc ( ) ) . first = = Line ) { <nl> - return false ; <nl> - } <nl> - } else if ( auto * VD = dyn_cast_or_null < VarDecl > ( Cursor - > getAsDecl ( ) ) ) { <nl> - SourceLoc Loc = getVarDeclInitEnd ( VD ) ; <nl> - if ( Loc . isValid ( ) & & SM . getLineNumber ( Loc ) = = Line ) { <nl> - return false ; <nl> - } <nl> - } else if ( auto * Seq = dyn_cast_or_null < SequenceExpr > ( Cursor - > getAsExpr ( ) ) ) { <nl> - ArrayRef < Expr * > Elements = Seq - > getElements ( ) ; <nl> - if ( Elements . size ( ) = = 3 & & <nl> - isa < AssignExpr > ( Elements [ 1 ] ) & & <nl> - SM . getLineAndColumn ( Elements [ 2 ] - > getEndLoc ( ) ) . first = = Line ) { <nl> - return false ; <nl> + if ( auto * P = FS - > getPattern ( ) ) { <nl> + SourceRange Range = P - > getSourceRange ( ) ; <nl> + if ( Range . 
isValid ( ) & & overlapsTarget ( Range ) ) <nl> + return IndentContext { ForLoc , ! OutdentChecker : : hasOutdent ( SM , P ) } ; <nl> + } <nl> + if ( auto * E = FS - > getSequence ( ) ) { <nl> + SourceRange Range = FS - > getInLoc ( ) ; <nl> + widenOrSet ( Range , E - > getSourceRange ( ) ) ; <nl> + if ( Range . isValid ( ) & & isTargetContext ( Range ) ) { <nl> + return IndentContext { <nl> + Range . Start , <nl> + ! OutdentChecker : : hasOutdent ( SM , E ) <nl> + } ; <nl> } <nl> } <nl> + if ( auto * WE = FS - > getWhere ( ) ) { <nl> + SourceLoc WhereLoc = FS - > getWhereLoc ( ) ; <nl> + SourceRange Range = WE - > getSourceRange ( ) ; <nl> + if ( Range . isValid ( ) & & overlapsTarget ( Range ) ) <nl> + return IndentContext { WhereLoc , ! OutdentChecker : : hasOutdent ( SM , WE ) } ; <nl> + } <nl> + if ( auto * BS = dyn_cast_or_null < BraceStmt > ( FS - > getBody ( ) ) ) { <nl> + if ( auto Ctx = getIndentContextFrom ( BS , FS - > getStartLoc ( ) ) ) <nl> + return Ctx ; <nl> + } <nl> + if ( TrailingTarget ) <nl> + return None ; <nl> + return IndentContext { ContextLoc , false } ; <nl> } <nl> <nl> - / / let msg = String ( [ 65 , 108 , 105 , 103 , 110 ] . map { c in <nl> - / / Character ( UnicodeScalar ( c ) ) <nl> - / / } ) < mmm No indentation here . <nl> - auto AtCursorExpr = Cursor - > getAsExpr ( ) ; <nl> - if ( AtExprEnd & & AtCursorExpr & & ( isa < ParenExpr > ( AtCursorExpr ) | | <nl> - isa < TupleExpr > ( AtCursorExpr ) ) ) { <nl> - if ( isa < CallExpr > ( AtExprEnd ) | | <nl> - isa < ArrayExpr > ( AtExprEnd ) | | <nl> - isa < DictionaryExpr > ( AtExprEnd ) ) { <nl> - if ( exprEndAtLine ( AtExprEnd , Line ) & & <nl> - exprEndAtLine ( AtCursorExpr , Line ) ) { <nl> - return false ; <nl> - } <nl> + / / None of the below statements ever claim trailing targets . <nl> + if ( TrailingTarget ) <nl> + return None ; <nl> + <nl> + if ( auto * RS = dyn_cast < ReturnStmt > ( S ) ) { <nl> + SourceLoc ContextLoc = RS - > getReturnLoc ( ) ; <nl> + SourceRange Range = RS - > getSourceRange ( ) ; <nl> + Expr * Result = RS - > getResult ( ) ; <nl> + return IndentContext { <nl> + ContextLoc , <nl> + Result & & ! OutdentChecker : : hasOutdent ( SM , Range , Result ) <nl> + } ; <nl> + } <nl> + <nl> + if ( auto * DCS = dyn_cast < DoCatchStmt > ( S ) ) { <nl> + if ( ! SM . isBeforeInBuffer ( DCS - > getDoLoc ( ) , TargetLocation ) ) <nl> + return None ; <nl> + if ( auto * BS = dyn_cast < BraceStmt > ( DCS - > getBody ( ) ) ) { <nl> + if ( auto Ctx = getIndentContextFrom ( BS ) ) <nl> + return Ctx ; <nl> } <nl> + return IndentContext { DCS - > getStartLoc ( ) , false } ; <nl> + } <nl> <nl> - / / foo ( A : { <nl> - / / . . . <nl> - / / } , B : { < mmm No indentation here . <nl> - / / . . . <nl> - / / } ) <nl> - if ( auto * TE = dyn_cast < TupleExpr > ( AtCursorExpr ) ) { <nl> - if ( isa < ClosureExpr > ( AtExprEnd ) & & exprEndAtLine ( AtExprEnd , Line ) ) { <nl> - for ( auto * ELE : TE - > getElements ( ) ) { <nl> - if ( exprEndAtLine ( ELE , Line ) ) { <nl> - return false ; <nl> - } <nl> - } <nl> + return None ; <nl> + } <nl> + <nl> + Optional < IndentContext > <nl> + getIndentContextFrom ( BraceStmt * BS , SourceLoc ContextLoc = SourceLoc ( ) ) { <nl> + if ( ! BS ) <nl> + return None ; <nl> + <nl> + SourceLoc L = getLocIfKind ( SM , BS - > getLBraceLoc ( ) , tok : : l_brace ) ; <nl> + SourceLoc R = getLocIfKind ( SM , BS - > getRBraceLoc ( ) , tok : : r_brace ) ; <nl> + if ( L . isInvalid ( ) | | ! overlapsTarget ( L , R ) ) <nl> + return None ; <nl> + <nl> + if ( ContextLoc . 
isInvalid ( ) ) { <nl> + ContextLoc = L ; <nl> + } else { <nl> + NodesToSkip . insert ( static_cast < Stmt * > ( BS ) ) ; <nl> + } <nl> + bool shouldIndent = containsTarget ( L , R ) & & <nl> + ! OutdentChecker : : hasOutdent ( SM , BS , RangeKind : : Open ) ; <nl> + return IndentContext { ContextLoc , shouldIndent } ; <nl> + } <nl> + <nl> + template < typename T > <nl> + Optional < IndentContext > <nl> + getIndentContextFrom ( PoundAvailableInfo * A , T * WalkableParent ) { <nl> + SourceLoc ContextLoc = A - > getStartLoc ( ) ; <nl> + SourceLoc L = A - > getLParenLoc ( ) ; <nl> + SourceLoc R = getLocIfKind ( SM , A - > getRParenLoc ( ) , tok : : r_paren ) ; <nl> + if ( L . isInvalid ( ) | | ! overlapsTarget ( L , R ) ) <nl> + return None ; <nl> + <nl> + ListAligner Aligner ( SM , TargetLocation , ContextLoc , L , R ) ; <nl> + for ( auto * Spec : A - > getQueries ( ) ) { <nl> + SourceRange Range = Spec - > getSourceRange ( ) ; <nl> + if ( Range . isValid ( ) ) { <nl> + Aligner . updateAlignment ( Range , WalkableParent ) ; <nl> + if ( isTargetContext ( Range ) ) { <nl> + Aligner . setAlignmentIfNeeded ( CtxOverride ) ; <nl> + return IndentContext { Range . Start , true } ; <nl> } <nl> } <nl> } <nl> + return Aligner . getContextAndSetAlignment ( CtxOverride ) ; <nl> + } <nl> <nl> - / / Chained trailing closures shouldn ' t require additional indentation . <nl> - / / a . map { <nl> - / / . . . <nl> - / / } . filter { < mmm No indentation here . <nl> - / / . . . <nl> - / / } . map { < mmm No indentation here . <nl> - / / . . . <nl> - / / } <nl> - if ( AtExprEnd & & AtCursorExpr & & <nl> - ( isa < CallExpr > ( AtExprEnd ) | | isa < SubscriptExpr > ( AtExprEnd ) ) ) { <nl> - if ( auto * UDE = dyn_cast < UnresolvedDotExpr > ( AtCursorExpr ) ) { <nl> - if ( auto * Base = UDE - > getBase ( ) ) { <nl> - if ( exprEndAtLine ( Base , Line ) ) <nl> - return false ; <nl> + template < typename T > <nl> + Optional < IndentContext > <nl> + getIndentContextFrom ( const StmtCondition & Condition , SourceLoc ContextLoc , <nl> + T * WalkableParent ) { <nl> + SourceRange Bounds = getConditionRange ( Condition ) ; <nl> + if ( Bounds . isInvalid ( ) | | ! overlapsTarget ( Bounds ) ) <nl> + return None ; <nl> + <nl> + ListAligner Aligner ( SM , TargetLocation , ContextLoc , ContextLoc ) ; <nl> + for ( auto & Elem : Condition ) { <nl> + SourceRange ElemRange = Elem . getSourceRange ( ) ; <nl> + Aligner . updateAlignment ( ElemRange , WalkableParent ) ; <nl> + <nl> + if ( Elem . getKind ( ) = = StmtConditionElement : : CK_Availability ) { <nl> + if ( auto Ctx = getIndentContextFrom ( Elem . getAvailability ( ) , <nl> + WalkableParent ) ) { <nl> + Aligner . setAlignmentIfNeeded ( CtxOverride ) ; <nl> + return Ctx ; <nl> } <nl> } <nl> + if ( ElemRange . isValid ( ) & & isTargetContext ( ElemRange ) ) { <nl> + Aligner . setAlignmentIfNeeded ( CtxOverride ) ; <nl> + return IndentContext { <nl> + ElemRange . Start , <nl> + ! OutdentChecker : : hasOutdent ( SM , ElemRange , WalkableParent ) <nl> + } ; <nl> + } <nl> } <nl> + return Aligner . getContextAndSetAlignment ( CtxOverride ) ; <nl> + } <nl> <nl> + SourceRange getConditionRange ( const StmtCondition & Condition ) { <nl> + if ( Condition . empty ( ) ) <nl> + return SourceRange ( ) ; <nl> <nl> - / / Indent another level from the outer context by default . <nl> - return true ; <nl> + SourceRange Bounds = SourceRange ( Condition . front ( ) . getStartLoc ( ) , <nl> + Condition . back ( ) . getEndLoc ( ) ) ; <nl> + if ( auto Next = getTokenAfter ( SM , Bounds . 
End ) ) { <nl> + if ( Next - > getKind ( ) = = tok : : comma ) <nl> + Bounds . widen ( Next - > getLoc ( ) ) ; <nl> + } <nl> + return Bounds ; <nl> } <nl> - } ; <nl> <nl> - class FormatWalker : public SourceEntityWalker { <nl> - using TokenIt = ArrayRef < Token > : : iterator ; <nl> - class SiblingCollector { <nl> - SourceLoc FoundSibling ; <nl> - SourceManager & SM ; <nl> - ArrayRef < Token > Tokens ; <nl> - SourceLoc & TargetLoc ; <nl> - TokenIt TI ; <nl> - bool NeedExtraIndentation ; <nl> - <nl> - class SourceLocIterator <nl> - : public std : : iterator < std : : input_iterator_tag , SourceLoc > <nl> - { <nl> - TokenIt It ; <nl> - public : <nl> - SourceLocIterator ( TokenIt It ) : It ( It ) { } <nl> - SourceLocIterator ( const SourceLocIterator & mit ) : It ( mit . It ) { } <nl> - const SourceLocIterator & operator = ( const SourceLocIterator & mit ) { <nl> - It = mit . It ; <nl> - return * this ; <nl> - } <nl> - SourceLocIterator & operator + + ( ) { + + It ; return * this ; } <nl> - SourceLocIterator operator + + ( int ) { <nl> - SourceLocIterator tmp ( * this ) ; <nl> - operator + + ( ) ; <nl> - return tmp ; <nl> - } <nl> - bool operator = = ( const SourceLocIterator & rhs ) { return It = = rhs . It ; } <nl> - bool operator ! = ( const SourceLocIterator & rhs ) { return It ! = rhs . It ; } <nl> - SourceLoc operator * ( ) { return It - > getLoc ( ) ; } <nl> - const SourceLoc operator * ( ) const { return It - > getLoc ( ) ; } <nl> - } ; <nl> + # pragma mark Expression indent contexts <nl> <nl> - void adjustTokenIteratorToImmediateAfter ( SourceLoc End ) { <nl> - SourceLocIterator LocBegin ( Tokens . begin ( ) ) ; <nl> - SourceLocIterator LocEnd ( Tokens . end ( ) ) ; <nl> - auto Lower = std : : lower_bound ( LocBegin , LocEnd , End , <nl> - [ & ] ( SourceLoc L , SourceLoc R ) { <nl> - return SM . isBeforeInBuffer ( L , R ) ; <nl> - } ) ; <nl> - if ( * Lower = = End ) { <nl> - Lower + + ; <nl> - } <nl> - TI = Tokens . begin ( ) ; <nl> - std : : advance ( TI , std : : distance ( LocBegin , Lower ) ) ; <nl> - } <nl> - <nl> - bool isImmediateAfterSeparator ( SourceLoc End , tok Separator ) { <nl> - adjustTokenIteratorToImmediateAfter ( End ) ; <nl> - if ( TI = = Tokens . end ( ) | | TI - > getKind ( ) ! = Separator ) <nl> - return false ; <nl> - auto SeparatorLoc = TI - > getLoc ( ) ; <nl> - TI + + ; <nl> - if ( TI = = Tokens . end ( ) ) <nl> - return false ; <nl> - auto NextLoc = TI - > getLoc ( ) ; <nl> - return SM . isBeforeInBuffer ( SeparatorLoc , TargetLoc ) & & <nl> - ! SM . isBeforeInBuffer ( NextLoc , TargetLoc ) ; <nl> - } <nl> - <nl> - bool isTargetImmediateAfter ( SourceLoc Loc ) { <nl> - adjustTokenIteratorToImmediateAfter ( Loc ) ; <nl> - / / Make sure target loc is after loc <nl> - return SM . isBeforeInBuffer ( Loc , TargetLoc ) & & <nl> - / / Make sure immediate loc after loc is not before target loc . <nl> - ! SM . isBeforeInBuffer ( TI - > getLoc ( ) , TargetLoc ) ; <nl> - } <nl> - <nl> - bool sameLineWithTarget ( SourceLoc Loc ) { <nl> - return SM . getLineNumber ( Loc ) = = SM . getLineNumber ( TargetLoc ) ; <nl> - } <nl> - <nl> - public : <nl> - SiblingCollector ( SourceManager & SM , ArrayRef < Token > Tokens , <nl> - SourceLoc & TargetLoc ) : SM ( SM ) , Tokens ( Tokens ) , <nl> - TargetLoc ( TargetLoc ) , TI ( Tokens . begin ( ) ) , <nl> - NeedExtraIndentation ( false ) { } <nl> - <nl> - void collect ( ASTNode Node ) { <nl> - if ( FoundSibling . 
isValid ( ) ) <nl> - return ; <nl> - SourceLoc PrevLoc ; <nl> - auto FindAlignLoc = [ & ] ( SourceLoc Loc ) { <nl> - if ( PrevLoc . isValid ( ) & & Loc . isValid ( ) & & <nl> - SM . getLineNumber ( PrevLoc ) = = SM . getLineNumber ( Loc ) ) <nl> - return PrevLoc ; <nl> - return PrevLoc = Loc ; <nl> - } ; <nl> + Optional < IndentContext > <nl> + getIndentContextFrom ( Expr * E , Optional < Trailing > TrailingTarget ) { <nl> <nl> - auto addPair = [ & ] ( SourceLoc EndLoc , SourceLoc AlignLoc , tok Separator ) { <nl> - if ( isImmediateAfterSeparator ( EndLoc , Separator ) ) <nl> - FoundSibling = AlignLoc ; <nl> - } ; <nl> + / / All handled expressions may claim a trailing target . <nl> <nl> - if ( auto AE = dyn_cast_or_null < ApplyExpr > ( Node . dyn_cast < Expr * > ( ) ) ) { <nl> - / / PrefixUnaryExpr shouldn ' t be syntactically considered as a function call <nl> - / / for sibling alignment . <nl> - if ( ! isa < PrefixUnaryExpr > ( AE ) ) { <nl> - collect ( AE - > getArg ( ) ) ; <nl> - return ; <nl> - } <nl> - } <nl> + if ( auto * TE = dyn_cast < TupleExpr > ( E ) ) { <nl> + if ( TrailingTarget & & TE - > hasTrailingClosure ( ) ) <nl> + return None ; <nl> + return getIndentContextFrom ( TE ) ; <nl> + } <nl> + <nl> + if ( auto * PE = dyn_cast < ParenExpr > ( E ) ) { <nl> + if ( TrailingTarget & & PE - > hasTrailingClosure ( ) ) <nl> + return None ; <nl> + return getIndentContextFrom ( PE ) ; <nl> + } <nl> <nl> - if ( auto PE = dyn_cast_or_null < ParenExpr > ( Node . dyn_cast < Expr * > ( ) ) ) { <nl> - if ( auto Sub = PE - > getSubExpr ( ) ) { <nl> - addPair ( Sub - > getEndLoc ( ) , FindAlignLoc ( Sub - > getStartLoc ( ) ) , <nl> - tok : : comma ) ; <nl> + if ( auto * DE = dyn_cast < DictionaryExpr > ( E ) ) { <nl> + SourceLoc L = DE - > getLBracketLoc ( ) ; <nl> + SourceLoc R = getLocIfKind ( SM , DE - > getRBracketLoc ( ) , tok : : r_square ) ; <nl> + if ( L . isInvalid ( ) | | ! overlapsTarget ( L , R ) ) <nl> + return None ; <nl> + <nl> + ListAligner Aligner ( SM , TargetLocation , L , L , R , true ) ; <nl> + for ( Expr * Elem : DE - > getElements ( ) ) { <nl> + auto * TE = dyn_cast < TupleExpr > ( Elem ) ; <nl> + Aligner . updateAlignment ( TE - > getSourceRange ( ) , TE ) ; <nl> + if ( auto Ctx = getIndentContextFromDictionaryElem ( TE ) ) { <nl> + Aligner . setAlignmentIfNeeded ( CtxOverride ) ; <nl> + return Ctx ; <nl> } <nl> } <nl> + return Aligner . getContextAndSetAlignment ( CtxOverride ) ; <nl> + } <nl> <nl> - / / Tuple elements are siblings . <nl> - if ( auto TE = dyn_cast_or_null < TupleExpr > ( Node . dyn_cast < Expr * > ( ) ) ) { <nl> - / / Trailing closures are not considered siblings to other args . <nl> - unsigned EndAdjust = TE - > hasTrailingClosure ( ) ? 1 : 0 ; <nl> - for ( unsigned I = 0 , N = TE - > getNumElements ( ) - EndAdjust ; I < N ; I + + ) { <nl> - auto EleStart = TE - > getElementNameLoc ( I ) ; <nl> - if ( EleStart . isInvalid ( ) ) { <nl> - EleStart = TE - > getElement ( I ) - > getStartLoc ( ) ; <nl> - } <nl> - addPair ( TE - > getElement ( I ) - > getEndLoc ( ) , FindAlignLoc ( EleStart ) , tok : : comma ) ; <nl> - } <nl> + if ( auto * AE = dyn_cast < ArrayExpr > ( E ) ) { <nl> + SourceLoc L = AE - > getLBracketLoc ( ) ; <nl> + SourceLoc R = getLocIfKind ( SM , AE - > getRBracketLoc ( ) , tok : : r_square ) ; <nl> + if ( L . isInvalid ( ) | | ! overlapsTarget ( L , R ) ) <nl> + return None ; <nl> + <nl> + ListAligner Aligner ( SM , TargetLocation , L , L , R , true ) ; <nl> + for ( auto * Elem : AE - > getElements ( ) ) <nl> + Aligner . 
updateAlignment ( Elem - > getStartLoc ( ) , Elem - > getEndLoc ( ) , Elem ) ; <nl> + return Aligner . getContextAndSetAlignment ( CtxOverride ) ; <nl> + } <nl> + <nl> + if ( auto * USE = dyn_cast < UnresolvedSpecializeExpr > ( E ) ) { <nl> + SourceLoc L = USE - > getLAngleLoc ( ) ; <nl> + SourceLoc R = getLocIfTokenTextMatches ( SM , USE - > getRAngleLoc ( ) , " > " ) ; <nl> + if ( L . isInvalid ( ) | | ! overlapsTarget ( L , R ) ) <nl> + return None ; <nl> + <nl> + SourceLoc ContextLoc = getContextLocForArgs ( SM , USE ) ; <nl> + ListAligner Aligner ( SM , TargetLocation , ContextLoc , L , R ) ; <nl> + for ( auto & Arg : USE - > getUnresolvedParams ( ) ) { <nl> + if ( auto * T = Arg . getTypeRepr ( ) ) <nl> + Aligner . updateAlignment ( T - > getSourceRange ( ) , T ) ; <nl> } <nl> + return Aligner . getContextAndSetAlignment ( CtxOverride ) ; <nl> + } <nl> <nl> - if ( auto AFD = dyn_cast_or_null < AbstractFunctionDecl > ( Node . dyn_cast < Decl * > ( ) ) ) { <nl> - / / Function parameters are siblings . <nl> - for ( auto * param : * AFD - > getParameters ( ) ) { <nl> - addPair ( param - > getEndLoc ( ) , FindAlignLoc ( param - > getStartLoc ( ) ) , <nl> - tok : : comma ) ; <nl> - } <nl> + if ( auto * CLE = dyn_cast < CaptureListExpr > ( E ) ) <nl> + return getIndentContextFrom ( CLE ) ; <nl> + <nl> + if ( auto * CE = dyn_cast < ClosureExpr > ( E ) ) <nl> + return getIndentContextFrom ( CE ) ; <nl> + <nl> + if ( isa < CallExpr > ( E ) | | isa < SubscriptExpr > ( E ) ) { <nl> + SourceLoc ContextLoc = getContextLocForArgs ( SM , E ) ; <nl> + Expr * Arg ; <nl> + if ( auto * CE = dyn_cast < CallExpr > ( E ) ) { <nl> + Arg = CE - > getArg ( ) ; <nl> + } else { <nl> + Arg = cast < SubscriptExpr > ( E ) - > getIndex ( ) ; <nl> } <nl> <nl> - / / Array / Dictionary elements are siblings to align with each other . <nl> - if ( auto AE = dyn_cast_or_null < CollectionExpr > ( Node . dyn_cast < Expr * > ( ) ) ) { <nl> - / / The following check ends - up creating too much indentation , <nl> - / / for example : <nl> - / / let something = [ <nl> - / / a <nl> - / / ] <nl> - / / <nl> - / / Disabling the check gets us back to the Swift2 . 2 behavior : <nl> - / / let something = [ <nl> - / / a <nl> - / / ] <nl> - / / <nl> - / / FIXME : We are going to revisit the behavior and the indentation we <nl> - / / want for dictionary / array literals . <nl> - / / <nl> - # if 0 <nl> - SourceLoc LBracketLoc = AE - > getLBracketLoc ( ) ; <nl> - if ( isTargetImmediateAfter ( LBracketLoc ) & & <nl> - ! 
sameLineWithTarget ( LBracketLoc ) ) { <nl> - FoundSibling = LBracketLoc ; <nl> - NeedExtraIndentation = true ; <nl> + ClosureExpr * TCE = nullptr ; <nl> + CaptureListExpr * TCL = nullptr ; <nl> + if ( auto * PE = dyn_cast_or_null < ParenExpr > ( Arg ) ) { <nl> + if ( auto Ctx = getIndentContextFrom ( PE , ContextLoc ) ) <nl> + return Ctx ; <nl> + if ( PE - > hasTrailingClosure ( ) ) { <nl> + Expr * Last = PE - > getSubExpr ( ) ; <nl> + TCE = dyn_cast_or_null < ClosureExpr > ( Last ) ; <nl> + TCL = dyn_cast_or_null < CaptureListExpr > ( Last ) ; <nl> } <nl> - # endif <nl> - for ( unsigned I = 0 , N = AE - > getNumElements ( ) ; I < N ; I + + ) { <nl> - addPair ( AE - > getElement ( I ) - > getEndLoc ( ) , <nl> - FindAlignLoc ( AE - > getElement ( I ) - > getStartLoc ( ) ) , tok : : comma ) ; <nl> + } else if ( auto * TE = dyn_cast_or_null < TupleExpr > ( Arg ) ) { <nl> + if ( auto Ctx = getIndentContextFrom ( TE , ContextLoc ) ) { <nl> + return Ctx ; <nl> } <nl> - } <nl> - / / Case label items in a case statement are siblings . <nl> - if ( auto CS = dyn_cast_or_null < CaseStmt > ( Node . dyn_cast < Stmt * > ( ) ) ) { <nl> - for ( const CaseLabelItem & Item : CS - > getCaseLabelItems ( ) ) { <nl> - addPair ( Item . getEndLoc ( ) , FindAlignLoc ( Item . getStartLoc ( ) ) , tok : : comma ) ; <nl> + if ( TE - > hasTrailingClosure ( ) ) { <nl> + Expr * Last = TE - > getElements ( ) . back ( ) ; <nl> + TCE = dyn_cast_or_null < ClosureExpr > ( Last ) ; <nl> + TCL = dyn_cast_or_null < CaptureListExpr > ( Last ) ; <nl> } <nl> } <nl> - } ; <nl> <nl> - SiblingAlignInfo getSiblingInfo ( ) { <nl> - return { FoundSibling , NeedExtraIndentation } ; <nl> + if ( TCL ) { <nl> + SourceRange Range = TCL - > getSourceRange ( ) ; <nl> + if ( Range . isValid ( ) & & ( TrailingTarget | | overlapsTarget ( Range ) ) ) <nl> + return getIndentContextFrom ( TCL , ContextLoc ) ; <nl> + } else if ( TCE ) { <nl> + SourceRange Range = TCE - > getSourceRange ( ) ; <nl> + if ( Range . isValid ( ) & & ( TrailingTarget | | overlapsTarget ( Range ) ) ) <nl> + return getIndentContextFrom ( TCE , ContextLoc ) ; <nl> + } <nl> } <nl> - } ; <nl> <nl> - SourceFile & SF ; <nl> - SourceManager & SM ; <nl> - SourceLoc TargetLocation ; <nl> - std : : vector < swift : : ASTWalker : : ParentTy > Stack ; <nl> - swift : : ASTWalker : : ParentTy AtStart ; <nl> - swift : : ASTWalker : : ParentTy AtEnd ; <nl> - bool InDocCommentBlock = false ; <nl> - bool InCommentLine = false ; <nl> - bool InStringLiteral = false ; <nl> - ArrayRef < Token > Tokens ; <nl> - LangOptions Options ; <nl> - TokenIt CurrentTokIt ; <nl> - unsigned TargetLine ; <nl> - SiblingCollector SCollector ; <nl> - <nl> - / / / Sometimes , target is a part of " parent " , for instance , " # else " is a part <nl> - / / / of an IfConfigDecl , so that IfConfigDecl is not really the parent of " # else " . <nl> - bool isTargetPartOf ( swift : : ASTWalker : : ParentTy Parent ) { <nl> - if ( auto Conf = dyn_cast_or_null < IfConfigDecl > ( Parent . getAsDecl ( ) ) ) { <nl> - for ( auto Clause : Conf - > getClauses ( ) ) { <nl> - if ( Clause . Loc = = TargetLocation ) <nl> - return true ; <nl> - } <nl> - } else if ( auto Call = dyn_cast_or_null < CallExpr > ( Parent . 
getAsExpr ( ) ) ) { <nl> - if ( auto Clo = dyn_cast < ClosureExpr > ( Call - > getFn ( ) ) ) { <nl> - if ( Clo - > getBody ( ) - > getLBraceLoc ( ) = = TargetLocation | | <nl> - Clo - > getBody ( ) - > getRBraceLoc ( ) = = TargetLocation ) { <nl> - return true ; <nl> + return None ; <nl> + } <nl> + <nl> + Optional < IndentContext > <nl> + getIndentContextFrom ( CaptureListExpr * CL , <nl> + SourceLoc ContextLoc = SourceLoc ( ) ) { <nl> + ClosureExpr * CE = CL - > getClosureBody ( ) ; <nl> + BraceStmt * BS = CE - > getBody ( ) ; <nl> + if ( ! CE | | ! BS ) <nl> + return None ; <nl> + <nl> + if ( ContextLoc . isValid ( ) ) { <nl> + NodesToSkip . insert ( static_cast < Expr * > ( CL ) ) ; <nl> + } else { <nl> + NodesToSkip . insert ( static_cast < Expr * > ( CE ) ) ; <nl> + } <nl> + <nl> + return getIndentContextFrom ( CE , ContextLoc , CL ) ; <nl> + } <nl> + <nl> + Optional < IndentContext > <nl> + getIndentContextFrom ( ClosureExpr * CE , SourceLoc ContextLoc = SourceLoc ( ) , <nl> + CaptureListExpr * ParentCapture = nullptr ) { <nl> + BraceStmt * BS = CE - > getBody ( ) ; <nl> + if ( ! BS ) <nl> + return None ; <nl> + NodesToSkip . insert ( static_cast < Stmt * > ( BS ) ) ; <nl> + <nl> + SourceLoc L = BS - > getLBraceLoc ( ) ; <nl> + SourceLoc R = getLocIfKind ( SM , BS - > getRBraceLoc ( ) , tok : : r_brace ) ; <nl> + <nl> + if ( ContextLoc . isValid ( ) ) { <nl> + NodesToSkip . insert ( static_cast < Expr * > ( CE ) ) ; <nl> + if ( isTargetContext ( L , R ) ) <nl> + ContextLoc = CtxOverride . propagateContext ( SM , ContextLoc , <nl> + IndentContext : : LineStart , <nl> + L , R ) ; <nl> + } <nl> + <nl> + / / Handle the capture list . <nl> + SourceRange CL = CE - > getBracketRange ( ) ; <nl> + if ( CL . isValid ( ) ) { <nl> + SourceLoc L = CL . Start ; <nl> + SourceLoc R = getLocIfKind ( SM , CL . End , tok : : r_square ) ; <nl> + if ( isTargetContext ( L , R ) ) { <nl> + ContextLoc = L ; <nl> + if ( ! ParentCapture ) / / empty capture list <nl> + return IndentContext { ContextLoc , containsTarget ( L , R ) } ; <nl> + <nl> + ListAligner Aligner ( SM , TargetLocation , ContextLoc , L , R ) ; <nl> + for ( auto & Entry : ParentCapture - > getCaptureList ( ) ) { <nl> + if ( auto * PBD = Entry . Init ) { <nl> + NodesToSkip . insert ( PBD ) ; <nl> + SourceRange Range = PBD - > getSourceRangeIncludingAttrs ( ) ; <nl> + Aligner . updateAlignment ( Range , PBD ) ; <nl> + <nl> + if ( isTargetContext ( Range ) ) { <nl> + Aligner . setAlignmentIfNeeded ( CtxOverride ) ; <nl> + return IndentContext { <nl> + Range . Start , <nl> + ! OutdentChecker : : hasOutdent ( SM , Range , PBD ) <nl> + } ; <nl> + } <nl> + } <nl> } <nl> + return Aligner . getContextAndSetAlignment ( CtxOverride ) ; <nl> } <nl> } <nl> - return false ; <nl> + <nl> + / / Handle parameter list <nl> + if ( auto Ctx = getIndentContextFrom ( CE - > getParameters ( ) ) ) <nl> + return Ctx ; <nl> + <nl> + / / Handle outer braces . <nl> + if ( L . isInvalid ( ) | | ! isTargetContext ( L , R ) ) <nl> + return None ; <nl> + <nl> + if ( ContextLoc . isInvalid ( ) ) <nl> + ContextLoc = L ; <nl> + Expr * WalkableParent = CE ; <nl> + if ( ParentCapture ) <nl> + WalkableParent = ParentCapture ; <nl> + <nl> + auto InLoc = CE - > getInLoc ( ) ; <nl> + if ( InLoc . isValid ( ) ) { <nl> + if ( containsTarget ( InLoc , R ) ) { <nl> + SourceRange InToEnd = SourceRange ( InLoc , BS - > getEndLoc ( ) ) ; <nl> + return IndentContext { <nl> + ContextLoc , <nl> + ! 
OutdentChecker : : hasOutdent ( SM , InToEnd , WalkableParent ) <nl> + } ; <nl> + } <nl> + } <nl> + <nl> + bool shouldIndent = containsTarget ( L , R ) & & <nl> + ! OutdentChecker : : hasOutdent ( SM , WalkableParent , RangeKind : : Open ) ; <nl> + return IndentContext { ContextLoc , shouldIndent } ; <nl> } <nl> <nl> - template < class T > <nl> - bool HandlePre ( T * Node , SourceLoc Start , SourceLoc End ) { <nl> - scanForComments ( Start ) ; <nl> - SCollector . collect ( Node ) ; <nl> - <nl> - if ( SM . isBeforeInBuffer ( TargetLocation , Start ) ) <nl> - return false ; / / Target is before start of Node , skip it . <nl> - if ( SM . isBeforeInBuffer ( End , TargetLocation ) ) <nl> - return false ; / / Target is after end of Node , skip it . <nl> - if ( TargetLocation = = Start ) { <nl> - / / Target is right at the start of Node , mark it . <nl> - AtStart = Node ; <nl> - return false ; <nl> + Optional < IndentContext > <nl> + getIndentContextFromDictionaryElem ( TupleExpr * TE ) { <nl> + SourceLoc Start = TE - > getStartLoc ( ) , End = TE - > getEndLoc ( ) ; <nl> + if ( ! TE - > getNumElements ( ) | | ! isTargetContext ( Start , End ) ) <nl> + return None ; <nl> + Expr * Key = TE - > getElement ( 0 ) ; <nl> + SourceLoc ColonLoc ; <nl> + if ( auto Next = getTokenAfter ( SM , Key - > getEndLoc ( ) ) ) { <nl> + if ( Next & & Next - > getKind ( ) = = tok : : colon ) <nl> + ColonLoc = Next - > getLoc ( ) ; <nl> } <nl> - if ( TargetLocation = = End ) { <nl> - / / Target is right at the end of Node , mark it . <nl> - AtEnd = Node ; <nl> - return false ; <nl> + if ( ColonLoc . isValid ( ) & & isTargetContext ( ColonLoc , End ) ) <nl> + return IndentContext { <nl> + Start , <nl> + ! OutdentChecker : : hasOutdent ( SM , SourceRange ( ColonLoc , End ) , TE ) <nl> + } ; <nl> + return IndentContext { Start , ! OutdentChecker : : hasOutdent ( SM , Key ) } ; <nl> + } <nl> + <nl> + Optional < IndentContext > <nl> + getIndentContextFrom ( TupleExpr * TE , SourceLoc ContextLoc = SourceLoc ( ) ) { <nl> + if ( ContextLoc . isValid ( ) ) <nl> + NodesToSkip . insert ( static_cast < Expr * > ( TE ) ) ; <nl> + SourceLoc L = TE - > getLParenLoc ( ) ; <nl> + SourceLoc R = getLocIfKind ( SM , TE - > getRParenLoc ( ) , <nl> + { tok : : r_paren , tok : : r_square } ) ; <nl> + if ( L . isInvalid ( ) | | ! overlapsTarget ( L , R ) ) <nl> + return None ; <nl> + <nl> + if ( ContextLoc . isValid ( ) ) { <nl> + ContextLoc = CtxOverride . propagateContext ( SM , ContextLoc , <nl> + IndentContext : : LineStart , <nl> + L , R ) ; <nl> + } else { <nl> + ContextLoc = L ; <nl> } <nl> <nl> - / / Target is within Node and Node is really the parent of Target , take it . <nl> - if ( ! isTargetPartOf ( Node ) ) <nl> - Stack . push_back ( Node ) ; <nl> - return true ; <nl> + ListAligner Aligner ( SM , TargetLocation , ContextLoc , L , R ) ; <nl> + auto NumElems = TE - > getNumElements ( ) ; <nl> + if ( TE - > hasTrailingClosure ( ) ) <nl> + - - NumElems ; <nl> + for ( auto I : range ( NumElems ) ) { <nl> + SourceRange ElemRange = TE - > getElementNameLoc ( I ) ; <nl> + if ( Expr * Elem = TE - > getElement ( I ) ) <nl> + widenOrSet ( ElemRange , Elem - > getSourceRange ( ) ) ; <nl> + assert ( ElemRange . isValid ( ) ) ; <nl> + <nl> + Aligner . updateAlignment ( ElemRange , TE ) ; <nl> + if ( isTargetContext ( ElemRange ) ) { <nl> + Aligner . setAlignmentIfNeeded ( CtxOverride ) ; <nl> + return IndentContext { <nl> + ElemRange . Start , <nl> + ! 
OutdentChecker : : hasOutdent ( SM , ElemRange , TE ) <nl> + } ; <nl> + } <nl> + } <nl> + return Aligner . getContextAndSetAlignment ( CtxOverride ) ; <nl> } <nl> <nl> - void scanForComments ( SourceLoc Loc ) { <nl> - if ( InDocCommentBlock | | InCommentLine ) <nl> - return ; <nl> - for ( auto InValid = Loc . isInvalid ( ) ; CurrentTokIt ! = Tokens . end ( ) & & <nl> - ( InValid | | SM . isBeforeInBuffer ( CurrentTokIt - > getLoc ( ) , Loc ) ) ; <nl> - CurrentTokIt + + ) { <nl> - if ( CurrentTokIt - > getKind ( ) = = tok : : comment ) { <nl> - auto StartLine = SM . getLineNumber ( CurrentTokIt - > getRange ( ) . getStart ( ) ) ; <nl> - auto EndLine = SM . getLineNumber ( CurrentTokIt - > getRange ( ) . getEnd ( ) ) ; <nl> - auto TokenStr = CurrentTokIt - > getRange ( ) . str ( ) ; <nl> - InDocCommentBlock | = TargetLine > StartLine & & TargetLine < = EndLine & & <nl> - TokenStr . startswith ( " / * " ) ; <nl> - InCommentLine | = StartLine = = TargetLine & & TokenStr . startswith ( " / / " ) ; <nl> + Optional < IndentContext > <nl> + getIndentContextFrom ( ParenExpr * PE , SourceLoc ContextLoc = SourceLoc ( ) ) { <nl> + if ( ContextLoc . isValid ( ) ) <nl> + NodesToSkip . insert ( static_cast < Expr * > ( PE ) ) ; <nl> + SourceLoc L = PE - > getLParenLoc ( ) ; <nl> + SourceLoc R = getLocIfKind ( SM , PE - > getRParenLoc ( ) , <nl> + { tok : : r_paren , tok : : r_square } ) ; <nl> + if ( L . isInvalid ( ) | | ! overlapsTarget ( L , R ) ) <nl> + return None ; <nl> + <nl> + if ( ContextLoc . isValid ( ) ) { <nl> + ContextLoc = CtxOverride . propagateContext ( SM , ContextLoc , <nl> + IndentContext : : LineStart , <nl> + L , R ) ; <nl> + } else { <nl> + ContextLoc = L ; <nl> + } <nl> + <nl> + ListAligner Aligner ( SM , TargetLocation , ContextLoc , L , R ) ; <nl> + if ( ! PE - > hasTrailingClosure ( ) ) { <nl> + Expr * Elem = PE - > getSubExpr ( ) ; <nl> + SourceRange Range = Elem - > getSourceRange ( ) ; <nl> + Aligner . updateAlignment ( Range , Elem ) ; <nl> + <nl> + if ( isTargetContext ( Range ) ) { <nl> + Aligner . setAlignmentIfNeeded ( CtxOverride ) ; <nl> + return IndentContext { <nl> + Range . Start , <nl> + ! OutdentChecker : : hasOutdent ( SM , Elem ) <nl> + } ; <nl> } <nl> } <nl> + return Aligner . getContextAndSetAlignment ( CtxOverride ) ; <nl> } <nl> <nl> - template < typename T > <nl> - bool HandlePost ( T * Node ) { <nl> - if ( SM . isBeforeInBuffer ( TargetLocation , Node - > getStartLoc ( ) ) ) <nl> - return false ; / / Target is before start of Node , terminate walking . <nl> + # pragma mark TypeRepr indent contexts <nl> <nl> - return true ; <nl> - } <nl> + Optional < IndentContext > <nl> + getIndentContextFrom ( TypeRepr * T , Optional < Trailing > TrailingTarget ) { <nl> + if ( TrailingTarget ) <nl> + return None ; <nl> <nl> - public : <nl> - explicit FormatWalker ( SourceFile & SF , SourceManager & SM ) <nl> - : SF ( SF ) , SM ( SM ) , <nl> - Tokens ( SF . getAllTokens ( ) ) , <nl> - CurrentTokIt ( Tokens . begin ( ) ) , <nl> - SCollector ( SM , Tokens , TargetLocation ) { } <nl> + if ( auto * GIT = dyn_cast < GenericIdentTypeRepr > ( T ) ) { <nl> + SourceLoc ContextLoc = GIT - > getNameLoc ( ) . getBaseNameLoc ( ) ; <nl> + SourceRange Brackets = GIT - > getAngleBrackets ( ) ; <nl> + if ( Brackets . isInvalid ( ) ) <nl> + return None ; <nl> <nl> - FormatContext walkToLocation ( SourceLoc Loc ) { <nl> - Stack . clear ( ) ; <nl> - TargetLocation = Loc ; <nl> - TargetLine = SM . 
getLineNumber ( TargetLocation ) ; <nl> - AtStart = AtEnd = swift : : ASTWalker : : ParentTy ( ) ; <nl> - walk ( SF ) ; <nl> - scanForComments ( SourceLoc ( ) ) ; <nl> - return FormatContext ( SM , Stack , AtStart , AtEnd , InDocCommentBlock , <nl> - InCommentLine , InStringLiteral , <nl> - SCollector . getSiblingInfo ( ) ) ; <nl> - } <nl> + SourceLoc L = Brackets . Start ; <nl> + SourceLoc R = getLocIfTokenTextMatches ( SM , Brackets . End , " > " ) ; <nl> + ListAligner Aligner ( SM , TargetLocation , ContextLoc , L , R ) ; <nl> + for ( auto * Arg : GIT - > getGenericArgs ( ) ) <nl> + Aligner . updateAlignment ( Arg - > getSourceRange ( ) , GIT ) ; <nl> <nl> - ArrayRef < Token > getTokens ( ) { <nl> - return llvm : : makeArrayRef ( Tokens ) ; <nl> - } <nl> + return Aligner . getContextAndSetAlignment ( CtxOverride ) ; <nl> + } <nl> <nl> - bool walkToDeclPre ( Decl * D , CharSourceRange Range ) override { <nl> - SourceLoc Start = D - > getStartLoc ( ) ; <nl> - SourceLoc End = D - > getEndLoc ( ) ; <nl> + if ( auto * TT = dyn_cast < TupleTypeRepr > ( T ) ) { <nl> + SourceLoc ContextLoc = TT - > getStartLoc ( ) ; <nl> + SourceRange Parens = TT - > getParens ( ) ; <nl> + if ( Parens . isInvalid ( ) ) <nl> + return None ; <nl> + <nl> + SourceLoc L = Parens . Start ; <nl> + SourceLoc R = getLocIfKind ( SM , Parens . End , tok : : r_paren ) ; <nl> + ListAligner Aligner ( SM , TargetLocation , ContextLoc , L , R ) ; <nl> + for ( auto & Elem : TT - > getElements ( ) ) { <nl> + SourceRange ElemRange = Elem . NameLoc ; <nl> + widenOrSet ( ElemRange , Elem . UnderscoreLoc ) ; <nl> + if ( auto * T = Elem . Type ) <nl> + widenOrSet ( ElemRange , T - > getSourceRange ( ) ) ; <nl> + <nl> + Aligner . updateAlignment ( ElemRange , TT ) ; <nl> + if ( Elem . ColonLoc . isValid ( ) ) { <nl> + SourceRange FromColonToEnd = SourceRange ( Elem . ColonLoc , ElemRange . End ) ; <nl> + if ( isTargetContext ( FromColonToEnd ) ) { <nl> + Aligner . setAlignmentIfNeeded ( CtxOverride ) ; <nl> + return IndentContext { <nl> + ElemRange . Start , <nl> + ! OutdentChecker : : hasOutdent ( SM , FromColonToEnd , TT ) <nl> + } ; <nl> + } <nl> + } <nl> + } <nl> + return Aligner . getContextAndSetAlignment ( CtxOverride ) ; <nl> + } <nl> <nl> - if ( auto * VD = dyn_cast < VarDecl > ( D ) ) { <nl> - / / We ' ll treat properties with accessors as spanning the braces as well . <nl> - / / This will ensure we can do indentation inside the braces . <nl> - auto Loc = getVarDeclInitEnd ( VD ) ; <nl> - End = Loc . isValid ( ) ? Loc : End ; <nl> + if ( auto * AT = dyn_cast < ArrayTypeRepr > ( T ) ) { <nl> + SourceLoc ContextLoc = AT - > getStartLoc ( ) ; <nl> + SourceRange Brackets = AT - > getBrackets ( ) ; <nl> + if ( Brackets . isInvalid ( ) ) <nl> + return None ; <nl> + return IndentContext { <nl> + ContextLoc , <nl> + containsTarget ( Brackets . Start , Brackets . End ) & & <nl> + ! OutdentChecker : : hasOutdent ( SM , AT , RangeKind : : Open ) <nl> + } ; <nl> } <nl> <nl> - return HandlePre ( D , Start , End ) ; <nl> - } <nl> + if ( auto * DT = dyn_cast < DictionaryTypeRepr > ( T ) ) { <nl> + SourceLoc ContextLoc = DT - > getStartLoc ( ) ; <nl> + SourceRange Brackets = DT - > getBrackets ( ) ; <nl> + if ( Brackets . isInvalid ( ) ) <nl> + return None ; <nl> + <nl> + SourceLoc KeyLoc = DT - > getKey ( ) - > getStartLoc ( ) ; <nl> + SourceLoc ColonLoc = DT - > getColonLoc ( ) ; <nl> + if ( ColonLoc . isValid ( ) ) { <nl> + SourceRange ColonToEnd = SourceRange ( ColonLoc , Brackets . 
End ) ; <nl> + if ( isTargetContext ( ColonToEnd ) ) <nl> + return IndentContext { <nl> + KeyLoc , <nl> + containsTarget ( Brackets ) & & <nl> + ! OutdentChecker : : hasOutdent ( SM , ColonToEnd , DT ) <nl> + } ; <nl> + } <nl> + return IndentContext { <nl> + ContextLoc , <nl> + containsTarget ( Brackets ) & & <nl> + ! OutdentChecker : : hasOutdent ( SM , DT , RangeKind : : Open ) <nl> + } ; <nl> + } <nl> <nl> - bool walkToDeclPost ( Decl * D ) override { <nl> - return HandlePost ( D ) ; <nl> + return None ; <nl> } <nl> <nl> - bool walkToStmtPre ( Stmt * S ) override { <nl> - return HandlePre ( S , S - > getStartLoc ( ) , S - > getEndLoc ( ) ) ; <nl> - } <nl> + # pragma mark Pattern indent contexts <nl> <nl> - bool walkToStmtPost ( Stmt * S ) override { <nl> - return HandlePost ( S ) ; <nl> - } <nl> + Optional < IndentContext > <nl> + getIndentContextFrom ( Pattern * P , Optional < Trailing > TrailingTarget ) { <nl> + if ( TrailingTarget ) <nl> + return None ; <nl> <nl> - bool walkToExprPre ( Expr * E ) override { <nl> - if ( E - > getKind ( ) = = ExprKind : : StringLiteral & & <nl> - SM . isBeforeInBuffer ( E - > getStartLoc ( ) , TargetLocation ) & & <nl> - SM . isBeforeInBuffer ( TargetLocation , <nl> - Lexer : : getLocForEndOfToken ( SM , E - > getEndLoc ( ) ) ) ) { <nl> - InStringLiteral = true ; <nl> + if ( auto * TP = dyn_cast < TypedPattern > ( P ) ) { <nl> + SourceLoc ContextLoc = TP - > getStartLoc ( ) ; <nl> + auto * LHS = TP - > getSubPattern ( ) ; <nl> + <nl> + SourceLoc ColonLoc ; <nl> + if ( auto Next = getTokenAfter ( SM , LHS - > getEndLoc ( ) ) ) { <nl> + if ( Next - > getKind ( ) = = tok : : colon ) <nl> + ColonLoc = Next - > getLoc ( ) ; <nl> + } <nl> + if ( ColonLoc . isValid ( ) ) { <nl> + SourceRange ColonToEnd = SourceRange ( ColonLoc , TP - > getEndLoc ( ) ) ; <nl> + if ( isTargetContext ( ColonToEnd ) ) <nl> + return IndentContext { <nl> + ContextLoc , <nl> + ! OutdentChecker : : hasOutdent ( SM , ColonToEnd , TP ) <nl> + } ; <nl> + } <nl> + return IndentContext { ContextLoc , ! OutdentChecker : : hasOutdent ( SM , TP ) } ; <nl> } <nl> - return HandlePre ( E , E - > getStartLoc ( ) , E - > getEndLoc ( ) ) ; <nl> - } <nl> <nl> - bool walkToExprPost ( Expr * E ) override { <nl> - return HandlePost ( E ) ; <nl> - } <nl> + if ( auto * PP = dyn_cast < ParenPattern > ( P ) ) { <nl> + SourceLoc ContextLoc = PP - > getStartLoc ( ) ; <nl> + SourceLoc L = PP - > getLParenLoc ( ) ; <nl> + SourceLoc R = getLocIfKind ( SM , PP - > getRParenLoc ( ) , tok : : r_paren ) ; <nl> + if ( L . isInvalid ( ) ) <nl> + return None ; <nl> + ListAligner Aligner ( SM , TargetLocation , ContextLoc , L , R ) ; <nl> + if ( auto * Elem = PP - > getSubPattern ( ) ) { <nl> + SourceRange ElemRange = Elem - > getSourceRange ( ) ; <nl> + Aligner . updateAlignment ( ElemRange , Elem ) ; <nl> + <nl> + if ( isTargetContext ( ElemRange ) ) { <nl> + Aligner . setAlignmentIfNeeded ( CtxOverride ) ; <nl> + return IndentContext { <nl> + ElemRange . Start , <nl> + ! OutdentChecker : : hasOutdent ( SM , ElemRange , Elem ) <nl> + } ; <nl> + } <nl> + } <nl> + return Aligner . getContextAndSetAlignment ( CtxOverride ) ; <nl> + } <nl> <nl> - bool shouldWalkInactiveConfigRegion ( ) override { <nl> - return true ; <nl> + if ( auto * TP = dyn_cast < TuplePattern > ( P ) ) { <nl> + SourceLoc ContextLoc = TP - > getStartLoc ( ) ; <nl> + SourceLoc L = TP - > getLParenLoc ( ) , R = TP - > getRParenLoc ( ) ; <nl> + if ( L . 
isInvalid ( ) ) <nl> + return None ; <nl> + <nl> + ListAligner Aligner ( SM , TargetLocation , ContextLoc , L , R ) ; <nl> + for ( auto & Elem : TP - > getElements ( ) ) { <nl> + SourceRange ElemRange = Elem . getLabelLoc ( ) ; <nl> + if ( auto * P = Elem . getPattern ( ) ) <nl> + widenOrSet ( ElemRange , P - > getSourceRange ( ) ) ; <nl> + Aligner . updateAlignment ( ElemRange , TP ) ; <nl> + <nl> + if ( isTargetContext ( ElemRange ) ) { <nl> + Aligner . setAlignmentIfNeeded ( CtxOverride ) ; <nl> + return IndentContext { <nl> + ElemRange . Start , <nl> + ! OutdentChecker : : hasOutdent ( SM , ElemRange , TP ) <nl> + } ; <nl> + } <nl> + } <nl> + return Aligner . getContextAndSetAlignment ( CtxOverride ) ; <nl> + } <nl> + <nl> + return None ; <nl> } <nl> } ; <nl> <nl> class CodeFormatter { <nl> <nl> std : : pair < LineRange , std : : string > indent ( unsigned LineIndex , <nl> FormatContext & FC , <nl> - StringRef Text , TokenInfo ToInfo ) { <nl> - <nl> - / / If having sibling locs to align with , respect siblings . <nl> - auto isClosingSquare = <nl> - ToInfo & & ToInfo . getLineStarter ( ) - > getKind ( ) = = tok : : r_square ; <nl> - if ( ! isClosingSquare & & FC . HasSibling ( ) ) { <nl> + StringRef Text ) { <nl> + if ( FC . isExact ( ) ) { <nl> StringRef Line = swift : : ide : : getTextForLine ( LineIndex , Text , / * Trim * / true ) ; <nl> StringBuilder Builder ; <nl> - FC . padToSiblingColumn ( Builder , FmtOptions ) ; <nl> - if ( FC . needExtraIndentationForSibling ( ) ) { <nl> - if ( FmtOptions . UseTabs ) <nl> - Builder . append ( 1 , ' \ t ' ) ; <nl> - else <nl> - Builder . append ( FmtOptions . IndentWidth , ' ' ) ; <nl> - } <nl> + FC . padToExactColumn ( Builder , FmtOptions ) ; <nl> Builder . append ( Line ) ; <nl> return std : : make_pair ( LineRange ( LineIndex , 1 ) , Builder . str ( ) . str ( ) ) ; <nl> } <nl> class CodeFormatter { <nl> swift : : ide : : getTextForLine ( LineIndex , Text , / * Trim * / false ) ) ; <nl> } <nl> <nl> - / / Take the current indent position of the outer context , then add another <nl> - / / indent level if expected . <nl> + / / Take the current indent position of the context , then add the number of <nl> + / / indents specified . <nl> auto LineAndColumn = FC . indentLineAndColumn ( ) ; <nl> size_t ExpandedIndent = swift : : ide : : getExpandedIndentForLine ( LineAndColumn . first , <nl> FmtOptions , Text ) ; <nl> - auto AddIndentFunc = [ & ] ( ) { <nl> + <nl> + if ( FC . shouldAddIndentForLine ( ) ) { <nl> auto Width = FmtOptions . UseTabs ? FmtOptions . TabWidth <nl> : FmtOptions . IndentWidth ; <nl> / / We don ' t need to add additional indentation if Width is zero . <nl> - if ( ! Width ) <nl> - return ; <nl> - / / Increment indent . <nl> - ExpandedIndent + = Width ; <nl> - / / Normalize indent to align on proper column indent width . <nl> - ExpandedIndent - = ExpandedIndent % Width ; <nl> - } ; <nl> - <nl> - if ( LineAndColumn . second > 0 & & <nl> - FC . shouldAddIndentForLine ( LineIndex , ToInfo , FmtOptions ) ) <nl> - AddIndentFunc ( ) ; <nl> + if ( Width ) { <nl> + / / Increment indent . <nl> + ExpandedIndent + = Width * FC . numIndentLevels ( ) ; <nl> <nl> - / / Control statements in switch align with the rest of the block in case . <nl> - / / For example : <nl> - / / switch . . . { <nl> - / / case xyz : <nl> - / / break < - - Extra indent level here . <nl> - if ( FmtOptions . IndentSwitchCase & & FC . 
isSwitchControlStmt ( LineIndex , Text ) ) <nl> - AddIndentFunc ( ) ; <nl> + / / Normalize indent to align on proper column indent width . <nl> + ExpandedIndent - = ExpandedIndent % Width ; <nl> + } <nl> + } <nl> <nl> if ( FC . IsInDocCommentBlock ( ) ) { <nl> - <nl> / / Inside doc comment block , the indent is one space , e . g . <nl> / / / * * <nl> / / * < mmmIndent to align with the first star . <nl> class CodeFormatter { <nl> LineRange range = LineRange ( LineIndex , 1 ) ; <nl> return std : : make_pair ( range , IndentedLine ) ; <nl> } <nl> - <nl> - } ; <nl> - <nl> - class TokenInfoCollector { <nl> - SourceManager & SM ; <nl> - ArrayRef < Token > Tokens ; <nl> - unsigned Line ; <nl> - / / The location of the end of the line under indentation , we don ' t need to <nl> - / / collect tokens after this location . <nl> - SourceLoc EndLimit ; <nl> - public : <nl> - TokenInfoCollector ( SourceManager & SM , <nl> - unsigned BufferId , <nl> - ArrayRef < Token > Tokens , unsigned Line ) : <nl> - SM ( SM ) , Tokens ( Tokens ) , Line ( Line ) { <nl> - if ( auto Offset = SM . resolveOffsetForEndOfLine ( BufferId , Line ) ) { <nl> - EndLimit = SM . getLocForOffset ( BufferId , * Offset ) ; <nl> - } <nl> - } <nl> - <nl> - TokenInfo collect ( ) { <nl> - if ( EndLimit . isInvalid ( ) ) { <nl> - return TokenInfo ( ) ; <nl> - } <nl> - TokenInfo Result ; <nl> - for ( auto & T : Tokens ) { <nl> - if ( ! T . isAtStartOfLine ( ) ) <nl> - continue ; <nl> - if ( SM . isBeforeInBuffer ( EndLimit , T . getLoc ( ) ) ) { <nl> - if ( ! Result . LineStarts . empty ( ) ) { <nl> - if ( SM . getLineNumber ( Result . getLineStarter ( ) - > getLoc ( ) ) = = Line ) { <nl> - return Result ; <nl> - } <nl> - } <nl> - return TokenInfo ( ) ; <nl> - } <nl> - Result . LineStarts . push_back ( & T ) ; <nl> - } <nl> - return TokenInfo ( ) ; <nl> - } <nl> } ; <nl> } / / anonymous namespace <nl> <nl> std : : pair < LineRange , std : : string > swift : : ide : : reformat ( LineRange Range , <nl> SourceFile & SF ) { <nl> / / Sanitize 0 - width tab <nl> if ( Options . UseTabs & & ! Options . TabWidth ) { <nl> - / / If IndentWidth is specified , use it as the tab width . <nl> - if ( Options . IndentWidth ) <nl> - Options . TabWidth = Options . IndentWidth ; <nl> - / / Otherwise , use the default value , <nl> - else <nl> - Options . TabWidth = 4 ; <nl> + / / If IndentWidth is specified , use it as the tab width . Otherwise , use the <nl> + / / default value . <nl> + Options . TabWidth = Options . IndentWidth ? Options . IndentWidth : 4 ; <nl> } <nl> - FormatWalker walker ( SF , SM ) ; <nl> auto SourceBufferID = SF . getBufferID ( ) . getValue ( ) ; <nl> StringRef Text = SM . getLLVMSourceMgr ( ) <nl> . getMemoryBuffer ( SourceBufferID ) - > getBuffer ( ) ; <nl> size_t Offset = getOffsetOfLine ( Range . startLine ( ) , Text , / * Trim * / true ) ; <nl> SourceLoc Loc = SM . getLocForBufferStart ( SourceBufferID ) <nl> . getAdvancedLoc ( Offset ) ; <nl> + <nl> + FormatWalker walker ( SF , SM , Options ) ; <nl> FormatContext FC = walker . walkToLocation ( Loc ) ; <nl> CodeFormatter CF ( Options ) ; <nl> - unsigned Line = Range . startLine ( ) ; <nl> - return CF . indent ( Line , FC , Text , TokenInfoCollector ( SM , SourceBufferID , <nl> - walker . getTokens ( ) , <nl> - Line ) . collect ( ) ) ; <nl> + return CF . indent ( Range . startLine ( ) , FC , Text ) ; <nl> } <nl> <nl> mmm a / lib / Parse / ParseDecl . cpp <nl> ppp b / lib / Parse / ParseDecl . 
cpp <nl> bool Parser : : parseNewDeclAttribute ( DeclAttributes & Attributes , SourceLoc AtLoc , <nl> PlatformAndVersions ; <nl> <nl> StringRef AttrName = " @ _originalDefinedIn " ; <nl> + bool SuppressLaterDiags = false ; <nl> if ( parseList ( tok : : r_paren , LeftLoc , RightLoc , false , <nl> diag : : originally_defined_in_missing_rparen , <nl> SyntaxKind : : Unknown , [ & ] ( ) - > ParserStatus { <nl> bool Parser : : parseNewDeclAttribute ( DeclAttributes & Attributes , SourceLoc AtLoc , <nl> if ( ! Tok . is ( tok : : identifier ) | | Tok . getText ( ) ! = " module " | | <nl> ! peekToken ( ) . is ( tok : : colon ) ) { <nl> diagnose ( Tok , diag : : originally_defined_in_need_original_module_name ) ; <nl> + SuppressLaterDiags = true ; <nl> return makeParserError ( ) ; <nl> } <nl> consumeToken ( tok : : identifier ) ; <nl> bool Parser : : parseNewDeclAttribute ( DeclAttributes & Attributes , SourceLoc AtLoc , <nl> if ( OriginalModuleName . empty ( ) ) { <nl> diagnose ( ModuleNameLoc , <nl> diag : : originally_defined_in_need_nonempty_module_name ) ; <nl> + SuppressLaterDiags = true ; <nl> return makeParserError ( ) ; <nl> } <nl> return makeParserSuccess ( ) ; <nl> bool Parser : : parseNewDeclAttribute ( DeclAttributes & Attributes , SourceLoc AtLoc , <nl> if ( ! Plat . hasValue ( ) ) { <nl> diagnose ( Tok . getLoc ( ) , <nl> diag : : originally_defined_in_unrecognized_platform ) ; <nl> + SuppressLaterDiags = true ; <nl> return makeParserError ( ) ; <nl> } else { <nl> consumeToken ( ) ; <nl> bool Parser : : parseNewDeclAttribute ( DeclAttributes & Attributes , SourceLoc AtLoc , <nl> SourceRange VersionRange ; <nl> if ( parseVersionTuple ( VerTuple , VersionRange , <nl> Diagnostic ( diag : : attr_availability_expected_version , AttrName ) ) ) { <nl> + SuppressLaterDiags = true ; <nl> return makeParserError ( ) ; <nl> } else { <nl> if ( VerTuple . getSubminor ( ) . hasValue ( ) | | <nl> bool Parser : : parseNewDeclAttribute ( DeclAttributes & Attributes , SourceLoc AtLoc , <nl> } <nl> } <nl> diagnose ( AtLoc , diag : : originally_defined_in_need_platform_version ) ; <nl> + SuppressLaterDiags = true ; <nl> return makeParserError ( ) ; <nl> } <nl> } <nl> - } ) . isError ( ) ) { <nl> + } ) . isError ( ) | | SuppressLaterDiags ) { <nl> return false ; <nl> } <nl> if ( OriginalModuleName . empty ( ) ) { <nl> mmm a / lib / Parse / ParseExpr . cpp <nl> ppp b / lib / Parse / ParseExpr . cpp <nl> ParserResult < Expr > Parser : : parseExprImpl ( Diag < > Message , <nl> return makeParserResult ( new ( Context ) UnresolvedPatternExpr ( pattern . get ( ) ) ) ; <nl> } <nl> <nl> - auto expr = parseExprSequence ( Message , isExprBasic , <nl> + return parseExprSequence ( Message , isExprBasic , <nl> / * forConditionalDirective * / false ) ; <nl> - if ( expr . hasCodeCompletion ( ) ) <nl> - return expr ; <nl> - if ( expr . isNull ( ) ) <nl> - return nullptr ; <nl> - <nl> - return makeParserResult ( expr . get ( ) ) ; <nl> } <nl> <nl> / / / parseExprIs <nl> ParserResult < Expr > Parser : : parseExprSequence ( Diag < > Message , <nl> <nl> SmallVector < Expr * , 8 > SequencedExprs ; <nl> SourceLoc startLoc = Tok . getLoc ( ) ; <nl> - bool HasCodeCompletion = false ; <nl> + ParserStatus SequenceStatus ; <nl> bool PendingTernary = false ; <nl> <nl> while ( true ) { <nl> ParserResult < Expr > Parser : : parseExprSequence ( Diag < > Message , <nl> / / Parse a unary expression . 
<nl> ParserResult < Expr > Primary = <nl> parseExprSequenceElement ( Message , isExprBasic ) ; <nl> + SequenceStatus | = Primary ; <nl> <nl> - if ( Primary . hasCodeCompletion ( ) ) { <nl> - HasCodeCompletion = true ; <nl> - if ( CodeCompletion ) <nl> + if ( SequenceStatus . hasCodeCompletion ( ) & & CodeCompletion ) <nl> CodeCompletion - > setLeadingSequenceExprs ( SequencedExprs ) ; <nl> - } <nl> + <nl> if ( Primary . isNull ( ) ) { <nl> - if ( HasCodeCompletion ) { <nl> + if ( SequenceStatus . hasCodeCompletion ( ) ) { <nl> SequencedExprs . push_back ( new ( Context ) CodeCompletionExpr ( PreviousLoc ) ) ; <nl> break ; <nl> } <nl> - return Primary ; <nl> + return nullptr ; <nl> } <nl> - <nl> SequencedExprs . push_back ( Primary . get ( ) ) ; <nl> <nl> / / We know we can make a syntax node for ternary expression . <nl> ParserResult < Expr > Parser : : parseExprSequence ( Diag < > Message , <nl> PendingTernary = false ; <nl> } <nl> <nl> + if ( SequenceStatus . isError ( ) & & ! SequenceStatus . hasCodeCompletion ( ) ) <nl> + break ; <nl> + <nl> if ( isForConditionalDirective & & Tok . isAtStartOfLine ( ) ) <nl> break ; <nl> <nl> ParserResult < Expr > Parser : : parseExprSequence ( Diag < > Message , <nl> / / Parse the middle expression of the ternary . <nl> ParserResult < Expr > middle = <nl> parseExprSequence ( diag : : expected_expr_after_if_question , isExprBasic ) ; <nl> + SequenceStatus | = middle ; <nl> ParserStatus Status = middle ; <nl> - if ( middle . hasCodeCompletion ( ) ) <nl> - HasCodeCompletion = true ; <nl> if ( middle . isNull ( ) ) <nl> return nullptr ; <nl> <nl> ParserResult < Expr > Parser : : parseExprSequence ( Diag < > Message , <nl> <nl> / / Store the expr itself as a placeholder RHS . The real RHS is the <nl> / / type parameter stored in the node itself . <nl> + SequenceStatus | = is ; <nl> SequencedExprs . push_back ( is . get ( ) ) ; <nl> SequencedExprs . push_back ( is . get ( ) ) ; <nl> <nl> ParserResult < Expr > Parser : : parseExprSequence ( Diag < > Message , <nl> <nl> / / Store the expr itself as a placeholder RHS . The real RHS is the <nl> / / type parameter stored in the node itself . <nl> + SequenceStatus | = as ; <nl> SequencedExprs . push_back ( as . get ( ) ) ; <nl> SequencedExprs . push_back ( as . get ( ) ) ; <nl> <nl> ParserResult < Expr > Parser : : parseExprSequence ( Diag < > Message , <nl> ParserResult < Expr > arrow = parseExprArrow ( ) ; <nl> if ( arrow . isNull ( ) | | arrow . hasCodeCompletion ( ) ) <nl> return arrow ; <nl> + SequenceStatus | = arrow ; <nl> SequencedExprs . push_back ( arrow . get ( ) ) ; <nl> break ; <nl> } <nl> ParserResult < Expr > Parser : : parseExprSequence ( Diag < > Message , <nl> assert ( ! SequencedExprs . empty ( ) ) ; <nl> <nl> / / If we saw no operators , don ' t build a sequence . <nl> - if ( SequencedExprs . size ( ) = = 1 ) { <nl> - auto Result = makeParserResult ( SequencedExprs [ 0 ] ) ; <nl> - if ( HasCodeCompletion ) <nl> - Result . setHasCodeCompletion ( ) ; <nl> - return Result ; <nl> - } <nl> + if ( SequencedExprs . size ( ) = = 1 ) <nl> + return makeParserResult ( SequenceStatus , SequencedExprs [ 0 ] ) ; <nl> <nl> ExprSequnceContext . createNodeInPlace ( SyntaxKind : : ExprList ) ; <nl> ExprSequnceContext . setCreateSyntax ( SyntaxKind : : SequenceExpr ) ; <nl> - auto Result = makeParserResult ( SequenceExpr : : create ( Context , SequencedExprs ) ) ; <nl> - if ( HasCodeCompletion ) <nl> - Result . 
setHasCodeCompletion ( ) ; <nl> - return Result ; <nl> + return makeParserResult ( SequenceStatus , <nl> + SequenceExpr : : create ( Context , SequencedExprs ) ) ; <nl> } <nl> <nl> / / / parseExprSequenceElement <nl> ParserResult < Expr > Parser : : parseExprKeyPath ( ) { <nl> <nl> / / FIXME : diagnostics <nl> ParserResult < Expr > rootResult , pathResult ; <nl> + ParserStatus parseStatus ; <nl> + <nl> if ( ! startsWithSymbol ( Tok , ' . ' ) ) { <nl> rootResult = parseExprPostfix ( diag : : expr_keypath_expected_expr , <nl> / * isBasic = * / true ) ; <nl> + parseStatus = rootResult ; <nl> <nl> if ( rootResult . isParseError ( ) ) <nl> return rootResult ; <nl> ParserResult < Expr > Parser : : parseExprKeyPath ( ) { <nl> pathResult = parseExprPostfixSuffix ( inner , / * isExprBasic = * / true , <nl> / * periodHasKeyPathBehavior = * / false , <nl> unusedHasBindOptional ) ; <nl> + parseStatus | = pathResult ; <nl> } <nl> <nl> - if ( ! rootResult . getPtrOrNull ( ) & & ! pathResult . getPtrOrNull ( ) ) <nl> - return pathResult ; <nl> + if ( rootResult . isNull ( ) & & pathResult . isNull ( ) ) <nl> + return nullptr ; <nl> <nl> auto keypath = new ( Context ) KeyPathExpr ( <nl> backslashLoc , rootResult . getPtrOrNull ( ) , pathResult . getPtrOrNull ( ) ) ; <nl> ParserResult < Expr > Parser : : parseExprKeyPath ( ) { <nl> return makeParserCodeCompletionResult ( keypath ) ; <nl> } <nl> <nl> - ParserStatus parseStatus = ParserStatus ( rootResult ) | ParserStatus ( pathResult ) ; <nl> return makeParserResult ( parseStatus , keypath ) ; <nl> } <nl> <nl> Parser : : parseExprPostfixSuffix ( ParserResult < Expr > Result , bool isExprBasic , <nl> : diag : : expected_member_name ; <nl> auto Name = parseDeclNameRef ( NameLoc , D , <nl> DeclNameFlag : : AllowKeywords | DeclNameFlag : : AllowCompoundNames ) ; <nl> - if ( ! Name ) <nl> - return nullptr ; <nl> + if ( ! Name ) { <nl> + SourceRange ErrorRange = Result . get ( ) - > getSourceRange ( ) ; <nl> + ErrorRange . widen ( TokLoc ) ; <nl> + return makeParserErrorResult ( new ( Context ) ErrorExpr ( ErrorRange , Type ( ) , Result . get ( ) ) ) ; <nl> + } <nl> SyntaxContext - > createNodeInPlace ( SyntaxKind : : MemberAccessExpr ) ; <nl> Result = makeParserResult ( Result , new ( Context ) UnresolvedDotExpr ( <nl> Result . get ( ) , TokLoc , Name , NameLoc , <nl> Parser : : parseExprPostfixSuffix ( ParserResult < Expr > Result , bool isExprBasic , <nl> <nl> / / If we end up with an unknown token on this line , return an ErrorExpr <nl> / / covering the range of the token . <nl> - if ( ! Tok . isAtStartOfLine ( ) & & consumeIf ( tok : : unknown ) ) { <nl> - Result = makeParserResult ( <nl> - Result , new ( Context ) ErrorExpr ( Result . get ( ) - > getSourceRange ( ) ) ) ; <nl> + if ( ! Tok . isAtStartOfLine ( ) & & Tok . is ( tok : : unknown ) ) { <nl> + SourceLoc UnknownLoc = consumeToken ( ) ; <nl> + SourceRange ErrorRange = Result . get ( ) - > getSourceRange ( ) ; <nl> + ErrorRange . widen ( UnknownLoc ) ; <nl> + Result = makeParserResult ( Result , new ( Context ) ErrorExpr ( ErrorRange , <nl> + Type ( ) , <nl> + Result . get ( ) ) ) ; <nl> continue ; <nl> } <nl> <nl> ParserResult < Expr > Parser : : parseExprPrimary ( Diag < > ID , bool isExprBasic ) { <nl> <nl> Name = parseDeclNameRef ( NameLoc , diag : : expected_identifier_after_dot_expr , <nl> DeclNameFlag : : AllowKeywords | DeclNameFlag : : AllowCompoundNames ) ; <nl> - if ( ! Name ) return nullptr ; <nl> + if ( ! 
Name ) <nl> + return makeParserErrorResult ( new ( Context ) ErrorExpr ( DotLoc ) ) ; <nl> SyntaxContext - > createNodeInPlace ( SyntaxKind : : MemberAccessExpr ) ; <nl> <nl> / / Check for a ( ) suffix , which indicates a call when constructing <nl> mmm a / lib / Parse / ParseStmt . cpp <nl> ppp b / lib / Parse / ParseStmt . cpp <nl> ParserStatus Parser : : parseBraceItems ( SmallVectorImpl < ASTNode > & Entries , <nl> if ( ! Result . isNull ( ) ) { <nl> / / NOTE : this is a ' virtual ' brace statement which does not have <nl> / / explicit ' { ' or ' } ' , so the start and end locations should be <nl> - / / the same as those of the result node <nl> + / / the same as those of the result node , plus any junk consumed <nl> + / / afterwards <nl> auto Brace = BraceStmt : : create ( Context , Result . getStartLoc ( ) , <nl> - Result , Result . getEndLoc ( ) ) ; <nl> + Result , PreviousLoc , / * Implicit = * / true ) ; <nl> TLCD - > setBody ( Brace ) ; <nl> Entries . push_back ( TLCD ) ; <nl> <nl> ParserResult < PoundAvailableInfo > Parser : : parseStmtConditionPoundAvailable ( ) { <nl> diag : : avail_query_expected_rparen , LParenLoc ) ) <nl> Status . setIsParseError ( ) ; <nl> <nl> - auto * result = PoundAvailableInfo : : create ( Context , PoundLoc , Specs , RParenLoc ) ; <nl> + auto * result = PoundAvailableInfo : : create ( Context , PoundLoc , LParenLoc , Specs , <nl> + RParenLoc ) ; <nl> return makeParserResult ( Status , result ) ; <nl> } <nl> <nl> ParserResult < Stmt > Parser : : parseStmtForEach ( LabeledStmtInfo LabelInfo ) { <nl> <nl> / / Parse the ' where ' expression if present . <nl> ParserResult < Expr > Where ; <nl> + SourceLoc WhereLoc ; <nl> if ( Tok . is ( tok : : kw_where ) ) { <nl> SyntaxParsingContext WhereClauseCtxt ( SyntaxContext , <nl> SyntaxKind : : WhereClause ) ; <nl> - consumeToken ( ) ; <nl> + WhereLoc = consumeToken ( ) ; <nl> Where = parseExprBasic ( diag : : expected_foreach_where_expr ) ; <nl> if ( Where . isNull ( ) ) <nl> Where = makeParserErrorResult ( new ( Context ) ErrorExpr ( Tok . getLoc ( ) ) ) ; <nl> ParserResult < Stmt > Parser : : parseStmtForEach ( LabeledStmtInfo LabelInfo ) { <nl> return makeParserResult ( <nl> Status , <nl> new ( Context ) ForEachStmt ( LabelInfo , ForLoc , pattern . get ( ) , InLoc , <nl> - Container . get ( ) , Where . getPtrOrNull ( ) , <nl> + Container . get ( ) , WhereLoc , Where . getPtrOrNull ( ) , <nl> Body . get ( ) ) ) ; <nl> } <nl> <nl> mmm a / lib / Parse / Parser . cpp <nl> ppp b / lib / Parse / Parser . cpp <nl> Parser : : parseList ( tok RightK , SourceLoc LeftLoc , SourceLoc & RightLoc , <nl> <nl> if ( Status . isError ( ) ) { <nl> / / If we ' ve already got errors , don ' t emit missing RightK diagnostics . <nl> - RightLoc = <nl> - Tok . is ( RightK ) ? consumeToken ( ) : getLocForMissingMatchingToken ( ) ; <nl> + if ( Tok . is ( RightK ) ) { <nl> + RightLoc = consumeToken ( ) ; <nl> + / / Don ' t propagate the error because we have recovered . <nl> + if ( ! Status . hasCodeCompletion ( ) ) <nl> + Status = makeParserSuccess ( ) ; <nl> + } else { <nl> + RightLoc = getLocForMissingMatchingToken ( ) ; <nl> + } <nl> } else if ( parseMatchingToken ( RightK , RightLoc , ErrorDiag , LeftLoc ) ) { <nl> Status . setIsParseError ( ) ; <nl> } <nl> mmm a / lib / ParseSIL / ParseSIL . cpp <nl> ppp b / lib / ParseSIL / ParseSIL . cpp <nl> bool SILParser : : parseSILBBArgsAtBranch ( SmallVector < SILValue , 6 > & Args , <nl> SourceLoc LParenLoc = P . 
consumeToken ( tok : : l_paren ) ; <nl> SourceLoc RParenLoc ; <nl> <nl> + bool HasError = false ; <nl> if ( P . parseList ( tok : : r_paren , LParenLoc , RParenLoc , <nl> / * AllowSepAfterLast = * / false , <nl> diag : : sil_basicblock_arg_rparen , <nl> bool SILParser : : parseSILBBArgsAtBranch ( SmallVector < SILValue , 6 > & Args , <nl> [ & ] ( ) - > ParserStatus { <nl> SILValue Arg ; <nl> SourceLoc ArgLoc ; <nl> - if ( parseTypedValueRef ( Arg , ArgLoc , B ) ) <nl> + if ( parseTypedValueRef ( Arg , ArgLoc , B ) ) { <nl> + HasError = true ; <nl> return makeParserError ( ) ; <nl> + } <nl> Args . push_back ( Arg ) ; <nl> return makeParserSuccess ( ) ; <nl> - } ) . isError ( ) ) <nl> + } ) . isError ( ) | | HasError ) <nl> return true ; <nl> } <nl> return false ; <nl> mmm a / lib / Sema / DerivedConformanceRawRepresentable . cpp <nl> ppp b / lib / Sema / DerivedConformanceRawRepresentable . cpp <nl> struct RuntimeVersionCheck { <nl> <nl> / / availableInfo = " # available ( \ ( platformSpec ) , \ ( otherSpec ) ) " <nl> auto availableInfo = PoundAvailableInfo : : create ( <nl> - C , SourceLoc ( ) , { platformSpec , otherSpec } , SourceLoc ( ) ) ; <nl> + C , SourceLoc ( ) , SourceLoc ( ) , { platformSpec , otherSpec } , SourceLoc ( ) ) ; <nl> <nl> / / This won ' t be filled in by TypeCheckAvailability because we have <nl> / / invalid SourceLocs in this area of the AST . <nl> mmm a / test / SourceKit / CodeFormat / indent - bracestmt2 . swift <nl> ppp b / test / SourceKit / CodeFormat / indent - bracestmt2 . swift <nl> class Foo <nl> / / CHECK : key . sourcetext : " func foo ( ) " <nl> / / CHECK : key . sourcetext : " { " <nl> / / CHECK : key . sourcetext : " bar ( ) " <nl> - / / CHECK : key . sourcetext : " { " <nl> - / / CHECK : key . sourcetext : " } " <nl> + <nl> + / / bar ( ) " <nl> + / / CHECK : key . sourcetext : " { " <nl> / / CHECK : key . sourcetext : " } " <nl> + <nl> + / / func foo ( ) " <nl> + / / { " <nl> + / / . . . " <nl> + / / CHECK : key . sourcetext : " } " <nl> + <nl> + / / class Foo " <nl> + / / . . . " <nl> / / CHECK : key . sourcetext : " } " <nl> new file mode 100644 <nl> index 000000000000 . . 3a5d448476dd <nl> mmm / dev / null <nl> ppp b / test / SourceKit / CodeFormat / indent - chained . swift <nl> <nl> + func first ( ) { <nl> + foo <nl> + . bar { <nl> + } <nl> + . tug <nl> + <nl> + baz <nl> + . bop <nl> + . <nl> + } <nl> + <nl> + qux <nl> + . wop <nl> + . <nl> + ; <nl> + <nl> + func second ( ) { <nl> + baz <nl> + . bop ( ) <nl> + . <nl> + } <nl> + <nl> + func third ( ) { <nl> + baz { <nl> + <nl> + } <nl> + . <nl> + } <nl> + <nl> + func fourth ( ) { <nl> + baz ( ) { <nl> + <nl> + } <nl> + . <nl> + } <nl> + <nl> + func fifth ( ) { <nl> + baz <nl> + . bar { } <nl> + . <nl> + } <nl> + <nl> + func sixth ( ) { <nl> + baz <nl> + . bar ( <nl> + ) { <nl> + <nl> + } <nl> + . <nl> + } <nl> + <nl> + <nl> + / / RUN : % sourcekitd - test - req = format - line = 4 - length = 1 % s > % t . response <nl> + / / RUN : % sourcekitd - test - req = format - line = 5 - length = 1 % s > > % t . response <nl> + / / RUN : % sourcekitd - test - req = format - line = 9 - length = 1 % s > > % t . response <nl> + / / RUN : % sourcekitd - test - req = format - line = 14 - length = 1 % s > > % t . response <nl> + / / RUN : % sourcekitd - test - req = format - line = 20 - length = 1 % s > > % t . response <nl> + / / RUN : % sourcekitd - test - req = format - line = 27 - length = 1 % s > > % t . response <nl> + / / RUN : % sourcekitd - test - req = format - line = 34 - length = 1 % s > > % t . 
response <nl> + / / RUN : % sourcekitd - test - req = format - line = 40 - length = 1 % s > > % t . response <nl> + / / RUN : % sourcekitd - test - req = format - line = 49 - length = 1 % s > > % t . response <nl> + / / RUN : % FileCheck - - strict - whitespace % s < % t . response <nl> + / / <nl> + / / " foo " <nl> + / / " . bar { " <nl> + / / CHECK : key . sourcetext : " } " <nl> + / / <nl> + / / " foo " <nl> + / / " . bar { " <nl> + / / " } <nl> + / / CHECK : key . sourcetext : " . tug " <nl> + / / <nl> + / / " baz " <nl> + / / " . bop " <nl> + / / CHECK : key . sourcetext : " . " <nl> + / / <nl> + / / " qux " <nl> + / / " . wop " <nl> + / / CHECK : key . sourcetext : " . " <nl> + / / <nl> + / / " baz " <nl> + / / " . bop ( ) " <nl> + / / CHECK : key . sourcetext : " . " <nl> + / / <nl> + / / " baz { " <nl> + / / " } " <nl> + / / CHECK : key . sourcetext : " . " <nl> + / / <nl> + / / " baz " <nl> + / / " . bar { } " <nl> + / / CHECK : key . sourcetext : " . " <nl> + / / <nl> + / / " baz <nl> + / / " . bar ( " <nl> + / / " ) { " <nl> + / / " } " <nl> + / / CHECK : key . sourcetext : " . " <nl> mmm a / test / SourceKit / CodeFormat / indent - closure . swift <nl> ppp b / test / SourceKit / CodeFormat / indent - closure . swift <nl> func foo ( ) { <nl> } <nl> <nl> class C { <nl> - private static let durationTimeFormatter : NSDateComponentsFormatter = { <nl> - return timeFormatter <nl> + private static let durationTimeFormatter : NSDateComponentsFormatter = { <nl> + return timeFormatter <nl> } ( ) <nl> } <nl> <nl> func foo1 ( a : Int , handler : ( ) - > ( ) ) { } <nl> func foo2 ( handler : ( ) - > ( ) ) { } <nl> <nl> func foo3 ( ) { <nl> - foo1 ( 1 ) <nl> - { <nl> - } <nl> + foo1 ( 1 ) <nl> + { <nl> + } <nl> } <nl> <nl> func foo4 ( ) { <nl> func foo5 ( input : Int , block : ( Int ) - > ( ) ) - > Int { <nl> } <nl> <nl> func foo6 ( ) { <nl> - _ = foo5 ( input : 0 , block : { [ unowned self ] blockInput in <nl> - foo4 ( ) <nl> - } ) <nl> + _ = foo5 ( input : 0 , block : { [ unowned self ] blockInput in <nl> + foo4 ( ) <nl> + } ) <nl> } <nl> <nl> func foo7 ( A : ( ) - > ( ) , B : ( ) - > ( ) ) { } <nl> <nl> func foo8 ( ) { <nl> - foo7 ( A : { _ in <nl> - print ( " hello " ) <nl> - } , B : { <nl> - print ( " world " ) <nl> - } ) <nl> + foo7 ( A : { _ in <nl> + print ( " hello " ) <nl> + } , B : { <nl> + print ( " world " ) <nl> + } ) <nl> } <nl> <nl> func foo9 ( input : [ Int ] ) { <nl> func foo10 ( ) { <nl> func foo11 ( ) { <nl> VStack { <nl> } <nl> - . onAppear { <nl> + . onAppear { <nl> } <nl> } <nl> <nl> func foo12 ( ) { <nl> / / CHECK : key . sourcetext : " } ( ) " <nl> / / CHECK : key . sourcetext : " } " <nl> <nl> - / / " private static let durationTimeFormatter : NSDateComponentsFormatter = { " <nl> - / / CHECK : key . sourcetext : " } ( ) " <nl> - / / " foo1 ( 1 ) " <nl> - / / CHECK : key . sourcetext : " { " <nl> + / / " private static let durationTimeFormatter : NSDateComponentsFormatter = { " <nl> + / / CHECK : key . sourcetext : " } ( ) " <nl> + / / " foo1 ( 1 ) " <nl> + / / CHECK : key . sourcetext : " { " <nl> <nl> / / CHECK : key . sourcetext : " test = { " <nl> / / CHECK : key . sourcetext : " return 0 " <nl> func foo12 ( ) { <nl> / / CHECK : key . sourcetext : " return 0 " <nl> / / CHECK : key . sourcetext : " } ( ) " <nl> <nl> - / / CHECK : key . sourcetext : " } ) " <nl> + / / CHECK : key . sourcetext : " } ) " <nl> <nl> - / / CHECK : key . sourcetext : " } , B : { " <nl> + / / CHECK : key . sourcetext : " } , B : { " <nl> <nl> / / CHECK : key . 
sourcetext : " func foo9 ( input : [ Int ] ) { " <nl> / / CHECK : key . sourcetext : " input . map { ( ele ) in " <nl> func foo12 ( ) { <nl> / / CHECK : key . sourcetext : " . onAppear1 ( ) " <nl> / / CHECK : key . sourcetext : " . onAppear2 ( ) { } " <nl> / / CHECK : key . sourcetext : " . onAppear3 ( ) { " <nl> - / / CHECK : key . sourcetext : " } " <nl> + / / CHECK : key . sourcetext : " } " <nl> / / CHECK : key . sourcetext : " . onAppear4 ( ) { } " <nl> mmm a / test / SourceKit / CodeFormat / indent - computed - property . swift <nl> ppp b / test / SourceKit / CodeFormat / indent - computed - property . swift <nl> print ( ) <nl> } <nl> <nl> class C3 { <nl> - var value : Int <nl> - { } <nl> + var value : Int <nl> + { } <nl> } <nl> / / RUN : % sourcekitd - test - req = format - line = 1 - length = 1 % s > % t . response <nl> / / RUN : % sourcekitd - test - req = format - line = 2 - length = 1 % s > > % t . response <nl> class C3 { <nl> / / CHECK : key . sourcetext : " didSet { " <nl> " didSet { " <nl> / / CHECK : key . sourcetext : " print ( ) " <nl> - / / CHECK : key . sourcetext : " { } " <nl> + / / CHECK : key . sourcetext : " { } " <nl> mmm a / test / SourceKit / CodeFormat / indent - if . swift <nl> ppp b / test / SourceKit / CodeFormat / indent - if . swift <nl> if condition , <nl> <nl> / / RUN : % FileCheck - - strict - whitespace % s < % t . response <nl> <nl> - / / CHECK : key . sourcetext : " ! condition , " <nl> - / / CHECK : key . sourcetext : " condition , " <nl> - / / CHECK : key . sourcetext : " condition , " <nl> + / / CHECK : key . sourcetext : " ! condition , " <nl> + / / CHECK : key . sourcetext : " condition , " <nl> + / / CHECK : key . sourcetext : " condition , " <nl> / / CHECK : key . sourcetext : " ! condition , " <nl> / / CHECK : key . sourcetext : " condition { " <nl> similarity index 96 % <nl> rename from test / SourceKit / CodeFormat / indent - sibling . swift <nl> rename to test / SourceKit / CodeFormat / indent - list - exact . swift <nl> mmm a / test / SourceKit / CodeFormat / indent - sibling . swift <nl> ppp b / test / SourceKit / CodeFormat / indent - list - exact . swift <nl> class Foo2 { <nl> / / CHECK : key . sourcetext : " 5 ) " <nl> <nl> / / " func foo6 < T1 : Testable , " <nl> - / / CHECK : key . sourcetext : " T2 : Testable , " <nl> + / / CHECK : key . sourcetext : " T2 : Testable , " <nl> <nl> / / " T2 : Testable , " <nl> - / / CHECK : key . sourcetext : " T3 : Testable , " <nl> + / / CHECK : key . sourcetext : " T3 : Testable , " <nl> <nl> / / " T3 : Testable , " <nl> - / / CHECK : key . sourcetext : " T4 : where T4 : Testable > ( t1 : T1 , t2 : T2 , t3 : T2 ) { } " <nl> + / / CHECK : key . sourcetext : " T4 : where T4 : Testable > ( t1 : T1 , t2 : T2 , t3 : T2 ) { } " <nl> <nl> / / " func foo7 ( i1 : Int , i2 : Int , " <nl> / / CHECK : key . sourcetext : " i3 : Int , i4 : Int , " <nl> similarity index 100 % <nl> rename from test / SourceKit / CodeFormat / indent - sibling2 . swift <nl> rename to test / SourceKit / CodeFormat / indent - list - exact2 . swift <nl> mmm a / test / SourceKit / CodeFormat / indent - multiline - string . swift <nl> ppp b / test / SourceKit / CodeFormat / indent - multiline - string . swift <nl> this is line1 , <nl> / / CHECK : key . sourcetext : " this is line1 , " <nl> / / CHECK : key . sourcetext : " this is line2 , " <nl> / / CHECK : key . sourcetext : " \ " \ " \ " " <nl> - / / CHECK : key . sourcetext : " \ " content \ " " <nl> + / / CHECK : key . 
sourcetext : " \ " content \ " " <nl> new file mode 100644 <nl> index 000000000000 . . c473d4fc4826 <nl> mmm / dev / null <nl> ppp b / test / SourceKit / CodeFormat / indent - trailing / trailing - catch . swift <nl> <nl> + func foo ( x : Bool ) { <nl> + do { } catch { <nl> + <nl> + / / RUN : % sourcekitd - test - req = format - line = 3 - length = 1 % s | % FileCheck - - strict - whitespace % s <nl> + / / CHECK : key . sourcetext : " " <nl> new file mode 100644 <nl> index 000000000000 . . 0c856b87115c <nl> mmm / dev / null <nl> ppp b / test / SourceKit / CodeFormat / indent - trailing / trailing - class . swift <nl> <nl> + class Foo { <nl> + <nl> + / / RUN : % sourcekitd - test - req = format - line = 2 - length = 1 % s | % FileCheck - - strict - whitespace % s <nl> + / / CHECK : key . sourcetext : " " <nl> new file mode 100644 <nl> index 000000000000 . . 05c3cf67ba67 <nl> mmm / dev / null <nl> ppp b / test / SourceKit / CodeFormat / indent - trailing / trailing - closure - in . swift <nl> <nl> + func foo ( ) { <nl> + foo { ( <nl> + x , <nl> + y <nl> + ) in <nl> + <nl> + / / RUN : % sourcekitd - test - req = format - line = 6 - length = 1 % s | % FileCheck - - strict - whitespace % s <nl> + / / CHECK : key . sourcetext : " " <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 000000000000 . . bf2f749f2f2b <nl> mmm / dev / null <nl> ppp b / test / SourceKit / CodeFormat / indent - trailing / trailing - closure . swift <nl> <nl> + func foo ( ) { <nl> + bar <nl> + . fooooooooooo ( first : 3 , <nl> + second ) [ x : 10 ] { <nl> + <nl> + / / RUN : % sourcekitd - test - req = format - line = 5 - length = 1 % s | % FileCheck - - strict - whitespace % s <nl> + / / CHECK : key . sourcetext : " " <nl> new file mode 100644 <nl> index 000000000000 . . f755dfe6ad86 <nl> mmm / dev / null <nl> ppp b / test / SourceKit / CodeFormat / indent - trailing / trailing - deinit . swift <nl> <nl> + class Foo { <nl> + deinit { <nl> + <nl> + / / RUN : % sourcekitd - test - req = format - line = 3 - length = 1 % s | % FileCheck - - strict - whitespace % s <nl> + / / CHECK : key . sourcetext : " " <nl> new file mode 100644 <nl> index 000000000000 . . 1dac241b4a27 <nl> mmm / dev / null <nl> ppp b / test / SourceKit / CodeFormat / indent - trailing / trailing - do . swift <nl> <nl> + func foo ( ) { <nl> + do { <nl> + <nl> + / / RUN : % sourcekitd - test - req = format - line = 3 - length = 1 % s | % FileCheck - - strict - whitespace % s <nl> + / / CHECK : key . sourcetext : " " <nl> new file mode 100644 <nl> index 000000000000 . . 5de9ca2cbc66 <nl> mmm / dev / null <nl> ppp b / test / SourceKit / CodeFormat / indent - trailing / trailing - else . swift <nl> <nl> + func foo ( x : Bool ) { <nl> + if x { } <nl> + else { <nl> + <nl> + / / RUN : % sourcekitd - test - req = format - line = 4 - length = 1 % s | % FileCheck - - strict - whitespace % s <nl> + / / CHECK : key . sourcetext : " " <nl> new file mode 100644 <nl> index 000000000000 . . a247d44f4542 <nl> mmm / dev / null <nl> ppp b / test / SourceKit / CodeFormat / indent - trailing / trailing - extension . swift <nl> <nl> + extension Foo { <nl> + <nl> + / / RUN : % sourcekitd - test - req = format - line = 2 - length = 1 % s | % FileCheck - - strict - whitespace % s <nl> + / / CHECK : key . sourcetext : " " <nl> new file mode 100644 <nl> index 000000000000 . . edd804d9bffb <nl> mmm / dev / null <nl> ppp b / test / SourceKit / CodeFormat / indent - trailing / trailing - for . 
swift <nl> <nl> + func foo < T > ( x : Collection < T > ) { <nl> + for i in x { <nl> + <nl> + / / RUN : % sourcekitd - test - req = format - line = 3 - length = 1 % s | % FileCheck - - strict - whitespace % s <nl> + / / CHECK : key . sourcetext : " " <nl> new file mode 100644 <nl> index 000000000000 . . ff19d29c28aa <nl> mmm / dev / null <nl> ppp b / test / SourceKit / CodeFormat / indent - trailing / trailing - func . swift <nl> <nl> + func foo ( ) - > Int { <nl> + <nl> + / / RUN : % sourcekitd - test - req = format - line = 2 - length = 1 % s | % FileCheck - - strict - whitespace % s <nl> + / / CHECK : key . sourcetext : " " <nl> new file mode 100644 <nl> index 000000000000 . . 8054475b0dbb <nl> mmm / dev / null <nl> ppp b / test / SourceKit / CodeFormat / indent - trailing / trailing - guard - condition . swift <nl> <nl> + func foo ( x : Bool ) { <nl> + guard let number = patterns . index ( where : { <nl> + <nl> + / / RUN : % sourcekitd - test - req = format - line = 3 - length = 1 % s | % FileCheck - - strict - whitespace % s <nl> + / / CHECK : key . sourcetext : " " <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 000000000000 . . 9f774103934e <nl> mmm / dev / null <nl> ppp b / test / SourceKit / CodeFormat / indent - trailing / trailing - guard . swift <nl> <nl> + func foo ( x : Bool ) { <nl> + guard x else { <nl> + <nl> + <nl> + / / RUN : % sourcekitd - test - req = format - line = 3 - length = 1 % s | % FileCheck - - strict - whitespace % s <nl> + / / CHECK : key . sourcetext : " " <nl> new file mode 100644 <nl> index 000000000000 . . ac98b3a6e572 <nl> mmm / dev / null <nl> ppp b / test / SourceKit / CodeFormat / indent - trailing / trailing - if . swift <nl> <nl> + func foo ( x : Bool ) { <nl> + if x { <nl> + <nl> + <nl> + / / RUN : % sourcekitd - test - req = format - line = 3 - length = 1 % s | % FileCheck - - strict - whitespace % s <nl> + / / CHECK : key . sourcetext : " " <nl> new file mode 100644 <nl> index 000000000000 . . 496ee4f1d648 <nl> mmm / dev / null <nl> ppp b / test / SourceKit / CodeFormat / indent - trailing / trailing - init . swift <nl> <nl> + class Foo { <nl> + init ( ) { <nl> + <nl> + / / RUN : % sourcekitd - test - req = format - line = 3 - length = 1 % s | % FileCheck - - strict - whitespace % s <nl> + / / CHECK : key . sourcetext : " " <nl> new file mode 100644 <nl> index 000000000000 . . a13bd8013137 <nl> mmm / dev / null <nl> ppp b / test / SourceKit / CodeFormat / indent - trailing / trailing - precedence . swift <nl> <nl> + precedencegroup Foo { <nl> + <nl> + / / RUN : % sourcekitd - test - req = format - line = 2 - length = 1 % s | % FileCheck - - strict - whitespace % s <nl> + / / CHECK : key . sourcetext : " " <nl> new file mode 100644 <nl> index 000000000000 . . 799e20daaa8e <nl> mmm / dev / null <nl> ppp b / test / SourceKit / CodeFormat / indent - trailing / trailing - protocol . swift <nl> <nl> + protocol Foo { <nl> + <nl> + / / RUN : % sourcekitd - test - req = format - line = 2 - length = 1 % s | % FileCheck - - strict - whitespace % s <nl> + / / CHECK : key . sourcetext : " " <nl> new file mode 100644 <nl> index 000000000000 . . f49058e4a940 <nl> mmm / dev / null <nl> ppp b / test / SourceKit / CodeFormat / indent - trailing / trailing - repeat . swift <nl> <nl> + func foo ( ) { <nl> + repeat { <nl> + <nl> + / / RUN : % sourcekitd - test - req = format - line = 3 - length = 1 % s | % FileCheck - - strict - whitespace % s <nl> + / / CHECK : key . sourcetext : " " <nl> new file mode 100644 <nl> index 000000000000 . . 
f8f70064cfdf <nl> mmm / dev / null <nl> ppp b / test / SourceKit / CodeFormat / indent - trailing / trailing - string - element . swift <nl> <nl> + let foo = Bar . Stuff ( <nl> + first : path , <nl> + description : " No \ ( thing ) was found at path \ ( path ) " <nl> + <nl> + / / RUN : % sourcekitd - test - req = format - line = 4 - length = 1 % s | % FileCheck - - strict - whitespace % s <nl> + / / CHECK : key . sourcetext : " " <nl> new file mode 100644 <nl> index 000000000000 . . 8245bf934d07 <nl> mmm / dev / null <nl> ppp b / test / SourceKit / CodeFormat / indent - trailing / trailing - struct . swift <nl> <nl> + struct Foo { <nl> + <nl> + / / RUN : % sourcekitd - test - req = format - line = 2 - length = 1 % s | % FileCheck - - strict - whitespace % s <nl> + / / CHECK : key . sourcetext : " " <nl> new file mode 100644 <nl> index 000000000000 . . fec7c8aeb048 <nl> mmm / dev / null <nl> ppp b / test / SourceKit / CodeFormat / indent - trailing / trailing - while . swift <nl> <nl> + func foo ( x : Bool ) { <nl> + while x { <nl> + <nl> + / / RUN : % sourcekitd - test - req = format - line = 3 - length = 1 % s | % FileCheck - - strict - whitespace % s <nl> + / / CHECK : key . sourcetext : " " <nl> mmm a / test / SourceKit / CodeFormat / rdar_32789463 . swift <nl> ppp b / test / SourceKit / CodeFormat / rdar_32789463 . swift <nl> $ <nl> <nl> / / CHECK : key . sourcetext : " struct $ { " <nl> / / CHECK : key . sourcetext : " let $ : < # Type # > " <nl> - / / CHECK : key . sourcetext : " = foo ( \ " foo \ \ ( $ ) bar \ " ) { " <nl> - / / CHECK : key . sourcetext : " $ " <nl> + / / CHECK : key . sourcetext : " = foo ( \ " foo \ \ ( $ ) bar \ " ) { " <nl> + / / CHECK : key . sourcetext : " $ " <nl> mmm a / test / SourceKit / CursorInfo / cursor_info . swift <nl> ppp b / test / SourceKit / CursorInfo / cursor_info . swift <nl> enum E7 : String { <nl> / / RUN : % sourcekitd - test - req = cursor - pos = 228 : 10 % s - - - F % S / . . / Inputs / libIDE - mock - sdk - I % t . tmp % s | % FileCheck - check - prefix = CHECK93 % s <nl> / / CHECK93 : < Declaration > case b = & quot ; f & quot ; < / Declaration > <nl> / / CHECK93 - NEXT : < decl . enumelement > < syntaxtype . keyword > case < / syntaxtype . keyword > < decl . name > b < / decl . name > = < syntaxtype . string > & quot ; f & quot ; < / syntaxtype . string > < / decl . enumelement > <nl> + <nl> + / / RUN : % sourcekitd - test - req = cursor - pos = 227 : 14 % s - - - F % S / . . / Inputs / libIDE - mock - sdk - I % t . tmp % s | % FileCheck - check - prefix = CHECK94 % s <nl> + / / CHECK94 : < empty cursor info ; internal diagnostic : " Resolved to incomplete expression or statement . " > <nl> new file mode 100644 <nl> index 000000000000 . . 1384e34c4308 <nl> mmm / dev / null <nl> ppp b / test / swift - indent / basic . swift <nl> <nl> + / / RUN : % swift - indent % s > % t . response <nl> + / / RUN : diff - u % s % t . response <nl> + <nl> + <nl> + / / Trailing ' . ' s should be indented as if they were a dot member expression . <nl> + / / <nl> + HStack ( alignment : . center ) { <nl> + landmark . image <nl> + . <nl> + } <nl> + . <nl> + ; / / Make sure the trailing . doesn ' t use the next token as its member <nl> + / / identifier . <nl> + <nl> + <nl> + / / Parameters should align by their external argument names <nl> + / / <nl> + func foobar ( <nl> + record : Int ? , <nl> + forKeys keys : [ String ] = Foo . bar , <nl> + atIndex index : Int , <nl> + error : NSError ? 
) <nl> + { <nl> + } <nl> + <nl> + <nl> + / / Closures within arguments should align their closing brace to match the <nl> + / / argument indentation <nl> + / / <nl> + Base . query ( <nl> + predicate : foo , <nl> + shouldShowMessage : { ( ) - > Bool in <nl> + fatalError ( ) <nl> + } , <nl> + retryTarget : foo , <nl> + retrySelector : foo , <nl> + networkUnavailable : { <nl> + fatalError ( ) <nl> + } , <nl> + unknownError : { <nl> + fatalError ( ) <nl> + } <nl> + ) <nl> + <nl> + / / Arguments that span multiple lines should indent relative to the argument start . <nl> + / / FIXME : Should unlabelled args following a labelled arg be indented relative <nl> + / / to it ? <nl> + / / <nl> + let children = KeyValuePairs < String , Any > ( dictionaryLiteral : <nl> + ( " something " , " Oh Hello ! " ) , <nl> + ( " something " , " Oh Hello ! " ) , <nl> + ( " something " , " Oh Hello ! " ) <nl> + ) <nl> + <nl> + <nl> + / / Trailing closure content should indent relative to the called function , <nl> + / / rather than starting brace . <nl> + / / <nl> + test ( arg1 : 1 , <nl> + arg2 : 2 ) { x in <nl> + print ( x ) <nl> + } <nl> + <nl> + test ( arg1 : 1 , <nl> + arg2 : 2 ) <nl> + { x in <nl> + print ( x ) <nl> + } <nl> + <nl> + let x = [ 1 , 2 , 3 ] <nl> + . filter { $ 0 < $ 1 } <nl> + . filter { $ 0 < $ 1 } <nl> + . filter { $ 0 < $ 1 } <nl> + <nl> + bax ( 34949494949 ) <nl> + . foo ( a : Int , <nl> + b : Int ) { <nl> + } [ x : { <nl> + fatalError ( ) <nl> + } ] { <nl> + fatalError ( ) <nl> + } <nl> + . baz <nl> + <nl> + <nl> + / / Condition elements should align with each other . <nl> + / / <nl> + guard let x = Optional . some ( 10 ) , x > 100 , <nl> + let y = Optional . some ( 20 ) , y < 50 , <nl> + # available ( OSX 10 . 9 , iOS 7 . 0 , * ) , <nl> + x < y <nl> + else { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + if let x = Optional . some ( 10 ) , x > 100 , <nl> + let y = Optional . some ( 20 ) , y < 50 , <nl> + # available ( OSX 10 . 9 , iOS 7 . 0 , * ) , <nl> + x < y <nl> + { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + guard # available ( <nl> + OSX 10 . 9 , iOS 7 . 0 , * <nl> + ) , x < y else { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + if # available ( <nl> + OSX 10 . 9 , iOS 7 . 0 , * <nl> + ) , x < y { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + / / Trailing closures , subscript expressions and argument tuples / parens should <nl> + / / indent relative to the line containing the start of the last named component <nl> + / / of their function , or the function start if their are none . Any child <nl> + / / tuples / parens / brackets should that start after them on the same line should <nl> + / / do the same . <nl> + / / <nl> + let _ = [ ] <nl> + . map { <nl> + f { <nl> + print ( ) <nl> + } ? ? 0 <nl> + } <nl> + <nl> + basename <nl> + . foo ( a : Int , <nl> + b : Int ) [ x : { <nl> + fatalError ( ) <nl> + } ] { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + basename <nl> + . foo ( a : Int , <nl> + b : Int ) ( { <nl> + fatalError ( ) <nl> + } ) { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + basename <nl> + . 
foo ( ) <nl> + { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + [ foo ( a : Int , <nl> + b : Int ) [ z : { <nl> + fatalError ( ) <nl> + } ] , <nl> + " hello " <nl> + ] <nl> + <nl> + [ foo ( a : Int , <nl> + b : Int ) [ z : foo ( { <nl> + fatalError ( ) <nl> + } , { <nl> + fatalError ( ) <nl> + } ) ] , <nl> + " hello " <nl> + ] <nl> + <nl> + [ foo ( a : Int , <nl> + b : Int ) { <nl> + fatalError ( ) <nl> + } [ y : 10 , <nl> + z : foo ( { <nl> + fatalError ( ) <nl> + } , { <nl> + fatalError ( ) <nl> + } ) ] , <nl> + " hello " <nl> + ] <nl> + <nl> + [ foo ( a : Int , <nl> + b : Int ) [ z : foo ( ) <nl> + { <nl> + fatalError ( ) <nl> + } ) ] , <nl> + " hello " <nl> + ] <nl> + <nl> + [ foo ( a : Int , <nl> + b : Int ) [ z : foo ( a : 1 , <nl> + b : 2 ) <nl> + { <nl> + fatalError ( ) <nl> + } ) ] , <nl> + " hello " <nl> + ] <nl> + <nl> + <nl> + / / Closing arg parens shouldn ' t indent . <nl> + / / <nl> + func foo ( <nl> + bar : BarParameterType , <nl> + baz : BazParameterType <nl> + ) - > SomeResultType { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + <nl> + / / Sequence expressions should indent realtive to their first element . Leading <nl> + / / " = " should be considered part of the sequence when present . <nl> + / / <nl> + let arrayA = [ 0 ] <nl> + let arrayB = [ 1 ] <nl> + let arrayC = [ 2 ] <nl> + let arrayD = [ 3 ] <nl> + <nl> + let array1 = <nl> + arrayA + <nl> + arrayB + <nl> + arrayC + <nl> + arrayD <nl> + <nl> + array1 = <nl> + arrayA + <nl> + arrayB + <nl> + arrayC + <nl> + arrayD <nl> + <nl> + array1 = ( <nl> + 2 , 3 , 4 <nl> + ) . 0 + <nl> + arrayA + <nl> + arrayB + <nl> + arrayC + <nl> + arrayD <nl> + <nl> + let array2 = arrayA + <nl> + arrayB + <nl> + arrayC + <nl> + arrayD <nl> + <nl> + <nl> + / / Comments should not break exact alignment , and leading comments should be aligned , rather than the label . <nl> + / / <nl> + _ = NSLocalizedString ( " Call without a comment " , / / here is a comment <nl> + comment : " This indents correctly " ) / / here is a comment <nl> + _ = NSLocalizedString ( " Call with a comment " , <nl> + / / Here is a comment <nl> + comment : " Note the bad indentation " ) <nl> + _ = NSLocalizedString ( first : " Call with a comment " , <nl> + / * leading * / second : " foobarbaz " , <nl> + next : " Here is the next element " ) <nl> + <nl> + <nl> + / / String interpolations shouldn ' t break exact alignment . <nl> + / / <nl> + struct Foo { <nl> + let value1 : String <nl> + let value2 : String <nl> + let value3 : String <nl> + <nl> + static func makeFooList ( ) - > [ Foo ] { <nl> + let foo1 = Foo ( value1 : " Blue " , <nl> + value2 : " House " , <nl> + value3 : " Chicago " ) <nl> + let foo2 = Foo ( value1 : " Blue \ ( Date ( ) ) " , <nl> + value2 : " House " , <nl> + value3 : " Chicago " ) <nl> + let foo3 = Foo ( value1 : " Blue " , <nl> + value2 : " House \ ( Date ( ) ) " , <nl> + value3 : " Chicago " ) <nl> + let foo4 = Foo ( value1 : " Blue " , <nl> + value2 : " House " , <nl> + value3 : " Chicago \ ( Date ( ) ) " ) <nl> + let foo5 = Foo ( value1 : " Blue " + Date ( ) . description , <nl> + value2 : " House " , <nl> + value3 : " Chicago \ ( Date ( ) ) " ) <nl> + return [ foo1 , foo2 , foo3 , foo4 , foo5 ] <nl> + } <nl> + } <nl> + <nl> + <nl> + <nl> + / / Multiline string leading whitespace should be preserved . <nl> + / / <nl> + let s = " " " <nl> + a <nl> + b <nl> + c <nl> + " " " <nl> + <nl> + <nl> + / / Interpolations shouldn ' t change how multiline strings are handled . <nl> + / / <nl> + switch self { <nl> + case . 
first : <nl> + return " " " <nl> + foo foo foo foo foo foo foo foo foo foo foo foo foo foo foo foo foo foo foo \ <nl> + foo foo foo foo foo foo foo foo foo foo foo foo foo foo foo foo foo foo foo . <nl> + " " " <nl> + case . second ( let a , let b ) : <nl> + return " " " <nl> + foo foo foo foo foo foo foo foo foo foo foo foo foo foo foo foo \ ( bar . bar ) , \ <nl> + foo foo foo foo foo foo foo foo foo foo foo foo foo foo foo foo \ ( bar . bar ) . <nl> + " " " <nl> + } <nl> + <nl> + <nl> + / / Comments after the last item of a collection should still indent . <nl> + / / <nl> + let x = [ <nl> + / / hello <nl> + 124 , <nl> + / / hello <nl> + 123 <nl> + / / hello <nl> + ] <nl> + <nl> + <nl> + / / Pound directives aren ' t indentation contexts , though this should probably be <nl> + / / configurable . <nl> + / / <nl> + # if UNIT_TEST <nl> + var array : [ String ] = [ ] <nl> + # else <nl> + var array : [ String ] = { <nl> + return [ " one " , <nl> + " two " ] <nl> + } ( ) <nl> + # endif <nl> + <nl> + <nl> + / / Comments should not affect switch case indentations . <nl> + / / <nl> + switch false { <nl> + case true : <nl> + break / / a comment <nl> + case false : <nl> + break / / another comment <nl> + } <nl> + <nl> + / / The else in guard statements should not be indented . <nl> + / / <nl> + guard <nl> + let a = boo , <nl> + let b = foo , <nl> + let c = woo <nl> + else <nl> + { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + / / Trailing commas in collections expressions shouldn ' t cause the closing <nl> + / / bracket to indent . <nl> + / / <nl> + func a ( ) { <nl> + let b : [ String ] = [ <nl> + " x " , <nl> + ] <nl> + } <nl> + func a ( ) { <nl> + let b : [ String : Int ] = [ <nl> + " x " : 1 , <nl> + ] <nl> + } <nl> + <nl> + <nl> + / / Array / Dictionay / Tuple / Closure within arg list shouldn ' t cause their solo <nl> + / / closing tokens to indent . <nl> + / / <nl> + foo ( a : [ <nl> + " hi " : " b " <nl> + ] ) <nl> + foo ( a : [ <nl> + 456 <nl> + ] ) <nl> + AssertNoThrow ( { <nl> + let x = 1 <nl> + } ( ) ) <nl> + bar ( ( <nl> + x : 1 , <nl> + y : 2 <nl> + ) ) <nl> + <nl> + <nl> + / / Else on a new line shouldn ' t indent . <nl> + / / <nl> + if contains ( . Stuff ) { <nl> + foo ( ) <nl> + } else <nl> + if contains ( . Things ) { <nl> + bar ( ) <nl> + } else <nl> + if contains ( . Objects ) { <nl> + baz ( ) <nl> + } else <nl> + if contains ( . Concepts ) { <nl> + bill ( ) <nl> + } <nl> + <nl> + <nl> + / / Base classes , or conformed - to protocols should be indented at the same level . <nl> + / / <nl> + class Bar : <nl> + Foo , <nl> + Fizz , <nl> + Buzz { <nl> + func foo ( ) { } <nl> + } <nl> + <nl> + struct Bar : Boop , <nl> + Beep , <nl> + Bop { <nl> + func foo ( ) { } <nl> + } <nl> + <nl> + <nl> + / / Chaining after literals should not break their closing token ' s indentation . <nl> + / / <nl> + let array2 = [ <nl> + 42 , <nl> + 69 , <nl> + ] . map { $ 0 + 1 } <nl> + <nl> + let array3 = [ <nl> + 42 , <nl> + 69 , <nl> + ] . map { <nl> + $ 0 + 1 <nl> + } <nl> + <nl> + let array4 = [ <nl> + 42 , <nl> + 69 , <nl> + ] . map { <nl> + $ 0 . distance ( <nl> + to : - 1 <nl> + ) <nl> + } <nl> + <nl> + <nl> + / / Capture lists should not affect how the surrounding code is indented . <nl> + / / <nl> + foo ( first : { [ weak self ] _ in <nl> + print ( " " ) <nl> + } , second : { _ in <nl> + print ( " " ) <nl> + } ) <nl> + <nl> + <nl> + / / Comments between case statements aren ' t within them . <nl> + / / <nl> + switch foo { <nl> + case . bar : return <nl> + <nl> + / / a comment <nl> + case . 
baz : return <nl> + <nl> + / / a comment <nl> + / / on two lines <nl> + case . buz : return <nl> + } <nl> + <nl> + <nl> + / / Arrays within exactly - aligned lists should indent relative to the alignment column . <nl> + / / <nl> + private static let domainFooo = FoooodOntologyNode ( name : " food " , <nl> + childNodes : [ <nl> + first , <nl> + second , <nl> + third , <nl> + fourth , <nl> + Base . sixth <nl> + Base . seventh . two <nl> + ] , <nl> + isSpecial : false ) { } <nl> + <nl> + / / Incomplete member access shouldn ' t cause indentation to fail . <nl> + / / <nl> + [ " one " , <nl> + " two " ] <nl> + . foo { <nl> + <nl> + } ( <nl> + x : 10 , <nl> + y : 94 <nl> + ) . <nl> + ; / / Make sure later code isn ' t interpreted as the member name <nl> + <nl> + <nl> + / / Elements that span from the opening tuple line should prevent later elements indenting . <nl> + / / <nl> + baz ( foo ( a : 34 , <nl> + b : 56 ) { <nl> + print ( " hello " ) <nl> + } , <nl> + bar ) <nl> + <nl> + <nl> + / / Generic parameter list elements should be aligned . <nl> + / / <nl> + struct Foo < T : Equatable & <nl> + Hashable , <nl> + U : Identifiable , <nl> + V : Codable & Decodable > { <nl> + <nl> + } <nl> + <nl> + <nl> + / / Where clauses requirements should be aligned . <nl> + / / <nl> + struct Foo < T , U , V > where T : Equatable & Hashable , <nl> + U : Identifiable , <nl> + V : Codable & <nl> + Decodable { <nl> + <nl> + } <nl> + <nl> + / / Generic parameter list elements should be aligned . <nl> + / / <nl> + struct Foo < T : Equatable & Hashable , <nl> + U : Identifiable , <nl> + V : Codable & Decodable > where T : Equatable & Hashable , <nl> + U : Identifiable , <nl> + V : Codable & Decodable { <nl> + <nl> + } <nl> + <nl> + <nl> + / / The closing ] of array literals shouldn ' t indent regardless of whether a trailing member access is present or not . <nl> + / / <nl> + let array2 = [ <nl> + 42 , <nl> + 69 , <nl> + ] . map { $ 0 + 1 } <nl> + <nl> + <nl> + / / Make sure we handle the where clause on ' for ' correctly . <nl> + / / <nl> + for value in array where <nl> + value > 0 <nl> + { <nl> + print ( value ) <nl> + } <nl> + <nl> + for value in array where <nl> + value > 0 { <nl> + print ( value ) <nl> + } <nl> + <nl> + for value in array <nl> + where value > 0 { <nl> + print ( value ) <nl> + } <nl> + <nl> + for <nl> + value <nl> + in <nl> + array <nl> + where <nl> + value > 0 & & <nl> + value < 2 { <nl> + print ( value ) <nl> + } <nl> + <nl> + <nl> + for value in array <nl> + where <nl> + value > 0 & & <nl> + value < 2 { <nl> + print ( value ) <nl> + } <nl> + <nl> + for value in array where <nl> + value > 0 & & <nl> + value < 2 { <nl> + print ( value ) <nl> + } <nl> + <nl> + <nl> + / / Only " top - level " expressions should indent when split across multiple lines . 
<nl> + / / <nl> + func foo ( ) { <nl> + ( 1 + 2 + 4 ) <nl> + + otherTerm <nl> + <nl> + func inner ( a b : Int = ( 1 + 2 + 4 ) <nl> + + otherTerm ) { } <nl> + <nl> + func inner ( a b : Int = <nl> + ( 1 + 2 + 4 ) <nl> + + otherTerm ) { } <nl> + <nl> + if <nl> + ( 1 + 2 + 4 ) <nl> + + otherTerm > 10 , <nl> + ( 3 + 5 + 6 ) <nl> + + otherTerm < 30 { <nl> + print ( " foo " ) <nl> + } <nl> + <nl> + let x = ( <nl> + ( 1 + 2 + 4 ) <nl> + + otherTerm > 10 <nl> + ) <nl> + <nl> + let y = ( <nl> + ( 1 + 2 + 4 ) <nl> + + otherTerm > 10 , <nl> + label : <nl> + ( 1 + 2 + 4 ) <nl> + + otherTerm > 10 <nl> + ) <nl> + <nl> + foo { <nl> + ( 1 + 2 + 4 ) <nl> + + otherTerm <nl> + } <nl> + <nl> + return <nl> + ( 1 + 2 + 4 ) <nl> + + otherTerm <nl> + } <nl> + <nl> + <nl> + / / Dictionary element values should be indented relative to their key . <nl> + / / <nl> + _ = [ <nl> + " foo " : " bar " , <nl> + " foo " : <nl> + " bar " <nl> + ] <nl> + _ = [ <nl> + ( 1 , 2 ) : ( 1 , 2 ) , <nl> + ( 1 , 2 ) : <nl> + ( 1 , 2 ) , <nl> + ( 1 , 2 ) : ( <nl> + 1 , <nl> + 2 <nl> + ) , <nl> + ( <nl> + 1 , <nl> + 2 <nl> + ) : <nl> + ( 1 , 2 ) , <nl> + ( <nl> + 1 , <nl> + 2 <nl> + ) : ( <nl> + 1 , <nl> + 2 <nl> + ) , <nl> + ( 1 , 2 ) : <nl> + ( <nl> + 1 , <nl> + 2 <nl> + ) <nl> + ] <nl> + <nl> + <nl> + / / Closure params and capture list should be indented from the closure body , and their elements aligned . <nl> + / / <nl> + <nl> + _ = { <nl> + [ x , y ] ( a , b ) - > Int in <nl> + fatalError ( ) <nl> + } <nl> + <nl> + _ = { <nl> + ( a : Int , <nl> + b : Int ) - > Int in <nl> + fatalError ( ) <nl> + } <nl> + <nl> + _ = { <nl> + ( a : Int , <nl> + b : Int <nl> + ) - > Int in <nl> + fatalError ( ) <nl> + } <nl> + <nl> + _ = { <nl> + [ x , y ] a , <nl> + b - > Int in <nl> + fatalError ( ) <nl> + } <nl> + <nl> + _ = { <nl> + [ x , y ] ( a : ( <nl> + Int , Int <nl> + ) , <nl> + b ) - > Int in <nl> + fatalError ( ) <nl> + } <nl> + <nl> + _ = { [ x , y ] ( a : ( <nl> + Int , Int <nl> + ) , <nl> + b ) - > Int in <nl> + fatalError ( ) <nl> + } <nl> + <nl> + _ = { <nl> + [ x , <nl> + y , <nl> + ] a , <nl> + b - > Int in <nl> + fatalError ( ) <nl> + } <nl> + _ = { <nl> + [ weak myX = y . x , <nl> + unowned yourX = y . p ] ( ) in <nl> + fatalError ( ) <nl> + } <nl> + <nl> + _ = { [ <nl> + weak myX = y . x , <nl> + unowned yourX = y . p <nl> + ] ( ) in <nl> + fatalError ( ) <nl> + } <nl> + <nl> + _ = { [ <nl> + weak myX = <nl> + y . x , <nl> + unowned yourX = <nl> + y . p <nl> + ] ( ) in <nl> + fatalError ( ) <nl> + } <nl> + <nl> + _ = { [ <nl> + weak myX = <nl> + y . x , <nl> + unowned yourX = <nl> + y . p <nl> + ] ( ) in ( ( <nl> + 45 <nl> + ) ) } <nl> + <nl> + <nl> + / / Tuple types should have their elements aligned exactly . <nl> + / / <nl> + let x : ( <nl> + Int , <nl> + Int <nl> + ) ? = nil <nl> + <nl> + let x : ( <nl> + a : Int , <nl> + b : Int <nl> + ) ? = nil <nl> + <nl> + let x : ( <nl> + a : <nl> + Int , <nl> + b : <nl> + Int <nl> + ) ? = nil <nl> + <nl> + let x : ( Int , <nl> + Int ) ? = nil <nl> + <nl> + let x : ( a : Int , <nl> + b : Int ) ? = nil <nl> + <nl> + let x : ( a : <nl> + Int , <nl> + b : <nl> + Int ) ? = nil <nl> + <nl> + let x : ( Int , <nl> + Int <nl> + ) ? = nil <nl> + <nl> + let x : ( a : Int , <nl> + b : Int <nl> + ) ? = nil <nl> + <nl> + let x : ( a : <nl> + Int , <nl> + b : <nl> + Int <nl> + ) ? 
= nil <nl> + <nl> + <nl> + let myFunc : ( Int , <nl> + Int , <nl> + Int <nl> + ) - > Int <nl> + <nl> + let myFunc : ( ) - > ( Int , <nl> + Int , <nl> + Int ) <nl> + <nl> + <nl> + / / Tuple types should align on their outer label locs ( if present ) <nl> + / / <nl> + typealias Thing = ( _ tmpURL : URL , <nl> + _ secondURL : URL ) - > ( destinationURL : URL , options : Options ) <nl> + <nl> + / / Type member access shouldn ' t indent . <nl> + / / <nl> + func foo ( <nl> + a : Type <nl> + . Type , <nl> + b : Type <nl> + . Protocol <nl> + ) { } <nl> + <nl> + func foo ( <nl> + a : <nl> + Type <nl> + . Type , <nl> + b : <nl> + Type <nl> + . Protocol , <nl> + c : <nl> + [ [ <nl> + Type <nl> + . Type <nl> + ] : <nl> + String <nl> + ] <nl> + ) { } <nl> + <nl> + var ( d , e ) : <nl> + ( Int , Int ) = ( 1 , 3 ) , <nl> + ( f , g ) : ( <nl> + Int , <nl> + Int <nl> + ) = ( 1 , 3 ) , <nl> + ( h , i ) : <nl> + ( <nl> + Int , <nl> + Int <nl> + ) = ( 1 , 3 ) <nl> + <nl> + var ( d , <nl> + e <nl> + ) : <nl> + ( Int , Int ) = ( 1 , 3 ) , <nl> + ( f , <nl> + g <nl> + ) : ( <nl> + Int , <nl> + Int <nl> + ) = ( 1 , 3 ) <nl> + <nl> + / / Generic identifier types should have their arguments aligned . <nl> + / / <nl> + let x : Array < Int , <nl> + String , <nl> + Int > <nl> + <nl> + let x : Array < <nl> + Int , <nl> + String , <nl> + Int <nl> + > <nl> + <nl> + let x : Array < ( <nl> + Int , <nl> + String , <nl> + Int <nl> + ) > <nl> + <nl> + / / Generic specializations should have their arguments aligned too <nl> + / / <nl> + let x = foo < Int , <nl> + String , <nl> + Int > ( ) <nl> + <nl> + <nl> + / / Invalid elements should still be indented . <nl> + / / <nl> + let x : [ Int ? ] = [ <nl> + . <nl> + ] <nl> + <nl> + <nl> + / / Attributes on case stmts shouldn ' t indent their case <nl> + / / <nl> + switch Foo . c { <nl> + case . a : <nl> + break <nl> + case . b : <nl> + break ; <nl> + case . c : <nl> + break <nl> + @ unknown <nl> + default : <nl> + break <nl> + } <nl> + <nl> + <nl> + / / Handle empty single expression closures ( invalid start location ) <nl> + / / <nl> + foo . bar ( ) { <nl> + return <nl> + } <nl> + <nl> + <nl> + / / Handle invalid for each ( missing ' in ' location ) <nl> + / / <nl> + for query : foo <nl> + bar ; <nl> + <nl> + <nl> + / / Handle custom attributes <nl> + / / <nl> + @ Custom ( foo : 3 , <nl> + baz : 3 ) <nl> + struct Foo { <nl> + @ Custom ( <nl> + foo : Bar , <nl> + baz : Bar <nl> + ) <nl> + var d : Int = 10 <nl> + } <nl> + <nl> + / / Ignore postfix expressions when determining context locations . <nl> + / / <nl> + return Foo ( deferred ) { <nl> + print ( " hello " ) <nl> + } + + <nl> + <nl> + IncrementedFirst + + <nl> + . foo ( ) + + <nl> + . bar { <nl> + <nl> + } + + <nl> + . baz ( ) <nl> + <nl>
Merge pull request from nathawes / indentation
apple/swift
fcf95eae16490d976d8c1db57ae888de2913a581
2020-03-13T15:01:29Z
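The first hunk of the diff above propagates element-level parse failures out of a list callback: the list parser can report overall success even when parseTypedValueRef failed, so an extra HasError flag is captured by the lambda and checked afterwards. A minimal C++ sketch of that pattern follows; parseList and the token handling are hypothetical stand-ins, not the actual SILParser API.

#include <functional>
#include <vector>

// Hypothetical list parser: stops early when an element callback fails, but
// its return value only reflects whether the list framing parsed cleanly.
bool parseList(const std::vector<int> &tokens,
               const std::function<bool(int)> &parseElement) {
  for (int tok : tokens) {
    if (!parseElement(tok))
      break;            // element failed; the framing may still "succeed"
  }
  return true;          // list framing itself was fine
}

bool parseArgs(const std::vector<int> &tokens, std::vector<int> &args) {
  bool hasError = false;                 // captured by the element callback
  bool framingOk = parseList(tokens, [&](int tok) {
    if (tok < 0) {                       // stand-in for parseTypedValueRef failing
      hasError = true;
      return false;
    }
    args.push_back(tok);
    return true;
  });
  // Propagate element failures even when the framing reported success.
  return framingOk && !hasError;
}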
mmm a / swoole_runtime . cc <nl> ppp b / swoole_runtime . cc <nl> static php_stream * socket_create ( <nl> php_stream * stream = NULL ; <nl> Socket * sock ; <nl> <nl> - if ( unlikely ( SwooleG . main_reactor = = nullptr | | ! Coroutine : : get_current ( ) ) ) <nl> - { <nl> - return php_socket_create ( proto , protolen , resourcename , resourcenamelen , persistent_id , options , flags , timeout , context STREAMS_CC ) ; <nl> - } <nl> + Coroutine : : get_current_safe ( ) ; <nl> <nl> if ( strncmp ( proto , " unix " , protolen ) = = 0 ) <nl> { <nl>
fixed : socket_create ( ) now requires a coroutine context via Coroutine : : get_current_safe ( ) instead of falling back to php_socket_create ( )
swoole/swoole-src
16a70f6957f427f6edb7ab61c30d44f56b51b77e
2019-04-26T13:33:55Z
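The swoole hunk above replaces an explicit "no reactor / not in a coroutine" fallback with a single Coroutine::get_current_safe() call. The guard pattern looks roughly like the sketch below; the Coroutine type here is a hypothetical illustration, not swoole's actual implementation, which presumably reports the failure through its own error machinery rather than a C++ exception.

#include <stdexcept>

// Hypothetical coroutine handle used only for illustration.
struct Coroutine {
  static thread_local Coroutine *current;

  static Coroutine *get_current() { return current; }

  // Guard variant: callers that cannot work outside a coroutine use this
  // instead of checking get_current() against nullptr themselves.
  static Coroutine &get_current_safe() {
    Coroutine *co = get_current();
    if (co == nullptr)
      throw std::runtime_error("API must be called inside a coroutine");
    return *co;
  }
};

thread_local Coroutine *Coroutine::current = nullptr;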
mmm a / xbmc / guilib / JpegIO . cpp <nl> ppp b / xbmc / guilib / JpegIO . cpp <nl> unsigned int CJpegIO : : findExifMarker ( unsigned char * jpegData , <nl> { <nl> unsigned char * buffPtr = jpegData + 2 ; / / SKIP 0xFFD8 <nl> unsigned char * endOfFile = jpegData + dataSize ; <nl> - <nl> + <nl> if ( ! jpegData | | dataSize < 2 | | jpegData [ 0 ] ! = 0xFF | | jpegData [ 1 ] ! = 0xD8 ) <nl> return 0 ; <nl> - <nl> + <nl> for ( ; ; ) <nl> { <nl> BYTE marker = 0 ; <nl> - for ( int a = 0 ; a < 7 & & ( buffPtr < endOfFile ) ; a + + ) <nl> + for ( int a = 0 ; a < 7 & & ( buffPtr < endOfFile ) ; a + + ) <nl> { <nl> marker = * buffPtr ; <nl> if ( marker ! = 0xFF ) <nl> unsigned int CJpegIO : : findExifMarker ( unsigned char * jpegData , <nl> / / 0xff is legal padding , but if we get that many , something ' s wrong . <nl> if ( marker = = 0xff ) <nl> return 0 ; <nl> - <nl> - buffPtr + + ; / / move to start of itemlen field <nl> - if ( ( buffPtr + 1 ) > = endOfFile ) <nl> - return 0 ; <nl> + <nl> + buffPtr + + ; / / move to start of itemlen field <nl> + if ( ( buffPtr + 1 ) > = endOfFile ) <nl> + return 0 ; <nl> <nl> / / Read the length of the section . <nl> unsigned short itemlen = ( * buffPtr + + ) < < 8 ; <nl> itemlen + = * buffPtr ; <nl> - <nl> - if ( itemlen < sizeof ( itemlen ) ) <nl> + <nl> + if ( itemlen < sizeof ( itemlen ) ) <nl> return 0 ; <nl> <nl> switch ( marker ) <nl> unsigned int CJpegIO : : findExifMarker ( unsigned char * jpegData , <nl> case M_EOI : <nl> case M_SOS : / / stop before hitting compressed data <nl> return 0 ; <nl> - case M_EXIF : <nl> - / * found exifdata <nl> - buffPtr was pointing at the second length byte <nl> - + 1 for getting the exif tag * / <nl> + case M_EXIF : <nl> + / / found exifdata <nl> + / / buffPtr was pointing at the second length byte <nl> + / / + 1 for getting the exif tag <nl> exifPtr = buffPtr + 1 ; <nl> return itemlen ; <nl> default : / / skip all other sections <nl> unsigned int CJpegIO : : findExifMarker ( unsigned char * jpegData , <nl> break ; <nl> } <nl> } <nl> - <nl> + <nl> return 0 ; <nl> } <nl> <nl> bool CJpegIO : : GetExif ( ) <nl> unsigned int tagNumber = 0 ; <nl> bool isMotorola = false ; <nl> unsigned char * exif_data = NULL ; <nl> - unsigned const char ExifHeader [ ] = " Exif \ 0 \ 0 " ; <nl> - <nl> + unsigned const char ExifHeader [ ] = " Exif \ 0 \ 0 " ; <nl> + <nl> length = findExifMarker ( m_inputBuff , m_imgsize , exif_data ) ; <nl> - <nl> - / * read exif head , check for " Exif " <nl> - next we want to read to current offset + length <nl> - check if buffer is big enough * / <nl> - if ( length & & <nl> - memcmp ( exif_data , ExifHeader , 6 ) = = 0 ) <nl> + <nl> + / / read exif head , check for " Exif " <nl> + / / next we want to read to current offset + length <nl> + / / check if buffer is big enough <nl> + if ( length & & memcmp ( exif_data , ExifHeader , 6 ) = = 0 ) <nl> { <nl> / / read exif body <nl> exif_data + = 6 ; <nl> bool CJpegIO : : GetExif ( ) <nl> { <nl> return false ; <nl> } <nl> - <nl> + <nl> / / check for broken files <nl> - if ( ( m_inputBuff + m_imgsize ) < ( exif_data + length ) ) <nl> + if ( ( m_inputBuff + m_imgsize ) < ( exif_data + length ) ) <nl> { <nl> return false ; <nl> } <nl> bool CJpegIO : : GetExif ( ) <nl> isMotorola = true ; <nl> else <nl> return false ; <nl> - <nl> + <nl> / / Check Tag Mark <nl> - if ( isMotorola ) <nl> + if ( isMotorola ) <nl> { <nl> if ( exif_data [ 2 ] ! = 0 | | exif_data [ 3 ] ! 
= 0x2A ) <nl> return false ; <nl> - } <nl> - else <nl> + } <nl> + else <nl> { <nl> - if ( exif_data [ 3 ] ! = 0 | | exif_data [ 2 ] ! = 0x2A ) <nl> + if ( exif_data [ 3 ] ! = 0 | | exif_data [ 2 ] ! = 0x2A ) <nl> return false ; <nl> } <nl> - <nl> + <nl> / / Get first IFD offset ( offset to IFD0 ) <nl> - if ( isMotorola ) <nl> + if ( isMotorola ) <nl> { <nl> - if ( exif_data [ 4 ] ! = 0 | | exif_data [ 5 ] ! = 0 ) <nl> + if ( exif_data [ 4 ] ! = 0 | | exif_data [ 5 ] ! = 0 ) <nl> return false ; <nl> offset = exif_data [ 6 ] ; <nl> offset < < = 8 ; <nl> offset + = exif_data [ 7 ] ; <nl> - } <nl> - else <nl> + } <nl> + else <nl> { <nl> - if ( exif_data [ 7 ] ! = 0 | | exif_data [ 6 ] ! = 0 ) <nl> + if ( exif_data [ 7 ] ! = 0 | | exif_data [ 6 ] ! = 0 ) <nl> return false ; <nl> offset = exif_data [ 5 ] ; <nl> offset < < = 8 ; <nl> offset + = exif_data [ 4 ] ; <nl> } <nl> - <nl> + <nl> if ( offset > length - 2 ) <nl> return false ; / / check end of data segment <nl> - <nl> + <nl> / / Get the number of directory entries contained in this IFD <nl> - if ( isMotorola ) <nl> + if ( isMotorola ) <nl> { <nl> numberOfTags = exif_data [ offset ] ; <nl> numberOfTags < < = 8 ; <nl> numberOfTags + = exif_data [ offset + 1 ] ; <nl> - } <nl> - else <nl> + } <nl> + else <nl> { <nl> numberOfTags = exif_data [ offset + 1 ] ; <nl> numberOfTags < < = 8 ; <nl> numberOfTags + = exif_data [ offset ] ; <nl> } <nl> - <nl> - if ( numberOfTags = = 0 ) <nl> + <nl> + if ( numberOfTags = = 0 ) <nl> return false ; <nl> offset + = 2 ; <nl> - <nl> + <nl> / / Search for Orientation Tag in IFD0 - hey almost there ! : D <nl> while ( 1 ) / / hopefully this jpeg has correct exif data . . . <nl> { <nl> if ( offset > length - 12 ) <nl> return false ; / / check end of data segment <nl> - <nl> + <nl> / / Get Tag number <nl> - if ( isMotorola ) <nl> + if ( isMotorola ) <nl> { <nl> tagNumber = exif_data [ offset ] ; <nl> tagNumber < < = 8 ; <nl> tagNumber + = exif_data [ offset + 1 ] ; <nl> - } <nl> - else <nl> + } <nl> + else <nl> { <nl> tagNumber = exif_data [ offset + 1 ] ; <nl> tagNumber < < = 8 ; <nl> tagNumber + = exif_data [ offset ] ; <nl> } <nl> - <nl> - if ( tagNumber = = EXIF_TAG_ORIENTATION ) <nl> + <nl> + if ( tagNumber = = EXIF_TAG_ORIENTATION ) <nl> break ; / / found orientation tag <nl> - <nl> - if ( - - numberOfTags = = 0 ) <nl> + <nl> + if ( - - numberOfTags = = 0 ) <nl> return false ; / / no orientation found <nl> offset + = 12 ; / / jump to next tag <nl> } <nl> - <nl> + <nl> / / Get the Orientation value <nl> - if ( isMotorola ) <nl> + if ( isMotorola ) <nl> { <nl> - if ( exif_data [ offset + 8 ] ! = 0 ) <nl> + if ( exif_data [ offset + 8 ] ! = 0 ) <nl> return false ; <nl> m_orientation = exif_data [ offset + 9 ] ; <nl> - } <nl> - else <nl> + } <nl> + else <nl> { <nl> - if ( exif_data [ offset + 9 ] ! = 0 ) <nl> + if ( exif_data [ offset + 9 ] ! = 0 ) <nl> return false ; <nl> m_orientation = exif_data [ offset + 8 ] ; <nl> } <nl> bool CJpegIO : : GetExif ( ) <nl> m_orientation = 0 ; <nl> return false ; <nl> } <nl> - <nl> + <nl> return true ; / / done <nl> } <nl>
cosmetics : whitespace cleanup and comment style in CJpegIO EXIF parsing
xbmc/xbmc
d04744a1c7079ea2cfa2e11ebc11fb1d07b6445d
2011-09-29T17:18:37Z
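Most of the hunks above are whitespace-only, but the surrounding parser repeatedly reads 16-bit EXIF fields whose byte order depends on whether the TIFF header declared Motorola (big-endian, "MM") or Intel (little-endian, "II") alignment. A small stand-alone helper showing that read; the name is illustrative, not part of the CJpegIO API.

#include <cstddef>

// Read a 16-bit EXIF value at `offset`, honouring the byte order declared
// by the TIFF header: Motorola = big-endian, Intel = little-endian.
static unsigned read_exif_u16(const unsigned char *data, size_t offset,
                              bool isMotorola) {
  if (isMotorola)
    return (static_cast<unsigned>(data[offset]) << 8) | data[offset + 1];
  return (static_cast<unsigned>(data[offset + 1]) << 8) | data[offset];
}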
deleted file mode 100644 <nl> index 43b1f96a3e . . 0000000000 <nl> mmm a / change / react - native - windows - 2020 - 03 - 31 - 09 - 49 - 09 - fix - asmw32 - multiremove . json <nl> ppp / dev / null <nl> <nl> - { <nl> - " type " : " prerelease " , <nl> - " comment " : " 1 . Make changes in response to Vladimir ' s feedback in the PR for bb8c00133 . . . 2 . Fix a dumb bug in multiRemove ; the parameterized query variable was not being bound so nothing ever got deleted " , <nl> - " packageName " : " react - native - windows " , <nl> - " email " : " hpratt @ microsoft . com " , <nl> - " dependentChangeType " : " patch " , <nl> - " date " : " 2020 - 03 - 31T16 : 49 : 09 . 648Z " <nl> - } <nl> \ No newline at end of file <nl> mmm a / packages / E2ETest / package . json <nl> ppp b / packages / E2ETest / package . json <nl> <nl> " dependencies " : { <nl> " react " : " 16 . 9 . 0 " , <nl> " react - native " : " 0 . 61 . 5 " , <nl> - " react - native - windows " : " 0 . 0 . 0 - master . 5 " , <nl> + " react - native - windows " : " 0 . 0 . 0 - master . 6 " , <nl> " rnpm - plugin - windows " : " ^ 0 . 6 . 1 " <nl> } , <nl> " devDependencies " : { <nl> mmm a / packages / microsoft - reactnative - sampleapps / package . json <nl> ppp b / packages / microsoft - reactnative - sampleapps / package . json <nl> <nl> " dependencies " : { <nl> " react " : " 16 . 9 . 0 " , <nl> " react - native " : " 0 . 61 . 5 " , <nl> - " react - native - windows " : " 0 . 0 . 0 - master . 5 " , <nl> + " react - native - windows " : " 0 . 0 . 0 - master . 6 " , <nl> " rnpm - plugin - windows " : " ^ 0 . 6 . 1 " <nl> } , <nl> " devDependencies " : { <nl> mmm a / packages / playground / package . json <nl> ppp b / packages / playground / package . json <nl> <nl> " dependencies " : { <nl> " react " : " 16 . 9 . 0 " , <nl> " react - native " : " 0 . 61 . 5 " , <nl> - " react - native - windows " : " 0 . 0 . 0 - master . 5 " , <nl> + " react - native - windows " : " 0 . 0 . 0 - master . 6 " , <nl> " rnpm - plugin - windows " : " ^ 0 . 6 . 1 " <nl> } , <nl> " devDependencies " : { <nl> mmm a / vnext / CHANGELOG . json <nl> ppp b / vnext / CHANGELOG . json <nl> <nl> { <nl> " name " : " react - native - windows " , <nl> " entries " : [ <nl> + { <nl> + " date " : " Tue , 31 Mar 2020 20 : 21 : 56 GMT " , <nl> + " tag " : " react - native - windows_v0 . 0 . 0 - master . 6 " , <nl> + " version " : " 0 . 0 . 0 - master . 6 " , <nl> + " comments " : { <nl> + " prerelease " : [ <nl> + { <nl> + " comment " : " 1 . Make changes in response to Vladimir ' s feedback in the PR for bb8c00133 . . . 2 . Fix a dumb bug in multiRemove ; the parameterized query variable was not being bound so nothing ever got deleted " , <nl> + " author " : " hpratt @ microsoft . com " , <nl> + " commit " : " 407c0834ada43cd9d42c24cb6ddfe7c91ddf960a " , <nl> + " package " : " react - native - windows " <nl> + } <nl> + ] <nl> + } <nl> + } , <nl> { <nl> " date " : " Tue , 31 Mar 2020 16 : 24 : 06 GMT " , <nl> " tag " : " react - native - windows_v0 . 0 . 0 - master . 5 " , <nl> mmm a / vnext / CHANGELOG . md <nl> ppp b / vnext / CHANGELOG . md <nl> <nl> # Change Log - react - native - windows <nl> <nl> - This log was last generated on Tue , 31 Mar 2020 02 : 03 : 05 GMT and should not be manually modified . <nl> + This log was last generated on Tue , 31 Mar 2020 20 : 21 : 56 GMT and should not be manually modified . <nl> <nl> + # # 0 . 0 . 0 - master . 6 <nl> + Tue , 31 Mar 2020 20 : 21 : 56 GMT <nl> + <nl> + # # # Changes <nl> + <nl> + - 1 . 
Make changes in response to Vladimir ' s feedback in the PR for bb8c00133 . . . 2 . Fix a dumb bug in multiRemove ; the parameterized query variable was not being bound so nothing ever got deleted ( hpratt @ microsoft . com ) <nl> # # 0 . 0 . 0 - master . 5 <nl> Tue , 31 Mar 2020 02 : 03 : 05 GMT <nl> <nl> mmm a / vnext / package . json <nl> ppp b / vnext / package . json <nl> <nl> { <nl> " name " : " react - native - windows " , <nl> - " version " : " 0 . 0 . 0 - master . 5 " , <nl> + " version " : " 0 . 0 . 0 - master . 6 " , <nl> " license " : " MIT " , <nl> " repository " : { <nl> " type " : " git " , <nl>
applying package updates * * * NO_CI * * *
microsoft/react-native-windows
2548176b6802c6d1209b076a62cccc76827c99d5
2020-03-31T20:21:56Z
mmm a / dlib / optimization / optimization_line_search . h <nl> ppp b / dlib / optimization / optimization_line_search . h <nl> namespace dlib <nl> double f0 , <nl> double d0 , <nl> double f1 , <nl> - double d1 <nl> + double d1 , <nl> + double limit = 1 <nl> ) <nl> { <nl> const double n = 3 * ( f1 - f0 ) - 2 * d0 - d1 ; <nl> namespace dlib <nl> else <nl> x = x2 ; <nl> <nl> - / / now make sure the minimum is within the allowed range of ( 0 , 1 ) <nl> - return put_in_range ( 0 , 1 , x ) ; <nl> + / / now make sure the minimum is within the allowed range of [ 0 , limit ] <nl> + return put_in_range ( 0 , limit , x ) ; <nl> } <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> namespace dlib <nl> / / the book Practical Methods of Optimization by R . Fletcher . The sectioning <nl> / / phase is an implementation of 2 . 6 . 4 from the same book . <nl> <nl> - / / tau1 > 1 . Controls the alpha jump size during the search <nl> - const double tau1 = 9 ; <nl> + / / 1 < tau1a < tau1b . Controls the alpha jump size during the search <nl> + const double tau1a = 2 . 0 ; <nl> + const double tau1b = 9 ; <nl> <nl> / / it must be the case that 0 < tau2 < tau3 < = 1 / 2 for the algorithm to function <nl> / / correctly but the specific values of tau2 and tau3 aren ' t super important . <nl> namespace dlib <nl> break ; <nl> } <nl> <nl> - if ( mu < = 2 * alpha - last_alpha ) <nl> - { <nl> - last_alpha = alpha ; <nl> - alpha = mu ; <nl> - } <nl> - else <nl> - { <nl> - const double temp = alpha ; <nl> <nl> - double first = 2 * alpha - last_alpha ; <nl> - double last ; <nl> - if ( mu > 0 ) <nl> - last = std : : min ( mu , alpha + tau1 * ( alpha - last_alpha ) ) ; <nl> - else <nl> - last = std : : max ( mu , alpha + tau1 * ( alpha - last_alpha ) ) ; <nl> <nl> + const double temp = alpha ; <nl> + / / Pick a larger range [ first , last ] . We will pick the next alpha in that <nl> + / / range . <nl> + double first = alpha + tau1a * ( alpha - last_alpha ) ; <nl> + double last ; <nl> + if ( mu > 0 ) <nl> + last = std : : min ( mu , alpha + tau1b * ( alpha - last_alpha ) ) ; <nl> + else <nl> + last = std : : max ( mu , alpha + tau1b * ( alpha - last_alpha ) ) ; <nl> <nl> - / / pick a point between first and last by doing some kind of interpolation <nl> - if ( last_alpha < alpha ) <nl> - alpha = last_alpha + ( alpha - last_alpha ) * poly_min_extrap ( last_val , last_val_der , val , val_der ) ; <nl> - else <nl> - alpha = alpha + ( last_alpha - alpha ) * poly_min_extrap ( val , val_der , last_val , last_val_der ) ; <nl> <nl> - alpha = put_in_range ( first , last , alpha ) ; <nl> + / / pick a point between first and last by doing some kind of interpolation <nl> + if ( last_alpha < alpha ) <nl> + alpha = last_alpha + ( alpha - last_alpha ) * poly_min_extrap ( last_val , last_val_der , val , val_der , 1e10 ) ; <nl> + else <nl> + alpha = alpha + ( last_alpha - alpha ) * poly_min_extrap ( val , val_der , last_val , last_val_der , 1e10 ) ; <nl> <nl> + alpha = put_in_range ( first , last , alpha ) ; <nl> <nl> - last_alpha = temp ; <nl> - } <nl> + last_alpha = temp ; <nl> <nl> last_val = val ; <nl> last_val_der = val_der ; <nl> namespace dlib <nl> { <nl> if ( std : : abs ( val_der ) < = thresh ) <nl> return alpha ; <nl> + / / If we are optimizing a function that doesn ' t have continuous first <nl> + / / derivatives then val_der might not ever go below thresh . So check if it <nl> + / / looks like the first derivative is discontinuous and stop if so . 
The <nl> + / / current alpha is plenty good enough in this case . <nl> + const double second_der = std : : abs ( a_val_der - b_val_der ) / std : : abs ( a - b ) ; <nl> + if ( second_der > 1e5 ) <nl> + return alpha ; <nl> <nl> if ( ( b - a ) * val_der > = 0 ) <nl> { <nl> mmm a / dlib / optimization / optimization_line_search_abstract . h <nl> ppp b / dlib / optimization / optimization_line_search_abstract . h <nl> namespace dlib <nl> double f0 , <nl> double d0 , <nl> double f1 , <nl> - double d1 <nl> + double d1 , <nl> + double limit = 1 <nl> ) ; <nl> / * ! <nl> ensures <nl> namespace dlib <nl> - c ( 1 ) = = f1 <nl> - derivative of c ( x ) at x = = 0 is d0 <nl> - derivative of c ( x ) at x = = 1 is d1 <nl> - - returns the point in the range [ 0 , 1 ] that minimizes the polynomial c ( x ) <nl> + - returns the point in the range [ 0 , limit ] that minimizes the polynomial c ( x ) <nl> ! * / <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl>
Improved line_search ( ) behavior for functions without continuous derivatives .
davisking/dlib
79d99b8582fe04a5f08dd088cd1dd6275bd51554
2016-03-28T19:43:11Z
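The dlib change lets poly_min_extrap() clamp its interpolated minimiser to [0, limit] instead of [0, 1], so the sectioning phase can extrapolate further. The sketch below re-derives the idea from scratch: fit a cubic to f(0), f'(0), f(1), f'(1), take the interior minimiser, and clamp. It is a simplified illustration under those assumptions, not dlib's exact numerics, which guard more degenerate cases.

#include <algorithm>
#include <cmath>

static double put_in_range(double lo, double hi, double x) {
  return std::min(hi, std::max(lo, x));
}

// Minimise the cubic c(x) with c(0)=f0, c'(0)=d0, c(1)=f1, c'(1)=d1,
// clamped to [0, limit].
static double cubic_min(double f0, double d0, double f1, double d1,
                        double limit = 1.0) {
  // c(x) = a*x^3 + b*x^2 + d0*x + f0
  const double a = d0 + d1 - 2.0 * (f1 - f0);
  const double b = 3.0 * (f1 - f0) - 2.0 * d0 - d1;

  double x;
  const double disc = b * b - 3.0 * a * d0;   // from c'(x) = 3a*x^2 + 2b*x + d0
  if (std::abs(a) < 1e-12 || disc < 0.0) {
    // Fall back to the quadratic fit (or the midpoint if even that degenerates).
    x = (std::abs(b) > 1e-12) ? -d0 / (2.0 * b) : 0.5;
  } else {
    // Two stationary points; the one with c''(x) = 6a*x + 2b > 0 is the minimum.
    const double r1 = (-b + std::sqrt(disc)) / (3.0 * a);
    const double r2 = (-b - std::sqrt(disc)) / (3.0 * a);
    x = (6.0 * a * r1 + 2.0 * b > 0.0) ? r1 : r2;
  }
  return put_in_range(0.0, limit, x);
}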
mmm a / tensorflow / examples / android / src / org / tensorflow / demo / ClassifierActivity . java <nl> ppp b / tensorflow / examples / android / src / org / tensorflow / demo / ClassifierActivity . java <nl> <nl> / / - - input_node_names = " Mul " \ <nl> / / - - output_node_names = " final_result " \ <nl> / / - - input_binary = true <nl> - private static final int NUM_CLASSES = 1001 ; <nl> + / / <nl> + / / Note : the actual number of classes for Inception is 1001 , but the output layer size is 1008 . <nl> + private static final int NUM_CLASSES = 1008 ; <nl> private static final int INPUT_SIZE = 224 ; <nl> private static final int IMAGE_MEAN = 117 ; <nl> private static final float IMAGE_STD = 1 ; <nl> mmm a / tensorflow / examples / android / src / org / tensorflow / demo / TensorFlowImageClassifier . java <nl> ppp b / tensorflow / examples / android / src / org / tensorflow / demo / TensorFlowImageClassifier . java <nl> public static Classifier create ( <nl> c . labels . add ( line ) ; <nl> } <nl> br . close ( ) ; <nl> - Log . i ( TAG , " Read " + c . labels . size ( ) + " , " + numClasses + " specified " ) ; <nl> + Log . i ( TAG , " Read " + c . labels . size ( ) + " labels , " + numClasses + <nl> + " output layer size specified " ) ; <nl> <nl> c . inputSize = inputSize ; <nl> c . imageMean = imageMean ; <nl> public int compare ( Recognition lhs , Recognition rhs ) { <nl> } ) ; <nl> for ( int i = 0 ; i < outputs . length ; + + i ) { <nl> if ( outputs [ i ] > THRESHOLD ) { <nl> - pq . add ( new Recognition ( " " + i , labels . get ( i ) , outputs [ i ] , null ) ) ; <nl> + pq . add ( new Recognition ( " " + i , <nl> + labels . size ( ) > i ? labels . get ( i ) : " unknown " , outputs [ i ] , null ) ) ; <nl> } <nl> } <nl> final ArrayList < Recognition > recognitions = new ArrayList < Recognition > ( ) ; <nl>
Android : fix issue where mismatch between Inception output size and inception num classes could cause demo app to crash ( due to recent CHECK added in inference interface ) .
tensorflow/tensorflow
0eb6d8ff445922e8eb585534e3fb1deb927e011e
2017-02-01T19:47:14Z
mmm a / Marlin / src / pins / pins_CHEAPTRONICv2 . h <nl> ppp b / Marlin / src / pins / pins_CHEAPTRONICv2 . h <nl> <nl> # define TEMP_0_PIN 15 <nl> # define TEMP_1_PIN 13 <nl> # define TEMP_2_PIN 14 <nl> - # define TEMP_3_PIN 11 <nl> + # define TEMP_3_PIN 11 / / should be used for chamber temperature control <nl> # define TEMP_BED_PIN 12 <nl> <nl> / / <nl> <nl> / / <nl> / / Other board specific pins <nl> / / <nl> + # define FIL_RUNOUT_PIN 37 / / board input labeled as F - DET <nl> + # define Z_MIN_PROBE_PIN 36 / / additional external board input labeled as E - SENS ( should be used for Z - probe ) <nl> # define LED_PIN 13 <nl> - # define SPINDLE_ENABLE_PIN 4 <nl> + # define SPINDLE_ENABLE_PIN 4 / / additional PWM pin 1 at JP1 connector - should be used for laser control too <nl> + # define EXT_2 5 / / additional PWM pin 2 at JP1 connector <nl> + # define EXT_3 2 / / additional PWM pin 3 at JP1 connector <nl> # define FAN_PIN 3 <nl> + # define FAN2_PIN 58 / / additional fan or light control output <nl> # define PS_ON_PIN 45 <nl> # define KILL_PIN 46 <nl> <nl> # ifndef FILWIDTH_PIN <nl> - # define FILWIDTH_PIN 37 / / should be Analog Input ( 0 - 15 ) <nl> + # define FILWIDTH_PIN 11 / / shared with TEMP_3 analog input <nl> # endif <nl>
CheapTRONIC v2 . 0 additions / revisions
MarlinFirmware/Marlin
048d8a0158e1a5f3aa287794200ff25cfa552e17
2018-01-05T00:00:25Z
mmm a / editor / editor_file_dialog . cpp <nl> ppp b / editor / editor_file_dialog . cpp <nl> void EditorFileDialog : : _update_drives ( ) { <nl> <nl> void EditorFileDialog : : _favorite_selected ( int p_idx ) { <nl> dir_access - > change_dir ( favorites - > get_item_metadata ( p_idx ) ) ; <nl> - file - > set_text ( " " ) ; <nl> update_dir ( ) ; <nl> invalidate ( ) ; <nl> _push_history ( ) ; <nl>
fix file dialog filename cleared when selecting favorites
godotengine/godot
318d5442ecbe9c081d9a477a45b8e265c4131562
2020-05-23T23:46:13Z
mmm a / drivers / nrex / nrex . cpp <nl> ppp b / drivers / nrex / nrex . cpp <nl> struct nrex_node_group : public nrex_node <nl> { <nl> return res ; <nl> } <nl> - if ( ( res > = 0 ) ! = negate ) <nl> + if ( negate ) <nl> + { <nl> + if ( res < 0 ) <nl> + { <nl> + res = pos + 1 ; <nl> + } <nl> + else <nl> + { <nl> + return - 1 ; <nl> + } <nl> + } <nl> + if ( res > = 0 ) <nl> { <nl> if ( capturing > = 0 ) <nl> { <nl> struct nrex_node_quantifier : public nrex_node <nl> { <nl> nrex_array < int > backtrack ; <nl> backtrack . push ( pos ) ; <nl> - s - > complete = false ; <nl> while ( backtrack . top ( ) < = s - > end ) <nl> { <nl> if ( max > = 1 & & backtrack . size ( ) > ( unsigned int ) max ) <nl> struct nrex_node_quantifier : public nrex_node <nl> return res ; <nl> } <nl> } <nl> - s - > complete = false ; <nl> int res = child - > test ( s , backtrack . top ( ) ) ; <nl> if ( s - > complete ) <nl> { <nl> struct nrex_node_quantifier : public nrex_node <nl> } <nl> while ( greedy & & ( unsigned int ) min < backtrack . size ( ) ) <nl> { <nl> - s - > complete = false ; <nl> int res = backtrack . top ( ) ; <nl> - if ( s - > complete ) <nl> - { <nl> - return res ; <nl> - } <nl> if ( next ) <nl> { <nl> res = next - > test ( s , res ) ; <nl> struct nrex_node_quantifier : public nrex_node <nl> { <nl> return res ; <nl> } <nl> + if ( s - > complete ) <nl> + { <nl> + return res ; <nl> + } <nl> backtrack . pop ( ) ; <nl> } <nl> return - 1 ; <nl>
More nrex fixes : negated group matching and quantifier completion handling
godotengine/godot
d9f1a85948478fe5038ecc2fd1923c451cca84d5
2015-07-24T12:25:04Z
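The nrex fix changes negated-group semantics: when nothing inside the group matches at the current position, the group consumes exactly one character; when any member does match, the whole group fails. A stand-alone sketch of that rule (not nrex's node machinery):

#include <string>

// Match a negated character class [^set] at position `pos`.
// Returns the new position on success (always pos + 1), or -1 on failure.
static int match_negated_set(const std::string &input, size_t pos,
                             const std::string &set) {
  if (pos >= input.size())
    return -1;                          // nothing left to consume
  if (set.find(input[pos]) != std::string::npos)
    return -1;                          // a member matched, so [^set] fails
  return static_cast<int>(pos) + 1;     // no member matched: consume one char
}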
mmm a / test / cctest / test - heap . cc <nl> ppp b / test / cctest / test - heap . cc <nl> TEST ( ObjectsInOptimizedCodeAreWeak ) { <nl> } <nl> <nl> <nl> + # ifdef DEBUG <nl> TEST ( AddInstructionChangesNewSpacePromotion ) { <nl> i : : FLAG_allow_natives_syntax = true ; <nl> i : : FLAG_expose_gc = true ; <nl> TEST ( AddInstructionChangesNewSpacePromotion ) { <nl> g - > Call ( global , 1 , args1 ) ; <nl> heap - > CollectAllGarbage ( Heap : : kAbortIncrementalMarkingMask ) ; <nl> } <nl> + # endif <nl>
Do not run AddInstructionChangesNewSpacePromotion test in release mode .
v8/v8
d9090c02848af29a8bb985c83174abf6b65e4d1e
2014-03-11T11:53:40Z
mmm a / . circleci / cimodel / data / pytorch_build_definitions . py <nl> ppp b / . circleci / cimodel / data / pytorch_build_definitions . py <nl> def gen_workflow_params ( self , phase ) : <nl> resource_class = " large " <nl> if self . gpu_resource : <nl> resource_class = " gpu . " + self . gpu_resource <nl> - <nl> - if self . gpu_resource = = " large " : <nl> - parameters [ " multi_gpu " ] = miniutils . quote ( " 1 " ) <nl> parameters [ " resource_class " ] = resource_class <nl> return parameters <nl> <nl> mmm a / . circleci / config . yml <nl> ppp b / . circleci / config . yml <nl> pytorch_params : & pytorch_params <nl> use_cuda_docker_runtime : <nl> type : string <nl> default : " " <nl> - multi_gpu : <nl> - type : string <nl> - default : " " <nl> environment : <nl> BUILD_ENVIRONMENT : < < parameters . build_environment > > <nl> DOCKER_IMAGE : < < parameters . docker_image > > <nl> USE_CUDA_DOCKER_RUNTIME : < < parameters . use_cuda_docker_runtime > > <nl> - MULTI_GPU : < < parameters . multi_gpu > > <nl> resource_class : < < parameters . resource_class > > <nl> <nl> caffe2_params : & caffe2_params <nl> jobs : <nl> else <nl> export id = $ ( docker run - t - d - w / var / lib / jenkins $ { COMMIT_DOCKER_IMAGE } ) <nl> fi <nl> - if [ - n " $ { MULTI_GPU } " ] ; then <nl> + if [ [ $ { BUILD_ENVIRONMENT } = = * " multigpu " * ] ] ; then <nl> export COMMAND = ' ( ( echo " export BUILD_ENVIRONMENT = $ { BUILD_ENVIRONMENT } " & & echo ' " $ NAMED_FLAG " ' & & echo " source . / workspace / env " & & echo " sudo chown - R jenkins workspace & & cd workspace & & . jenkins / pytorch / multigpu - test . sh " ) | docker exec - u jenkins - i " $ id " bash ) 2 > & 1 ' <nl> else <nl> export COMMAND = ' ( ( echo " export BUILD_ENVIRONMENT = $ { BUILD_ENVIRONMENT } " & & echo ' " $ NAMED_FLAG " ' & & echo " source . / workspace / env " & & echo " sudo chown - R jenkins workspace & & cd workspace & & . jenkins / pytorch / test . sh " ) | docker exec - u jenkins - i " $ id " bash ) 2 > & 1 ' <nl> workflows : <nl> build_environment : " pytorch - linux - xenial - cuda9 - cudnn7 - py3 - multigpu - test " <nl> docker_image : " 308535385114 . dkr . ecr . us - east - 1 . amazonaws . com / pytorch / pytorch - linux - xenial - cuda9 - cudnn7 - py3 : 339 " <nl> use_cuda_docker_runtime : " 1 " <nl> - multi_gpu : " 1 " <nl> resource_class : gpu . large <nl> - pytorch_linux_test : <nl> name : pytorch_linux_xenial_cuda9_cudnn7_py3_NO_AVX2_test <nl> mmm a / . circleci / verbatim - sources / pytorch - build - params . yml <nl> ppp b / . circleci / verbatim - sources / pytorch - build - params . yml <nl> pytorch_params : & pytorch_params <nl> use_cuda_docker_runtime : <nl> type : string <nl> default : " " <nl> - multi_gpu : <nl> - type : string <nl> - default : " " <nl> environment : <nl> BUILD_ENVIRONMENT : < < parameters . build_environment > > <nl> DOCKER_IMAGE : < < parameters . docker_image > > <nl> USE_CUDA_DOCKER_RUNTIME : < < parameters . use_cuda_docker_runtime > > <nl> - MULTI_GPU : < < parameters . multi_gpu > > <nl> resource_class : < < parameters . resource_class > > <nl> <nl> mmm a / . circleci / verbatim - sources / pytorch - job - specs . yml <nl> ppp b / . circleci / verbatim - sources / pytorch - job - specs . 
yml <nl> jobs : <nl> else <nl> export id = $ ( docker run - t - d - w / var / lib / jenkins $ { COMMIT_DOCKER_IMAGE } ) <nl> fi <nl> - if [ - n " $ { MULTI_GPU } " ] ; then <nl> + if [ [ $ { BUILD_ENVIRONMENT } = = * " multigpu " * ] ] ; then <nl> export COMMAND = ' ( ( echo " export BUILD_ENVIRONMENT = $ { BUILD_ENVIRONMENT } " & & echo ' " $ NAMED_FLAG " ' & & echo " source . / workspace / env " & & echo " sudo chown - R jenkins workspace & & cd workspace & & . jenkins / pytorch / multigpu - test . sh " ) | docker exec - u jenkins - i " $ id " bash ) 2 > & 1 ' <nl> else <nl> export COMMAND = ' ( ( echo " export BUILD_ENVIRONMENT = $ { BUILD_ENVIRONMENT } " & & echo ' " $ NAMED_FLAG " ' & & echo " source . / workspace / env " & & echo " sudo chown - R jenkins workspace & & cd workspace & & . jenkins / pytorch / test . sh " ) | docker exec - u jenkins - i " $ id " bash ) 2 > & 1 ' <nl>
remove MULTI_GPU ( )
pytorch/pytorch
7d3564fc2cd1d2aeddbd6a3466cd10ba24d81239
2019-09-02T17:22:30Z
mmm a / tensorflow / lite / experimental / delegates / hexagon / utils . cc <nl> ppp b / tensorflow / lite / experimental / delegates / hexagon / utils . cc <nl> bool InputsWithCorrectTypes ( <nl> const std : : vector < std : : vector < TfLiteType > > & per_input_possible_types ) { <nl> if ( node - > inputs - > size ! = per_input_possible_types . size ( ) ) return false ; <nl> for ( int i = 0 ; i < per_input_possible_types . size ( ) ; + + i ) { <nl> + / / Skip optional tensor . <nl> + if ( node - > inputs - > data [ i ] = = - 1 ) continue ; <nl> bool type_found = false ; <nl> for ( auto possible_type : per_input_possible_types [ i ] ) { <nl> if ( TensorTypeMatch ( node - > inputs - > data [ i ] , context , possible_type ) ) { <nl>
Hexagon Delegate
tensorflow/tensorflow
456a61ddb1b4d774b68caf046193a44c5cbe4c24
2020-05-19T01:27:14Z
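The fix above skips the type check for inputs whose tensor index is -1, which is how TensorFlow Lite marks an optional input that was not supplied. Here is a hedged sketch of that skip pattern using a plain vector of indices; kOptionalTensor, MatchesAllowedType and InputsHaveCorrectTypes are placeholder names standing in for the real TfLiteNode/TfLiteContext machinery.

```cpp
#include <vector>

constexpr int kOptionalTensor = -1;  // TFLite represents an absent optional input as -1

// Placeholder for the per-tensor type check done against the real context.
static bool MatchesAllowedType(int /*tensor_index*/) { return true; }

static bool InputsHaveCorrectTypes(const std::vector<int>& input_indices) {
  for (int index : input_indices) {
    if (index == kOptionalTensor) continue;  // optional input not provided: skip it
    if (!MatchesAllowedType(index)) return false;
  }
  return true;
}

int main() {
  const std::vector<int> inputs = {0, kOptionalTensor, 2};
  return InputsHaveCorrectTypes(inputs) ? 0 : 1;
}
```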
mmm a / validation - test / Evolution / test_global_change_size . swift <nl> ppp b / validation - test / Evolution / test_global_change_size . swift <nl> <nl> / / RUN : % target - run % t / after_before <nl> / / RUN : % target - run % t / after_after <nl> <nl> + / / This test is currently crashing in optimized mode . <nl> + / / TODO : remove the following requirement when rdar : / / problem / 24222892 is fixed . <nl> + / / REQUIRES : swift_test_mode_optimize_none <nl> + <nl> import StdlibUnittest <nl> import global_change_size <nl> <nl>
tests: temporarily disable the test_global_change_size test in optimized mode.
apple/swift
e369c95cce3a8a82cea051f63f51572337ccdfbe
2016-01-17T18:37:18Z
new file mode 100644 <nl> index 00000000000 . . 67af8546f36 <nl> mmm / dev / null <nl> ppp b / thirdparty / vhacd / 0003 - fix - musl - build . patch <nl> <nl> pppmmm a / thirdparty / vhacd / inc / vhacdMutex . h <nl> ppp + b / thirdparty / vhacd / inc / vhacdMutex . h <nl> + <nl> + # include < pthread . h > <nl> + # endif <nl> + <nl> + - # if defined ( __APPLE__ ) <nl> + + / / - - GODOT start - - <nl> + + # if defined ( __APPLE__ ) | | ! defined ( __GLIBC__ ) <nl> + + / / - - GODOT end - - <nl> + # define PTHREAD_MUTEX_RECURSIVE_NP PTHREAD_MUTEX_RECURSIVE <nl> + # endif <nl> + <nl> mmm a / thirdparty / vhacd / inc / vhacdMutex . h <nl> ppp b / thirdparty / vhacd / inc / vhacdMutex . h <nl> <nl> # include < pthread . h > <nl> # endif <nl> <nl> - # if defined ( __APPLE__ ) <nl> + / / - - GODOT start - - <nl> + # if defined ( __APPLE__ ) | | ! defined ( __GLIBC__ ) <nl> + / / - - GODOT end - - <nl> # define PTHREAD_MUTEX_RECURSIVE_NP PTHREAD_MUTEX_RECURSIVE <nl> # endif <nl> <nl>
Fix build on musl-based systems
godotengine/godot
589eb80a90e947469b0261420f8bba30e867ff0a
2019-12-10T21:54:32Z
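The patch widens the existing Apple workaround to any non-glibc libc (such as musl), where the glibc-specific PTHREAD_MUTEX_RECURSIVE_NP name does not exist. The sketch below shows the same alias plus the recursive-mutex initialization it enables; the main() driver is purely illustrative and not part of vhacd.

```cpp
#include <pthread.h>

// musl and macOS only provide the standard name, so alias the _NP spelling.
#if defined(__APPLE__) || !defined(__GLIBC__)
#define PTHREAD_MUTEX_RECURSIVE_NP PTHREAD_MUTEX_RECURSIVE
#endif

int main() {
  pthread_mutexattr_t attr;
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE_NP);

  pthread_mutex_t mutex;
  pthread_mutex_init(&mutex, &attr);

  pthread_mutex_lock(&mutex);
  pthread_mutex_lock(&mutex);    // recursive: relocking on the same thread succeeds
  pthread_mutex_unlock(&mutex);
  pthread_mutex_unlock(&mutex);

  pthread_mutex_destroy(&mutex);
  pthread_mutexattr_destroy(&attr);
  return 0;
}
```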
mmm a / js / client / modules / @ arangodb / tutorial . js <nl> ppp b / js / client / modules / @ arangodb / tutorial . js <nl> var lessons = [ <nl> " database and all of its collections , use ' _dropDatabase ' : \ n \ n " + <nl> " db . _createDatabase ( \ " mydb \ " ) ; \ n " + <nl> " db . _useDatabase ( \ " mydb \ " ) ; \ n " + <nl> + " db . _useDatabase ( \ " _system \ " ) ; \ n " + <nl> " db . _dropDatabase ( \ " mydb \ " ) ; " <nl> } <nl> ] ; <nl>
As @afj88 points out, one can't drop a database that one is currently in.
arangodb/arangodb
f7731562c3cd28668d9daaadfb2b3143051beb4b
2016-06-16T13:56:37Z
mmm a / js / client / modules / org / arangodb / graph . js <nl> ppp b / js / client / modules / org / arangodb / graph . js <nl> GraphAPI = { <nl> } <nl> } ; <nl> <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - module " org / arangodb / graph " <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - Edge <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - constructors and destructors <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoGraph <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief constructs a new edge object <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - public methods <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> GraphAPI = { <nl> / / / @ { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief returns the to vertex <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - Edge . prototype . getInVertex = function ( ) { <nl> - return this . _graph . getVertex ( this . _properties . _to ) ; <nl> - } ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief returns the from vertex <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - Edge . prototype . getOutVertex = function ( ) { <nl> - return this . _graph . getVertex ( this . _properties . 
_from ) ; <nl> - } ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief returns the other vertex <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - Edge . prototype . getPeerVertex = function ( vertex ) { <nl> - if ( vertex . _properties . _id = = = this . _properties . _to ) { <nl> - return this . _graph . getVertex ( this . _properties . _from ) ; <nl> - } <nl> - <nl> - if ( vertex . _properties . _id = = = this . _properties . _from ) { <nl> - return this . _graph . getVertex ( this . _properties . _to ) ; <nl> - } <nl> - <nl> - return null ; <nl> - } ; <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief changes a property of an edge <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / js / common / modules / org / arangodb / graph - common . js <nl> ppp b / js / common / modules / org / arangodb / graph - common . js <nl> Edge . prototype . properties = function ( ) { <nl> return this . _properties . _shallowCopy ; <nl> } ; <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief returns the to vertex <nl> + / / / <nl> + / / / @ FUN { @ FA { edge } . getInVertex ( ) } <nl> + / / / <nl> + / / / Returns the vertex at the head of the @ FA { edge } . <nl> + / / / <nl> + / / / @ EXAMPLES <nl> + / / / <nl> + / / / @ verbinclude graph - edge - get - in - vertex <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + Edge . prototype . getInVertex = function ( ) { <nl> + return this . _graph . getVertex ( this . _properties . _to ) ; <nl> + } ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief returns the from vertex <nl> + / / / <nl> + / / / @ FUN { @ FA { edge } . getOutVertex ( ) } <nl> + / / / <nl> + / / / Returns the vertex at the tail of the @ FA { edge } . <nl> + / / / <nl> + / / / @ EXAMPLES <nl> + / / / <nl> + / / / @ verbinclude graph - edge - get - out - vertex <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + Edge . prototype . getOutVertex = function ( ) { <nl> + return this . _graph . getVertex ( this . _properties . _from ) ; <nl> + } ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief returns the other vertex <nl> + / / / <nl> + / / / @ FUN { @ FA { edge } . getPeerVertex ( @ FA { vertex } ) } <nl> + / / / <nl> + / / / Returns the peer vertex of the @ FA { edge } and the @ FA { vertex } . 
<nl> + / / / <nl> + / / / @ EXAMPLES <nl> + / / / <nl> + / / / @ code <nl> + / / / arango > v1 = g . addVertex ( " 1 " ) ; <nl> + / / / Vertex ( " 1 " ) <nl> + / / / <nl> + / / / arango > v2 = g . addVertex ( " 2 " ) ; <nl> + / / / Vertex ( " 2 " ) <nl> + / / / <nl> + / / / arango > e = g . addEdge ( v1 , v2 , " 1 - > 2 " , " knows " ) ; <nl> + / / / Edge ( " 1 - > 2 " ) <nl> + / / / <nl> + / / / arango > e . getPeerVertex ( v1 ) ; <nl> + / / / Vertex ( 2 ) <nl> + / / / @ endcode <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + Edge . prototype . getPeerVertex = function ( vertex ) { <nl> + if ( vertex . _properties . _id = = = this . _properties . _to ) { <nl> + return this . _graph . getVertex ( this . _properties . _from ) ; <nl> + } <nl> + <nl> + if ( vertex . _properties . _id = = = this . _properties . _from ) { <nl> + return this . _graph . getVertex ( this . _properties . _to ) ; <nl> + } <nl> + <nl> + return null ; <nl> + } ; <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ } <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / js / server / modules / org / arangodb / graph . js <nl> ppp b / js / server / modules / org / arangodb / graph . js <nl> var findOrCreateCollectionByName = function ( name ) { <nl> <nl> if ( col = = = null ) { <nl> col = db . _create ( name ) ; <nl> - } <nl> - else if ( ! ( col instanceof ArangoCollection ) | | col . type ( ) ! = = ArangoCollection . TYPE_DOCUMENT ) { <nl> + } else if ( ! ( col instanceof ArangoCollection ) | | col . type ( ) ! = = ArangoCollection . TYPE_DOCUMENT ) { <nl> throw " < " + name + " > must be a document collection " ; <nl> } <nl> <nl> var findOrCreateEdgeCollectionByName = function ( name ) { <nl> <nl> if ( col = = = null ) { <nl> col = db . _createEdgeCollection ( name ) ; <nl> - } <nl> - else if ( ! ( col instanceof ArangoCollection ) | | col . type ( ) ! = = ArangoCollection . TYPE_EDGE ) { <nl> + } else if ( ! ( col instanceof ArangoCollection ) | | col . type ( ) ! = = ArangoCollection . 
TYPE_EDGE ) { <nl> throw " < " + name + " > must be an edge collection " ; <nl> } <nl> <nl> var findOrCreateEdgeCollectionByName = function ( name ) { <nl> / / / @ } <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - Edge <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - constructors and destructors <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoGraph <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief constructs a new edge object <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - public methods <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> var findOrCreateEdgeCollectionByName = function ( name ) { <nl> / / / @ { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief returns the to vertex <nl> - / / / <nl> - / / / @ FUN { @ FA { edge } . getInVertex ( ) } <nl> - / / / <nl> - / / / Returns the vertex at the head of the @ FA { edge } . <nl> - / / / <nl> - / / / @ EXAMPLES <nl> - / / / <nl> - / / / @ verbinclude graph - edge - get - in - vertex <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - Edge . prototype . getInVertex = function ( ) { <nl> - return this . _graph . constructVertex ( this . _properties . _to ) ; <nl> - } ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief returns the from vertex <nl> - / / / <nl> - / / / @ FUN { @ FA { edge } . 
getOutVertex ( ) } <nl> - / / / <nl> - / / / Returns the vertex at the tail of the @ FA { edge } . <nl> - / / / <nl> - / / / @ EXAMPLES <nl> - / / / <nl> - / / / @ verbinclude graph - edge - get - out - vertex <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - Edge . prototype . getOutVertex = function ( ) { <nl> - return this . _graph . constructVertex ( this . _properties . _from ) ; <nl> - } ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief returns the other vertex <nl> - / / / <nl> - / / / @ FUN { @ FA { edge } . getPeerVertex ( @ FA { vertex } ) } <nl> - / / / <nl> - / / / Returns the peer vertex of the @ FA { edge } and the @ FA { vertex } . <nl> - / / / <nl> - / / / @ EXAMPLES <nl> - / / / <nl> - / / / @ code <nl> - / / / arango > v1 = g . addVertex ( " 1 " ) ; <nl> - / / / Vertex ( " 1 " ) <nl> - / / / <nl> - / / / arango > v2 = g . addVertex ( " 2 " ) ; <nl> - / / / Vertex ( " 2 " ) <nl> - / / / <nl> - / / / arango > e = g . addEdge ( v1 , v2 , " 1 - > 2 " , " knows " ) ; <nl> - / / / Edge ( " 1 - > 2 " ) <nl> - / / / <nl> - / / / arango > e . getPeerVertex ( v1 ) ; <nl> - / / / Vertex ( 2 ) <nl> - / / / @ endcode <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - Edge . prototype . getPeerVertex = function ( vertex ) { <nl> - if ( vertex . _properties . _id = = = this . _properties . _to ) { <nl> - return this . _graph . constructVertex ( this . _properties . _from ) ; <nl> - } <nl> - <nl> - if ( vertex . _properties . _id = = = this . _properties . _from ) { <nl> - return this . _graph . constructVertex ( this . _properties . _to ) ; <nl> - } <nl> - <nl> - return null ; <nl> - } ; <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief changes a property of an edge <nl> / / / <nl>
Graph Module: Edge almost entirely pulled into Common
arangodb/arangodb
38327183e8ab5ab9aa7beed906896f19104c210e
2013-06-19T15:09:18Z
mmm a / tensorflow / contrib / rnn / BUILD <nl> ppp b / tensorflow / contrib / rnn / BUILD <nl> cuda_py_tests ( <nl> " / / tensorflow / python : variable_scope " , <nl> " / / tensorflow / python : variables " , <nl> ] , <nl> + tags = [ " optonly " ] , <nl> xla_enabled = True , <nl> ) <nl> <nl>
Run //tensorflow/contrib/rnn:rnn_cell_test in optonly mode.
tensorflow/tensorflow
421802c1b4ce8846e398b10ac226bb8499d2f6a6
2019-04-25T20:10:59Z
mmm a / cocos / network / SocketIO . cpp <nl> ppp b / cocos / network / SocketIO . cpp <nl> void SIOClientImpl : : onClose ( WebSocket * ws ) <nl> { <nl> iter - > second - > socketClosed ( ) ; <nl> } <nl> + / / discard this client <nl> + _connected = false ; <nl> + Director : : getInstance ( ) - > getScheduler ( ) - > unscheduleAllForTarget ( this ) ; <nl> + SocketIO : : getInstance ( ) - > removeSocket ( _uri ) ; <nl> } <nl> <nl> this - > release ( ) ; <nl>
Merge pull request from pandemosth/socketIO_issue_14287
cocos2d/cocos2d-x
949112fdc2f73d626d497d6b0c288baa6c0b74e1
2016-04-21T06:28:21Z
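The fix above makes the socket.io client tear itself down when the underlying WebSocket closes: it marks itself disconnected, unschedules its timers, and removes itself from the URI-keyed registry so the dead connection cannot be handed out again. Below is a generic C++ sketch of that teardown; Registry, Scheduler and Client are illustrative stand-ins, not cocos2d-x API.

```cpp
#include <map>
#include <string>

struct Scheduler {
  void unscheduleAllFor(void* /*target*/) { /* cancel heartbeats, retries, ... */ }
};

struct Client;

struct Registry {
  std::map<std::string, Client*> sockets;
  void removeSocket(const std::string& uri) { sockets.erase(uri); }
};

struct Client {
  std::string uri;
  bool connected = true;

  void onClose(Registry& registry, Scheduler& scheduler) {
    connected = false;                 // discard this client
    scheduler.unscheduleAllFor(this);  // stop any work scheduled against it
    registry.removeSocket(uri);        // make the stale connection unreachable
  }
};

int main() {
  Registry registry;
  Scheduler scheduler;

  Client client;
  client.uri = "ws://example.test/socket.io";
  registry.sockets[client.uri] = &client;

  client.onClose(registry, scheduler);
  return registry.sockets.empty() ? 0 : 1;
}
```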
mmm a / tensorflow / python / ops / nn_ops . py <nl> ppp b / tensorflow / python / ops / nn_ops . py <nl> def _with_space_to_batch_call ( self , inp , filter ) : # pylint : disable = redefined - b <nl> <nl> # Recover channel information for output shape if channels are not last . <nl> if self . data_format is not None and self . data_format . startswith ( " NC " ) : <nl> - if not result_converted . shape [ 1 ] . value : <nl> + if not result_converted . shape [ 1 ] . value and filter is not None : <nl> output_shape = result_converted . shape . as_list ( ) <nl> output_shape [ 1 ] = filter . shape [ - 1 ] <nl> result_converted . set_shape ( output_shape ) <nl>
Fix NoneType error in tf.nn.depthwise_conv2d with unknown shape
tensorflow/tensorflow
2007f9752e116c46cb82c08a54f5c5e711a7c59d
2018-09-07T03:44:47Z
mmm a / third_party / nanopb <nl> ppp b / third_party / nanopb <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit f8ac463766281625ad710900479130c7fcb4d63b <nl> + Subproject commit 68a86e96481e6bea987df8de47027847b30c325b <nl>
Fix submodule
grpc/grpc
4398b1a13e993c55b50fbfe4daf61bcc8ea70506
2016-09-23T17:50:04Z
mmm a / version . pri <nl> ppp b / version . pri <nl> <nl> os2 { <nl> - DEFINES + = VERSION = \ ' \ " v2 . 6 . 0beta3 \ " \ ' <nl> + DEFINES + = VERSION = \ ' \ " v2 . 6 . 0beta4 \ " \ ' <nl> } else { <nl> - DEFINES + = VERSION = \ \ \ " v2 . 6 . 0beta3 \ \ \ " <nl> + DEFINES + = VERSION = \ \ \ " v2 . 6 . 0beta4 \ \ \ " <nl> } <nl> DEFINES + = VERSION_MAJOR = 2 <nl> DEFINES + = VERSION_MINOR = 6 <nl>
Bump to beta4
qbittorrent/qBittorrent
6f6b938dfc58323d345b20809314c1b05008b3cc
2011-01-01T09:20:41Z
similarity index 92 % <nl> rename from docs / Testing . rst <nl> rename to docs / Testing . md <nl> mmm a / docs / Testing . rst <nl> ppp b / docs / Testing . md <nl> <nl> - : orphan : <nl> <nl> - . . @ raise litre . TestsAreMissing <nl> - <nl> - = = = = = = = = = = = = = <nl> - Testing Swift <nl> - = = = = = = = = = = = = = <nl> + # Testing Swift <nl> <nl> This document describes how we test the Swift compiler , the Swift runtime , and <nl> the Swift standard library . <nl> <nl> - Testing approaches <nl> - = = = = = = = = = = = = = = = = = = <nl> + # # Testing approaches <nl> <nl> We use multiple approaches to test the Swift toolchain . <nl> <nl> * LLVM lit - based testsuites for the compiler , runtime and the standard library . <nl> - <nl> * Unit tests for sub - tools . <nl> - <nl> * A selection of open source projects written in Swift . <nl> <nl> - The LLVM lit - based testsuite <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + # # The LLVM lit - based testsuite <nl> <nl> * * Purpose * * : primary testsuites for the Swift toolchain . <nl> <nl> The LLVM lit - based testsuite <nl> * Engineers and contributors are expected to run tests from these testsuites <nl> locally before committing . ( Usually on a single platform , and not necessarily <nl> all tests . ) <nl> - <nl> * Buildbots run all tests , on all supported platforms . <nl> <nl> - Testsuite subsets <nl> mmmmmmmmmmmmmmmmmm <nl> + # # # Testsuite subsets <nl> <nl> The testsuite is split into four subsets : <nl> <nl> * Primary testsuite , located under ` ` swift / test ` ` . <nl> - <nl> * Validation testsuite , located under ` ` swift / validation - test ` ` . <nl> - <nl> * Unit tests , located under ` ` swift / unittests ` ` . <nl> - <nl> * Long tests , which are marked with ` ` REQUIRES : long_test ` ` . <nl> <nl> - Unlike other tests , every long test should also include either <nl> - ` ` REQUIRES : nonexecutable_test ` ` or ` ` REQUIRES : executable_test ` ` . <nl> + Unlike other tests , every long test should also include either <nl> + ` ` REQUIRES : nonexecutable_test ` ` or ` ` REQUIRES : executable_test ` ` . <nl> <nl> - Running the LLVM lit - based testsuite <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + # # # Running the LLVM lit - based testsuite <nl> <nl> It is recommended that you run the Swift test suites via ` ` utils / build - script ` ` . <nl> For day - to - day work on the Swift compiler , using ` ` utils / build - script - - test ` ` <nl> technically possible to execute the tests directly via CMake . For example , if yo <nl> built Swift products at the directory ` ` build / Ninja - ReleaseAssert / swift - macosx - x86_64 ` ` , <nl> you may run the entire test suite directly using the following command : <nl> <nl> - . . code - block : : bash <nl> - <nl> + ` ` ` <nl> cmake - - build build / Ninja - ReleaseAssert / swift - macosx - x86_64 - - check - swift - macosx - x86_64 <nl> + ` ` ` <nl> <nl> Note that ` ` check - swift ` ` is suffixed with a target operating system and architecture . <nl> Besides ` ` check - swift ` ` , other targets are also available . Here ' s the full list : <nl> <nl> - * ` ` check - swift ` ` <nl> - <nl> - Runs tests from the ` ` $ { SWIFT_SOURCE_ROOT } / test ` ` directory . <nl> - <nl> - * ` ` check - swift - only_validation ` ` <nl> - <nl> - Runs tests from the ` ` $ { SWIFT_SOURCE_ROOT } / validation - test ` ` directory . <nl> - <nl> - * ` ` check - swift - validation ` ` <nl> - <nl> - Runs the primary and validation tests , without the long tests . 
<nl> - <nl> - * ` ` check - swift - only_long ` ` <nl> - <nl> - Runs long tests only . <nl> - <nl> - * ` ` check - swift - all ` ` <nl> - <nl> - Runs all tests ( primary , validation , and long ) . <nl> - <nl> - * ` ` SwiftUnitTests ` ` <nl> - <nl> - Builds all unit tests . Executables are located under <nl> + * ` ` check - swift ` ` : Runs tests from the ` ` $ { SWIFT_SOURCE_ROOT } / test ` ` directory . <nl> + * ` ` check - swift - only_validation ` ` : Runs tests from the ` ` $ { SWIFT_SOURCE_ROOT } / validation - test ` ` directory . <nl> + * ` ` check - swift - validation ` ` : Runs the primary and validation tests , without the long tests . <nl> + * ` ` check - swift - only_long ` ` : Runs long tests only . <nl> + * ` ` check - swift - all ` ` : Runs all tests ( primary , validation , and long ) . <nl> + * ` ` SwiftUnitTests ` ` : Builds all unit tests . Executables are located under <nl> ` ` $ { SWIFT_BUILD_ROOT } / unittests ` ` and must be run individually . <nl> <nl> For every target above , there are variants for different optimizations : <nl> <nl> * the target itself ( e . g . , ` ` check - swift ` ` ) - - runs all tests from the primary <nl> testsuite . The execution tests are run in ` ` - Onone ` ` mode . <nl> - <nl> * the target with ` ` - optimize ` ` suffix ( e . g . , ` ` check - swift - optimize ` ` ) - - runs <nl> execution tests in ` ` - O ` ` mode . This target will only run tests marked as <nl> ` ` executable_test ` ` . <nl> - <nl> * the target with ` ` - optimize - unchecked ` ` suffix ( e . g . , <nl> ` ` check - swift - optimize - unchecked ` ` ) - - runs execution tests in <nl> ` ` - Ounchecked ` ` mode . This target will only run tests marked as <nl> ` ` executable_test ` ` . <nl> - <nl> * the target with ` ` - executable ` ` suffix ( e . g . , <nl> ` ` check - swift - executable - iphoneos - arm64 ` ` ) - - runs tests marked with <nl> ` ` executable_test ` ` in ` ` - Onone ` ` mode . <nl> - <nl> * the target with ` ` - non - executable ` ` suffix ( e . g . , <nl> ` ` check - swift - non - executable - iphoneos - arm64 ` ` ) - - runs tests not marked with <nl> ` ` executable_test ` ` in ` ` - Onone ` ` mode . <nl> For every target above , there are variants for different optimizations : <nl> If more control is required ( e . g . to manually run certain tests ) , you can invoke <nl> LLVM ' s lit . py script directly . For example : <nl> <nl> - . . code - block : : bash <nl> - <nl> + ` ` ` <nl> % $ { LLVM_SOURCE_ROOT } / utils / lit / lit . py - sv $ { SWIFT_BUILD_ROOT } / test - iphonesimulator - i386 / Parse / <nl> + ` ` ` <nl> <nl> This runs the tests in the test / Parse / directory targeting the 32 - bit iOS <nl> Simulator . The ` ` - sv ` ` options give you a nice progress bar and only show you <nl> source ' test / ' directory . ) There is a more verbose form that specifies the <nl> testing configuration explicitly , which then allows you to test files <nl> regardless of location . <nl> <nl> - . . code - block : : bash <nl> - <nl> + ` ` ` <nl> % $ { LLVM_SOURCE_ROOT } / utils / lit / lit . py - sv - - param swift_site_config = $ { SWIFT_BUILD_ROOT } / test - iphonesimulator - i386 / lit . site . cfg $ { SWIFT_SOURCE_ROOT } / test / Parse / <nl> + ` ` ` <nl> <nl> For more complicated configuration , copy the invocation from one of the build <nl> targets mentioned above and modify it as necessary . lit . py also has several <nl> useful features , like timing tests and providing a timeout . Check these features <nl> out with ` ` lit . py - h ` ` . 
We document some of the more useful ones below : <nl> <nl> - Extra lit . py invocation options <nl> - ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ <nl> + # # # # Extra lit . py invocation options <nl> <nl> * ` ` - s ` ` reduces the amount of output that lit shows . <nl> - <nl> * ` ` - v ` ` causes a test ' s commandline and output to be printed if the test fails . <nl> - <nl> * ` ` - a ` ` causes a test ' s commandline and output to always be printed . <nl> - <nl> * ` ` - - filter = < pattern > ` ` causes only tests with paths matching the given regular <nl> expression to be run . <nl> - <nl> * ` ` - i ` ` causes tests that have a newer modification date and failing tests to <nl> be run first . This is implemented by updating the mtimes of the tests . <nl> - <nl> * ` ` - - no - execute ` ` causes a dry run to be performed . * NOTE * This means that all <nl> tests are assumed to PASS . <nl> - <nl> * ` ` - - time - tests ` ` will cause elapsed wall time to be tracked for each test . <nl> - <nl> * ` ` - - timeout = < MAXINDIVIDUALTESTTIME > ` ` sets a maximum time that can be spent <nl> running a single test ( in seconds ) . 0 ( the default means no time limit . <nl> - <nl> * ` ` - - max - failures = < MAXFAILURES > ` ` stops execution after ` ` MAXFAILURES ` ` number <nl> of failures . <nl> - <nl> * ` ` - - param gmalloc ` ` will run all tests under Guard Malloc ( macOS only ) . See <nl> ` ` man libgmalloc ` ` for more information . <nl> - <nl> * ` ` - - param swift - version = < MAJOR > ` ` overrides the default Swift language <nl> version used by swift / swiftc and swift - ide - test . <nl> - <nl> * ` ` - - param interpret ` ` is an experimental option for running execution tests <nl> using Swift ' s interpreter rather than compiling them first . Note that this <nl> does not affect all substitutions . <nl> - <nl> * ` ` - - param swift_test_mode = < MODE > ` ` drives the various suffix variations <nl> mentioned above . Again , it ' s best to get the invocation from the existing <nl> build system targets and modify it rather than constructing it yourself . <nl> <nl> - Writing tests <nl> mmmmmmmmmmmm - - <nl> + # # # Writing tests <nl> <nl> - General guidelines <nl> - ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ <nl> + # # # # General guidelines <nl> <nl> When adding a new testcase , try to find an existing test file focused on the <nl> same topic rather than starting a new test file . There is a fixed runtime cost <nl> details of the standard library ( unless doing so is point of the test ) . <nl> Platform - dependent details include : <nl> <nl> * ` ` Int ` ` ( use integer types with explicit types instead ) . <nl> - <nl> * Layout of ` ` String ` ` , ` ` Array ` ` , ` ` Dictionary ` ` , ` ` Set ` ` . These differ <nl> between platforms that have Objective - C interop and those that don ' t . <nl> <nl> standard library that only has a very basic set of APIs . <nl> If you write an executable test please add ` ` REQUIRES : executable_test ` ` to the <nl> test . <nl> <nl> - Substitutions in lit tests <nl> - ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ <nl> + # # # # Substitutions in lit tests <nl> <nl> Substitutions that start with ` ` % target ` ` configure the compiler for building <nl> code for the target that is not the build machine : <nl> Other substitutions : <nl> When writing a test where output ( or IR , SIL ) depends on the bitness of the <nl> target CPU , use this pattern : : <nl> <nl> + ` ` ` <nl> / / RUN : % target - swift - frontend . . . 
| % FileCheck - - check - prefix = CHECK - - check - prefix = CHECK - % target - ptrsize % s <nl> <nl> / / CHECK : common line <nl> target CPU , use this pattern : : <nl> / / CHECK : define @ foo ( ) { <nl> / / CHECK - 32 : integer_literal $ Builtin . Int32 , 0 <nl> / / CHECK - 64 : integer_literal $ Builtin . Int64 , 0 <nl> + ` ` ` <nl> <nl> When writing a test where output ( or IR , SIL ) depends on the target CPU itself , <nl> use this pattern : : <nl> <nl> + ` ` ` <nl> / / RUN : % target - swift - frontend . . . | % FileCheck - - check - prefix = CHECK - - check - prefix = CHECK - % target - cpu % s <nl> <nl> / / CHECK : common line <nl> use this pattern : : <nl> / / CHECK - arm64 : only for arm64 <nl> / / CHECK - powerpc64 : only for powerpc64 <nl> / / CHECK - powerpc64le : only for powerpc64le <nl> + ` ` ` <nl> <nl> - Features for ` ` REQUIRES ` ` and ` ` XFAIL ` ` <nl> - ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ <nl> + # # # # Features for ` ` REQUIRES ` ` and ` ` XFAIL ` ` <nl> <nl> FIXME : full list . <nl> <nl> FIXME : full list . <nl> * ` ` XFAIL : linux ` ` : tests that need to be adapted for Linux , for example parts <nl> that depend on Objective - C interop need to be split out . <nl> <nl> - Feature ` ` REQUIRES : executable_test ` ` <nl> - ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ <nl> + # # # # Feature ` ` REQUIRES : executable_test ` ` <nl> <nl> This feature marks an executable test . The test harness makes this feature <nl> generally available . It can be used to restrict the set of tests to run . <nl> <nl> - StdlibUnittest <nl> - ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ <nl> + # # # # StdlibUnittest <nl> <nl> Tests accept command line parameters , run StdlibUnittest - based test binary <nl> with ` ` - - help ` ` for more information . <nl> <nl> - Testing memory management in execution tests <nl> - ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ <nl> + # # # # Testing memory management in execution tests <nl> <nl> In execution tests , memory management testing should be performed <nl> using local variables enclosed in a closure passed to the standard <nl> library ` ` autoreleasepool ` ` function . For example : : <nl> <nl> + ` ` ` <nl> / / A counter that ' s decremented by Canary ' s deinitializer . <nl> var CanaryCount = 0 <nl> <nl> library ` ` autoreleasepool ` ` function . For example : : <nl> let canary = Canary ( ) <nl> } <nl> assert ( CanaryCount = = 1 , " canary was not released " ) <nl> + ` ` ` <nl> <nl> Memory management tests should be performed in a local scope because Swift does <nl> not guarantee the destruction of global variables . Code that needs to <nl>
Merge remote-tracking branch 'origin/master' into master-next
apple/swift
8831053b145a99874373bbaf77e59621d813e4ec
2017-07-05T07:14:45Z
mmm a / folly / io / IOBuf . cpp <nl> ppp b / folly / io / IOBuf . cpp <nl> size_t IOBufHash : : operator ( ) ( const IOBuf & buf ) const { <nl> return h1 ; <nl> } <nl> <nl> - bool IOBufEqual : : operator ( ) ( const IOBuf & a , const IOBuf & b ) const { <nl> + bool IOBufEqualTo : : operator ( ) ( const IOBuf & a , const IOBuf & b ) const { <nl> io : : Cursor ca ( & a ) ; <nl> io : : Cursor cb ( & b ) ; <nl> for ( ; ; ) { <nl> mmm a / folly / io / IOBuf . h <nl> ppp b / folly / io / IOBuf . h <nl> struct IOBufHash { <nl> / * * <nl> * Equality predicate for IOBuf objects . Compares data in the entire chain . <nl> * / <nl> - struct IOBufEqual { <nl> + struct IOBufEqualTo { <nl> bool operator ( ) ( const IOBuf & a , const IOBuf & b ) const ; <nl> bool operator ( ) ( const std : : unique_ptr < IOBuf > & a , <nl> const std : : unique_ptr < IOBuf > & b ) const { <nl> mmm a / folly / io / async / test / WriteChainAsyncTransportWrapperTest . cpp <nl> ppp b / folly / io / async / test / WriteChainAsyncTransportWrapperTest . cpp <nl> class TestWriteChainAsyncTransportWrapper : <nl> } ; <nl> <nl> MATCHER_P ( BufMatches , expected , " " ) { <nl> - folly : : IOBufEqual eq ; <nl> + folly : : IOBufEqualTo eq ; <nl> return eq ( * arg , * expected ) ; <nl> } <nl> <nl> mmm a / folly / io / test / IOBufCursorTest . cpp <nl> ppp b / folly / io / test / IOBufCursorTest . cpp <nl> TEST ( IOBuf , cloneAndInsert ) { <nl> } <nl> <nl> TEST ( IOBuf , cloneWithEmptyBufAtStart ) { <nl> - folly : : IOBufEqual eq ; <nl> + folly : : IOBufEqualTo eq ; <nl> auto empty = IOBuf : : create ( 0 ) ; <nl> auto hel = IOBuf : : create ( 3 ) ; <nl> append ( hel , " hel " ) ; <nl> mmm a / folly / io / test / IOBufTest . cpp <nl> ppp b / folly / io / test / IOBufTest . cpp <nl> std : : unique_ptr < IOBuf > fromStr ( StringPiece sp ) { <nl> } / / namespace <nl> <nl> TEST ( IOBuf , HashAndEqual ) { <nl> - folly : : IOBufEqual eq ; <nl> + folly : : IOBufEqualTo eq ; <nl> folly : : IOBufHash hash ; <nl> <nl> EXPECT_TRUE ( eq ( nullptr , nullptr ) ) ; <nl>
Rename IOBufEqual to IOBufEqualTo
facebook/folly
c816aeff274a808a9018dff748935f196d9b914f
2018-04-12T05:24:06Z
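The rename aligns folly's equality functor with the standard library's std::equal_to naming, so the Hash/EqualTo pair reads like the template parameters of an unordered container. A short sketch of that convention is below, using illustrative ByteRangeHash / ByteRangeEqualTo types rather than folly code.

```cpp
#include <cstddef>
#include <functional>
#include <string>
#include <unordered_set>

struct ByteRangeHash {
  std::size_t operator()(const std::string& s) const {
    return std::hash<std::string>{}(s);
  }
};

struct ByteRangeEqualTo {
  bool operator()(const std::string& a, const std::string& b) const {
    return a == b;
  }
};

int main() {
  // A *Hash / *EqualTo pair slots straight into unordered containers,
  // mirroring how an IOBufHash / IOBufEqualTo pair would typically be used.
  std::unordered_set<std::string, ByteRangeHash, ByteRangeEqualTo> seen;
  seen.insert("payload");
  return seen.count("payload") == 1 ? 0 : 1;
}
```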