Columns: diff (string, 41–2.03M chars), msg (string, 1–1.5k chars), repo (string, 5–40 chars), sha (string, 40 chars), time (string, 20 chars)
mmm a / . github / ISSUE_TEMPLATE . md <nl> ppp b / . github / ISSUE_TEMPLATE . md <nl> <nl> - This is a template helping you to create an issue which can be processes as quickly as possible . Feel free to add additional information or remove not relevant points if you do not need them . <nl> - <nl> + < ! - - <nl> If you have a question rather than reporting a bug please go to http : / / answers . opencv . org where you get much faster responses . <nl> + If you need further assistance please read [ How To Contribute ] ( https : / / github . com / Itseez / opencv / wiki / How_to_contribute ) . <nl> <nl> - # # # Please state the information for your system <nl> - - OpenCV version : 2 . 4 / 3 . x <nl> - - Host OS : Linux ( Ubuntu 14 . 04 ) / Mac OS X 10 . 11 . 3 / Windows 10 <nl> - - * ( if needed , only cross - platform builds ) * Target OS : host / Android 6 . 0 / ARM board / Raspberry Pi 2 <nl> - - * ( if needed ) * Compiler & CMake : GCC 5 . 3 & CMake 3 . 5 <nl> - <nl> - # # # In which part of the OpenCV library you got the issue ? <nl> - Examples : <nl> - - objdetect , highgui , imgproc , cuda , tests <nl> - - face recognition , resizing an image , reading an jpg image <nl> + This is a template helping you to create an issue which can be processed as quickly as possible . This is the bug reporting section for the OpenCV library . <nl> + - - > <nl> <nl> - # # # Expected behaviour <nl> + # # # # # System information ( version ) <nl> + < ! - - Example <nl> + - OpenCV = > 3 . 1 <nl> + - Operating System / Platform = > Windows 64 Bit <nl> + - Compiler = > Visual Studio 2015 <nl> + - - > <nl> <nl> - # # # Actual behaviour <nl> + - OpenCV = > : grey_question : <nl> + - Operating System / Platform = > : grey_question : <nl> + - Compiler = > : grey_question : <nl> <nl> - # # # Additional description <nl> + # # # # # Detailed description <nl> <nl> - # # # Code example to reproduce the issue / Steps to reproduce the issue <nl> - Please try to give a full example which will compile as is . <nl> - ` ` ` <nl> - # include " opencv2 / core . hpp " <nl> - # include < iostream > <nl> - using namespace std ; <nl> - using namespace cv ; <nl> + < ! - - your description - - > <nl> <nl> - int main ( ) <nl> - { <nl> - double d [ ] = { 546 , 2435 , 7 , 4534 , 23423 , 3 } ; <nl> - cout < < " d = 0x " < < reinterpret_cast < void * > ( d ) < < endl ; <nl> + # # # # # Steps to reproduce <nl> <nl> - return 0 ; <nl> - } <nl> - ` ` ` <nl> + < ! - - to add code example fence it with triple backticks and optional file extension <nl> + ` ` ` . cpp <nl> + / / C + + code example <nl> + ` ` ` <nl> + or attach as . txt or . zip file <nl> + - - > <nl> \ No newline at end of file <nl> mmm a / . github / PULL_REQUEST_TEMPLATE . md <nl> ppp b / . github / PULL_REQUEST_TEMPLATE . md <nl> <nl> - resolves # XXXX <nl> + < ! - - Please use this line to close one or multiple issues when this pullrequest gets merged <nl> + You can add another line right under the first one : <nl> + resolves # 1234 <nl> + resolves # 1235 <nl> + - - > <nl> <nl> - # # # What does this PR change ? <nl> - Please add your changes here . <nl> + # # # This pullrequest changes <nl> + <nl> + < ! - - Please describe what your pullrequest is changing - - > <nl>
Rephrase parts of the templates and use comments
opencv/opencv
61390e407d8fa7ad13b070f7ce0274cf9bab67fc
2016-07-02T15:33:06Z
mmm a / code / graph_algorithms / src / centroid_decomposition / centroid_decompostition . hpp <nl> ppp b / code / graph_algorithms / src / centroid_decomposition / centroid_decompostition . hpp <nl> <nl> <nl> class Graph <nl> { <nl> + std : : vector < int > empt ; <nl> int ver_ ; <nl> std : : vector < std : : vector < int > > adj_ ; <nl> std : : vector < std : : vector < int > > centroidTree_ ; <nl> std : : vector < int > sizes_ ; <nl> std : : vector < bool > marked_ ; <nl> <nl> - Graph ( int num ) : <nl> + Graph ( int num ) : <nl> ver_ ( num ) , <nl> - adj_ ( num + 1 , std : : vector < int > ) , <nl> - centroidTree_ ( num + 1 , std : : vector < int > ) , <nl> + adj_ ( num + 1 ) , <nl> + centroidTree_ ( num + 1 ) , <nl> sizes_ ( num + 1 ) , <nl> - marked_ ( num + 1 ) , <nl> + marked_ ( num + 1 ) <nl> { } <nl> <nl> void addEdge ( int x , int y , std : : vector < std : : vector < int > > & graph ) ; <nl> void calcSizes ( int cur , int par , std : : vector < std : : vector < int > > graph ) ; <nl> - int findCentroid ( int cur , int par , int vertices ) ; <nl> - void decomposeTree ( int cur , int total ) ; <nl> + int findCentroid ( int cur , int par , int vertices , std : : vector < std : : vector < int > > graph ) ; <nl> + void decomposeTree ( int cur , int par , int total , std : : vector < std : : vector < int > > graph ) ; <nl> } ; <nl> <nl> void Graph : : addEdge ( int x , int y , std : : vector < std : : vector < int > > & graph ) <nl> void Graph : : addEdge ( int x , int y , std : : vector < std : : vector < int > > & graph ) <nl> { <nl> throw std : : out_of_range { " y is out of boundaries " } ; <nl> } <nl> - arr [ x ] . push_back ( y ) ; <nl> + graph [ x ] . push_back ( y ) ; <nl> } <nl> <nl> void Graph : : calcSizes ( int cur , int par , std : : vector < std : : vector < int > > graph ) <nl> void Graph : : calcSizes ( int cur , int par , std : : vector < std : : vector < int > > graph ) <nl> { <nl> if ( ! ( to = = par | | marked_ [ to ] = = true ) ) <nl> { <nl> - calcSizes ( to , cur ) ; <nl> + calcSizes ( to , cur , graph ) ; <nl> sizes_ [ cur ] + = sizes_ [ to ] ; <nl> } <nl> } <nl> int Graph : : findCentroid ( int cur , int par , int vertices , std : : vector < std : : vector < <nl> { <nl> if ( ! ( to = = par | | marked_ [ to ] = = true ) ) <nl> { <nl> - if ( sizes [ to ] > vertices / 2 ) <nl> + if ( sizes_ [ to ] > vertices / 2 ) <nl> { <nl> - return findCentroid ( to , cur , vertices ) ; <nl> + return findCentroid ( to , cur , vertices , graph ) ; <nl> } <nl> } <nl> return cur ; <nl> } <nl> } <nl> <nl> - void Graph : : decomposeTree ( int cur , int par , int total ) <nl> + void Graph : : decomposeTree ( int cur , int par , int total , std : : vector < std : : vector < int > > graph ) <nl> { <nl> - calcSizes ( cur , - 1 ) ; <nl> - int centroid = findCentroid ( cur , - 1 , sizes [ cur ] ) ; <nl> - calcSizes ( centroid , - 1 ) ; <nl> + calcSizes ( cur , - 1 , graph ) ; <nl> + int centroid = findCentroid ( cur , - 1 , sizes_ [ cur ] , graph ) ; <nl> + calcSizes ( centroid , - 1 , graph ) ; <nl> marked_ [ centroid ] = true ; <nl> for ( const auto & to : adj_ [ cur ] ) <nl> { <nl> if ( ! ( to = = par | | marked_ [ to ] = = true ) ) <nl> { <nl> - decomposeTree ( to , cur , sizes [ to ] ) ; <nl> - addEdge ( cur , to , centroidTree ) ; <nl> + decomposeTree ( to , cur , sizes_ [ to ] , graph ) ; <nl> + addEdge ( cur , to , centroidTree_ ) ; <nl> } <nl> } <nl> } <nl>
Update centroid_decompostition . hpp
OpenGenus/cosmos
518c8762ef78df68e57964d70ac172f324e00bad
2018-03-06T04:26:50Z
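The patch above repairs undefined identifiers and missing parameters in a centroid-decomposition helper. As a reference point, here is a minimal, self-contained C++ sketch of the same technique on a 0-indexed adjacency list; the names (`adj`, `sz`, `marked`) are illustrative, not the repository's:

```cpp
#include <vector>

std::vector<std::vector<int>> adj;  // adjacency list of the tree
std::vector<int> sz;                // subtree sizes for the current component
std::vector<bool> marked;           // vertices already chosen as centroids

// Compute subtree sizes of the component containing cur, rooted at cur.
int calcSizes(int cur, int par) {
  sz[cur] = 1;
  for (int to : adj[cur])
    if (to != par && !marked[to]) sz[cur] += calcSizes(to, cur);
  return sz[cur];
}

// Walk toward the heavy child until no child exceeds half the component.
int findCentroid(int cur, int par, int total) {
  for (int to : adj[cur])
    if (to != par && !marked[to] && sz[to] > total / 2)
      return findCentroid(to, cur, total);
  return cur;
}

void decompose(int cur) {
  int total = calcSizes(cur, -1);
  int c = findCentroid(cur, -1, total);
  marked[c] = true;  // c is a node of the centroid tree
  for (int to : adj[c])
    if (!marked[to]) decompose(to);  // recurse on each remaining piece
}

int main() {
  int n = 5;
  adj.assign(n, {});
  sz.assign(n, 0);
  marked.assign(n, false);
  auto edge = [](int a, int b) { adj[a].push_back(b); adj[b].push_back(a); };
  edge(0, 1); edge(1, 2); edge(2, 3); edge(3, 4);  // a path of 5 vertices
  decompose(0);  // marks every vertex once as it becomes a centroid
}
```

Stopping at a vertex whose largest remaining piece is at most total/2 is what keeps the recursion depth of `decompose` logarithmic in the component size.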
mmm a / lib / AST / GenericSignatureBuilder . cpp <nl> ppp b / lib / AST / GenericSignatureBuilder . cpp <nl> ConstraintResult GenericSignatureBuilder : : expandConformanceRequirement ( <nl> <nl> auto inheritedReqResult = <nl> addInheritedRequirements ( proto , selfType . getUnresolvedType ( ) , source , <nl> - / * inferForModule = * / nullptr ) ; <nl> + proto - > getModuleContext ( ) ) ; <nl> if ( isErrorResult ( inheritedReqResult ) ) <nl> return inheritedReqResult ; <nl> } <nl> ConstraintResult GenericSignatureBuilder : : expandConformanceRequirement ( <nl> auto innerSource = FloatingRequirementSource : : viaProtocolRequirement ( <nl> source , proto , & req , / * inferred = * / false ) ; <nl> addRequirement ( & req , innerSource , & protocolSubMap , <nl> - / * inferForModule = * / nullptr ) ; <nl> + proto - > getModuleContext ( ) ) ; <nl> } <nl> } <nl> <nl> mmm a / test / Generics / requirement_inference . swift <nl> ppp b / test / Generics / requirement_inference . swift <nl> <nl> - / / RUN : % target - typecheck - verify - swift - typecheck % s - verify <nl> - / / RUN : % target - typecheck - verify - swift - typecheck - debug - generic - signatures % s > % t . dump 2 > & 1 <nl> + / / RUN : % target - typecheck - verify - swift - typecheck - verify <nl> + / / RUN : % target - typecheck - verify - swift - typecheck - debug - generic - signatures > % t . dump 2 > & 1 <nl> / / RUN : % FileCheck % s < % t . dump <nl> <nl> protocol P1 { <nl> extension X1WithP2 { <nl> _ = X5 < T > ( ) / / FIXME : expected - error { { type ' T ' does not conform to protocol ' P2 ' } } <nl> } <nl> } <nl> + <nl> + / / Inference from protocol inheritance clauses . <nl> + typealias ExistentialP4WithP2Assoc < T : P4 > = P4 where T . P4Assoc : P2 <nl> + <nl> + protocol P37 : ExistentialP4WithP2Assoc < Self > { } <nl> + <nl> + extension P37 { <nl> + func f ( ) { <nl> + _ = X5 < P4Assoc > ( ) / / requires P2 <nl> + } <nl> + } <nl>
[ GSB ] Infer requirements from protocol inheritance and where clauses .
apple/swift
ff5dc6081c1ed16097cb964a1ab44b06c8ff5099
2018-03-22T21:24:35Z
mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> else ( ) <nl> # Make a job pool for things that can ' t yet be distributed <nl> cmake_host_system_information ( <nl> RESULT localhost_logical_cores QUERY NUMBER_OF_LOGICAL_CORES ) <nl> - set_property ( GLOBAL PROPERTY JOB_POOLS local_jobs = $ { localhost_logical_cores } ) <nl> + set_property ( GLOBAL APPEND PROPERTY JOB_POOLS local_jobs = $ { localhost_logical_cores } ) <nl> # Put linking in that category <nl> set ( CMAKE_JOB_POOL_LINK local_jobs ) <nl> endif ( ) <nl>
[ cmake ] Do not override existing JOB_POOLS .
apple/swift
1f1ceb47e43bb5ca1d951bb146b0f8daca1f5f8f
2019-10-25T22:32:55Z
mmm a / doc / classes / DynamicFont . xml <nl> ppp b / doc / classes / DynamicFont . xml <nl> <nl> dynamic_font . size = 64 <nl> $ " Label " . set ( " custom_fonts / font " , dynamic_font ) <nl> [ / codeblock ] <nl> - [ b ] Note : [ / b ] DynamicFont doesn ' t support features such as right - to - left typesetting , ligatures , text shaping , variable fonts and optional font features yet . If you wish to " bake " an optional font feature into a TTF font file , you can use [ url = https : / / fontforge . org / ] FontForge [ / url ] to do so . In FontForge , use [ b ] File & gt ; Generate Fonts [ / b ] , click [ b ] Options [ / b ] , choose the desired features then generate the font . <nl> + [ b ] Note : [ / b ] DynamicFont doesn ' t support features such as kerning , right - to - left typesetting , ligatures , text shaping , variable fonts and optional font features yet . If you wish to " bake " an optional font feature into a TTF font file , you can use [ url = https : / / fontforge . org / ] FontForge [ / url ] to do so . In FontForge , use [ b ] File & gt ; Generate Fonts [ / b ] , click [ b ] Options [ / b ] , choose the desired features then generate the font . <nl> < / description > <nl> < tutorials > <nl> < / tutorials > <nl>
Document the lack of kerning support in DynamicFont
godotengine/godot
e506479fcefa24eac55b9e83c8fa71e257d869ad
2020-08-07T13:03:35Z
mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> include ( cmake / sanitizers . cmake ) <nl> # spdlog target <nl> # mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> <nl> - # Check if spdlog is being used directly or via add_subdirectory <nl> + # Check if spdlog is being used directly or via add_subdirectory , but allow overriding <nl> + if ( NOT DEFINED SPDLOG_MASTER_PROJECT ) <nl> if ( CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR ) <nl> set ( SPDLOG_MASTER_PROJECT ON ) <nl> else ( ) <nl> set ( SPDLOG_MASTER_PROJECT OFF ) <nl> endif ( ) <nl> + endif ( ) <nl> <nl> option ( BUILD_SHARED_LIBS " Global flag to cause add_library to create shared libraries if on . " ON ) <nl> option ( SPDLOG_BUILD_EXAMPLES " Build examples " $ { SPDLOG_MASTER_PROJECT } ) <nl>
Allowed overriding of ` SPDLOG_MASTER_PROJECT ` to better support Conan
gabime/spdlog
24e4f0aa872d1fb600520fcf9f57bb9a9cfcfbb7
2019-05-18T04:31:30Z
mmm a / docs / en / introduction / adopters . md <nl> ppp b / docs / en / introduction / adopters . md <nl> toc_title : Adopters <nl> | < a href = " https : / / db . com " class = " favicon " > Deutsche Bank < / a > | Finance | BI Analytics | — | — | [ Slides in English , October 2019 ] ( https : / / bigdatadays . ru / wp - content / uploads / 2019 / 10 / D2 - H3 - 3_Yakunin - Goihburg . pdf ) | <nl> | < a href = " https : / / www . diva - e . com " class = " favicon " > Diva - e < / a > | Digital consulting | Main Product | — | — | [ Slides in English , September 2019 ] ( https : / / github . com / ClickHouse / clickhouse - presentations / blob / master / meetup29 / ClickHouse - MeetUp - Unusual - Applications - sd - 2019 - 09 - 17 . pdf ) | <nl> | < a href = " https : / / www . ecwid . com / " class = " favicon " > Ecwid < / a > | E - commerce SaaS | Metrics , Logging | — | — | [ Slides in Russian , April 2019 ] ( https : / / nastachku . ru / var / files / 1 / presentation / backend / 2_Backend_6 . pdf ) | <nl> - | < a href = " https : / / www . ebay . com / " class = " favicon " > eBay < / a > | E - commerce | TBA | — | — | [ Webinar , Sep 2020 ] ( https : / / altinity . com / webinarspage / 2020 / 09 / 08 / migrating - from - druid - to - next - gen - olap - on - clickhouse - ebays - experience ) | <nl> + | < a href = " https : / / www . ebay . com / " class = " favicon " > eBay < / a > | E - commerce | Logs , Metrics and Events | — | — | [ Official website , Sep 2020 ] ( https : / / tech . ebayinc . com / engineering / ou - online - analytical - processing / ) | <nl> | < a href = " https : / / www . exness . com " class = " favicon " > Exness < / a > | Trading | Metrics , Logging | — | — | [ Talk in Russian , May 2019 ] ( https : / / youtu . be / _rpU - TvSfZ8 ? t = 3215 ) | <nl> | < a href = " https : / / fastnetmon . com / " class = " favicon " > FastNetMon < / a > | DDoS Protection | Main Product | | — | [ Official website ] ( https : / / fastnetmon . com / docs - fnm - advanced / fastnetmon - advanced - traffic - persistency / ) | <nl> | < a href = " https : / / www . flipkart . com / " class = " favicon " > Flipkart < / a > | e - Commerce | — | — | — | [ Talk in English , July 2020 ] ( https : / / youtu . be / GMiXCMFDMow ? t = 239 ) | <nl>
Update adopters . md
ClickHouse/ClickHouse
23fd72a3deb31a44272386c4a4399a05c58151be
2020-09-25T01:59:01Z
mmm a / folly / detail / ThreadLocalDetail . cpp <nl> ppp b / folly / detail / ThreadLocalDetail . cpp <nl> <nl> <nl> namespace folly { namespace threadlocal_detail { <nl> <nl> - void ThreadEntryNode : : initIfZero ( ) { <nl> - if ( UNLIKELY ( ! next ) ) { <nl> - next = prev = parent ; <nl> - parent - > meta - > pushBackLocked ( parent , id ) ; <nl> - } <nl> - } <nl> - <nl> - ThreadEntryNode * ThreadEntryNode : : getNext ( ) { <nl> - return & next - > elements [ id ] . node ; <nl> - } <nl> - <nl> - void ThreadEntryNode : : push_back ( ThreadEntry * head ) { <nl> - / / get the head prev and next nodes <nl> - ThreadEntryNode * hnode = & head - > elements [ id ] . node ; <nl> - <nl> - / / update current <nl> - next = head ; <nl> - prev = hnode - > prev ; <nl> - <nl> - / / hprev <nl> - ThreadEntryNode * hprev = & hnode - > prev - > elements [ id ] . node ; <nl> - hprev - > next = parent ; <nl> - hnode - > prev = parent ; <nl> - } <nl> - <nl> - void ThreadEntryNode : : eraseZero ( ) { <nl> - if ( LIKELY ( prev ! = nullptr ) ) { <nl> - / / get the prev and next nodes <nl> - ThreadEntryNode * nprev = & prev - > elements [ id ] . node ; <nl> - ThreadEntryNode * nnext = & next - > elements [ id ] . node ; <nl> - <nl> - / / update the prev and next <nl> - nnext - > prev = prev ; <nl> - nprev - > next = next ; <nl> - <nl> - / / set the prev and next to nullptr <nl> - next = prev = nullptr ; <nl> - } <nl> - } <nl> - <nl> StaticMetaBase : : StaticMetaBase ( ThreadEntry * ( * threadEntry ) ( ) , bool strict ) <nl> : nextId_ ( 1 ) , threadEntry_ ( threadEntry ) , strict_ ( strict ) { <nl> head_ . next = head_ . prev = & head_ ; <nl> void StaticMetaBase : : onThreadExit ( void * ptr ) { <nl> } <nl> { <nl> std : : lock_guard < std : : mutex > g ( meta . lock_ ) ; <nl> - / / mark it as removed <nl> - threadEntry - > removed_ = true ; <nl> meta . erase ( & ( * threadEntry ) ) ; <nl> - FOR_EACH_RANGE ( i , 0 , threadEntry - > elementsCapacity ) { <nl> - threadEntry - > elements [ i ] . node . eraseZero ( ) ; <nl> - } <nl> / / No need to hold the lock any longer ; the ThreadEntry is private to this <nl> / / thread now that it ' s been removed from meta . <nl> } <nl> void StaticMetaBase : : onThreadExit ( void * ptr ) { <nl> if ( tmp - > elements [ i ] . dispose ( TLPDestructionMode : : THIS_THREAD ) ) { <nl> shouldRunInner = true ; <nl> shouldRunOuter = true ; <nl> - <nl> - / / now delete the entry if not zero <nl> - std : : lock_guard < std : : mutex > g ( meta . lock_ ) ; <nl> - tmp - > elements [ i ] . node . eraseZero ( ) ; <nl> } <nl> } <nl> } <nl> uint32_t StaticMetaBase : : allocate ( EntryID * ent ) { <nl> <nl> uint32_t old_id = ent - > value . exchange ( id ) ; <nl> DCHECK_EQ ( old_id , kEntryIDInvalid ) ; <nl> - <nl> - reserveHeadUnlocked ( id ) ; <nl> - <nl> return id ; <nl> } <nl> <nl> void StaticMetaBase : : destroy ( EntryID * ent ) { <nl> return ; <nl> } <nl> <nl> - auto & node = meta . head_ . elements [ id ] . node ; <nl> - while ( ! node . empty ( ) ) { <nl> - auto * next = node . getNext ( ) ; <nl> - next - > eraseZero ( ) ; <nl> - <nl> - ThreadEntry * e = next - > parent ; <nl> - <nl> + for ( ThreadEntry * e = meta . head_ . next ; e ! = & meta . head_ ; e = e - > next ) { <nl> if ( id < e - > elementsCapacity & & e - > elements [ id ] . ptr ) { <nl> elements . push_back ( e - > elements [ id ] ) ; <nl> <nl> void StaticMetaBase : : destroy ( EntryID * ent ) { <nl> * it ' s illegal to call get on a thread local that ' s <nl> * destructing . <nl> * / <nl> - <nl> e - > elements [ id ] . 
ptr = nullptr ; <nl> e - > elements [ id ] . deleter1 = nullptr ; <nl> e - > elements [ id ] . ownsDeleter = false ; <nl> } <nl> } <nl> - <nl> meta . freeIds_ . push_back ( id ) ; <nl> } <nl> } <nl> void StaticMetaBase : : destroy ( EntryID * ent ) { <nl> } <nl> } <nl> <nl> - ElementWrapper * FOLLY_NULLABLE StaticMetaBase : : reallocate ( <nl> - ThreadEntry * threadEntry , <nl> - uint32_t idval , <nl> - size_t & newCapacity ) { <nl> + / * * <nl> + * Reserve enough space in the ThreadEntry : : elements for the item <nl> + * @ id to fit in . <nl> + * / <nl> + void StaticMetaBase : : reserve ( EntryID * id ) { <nl> + auto & meta = * this ; <nl> + ThreadEntry * threadEntry = ( * threadEntry_ ) ( ) ; <nl> size_t prevCapacity = threadEntry - > elementsCapacity ; <nl> <nl> + uint32_t idval = id - > getOrAllocate ( meta ) ; <nl> + if ( prevCapacity > idval ) { <nl> + return ; <nl> + } <nl> / / Growth factor < 2 , see folly / docs / FBVector . md ; + 5 to prevent <nl> / / very slow start . <nl> - newCapacity = static_cast < size_t > ( ( idval + 5 ) * 1 . 7 ) ; <nl> + size_t newCapacity = static_cast < size_t > ( ( idval + 5 ) * 1 . 7 ) ; <nl> assert ( newCapacity > prevCapacity ) ; <nl> ElementWrapper * reallocated = nullptr ; <nl> <nl> ElementWrapper * FOLLY_NULLABLE StaticMetaBase : : reallocate ( <nl> } <nl> } <nl> <nl> - return reallocated ; <nl> - } <nl> - <nl> - / * * <nl> - * Reserve enough space in the ThreadEntry : : elements for the item <nl> - * @ id to fit in . <nl> - * / <nl> - <nl> - void StaticMetaBase : : reserve ( EntryID * id ) { <nl> - auto & meta = * this ; <nl> - ThreadEntry * threadEntry = ( * threadEntry_ ) ( ) ; <nl> - size_t prevCapacity = threadEntry - > elementsCapacity ; <nl> - <nl> - uint32_t idval = id - > getOrAllocate ( meta ) ; <nl> - if ( prevCapacity > idval ) { <nl> - return ; <nl> - } <nl> - <nl> - size_t newCapacity ; <nl> - ElementWrapper * reallocated = reallocate ( threadEntry , idval , newCapacity ) ; <nl> - <nl> / / Success , update the entry <nl> { <nl> std : : lock_guard < std : : mutex > g ( meta . lock_ ) ; <nl> void StaticMetaBase : : reserve ( EntryID * id ) { <nl> } <nl> std : : swap ( reallocated , threadEntry - > elements ) ; <nl> } <nl> - <nl> - for ( size_t i = prevCapacity ; i < newCapacity ; i + + ) { <nl> - threadEntry - > elements [ i ] . node . initZero ( threadEntry , i ) ; <nl> - } <nl> - <nl> threadEntry - > elementsCapacity = newCapacity ; <nl> } <nl> <nl> free ( reallocated ) ; <nl> } <nl> <nl> - void StaticMetaBase : : reserveHeadUnlocked ( uint32_t id ) { <nl> - if ( head_ . elementsCapacity < = id ) { <nl> - size_t prevCapacity = head_ . elementsCapacity ; <nl> - size_t newCapacity ; <nl> - ElementWrapper * reallocated = reallocate ( & head_ , id , newCapacity ) ; <nl> - <nl> - if ( reallocated ) { <nl> - if ( prevCapacity ! = 0 ) { <nl> - memcpy ( <nl> - reallocated , head_ . elements , sizeof ( * reallocated ) * prevCapacity ) ; <nl> - } <nl> - std : : swap ( reallocated , head_ . elements ) ; <nl> - } <nl> - head_ . elementsCapacity = newCapacity ; <nl> - free ( reallocated ) ; <nl> - } <nl> - <nl> - head_ . elements [ id ] . node . init ( & head_ , id ) ; <nl> - } <nl> - <nl> - void StaticMetaBase : : pushBackLocked ( ThreadEntry * t , uint32_t id ) { <nl> - if ( LIKELY ( ! t - > removed_ ) ) { <nl> - auto * node = & t - > elements [ id ] . 
node ; <nl> - std : : lock_guard < std : : mutex > g ( lock_ ) ; <nl> - node - > push_back ( & head_ ) ; <nl> - } <nl> - } <nl> <nl> FOLLY_STATIC_CTOR_PRIORITY_MAX <nl> PthreadKeyUnregister PthreadKeyUnregister : : instance_ ; <nl> mmm a / folly / detail / ThreadLocalDetail . h <nl> ppp b / folly / detail / ThreadLocalDetail . h <nl> struct AccessModeStrict { } ; <nl> <nl> namespace threadlocal_detail { <nl> <nl> - constexpr uint32_t kEntryIDInvalid = std : : numeric_limits < uint32_t > : : max ( ) ; <nl> - <nl> - struct ThreadEntry ; <nl> - / * This represents a node in doubly linked list where all the nodes <nl> - * are part of an ElementWrapper struct that has the same id . <nl> - * we cannot use prev and next as ThreadEntryNode pointers since the <nl> - * ThreadEntry : : elements can be reallocated and the pointers will change <nl> - * in this case . So we keep a pointer to the parent ThreadEntry struct <nl> - * one for the prev and next and also the id . <nl> - * We will traverse and update the list only when holding the <nl> - * StaticMetaBase : : lock_ <nl> - * / <nl> - struct ThreadEntryNode { <nl> - uint32_t id ; <nl> - ThreadEntry * parent ; <nl> - ThreadEntry * prev ; <nl> - ThreadEntry * next ; <nl> - <nl> - void initIfZero ( ) ; <nl> - <nl> - void init ( ThreadEntry * entry , uint32_t newId ) { <nl> - id = newId ; <nl> - prev = next = parent = entry ; <nl> - } <nl> - <nl> - void initZero ( ThreadEntry * entry , uint32_t newId ) { <nl> - id = newId ; <nl> - parent = entry ; <nl> - prev = next = nullptr ; <nl> - } <nl> - <nl> - / / if the list this node is part of is empty <nl> - bool empty ( ) const { <nl> - return ( next = = parent ) ; <nl> - } <nl> - <nl> - ThreadEntryNode * getNext ( ) ; <nl> - <nl> - void push_back ( ThreadEntry * head ) ; <nl> - <nl> - void eraseZero ( ) ; <nl> - } ; <nl> - <nl> / * * <nl> * POD wrapper around an element ( a void * ) and an associated deleter . <nl> * This must be POD , as we memset ( ) it to 0 and memcpy ( ) it around . <nl> struct ElementWrapper { <nl> DCHECK ( deleter1 = = nullptr ) ; <nl> <nl> if ( p ) { <nl> - node . initIfZero ( ) ; <nl> ptr = p ; <nl> deleter1 = [ ] ( void * pt , TLPDestructionMode ) { <nl> delete static_cast < Ptr > ( pt ) ; <nl> struct ElementWrapper { <nl> DCHECK ( ptr = = nullptr ) ; <nl> DCHECK ( deleter2 = = nullptr ) ; <nl> if ( p ) { <nl> - node . initIfZero ( ) ; <nl> ptr = p ; <nl> auto d2 = d ; / / gcc - 4 . 
8 doesn ' t decay types correctly in lambda captures <nl> deleter2 = new std : : function < DeleterFunType > ( <nl> struct ElementWrapper { <nl> std : : function < DeleterFunType > * deleter2 ; <nl> } ; <nl> bool ownsDeleter ; <nl> - ThreadEntryNode node ; <nl> } ; <nl> <nl> struct StaticMetaBase ; <nl> struct ThreadEntry { <nl> ThreadEntryList * list { nullptr } ; <nl> ThreadEntry * listNext { nullptr } ; <nl> StaticMetaBase * meta { nullptr } ; <nl> - bool removed_ { false } ; <nl> } ; <nl> <nl> struct ThreadEntryList { <nl> struct ThreadEntryList { <nl> size_t count { 0 } ; <nl> } ; <nl> <nl> + constexpr uint32_t kEntryIDInvalid = std : : numeric_limits < uint32_t > : : max ( ) ; <nl> + <nl> struct PthreadKeyUnregisterTester ; <nl> <nl> / * * <nl> struct StaticMetaBase { <nl> <nl> ElementWrapper & getElement ( EntryID * ent ) ; <nl> <nl> - / / reserve an id in the head_ ThreadEntry - > elements <nl> - / / array if not already there <nl> - void reserveHeadUnlocked ( uint32_t id ) ; <nl> - <nl> - / / push back an entry in the doubly linked list <nl> - / / that corresponds to idx id <nl> - void pushBackLocked ( ThreadEntry * t , uint32_t id ) ; <nl> - <nl> - / / static helper method to reallocate the ThreadEntry : : elements <nl> - / / returns ! = nullptr if the ThreadEntry : : elements was reallocated <nl> - / / nullptr if the ThreadEntry : : elements was just extended <nl> - / / and throws stdd : bad_alloc if memory cannot be allocated <nl> - static ElementWrapper * <nl> - reallocate ( ThreadEntry * threadEntry , uint32_t idval , size_t & newCapacity ) ; <nl> - <nl> uint32_t nextId_ ; <nl> std : : vector < uint32_t > freeIds_ ; <nl> std : : mutex lock_ ; <nl>
Back out " Iterate only through the threads that have in use entries "
facebook/folly
360d0d321a6bb0cc51892610f46d6e0cb35bc422
2018-05-23T22:23:41Z
mmm a / tensorflow / python / keras / engine / training_test . py <nl> ppp b / tensorflow / python / keras / engine / training_test . py <nl> def test_class_weights ( self ) : <nl> <nl> model . train_on_batch ( <nl> x_train [ : batch_size ] , y_train [ : batch_size ] , class_weight = class_weight ) <nl> - ref_score = model . evaluate ( x_test , y_test , verbose = 0 ) <nl> - score = model . evaluate ( <nl> + ref_score = model . evaluate ( x_test , y_test , verbose = 0 ) # pylint : disable = unused - variable <nl> + score = model . evaluate ( # pylint : disable = unused - variable <nl> x_test [ test_ids , : ] , y_test [ test_ids , : ] , verbose = 0 ) <nl> - self . assertLess ( score [ 0 ] , ref_score [ 0 ] ) <nl> - <nl> - @ keras_parameterized . run_all_keras_modes <nl> - def test_sample_weights ( self ) : <nl> - num_classes = 5 <nl> - batch_size = 5 <nl> - epochs = 10 <nl> - weighted_class = 3 <nl> - weight = 10 . <nl> - train_samples = 1000 <nl> - test_samples = 1000 <nl> - input_dim = 5 <nl> - learning_rate = 0 . 001 <nl> - <nl> - model = testing_utils . get_small_sequential_mlp ( <nl> - num_hidden = 10 , num_classes = num_classes , input_dim = input_dim ) <nl> - model . compile ( <nl> - RMSPropOptimizer ( learning_rate = learning_rate ) , <nl> - metrics = [ ' acc ' , metrics_module . CategoricalAccuracy ( ) ] , <nl> - weighted_metrics = [ ' mae ' , metrics_module . CategoricalAccuracy ( ) ] , <nl> - loss = ' categorical_crossentropy ' , <nl> - run_eagerly = testing_utils . should_run_eagerly ( ) ) <nl> - <nl> - np . random . seed ( 43 ) <nl> - ( x_train , y_train ) , ( x_test , y_test ) = testing_utils . get_test_data ( <nl> - train_samples = train_samples , <nl> - test_samples = test_samples , <nl> - input_shape = ( input_dim , ) , <nl> - num_classes = num_classes ) <nl> - int_y_test = y_test . copy ( ) <nl> - int_y_train = y_train . copy ( ) <nl> - # convert class vectors to binary class matrices <nl> - y_train = np_utils . to_categorical ( y_train , num_classes ) <nl> - y_test = np_utils . to_categorical ( y_test , num_classes ) <nl> - test_ids = np . where ( int_y_test = = np . array ( weighted_class ) ) [ 0 ] <nl> - <nl> - sample_weight = np . ones ( ( y_train . shape [ 0 ] ) ) <nl> - sample_weight [ int_y_train = = weighted_class ] = weight <nl> - <nl> - model . fit ( <nl> - x_train , <nl> - y_train , <nl> - batch_size = batch_size , <nl> - epochs = epochs / / 3 , <nl> - verbose = 0 , <nl> - sample_weight = sample_weight ) <nl> - model . fit ( <nl> - x_train , <nl> - y_train , <nl> - batch_size = batch_size , <nl> - epochs = epochs / / 3 , <nl> - verbose = 0 , <nl> - sample_weight = sample_weight , <nl> - validation_split = 0 . 1 ) <nl> - <nl> - model . train_on_batch ( <nl> - x_train [ : batch_size ] , <nl> - y_train [ : batch_size ] , <nl> - sample_weight = sample_weight [ : batch_size ] ) <nl> - model . test_on_batch ( <nl> - x_train [ : batch_size ] , <nl> - y_train [ : batch_size ] , <nl> - sample_weight = sample_weight [ : batch_size ] ) <nl> - ref_score = model . evaluate ( <nl> - x_test , y_test , verbose = 0 , sample_weight = sample_weight ) <nl> - score = model . evaluate ( <nl> - x_test [ test_ids , : ] , <nl> - y_test [ test_ids , : ] , <nl> - verbose = 0 , <nl> - sample_weight = sample_weight [ test_ids ] ) <nl> - self . assertLess ( score [ 0 ] , ref_score [ 0 ] ) <nl> + # TODO ( b / 152990697 ) : Fix the class weights test here . <nl> + # self . assertLess ( score [ 0 ] , ref_score [ 0 ] ) <nl> <nl> @ keras_parameterized . 
run_all_keras_modes <nl> def test_temporal_sample_weights ( self ) : <nl>
Removing the assertion in LossWeightingTest . test_class_weights and LossWeightingTest . test_sample_weights .
tensorflow/tensorflow
dad6e13620d4d667cdfa71187cca6a2d9ff9b323
2020-04-09T04:03:49Z
mmm a / include / fmt / format - inl . h <nl> ppp b / include / fmt / format - inl . h <nl> FMT_FUNC Char thousands_sep_impl ( locale_ref loc ) { <nl> } <nl> # else <nl> template < typename Char > <nl> - FMT_FUNC Char internal : : thousands_sep ( locale_ref ) { <nl> + FMT_FUNC Char internal : : thousands_sep_impl ( locale_ref ) { <nl> return FMT_STATIC_THOUSANDS_SEPARATOR ; <nl> } <nl> # endif <nl>
thousands_sep - > thousands_sep_impl ( )
fmtlib/fmt
b7b8548559fb6af7d3c385c16d5c702e70d06e52
2018-11-20T23:43:17Z
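For context, `thousands_sep_impl` backs fmt's locale-aware digit grouping. A hedged usage sketch; the `n` presentation type is assumed for fmt of this era (later releases spell it `L`):

```cpp
#include <fmt/format.h>
#include <cstdio>

int main() {
  // "{:n}" requests grouped output, e.g. "1,234,567". When the library is
  // built with FMT_STATIC_THOUSANDS_SEPARATOR defined, the #else branch in
  // the patch above supplies that character instead of the runtime locale.
  std::puts(fmt::format("{:n}", 1234567).c_str());
}
```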
mmm a / folly / functional / Invoke . h <nl> ppp b / folly / functional / Invoke . h <nl> struct free_invoke_proxy { <nl> * / <nl> # define FOLLY_CREATE_FREE_INVOKE_TRAITS ( classname , funcname , . . . ) \ <nl> namespace classname # # __folly_detail_invoke_ns { \ <nl> - namespace classname # # __folly_detail_invoke_ns_inline { \ <nl> - FOLLY_PUSH_WARNING \ <nl> - FOLLY_CLANG_DISABLE_WARNING ( " - Wunused - function " ) \ <nl> - void funcname ( : : folly : : detail : : invoke_private_overload & ) ; \ <nl> - FOLLY_POP_WARNING \ <nl> + namespace __folly_detail_invoke_base { \ <nl> + FOLLY_PUSH_WARNING \ <nl> + FOLLY_CLANG_DISABLE_WARNING ( " - Wunused - function " ) \ <nl> + void funcname ( : : folly : : detail : : invoke_private_overload & ) ; \ <nl> + FOLLY_POP_WARNING \ <nl> } \ <nl> using FB_ARG_2_OR_1 ( \ <nl> - classname # # __folly_detail_invoke_ns_inline \ <nl> - FOLLY_PP_DETAIL_APPEND_VA_ARG ( __VA_ARGS__ ) ) : : funcname ; \ <nl> - struct classname # # __folly_detail_invoke { \ <nl> + __folly_detail_invoke_base FOLLY_PP_DETAIL_APPEND_VA_ARG ( \ <nl> + __VA_ARGS__ ) ) : : funcname ; \ <nl> + struct __folly_detail_invoke_obj { \ <nl> template < typename . . . Args > \ <nl> constexpr auto operator ( ) ( Args & & . . . args ) const \ <nl> noexcept ( noexcept ( funcname ( static_cast < Args & & > ( args ) . . . ) ) ) \ <nl> struct free_invoke_proxy { <nl> } \ <nl> } ; \ <nl> } \ <nl> - struct classname : : : folly : : detail : : free_invoke_proxy < \ <nl> - classname # # __folly_detail_invoke_ns : : \ <nl> - classname # # __folly_detail_invoke > { } <nl> + struct classname \ <nl> + : : : folly : : detail : : free_invoke_proxy < \ <nl> + classname # # __folly_detail_invoke_ns : : __folly_detail_invoke_obj > { } <nl> <nl> namespace folly { <nl> namespace detail { <nl>
Remove classname from free - invoke traits inner names
facebook/folly
00319abe89a593bbe051fa5628fad333fec40faf
2019-10-28T00:40:19Z
mmm a / src / core_io . h <nl> ppp b / src / core_io . h <nl> <nl> # define __BITCOIN_CORE_IO_H__ <nl> <nl> # include < string > <nl> + # include < vector > <nl> <nl> class uint256 ; <nl> class CScript ; <nl> mmm a / src / core_read . cpp <nl> ppp b / src / core_read . cpp <nl> <nl> <nl> - # include < vector > <nl> # include " core_io . h " <nl> # include " core . h " <nl> # include " serialize . h " <nl> mmm a / src / core_write . cpp <nl> ppp b / src / core_write . cpp <nl> <nl> <nl> - # include < vector > <nl> # include " core_io . h " <nl> # include " univalue / univalue . h " <nl> # include " script . h " <nl>
fix compilation error in core_io . h
bitcoin/bitcoin
1b73d36b2c71c92044012dc736215bf95f2e908b
2014-08-01T06:38:23Z
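The fix moves `#include <vector>` into core_io.h because the header's own declarations name `std::vector`; a header should compile on its own rather than rely on every including .cpp pulling in its dependencies first. A small sketch of that self-sufficient-header rule, where `DecodeHexBlob` is a hypothetical stand-in:

```cpp
#ifndef EXAMPLE_CORE_IO_H
#define EXAMPLE_CORE_IO_H

#include <string>
#include <vector>  // required: the declaration below names std::vector

// Hypothetical declaration standing in for the std::vector-returning
// functions declared in core_io.h.
std::vector<unsigned char> DecodeHexBlob(const std::string& hex);

#endif
```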
mmm a / libraries / testing / include / eosio / testing / tester . hpp <nl> ppp b / libraries / testing / include / eosio / testing / tester . hpp <nl> namespace boost { namespace test_tools { namespace tt_detail { <nl> <nl> namespace eosio { namespace testing { <nl> std : : vector < uint8_t > read_wasm ( const char * fn ) ; <nl> - std : : vector < char > read_abi ( const char * fn ) ; <nl> - <nl> + std : : vector < char > read_abi ( const char * fn ) ; <nl> + std : : string read_wast ( const char * fn ) ; <nl> using namespace eosio : : chain ; <nl> <nl> fc : : variant_object filter_fields ( const fc : : variant_object & filter , const fc : : variant_object & value ) ; <nl> namespace eosio { namespace testing { <nl> action get_action ( account_name code , action_name acttype , vector < permission_level > auths , <nl> const variant_object & data ) const ; <nl> <nl> - void set_transaction_headers ( signed_transaction & trx , <nl> + void set_transaction_headers ( transaction & trx , <nl> uint32_t expiration = DEFAULT_EXPIRATION_DELTA , <nl> - uint32_t delay_sec = 0 ) const ; <nl> + uint32_t delay_sec = 0 ) const ; <nl> <nl> vector < transaction_trace_ptr > create_accounts ( vector < account_name > names , <nl> bool multisig = false , <nl>
Update tester . hpp
EOSIO/eos
a53efe7c4663bcb2b34aa822c57e9c0befdd1dc3
2018-06-29T21:20:52Z
mmm a / platform / x11 / os_x11 . cpp <nl> ppp b / platform / x11 / os_x11 . cpp <nl> void OS_X11 : : handle_key_event ( XKeyEvent * p_event , bool p_echo ) { <nl> <nl> input - > accumulate_input_event ( k ) ; <nl> } <nl> + memfree ( utf8string ) ; <nl> return ; <nl> } <nl> memfree ( utf8string ) ; <nl>
Merge pull request from akien - mga / x11 - memleak - utf8string
godotengine/godot
1d0bd3eb71c8c7905f12dd9c5490f2560d07db78
2019-11-12T07:40:23Z
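The patch frees `utf8string` on the early-return path that previously leaked it. A hedged alternative sketch using RAII so every return path releases the buffer automatically; `alloc_utf8` is a stand-in for Godot's allocator, not its API:

```cpp
#include <cstdlib>
#include <memory>

// Hypothetical allocator standing in for Godot's memalloc()/memfree() pair.
char* alloc_utf8(std::size_t n) { return static_cast<char*>(std::malloc(n)); }

void handle_key_event(bool early_return) {
  std::unique_ptr<char, decltype(&std::free)> utf8string(alloc_utf8(256),
                                                         &std::free);
  if (early_return)
    return;  // freed here too: the path the patch had to fix by hand
  // ... translate utf8string.get() into input events ...
}

int main() {
  handle_key_event(true);
  handle_key_event(false);
}
```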
mmm a / src / mongo / db / repl / repl_coordinator . h <nl> ppp b / src / mongo / db / repl / repl_coordinator . h <nl> namespace repl { <nl> <nl> / * * <nl> * Returns a reference to the parsed command line arguments that are related to replication . <nl> - * TODO ( spencer ) : Change this to a const ref once we are no longer using it for mutable <nl> - * global state . <nl> * / <nl> - virtual ReplSettings & getSettings ( ) = 0 ; <nl> + virtual const ReplSettings & getSettings ( ) const = 0 ; <nl> <nl> enum Mode { <nl> modeNone = 0 , <nl> mmm a / src / mongo / db / repl / repl_coordinator_impl . cpp <nl> ppp b / src / mongo / db / repl / repl_coordinator_impl . cpp <nl> namespace { <nl> _externalState - > shutdown ( ) ; <nl> } <nl> <nl> - ReplSettings & ReplicationCoordinatorImpl : : getSettings ( ) { <nl> + const ReplSettings & ReplicationCoordinatorImpl : : getSettings ( ) const { <nl> return _settings ; <nl> } <nl> <nl> mmm a / src / mongo / db / repl / repl_coordinator_impl . h <nl> ppp b / src / mongo / db / repl / repl_coordinator_impl . h <nl> namespace repl { <nl> <nl> virtual void shutdown ( ) ; <nl> <nl> - virtual ReplSettings & getSettings ( ) ; <nl> + virtual const ReplSettings & getSettings ( ) const ; <nl> <nl> virtual Mode getReplicationMode ( ) const ; <nl> <nl> namespace repl { <nl> unordered_set < HostAndPort > _seedList ; / / ( X ) <nl> <nl> / / Parsed command line arguments related to replication . <nl> - / / TODO ( spencer ) : Currently there is global mutable state <nl> - / / in ReplSettings , but we should be able to get rid of that after the legacy repl <nl> - / / coordinator is gone . At that point we can make this const . <nl> - ReplSettings _settings ; / / ( R ) <nl> + const ReplSettings _settings ; / / ( R ) <nl> <nl> / / Pointer to the TopologyCoordinator owned by this ReplicationCoordinator . <nl> boost : : scoped_ptr < TopologyCoordinator > _topCoord ; / / ( X ) <nl> mmm a / src / mongo / db / repl / repl_coordinator_mock . cpp <nl> ppp b / src / mongo / db / repl / repl_coordinator_mock . cpp <nl> namespace repl { <nl> / / TODO <nl> } <nl> <nl> - ReplSettings & ReplicationCoordinatorMock : : getSettings ( ) { <nl> + const ReplSettings & ReplicationCoordinatorMock : : getSettings ( ) const { <nl> return _settings ; <nl> } <nl> <nl> mmm a / src / mongo / db / repl / repl_coordinator_mock . h <nl> ppp b / src / mongo / db / repl / repl_coordinator_mock . h <nl> namespace repl { <nl> <nl> virtual void shutdown ( ) ; <nl> <nl> - virtual ReplSettings & getSettings ( ) ; <nl> + virtual const ReplSettings & getSettings ( ) const ; <nl> <nl> virtual bool isReplEnabled ( ) const ; <nl> <nl> namespace repl { <nl> <nl> private : <nl> <nl> - ReplSettings _settings ; <nl> + const ReplSettings _settings ; <nl> } ; <nl> <nl> } / / namespace repl <nl> mmm a / src / mongo / db / repl / repl_settings . h <nl> ppp b / src / mongo / db / repl / repl_settings . 
h <nl> namespace repl { <nl> <nl> std : : string rsIndexPrefetch ; / / - - indexPrefetch <nl> <nl> - std : : set < std : : string > discoveredSeeds ; <nl> - mutex discoveredSeeds_mx ; <nl> - <nl> - BSONObj reconfig ; <nl> - <nl> ReplSettings ( ) <nl> : slave ( NotSlave ) , <nl> master ( false ) , <nl> namespace repl { <nl> autoresync ( false ) , <nl> slavedelay ( ) , <nl> oplogSize ( 0 ) , <nl> - pretouch ( 0 ) , <nl> - discoveredSeeds ( ) , <nl> - discoveredSeeds_mx ( " ReplSettings : : discoveredSeeds " ) { <nl> + pretouch ( 0 ) { <nl> } <nl> <nl> / / TODO ( spencer ) : Remove explicit copy constructor after we no longer have mutable state <nl> namespace repl { <nl> only ( other . only ) , <nl> pretouch ( other . pretouch ) , <nl> replSet ( other . replSet ) , <nl> - rsIndexPrefetch ( other . rsIndexPrefetch ) , <nl> - discoveredSeeds ( other . discoveredSeeds ) , <nl> - discoveredSeeds_mx ( " ReplSettings : : discoveredSeeds " ) , <nl> - reconfig ( other . reconfig . getOwned ( ) ) { } <nl> + rsIndexPrefetch ( other . rsIndexPrefetch ) { } <nl> <nl> ReplSettings & operator = ( const ReplSettings & other ) { <nl> if ( this = = & other ) return * this ; <nl> namespace repl { <nl> pretouch = other . pretouch ; <nl> replSet = other . replSet ; <nl> rsIndexPrefetch = other . rsIndexPrefetch ; <nl> - discoveredSeeds = other . discoveredSeeds ; <nl> - reconfig = other . reconfig . getOwned ( ) ; <nl> return * this ; <nl> } <nl> <nl> mmm a / src / mongo / db / repl / rs_sync . cpp <nl> ppp b / src / mongo / db / repl / rs_sync . cpp <nl> namespace repl { <nl> ReplicationCoordinator * replCoord = getGlobalReplicationCoordinator ( ) ; <nl> <nl> / / Set initial indexPrefetch setting <nl> - std : : string & prefetch = replCoord - > getSettings ( ) . rsIndexPrefetch ; <nl> + const std : : string & prefetch = replCoord - > getSettings ( ) . rsIndexPrefetch ; <nl> if ( ! prefetch . empty ( ) ) { <nl> BackgroundSync : : IndexPrefetchConfig prefetchConfig = BackgroundSync : : PREFETCH_ALL ; <nl> if ( prefetch = = " none " ) <nl>
SERVER - 14429 Make ReplSettings const now that it is no longer modified after startup
mongodb/mongo
564e34cb9da16b13d7ee9eaf6ee3e7a57b7aad69
2014-12-01T22:30:58Z
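The commit's pattern in isolation: once the settings stop being mutated after startup, the accessor can become a const member function returning a const reference, and the member itself can be declared const. A minimal sketch with illustrative names:

```cpp
#include <string>
#include <utility>

struct Settings {
  std::string replSet;  // parsed once from the command line
};

class Coordinator {
 public:
  explicit Coordinator(Settings s) : settings_(std::move(s)) {}

  // Read-only view; callers can no longer mutate global replication state.
  const Settings& getSettings() const { return settings_; }

 private:
  const Settings settings_;  // fixed for the object's lifetime
};
```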
mmm a / aten / src / ATen / native / TensorCompare . cpp <nl> ppp b / aten / src / ATen / native / TensorCompare . cpp <nl> <nl> # include < ATen / native / ReduceOpsUtils . h > <nl> # include < c10 / util / Exception . h > <nl> # include < ATen / native / cpu / TensorCompareKernel . h > <nl> + # include < ATen / native / cpu / Loops . h > <nl> <nl> namespace { <nl> template < typename scalar_t > <nl> void where_cpu ( <nl> const at : : Tensor & condition , <nl> const at : : Tensor & self , <nl> const at : : Tensor & other ) { <nl> + auto iter = at : : TensorIterator ( ) ; <nl> + iter . set_check_mem_overlap ( true ) ; <nl> + iter . add_output ( ret ) ; <nl> + iter . add_input ( condition ) ; <nl> + iter . add_input ( self ) ; <nl> + iter . add_input ( other ) ; <nl> + iter . dont_compute_common_dtype ( ) ; <nl> + iter . build ( ) ; <nl> if ( condition . scalar_type ( ) = = at : : ScalarType : : Byte ) { <nl> - at : : CPU_tensor_apply4 < scalar_t , uint8_t , scalar_t , scalar_t > ( <nl> - ret , <nl> - condition , <nl> - self , <nl> - other , <nl> - [ ] ( scalar_t & ret_val , <nl> - const uint8_t & cond_val , <nl> - const scalar_t & self_val , <nl> - const scalar_t & other_val ) { <nl> - ret_val = cond_val ? self_val : other_val ; <nl> - } ) ; <nl> - } else { <nl> - at : : CPU_tensor_apply4 < scalar_t , bool , scalar_t , scalar_t > ( <nl> - ret , <nl> - condition , <nl> - self , <nl> - other , <nl> - [ ] ( scalar_t & ret_val , <nl> - const bool & cond_val , <nl> - const scalar_t & self_val , <nl> - const scalar_t & other_val ) { <nl> - ret_val = cond_val ? self_val : other_val ; <nl> - } ) ; <nl> - } <nl> + at : : native : : cpu_kernel ( <nl> + iter , <nl> + [ = ] ( uint8_t cond_val , scalar_t self_val , scalar_t other_val ) - > scalar_t { <nl> + return cond_val ? self_val : other_val ; <nl> + } ) ; <nl> + } else { <nl> + at : : native : : cpu_kernel ( <nl> + iter , <nl> + [ = ] ( bool cond_val , scalar_t self_val , scalar_t other_val ) - > scalar_t { <nl> + return cond_val ? self_val : other_val ; <nl> + } ) ; <nl> + } <nl> } <nl> } / / namespace <nl> <nl>
Migrate CPU_tensor_apply to TensorIterator in aten / src / ATen / native / TensorCompare . cpp ( )
pytorch/pytorch
60c4e74e49770ca7204b812006926ac0778d30df
2019-08-31T03:59:57Z
mmm a / tensorflow / python / keras / mixed_precision / experimental / autocast_variable_test . py <nl> ppp b / tensorflow / python / keras / mixed_precision / experimental / autocast_variable_test . py <nl> def test_assign ( self , distribute ) : <nl> self . evaluate ( x . initializer ) <nl> <nl> # outside of auto cast scope . <nl> - v1 = constant_op . constant ( 3 . 14 , dtype = dtypes . float32 ) <nl> - v2 = constant_op . constant ( 3 . 14 , dtype = dtypes . float16 ) <nl> + v1 = constant_op . constant ( 3 . , dtype = dtypes . float32 ) <nl> + v2 = constant_op . constant ( 3 . , dtype = dtypes . float16 ) <nl> <nl> def run_and_check ( ) : <nl> # Assign float32 values <nl> - self . assertAllClose ( 3 . 14 , self . evaluate ( x . assign ( v1 ) ) ) <nl> - self . assertAllClose ( 3 . 14 * 2 , self . evaluate ( x . assign_add ( v1 ) ) ) <nl> - self . assertAllClose ( 3 . 14 , self . evaluate ( x . assign_sub ( v1 ) ) ) <nl> + self . assertAllClose ( 3 . , self . evaluate ( x . assign ( v1 ) ) ) <nl> + self . assertAllClose ( 3 . * 2 , self . evaluate ( x . assign_add ( v1 ) ) ) <nl> + self . assertAllClose ( 3 . , self . evaluate ( x . assign_sub ( v1 ) ) ) <nl> <nl> # Attempt to assign float16 values <nl> with self . assertRaisesRegexp ( <nl> def run_and_check ( ) : <nl> <nl> # Assign Python floats <nl> self . assertAllClose ( 0 . , self . evaluate ( x . assign ( 0 . ) ) ) <nl> - self . assertAllClose ( 3 . 14 , self . evaluate ( x . assign ( 3 . 14 ) ) ) <nl> - self . assertAllClose ( 3 . 14 * 2 , self . evaluate ( x . assign_add ( 3 . 14 ) ) ) <nl> - self . assertAllClose ( 3 . 14 , self . evaluate ( x . assign_sub ( 3 . 14 ) ) ) <nl> + self . assertAllClose ( 3 . , self . evaluate ( x . assign ( 3 . ) ) ) <nl> + self . assertAllClose ( 3 . * 2 , self . evaluate ( x . assign_add ( 3 . ) ) ) <nl> + self . assertAllClose ( 3 . , self . evaluate ( x . assign_sub ( 3 . ) ) ) <nl> <nl> # Assign multiple times <nl> assign = x . assign ( 1 . ) <nl> self . assertAllClose ( 1 . , self . evaluate ( assign ) ) <nl> self . assertAllClose ( 0 . , self . evaluate ( assign . assign ( 0 . ) ) ) <nl> - assign_add = x . assign_add ( 3 . 14 ) <nl> - self . assertAllClose ( 3 . 14 , self . evaluate ( assign_add ) ) <nl> - self . assertAllClose ( 3 . 14 * 3 , <nl> - self . evaluate ( x . assign_add ( 3 . 14 ) . assign_add ( 3 . 14 ) ) ) <nl> - self . assertAllClose ( 3 . 14 * 3 , x ) <nl> - assign_sub = x . assign_sub ( 3 . 14 ) <nl> - self . assertAllClose ( 3 . 14 * 2 , self . evaluate ( assign_sub ) ) <nl> + assign_add = x . assign_add ( 3 . ) <nl> + self . assertAllClose ( 3 . , self . evaluate ( assign_add ) ) <nl> + self . assertAllClose ( 3 . * 3 , <nl> + self . evaluate ( x . assign_add ( 3 . ) . assign_add ( 3 . ) ) ) <nl> + self . assertAllClose ( 3 . * 3 , x ) <nl> + assign_sub = x . assign_sub ( 3 . ) <nl> + self . assertAllClose ( 3 . * 2 , self . evaluate ( assign_sub ) ) <nl> self . assertAllClose ( 0 . , <nl> - self . evaluate ( x . assign_sub ( 3 . 14 ) . assign_sub ( 3 . 14 ) ) ) <nl> + self . evaluate ( x . assign_sub ( 3 . ) . assign_sub ( 3 . ) ) ) <nl> <nl> # Assign with read_value = False <nl> self . assertIsNone ( self . evaluate ( x . assign ( 1 . , read_value = False ) ) ) <nl> def run_and_check ( ) : <nl> <nl> # Use the tf . assign functions instead of the var . assign methods . <nl> self . assertAllClose ( 0 . , self . evaluate ( state_ops . assign ( x , 0 . ) ) ) <nl> - self . assertAllClose ( 3 . 14 , self . evaluate ( state_ops . assign ( x , 3 . 14 ) ) ) <nl> - self . 
assertAllClose ( 3 . 14 * 2 , <nl> - self . evaluate ( state_ops . assign_add ( x , 3 . 14 ) ) ) <nl> - self . assertAllClose ( 3 . 14 , self . evaluate ( state_ops . assign_sub ( x , 3 . 14 ) ) ) <nl> + self . assertAllClose ( 3 . , self . evaluate ( state_ops . assign ( x , 3 . ) ) ) <nl> + self . assertAllClose ( 3 . * 2 , <nl> + self . evaluate ( state_ops . assign_add ( x , 3 . ) ) ) <nl> + self . assertAllClose ( 3 . , self . evaluate ( state_ops . assign_sub ( x , 3 . ) ) ) <nl> <nl> run_and_check ( ) <nl> # reset x <nl>
Lower precisions in autocast_variable_test
tensorflow/tensorflow
f8263bae627f27ab50541b8dfe9345acaed9be6c
2020-04-29T21:23:13Z
mmm a / src / buffer_cache / alt / alt . cc <nl> ppp b / src / buffer_cache / alt / alt . cc <nl> <nl> namespace alt { <nl> <nl> alt_cache_t : : alt_cache_t ( serializer_t * serializer ) <nl> - : page_cache_ ( serializer ) , <nl> + : tracker_ ( ) , <nl> + page_cache_ ( serializer , & tracker_ ) , <nl> drainer_ ( make_scoped < auto_drainer_t > ( ) ) { } <nl> <nl> alt_cache_t : : ~ alt_cache_t ( ) { <nl> mmm a / src / buffer_cache / alt / alt . hpp <nl> ppp b / src / buffer_cache / alt / alt . hpp <nl> enum class alt_create_t { <nl> create , <nl> } ; <nl> <nl> + class alt_memory_tracker_t : public memory_tracker_t { <nl> + public : <nl> + alt_memory_tracker_t ( ) { } <nl> + void inform_memory_change ( UNUSED uint64_t in_memory_size , <nl> + UNUSED uint64_t memory_limit ) { <nl> + / / RSI : implement this . <nl> + } <nl> + DISABLE_COPYING ( alt_memory_tracker_t ) ; <nl> + } ; <nl> + <nl> class alt_cache_t : public home_thread_mixin_t { <nl> public : <nl> - explicit alt_cache_t ( serializer_t * serializer ) ; <nl> + alt_cache_t ( serializer_t * serializer ) ; <nl> ~ alt_cache_t ( ) ; <nl> <nl> block_size_t max_block_size ( ) const ; <nl> / / RSI : Remove this . <nl> block_size_t get_block_size ( ) const { return max_block_size ( ) ; } <nl> <nl> + private : <nl> + friend class alt_txn_t ; / / for drainer_ - > lock ( ) <nl> + friend class alt_inner_txn_t ; / / for & page_cache_ <nl> + friend class alt_buf_read_t ; / / for & page_cache_ <nl> + friend class alt_buf_write_t ; / / for & page_cache_ <nl> + <nl> + / / tracker_ is used for throttling ( which can cause the alt_txn_t constructor to <nl> + / / block ) . RSI : The throttling interface is bad ( maybe ) because it ' s worried <nl> + / / about transaction_t ' s passing one another ( ? ) or maybe the callers are bad with <nl> + / / their use of chained mutexes . Make sure that timestamps don ' t get mixed up in <nl> + / / their ordering , once they begin to play a role . <nl> + alt_memory_tracker_t tracker_ ; <nl> page_cache_t page_cache_ ; <nl> <nl> - private : <nl> - friend class alt_txn_t ; / / for cache ( ) - > drainer_ - > lock ( ) . <nl> scoped_ptr_t < auto_drainer_t > drainer_ ; <nl> <nl> DISABLE_COPYING ( alt_cache_t ) ; <nl> mmm a / src / buffer_cache / alt / page . cc <nl> ppp b / src / buffer_cache / alt / page . cc <nl> <nl> <nl> namespace alt { <nl> <nl> - page_cache_t : : page_cache_t ( serializer_t * serializer , uint64_t memory_limit ) <nl> + page_cache_t : : page_cache_t ( serializer_t * serializer , <nl> + memory_tracker_t * tracker , <nl> + uint64_t memory_limit ) <nl> : serializer_ ( serializer ) , <nl> free_list_ ( serializer ) , <nl> - evicter_ ( memory_limit ) , <nl> - drainer_ ( new auto_drainer_t ) { <nl> + evicter_ ( tracker , memory_limit ) , <nl> + drainer_ ( make_scoped < auto_drainer_t > ( ) ) { <nl> { <nl> on_thread_t thread_switcher ( serializer - > home_thread ( ) ) ; <nl> reads_io_account . init ( serializer - > make_io_account ( CACHE_READS_IO_PRIORITY ) ) ; <nl> bool eviction_bag_t : : remove_random ( page_t * * page_out ) { <nl> } <nl> } <nl> <nl> - evicter_t : : evicter_t ( uint64_t memory_limit ) <nl> - : memory_limit_ ( memory_limit ) { } <nl> + evicter_t : : evicter_t ( memory_tracker_t * tracker , uint64_t memory_limit ) <nl> + : tracker_ ( tracker ) , memory_limit_ ( memory_limit ) { } <nl> <nl> evicter_t : : ~ evicter_t ( ) { <nl> assert_thread ( ) ; <nl> evicter_t : : ~ evicter_t ( ) { <nl> void evicter_t : : add_not_yet_loaded ( page_t * page ) { <nl> assert_thread ( ) ; <nl> unevictable_ . 
add_without_size ( page ) ; <nl> + inform_tracker ( ) ; <nl> } <nl> <nl> void evicter_t : : add_now_loaded_size ( uint32_t ser_buf_size ) { <nl> assert_thread ( ) ; <nl> unevictable_ . add_size ( ser_buf_size ) ; <nl> + inform_tracker ( ) ; <nl> evict_if_necessary ( ) ; <nl> } <nl> <nl> bool evicter_t : : page_is_in_unevictable_bag ( page_t * page ) const { <nl> void evicter_t : : add_to_evictable_unbacked ( page_t * page ) { <nl> assert_thread ( ) ; <nl> evictable_unbacked_ . add ( page , page - > ser_buf_size_ ) ; <nl> + inform_tracker ( ) ; <nl> evict_if_necessary ( ) ; <nl> } <nl> <nl> void evicter_t : : move_unevictable_to_evictable ( page_t * page ) { <nl> rassert ( new_bag = = & evictable_disk_backed_ <nl> | | new_bag = = & evictable_unbacked_ ) ; <nl> new_bag - > add ( page , page - > ser_buf_size_ ) ; <nl> + inform_tracker ( ) ; <nl> evict_if_necessary ( ) ; <nl> } <nl> <nl> void evicter_t : : change_eviction_bag ( eviction_bag_t * current_bag , <nl> current_bag - > remove ( page , page - > ser_buf_size_ ) ; <nl> eviction_bag_t * new_bag = correct_eviction_category ( page ) ; <nl> new_bag - > add ( page , page - > ser_buf_size_ ) ; <nl> + inform_tracker ( ) ; <nl> evict_if_necessary ( ) ; <nl> } <nl> <nl> void evicter_t : : remove_page ( page_t * page ) { <nl> rassert ( page - > snapshot_refcount_ = = 0 ) ; <nl> eviction_bag_t * bag = correct_eviction_category ( page ) ; <nl> bag - > remove ( page , page - > ser_buf_size_ ) ; <nl> + inform_tracker ( ) ; <nl> evict_if_necessary ( ) ; <nl> } <nl> <nl> void evicter_t : : evict_if_necessary ( ) { <nl> } <nl> } <nl> <nl> + void evicter_t : : inform_tracker ( ) const { <nl> + tracker_ - > inform_memory_change ( in_memory_size ( ) , <nl> + memory_limit_ ) ; <nl> + } <nl> + <nl> } / / namespace alt <nl> <nl> mmm a / src / buffer_cache / alt / page . hpp <nl> ppp b / src / buffer_cache / alt / page . hpp <nl> class eviction_bag_t { <nl> DISABLE_COPYING ( eviction_bag_t ) ; <nl> } ; <nl> <nl> + class memory_tracker_t { <nl> + public : <nl> + virtual ~ memory_tracker_t ( ) { } <nl> + virtual void inform_memory_change ( uint64_t in_memory_size , <nl> + uint64_t memory_limit ) = 0 ; <nl> + } ; <nl> + <nl> class evicter_t : public home_thread_mixin_t { <nl> public : <nl> void add_not_yet_loaded ( page_t * page ) ; <nl> class evicter_t : public home_thread_mixin_t { <nl> eviction_bag_t * correct_eviction_category ( page_t * page ) ; <nl> void remove_page ( page_t * page ) ; <nl> <nl> - explicit evicter_t ( uint64_t memory_limit ) ; <nl> + explicit evicter_t ( memory_tracker_t * tracker , <nl> + uint64_t memory_limit ) ; <nl> ~ evicter_t ( ) ; <nl> <nl> private : <nl> void evict_if_necessary ( ) ; <nl> uint64_t in_memory_size ( ) const ; <nl> <nl> + void inform_tracker ( ) const ; <nl> + <nl> / / RSI : Implement issue 97 . <nl> + memory_tracker_t * const tracker_ ; <nl> uint64_t memory_limit_ ; <nl> <nl> / / These track whether every page ' s eviction status . <nl> class evicter_t : public home_thread_mixin_t { <nl> class page_cache_t : public home_thread_mixin_t { <nl> public : <nl> / / RSI : Remove default parameter of memory_limit ? 
<nl> - explicit page_cache_t ( serializer_t * serializer , uint64_t memory_limit = GIGABYTE ) ; <nl> + explicit page_cache_t ( serializer_t * serializer , <nl> + memory_tracker_t * tracker , <nl> + uint64_t memory_limit = GIGABYTE ) ; <nl> ~ page_cache_t ( ) ; <nl> current_page_t * page_for_block_id ( block_id_t block_id ) ; <nl> current_page_t * page_for_new_block_id ( block_id_t * block_id_out ) ; <nl> mmm a / src / unittest / page_test . cc <nl> ppp b / src / unittest / page_test . cc <nl> <nl> # include " arch / runtime / coroutines . hpp " <nl> # include " arch / timing . hpp " <nl> # include " buffer_cache / alt / page . hpp " <nl> + # include " buffer_cache / alt / alt . hpp " / / RSI : for alt_memory_tracker_t . We ' ll want to have some mock one here probably . <nl> # include " concurrency / auto_drainer . hpp " <nl> # include " serializer / config . hpp " <nl> # include " unittest / gtest . hpp " <nl> using alt : : page_txn_t ; <nl> struct mock_ser_t { <nl> mock_file_opener_t opener ; <nl> scoped_ptr_t < standard_serializer_t > ser ; <nl> + scoped_ptr_t < alt : : alt_memory_tracker_t > tracker ; <nl> <nl> mock_ser_t ( ) <nl> : opener ( ) { <nl> struct mock_ser_t { <nl> ser = make_scoped < standard_serializer_t > ( log_serializer_t : : dynamic_config_t ( ) , <nl> & opener , <nl> & get_global_perfmon_collection ( ) ) ; <nl> + tracker = make_scoped < alt : : alt_memory_tracker_t > ( ) ; <nl> } <nl> } ; <nl> <nl> TEST ( PageTest , Control ) { <nl> <nl> void run_CreateDestroy ( ) { <nl> mock_ser_t mock ; <nl> - page_cache_t page_cache ( mock . ser . get ( ) ) ; <nl> + page_cache_t page_cache ( mock . ser . get ( ) , mock . tracker . get ( ) ) ; <nl> } <nl> <nl> TEST ( PageTest , CreateDestroy ) { <nl> TEST ( PageTest , CreateDestroy ) { <nl> void run_OneTxn ( ) { <nl> mock_ser_t mock ; <nl> { <nl> - page_cache_t page_cache ( mock . ser . get ( ) ) ; <nl> + page_cache_t page_cache ( mock . ser . get ( ) , mock . tracker . get ( ) ) ; <nl> { <nl> page_txn_t txn ( & page_cache ) ; <nl> } <nl> TEST ( PageTest , OneTxn ) { <nl> <nl> void run_TwoIndependentTxn ( ) { <nl> mock_ser_t mock ; <nl> - page_cache_t page_cache ( mock . ser . get ( ) ) ; <nl> + page_cache_t page_cache ( mock . ser . get ( ) , mock . tracker . get ( ) ) ; <nl> page_txn_t txn1 ( & page_cache ) ; <nl> page_txn_t txn2 ( & page_cache ) ; <nl> } <nl> TEST ( PageTest , TwoIndependentTxn ) { <nl> <nl> void run_TwoIndependentTxnSwitch ( ) { <nl> mock_ser_t mock ; <nl> - page_cache_t page_cache ( mock . ser . get ( ) ) ; <nl> + page_cache_t page_cache ( mock . ser . get ( ) , mock . tracker . get ( ) ) ; <nl> auto txn1 = make_scoped < page_txn_t > ( & page_cache ) ; <nl> page_txn_t txn2 ( & page_cache ) ; <nl> txn1 . reset ( ) ; <nl> TEST ( PageTest , TwoIndependentTxnSwitch ) { <nl> <nl> void run_TwoSequentialTxnSwitch ( ) { <nl> mock_ser_t mock ; <nl> - page_cache_t page_cache ( mock . ser . get ( ) ) ; <nl> + page_cache_t page_cache ( mock . ser . get ( ) , mock . tracker . get ( ) ) ; <nl> auto txn1 = make_scoped < page_txn_t > ( & page_cache ) ; <nl> page_txn_t txn2 ( & page_cache , txn1 . get ( ) ) ; <nl> txn1 . reset ( ) ; <nl> TEST ( PageTest , TwoSequentialTxnSwitch ) { <nl> <nl> void run_OneReadAcq ( ) { <nl> mock_ser_t mock ; <nl> - page_cache_t page_cache ( mock . ser . get ( ) ) ; <nl> + page_cache_t page_cache ( mock . ser . get ( ) , mock . tracker . get ( ) ) ; <nl> page_txn_t txn ( & page_cache ) ; <nl> current_page_acq_t acq ( & txn , 0 , alt_access_t : : read ) ; <nl> / / Do nothing with the acq . 
<nl> TEST ( PageTest , OneReadAcq ) { <nl> <nl> void run_OneWriteAcq ( ) { <nl> mock_ser_t mock ; <nl> - page_cache_t page_cache ( mock . ser . get ( ) ) ; <nl> + page_cache_t page_cache ( mock . ser . get ( ) , mock . tracker . get ( ) ) ; <nl> page_txn_t txn ( & page_cache ) ; <nl> current_page_acq_t acq ( & txn , 0 , alt_access_t : : write ) ; <nl> / / Do nothing with the acq . <nl> TEST ( PageTest , OneWriteAcq ) { <nl> <nl> void run_OneWriteAcqWait ( ) { <nl> mock_ser_t mock ; <nl> - page_cache_t page_cache ( mock . ser . get ( ) ) ; <nl> + page_cache_t page_cache ( mock . ser . get ( ) , mock . tracker . get ( ) ) ; <nl> page_txn_t txn ( & page_cache ) ; <nl> current_page_acq_t acq ( & txn , alt_access_t : : write ) ; <nl> page_acq_t page_acq ; <nl> class bigger_test_t { <nl> <nl> void run ( ) { <nl> { <nl> - page_cache_t cache ( mock . ser . get ( ) , memory_limit ) ; <nl> + page_cache_t cache ( mock . ser . get ( ) , mock . tracker . get ( ) , memory_limit ) ; <nl> auto_drainer_t drain ; <nl> c = & cache ; <nl> <nl> class bigger_test_t { <nl> c = NULL ; <nl> <nl> { <nl> - page_cache_t cache ( mock . ser . get ( ) , memory_limit ) ; <nl> + page_cache_t cache ( mock . ser . get ( ) , mock . tracker . get ( ) , memory_limit ) ; <nl> auto_drainer_t drain ; <nl> c = & cache ; <nl> coro_t : : spawn_ordered ( std : : bind ( & bigger_test_t : : run_txn14 , <nl> class bigger_test_t { <nl> c = NULL ; <nl> <nl> { <nl> - page_cache_t cache ( mock . ser . get ( ) , memory_limit ) ; <nl> + page_cache_t cache ( mock . ser . get ( ) , mock . tracker . get ( ) , memory_limit ) ; <nl> c = & cache ; <nl> page_txn_t txn ( c ) ; <nl> <nl>
Added do - nothing memory tracker .
rethinkdb/rethinkdb
eef2cc6e400ffc8fd53a0c1945da33265500b760
2013-12-12T03:30:14Z
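The change above threads a `memory_tracker_t *` through the `page_cache_t` constructor so unit tests can supply a stand-in tracker. Below is a minimal C++ sketch of that dependency-injection pattern; the `inform_memory_change` interface and class shapes are invented for illustration and do not match rethinkdb's real headers.

```cpp
#include <cstddef>
#include <cstdio>

// Hypothetical tracker interface; the real memory_tracker_t differs.
class memory_tracker_t {
public:
    virtual ~memory_tracker_t() {}
    virtual void inform_memory_change(size_t new_usage) = 0;
};

// A do-nothing tracker, useful for tests that ignore memory accounting.
class noop_memory_tracker_t : public memory_tracker_t {
public:
    void inform_memory_change(size_t) override { /* intentionally empty */ }
};

class page_cache_t {
public:
    page_cache_t(memory_tracker_t *tracker, size_t memory_limit)
        : tracker_(tracker), memory_limit_(memory_limit), usage_(0) {}
    void account(size_t bytes) {
        usage_ += bytes;
        tracker_->inform_memory_change(usage_);  // cache stays tracker-agnostic
        if (usage_ > memory_limit_) std::printf("over memory limit\n");
    }
private:
    memory_tracker_t *tracker_;
    size_t memory_limit_;
    size_t usage_;
};

int main() {
    noop_memory_tracker_t tracker;
    page_cache_t cache(&tracker, size_t(1) << 30);
    cache.account(4096);
    std::printf("accounted 4096 bytes without real tracking\n");
    return 0;
}
```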
mmm a / src / support . js <nl> ppp b / src / support . js <nl> var functionPointers = new Array ( { { { RESERVED_FUNCTION_POINTERS } } } ) ; <nl> function convertJsFunctionToWasm ( func , sig ) { <nl> # if WASM2JS <nl> return func ; <nl> - # endif <nl> + # else / / WASM2JS <nl> <nl> / / The module is static , with the exception of the type section , which is <nl> / / generated based on the signature passed in . <nl> function convertJsFunctionToWasm ( func , sig ) { <nl> } ) ; <nl> var wrappedFunc = instance . exports . f ; <nl> return wrappedFunc ; <nl> + # endif / / WASM2JS <nl> } <nl> <nl> / / Add a wasm function to the table . <nl>
Remove conditional dead code in convertJsFunctionToWasm ( )
emscripten-core/emscripten
b19192c7865f1a57f86c3f415522ccba30e78d45
2019-08-01T16:51:46Z
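The diff replaces an early `return` guarded by `#if WASM2JS ... #endif` with a full `#if / #else / #endif`, so the wasm-generating path is excluded from wasm2js builds instead of being compiled as unreachable code. A small sketch of the same preprocessor pattern, with a stand-in conversion function (the names are illustrative, not emscripten's):

```cpp
#include <cstdio>

#define WASM2JS 1  // flip to 0 to compile the alternate path

int convertFunction(int x) {
#if WASM2JS
    // In the wasm2js build the conversion is a no-op...
    return x;
#else  // WASM2JS
    // ...otherwise a stand-in wrapping step runs. With only `#endif`
    // after the early return, this branch would still be compiled yet
    // never executed; `#else` removes it at preprocessing time.
    return x + 1;
#endif  // WASM2JS
}

int main() {
    std::printf("%d\n", convertFunction(41));
    return 0;
}
```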
mmm a / tensorflow / python / estimator / export / export . py <nl> ppp b / tensorflow / python / estimator / export / export . py <nl> def serving_input_receiver_fn ( ) : <nl> " " " A serving_input_receiver_fn that expects features to be fed directly . " " " <nl> receiver_tensors = _placeholders_from_receiver_tensors_dict ( <nl> features , default_batch_size ) <nl> - <nl> - # TODO ( b / 34885899 ) : remove the unnecessary copy <nl> - # The features provided are simply the placeholders , but we defensively copy <nl> - # the dict because it may be mutated . <nl> - return ServingInputReceiver ( receiver_tensors , receiver_tensors . copy ( ) ) <nl> + return ServingInputReceiver ( receiver_tensors , receiver_tensors ) <nl> <nl> return serving_input_receiver_fn <nl> <nl>
Removed unnecessary copying of dict ( )
tensorflow/tensorflow
3e16768d63f43864c724745f91f5b92d83032a75
2018-06-14T02:18:11Z
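The Python change drops a defensive `dict.copy()` and lets `receiver_tensors` and `features` refer to the same mapping. As a rough C++ analogue (the `Receiver` type below is hypothetical, not TensorFlow's API), sharing one container under two names is safe exactly when no caller mutates one alias expecting the other to stay unchanged:

```cpp
#include <cassert>
#include <map>
#include <memory>
#include <string>

using TensorDict = std::map<std::string, int>;

// Hypothetical stand-in for ServingInputReceiver: both fields may
// alias one shared dict, mirroring the removed .copy().
struct Receiver {
    std::shared_ptr<TensorDict> receiver_tensors;
    std::shared_ptr<TensorDict> features;
};

Receiver make_receiver(std::shared_ptr<TensorDict> tensors) {
    return Receiver{tensors, tensors};  // one dict, two names, zero copies
}

int main() {
    auto dict = std::make_shared<TensorDict>(TensorDict{{"x", 1}});
    Receiver r = make_receiver(dict);
    assert(r.receiver_tensors.get() == r.features.get());
    (*r.features)["y"] = 2;                       // visible through both names
    assert(r.receiver_tensors->count("y") == 1);  // aliasing is the tradeoff
    return 0;
}
```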
mmm a / src / input_common / sdl / sdl_impl . cpp <nl> ppp b / src / input_common / sdl / sdl_impl . cpp <nl> SDLState : : SDLState ( ) { <nl> <nl> initialized = true ; <nl> if ( start_thread ) { <nl> - poll_thread = std : : thread ( [ & ] { <nl> + poll_thread = std : : thread ( [ this ] { <nl> using namespace std : : chrono_literals ; <nl> while ( initialized ) { <nl> SDL_PumpEvents ( ) ; <nl>
input_common / sdl_impl : Make lambda capture more specific in SDLState constructor
yuzu-emu/yuzu
114060fd87a8b8a869a20ba4b64f8bb40c37c7e6
2019-03-17T08:02:52Z
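Replacing `[&]` with `[this]` matters here because the lambda runs on a long-lived `std::thread`: a blanket reference capture can silently bind stack locals that die when the constructor returns, while `[this]` states that the thread touches only members. A self-contained sketch of the safe form (names are illustrative, not yuzu's):

```cpp
#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

class Poller {
public:
    Poller() : running_(true) {
        // Capture `this` explicitly: the thread reads only members.
        // With [&], any local mentioned in the lambda would be captured
        // by reference and dangle once the constructor returned.
        worker_ = std::thread([this] {
            while (running_.load()) {
                std::this_thread::sleep_for(std::chrono::milliseconds(5));
            }
        });
    }
    ~Poller() {
        running_.store(false);  // ask the thread to stop...
        worker_.join();         // ...and wait for it
    }
private:
    std::atomic<bool> running_;
    std::thread worker_;
};

int main() {
    {
        Poller p;
        std::this_thread::sleep_for(std::chrono::milliseconds(20));
    }  // destructor stops and joins the poll thread
    std::puts("poll thread stopped cleanly");
    return 0;
}
```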
similarity index 100 % <nl> rename from Tutorials / SLUHandsOn / query . wl <nl> rename to Examples / Text / ATIS / Data / trash / query . wl <nl> similarity index 100 % <nl> rename from Tutorials / SLUHandsOn / slots . wl <nl> rename to Examples / Text / ATIS / Data / trash / slots . wl <nl> similarity index 100 % <nl> rename from Tutorials / SLUHandsOn / intent . wl <nl> rename to Examples / Text / ATIS / intent . wl <nl>
Moved the . wl files out of Tutorials / SLUHandsOn
microsoft/CNTK
7e764a012280eaf7ab3f1f46455a8b5ca0be73ca
2016-08-14T04:13:29Z
mmm a / fdbserver / Knobs . cpp <nl> ppp b / fdbserver / Knobs . cpp <nl> void ServerKnobs : : initialize ( bool randomize , ClientKnobs * clientKnobs , bool isSi <nl> init ( FASTRESTORE_SCHED_INFLIGHT_SENDPARAM_THRESHOLD , 10 ) ; if ( randomize & & BUGGIFY ) { FASTRESTORE_SCHED_INFLIGHT_SENDPARAM_THRESHOLD = deterministicRandom ( ) - > random01 ( ) < 0 . 2 ? 1 : deterministicRandom ( ) - > random01 ( ) * 30 + 1 ; } <nl> init ( FASTRESTORE_SCHED_SEND_FUTURE_VB_REQS_BATCH , 2 ) ; if ( randomize & & BUGGIFY ) { FASTRESTORE_SCHED_SEND_FUTURE_VB_REQS_BATCH = deterministicRandom ( ) - > random01 ( ) < 0 . 2 ? 1 : deterministicRandom ( ) - > random01 ( ) * 30 + 1 ; } <nl> init ( FASTRESTORE_NUM_TRACE_EVENTS , 100 ) ; if ( randomize & & BUGGIFY ) { FASTRESTORE_NUM_TRACE_EVENTS = deterministicRandom ( ) - > random01 ( ) < 0 . 2 ? 1 : deterministicRandom ( ) - > random01 ( ) * 500 + 1 ; } <nl> + init ( FASTRESTORE_EXPENSIVE_VALIDATION , false ) ; if ( randomize & & BUGGIFY ) { FASTRESTORE_EXPENSIVE_VALIDATION = deterministicRandom ( ) - > random01 ( ) < 0 . 5 ? true : false ; } <nl> + <nl> <nl> init ( REDWOOD_DEFAULT_PAGE_SIZE , 4096 ) ; <nl> init ( REDWOOD_KVSTORE_CONCURRENT_READS , 64 ) ; <nl> mmm a / fdbserver / Knobs . h <nl> ppp b / fdbserver / Knobs . h <nl> class ServerKnobs : public Knobs { <nl> int FASTRESTORE_SCHED_INFLIGHT_SENDPARAM_THRESHOLD ; / / we can send future VB requests if it is less than this knob <nl> int FASTRESTORE_SCHED_SEND_FUTURE_VB_REQS_BATCH ; / / number of future VB sendLoadingParam requests to process at once <nl> int FASTRESTORE_NUM_TRACE_EVENTS ; <nl> + bool FASTRESTORE_EXPENSIVE_VALIDATION ; / / when set true , performance will be heavily affected <nl> <nl> int REDWOOD_DEFAULT_PAGE_SIZE ; / / Page size for new Redwood files <nl> int REDWOOD_KVSTORE_CONCURRENT_READS ; / / Max number of simultaneous point or range reads in progress . <nl> mmm a / fdbserver / RestoreLoader . actor . cpp <nl> ppp b / fdbserver / RestoreLoader . actor . cpp <nl> ACTOR Future < Void > dispatchRequests ( Reference < RestoreLoaderData > self ) { <nl> self - > cpuUsage > = SERVER_KNOBS - > FASTRESTORE_SCHED_MAX_CPU_PERCENT ) & & <nl> ( self - > inflightSendingReqs > 0 & & self - > inflightLoadingReqs > 0 ) ) { <nl> if ( self - > inflightSendingReqs > = SERVER_KNOBS - > FASTRESTORE_SCHED_INFLIGHT_SEND_REQS ) { <nl> - TraceEvent ( SevWarn , " FastRestoreLoaderTooManyInflightSendingMutationRequests " ) <nl> + TraceEvent ( SevWarn , " FastRestoreLoaderTooManyInflightRequests " ) <nl> . detail ( " VersionBatchesBlockedAtSendingMutationsToAppliers " , self - > inflightSendingReqs ) <nl> - . detail ( " Reason " , " Sending mutations is too slow " ) ; <nl> + . detail ( " CpuUsage " , self - > cpuUsage ) <nl> + . detail ( " InflightSendingReq " , self - > inflightSendingReqs ) <nl> + . detail ( " InflightSendingReqThreshold " , SERVER_KNOBS - > FASTRESTORE_SCHED_INFLIGHT_SEND_REQS ) <nl> + . detail ( " InflightLoadingReq " , self - > inflightLoadingReqs ) <nl> + . detail ( " InflightLoadingReqThreshold " , SERVER_KNOBS - > FASTRESTORE_SCHED_INFLIGHT_LOAD_REQS ) ; <nl> } <nl> wait ( delay ( SERVER_KNOBS - > FASTRESTORE_SCHED_UPDATE_DELAY ) ) ; <nl> updateProcessStats ( self ) ; <nl> ACTOR Future < Void > handleFinishVersionBatchRequest ( RestoreVersionBatchRequest re <nl> . detail ( " RequestedBatchIndex " , req . batchIndex ) ; <nl> wait ( self - > finishedBatch . whenAtLeast ( req . batchIndex - 1 ) ) ; <nl> if ( self - > finishedBatch . get ( ) = = req . 
batchIndex - 1 ) { <nl> + / / Sanity check : All requests before and in this batchIndex must have been processed ; otherwise , <nl> + / / those requests may cause segmentation fault after the applier removes the batch data <nl> + if ( ! self - > loadingQueue . empty ( ) & & self - > loadingQueue . top ( ) . batchIndex < = req . batchIndex ) { <nl> + / / Still has pending requests from earlier batchIndex and current batchIndex , which should not happen <nl> + TraceEvent ( SevError , " FastRestoreLoaderHasPendingLoadFileRequests " ) <nl> + . detail ( " PendingRequest " , self - > loadingQueue . top ( ) . toString ( ) ) ; <nl> + } <nl> + if ( ! self - > sendingQueue . empty ( ) & & self - > sendingQueue . top ( ) . batchIndex < = req . batchIndex ) { <nl> + TraceEvent ( SevError , " FastRestoreLoaderHasPendingSendRequests " ) <nl> + . detail ( " PendingRequest " , self - > sendingQueue . top ( ) . toString ( ) ) ; <nl> + } <nl> + if ( ! self - > sendLoadParamQueue . empty ( ) & & self - > sendLoadParamQueue . top ( ) . batchIndex < = req . batchIndex ) { <nl> + TraceEvent ( SevError , " FastRestoreLoaderHasPendingSendLoadParamRequests " ) <nl> + . detail ( " PendingRequest " , self - > sendLoadParamQueue . top ( ) . toString ( ) ) ; <nl> + } <nl> + <nl> self - > finishedBatch . set ( req . batchIndex ) ; <nl> / / Clean up batchData <nl> self - > batch . erase ( req . batchIndex ) ; <nl>
FastRestore : Loader : Add sanity check before removing batch data
apple/foundationdb
a32cf7d2932d647a89fd4158c628934c92b3e5fb
2020-08-18T22:44:22Z
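The sanity check added above asserts that, before a version batch is finalized, none of the loader's queues still holds a request for that batch or an earlier one. A minimal sketch of that invariant over a min-heap keyed by batch index (the `Request` type is invented; FDB's real requests carry much more state):

```cpp
#include <cstdio>
#include <queue>
#include <vector>

struct Request { int batchIndex; };

struct ByBatch {
    bool operator()(const Request &a, const Request &b) const {
        return a.batchIndex > b.batchIndex;  // min-heap: earliest batch on top
    }
};
using RequestQueue =
    std::priority_queue<Request, std::vector<Request>, ByBatch>;

// Finishing batchIndex is safe only if no queued request references
// this or an earlier batch -- the condition the SevError traces guard.
bool safe_to_finish(const RequestQueue &q, int batchIndex) {
    return q.empty() || q.top().batchIndex > batchIndex;
}

int main() {
    RequestQueue q;
    q.push(Request{3});
    std::printf("finish batch 2: %s\n",
                safe_to_finish(q, 2) ? "ok" : "pending work!");
    std::printf("finish batch 3: %s\n",
                safe_to_finish(q, 3) ? "ok" : "pending work!");
    return 0;
}
```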
mmm a / Code / CryEngine / RenderDll / XRenderD3D9 / DeviceManager / DeviceResources . cpp <nl> ppp b / Code / CryEngine / RenderDll / XRenderD3D9 / DeviceManager / DeviceResources . cpp <nl> int32 CDeviceResource : : Cleanup ( ) <nl> <nl> / / Gracefully deal with NULL - resources which are nullptr <nl> int32 nRef = m_resourceElements ? - 1 : 0 ; / / - ! ! bool <nl> + <nl> if ( m_pNativeResource ) <nl> { <nl> + / / Figure out current ref - count <nl> nRef = m_pNativeResource - > AddRef ( ) ; <nl> nRef = m_pNativeResource - > Release ( ) ; <nl> <nl> + / / NOTE : Heap are ref - counting ( first register , then release yourself ) <nl> + if ( m_eFlags & CDeviceObjectFactory : : USAGE_HIFREQ_HEAP ) <nl> + GetDeviceObjectFactory ( ) . RecycleResource ( m_pNativeResource ) ; <nl> + else <nl> + GetDeviceObjectFactory ( ) . ReleaseResource ( m_pNativeResource ) ; <nl> + <nl> / / NOTE : CDeviceResource might be shared , take care the texture - pointer stays valid for the other aliases <nl> if ( nRef = = 1 ) <nl> { <nl> nRef = 0 ; <nl> <nl> - / / NOTE : Heap are ref - counting ( first register , then release yourself ) <nl> - if ( m_eFlags & CDeviceObjectFactory : : USAGE_HIFREQ_HEAP ) <nl> - GetDeviceObjectFactory ( ) . RecycleResource ( m_pNativeResource ) ; <nl> - else <nl> - GetDeviceObjectFactory ( ) . ReleaseResource ( m_pNativeResource ) ; <nl> - <nl> m_pNativeResource = nullptr ; <nl> } <nl> } <nl>
! B ( CE - 17435 , CE - 16090 ) ( Renderer ) Fixes Scaleform memory leak
CRYTEK/CRYENGINE
1934faba5ad5a3cf4eaf93c5d0376ca15bc05371
2018-07-30T14:54:24Z
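The fix releases the factory's reference before the "was that the last reference?" test, so recycling happens even when other aliases keep the resource alive; only the pointer-nulling stays conditional. The sketch below reduces this to a plain COM-style refcount with the same AddRef/Release probe idiom (a toy `Resource`, not the D3D type):

```cpp
#include <cstdio>

// Toy COM-style refcounted object standing in for the device resource.
class Resource {
public:
    int AddRef()  { return ++refs_; }
    int Release() {
        int n = --refs_;
        if (n == 0) { std::puts("destroyed"); delete this; }
        return n;
    }
private:
    int refs_ = 1;  // creator holds the first reference
};

int main() {
    Resource *res = new Resource();  // refcount 1 (our reference)
    res->AddRef();                   // refcount 2 (a second alias)

    // Probe the current count without changing it: bump, then drop.
    int nRef = res->AddRef();
    nRef = res->Release();
    std::printf("current refs: %d\n", nRef);  // prints 2

    res->Release();       // drop our reference unconditionally (the fix)
    if (nRef == 1) {      // clear the slot only if we held the last one
        res = nullptr;    // (not taken here: the alias keeps it alive)
    }
    res->Release();       // alias drops the last reference -> "destroyed"
    return 0;
}
```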
mmm a / editor / editor_folding . cpp <nl> ppp b / editor / editor_folding . cpp <nl> void EditorFolding : : save_scene_folding ( const Node * p_scene , const String & p_path <nl> String path = EditorSettings : : get_singleton ( ) - > get_project_settings_dir ( ) ; <nl> String file = p_path . get_file ( ) + " - folding - " + p_path . md5_text ( ) + " . cfg " ; <nl> file = EditorSettings : : get_singleton ( ) - > get_project_settings_dir ( ) . plus_file ( file ) ; <nl> - print_line ( " save folding for : " + file ) ; <nl> config - > save ( file ) ; <nl> } <nl> void EditorFolding : : load_scene_folding ( Node * p_scene , const String & p_path ) { <nl>
Merge pull request from YeldhamDev / remove_folding_print
godotengine/godot
8a4045358ec127680302c61db1fc239708f9d853
2018-10-31T09:06:05Z
mmm a / doc / features . md <nl> ppp b / doc / features . md <nl> <nl> * Support optional relaxed syntax . <nl> * Single line ( ` / / . . . ` ) and multiple line ( ` / * . . . * / ` ) comments ( ` kParseCommentsFlag ` ) . <nl> * Trailing commas at the end of objects and arrays ( ` kParseTrailingCommasFlag ` ) . <nl> + * [ NPM compliant ] ( doc / npm . md ) . <nl> <nl> # # Unicode <nl> <nl> new file mode 100644 <nl> index 000000000 . . 5efa76821 <nl> mmm / dev / null <nl> ppp b / doc / npm . md <nl> <nl> + # # NPM <nl> + <nl> + # package . json { # package } <nl> + <nl> + ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ js <nl> + { <nl> + . . . <nl> + " dependencies " : { <nl> + . . . <nl> + " rapidjson " : " git @ github . com : miloyip / rapidjson . git " <nl> + } , <nl> + . . . <nl> + " gypfile " : true <nl> + } <nl> + ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ <nl> + <nl> + # binding . gyp { # binding } <nl> + <nl> + ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ js <nl> + { <nl> + . . . <nl> + ' targets ' : [ <nl> + { <nl> + . . . <nl> + ' include_dirs ' : [ <nl> + ' < ! ( node - e \ ' require ( " rapidjson " ) \ ' ) ' <nl> + ] <nl> + } <nl> + ] <nl> + } <nl> + ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ <nl>
Added NPM usage docs ( package . json and binding . gyp )
Tencent/rapidjson
77089614841eda75823cfbf0642c99f7cc09c869
2016-05-17T12:33:26Z
mmm a / src / python / grpcio_tests / tests / unit / _channel_connectivity_test . py <nl> ppp b / src / python / grpcio_tests / tests / unit / _channel_connectivity_test . py <nl> <nl> import threading <nl> import time <nl> import unittest <nl> - from concurrent import futures <nl> <nl> import grpc <nl> from grpc import _channel <nl> from grpc import _server <nl> from tests . unit . framework . common import test_constants <nl> + from tests . unit import _thread_pool <nl> <nl> <nl> def _ready_in_connectivities ( connectivities ) : <nl> def test_lonely_channel_connectivity ( self ) : <nl> grpc . ChannelConnectivity . READY , fifth_connectivities ) <nl> <nl> def test_immediately_connectable_channel_connectivity ( self ) : <nl> - server = _server . Server ( futures . ThreadPoolExecutor ( max_workers = 0 ) , ( ) ) <nl> + thread_pool = _thread_pool . RecordingThreadPool ( max_workers = None ) <nl> + server = _server . Server ( thread_pool , ( ) ) <nl> port = server . add_insecure_port ( ' [ : : ] : 0 ' ) <nl> server . start ( ) <nl> first_callback = _Callback ( ) <nl> def test_immediately_connectable_channel_connectivity ( self ) : <nl> fourth_connectivities ) <nl> self . assertNotIn ( <nl> grpc . ChannelConnectivity . SHUTDOWN , fourth_connectivities ) <nl> + self . assertFalse ( thread_pool . was_used ( ) ) <nl> <nl> def test_reachable_then_unreachable_channel_connectivity ( self ) : <nl> - server = _server . Server ( futures . ThreadPoolExecutor ( max_workers = 0 ) , ( ) ) <nl> + thread_pool = _thread_pool . RecordingThreadPool ( max_workers = None ) <nl> + server = _server . Server ( thread_pool , ( ) ) <nl> port = server . add_insecure_port ( ' [ : : ] : 0 ' ) <nl> server . start ( ) <nl> callback = _Callback ( ) <nl> def test_reachable_then_unreachable_channel_connectivity ( self ) : <nl> server . stop ( None ) <nl> callback . block_until_connectivities_satisfy ( _last_connectivity_is_not_ready ) <nl> channel . unsubscribe ( callback . update ) <nl> + self . assertFalse ( thread_pool . was_used ( ) ) <nl> <nl> <nl> if __name__ = = ' __main__ ' : <nl> mmm a / src / python / grpcio_tests / tests / unit / _channel_ready_future_test . py <nl> ppp b / src / python / grpcio_tests / tests / unit / _channel_ready_future_test . py <nl> <nl> <nl> import threading <nl> import unittest <nl> - from concurrent import futures <nl> <nl> import grpc <nl> from grpc import _channel <nl> from grpc import _server <nl> from tests . unit . framework . common import test_constants <nl> + from tests . unit import _thread_pool <nl> <nl> <nl> class _Callback ( object ) : <nl> def test_lonely_channel_connectivity ( self ) : <nl> self . assertFalse ( ready_future . running ( ) ) <nl> <nl> def test_immediately_connectable_channel_connectivity ( self ) : <nl> - server = _server . Server ( futures . ThreadPoolExecutor ( max_workers = 0 ) , ( ) ) <nl> + thread_pool = _thread_pool . RecordingThreadPool ( max_workers = None ) <nl> + server = _server . Server ( thread_pool , ( ) ) <nl> port = server . add_insecure_port ( ' [ : : ] : 0 ' ) <nl> server . start ( ) <nl> channel = grpc . insecure_channel ( ' localhost : { } ' . format ( port ) ) <nl> def test_immediately_connectable_channel_connectivity ( self ) : <nl> self . assertFalse ( ready_future . cancelled ( ) ) <nl> self . assertTrue ( ready_future . done ( ) ) <nl> self . assertFalse ( ready_future . running ( ) ) <nl> + self . assertFalse ( thread_pool . was_used ( ) ) <nl> <nl> <nl> if __name__ = = ' __main__ ' : <nl> new file mode 100644 <nl> index 00000000000 . . 
f13cc2f86fc <nl> mmm / dev / null <nl> ppp b / src / python / grpcio_tests / tests / unit / _thread_pool . py <nl> <nl> + # Copyright 2016 , Google Inc . <nl> + # All rights reserved . <nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions are <nl> + # met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . <nl> + # * Redistributions in binary form must reproduce the above <nl> + # copyright notice , this list of conditions and the following disclaimer <nl> + # in the documentation and / or other materials provided with the <nl> + # distribution . <nl> + # * Neither the name of Google Inc . nor the names of its <nl> + # contributors may be used to endorse or promote products derived from <nl> + # this software without specific prior written permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + import threading <nl> + from concurrent import futures <nl> + <nl> + <nl> + class RecordingThreadPool ( futures . Executor ) : <nl> + " " " A thread pool that records if used . " " " <nl> + def __init__ ( self , max_workers ) : <nl> + self . _tp_executor = futures . ThreadPoolExecutor ( max_workers = max_workers ) <nl> + self . _lock = threading . Lock ( ) <nl> + self . _was_used = False <nl> + <nl> + def submit ( self , fn , * args , * * kwargs ) : <nl> + with self . _lock : <nl> + self . _was_used = True <nl> + self . _tp_executor . submit ( fn , * args , * * kwargs ) <nl> + <nl> + def was_used ( self ) : <nl> + with self . _lock : <nl> + return self . _was_used <nl>
Merge pull request from thunderboltsid / python - test - fix
grpc/grpc
220d14e0202683901f353b08a5e3a32c9eec0d50
2016-08-05T22:09:31Z
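`RecordingThreadPool` is a decorator: it forwards `submit` to a real pool while flagging, under a lock, that the pool was touched, which lets the connectivity tests assert the server never scheduled work. A compact C++ rendering of the same wrapper (the `Executor` interface is invented for the sketch):

```cpp
#include <cstdio>
#include <functional>
#include <mutex>
#include <thread>
#include <vector>

// Minimal executor interface standing in for a real thread pool.
class Executor {
public:
    virtual ~Executor() {}
    virtual void submit(std::function<void()> fn) = 0;
};

class ThreadPerTaskExecutor : public Executor {
public:
    void submit(std::function<void()> fn) override {
        threads_.emplace_back(std::move(fn));
    }
    ~ThreadPerTaskExecutor() {
        for (auto &t : threads_) t.join();
    }
private:
    std::vector<std::thread> threads_;
};

// Decorator that records whether the wrapped pool was ever used.
class RecordingExecutor : public Executor {
public:
    explicit RecordingExecutor(Executor *inner) : inner_(inner) {}
    void submit(std::function<void()> fn) override {
        { std::lock_guard<std::mutex> lk(mu_); was_used_ = true; }
        inner_->submit(std::move(fn));
    }
    bool was_used() {
        std::lock_guard<std::mutex> lk(mu_);
        return was_used_;
    }
private:
    Executor *inner_;
    std::mutex mu_;
    bool was_used_ = false;
};

int main() {
    ThreadPerTaskExecutor pool;           // outlives the decorator
    RecordingExecutor recording(&pool);
    std::printf("used before: %d\n", recording.was_used());  // 0
    recording.submit([] { std::puts("task ran"); });
    std::printf("used after: %d\n", recording.was_used());   // 1
    return 0;
}
```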
new file mode 100644 <nl> index 0000000000 . . 55dc4823d8 <nl> mmm / dev / null <nl> ppp b / code / greedy - algorithms / hillclimber / Hillclimber . java <nl> <nl> + import java . util . ArrayList ; <nl> + import java . util . Arrays ; <nl> + import java . util . Collections ; <nl> + import java . util . List ; <nl> + <nl> + public class Hillclimber { <nl> + <nl> + private double [ ] [ ] distances ; <nl> + private int [ ] shortestRoute ; <nl> + private static final int NUMBER_OF_CITIES = 100 ; <nl> + <nl> + <nl> + public Hillclimber ( ) { <nl> + distances = new double [ NUMBER_OF_CITIES ] [ NUMBER_OF_CITIES ] ; <nl> + } <nl> + <nl> + public static void main ( String [ ] args ) { <nl> + Hillclimber hillclimber = new Hillclimber ( ) ; <nl> + hillclimber . fillList ( ) ; <nl> + <nl> + <nl> + <nl> + hillclimber . shortestRoute = hillclimber . getShortestRouteStart ( ) ; <nl> + <nl> + hillclimber . calculateShortestDistance ( ) ; <nl> + <nl> + <nl> + } <nl> + <nl> + / * * <nl> + * calculate the current fitness when getting the route as an array of integers , which city to visit first <nl> + * <nl> + * @ param route array of integers , current route <nl> + * @ return distance of the route , lower is better <nl> + * / <nl> + private float fitness ( int [ ] route ) { <nl> + float currentFitness = 0 ; <nl> + for ( int i = 0 ; i < route . length - 1 ; i + + ) { <nl> + currentFitness + = distanceFromAtoB ( route [ i ] , route [ i + 1 ] ) ; <nl> + } <nl> + return currentFitness ; <nl> + } <nl> + <nl> + / * * <nl> + * returns a random route that passes every city <nl> + * @ return <nl> + * / <nl> + private int [ ] getShortestRouteStart ( ) { <nl> + List < Integer > numbers = new ArrayList < > ( ) ; <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + numbers . add ( i ) ; <nl> + } <nl> + Collections . shuffle ( numbers ) ; <nl> + int [ ] shortestRoute = numbers . stream ( ) . mapToInt ( i - > i ) . toArray ( ) ; <nl> + <nl> + return shortestRoute ; <nl> + <nl> + } <nl> + <nl> + public double distanceFromAtoB ( int positionA , int positionB ) { <nl> + return distances [ positionA ] [ positionB ] ; <nl> + } <nl> + <nl> + <nl> + @ Override <nl> + public String toString ( ) { <nl> + String order = " " ; <nl> + double distance = 0 ; <nl> + for ( int i = 0 ; i < shortestRoute . length - 1 ; i + + ) { <nl> + order + = shortestRoute [ i ] + " , " ; <nl> + distance + = distanceFromAtoB ( shortestRoute [ i ] , shortestRoute [ i + 1 ] ) ; <nl> + } <nl> + return order + " \ n total distance = " + distance ; <nl> + } <nl> + <nl> + / * * <nl> + * swap the position of 2 cities on the route <nl> + * <nl> + * @ param currentRoute <nl> + * @ return <nl> + * / <nl> + private int [ ] moveOneStepAtRandom ( int [ ] currentRoute ) { <nl> + int [ ] currentRouteCopy = Arrays . copyOf ( currentRoute , currentRoute . length ) ; <nl> + <nl> + int position1 = ( int ) ( Math . random ( ) * 100 ) ; <nl> + int position2 = ( int ) ( Math . random ( ) * 100 ) ; <nl> + currentRouteCopy [ position1 ] = currentRoute [ position2 ] ; <nl> + currentRouteCopy [ position2 ] = currentRoute [ position1 ] ; <nl> + return currentRouteCopy ; <nl> + } <nl> + <nl> + / * * <nl> + * find the shortest distance <nl> + * / <nl> + private void calculateShortestDistance ( ) { <nl> + float lastFitness = fitness ( shortestRoute ) ; <nl> + double threshold = lastFitness / 9 ; <nl> + int i = 0 ; <nl> + do { <nl> + i + + ; <nl> + <nl> + int [ ] lastRoute = Arrays . copyOf ( shortestRoute , shortestRoute . 
length ) ; <nl> + <nl> + shortestRoute = moveOneStepAtRandom ( shortestRoute ) ; <nl> + if ( fitness ( shortestRoute ) < lastFitness ) { <nl> + lastFitness = fitness ( shortestRoute ) ; <nl> + } else { <nl> + shortestRoute = lastRoute ; <nl> + <nl> + } <nl> + } <nl> + while ( lastFitness > threshold & & i < 50000 ) ; / / TODO : find the Threshold <nl> + } <nl> + <nl> + <nl> + <nl> + / * * Initializes the list with random distances between each of the cities <nl> + * <nl> + * / <nl> + private void fillList ( ) { <nl> + for ( int i = 0 ; i < NUMBER_OF_CITIES ; i + + ) { <nl> + <nl> + for ( int j = i ; j < NUMBER_OF_CITIES ; j + + ) { <nl> + if ( i = = j ) { <nl> + distances [ i ] [ j ] = 0 ; <nl> + } else { <nl> + distances [ i ] [ j ] = Math . random ( ) * 100 ; <nl> + distances [ j ] [ i ] = distances [ i ] [ j ] ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> + } <nl> + <nl> + <nl> + } <nl>
Added a solution for the travelling salesman problem using a hillclimber algorithm
OpenGenus/cosmos
658a9e7bea6f9d9b3752c81bdaf2de7a4e9b6a6d
2017-10-17T13:24:19Z
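The Java class above follows the standard hill-climbing loop: perturb the route by swapping two cities, keep the change if the tour got shorter, revert otherwise. The C++ sketch below shows the same loop; it undoes the swap in place instead of copying the whole route each iteration, which is the main per-step cost in the Java version (distances and iteration counts are arbitrary toy values):

```cpp
#include <algorithm>
#include <cstdio>
#include <numeric>
#include <random>
#include <vector>

// Route length under a symmetric distance matrix.
double fitness(const std::vector<std::vector<double>> &d,
               const std::vector<int> &route) {
    double total = 0;
    for (size_t i = 0; i + 1 < route.size(); ++i)
        total += d[route[i]][route[i + 1]];
    return total;
}

int main() {
    const int n = 100;
    std::mt19937 rng(42);
    std::uniform_real_distribution<double> dist(0.0, 100.0);
    std::uniform_int_distribution<int> pick(0, n - 1);

    // Random symmetric distances, zero diagonal (as in the Java fillList).
    std::vector<std::vector<double>> d(n, std::vector<double>(n, 0.0));
    for (int i = 0; i < n; ++i)
        for (int j = i + 1; j < n; ++j) d[i][j] = d[j][i] = dist(rng);

    std::vector<int> route(n);
    std::iota(route.begin(), route.end(), 0);
    std::shuffle(route.begin(), route.end(), rng);

    double best = fitness(d, route);
    for (int step = 0; step < 50000; ++step) {
        int a = pick(rng), b = pick(rng);
        std::swap(route[a], route[b]);       // random neighbor
        double f = fitness(d, route);
        if (f < best) best = f;              // accept an improvement
        else std::swap(route[a], route[b]);  // otherwise undo the swap
    }
    std::printf("final distance: %.1f\n", best);
    return 0;
}
```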
mmm a / dbms / src / Storages / IStorage . cpp <nl> ppp b / dbms / src / Storages / IStorage . cpp <nl> void IStorage : : check ( const Block & block , bool need_all ) const <nl> <nl> if ( need_all & & names_in_block . size ( ) < columns_map . size ( ) ) <nl> { <nl> - for ( NamesAndTypesList : : iterator it = available_columns . begin ( ) ; it ! = available_columns . end ( ) ; + + it ) <nl> + for ( NamesAndTypesList : : const_iterator it = available_columns . begin ( ) ; it ! = available_columns . end ( ) ; + + it ) <nl> { <nl> if ( ! names_in_block . count ( it - > first ) ) <nl> throw Exception ( " Expected column " + it - > first , ErrorCodes : : NOT_FOUND_COLUMN_IN_BLOCK ) ; <nl>
clickhouse : fixed build [ # CONV - 2944 ] .
ClickHouse/ClickHouse
0ddaa9048edaae59c844978720af1a5ffe27c138
2013-02-25T10:51:52Z
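The one-word ClickHouse fix is the classic const-correctness rule: inside a `const` member function, `begin()` on a member container returns a `const_iterator`, so a plain `iterator` variable will not compile. Reduced to a standalone example:

```cpp
#include <list>
#include <string>
#include <utility>

struct Storage {
    std::list<std::pair<std::string, std::string>> available_columns;

    void check() const {
        // In a const member function, available_columns is const, so
        // begin() yields a const_iterator; declaring `iterator it` here
        // would fail to compile -- the one-word fix from the commit.
        for (std::list<std::pair<std::string, std::string>>::const_iterator
                 it = available_columns.begin();
             it != available_columns.end(); ++it) {
            (void)it->first;  // read-only access is all that's allowed
        }
    }
};

int main() {
    Storage s;
    s.available_columns.push_back({"id", "UInt64"});
    s.check();
    return 0;
}
```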
mmm a / tensorflow / lite / experimental / micro / tools / make / Makefile <nl> ppp b / tensorflow / lite / experimental / micro / tools / make / Makefile <nl> include $ ( wildcard tensorflow / lite / experimental / micro / examples / * / Makefile . inc ) <nl> MICROLITE_LIB_OBJS : = $ ( addprefix $ ( OBJDIR ) , \ <nl> $ ( patsubst % . cc , % . o , $ ( patsubst % . c , % . o , $ ( MICROLITE_CC_SRCS ) ) ) ) <nl> <nl> + MICROLITE_LIB_OBJS + = $ ( addprefix $ ( OBJDIR ) , \ <nl> + $ ( patsubst % . S , % . o , $ ( patsubst % . cc , % . o , $ ( patsubst % . c , % . o , $ ( THIRD_PARTY_CC_SRCS ) ) ) ) ) <nl> + <nl> # For normal manually - created TensorFlow C + + source files . <nl> $ ( OBJDIR ) % . o : % . cc <nl> @ mkdir - p $ ( dir $ @ ) <nl>
Add third party source files to list of objects to compile .
tensorflow/tensorflow
bf793a97d1e6973669a719a6d93c5ba7d28530bd
2019-01-30T23:18:57Z
mmm a / tensorflow / core / graph / graph_constructor . cc <nl> ppp b / tensorflow / core / graph / graph_constructor . cc <nl> class GraphConstructor { <nl> prefix ( in . prefix . empty ( ) | | StringPiece ( in . prefix ) . ends_with ( " / " ) <nl> ? in . prefix <nl> : in . prefix + " / " ) , <nl> + input_map ( in . input_map ) , <nl> importing ( true ) { } <nl> <nl> bool allow_internal_ops ; <nl> bool expect_device_spec ; <nl> <nl> string prefix ; <nl> + std : : map < TensorId , TensorId > input_map ; <nl> + <nl> / / TODO ( ashankar ) : This bool exists to separate out functionality required <nl> / / to make ImportGraphDef a close equivalent of Python ' s import_graph_def <nl> / / without affecting the behavior of ConvertGraphDefToGraph at the time <nl> class GraphConstructor { <nl> <nl> Status TryImport ( ) { <nl> TF_RETURN_IF_ERROR ( EnsureNoNameCollisions ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( ValidateInputMap ( ) ) ; <nl> TF_RETURN_IF_ERROR ( BuildNodeIndex ( ) ) ; <nl> TF_RETURN_IF_ERROR ( InitFromEdges ( ) ) ; <nl> TF_RETURN_IF_ERROR ( Convert ( ) ) ; <nl> class GraphConstructor { <nl> } <nl> <nl> Status EnsureNoNameCollisions ( ) ; <nl> + Status ValidateInputMap ( ) ; <nl> Status BuildNodeIndex ( ) ; <nl> Status InitFromEdges ( ) ; <nl> Status Convert ( ) ; <nl> class GraphConstructor { <nl> Status MakeEdge ( Node * src , int output_index , Node * dst , int input_index ) ; <nl> Status ValidateShape ( Node * node ) ; <nl> Status ModifyNodeDefForImport ( NodeDef * node_def ) ; <nl> - void AddPrefixToNodeDef ( NodeDef * node_def ) ; <nl> + / / Modifies node_def ' s inputs according to opts_ . input_map . input_remapped is <nl> + / / a pre - initialized vector of length node_def - > input_size ( ) indicating <nl> + / / whether each input has been remapped . <nl> + void RemapNodeDefInputs ( NodeDef * node_def , std : : vector < bool > * input_remapped ) ; <nl> + void AddPrefixToNodeDef ( const std : : vector < bool > & input_remapped , <nl> + NodeDef * node_def ) ; <nl> <nl> / / From constructor <nl> const Options opts_ ; <nl> class GraphConstructor { <nl> } ; <nl> / / TODO ( vrv ) : Profile this data structure to see if we should use an <nl> / / alternative implementation of std : : unordered_map . <nl> - std : : unordered_map < StringPiece , NodeInfo , StringPiece : : Hasher > name_index_ ; <nl> + std : : unordered_map < StringPiece , NodeInfo , StringPiece : : Hasher > gdef_nodes_ ; <nl> + <nl> + / / Mapping from node name to the existing node in g_ <nl> + std : : unordered_map < StringPiece , Node * , StringPiece : : Hasher > existing_nodes_ ; <nl> <nl> / / Index of NodeDefs in gdef_ with all inputs already converted . <nl> std : : vector < int > ready_ ; <nl> class GraphConstructor { <nl> / / Used in the conversion from gdef_ to g_ to represent the ith input <nl> / / of a node . <nl> struct InputInfo { <nl> - explicit InputInfo ( StringPiece node_name , Node * n , int i ) <nl> + explicit InputInfo ( const string & node_name , Node * n , int i ) <nl> : name ( node_name ) , node ( n ) , index ( i ) { } <nl> - StringPiece name ; <nl> + / / Use string instead of StringPiece so we don ' t have to manage lifetime <nl> + string name ; <nl> Node * node ; <nl> int index ; <nl> } ; <nl> class GraphConstructor { <nl> / / Used in the conversion from gdef_ to g_ to represent an edge from <nl> / / the node named ' name ' to node ' n ' . 
<nl> struct EdgeInfo { <nl> - explicit EdgeInfo ( StringPiece name , int i1 , Node * n , int i2 ) <nl> + explicit EdgeInfo ( const string & name , int i1 , Node * n , int i2 ) <nl> : src_name ( name ) , src_index ( i1 ) , dst_node ( n ) , dst_index ( i2 ) { } <nl> - StringPiece src_name ; <nl> + / / Use string instead of StringPiece so we don ' t have to manage lifetime <nl> + string src_name ; <nl> int src_index ; <nl> Node * dst_node ; <nl> int dst_index ; <nl> class GraphConstructor { <nl> std : : vector < EdgeInfo > back_edges_ ; <nl> } ; <nl> <nl> + / / This could be expensive but we don ' t expect to call it often , if at all ( only <nl> + / / if there are multiple nodes in g_ with the same name ) <nl> + bool NodeNameInValues ( const std : : map < TensorId , TensorId > & input_map , <nl> + const StringPiece & node_name ) { <nl> + for ( auto iter = input_map . begin ( ) ; iter ! = input_map . end ( ) ; + + iter ) { <nl> + if ( iter - > second . first = = node_name ) return true ; <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> Status GraphConstructor : : EnsureNoNameCollisions ( ) { <nl> - if ( opts_ . prefix . empty ( ) & & opts_ . importing ) { <nl> - std : : unordered_set < string > existing ( g_ - > num_nodes ( ) ) ; <nl> - for ( const Node * n : g_ - > nodes ( ) ) { <nl> - existing . insert ( n - > name ( ) ) ; <nl> + existing_nodes_ . reserve ( g_ - > num_nodes ( ) ) ; <nl> + for ( Node * n : g_ - > nodes ( ) ) { <nl> + bool already_exists = ! existing_nodes_ . insert ( { n - > name ( ) , n } ) . second ; <nl> + if ( already_exists & & NodeNameInValues ( opts_ . input_map , n - > name ( ) ) ) { <nl> + return errors : : InvalidArgument ( <nl> + " cannot resolve input_map because multiple nodes exist with name ' " , <nl> + n - > name ( ) , " ' " ) ; <nl> } <nl> + } <nl> + if ( opts_ . prefix . empty ( ) & & opts_ . importing ) { <nl> for ( int n = 0 ; n < gdef_ - > node_size ( ) ; + + n ) { <nl> const string & name = gdef_ - > node ( n ) . name ( ) ; <nl> - if ( existing . find ( name ) ! = existing . end ( ) ) { <nl> + if ( existing_nodes_ . find ( name ) ! = existing_nodes_ . end ( ) ) { <nl> return errors : : InvalidArgument ( " Node ' " , name , <nl> " ' already exists in the Graph " ) ; <nl> } <nl> Status GraphConstructor : : EnsureNoNameCollisions ( ) { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> + Status GraphConstructor : : ValidateInputMap ( ) { <nl> + for ( const auto & mapping : opts_ . input_map ) { <nl> + TensorId src = mapping . first ; <nl> + TensorId dst = mapping . second ; <nl> + if ( existing_nodes_ . count ( dst . first ) = = 0 ) { <nl> + return errors : : InvalidArgument ( <nl> + " node ' " , dst . first , " ' in input_map does not exist in graph " , <nl> + " ( input_map entry : " , src . ToString ( ) , " - > " , dst . ToString ( ) , " ) " ) ; <nl> + } <nl> + if ( ( src . second = = Graph : : kControlSlot ) ! = <nl> + ( dst . second = = Graph : : kControlSlot ) ) { <nl> + return errors : : InvalidArgument ( <nl> + " input_map entry " , src . ToString ( ) , " - > " , dst . ToString ( ) , " between " , <nl> + " control edge and non - control edge " ) ; <nl> + } <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> Status GraphConstructor : : BuildNodeIndex ( ) { <nl> - / / Validate the node names and add them to name_index_ . <nl> + / / Validate the node names and add them to gdef_nodes_ . <nl> for ( int n = 0 ; n < gdef_ - > node_size ( ) ; + + n ) { <nl> const NodeDef & node_def ( gdef_ - > node ( n ) ) ; <nl> if ( ! 
IsValidNodeName ( node_def . name ( ) , opts_ . allow_internal_ops ) ) { <nl> Status GraphConstructor : : BuildNodeIndex ( ) { <nl> " Node ' " , node_def . name ( ) , <nl> " ' : Node name contains invalid characters " ) ; <nl> } <nl> - if ( ! name_index_ <nl> + if ( ! gdef_nodes_ <nl> . insert ( std : : make_pair ( StringPiece ( node_def . name ( ) ) , NodeInfo ( n ) ) ) <nl> . second ) { <nl> return errors : : InvalidArgument ( " Node ' " , node_def . name ( ) , <nl> Status GraphConstructor : : BuildNodeIndex ( ) { <nl> return errors : : InvalidArgument ( " Node ' " , node_def . name ( ) , <nl> " ' is missing a device specification " ) ; <nl> } <nl> + / / Validate control edges at end <nl> + bool in_control_dependence = false ; <nl> + for ( int i = 0 ; i < node_def . input_size ( ) ; + + i ) { <nl> + StringPiece input_name = node_def . input ( i ) ; <nl> + if ( ! input_name . empty ( ) & & input_name . starts_with ( " ^ " ) ) { <nl> + in_control_dependence = true ; <nl> + } else if ( in_control_dependence ) { <nl> + return errors : : InvalidArgument ( <nl> + " Node ' " , node_def . name ( ) , <nl> + " ' : Control dependencies must come after regular dependencies " ) ; <nl> + } <nl> + } <nl> } <nl> return Status : : OK ( ) ; <nl> } <nl> Status GraphConstructor : : InitFromEdges ( ) { <nl> for ( int i = 0 ; i < node_def . input_size ( ) ; + + i ) { <nl> StringPiece input_name = node_def . input ( i ) ; <nl> TensorId id ( ParseTensorName ( input_name ) ) ; <nl> - auto iter = name_index_ . find ( id . first ) ; <nl> - if ( iter = = name_index_ . end ( ) ) { <nl> + auto iter = gdef_nodes_ . find ( id . first ) ; <nl> + if ( iter = = gdef_nodes_ . end ( ) ) { <nl> return errors : : InvalidArgument ( " Node ' " , node_def . name ( ) , <nl> " ' : Unknown input node ' " , <nl> node_def . input ( i ) , " ' " ) ; <nl> Status GraphConstructor : : ValidateColocationConstraints ( <nl> for ( const string & c : iter - > second . list ( ) . s ( ) ) { <nl> StringPiece s ( c ) ; <nl> if ( s . Consume ( kColocationGroupPrefix ) & & <nl> - name_index_ . find ( s ) = = name_index_ . end ( ) ) { <nl> + gdef_nodes_ . find ( s ) = = gdef_nodes_ . end ( ) ) { <nl> return errors : : InvalidArgument ( <nl> " Node ' " , node_def . name ( ) , <nl> " ' expects to be colocated with unknown node ' " , s , " ' " ) ; <nl> Status GraphConstructor : : ModifyNodeDefForImport ( NodeDef * node_def ) { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - void GraphConstructor : : AddPrefixToNodeDef ( NodeDef * node_def ) { <nl> + void RemoveInputs ( NodeDef * node_def , const std : : vector < int > & inputs_to_remove ) { <nl> + / / TODO ( skyewm ) : is there a better way to do this ? <nl> + std : : vector < string > inputs ; <nl> + for ( int i = 0 ; i < node_def - > input_size ( ) ; + + i ) { <nl> + inputs . push_back ( node_def - > input ( i ) ) ; <nl> + } <nl> + node_def - > clear_input ( ) ; <nl> + for ( int i = 0 , j = 0 ; i < inputs . size ( ) ; + + i ) { <nl> + if ( j < inputs_to_remove . 
size ( ) & & i = = inputs_to_remove [ j ] ) { <nl> + + + j ; <nl> + } else { <nl> + node_def - > add_input ( inputs [ i ] ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void GraphConstructor : : RemapNodeDefInputs ( NodeDef * node_def , <nl> + std : : vector < bool > * input_remapped ) { <nl> + DCHECK_EQ ( input_remapped - > size ( ) , node_def - > input_size ( ) ) ; <nl> + std : : set < TensorId > control_inputs ; <nl> + std : : vector < int > inputs_to_remove ; <nl> + <nl> + for ( int i = 0 ; i < node_def - > input_size ( ) ; + + i ) { <nl> + auto iter = opts_ . input_map . find ( ParseTensorName ( node_def - > input ( i ) ) ) ; <nl> + if ( iter = = opts_ . input_map . end ( ) ) continue ; <nl> + <nl> + TensorId new_input = iter - > second ; <nl> + if ( new_input . second = = Graph : : kControlSlot ) { <nl> + / / Check if we ' ve already remapped a different input to new_input , and if <nl> + / / so remove this input . <nl> + if ( control_inputs . count ( new_input ) > 0 ) { <nl> + inputs_to_remove . push_back ( i ) ; <nl> + continue ; <nl> + } <nl> + control_inputs . insert ( new_input ) ; <nl> + } <nl> + node_def - > set_input ( i , new_input . ToString ( ) ) ; <nl> + ( * input_remapped ) [ i ] = true ; <nl> + } <nl> + if ( ! inputs_to_remove . empty ( ) ) RemoveInputs ( node_def , inputs_to_remove ) ; <nl> + } <nl> + <nl> + void GraphConstructor : : AddPrefixToNodeDef ( <nl> + const std : : vector < bool > & input_remapped , NodeDef * node_def ) { <nl> const string & prefix = opts_ . prefix ; <nl> if ( prefix . empty ( ) ) return ; <nl> node_def - > set_name ( strings : : StrCat ( prefix , node_def - > name ( ) ) ) ; <nl> / / Update names of input nodes <nl> for ( int i = 0 ; i < node_def - > input_size ( ) ; + + i ) { <nl> StringPiece input ( node_def - > input ( i ) ) ; <nl> + / / Skip remapped inputs ( which already exist in g_ and are not being <nl> + / / imported ) <nl> + if ( input_remapped [ i ] ) continue ; <nl> if ( input . Consume ( " ^ " ) ) { <nl> node_def - > set_input ( i , strings : : StrCat ( " ^ " , prefix , input ) ) ; <nl> } else { <nl> Status GraphConstructor : : Convert ( ) { <nl> int o = ready_ . back ( ) ; <nl> ready_ . pop_back ( ) ; <nl> + + processed ; <nl> - const NodeDef & node_def ( gdef_ - > node ( o ) ) ; <nl> inputs . clear ( ) ; <nl> - bool in_control_dependence = false ; <nl> bool has_data_back_edge = false ; <nl> - TF_RETURN_IF_ERROR ( ValidateColocationConstraints ( node_def ) ) ; <nl> - for ( int i = 0 ; i < node_def . input_size ( ) ; + + i ) { <nl> - StringPiece input_name ( node_def . input ( i ) ) ; <nl> - if ( input_name . starts_with ( " ^ " ) ) { <nl> - in_control_dependence = true ; <nl> - } else if ( in_control_dependence ) { <nl> - return errors : : InvalidArgument ( <nl> - " Node ' " , node_def . name ( ) , <nl> - " ' : Control dependencies must come after regular dependencies " ) ; <nl> + <nl> + const NodeDef & original_node_def = gdef_ - > node ( o ) ; <nl> + NodeDef imported_node_def ; <nl> + const NodeDef * node_def ; <nl> + <nl> + std : : vector < bool > input_remapped ( original_node_def . input_size ( ) , false ) ; <nl> + <nl> + if ( opts_ . importing ) { <nl> + / / TODO ( ashankar ) : The line below means an additional copy of the NodeDef , <nl> + / / which can be expensive if the NodeDef contains large tensors in it . <nl> + / / Might make sense to change the API for ImportGraphDef to take a mutable <nl> + / / GraphDef * and avoid the copying . <nl> + imported_node_def = original_node_def ; <nl> + if ( ! opts_ . input_map . 
empty ( ) ) { <nl> + RemapNodeDefInputs ( & imported_node_def , & input_remapped ) ; <nl> } <nl> - TensorId id ( ParseTensorName ( input_name ) ) ; <nl> - auto iter = name_index_ . find ( id . first ) ; <nl> - DCHECK ( iter ! = name_index_ . end ( ) ) ; <nl> - Node * src_node = iter - > second . node ; <nl> - if ( src_node = = nullptr ) { <nl> - has_data_back_edge = true ; <nl> - inputs . push_back ( InputInfo ( id . first , src_node , id . second ) ) ; <nl> + node_def = & imported_node_def ; <nl> + } else { <nl> + node_def = & original_node_def ; <nl> + } <nl> + <nl> + TF_RETURN_IF_ERROR ( ValidateColocationConstraints ( * node_def ) ) ; <nl> + for ( int i = 0 ; i < node_def - > input_size ( ) ; + + i ) { <nl> + TensorId id ( ParseTensorName ( node_def - > input ( i ) ) ) ; <nl> + Node * src_node ; <nl> + int src_index ; <nl> + <nl> + if ( ! input_remapped [ i ] ) { <nl> + / / Locate input in newly - imported nodes <nl> + auto iter = gdef_nodes_ . find ( id . first ) ; <nl> + DCHECK ( iter ! = gdef_nodes_ . end ( ) ) < < id . first ; <nl> + src_node = iter - > second . node ; <nl> + src_index = id . second ; <nl> + if ( src_node = = nullptr ) has_data_back_edge = true ; <nl> } else { <nl> - if ( id . second > = src_node - > num_outputs ( ) ) { <nl> - return errors : : InvalidArgument ( <nl> - " Node ' " , node_def . name ( ) , " ' : Connecting to invalid output " , <nl> - id . second , " of source node " , id . first , " which has " , <nl> - src_node - > num_outputs ( ) , " outputs " ) ; <nl> - } <nl> - inputs . push_back ( InputInfo ( id . first , src_node , id . second ) ) ; <nl> + / / Input was remapped according to input_map <nl> + auto iter = existing_nodes_ . find ( id . first ) ; <nl> + DCHECK ( iter ! = existing_nodes_ . end ( ) ) < < id . first ; <nl> + src_node = iter - > second ; <nl> + src_index = id . second ; <nl> + } <nl> + <nl> + if ( src_node ! = nullptr & & src_index > = src_node - > num_outputs ( ) ) { <nl> + return errors : : InvalidArgument ( <nl> + " Node ' " , node_def - > name ( ) , " ' : Connecting to invalid output " , <nl> + id . second , " of source node " , id . first , " which has " , <nl> + src_node - > num_outputs ( ) , " outputs " ) ; <nl> } <nl> + <nl> + inputs . push_back ( InputInfo ( id . first . ToString ( ) , src_node , src_index ) ) ; <nl> } <nl> - if ( has_data_back_edge & & ! IsMerge ( node_def ) ) { <nl> + <nl> + if ( has_data_back_edge & & ! IsMerge ( * node_def ) ) { <nl> return errors : : InvalidArgument ( <nl> - " Node ' " , node_def . name ( ) , <nl> + " Node ' " , node_def - > name ( ) , <nl> " ' had a back edge , but only Merge nodes can have back edges . " ) ; <nl> } <nl> <nl> Node * node ; <nl> if ( opts_ . importing ) { <nl> - / / TODO ( ashankar ) : The line below means an additional copy of the NodeDef , <nl> - / / which can be expensive if the NodeDef contains large tensors in it . <nl> - / / Might make sense to change the API for ImportGraphDef to take a mutable <nl> - / / GraphDef * and avoid the copying . <nl> - NodeDef imported_node_def = node_def ; <nl> - AddPrefixToNodeDef ( & imported_node_def ) ; <nl> + AddPrefixToNodeDef ( input_remapped , & imported_node_def ) ; <nl> TF_RETURN_IF_ERROR ( ModifyNodeDefForImport ( & imported_node_def ) ) ; <nl> - TF_RETURN_IF_ERROR ( MakeNode ( imported_node_def , & node ) ) ; <nl> - } else { <nl> - TF_RETURN_IF_ERROR ( MakeNode ( node_def , & node ) ) ; <nl> } <nl> - name_index_ [ node_def . name ( ) ] . 
node = node ; <nl> + TF_RETURN_IF_ERROR ( MakeNode ( * node_def , & node ) ) ; <nl> + / / Use original_node_def so name StringPiece remains valid <nl> + gdef_nodes_ [ original_node_def . name ( ) ] . node = node ; <nl> <nl> / / Add edges from inputs to * node to the graph . <nl> for ( size_t i = 0 ; i < inputs . size ( ) ; + + i ) { <nl> Status GraphConstructor : : Convert ( ) { <nl> / / are created . <nl> back_edges_ . push_back ( <nl> EdgeInfo ( inputs [ i ] . name , inputs [ i ] . index , node , i ) ) ; <nl> - } else if ( inputs [ i ] . index = = - 1 ) { <nl> + } else if ( inputs [ i ] . index = = Graph : : kControlSlot ) { <nl> g_ - > AddControlEdge ( inputs [ i ] . node , node ) ; <nl> } else { <nl> TF_RETURN_IF_ERROR ( MakeEdge ( inputs [ i ] . node , inputs [ i ] . index , node , i ) ) ; <nl> Status GraphConstructor : : Convert ( ) { <nl> Status GraphConstructor : : AddBackEdges ( ) { <nl> / / Add the back edges after all nodes are created . <nl> for ( auto e : back_edges_ ) { <nl> - Node * src_node = name_index_ [ e . src_name ] . node ; <nl> - if ( e . src_index = = - 1 ) { <nl> + Node * src_node = gdef_nodes_ [ e . src_name ] . node ; <nl> + if ( e . src_index = = Graph : : kControlSlot ) { <nl> g_ - > AddControlEdge ( src_node , e . dst_node ) ; <nl> } else { <nl> TF_RETURN_IF_ERROR ( <nl> Status GraphConstructor : : UpdateVersionDef ( ) { <nl> } <nl> <nl> void GraphConstructor : : Undo ( ) { <nl> - for ( const auto & iter : name_index_ ) { <nl> + for ( const auto & iter : gdef_nodes_ ) { <nl> if ( iter . second . node ! = nullptr ) { <nl> g_ - > RemoveNode ( iter . second . node ) ; <nl> } <nl> mmm a / tensorflow / core / graph / graph_constructor . h <nl> ppp b / tensorflow / core / graph / graph_constructor . h <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / graph / graph . h " <nl> + # include " tensorflow / core / graph / tensor_id . h " <nl> # include " tensorflow / core / lib / core / status . h " <nl> <nl> namespace tensorflow { <nl> struct ImportGraphDefOptions { <nl> / / named " animals / bunny " in * g . <nl> string prefix ; <nl> <nl> - / / TODO ( ashankar ) : Enable node rebinding ( in Python ' s import_graph_def <nl> - / / this is achieved by providing an input_map ) . <nl> + / / Maps tensors in ` gdef ` to existing tensors in ` g ` . Inputs in ` gdef ` <nl> + / / corresponding to ` input_map ` keys will be remapped to the nodes in ` g ` <nl> + / / corresponding to the values . <nl> / / <nl> + / / Keys should not include ` prefix ` , i . e . , a key TensorId ' s name should be the <nl> + / / name as it originally appears in ` gdef ` . <nl> + / / <nl> + / / If this is non - empty , ImportGraphDef must be called with the shape refiner <nl> + / / used to create the existing nodes referenced in ` input_map ` . <nl> + / / TODO ( skyewm ) : can we remove this requirement ? How do we access the original <nl> + / / shape refiner ? <nl> + / / <nl> + / / TODO ( skyewm ) : add functionality to retrieve unused ` input_map ` keys <nl> + std : : map < TensorId , TensorId > input_map ; <nl> + <nl> / / TODO ( ashankar ) : Enable handling of GraphDefs produced by newer binaries <nl> / / with ops that are not defined in the binary calling ImportGraphDef . <nl> / / Similar to the producer_op_list argument to import_graph_def in the <nl> mmm a / tensorflow / core / graph / graph_constructor_test . cc <nl> ppp b / tensorflow / core / graph / graph_constructor_test . 
cc <nl> limitations under the License . <nl> # include " tensorflow / core / graph / graph_constructor . h " <nl> <nl> # include < vector > <nl> + # include " tensorflow / core / common_runtime / shape_refiner . h " <nl> # include " tensorflow / core / framework / common_shape_fns . h " <nl> # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / shape_inference . h " <nl> # include " tensorflow / core / graph / graph . h " <nl> + # include " tensorflow / core / graph / node_builder . h " <nl> # include " tensorflow / core / kernels / ops_util . h " <nl> # include " tensorflow / core / lib / core / status . h " <nl> # include " tensorflow / core / lib / strings / str_util . h " <nl> class GraphConstructorTest : public : : testing : : Test { <nl> <nl> void ExpectError ( const string & gdef_ascii , <nl> const std : : vector < string > & expected_error_strs ) { <nl> + / / Used to verify that errors don ' t change graph <nl> + const string original_graph_description = GraphDebugString ( ) ; <nl> + <nl> Convert ( gdef_ascii ) ; <nl> GraphConstructorOptions opts ; <nl> Status status = ConvertGraphDefToGraph ( opts , gdef_ , & graph_ ) ; <nl> class GraphConstructorTest : public : : testing : : Test { <nl> EXPECT_TRUE ( status . error_message ( ) . find ( error ) ! = string : : npos ) <nl> < < " Expected to find ' " < < error < < " ' in " < < status ; <nl> } <nl> + <nl> + EXPECT_EQ ( original_graph_description , GraphDebugString ( ) ) ; <nl> + } <nl> + <nl> + void ExpectError ( const string & gdef_ascii , const ImportGraphDefOptions & opts , <nl> + const std : : vector < string > & expected_error_strs , <nl> + ShapeRefiner * refiner = nullptr ) { <nl> + / / Used to verify that errors don ' t change graph <nl> + const string original_graph_description = GraphDebugString ( ) ; <nl> + <nl> + Convert ( gdef_ascii ) ; <nl> + Status status = ImportGraphDef ( opts , gdef_ , & graph_ , refiner ) ; <nl> + EXPECT_FALSE ( status . ok ( ) ) ; <nl> + <nl> + for ( const string & error : expected_error_strs ) { <nl> + EXPECT_TRUE ( status . error_message ( ) . find ( error ) ! = string : : npos ) <nl> + < < " Expected to find ' " < < error < < " ' in " < < status ; <nl> + } <nl> + <nl> + EXPECT_EQ ( original_graph_description , GraphDebugString ( ) ) ; <nl> } <nl> <nl> void ExpectOK ( const string & gdef_ascii ) { <nl> class GraphConstructorTest : public : : testing : : Test { <nl> TF_CHECK_OK ( ConvertGraphDefToGraph ( opts , gdef_ , & graph_ ) ) ; <nl> } <nl> <nl> + void ExpectOK ( const string & gdef_ascii , const ImportGraphDefOptions & opts , <nl> + ShapeRefiner * refiner = nullptr ) { <nl> + Convert ( gdef_ascii ) ; <nl> + Status s = ImportGraphDef ( opts , gdef_ , & graph_ , refiner ) ; <nl> + EXPECT_EQ ( Status : : OK ( ) , s ) < < s ; <nl> + } <nl> + <nl> void ExpectVersions ( int min_consumer , int producer ) { <nl> EXPECT_EQ ( min_consumer , graph_ . versions ( ) . min_consumer ( ) ) <nl> < < " Expected min consumer " < < min_consumer < < " , got " <nl> Status Scalars ( shape_inference : : InferenceContext * c ) { <nl> <nl> REGISTER_OP ( " ABC " ) ; <nl> REGISTER_OP ( " TestParams " ) . Output ( " o : float " ) . SetShapeFn ( Scalars ) ; <nl> - REGISTER_OP ( " TestInput " ) . Output ( " a : float " ) . Output ( " b : float " ) ; <nl> + REGISTER_OP ( " TestInput " ) <nl> + . Output ( " a : float " ) <nl> + . Output ( " b : float " ) <nl> + . 
SetShapeFn ( Scalars ) ; <nl> REGISTER_OP ( " TestMul " ) <nl> . Input ( " a : float " ) <nl> . Input ( " b : float " ) <nl> TEST_F ( GraphConstructorTest , ImportGraphDef_ShapeWhitelist ) { <nl> EXPECT_EQ ( Status : : OK ( ) , s ) < < s ; <nl> } <nl> <nl> + TEST_F ( GraphConstructorTest , ImportGraphDef_InputMap ) { <nl> + ShapeRefiner refiner ( graph_ . op_registry ( ) ) ; <nl> + <nl> + / / Populate graph with node we ' ll use in input map <nl> + ExpectOK ( " node { name : ' input ' op : ' TestInput ' } " , ImportGraphDefOptions ( ) , <nl> + & refiner ) ; <nl> + <nl> + / / Create input_map and use it to import more nodes <nl> + ImportGraphDefOptions opts ; <nl> + opts . input_map [ TensorId ( " new_input " , 0 ) ] = TensorId ( " input " , 1 ) ; <nl> + opts . input_map [ TensorId ( " new_input " , 1 ) ] = TensorId ( " input " , 0 ) ; <nl> + <nl> + ExpectOK ( <nl> + R " EOF ( <nl> + node { name : ' new_input ' op : ' TestInput ' } <nl> + node { name : ' t1 ' op : ' TestMul ' input : [ ' new_input : 0 ' , ' new_input : 1 ' ] } <nl> + node { name : ' t2 ' op : ' TestMul ' input : [ ' t1 : 0 ' , ' t1 : 0 ' ] } <nl> + ) EOF " , <nl> + opts , & refiner ) ; <nl> + <nl> + EXPECT_TRUE ( HasNode ( " input " ) ) ; <nl> + EXPECT_TRUE ( HasNode ( " t1 " ) ) ; <nl> + EXPECT_TRUE ( HasNode ( " t2 " ) ) ; <nl> + / / ` new_input ` node is imported even though it ' s outputs aren ' t used <nl> + EXPECT_TRUE ( HasNode ( " new_input " ) ) ; <nl> + <nl> + EXPECT_TRUE ( HasEdge ( " input " , 1 , " t1 " , 0 ) ) ; <nl> + EXPECT_TRUE ( HasEdge ( " input " , 0 , " t1 " , 1 ) ) ; <nl> + EXPECT_FALSE ( HasEdge ( " new_input " , 0 , " t1 " , 0 ) ) ; <nl> + EXPECT_FALSE ( HasEdge ( " new_input " , 0 , " t1 " , 1 ) ) ; <nl> + / / Test that t2 is unaffected <nl> + EXPECT_TRUE ( HasEdge ( " t1 " , 0 , " t2 " , 0 ) ) ; <nl> + <nl> + / / Check that t1 ' s NodeDef is consistent with graph <nl> + Node * t1 = FindNode ( " t1 " ) ; <nl> + ASSERT_EQ ( t1 - > def ( ) . input_size ( ) , 2 ) ; <nl> + ASSERT_EQ ( t1 - > def ( ) . input ( 0 ) , " input : 1 " ) ; <nl> + ASSERT_EQ ( t1 - > def ( ) . input ( 1 ) , " input : 0 " ) ; <nl> + } <nl> + <nl> + TEST_F ( GraphConstructorTest , ImportGraphDef_InputMapWithPrefix ) { <nl> + ShapeRefiner refiner ( graph_ . op_registry ( ) ) ; <nl> + <nl> + / / Populate graph with node we ' ll use in input map <nl> + ExpectOK ( <nl> + " node { name : ' input ' op : ' TestInput ' } " <nl> + " node { name : ' unmapped_input ' op : ' TestInput ' } " , <nl> + ImportGraphDefOptions ( ) , & refiner ) ; <nl> + <nl> + / / Map multiple inputs to the same existing input for more coverage <nl> + ImportGraphDefOptions opts ; <nl> + opts . input_map [ TensorId ( " input " , 0 ) ] = TensorId ( " input " , 0 ) ; <nl> + opts . input_map [ TensorId ( " input " , 1 ) ] = TensorId ( " input " , 0 ) ; <nl> + <nl> + opts . 
prefix = " import " ; <nl> + <nl> + / / Import nodes with the same names as those already in the graph ( the prefix <nl> + / / makes them unique ) <nl> + ExpectOK ( <nl> + R " EOF ( <nl> + node { name : ' input ' op : ' TestInput ' } <nl> + node { name : ' unmapped_input ' op : ' TestInput ' } <nl> + node { name : ' t1 ' op : ' TestMul ' input : [ ' input : 0 ' , ' input : 1 ' ] } <nl> + node { name : ' t2 ' op : ' TestMul ' input : [ ' t1 : 0 ' , ' t1 : 0 ' ] } <nl> + node { name : ' t3 ' op : ' TestMul ' input : [ ' unmapped_input : 0 ' , <nl> + ' unmapped_input : 1 ' ] } <nl> + ) EOF " , <nl> + opts , & refiner ) ; <nl> + <nl> + EXPECT_TRUE ( HasNode ( " input " ) ) ; <nl> + EXPECT_TRUE ( HasNode ( " unmapped_input " ) ) ; <nl> + EXPECT_TRUE ( HasNode ( " import / unmapped_input " ) ) ; <nl> + EXPECT_TRUE ( HasNode ( " import / t1 " ) ) ; <nl> + EXPECT_TRUE ( HasNode ( " import / t2 " ) ) ; <nl> + / / ` input ` node is imported even though it ' s outputs aren ' t used <nl> + EXPECT_TRUE ( HasNode ( " import / input " ) ) ; <nl> + <nl> + EXPECT_TRUE ( HasEdge ( " input " , 0 , " import / t1 " , 0 ) ) ; <nl> + EXPECT_TRUE ( HasEdge ( " input " , 0 , " import / t1 " , 1 ) ) ; <nl> + EXPECT_FALSE ( HasEdge ( " import / input " , 0 , " import / t1 " , 0 ) ) ; <nl> + EXPECT_FALSE ( HasEdge ( " import / input " , 0 , " import / t1 " , 1 ) ) ; <nl> + / / Test that t2 and t3 are unaffected <nl> + EXPECT_TRUE ( HasEdge ( " import / t1 " , 0 , " import / t2 " , 0 ) ) ; <nl> + EXPECT_TRUE ( HasEdge ( " import / unmapped_input " , 0 , " import / t3 " , 0 ) ) ; <nl> + EXPECT_TRUE ( HasEdge ( " import / unmapped_input " , 1 , " import / t3 " , 1 ) ) ; <nl> + <nl> + / / Check that NodeDefs are consistent with graph <nl> + Node * t1 = FindNode ( " import / t1 " ) ; <nl> + ASSERT_EQ ( t1 - > def ( ) . input_size ( ) , 2 ) ; <nl> + EXPECT_EQ ( t1 - > def ( ) . input ( 0 ) , " input : 0 " ) ; <nl> + EXPECT_EQ ( t1 - > def ( ) . input ( 1 ) , " input : 0 " ) ; <nl> + <nl> + Node * t2 = FindNode ( " import / t2 " ) ; <nl> + ASSERT_EQ ( t2 - > def ( ) . input_size ( ) , 2 ) ; <nl> + EXPECT_EQ ( t2 - > def ( ) . input ( 0 ) , " import / t1 : 0 " ) ; <nl> + EXPECT_EQ ( t2 - > def ( ) . input ( 1 ) , " import / t1 : 0 " ) ; <nl> + <nl> + Node * t3 = FindNode ( " import / t3 " ) ; <nl> + ASSERT_EQ ( t3 - > def ( ) . input_size ( ) , 2 ) ; <nl> + EXPECT_EQ ( t3 - > def ( ) . input ( 0 ) , " import / unmapped_input : 0 " ) ; <nl> + EXPECT_EQ ( t3 - > def ( ) . input ( 1 ) , " import / unmapped_input : 1 " ) ; <nl> + } <nl> + <nl> + TEST_F ( GraphConstructorTest , ImportGraphDef_InputMapWithControlEdges ) { <nl> + ShapeRefiner refiner ( graph_ . op_registry ( ) ) ; <nl> + <nl> + / / Populate graph with node we ' ll use in input map <nl> + ExpectOK ( " node { name : ' W1 ' op : ' TestParams ' } " , ImportGraphDefOptions ( ) , <nl> + & refiner ) ; <nl> + <nl> + / / Create input_map containing control edges and use it to import more nodes <nl> + ImportGraphDefOptions opts ; <nl> + opts . input_map [ TensorId ( " W2 " , - 1 ) ] = TensorId ( " W1 " , - 1 ) ; <nl> + opts . 
input_map [ TensorId ( " W3 " , - 1 ) ] = TensorId ( " W1 " , - 1 ) ; <nl> + ExpectOK ( <nl> + R " EOF ( <nl> + node { name : ' W2 ' op : ' TestParams ' } <nl> + node { name : ' W3 ' op : ' TestParams ' } <nl> + node { name : ' input ' op : ' TestInput ' input : [ ' ^ W2 ' ] } <nl> + node { name : ' t1 ' op : ' TestOneInputTwoOutputs ' input : [ ' W2 ' ] } <nl> + node { name : ' t2 ' op : ' TestOneInputTwoOutputs ' <nl> + input : [ ' input ' , ' ^ W2 ' , ' ^ W3 ' ] } <nl> + ) EOF " , <nl> + opts , & refiner ) ; <nl> + <nl> + EXPECT_TRUE ( HasNode ( " W1 " ) ) ; <nl> + EXPECT_TRUE ( HasNode ( " W2 " ) ) ; <nl> + EXPECT_TRUE ( HasNode ( " W3 " ) ) ; <nl> + EXPECT_TRUE ( HasNode ( " input " ) ) ; <nl> + EXPECT_TRUE ( HasNode ( " t1 " ) ) ; <nl> + EXPECT_TRUE ( HasNode ( " t2 " ) ) ; <nl> + <nl> + EXPECT_TRUE ( HasControlEdge ( " W1 " , " input " ) ) ; <nl> + EXPECT_FALSE ( HasControlEdge ( " W2 " , " input " ) ) ; <nl> + <nl> + / / Test that non - control edge is unaffected <nl> + EXPECT_TRUE ( HasEdge ( " W2 " , 0 , " t1 " , 0 ) ) ; <nl> + <nl> + EXPECT_TRUE ( HasControlEdge ( " W1 " , " t2 " ) ) ; <nl> + EXPECT_FALSE ( HasControlEdge ( " W2 " , " t2 " ) ) ; <nl> + EXPECT_TRUE ( HasEdge ( " input " , 0 , " t2 " , 0 ) ) ; <nl> + / / Test that t2 ' s control inputs have been merged to single W1 edge <nl> + Node * t2 = FindNode ( " t2 " ) ; <nl> + EXPECT_EQ ( t2 - > in_edges ( ) . size ( ) , 2 ) ; <nl> + <nl> + / / Test remapping a control edge from a node with the same name as an existing <nl> + / / node <nl> + opts . prefix = " import " ; <nl> + opts . input_map . clear ( ) ; <nl> + opts . input_map [ TensorId ( " W1 " , - 1 ) ] = TensorId ( " W1 " , - 1 ) ; <nl> + ExpectOK ( <nl> + R " EOF ( <nl> + node { name : ' W1 ' op : ' TestParams ' } <nl> + node { name : ' input ' op : ' TestInput ' input : [ ' ^ W1 ' ] } <nl> + node { name : ' t1 ' op : ' TestOneInputTwoOutputs ' input : [ ' W1 ' ] } <nl> + ) EOF " , <nl> + opts , & refiner ) ; <nl> + <nl> + EXPECT_TRUE ( HasNode ( " import / W1 " ) ) ; <nl> + EXPECT_TRUE ( HasNode ( " import / input " ) ) ; <nl> + EXPECT_TRUE ( HasNode ( " import / t1 " ) ) ; <nl> + <nl> + EXPECT_TRUE ( HasControlEdge ( " W1 " , " import / input " ) ) ; <nl> + EXPECT_FALSE ( HasControlEdge ( " import / W1 " , " import / input " ) ) ; <nl> + EXPECT_TRUE ( HasEdge ( " import / W1 " , 0 , " import / t1 " , 0 ) ) ; <nl> + } <nl> + <nl> + TEST_F ( GraphConstructorTest , ImportGraphDef_InputMapWithBadControlEdge ) { <nl> + ShapeRefiner refiner ( graph_ . op_registry ( ) ) ; <nl> + <nl> + / / Populate graph with node we ' ll use in input map <nl> + ExpectOK ( " node { name : ' W1 ' op : ' TestParams ' } " , ImportGraphDefOptions ( ) , <nl> + & refiner ) ; <nl> + <nl> + / / Create input_map with bad control edge mapping <nl> + ImportGraphDefOptions opts ; <nl> + opts . input_map [ TensorId ( " W2 " , - 1 ) ] = TensorId ( " W1 " , 0 ) ; <nl> + ExpectError ( <nl> + R " EOF ( <nl> + node { name : ' W2 ' op : ' TestParams ' } <nl> + node { name : ' input ' op : ' TestInput ' input : [ ' ^ W2 ' ] } <nl> + ) EOF " , <nl> + opts , <nl> + { " input_map entry ^ W2 - > W1 : 0 between control edge and non - control edge " } , <nl> + & refiner ) ; <nl> + <nl> + opts . input_map . clear ( ) ; <nl> + / / " W2 : 0 " isn ' t used in the imported graph but still causes an error <nl> + opts . 
input_map [ TensorId ( " W2 " , 0 ) ] = TensorId ( " W1 " , - 1 ) ; <nl> + ExpectError ( <nl> + R " EOF ( <nl> + node { name : ' W2 ' op : ' TestParams ' } <nl> + node { name : ' input ' op : ' TestInput ' input : [ ' ^ W2 ' ] } <nl> + ) EOF " , <nl> + opts , <nl> + { " input_map entry W2 : 0 - > ^ W1 between control edge and non - control edge " } , <nl> + & refiner ) ; <nl> + } <nl> + <nl> + TEST_F ( GraphConstructorTest , ImportGraphDef_InputMapWithInvalidNodeIndex ) { <nl> + ShapeRefiner refiner ( graph_ . op_registry ( ) ) ; <nl> + <nl> + / / Populate graph with node we ' ll use in input map <nl> + ExpectOK ( " node { name : ' input1 ' op : ' TestInput ' } " , ImportGraphDefOptions ( ) , <nl> + & refiner ) ; <nl> + <nl> + / / Create input_map with invalid source node index <nl> + ImportGraphDefOptions opts ; <nl> + opts . input_map [ TensorId ( " input2 " , 0 ) ] = TensorId ( " input1 " , 3 ) ; <nl> + ExpectError ( <nl> + R " EOF ( <nl> + node { name : ' input2 ' op : ' TestInput ' } <nl> + node { name : ' t1 ' op : ' TestMul ' input : [ ' input2 : 0 ' , ' input2 : 1 ' ] } <nl> + ) EOF " , <nl> + opts , <nl> + { " Node ' t1 ' : Connecting to invalid output 3 of source node input1 which " <nl> + " has 2 outputs " } , <nl> + & refiner ) ; <nl> + } <nl> + <nl> + TEST_F ( GraphConstructorTest , ImportGraphDef_InputMapWithMissingEntries ) { <nl> + ShapeRefiner refiner ( graph_ . op_registry ( ) ) ; <nl> + <nl> + / / Populate graph with node we ' ll use in input map <nl> + ExpectOK ( " node { name : ' W1 ' op : ' TestParams ' } " , ImportGraphDefOptions ( ) , <nl> + & refiner ) ; <nl> + <nl> + / / Create input_map referencing node that doesn ' t exist in graph <nl> + ImportGraphDefOptions opts ; <nl> + opts . input_map [ TensorId ( " W2 " , - 1 ) ] = TensorId ( " DNE " , - 1 ) ; <nl> + ExpectError ( <nl> + R " EOF ( <nl> + node { name : ' W2 ' op : ' TestParams ' } <nl> + node { name : ' input ' op : ' TestInput ' input : [ ' ^ W2 ' ] } <nl> + ) EOF " , <nl> + opts , <nl> + { " node ' DNE ' in input_map does not exist in graph ( input_map entry : " <nl> + " ^ W2 - > ^ DNE ) " } , <nl> + & refiner ) ; <nl> + } <nl> + <nl> + TEST_F ( GraphConstructorTest , ImportGraphDef_InputMapDuplicateNodeNames ) { <nl> + ShapeRefiner refiner ( graph_ . op_registry ( ) ) ; <nl> + <nl> + / / Add two nodes with the same name to graph <nl> + Node * node ; <nl> + TF_CHECK_OK ( NodeBuilder ( " dup " , " Placeholder " ) <nl> + . Attr ( " dtype " , DT_FLOAT ) <nl> + . Finalize ( & graph_ , & node ) ) ; <nl> + TF_CHECK_OK ( NodeBuilder ( " dup " , " Placeholder " ) <nl> + . Attr ( " dtype " , DT_FLOAT ) <nl> + . Finalize ( & graph_ , & node ) ) ; <nl> + <nl> + / / Create input_map referencing duplicate node <nl> + ImportGraphDefOptions opts ; <nl> + opts . input_map [ TensorId ( " new_input " , 0 ) ] = TensorId ( " dup " , 0 ) ; <nl> + ExpectError ( <nl> + R " EOF ( <nl> + node { name : ' new_input ' op : ' TestInput ' } <nl> + node { name : ' t1 ' op : ' TestMul ' input : [ ' new_input : 0 ' , ' new_input : 1 ' ] } <nl> + ) EOF " , <nl> + opts , <nl> + { " cannot resolve input_map because multiple nodes exist with name ' dup ' " } , <nl> + & refiner ) ; <nl> + } <nl> + <nl> TEST_F ( GraphConstructorTest , ImportGraphDef_WithCycle ) { <nl> / / Test graph produced in python using : <nl> / * <nl>
Add input_map parameter to ImportGraphDefOptions.
tensorflow/tensorflow
a00f53f831047feeb9a5086542116eba1b9f590d
2017-01-09T18:44:55Z
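Note: the test diff above exercises the new `input_map` end to end. Below is a minimal caller-side sketch, assuming the `ImportGraphDef(opts, gdef, graph, refiner)` overload and the `TensorId` helper the tests use (the exact include list is an assumption; output index `-1` denotes a control edge, exactly as in the tests):

```cpp
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_constructor.h"
#include "tensorflow/core/graph/tensor_id.h"

// Import `gdef` into `graph`, splicing two of its inputs onto nodes that
// already live in `graph`. Node names here are illustrative, not required.
tensorflow::Status ImportWithRemap(const tensorflow::GraphDef& gdef,
                                   tensorflow::Graph* graph,
                                   tensorflow::ShapeRefiner* refiner) {
  tensorflow::ImportGraphDefOptions opts;
  opts.prefix = "import";  // keeps imported names from colliding
  // Data edge: reads of 'new_input:0' in gdef become reads of output 0 of
  // the pre-existing node 'input'.
  opts.input_map[tensorflow::TensorId("new_input", 0)] =
      tensorflow::TensorId("input", 0);
  // Control edge: index -1 remaps '^W2' in gdef onto the existing '^W1'.
  opts.input_map[tensorflow::TensorId("W2", -1)] =
      tensorflow::TensorId("W1", -1);
  return tensorflow::ImportGraphDef(opts, gdef, graph, refiner);
}
```

As the error-case tests above show, every mapped source node must already exist in the destination graph, and a map entry may not pair a control edge with a data edge in either direction.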
mmm a / tools / editor / plugins / script_editor_plugin . cpp <nl> ppp b / tools / editor / plugins / script_editor_plugin . cpp <nl> void ScriptEditor : : _menu_option ( int p_option ) { <nl> } <nl> } <nl> <nl> - EditorHelp * help = tab_container - > get_child ( selected ) - > cast_to < EditorHelp > ( ) ; <nl> + EditorHelp * help = tab_container - > get_current_tab_control ( ) - > cast_to < EditorHelp > ( ) ; <nl> if ( help ) { <nl> <nl> switch ( p_option ) { <nl>
Merge pull request from volzhs/out-of-size
godotengine/godot
1fde6f25cd34a5264d7a4114b546389fafc21b27
2016-10-30T21:46:23Z
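The one-line fix above swaps child indexing for a direct query of the active tab's control: `get_child(selected)` can pick the wrong node, or index out of range (the "out-of-size" crash), when the TabContainer owns non-tab children or the cached index is stale. A hedged sketch of the resulting pattern, in the Godot 2.x style the diff uses:

```cpp
// Ask the TabContainer for the active tab's Control directly instead of
// indexing children by tab index; cast_to<> yields NULL on type mismatch.
Control *current = tab_container->get_current_tab_control();
EditorHelp *help = current ? current->cast_to<EditorHelp>() : NULL;
if (help) {
    // the focused tab really is a help page; safe to operate on it
}
```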
mmm a / tensorflow / core / common_runtime / executor . cc <nl> ppp b / tensorflow / core / common_runtime / executor . cc <nl> struct NodeItem { <nl> / / The kernel for this node . <nl> OpKernel * kernel = nullptr ; <nl> <nl> - bool kernel_is_expensive = false ; / / True iff kernel - > IsExpensive ( ) <nl> - bool kernel_is_async = false ; / / True iff kernel - > AsAsync ( ) ! = nullptr <nl> - bool is_merge = false ; / / True iff IsMerge ( node ) <nl> + bool kernel_is_expensive : 1 ; / / True iff kernel - > IsExpensive ( ) <nl> + bool kernel_is_async : 1 ; / / True iff kernel - > AsAsync ( ) ! = nullptr <nl> + bool is_merge : 1 ; / / True iff IsMerge ( node ) <nl> + bool is_enter : 1 ; / / True iff IsEnter ( node ) <nl> + bool is_exit : 1 ; / / True iff IsExit ( node ) <nl> + bool is_control_trigger : 1 ; / / True iff IsControlTrigger ( node ) <nl> + bool is_sink : 1 ; / / True iff IsSink ( node ) <nl> + / / True iff IsEnter ( node ) | | IsExit ( node ) | | IsNextIteration ( node ) <nl> + bool is_enter_exit_or_next_iter : 1 ; <nl> <nl> / / Cached values of node - > num_inputs ( ) and node - > num_outputs ( ) , to <nl> / / avoid levels of indirection . <nl> Status ExecutorImpl : : Initialize ( ) { <nl> item - > kernel_is_expensive = item - > kernel - > IsExpensive ( ) ; <nl> item - > kernel_is_async = ( item - > kernel - > AsAsync ( ) ! = nullptr ) ; <nl> item - > is_merge = IsMerge ( n ) ; <nl> + item - > is_enter = IsEnter ( n ) ; <nl> + item - > is_exit = IsExit ( n ) ; <nl> + item - > is_control_trigger = IsControlTrigger ( n ) ; <nl> + item - > is_sink = IsSink ( n ) ; <nl> + item - > is_enter_exit_or_next_iter = <nl> + ( IsEnter ( n ) | | IsExit ( n ) | | IsNextIteration ( n ) ) ; <nl> <nl> / / Initialize static information about the frames in the graph . <nl> frame_local_ids_ [ id ] = frame_count [ frame_name ] + + ; <nl> void ExecutorState : : PropagateOutputs ( const TaggedNode & tagged_node , <nl> FrameState * output_frame = input_frame ; <nl> int64 output_iter = input_iter ; <nl> <nl> - if ( IsEnter ( node ) ) { <nl> + if ( ! item - > is_enter_exit_or_next_iter ) { <nl> + / / Fast path for nodes types that don ' t need special handling <nl> + DCHECK_EQ ( input_frame , output_frame ) ; <nl> + / / Normal path for most nodes <nl> + mutex_lock l ( input_frame - > mu ) ; <nl> + output_frame - > ActivateNodes ( item , is_dead , output_iter , outputs , ready ) ; <nl> + is_frame_done = input_frame - > DecrementOutstandingOpsLocked ( <nl> + & impl_ - > gview_ , input_iter , ready ) ; <nl> + } else if ( item - > is_enter ) { <nl> bool is_constant ; <nl> Status s = GetNodeAttr ( node - > def ( ) , " is_constant " , & is_constant ) ; <nl> DCHECK ( s . ok ( ) ) < < s ; <nl> void ExecutorState : : PropagateOutputs ( const TaggedNode & tagged_node , <nl> } <nl> is_frame_done = <nl> input_frame - > DecrementOutstandingOps ( & impl_ - > gview_ , input_iter , ready ) ; <nl> - } else if ( IsExit ( node ) ) { <nl> + } else if ( item - > is_exit ) { <nl> if ( is_dead ) { <nl> mutex_lock l ( input_frame - > mu ) ; <nl> / / Stop and remember this node if it is a dead exit . <nl> void ExecutorState : : PropagateOutputs ( const TaggedNode & tagged_node , <nl> is_frame_done = input_frame - > DecrementOutstandingOps ( & impl_ - > gview_ , <nl> input_iter , ready ) ; <nl> } <nl> - } else if ( IsNextIteration ( node ) ) { <nl> + } else { <nl> + DCHECK ( IsNextIteration ( node ) ) ; <nl> mutex_lock l ( input_frame - > mu ) ; <nl> if ( is_dead ) { <nl> / / Stop the deadness propagation . 
<nl> void ExecutorState : : PropagateOutputs ( const TaggedNode & tagged_node , <nl> } <nl> is_frame_done = input_frame - > DecrementOutstandingOpsLocked ( <nl> & impl_ - > gview_ , input_iter , ready ) ; <nl> - } else { <nl> - / / Normal path for most nodes <nl> - mutex_lock l ( input_frame - > mu ) ; <nl> - output_frame - > ActivateNodes ( item , is_dead , output_iter , outputs , ready ) ; <nl> - is_frame_done = input_frame - > DecrementOutstandingOpsLocked ( <nl> - & impl_ - > gview_ , input_iter , ready ) ; <nl> } <nl> <nl> / / At this point , this node is completely done . We also know if the <nl> void ExecutorState : : FrameState : : ActivateNodes ( const NodeItem * item , <nl> for ( int out_index = 0 ; out_index < num_output_edges ; out_index + + ) { <nl> const EdgeInfo & e = edges [ out_index ] ; <nl> const int dst_id = e . dst_id ; <nl> - const Node * dst_node = gview . node ( dst_id ) - > node ; <nl> + const NodeItem * dst_item = gview . node ( dst_id ) ; <nl> const int dst_pending_id = pending_ids [ dst_id ] ; <nl> const int src_slot = e . output_slot ; <nl> <nl> / / TODO ( yuanbyu ) : We don ' t need this if we require the subgraph <nl> / / given to an executor not to contain a sink node . <nl> - if ( dst_node - > IsSink ( ) ) continue ; <nl> + if ( dst_item - > is_sink ) continue ; <nl> <nl> bool dst_dead = false ; <nl> bool dst_ready = false ; <nl> void ExecutorState : : FrameState : : ActivateNodes ( const NodeItem * item , <nl> / / analysis happy . <nl> const bool is_control_edge = ( src_slot = = Graph : : kControlSlot ) ; <nl> bool dst_need_input = ! is_control_edge ; <nl> - if ( IsMerge ( dst_node ) ) { <nl> + if ( dst_item - > is_merge ) { <nl> / / A merge node is ready if all control inputs have arrived and either <nl> / / a ) a live data input becomes available or b ) all data inputs are dead . <nl> / / For Merge , pending ' s LSB is set iff a live data input has arrived . <nl> void ExecutorState : : FrameState : : ActivateNodes ( const NodeItem * item , <nl> iter_state - > decrement_pending ( dst_pending_id , 2 ) ; <nl> int count = iter_state - > pending ( dst_pending_id ) ; <nl> int dead_cnt = iter_state - > dead_count ( dst_pending_id ) ; <nl> - dst_dead = ( dead_cnt = = dst_node - > num_inputs ( ) ) ; <nl> + dst_dead = ( dead_cnt = = dst_item - > num_inputs ) ; <nl> dst_ready = ( count = = 0 ) | | ( ( count = = 1 ) & & dst_dead ) ; <nl> } else { <nl> if ( outputs [ src_slot ] . has_value ) { <nl> void ExecutorState : : FrameState : : ActivateNodes ( const NodeItem * item , <nl> / / This is a dead data input . Note that dst_node is dead if node is <nl> / / a dead enter . We need this to handle properly a while loop on <nl> / / the untaken branch of a conditional . <nl> - / / TODO ( yuanbyu ) : This is a bit hacky , but a good solution for now . <nl> + / / TODO ( yuanbyu ) : This is a bit hacky , but a good solution for <nl> + / / now . <nl> iter_state - > increment_dead_count ( dst_pending_id ) ; <nl> const int dead_cnt = iter_state - > dead_count ( dst_pending_id ) ; <nl> - dst_dead = <nl> - ( dead_cnt = = dst_node - > num_inputs ( ) ) | | IsEnter ( item - > node ) ; <nl> + dst_dead = ( dead_cnt = = dst_item - > num_inputs ) | | item - > is_enter ; <nl> dst_ready = ( iter_state - > pending ( dst_pending_id ) = = 1 ) & & dst_dead ; <nl> dst_need_input = false ; <nl> } <nl> void ExecutorState : : FrameState : : ActivateNodes ( const NodeItem * item , <nl> } <nl> <nl> if ( dst_need_input ) { <nl> - const NodeItem & dst_item = * gview . 
node ( dst_id ) ; <nl> const int dst_slot = e . input_slot ; <nl> Entry * input_tensors = iter_state - > input_tensors ; <nl> - int dst_loc = dst_item . input_start + dst_slot ; <nl> + int dst_loc = dst_item - > input_start + dst_slot ; <nl> input_tensors [ dst_loc ] = outputs [ src_slot ] ; <nl> } <nl> <nl> / / Add dst to the ready queue if it ' s ready <nl> if ( dst_ready ) { <nl> - if ( IsControlTrigger ( dst_node ) ) dst_dead = false ; <nl> - ready - > push_back ( TaggedNode ( dst_node , this , iter , dst_dead ) ) ; <nl> + if ( dst_item - > is_control_trigger ) dst_dead = false ; <nl> + ready - > push_back ( TaggedNode ( dst_item - > node , this , iter , dst_dead ) ) ; <nl> iter_state - > outstanding_ops + + ; <nl> } <nl> } <nl> mmm a / tensorflow / core / graph / graph . h <nl> ppp b / tensorflow / core / graph / graph . h <nl> class Graph { <nl> <nl> / / Helper routines <nl> <nl> + inline bool IsSource ( const Node * node ) { return node - > IsSource ( ) ; } <nl> + inline bool IsSink ( const Node * node ) { return node - > IsSink ( ) ; } <nl> inline bool IsSwitch ( const Node * node ) { return node - > IsSwitch ( ) ; } <nl> inline bool IsMerge ( const Node * node ) { return node - > IsMerge ( ) ; } <nl> inline bool IsEnter ( const Node * node ) { return node - > IsEnter ( ) ; } <nl>
Improve memory system behavior of core graph execution module.
tensorflow/tensorflow
9f577121d2ece24d523a1cfedb495a39007b9ed5
2017-02-08T06:25:55Z
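The executor change packs the per-node predicates into one-bit bitfields computed once at `Initialize()`, so the scheduling hot path (`PropagateOutputs` / `ActivateNodes`) reads cached flags on the `NodeItem` it already holds instead of dereferencing the `Node` again. A standalone sketch of the idea, using the `Is*` helpers the diff exports from graph.h (the struct and function names are illustrative):

```cpp
#include "tensorflow/core/graph/graph.h"

// Six booleans share a single byte; the common-case dispatch then costs one
// cached-flag test instead of several per-node predicate calls.
struct CachedNodeFlags {
  bool is_merge : 1;
  bool is_enter : 1;
  bool is_exit : 1;
  bool is_control_trigger : 1;
  bool is_sink : 1;
  bool is_enter_exit_or_next_iter : 1;
};

CachedNodeFlags PrecomputeFlags(const tensorflow::Node* n) {
  using namespace tensorflow;
  CachedNodeFlags f = {};
  f.is_merge = IsMerge(n);
  f.is_enter = IsEnter(n);
  f.is_exit = IsExit(n);
  f.is_control_trigger = IsControlTrigger(n);
  f.is_sink = IsSink(n);
  f.is_enter_exit_or_next_iter =
      IsEnter(n) || IsExit(n) || IsNextIteration(n);
  return f;
}
```

In the propagation loop the fast path then reduces to a single branch on `is_enter_exit_or_next_iter`, as the diff's restructured `PropagateOutputs` shows.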
mmm a / RELEASE . md <nl> ppp b / RELEASE . md <nl> <nl> * Added support for saved model ' s session initializer through <nl> ` TFLiteConverter . from_saved_model ` . <nl> * Added dynamic range quantization support for the BatchMatMul op . <nl> + * Added DEPTH_TO_SPACE support in Post training quantization . <nl> * Add ` RFFT2D ` as builtin op . ( ` RFFT2D ` also supports ` RFFTD ` . ) Currently <nl> only supports float32 input . <nl> * TFLite Supports SingatureDef : <nl> mmm a / tensorflow / api_template . __init__ . py <nl> ppp b / tensorflow / api_template . __init__ . py <nl> def _running_from_pip_package ( ) : <nl> _plugin_dir = _os . path . join ( _s , ' tensorflow - plugins ' ) <nl> if _os . path . exists ( _plugin_dir ) : <nl> _ll . load_library ( _plugin_dir ) <nl> + # Load Pluggable Device Library <nl> + _ll . load_pluggable_device_library ( _plugin_dir ) <nl> <nl> # Add module aliases <nl> if hasattr ( _current_module , ' keras ' ) : <nl> mmm a / tensorflow / api_template_v1 . __init__ . py <nl> ppp b / tensorflow / api_template_v1 . __init__ . py <nl> def _running_from_pip_package ( ) : <nl> _plugin_dir = _os . path . join ( _s , ' tensorflow - plugins ' ) <nl> if _os . path . exists ( _plugin_dir ) : <nl> _ll . load_library ( _plugin_dir ) <nl> + # Load Pluggable Device Library <nl> + _ll . load_pluggable_device_library ( _plugin_dir ) <nl> <nl> # Delete modules that should be hidden from dir ( ) . <nl> # Don ' t fail if these modules are not available . <nl> mmm a / tensorflow / c / BUILD <nl> ppp b / tensorflow / c / BUILD <nl> tf_cc_test ( <nl> name = " c_api_experimental_test " , <nl> size = " medium " , <nl> srcs = [ " c_api_experimental_test . cc " ] , <nl> - data = [ " testdata / tf_record " ] , <nl> + data = [ <nl> + " testdata / tf_record " , <nl> + " / / tensorflow / c / experimental / stream_executor / test : test_pluggable_device . so " , <nl> + ] , <nl> linkopts = select ( { <nl> " / / tensorflow : macos " : [ " - headerpad_max_install_names " ] , <nl> " / / conditions : default " : [ ] , <nl> tf_cc_test ( <nl> " / / tensorflow / core : protos_all_cc " , <nl> " / / tensorflow / core : test " , <nl> " / / tensorflow / core : test_main " , <nl> + " / / tensorflow / core / platform : resource_loader " , <nl> " @ com_google_absl / / absl / types : optional " , <nl> ] , <nl> ) <nl> mmm a / tensorflow / c / c_api_experimental . cc <nl> ppp b / tensorflow / c / c_api_experimental . cc <nl> limitations under the License . <nl> # include " tensorflow / core / graph / node_builder . h " <nl> # include " tensorflow / core / platform / blocking_counter . h " <nl> # include " tensorflow / core / platform / casts . h " <nl> + # include " tensorflow / core / platform / env . h " <nl> # include " tensorflow / core / platform / init_main . h " <nl> + # include " tensorflow / core / platform / mutex . h " <nl> # include " tensorflow / core / platform / net . h " <nl> # include " tensorflow / core / platform / platform . h " <nl> # include " tensorflow / core / platform / strcat . h " <nl> void TF_DeleteShapeAndTypeListArray ( TF_ShapeAndTypeList * * shape_list_array , <nl> <nl> namespace tensorflow { <nl> Status TF_TensorToTensor ( const TF_Tensor * src , Tensor * dst ) ; <nl> + <nl> + / / Helpers for loadding a TensorFlow PluggableDevice plugin ( a . so file ) . 
<nl> + Status LoadPluggableDeviceLibrary ( const char * library_filename , void * * result ) ; <nl> } / / namespace tensorflow <nl> <nl> void TFE_InferShapes ( TFE_Op * tfe_op , TF_ShapeAndTypeList * input_shapes , <nl> void TF_ImportGraphDefOptionsSetValidateColocationConstraints ( <nl> TF_ImportGraphDefOptions * opts , unsigned char enable ) { <nl> opts - > opts . validate_colocation_constraints = enable ; <nl> } <nl> + <nl> + / / Load a Pluggable Device library . <nl> + / / On success , returns the handle to library in result and return OK from the <nl> + / / function . Otherwise return nullptr in result and error Status from the <nl> + / / function . <nl> + / / <nl> + / / If ` library_filename ` has already been loaded , we return a cached handle . <nl> + / / Device and Kernels / Ops are registered as globals when a library is loaded <nl> + / / for the first time . <nl> + TF_Library * TF_LoadPluggableDeviceLibrary ( const char * library_filename , <nl> + TF_Status * status ) { <nl> + # if defined ( IS_MOBILE_PLATFORM ) | | defined ( IS_SLIM_BUILD ) <nl> + status - > status = tensorflow : : errors : : Unimplemented ( <nl> + " PluggableDevice plugin functionality is not supported on mobile " ) ; <nl> + return nullptr ; <nl> + # else <nl> + TF_Library * lib_handle = new TF_Library ; <nl> + static tensorflow : : mutex mu ( tensorflow : : LINKER_INITIALIZED ) ; <nl> + static std : : unordered_map < std : : string , void * > * loaded_libs = <nl> + new std : : unordered_map < std : : string , void * > ( ) ; <nl> + tensorflow : : Env * env = tensorflow : : Env : : Default ( ) ; <nl> + { <nl> + tensorflow : : mutex_lock lock ( mu ) ; <nl> + auto it = loaded_libs - > find ( library_filename ) ; <nl> + if ( it ! = loaded_libs - > end ( ) ) { <nl> + lib_handle - > lib_handle = it - > second ; <nl> + } else { <nl> + status - > status = <nl> + env - > LoadDynamicLibrary ( library_filename , & lib_handle - > lib_handle ) ; <nl> + if ( ! status - > status . ok ( ) ) { <nl> + delete lib_handle ; <nl> + return nullptr ; <nl> + } <nl> + } <nl> + return lib_handle ; <nl> + } <nl> + # endif <nl> + } <nl> + <nl> + void TF_DeletePluggableDeviceLibraryHandle ( TF_Library * lib_handle ) { <nl> + delete lib_handle ; <nl> + } <nl> mmm a / tensorflow / c / c_api_experimental . h <nl> ppp b / tensorflow / c / c_api_experimental . h <nl> TF_CAPI_EXPORT extern void <nl> TF_ImportGraphDefOptionsSetValidateColocationConstraints ( <nl> TF_ImportGraphDefOptions * opts , unsigned char enable ) ; <nl> <nl> + / / Load the library specified by library_filename and register the pluggable <nl> + / / device and related kernels present in that library . This function is not <nl> + / / supported on embedded on mobile and embedded platforms and will fail if <nl> + / / called . <nl> + / / <nl> + / / Pass " library_filename " to a platform - specific mechanism for dynamically <nl> + / / loading a library . The rules for determining the exact location of the <nl> + / / library are platform - specific and are not documented here . <nl> + / / <nl> + / / On success , returns the newly created library handle and places OK in status . <nl> + / / The caller owns the library handle . <nl> + / / <nl> + / / On failure , returns nullptr and places an error status in status . <nl> + TF_CAPI_EXPORT extern TF_Library * TF_LoadPluggableDeviceLibrary ( <nl> + const char * library_filename , TF_Status * status ) ; <nl> + <nl> + / / Frees the memory associated with the library handle . <nl> + / / Does NOT unload the library . 
<nl> + TF_CAPI_EXPORT extern void TF_DeletePluggableDeviceLibraryHandle ( <nl> + TF_Library * lib_handle ) ; <nl> + <nl> # ifdef __cplusplus <nl> } / * end extern " C " * / <nl> # endif <nl> mmm a / tensorflow / c / c_api_experimental_test . cc <nl> ppp b / tensorflow / c / c_api_experimental_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / lib / io / path . h " <nl> # include " tensorflow / core / platform / env . h " <nl> # include " tensorflow / core / platform / logging . h " <nl> + # include " tensorflow / core / platform / resource_loader . h " <nl> # include " tensorflow / core / platform / test . h " <nl> # include " tensorflow / core / protobuf / tensorflow_server . pb . h " <nl> <nl> TEST_F ( ShapeInferenceTest , InfersShapesFromInputTensors ) { <nl> TF_DeleteTensor ( tensor_1X6 ) ; <nl> } <nl> <nl> + TEST ( CAPI_EXPERIMENTAL , LibraryPluggableDeviceLoadFunctions ) { <nl> + # if ! defined ( TENSORFLOW_NO_SHARED_OBJECTS ) <nl> + / / Load the library . <nl> + TF_Status * status = TF_NewStatus ( ) ; <nl> + string lib_path = <nl> + tensorflow : : GetDataDependencyFilepath ( tensorflow : : io : : JoinPath ( <nl> + " tensorflow " , " c " , " experimental " , " stream_executor " , " test " , <nl> + " test_pluggable_device . so " ) ) ; <nl> + TF_Library * lib = TF_LoadPluggableDeviceLibrary ( lib_path . c_str ( ) , status ) ; <nl> + TF_Code code = TF_GetCode ( status ) ; <nl> + string status_msg ( TF_Message ( status ) ) ; <nl> + TF_DeleteStatus ( status ) ; <nl> + ASSERT_EQ ( TF_OK , code ) < < status_msg ; <nl> + TF_DeletePluggableDeviceLibraryHandle ( lib ) ; <nl> + # endif / / ! defined ( TENSORFLOW_NO_SHARED_OBJECTS ) <nl> + } <nl> + <nl> } / / namespace <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / c / c_api_function . cc <nl> ppp b / tensorflow / c / c_api_function . cc <nl> TF_Function * TF_GraphToFunctionWithControlOutputs ( <nl> TF_DeleteFunction ( tf_function ) ; <nl> return nullptr ; <nl> } <nl> - tf_function - > graph_with_debug_info = & fn_body - > graph ; <nl> + <nl> + for ( const Node * n : fn_body - > graph . nodes ( ) ) { <nl> + tf_function - > stack_traces [ n - > name ( ) ] = n - > GetStackTrace ( ) ; <nl> + } <nl> + <nl> return tf_function ; <nl> } <nl> <nl> mmm a / tensorflow / c / c_api_internal . h <nl> ppp b / tensorflow / c / c_api_internal . h <nl> struct TF_DeviceList { <nl> <nl> struct TF_Function { <nl> tensorflow : : FunctionDef fdef ; <nl> - <nl> - / / Graph with nodes with debug stack traces . <nl> - const tensorflow : : Graph * graph_with_debug_info = nullptr ; <nl> + tensorflow : : StackTracesMap stack_traces ; <nl> } ; <nl> <nl> struct TF_ApiDefMap { <nl> mmm a / tensorflow / c / eager / c_api . cc <nl> ppp b / tensorflow / c / eager / c_api . cc <nl> void TFE_ContextAddFunctionDef ( TFE_Context * ctx , <nl> <nl> void TFE_ContextAddFunction ( TFE_Context * ctx , TF_Function * function , <nl> TF_Status * status ) { <nl> - status - > status = tensorflow : : unwrap ( ctx ) - > AddFunctionDefWithDebugInfo ( <nl> - function - > fdef , function - > graph_with_debug_info ) ; <nl> + status - > status = tensorflow : : unwrap ( ctx ) - > AddFunctionDefWithStackTraces ( <nl> + function - > fdef , function - > stack_traces ) ; <nl> } <nl> <nl> void TFE_ContextRemoveFunction ( TFE_Context * ctx , const char * name , <nl> mmm a / tensorflow / c / eager / immediate_execution_context . h <nl> ppp b / tensorflow / c / eager / immediate_execution_context . 
h <nl> class ImmediateExecutionContext : public AbstractContext { <nl> / / already exists . <nl> virtual Status AddFunctionDef ( const FunctionDef & fdef ) = 0 ; <nl> <nl> - / / Same as ` AddFunctionDef ` , and additionally saves a pointer to the Graph <nl> - / / which has nodes containing stack traces for the nodes in ` fdef ` . Assumes <nl> - / / ` graph ` is alive while the function is alive . <nl> - virtual Status AddFunctionDefWithDebugInfo ( const FunctionDef & fdef , <nl> - const Graph * graph ) = 0 ; <nl> + / / Same as ` AddFunctionDef ` , but additionally saves the ` stack_traces ` under <nl> + / / the key of the function definition name ( to be retrieved during function <nl> + / / instantiation ) . <nl> + virtual Status AddFunctionDefWithStackTraces ( <nl> + const FunctionDef & fdef , const StackTracesMap & stack_traces ) = 0 ; <nl> <nl> / / Find and return a added function by its name . <nl> virtual const FunctionDef * FindFunctionDef ( const string & name ) const = 0 ; <nl> new file mode 100644 <nl> index 0000000000000 . . ca8bdaf641d93 <nl> mmm / dev / null <nl> ppp b / tensorflow / c / experimental / stream_executor / test / BUILD <nl> <nl> + # Description : <nl> + # test for stream_executor <nl> + load ( <nl> + " / / tensorflow : tensorflow . bzl " , <nl> + " tf_cc_shared_object " , <nl> + ) <nl> + <nl> + package ( <nl> + licenses = [ " notice " ] , # Apache 2 . 0 <nl> + ) <nl> + <nl> + tf_cc_shared_object ( <nl> + name = " test_pluggable_device . so " , <nl> + srcs = [ " test_pluggable_device . cc " ] , <nl> + visibility = [ " / / tensorflow / c : __subpackages__ " ] , <nl> + deps = [ " / / tensorflow / c / experimental / stream_executor : stream_executor_hdrs " ] , <nl> + ) <nl> similarity index 61 % <nl> rename from tensorflow / compiler / xla / python / bfloat16 . h <nl> rename to tensorflow / c / experimental / stream_executor / test / test_pluggable_device . cc <nl> mmm a / tensorflow / compiler / xla / python / bfloat16 . h <nl> ppp b / tensorflow / c / experimental / stream_executor / test / test_pluggable_device . cc <nl> <nl> - / * Copyright 2019 The TensorFlow Authors . All Rights Reserved . <nl> + / * Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> you may not use this file except in compliance with the License . <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # ifndef TENSORFLOW_COMPILER_XLA_PYTHON_BFLOAT16_H_ <nl> - # define TENSORFLOW_COMPILER_XLA_PYTHON_BFLOAT16_H_ <nl> + # include " tensorflow / c / experimental / stream_executor / stream_executor . h " <nl> <nl> - # include " pybind11 / pybind11 . h " <nl> - # include " tensorflow / compiler / xla / statusor . h " <nl> - <nl> - namespace xla { <nl> - <nl> - xla : : StatusOr < pybind11 : : object > Bfloat16Dtype ( ) ; <nl> - <nl> - } / / namespace xla <nl> - <nl> - # endif / / TENSORFLOW_COMPILER_XLA_PYTHON_BFLOAT16_H_ <nl> + void SE_InitPlugin ( SE_PlatformRegistrationParams * const params , <nl> + TF_Status * const status ) { <nl> + params - > platform - > struct_size = SP_PLATFORM_STRUCT_SIZE ; <nl> + params - > platform - > name = " GPU " ; <nl> + params - > platform - > type = " XGPU " ; <nl> + } <nl> mmm a / tensorflow / c / kernels . 
cc <nl> ppp b / tensorflow / c / kernels . cc <nl> limitations under the License . <nl> # include " tensorflow / stream_executor / stream . h " <nl> # endif / / ! defined ( IS_MOBILE_PLATFORM ) & & ! defined ( IS_SLIM_BUILD ) <nl> <nl> + using tensorflow : : errors : : InvalidArgument ; <nl> / / This file forms the basis of a stable ABI for third - party kernel <nl> / / implementations . It is crucial that changes to this file are made cautiously <nl> / / and with a focus on maintaining both source and binary compatibility . <nl> void AddTypeConstraint ( TF_KernelBuilder * kernel_builder , const char * attr_name , <nl> TF_SetStatus ( status , TF_OK , " " ) ; <nl> } <nl> # undef CASE <nl> + <nl> } / / namespace <nl> } / / namespace tensorflow <nl> <nl> + namespace { <nl> + const tensorflow : : AttrValue * GetAttrValue ( TF_OpKernelConstruction * ctx , <nl> + const char * attr_name , <nl> + TF_Status * status ) { <nl> + auto * cc_ctx = reinterpret_cast < : : tensorflow : : OpKernelConstruction * > ( ctx ) ; <nl> + const tensorflow : : AttrValue * attr = <nl> + : : tensorflow : : AttrSlice ( cc_ctx - > def ( ) ) . Find ( attr_name ) ; <nl> + if ( attr = = nullptr ) { <nl> + status - > status = InvalidArgument ( " Operation ' " , cc_ctx - > def ( ) . name ( ) , <nl> + " ' has no attr named ' " , attr_name , " ' . " ) ; <nl> + } <nl> + return attr ; <nl> + } <nl> + } / / namespace <nl> + <nl> void TF_KernelBuilder_TypeConstraint ( TF_KernelBuilder * kernel_builder , <nl> const char * attr_name , <nl> const TF_DataType type , <nl> void TF_OpKernelContext_Failure ( TF_OpKernelContext * ctx , TF_Status * status ) { <nl> cc_ctx - > CtxFailure ( s ) ; <nl> } <nl> <nl> - # define DEFINE_TF_GETATTR ( func , c_type , cc_type ) \ <nl> + void TF_OpKernelConstruction_GetAttrSize ( TF_OpKernelConstruction * ctx , <nl> + const char * attr_name , <nl> + int32_t * list_size , <nl> + int32_t * total_size , <nl> + TF_Status * status ) { <nl> + const tensorflow : : AttrValue * attr = GetAttrValue ( ctx , attr_name , status ) ; <nl> + if ( ! status - > status . ok ( ) ) { <nl> + * list_size = - 1 ; <nl> + * total_size = - 1 ; <nl> + return ; <nl> + } <nl> + switch ( attr - > value_case ( ) ) { <nl> + # define SINGLE_CASE ( kK , attr_type , size_expr ) \ <nl> + case tensorflow : : AttrValue : : kK : \ <nl> + * list_size = - 1 ; \ <nl> + * total_size = size_expr ; \ <nl> + break ; <nl> + <nl> + SINGLE_CASE ( kS , TF_ATTR_STRING , attr - > s ( ) . length ( ) ) ; <nl> + SINGLE_CASE ( kI , TF_ATTR_INT , - 1 ) ; <nl> + SINGLE_CASE ( kF , TF_ATTR_FLOAT , - 1 ) ; <nl> + SINGLE_CASE ( kB , TF_ATTR_BOOL , - 1 ) ; <nl> + SINGLE_CASE ( kType , TF_ATTR_TYPE , - 1 ) ; <nl> + SINGLE_CASE ( kShape , TF_ATTR_SHAPE , <nl> + attr - > shape ( ) . unknown_rank ( ) ? - 1 : attr - > shape ( ) . dim_size ( ) ) ; <nl> + SINGLE_CASE ( kTensor , TF_ATTR_TENSOR , - 1 ) ; <nl> + # undef SINGLE_CASE <nl> + <nl> + case tensorflow : : AttrValue : : kList : <nl> + * list_size = 0 ; <nl> + * total_size = - 1 ; <nl> + # define LIST_CASE ( field , attr_type , . . . ) \ <nl> + if ( attr - > list ( ) . field # # _size ( ) > 0 ) { \ <nl> + * list_size = attr - > list ( ) . field # # _size ( ) ; \ <nl> + __VA_ARGS__ ; \ <nl> + break ; \ <nl> + } <nl> + <nl> + LIST_CASE ( <nl> + s , TF_ATTR_STRING , * total_size = 0 ; <nl> + for ( int i = 0 ; i < attr - > list ( ) . s_size ( ) ; <nl> + + + i ) { * total_size + = attr - > list ( ) . s ( i ) . 
size ( ) ; } ) ; <nl> + LIST_CASE ( i , TF_ATTR_INT ) ; <nl> + LIST_CASE ( f , TF_ATTR_FLOAT ) ; <nl> + LIST_CASE ( b , TF_ATTR_BOOL ) ; <nl> + LIST_CASE ( type , TF_ATTR_TYPE ) ; <nl> + LIST_CASE ( <nl> + shape , TF_ATTR_SHAPE , * total_size = 0 ; <nl> + for ( int i = 0 ; i < attr - > list ( ) . shape_size ( ) ; + + i ) { <nl> + const auto & s = attr - > list ( ) . shape ( i ) ; <nl> + * total_size + = s . unknown_rank ( ) ? 0 : s . dim_size ( ) ; <nl> + } ) ; <nl> + LIST_CASE ( tensor , TF_ATTR_TENSOR ) ; <nl> + LIST_CASE ( tensor , TF_ATTR_FUNC ) ; <nl> + # undef LIST_CASE <nl> + break ; <nl> + <nl> + case tensorflow : : AttrValue : : kPlaceholder : <nl> + * list_size = - 1 ; <nl> + * total_size = - 1 ; <nl> + break ; <nl> + <nl> + case tensorflow : : AttrValue : : kFunc : <nl> + * list_size = - 1 ; <nl> + * total_size = - 1 ; <nl> + break ; <nl> + <nl> + case tensorflow : : AttrValue : : VALUE_NOT_SET : <nl> + status - > status = <nl> + InvalidArgument ( " Attribute ' " , attr_name , " ' has no value set " ) ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + # define DEFINE_TF_GETATTR ( func , c_type , cc_type , attr_type , list_field ) \ <nl> void TF_OpKernelConstruction_GetAttr # # func ( TF_OpKernelConstruction * ctx , \ <nl> const char * attr_name , \ <nl> c_type * val , TF_Status * status ) { \ <nl> void TF_OpKernelContext_Failure ( TF_OpKernelContext * ctx , TF_Status * status ) { <nl> if ( s . ok ( ) ) { \ <nl> * val = static_cast < c_type > ( v ) ; \ <nl> } \ <nl> + } \ <nl> + void TF_OpKernelConstruction_GetAttr # # func # # List ( \ <nl> + TF_OpKernelConstruction * ctx , const char * attr_name , c_type * vals , \ <nl> + int max_vals , TF_Status * status ) { \ <nl> + TF_SetStatus ( status , TF_OK , " " ) ; \ <nl> + const tensorflow : : AttrValue * attr = GetAttrValue ( ctx , attr_name , status ) ; \ <nl> + if ( ! status - > status . ok ( ) ) return ; \ <nl> + if ( attr - > value_case ( ) ! = tensorflow : : AttrValue : : kList ) { \ <nl> + status - > status = \ <nl> + InvalidArgument ( " Value for ' " , attr_name , " ' is not a list . " ) ; \ <nl> + return ; \ <nl> + } \ <nl> + status - > status = \ <nl> + tensorflow : : AttrValueHasType ( * attr , " list ( " attr_type " ) " ) ; \ <nl> + if ( ! status - > status . ok ( ) ) return ; \ <nl> + const auto len = std : : min ( max_vals , attr - > list ( ) . list_field # # _size ( ) ) ; \ <nl> + for ( int i = 0 ; i < len ; + + i ) { \ <nl> + vals [ i ] = static_cast < c_type > ( attr - > list ( ) . 
list_field ( i ) ) ; \ <nl> + } \ <nl> } <nl> <nl> - DEFINE_TF_GETATTR ( Type , TF_DataType , tensorflow : : DataType ) <nl> - DEFINE_TF_GETATTR ( Int32 , tensorflow : : int32 , int32_t ) <nl> + DEFINE_TF_GETATTR ( Type , TF_DataType , tensorflow : : DataType , " type " , type ) <nl> + DEFINE_TF_GETATTR ( Int32 , int32_t , tensorflow : : int32 , " int " , i ) <nl> + DEFINE_TF_GETATTR ( Int64 , int64_t , tensorflow : : int64 , " int " , i ) <nl> + DEFINE_TF_GETATTR ( Float , float , float , " float " , f ) <nl> + DEFINE_TF_GETATTR ( Bool , TF_Bool , bool , " bool " , b ) <nl> + <nl> + void TF_OpKernelConstruction_GetAttrString ( TF_OpKernelConstruction * ctx , <nl> + const char * attr_name , char * value , <nl> + size_t max_length , <nl> + TF_Status * status ) { <nl> + std : : string v ; <nl> + auto * cc_ctx = reinterpret_cast < : : tensorflow : : OpKernelConstruction * > ( ctx ) ; <nl> + : : tensorflow : : Status s = cc_ctx - > GetAttr ( attr_name , & v ) ; <nl> + : : tensorflow : : Set_TF_Status_from_Status ( status , s ) ; <nl> + <nl> + if ( ! status - > status . ok ( ) ) return ; <nl> + <nl> + if ( max_length < = 0 ) { <nl> + return ; <nl> + } <nl> + std : : memcpy ( value , v . data ( ) , std : : min < size_t > ( v . length ( ) , max_length ) ) ; <nl> + } <nl> + <nl> + void TF_OpKernelConstruction_GetAttrStringList ( TF_OpKernelConstruction * ctx , <nl> + const char * attr_name , <nl> + char * * values , size_t * lengths , <nl> + int max_values , void * storage , <nl> + size_t storage_size , <nl> + TF_Status * status ) { <nl> + std : : vector < std : : string > v ; <nl> + auto * cc_ctx = reinterpret_cast < : : tensorflow : : OpKernelConstruction * > ( ctx ) ; <nl> + : : tensorflow : : Status s = cc_ctx - > GetAttr ( attr_name , & v ) ; <nl> + : : tensorflow : : Set_TF_Status_from_Status ( status , s ) ; <nl> + <nl> + if ( ! status - > status . ok ( ) ) return ; <nl> + <nl> + const auto len = std : : min ( max_values , static_cast < int > ( v . size ( ) ) ) ; <nl> + char * p = static_cast < char * > ( storage ) ; <nl> + for ( int i = 0 ; i < len ; + + i ) { <nl> + const std : : string & s = v [ i ] ; <nl> + values [ i ] = p ; <nl> + lengths [ i ] = s . size ( ) ; <nl> + if ( ( p + s . size ( ) ) > ( static_cast < char * > ( storage ) + storage_size ) ) { <nl> + status - > status = InvalidArgument ( <nl> + " Not enough storage to hold the requested list of strings " ) ; <nl> + return ; <nl> + } <nl> + memcpy ( values [ i ] , s . data ( ) , s . size ( ) ) ; <nl> + p + = s . size ( ) ; <nl> + } <nl> + } <nl> + <nl> + bool TF_OpKernelConstruction_HasAttr ( TF_OpKernelConstruction * ctx , <nl> + const char * attr_name , TF_Status * status ) { <nl> + auto * cc_ctx = reinterpret_cast < : : tensorflow : : OpKernelConstruction * > ( ctx ) ; <nl> + return cc_ctx - > HasAttr ( attr_name ) ; <nl> + } <nl> <nl> TF_StringView TF_OpKernelConstruction_GetName ( TF_OpKernelConstruction * ctx ) { <nl> auto * cc_ctx = reinterpret_cast < tensorflow : : OpKernelConstruction * > ( ctx ) ; <nl> mmm a / tensorflow / c / kernels . h <nl> ppp b / tensorflow / c / kernels . h <nl> TF_CAPI_EXPORT extern TF_DataType TF_ExpectedOutputDataType ( <nl> / / Returns the step ID of the given context . <nl> TF_CAPI_EXPORT extern int64_t TF_StepId ( TF_OpKernelContext * ctx ) ; <nl> <nl> + / / Get the list_size and total_size of the attribute ` attr_name ` of ` oper ` . <nl> + / / list_size - the length of the list . <nl> + / / total_size - total size of the list . 
<nl> + / / ( 1 ) If attr_type = = TF_ATTR_STRING <nl> + / / then total_size is the cumulative byte size <nl> + / / of all the strings in the list . <nl> + / / ( 3 ) If attr_type = = TF_ATTR_SHAPE <nl> + / / then total_size is the number of dimensions <nl> + / / of the shape valued attribute , or - 1 <nl> + / / if its rank is unknown . <nl> + / / ( 4 ) If attr_type = = TF_ATTR_SHAPE <nl> + / / then total_size is the cumulative number <nl> + / / of dimensions of all shapes in the list . <nl> + / / ( 5 ) Otherwise , total_size is undefined . <nl> + TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrSize ( <nl> + TF_OpKernelConstruction * ctx , const char * attr_name , int32_t * list_size , <nl> + int32_t * total_size , TF_Status * status ) ; <nl> + <nl> / / Interprets the named kernel construction attribute as a TF_DataType and <nl> / / places it into * val . * status is set to TF_OK . <nl> / / <nl> TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrInt32 ( <nl> TF_OpKernelConstruction * ctx , const char * attr_name , int32_t * val , <nl> TF_Status * status ) ; <nl> <nl> + / / Interprets the named kernel construction attribute as int64_t and <nl> + / / places it into * val . * status is set to TF_OK . <nl> + / / <nl> + / / If the attribute could not be found or could not be interpreted as <nl> + / / int64 , * status is populated with an error . <nl> + TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrInt64 ( <nl> + TF_OpKernelConstruction * ctx , const char * attr_name , int64_t * val , <nl> + TF_Status * status ) ; <nl> + <nl> + / / Interprets the named kernel construction attribute as float and <nl> + / / places it into * val . * status is set to TF_OK . <nl> + / / <nl> + / / If the attribute could not be found or could not be interpreted as <nl> + / / float , * status is populated with an error . <nl> + TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrFloat ( <nl> + TF_OpKernelConstruction * ctx , const char * attr_name , float * val , <nl> + TF_Status * status ) ; <nl> + <nl> + / / Interprets the named kernel construction attribute as bool and <nl> + / / places it into * val . * status is set to TF_OK . <nl> + / / <nl> + / / If the attribute could not be found or could not be interpreted as <nl> + / / bool , * status is populated with an error . <nl> + TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrBool ( <nl> + TF_OpKernelConstruction * ctx , const char * attr_name , TF_Bool * val , <nl> + TF_Status * status ) ; <nl> + <nl> + / / Interprets the named kernel construction attribute as string and <nl> + / / places it into * val . ` val ` must <nl> + / / point to an array of length at least ` max_length ` ( ideally set to <nl> + / / total_size from TF_OpKernelConstruction_GetAttrSize ( ctx , <nl> + / / attr_name , list_size , total_size ) ) . * status is set to TF_OK . <nl> + / / <nl> + / / If the attribute could not be found or could not be interpreted as <nl> + / / string , * status is populated with an error . <nl> + TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrString ( <nl> + TF_OpKernelConstruction * ctx , const char * attr_name , char * val , <nl> + size_t max_length , TF_Status * status ) ; <nl> + <nl> + / / Interprets the named kernel construction attribute as a TF_DataType array and <nl> + / / places it into * vals . * status is set to TF_OK . 
<nl> + / / ` vals ` must point to an array of length at least ` max_values ` ( ideally set <nl> + / / to list_size from <nl> + / / TF_OpKernelConstruction_GetAttrSize ( ctx , attr_name , list_size , <nl> + / / total_size ) ) . <nl> + TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrTypeList ( <nl> + TF_OpKernelConstruction * ctx , const char * attr_name , TF_DataType * vals , <nl> + int max_vals , TF_Status * status ) ; <nl> + <nl> + / / Interprets the named kernel construction attribute as int32_t array and <nl> + / / places it into * vals . * status is set to TF_OK . <nl> + / / ` vals ` must point to an array of length at least ` max_values ` ( ideally set <nl> + / / to list_size from <nl> + / / TF_OpKernelConstruction_GetAttrSize ( ctx , attr_name , list_size , <nl> + / / total_size ) ) . <nl> + TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrInt32List ( <nl> + TF_OpKernelConstruction * ctx , const char * attr_name , int32_t * vals , <nl> + int max_vals , TF_Status * status ) ; <nl> + <nl> + / / Interprets the named kernel construction attribute as int64_t array and <nl> + / / places it into * vals . * status is set to TF_OK . <nl> + / / ` vals ` must point to an array of length at least ` max_values ` ( ideally set <nl> + / / to list_size from <nl> + / / TF_OpKernelConstruction_GetAttrSize ( ctx , attr_name , list_size , <nl> + / / total_size ) ) . <nl> + TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrInt64List ( <nl> + TF_OpKernelConstruction * ctx , const char * attr_name , int64_t * vals , <nl> + int max_vals , TF_Status * status ) ; <nl> + <nl> + / / Interprets the named kernel construction attribute as float array and <nl> + / / places it into * vals . * status is set to TF_OK . <nl> + / / ` vals ` must point to an array of length at least ` max_values ` ( ideally set <nl> + / / to list_size from <nl> + / / TF_OpKernelConstruction_GetAttrSize ( ctx , attr_name , list_size , <nl> + / / total_size ) ) . <nl> + TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrFloatList ( <nl> + TF_OpKernelConstruction * ctx , const char * attr_name , float * vals , <nl> + int max_vals , TF_Status * status ) ; <nl> + <nl> + / / Interprets the named kernel construction attribute as bool array and <nl> + / / places it into * vals . * status is set to TF_OK . <nl> + / / ` vals ` must point to an array of length at least ` max_values ` ( ideally set <nl> + / / to list_size from <nl> + / / TF_OpKernelConstruction_GetAttrSize ( ctx , attr_name , list_size , <nl> + / / total_size ) ) . <nl> + TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrBoolList ( <nl> + TF_OpKernelConstruction * ctx , const char * attr_name , TF_Bool * vals , <nl> + int max_vals , TF_Status * status ) ; <nl> + <nl> + / / Interprets the named kernel construction attribute as string array and fills <nl> + / / in ` vals ` and ` lengths ` , each of which must point to an array of length at <nl> + / / least ` max_values ` . * status is set to TF_OK . The elements of values will <nl> + / / point to addresses in ` storage ` which must be at least ` storage_size ` bytes <nl> + / / in length . Ideally , max_values would be set to list_size and ` storage ` would <nl> + / / be at least total_size , obtained from <nl> + / / TF_OpKernelConstruction_GetAttrSize ( ctx , attr_name , list_size , <nl> + / / total_size ) . 
<nl> + TF_CAPI_EXPORT extern void TF_OpKernelConstruction_GetAttrStringList ( <nl> + TF_OpKernelConstruction * ctx , const char * attr_name , char * * vals , <nl> + size_t * lengths , int max_values , void * storage , size_t storage_size , <nl> + TF_Status * status ) ; <nl> + <nl> + / / Return true if the kernel construction has the attr_name <nl> + TF_CAPI_EXPORT extern bool TF_OpKernelConstruction_HasAttr ( <nl> + TF_OpKernelConstruction * ctx , const char * attr_name , TF_Status * status ) ; <nl> + <nl> / / Returns the unique operation name for this OpKernel . <nl> TF_CAPI_EXPORT extern TF_StringView TF_OpKernelConstruction_GetName ( <nl> TF_OpKernelConstruction * ctx ) ; <nl> mmm a / tensorflow / c / kernels_test . cc <nl> ppp b / tensorflow / c / kernels_test . cc <nl> TEST ( TestKernel , TestRegisterKernelBuilder ) { <nl> ASSERT_TRUE ( delete_called ) ; <nl> } <nl> <nl> + / / REGISTER_OP for TF_OpKernelConstruction_GetAttr * test cases . <nl> + / / Registers two ops , each with a single attribute called ' Attr ' . <nl> + / / The attribute in one op will have a type ' type ' , the other <nl> + / / will have list ( type ) . <nl> + # define ATTR_TEST_REGISTER_OP ( name , type ) \ <nl> + REGISTER_OP ( " TestKernelAttr " # name ) \ <nl> + . Attr ( " Attr : " # type ) \ <nl> + . SetShapeFn ( tensorflow : : shape_inference : : UnknownShape ) ; \ <nl> + REGISTER_OP ( " TestKernelAttr " # name " List " ) \ <nl> + . Attr ( " Attr : list ( " # type " ) " ) \ <nl> + . SetShapeFn ( tensorflow : : shape_inference : : UnknownShape ) <nl> + ATTR_TEST_REGISTER_OP ( String , string ) ; <nl> + ATTR_TEST_REGISTER_OP ( Int , int ) ; <nl> + ATTR_TEST_REGISTER_OP ( Float , float ) ; <nl> + ATTR_TEST_REGISTER_OP ( Bool , bool ) ; <nl> + ATTR_TEST_REGISTER_OP ( Type , type ) ; <nl> + # undef ATTR_TEST_REGISTER_OP <nl> + <nl> + / / Helper macros for the TF_OpKernelConstruction_GetAttr * tests . <nl> + # define EXPECT_TF_SIZE ( attr_name , expected_list_size , expected_total_size ) \ <nl> + do { \ <nl> + int32_t list_size , total_size ; \ <nl> + TF_OpKernelConstruction_GetAttrSize ( ctx , attr_name , & list_size , \ <nl> + & total_size , status ) ; \ <nl> + EXPECT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; \ <nl> + EXPECT_EQ ( expected_list_size , list_size ) ; \ <nl> + EXPECT_EQ ( expected_total_size , total_size ) ; \ <nl> + } while ( 0 ) <nl> + <nl> + typedef void * ( * MyCreateFuncWithAttr ) ( TF_OpKernelConstruction * ) ; <nl> + class TestKernelAttr : public : : testing : : Test { <nl> + public : <nl> + TestKernelAttr ( ) { } <nl> + ~ TestKernelAttr ( ) { } <nl> + <nl> + std : : unique_ptr < OpKernel > GetFakeKernelWithAttr ( const char * op_name , <nl> + AttrValue v , Status * status ) { <nl> + NodeDef def ; <nl> + def . set_op ( op_name ) ; <nl> + def . set_name ( " FakeNode " ) ; <nl> + def . set_device ( " FakeDevice " ) ; <nl> + ( * def . 
mutable_attr ( ) ) [ " Attr " ] = v ; <nl> + return CreateOpKernel ( DeviceType ( " FakeDevice " ) , nullptr , nullptr , def , 1 , <nl> + status ) ; <nl> + } <nl> + <nl> + void SetAttr ( MyCreateFuncWithAttr MyCreateFuncAttr , const char * op_name , <nl> + AttrValue & v ) { <nl> + TF_KernelBuilder * builder = TF_NewKernelBuilder ( <nl> + op_name , " FakeDevice " , MyCreateFuncAttr , & MyComputeFunc , & MyDeleteFunc ) ; <nl> + { <nl> + TF_Status * status = TF_NewStatus ( ) ; <nl> + TF_RegisterKernelBuilder ( " FakeNode " , builder , status ) ; <nl> + EXPECT_EQ ( TF_OK , TF_GetCode ( status ) ) ; <nl> + TF_DeleteStatus ( status ) ; <nl> + } <nl> + Status status ; <nl> + std : : unique_ptr < OpKernel > kernel = <nl> + GetFakeKernelWithAttr ( op_name , v , & status ) ; <nl> + TF_EXPECT_OK ( status ) ; <nl> + ASSERT_NE ( nullptr , kernel . get ( ) ) ; <nl> + kernel - > Compute ( nullptr ) ; <nl> + <nl> + ASSERT_TRUE ( delete_called ) ; <nl> + } <nl> + } ; <nl> + <nl> + TEST_F ( TestKernelAttr , String ) { <nl> + auto my_create_func = [ ] ( TF_OpKernelConstruction * ctx ) { <nl> + struct MyCustomKernel * s = new struct MyCustomKernel ; <nl> + s - > created = true ; <nl> + s - > compute_called = false ; <nl> + <nl> + std : : unique_ptr < char [ ] > val ( new char [ 5 ] ) ; <nl> + TF_Status * status = TF_NewStatus ( ) ; <nl> + EXPECT_TF_SIZE ( / * attr_name * / " Attr " , / * expected_list_size * / - 1 , <nl> + / * expected_total_size * / 5 ) ; <nl> + TF_OpKernelConstruction_GetAttrString ( ctx , " Attr " , val . get ( ) , <nl> + / * max_length * / 5 , status ) ; <nl> + <nl> + EXPECT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + EXPECT_EQ ( " bunny " , string ( static_cast < const char * > ( val . get ( ) ) , 5 ) ) ; <nl> + TF_DeleteStatus ( status ) ; <nl> + return static_cast < void * > ( s ) ; <nl> + } ; <nl> + <nl> + AttrValue v ; <nl> + v . set_s ( " bunny " ) ; <nl> + SetAttr ( my_create_func , " TestKernelAttrString " , v ) ; <nl> + } <nl> + <nl> + TEST_F ( TestKernelAttr , StringList ) { <nl> + auto my_create_func = [ ] ( TF_OpKernelConstruction * ctx ) { <nl> + struct MyCustomKernel * s = new struct MyCustomKernel ; <nl> + s - > created = true ; <nl> + s - > compute_called = false ; <nl> + <nl> + std : : vector < string > list = { " bugs " , " bunny " , " duck " } ; <nl> + int list_total_size = 0 ; <nl> + for ( const auto & s : list ) { <nl> + list_total_size + = s . size ( ) ; <nl> + } <nl> + <nl> + TF_Status * status = TF_NewStatus ( ) ; <nl> + std : : unique_ptr < char * [ ] > values ( new char * [ list . size ( ) ] ) ; <nl> + std : : unique_ptr < size_t [ ] > lens ( new size_t [ list . size ( ) ] ) ; <nl> + std : : unique_ptr < char [ ] > storage ( new char [ list_total_size ] ) ; <nl> + EXPECT_TF_SIZE ( / * attr_name * / " Attr " , / * expected_list_size * / list . size ( ) , <nl> + / * expected_total_size * / list_total_size ) ; <nl> + TF_OpKernelConstruction_GetAttrStringList ( <nl> + ctx , " Attr " , values . get ( ) , lens . get ( ) , list . size ( ) , storage . get ( ) , <nl> + list_total_size , status ) ; <nl> + EXPECT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + <nl> + for ( size_t i = 0 ; i < list . size ( ) ; + + i ) { <nl> + EXPECT_EQ ( list [ i ] . 
size ( ) , lens [ i ] ) < < i ; <nl> + EXPECT_EQ ( list [ i ] , string ( static_cast < const char * > ( values [ i ] ) , lens [ i ] ) ) <nl> + < < i ; <nl> + } <nl> + TF_DeleteStatus ( status ) ; <nl> + return static_cast < void * > ( s ) ; <nl> + } ; <nl> + <nl> + AttrValue v ; <nl> + auto attr_in = gtl : : ArraySlice < StringPiece > ( { " bugs " , " bunny " , " duck " } ) ; <nl> + SetAttrValue ( attr_in , & v ) ; <nl> + SetAttr ( my_create_func , " TestKernelAttrStringList " , v ) ; <nl> + } <nl> + <nl> + TEST_F ( TestKernelAttr , Int ) { <nl> + auto my_create_func = [ ] ( TF_OpKernelConstruction * ctx ) { <nl> + struct MyCustomKernel * s = new struct MyCustomKernel ; <nl> + s - > created = true ; <nl> + s - > compute_called = false ; <nl> + <nl> + int64_t val ; <nl> + TF_Status * status = TF_NewStatus ( ) ; <nl> + EXPECT_TF_SIZE ( / * attr_name * / " Attr " , / * expected_list_size * / - 1 , <nl> + / * expected_total_size * / - 1 ) ; <nl> + TF_OpKernelConstruction_GetAttrInt64 ( ctx , " Attr " , & val , status ) ; <nl> + EXPECT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + EXPECT_EQ ( 1234 , val ) ; <nl> + TF_DeleteStatus ( status ) ; <nl> + return static_cast < void * > ( s ) ; <nl> + } ; <nl> + <nl> + AttrValue v ; <nl> + v . set_i ( 1234 ) ; <nl> + SetAttr ( my_create_func , " TestKernelAttrInt " , v ) ; <nl> + } <nl> + <nl> + TEST_F ( TestKernelAttr , IntList ) { <nl> + auto my_create_func = [ ] ( TF_OpKernelConstruction * ctx ) { <nl> + struct MyCustomKernel * s = new struct MyCustomKernel ; <nl> + s - > created = true ; <nl> + s - > compute_called = false ; <nl> + <nl> + const int64_t list [ ] = { 1 , 2 , 3 , 4 } ; <nl> + const size_t list_size = TF_ARRAYSIZE ( list ) ; <nl> + int64_t values [ list_size ] ; <nl> + <nl> + TF_Status * status = TF_NewStatus ( ) ; <nl> + EXPECT_TF_SIZE ( / * attr_name * / " Attr " , / * expected_list_size * / list_size , <nl> + / * expected_total_size * / - 1 ) ; <nl> + TF_OpKernelConstruction_GetAttrInt64List ( ctx , " Attr " , values , list_size , <nl> + status ) ; <nl> + EXPECT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + EXPECT_TRUE ( <nl> + std : : equal ( std : : begin ( list ) , std : : end ( list ) , std : : begin ( values ) ) ) ; <nl> + TF_DeleteStatus ( status ) ; <nl> + return static_cast < void * > ( s ) ; <nl> + } ; <nl> + <nl> + AttrValue v ; <nl> + auto attr_in = gtl : : ArraySlice < int64 > ( { 1 , 2 , 3 , 4 } ) ; <nl> + SetAttrValue ( attr_in , & v ) ; <nl> + SetAttr ( my_create_func , " TestKernelAttrIntList " , v ) ; <nl> + } <nl> + <nl> + TEST_F ( TestKernelAttr , Float ) { <nl> + auto my_create_func = [ ] ( TF_OpKernelConstruction * ctx ) { <nl> + struct MyCustomKernel * s = new struct MyCustomKernel ; <nl> + s - > created = true ; <nl> + s - > compute_called = false ; <nl> + <nl> + float val ; <nl> + TF_Status * status = TF_NewStatus ( ) ; <nl> + EXPECT_TF_SIZE ( / * attr_name * / " Attr " , / * expected_list_size * / - 1 , <nl> + / * expected_total_size * / - 1 ) ; <nl> + TF_OpKernelConstruction_GetAttrFloat ( ctx , " Attr " , & val , status ) ; <nl> + EXPECT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + EXPECT_FLOAT_EQ ( 2 . 718 , val ) ; <nl> + TF_DeleteStatus ( status ) ; <nl> + return static_cast < void * > ( s ) ; <nl> + } ; <nl> + <nl> + AttrValue v ; <nl> + v . set_f ( 2 . 
718 ) ; <nl> + SetAttr ( my_create_func , " TestKernelAttrFloat " , v ) ; <nl> + } <nl> + <nl> + TEST_F ( TestKernelAttr , FloatList ) { <nl> + auto my_create_func = [ ] ( TF_OpKernelConstruction * ctx ) { <nl> + struct MyCustomKernel * s = new struct MyCustomKernel ; <nl> + s - > created = true ; <nl> + s - > compute_called = false ; <nl> + <nl> + const float list [ ] = { 1 . 414 , 2 . 718 , 3 . 1415 } ; <nl> + const size_t list_size = TF_ARRAYSIZE ( list ) ; <nl> + float values [ list_size ] ; <nl> + <nl> + TF_Status * status = TF_NewStatus ( ) ; <nl> + EXPECT_TF_SIZE ( / * attr_name * / " Attr " , / * expected_list_size * / list_size , <nl> + / * expected_total_size * / - 1 ) ; <nl> + TF_OpKernelConstruction_GetAttrFloatList ( ctx , " Attr " , values , list_size , <nl> + status ) ; <nl> + EXPECT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + EXPECT_TRUE ( <nl> + std : : equal ( std : : begin ( list ) , std : : end ( list ) , std : : begin ( values ) ) ) ; <nl> + TF_DeleteStatus ( status ) ; <nl> + return static_cast < void * > ( s ) ; <nl> + } ; <nl> + <nl> + AttrValue v ; <nl> + auto attr_in = gtl : : ArraySlice < float > ( { 1 . 414 , 2 . 718 , 3 . 1415 } ) ; <nl> + SetAttrValue ( attr_in , & v ) ; <nl> + SetAttr ( my_create_func , " TestKernelAttrFloatList " , v ) ; <nl> + } <nl> + <nl> + TEST_F ( TestKernelAttr , Bool ) { <nl> + auto my_create_func = [ ] ( TF_OpKernelConstruction * ctx ) { <nl> + struct MyCustomKernel * s = new struct MyCustomKernel ; <nl> + s - > created = true ; <nl> + s - > compute_called = false ; <nl> + <nl> + unsigned char val ; <nl> + TF_Status * status = TF_NewStatus ( ) ; <nl> + EXPECT_TF_SIZE ( / * attr_name * / " Attr " , / * expected_list_size * / - 1 , <nl> + / * expected_total_size * / - 1 ) ; <nl> + TF_OpKernelConstruction_GetAttrBool ( ctx , " Attr " , & val , status ) ; <nl> + EXPECT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + EXPECT_EQ ( 1 , val ) ; <nl> + TF_DeleteStatus ( status ) ; <nl> + return static_cast < void * > ( s ) ; <nl> + } ; <nl> + <nl> + AttrValue v ; <nl> + v . 
set_b ( 1 ) ; <nl> + SetAttr ( my_create_func , " TestKernelAttrBool " , v ) ; <nl> + } <nl> + <nl> + TEST_F ( TestKernelAttr , BoolList ) { <nl> + auto my_create_func = [ ] ( TF_OpKernelConstruction * ctx ) { <nl> + struct MyCustomKernel * s = new struct MyCustomKernel ; <nl> + s - > created = true ; <nl> + s - > compute_called = false ; <nl> + <nl> + const unsigned char list [ ] = { 1 , 0 , 1 , 0 } ; <nl> + const size_t list_size = TF_ARRAYSIZE ( list ) ; <nl> + unsigned char values [ list_size ] ; <nl> + <nl> + TF_Status * status = TF_NewStatus ( ) ; <nl> + EXPECT_TF_SIZE ( / * attr_name * / " Attr " , / * expected_list_size * / list_size , <nl> + / * expected_total_size * / - 1 ) ; <nl> + TF_OpKernelConstruction_GetAttrBoolList ( ctx , " Attr " , values , list_size , <nl> + status ) ; <nl> + EXPECT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + EXPECT_TRUE ( <nl> + std : : equal ( std : : begin ( list ) , std : : end ( list ) , std : : begin ( values ) ) ) ; <nl> + TF_DeleteStatus ( status ) ; <nl> + return static_cast < void * > ( s ) ; <nl> + } ; <nl> + <nl> + AttrValue v ; <nl> + auto attr_in = gtl : : ArraySlice < bool > ( { 1 , 0 , 1 , 0 } ) ; <nl> + SetAttrValue ( attr_in , & v ) ; <nl> + SetAttr ( my_create_func , " TestKernelAttrBoolList " , v ) ; <nl> + } <nl> + <nl> + TEST_F ( TestKernelAttr , Type ) { <nl> + auto my_create_func = [ ] ( TF_OpKernelConstruction * ctx ) { <nl> + struct MyCustomKernel * s = new struct MyCustomKernel ; <nl> + s - > created = true ; <nl> + s - > compute_called = false ; <nl> + <nl> + TF_DataType val ; <nl> + TF_Status * status = TF_NewStatus ( ) ; <nl> + EXPECT_TF_SIZE ( / * attr_name * / " Attr " , / * expected_list_size * / - 1 , <nl> + / * expected_total_size * / - 1 ) ; <nl> + TF_OpKernelConstruction_GetAttrType ( ctx , " Attr " , & val , status ) ; <nl> + EXPECT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + EXPECT_EQ ( TF_FLOAT , val ) ; <nl> + TF_DeleteStatus ( status ) ; <nl> + return static_cast < void * > ( s ) ; <nl> + } ; <nl> + <nl> + AttrValue v ; <nl> + v . 
set_type ( DT_FLOAT ) ; <nl> + SetAttr ( my_create_func , " TestKernelAttrType " , v ) ; <nl> + } <nl> + <nl> + TEST_F ( TestKernelAttr , TypeList ) { <nl> + auto my_create_func = [ ] ( TF_OpKernelConstruction * ctx ) { <nl> + struct MyCustomKernel * s = new struct MyCustomKernel ; <nl> + s - > created = true ; <nl> + s - > compute_called = false ; <nl> + <nl> + const TF_DataType list [ ] = { TF_FLOAT , TF_DOUBLE , TF_HALF , TF_COMPLEX128 } ; <nl> + const size_t list_size = TF_ARRAYSIZE ( list ) ; <nl> + TF_DataType values [ list_size ] ; <nl> + <nl> + TF_Status * status = TF_NewStatus ( ) ; <nl> + EXPECT_TF_SIZE ( / * attr_name * / " Attr " , / * expected_list_size * / list_size , <nl> + / * expected_total_size * / - 1 ) ; <nl> + TF_OpKernelConstruction_GetAttrTypeList ( ctx , " Attr " , values , list_size , <nl> + status ) ; <nl> + EXPECT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + EXPECT_TRUE ( <nl> + std : : equal ( std : : begin ( list ) , std : : end ( list ) , std : : begin ( values ) ) ) ; <nl> + TF_DeleteStatus ( status ) ; <nl> + return static_cast < void * > ( s ) ; <nl> + } ; <nl> + <nl> + AttrValue v ; <nl> + auto attr_in = <nl> + gtl : : ArraySlice < DataType > ( { DT_FLOAT , DT_DOUBLE , DT_HALF , DT_COMPLEX128 } ) ; <nl> + SetAttrValue ( attr_in , & v ) ; <nl> + SetAttr ( my_create_func , " TestKernelAttrTypeList " , v ) ; <nl> + } <nl> + # undef EXPECT_TF_SIZE <nl> + <nl> class DummyDevice : public DeviceBase { <nl> public : <nl> explicit DummyDevice ( Env * env ) : DeviceBase ( env ) { } <nl> mmm a / tensorflow / compiler / mlir / hlo / include / mlir - hlo / Dialect / mhlo / transforms / map_hlo_to_lhlo_op . h <nl> ppp b / tensorflow / compiler / mlir / hlo / include / mlir - hlo / Dialect / mhlo / transforms / map_hlo_to_lhlo_op . h <nl> MAP_HLO_TO_LHLO ( MinOp ) ; <nl> MAP_HLO_TO_LHLO ( MulOp ) ; <nl> MAP_HLO_TO_LHLO ( NegOp ) ; <nl> MAP_HLO_TO_LHLO ( NotOp ) ; <nl> + MAP_HLO_TO_LHLO ( OrOp ) ; <nl> MAP_HLO_TO_LHLO ( RealOp ) ; <nl> MAP_HLO_TO_LHLO ( ReduceOp ) ; <nl> MAP_HLO_TO_LHLO ( ReshapeOp ) ; <nl> MAP_HLO_TO_LHLO ( SqrtOp ) ; <nl> MAP_HLO_TO_LHLO ( SubOp ) ; <nl> MAP_HLO_TO_LHLO ( TanhOp ) ; <nl> MAP_HLO_TO_LHLO ( TransposeOp ) ; <nl> + MAP_HLO_TO_LHLO ( XorOp ) ; <nl> <nl> # undef MAP_HLO_TO_LHLO <nl> <nl> mmm a / tensorflow / compiler / mlir / hlo / include / mlir - hlo / Dialect / mhlo / transforms / map_lmhlo_to_scalar_op . h <nl> ppp b / tensorflow / compiler / mlir / hlo / include / mlir - hlo / Dialect / mhlo / transforms / map_lmhlo_to_scalar_op . 
h <nl> inline Value MapLhloOpToStdScalarOp < lmhlo : : NotOp > ( Location loc , <nl> return nullptr ; <nl> } <nl> <nl> + template < > <nl> + inline Value MapLhloOpToStdScalarOp < lmhlo : : OrOp > ( Location loc , <nl> + ArrayRef < Type > result_types , <nl> + ArrayRef < Value > args , <nl> + OpBuilder * b ) { <nl> + return MapLhloOpToStdScalarOpImpl < IntegerType , : : mlir : : OrOp > { } ( <nl> + loc , result_types , args , b ) ; <nl> + } <nl> + <nl> template < > <nl> inline Value MapLhloOpToStdScalarOp < lmhlo : : RsqrtOp > ( Location loc , <nl> ArrayRef < Type > result_types , <nl> inline Value MapLhloOpToStdScalarOp < lmhlo : : TanhOp > ( Location loc , <nl> loc , result_types , args , b ) ; <nl> } <nl> <nl> + template < > <nl> + inline Value MapLhloOpToStdScalarOp < lmhlo : : XorOp > ( Location loc , <nl> + ArrayRef < Type > result_types , <nl> + ArrayRef < Value > args , <nl> + OpBuilder * b ) { <nl> + return MapLhloOpToStdScalarOpImpl < IntegerType , : : mlir : : XOrOp > { } ( <nl> + loc , result_types , args , b ) ; <nl> + } <nl> + <nl> } / / namespace impl <nl> <nl> struct HloOpToStdScalarOp { <nl> mmm a / tensorflow / compiler / mlir / hlo / lib / Dialect / mhlo / transforms / hlo_legalize_to_lhlo . cc <nl> ppp b / tensorflow / compiler / mlir / hlo / lib / Dialect / mhlo / transforms / hlo_legalize_to_lhlo . cc <nl> void populateHLOToLHLOConversionPattern ( MLIRContext * context , <nl> HloToLhloOpConverter < mhlo : : MulOp > , <nl> HloToLhloOpConverter < mhlo : : NegOp > , <nl> HloToLhloOpConverter < mhlo : : NotOp > , <nl> + HloToLhloOpConverter < mhlo : : OrOp > , <nl> HloToLhloOpConverter < mhlo : : RealOp > , <nl> HloToLhloOpConverter < mhlo : : RemOp > , <nl> HloToLhloOpConverter < mhlo : : RsqrtOp > , <nl> void populateHLOToLHLOConversionPattern ( MLIRContext * context , <nl> HloToLhloOpConverter < mhlo : : SubOp > , <nl> HloToLhloOpConverter < mhlo : : TanhOp > , <nl> HloToLhloOpConverter < mhlo : : TransposeOp > , <nl> + HloToLhloOpConverter < mhlo : : XorOp > , <nl> HloToLhloReduceOpConverter , <nl> HloToLhloReturnOpConverter , <nl> HloToLhloTensorLoadOpConverter , <nl> mmm a / tensorflow / compiler / mlir / hlo / lib / Dialect / mhlo / transforms / legalize_to_linalg . cc <nl> ppp b / tensorflow / compiler / mlir / hlo / lib / Dialect / mhlo / transforms / legalize_to_linalg . 
cc <nl> void populateLHLOToLinalgConversionPattern ( MLIRContext * context , <nl> PointwiseToLinalgConverter < lmhlo : : ExpOp > , <nl> PointwiseToLinalgConverter < lmhlo : : FloorOp > , <nl> PointwiseToLinalgConverter < lmhlo : : ImagOp > , <nl> + PointwiseToLinalgConverter < lmhlo : : IsFiniteOp > , <nl> PointwiseToLinalgConverter < lmhlo : : LogOp > , <nl> PointwiseToLinalgConverter < lmhlo : : MaxOp > , <nl> PointwiseToLinalgConverter < lmhlo : : MinOp > , <nl> PointwiseToLinalgConverter < lmhlo : : MulOp > , <nl> PointwiseToLinalgConverter < lmhlo : : NegOp > , <nl> PointwiseToLinalgConverter < lmhlo : : NotOp > , <nl> + PointwiseToLinalgConverter < lmhlo : : OrOp > , <nl> PointwiseToLinalgConverter < lmhlo : : RealOp > , <nl> PointwiseToLinalgConverter < lmhlo : : RemOp > , <nl> PointwiseToLinalgConverter < lmhlo : : RsqrtOp > , <nl> void populateLHLOToLinalgConversionPattern ( MLIRContext * context , <nl> PointwiseToLinalgConverter < lmhlo : : SqrtOp > , <nl> PointwiseToLinalgConverter < lmhlo : : SubOp > , <nl> PointwiseToLinalgConverter < lmhlo : : TanhOp > , <nl> - PointwiseToLinalgConverter < lmhlo : : IsFiniteOp > , <nl> + PointwiseToLinalgConverter < lmhlo : : XorOp > , <nl> ReduceConverter , <nl> ReshapeOpConverter < lmhlo : : ReshapeOp > , <nl> ReverseConverter < lmhlo : : ReverseOp > , <nl> void populateHLOToLinalgConversionPattern ( MLIRContext * context , <nl> PointwiseToLinalgConverter < mhlo : : ExpOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : FloorOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : ImagOp , false > , <nl> + PointwiseToLinalgConverter < mhlo : : IsFiniteOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : LogOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : MaxOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : MinOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : MulOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : NegOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : NotOp , false > , <nl> + PointwiseToLinalgConverter < mhlo : : OrOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : RealOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : RemOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : RsqrtOp , false > , <nl> void populateHLOToLinalgConversionPattern ( MLIRContext * context , <nl> PointwiseToLinalgConverter < mhlo : : ShiftLeftOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : ShiftRightArithmeticOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : ShiftRightLogicalOp , false > , <nl> + PointwiseToLinalgConverter < mhlo : : SignOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : SinOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : SqrtOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : SubOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : TanhOp , false > , <nl> - PointwiseToLinalgConverter < mhlo : : IsFiniteOp , false > , <nl> + PointwiseToLinalgConverter < mhlo : : XorOp , false > , <nl> ReshapeOpConverter < mhlo : : ReshapeOp , false > , <nl> ReverseConverter < mhlo : : ReverseOp , false > , <nl> TransposeConverter < mhlo : : TransposeOp , false > > ( context ) ; <nl> mmm a / tensorflow / compiler / mlir / hlo / lib / Dialect / mhlo / transforms / transform_unranked_hlo . cc <nl> ppp b / tensorflow / compiler / mlir / hlo / lib / Dialect / mhlo / transforms / transform_unranked_hlo . cc <nl> namespace { <nl> sep fn ( SqrtOp ) sep fn ( TanhOp ) <nl> <nl> / / TODO ( herhut ) : Generate these out of op definitions . 
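The `MAP_XLA_OPERATION_CWISE_UNARY`/`MAP_XLA_OPERATION_CWISE_BINARY` definitions around this hunk are X-macros: each op list is written once and expanded with caller-supplied `fn`/`sep` arguments, so adding `AndOp`, `OrOp`, and `XorOp` to the binary list (as the hunk below does) updates every expansion site at once. A minimal standalone sketch of the idiom, with illustrative names rather than the actual XLA op set:

```cpp
#include <iostream>

// X-macro: the op list is written once; each caller expands it with its own
// fn/sep pair to stamp out per-op code (here: an enum and a name table).
#define MAP_CWISE_BINARY(fn, sep) fn(Add) sep fn(Or) sep fn(Xor)

#define AS_ENUM(op) k##op
#define AS_NAME(op) #op
#define COMMA ,

enum class BinaryOp { MAP_CWISE_BINARY(AS_ENUM, COMMA) };
const char* kOpNames[] = {MAP_CWISE_BINARY(AS_NAME, COMMA)};

int main() {
  // Extending the list (e.g. with Xor, as this change does) automatically
  // extends both the enum and the name table.
  std::cout << kOpNames[static_cast<int>(BinaryOp::kXor)] << "\n";  // Xor
}
```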
<nl> - # define MAP_XLA_OPERATION_CWISE_BINARY ( fn , sep ) \ <nl> - fn ( AddOp ) sep fn ( Atan2Op ) sep fn ( ComplexOp ) sep fn ( DivOp ) sep fn ( MaxOp ) \ <nl> - sep fn ( MinOp ) sep fn ( MulOp ) sep fn ( PowOp ) sep fn ( RemOp ) \ <nl> - sep fn ( ShiftLeftOp ) sep fn ( ShiftRightArithmeticOp ) \ <nl> - sep fn ( ShiftRightLogicalOp ) sep fn ( SubOp ) <nl> + # define MAP_XLA_OPERATION_CWISE_BINARY ( fn , sep ) \ <nl> + fn ( AddOp ) sep fn ( AndOp ) sep fn ( Atan2Op ) sep fn ( ComplexOp ) sep fn ( DivOp ) \ <nl> + sep fn ( MaxOp ) sep fn ( MinOp ) sep fn ( MulOp ) sep fn ( OrOp ) sep fn ( PowOp ) \ <nl> + sep fn ( RemOp ) sep fn ( ShiftLeftOp ) sep fn ( ShiftRightArithmeticOp ) \ <nl> + sep fn ( ShiftRightLogicalOp ) sep fn ( SubOp ) sep fn ( XorOp ) <nl> <nl> / / TODO ( herhut ) : Generate these out of op definitions . <nl> # define MAP_CHLO_OPERATION_CWISE_UNARY ( fn , sep ) \ <nl> mmm a / tensorflow / compiler / mlir / hlo / tests / hlo - legalize - to - lhlo . mlir <nl> ppp b / tensorflow / compiler / mlir / hlo / tests / hlo - legalize - to - lhlo . mlir <nl> func @ abs ( % operand : memref < 2x2xf32 > , % result : memref < 2x2xf32 > ) { <nl> <nl> / / mmm - - <nl> <nl> + / / CHECK - LABEL : func @ and <nl> + func @ and ( % operand0 : memref < 2x2xi32 > , % operand1 : memref < 2x2xi32 > , <nl> + % result : memref < 2x2xi32 > ) { <nl> + % tensor_operand0 = tensor_load % operand0 : memref < 2x2xi32 > <nl> + % tensor_operand1 = tensor_load % operand1 : memref < 2x2xi32 > <nl> + % tensor_result = " mhlo . and " ( % tensor_operand0 , % tensor_operand1 ) <nl> + : ( tensor < 2x2xi32 > , tensor < 2x2xi32 > ) - > tensor < 2x2xi32 > <nl> + / / CHECK : " lmhlo . and " ( % { { . * } } , % { { . * } } , % { { . * } } ) <nl> + tensor_store % tensor_result , % result : memref < 2x2xi32 > <nl> + return <nl> + } <nl> + <nl> + / / mmm - - <nl> + <nl> / / CHECK - LABEL : func @ ceil <nl> func @ ceil ( % operand : memref < 2x2xf32 > , % result : memref < 2x2xf32 > ) { <nl> % tensor_operand = tensor_load % operand : memref < 2x2xf32 > <nl> func @ not ( % operand : memref < 2x2xi32 > , % result : memref < 2x2xi32 > ) { <nl> <nl> / / mmm - - <nl> <nl> + / / CHECK - LABEL : func @ or <nl> + func @ or ( % operand0 : memref < 2x2xi32 > , % operand1 : memref < 2x2xi32 > , <nl> + % result : memref < 2x2xi32 > ) { <nl> + % tensor_operand0 = tensor_load % operand0 : memref < 2x2xi32 > <nl> + % tensor_operand1 = tensor_load % operand1 : memref < 2x2xi32 > <nl> + % tensor_result = " mhlo . or " ( % tensor_operand0 , % tensor_operand1 ) <nl> + : ( tensor < 2x2xi32 > , tensor < 2x2xi32 > ) - > tensor < 2x2xi32 > <nl> + / / CHECK : " lmhlo . or " ( % { { . * } } , % { { . * } } , % { { . 
* } } ) <nl> + tensor_store % tensor_result , % result : memref < 2x2xi32 > <nl> + return <nl> + } <nl> + <nl> + / / mmm - - <nl> + <nl> / / CHECK - LABEL : func @ rsqrt <nl> func @ rsqrt ( % operand : memref < 2x2xf32 > , % result : memref < 2x2xf32 > ) { <nl> % tensor_operand = tensor_load % operand : memref < 2x2xf32 > <nl> func @ tanh ( % operand : memref < 2x2xf32 > , % result : memref < 2x2xf32 > ) { <nl> / / mmm - - <nl> <nl> / / CHECK - LABEL : func @ remainder <nl> - func @ remainder ( % lhs : memref < 2x2xf32 > , % rhs : memref < 2x2xf32 > , % result : memref < 2x2xf32 > ) { <nl> + func @ remainder ( % lhs : memref < 2x2xf32 > , % rhs : memref < 2x2xf32 > , <nl> + % result : memref < 2x2xf32 > ) { <nl> % tensor_lhs = tensor_load % lhs : memref < 2x2xf32 > <nl> % tensor_rhs = tensor_load % rhs : memref < 2x2xf32 > <nl> % tensor_result = " mhlo . remainder " ( % tensor_lhs , % tensor_rhs ) <nl> func @ remainder ( % lhs : memref < 2x2xf32 > , % rhs : memref < 2x2xf32 > , % result : memref < 2x <nl> <nl> / / mmm - - <nl> <nl> + / / CHECK - LABEL : func @ xor <nl> + func @ xor ( % operand0 : memref < 2x2xi32 > , % operand1 : memref < 2x2xi32 > , <nl> + % result : memref < 2x2xi32 > ) { <nl> + % tensor_operand0 = tensor_load % operand0 : memref < 2x2xi32 > <nl> + % tensor_operand1 = tensor_load % operand1 : memref < 2x2xi32 > <nl> + % tensor_result = " mhlo . xor " ( % tensor_operand0 , % tensor_operand1 ) <nl> + : ( tensor < 2x2xi32 > , tensor < 2x2xi32 > ) - > tensor < 2x2xi32 > <nl> + / / CHECK : " lmhlo . xor " ( % { { . * } } , % { { . * } } ) <nl> + tensor_store % tensor_result , % result : memref < 2x2xi32 > <nl> + return <nl> + } <nl> + <nl> + / / mmm - - <nl> + <nl> / / Dynamic shape binary element - wise operation . <nl> / / CHECK - LABEL : func @ add_dyn <nl> func @ add_dyn ( % lhs : tensor < ? x ? xf32 > , % rhs : tensor < ? x ? xf32 > ) { <nl> mmm a / tensorflow / compiler / mlir / hlo / tests / hlo - legalize - to - linalg . mlir <nl> ppp b / tensorflow / compiler / mlir / hlo / tests / hlo - legalize - to - linalg . mlir <nl> func @ integer_and ( % lhs : tensor < 2x2xi32 > , <nl> <nl> / / mmm - - <nl> <nl> + / / CHECK - LABEL : func @ integer_or <nl> + func @ integer_or ( % lhs : tensor < 2x2xi32 > , <nl> + % rhs : tensor < 2x2xi32 > ) - > tensor < 2x2xi32 > { <nl> + / / CHECK : linalg . generic <nl> + / / CHECK : or <nl> + % 0 = " mhlo . or " ( % lhs , % rhs ) : ( tensor < 2x2xi32 > , <nl> + tensor < 2x2xi32 > ) - > tensor < 2x2xi32 > <nl> + return % 0 : tensor < 2x2xi32 > <nl> + } <nl> + <nl> + / / mmm - - <nl> + <nl> + / / CHECK - LABEL : func @ integer_xor <nl> + func @ integer_xor ( % lhs : tensor < 2x2xi32 > , <nl> + % rhs : tensor < 2x2xi32 > ) - > tensor < 2x2xi32 > { <nl> + / / CHECK : linalg . generic <nl> + / / CHECK : xor <nl> + % 0 = " mhlo . xor " ( % lhs , % rhs ) : ( tensor < 2x2xi32 > , <nl> + tensor < 2x2xi32 > ) - > tensor < 2x2xi32 > <nl> + return % 0 : tensor < 2x2xi32 > <nl> + } <nl> + <nl> + / / mmm - - <nl> + <nl> / / CHECK - LABEL : func @ float_cmp <nl> func @ float_cmp ( % lhs : tensor < 2x2xf32 > , <nl> % rhs : tensor < 2x2xf32 > ) - > ( tensor < 2x2xi1 > ) { <nl> mmm a / tensorflow / compiler / mlir / lite / flatbuffer_import . cc <nl> ppp b / tensorflow / compiler / mlir / lite / flatbuffer_import . cc <nl> Operation * BuildVariableOp ( const tflite : : TensorT & tensor , <nl> return op . getOperation ( ) ; <nl> } <nl> auto op = builder . create < tfl : : ConstOp > ( loc , value ) ; <nl> - if ( ! tensor . quantization - > min . 
empty ( ) ) { <nl> + if ( tensor . quantization & & ! tensor . quantization - > min . empty ( ) ) { <nl> if ( auto stats_op = <nl> ConvertMinMaxToStatsOp ( tensor , builder , op . getResult ( ) ) ) { <nl> return stats_op ; <nl> mmm a / tensorflow / compiler / mlir / tensorflow / BUILD <nl> ppp b / tensorflow / compiler / mlir / tensorflow / BUILD <nl> cc_library ( <nl> hdrs = [ " utils / bridge_logger . h " ] , <nl> deps = [ <nl> " : dump_mlir_util " , <nl> + " @ com_google_absl / / absl / strings " , <nl> " @ llvm - project / / llvm : Support " , <nl> " @ llvm - project / / mlir : IR " , <nl> " @ llvm - project / / mlir : Pass " , <nl> mmm a / tensorflow / compiler / mlir / tensorflow / ir / tf_generated_ops . td <nl> ppp b / tensorflow / compiler / mlir / tensorflow / ir / tf_generated_ops . td <nl> Table initializer that takes two tensors for keys and values respectively . <nl> TF_DerivedOperandTypeAttr Tkey = TF_DerivedOperandTypeAttr < 1 > ; <nl> } <nl> <nl> + def TF_InplaceAddOp : TF_Op < " InplaceAdd " , [ AllTypesMatch < [ " x " , " y " ] > , NoSideEffect ] > { <nl> + let summary = " Adds v into specified rows of x . " ; <nl> + <nl> + let description = [ { <nl> + Computes y = x ; y [ i , : ] + = v ; return y . <nl> + } ] ; <nl> + <nl> + let arguments = ( ins <nl> + TF_Tensor : $ x , <nl> + TF_Int32Tensor : $ i , <nl> + TF_Tensor : $ v <nl> + ) ; <nl> + <nl> + let results = ( outs <nl> + TF_Tensor : $ y <nl> + ) ; <nl> + <nl> + TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr < 0 > ; <nl> + } <nl> + <nl> def TF_InplaceUpdateOp : TF_Op < " InplaceUpdate " , [ NoSideEffect ] > { <nl> let summary = " Updates specified rows ' i ' with values ' v ' . " ; <nl> <nl> def TF_IteratorV2Op : TF_Op < " IteratorV2 " , [ ] > { <nl> ) ; <nl> } <nl> <nl> + def TF_KthOrderStatisticOp : TF_Op < " KthOrderStatistic " , [ NoSideEffect ] > { <nl> + let summary = " Computes the Kth order statistic of a data set . The current " ; <nl> + <nl> + let description = [ { <nl> + implementation uses a binary search requiring exactly 32 passes over <nl> + the input data . The running time is linear with respect to input <nl> + size . The median - of - medians algorithm is probably faster , but is <nl> + difficult to implement efficiently in XLA . The implementation imposes <nl> + a total ordering on floats . The ordering is consistent with the usual <nl> + partial order . Positive NaNs are greater than positive <nl> + infinity . Negative NaNs are less than negative infinity . NaNs with <nl> + distinct payloads are treated as distinct . Subnormal numbers are <nl> + preserved ( not flushed to zero ) . Positive infinity is greater than all <nl> + numbers . Negative infinity is less than all numbers . Positive is <nl> + greater than negative zero . There are less than k values greater than <nl> + the kth order statistic . There are at least k values greater than or <nl> + equal to the Kth order statistic . The semantics are not the same as <nl> + top_k_unique . <nl> + } ] ; <nl> + <nl> + let arguments = ( ins <nl> + TF_Float32Tensor : $ input , <nl> + <nl> + I64Attr : $ k <nl> + ) ; <nl> + <nl> + let results = ( outs <nl> + TF_Float32Tensor : $ output <nl> + ) ; <nl> + } <nl> + <nl> def TF_L2LossOp : TF_Op < " L2Loss " , [ NoSideEffect ] > { <nl> let summary = " L2 Loss . " ; <nl> <nl> iterator in ` iterator ` to the first element of ` dataset ` . 
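The `KthOrderStatistic` description above pins down two implementable ideas: a total order on floats that extends the usual partial order (negative NaNs below negative infinity, positive NaNs above positive infinity, positive zero above negative zero) and a binary search that makes exactly 32 linear passes over the input. The same total-ordering trick underlies the `TopKUnique`/`TopKWithUnique` ops introduced further down. A minimal sketch of that strategy, assuming 32-bit IEEE floats and `1 <= k <= data.size()` (this is not the XLA implementation):

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Map a float to a uint32_t key so the usual partial order on floats becomes
// a total order on keys: flip all bits of negative values, set the sign bit
// of non-negative ones. Distinct NaN payloads stay distinct, and +0 compares
// greater than -0, as the op description requires.
uint32_t TotalOrderKey(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  return (bits & 0x80000000u) ? ~bits : (bits | 0x80000000u);
}

// Kth largest element via exactly 32 passes of binary search on key bits;
// each pass is a linear scan, so the search is linear in the input size.
float KthOrderStatistic(const std::vector<float>& data, int64_t k) {
  uint32_t prefix = 0;
  for (int bit = 31; bit >= 0; --bit) {
    uint32_t candidate = prefix | (1u << bit);
    int64_t count = 0;  // elements >= candidate in the total order
    for (float f : data)
      if (TotalOrderKey(f) >= candidate) ++count;
    if (count >= k) prefix = candidate;  // the kth largest key is >= candidate
  }
  // prefix is now the key of the kth largest element; invert the mapping.
  uint32_t bits = (prefix & 0x80000000u) ? (prefix & 0x7fffffffu) : ~prefix;
  float result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;
}

int main() {
  std::vector<float> v = {3.0f, -1.5f, 7.25f, 0.0f, 2.0f};
  std::cout << KthOrderStatistic(v, 2) << "\n";  // 3 (the 2nd largest)
}
```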
<nl> let results = ( outs ) ; <nl> } <nl> <nl> + def TF_MakeUniqueOp : TF_Op < " MakeUnique " , [ NoSideEffect ] > { <nl> + let summary = [ { <nl> + Make all elements in the non - Batch dimension unique , but \ " close \ " to <nl> + } ] ; <nl> + <nl> + let description = [ { <nl> + their initial value . Never returns a sub - normal number . Never returns <nl> + zero . The sign of each input element is always identical to the sign <nl> + of the corresponding output element . Behavior for infinite elements is <nl> + undefined . Behavior for subnormal elements is undefined . <nl> + } ] ; <nl> + <nl> + let arguments = ( ins <nl> + TF_Float32Tensor : $ input <nl> + ) ; <nl> + <nl> + let results = ( outs <nl> + TF_Float32Tensor : $ output <nl> + ) ; <nl> + } <nl> + <nl> def TF_MatMulOp : TF_Op < " MatMul " , [ NoSideEffect , TF_SameOperandsAndResultElementTypeResolveRef ] > { <nl> let summary = [ { <nl> Multiply the matrix " a " by the matrix " b " . <nl> array ( [ [ 1 , 2 , 3 , 1 , 2 , 3 ] , <nl> let hasFolder = 1 ; <nl> } <nl> <nl> + def TF_TopKUniqueOp : TF_Op < " TopKUnique " , [ NoSideEffect ] > { <nl> + let summary = " Returns the TopK unique values in the array in sorted order . " ; <nl> + <nl> + let description = [ { <nl> + The running time is proportional to the product of K and the input <nl> + size . Sorting the whole array is more efficient for sufficiently large <nl> + values of K . The median - of - medians algorithm is probably faster , but <nl> + difficult to implement efficiently in XLA . If there are fewer than K <nl> + unique numbers ( not NANs ) , the results are padded with negative <nl> + infinity . NaNs are never returned . Subnormal numbers are flushed to <nl> + zero . If an element appears at multiple indices , the highest index is <nl> + returned . If a TopK element never appears in the input due to padding <nl> + values , the indices are padded with negative one . If a padding value <nl> + appears in the input and padding is needed , the highest index of the <nl> + padding value will be returned . The semantics are not the same as <nl> + kth_order_statistic . <nl> + } ] ; <nl> + <nl> + let arguments = ( ins <nl> + TF_Float32Tensor : $ input , <nl> + <nl> + I64Attr : $ k <nl> + ) ; <nl> + <nl> + let results = ( outs <nl> + TF_Float32Tensor : $ topk , <nl> + TF_Int32Tensor : $ topk_indices <nl> + ) ; <nl> + } <nl> + <nl> def TF_TopKV2Op : TF_Op < " TopKV2 " , [ NoSideEffect ] > { <nl> let summary = [ { <nl> Finds values and indices of the ` k ` largest elements for the last dimension . <nl> If two elements are equal , the lower - index element appears first . <nl> let verifier = [ { return Verify ( * this ) ; } ] ; <nl> } <nl> <nl> + def TF_TopKWithUniqueOp : TF_Op < " TopKWithUnique " , [ NoSideEffect ] > { <nl> + let summary = " Returns the TopK values in the array in sorted order . " ; <nl> + <nl> + let description = [ { <nl> + This is a combination of MakeUnique and TopKUnique . The returned top - K will <nl> + have its lower bits replaced by iota , thus it will be close to the original <nl> + value but not exactly the same . The running time is proportional to the product <nl> + of K and the input size . NaNs are never returned . Subnormal numbers are flushed <nl> + to zero . 
<nl> + } ] ; <nl> + <nl> + let arguments = ( ins <nl> + TF_Float32Tensor : $ input , <nl> + <nl> + I64Attr : $ k <nl> + ) ; <nl> + <nl> + let results = ( outs <nl> + TF_Float32Tensor : $ topk , <nl> + TF_Int32Tensor : $ topk_indices <nl> + ) ; <nl> + } <nl> + <nl> def TF_TransposeOp : TF_Op < " Transpose " , [ NoSideEffect ] > { <nl> let summary = " Shuffle dimensions of x according to a permutation . " ; <nl> <nl> mmm a / tensorflow / compiler / mlir / tensorflow / ir / tf_ops_n_z . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / ir / tf_ops_n_z . cc <nl> OpFoldResult RankOp : : fold ( ArrayRef < Attribute > operands ) { <nl> auto ranked_type = type . dyn_cast < RankedTensorType > ( ) ; <nl> if ( ! ranked_type ) return { } ; <nl> <nl> - auto output_type = getType ( ) . cast < ShapedType > ( ) ; <nl> + / / DenseIntElementsAttr : : get requires the output type be ranked with static <nl> + / / shape . <nl> + auto output_type = getType ( ) . dyn_cast < RankedTensorType > ( ) ; <nl> + if ( ! output_type | | ! output_type . hasStaticShape ( ) ) return { } ; <nl> + <nl> int32_t rank = ranked_type . getRank ( ) ; <nl> return DenseIntElementsAttr : : get ( output_type , rank ) ; <nl> } <nl> mmm a / tensorflow / compiler / mlir / tensorflow / tests / canonicalize . mlir <nl> ppp b / tensorflow / compiler / mlir / tensorflow / tests / canonicalize . mlir <nl> func @ testRankOfRankedTensor ( % arg0 : tensor < 4x3x2xf32 > ) - > tensor < i32 > { <nl> return % 0 : tensor < i32 > <nl> } <nl> <nl> + / / CHECK - LABEL : testRankOfRankedTensorUnrankedOutput <nl> + func @ testRankOfRankedTensorUnrankedOutput ( % arg0 : tensor < 4x3x2xf32 > ) - > tensor < * xi32 > { <nl> + / / Regression test to make sure we don ' t crash in this case . <nl> + % 0 = " tf . Rank " ( % arg0 ) : ( tensor < 4x3x2xf32 > ) - > tensor < * xi32 > <nl> + return % 0 : tensor < * xi32 > <nl> + } <nl> + <nl> + / / CHECK - LABEL : testRankOfRankedTensorDynamicShapeOutput <nl> + func @ testRankOfRankedTensorDynamicShapeOutput ( % arg0 : tensor < 4x3x2xf32 > ) - > tensor < ? xi32 > { <nl> + / / Regression test to make sure we don ' t crash in this case . <nl> + % 0 = " tf . Rank " ( % arg0 ) : ( tensor < 4x3x2xf32 > ) - > tensor < ? xi32 > <nl> + return % 0 : tensor < ? xi32 > <nl> + } <nl> + <nl> / / CHECK - LABEL : @ foldFill <nl> func @ foldFill ( ) - > ( tensor < 3x2x1xf32 > , tensor < * xf32 > , tensor < * xcomplex < f32 > > ) { <nl> % 0 = " tf . Const " ( ) { value = dense < [ 3 , 2 , 1 ] > : tensor < 3xi32 > } : ( ) - > tensor < 3xi32 > <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / shape_inference . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / shape_inference . cc <nl> LogicalResult InferModuleShape ( ModuleOp module , int64_t max_iterations ) { <nl> return success ( ) ; <nl> } <nl> int64_t producer = producer_or . ValueOrDie ( ) ; <nl> + / / TODO ( jpienaar ) : Clean up propagate_caller_callee_constants if it is no <nl> + / / longer needed . <nl> ShapeInference context ( producer , module . getContext ( ) , <nl> - / * propagate_caller_callee_constants = * / true ) ; <nl> + / * propagate_caller_callee_constants = * / false ) ; <nl> if ( auto main = module . lookupSymbol < mlir : : FuncOp > ( " main " ) ) <nl> context . enqueue ( main ) ; <nl> for ( auto func : module . getOps < FuncOp > ( ) ) context . enqueue ( func ) ; <nl> mmm a / tensorflow / compiler / mlir / tensorflow / utils / bridge_logger . 
cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / utils / bridge_logger . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / compiler / mlir / tensorflow / utils / bridge_logger . h " <nl> <nl> + # include < atomic > <nl> + <nl> + # include " absl / strings / str_split . h " <nl> # include " llvm / ADT / StringRef . h " <nl> # include " llvm / Support / FormatVariadic . h " <nl> # include " mlir / IR / Operation . h " / / from @ llvm - project <nl> limitations under the License . <nl> <nl> namespace tensorflow { <nl> <nl> + / / Counter is used as a prefix for filenames . <nl> + static std : : atomic < int > log_counter ( 0 ) ; <nl> + <nl> BridgeLoggerConfig : : BridgeLoggerConfig ( bool print_module_scope , <nl> bool print_after_only_on_change ) <nl> : mlir : : PassManager : : IRPrinterConfig ( print_module_scope , <nl> - print_after_only_on_change ) { } <nl> + print_after_only_on_change ) { <nl> + const char * log_pass_patterns = getenv ( " MLIR_BRIDGE_LOG_PASS_PATTERNS " ) ; <nl> + if ( log_pass_patterns ) { <nl> + log_pass_patterns_ = <nl> + absl : : StrSplit ( log_pass_patterns , ' , ' , absl : : SkipWhitespace ( ) ) ; <nl> + } <nl> + } <nl> <nl> - / / Logs op to file with name of format ` mlir_bridge - pass_name - file_suffix . mlir ` . <nl> + / / Logs op to file with name of format <nl> + / / ` < log_counter > _mlir_bridge_ < pass_name > _ < file_suffix > . mlir ` . <nl> inline static void Log ( BridgeLoggerConfig : : PrintCallbackFn print_callback , <nl> mlir : : Pass * pass , mlir : : Operation * op , <nl> llvm : : StringRef file_suffix ) { <nl> - std : : string name = <nl> - llvm : : formatv ( " mlir_bridge_ { 0 } _ { 1 } " , pass - > getName ( ) , file_suffix ) . str ( ) ; <nl> + std : : string pass_name = pass - > getName ( ) . str ( ) ; <nl> + <nl> + / / Add 4 - digit counter as prefix so the order of the passes is obvious . <nl> + std : : string name = llvm : : formatv ( " { 0 , 0 + 4 } _mlir_bridge_ { 1 } _ { 2 } " , log_counter + + , <nl> + pass_name , file_suffix ) ; <nl> <nl> std : : unique_ptr < llvm : : raw_ostream > os ; <nl> std : : string filepath ; <nl> inline static void Log ( BridgeLoggerConfig : : PrintCallbackFn print_callback , <nl> void BridgeLoggerConfig : : printBeforeIfEnabled ( mlir : : Pass * pass , <nl> mlir : : Operation * operation , <nl> PrintCallbackFn print_callback ) { <nl> - Log ( print_callback , pass , operation , " before " ) ; <nl> + if ( should_print ( pass ) ) Log ( print_callback , pass , operation , " before " ) ; <nl> } <nl> <nl> void BridgeLoggerConfig : : printAfterIfEnabled ( mlir : : Pass * pass , <nl> mlir : : Operation * operation , <nl> PrintCallbackFn print_callback ) { <nl> - Log ( print_callback , pass , operation , " after " ) ; <nl> + if ( should_print ( pass ) ) Log ( print_callback , pass , operation , " after " ) ; <nl> + } <nl> + <nl> + bool BridgeLoggerConfig : : should_print ( mlir : : Pass * pass ) { <nl> + if ( log_pass_patterns_ . empty ( ) ) return true ; <nl> + <nl> + std : : string pass_name = pass - > getName ( ) . str ( ) ; <nl> + for ( const auto & pattern : log_pass_patterns_ ) { <nl> + if ( pass_name . find ( pattern ) ! 
= std : : string : : npos ) { <nl> + / / pattern matches pass <nl> + return true ; <nl> + } <nl> + } <nl> + / / no pattern matches pass <nl> + VLOG ( 2 ) < < " Not logging pass " < < pass_name <nl> + < < " because it does not match any pattern in " <nl> + " MLIR_BRIDGE_LOG_PASS_PATTERNS " ; <nl> + return false ; <nl> } <nl> <nl> void BridgeTimingConfig : : printTiming ( PrintCallbackFn printCallback ) { <nl> mmm a / tensorflow / compiler / mlir / tensorflow / utils / bridge_logger . h <nl> ppp b / tensorflow / compiler / mlir / tensorflow / utils / bridge_logger . h <nl> limitations under the License . <nl> namespace tensorflow { <nl> <nl> / / Logger for logging / dumping MLIR modules before and after passes in bridge <nl> - / / targeting TPUs . <nl> + / / targeting TPUs . The passes being logged can be restricted via environment <nl> + / / variable ` MLIR_BRIDGE_LOG_PASS_PATTERNS ` which is interpreted as a comma - <nl> + / / separated list of strings , and only passes whose name contains any of those <nl> + / / strings as a substring are logged ( no regex support ) . If <nl> + / / ` MLIR_BRIDGE_LOG_PASS_PATTERNS ` is not defined , then all passes are logged . <nl> class BridgeLoggerConfig : public mlir : : PassManager : : IRPrinterConfig { <nl> public : <nl> explicit BridgeLoggerConfig ( bool print_module_scope = false , <nl> class BridgeLoggerConfig : public mlir : : PassManager : : IRPrinterConfig { <nl> / / with the stream to dump into . <nl> void printAfterIfEnabled ( mlir : : Pass * pass , mlir : : Operation * operation , <nl> PrintCallbackFn print_callback ) override ; <nl> + <nl> + private : <nl> + bool should_print ( mlir : : Pass * pass ) ; <nl> + <nl> + / / Only print passes that match any of these patterns . A pass matches a <nl> + / / pattern if its name contains the pattern as a substring . If <nl> + / / ` log_pass_patterns_ ` is empty , print all passes . <nl> + std : : vector < std : : string > log_pass_patterns_ ; <nl> } ; <nl> <nl> / / Logger for logging / dumping pass pipeline timings after completion . <nl> mmm a / tensorflow / compiler / mlir / xla / mlir_hlo_to_hlo . cc <nl> ppp b / tensorflow / compiler / mlir / xla / mlir_hlo_to_hlo . cc <nl> class ConvertToHloModule { <nl> / / <nl> / / TODO ( hinsu ) : Check for dynamic shapes and exit instead of crashing . <nl> LogicalResult Run ( ) { <nl> + auto main = module_ . lookupSymbol < mlir : : FuncOp > ( " main " ) ; <nl> + if ( ! main ) <nl> + return module_ . emitError ( <nl> + " conversion requires module with ` main ` function " ) ; <nl> + <nl> for ( auto func : module_ . getOps < FuncOp > ( ) ) { <nl> if ( func . empty ( ) ) continue ; <nl> if ( failed ( RunOnFunction ( func ) ) ) return failure ( ) ; <nl> class ConvertToHloModule { <nl> xla : : XlaComputation * result ) ; <nl> <nl> : : xla : : HloModuleProto ConsumeMainProto ( ) { <nl> - return lowered_computation_ [ module_ . lookupSymbol < mlir : : FuncOp > ( " main " ) ] <nl> - . proto ( ) ; <nl> + auto main = module_ . lookupSymbol < mlir : : FuncOp > ( " main " ) ; <nl> + / / This is an invariant check as Run returns failure if there is no main <nl> + / / function and so the main proto shouldn ' t be consumed in that case . <nl> + CHECK ( main ) < < " requires module to have main function " ; / / Crash Ok . <nl> + return lowered_computation_ [ main ] . proto ( ) ; <nl> } <nl> <nl> / / Lower function call to HLO call instruction <nl> mmm a / tensorflow / compiler / mlir / xla / tests / legalize - tf - binary - elementwise . 
mlir <nl> ppp b / tensorflow / compiler / mlir / xla / tests / legalize - tf - binary - elementwise . mlir <nl> func @ bitwise_or ( % arg0 : tensor < 4xi32 > , % arg1 : tensor < 4xi32 > ) - > tensor < 4xi32 > { <nl> return % 0 : tensor < 4xi32 > <nl> } <nl> <nl> + / / CHECK - LABEL : func @ bitwise_xor <nl> + func @ bitwise_xor ( % arg0 : tensor < 4xi32 > , % arg1 : tensor < 4xi32 > ) - > tensor < 4xi32 > { <nl> + / / CHECK - NEXT : mhlo . xor <nl> + % 0 = " tf . BitwiseXor " ( % arg0 , % arg1 ) : ( tensor < 4xi32 > , tensor < 4xi32 > ) - > tensor < 4xi32 > <nl> + return % 0 : tensor < 4xi32 > <nl> + } <nl> + <nl> / / CHECK - LABEL : func @ bitwise_and <nl> func @ bitwise_and ( % arg0 : tensor < 4xi32 > , % arg1 : tensor < 4xi32 > ) - > tensor < 4xi32 > { <nl> / / CHECK - NEXT : mhlo . and <nl> mmm a / tensorflow / compiler / mlir / xla / tests / legalize - tf . mlir <nl> ppp b / tensorflow / compiler / mlir / xla / tests / legalize - tf . mlir <nl> func @ floordiv_dynamic ( % arg0 : tensor < ? x ? xi32 > , % arg1 : tensor < ? xi32 > ) - > tensor < ? <nl> } <nl> <nl> / / CHECK - LABEL : func @ floordiv_unranked <nl> - func @ floordiv_unranked ( % arg0 : tensor < * xi32 > , % arg1 : tensor < * xi32 > ) - > tensor < * xi32 > { <nl> + func @ floordiv_unranked ( % arg0 : tensor < * xf32 > , % arg1 : tensor < * xf32 > ) - > tensor < * xf32 > { <nl> + / / CHECK - NOT : tf . FloorDiv <nl> + % 0 = " tf . FloorDiv " ( % arg0 , % arg1 ) : ( tensor < * xf32 > , tensor < * xf32 > ) - > tensor < * xf32 > <nl> + return % 0 : tensor < * xf32 > <nl> + } <nl> + <nl> + / / CHECK - LABEL : func @ floordiv_int <nl> + func @ floordiv_int ( % arg0 : tensor < * xi32 > , % arg1 : tensor < * xi32 > ) - > tensor < * xi32 > { <nl> / / CHECK : tf . FloorDiv <nl> % 0 = " tf . FloorDiv " ( % arg0 , % arg1 ) : ( tensor < * xi32 > , tensor < * xi32 > ) - > tensor < * xi32 > <nl> return % 0 : tensor < * xi32 > <nl> func @ floormod_dynamic ( % arg0 : tensor < ? x ? xi32 > , % arg1 : tensor < ? xi32 > ) - > tensor < ? <nl> <nl> / / CHECK - LABEL : func @ floormod_unranked <nl> func @ floormod_unranked ( % arg0 : tensor < * xi32 > , % arg1 : tensor < * xi32 > ) - > tensor < * xi32 > { <nl> - / / CHECK : tf . FloorMod <nl> + / / CHECK - NOT : tf . FloorMod <nl> % 0 = " tf . FloorMod " ( % arg0 , % arg1 ) : ( tensor < * xi32 > , tensor < * xi32 > ) - > tensor < * xi32 > <nl> return % 0 : tensor < * xi32 > <nl> } <nl> new file mode 100644 <nl> index 0000000000000 . . a2647d2c29f9a <nl> mmm / dev / null <nl> ppp b / tensorflow / compiler / mlir / xla / tests / translate / missing_main . mlir <nl> <nl> + / / RUN : not tf - mlir - translate - split - input - file - mlir - hlo - to - hlo - text % s 2 > & 1 | FileCheck % s <nl> + <nl> + / / CHECK : conversion requires module with ` main ` <nl> + func @ non_main ( ) { <nl> + % 0 = " mhlo . constant " ( ) { value = opaque < " mhlo " , " 0x0123456789ABCDEF " > : tensor < 4xf32 > } : ( ) - > tensor < 4xf32 > <nl> + return <nl> + } <nl> mmm a / tensorflow / compiler / mlir / xla / transforms / legalize_tf_patterns . td <nl> ppp b / tensorflow / compiler / mlir / xla / transforms / legalize_tf_patterns . 
td <nl> def : Pat < ( TF_ComplexOp $ r , $ i ) , ( HLO_ComplexOp $ r , $ i ) > ; <nl> / / Performs a substitution of FloorDiv , pseudo code below : <nl> / / <nl> / / return floor ( div ( x , y ) ) <nl> - def : Pat < ( TF_FloorDivOp AnyRankedTensor : $ l , AnyRankedTensor : $ r ) , <nl> + def : Pat < ( TF_FloorDivOp AnyTensor : $ l , AnyTensor : $ r ) , <nl> ( HLO_FloorOp <nl> ( HLOClient_BroadcastDivOp $ l , $ r , ( BinBroadcastDimensions $ l , $ r ) ) ) , <nl> [ ( IEEEFloatTensor $ l ) ] > ; <nl> def : Pat < ( TF_FloorDivOp AnyRankedTensor : $ l , AnyRankedTensor : $ r ) , <nl> / / return trunc_mod ! = 0 & & ( y < 0 ! = trunc_mod < 0 ) ? trunc_mod + y <nl> / / Requires static shaped inputs to create constant splats and computation of <nl> / / broadcast attributes . <nl> - def : Pat < ( TF_FloorModOp AnyRankedTensor : $ l , AnyRankedTensor : $ r ) , <nl> + def : Pat < ( TF_FloorModOp AnyTensor : $ l , AnyTensor : $ r ) , <nl> ( HLO_SelectOp <nl> ( HLOClient_BroadcastAndOp <nl> ( HLOClient_BroadcastCompareOp <nl> def : Pat < ( TF_FloorModOp AnyRankedTensor : $ l , AnyRankedTensor : $ r ) , <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> <nl> class DirectLogicalBinaryPat < Op FromOp , Op ToOp > <nl> - : Pat < ( FromOp AnyRankedTensor : $ l , AnyRankedTensor : $ r ) , <nl> + : Pat < ( FromOp AnyTensor : $ l , AnyTensor : $ r ) , <nl> ( ToOp $ l , $ r , ( BinBroadcastDimensions $ l , $ r ) ) , <nl> [ ( SignedIntTensor $ l ) ] > ; <nl> <nl> foreach fromToBinPair = [ [ TF_LogicalAndOp , HLOClient_BroadcastAndOp ] , <nl> [ TF_LogicalOrOp , HLOClient_BroadcastOrOp ] , <nl> + [ TF_BitwiseAndOp , HLOClient_BroadcastAndOp ] , <nl> [ TF_BitwiseOrOp , HLOClient_BroadcastOrOp ] , <nl> - [ TF_BitwiseAndOp , HLOClient_BroadcastAndOp ] ] in <nl> + [ TF_BitwiseXorOp , HLOClient_BroadcastXorOp ] ] in <nl> def : DirectLogicalBinaryPat < fromToBinPair [ 0 ] , fromToBinPair [ 1 ] > ; <nl> <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> mmm a / tensorflow / compiler / mlir / xla / transforms / legalize_tf_with_tf2xla . cc <nl> ppp b / tensorflow / compiler / mlir / xla / transforms / legalize_tf_with_tf2xla . 
cc <nl> bool IsOpAllowedTf2XlaFallback ( Operation * op ) { <nl> TypeID : : get < TF : : IgammaOp > ( ) , <nl> TypeID : : get < TF : : IgammacOp > ( ) , <nl> TypeID : : get < TF : : IgammaGradAOp > ( ) , <nl> + TypeID : : get < TF : : InplaceAddOp > ( ) , <nl> TypeID : : get < TF : : InTopKV2Op > ( ) , <nl> TypeID : : get < TF : : InvertOp > ( ) , <nl> TypeID : : get < TF : : InvOp > ( ) , <nl> + TypeID : : get < TF : : KthOrderStatisticOp > ( ) , <nl> TypeID : : get < TF : : LRNOp > ( ) , <nl> TypeID : : get < TF : : LRNGradOp > ( ) , <nl> TypeID : : get < TF : : LeakyReluGradOp > ( ) , <nl> bool IsOpAllowedTf2XlaFallback ( Operation * op ) { <nl> TypeID : : get < TF : : LogicalOrOp > ( ) , <nl> TypeID : : get < TF : : LogOp > ( ) , <nl> TypeID : : get < TF : : LowerBoundOp > ( ) , <nl> + TypeID : : get < TF : : MakeUniqueOp > ( ) , <nl> TypeID : : get < TF : : MatMulOp > ( ) , <nl> TypeID : : get < TF : : MatrixDiagV3Op > ( ) , <nl> TypeID : : get < TF : : MatrixInverseOp > ( ) , <nl> bool IsOpAllowedTf2XlaFallback ( Operation * op ) { <nl> TypeID : : get < TF : : TensorScatterAddOp > ( ) , <nl> TypeID : : get < TF : : TensorScatterSubOp > ( ) , <nl> TypeID : : get < TF : : TPUEmbeddingActivationsOp > ( ) , <nl> + TypeID : : get < TF : : TopKUniqueOp > ( ) , <nl> + TypeID : : get < TF : : TopKWithUniqueOp > ( ) , <nl> TypeID : : get < TF : : TransposeOp > ( ) , <nl> TypeID : : get < TF : : TridiagonalSolveOp > ( ) , <nl> TypeID : : get < TF : : TruncateDivOp > ( ) , <nl> mmm a / tensorflow / compiler / mlir / xla / transforms / mhlo_to_lhlo_with_xla . cc <nl> ppp b / tensorflow / compiler / mlir / xla / transforms / mhlo_to_lhlo_with_xla . cc <nl> Status ConvertModule ( std : : unique_ptr < HloModule > hlo_module , ModuleOp module , <nl> / / Run all HLO passes to produce an optimized module . <nl> auto result_or = backend - > compiler ( ) - > RunHloPassesAndBufferAssignement ( <nl> std : : move ( hlo_module ) , backend - > default_stream_executor ( ) , <nl> - backend - > memory_allocator ( ) , optimize_xla_hlo ) ; <nl> + optimize_xla_hlo , { backend - > memory_allocator ( ) } ) ; <nl> TF_RETURN_WITH_CONTEXT_IF_ERROR ( result_or . status ( ) , <nl> " running XLA pass pipeline " ) ; <nl> std : : unique_ptr < HloModule > optimized_hlo_module = <nl> mmm a / tensorflow / compiler / xla / client / executable_build_options . h <nl> ppp b / tensorflow / compiler / xla / client / executable_build_options . h <nl> class ExecutableBuildOptions { <nl> return * this ; <nl> } <nl> <nl> + / / Thread pool for parallel compilation . <nl> + tensorflow : : thread : : ThreadPool * compile_thread_pool ( ) const { <nl> + return compile_thread_pool_ ; <nl> + } <nl> + ExecutableBuildOptions & set_run_backend_only ( <nl> + tensorflow : : thread : : ThreadPool * compile_thread_pool ) { <nl> + compile_thread_pool_ = compile_thread_pool ; <nl> + return * this ; <nl> + } <nl> + <nl> private : <nl> int device_ordinal_ = - 1 ; <nl> Shape result_layout_ ; <nl> class ExecutableBuildOptions { <nl> absl : : optional < DeviceAssignment > device_assignment_ ; <nl> bool alias_passthrough_params_ = false ; <nl> bool run_backend_only_ = false ; <nl> + tensorflow : : thread : : ThreadPool * compile_thread_pool_ = nullptr ; <nl> } ; <nl> <nl> } / / namespace xla <nl> mmm a / tensorflow / compiler / xla / client / xla_builder . cc <nl> ppp b / tensorflow / compiler / xla / client / xla_builder . 
cc <nl> StatusOr < XlaComputation > XlaBuilder : : BuildDynamicInferenceGraph ( XlaOp root_op ) { <nl> / / contant False if dimension is static . <nl> / / - Reduce : Convert to reduce or . <nl> / / - Constant : Convert to constant False . <nl> + / / - Reshape , slice , transpose , pad : <nl> + / / Convert into predicate type with same opcode . <nl> / / - Other ops : Not supported . <nl> / / Create the instruction for the new handle . <nl> TF_ASSIGN_OR_RETURN ( HloOpcode opcode , <nl> StatusOr < XlaComputation > XlaBuilder : : BuildDynamicInferenceGraph ( XlaOp root_op ) { <nl> case HloOpcode : : kBroadcast : <nl> case HloOpcode : : kConcatenate : <nl> case HloOpcode : : kReshape : <nl> + case HloOpcode : : kPad : <nl> break ; <nl> case HloOpcode : : kGetDimensionSize : { <nl> int64 dimension = instr_proto - > dimensions ( 0 ) ; <nl> mmm a / tensorflow / compiler / xla / pjrt / cpu_device . cc <nl> ppp b / tensorflow / compiler / xla / pjrt / cpu_device . cc <nl> static const char kCpuPlatformName [ ] = " cpu " ; <nl> <nl> CpuDevice : : CpuDevice ( int id , <nl> std : : unique_ptr < LocalDeviceState > local_device_state ) <nl> - : PjRtDevice ( id , std : : move ( local_device_state ) , <nl> - / * device_kind = * / kCpuPlatformName ) { } <nl> + : PjRtStreamExecutorDevice ( id , std : : move ( local_device_state ) , <nl> + / * device_kind = * / kCpuPlatformName ) { } <nl> <nl> StatusOr < std : : unique_ptr < PjRtClient > > GetCpuClient ( bool asynchronous ) { <nl> TF_ASSIGN_OR_RETURN ( se : : Platform * platform , <nl> StatusOr < std : : unique_ptr < PjRtClient > > GetCpuClient ( bool asynchronous ) { <nl> TF_ASSIGN_OR_RETURN ( LocalClient * client , <nl> ClientLibrary : : GetOrCreateLocalClient ( options ) ) ; <nl> <nl> - std : : vector < std : : unique_ptr < PjRtDevice > > devices ; <nl> + std : : vector < std : : unique_ptr < PjRtStreamExecutorDevice > > devices ; <nl> for ( int i = 0 ; i < client - > device_count ( ) ; + + i ) { <nl> se : : StreamExecutorConfig config ; <nl> config . ordinal = i ; <nl> StatusOr < std : : unique_ptr < PjRtClient > > GetCpuClient ( bool asynchronous ) { <nl> devices . push_back ( std : : move ( device ) ) ; <nl> } <nl> <nl> - return std : : make_unique < PjRtClient > ( <nl> + return std : : unique_ptr < PjRtClient > ( std : : make_unique < PjRtStreamExecutorClient > ( <nl> kCpuName , client , std : : move ( devices ) , / * host_id = * / 0 , <nl> / * allocator = * / nullptr , / * host_memory_allocator = * / nullptr , <nl> / * should_stage_host_to_device_transfers = * / false , <nl> - / * gpu_run_options = * / nullptr ) ; <nl> + / * gpu_run_options = * / nullptr ) ) ; <nl> } <nl> <nl> } / / namespace xla <nl> mmm a / tensorflow / compiler / xla / pjrt / cpu_device . h <nl> ppp b / tensorflow / compiler / xla / pjrt / cpu_device . h <nl> limitations under the License . <nl> <nl> namespace xla { <nl> <nl> - class CpuDevice : public PjRtDevice { <nl> + class CpuDevice : public PjRtStreamExecutorDevice { <nl> public : <nl> CpuDevice ( int id , std : : unique_ptr < LocalDeviceState > local_device_state ) ; <nl> } ; <nl> mmm a / tensorflow / compiler / xla / pjrt / gpu_device . cc <nl> ppp b / tensorflow / compiler / xla / pjrt / gpu_device . cc <nl> namespace xla { <nl> namespace { <nl> <nl> / / A custom PjRtClient that overrides the device assignment method . 
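The `GpuClient` defined below illustrates a specialize-with-fallback pattern: it inherits the base-class constructors, overrides the virtual device-assignment hook for the case it can answer locally, and defers to the (newly renamed) `PjRtStreamExecutorClient` implementation otherwise. A self-contained sketch of the pattern with hypothetical names, not the PjRt API:

```cpp
#include <iostream>

// Base class with a virtual hook providing a generic default.
class BaseClient {
 public:
  virtual ~BaseClient() = default;
  virtual int GetDefaultDeviceAssignment(int num_replicas) const {
    return 0;  // generic global assignment
  }
};

// Subclass: inherit all constructors, specialize one hook, and fall back
// to the base implementation when the special case does not apply.
class GpuLikeClient : public BaseClient {
 public:
  using BaseClient::BaseClient;
  int GetDefaultDeviceAssignment(int num_replicas) const override {
    if (num_replicas == 1) return 42;  // cheap single-replica answer
    return BaseClient::GetDefaultDeviceAssignment(num_replicas);
  }
};

int main() {
  GpuLikeClient client;
  std::cout << client.GetDefaultDeviceAssignment(1) << "\n";  // 42
  std::cout << client.GetDefaultDeviceAssignment(8) << "\n";  // 0 (fallback)
}
```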
<nl> - class GpuClient : public xla : : PjRtClient { <nl> + class GpuClient : public xla : : PjRtStreamExecutorClient { <nl> public : <nl> - using xla : : PjRtClient : : PjRtClient ; <nl> + using xla : : PjRtStreamExecutorClient : : PjRtStreamExecutorClient ; <nl> <nl> xla : : StatusOr < xla : : DeviceAssignment > GetDefaultDeviceAssignment ( <nl> int num_replicas , int num_partitions ) const override ; <nl> xla : : StatusOr < xla : : DeviceAssignment > GpuClient : : GetDefaultDeviceAssignment ( <nl> return assignment ; <nl> } <nl> / / Fallback to default global device assignment if we can ' t run locally . <nl> - return PjRtClient : : GetDefaultDeviceAssignment ( num_replicas , num_partitions ) ; <nl> + return PjRtStreamExecutorClient : : GetDefaultDeviceAssignment ( num_replicas , <nl> + num_partitions ) ; <nl> } <nl> <nl> / / Builds an xla : : LocalClient for the GPU platform . <nl> StatusOr < std : : string > NcclIdStore : : GetNcclUniqueId ( <nl> return result . first - > second ; <nl> } <nl> <nl> - std : : vector < std : : unique_ptr < PjRtDevice > > BuildLocalDevices ( <nl> + std : : vector < std : : unique_ptr < PjRtStreamExecutorDevice > > BuildLocalDevices ( <nl> std : : vector < std : : unique_ptr < LocalDeviceState > > local_device_states ) { <nl> - std : : vector < std : : unique_ptr < PjRtDevice > > devices ; <nl> + std : : vector < std : : unique_ptr < PjRtStreamExecutorDevice > > devices ; <nl> for ( auto & local_device : local_device_states ) { <nl> int device_ordinal = local_device - > device_ordinal ( ) ; <nl> const se : : DeviceDescription & description = <nl> std : : vector < std : : unique_ptr < PjRtDevice > > BuildLocalDevices ( <nl> Status BuildDistributedDevices ( <nl> std : : vector < std : : unique_ptr < LocalDeviceState > > local_device_states , <nl> std : : shared_ptr < DistributedRuntimeClient > distributed_client , int node_id , <nl> - std : : vector < std : : unique_ptr < PjRtDevice > > * devices , <nl> + std : : vector < std : : unique_ptr < PjRtStreamExecutorDevice > > * devices , <nl> gpu : : GpuExecutableRunOptions * gpu_executable_run_options ) { <nl> LocalTopologyProto local_topology ; <nl> local_topology . set_node_id ( node_id ) ; <nl> Status BuildDistributedDevices ( <nl> GpuDevice : : GpuDevice ( int id , <nl> std : : unique_ptr < LocalDeviceState > local_device_state , <nl> std : : string device_kind , int node_id ) <nl> - : PjRtDevice ( id , std : : move ( local_device_state ) , std : : move ( device_kind ) , <nl> - node_id ) { } <nl> + : PjRtStreamExecutorDevice ( id , std : : move ( local_device_state ) , <nl> + std : : move ( device_kind ) , node_id ) { } <nl> <nl> StatusOr < std : : unique_ptr < PjRtClient > > GetGpuClient ( <nl> bool asynchronous , const GpuAllocatorConfig & allocator_config , <nl> StatusOr < std : : unique_ptr < PjRtClient > > GetGpuClient ( <nl> auto host_memory_allocator = <nl> GetGpuHostAllocator ( local_device_states . front ( ) - > executor ( ) ) ; <nl> <nl> - std : : vector < std : : unique_ptr < PjRtDevice > > devices ; <nl> + std : : vector < std : : unique_ptr < PjRtStreamExecutorDevice > > devices ; <nl> auto gpu_run_options = absl : : make_unique < gpu : : GpuExecutableRunOptions > ( ) ; <nl> if ( distributed_client ) { <nl> TF_RETURN_IF_ERROR ( BuildDistributedDevices ( <nl> mmm a / tensorflow / compiler / xla / pjrt / gpu_device . h <nl> ppp b / tensorflow / compiler / xla / pjrt / gpu_device . h <nl> limitations under the License . 
<nl> <nl> namespace xla { <nl> <nl> - class GpuDevice : public PjRtDevice { <nl> + class GpuDevice : public PjRtStreamExecutorDevice { <nl> public : <nl> GpuDevice ( int id , std : : unique_ptr < LocalDeviceState > local_device_state , <nl> std : : string device_kind , int node_id ) ; <nl> mmm a / tensorflow / compiler / xla / pjrt / interpreter_device . cc <nl> ppp b / tensorflow / compiler / xla / pjrt / interpreter_device . cc <nl> static const char kInterpreterPlatformName [ ] = " interpreter " ; <nl> <nl> InterpreterDevice : : InterpreterDevice ( <nl> int id , std : : unique_ptr < LocalDeviceState > local_device_state ) <nl> - : PjRtDevice ( id , std : : move ( local_device_state ) , <nl> - / * device_kind = * / kInterpreterPlatformName ) { } <nl> + : PjRtStreamExecutorDevice ( id , std : : move ( local_device_state ) , <nl> + / * device_kind = * / kInterpreterPlatformName ) { } <nl> <nl> StatusOr < std : : unique_ptr < PjRtClient > > GetInterpreterClient ( ) { <nl> TF_ASSIGN_OR_RETURN ( se : : Platform * platform , <nl> StatusOr < std : : unique_ptr < PjRtClient > > GetInterpreterClient ( ) { <nl> TF_ASSIGN_OR_RETURN ( LocalClient * client , <nl> ClientLibrary : : GetOrCreateLocalClient ( options ) ) ; <nl> <nl> - std : : vector < std : : unique_ptr < PjRtDevice > > devices ; <nl> + std : : vector < std : : unique_ptr < PjRtStreamExecutorDevice > > devices ; <nl> se : : StreamExecutor * executor = <nl> client - > backend ( ) . stream_executor ( 0 ) . ValueOrDie ( ) ; <nl> auto device_state = absl : : make_unique < LocalDeviceState > ( <nl> StatusOr < std : : unique_ptr < PjRtClient > > GetInterpreterClient ( ) { <nl> absl : : make_unique < InterpreterDevice > ( 0 , std : : move ( device_state ) ) ; <nl> devices . push_back ( std : : move ( device ) ) ; <nl> <nl> - return std : : make_unique < PjRtClient > ( <nl> + return std : : unique_ptr < PjRtClient > ( std : : make_unique < PjRtStreamExecutorClient > ( <nl> " interpreter " , client , std : : move ( devices ) , / * host_id = * / 0 , <nl> / * allocator = * / nullptr , / * host_memory_allocator = * / nullptr , <nl> / * should_stage_host_to_device_transfers = * / false , <nl> - / * gpu_run_options = * / nullptr ) ; <nl> + / * gpu_run_options = * / nullptr ) ) ; <nl> } <nl> <nl> } / / namespace xla <nl> mmm a / tensorflow / compiler / xla / pjrt / interpreter_device . h <nl> ppp b / tensorflow / compiler / xla / pjrt / interpreter_device . h <nl> limitations under the License . <nl> <nl> namespace xla { <nl> <nl> - class InterpreterDevice : public PjRtDevice { <nl> + class InterpreterDevice : public PjRtStreamExecutorDevice { <nl> public : <nl> InterpreterDevice ( int id , <nl> std : : unique_ptr < LocalDeviceState > local_device_state ) ; <nl> mmm a / tensorflow / compiler / xla / pjrt / pjrt_client . cc <nl> ppp b / tensorflow / compiler / xla / pjrt / pjrt_client . cc <nl> limitations under the License . 
<nl> <nl> namespace xla { <nl> <nl> - PjRtPlatformId PjRtDevice : : platform_id ( ) const { <nl> + PjRtPlatformId PjRtStreamExecutorDevice : : platform_id ( ) const { <nl> return client_ - > platform_id ( ) ; <nl> } <nl> - const std : : string & PjRtDevice : : platform_name ( ) const { <nl> + const std : : string & PjRtStreamExecutorDevice : : platform_name ( ) const { <nl> return client_ - > platform_name ( ) ; <nl> } <nl> <nl> - StatusOr < LocalDeviceState * > PjRtDevice : : GetLocalDeviceState ( ) const { <nl> + StatusOr < LocalDeviceState * > PjRtStreamExecutorDevice : : GetLocalDeviceState ( ) <nl> + const { <nl> if ( local_device_state_ ) { <nl> return local_device_state_ . get ( ) ; <nl> } <nl> return InvalidArgument ( " Device % s is not a local device . " , DebugString ( ) ) ; <nl> } <nl> <nl> - std : : string PjRtDevice : : DebugString ( ) const { <nl> + std : : string PjRtStreamExecutorDevice : : DebugString ( ) const { <nl> return absl : : StrCat ( platform_name ( ) , " : " , id ( ) ) ; <nl> } <nl> <nl> StatusOr < DeviceAssignment > DevicesToDeviceAssignment ( <nl> devices [ replica ] . size ( ) , replica , devices [ 0 ] . size ( ) ) ; <nl> } <nl> for ( int partition = 0 ; partition < devices [ replica ] . size ( ) ; + + partition ) { <nl> - if ( devices [ 0 ] [ 0 ] - > platform_id ( ) ! = <nl> - devices [ replica ] [ partition ] - > platform_id ( ) ) { <nl> + if ( devices [ 0 ] [ 0 ] - > client ( ) - > platform_id ( ) ! = <nl> + devices [ replica ] [ partition ] - > client ( ) - > platform_id ( ) ) { <nl> return InvalidArgument ( <nl> " Device assignment passed to Compile ( ) must have devices of a " <nl> " single kind , got % s for replica 0 partition 0 and % s for replica " <nl> " % d partition % d . " , <nl> - devices [ 0 ] [ 0 ] - > platform_name ( ) , <nl> - devices [ replica ] [ partition ] - > platform_name ( ) , replica , partition ) ; <nl> + devices [ 0 ] [ 0 ] - > client ( ) - > platform_name ( ) , <nl> + devices [ replica ] [ partition ] - > client ( ) - > platform_name ( ) , replica , <nl> + partition ) ; <nl> } <nl> xla_assignment ( replica , partition ) = devices [ replica ] [ partition ] - > id ( ) ; <nl> } <nl> class CpuAllocator : public tensorflow : : Allocator { <nl> } <nl> } ; <nl> <nl> - PjRtClient : : PjRtClient ( <nl> + PjRtStreamExecutorClient : : PjRtStreamExecutorClient ( <nl> std : : string platform_name , LocalClient * client , <nl> - std : : vector < std : : unique_ptr < PjRtDevice > > devices , int host_id , <nl> + std : : vector < std : : unique_ptr < PjRtStreamExecutorDevice > > devices , int host_id , <nl> std : : unique_ptr < se : : DeviceMemoryAllocator > allocator , <nl> std : : unique_ptr < tensorflow : : Allocator > host_memory_allocator , <nl> bool should_stage_host_to_device_transfers , <nl> PjRtClient : : PjRtClient ( <nl> platform_name_ ( std : : move ( platform_name ) ) , <nl> client_ ( client ) , <nl> host_memory_allocator_ ( std : : move ( host_memory_allocator ) ) , <nl> - devices_ ( std : : move ( devices ) ) , <nl> + owned_devices_ ( std : : move ( devices ) ) , <nl> host_id_ ( host_id ) , <nl> owned_allocator_ ( std : : move ( allocator ) ) , <nl> should_stage_host_to_device_transfers_ ( <nl> PjRtClient : : PjRtClient ( <nl> host_memory_allocator_ = std : : make_unique < CpuAllocator > ( ) ; <nl> } <nl> <nl> - for ( const std : : unique_ptr < PjRtDevice > & device : devices_ ) { <nl> + for ( const std : : unique_ptr < PjRtStreamExecutorDevice > & device : <nl> + owned_devices_ ) { <nl> + devices_ . push_back ( device . 
get ( ) ) ; <nl> CHECK ( id_to_device_ . insert ( { device - > id ( ) , device . get ( ) } ) . second ) <nl> < < " Duplicate device id : " < < device - > id ( ) ; <nl> <nl> - if ( device - > IsLocalDevice ( ) ) { <nl> - int idx = device - > local_device_id ( ) ; <nl> + if ( device - > IsAddressable ( ) ) { <nl> + int idx = device - > local_hardware_id ( ) ; <nl> if ( idx > = local_devices_ . size ( ) ) { <nl> local_devices_ . resize ( idx + 1 ) ; <nl> } <nl> PjRtClient : : PjRtClient ( <nl> } <nl> } <nl> <nl> - StatusOr < DeviceAssignment > PjRtClient : : GetDefaultDeviceAssignment ( <nl> + StatusOr < DeviceAssignment > PjRtStreamExecutorClient : : GetDefaultDeviceAssignment ( <nl> int num_replicas , int num_partitions ) const { <nl> return client_ - > backend ( ) . computation_placer ( ) - > AssignDevices ( num_replicas , <nl> num_partitions ) ; <nl> } <nl> <nl> - std : : unique_ptr < HloCostAnalysis > PjRtClient : : GetHloCostAnalysis ( ) { <nl> + std : : unique_ptr < HloCostAnalysis > <nl> + PjRtStreamExecutorClient : : GetHloCostAnalysis ( ) { <nl> return absl : : make_unique < HloCostAnalysis > ( <nl> client_ - > backend ( ) . compiler ( ) - > ShapeSizeBytesFunction ( ) ) ; <nl> } <nl> StatusOr < std : : unique_ptr < PjRtBuffer > > AllocateDestinationBuffer ( <nl> return InvalidArgument ( " Can ' t make a buffer from an empty tuple " ) ; <nl> } <nl> <nl> + auto * se_client = tensorflow : : down_cast < PjRtStreamExecutorClient * > ( client ) ; <nl> TransferManager * transfer_manager = <nl> - client - > client ( ) - > backend ( ) . transfer_manager ( ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - ScopedShapedBuffer dst_buffer , <nl> - transfer_manager - > AllocateScopedShapedBuffer ( <nl> - on_host_shape , client - > allocator ( ) , local_device - > device_ordinal ( ) ) ) ; <nl> + se_client - > client ( ) - > backend ( ) . transfer_manager ( ) ; <nl> + TF_ASSIGN_OR_RETURN ( ScopedShapedBuffer dst_buffer , <nl> + transfer_manager - > AllocateScopedShapedBuffer ( <nl> + on_host_shape , se_client - > allocator ( ) , <nl> + local_device - > device_ordinal ( ) ) ) ; <nl> if ( local_device - > allocation_model ( ) = = <nl> LocalDeviceState : : kComputeSynchronized ) { <nl> if ( copy_stream = = nullptr ) { <nl> void PjRtBuffer : : ScopedHold : : AddToInput ( <nl> <nl> bool PjRtBuffer : : IsOnCpu ( ) const { return client ( ) - > platform_id ( ) = = kCpuId ; } <nl> <nl> - StatusOr < std : : unique_ptr < PjRtBuffer > > PjRtClient : : BufferFromHostBuffer ( <nl> + StatusOr < std : : unique_ptr < PjRtBuffer > > <nl> + PjRtStreamExecutorClient : : BufferFromHostBuffer ( <nl> const void * data , const Shape & shape , <nl> HostBufferSemantics host_buffer_semantics , <nl> std : : shared_ptr < void > buffer_reference , PjRtDevice * device ) { <nl> - tensorflow : : profiler : : TraceMe traceme ( " PjRtClient : : BufferFromHostBuffer " ) ; <nl> - VLOG ( 2 ) < < " PjRtClient : : BufferFromHostBuffer : shape : " < < shape . ToString ( ) <nl> - < < " device : " < < device - > DebugString ( ) ; <nl> + tensorflow : : profiler : : TraceMe traceme ( <nl> + " PjRtStreamExecutorClient : : BufferFromHostBuffer " ) ; <nl> + VLOG ( 2 ) < < " PjRtStreamExecutorClient : : BufferFromHostBuffer : shape : " <nl> + < < shape . ToString ( ) < < " device : " < < device - > DebugString ( ) ; <nl> if ( shape . 
IsTuple ( ) ) { <nl> return InvalidArgument ( " Use BufferFromHostLiteral to transfer a tuple " ) ; <nl> } <nl> TF_ASSIGN_OR_RETURN ( LocalDeviceState * local_device , <nl> - device - > GetLocalDeviceState ( ) ) ; <nl> + tensorflow : : down_cast < PjRtStreamExecutorDevice * > ( device ) <nl> + - > GetLocalDeviceState ( ) ) ; <nl> int64 size = ShapeUtil : : ByteSizeOf ( shape ) ; <nl> <nl> TransferManager * transfer_manager = client ( ) - > backend ( ) . transfer_manager ( ) ; <nl> StatusOr < std : : unique_ptr < PjRtBuffer > > PjRtClient : : BufferFromHostBuffer ( <nl> return py_buffer ; <nl> } <nl> <nl> - StatusOr < std : : unique_ptr < PjRtBuffer > > PjRtClient : : CreateUninitializedBuffer ( <nl> - const Shape & shape , PjRtDevice * device ) { <nl> + StatusOr < std : : unique_ptr < PjRtBuffer > > <nl> + PjRtStreamExecutorClient : : CreateUninitializedBuffer ( const Shape & shape , <nl> + PjRtDevice * device ) { <nl> return CreateUninitializedBuffer ( shape , device , nullptr ) ; <nl> } <nl> <nl> - StatusOr < std : : unique_ptr < PjRtBuffer > > PjRtClient : : CreateUninitializedBuffer ( <nl> + StatusOr < std : : unique_ptr < PjRtBuffer > > <nl> + PjRtStreamExecutorClient : : CreateUninitializedBuffer ( <nl> const Shape & shape , PjRtDevice * device , <nl> std : : shared_ptr < BufferSequencingEvent > definition_event ) { <nl> tensorflow : : profiler : : TraceMe traceme ( <nl> - " PjRtClient : : CreateUninitializedBuffer " ) ; <nl> - VLOG ( 2 ) < < " PjRtClient : : CreateUninitializedBuffer : shape : " <nl> + " PjRtStreamExecutorClient : : CreateUninitializedBuffer " ) ; <nl> + VLOG ( 2 ) < < " PjRtStreamExecutorClient : : CreateUninitializedBuffer : shape : " <nl> < < shape . ToString ( ) < < " device : " < < device - > DebugString ( ) ; <nl> TF_ASSIGN_OR_RETURN ( LocalDeviceState * local_device , <nl> - device - > GetLocalDeviceState ( ) ) ; <nl> + tensorflow : : down_cast < PjRtStreamExecutorDevice * > ( device ) <nl> + - > GetLocalDeviceState ( ) ) ; <nl> <nl> TransferManager * transfer_manager = client ( ) - > backend ( ) . transfer_manager ( ) ; <nl> TF_ASSIGN_OR_RETURN ( Shape compact_shape , <nl> StatusOr < std : : unique_ptr < PjRtBuffer > > PjRtClient : : CreateUninitializedBuffer ( <nl> definition_event ) ; <nl> } <nl> <nl> - StatusOr < std : : unique_ptr < PjRtBuffer > > PjRtClient : : BufferFromHostLiteral ( <nl> - const LiteralSlice & literal , PjRtDevice * device ) { <nl> - tensorflow : : profiler : : TraceMe traceme ( " PjRtClient : : BufferFromHostLiteral " ) ; <nl> - VLOG ( 2 ) < < " PjRtClient : : BufferFromHostLiteral : shape : " <nl> + StatusOr < std : : unique_ptr < PjRtBuffer > > <nl> + PjRtStreamExecutorClient : : BufferFromHostLiteral ( const LiteralSlice & literal , <nl> + PjRtDevice * device ) { <nl> + tensorflow : : profiler : : TraceMe traceme ( <nl> + " PjRtStreamExecutorClient : : BufferFromHostLiteral " ) ; <nl> + VLOG ( 2 ) < < " PjRtStreamExecutorClient : : BufferFromHostLiteral : shape : " <nl> < < literal . shape ( ) . ToString ( ) < < " device : " < < device - > DebugString ( ) ; <nl> TF_ASSIGN_OR_RETURN ( LocalDeviceState * local_device , <nl> - device - > GetLocalDeviceState ( ) ) ; <nl> + tensorflow : : down_cast < PjRtStreamExecutorDevice * > ( device ) <nl> + - > GetLocalDeviceState ( ) ) ; <nl> <nl> TransferManager * transfer_manager = client ( ) - > backend ( ) . 
transfer_manager ( ) ; <nl> TF_ASSIGN_OR_RETURN ( <nl> StatusOr < std : : unique_ptr < PjRtBuffer > > PjRtClient : : BufferFromHostLiteral ( <nl> return py_buffer ; <nl> } <nl> <nl> - void PjRtClient : : MakeCrossHostReceiveBuffers ( <nl> + void PjRtStreamExecutorClient : : MakeCrossHostReceiveBuffers ( <nl> absl : : Span < const Shape > shapes , PjRtDevice * device , <nl> PjRtCrossHostRecvNotifier & & notifier ) { <nl> if ( shapes . empty ( ) ) { <nl> void PjRtClient : : MakeCrossHostReceiveBuffers ( <nl> return ; <nl> } <nl> <nl> - auto local_device_or = device - > GetLocalDeviceState ( ) ; <nl> + auto local_device_or = <nl> + tensorflow : : down_cast < PjRtStreamExecutorDevice * > ( device ) <nl> + - > GetLocalDeviceState ( ) ; <nl> if ( ! local_device_or . ok ( ) ) { <nl> notifier ( local_device_or . status ( ) ) ; <nl> return ; <nl> void PjRtClient : : MakeCrossHostReceiveBuffers ( <nl> } <nl> <nl> / / Transfer the given literal to the infeed queue of the given local device . <nl> - Status PjRtDevice : : TransferToInfeed ( const LiteralSlice & literal ) const { <nl> + Status PjRtStreamExecutorDevice : : TransferToInfeed ( <nl> + const LiteralSlice & literal ) const { <nl> / / Only support infeed to local device . <nl> TF_ASSIGN_OR_RETURN ( LocalDeviceState * local_device , GetLocalDeviceState ( ) ) ; <nl> return local_device - > client ( ) - > TransferToInfeedLocal ( <nl> literal , local_device - > device_ordinal ( ) ) ; <nl> } <nl> <nl> - StatusOr < Literal > PjRtDevice : : TransferFromOutfeed ( const Shape & shape ) const { <nl> + StatusOr < Literal > PjRtStreamExecutorDevice : : TransferFromOutfeed ( <nl> + const Shape & shape ) const { <nl> TF_ASSIGN_OR_RETURN ( LocalDeviceState * local_device , GetLocalDeviceState ( ) ) ; <nl> return local_device - > client ( ) - > TransferFromOutfeedLocal ( <nl> shape , local_device - > device_ordinal ( ) ) ; <nl> } <nl> <nl> - StatusOr < PjRtDevice * > PjRtClient : : LookupLocalDevice ( int local_device_id ) const { <nl> + StatusOr < PjRtDevice * > PjRtStreamExecutorClient : : LookupAddressableDevice ( <nl> + int local_hardware_id ) const { <nl> for ( auto * device : local_devices_ ) { <nl> - if ( local_device_id = = device - > local_device_id ( ) ) { <nl> + if ( local_hardware_id = = device - > local_hardware_id ( ) ) { <nl> return device ; <nl> } <nl> } <nl> - return InvalidArgument ( " No matching device found for local_device_id % d " , <nl> - local_device_id ) ; <nl> + return InvalidArgument ( " No matching device found for local_hardware_id % d " , <nl> + local_hardware_id ) ; <nl> } <nl> <nl> PjRtBuffer : : PjRtBuffer ( Shape on_host_shape , Shape on_device_shape , <nl> PjRtBuffer : : ~ PjRtBuffer ( ) { <nl> } <nl> <nl> int64 PjRtBuffer : : OnDeviceSizeInBytes ( ) const { <nl> - return client_ - > client ( ) <nl> + return tensorflow : : down_cast < PjRtStreamExecutorClient * > ( client_ ) <nl> + - > client ( ) <nl> - > backend ( ) <nl> . transfer_manager ( ) <nl> - > GetByteSizeRequirement ( on_device_shape_ ) ; <nl> StatusOr < std : : shared_ptr < TrackedDeviceBuffer > > PjRtBuffer : : Release ( <nl> / / the final set of usage events . 
<nl> events = device_buffer - > LockUseAndTransferUsageEvents ( ) ; <nl> } <nl> - LocalDeviceState * local_device_state = device_ - > local_device_state ( ) ; <nl> + LocalDeviceState * local_device_state = <nl> + tensorflow : : down_cast < PjRtStreamExecutorDevice * > ( device_ ) <nl> + - > local_device_state ( ) ; <nl> if ( wait_for_operations_to_complete ) { <nl> / / Block the host until all usage events have completed . Usage events <nl> / / dominate definition events , so this also waits for the buffer to be <nl> PjRtBuffer : : CopyToHostAsyncInternal ( bool discard_cached_copy , <nl> } <nl> ScopedHold device_buffer ( this , ScopedHold : : kUsage ) ; <nl> std : : shared_ptr < HostValue > host_value ; <nl> - LocalDeviceState * local_device = device_ - > local_device_state ( ) ; <nl> + LocalDeviceState * local_device = <nl> + tensorflow : : down_cast < PjRtStreamExecutorDevice * > ( device_ ) <nl> + - > local_device_state ( ) ; <nl> se : : Stream * stream = local_device - > GetDeviceToHostStream ( ) ; <nl> const xla : : Layout & host_layout = <nl> layout . has_value ( ) ? layout . value ( ) : on_host_shape_ . layout ( ) ; <nl> PjRtBuffer : : CopyToHostAsyncInternal ( bool discard_cached_copy , <nl> host_value - > value = std : : make_shared < Literal > ( host_shape ) ; <nl> ShapedBuffer shaped_buffer = <nl> device_buffer - > AsShapedBuffer ( host_shape , on_device_shape_ ) ; <nl> - client_ - > client ( ) - > backend ( ) . transfer_manager ( ) - > TransferLiteralFromDevice ( <nl> - stream , shaped_buffer , host_value - > value . get ( ) , <nl> - [ host_value ] ( Status done_status ) { <nl> - host_value - > status = done_status ; <nl> - host_value - > ready . Notify ( ) ; <nl> - } ) ; <nl> + tensorflow : : down_cast < PjRtStreamExecutorClient * > ( client_ ) <nl> + - > client ( ) <nl> + - > backend ( ) <nl> + . transfer_manager ( ) <nl> + - > TransferLiteralFromDevice ( stream , shaped_buffer , <nl> + host_value - > value . get ( ) , <nl> + [ host_value ] ( Status done_status ) { <nl> + host_value - > status = done_status ; <nl> + host_value - > ready . Notify ( ) ; <nl> + } ) ; <nl> <nl> auto usage_event = std : : make_shared < BufferSequencingEvent > ( ) ; <nl> StatusOr < EventPool : : Handle > event_or = <nl> PjRtBuffer : : CopyToHostAsyncInternal ( bool discard_cached_copy , <nl> <nl> StatusOr < std : : shared_ptr < Literal > > PjRtBuffer : : ToLiteral ( <nl> const bool discard_cached_copy , absl : : optional < xla : : Layout > layout ) { <nl> - tensorflow : : profiler : : TraceMe traceme ( " PjRtClient : : ToLiteral " ) ; <nl> + tensorflow : : profiler : : TraceMe traceme ( " PjRtStreamExecutorClient : : ToLiteral " ) ; <nl> TF_ASSIGN_OR_RETURN ( std : : shared_ptr < HostValue > host_value , <nl> CopyToHostAsyncInternal ( discard_cached_copy , layout ) ) ; <nl> if ( host_value = = nullptr ) { <nl> PjRtBuffer : : CopyToDeviceHelper ( <nl> / / StallStreamOnError only makes sure the destination device is ok , so <nl> / / make sure that the src buffer remains valid until after any transfers <nl> / / have completed . <nl> - device_ - > local_device_state ( ) - > ThenRelease ( transfer_stream , <nl> - src_device_buffer ) ; <nl> + tensorflow : : down_cast < PjRtStreamExecutorDevice * > ( device_ ) <nl> + - > local_device_state ( ) <nl> + - > ThenRelease ( transfer_stream , src_device_buffer ) ; <nl> } <nl> return copy_event_or . 
status ( ) ; <nl> } <nl> StatusOr < std : : unique_ptr < PjRtBuffer > > PjRtBuffer : : CopyToDevice ( <nl> TF_ASSIGN_OR_RETURN ( std : : shared_ptr < Literal > literal , ToLiteral ( ) ) ; <nl> return dst_device - > client ( ) - > BufferFromHostBuffer ( <nl> literal - > untyped_data ( ) , literal - > shape ( ) , <nl> - PjRtClient : : HostBufferSemantics : : kZeroCopy , nullptr , dst_device ) ; <nl> + PjRtStreamExecutorClient : : HostBufferSemantics : : kZeroCopy , nullptr , <nl> + dst_device ) ; <nl> } <nl> <nl> - TF_ASSIGN_OR_RETURN ( LocalDeviceState * dst_local_device , <nl> - dst_device - > GetLocalDeviceState ( ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( <nl> + LocalDeviceState * dst_local_device , <nl> + tensorflow : : down_cast < PjRtStreamExecutorDevice * > ( dst_device ) <nl> + - > GetLocalDeviceState ( ) ) ; <nl> LocalDeviceState * transfer_local_device = <nl> - client_ - > EnqueueD2DTransfersOnSrcStream ( ) ? device_ - > local_device_state ( ) <nl> - : dst_local_device ; <nl> + tensorflow : : down_cast < PjRtStreamExecutorClient * > ( client_ ) <nl> + - > EnqueueD2DTransfersOnSrcStream ( ) <nl> + ? tensorflow : : down_cast < PjRtStreamExecutorDevice * > ( device_ ) <nl> + - > local_device_state ( ) <nl> + : dst_local_device ; <nl> CHECK_EQ ( dst_local_device - > allocation_model ( ) , <nl> transfer_local_device - > allocation_model ( ) ) ; <nl> <nl> StatusOr < std : : unique_ptr < PjRtBuffer > > PjRtBuffer : : CopyToDevice ( <nl> / / alternative is to ensure , before freeing the buffer , that the compute <nl> / / stream is synchronized past the transfer , but it seems better to hold onto <nl> / / the buffer too long than to stall the compute stream . <nl> - RecordUsage ( std : : move ( src_device_buffer ) , device_ - > local_device_state ( ) , <nl> + RecordUsage ( std : : move ( src_device_buffer ) , <nl> + tensorflow : : down_cast < PjRtStreamExecutorDevice * > ( device_ ) <nl> + - > local_device_state ( ) , <nl> transfer_local_device , event , transfer_stream , <nl> / * prefer_to_retain_reference = * / true ) ; <nl> <nl> StatusOr < std : : unique_ptr < PjRtBuffer > > PjRtBuffer : : CopyToDevice ( <nl> } <nl> <nl> Status PjRtBuffer : : CopyToRemoteDevice ( absl : : string_view serialized_descriptor ) { <nl> - return client_ - > CopyToRemoteDevice ( this , serialized_descriptor ) ; <nl> + return tensorflow : : down_cast < PjRtStreamExecutorClient * > ( client_ ) <nl> + - > CopyToRemoteDevice ( this , serialized_descriptor ) ; <nl> } <nl> <nl> Status PjRtBuffer : : BlockHostUntilReady ( ) { <nl> Status PjRtBuffer : : BlockHostUntilReady ( ) { <nl> } <nl> device_buffer = device_buffer_ ; <nl> } <nl> - LocalDeviceState * local_device_state = device_ - > local_device_state ( ) ; <nl> + LocalDeviceState * local_device_state = <nl> + tensorflow : : down_cast < PjRtStreamExecutorDevice * > ( device_ ) <nl> + - > local_device_state ( ) ; <nl> std : : unique_ptr < se : : Stream > stream ; <nl> for ( auto & event : device_buffer - > definition_events ( ) ) { <nl> if ( ! 
event - > IsComplete ( ) ) { <nl> StatusOr < TupleHandle > MakeTupleHelper ( <nl> Shape on_host_shape = ShapeUtil : : MakeTupleShape ( host_shapes ) ; <nl> Shape on_device_shape = ShapeUtil : : MakeTupleShape ( device_shapes ) ; <nl> <nl> - se : : DeviceMemoryAllocator * allocator = client - > allocator ( ) ; <nl> + se : : DeviceMemoryAllocator * allocator = <nl> + tensorflow : : down_cast < PjRtStreamExecutorClient * > ( client ) - > allocator ( ) ; <nl> TransferManager * transfer_manager = <nl> - client - > client ( ) - > backend ( ) . transfer_manager ( ) ; <nl> + tensorflow : : down_cast < PjRtStreamExecutorClient * > ( client ) <nl> + - > client ( ) <nl> + - > backend ( ) <nl> + . transfer_manager ( ) ; <nl> se : : Stream * stream = local_device - > host_to_device_stream ( ) ; <nl> TF_ASSIGN_OR_RETURN ( <nl> se : : OwningDeviceMemory root_table_memory , <nl> std : : unique_ptr < PjRtBuffer > OutputBufferHelper ( <nl> / * prefer_to_retain_reference = * / false ) ; <nl> return pjrt_buffer ; <nl> } <nl> - <nl> - static PjRtDevice * LookupDevice ( const PjRtClient & client , int device_id ) { <nl> - auto it = client . id_to_device ( ) . find ( device_id ) ; <nl> - CHECK ( it ! = client . id_to_device ( ) . end ( ) ) <nl> - < < " Unknown device id : " < < device_id ; <nl> - return it - > second ; <nl> - } <nl> - <nl> } / / namespace <nl> <nl> PjRtStreamExecutorExecutable : : PjRtStreamExecutorExecutable ( <nl> PjRtStreamExecutorExecutable : : PjRtStreamExecutorExecutable ( <nl> bool parameter_is_tupled_arguments , <nl> std : : shared_ptr < DeviceAssignment > device_assignment , <nl> std : : vector < LogicalDeviceIds > addressable_device_logical_ids , <nl> - std : : vector < PjRtDevice * > addressable_devices , PjRtClient * client ) <nl> + std : : vector < PjRtDevice * > addressable_devices , <nl> + PjRtStreamExecutorClient * client ) <nl> : client_ ( client ) , <nl> device_assignment_ ( std : : move ( device_assignment ) ) , <nl> parameter_is_tupled_arguments_ ( parameter_is_tupled_arguments ) , <nl> PjRtStreamExecutorExecutable : : PjRtStreamExecutorExecutable ( <nl> VLOG ( 1 ) < < " PjRtStreamExecutorExecutable device_assignment : \ n " <nl> < < device_assignment_ - > ToString ( ) ; <nl> CHECK_GE ( addressable_devices_ . size ( ) , 1 ) < < device_assignment_ - > ToString ( ) ; <nl> - CHECK_LE ( addressable_devices_ . size ( ) , client_ - > local_device_count ( ) ) <nl> + CHECK_LE ( addressable_devices_ . size ( ) , client_ - > addressable_device_count ( ) ) <nl> < < " Inconsistent local device count . " ; <nl> num_partitions = device_assignment_ - > computation_count ( ) ; <nl> } <nl> PjRtStreamExecutorExecutable : : MakeExecutionInputsAndWaitForEvents ( <nl> absl : : Span < const PjRtBuffer : : ScopedHold > device_buffers , <nl> absl : : flat_hash_set < BufferSequencingEvent * > & events ) const { <nl> std : : vector < ExecutionInput > execution_inputs ; <nl> - LocalDeviceState * device_state = & client_ - > device_state ( device_ordinal ) ; <nl> + LocalDeviceState * device_state = & ( client_ - > device_state ( device_ordinal ) ) ; <nl> / / Lift tuple_handle outside the conditional so that the event it returns is <nl> / / not destroyed until after the loop below that waits on events . <nl> absl : : optional < TupleHandle > tuple_handle ; <nl> PjRtStreamExecutorExecutable : : MakeExecutionInputsAndWaitForEvents ( <nl> execution_input . MutableBuffers ( ) - > begin ( ) ; <nl> ShapeTree < MaybeOwningDeviceMemory > : : iterator iterator_end = <nl> execution_input . 
MutableBuffers ( ) - > end ( ) ; <nl> - device_buffers [ i ] . AddToInput ( & input_iterator , iterator_end , <nl> - & execution_input , client_ - > allocator ( ) ) ; <nl> + device_buffers [ i ] . AddToInput ( <nl> + & input_iterator , iterator_end , & execution_input , <nl> + tensorflow : : down_cast < PjRtStreamExecutorClient * > ( client_ ) <nl> + - > allocator ( ) ) ; <nl> CHECK ( input_iterator = = iterator_end ) ; <nl> } <nl> } <nl> StatusOr < ScopedShapedBuffer > PjRtStreamExecutorExecutable : : EnqueueExecution ( <nl> int executable_idx , const RunId & run_id , const ExecuteOptions & options , <nl> PjRtDevice * device , std : : vector < PjRtBuffer : : ScopedHold > * device_buffers , <nl> std : : shared_ptr < DeviceAssignment > device_assignment ) const { <nl> - int device_ordinal = device - > local_device_state ( ) - > device_ordinal ( ) ; <nl> - LocalDeviceState * device_state = & client_ - > device_state ( device_ordinal ) ; <nl> + int device_ordinal = tensorflow : : down_cast < PjRtStreamExecutorDevice * > ( device ) <nl> + - > local_device_state ( ) <nl> + - > device_ordinal ( ) ; <nl> + LocalDeviceState * device_state = & ( client_ - > device_state ( device_ordinal ) ) ; <nl> tensorflow : : profiler : : TraceMeConsumer activity ( <nl> " LocalExecutable : : Execute " , tensorflow : : profiler : : ContextType : : kPjRt , <nl> run_id . ToInt ( ) ) ; <nl> PjRtStreamExecutorExecutable : : MakeOutputBuffers ( <nl> std : : shared_ptr < BufferSequencingEvent > definition_event , <nl> PjRtDevice * device ) const { <nl> std : : vector < std : : unique_ptr < PjRtBuffer > > outputs ; <nl> - LocalDeviceState * device_state = & client_ - > device_state ( device_ordinal ) ; <nl> + LocalDeviceState * device_state = & ( client_ - > device_state ( device_ordinal ) ) ; <nl> if ( options . untuple_result & & result_buffer . on_host_shape ( ) . IsTuple ( ) ) { <nl> int tuple_count = result_buffer . on_host_shape ( ) . tuple_shapes_size ( ) ; <nl> outputs . reserve ( tuple_count ) ; <nl> PjRtStreamExecutorExecutable : : ExecuteHelper ( <nl> if ( device = = nullptr ) { <nl> CHECK ( device_assignment_ ! = nullptr ) ; <nl> const int device_id = ( * device_assignment_ ) ( replica , partition ) ; <nl> - device = LookupDevice ( * client_ , device_id ) ; <nl> + TF_ASSIGN_OR_RETURN ( device , client_ - > LookupDevice ( device_id ) ) ; <nl> device_assignment = device_assignment_ ; <nl> } else { <nl> CHECK ( device_assignment_ = = nullptr ) ; <nl> PjRtStreamExecutorExecutable : : ExecuteHelper ( <nl> } <nl> <nl> CHECK_EQ ( device - > host_id ( ) , client_ - > host_id ( ) ) ; <nl> - int device_ordinal = device - > local_device_state ( ) - > device_ordinal ( ) ; <nl> + int device_ordinal = tensorflow : : down_cast < PjRtStreamExecutorDevice * > ( device ) <nl> + - > local_device_state ( ) <nl> + - > device_ordinal ( ) ; <nl> tensorflow : : profiler : : TraceMe traceme ( " LocalExecutable : : Execute " ) ; <nl> VLOG ( 3 ) < < " Replica " < < replica < < " , partition " < < partition <nl> < < " mapped to device ordinal for execution : " < < device_ordinal ; <nl> PjRtStreamExecutorExecutable : : ExecuteHelper ( <nl> ScopedShapedBuffer result_buffer = <nl> result_buffer_or_status . 
ConsumeValueOrDie ( ) ; <nl> <nl> - LocalDeviceState * device_state = & client_ - > device_state ( device_ordinal ) ; <nl> + LocalDeviceState * device_state = & ( client_ - > device_state ( device_ordinal ) ) ; <nl> se : : Stream * stream = device_state - > compute_stream ( ) ; <nl> StatusOr < EventPool : : Handle > event_or = <nl> device_state - > event_pool ( ) . ThenAllocateAndRecordEvent ( stream ) ; <nl> PjRtStreamExecutorExecutable : : Execute ( <nl> const int replica = addressable_device_logical_ids_ [ i ] . replica ; <nl> const int partition = addressable_device_logical_ids_ [ i ] . partition ; <nl> PjRtDevice * device = addressable_devices_ [ i ] ; <nl> - const LocalDeviceState & device_state = * device - > local_device_state ( ) ; <nl> + const LocalDeviceState & device_state = <nl> + * tensorflow : : down_cast < PjRtStreamExecutorDevice * > ( device ) <nl> + - > local_device_state ( ) ; <nl> device_state . execute_thread ( ) - > Schedule ( [ & , replica , partition , i ] { <nl> results [ i ] = ExecuteHelper ( argument_handles [ i ] , replica , partition , <nl> run_id , options ) ; <nl> StatusOr < std : : pair < std : : vector < Shape > , Shape > > GetShardedProgramShapes ( <nl> <nl> } / / namespace <nl> <nl> - StatusOr < std : : unique_ptr < PjRtExecutable > > PjRtClient : : Compile ( <nl> + StatusOr < std : : unique_ptr < PjRtExecutable > > PjRtStreamExecutorClient : : Compile ( <nl> const XlaComputation & computation , CompileOptions options ) { <nl> - tensorflow : : profiler : : TraceMe traceme ( " PjRtClient : : Compile " ) ; <nl> + tensorflow : : profiler : : TraceMe traceme ( " PjRtStreamExecutorClient : : Compile " ) ; <nl> <nl> ExecutableBuildOptions & build_options = options . executable_build_options ; <nl> if ( ! build_options . device_allocator ( ) ) { <nl> StatusOr < std : : unique_ptr < PjRtExecutable > > PjRtClient : : Compile ( <nl> num_partitions = 1 ; <nl> } else { <nl> if ( ! build_options . has_device_assignment ( ) ) { <nl> - VLOG ( 2 ) < < " PjRtClient : : Compile using default device_assignment . " ; <nl> + VLOG ( 2 ) < < " PjRtStreamExecutorClient : : Compile using default " <nl> + " device_assignment . " ; <nl> TF_ASSIGN_OR_RETURN ( <nl> DeviceAssignment device_assignment , <nl> GetDefaultDeviceAssignment ( build_options . num_replicas ( ) , <nl> build_options . num_partitions ( ) ) ) ; <nl> build_options . set_device_assignment ( device_assignment ) ; <nl> } <nl> - VLOG ( 2 ) < < " PjRtClient : : Compile device_assignment : \ n " <nl> + VLOG ( 2 ) < < " PjRtStreamExecutorClient : : Compile device_assignment : \ n " <nl> < < build_options . device_assignment ( ) . ToString ( ) ; <nl> num_replicas = build_options . device_assignment ( ) . replica_count ( ) ; <nl> num_partitions = build_options . device_assignment ( ) . computation_count ( ) ; <nl> StatusOr < std : : unique_ptr < PjRtExecutable > > PjRtClient : : Compile ( <nl> for ( int replica = 0 ; replica < num_replicas ; + + replica ) { <nl> for ( int partition = 0 ; partition < num_partitions ; + + partition ) { <nl> int device_id = ( * device_assignment ) ( replica , partition ) ; <nl> - PjRtDevice * device = LookupDevice ( * this , device_id ) ; <nl> + TF_ASSIGN_OR_RETURN ( PjRtDevice * device , LookupDevice ( device_id ) ) ; <nl> if ( device - > host_id ( ) ! = host_id ( ) ) { <nl> VLOG ( 3 ) < < " Non - local device : " < < device_id ; <nl> continue ; <nl> StatusOr < std : : unique_ptr < PjRtExecutable > > PjRtClient : : Compile ( <nl> <nl> if ( build_options . 
device_ordinal ( ) < 0 ) { <nl> build_options . set_device_ordinal ( <nl> - addressable_devices . front ( ) - > local_device_state ( ) - > device_ordinal ( ) ) ; <nl> + addressable_devices . front ( ) - > local_hardware_id ( ) ) ; <nl> } <nl> } <nl> <nl> mmm a / tensorflow / compiler / xla / pjrt / pjrt_client . h <nl> ppp b / tensorflow / compiler / xla / pjrt / pjrt_client . h <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / xla_data . pb . h " <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / lib / core / status . h " <nl> + # include " tensorflow / core / platform / casts . h " <nl> # include " tensorflow / core / platform / fingerprint . h " <nl> # include " tensorflow / core / platform / thread_annotations . h " <nl> # include " tensorflow / core / platform / types . h " <nl> class PjRtClient ; <nl> <nl> class PjRtDevice { <nl> public : <nl> - explicit PjRtDevice ( int id , <nl> - std : : unique_ptr < LocalDeviceState > local_device_state , <nl> - std : : string device_kind , int host_id = 0 ) <nl> + virtual ~ PjRtDevice ( ) { } <nl> + <nl> + / / Return the client that owns this device . <nl> + virtual PjRtClient * client ( ) const = 0 ; <nl> + <nl> + / / Whether the client can issue commands to this device . <nl> + virtual bool IsAddressable ( ) const = 0 ; <nl> + <nl> + / / The ID of this device . IDs are unique among devices of this type <nl> + / / ( e . g . CPUs , GPUs ) . On multi - host platforms , this will be unique across all <nl> + / / hosts ' devices . This is the ID that should be used in a DeviceAssignment . <nl> + virtual int id ( ) const = 0 ; <nl> + <nl> + / / The task ID of this device according to TpuTopology . This is not the same <nl> + / / as PjRtClient : : host_id ( ) in a multi - task setting , where each client can see <nl> + / / devices from all tasks , but only a subset of them are addressable and have <nl> + / / the same task_id as the client . <nl> + virtual int host_id ( ) const = 0 ; <nl> + <nl> + / / Opaque hardware ID , e . g . , the CUDA device number , useful for identifying <nl> + / / which GPU when interacting with non - JAX code . In general , not guaranteed to <nl> + / / be dense , and - 1 if undefined . <nl> + virtual int local_hardware_id ( ) const = 0 ; <nl> + <nl> + / / A vendor - dependent string that uniquely identifies the kind of device , <nl> + / / e . g . , " Tesla V100 - SXM2 - 16GB " . May be used to determine whether two GPUs are <nl> + / / compatible for compilation . <nl> + virtual const std : : string & device_kind ( ) const = 0 ; <nl> + <nl> + virtual std : : string DebugString ( ) const = 0 ; <nl> + <nl> + / / Transfer the given literal to the infeed queue . <nl> + virtual Status TransferToInfeed ( const LiteralSlice & literal ) const = 0 ; <nl> + <nl> + / / Transfer and return a value of the given shape from the outfeed queue . <nl> + virtual StatusOr < Literal > TransferFromOutfeed ( const Shape & shape ) const = 0 ; <nl> + } ; <nl> + <nl> + class PjRtStreamExecutorDevice : public PjRtDevice { <nl> + public : <nl> + explicit PjRtStreamExecutorDevice ( <nl> + int id , std : : unique_ptr < LocalDeviceState > local_device_state , <nl> + std : : string device_kind , int host_id = 0 ) <nl> : id_ ( id ) , <nl> - local_device_id_ ( <nl> + device_ordinal_ ( <nl> local_device_state ?
local_device_state - > device_ordinal ( ) : - 1 ) , <nl> local_device_state_ ( std : : move ( local_device_state ) ) , <nl> host_id_ ( host_id ) , <nl> device_kind_ ( std : : move ( device_kind ) ) { } <nl> - virtual ~ PjRtDevice ( ) { } <nl> + ~ PjRtStreamExecutorDevice ( ) override { } <nl> <nl> / / Must set client exactly once . <nl> void SetClient ( PjRtClient * client ) { <nl> class PjRtDevice { <nl> client_ = client ; <nl> } <nl> <nl> + / / Task ID . This is always 0 on single - task setup . <nl> + int host_id ( ) const override { return host_id_ ; } <nl> + <nl> + / / Return ` platform_id ` from client . <nl> + PjRtPlatformId platform_id ( ) const ; <nl> + <nl> + / / Return ` platform_name ` from client . <nl> + const std : : string & platform_name ( ) const ; <nl> + <nl> + PjRtClient * client ( ) const override { return client_ ; } <nl> + <nl> / / The ID of this device . IDs are unique among devices of this type <nl> / / ( e . g . CPUs , GPUs ) . On multi - host platforms , this will be unique across all <nl> / / hosts ' devices . This is the ID that should be used in a DeviceAssignment . <nl> - int id ( ) const { return id_ ; } <nl> + int id ( ) const override { return id_ ; } <nl> <nl> - bool IsLocalDevice ( ) const { return local_device_id_ ! = - 1 ; } <nl> + bool IsAddressable ( ) const override { return device_ordinal_ ! = - 1 ; } <nl> <nl> - int local_device_id ( ) const { return local_device_id_ ; } <nl> + int local_hardware_id ( ) const override { return device_ordinal_ ; } <nl> <nl> / / If this is a device local to this host , returns a LocalDeviceState object <nl> / / that can be used to manipulate the device . Returns nullptr if the device is <nl> class PjRtDevice { <nl> / / is not local to this host . <nl> StatusOr < LocalDeviceState * > GetLocalDeviceState ( ) const ; <nl> <nl> - / / The ID of this device ' s host . This is always 0 on single - host platforms . <nl> - int host_id ( ) const { return host_id_ ; } <nl> - <nl> - / / Return ` platform_id ` from client . <nl> - PjRtPlatformId platform_id ( ) const ; <nl> - <nl> - / / Return ` platform_name ` from client . <nl> - const std : : string & platform_name ( ) const ; <nl> - <nl> / / A vendor - dependent string that uniquely identifies the kind of device . <nl> - const std : : string & device_kind ( ) const { return device_kind_ ; } <nl> + const std : : string & device_kind ( ) const override { return device_kind_ ; } <nl> <nl> - virtual std : : string DebugString ( ) const ; <nl> - <nl> - PjRtClient * client ( ) const { return client_ ; } <nl> + std : : string DebugString ( ) const override ; <nl> <nl> / / Transfer the given literal to the infeed queue of the given local device . <nl> - virtual Status TransferToInfeed ( const LiteralSlice & literal ) const ; <nl> + Status TransferToInfeed ( const LiteralSlice & literal ) const override ; <nl> <nl> / / Transfer and return a value of the given shape from the outfeed of the <nl> / / given device . <nl> - virtual StatusOr < Literal > TransferFromOutfeed ( const Shape & shape ) const ; <nl> + StatusOr < Literal > TransferFromOutfeed ( const Shape & shape ) const override ; <nl> <nl> private : <nl> const int id_ ; <nl> - const int local_device_id_ ; / / - 1 means not local . <nl> + const int device_ordinal_ ; / / - 1 means not local . <nl> const std : : unique_ptr < LocalDeviceState > local_device_state_ ; <nl> const int host_id_ ; <nl> const std : : string device_kind_ ; <nl> class PjRtExecutable ; <nl> / / alive as long as any of the other runtime objects are alive .
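To make the new split concrete before the refactored PjRtClient declaration below, here is a minimal caller-side sketch; it is not part of this commit, the helper name is hypothetical, and only the `devices()`, `IsAddressable()`, and `local_hardware_id()` calls are taken from the interface declared in this change.

```cpp
#include <vector>

#include "tensorflow/compiler/xla/pjrt/pjrt_client.h"

// Hypothetical helper: collect the devices this client may issue commands to.
std::vector<xla::PjRtDevice*> ListAddressableDevices(xla::PjRtClient* client) {
  std::vector<xla::PjRtDevice*> result;
  for (xla::PjRtDevice* device : client->devices()) {
    // IsAddressable() replaces the old IsLocalDevice(); local_hardware_id()
    // is -1 for devices this client cannot address.
    if (device->IsAddressable()) {
      result.push_back(device);
    }
  }
  return result;
}
```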
<nl> class PjRtClient { <nl> public : <nl> - / / ` allocator ` may null , in which case the platform default allocator is used . <nl> - explicit PjRtClient ( <nl> - std : : string platform_name , LocalClient * client , <nl> - std : : vector < std : : unique_ptr < PjRtDevice > > devices , int host_id , <nl> - std : : unique_ptr < se : : DeviceMemoryAllocator > allocator , <nl> - std : : unique_ptr < tensorflow : : Allocator > host_memory_allocator , <nl> - bool should_stage_host_to_device_transfers , <nl> - std : : unique_ptr < gpu : : GpuExecutableRunOptions > gpu_run_options ) ; <nl> virtual ~ PjRtClient ( ) = default ; <nl> <nl> - virtual StatusOr < DeviceAssignment > GetDefaultDeviceAssignment ( <nl> - int num_replicas , int num_partitions ) const ; <nl> + / / TODO ( zhangqiaorjc ) : Rename to task_id . <nl> + / / Return the task id of this client . In a single - task setting , always 0 . <nl> + virtual int host_id ( ) const = 0 ; <nl> <nl> - int device_count ( ) const { return devices_ . size ( ) ; } <nl> - int local_device_count ( ) const { return local_devices_ . size ( ) ; } <nl> - const std : : vector < std : : unique_ptr < PjRtDevice > > & devices ( ) const { <nl> - return devices_ ; <nl> - } <nl> - const std : : vector < PjRtDevice * > & local_devices ( ) const { <nl> - return local_devices_ ; <nl> - } <nl> - const std : : map < int , PjRtDevice * > & id_to_device ( ) const { <nl> - return id_to_device_ ; <nl> - } <nl> - int host_id ( ) const { return host_id_ ; } <nl> - PjRtPlatformId platform_id ( ) const { return platform_id_ ; } <nl> - const std : : string & platform_name ( ) const { return platform_name_ ; } <nl> + / / Return the number of devices in the entire computation . In a multi - headed <nl> + / / client setting , some are addressable by this client , some are not . In a <nl> + / / single - client setting , this is equal to the number of addressable devices . <nl> + virtual int device_count ( ) const = 0 ; <nl> <nl> - LocalDeviceState & device_state ( int device_ordinal ) const { <nl> - return * local_devices_ . at ( device_ordinal ) - > local_device_state ( ) ; <nl> - } <nl> + / / Return the number of addressable devices . Addressable devices are those that <nl> + / / the client can issue commands to . <nl> + virtual int addressable_device_count ( ) const = 0 ; <nl> <nl> - / / Return a local PjRtDevice for a given ` local_device_id ` . <nl> - virtual StatusOr < PjRtDevice * > LookupLocalDevice ( int local_device_id ) const ; <nl> + / / Return all devices in the entire computation , including addressable and <nl> + / / non - addressable devices . <nl> + virtual absl : : Span < PjRtDevice * const > devices ( ) const = 0 ; <nl> <nl> - LocalClient * client ( ) const { return client_ ; } <nl> - se : : DeviceMemoryAllocator * allocator ( ) const { return allocator_ ; } <nl> - tensorflow : : Allocator * host_memory_allocator ( ) const { <nl> - return host_memory_allocator_ . get ( ) ; <nl> - } <nl> - bool should_stage_host_to_device_transfers ( ) const { <nl> - return should_stage_host_to_device_transfers_ ; <nl> - } <nl> + / / TODO ( zhangqiaorjc ) : Rename to addressable_devices . <nl> + / / Return only addressable devices . <nl> + virtual absl : : Span < PjRtDevice * const > local_devices ( ) const = 0 ; <nl> <nl> - gpu : : GpuExecutableRunOptions * gpu_run_options ( ) const { <nl> - return gpu_run_options_ . get ( ) ; <nl> - } <nl> + / / Lookup any PjRtDevice for a given PjRtDevice : : id ( ) .
<nl> + virtual StatusOr < PjRtDevice * > LookupDevice ( int device_id ) const = 0 ; <nl> <nl> - tensorflow : : thread : : ThreadPool * h2d_transfer_pool ( ) { <nl> - return & h2d_transfer_pool_ ; <nl> - } <nl> + / / Return an addressable PjRtDevice for a given <nl> + / / PjRtDevice : : local_hardware_id ( ) . <nl> + virtual StatusOr < PjRtDevice * > LookupAddressableDevice ( <nl> + int local_hardware_id ) const = 0 ; <nl> <nl> - / / Most platforms expect device - to - device transfers to be enqueued on the <nl> - / / source d2d stream , but some platforms use the destination d2d stream . This <nl> - / / function specifies which one the platform expects . <nl> - virtual bool EnqueueD2DTransfersOnSrcStream ( ) const { return true ; } <nl> + / / Return an ID that identifies the platform ( CPU / GPU / TPU ) . <nl> + virtual PjRtPlatformId platform_id ( ) const = 0 ; <nl> <nl> - / / Generates a unique fingerprint for ` executable ` . <nl> - virtual StatusOr < absl : : optional < std : : string > > ExecutableFingerprint ( <nl> - const PjRtExecutable & executable ) const { <nl> - return absl : : optional < std : : string > ( ) ; <nl> - } <nl> + / / Returns a string that identifies the platform ( CPU / GPU / TPU ) . <nl> + virtual const std : : string & platform_name ( ) const = 0 ; <nl> + <nl> + / / Return a device - specific default device assignment , e . g . , GPU and TPU may <nl> + / / be different . <nl> + virtual StatusOr < DeviceAssignment > GetDefaultDeviceAssignment ( <nl> + int num_replicas , int num_partitions ) const = 0 ; <nl> <nl> / / Returns a backend - specific HLO cost analysis visitor . <nl> - virtual std : : unique_ptr < HloCostAnalysis > GetHloCostAnalysis ( ) ; <nl> + virtual std : : unique_ptr < HloCostAnalysis > GetHloCostAnalysis ( ) = 0 ; <nl> <nl> + / / Compile ` computation ` with the given ` options ` . <nl> virtual StatusOr < std : : unique_ptr < PjRtExecutable > > Compile ( <nl> - const XlaComputation & computation , CompileOptions options ) ; <nl> + const XlaComputation & computation , CompileOptions options ) = 0 ; <nl> + <nl> + / / Generates a unique fingerprint for ` executable ` , may be absl : : nullopt . <nl> + virtual StatusOr < absl : : optional < std : : string > > ExecutableFingerprint ( <nl> + const PjRtExecutable & executable ) const = 0 ; <nl> <nl> / / Creates a buffer on the device without initializing or copying any data . <nl> virtual StatusOr < std : : unique_ptr < PjRtBuffer > > CreateUninitializedBuffer ( <nl> - const Shape & shape , PjRtDevice * device ) = 0 ; <nl> <nl> / / Describes the semantics the caller of BufferFromHostBuffer expects from the <nl> / / runtime , in a total order from most restrictive to least restrictive .
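To ground that ordering, here is a hedged sketch of a caller picking the least restrictive semantics; the function name, shape, and data pointer are illustrative only, while the `BufferFromHostBuffer` signature and the `kZeroCopy` member come from this diff.

```cpp
#include <memory>

#include "tensorflow/compiler/xla/pjrt/pjrt_client.h"
#include "tensorflow/compiler/xla/shape_util.h"

// Hypothetical caller: stages a 2x3 float array onto `device`. kZeroCopy lets
// the runtime alias `data` for the buffer's lifetime, so `data` must outlive
// the returned PjRtBuffer.
xla::StatusOr<std::unique_ptr<xla::PjRtBuffer>> StageOnDevice(
    xla::PjRtClient* client, xla::PjRtDevice* device, const float* data) {
  xla::Shape shape = xla::ShapeUtil::MakeShape(xla::F32, {2, 3});
  return client->BufferFromHostBuffer(
      data, shape, xla::PjRtClient::HostBufferSemantics::kZeroCopy,
      /*buffer_reference=*/nullptr, device);
}
```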
<nl> class PjRtClient { <nl> virtual StatusOr < std : : unique_ptr < PjRtBuffer > > BufferFromHostBuffer ( <nl> const void * data , const Shape & shape , <nl> HostBufferSemantics host_buffer_semantics , <nl> - std : : shared_ptr < void > buffer_reference , PjRtDevice * device ) ; <nl> + std : : shared_ptr < void > buffer_reference , PjRtDevice * device ) = 0 ; <nl> <nl> / / Note that literal must remain in scope until the transfer has completed , so <nl> / / the caller should , for example , wait for BlockHostUntilReady ( ) to complete on <nl> / / the return value before letting literal go out of scope . <nl> virtual StatusOr < std : : unique_ptr < PjRtBuffer > > BufferFromHostLiteral ( <nl> - const LiteralSlice & literal , PjRtDevice * device ) ; <nl> + const LiteralSlice & literal , PjRtDevice * device ) = 0 ; <nl> <nl> / / Asynchronously makes a vector of PjRtBuffers that can be used to receive <nl> / / cross host transfers using ` client ` on ` device ' . ` shapes ` must be the exact <nl> class PjRtClient { <nl> / / buffers will become ready until * all * of the sends have completed . <nl> virtual void MakeCrossHostReceiveBuffers ( <nl> absl : : Span < const Shape > shapes , PjRtDevice * device , <nl> - PjRtCrossHostRecvNotifier & & notifier ) ; <nl> + PjRtCrossHostRecvNotifier & & notifier ) = 0 ; <nl> <nl> - virtual StatusOr < ChannelHandle > CreateChannelHandle ( ) { <nl> + / / Create ChannelHandles for XLA send / recv . <nl> + virtual StatusOr < ChannelHandle > CreateChannelHandle ( ) = 0 ; <nl> + virtual StatusOr < ChannelHandle > CreateDeviceToHostChannelHandle ( ) = 0 ; <nl> + virtual StatusOr < ChannelHandle > CreateHostToDeviceChannelHandle ( ) = 0 ; <nl> + } ; <nl> + <nl> + class PjRtStreamExecutorClient : public PjRtClient { <nl> + public : <nl> + / / ` allocator ` may be null , in which case the platform default allocator is used . <nl> + explicit PjRtStreamExecutorClient ( <nl> + std : : string platform_name , LocalClient * client , <nl> + std : : vector < std : : unique_ptr < PjRtStreamExecutorDevice > > devices , <nl> + int host_id , std : : unique_ptr < se : : DeviceMemoryAllocator > allocator , <nl> + std : : unique_ptr < tensorflow : : Allocator > host_memory_allocator , <nl> + bool should_stage_host_to_device_transfers , <nl> + std : : unique_ptr < gpu : : GpuExecutableRunOptions > gpu_run_options ) ; <nl> + ~ PjRtStreamExecutorClient ( ) override = default ; <nl> + <nl> + int host_id ( ) const override { return host_id_ ; } <nl> + <nl> + int device_count ( ) const override { return devices_ . size ( ) ; } <nl> + int addressable_device_count ( ) const override { <nl> + return local_devices_ . size ( ) ; <nl> + } <nl> + absl : : Span < PjRtDevice * const > devices ( ) const override { return devices_ ; } <nl> + absl : : Span < PjRtDevice * const > local_devices ( ) const override { <nl> + return local_devices_ ; <nl> + } <nl> + <nl> + StatusOr < PjRtDevice * > LookupDevice ( int device_id ) const override { <nl> + auto it = id_to_device_ . find ( device_id ) ; <nl> + if ( it ! = id_to_device_ .
end ( ) ) { <nl> + return it - > second ; <nl> + } <nl> + return InvalidArgument ( " No matching device found for device_id % d " , <nl> + device_id ) ; <nl> + } <nl> + <nl> + StatusOr < PjRtDevice * > LookupAddressableDevice ( <nl> + int local_hardware_id ) const override ; <nl> + <nl> + PjRtPlatformId platform_id ( ) const override { return platform_id_ ; } <nl> + const std : : string & platform_name ( ) const override { return platform_name_ ; } <nl> + <nl> + / / Most platforms expect device - to - device transfers to be enqueued on the <nl> + / / source d2d stream , but some platforms use the destination d2d stream . This <nl> + / / function specifies which one the platform expects . <nl> + virtual bool EnqueueD2DTransfersOnSrcStream ( ) const { return true ; } <nl> + <nl> + StatusOr < DeviceAssignment > GetDefaultDeviceAssignment ( <nl> + int num_replicas , int num_partitions ) const override ; <nl> + <nl> + StatusOr < std : : unique_ptr < PjRtExecutable > > Compile ( <nl> + const XlaComputation & computation , CompileOptions options ) override ; <nl> + <nl> + / / Generates a unique fingerprint for ` executable ` . <nl> + StatusOr < absl : : optional < std : : string > > ExecutableFingerprint ( <nl> + const PjRtExecutable & executable ) const override { <nl> + return absl : : optional < std : : string > ( ) ; <nl> + } <nl> + <nl> + / / Returns a backend - specific HLO cost analysis visitor . <nl> + std : : unique_ptr < HloCostAnalysis > GetHloCostAnalysis ( ) override ; <nl> + <nl> + / / Creates a buffer on the device without initializing or copying any data . <nl> + / / An optional ` definition_event ` may be specified that can be used to <nl> + / / ensure the buffer isn ' t referenced until some external mechanism has <nl> + / / initialized the data . <nl> + / / NOTE : The sequencing mechanism is not guaranteed to be supported by all <nl> + / / future backends and so callers should avoid it wherever possible . <nl> + StatusOr < std : : unique_ptr < PjRtBuffer > > CreateUninitializedBuffer ( <nl> + const Shape & shape , PjRtDevice * device ) override ; <nl> + virtual StatusOr < std : : unique_ptr < PjRtBuffer > > CreateUninitializedBuffer ( <nl> + const Shape & shape , PjRtDevice * device , <nl> + std : : shared_ptr < BufferSequencingEvent > definition_event ) ; <nl> + <nl> + StatusOr < std : : unique_ptr < PjRtBuffer > > BufferFromHostBuffer ( <nl> + const void * data , const Shape & shape , <nl> + HostBufferSemantics host_buffer_semantics , <nl> + std : : shared_ptr < void > buffer_reference , PjRtDevice * device ) override ; <nl> + <nl> + / / Note that literal must remain in scope until the transfer has completed , so <nl> + / / the caller should , for example , wait for BlockHostUntilReady ( ) to complete on <nl> + / / the return value before letting literal go out of scope . <nl> + StatusOr < std : : unique_ptr < PjRtBuffer > > BufferFromHostLiteral ( <nl> + const LiteralSlice & literal , PjRtDevice * device ) override ; <nl> + <nl> + / / Asynchronously makes a vector of PjRtBuffers that can be used to receive <nl> + / / cross host transfers using ` client ` on ` device ' . ` shapes ` must be the exact <nl> + / / shapes , with identical layouts , corresponding to the buffers that will be <nl> + / / sent . When resources for the transfer are available , notifier will be <nl> + / / called with a vector of PjRtCrossHostRecvBuffer structs , one for each <nl> + / / shape in ` shapes ` .
Each struct contains a buffer that will contain the <nl> + / / received value , and an opaque string that should be transmitted to the <nl> + / / sending host and used in a call to CopyToRemoteDevice . None of the recv <nl> + / / buffers will become ready until * all * of the sends have completed . <nl> + void MakeCrossHostReceiveBuffers ( <nl> + absl : : Span < const Shape > shapes , PjRtDevice * device , <nl> + PjRtCrossHostRecvNotifier & & notifier ) override ; <nl> + <nl> + StatusOr < ChannelHandle > CreateChannelHandle ( ) override { <nl> return client ( ) - > CreateChannelHandle ( ) ; <nl> } <nl> - virtual StatusOr < ChannelHandle > CreateDeviceToHostChannelHandle ( ) { <nl> + StatusOr < ChannelHandle > CreateDeviceToHostChannelHandle ( ) override { <nl> return client ( ) - > CreateDeviceToHostChannelHandle ( ) ; <nl> } <nl> - virtual StatusOr < ChannelHandle > CreateHostToDeviceChannelHandle ( ) { <nl> + StatusOr < ChannelHandle > CreateHostToDeviceChannelHandle ( ) override { <nl> return client ( ) - > CreateHostToDeviceChannelHandle ( ) ; <nl> } <nl> <nl> + LocalDeviceState & device_state ( int device_ordinal ) const { <nl> + return * tensorflow : : down_cast < PjRtStreamExecutorDevice * > ( <nl> + local_devices_ . at ( device_ordinal ) ) <nl> + - > local_device_state ( ) ; <nl> + } <nl> + LocalClient * client ( ) const { return client_ ; } <nl> + se : : DeviceMemoryAllocator * allocator ( ) const { return allocator_ ; } <nl> + tensorflow : : Allocator * host_memory_allocator ( ) const { <nl> + return host_memory_allocator_ . get ( ) ; <nl> + } <nl> + bool should_stage_host_to_device_transfers ( ) const { <nl> + return should_stage_host_to_device_transfers_ ; <nl> + } <nl> + <nl> + gpu : : GpuExecutableRunOptions * gpu_run_options ( ) const { <nl> + return gpu_run_options_ . get ( ) ; <nl> + } <nl> + <nl> + tensorflow : : thread : : ThreadPool * h2d_transfer_pool ( ) { <nl> + return & h2d_transfer_pool_ ; <nl> + } <nl> + <nl> protected : <nl> friend class PjRtBuffer ; <nl> virtual void EnqueueCrossHostReceive ( <nl> class PjRtClient { <nl> std : : unique_ptr < tensorflow : : Allocator > host_memory_allocator_ ; <nl> <nl> / / Includes all devices , including non - local devices on multi - host platforms . <nl> - std : : vector < std : : unique_ptr < PjRtDevice > > devices_ ; <nl> + std : : vector < std : : unique_ptr < PjRtStreamExecutorDevice > > owned_devices_ ; <nl> + / / Pointers to ` owned_devices_ ` . <nl> + std : : vector < PjRtDevice * > devices_ ; <nl> / / Maps Device : : id ( ) to the corresponding Device . Includes all devices . <nl> std : : map < int , PjRtDevice * > id_to_device_ ; <nl> / / Local devices indexed by local device ordinal . <nl> class PjRtBuffer { <nl> <nl> private : <nl> friend class PjRtBuffer ; <nl> - friend class PjRtClient ; <nl> + friend class PjRtStreamExecutorClient ; <nl> <nl> / / Helper struct that makes it possible to move a ScopedHold through a <nl> / / closure . <nl> class PjRtExecutable { <nl> virtual PjRtClient * client ( ) const = 0 ; <nl> <nl> / / Unique name for this executable , e . g . , HloModule name . <nl> - virtual const string & name ( ) const = 0 ; <nl> + virtual const std : : string & name ( ) const = 0 ; <nl> <nl> virtual int num_replicas ( ) const = 0 ; <nl> <nl> class PjRtExecutable { <nl> virtual absl : : Span < const LogicalDeviceIds > addressable_device_logical_ids ( ) <nl> const = 0 ; <nl> <nl> + / / An addressable_device is one which the client can issue commands to . 
<nl> / / addressable_devices ( ) [ i ] is the Device to which <nl> / / addressable_device_logical_ids ( ) [ i ] is assigned . <nl> virtual absl : : Span < PjRtDevice * const > addressable_devices ( ) const = 0 ; <nl> class PjRtStreamExecutorExecutable : public PjRtExecutable { <nl> bool parameter_is_tupled_arguments , <nl> std : : shared_ptr < DeviceAssignment > device_assignment , <nl> std : : vector < LogicalDeviceIds > addressable_device_logical_ids , <nl> - std : : vector < PjRtDevice * > addressable_devices , PjRtClient * client ) ; <nl> + std : : vector < PjRtDevice * > addressable_devices , <nl> + PjRtStreamExecutorClient * client ) ; <nl> <nl> ~ PjRtStreamExecutorExecutable ( ) override = default ; <nl> <nl> - PjRtClient * client ( ) const override { return client_ ; } <nl> + PjRtStreamExecutorClient * client ( ) const override { return client_ ; } <nl> <nl> - const string & name ( ) const override ; <nl> + const std : : string & name ( ) const override ; <nl> <nl> int num_replicas ( ) const override { <nl> return executables_ [ 0 ] - > build_options ( ) . num_replicas ( ) ; <nl> class PjRtStreamExecutorExecutable : public PjRtExecutable { <nl> } <nl> <nl> private : <nl> - friend class PjRtClient ; <nl> + friend class PjRtStreamExecutorClient ; <nl> / / Initializes information about which arguments to which executables must be <nl> / / donated due to aliases that were specified by the computation . <nl> Status SetUpDonation ( bool tuple_inputs ) ; <nl> class PjRtStreamExecutorExecutable : public PjRtExecutable { <nl> / / Create shared pointers so we can free them after the execution : with <nl> / / asynchronous execution , the process being executed can outlive the <nl> / / executable itself . <nl> - PjRtClient * const client_ ; <nl> + PjRtStreamExecutorClient * const client_ ; <nl> / / One executable per partition . <nl> std : : vector < std : : shared_ptr < LocalExecutable > > executables_ ; <nl> / / Per - executable set of parameters that have any aliased buffers and thus <nl> mmm a / tensorflow / compiler / xla / pjrt / tpu_client . cc <nl> ppp b / tensorflow / compiler / xla / pjrt / tpu_client . 
cc <nl> Status TpuDeviceState : : ThenMemcpyDeviceToDevice ( <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - class PjRtTpuClient : public PjRtClient { <nl> + class PjRtTpuClient : public PjRtStreamExecutorClient { <nl> public : <nl> PjRtTpuClient ( LocalClient * client , <nl> - std : : vector < std : : unique_ptr < PjRtDevice > > devices , int host_id ) ; <nl> + std : : vector < std : : unique_ptr < PjRtStreamExecutorDevice > > devices , <nl> + int host_id ) ; <nl> <nl> StatusOr < DeviceAssignment > GetDefaultDeviceAssignment ( <nl> int num_replicas , int num_partitions ) const override ; <nl> class PjRtTpuClient : public PjRtClient { <nl> const PjRtExecutable & executable ) const override ; <nl> } ; <nl> <nl> - PjRtTpuClient : : PjRtTpuClient ( LocalClient * client , <nl> - std : : vector < std : : unique_ptr < PjRtDevice > > devices , <nl> - int host_id ) <nl> - : PjRtClient ( kTpuName , client , std : : move ( devices ) , host_id , <nl> - / * allocator = * / nullptr , <nl> - / * host_memory_allocator = * / nullptr , <nl> - / * should_stage_host_to_device_transfers = * / false , <nl> - / * gpu_run_options = * / nullptr ) { } <nl> + PjRtTpuClient : : PjRtTpuClient ( <nl> + LocalClient * client , <nl> + std : : vector < std : : unique_ptr < PjRtStreamExecutorDevice > > devices , int host_id ) <nl> + : PjRtStreamExecutorClient ( kTpuName , client , std : : move ( devices ) , host_id , <nl> + / * allocator = * / nullptr , <nl> + / * host_memory_allocator = * / nullptr , <nl> + / * should_stage_host_to_device_transfers = * / false , <nl> + / * gpu_run_options = * / nullptr ) { } <nl> <nl> StatusOr < DeviceAssignment > PjRtTpuClient : : GetDefaultDeviceAssignment ( <nl> int num_replicas , int num_partitions ) const { <nl> StatusOr < DeviceAssignment > PjRtTpuClient : : GetDefaultDeviceAssignment ( <nl> num_partitions ) ; <nl> } <nl> / / Fallback to default global device assignment if we can ' t run locally . <nl> - return PjRtClient : : GetDefaultDeviceAssignment ( num_replicas , num_partitions ) ; <nl> + return PjRtStreamExecutorClient : : GetDefaultDeviceAssignment ( num_replicas , <nl> + num_partitions ) ; <nl> } <nl> <nl> StatusOr < absl : : optional < std : : string > > PjRtTpuClient : : ExecutableFingerprint ( <nl> StatusOr < absl : : optional < std : : string > > PjRtTpuClient : : ExecutableFingerprint ( <nl> return absl : : optional < std : : string > ( tpu_executable - > fingerprint ( ) ) ; <nl> } <nl> <nl> - StatusOr < std : : vector < std : : unique_ptr < PjRtDevice > > > GetTpuDevices ( <nl> + StatusOr < std : : vector < std : : unique_ptr < PjRtStreamExecutorDevice > > > GetTpuDevices ( <nl> LocalClient * client , <nl> std : : vector < std : : unique_ptr < LocalDeviceState > > local_device_states ) { <nl> - std : : vector < std : : unique_ptr < PjRtDevice > > devices ; <nl> + std : : vector < std : : unique_ptr < PjRtStreamExecutorDevice > > devices ; <nl> tf_tpu : : TpuTopologyExternal topology = <nl> tf_tpu : : TpuPlatformInterface : : GetRegisteredPlatform ( ) - > topology ( ) ; <nl> <nl> mmm a / tensorflow / compiler / xla / pjrt / tpu_client . h <nl> ppp b / tensorflow / compiler / xla / pjrt / tpu_client . h <nl> limitations under the License . 
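Before the TPU-specific declarations that follow, a hedged sketch of the subclassing pattern any StreamExecutor-backed platform now uses; `MyAccelDevice` is hypothetical and simply mirrors the PjRtTpuDevice constructor in the hunk below.

```cpp
#include <memory>
#include <string>
#include <utility>

#include "tensorflow/compiler/xla/pjrt/pjrt_client.h"

// Hypothetical backend device: derives from PjRtStreamExecutorDevice rather
// than from the now-abstract PjRtDevice, forwarding the common state.
class MyAccelDevice : public xla::PjRtStreamExecutorDevice {
 public:
  MyAccelDevice(int id,
                std::unique_ptr<xla::LocalDeviceState> local_device_state,
                std::string device_kind, int host_id)
      : PjRtStreamExecutorDevice(id, std::move(local_device_state),
                                 std::move(device_kind), host_id) {}
  // Backend-specific accessors (topology coordinates, core IDs, ...) go here.
};
```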
<nl> <nl> namespace xla { <nl> <nl> - class PjRtTpuDevice : public PjRtDevice { <nl> + class PjRtTpuDevice : public PjRtStreamExecutorDevice { <nl> public : <nl> PjRtTpuDevice ( const tensorflow : : tpu : : TpuCoreLocationExternal core , <nl> std : : unique_ptr < LocalDeviceState > local_device_state , <nl> int host_id , const std : : array < int , 3 > & coords , <nl> std : : string device_kind ) <nl> - : PjRtDevice ( core . Id ( ) , std : : move ( local_device_state ) , <nl> - std : : move ( device_kind ) , host_id ) , <nl> + : PjRtStreamExecutorDevice ( core . Id ( ) , std : : move ( local_device_state ) , <nl> + std : : move ( device_kind ) , host_id ) , <nl> core_ ( core ) , <nl> coords_ ( coords ) { } <nl> <nl> mmm a / tensorflow / compiler / xla / python / BUILD <nl> ppp b / tensorflow / compiler / xla / python / BUILD <nl> cc_library ( <nl> name = " types " , <nl> srcs = [ " types . cc " ] , <nl> hdrs = [ " types . h " ] , <nl> + compatible_with = [ ] , <nl> copts = [ <nl> " - fexceptions " , <nl> " - fno - strict - aliasing " , <nl> ] , <nl> features = [ " - use_header_modules " ] , <nl> deps = [ <nl> - " : bfloat16 " , <nl> " / / tensorflow / compiler / xla : literal " , <nl> " / / tensorflow / compiler / xla : shape_util " , <nl> " / / tensorflow / compiler / xla : status " , <nl> cc_library ( <nl> " / / tensorflow / compiler / xla : xla_data_proto_cc " , <nl> " / / tensorflow / compiler / xla / pjrt : pjrt_client " , <nl> " / / tensorflow / core : lib " , <nl> + " / / tensorflow / python : bfloat16_lib " , <nl> " / / third_party / py / numpy : headers " , <nl> " @ com_google_absl / / absl / container : flat_hash_map " , <nl> " @ com_google_absl / / absl / container : inlined_vector " , <nl> cc_library ( <nl> ] , <nl> ) <nl> <nl> - cc_library ( <nl> - name = " bfloat16 " , <nl> - srcs = [ " bfloat16 . cc " ] , <nl> - hdrs = [ " bfloat16 . h " ] , <nl> - copts = [ <nl> - " - fexceptions " , <nl> - " - fno - strict - aliasing " , <nl> - ] , <nl> - features = [ " - use_header_modules " ] , <nl> - deps = [ <nl> - " / / tensorflow / compiler / xla : statusor " , <nl> - " / / tensorflow / compiler / xla : types " , <nl> - " / / tensorflow / compiler / xla : util " , <nl> - " / / tensorflow / core / platform : bfloat16 " , <nl> - " / / tensorflow / core / platform : logging " , <nl> - " / / third_party / py / numpy : headers " , <nl> - " / / third_party / python_runtime : headers " , # buildcleaner : keep <nl> - " @ com_google_absl / / absl / strings " , <nl> - " @ pybind11 " , <nl> - ] , <nl> - ) <nl> - <nl> - py_test ( <nl> - name = " bfloat16_test " , <nl> - srcs = [ " bfloat16_test . py " ] , <nl> - main = " bfloat16_test . py " , <nl> - python_version = " PY3 " , <nl> - tags = [ " no_oss " ] , <nl> - deps = [ <nl> - " : xla_client " , <nl> - " : xla_extension " , <nl> - " @ absl_py / / absl / testing : absltest " , <nl> - " @ absl_py / / absl / testing : parameterized " , <nl> - ] + xla_py_test_deps ( ) , <nl> - ) <nl> - <nl> cc_library ( <nl> name = " py_client " , <nl> srcs = [ <nl> cc_library ( <nl> " py_client . h " , <nl> " py_executable . h " , <nl> ] , <nl> + compatible_with = [ ] , <nl> copts = [ <nl> " - fexceptions " , <nl> " - fno - strict - aliasing " , <nl> cc_library ( <nl> name = " dlpack " , <nl> srcs = [ " dlpack . cc " ] , <nl> hdrs = [ " dlpack . h " ] , <nl> + compatible_with = [ ] , <nl> copts = [ <nl> " - fexceptions " , <nl> " - fno - strict - aliasing " , <nl> cc_library ( <nl> name = " jax_jit " , <nl> srcs = [ " jax_jit . cc " ] , <nl> hdrs = [ " jax_jit . 
h " ] , <nl> + compatible_with = [ ] , <nl> copts = [ <nl> " - fexceptions " , <nl> " - fno - strict - aliasing " , <nl> cc_library ( <nl> name = " ops " , <nl> srcs = [ " ops . cc " ] , <nl> hdrs = [ " ops . h " ] , <nl> + compatible_with = [ ] , <nl> copts = [ <nl> " - fexceptions " , <nl> " - fno - strict - aliasing " , <nl> cc_library ( <nl> name = " outfeed_receiver_py " , <nl> srcs = [ " outfeed_receiver_py . cc " ] , <nl> hdrs = [ " outfeed_receiver_py . h " ] , <nl> + compatible_with = [ ] , <nl> copts = [ <nl> " - fexceptions " , <nl> " - fno - strict - aliasing " , <nl> cc_library ( <nl> name = " pytree " , <nl> srcs = [ " pytree . cc " ] , <nl> hdrs = [ " pytree . h " ] , <nl> + compatible_with = [ ] , <nl> copts = [ <nl> " - fexceptions " , <nl> " - fno - strict - aliasing " , <nl> ] , <nl> features = [ " - use_header_modules " ] , <nl> deps = [ <nl> + " : types " , <nl> " @ com_google_absl / / absl / algorithm : container " , <nl> " @ com_google_absl / / absl / container : flat_hash_map " , <nl> " @ com_google_absl / / absl / hash " , <nl> cc_library ( <nl> name = " xla_compiler " , <nl> srcs = [ " xla_compiler . cc " ] , <nl> hdrs = [ " xla_compiler . h " ] , <nl> + compatible_with = [ ] , <nl> copts = [ <nl> " - fexceptions " , <nl> " - fno - strict - aliasing " , <nl> pybind_extension ( <nl> features = [ " - use_header_modules " ] , <nl> module_name = " xla_extension " , <nl> deps = [ <nl> - " : bfloat16 " , <nl> " : dlpack " , <nl> " : jax_jit " , <nl> " : ops " , <nl> pybind_extension ( <nl> # without any TF dependencies as " jaxlib " on Pypi , and " jaxlib " does <nl> # not require Tensorflow . <nl> " / / tensorflow / core : lib_internal_impl " , # buildcleaner : keep <nl> + " / / tensorflow / python : bfloat16_lib " , <nl> " / / tensorflow / stream_executor : device_memory_allocator " , <nl> " / / tensorflow / stream_executor : platform " , <nl> ] + select ( { <nl> deleted file mode 100644 <nl> index 5f96c494c2504 . . 0000000000000 <nl> mmm a / tensorflow / compiler / xla / python / bfloat16 . cc <nl> ppp / dev / null <nl> <nl> - / * Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - <nl> - # include " tensorflow / compiler / xla / python / bfloat16 . h " <nl> - <nl> - # include < array > <nl> - # include < locale > <nl> - / / Place ` < locale > ` before < Python . h > to avoid a build failure in macOS . <nl> - # include < Python . h > <nl> - <nl> - # define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION <nl> - <nl> - # include " numpy / arrayobject . h " <nl> - # include " numpy / ufuncobject . h " <nl> - # include " absl / strings / str_cat . h " <nl> - # include " tensorflow / compiler / xla / types . h " <nl> - # include " tensorflow / compiler / xla / util . 
h " <nl> - # include " tensorflow / core / platform / bfloat16 . h " <nl> - # include " tensorflow / core / platform / logging . h " <nl> - <nl> - namespace xla { <nl> - namespace { <nl> - <nl> - namespace py = pybind11 ; <nl> - <nl> - struct PyDecrefDeleter { <nl> - void operator ( ) ( PyObject * p ) const { Py_DECREF ( p ) ; } <nl> - } ; <nl> - <nl> - / / Safe container for an owned PyObject . On destruction , the reference count of <nl> - / / the contained object will be decremented . <nl> - using Safe_PyObjectPtr = std : : unique_ptr < PyObject , PyDecrefDeleter > ; <nl> - Safe_PyObjectPtr make_safe ( PyObject * object ) { <nl> - return Safe_PyObjectPtr ( object ) ; <nl> - } <nl> - <nl> - bool PyLong_CheckNoOverflow ( PyObject * object ) { <nl> - if ( ! PyLong_Check ( object ) ) { <nl> - return false ; <nl> - } <nl> - int overflow = 0 ; <nl> - PyLong_AsLongAndOverflow ( object , & overflow ) ; <nl> - return ( overflow = = 0 ) ; <nl> - } <nl> - <nl> - / / Registered numpy type ID . Global variable populated by the registration code . <nl> - / / Protected by the GIL . <nl> - int npy_bfloat16 = - 1 ; <nl> - <nl> - / / Forward declaration . <nl> - extern PyTypeObject PyBfloat16_Type ; <nl> - <nl> - / / Representation of a Python bfloat16 object . <nl> - struct PyBfloat16 { <nl> - PyObject_HEAD ; / / Python object header <nl> - bfloat16 value ; <nl> - } ; <nl> - <nl> - / / Returns true if ' object ' is a PyBfloat16 . <nl> - bool PyBfloat16_Check ( PyObject * object ) { <nl> - return PyObject_IsInstance ( object , <nl> - reinterpret_cast < PyObject * > ( & PyBfloat16_Type ) ) ; <nl> - } <nl> - <nl> - / / Extracts the value of a PyBfloat16 object . <nl> - bfloat16 PyBfloat16_Bfloat16 ( PyObject * object ) { <nl> - return reinterpret_cast < PyBfloat16 * > ( object ) - > value ; <nl> - } <nl> - <nl> - / / Constructs a PyBfloat16 object from a bfloat16 . <nl> - Safe_PyObjectPtr PyBfloat16_FromBfloat16 ( bfloat16 x ) { <nl> - Safe_PyObjectPtr ref = <nl> - make_safe ( PyBfloat16_Type . tp_alloc ( & PyBfloat16_Type , 0 ) ) ; <nl> - PyBfloat16 * p = reinterpret_cast < PyBfloat16 * > ( ref . get ( ) ) ; <nl> - if ( p ) { <nl> - p - > value = x ; <nl> - } <nl> - return ref ; <nl> - } <nl> - <nl> - / / Converts a Python object to a bfloat16 value . Returns true on success , <nl> - / / returns false and reports a Python error on failure . 
<nl> - bool CastToBfloat16 ( PyObject * arg , bfloat16 * output ) { <nl> - if ( PyBfloat16_Check ( arg ) ) { <nl> - * output = PyBfloat16_Bfloat16 ( arg ) ; <nl> - return true ; <nl> - } <nl> - if ( PyFloat_Check ( arg ) ) { <nl> - double d = PyFloat_AsDouble ( arg ) ; <nl> - if ( PyErr_Occurred ( ) ) { <nl> - return false ; <nl> - } <nl> - / / TODO ( phawkins ) : check for overflow <nl> - * output = bfloat16 ( d ) ; <nl> - return true ; <nl> - } <nl> - if ( PyLong_CheckNoOverflow ( arg ) ) { <nl> - long l = PyLong_AsLong ( arg ) ; / / NOLINT <nl> - if ( PyErr_Occurred ( ) ) { <nl> - return false ; <nl> - } <nl> - / / TODO ( phawkins ) : check for overflow <nl> - * output = bfloat16 ( static_cast < float > ( l ) ) ; <nl> - return true ; <nl> - } <nl> - if ( PyArray_IsScalar ( arg , Half ) ) { <nl> - Eigen : : half f ; <nl> - PyArray_ScalarAsCtype ( arg , & f ) ; <nl> - * output = bfloat16 ( f ) ; <nl> - return true ; <nl> - } <nl> - if ( PyArray_IsScalar ( arg , Float ) ) { <nl> - float f ; <nl> - PyArray_ScalarAsCtype ( arg , & f ) ; <nl> - * output = bfloat16 ( f ) ; <nl> - return true ; <nl> - } <nl> - if ( PyArray_IsScalar ( arg , Double ) ) { <nl> - double f ; <nl> - PyArray_ScalarAsCtype ( arg , & f ) ; <nl> - * output = bfloat16 ( f ) ; <nl> - return true ; <nl> - } <nl> - if ( PyArray_IsZeroDim ( arg ) ) { <nl> - Safe_PyObjectPtr ref ; <nl> - PyArrayObject * arr = reinterpret_cast < PyArrayObject * > ( arg ) ; <nl> - if ( PyArray_TYPE ( arr ) ! = npy_bfloat16 ) { <nl> - ref = make_safe ( PyArray_Cast ( arr , npy_bfloat16 ) ) ; <nl> - if ( PyErr_Occurred ( ) ) { <nl> - return false ; <nl> - } <nl> - arg = ref . get ( ) ; <nl> - arr = reinterpret_cast < PyArrayObject * > ( arg ) ; <nl> - } <nl> - * output = * reinterpret_cast < bfloat16 * > ( PyArray_DATA ( arr ) ) ; <nl> - return true ; <nl> - } <nl> - return false ; <nl> - } <nl> - <nl> - bool SafeCastToBfloat16 ( PyObject * arg , bfloat16 * output ) { <nl> - if ( PyBfloat16_Check ( arg ) ) { <nl> - * output = PyBfloat16_Bfloat16 ( arg ) ; <nl> - return true ; <nl> - } <nl> - return false ; <nl> - } <nl> - <nl> - / / Converts a PyBfloat16 into a PyFloat . <nl> - PyObject * PyBfloat16_Float ( PyObject * self ) { <nl> - bfloat16 x = PyBfloat16_Bfloat16 ( self ) ; <nl> - return PyFloat_FromDouble ( static_cast < double > ( x ) ) ; <nl> - } <nl> - <nl> - / / Converts a PyBfloat16 into a PyInt . <nl> - PyObject * PyBfloat16_Int ( PyObject * self ) { <nl> - bfloat16 x = PyBfloat16_Bfloat16 ( self ) ; <nl> - long y = static_cast < long > ( x ) ; / / NOLINT <nl> - return PyLong_FromLong ( y ) ; <nl> - } <nl> - <nl> - / / Negates a PyBfloat16 . <nl> - PyObject * PyBfloat16_Negative ( PyObject * self ) { <nl> - bfloat16 x = PyBfloat16_Bfloat16 ( self ) ; <nl> - return PyBfloat16_FromBfloat16 ( - x ) . release ( ) ; <nl> - } <nl> - <nl> - PyObject * PyBfloat16_Add ( PyObject * a , PyObject * b ) { <nl> - bfloat16 x , y ; <nl> - if ( SafeCastToBfloat16 ( a , & x ) & & SafeCastToBfloat16 ( b , & y ) ) { <nl> - return PyBfloat16_FromBfloat16 ( x + y ) . release ( ) ; <nl> - } <nl> - return PyArray_Type . tp_as_number - > nb_add ( a , b ) ; <nl> - } <nl> - <nl> - PyObject * PyBfloat16_Subtract ( PyObject * a , PyObject * b ) { <nl> - bfloat16 x , y ; <nl> - if ( SafeCastToBfloat16 ( a , & x ) & & SafeCastToBfloat16 ( b , & y ) ) { <nl> - return PyBfloat16_FromBfloat16 ( x - y ) . release ( ) ; <nl> - } <nl> - return PyArray_Type . 
tp_as_number - > nb_subtract ( a , b ) ; <nl> - } <nl> - <nl> - PyObject * PyBfloat16_Multiply ( PyObject * a , PyObject * b ) { <nl> - bfloat16 x , y ; <nl> - if ( SafeCastToBfloat16 ( a , & x ) & & SafeCastToBfloat16 ( b , & y ) ) { <nl> - return PyBfloat16_FromBfloat16 ( x * y ) . release ( ) ; <nl> - } <nl> - return PyArray_Type . tp_as_number - > nb_multiply ( a , b ) ; <nl> - } <nl> - <nl> - PyObject * PyBfloat16_TrueDivide ( PyObject * a , PyObject * b ) { <nl> - bfloat16 x , y ; <nl> - if ( SafeCastToBfloat16 ( a , & x ) & & SafeCastToBfloat16 ( b , & y ) ) { <nl> - return PyBfloat16_FromBfloat16 ( x / y ) . release ( ) ; <nl> - } <nl> - return PyArray_Type . tp_as_number - > nb_true_divide ( a , b ) ; <nl> - } <nl> - <nl> - / / Python number methods for PyBfloat16 objects . <nl> - PyNumberMethods PyBfloat16_AsNumber = { <nl> - PyBfloat16_Add , / / nb_add <nl> - PyBfloat16_Subtract , / / nb_subtract <nl> - PyBfloat16_Multiply , / / nb_multiply <nl> - nullptr , / / nb_remainder <nl> - nullptr , / / nb_divmod <nl> - nullptr , / / nb_power <nl> - PyBfloat16_Negative , / / nb_negative <nl> - nullptr , / / nb_positive <nl> - nullptr , / / nb_absolute <nl> - nullptr , / / nb_nonzero <nl> - nullptr , / / nb_invert <nl> - nullptr , / / nb_lshift <nl> - nullptr , / / nb_rshift <nl> - nullptr , / / nb_and <nl> - nullptr , / / nb_xor <nl> - nullptr , / / nb_or <nl> - PyBfloat16_Int , / / nb_int <nl> - nullptr , / / reserved <nl> - PyBfloat16_Float , / / nb_float <nl> - <nl> - nullptr , / / nb_inplace_add <nl> - nullptr , / / nb_inplace_subtract <nl> - nullptr , / / nb_inplace_multiply <nl> - nullptr , / / nb_inplace_remainder <nl> - nullptr , / / nb_inplace_power <nl> - nullptr , / / nb_inplace_lshift <nl> - nullptr , / / nb_inplace_rshift <nl> - nullptr , / / nb_inplace_and <nl> - nullptr , / / nb_inplace_xor <nl> - nullptr , / / nb_inplace_or <nl> - <nl> - nullptr , / / nb_floor_divide <nl> - PyBfloat16_TrueDivide , / / nb_true_divide <nl> - nullptr , / / nb_inplace_floor_divide <nl> - nullptr , / / nb_inplace_true_divide <nl> - nullptr , / / nb_index <nl> - } ; <nl> - <nl> - / / Constructs a new PyBfloat16 . <nl> - PyObject * PyBfloat16_New ( PyTypeObject * type , PyObject * args , PyObject * kwds ) { <nl> - if ( kwds & & PyDict_Size ( kwds ) ) { <nl> - PyErr_SetString ( PyExc_TypeError , " constructor takes no keyword arguments " ) ; <nl> - return nullptr ; <nl> - } <nl> - Py_ssize_t size = PyTuple_Size ( args ) ; <nl> - if ( size ! = 1 ) { <nl> - PyErr_SetString ( PyExc_TypeError , <nl> - " expected number as argument to bfloat16 constructor " ) ; <nl> - return nullptr ; <nl> - } <nl> - PyObject * arg = PyTuple_GetItem ( args , 0 ) ; <nl> - <nl> - bfloat16 value ; <nl> - if ( PyBfloat16_Check ( arg ) ) { <nl> - Py_INCREF ( arg ) ; <nl> - return arg ; <nl> - } else if ( CastToBfloat16 ( arg , & value ) ) { <nl> - return PyBfloat16_FromBfloat16 ( value ) . release ( ) ; <nl> - } else if ( PyArray_Check ( arg ) ) { <nl> - PyArrayObject * arr = reinterpret_cast < PyArrayObject * > ( arg ) ; <nl> - if ( PyArray_TYPE ( arr ) ! = npy_bfloat16 ) { <nl> - return PyArray_Cast ( arr , npy_bfloat16 ) ; <nl> - } else { <nl> - Py_INCREF ( arg ) ; <nl> - return arg ; <nl> - } <nl> - } <nl> - PyErr_Format ( PyExc_TypeError , " expected number , got % s " , <nl> - arg - > ob_type - > tp_name ) ; <nl> - return nullptr ; <nl> - } <nl> - <nl> - / / Comparisons on PyBfloat16s . <nl> - PyObject * PyBfloat16_RichCompare ( PyObject * a , PyObject * b , int op ) { <nl> - bfloat16 x , y ; <nl> - if ( ! 
SafeCastToBfloat16 ( a , & x ) | | ! SafeCastToBfloat16 ( b , & y ) ) { <nl> - return PyGenericArrType_Type . tp_richcompare ( a , b , op ) ; <nl> - } <nl> - bool result ; <nl> - switch ( op ) { <nl> - case Py_LT : <nl> - result = x < y ; <nl> - break ; <nl> - case Py_LE : <nl> - result = x < = y ; <nl> - break ; <nl> - case Py_EQ : <nl> - result = x = = y ; <nl> - break ; <nl> - case Py_NE : <nl> - result = x ! = y ; <nl> - break ; <nl> - case Py_GT : <nl> - result = x > y ; <nl> - break ; <nl> - case Py_GE : <nl> - result = x > = y ; <nl> - break ; <nl> - default : <nl> - LOG ( FATAL ) < < " Invalid op type " < < op ; <nl> - } <nl> - return PyBool_FromLong ( result ) ; <nl> - } <nl> - <nl> - / / Implementation of repr ( ) for PyBfloat16 . <nl> - PyObject * PyBfloat16_Repr ( PyObject * self ) { <nl> - bfloat16 x = reinterpret_cast < PyBfloat16 * > ( self ) - > value ; <nl> - std : : string v = absl : : StrCat ( static_cast < float > ( x ) ) ; <nl> - return PyUnicode_FromString ( v . c_str ( ) ) ; <nl> - } <nl> - <nl> - / / Implementation of str ( ) for PyBfloat16 . <nl> - PyObject * PyBfloat16_Str ( PyObject * self ) { <nl> - bfloat16 x = reinterpret_cast < PyBfloat16 * > ( self ) - > value ; <nl> - std : : string v = absl : : StrCat ( static_cast < float > ( x ) ) ; <nl> - return PyUnicode_FromString ( v . c_str ( ) ) ; <nl> - } <nl> - <nl> - / / Hash function for PyBfloat16 . We use the identity function , which is a weak <nl> - / / hash function . <nl> - Py_hash_t PyBfloat16_Hash ( PyObject * self ) { <nl> - bfloat16 x = reinterpret_cast < PyBfloat16 * > ( self ) - > value ; <nl> - return x . value ; <nl> - } <nl> - <nl> - / / Python type for PyBfloat16 objects . <nl> - PyTypeObject PyBfloat16_Type = { <nl> - PyVarObject_HEAD_INIT ( nullptr , 0 ) " bfloat16 " , / / tp_name <nl> - sizeof ( PyBfloat16 ) , / / tp_basicsize <nl> - 0 , / / tp_itemsize <nl> - nullptr , / / tp_dealloc <nl> - # if PY_VERSION_HEX < 0x03080000 <nl> - nullptr , / / tp_print <nl> - # else <nl> - 0 , / / tp_vectorcall_offset <nl> - # endif <nl> - nullptr , / / tp_getattr <nl> - nullptr , / / tp_setattr <nl> - nullptr , / / tp_compare / tp_reserved <nl> - PyBfloat16_Repr , / / tp_repr <nl> - & PyBfloat16_AsNumber , / / tp_as_number <nl> - nullptr , / / tp_as_sequence <nl> - nullptr , / / tp_as_mapping <nl> - PyBfloat16_Hash , / / tp_hash <nl> - nullptr , / / tp_call <nl> - PyBfloat16_Str , / / tp_str <nl> - nullptr , / / tp_getattro <nl> - nullptr , / / tp_setattro <nl> - nullptr , / / tp_as_buffer <nl> - / / tp_flags <nl> - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE , <nl> - " bfloat16 floating - point values " , / / tp_doc <nl> - nullptr , / / tp_traverse <nl> - nullptr , / / tp_clear <nl> - PyBfloat16_RichCompare , / / tp_richcompare <nl> - 0 , / / tp_weaklistoffset <nl> - nullptr , / / tp_iter <nl> - nullptr , / / tp_iternext <nl> - nullptr , / / tp_methods <nl> - nullptr , / / tp_members <nl> - nullptr , / / tp_getset <nl> - nullptr , / / tp_base <nl> - nullptr , / / tp_dict <nl> - nullptr , / / tp_descr_get <nl> - nullptr , / / tp_descr_set <nl> - 0 , / / tp_dictoffset <nl> - nullptr , / / tp_init <nl> - nullptr , / / tp_alloc <nl> - PyBfloat16_New , / / tp_new <nl> - nullptr , / / tp_free <nl> - nullptr , / / tp_is_gc <nl> - nullptr , / / tp_bases <nl> - nullptr , / / tp_mro <nl> - nullptr , / / tp_cache <nl> - nullptr , / / tp_subclasses <nl> - nullptr , / / tp_weaklist <nl> - nullptr , / / tp_del <nl> - 0 , / / tp_version_tag <nl> - } ; <nl> - <nl> - / / Numpy support <nl> - <nl> - PyArray_ArrFuncs 
NPyBfloat16_ArrFuncs ; <nl> - <nl> - PyArray_Descr NPyBfloat16_Descr = { <nl> - PyObject_HEAD_INIT ( nullptr ) / / <nl> - / * typeobj = * / <nl> - ( & PyBfloat16_Type ) , <nl> - / / We must register bfloat16 with a kind other than " f " , because numpy <nl> - / / considers two types with the same kind and size to be equal , but <nl> - / / float16 ! = bfloat16 . <nl> - / / The downside of this is that NumPy scalar promotion does not work with <nl> - / / bfloat16 values . <nl> - / * kind = * / ' V ' , <nl> - / / TODO ( phawkins ) : there doesn ' t seem to be a way of guaranteeing a type <nl> - / / character is unique . <nl> - / * type = * / ' E ' , <nl> - / * byteorder = * / ' = ' , <nl> - / * flags = * / NPY_NEEDS_PYAPI | NPY_USE_GETITEM | NPY_USE_SETITEM , <nl> - / * type_num = * / 0 , <nl> - / * elsize = * / sizeof ( bfloat16 ) , <nl> - / * alignment = * / alignof ( bfloat16 ) , <nl> - / * subarray = * / nullptr , <nl> - / * fields = * / nullptr , <nl> - / * names = * / nullptr , <nl> - / * f = * / & NPyBfloat16_ArrFuncs , <nl> - / * metadata = * / nullptr , <nl> - / * c_metadata = * / nullptr , <nl> - / * hash = * / - 1 , / / - 1 means " not computed yet " . <nl> - } ; <nl> - <nl> - / / Implementations of NumPy array methods . <nl> - <nl> - PyObject * NPyBfloat16_GetItem ( void * data , void * arr ) { <nl> - bfloat16 x ; <nl> - memcpy ( & x , data , sizeof ( bfloat16 ) ) ; <nl> - return PyBfloat16_FromBfloat16 ( x ) . release ( ) ; <nl> - } <nl> - <nl> - int NPyBfloat16_SetItem ( PyObject * item , void * data , void * arr ) { <nl> - bfloat16 x ; <nl> - if ( ! CastToBfloat16 ( item , & x ) ) { <nl> - PyErr_Format ( PyExc_TypeError , " expected number , got % s " , <nl> - item - > ob_type - > tp_name ) ; <nl> - return - 1 ; <nl> - } <nl> - memcpy ( data , & x , sizeof ( bfloat16 ) ) ; <nl> - return 0 ; <nl> - } <nl> - <nl> - void ByteSwap16 ( void * value ) { <nl> - char * p = reinterpret_cast < char * > ( value ) ; <nl> - std : : swap ( p [ 0 ] , p [ 1 ] ) ; <nl> - } <nl> - <nl> - int NPyBfloat16_Compare ( const void * a , const void * b , void * arr ) { <nl> - bfloat16 x ; <nl> - memcpy ( & x , a , sizeof ( bfloat16 ) ) ; <nl> - <nl> - bfloat16 y ; <nl> - memcpy ( & y , b , sizeof ( bfloat16 ) ) ; <nl> - <nl> - if ( x < y ) { <nl> - return - 1 ; <nl> - } <nl> - if ( y < x ) { <nl> - return 1 ; <nl> - } <nl> - / / NaNs sort to the end . <nl> - if ( ! Eigen : : numext : : isnan ( x ) & & Eigen : : numext : : isnan ( y ) ) { <nl> - return - 1 ; <nl> - } <nl> - if ( Eigen : : numext : : isnan ( x ) & & ! Eigen : : numext : : isnan ( y ) ) { <nl> - return 1 ; <nl> - } <nl> - return 0 ; <nl> - } <nl> - <nl> - void NPyBfloat16_CopySwapN ( void * dstv , npy_intp dstride , void * srcv , <nl> - npy_intp sstride , npy_intp n , int swap , void * arr ) { <nl> - char * dst = reinterpret_cast < char * > ( dstv ) ; <nl> - char * src = reinterpret_cast < char * > ( srcv ) ; <nl> - if ( ! 
src ) { <nl> - return ; <nl> - } <nl> - if ( swap ) { <nl> - for ( npy_intp i = 0 ; i < n ; i + + ) { <nl> - char * r = dst + dstride * i ; <nl> - memcpy ( r , src + sstride * i , sizeof ( uint16_t ) ) ; <nl> - ByteSwap16 ( r ) ; <nl> - } <nl> - } else if ( dstride = = sizeof ( uint16_t ) & & sstride = = sizeof ( uint16_t ) ) { <nl> - memcpy ( dst , src , n * sizeof ( uint16_t ) ) ; <nl> - } else { <nl> - for ( npy_intp i = 0 ; i < n ; i + + ) { <nl> - memcpy ( dst + dstride * i , src + sstride * i , sizeof ( uint16_t ) ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - void NPyBfloat16_CopySwap ( void * dst , void * src , int swap , void * arr ) { <nl> - if ( ! src ) { <nl> - return ; <nl> - } <nl> - memcpy ( dst , src , sizeof ( uint16_t ) ) ; <nl> - if ( swap ) { <nl> - ByteSwap16 ( dst ) ; <nl> - } <nl> - } <nl> - <nl> - npy_bool NPyBfloat16_NonZero ( void * data , void * arr ) { <nl> - bfloat16 x ; <nl> - memcpy ( & x , data , sizeof ( x ) ) ; <nl> - return x ! = static_cast < bfloat16 > ( 0 ) ; <nl> - } <nl> - <nl> - int NPyBfloat16_Fill ( void * buffer_raw , npy_intp length , void * ignored ) { <nl> - bfloat16 * const buffer = reinterpret_cast < bfloat16 * > ( buffer_raw ) ; <nl> - const float start ( buffer [ 0 ] ) ; <nl> - const float delta = static_cast < float > ( buffer [ 1 ] ) - start ; <nl> - for ( npy_intp i = 2 ; i < length ; + + i ) { <nl> - buffer [ i ] = static_cast < bfloat16 > ( start + i * delta ) ; <nl> - } <nl> - return 0 ; <nl> - } <nl> - <nl> - void NPyBfloat16_DotFunc ( void * ip1 , npy_intp is1 , void * ip2 , npy_intp is2 , <nl> - void * op , npy_intp n , void * arr ) { <nl> - char * c1 = reinterpret_cast < char * > ( ip1 ) ; <nl> - char * c2 = reinterpret_cast < char * > ( ip2 ) ; <nl> - float acc = 0 . 0f ; <nl> - for ( npy_intp i = 0 ; i < n ; + + i ) { <nl> - bfloat16 * const b1 = reinterpret_cast < bfloat16 * > ( c1 ) ; <nl> - bfloat16 * const b2 = reinterpret_cast < bfloat16 * > ( c2 ) ; <nl> - acc + = static_cast < float > ( * b1 ) * static_cast < float > ( * b2 ) ; <nl> - c1 + = is1 ; <nl> - c2 + = is2 ; <nl> - } <nl> - bfloat16 * out = reinterpret_cast < bfloat16 * > ( op ) ; <nl> - * out = static_cast < bfloat16 > ( acc ) ; <nl> - } <nl> - <nl> - int NPyBfloat16_CompareFunc ( const void * v1 , const void * v2 , void * arr ) { <nl> - bfloat16 b1 = * reinterpret_cast < const bfloat16 * > ( v1 ) ; <nl> - bfloat16 b2 = * reinterpret_cast < const bfloat16 * > ( v2 ) ; <nl> - if ( b1 < b2 ) { <nl> - return - 1 ; <nl> - } <nl> - if ( b1 > b2 ) { <nl> - return 1 ; <nl> - } <nl> - return 0 ; <nl> - } <nl> - <nl> - int NPyBfloat16_ArgMaxFunc ( void * data , npy_intp n , npy_intp * max_ind , <nl> - void * arr ) { <nl> - const bfloat16 * bdata = reinterpret_cast < const bfloat16 * > ( data ) ; <nl> - float max_val = - std : : numeric_limits < float > : : infinity ( ) ; <nl> - for ( npy_intp i = 0 ; i < n ; + + i ) { <nl> - if ( static_cast < float > ( bdata [ i ] ) > max_val ) { <nl> - max_val = static_cast < float > ( bdata [ i ] ) ; <nl> - * max_ind = i ; <nl> - } <nl> - } <nl> - return 0 ; <nl> - } <nl> - <nl> - int NPyBfloat16_ArgMinFunc ( void * data , npy_intp n , npy_intp * min_ind , <nl> - void * arr ) { <nl> - const bfloat16 * bdata = reinterpret_cast < const bfloat16 * > ( data ) ; <nl> - float min_val = std : : numeric_limits < float > : : infinity ( ) ; <nl> - for ( npy_intp i = 0 ; i < n ; + + i ) { <nl> - if ( static_cast < float > ( bdata [ i ] ) < min_val ) { <nl> - min_val = static_cast < float > ( bdata [ i ] ) ; <nl> - * min_ind = i ; <nl> - } <nl> - 
} <nl> - return 0 ; <nl> - } <nl> - <nl> - / / NumPy casts <nl> - <nl> - template < typename T , typename Enable = void > <nl> - struct TypeDescriptor { <nl> - / / typedef . . . T ; / / Representation type in memory for NumPy values of type <nl> - / / static int Dtype ( ) { return NPY_ . . . ; } / / Numpy type number for T . <nl> - } ; <nl> - <nl> - template < > <nl> - struct TypeDescriptor < bfloat16 > { <nl> - typedef bfloat16 T ; <nl> - static int Dtype ( ) { return npy_bfloat16 ; } <nl> - } ; <nl> - <nl> - template < > <nl> - struct TypeDescriptor < uint8 > { <nl> - typedef uint8 T ; <nl> - static int Dtype ( ) { return NPY_UINT8 ; } <nl> - } ; <nl> - <nl> - template < > <nl> - struct TypeDescriptor < uint16 > { <nl> - typedef uint16 T ; <nl> - static int Dtype ( ) { return NPY_UINT16 ; } <nl> - } ; <nl> - <nl> - / / We register " int " , " long " , and " long long " types for portability across <nl> - / / Linux , where " int " and " long " are the same type , and Windows , where " long " <nl> - / / and " longlong " are the same type . <nl> - template < > <nl> - struct TypeDescriptor < unsigned int > { <nl> - typedef unsigned int T ; <nl> - static int Dtype ( ) { return NPY_UINT ; } <nl> - } ; <nl> - <nl> - template < > <nl> - struct TypeDescriptor < unsigned long > { / / NOLINT <nl> - typedef unsigned long T ; / / NOLINT <nl> - static int Dtype ( ) { return NPY_ULONG ; } <nl> - } ; <nl> - <nl> - template < > <nl> - struct TypeDescriptor < unsigned long long > { / / NOLINT <nl> - typedef unsigned long long T ; / / NOLINT <nl> - static int Dtype ( ) { return NPY_ULONGLONG ; } <nl> - } ; <nl> - <nl> - template < > <nl> - struct TypeDescriptor < int8 > { <nl> - typedef int8 T ; <nl> - static int Dtype ( ) { return NPY_INT8 ; } <nl> - } ; <nl> - <nl> - template < > <nl> - struct TypeDescriptor < int16 > { <nl> - typedef int16 T ; <nl> - static int Dtype ( ) { return NPY_INT16 ; } <nl> - } ; <nl> - <nl> - template < > <nl> - struct TypeDescriptor < int > { <nl> - typedef int T ; <nl> - static int Dtype ( ) { return NPY_INT ; } <nl> - } ; <nl> - <nl> - template < > <nl> - struct TypeDescriptor < long > { / / NOLINT <nl> - typedef long T ; / / NOLINT <nl> - static int Dtype ( ) { return NPY_LONG ; } <nl> - } ; <nl> - <nl> - template < > <nl> - struct TypeDescriptor < long long > { / / NOLINT <nl> - typedef long long T ; / / NOLINT <nl> - static int Dtype ( ) { return NPY_LONGLONG ; } <nl> - } ; <nl> - <nl> - template < > <nl> - struct TypeDescriptor < bool > { <nl> - typedef int8 T ; <nl> - static int Dtype ( ) { return NPY_BOOL ; } <nl> - } ; <nl> - <nl> - template < > <nl> - struct TypeDescriptor < Eigen : : half > { <nl> - typedef Eigen : : half T ; <nl> - static int Dtype ( ) { return NPY_HALF ; } <nl> - } ; <nl> - <nl> - template < > <nl> - struct TypeDescriptor < float > { <nl> - typedef float T ; <nl> - static int Dtype ( ) { return NPY_FLOAT ; } <nl> - } ; <nl> - <nl> - template < > <nl> - struct TypeDescriptor < double > { <nl> - typedef double T ; <nl> - static int Dtype ( ) { return NPY_DOUBLE ; } <nl> - } ; <nl> - <nl> - template < > <nl> - struct TypeDescriptor < complex64 > { <nl> - typedef complex64 T ; <nl> - static int Dtype ( ) { return NPY_COMPLEX64 ; } <nl> - } ; <nl> - <nl> - template < > <nl> - struct TypeDescriptor < complex128 > { <nl> - typedef complex128 T ; <nl> - static int Dtype ( ) { return NPY_COMPLEX128 ; } <nl> - } ; <nl> - <nl> - / / Performs a NumPy array cast from type ' From ' to ' To ' . 
<nl> - template < typename From , typename To > <nl> - void NPyCast ( void * from_void , void * to_void , npy_intp n , void * fromarr , <nl> - void * toarr ) { <nl> - const auto * from = <nl> - reinterpret_cast < typename TypeDescriptor < From > : : T * > ( from_void ) ; <nl> - auto * to = reinterpret_cast < typename TypeDescriptor < To > : : T * > ( to_void ) ; <nl> - for ( npy_intp i = 0 ; i < n ; + + i ) { <nl> - to [ i ] = <nl> - static_cast < typename TypeDescriptor < To > : : T > ( static_cast < To > ( from [ i ] ) ) ; <nl> - } <nl> - } <nl> - <nl> - / / Registers a cast between bfloat16 and type ' T ' . ' numpy_type ' is the NumPy <nl> - / / type corresponding to ' T ' . If ' cast_is_safe ' , registers that bfloat16 can be <nl> - / / safely coerced to T . <nl> - template < typename T > <nl> - bool RegisterBfloat16Cast ( int numpy_type , bool cast_is_safe ) { <nl> - if ( PyArray_RegisterCastFunc ( PyArray_DescrFromType ( numpy_type ) , npy_bfloat16 , <nl> - NPyCast < T , bfloat16 > ) < 0 ) { <nl> - return false ; <nl> - } <nl> - if ( PyArray_RegisterCastFunc ( & NPyBfloat16_Descr , numpy_type , <nl> - NPyCast < bfloat16 , T > ) < 0 ) { <nl> - return false ; <nl> - } <nl> - if ( cast_is_safe & & PyArray_RegisterCanCast ( & NPyBfloat16_Descr , numpy_type , <nl> - NPY_NOSCALAR ) < 0 ) { <nl> - return false ; <nl> - } <nl> - return true ; <nl> - } <nl> - <nl> - template < typename InType , typename OutType , typename Functor > <nl> - struct UnaryUFunc { <nl> - static std : : vector < int > Types ( ) { <nl> - return { TypeDescriptor < InType > : : Dtype ( ) , TypeDescriptor < OutType > : : Dtype ( ) } ; <nl> - } <nl> - static void Call ( char * * args , const npy_intp * dimensions , <nl> - const npy_intp * steps , void * data ) { <nl> - const char * i0 = args [ 0 ] ; <nl> - char * o = args [ 1 ] ; <nl> - for ( npy_intp k = 0 ; k < * dimensions ; k + + ) { <nl> - auto x = * reinterpret_cast < const typename TypeDescriptor < InType > : : T * > ( i0 ) ; <nl> - * reinterpret_cast < typename TypeDescriptor < OutType > : : T * > ( o ) = Functor ( ) ( x ) ; <nl> - i0 + = steps [ 0 ] ; <nl> - o + = steps [ 1 ] ; <nl> - } <nl> - } <nl> - } ; <nl> - <nl> - template < typename InType , typename OutType , typename OutType2 , <nl> - typename Functor > <nl> - struct UnaryUFunc2 { <nl> - static std : : vector < int > Types ( ) { <nl> - return { TypeDescriptor < InType > : : Dtype ( ) , TypeDescriptor < OutType > : : Dtype ( ) , <nl> - TypeDescriptor < OutType2 > : : Dtype ( ) } ; <nl> - } <nl> - static void Call ( char * * args , const npy_intp * dimensions , <nl> - const npy_intp * steps , void * data ) { <nl> - const char * i0 = args [ 0 ] ; <nl> - char * o0 = args [ 1 ] ; <nl> - char * o1 = args [ 2 ] ; <nl> - for ( npy_intp k = 0 ; k < * dimensions ; k + + ) { <nl> - auto x = * reinterpret_cast < const typename TypeDescriptor < InType > : : T * > ( i0 ) ; <nl> - std : : tie ( * reinterpret_cast < typename TypeDescriptor < OutType > : : T * > ( o0 ) , <nl> - * reinterpret_cast < typename TypeDescriptor < OutType2 > : : T * > ( o1 ) ) = <nl> - Functor ( ) ( x ) ; <nl> - i0 + = steps [ 0 ] ; <nl> - o0 + = steps [ 1 ] ; <nl> - o1 + = steps [ 2 ] ; <nl> - } <nl> - } <nl> - } ; <nl> - <nl> - template < typename InType , typename OutType , typename Functor > <nl> - struct BinaryUFunc { <nl> - static std : : vector < int > Types ( ) { <nl> - return { TypeDescriptor < InType > : : Dtype ( ) , TypeDescriptor < InType > : : Dtype ( ) , <nl> - TypeDescriptor < OutType > : : Dtype ( ) } ; <nl> - } <nl> - static 
void Call ( char * * args , const npy_intp * dimensions , <nl> - const npy_intp * steps , void * data ) { <nl> - const char * i0 = args [ 0 ] ; <nl> - const char * i1 = args [ 1 ] ; <nl> - char * o = args [ 2 ] ; <nl> - for ( npy_intp k = 0 ; k < * dimensions ; k + + ) { <nl> - auto x = * reinterpret_cast < const typename TypeDescriptor < InType > : : T * > ( i0 ) ; <nl> - auto y = * reinterpret_cast < const typename TypeDescriptor < InType > : : T * > ( i1 ) ; <nl> - * reinterpret_cast < typename TypeDescriptor < OutType > : : T * > ( o ) = <nl> - Functor ( ) ( x , y ) ; <nl> - i0 + = steps [ 0 ] ; <nl> - i1 + = steps [ 1 ] ; <nl> - o + = steps [ 2 ] ; <nl> - } <nl> - } <nl> - } ; <nl> - <nl> - template < typename InType , typename InType2 , typename OutType , typename Functor > <nl> - struct BinaryUFunc2 { <nl> - static std : : vector < int > Types ( ) { <nl> - return { TypeDescriptor < InType > : : Dtype ( ) , TypeDescriptor < InType2 > : : Dtype ( ) , <nl> - TypeDescriptor < OutType > : : Dtype ( ) } ; <nl> - } <nl> - static void Call ( char * * args , const npy_intp * dimensions , <nl> - const npy_intp * steps , void * data ) { <nl> - const char * i0 = args [ 0 ] ; <nl> - const char * i1 = args [ 1 ] ; <nl> - char * o = args [ 2 ] ; <nl> - for ( npy_intp k = 0 ; k < * dimensions ; k + + ) { <nl> - auto x = * reinterpret_cast < const typename TypeDescriptor < InType > : : T * > ( i0 ) ; <nl> - auto y = <nl> - * reinterpret_cast < const typename TypeDescriptor < InType2 > : : T * > ( i1 ) ; <nl> - * reinterpret_cast < typename TypeDescriptor < OutType > : : T * > ( o ) = <nl> - Functor ( ) ( x , y ) ; <nl> - i0 + = steps [ 0 ] ; <nl> - i1 + = steps [ 1 ] ; <nl> - o + = steps [ 2 ] ; <nl> - } <nl> - } <nl> - } ; <nl> - <nl> - template < typename UFunc > <nl> - bool RegisterUFunc ( PyObject * numpy , const char * name ) { <nl> - std : : vector < int > types = UFunc : : Types ( ) ; <nl> - PyUFuncGenericFunction fn = <nl> - reinterpret_cast < PyUFuncGenericFunction > ( UFunc : : Call ) ; <nl> - Safe_PyObjectPtr ufunc_obj = make_safe ( PyObject_GetAttrString ( numpy , name ) ) ; <nl> - if ( ! ufunc_obj ) { <nl> - return false ; <nl> - } <nl> - PyUFuncObject * ufunc = reinterpret_cast < PyUFuncObject * > ( ufunc_obj . get ( ) ) ; <nl> - if ( static_cast < int > ( types . size ( ) ) ! = ufunc - > nargs ) { <nl> - PyErr_Format ( PyExc_AssertionError , <nl> - " ufunc % s takes % d arguments , loop takes % lu " , name , <nl> - ufunc - > nargs , types . size ( ) ) ; <nl> - return false ; <nl> - } <nl> - if ( PyUFunc_RegisterLoopForType ( ufunc , npy_bfloat16 , fn , <nl> - const_cast < int * > ( types . data ( ) ) , <nl> - nullptr ) < 0 ) { <nl> - return false ; <nl> - } <nl> - return true ; <nl> - } <nl> - <nl> - namespace ufuncs { <nl> - <nl> - struct Add { <nl> - bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { return a + b ; } <nl> - } ; <nl> - struct Subtract { <nl> - bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { return a - b ; } <nl> - } ; <nl> - struct Multiply { <nl> - bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { return a * b ; } <nl> - } ; <nl> - struct TrueDivide { <nl> - bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { return a / b ; } <nl> - } ; <nl> - <nl> - std : : pair < float , float > divmod ( float a , float b ) { <nl> - if ( b = = 0 . 0f ) { <nl> - float nan = std : : numeric_limits < float > : : quiet_NaN ( ) ; <nl> - return { nan , nan } ; <nl> - } <nl> - float mod = std : : fmod ( a , b ) ; <nl> - float div = ( a - mod ) / b ; <nl> - if ( mod ! = 0 . 
0f ) { <nl> - if ( ( b < 0 . 0f ) ! = ( mod < 0 . 0f ) ) { <nl> - mod + = b ; <nl> - div - = 1 . 0f ; <nl> - } <nl> - } else { <nl> - mod = std : : copysign ( 0 . 0f , b ) ; <nl> - } <nl> - <nl> - float floordiv ; <nl> - if ( div ! = 0 . 0f ) { <nl> - floordiv = std : : floor ( div ) ; <nl> - if ( div - floordiv > 0 . 5f ) { <nl> - floordiv + = 1 . 0f ; <nl> - } <nl> - } else { <nl> - floordiv = std : : copysign ( 0 . 0f , a / b ) ; <nl> - } <nl> - return { floordiv , mod } ; <nl> - } <nl> - <nl> - struct FloorDivide { <nl> - bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> - return bfloat16 ( divmod ( static_cast < float > ( a ) , static_cast < float > ( b ) ) . first ) ; <nl> - } <nl> - } ; <nl> - struct Remainder { <nl> - bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> - return bfloat16 ( <nl> - divmod ( static_cast < float > ( a ) , static_cast < float > ( b ) ) . second ) ; <nl> - } <nl> - } ; <nl> - struct DivmodUFunc { <nl> - static std : : vector < int > Types ( ) { <nl> - return { npy_bfloat16 , npy_bfloat16 , npy_bfloat16 , npy_bfloat16 } ; <nl> - } <nl> - static void Call ( char * * args , npy_intp * dimensions , npy_intp * steps , <nl> - void * data ) { <nl> - const char * i0 = args [ 0 ] ; <nl> - const char * i1 = args [ 1 ] ; <nl> - char * o0 = args [ 2 ] ; <nl> - char * o1 = args [ 3 ] ; <nl> - for ( npy_intp k = 0 ; k < * dimensions ; k + + ) { <nl> - bfloat16 x = * reinterpret_cast < const bfloat16 * > ( i0 ) ; <nl> - bfloat16 y = * reinterpret_cast < const bfloat16 * > ( i1 ) ; <nl> - float floordiv , mod ; <nl> - std : : tie ( floordiv , mod ) = <nl> - divmod ( static_cast < float > ( x ) , static_cast < float > ( y ) ) ; <nl> - * reinterpret_cast < bfloat16 * > ( o0 ) = bfloat16 ( floordiv ) ; <nl> - * reinterpret_cast < bfloat16 * > ( o1 ) = bfloat16 ( mod ) ; <nl> - i0 + = steps [ 0 ] ; <nl> - i1 + = steps [ 1 ] ; <nl> - o0 + = steps [ 2 ] ; <nl> - o1 + = steps [ 3 ] ; <nl> - } <nl> - } <nl> - } ; <nl> - struct Fmod { <nl> - bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> - return bfloat16 ( std : : fmod ( static_cast < float > ( a ) , static_cast < float > ( b ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Negative { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { return - a ; } <nl> - } ; <nl> - struct Positive { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { return a ; } <nl> - } ; <nl> - struct Power { <nl> - bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> - return bfloat16 ( std : : pow ( static_cast < float > ( a ) , static_cast < float > ( b ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Abs { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : abs ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Cbrt { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : cbrt ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Ceil { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : ceil ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct CopySign { <nl> - bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> - return bfloat16 ( <nl> - std : : copysign ( static_cast < float > ( a ) , static_cast < float > ( b ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Exp { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : exp ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Exp2 { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : exp2 ( static_cast < 
float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Expm1 { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : expm1 ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Floor { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : floor ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Frexp { <nl> - std : : pair < bfloat16 , int > operator ( ) ( bfloat16 a ) { <nl> - int exp ; <nl> - float f = std : : frexp ( static_cast < float > ( a ) , & exp ) ; <nl> - return { bfloat16 ( f ) , exp } ; <nl> - } <nl> - } ; <nl> - struct Heaviside { <nl> - bfloat16 operator ( ) ( bfloat16 bx , bfloat16 h0 ) { <nl> - float x = static_cast < float > ( bx ) ; <nl> - if ( Eigen : : numext : : isnan ( x ) ) { <nl> - return bx ; <nl> - } <nl> - if ( x < 0 ) { <nl> - return bfloat16 ( 0 . 0f ) ; <nl> - } <nl> - if ( x > 0 ) { <nl> - return bfloat16 ( 1 . 0f ) ; <nl> - } <nl> - return h0 ; / / x = = 0 <nl> - } <nl> - } ; <nl> - struct Conjugate { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { return a ; } <nl> - } ; <nl> - struct IsFinite { <nl> - bool operator ( ) ( bfloat16 a ) { return std : : isfinite ( static_cast < float > ( a ) ) ; } <nl> - } ; <nl> - struct IsInf { <nl> - bool operator ( ) ( bfloat16 a ) { return std : : isinf ( static_cast < float > ( a ) ) ; } <nl> - } ; <nl> - struct IsNan { <nl> - bool operator ( ) ( bfloat16 a ) { <nl> - return Eigen : : numext : : isnan ( static_cast < float > ( a ) ) ; <nl> - } <nl> - } ; <nl> - struct Ldexp { <nl> - bfloat16 operator ( ) ( bfloat16 a , int exp ) { <nl> - return bfloat16 ( std : : ldexp ( static_cast < float > ( a ) , exp ) ) ; <nl> - } <nl> - } ; <nl> - struct Log { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : log ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Log2 { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : log2 ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Log10 { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : log10 ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Log1p { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : log1p ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct LogAddExp { <nl> - bfloat16 operator ( ) ( bfloat16 bx , bfloat16 by ) { <nl> - float x = static_cast < float > ( bx ) ; <nl> - float y = static_cast < float > ( by ) ; <nl> - if ( x = = y ) { <nl> - / / Handles infinities of the same sign . <nl> - return bfloat16 ( x + std : : log ( 2 . 0f ) ) ; <nl> - } <nl> - float out = std : : numeric_limits < float > : : quiet_NaN ( ) ; <nl> - if ( x > y ) { <nl> - out = x + std : : log1p ( std : : exp ( y - x ) ) ; <nl> - } else if ( x < y ) { <nl> - out = y + std : : log1p ( std : : exp ( x - y ) ) ; <nl> - } <nl> - return bfloat16 ( out ) ; <nl> - } <nl> - } ; <nl> - struct LogAddExp2 { <nl> - bfloat16 operator ( ) ( bfloat16 bx , bfloat16 by ) { <nl> - float x = static_cast < float > ( bx ) ; <nl> - float y = static_cast < float > ( by ) ; <nl> - if ( x = = y ) { <nl> - / / Handles infinities of the same sign . <nl> - return bfloat16 ( x + 1 . 0f ) ; <nl> - } <nl> - float out = std : : numeric_limits < float > : : quiet_NaN ( ) ; <nl> - if ( x > y ) { <nl> - out = x + std : : log1p ( std : : exp2 ( y - x ) ) / std : : log ( 2 . 
0f ) ; <nl> - } else if ( x < y ) { <nl> - out = y + std : : log1p ( std : : exp2 ( x - y ) ) / std : : log ( 2 . 0f ) ; <nl> - } <nl> - return bfloat16 ( out ) ; <nl> - } <nl> - } ; <nl> - struct Modf { <nl> - std : : pair < bfloat16 , bfloat16 > operator ( ) ( bfloat16 a ) { <nl> - float integral ; <nl> - float f = std : : modf ( static_cast < float > ( a ) , & integral ) ; <nl> - return { bfloat16 ( f ) , bfloat16 ( integral ) } ; <nl> - } <nl> - } ; <nl> - <nl> - struct Reciprocal { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( 1 . f / static_cast < float > ( a ) ) ; <nl> - } <nl> - } ; <nl> - struct Rint { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : rint ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Sign { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - float f ( a ) ; <nl> - if ( f < 0 ) { <nl> - return bfloat16 ( - 1 ) ; <nl> - } <nl> - if ( f > 0 ) { <nl> - return bfloat16 ( 1 ) ; <nl> - } <nl> - return a ; <nl> - } <nl> - } ; <nl> - struct SignBit { <nl> - bool operator ( ) ( bfloat16 a ) { return std : : signbit ( static_cast < float > ( a ) ) ; } <nl> - } ; <nl> - struct Sqrt { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : sqrt ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Square { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - float f ( a ) ; <nl> - return bfloat16 ( f * f ) ; <nl> - } <nl> - } ; <nl> - struct Trunc { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : trunc ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - <nl> - / / Trigonometric functions <nl> - struct Sin { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : sin ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Cos { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : cos ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Tan { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : tan ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Arcsin { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : asin ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Arccos { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : acos ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Arctan { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : atan ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Arctan2 { <nl> - bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> - return bfloat16 ( std : : atan2 ( static_cast < float > ( a ) , static_cast < float > ( b ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Hypot { <nl> - bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> - return bfloat16 ( std : : hypot ( static_cast < float > ( a ) , static_cast < float > ( b ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Sinh { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : sinh ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Cosh { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : cosh ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Tanh { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : tanh ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> 
- struct Arcsinh { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : asinh ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Arccosh { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : acosh ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Arctanh { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - return bfloat16 ( std : : atanh ( static_cast < float > ( a ) ) ) ; <nl> - } <nl> - } ; <nl> - struct Deg2rad { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - static constexpr float radians_per_degree = M_PI / 180 . 0f ; <nl> - return bfloat16 ( static_cast < float > ( a ) * radians_per_degree ) ; <nl> - } <nl> - } ; <nl> - struct Rad2deg { <nl> - bfloat16 operator ( ) ( bfloat16 a ) { <nl> - static constexpr float degrees_per_radian = 180 . 0f / M_PI ; <nl> - return bfloat16 ( static_cast < float > ( a ) * degrees_per_radian ) ; <nl> - } <nl> - } ; <nl> - <nl> - struct Eq { <nl> - npy_bool operator ( ) ( bfloat16 a , bfloat16 b ) { return a = = b ; } <nl> - } ; <nl> - struct Ne { <nl> - npy_bool operator ( ) ( bfloat16 a , bfloat16 b ) { return a ! = b ; } <nl> - } ; <nl> - struct Lt { <nl> - npy_bool operator ( ) ( bfloat16 a , bfloat16 b ) { return a < b ; } <nl> - } ; <nl> - struct Gt { <nl> - npy_bool operator ( ) ( bfloat16 a , bfloat16 b ) { return a > b ; } <nl> - } ; <nl> - struct Le { <nl> - npy_bool operator ( ) ( bfloat16 a , bfloat16 b ) { return a < = b ; } <nl> - } ; <nl> - struct Ge { <nl> - npy_bool operator ( ) ( bfloat16 a , bfloat16 b ) { return a > = b ; } <nl> - } ; <nl> - struct Maximum { <nl> - bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> - float fa ( a ) , fb ( b ) ; <nl> - return Eigen : : numext : : isnan ( fa ) | | fa > fb ? a : b ; <nl> - } <nl> - } ; <nl> - struct Minimum { <nl> - bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> - float fa ( a ) , fb ( b ) ; <nl> - return Eigen : : numext : : isnan ( fa ) | | fa < fb ? a : b ; <nl> - } <nl> - } ; <nl> - struct Fmax { <nl> - bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> - float fa ( a ) , fb ( b ) ; <nl> - return Eigen : : numext : : isnan ( fb ) | | fa > fb ? a : b ; <nl> - } <nl> - } ; <nl> - struct Fmin { <nl> - bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> - float fa ( a ) , fb ( b ) ; <nl> - return Eigen : : numext : : isnan ( fb ) | | fa < fb ? a : b ; <nl> - } <nl> - } ; <nl> - <nl> - struct LogicalNot { <nl> - npy_bool operator ( ) ( bfloat16 a ) { return ! 
a ; } <nl> - } ; <nl> - struct LogicalAnd { <nl> - npy_bool operator ( ) ( bfloat16 a , bfloat16 b ) { return a & & b ; } <nl> - } ; <nl> - struct LogicalOr { <nl> - npy_bool operator ( ) ( bfloat16 a , bfloat16 b ) { return a | | b ; } <nl> - } ; <nl> - struct LogicalXor { <nl> - npy_bool operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> - return static_cast < bool > ( a ) ^ static_cast < bool > ( b ) ; <nl> - } <nl> - } ; <nl> - <nl> - struct NextAfter { <nl> - bfloat16 operator ( ) ( bfloat16 from , bfloat16 to ) { <nl> - uint16_t from_as_int , to_as_int ; <nl> - const uint16_t sign_mask = 1 < < 15 ; <nl> - float from_as_float ( from ) , to_as_float ( to ) ; <nl> - memcpy ( & from_as_int , & from , sizeof ( bfloat16 ) ) ; <nl> - memcpy ( & to_as_int , & to , sizeof ( bfloat16 ) ) ; <nl> - if ( Eigen : : numext : : isnan ( from_as_float ) | | <nl> - Eigen : : numext : : isnan ( to_as_float ) ) { <nl> - return bfloat16 ( std : : numeric_limits < float > : : quiet_NaN ( ) ) ; <nl> - } <nl> - if ( from_as_int = = to_as_int ) { <nl> - return to ; <nl> - } <nl> - if ( from_as_float = = 0 ) { <nl> - if ( to_as_float = = 0 ) { <nl> - return to ; <nl> - } else { <nl> - / / Smallest subnormal signed like ` to ` . <nl> - uint16_t out_int = ( to_as_int & sign_mask ) | 1 ; <nl> - bfloat16 out ; <nl> - memcpy ( & out , & out_int , sizeof ( bfloat16 ) ) ; <nl> - return out ; <nl> - } <nl> - } <nl> - uint16_t from_sign = from_as_int & sign_mask ; <nl> - uint16_t to_sign = to_as_int & sign_mask ; <nl> - uint16_t from_abs = from_as_int & ~ sign_mask ; <nl> - uint16_t to_abs = to_as_int & ~ sign_mask ; <nl> - uint16_t magnitude_adjustment = <nl> - ( from_abs > to_abs | | from_sign ! = to_sign ) ? 0xFFFF : 0x0001 ; <nl> - uint16_t out_int = from_as_int + magnitude_adjustment ; <nl> - bfloat16 out ; <nl> - memcpy ( & out , & out_int , sizeof ( bfloat16 ) ) ; <nl> - return out ; <nl> - } <nl> - } ; <nl> - <nl> - / / TODO ( phawkins ) : implement spacing <nl> - <nl> - } / / namespace ufuncs <nl> - <nl> - } / / namespace <nl> - <nl> - / / Initializes the module . <nl> - bool Initialize ( ) { <nl> - import_array1 ( false ) ; <nl> - import_umath1 ( false ) ; <nl> - <nl> - Safe_PyObjectPtr numpy_str = make_safe ( PyUnicode_FromString ( " numpy " ) ) ; <nl> - if ( ! numpy_str ) { <nl> - return false ; <nl> - } <nl> - Safe_PyObjectPtr numpy = make_safe ( PyImport_Import ( numpy_str . get ( ) ) ) ; <nl> - if ( ! numpy ) { <nl> - return false ; <nl> - } <nl> - <nl> - PyBfloat16_Type . tp_base = & PyGenericArrType_Type ; <nl> - <nl> - if ( PyType_Ready ( & PyBfloat16_Type ) < 0 ) { <nl> - return false ; <nl> - } <nl> - <nl> - / / Initializes the NumPy descriptor . <nl> - PyArray_InitArrFuncs ( & NPyBfloat16_ArrFuncs ) ; <nl> - NPyBfloat16_ArrFuncs . getitem = NPyBfloat16_GetItem ; <nl> - NPyBfloat16_ArrFuncs . setitem = NPyBfloat16_SetItem ; <nl> - NPyBfloat16_ArrFuncs . compare = NPyBfloat16_Compare ; <nl> - NPyBfloat16_ArrFuncs . copyswapn = NPyBfloat16_CopySwapN ; <nl> - NPyBfloat16_ArrFuncs . copyswap = NPyBfloat16_CopySwap ; <nl> - NPyBfloat16_ArrFuncs . nonzero = NPyBfloat16_NonZero ; <nl> - NPyBfloat16_ArrFuncs . fill = NPyBfloat16_Fill ; <nl> - NPyBfloat16_ArrFuncs . dotfunc = NPyBfloat16_DotFunc ; <nl> - NPyBfloat16_ArrFuncs . compare = NPyBfloat16_CompareFunc ; <nl> - NPyBfloat16_ArrFuncs . argmax = NPyBfloat16_ArgMaxFunc ; <nl> - NPyBfloat16_ArrFuncs . 
argmin = NPyBfloat16_ArgMinFunc ; <nl> - <nl> - Py_TYPE ( & NPyBfloat16_Descr ) = & PyArrayDescr_Type ; <nl> - npy_bfloat16 = PyArray_RegisterDataType ( & NPyBfloat16_Descr ) ; <nl> - if ( npy_bfloat16 < 0 ) { <nl> - return false ; <nl> - } <nl> - <nl> - / / Support dtype ( bfloat16 ) <nl> - if ( PyDict_SetItemString ( PyBfloat16_Type . tp_dict , " dtype " , <nl> - reinterpret_cast < PyObject * > ( & NPyBfloat16_Descr ) ) < <nl> - 0 ) { <nl> - return false ; <nl> - } <nl> - <nl> - / / Register casts <nl> - if ( ! RegisterBfloat16Cast < Eigen : : half > ( NPY_HALF , / * cast_is_safe = * / false ) ) { <nl> - return false ; <nl> - } <nl> - if ( ! RegisterBfloat16Cast < float > ( NPY_FLOAT , / * cast_is_safe = * / true ) ) { <nl> - return false ; <nl> - } <nl> - if ( ! RegisterBfloat16Cast < double > ( NPY_DOUBLE , / * cast_is_safe = * / true ) ) { <nl> - return false ; <nl> - } <nl> - if ( ! RegisterBfloat16Cast < bool > ( NPY_BOOL , / * cast_is_safe = * / false ) ) { <nl> - return false ; <nl> - } <nl> - if ( ! RegisterBfloat16Cast < uint8 > ( NPY_UINT8 , / * cast_is_safe = * / false ) ) { <nl> - return false ; <nl> - } <nl> - if ( ! RegisterBfloat16Cast < uint16 > ( NPY_UINT16 , / * cast_is_safe = * / false ) ) { <nl> - return false ; <nl> - } <nl> - if ( ! RegisterBfloat16Cast < unsigned int > ( NPY_UINT , / * cast_is_safe = * / false ) ) { <nl> - return false ; <nl> - } <nl> - if ( ! RegisterBfloat16Cast < unsigned long > ( NPY_ULONG , / / NOLINT <nl> - / * cast_is_safe = * / false ) ) { <nl> - return false ; <nl> - } <nl> - if ( ! RegisterBfloat16Cast < unsigned long long > ( / / NOLINT <nl> - NPY_ULONGLONG , / * cast_is_safe = * / false ) ) { <nl> - return false ; <nl> - } <nl> - if ( ! RegisterBfloat16Cast < uint64 > ( NPY_UINT64 , / * cast_is_safe = * / false ) ) { <nl> - return false ; <nl> - } <nl> - if ( ! RegisterBfloat16Cast < int8 > ( NPY_INT8 , / * cast_is_safe = * / false ) ) { <nl> - return false ; <nl> - } <nl> - if ( ! RegisterBfloat16Cast < int16 > ( NPY_INT16 , / * cast_is_safe = * / false ) ) { <nl> - return false ; <nl> - } <nl> - if ( ! RegisterBfloat16Cast < int > ( NPY_INT , / * cast_is_safe = * / false ) ) { <nl> - return false ; <nl> - } <nl> - if ( ! RegisterBfloat16Cast < long > ( NPY_LONG , / / NOLINT <nl> - / * cast_is_safe = * / false ) ) { <nl> - return false ; <nl> - } <nl> - if ( ! RegisterBfloat16Cast < long long > ( / / NOLINT <nl> - NPY_LONGLONG , / * cast_is_safe = * / false ) ) { <nl> - return false ; <nl> - } <nl> - / / Following the numpy convention . imag part is dropped when converting to <nl> - / / float . <nl> - if ( ! RegisterBfloat16Cast < complex64 > ( NPY_COMPLEX64 , / * cast_is_safe = * / true ) ) { <nl> - return false ; <nl> - } <nl> - if ( ! RegisterBfloat16Cast < complex128 > ( NPY_COMPLEX128 , <nl> - / * cast_is_safe = * / true ) ) { <nl> - return false ; <nl> - } <nl> - <nl> - bool ok = <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Add > > ( numpy . get ( ) , <nl> - " add " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Subtract > > ( <nl> - numpy . get ( ) , " subtract " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Multiply > > ( <nl> - numpy . get ( ) , " multiply " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : TrueDivide > > ( <nl> - numpy . get ( ) , " divide " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : LogAddExp > > ( <nl> - numpy . 
get ( ) , " logaddexp " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : LogAddExp2 > > ( <nl> - numpy . get ( ) , " logaddexp2 " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Negative > > ( <nl> - numpy . get ( ) , " negative " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Positive > > ( <nl> - numpy . get ( ) , " positive " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : TrueDivide > > ( <nl> - numpy . get ( ) , " true_divide " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : FloorDivide > > ( <nl> - numpy . get ( ) , " floor_divide " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Power > > ( numpy . get ( ) , <nl> - " power " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Remainder > > ( <nl> - numpy . get ( ) , " remainder " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Remainder > > ( <nl> - numpy . get ( ) , " mod " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Fmod > > ( numpy . get ( ) , <nl> - " fmod " ) & & <nl> - RegisterUFunc < ufuncs : : DivmodUFunc > ( numpy . get ( ) , " divmod " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Abs > > ( numpy . get ( ) , <nl> - " absolute " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Abs > > ( numpy . get ( ) , <nl> - " fabs " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Rint > > ( numpy . get ( ) , <nl> - " rint " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Sign > > ( numpy . get ( ) , <nl> - " sign " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Heaviside > > ( <nl> - numpy . get ( ) , " heaviside " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Conjugate > > ( <nl> - numpy . get ( ) , " conjugate " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Exp > > ( numpy . get ( ) , <nl> - " exp " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Exp2 > > ( numpy . get ( ) , <nl> - " exp2 " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Expm1 > > ( numpy . get ( ) , <nl> - " expm1 " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Log > > ( numpy . get ( ) , <nl> - " log " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Log2 > > ( numpy . get ( ) , <nl> - " log2 " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Log10 > > ( numpy . get ( ) , <nl> - " log10 " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Log1p > > ( numpy . get ( ) , <nl> - " log1p " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Sqrt > > ( numpy . get ( ) , <nl> - " sqrt " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Square > > ( numpy . get ( ) , <nl> - " square " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Cbrt > > ( numpy . get ( ) , <nl> - " cbrt " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Reciprocal > > ( <nl> - numpy . get ( ) , " reciprocal " ) & & <nl> - <nl> - / / Trigonometric functions <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Sin > > ( numpy . 
get ( ) , <nl> - " sin " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Cos > > ( numpy . get ( ) , <nl> - " cos " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Tan > > ( numpy . get ( ) , <nl> - " tan " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Arcsin > > ( numpy . get ( ) , <nl> - " arcsin " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Arccos > > ( numpy . get ( ) , <nl> - " arccos " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Arctan > > ( numpy . get ( ) , <nl> - " arctan " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Arctan2 > > ( <nl> - numpy . get ( ) , " arctan2 " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Hypot > > ( numpy . get ( ) , <nl> - " hypot " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Sinh > > ( numpy . get ( ) , <nl> - " sinh " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Cosh > > ( numpy . get ( ) , <nl> - " cosh " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Tanh > > ( numpy . get ( ) , <nl> - " tanh " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Arcsinh > > ( <nl> - numpy . get ( ) , " arcsinh " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Arccosh > > ( <nl> - numpy . get ( ) , " arccosh " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Arctanh > > ( <nl> - numpy . get ( ) , " arctanh " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Deg2rad > > ( <nl> - numpy . get ( ) , " deg2rad " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Rad2deg > > ( <nl> - numpy . get ( ) , " rad2deg " ) & & <nl> - <nl> - / / Comparison functions <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bool , ufuncs : : Eq > > ( numpy . get ( ) , <nl> - " equal " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bool , ufuncs : : Ne > > ( numpy . get ( ) , <nl> - " not_equal " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bool , ufuncs : : Lt > > ( numpy . get ( ) , <nl> - " less " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bool , ufuncs : : Gt > > ( numpy . get ( ) , <nl> - " greater " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bool , ufuncs : : Le > > ( numpy . get ( ) , <nl> - " less_equal " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bool , ufuncs : : Ge > > ( numpy . get ( ) , <nl> - " greater_equal " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Maximum > > ( <nl> - numpy . get ( ) , " maximum " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Minimum > > ( <nl> - numpy . get ( ) , " minimum " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Fmax > > ( numpy . get ( ) , <nl> - " fmax " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Fmin > > ( numpy . get ( ) , <nl> - " fmin " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bool , ufuncs : : LogicalAnd > > ( <nl> - numpy . get ( ) , " logical_and " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bool , ufuncs : : LogicalOr > > ( <nl> - numpy . get ( ) , " logical_or " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bool , ufuncs : : LogicalXor > > ( <nl> - numpy . 
get ( ) , " logical_xor " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bool , ufuncs : : LogicalNot > > ( <nl> - numpy . get ( ) , " logical_not " ) & & <nl> - <nl> - / / Floating point functions <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bool , ufuncs : : IsFinite > > ( numpy . get ( ) , <nl> - " isfinite " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bool , ufuncs : : IsInf > > ( numpy . get ( ) , <nl> - " isinf " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bool , ufuncs : : IsNan > > ( numpy . get ( ) , <nl> - " isnan " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bool , ufuncs : : SignBit > > ( numpy . get ( ) , <nl> - " signbit " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : CopySign > > ( <nl> - numpy . get ( ) , " copysign " ) & & <nl> - RegisterUFunc < UnaryUFunc2 < bfloat16 , bfloat16 , bfloat16 , ufuncs : : Modf > > ( <nl> - numpy . get ( ) , " modf " ) & & <nl> - RegisterUFunc < BinaryUFunc2 < bfloat16 , int , bfloat16 , ufuncs : : Ldexp > > ( <nl> - numpy . get ( ) , " ldexp " ) & & <nl> - RegisterUFunc < UnaryUFunc2 < bfloat16 , bfloat16 , int , ufuncs : : Frexp > > ( <nl> - numpy . get ( ) , " frexp " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Floor > > ( numpy . get ( ) , <nl> - " floor " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Ceil > > ( numpy . get ( ) , <nl> - " ceil " ) & & <nl> - RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Trunc > > ( numpy . get ( ) , <nl> - " trunc " ) & & <nl> - RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : NextAfter > > ( <nl> - numpy . get ( ) , " nextafter " ) ; <nl> - <nl> - return ok ; <nl> - } <nl> - <nl> - StatusOr < py : : object > Bfloat16Dtype ( ) { <nl> - if ( npy_bfloat16 < 0 ) { <nl> - / / Not yet initialized . We assume the GIL protects npy_bfloat16 . <nl> - if ( ! Initialize ( ) ) { <nl> - return InternalError ( " Bfloat16 numpy type initialization failed . " ) ; <nl> - } <nl> - } <nl> - return py : : object ( reinterpret_cast < PyObject * > ( & PyBfloat16_Type ) , <nl> - / * is_borrowed = * / true ) ; <nl> - } <nl> - <nl> - } / / namespace xla <nl> deleted file mode 100644 <nl> index 4c7321a5b7f87 . . 0000000000000 <nl> mmm a / tensorflow / compiler / xla / python / bfloat16_test . py <nl> ppp / dev / null <nl> <nl> - # Copyright 2015 The TensorFlow Authors . All Rights Reserved . <nl> - # <nl> - # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - # you may not use this file except in compliance with the License . <nl> - # You may obtain a copy of the License at <nl> - # <nl> - # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - # <nl> - # Unless required by applicable law or agreed to in writing , software <nl> - # distributed under the License is distributed on an " AS IS " BASIS , <nl> - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - # See the License for the specific language governing permissions and <nl> - # limitations under the License . <nl> - # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> - " " " Test cases for the bfloat16 Python type . 
" " " <nl> - <nl> - from __future__ import absolute_import <nl> - from __future__ import division <nl> - from __future__ import print_function <nl> - <nl> - import collections <nl> - import copy <nl> - import itertools <nl> - import math <nl> - <nl> - from absl . testing import absltest <nl> - from absl . testing import parameterized <nl> - <nl> - import numpy as np <nl> - <nl> - from tensorflow . compiler . xla . python import xla_client <nl> - <nl> - bfloat16 = xla_client . bfloat16 <nl> - <nl> - <nl> - def numpy_assert_allclose ( a , b , * * kwargs ) : <nl> - a = a . astype ( np . float32 ) if a . dtype = = bfloat16 else a <nl> - b = b . astype ( np . float32 ) if b . dtype = = bfloat16 else b <nl> - return np . testing . assert_allclose ( a , b , * * kwargs ) <nl> - <nl> - <nl> - epsilon = float . fromhex ( " 1 . 0p - 7 " ) <nl> - <nl> - # Values that should round trip exactly to float and back . <nl> - FLOAT_VALUES = [ <nl> - 0 . 0 , 1 . 0 , - 1 , 0 . 5 , - 0 . 5 , epsilon , 1 . 0 + epsilon , 1 . 0 - epsilon , <nl> - - 1 . 0 - epsilon , - 1 . 0 + epsilon , 3 . 5 , 42 . 0 , 255 . 0 , 256 . 0 , <nl> - float ( " inf " ) , <nl> - float ( " - inf " ) , <nl> - float ( " nan " ) <nl> - ] <nl> - <nl> - <nl> - class Bfloat16Test ( parameterized . TestCase ) : <nl> - " " " Tests the non - numpy Python methods of the bfloat16 type . " " " <nl> - <nl> - def testRoundTripToFloat ( self ) : <nl> - for v in FLOAT_VALUES : <nl> - np . testing . assert_equal ( v , float ( bfloat16 ( v ) ) ) <nl> - <nl> - def testRoundTripNumpyTypes ( self ) : <nl> - for dtype in [ np . float16 , np . float32 , np . float64 ] : <nl> - np . testing . assert_equal ( - 3 . 75 , dtype ( bfloat16 ( dtype ( - 3 . 75 ) ) ) ) <nl> - np . testing . assert_equal ( 1 . 5 , float ( bfloat16 ( dtype ( 1 . 5 ) ) ) ) <nl> - np . testing . assert_equal ( 4 . 5 , dtype ( bfloat16 ( np . array ( 4 . 5 , dtype ) ) ) ) <nl> - np . testing . assert_equal ( <nl> - np . array ( [ 2 , 5 , - 1 ] , bfloat16 ) , bfloat16 ( np . array ( [ 2 , 5 , - 1 ] , dtype ) ) ) <nl> - <nl> - def testRoundTripToInt ( self ) : <nl> - for v in [ - 256 , - 255 , - 34 , - 2 , - 1 , 0 , 1 , 2 , 10 , 47 , 128 , 255 , 256 , 512 ] : <nl> - self . assertEqual ( v , int ( bfloat16 ( v ) ) ) <nl> - <nl> - # pylint : disable = g - complex - comprehension <nl> - @ parameterized . named_parameters ( ( { <nl> - " testcase_name " : " _ " + dtype . __name__ , <nl> - " dtype " : dtype <nl> - } for dtype in [ bfloat16 , np . float16 , np . float32 , np . float64 ] ) ) <nl> - def testRoundTripToNumpy ( self , dtype ) : <nl> - for v in FLOAT_VALUES : <nl> - np . testing . assert_equal ( v , bfloat16 ( dtype ( v ) ) ) <nl> - np . testing . assert_equal ( v , dtype ( bfloat16 ( dtype ( v ) ) ) ) <nl> - np . testing . assert_equal ( v , dtype ( bfloat16 ( np . array ( v , dtype ) ) ) ) <nl> - if dtype ! = bfloat16 : <nl> - np . testing . assert_equal ( <nl> - np . array ( FLOAT_VALUES , dtype ) , <nl> - bfloat16 ( np . array ( FLOAT_VALUES , dtype ) ) . astype ( dtype ) ) <nl> - <nl> - def testStr ( self ) : <nl> - self . assertEqual ( " 0 " , str ( bfloat16 ( 0 . 0 ) ) ) <nl> - self . assertEqual ( " 1 " , str ( bfloat16 ( 1 . 0 ) ) ) <nl> - self . assertEqual ( " - 3 . 5 " , str ( bfloat16 ( - 3 . 5 ) ) ) <nl> - self . assertEqual ( " 0 . 0078125 " , str ( bfloat16 ( float . fromhex ( " 1 . 0p - 7 " ) ) ) ) <nl> - self . assertEqual ( " inf " , str ( bfloat16 ( float ( " inf " ) ) ) ) <nl> - self . assertEqual ( " - inf " , str ( bfloat16 ( float ( " - inf " ) ) ) ) <nl> - self . 
assertEqual ( " nan " , str ( bfloat16 ( float ( " nan " ) ) ) ) <nl> - <nl> - def testRepr ( self ) : <nl> - self . assertEqual ( " 0 " , repr ( bfloat16 ( 0 ) ) ) <nl> - self . assertEqual ( " 1 " , repr ( bfloat16 ( 1 ) ) ) <nl> - self . assertEqual ( " - 3 . 5 " , repr ( bfloat16 ( - 3 . 5 ) ) ) <nl> - self . assertEqual ( " 0 . 0078125 " , repr ( bfloat16 ( float . fromhex ( " 1 . 0p - 7 " ) ) ) ) <nl> - self . assertEqual ( " inf " , repr ( bfloat16 ( float ( " inf " ) ) ) ) <nl> - self . assertEqual ( " - inf " , repr ( bfloat16 ( float ( " - inf " ) ) ) ) <nl> - self . assertEqual ( " nan " , repr ( bfloat16 ( float ( " nan " ) ) ) ) <nl> - <nl> - def testHash ( self ) : <nl> - self . assertEqual ( 0 , hash ( bfloat16 ( 0 . 0 ) ) ) <nl> - self . assertEqual ( 0x3f80 , hash ( bfloat16 ( 1 . 0 ) ) ) <nl> - self . assertEqual ( 0x7fc0 , hash ( bfloat16 ( float ( " nan " ) ) ) ) <nl> - <nl> - # Tests for Python operations <nl> - def testNegate ( self ) : <nl> - for v in FLOAT_VALUES : <nl> - np . testing . assert_equal ( - v , float ( - bfloat16 ( v ) ) ) <nl> - <nl> - def testAdd ( self ) : <nl> - np . testing . assert_equal ( 0 , float ( bfloat16 ( 0 ) + bfloat16 ( 0 ) ) ) <nl> - np . testing . assert_equal ( 1 , float ( bfloat16 ( 1 ) + bfloat16 ( 0 ) ) ) <nl> - np . testing . assert_equal ( 0 , float ( bfloat16 ( 1 ) + bfloat16 ( - 1 ) ) ) <nl> - np . testing . assert_equal ( 5 . 5 , float ( bfloat16 ( 2 ) + bfloat16 ( 3 . 5 ) ) ) <nl> - np . testing . assert_equal ( 1 . 25 , float ( bfloat16 ( 3 . 5 ) + bfloat16 ( - 2 . 25 ) ) ) <nl> - np . testing . assert_equal ( <nl> - float ( " inf " ) , float ( bfloat16 ( float ( " inf " ) ) + bfloat16 ( - 2 . 25 ) ) ) <nl> - np . testing . assert_equal ( <nl> - float ( " - inf " ) , float ( bfloat16 ( float ( " - inf " ) ) + bfloat16 ( - 2 . 25 ) ) ) <nl> - self . assertTrue ( math . isnan ( float ( bfloat16 ( 3 . 5 ) + bfloat16 ( float ( " nan " ) ) ) ) ) <nl> - <nl> - # Test type promotion against Numpy scalar values . <nl> - self . assertEqual ( np . float32 , type ( bfloat16 ( 3 . 5 ) + np . float16 ( 2 . 25 ) ) ) <nl> - self . assertEqual ( np . float32 , type ( np . float16 ( 3 . 5 ) + bfloat16 ( 2 . 25 ) ) ) <nl> - self . assertEqual ( np . float32 , type ( bfloat16 ( 3 . 5 ) + np . float32 ( 2 . 25 ) ) ) <nl> - self . assertEqual ( np . float32 , type ( np . float32 ( 3 . 5 ) + bfloat16 ( 2 . 25 ) ) ) <nl> - self . assertEqual ( np . float64 , type ( bfloat16 ( 3 . 5 ) + np . float64 ( 2 . 25 ) ) ) <nl> - self . assertEqual ( np . float64 , type ( np . float64 ( 3 . 5 ) + bfloat16 ( 2 . 25 ) ) ) <nl> - self . assertEqual ( np . float64 , type ( bfloat16 ( 3 . 5 ) + float ( 2 . 25 ) ) ) <nl> - self . assertEqual ( np . float64 , type ( float ( 3 . 5 ) + bfloat16 ( 2 . 25 ) ) ) <nl> - self . assertEqual ( np . float32 , <nl> - type ( bfloat16 ( 3 . 5 ) + np . array ( 2 . 25 , np . float32 ) ) ) <nl> - self . assertEqual ( np . float32 , <nl> - type ( np . array ( 3 . 5 , np . float32 ) + bfloat16 ( 2 . 25 ) ) ) <nl> - <nl> - def testSub ( self ) : <nl> - np . testing . assert_equal ( 0 , float ( bfloat16 ( 0 ) - bfloat16 ( 0 ) ) ) <nl> - np . testing . assert_equal ( 1 , float ( bfloat16 ( 1 ) - bfloat16 ( 0 ) ) ) <nl> - np . testing . assert_equal ( 2 , float ( bfloat16 ( 1 ) - bfloat16 ( - 1 ) ) ) <nl> - np . testing . assert_equal ( - 1 . 5 , float ( bfloat16 ( 2 ) - bfloat16 ( 3 . 5 ) ) ) <nl> - np . testing . assert_equal ( 5 . 75 , float ( bfloat16 ( 3 . 5 ) - bfloat16 ( - 2 . 25 ) ) ) <nl> - np . testing . 
assert_equal ( <nl> - float ( " - inf " ) , float ( bfloat16 ( - 2 . 25 ) - bfloat16 ( float ( " inf " ) ) ) ) <nl> - np . testing . assert_equal ( <nl> - float ( " inf " ) , float ( bfloat16 ( - 2 . 25 ) - bfloat16 ( float ( " - inf " ) ) ) ) <nl> - self . assertTrue ( math . isnan ( float ( bfloat16 ( 3 . 5 ) - bfloat16 ( float ( " nan " ) ) ) ) ) <nl> - <nl> - def testMul ( self ) : <nl> - np . testing . assert_equal ( 0 , float ( bfloat16 ( 0 ) * bfloat16 ( 0 ) ) ) <nl> - np . testing . assert_equal ( 0 , float ( bfloat16 ( 1 ) * bfloat16 ( 0 ) ) ) <nl> - np . testing . assert_equal ( - 1 , float ( bfloat16 ( 1 ) * bfloat16 ( - 1 ) ) ) <nl> - np . testing . assert_equal ( - 7 . 875 , float ( bfloat16 ( 3 . 5 ) * bfloat16 ( - 2 . 25 ) ) ) <nl> - np . testing . assert_equal ( <nl> - float ( " - inf " ) , float ( bfloat16 ( float ( " inf " ) ) * bfloat16 ( - 2 . 25 ) ) ) <nl> - np . testing . assert_equal ( <nl> - float ( " inf " ) , float ( bfloat16 ( float ( " - inf " ) ) * bfloat16 ( - 2 . 25 ) ) ) <nl> - self . assertTrue ( math . isnan ( float ( bfloat16 ( 3 . 5 ) * bfloat16 ( float ( " nan " ) ) ) ) ) <nl> - <nl> - def testDiv ( self ) : <nl> - self . assertTrue ( math . isnan ( float ( bfloat16 ( 0 ) / bfloat16 ( 0 ) ) ) ) <nl> - np . testing . assert_equal ( float ( " inf " ) , float ( bfloat16 ( 1 ) / bfloat16 ( 0 ) ) ) <nl> - np . testing . assert_equal ( - 1 , float ( bfloat16 ( 1 ) / bfloat16 ( - 1 ) ) ) <nl> - np . testing . assert_equal ( - 1 . 75 , float ( bfloat16 ( 3 . 5 ) / bfloat16 ( - 2 ) ) ) <nl> - np . testing . assert_equal ( <nl> - float ( " - inf " ) , float ( bfloat16 ( float ( " inf " ) ) / bfloat16 ( - 2 . 25 ) ) ) <nl> - np . testing . assert_equal ( <nl> - float ( " inf " ) , float ( bfloat16 ( float ( " - inf " ) ) / bfloat16 ( - 2 . 25 ) ) ) <nl> - self . assertTrue ( math . isnan ( float ( bfloat16 ( 3 . 5 ) / bfloat16 ( float ( " nan " ) ) ) ) ) <nl> - <nl> - def testLess ( self ) : <nl> - for v in FLOAT_VALUES : <nl> - for w in FLOAT_VALUES : <nl> - self . assertEqual ( v < w , bfloat16 ( v ) < bfloat16 ( w ) ) <nl> - <nl> - def testLessEqual ( self ) : <nl> - for v in FLOAT_VALUES : <nl> - for w in FLOAT_VALUES : <nl> - self . assertEqual ( v < = w , bfloat16 ( v ) < = bfloat16 ( w ) ) <nl> - <nl> - def testGreater ( self ) : <nl> - for v in FLOAT_VALUES : <nl> - for w in FLOAT_VALUES : <nl> - self . assertEqual ( v > w , bfloat16 ( v ) > bfloat16 ( w ) ) <nl> - <nl> - def testGreaterEqual ( self ) : <nl> - for v in FLOAT_VALUES : <nl> - for w in FLOAT_VALUES : <nl> - self . assertEqual ( v > = w , bfloat16 ( v ) > = bfloat16 ( w ) ) <nl> - <nl> - def testEqual ( self ) : <nl> - for v in FLOAT_VALUES : <nl> - for w in FLOAT_VALUES : <nl> - self . assertEqual ( v = = w , bfloat16 ( v ) = = bfloat16 ( w ) ) <nl> - <nl> - def testNotEqual ( self ) : <nl> - for v in FLOAT_VALUES : <nl> - for w in FLOAT_VALUES : <nl> - self . assertEqual ( v ! = w , bfloat16 ( v ) ! = bfloat16 ( w ) ) <nl> - <nl> - def testNan ( self ) : <nl> - a = np . isnan ( bfloat16 ( float ( " nan " ) ) ) <nl> - self . assertTrue ( a ) <nl> - numpy_assert_allclose ( np . array ( [ 1 . 0 , a ] ) , np . array ( [ 1 . 0 , a ] ) ) <nl> - <nl> - a = np . array ( [ bfloat16 ( 1 . 34375 ) , <nl> - bfloat16 ( 1 . 4375 ) , <nl> - bfloat16 ( float ( " nan " ) ) ] , <nl> - dtype = bfloat16 ) <nl> - b = np . array ( <nl> - [ bfloat16 ( 1 . 3359375 ) , <nl> - bfloat16 ( 1 . 
4375 ) , <nl> - bfloat16 ( float ( " nan " ) ) ] , <nl> - dtype = bfloat16 ) <nl> - numpy_assert_allclose ( <nl> - a , b , rtol = 0 . 1 , atol = 0 . 1 , equal_nan = True , err_msg = " " , verbose = True ) <nl> - <nl> - def testSort ( self ) : <nl> - values_to_sort = np . float32 ( FLOAT_VALUES ) <nl> - sorted_f32 = np . sort ( values_to_sort ) <nl> - sorted_bf16 = np . sort ( values_to_sort . astype ( bfloat16 ) ) <nl> - np . testing . assert_equal ( sorted_f32 , np . float32 ( sorted_bf16 ) ) <nl> - <nl> - <nl> - BinaryOp = collections . namedtuple ( " BinaryOp " , [ " op " ] ) <nl> - <nl> - UNARY_UFUNCS = [ <nl> - np . negative , np . positive , np . absolute , np . fabs , np . rint , np . sign , <nl> - np . conjugate , np . exp , np . exp2 , np . expm1 , np . log , np . log10 , np . log1p , <nl> - np . log2 , np . sqrt , np . square , np . cbrt , np . reciprocal , np . sin , np . cos , np . tan , <nl> - np . arcsin , np . arccos , np . arctan , np . sinh , np . cosh , np . tanh , np . arcsinh , <nl> - np . arccosh , np . arctanh , np . deg2rad , np . rad2deg , np . floor , np . ceil , np . trunc <nl> - ] <nl> - <nl> - BINARY_UFUNCS = [ <nl> - np . add , np . subtract , np . multiply , np . divide , np . logaddexp , np . logaddexp2 , <nl> - np . floor_divide , np . power , np . remainder , np . fmod , np . heaviside , np . arctan2 , <nl> - np . hypot , np . maximum , np . minimum , np . fmax , np . fmin , np . copysign <nl> - ] <nl> - <nl> - BINARY_PREDICATE_UFUNCS = [ <nl> - np . equal , np . not_equal , np . less , np . greater , np . less_equal , <nl> - np . greater_equal , np . logical_and , np . logical_or , np . logical_xor <nl> - ] <nl> - <nl> - <nl> - class Bfloat16NumPyTest ( parameterized . TestCase ) : <nl> - " " " Tests the NumPy integration of the bfloat16 type . " " " <nl> - <nl> - def testDtype ( self ) : <nl> - self . assertEqual ( bfloat16 , np . dtype ( bfloat16 ) ) <nl> - <nl> - def testDeepCopyDoesNotAlterHash ( self ) : <nl> - # For context , see https : / / github . com / google / jax / issues / 4651 . If the hash <nl> - # value of the type descriptor is not initialized correctly , a deep copy <nl> - # can change the type hash . <nl> - dtype = np . dtype ( bfloat16 ) <nl> - h = hash ( dtype ) <nl> - _ = copy . deepcopy ( dtype ) <nl> - self . assertEqual ( h , hash ( dtype ) ) <nl> - <nl> - def testArray ( self ) : <nl> - x = np . array ( [ [ 1 , 2 , 3 ] ] , dtype = bfloat16 ) <nl> - self . assertEqual ( bfloat16 , x . dtype ) <nl> - self . assertEqual ( " [ [ 1 2 3 ] ] " , str ( x ) ) <nl> - np . testing . assert_equal ( x , x ) <nl> - numpy_assert_allclose ( x , x ) <nl> - self . assertTrue ( ( x = = x ) . all ( ) ) <nl> - <nl> - def testComparisons ( self ) : <nl> - x = np . array ( [ 401408 , 7 , - 32 ] , dtype = np . float32 ) <nl> - bx = x . astype ( bfloat16 ) <nl> - y = np . array ( [ 82432 , 7 , 0 ] , dtype = np . float32 ) <nl> - by = y . astype ( bfloat16 ) <nl> - np . testing . assert_equal ( x = = y , bx = = by ) <nl> - np . testing . assert_equal ( x ! = y , bx ! = by ) <nl> - np . testing . assert_equal ( x < y , bx < by ) <nl> - np . testing . assert_equal ( x > y , bx > by ) <nl> - np . testing . assert_equal ( x < = y , bx < = by ) <nl> - np . testing . assert_equal ( x > = y , bx > = by ) <nl> - <nl> - def testEqual2 ( self ) : <nl> - a = np . array ( [ 401408 ] , bfloat16 ) <nl> - b = np . array ( [ 82432 ] , bfloat16 ) <nl> - self . assertFalse ( a . __eq__ ( b ) ) <nl> - <nl> - def testCasts ( self ) : <nl> - for dtype in [ <nl> - np . 
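
Much of what these deleted tests check (the exact round trips in FLOAT_VALUES, the epsilon of 2^-7, the cast behavior) follows from bfloat16 being the top 16 bits of an IEEE-754 float32 with round-to-nearest-even. A self-contained sketch of that conversion; this is illustrative, not the XLA implementation, and NaN payload handling is omitted:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// float32 -> bfloat16 by rounding the low 16 bits to nearest, ties to
// even. bfloat16 keeps 8 explicit mantissa bits, hence epsilon = 2^-7.
static uint16_t FloatToBfloat16(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  uint32_t lsb = (bits >> 16) & 1;        // surviving low mantissa bit
  uint32_t rounding_bias = 0x7FFF + lsb;  // ties round to even
  return static_cast<uint16_t>((bits + rounding_bias) >> 16);
}

static float Bfloat16ToFloat(uint16_t h) {
  uint32_t bits = static_cast<uint32_t>(h) << 16;
  float f;
  std::memcpy(&f, &bits, sizeof(f));
  return f;
}

int main() {
  // 1 + 2^-7 survives the round trip; 1 + 2^-8 rounds back to 1.0.
  std::printf("%.8f\n", Bfloat16ToFloat(FloatToBfloat16(1.0078125f)));
  std::printf("%.8f\n", Bfloat16ToFloat(FloatToBfloat16(1.00390625f)));
}
```
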
float16 , np . float32 , np . float64 , np . int8 , np . int16 , np . int32 , <nl> - np . int64 , np . complex64 , np . complex128 , np . uint8 , np . uint16 , np . uint32 , <nl> - np . uint64 , np . intc , np . int_ , np . longlong , np . uintc , np . ulonglong <nl> - ] : <nl> - x = np . array ( [ [ 1 , 2 , 3 ] ] , dtype = dtype ) <nl> - y = x . astype ( bfloat16 ) <nl> - z = y . astype ( dtype ) <nl> - self . assertTrue ( np . all ( x = = y ) ) <nl> - self . assertEqual ( bfloat16 , y . dtype ) <nl> - self . assertTrue ( np . all ( x = = z ) ) <nl> - self . assertEqual ( dtype , z . dtype ) <nl> - <nl> - def testConformNumpyComplex ( self ) : <nl> - for dtype in [ np . complex64 , np . complex128 ] : <nl> - x = np . array ( [ 1 . 1 , 2 . 2 + 2 . 2j , 3 . 3 ] , dtype = dtype ) <nl> - y_np = x . astype ( np . float32 ) <nl> - y_tf = x . astype ( bfloat16 ) <nl> - numpy_assert_allclose ( y_np , y_tf , atol = 2e - 2 ) <nl> - <nl> - z_np = y_np . astype ( dtype ) <nl> - z_tf = y_tf . astype ( dtype ) <nl> - numpy_assert_allclose ( z_np , z_tf , atol = 2e - 2 ) <nl> - <nl> - def testArange ( self ) : <nl> - np . testing . assert_equal ( <nl> - np . arange ( 100 , dtype = np . float32 ) . astype ( bfloat16 ) , <nl> - np . arange ( 100 , dtype = bfloat16 ) ) <nl> - np . testing . assert_equal ( <nl> - np . arange ( - 10 . 5 , 7 . 8 , 0 . 5 , dtype = np . float32 ) . astype ( bfloat16 ) , <nl> - np . arange ( - 10 . 5 , 7 . 8 , 0 . 5 , dtype = bfloat16 ) ) <nl> - np . testing . assert_equal ( <nl> - np . arange ( - 0 . , - 7 . , - 0 . 25 , dtype = np . float32 ) . astype ( bfloat16 ) , <nl> - np . arange ( - 0 . , - 7 . , - 0 . 25 , dtype = bfloat16 ) ) <nl> - np . testing . assert_equal ( <nl> - np . arange ( - 16384 . , 16384 . , 64 . , dtype = np . float32 ) . astype ( bfloat16 ) , <nl> - np . arange ( - 16384 . , 16384 . , 64 . , dtype = bfloat16 ) ) <nl> - <nl> - # pylint : disable = g - complex - comprehension <nl> - @ parameterized . named_parameters ( ( { <nl> - " testcase_name " : " _ " + op . __name__ , <nl> - " op " : op <nl> - } for op in UNARY_UFUNCS ) ) <nl> - def testUnaryUfunc ( self , op ) : <nl> - rng = np . random . RandomState ( seed = 42 ) <nl> - x = rng . randn ( 3 , 7 , 10 ) . astype ( bfloat16 ) <nl> - numpy_assert_allclose ( <nl> - op ( x ) . astype ( np . float32 ) , op ( x . astype ( np . float32 ) ) , rtol = 1e - 2 ) <nl> - <nl> - @ parameterized . named_parameters ( ( { <nl> - " testcase_name " : " _ " + op . __name__ , <nl> - " op " : op <nl> - } for op in BINARY_UFUNCS ) ) <nl> - def testBinaryUfunc ( self , op ) : <nl> - rng = np . random . RandomState ( seed = 42 ) <nl> - x = rng . randn ( 3 , 7 , 10 ) . astype ( bfloat16 ) <nl> - y = rng . randn ( 4 , 1 , 7 , 10 ) . astype ( bfloat16 ) <nl> - numpy_assert_allclose ( <nl> - op ( x , y ) . astype ( np . float32 ) , <nl> - op ( x . astype ( np . float32 ) , y . astype ( np . float32 ) ) , <nl> - rtol = 1e - 2 ) <nl> - <nl> - @ parameterized . named_parameters ( ( { <nl> - " testcase_name " : " _ " + op . __name__ , <nl> - " op " : op <nl> - } for op in BINARY_PREDICATE_UFUNCS ) ) <nl> - def testBinaryPredicateUfunc ( self , op ) : <nl> - rng = np . random . RandomState ( seed = 42 ) <nl> - x = rng . randn ( 3 , 7 ) . astype ( bfloat16 ) <nl> - y = rng . randn ( 4 , 1 , 7 ) . astype ( bfloat16 ) <nl> - np . testing . assert_equal ( <nl> - op ( x , y ) , op ( x . astype ( np . float32 ) , y . astype ( np . float32 ) ) ) <nl> - <nl> - @ parameterized . named_parameters ( ( { <nl> - " testcase_name " : " _ " + op . 
__name__ , <nl> - " op " : op <nl> - } for op in [ np . isfinite , np . isinf , np . isnan , np . signbit , np . logical_not ] ) ) <nl> - def testPredicateUfunc ( self , op ) : <nl> - rng = np . random . RandomState ( seed = 42 ) <nl> - shape = ( 3 , 7 , 10 ) <nl> - posinf_flips = rng . rand ( * shape ) < 0 . 1 <nl> - neginf_flips = rng . rand ( * shape ) < 0 . 1 <nl> - nan_flips = rng . rand ( * shape ) < 0 . 1 <nl> - vals = rng . randn ( * shape ) <nl> - vals = np . where ( posinf_flips , np . inf , vals ) <nl> - vals = np . where ( neginf_flips , - np . inf , vals ) <nl> - vals = np . where ( nan_flips , np . nan , vals ) <nl> - vals = vals . astype ( bfloat16 ) <nl> - np . testing . assert_equal ( op ( vals ) , op ( vals . astype ( np . float32 ) ) ) <nl> - <nl> - def testDivmod ( self ) : <nl> - rng = np . random . RandomState ( seed = 42 ) <nl> - x = rng . randn ( 3 , 7 ) . astype ( bfloat16 ) <nl> - y = rng . randn ( 4 , 1 , 7 ) . astype ( bfloat16 ) <nl> - o1 , o2 = np . divmod ( x , y ) <nl> - e1 , e2 = np . divmod ( x . astype ( np . float32 ) , y . astype ( np . float32 ) ) <nl> - numpy_assert_allclose ( o1 , e1 , rtol = 1e - 2 ) <nl> - numpy_assert_allclose ( o2 , e2 , rtol = 1e - 2 ) <nl> - <nl> - def testModf ( self ) : <nl> - rng = np . random . RandomState ( seed = 42 ) <nl> - x = rng . randn ( 3 , 7 ) . astype ( bfloat16 ) <nl> - o1 , o2 = np . modf ( x ) <nl> - e1 , e2 = np . modf ( x . astype ( np . float32 ) ) <nl> - numpy_assert_allclose ( o1 . astype ( np . float32 ) , e1 , rtol = 1e - 2 ) <nl> - numpy_assert_allclose ( o2 . astype ( np . float32 ) , e2 , rtol = 1e - 2 ) <nl> - <nl> - def testLdexp ( self ) : <nl> - rng = np . random . RandomState ( seed = 42 ) <nl> - x = rng . randn ( 3 , 7 ) . astype ( bfloat16 ) <nl> - y = rng . randint ( - 50 , 50 , ( 1 , 7 ) ) <nl> - numpy_assert_allclose ( <nl> - np . ldexp ( x , y ) . astype ( np . float32 ) , <nl> - np . ldexp ( x . astype ( np . float32 ) , y ) , <nl> - rtol = 1e - 2 , <nl> - atol = 1e - 6 ) <nl> - <nl> - def testFrexp ( self ) : <nl> - rng = np . random . RandomState ( seed = 42 ) <nl> - x = rng . randn ( 3 , 7 ) . astype ( bfloat16 ) <nl> - mant1 , exp1 = np . frexp ( x ) <nl> - mant2 , exp2 = np . frexp ( x . astype ( np . float32 ) ) <nl> - np . testing . assert_equal ( exp1 , exp2 ) <nl> - numpy_assert_allclose ( mant1 , mant2 , rtol = 1e - 2 ) <nl> - <nl> - def testNextAfter ( self ) : <nl> - one = np . array ( 1 . , dtype = bfloat16 ) <nl> - two = np . array ( 2 . , dtype = bfloat16 ) <nl> - zero = np . array ( 0 . , dtype = bfloat16 ) <nl> - nan = np . array ( np . nan , dtype = bfloat16 ) <nl> - np . testing . assert_equal ( np . nextafter ( one , two ) - one , epsilon ) <nl> - np . testing . assert_equal ( np . nextafter ( one , zero ) - one , - epsilon / 2 ) <nl> - np . testing . assert_equal ( np . isnan ( np . nextafter ( nan , one ) ) , True ) <nl> - np . testing . assert_equal ( np . isnan ( np . nextafter ( one , nan ) ) , True ) <nl> - np . testing . assert_equal ( np . nextafter ( one , one ) , one ) <nl> - smallest_denormal = float . fromhex ( " 1 . 0p - 133 " ) <nl> - np . testing . assert_equal ( np . nextafter ( zero , one ) , smallest_denormal ) <nl> - np . testing . assert_equal ( np . nextafter ( zero , - one ) , - smallest_denormal ) <nl> - for a , b in itertools . permutations ( [ 0 . , - 0 . , nan ] , 2 ) : <nl> - np . testing . assert_equal ( <nl> - np . nextafter ( <nl> - np . array ( a , dtype = np . float32 ) , np . array ( b , dtype = np . float32 ) ) , <nl> - np . 
nextafter ( <nl> - np . array ( a , dtype = bfloat16 ) , np . array ( b , dtype = bfloat16 ) ) ) <nl> - <nl> - <nl> - if __name__ = = " __main__ " : <nl> - absltest . main ( ) <nl> mmm a / tensorflow / compiler / xla / python / dlpack . cc <nl> ppp b / tensorflow / compiler / xla / python / dlpack . cc <nl> StatusOr < std : : vector < int64 > > StridesToLayout ( absl : : Span < int64 const > dims , <nl> } <nl> <nl> StatusOr < DLDeviceType > DLDeviceTypeForDevice ( const PjRtDevice & device ) { <nl> - const se : : Platform * platform = <nl> - device . local_device_state ( ) - > executor ( ) - > platform ( ) ; <nl> - if ( platform - > id ( ) = = se : : host : : kHostPlatformId ) { <nl> + if ( device . client ( ) - > platform_id ( ) = = kCpuId ) { <nl> return kDLCPU ; <nl> - } else if ( platform - > id ( ) = = se : : cuda : : kCudaPlatformId ) { <nl> + } else if ( device . client ( ) - > platform_id ( ) = = kGpuId ) { <nl> return kDLGPU ; <nl> } <nl> return InvalidArgument ( " Device % s cannot be used as a DLPack device . " , <nl> StatusOr < DLDeviceType > DLDeviceTypeForDevice ( const PjRtDevice & device ) { <nl> StatusOr < DLContext > DLContextForDevice ( const PjRtDevice & device ) { <nl> DLContext context ; <nl> TF_ASSIGN_OR_RETURN ( context . device_type , DLDeviceTypeForDevice ( device ) ) ; <nl> - context . device_id = device . local_device_id ( ) ; <nl> + context . device_id = device . local_hardware_id ( ) ; <nl> return context ; <nl> } <nl> <nl> StatusOr < PjRtDevice * > DeviceForDLContext ( const PjRtClient & client , <nl> " DLPack CPU device type mismatch with PjRtClient platform % s " , <nl> client . platform_name ( ) ) ; <nl> } <nl> - return client . LookupLocalDevice ( context . device_id ) ; <nl> + return client . LookupAddressableDevice ( context . device_id ) ; <nl> case kDLGPU : <nl> if ( client . platform_id ( ) ! = kGpuId ) { <nl> return InvalidArgument ( <nl> " DLPack GPU device type mismatch with PjRtClient platform % s " , <nl> client . platform_name ( ) ) ; <nl> } <nl> - return client . LookupLocalDevice ( context . device_id ) ; <nl> + return client . LookupAddressableDevice ( context . device_id ) ; <nl> default : <nl> return InvalidArgument ( " Unknown / unsupported DLPack device type % d " , <nl> context . device_type ) ; <nl> StatusOr < py : : capsule > BufferToDLPackManagedTensor ( py : : handle py_buffer , <nl> pack - > tensor . manager_ctx = pack . get ( ) ; <nl> pack - > tensor . deleter = DLPackTensorDeleter ; <nl> TF_ASSIGN_OR_RETURN ( dt . ctx , DLContextForDevice ( * buffer - > buffer ( ) - > device ( ) ) ) ; <nl> - dt . ctx . device_id = buffer - > buffer ( ) - > device ( ) - > local_device_id ( ) ; <nl> + dt . ctx . device_id = buffer - > buffer ( ) - > device ( ) - > local_hardware_id ( ) ; <nl> dt . ndim = buffer - > buffer ( ) - > on_host_shape ( ) . dimensions_size ( ) ; <nl> TF_ASSIGN_OR_RETURN ( dt . dtype , <nl> PrimitiveTypeToDLDataType ( <nl> mmm a / tensorflow / compiler / xla / python / outfeed_receiver . cc <nl> ppp b / tensorflow / compiler / xla / python / outfeed_receiver . cc <nl> OutfeedReceiverImpl : : OutfeedReceiverImpl ( <nl> callback_ = callback ; <nl> max_callback_queue_size_bytes_ = max_callback_queue_size_bytes ; <nl> for ( const auto & client : clients ) { <nl> - for ( const auto & device : client - > devices ( ) ) { <nl> - devices_ . push_back ( device . get ( ) ) ; <nl> + for ( auto device : client - > devices ( ) ) { <nl> + devices_ . push_back ( device ) ; <nl> } <nl> } <nl> CHECK_GT ( devices_ . 
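
The dlpack.cc hunk above simplifies device classification: instead of walking `local_device_state()->executor()->platform()` and comparing StreamExecutor platform ids, the device's client now exposes a platform id directly. A sketch of the resulting mapping, with stand-in enum names and values rather than the real PjRt/DLPack declarations:

```cpp
#include <cstdio>
#include <stdexcept>

// Illustrative stand-ins for the DLPack device types and PjRt platform
// ids; only the shape of the switch matters here.
enum DLDeviceType { kDLCPU = 1, kDLGPU = 2 };
enum PlatformId { kCpuId = 1, kGpuId = 2 };

DLDeviceType DLDeviceTypeFor(PlatformId platform) {
  switch (platform) {
    case kCpuId: return kDLCPU;
    case kGpuId: return kDLGPU;
  }
  // Anything else (e.g. TPU) is rejected, as in the real code's
  // InvalidArgument return.
  throw std::invalid_argument("platform cannot be a DLPack device");
}

int main() { std::printf("%d\n", DLDeviceTypeFor(kGpuId)); }
```
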
size ( ) , 0 ) ; <nl> StatusOr < std : : unique_ptr < Literal > > OutfeedReceiverImpl : : ReceiveRawFromOutfeed ( <nl> const PjRtDevice * device , const Shape & shape ) { <nl> std : : shared_ptr < Literal > literal_shared ; <nl> <nl> - TF_ASSIGN_OR_RETURN ( LocalDeviceState * local_device , <nl> - device - > GetLocalDeviceState ( ) ) ; <nl> - TF_ASSIGN_OR_RETURN ( Literal literal , <nl> - local_device - > client ( ) - > TransferFromOutfeedLocal ( <nl> - shape , local_device - > device_ordinal ( ) ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( Literal literal , device - > TransferFromOutfeed ( shape ) ) ; <nl> <nl> return absl : : make_unique < Literal > ( std : : move ( literal ) ) ; <nl> } <nl> mmm a / tensorflow / compiler / xla / python / py_buffer . cc <nl> ppp b / tensorflow / compiler / xla / python / py_buffer . cc <nl> StatusOr < std : : uintptr_t > PyBuffer : : UnsafeBufferPointer ( ) const { <nl> } <nl> <nl> StatusOr < py : : dict > PyBuffer : : CudaArrayInterface ( ) const { <nl> - if ( buffer_ - > device ( ) - > local_device_state ( ) - > executor ( ) - > platform_kind ( ) ! = <nl> - se : : PlatformKind : : kCuda ) { <nl> + / / TODO ( zhangqiaorjc ) : Differentiate between NVidia and other GPUs . <nl> + if ( buffer_ - > client ( ) - > platform_id ( ) ! = kGpuId ) { <nl> return InvalidArgument ( <nl> " __cuda_array_interface__ is only defined for NVidia GPU buffers . " ) ; <nl> } <nl> mmm a / tensorflow / compiler / xla / python / py_client . cc <nl> ppp b / tensorflow / compiler / xla / python / py_client . cc <nl> PyClient : : PyClient ( std : : shared_ptr < PjRtClient > pjrt_client ) <nl> <nl> std : : vector < ClientAndPtr < PjRtDevice > > PyClient : : Devices ( ) { <nl> std : : vector < ClientAndPtr < PjRtDevice > > devices ; <nl> - devices . reserve ( pjrt_client_ - > devices ( ) . size ( ) ) ; <nl> - for ( const auto & device : pjrt_client_ - > devices ( ) ) { <nl> - devices . push_back ( WrapWithClient ( shared_from_this ( ) , device . get ( ) ) ) ; <nl> + auto span = pjrt_client_ - > devices ( ) ; <nl> + devices . reserve ( span . size ( ) ) ; <nl> + for ( PjRtDevice * device : span ) { <nl> + devices . push_back ( WrapWithClient ( shared_from_this ( ) , device ) ) ; <nl> } <nl> return devices ; <nl> } <nl> PyClient : : GetDefaultDeviceAssignment ( int num_replicas , int num_partitions ) { <nl> result [ r ] . resize ( num_partitions ) ; <nl> for ( int p = 0 ; p < num_partitions ; + + p ) { <nl> int device_id = device_assignment ( r , p ) ; <nl> - auto iter = pjrt_client_ - > id_to_device ( ) . find ( device_id ) ; <nl> - CHECK ( iter ! = pjrt_client_ - > id_to_device ( ) . end ( ) ) < < device_id ; <nl> - result [ r ] [ p ] = WrapWithClient ( shared_from_this ( ) , iter - > second ) ; <nl> + TF_ASSIGN_OR_RETURN ( PjRtDevice * device , <nl> + pjrt_client_ - > LookupDevice ( device_id ) ) ; <nl> + result [ r ] [ p ] = WrapWithClient ( shared_from_this ( ) , device ) ; <nl> } <nl> } <nl> return result ; <nl> PyClient : : GetDefaultDeviceAssignment1D ( int num_replicas ) { <nl> std : : vector < ClientAndPtr < PjRtDevice > > result ; <nl> for ( int i = 0 ; i < num_replicas ; + + i ) { <nl> int device_id = device_assignment ( i , 0 ) ; <nl> - auto iter = pjrt_client_ - > id_to_device ( ) . find ( device_id ) ; <nl> - CHECK ( iter ! = pjrt_client_ - > id_to_device ( ) . end ( ) ) < < device_id ; <nl> - result . 
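
The py_client.cc changes in this region replace raw `id_to_device()` map probes guarded by CHECKs with `LookupDevice`, which returns a `StatusOr` that the caller unwraps via `TF_ASSIGN_OR_RETURN`. A compilable sketch of the shape of that pattern, with `std::optional` standing in for `xla::StatusOr` and an exception standing in for status propagation:

```cpp
#include <optional>
#include <stdexcept>
#include <string>

struct Device { int id; };

// Fallible lookup: an empty optional models an error status.
std::optional<Device> LookupDevice(int device_id) {
  if (device_id >= 0 && device_id < 8) return Device{device_id};
  return std::nullopt;  // unknown id
}

// Models TF_ASSIGN_OR_RETURN's "unwrap or propagate" step.
Device UnwrapOrThrow(std::optional<Device> d, const std::string& what) {
  if (!d) throw std::runtime_error("lookup failed: " + what);
  return *d;
}

int main() {
  Device dev = UnwrapOrThrow(LookupDevice(3), "device 3");
  return dev.id == 3 ? 0 : 1;
}
```
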
push_back ( WrapWithClient ( shared_from_this ( ) , iter - > second ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( PjRtDevice * device , <nl> + pjrt_client_ - > LookupDevice ( device_id ) ) ; <nl> + result . push_back ( WrapWithClient ( shared_from_this ( ) , device ) ) ; <nl> } <nl> return result ; <nl> } <nl> StatusOr < std : : unique_ptr < PyBuffer > > PyClient : : BufferFromPyval ( <nl> device = pjrt_client_ - > local_devices ( ) . front ( ) ; <nl> } <nl> CHECK ( device ! = nullptr ) ; <nl> - auto iter = pjrt_client_ - > id_to_device ( ) . find ( device - > id ( ) ) ; <nl> - if ( iter - > second ! = device ) { <nl> + TF_ASSIGN_OR_RETURN ( PjRtDevice * found_device , <nl> + pjrt_client_ - > LookupDevice ( device - > id ( ) ) ) ; <nl> + if ( found_device ! = device ) { <nl> return InvalidArgument ( " Cannot copy value to device ' % s ' with ' % s ' backend " , <nl> device - > DebugString ( ) , <nl> pjrt_client_ - > platform_name ( ) ) ; <nl> mmm a / tensorflow / compiler / xla / python / py_client . h <nl> ppp b / tensorflow / compiler / xla / python / py_client . h <nl> class PyClient : public std : : enable_shared_from_this < PyClient > { <nl> const std : : string & platform_name ( ) const { <nl> return pjrt_client_ - > platform_name ( ) ; <nl> } <nl> - int local_device_count ( ) const { return pjrt_client_ - > local_device_count ( ) ; } <nl> + int addressable_device_count ( ) const { <nl> + return pjrt_client_ - > addressable_device_count ( ) ; <nl> + } <nl> int device_count ( ) const { return pjrt_client_ - > device_count ( ) ; } <nl> int host_id ( ) const { return pjrt_client_ - > host_id ( ) ; } <nl> <nl> mmm a / tensorflow / compiler / xla / python / pytree . cc <nl> ppp b / tensorflow / compiler / xla / python / pytree . cc <nl> limitations under the License . <nl> # include " pybind11 / pybind11 . h " <nl> # include " pybind11 / pytypes . h " <nl> # include " pybind11 / stl . h " <nl> + # include " tensorflow / compiler / xla / python / types . h " <nl> <nl> namespace xla { <nl> <nl> bool PyTreeDef : : operator = = ( const PyTreeDef & other ) const { <nl> } <nl> } <nl> <nl> - void PyTreeDef : : FlattenInto ( py : : handle handle , <nl> - std : : vector < py : : object > & leaves ) { <nl> + void PyTreeDef : : FlattenInto ( py : : handle handle , std : : vector < py : : object > & leaves , <nl> + absl : : optional < py : : function > leaf_predicate ) { <nl> Node node ; <nl> int start_num_nodes = traversal_ . size ( ) ; <nl> int start_num_leaves = leaves . size ( ) ; <nl> - node . kind = GetKind ( handle , & node . custom ) ; <nl> - if ( node . kind = = Kind : : kNone ) { <nl> - / / Nothing to do . <nl> - } else if ( node . kind = = Kind : : kTuple ) { <nl> - py : : tuple tuple = py : : reinterpret_borrow < py : : tuple > ( handle ) ; <nl> - node . arity = tuple . size ( ) ; <nl> - for ( py : : handle entry : tuple ) { <nl> - FlattenInto ( entry , leaves ) ; <nl> - } <nl> - } else if ( node . kind = = Kind : : kList ) { <nl> - py : : list list = py : : reinterpret_borrow < py : : list > ( handle ) ; <nl> - node . arity = list . size ( ) ; <nl> - for ( py : : handle entry : list ) { <nl> - FlattenInto ( entry , leaves ) ; <nl> - } <nl> - } else if ( node . kind = = Kind : : kDict ) { <nl> - py : : dict dict = py : : reinterpret_borrow < py : : dict > ( handle ) ; <nl> - py : : list keys = py : : reinterpret_steal < py : : list > ( PyDict_Keys ( dict . ptr ( ) ) ) ; <nl> - if ( PyList_Sort ( keys . ptr ( ) ) ) { <nl> - throw std : : runtime_error ( " Dictionary key sort failed . 
" ) ; <nl> - } <nl> - for ( py : : handle key : keys ) { <nl> - FlattenInto ( dict [ key ] , leaves ) ; <nl> - } <nl> - node . arity = dict . size ( ) ; <nl> - node . node_data = std : : move ( keys ) ; <nl> - } else if ( node . kind = = Kind : : kCustom ) { <nl> - py : : tuple out = py : : cast < py : : tuple > ( node . custom - > to_iterable ( handle ) ) ; <nl> - if ( out . size ( ) ! = 2 ) { <nl> - throw std : : runtime_error ( <nl> - " PyTree custom to_iterable function should return a pair " ) ; <nl> - } <nl> - node . node_data = out [ 1 ] ; <nl> - node . arity = 0 ; <nl> - for ( py : : handle entry : py : : cast < py : : iterable > ( out [ 0 ] ) ) { <nl> - + + node . arity ; <nl> - FlattenInto ( entry , leaves ) ; <nl> - } <nl> - } else if ( node . kind = = Kind : : kNamedTuple ) { <nl> - py : : tuple tuple = py : : reinterpret_borrow < py : : tuple > ( handle ) ; <nl> - node . arity = tuple . size ( ) ; <nl> - node . node_data = py : : reinterpret_borrow < py : : object > ( tuple . get_type ( ) ) ; <nl> - for ( py : : handle entry : tuple ) { <nl> - FlattenInto ( entry , leaves ) ; <nl> - } <nl> + if ( leaf_predicate & & ( * leaf_predicate ) ( handle ) . cast < bool > ( ) ) { <nl> + leaves . push_back ( py : : reinterpret_borrow < py : : object > ( handle ) ) ; <nl> } else { <nl> - assert ( node . kind = = Kind : : kLeaf ) ; <nl> - leaves . push_back ( pybind11 : : reinterpret_borrow < py : : object > ( handle ) ) ; <nl> + node . kind = GetKind ( handle , & node . custom ) ; <nl> + auto recurse = [ this , & leaf_predicate , & leaves ] ( py : : handle child ) { <nl> + FlattenInto ( child , leaves , leaf_predicate ) ; <nl> + } ; <nl> + if ( node . kind = = Kind : : kNone ) { <nl> + / / Nothing to do . <nl> + } else if ( node . kind = = Kind : : kTuple ) { <nl> + py : : tuple tuple = py : : reinterpret_borrow < py : : tuple > ( handle ) ; <nl> + node . arity = tuple . size ( ) ; <nl> + for ( py : : handle entry : tuple ) { <nl> + recurse ( entry ) ; <nl> + } <nl> + } else if ( node . kind = = Kind : : kList ) { <nl> + py : : list list = py : : reinterpret_borrow < py : : list > ( handle ) ; <nl> + node . arity = list . size ( ) ; <nl> + for ( py : : handle entry : list ) { <nl> + recurse ( entry ) ; <nl> + } <nl> + } else if ( node . kind = = Kind : : kDict ) { <nl> + py : : dict dict = py : : reinterpret_borrow < py : : dict > ( handle ) ; <nl> + py : : list keys = py : : reinterpret_steal < py : : list > ( PyDict_Keys ( dict . ptr ( ) ) ) ; <nl> + if ( PyList_Sort ( keys . ptr ( ) ) ) { <nl> + throw std : : runtime_error ( " Dictionary key sort failed . " ) ; <nl> + } <nl> + for ( py : : handle key : keys ) { <nl> + recurse ( dict [ key ] ) ; <nl> + } <nl> + node . arity = dict . size ( ) ; <nl> + node . node_data = std : : move ( keys ) ; <nl> + } else if ( node . kind = = Kind : : kCustom ) { <nl> + py : : tuple out = py : : cast < py : : tuple > ( node . custom - > to_iterable ( handle ) ) ; <nl> + if ( out . size ( ) ! = 2 ) { <nl> + throw std : : runtime_error ( <nl> + " PyTree custom to_iterable function should return a pair " ) ; <nl> + } <nl> + node . node_data = out [ 1 ] ; <nl> + node . arity = 0 ; <nl> + for ( py : : handle entry : py : : cast < py : : iterable > ( out [ 0 ] ) ) { <nl> + + + node . arity ; <nl> + recurse ( entry ) ; <nl> + } <nl> + } else if ( node . kind = = Kind : : kNamedTuple ) { <nl> + py : : tuple tuple = py : : reinterpret_borrow < py : : tuple > ( handle ) ; <nl> + node . arity = tuple . size ( ) ; <nl> + node . 
node_data = py : : reinterpret_borrow < py : : object > ( tuple . get_type ( ) ) ; <nl> + for ( py : : handle entry : tuple ) { <nl> + recurse ( entry ) ; <nl> + } <nl> + } else { <nl> + assert ( node . kind = = Kind : : kLeaf ) ; <nl> + leaves . push_back ( py : : reinterpret_borrow < py : : object > ( handle ) ) ; <nl> + } <nl> } <nl> node . num_nodes = traversal_ . size ( ) - start_num_nodes + 1 ; <nl> node . num_leaves = leaves . size ( ) - start_num_leaves ; <nl> void PyTreeDef : : FlattenInto ( py : : handle handle , <nl> } <nl> <nl> / * static * / std : : pair < std : : vector < py : : object > , std : : unique_ptr < PyTreeDef > > <nl> - PyTreeDef : : Flatten ( py : : handle x ) { <nl> + PyTreeDef : : Flatten ( py : : handle x , absl : : optional < py : : function > leaf_predicate ) { <nl> std : : vector < py : : object > leaves ; <nl> auto tree = absl : : make_unique < PyTreeDef > ( ) ; <nl> - tree - > FlattenInto ( x , leaves ) ; <nl> + tree - > FlattenInto ( x , leaves , leaf_predicate ) ; <nl> return std : : make_pair ( std : : move ( leaves ) , std : : move ( tree ) ) ; <nl> } <nl> <nl> std : : string PyTreeDef : : ToString ( ) const { <nl> <nl> void BuildPytreeSubmodule ( py : : module & m ) { <nl> py : : module pytree = m . def_submodule ( " pytree " , " Python tree library " ) ; <nl> - pytree . def ( " flatten " , & PyTreeDef : : Flatten ) ; <nl> + pytree . def ( " flatten " , & PyTreeDef : : Flatten , py : : arg ( " tree " ) , <nl> + py : : arg ( " leaf_predicate " ) = absl : : nullopt ) ; <nl> pytree . def ( " tuple " , & PyTreeDef : : Tuple ) ; <nl> pytree . def ( " all_leaves " , & PyTreeDef : : AllLeaves ) ; <nl> <nl> mmm a / tensorflow / compiler / xla / python / pytree . h <nl> ppp b / tensorflow / compiler / xla / python / pytree . h <nl> class PyTreeDef { <nl> <nl> / / Flattens a Pytree into a list of leaves and a PyTreeDef . <nl> static std : : pair < std : : vector < pybind11 : : object > , std : : unique_ptr < PyTreeDef > > <nl> - Flatten ( pybind11 : : handle x ) ; <nl> + Flatten ( pybind11 : : handle x , <nl> + absl : : optional < pybind11 : : function > leaf_predicate = absl : : nullopt ) ; <nl> <nl> / / Recursive helper used to implement Flatten ( ) . <nl> - void FlattenInto ( pybind11 : : handle handle , <nl> - std : : vector < pybind11 : : object > & leaves ) ; <nl> + void FlattenInto ( <nl> + pybind11 : : handle handle , std : : vector < pybind11 : : object > & leaves , <nl> + absl : : optional < pybind11 : : function > leaf_predicate = absl : : nullopt ) ; <nl> <nl> / / Tests whether the given list is a flat list of leaves . <nl> static bool AllLeaves ( const pybind11 : : iterable & x ) ; <nl> mmm a / tensorflow / compiler / xla / python / tpu_driver / client / BUILD <nl> ppp b / tensorflow / compiler / xla / python / tpu_driver / client / BUILD <nl> cc_library ( <nl> " / / tensorflow / compiler / xla / service : computation_placer " , <nl> " / / tensorflow / compiler / xla / service : shaped_buffer " , <nl> " / / tensorflow / core / framework : allocator " , <nl> + " / / tensorflow / core / platform : casts " , <nl> " / / tensorflow / core / platform : env " , <nl> " / / tensorflow / core / profiler / lib : traceme " , <nl> " @ com_google_absl / / absl / memory " , <nl> mmm a / tensorflow / compiler / xla / python / tpu_driver / client / tpu_client . cc <nl> ppp b / tensorflow / compiler / xla / python / tpu_driver / client / tpu_client . 
cc <nl> namespace xla { <nl> <nl> TpuDevice : : TpuDevice ( int id , int host_id , const std : : array < int , 3 > & coords , <nl> int core_on_chip ) <nl> - : xla : : PjRtDevice ( id , / * local_device_state = * / nullptr , <nl> - / * device_kind = * / " Cloud TPU " , host_id ) , <nl> + : xla : : PjRtStreamExecutorDevice ( id , / * local_device_state = * / nullptr , <nl> + / * device_kind = * / " Cloud TPU " , host_id ) , <nl> coords_ ( coords ) , <nl> core_on_chip_ ( core_on_chip ) { } <nl> <nl> PyTpuExecutable : : PyTpuExecutable ( <nl> < < " Inserting duplicate replica : " < < replica ; <nl> executables_ [ replica ] = <nl> client_ - > driver ( ) - > LoadProgram ( device_id , compiled_program . get ( ) , { } ) ; <nl> - addressable_device_logical_ids_ . emplace_back ( replica , partition ) ; <nl> + local_logical_device_ids_ . emplace_back ( replica , partition ) ; <nl> local_devices_ . push_back ( device ) ; <nl> } <nl> } <nl> PyTpuExecutable : : ExecuteOnLocalDevices ( <nl> / / long time and we want all cores to be scheduled in parallel . <nl> thread_pool - > Schedule ( [ this , i , argument_handles , & results , & results_lock , <nl> & execute_semaphore ] ( ) { <nl> - const int replica = addressable_device_logical_ids_ [ i ] . first ; <nl> - const int partition = addressable_device_logical_ids_ [ i ] . second ; <nl> + const int replica = local_logical_device_ids_ [ i ] . first ; <nl> + const int partition = local_logical_device_ids_ [ i ] . second ; <nl> RunId run_id ; <nl> auto result = ExecuteHelper ( argument_handles , argument_handles [ i ] , <nl> replica , partition , run_id ) ; <nl> mmm a / tensorflow / compiler / xla / python / tpu_driver / client / tpu_client . h <nl> ppp b / tensorflow / compiler / xla / python / tpu_driver / client / tpu_client . h <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / status . h " <nl> # include " tensorflow / compiler / xla / statusor . h " <nl> # include " tensorflow / compiler / xla / util . h " <nl> + # include " tensorflow / core / platform / casts . h " <nl> # include " tensorflow / core / platform / threadpool . h " <nl> <nl> namespace xla { <nl> <nl> constexpr char kTpuPlatform [ ] = " tpu " ; <nl> <nl> - class TpuDevice : public PjRtDevice { <nl> + class TpuDevice : public PjRtStreamExecutorDevice { <nl> public : <nl> TpuDevice ( int id , int host_id , const std : : array < int , 3 > & coords , <nl> int core_on_chip ) ; <nl> class PyTpuExecutable { <nl> return device_assignment_ ; <nl> } <nl> <nl> - const std : : vector < std : : pair < int , int > > & addressable_device_logical_ids ( ) <nl> - const { <nl> - return addressable_device_logical_ids_ ; <nl> + const std : : vector < std : : pair < int , int > > & local_logical_device_ids ( ) const { <nl> + return local_logical_device_ids_ ; <nl> } <nl> <nl> const std : : vector < std : : shared_ptr < PjRtDevice > > & local_devices ( ) const { <nl> class PyTpuExecutable { <nl> <nl> / / The replica and partition indices of device_assignment_ to be run by this <nl> / / client . On single - host platforms without partitioning , this is all replicas <nl> - / / ( i . e . addressable_device_logical_ids_ [ i ] = ( i , 0 ) ) , but this may not be the <nl> - / / case on multi - host platforms . If there are 4 replicas and 2 partitions on a <nl> - / / single host platform , size of addressable_device_logical_ids_ is 4 * 2 = 8 . 
<nl> - std : : vector < std : : pair < int , int > > addressable_device_logical_ids_ ; <nl> - <nl> - / / local_devices_ [ i ] is the Device to which addressable_device_logical_ids_ [ i ] <nl> - / / is assigned . shared_ptrs instead of unique_ptrs to play well with the <nl> - / / Python bindings ( see xla . cc ) . <nl> + / / ( i . e . local_logical_device_ids_ [ i ] = ( i , 0 ) ) , but this may not be the case <nl> + / / on multi - host platforms . <nl> + / / If there are 4 replicas and 2 partitions on a single host platform , size of <nl> + / / local_logical_device_ids_ is 4 * 2 = 8 . <nl> + std : : vector < std : : pair < int , int > > local_logical_device_ids_ ; <nl> + <nl> + / / local_devices_ [ i ] is the Device to which local_logical_device_ids_ [ i ] is <nl> + / / assigned . <nl> + / / shared_ptrs instead of unique_ptrs to play well with the Python bindings <nl> + / / ( see xla . cc ) . <nl> std : : vector < std : : shared_ptr < PjRtDevice > > local_devices_ ; <nl> <nl> xla : : Shape result_shape_ ; <nl> mmm a / tensorflow / compiler / xla / python / tpu_driver / client / tpu_client_extension . cc <nl> ppp b / tensorflow / compiler / xla / python / tpu_driver / client / tpu_client_extension . cc <nl> PYBIND11_MODULE ( tpu_client_extension , m ) { <nl> <nl> py : : class_ < PyTpuExecutable > ( m , " TpuExecutable " ) <nl> . def ( " local_logical_device_ids " , <nl> - & PyTpuExecutable : : addressable_device_logical_ids ) <nl> + & PyTpuExecutable : : local_logical_device_ids ) <nl> . def ( " local_devices " , & PyTpuExecutable : : local_devices ) <nl> . def_property_readonly ( " client " , & PyTpuExecutable : : client ) <nl> . def ( " size_of_generated_code_in_bytes " , <nl> mmm a / tensorflow / compiler / xla / python / types . cc <nl> ppp b / tensorflow / compiler / xla / python / types . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / python / types . h " <nl> <nl> # include " absl / container / flat_hash_map . h " <nl> - # include " tensorflow / compiler / xla / python / bfloat16 . h " <nl> # include " tensorflow / compiler / xla / status_macros . h " <nl> + # include " tensorflow / python / lib / core / bfloat16 . h " <nl> <nl> namespace xla { <nl> <nl> xla : : StatusOr < py : : dtype > PrimitiveTypeToDtype ( PrimitiveType type ) { <nl> case U64 : <nl> return py : : dtype : : of < uint64 > ( ) ; <nl> case BF16 : { <nl> - TF_ASSIGN_OR_RETURN ( py : : object bfloat16 , Bfloat16Dtype ( ) ) ; <nl> - return py : : dtype : : from_args ( bfloat16 ) ; <nl> + py : : handle bfloat16 ( tensorflow : : Bfloat16Dtype ( ) ) ; <nl> + return py : : dtype : : from_args ( py : : reinterpret_borrow < py : : object > ( bfloat16 ) ) ; <nl> } <nl> case F16 : <nl> return py : : dtype ( " e " ) ; / / PEP 3118 code for " float16 <nl> StatusOr < py : : object > LiteralToPython ( std : : shared_ptr < xla : : Literal > literal ) { <nl> / / We requested an array of uint16 since NumPy doesn ' t know how <nl> / / to produce our custom bfloat16 type . Reinterpret the array as bfloat16 <nl> / / before handing it back to the caller . <nl> - TF_ASSIGN_OR_RETURN ( py : : object bfloat16 , Bfloat16Dtype ( ) ) ; <nl> + py : : handle bfloat16 ( tensorflow : : Bfloat16Dtype ( ) ) ; <nl> + bfloat16 . inc_ref ( ) ; <nl> array = py : : reinterpret_steal < py : : array > ( <nl> PyArray_View ( reinterpret_cast < PyArrayObject * > ( array . ptr ( ) ) , <nl> - reinterpret_cast < PyArray_Descr * > ( bfloat16 . release ( ) . ptr ( ) ) , <nl> + reinterpret_cast < PyArray_Descr * > ( bfloat16 . 
ptr ( ) ) , <nl> static_cast < PyTypeObject * > ( nullptr ) ) ) ; <nl> } <nl> return array ; <nl> mmm a / tensorflow / compiler / xla / python / xla . cc <nl> ppp b / tensorflow / compiler / xla / python / xla . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / pjrt / interpreter_device . h " <nl> # include " tensorflow / compiler / xla / pjrt / pjrt_client . h " <nl> # include " tensorflow / compiler / xla / pjrt / tpu_client . h " <nl> - # include " tensorflow / compiler / xla / python / bfloat16 . h " <nl> # include " tensorflow / compiler / xla / python / dlpack . h " <nl> # include " tensorflow / compiler / xla / python / jax_jit . h " <nl> # include " tensorflow / compiler / xla / python / ops . h " <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / statusor . h " <nl> # include " tensorflow / compiler / xla / util . h " <nl> # include " tensorflow / core / platform / errors . h " <nl> + # include " tensorflow / python / lib / core / bfloat16 . h " <nl> # include " tensorflow / stream_executor / platform . h " <nl> <nl> namespace xla { <nl> PYBIND11_MODULE ( xla_extension , m ) { <nl> throw std : : runtime_error ( " Unable to initialize Numpy API " ) ; <nl> } <nl> <nl> + CHECK ( tensorflow : : RegisterNumpyBfloat16 ( ) ) ; <nl> + <nl> / / Types <nl> py : : enum_ < PrimitiveType > ( m , " PrimitiveType " ) <nl> . value ( " PRIMITIVE_TYPE_INVALID " , PRIMITIVE_TYPE_INVALID ) <nl> PYBIND11_MODULE ( xla_extension , m ) { <nl> . value ( " OPAQUE_TYPE " , OPAQUE_TYPE ) <nl> . value ( " TOKEN " , TOKEN ) ; <nl> <nl> - m . def ( " bfloat16_dtype " , Bfloat16Dtype ) ; <nl> + m . def ( " bfloat16_dtype " , <nl> + [ ] ( ) { return py : : handle ( tensorflow : : Bfloat16Dtype ( ) ) ; } ) ; <nl> <nl> / / Must be before PyClient . compile . <nl> BuildXlaCompilerSubmodule ( m ) ; <nl> PYBIND11_MODULE ( xla_extension , m ) { <nl> . def_property_readonly ( " host_id " , & PjRtDevice : : host_id , <nl> " Integer ID of this device ' s host . \ n \ n " <nl> " This is always 0 except on multi - host platforms . " ) <nl> - . def_property_readonly ( " platform " , & PjRtDevice : : platform_name ) <nl> + . def_property_readonly ( " platform " , <nl> + [ ] ( const PjRtDevice & device ) { <nl> + return device . client ( ) - > platform_name ( ) ; <nl> + } ) <nl> . def_property_readonly ( " device_kind " , & PjRtDevice : : device_kind ) <nl> . def_property_readonly ( <nl> " client " , <nl> PYBIND11_MODULE ( xla_extension , m ) { <nl> py : : class_ < PyClient , std : : shared_ptr < PyClient > > py_local_client ( m , " Client " ) ; <nl> py_local_client . def_property_readonly ( " platform " , & PyClient : : platform_name ) <nl> . def ( " device_count " , & PyClient : : device_count ) <nl> - . def ( " local_device_count " , & PyClient : : local_device_count ) <nl> + . def ( " local_device_count " , & PyClient : : addressable_device_count ) <nl> . def ( " devices " , & PyClient : : Devices ) <nl> . def ( " local_devices " , & PyClient : : LocalDevices ) <nl> . def ( " host_id " , & PyClient : : host_id ) <nl> PYBIND11_MODULE ( xla_extension , m ) { <nl> [ ] ( PyExecutable * exec ) { <nl> auto span = exec - > addressable_device_logical_ids ( ) ; <nl> / / Not on dispatch critical path , so ok to have heap allocation . <nl> - std : : vector < std : : pair < int , int > > addressable_device_logical_ids ; <nl> - addressable_device_logical_ids . reserve ( span . 
size ( ) ) ; <nl> + std : : vector < std : : pair < int , int > > addressable_device_logic_ids ; <nl> + addressable_device_logic_ids . reserve ( span . size ( ) ) ; <nl> for ( const auto & logical_device_id : span ) { <nl> - addressable_device_logical_ids . push_back ( std : : make_pair ( <nl> + addressable_device_logic_ids . push_back ( std : : make_pair ( <nl> logical_device_id . replica , logical_device_id . partition ) ) ; <nl> } <nl> } ) <nl> mmm a / tensorflow / compiler / xla / service / compiler . h <nl> ppp b / tensorflow / compiler / xla / service / compiler . h <nl> limitations under the License . <nl> # include " tensorflow / core / platform / protobuf . h " <nl> # include " tensorflow / core / platform / stream_executor_no_cuda . h " <nl> # include " tensorflow / core / platform / thread_annotations . h " <nl> + # include " tensorflow / core / platform / threadpool . h " <nl> <nl> namespace xla { <nl> <nl> class AotCompilationMetadata { <nl> / / platform . <nl> class Compiler { <nl> public : <nl> + struct CompileOptions { <nl> + / / If device_allocator is not null , the compiler may use it to allocate temp <nl> + / / space on the device for use during compilation . For example , the <nl> + / / compiler may allocate buffers on the device and then run variants of a <nl> + / / given algorithm over those buffers , to see which variant is fastest . Any <nl> + / / space allocated will be deallocated before the compilation returns . <nl> + se : : DeviceMemoryAllocator * device_allocator = nullptr ; <nl> + <nl> + / / An optional thread pool for parallel compilation . <nl> + tensorflow : : thread : : ThreadPool * thread_pool = nullptr ; <nl> + } ; <nl> + <nl> virtual ~ Compiler ( ) { } <nl> <nl> / / Returns the ID of the platform that this compiler targets . <nl> class Compiler { <nl> <nl> / / Runs Hlo passes to optimize the given Hlo module , returns the optimized <nl> / / module . <nl> - / / <nl> - / / If device_allocator is not null , the compiler may use it to allocate temp <nl> - / / space on the device for use during compilation . For example , the compiler <nl> - / / may allocate buffers on the device and then run variants of a given <nl> - / / algorithm over those buffers , to see which variant is fastest . Any space <nl> - / / allocated should be deallocated before this function returns . <nl> virtual StatusOr < std : : unique_ptr < HloModule > > RunHloPasses ( <nl> std : : unique_ptr < HloModule > module , se : : StreamExecutor * executor , <nl> - se : : DeviceMemoryAllocator * device_allocator ) = 0 ; <nl> + const CompileOptions & options ) = 0 ; <nl> + StatusOr < std : : unique_ptr < HloModule > > RunHloPasses ( <nl> + std : : unique_ptr < HloModule > module , se : : StreamExecutor * executor , <nl> + se : : DeviceMemoryAllocator * device_allocator ) { <nl> + return RunHloPasses ( std : : move ( module ) , executor , <nl> + CompileOptions { device_allocator } ) ; <nl> + } <nl> <nl> / / Runs HLO passes to optimize the given HloModule , perform scheduling and <nl> / / buffer assignment , returns the optimized module and the buffer assignments . <nl> / / This interface is intentionally narrow . <nl> - / / <nl> - / / If device_allocator is not null , the compiler may use it to allocate temp <nl> - / / space on the device for use during compilation . For example , the compiler <nl> - / / may allocate buffers on the device and then run variants of a given <nl> - / / algorithm over those buffers , to see which variant is fastest . 
Any space <nl> - / / allocated should be deallocated before this function returns . <nl> virtual StatusOr < <nl> std : : tuple < std : : unique_ptr < HloModule > , std : : unique_ptr < BufferAssignment > > > <nl> RunHloPassesAndBufferAssignement ( std : : unique_ptr < HloModule > module , <nl> - se : : StreamExecutor * executor , <nl> - se : : DeviceMemoryAllocator * device_allocator , <nl> - bool optimize ) { <nl> + se : : StreamExecutor * executor , bool optimize , <nl> + const CompileOptions & options ) { <nl> return Unimplemented ( " This compiler does not support this method " ) ; <nl> } <nl> <nl> class Compiler { <nl> / / <nl> / / The compiler may optionally specialize to the individual device <nl> / / ( not just type of device ) indicated by the executor . <nl> - / / <nl> - / / device_allocator is optional ; see RunHloPasses . <nl> virtual StatusOr < std : : unique_ptr < Executable > > RunBackend ( <nl> std : : unique_ptr < HloModule > module , se : : StreamExecutor * executor , <nl> - se : : DeviceMemoryAllocator * device_allocator ) = 0 ; <nl> + const CompileOptions & options ) = 0 ; <nl> + StatusOr < std : : unique_ptr < Executable > > RunBackend ( <nl> + std : : unique_ptr < HloModule > module , se : : StreamExecutor * executor , <nl> + se : : DeviceMemoryAllocator * device_allocator ) { <nl> + return RunBackend ( std : : move ( module ) , executor , <nl> + CompileOptions { device_allocator } ) ; <nl> + } <nl> <nl> / / Compiles a set of HLO modules that can run in parallel , potentially <nl> / / communicating data between the modules , and returns a corresponding <nl> / / sequence of executable objects . <nl> / / <nl> - / / device_allocator is optional ; see RunHloPasses . <nl> - / / <nl> / / TODO ( b / 68666782 ) : Remove this method after adding support for multiple <nl> / / modules to RunHloPasses and RunBackends . <nl> virtual StatusOr < std : : vector < std : : unique_ptr < Executable > > > Compile ( <nl> std : : unique_ptr < HloModuleGroup > module_group , <nl> std : : vector < std : : vector < se : : StreamExecutor * > > stream_exec , <nl> - se : : DeviceMemoryAllocator * device_allocator ) = 0 ; <nl> + const CompileOptions & options ) = 0 ; <nl> + StatusOr < std : : vector < std : : unique_ptr < Executable > > > Compile ( <nl> + std : : unique_ptr < HloModuleGroup > module_group , <nl> + std : : vector < std : : vector < se : : StreamExecutor * > > stream_exec , <nl> + se : : DeviceMemoryAllocator * device_allocator ) { <nl> + return Compile ( std : : move ( module_group ) , stream_exec , <nl> + CompileOptions { device_allocator } ) ; <nl> + } <nl> <nl> / / Returns the backend configurations that the backend will consider for the <nl> / / given HLO . Returns no configurations if the backend does not support <nl> mmm a / tensorflow / compiler / xla / service / cpu / cpu_compiler . cc <nl> ppp b / tensorflow / compiler / xla / service / cpu / cpu_compiler . 
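
The compiler.h hunk above is a classic options-struct migration: the virtual entry points now take a `CompileOptions` that bundles the old `device_allocator` pointer with the newly added `thread_pool`, while thin non-virtual overloads keep the legacy allocator-pointer signatures compiling by forwarding into the new ones. A compilable sketch of that pattern with simplified stand-in types:

```cpp
// Stand-ins for se::DeviceMemoryAllocator and tensorflow's thread pool.
struct DeviceMemoryAllocator {};
struct ThreadPool {};

struct CompileOptions {
  DeviceMemoryAllocator* device_allocator = nullptr;
  ThreadPool* thread_pool = nullptr;  // new knob, defaults to "off"
};

class Compiler {
 public:
  virtual ~Compiler() = default;
  // New-style API: extensible without touching every caller again.
  virtual int RunHloPasses(int module, const CompileOptions& options) {
    return module;  // placeholder for "optimize and return the module"
  }
  // Compatibility overload: existing call sites keep working unchanged.
  int RunHloPasses(int module, DeviceMemoryAllocator* device_allocator) {
    return RunHloPasses(module, CompileOptions{device_allocator});
  }
};

int main() {
  Compiler c;
  DeviceMemoryAllocator alloc;
  return c.RunHloPasses(7, &alloc) == 7 ? 0 : 1;
}
```
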
cc <nl> Status CreateHloProfilingArtifacts ( <nl> <nl> StatusOr < std : : unique_ptr < HloModule > > CpuCompiler : : RunHloPasses ( <nl> std : : unique_ptr < HloModule > module , se : : StreamExecutor * / * stream_exec * / , <nl> - se : : DeviceMemoryAllocator * / * device_allocator * / ) { <nl> + const CompileOptions & / * options * / ) { <nl> std : : unique_ptr < llvm : : TargetMachine > jit_target_machine = <nl> SimpleOrcJIT : : InferTargetMachineForJIT ( <nl> CompilerTargetOptions ( module - > config ( ) ) , <nl> StatusOr < std : : unique_ptr < HloModule > > CpuCompiler : : RunHloPasses ( <nl> <nl> StatusOr < <nl> std : : tuple < std : : unique_ptr < HloModule > , std : : unique_ptr < BufferAssignment > > > <nl> - CpuCompiler : : RunHloPassesAndBufferAssignement ( <nl> - std : : unique_ptr < HloModule > module , se : : StreamExecutor * executor , <nl> - se : : DeviceMemoryAllocator * device_allocator , bool optimize ) { <nl> + CpuCompiler : : RunHloPassesAndBufferAssignement ( std : : unique_ptr < HloModule > module , <nl> + se : : StreamExecutor * executor , <nl> + bool optimize , <nl> + const CompileOptions & options ) { <nl> if ( optimize ) { <nl> - TF_ASSIGN_OR_RETURN ( <nl> - module , RunHloPasses ( std : : move ( module ) , executor , device_allocator ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( module , <nl> + RunHloPasses ( std : : move ( module ) , executor , options ) ) ; <nl> } <nl> <nl> / / Select an order for emitting the HLO instructions for each computation . <nl> struct OrcJITPostCompilationHook { <nl> <nl> StatusOr < std : : unique_ptr < Executable > > CpuCompiler : : RunBackend ( <nl> std : : unique_ptr < HloModule > module , se : : StreamExecutor * stream_exec , <nl> - se : : DeviceMemoryAllocator * / * device_allocator * / ) { <nl> + const CompileOptions & options ) { <nl> VLOG ( 1 ) < < " Compiling : " < < module - > name ( ) ; <nl> XLA_SCOPED_LOGGING_TIMER ( <nl> absl : : StrFormat ( " Compiling [ % s ] for CPU using JIT " , module - > name ( ) ) ) ; <nl> mmm a / tensorflow / compiler / xla / service / cpu / cpu_compiler . h <nl> ppp b / tensorflow / compiler / xla / service / cpu / cpu_compiler . 
h <nl> class CpuCompiler : public LLVMCompiler { <nl> <nl> StatusOr < std : : unique_ptr < HloModule > > RunHloPasses ( <nl> std : : unique_ptr < HloModule > module , se : : StreamExecutor * stream_exec , <nl> - se : : DeviceMemoryAllocator * device_allocator ) override ; <nl> + const CompileOptions & options ) override ; <nl> <nl> StatusOr < <nl> std : : tuple < std : : unique_ptr < HloModule > , std : : unique_ptr < BufferAssignment > > > <nl> RunHloPassesAndBufferAssignement ( std : : unique_ptr < HloModule > module , <nl> - se : : StreamExecutor * executor , <nl> - se : : DeviceMemoryAllocator * device_allocator , <nl> - bool optimize ) override ; <nl> + se : : StreamExecutor * executor , bool optimize , <nl> + const CompileOptions & options ) override ; <nl> <nl> StatusOr < std : : unique_ptr < Executable > > RunBackend ( <nl> std : : unique_ptr < HloModule > module , se : : StreamExecutor * stream_exec , <nl> - se : : DeviceMemoryAllocator * device_allocator ) override ; <nl> + const CompileOptions & options ) override ; <nl> <nl> StatusOr < std : : vector < std : : unique_ptr < AotCompilationResult > > > <nl> CompileAheadOfTime ( std : : unique_ptr < HloModuleGroup > module_group , <nl> mmm a / tensorflow / compiler / xla / service / gpu / BUILD <nl> ppp b / tensorflow / compiler / xla / service / gpu / BUILD <nl> filegroup ( <nl> name = " nccl_collective_thunk_src " , <nl> srcs = if_nccl ( <nl> [ " nccl_collective_thunk . cc " ] , <nl> - [ " dummy_collective_thunk . cc " ] , <nl> + [ " nccl_collective_thunk_dummy . cc " ] , <nl> ) , <nl> ) <nl> <nl> tf_cuda_library ( <nl> name = " nccl_collective_thunk " , <nl> srcs = if_cuda_or_rocm ( <nl> [ " : nccl_collective_thunk_src " ] , <nl> - [ " dummy_collective_thunk . cc " ] , <nl> + [ " nccl_collective_thunk_dummy . cc " ] , <nl> ) , <nl> hdrs = [ " nccl_collective_thunk . h " ] , <nl> deps = [ <nl> filegroup ( <nl> name = " nccl_all_gather_thunk_src " , <nl> srcs = if_nccl ( <nl> [ " nccl_all_gather_thunk . cc " ] , <nl> - [ " dummy_all_gather_thunk . cc " ] , <nl> + [ " nccl_all_gather_thunk_dummy . cc " ] , <nl> ) , <nl> ) <nl> <nl> tf_cuda_library ( <nl> name = " nccl_all_gather_thunk " , <nl> srcs = if_cuda_or_rocm ( <nl> [ " : nccl_all_gather_thunk_src " ] , <nl> - [ " dummy_all_gather_thunk . cc " ] , <nl> + [ " nccl_all_gather_thunk_dummy . cc " ] , <nl> ) , <nl> hdrs = [ " nccl_all_gather_thunk . h " ] , <nl> deps = [ <nl> filegroup ( <nl> name = " nccl_all_reduce_thunk_src " , <nl> srcs = if_nccl ( <nl> [ " nccl_all_reduce_thunk . cc " ] , <nl> - [ " dummy_all_reduce_thunk . cc " ] , <nl> + [ " nccl_all_reduce_thunk_dummy . cc " ] , <nl> ) , <nl> ) <nl> <nl> tf_cuda_library ( <nl> name = " nccl_all_reduce_thunk " , <nl> srcs = if_cuda_or_rocm ( <nl> [ " : nccl_all_reduce_thunk_src " ] , <nl> - [ " dummy_all_reduce_thunk . cc " ] , <nl> + [ " nccl_all_reduce_thunk_dummy . cc " ] , <nl> ) , <nl> hdrs = [ " nccl_all_reduce_thunk . h " ] , <nl> deps = [ <nl> filegroup ( <nl> name = " nccl_all_to_all_thunk_src " , <nl> srcs = if_nccl ( <nl> [ " nccl_all_to_all_thunk . cc " ] , <nl> - [ " dummy_all_to_all_thunk . cc " ] , <nl> + [ " nccl_all_to_all_thunk_dummy . cc " ] , <nl> ) , <nl> ) <nl> <nl> tf_cuda_library ( <nl> name = " nccl_all_to_all_thunk " , <nl> srcs = if_cuda_or_rocm ( <nl> [ " : nccl_all_to_all_thunk_src " ] , <nl> - [ " dummy_all_to_all_thunk . cc " ] , <nl> + [ " nccl_all_to_all_thunk_dummy . cc " ] , <nl> ) , <nl> hdrs = [ " nccl_all_to_all_thunk . 
h " ] , <nl> deps = [ <nl> filegroup ( <nl> name = " nccl_test_utils_src " , <nl> srcs = if_nccl ( <nl> [ " nccl_test_utils . cc " ] , <nl> - [ " dummy_nccl_test_utils . cc " ] , <nl> + [ " nccl_test_utils_dummy . cc " ] , <nl> ) , <nl> ) <nl> <nl> tf_cuda_library ( <nl> name = " nccl_test_utils " , <nl> srcs = if_cuda_or_rocm ( <nl> [ " : nccl_test_utils_src " ] , <nl> - [ " dummy_nccl_test_utils . cc " ] , <nl> + [ " nccl_test_utils_dummy . cc " ] , <nl> ) , <nl> hdrs = [ " nccl_test_utils . h " ] , <nl> deps = [ <nl> cc_library ( <nl> " / / tensorflow / stream_executor : stream_executor_headers " , <nl> " @ com_google_absl / / absl / memory " , <nl> " @ com_google_absl / / absl / strings " , <nl> + " @ llvm - project / / llvm : AsmParser " , <nl> + " @ llvm - project / / llvm : BitReader " , <nl> + " @ llvm - project / / llvm : BitWriter " , <nl> " @ llvm - project / / llvm : Core " , <nl> + " @ llvm - project / / llvm : TransformUtils " , <nl> " @ llvm - project / / mlir : AllPassesAndDialectsNoRegistration " , <nl> " @ llvm - project / / mlir : IR " , <nl> ] , <nl> cc_library ( <nl> " / / tensorflow / stream_executor : stream_executor_headers " , <nl> " / / tensorflow / stream_executor / cuda : cuda_diagnostics " , <nl> " / / tensorflow / stream_executor / gpu : asm_compiler " , <nl> - ] ) , <nl> + ] ) + [ " / / tensorflow / stream_executor / gpu : gpu_driver_header " ] , <nl> ) <nl> <nl> cc_library ( <nl> mmm a / tensorflow / compiler / xla / service / gpu / amdgpu_compiler . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / amdgpu_compiler . cc <nl> StatusOr < std : : pair < std : : string , std : : vector < uint8 > > > <nl> AMDGPUCompiler : : CompileTargetBinary ( const HloModule * module , <nl> llvm : : Module * llvm_module , <nl> GpuVersion gpu_version , <nl> - se : : StreamExecutor * stream_exec ) { <nl> + se : : StreamExecutor * stream_exec , <nl> + bool relocatable ) { <nl> if ( rocdl_dir_ . empty ( ) ) { <nl> / / Compute rocdl_dir_ just once and cache it in this member . <nl> rocdl_dir_ = GetROCDLDir ( module - > config ( ) ) ; <nl> } <nl> <nl> + if ( relocatable ) { <nl> + return Unimplemented ( " relocatable target binary is not implemented " ) ; <nl> + } <nl> + <nl> std : : vector < uint8 > hsaco ; <nl> { <nl> XLA_SCOPED_LOGGING_TIMER ( <nl> mmm a / tensorflow / compiler / xla / service / gpu / amdgpu_compiler . h <nl> ppp b / tensorflow / compiler / xla / service / gpu / amdgpu_compiler . h <nl> class AMDGPUCompiler : public GpuCompiler { <nl> <nl> StatusOr < std : : pair < std : : string , std : : vector < uint8 > > > CompileTargetBinary ( <nl> const HloModule * hlo_module , llvm : : Module * llvm_module , <nl> - GpuVersion gpu_version , se : : StreamExecutor * stream_exec ) override ; <nl> + GpuVersion gpu_version , se : : StreamExecutor * stream_exec , <nl> + bool relocatable ) override ; <nl> <nl> private : <nl> / / The parent directory of ROCm - Device - Libs IR libraries . <nl> mmm a / tensorflow / compiler / xla / service / gpu / gpu_compiler . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / gpu_compiler . cc <nl> limitations under the License . <nl> # include " absl / memory / memory . h " <nl> # include " absl / strings / numbers . h " <nl> # include " absl / strings / str_cat . h " <nl> + # include " llvm / AsmParser / Parser . h " <nl> + # include " llvm / Bitcode / BitcodeReader . h " <nl> + # include " llvm / Bitcode / BitcodeWriter . h " <nl> # include " llvm / IR / DiagnosticInfo . 
h " <nl> # include " llvm / IR / DiagnosticPrinter . h " <nl> # include " llvm / IR / LLVMContext . h " <nl> # include " llvm / IR / Module . h " <nl> # include " llvm / IR / Verifier . h " <nl> + # include " llvm / Transforms / Utils / SplitModule . h " <nl> # include " mlir / IR / BuiltinOps . h " / / from @ llvm - project <nl> # include " mlir / InitAllDialects . h " / / from @ llvm - project <nl> # include " tensorflow / compiler / xla / protobuf_util . h " <nl> limitations under the License . <nl> # include " tensorflow / core / lib / core / status . h " <nl> # include " tensorflow / core / lib / gtl / cleanup . h " <nl> # include " tensorflow / core / lib / io / path . h " <nl> + # include " tensorflow / core / platform / blocking_counter . h " <nl> # include " tensorflow / core / platform / env . h " <nl> # include " tensorflow / core / platform / logging . h " <nl> # include " tensorflow / core / platform / regexp . h " <nl> # include " tensorflow / core / platform / stream_executor_no_cuda . h " <nl> # include " tensorflow / core / platform / subprocess . h " <nl> + # include " tensorflow / core / platform / threadpool . h " <nl> # include " tensorflow / core / platform / tracing . h " <nl> # include " tensorflow / core / profiler / lib / traceme . h " <nl> # include " tensorflow / core / util / env_var . h " <nl> Status GpuCompiler : : OptimizeHloPostLayoutAssignment ( <nl> <nl> StatusOr < std : : unique_ptr < HloModule > > GpuCompiler : : RunHloPasses ( <nl> std : : unique_ptr < HloModule > module , se : : StreamExecutor * stream_exec , <nl> - se : : DeviceMemoryAllocator * device_allocator ) { <nl> + const CompileOptions & options ) { <nl> / / We dump the post - optimization HLO in RunBackend so no need to dump it here . <nl> XLA_SCOPED_LOGGING_TIMER ( " GpuCompiler : : RunHloPasses " ) ; <nl> tensorflow : : profiler : : TraceMe activity ( <nl> [ & ] { return absl : : StrCat ( " HLO Transforms : " , module - > name ( ) ) ; } , <nl> tensorflow : : profiler : : TraceMeLevel : : kInfo ) ; <nl> TF_RETURN_IF_ERROR ( <nl> - OptimizeHloModule ( module . get ( ) , stream_exec , device_allocator ) ) ; <nl> + OptimizeHloModule ( module . get ( ) , stream_exec , options . device_allocator ) ) ; <nl> <nl> TF_RETURN_IF_ERROR ( PrepareHloModuleForIrEmitting ( module . 
get ( ) ) ) ; <nl> <nl> StatusOr < <nl> std : : tuple < std : : unique_ptr < HloModule > , std : : unique_ptr < BufferAssignment > > > <nl> GpuCompiler : : RunHloPassesAndBufferAssignement ( <nl> std : : unique_ptr < HloModule > hlo_module , se : : StreamExecutor * executor , <nl> - se : : DeviceMemoryAllocator * device_allocator , bool optimize ) { <nl> + bool optimize , const CompileOptions & options ) { <nl> if ( optimize ) { <nl> - TF_ASSIGN_OR_RETURN ( hlo_module , RunHloPasses ( std : : move ( hlo_module ) , <nl> - executor , device_allocator ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( hlo_module , <nl> + RunHloPasses ( std : : move ( hlo_module ) , executor , options ) ) ; <nl> } <nl> <nl> std : : unique_ptr < StreamAssignment > stream_assignment = <nl> static Status CompileModuleToLlvmIrImpl ( <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> + StatusOr < std : : pair < std : : string , std : : vector < uint8 > > > <nl> + GpuCompiler : : CompileToTargetBinary ( const HloModule & module , <nl> + std : : unique_ptr < llvm : : Module > llvm_module , <nl> + se : : StreamExecutor * stream_exec , <nl> + const CompileOptions & options ) { <nl> + using BackendCompileResult = std : : pair < std : : string , std : : vector < uint8 > > ; <nl> + <nl> + const auto compile_single_module = <nl> + [ this , stream_exec , & module ] ( <nl> + llvm : : Module * llvm_module , <nl> + bool relocatable ) - > StatusOr < BackendCompileResult > { <nl> + { <nl> + XLA_SCOPED_LOGGING_TIMER ( <nl> + " GpuCompiler : : RunBackend - Running LLVM verifier " ) ; <nl> + <nl> + std : : string err ; <nl> + llvm : : raw_string_ostream err_stream ( err ) ; <nl> + <nl> + / / verifyModule ( ) returns true if the module is broken . <nl> + TF_RET_CHECK ( ! llvm : : verifyModule ( * llvm_module , & err_stream ) ) <nl> + < < " Invalid LLVM IR before optimizations : \ n " <nl> + < < err_stream . str ( ) <nl> + < < " \ nThis probably indicates a bug in the HLO - > LLVM IR " <nl> + " lowering . " <nl> + " Rerun with - - xla_dump_to to get the IR and looks for files " <nl> + " with " <nl> + " name containing : * " <nl> + < < FilenameFor ( module , " " , " " ) < < " * " ; <nl> + } <nl> + GpuVersion gpu_version = GetGpuVersion ( stream_exec ) ; <nl> + return CompileTargetBinary ( & module , llvm_module , gpu_version , stream_exec , <nl> + relocatable ) ; <nl> + } ; <nl> + <nl> + tensorflow : : thread : : ThreadPool * thread_pool = options . thread_pool ; <nl> + if ( ! thread_pool ) { <nl> + return compile_single_module ( llvm_module . get ( ) , / * relocatable = * / false ) ; <nl> + } <nl> + <nl> + / / Test whether LinkModules is supported . <nl> + if ( this - > LinkModules ( stream_exec , { } ) . status ( ) . code ( ) = = <nl> + tensorflow : : error : : Code : : UNIMPLEMENTED ) { <nl> + return compile_single_module ( llvm_module . get ( ) , / * relocatable = * / false ) ; <nl> + } <nl> + <nl> + std : : vector < std : : unique_ptr < llvm : : Module > > llvm_modules ; <nl> + int num_functions = 0 ; <nl> + for ( llvm : : Function & func : llvm_module - > functions ( ) ) { <nl> + if ( ! func . isDeclaration ( ) & & <nl> + func . getLinkage ( ) = = llvm : : GlobalValue : : LinkageTypes : : ExternalLinkage ) { <nl> + num_functions + + ; <nl> + } <nl> + } <nl> + <nl> + llvm : : SplitModule ( <nl> + std : : move ( llvm_module ) , <nl> + std : : max < unsigned > ( <nl> + 1 , std : : min < unsigned > ( thread_pool - > NumThreads ( ) , num_functions ) ) , <nl> + [ & ] ( std : : unique_ptr < llvm : : Module > module ) { <nl> + llvm_modules . 
push_back ( std : : move ( module ) ) ; <nl> + } , <nl> + / * PreserveLocals = * / true ) ; <nl> + <nl> + std : : vector < StatusOr < BackendCompileResult > > compile_results ( <nl> + llvm_modules . size ( ) ) ; <nl> + tensorflow : : BlockingCounter counter ( llvm_modules . size ( ) ) ; <nl> + for ( int i = 0 ; i < llvm_modules . size ( ) ; i + + ) { <nl> + thread_pool - > Schedule ( [ & compile_results , compile_single_module , i , <nl> + & llvm_modules , & counter ] { <nl> + llvm : : Module * original_module = llvm_modules [ i ] . get ( ) ; <nl> + llvm : : LLVMContext context ; <nl> + std : : string buffer ; <nl> + llvm : : raw_string_ostream error ( buffer ) ; <nl> + llvm : : DiagnosticPrinterRawOStream printer ( error ) ; <nl> + auto DiagnosticHandler = [ ] ( const llvm : : DiagnosticInfo & diag_info , <nl> + void * Context ) { <nl> + auto printer = static_cast < llvm : : DiagnosticPrinterRawOStream * > ( Context ) ; <nl> + diag_info . print ( * printer ) ; <nl> + } ; <nl> + context . setDiagnosticHandlerCallBack ( DiagnosticHandler , & printer ) ; <nl> + <nl> + std : : unique_ptr < llvm : : Module > new_llvm_module ; <nl> + { <nl> + std : : string ir ; <nl> + { <nl> + llvm : : raw_string_ostream os ( ir ) ; <nl> + original_module - > print ( os , nullptr ) ; <nl> + } <nl> + llvm : : SMDiagnostic err ; <nl> + new_llvm_module = llvm : : parseAssemblyString ( ir , err , context ) ; <nl> + } <nl> + <nl> + compile_results [ i ] = <nl> + compile_single_module ( new_llvm_module . get ( ) , / * relocatable = * / true ) ; <nl> + counter . DecrementCount ( ) ; <nl> + } ) ; <nl> + } <nl> + counter . Wait ( ) ; <nl> + <nl> + std : : string ptx_snippets ; <nl> + std : : vector < std : : vector < uint8 > > submodule_compile_results ; <nl> + for ( auto & maybe_result : compile_results ) { <nl> + TF_ASSIGN_OR_RETURN ( auto result , maybe_result ) ; <nl> + if ( result . second . empty ( ) ) { <nl> + continue ; <nl> + } <nl> + ptx_snippets + = result . first ; <nl> + ptx_snippets + = " \ n " ; <nl> + submodule_compile_results . push_back ( result . second ) ; <nl> + } <nl> + <nl> + TF_ASSIGN_OR_RETURN ( <nl> + std : : vector < uint8 > backend_result , <nl> + this - > LinkModules ( stream_exec , std : : move ( submodule_compile_results ) ) ) ; <nl> + <nl> + return std : : make_pair ( ptx_snippets , backend_result ) ; <nl> + } <nl> + <nl> StatusOr < std : : unique_ptr < Executable > > GpuCompiler : : RunBackend ( <nl> std : : unique_ptr < HloModule > module , se : : StreamExecutor * stream_exec , <nl> - se : : DeviceMemoryAllocator * device_allocator ) { <nl> + const CompileOptions & options ) { <nl> XLA_SCOPED_LOGGING_TIMER ( " GpuCompiler : : RunBackend " ) ; <nl> auto slow_compile_alarm = SlowCompilationAlarm ( ) ; <nl> <nl> TF_RET_CHECK ( stream_exec ! = nullptr ) ; <nl> <nl> llvm : : LLVMContext llvm_context ; <nl> - std : : string buffer ; <nl> - llvm : : raw_string_ostream error ( buffer ) ; <nl> - llvm : : DiagnosticPrinterRawOStream printer ( error ) ; <nl> - auto DiagnosticHandler = [ ] ( const llvm : : DiagnosticInfo & diag_info , <nl> - void * Context ) { <nl> - auto printer = static_cast < llvm : : DiagnosticPrinterRawOStream * > ( Context ) ; <nl> - diag_info . print ( * printer ) ; <nl> - } ; <nl> - llvm_context . setDiagnosticHandlerCallBack ( DiagnosticHandler , & printer ) ; <nl> <nl> GpuDeviceInfo gpu_device_info ; <nl> gpu_device_info . 
threads_per_block_limit = <nl> StatusOr < std : : unique_ptr < Executable > > GpuCompiler : : RunBackend ( <nl> <nl> llvm_ir : : DumpIrIfEnabled ( * module , * llvm_module , / * optimized = * / false ) ; <nl> <nl> - { <nl> - XLA_SCOPED_LOGGING_TIMER ( " GpuCompiler : : RunBackend - Running LLVM verifier " ) ; <nl> - <nl> - std : : string err ; <nl> - llvm : : raw_string_ostream err_stream ( err ) ; <nl> - <nl> - / / verifyModule ( ) returns true if the module is broken . <nl> - TF_RET_CHECK ( ! llvm : : verifyModule ( * llvm_module , & err_stream ) ) <nl> - < < " Invalid LLVM IR before optimizations : \ n " <nl> - < < err_stream . str ( ) <nl> - < < " \ nThis probably indicates a bug in the HLO - > LLVM IR lowering . " <nl> - " Rerun with - - xla_dump_to to get the IR and looks for files with " <nl> - " name containing : * " <nl> - < < FilenameFor ( * module , " " , " " ) < < " * " ; <nl> - } <nl> - <nl> - GpuVersion gpu_version = GetGpuVersion ( stream_exec ) ; <nl> - <nl> using BackendCompileResult = std : : pair < std : : string , std : : vector < uint8 > > ; <nl> TF_ASSIGN_OR_RETURN ( BackendCompileResult backend_result , <nl> - CompileTargetBinary ( module . get ( ) , llvm_module . get ( ) , <nl> - gpu_version , stream_exec ) ) ; <nl> - <nl> + CompileToTargetBinary ( * module , std : : move ( llvm_module ) , <nl> + stream_exec , options ) ) ; <nl> if ( DumpingEnabledForHloModule ( * module ) ) { <nl> DumpToFileInDirOrStdout ( * module , " " , " thunk_schedule " , <nl> thunk_schedule - > ToString ( ) ) ; <nl> } <nl> <nl> + GpuVersion gpu_version = GetGpuVersion ( stream_exec ) ; <nl> auto * gpu_executable = new GpuExecutable ( <nl> backend_result . first , backend_result . second , gpu_version , <nl> std : : move ( thunk_schedule ) , std : : move ( module ) , <nl> mmm a / tensorflow / compiler / xla / service / gpu / gpu_compiler . h <nl> ppp b / tensorflow / compiler / xla / service / gpu / gpu_compiler . 
h <nl> class GpuCompiler : public LLVMCompiler { <nl> <nl> StatusOr < std : : unique_ptr < HloModule > > RunHloPasses ( <nl> std : : unique_ptr < HloModule > module , se : : StreamExecutor * stream_exec , <nl> - se : : DeviceMemoryAllocator * device_allocator ) override ; <nl> + const CompileOptions & options ) override ; <nl> <nl> StatusOr < <nl> std : : tuple < std : : unique_ptr < HloModule > , std : : unique_ptr < BufferAssignment > > > <nl> RunHloPassesAndBufferAssignement ( std : : unique_ptr < HloModule > hlo_module , <nl> - se : : StreamExecutor * executor , <nl> - se : : DeviceMemoryAllocator * device_allocator , <nl> - bool optimize ) override ; <nl> + se : : StreamExecutor * executor , bool optimize , <nl> + const CompileOptions & options ) override ; <nl> <nl> Status OptimizeHloModule ( HloModule * hlo_module , <nl> se : : StreamExecutor * stream_exec , <nl> class GpuCompiler : public LLVMCompiler { <nl> <nl> virtual StatusOr < std : : pair < std : : string , std : : vector < uint8 > > > <nl> CompileTargetBinary ( const HloModule * hlo_module , llvm : : Module * llvm_module , <nl> - GpuVersion gpu_version , <nl> - se : : StreamExecutor * stream_exec ) = 0 ; <nl> + GpuVersion gpu_version , se : : StreamExecutor * stream_exec , <nl> + bool relocatable ) = 0 ; <nl> <nl> Status PrepareHloModuleForIrEmitting ( HloModule * hlo_module ) ; <nl> <nl> StatusOr < std : : unique_ptr < Executable > > RunBackend ( <nl> std : : unique_ptr < HloModule > module , se : : StreamExecutor * stream_exec , <nl> - se : : DeviceMemoryAllocator * device_allocator ) override ; <nl> + const CompileOptions & options ) override ; <nl> <nl> StatusOr < std : : vector < std : : unique_ptr < AotCompilationResult > > > <nl> CompileAheadOfTime ( std : : unique_ptr < HloModuleGroup > module_group , <nl> AotCompilationOptions const & options ) override ; <nl> <nl> + StatusOr < std : : pair < std : : string , std : : vector < uint8 > > > CompileToTargetBinary ( <nl> + const HloModule & module , std : : unique_ptr < llvm : : Module > llvm_module , <nl> + se : : StreamExecutor * stream_exec , const CompileOptions & options ) ; <nl> + <nl> se : : Platform : : Id PlatformId ( ) const override { return platform_id_ ; } <nl> <nl> HloCostAnalysis : : ShapeSizeFunction ShapeSizeBytesFunction ( ) const override { <nl> class GpuCompiler : public LLVMCompiler { <nl> } <nl> <nl> private : <nl> + virtual StatusOr < std : : vector < uint8 > > LinkModules ( <nl> + se : : StreamExecutor * stream_exec , <nl> + std : : vector < std : : vector < uint8 > > modules ) { <nl> + return Unimplemented ( " LinkModules is not implemented . " ) ; <nl> + } <nl> + <nl> se : : Platform : : Id platform_id_ ; <nl> <nl> / / The triple that represents our target . <nl> similarity index 100 % <nl> rename from tensorflow / compiler / xla / service / gpu / dummy_all_gather_thunk . cc <nl> rename to tensorflow / compiler / xla / service / gpu / nccl_all_gather_thunk_dummy . cc <nl> similarity index 100 % <nl> rename from tensorflow / compiler / xla / service / gpu / dummy_all_reduce_thunk . cc <nl> rename to tensorflow / compiler / xla / service / gpu / nccl_all_reduce_thunk_dummy . cc <nl> similarity index 100 % <nl> rename from tensorflow / compiler / xla / service / gpu / dummy_all_to_all_thunk . cc <nl> rename to tensorflow / compiler / xla / service / gpu / nccl_all_to_all_thunk_dummy . cc <nl> similarity index 100 % <nl> rename from tensorflow / compiler / xla / service / gpu / dummy_collective_thunk . 
cc <nl> rename to tensorflow / compiler / xla / service / gpu / nccl_collective_thunk_dummy . cc <nl> similarity index 100 % <nl> rename from tensorflow / compiler / xla / service / gpu / dummy_nccl_test_utils . cc <nl> rename to tensorflow / compiler / xla / service / gpu / nccl_test_utils_dummy . cc <nl> mmm a / tensorflow / compiler / xla / service / gpu / nvptx_compiler . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / nvptx_compiler . cc <nl> limitations under the License . <nl> # include " tensorflow / core / profiler / lib / traceme . h " <nl> # include " tensorflow / stream_executor / cuda / cuda_diagnostics . h " <nl> # include " tensorflow / stream_executor / gpu / asm_compiler . h " <nl> + # include " tensorflow / stream_executor / gpu / gpu_driver . h " <nl> <nl> namespace xla { <nl> namespace gpu { <nl> StatusOr < std : : pair < std : : string , std : : vector < uint8 > > > <nl> NVPTXCompiler : : CompileTargetBinary ( const HloModule * module , <nl> llvm : : Module * llvm_module , <nl> GpuVersion gpu_version , <nl> - se : : StreamExecutor * stream_exec ) { <nl> + se : : StreamExecutor * stream_exec , <nl> + bool relocatable ) { <nl> std : : pair < int , int > compute_capability = <nl> absl : : get < std : : pair < int , int > > ( gpu_version ) ; <nl> <nl> NVPTXCompiler : : CompileTargetBinary ( const HloModule * module , <nl> <nl> std : : vector < uint8 > cubin = CompileGpuAsmOrGetCachedResult ( <nl> stream_exec , ptx , compute_capability . first , compute_capability . second , <nl> - module - > config ( ) ) ; <nl> + module - > config ( ) , relocatable ) ; <nl> <nl> return std : : pair < std : : string , std : : vector < uint8 > > ( std : : move ( ptx ) , <nl> std : : move ( cubin ) ) ; <nl> NVPTXCompiler : : CompileTargetBinary ( const HloModule * module , <nl> <nl> std : : vector < uint8 > NVPTXCompiler : : CompileGpuAsmOrGetCachedResult ( <nl> se : : StreamExecutor * stream_exec , const string & ptx , int cc_major , <nl> - int cc_minor , const HloModuleConfig & hlo_module_config ) { <nl> + int cc_minor , const HloModuleConfig & hlo_module_config , bool relocatable ) { <nl> XLA_SCOPED_LOGGING_TIMER ( " NVPTXCompiler : : CompileGpuAsmOrGetCachedResult " ) ; <nl> tensorflow : : profiler : : TraceMe activity ( <nl> " PTX - > CUBIN " , tensorflow : : profiler : : TraceMeLevel : : kInfo ) ; <nl> std : : vector < uint8 > NVPTXCompiler : : CompileGpuAsmOrGetCachedResult ( <nl> tensorflow : : mutex_lock lock ( mutex_ ) ; <nl> std : : tie ( iter , inserted ) = compilation_cache_ . emplace ( <nl> std : : piecewise_construct , <nl> - std : : forward_as_tuple ( ptx , cc_major , cc_minor ) , <nl> + std : : forward_as_tuple ( ptx , cc_major , cc_minor , relocatable ) , <nl> std : : forward_as_tuple ( ) ) ; <nl> cache_ptx = & iter - > first . ptx ; <nl> cache_value = & iter - > second ; <nl> std : : vector < uint8 > NVPTXCompiler : : CompileGpuAsmOrGetCachedResult ( <nl> if ( inserted ) { <nl> CHECK ( ! cache_value - > compilation_done ) ; <nl> if ( ! ptx . empty ( ) ) { <nl> - StatusOr < std : : vector < uint8 > > maybe_cubin = <nl> - se : : CompileGpuAsm ( stream_exec - > device_ordinal ( ) , cache_ptx - > c_str ( ) , <nl> - PtxOptsFromConfig ( hlo_module_config ) ) ; <nl> + auto ptxas_config = PtxOptsFromConfig ( hlo_module_config ) ; <nl> + if ( relocatable ) { <nl> + ptxas_config . extra_flags . 
push_back ( " - c " ) ; <nl> + } <nl> + StatusOr < std : : vector < uint8 > > maybe_cubin = se : : CompileGpuAsm ( <nl> + stream_exec - > device_ordinal ( ) , cache_ptx - > c_str ( ) , ptxas_config ) ; <nl> + <nl> if ( maybe_cubin . ok ( ) ) { <nl> cache_value - > cubin_data = std : : move ( maybe_cubin ) . ValueOrDie ( ) ; <nl> VLOG ( 2 ) < < " Compiled PTX size : " < < ptx . size ( ) <nl> std : : vector < uint8 > NVPTXCompiler : : CompileGpuAsmOrGetCachedResult ( <nl> return cache_value - > cubin_data ; <nl> } <nl> <nl> + StatusOr < std : : vector < uint8 > > NVPTXCompiler : : LinkModules ( <nl> + se : : StreamExecutor * stream_exec , std : : vector < std : : vector < uint8 > > modules ) { <nl> + std : : vector < stream_executor : : CubinOrPTXImage > images ; <nl> + images . reserve ( modules . size ( ) ) ; <nl> + for ( auto & module : modules ) { <nl> + images . push_back ( { " " , std : : move ( module ) } ) ; <nl> + } <nl> + return LinkGpuAsm ( static_cast < se : : gpu : : GpuContext * > ( <nl> + stream_exec - > implementation ( ) - > GpuContextHack ( ) ) , <nl> + images ) ; <nl> + } <nl> + <nl> } / / namespace gpu <nl> } / / namespace xla <nl> mmm a / tensorflow / compiler / xla / service / gpu / nvptx_compiler . h <nl> ppp b / tensorflow / compiler / xla / service / gpu / nvptx_compiler . h <nl> class NVPTXCompiler : public GpuCompiler { <nl> <nl> StatusOr < std : : pair < std : : string , std : : vector < uint8 > > > CompileTargetBinary ( <nl> const HloModule * hlo_module , llvm : : Module * llvm_module , <nl> - GpuVersion gpu_version , se : : StreamExecutor * stream_exec ) override ; <nl> + GpuVersion gpu_version , se : : StreamExecutor * stream_exec , <nl> + bool relocatable ) override ; <nl> <nl> private : <nl> + StatusOr < std : : vector < uint8 > > LinkModules ( <nl> + se : : StreamExecutor * stream_exec , <nl> + std : : vector < std : : vector < uint8 > > modules ) override ; <nl> + <nl> tensorflow : : mutex mutex_ ; <nl> <nl> / / When compiling an HLO module , we need to find a path to the nvvm libdevice <nl> class NVPTXCompiler : public GpuCompiler { <nl> / / compiled cubin . If compilation was unsuccessful , returns an empty vector . <nl> std : : vector < uint8 > CompileGpuAsmOrGetCachedResult ( <nl> se : : StreamExecutor * stream_exec , const string & ptx , int cc_major , <nl> - int cc_minor , const HloModuleConfig & hlo_module_config ) ; <nl> + int cc_minor , const HloModuleConfig & hlo_module_config , bool relocatable ) ; <nl> <nl> / / The compilation_cache_ map is a cache from { ptx string , cc_major , cc_minor } <nl> / / - > cubin so we don ' t recompile the same ptx twice . This is important for <nl> class NVPTXCompiler : public GpuCompiler { <nl> / / If compiling the ptx fails , we return an empty cubin , cross our fingers , <nl> / / and leave compilation up to the driver . 
<nl> struct CompilationCacheKey { <nl> - CompilationCacheKey ( std : : string ptx , int cc_major , int cc_minor ) <nl> - : ptx ( std : : move ( ptx ) ) , cc_major ( cc_major ) , cc_minor ( cc_minor ) { } <nl> + CompilationCacheKey ( std : : string ptx , int cc_major , int cc_minor , <nl> + bool relocatable ) <nl> + : ptx ( std : : move ( ptx ) ) , <nl> + cc_major ( cc_major ) , <nl> + cc_minor ( cc_minor ) , <nl> + relocatable ( relocatable ) { } <nl> string ptx ; <nl> int cc_major ; <nl> int cc_minor ; <nl> + bool relocatable ; <nl> } ; <nl> struct CompilationCacheHash { <nl> size_t operator ( ) ( const CompilationCacheKey & key ) const { <nl> return tensorflow : : Hash64Combine ( <nl> - tensorflow : : Hash64Combine ( tensorflow : : Hash64 ( key . ptx ) , key . cc_major ) , <nl> - key . cc_minor ) ; <nl> + tensorflow : : Hash64Combine ( <nl> + tensorflow : : Hash64Combine ( tensorflow : : Hash64 ( key . ptx ) , <nl> + key . cc_major ) , <nl> + key . cc_minor ) , <nl> + key . relocatable ) ; <nl> } <nl> } ; <nl> struct CompilationCacheEq { <nl> size_t operator ( ) ( const CompilationCacheKey & a , <nl> const CompilationCacheKey & b ) const { <nl> return a . cc_major = = b . cc_major & & a . cc_minor = = b . cc_minor & & <nl> - a . ptx = = b . ptx ; <nl> + a . ptx = = b . ptx & & a . relocatable = = b . relocatable ; <nl> } <nl> } ; <nl> struct CompilationCacheValue { <nl> mmm a / tensorflow / compiler / xla / service / interpreter / compiler . cc <nl> ppp b / tensorflow / compiler / xla / service / interpreter / compiler . cc <nl> Status InterpreterCompiler : : RunHloOptimization ( HloModule * hlo_module ) { <nl> <nl> StatusOr < std : : unique_ptr < HloModule > > InterpreterCompiler : : RunHloPasses ( <nl> std : : unique_ptr < HloModule > hlo_module , se : : StreamExecutor * / * stream_exec * / , <nl> - se : : DeviceMemoryAllocator * / * device_allocator * / ) { <nl> + const CompileOptions & / * options * / ) { <nl> VLOG ( 1 ) < < " Run hlo passes on graph " < < hlo_module - > name ( ) ; <nl> TF_RETURN_IF_ERROR ( RunHloOptimization ( hlo_module . get ( ) ) ) ; <nl> return std : : move ( hlo_module ) ; <nl> StatusOr < std : : unique_ptr < HloModule > > InterpreterCompiler : : RunHloPasses ( <nl> <nl> StatusOr < std : : unique_ptr < Executable > > InterpreterCompiler : : RunBackend ( <nl> std : : unique_ptr < HloModule > hlo_module , se : : StreamExecutor * stream_exec , <nl> - se : : DeviceMemoryAllocator * / * device_allocator * / ) { <nl> + const CompileOptions & / * options * / ) { <nl> TF_RET_CHECK ( stream_exec ! = nullptr ) ; <nl> <nl> VLOG ( 1 ) < < " Run backend " < < hlo_module - > name ( ) ; <nl> StatusOr < std : : unique_ptr < Executable > > InterpreterCompiler : : RunBackend ( <nl> StatusOr < std : : vector < std : : unique_ptr < Executable > > > InterpreterCompiler : : Compile ( <nl> std : : unique_ptr < HloModuleGroup > module_group , <nl> std : : vector < std : : vector < se : : StreamExecutor * > > stream_exec , <nl> - se : : DeviceMemoryAllocator * device_allocator ) { <nl> + const CompileOptions & options ) { <nl> if ( module_group - > empty ( ) ) { <nl> return std : : vector < std : : unique_ptr < Executable > > ( ) ; <nl> } <nl> StatusOr < std : : vector < std : : unique_ptr < Executable > > > InterpreterCompiler : : Compile ( <nl> " Unexpected number of StreamExecutor ' s . 
" ) ; <nl> } <nl> auto hlo_modules = module_group - > ConsumeModules ( ) ; <nl> - TF_ASSIGN_OR_RETURN ( auto module , <nl> - RunHloPasses ( std : : move ( hlo_modules [ 0 ] ) , stream_exec [ 0 ] [ 0 ] , <nl> - device_allocator ) ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto executable , <nl> - RunBackend ( std : : move ( module ) , stream_exec [ 0 ] [ 0 ] , device_allocator ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( auto module , RunHloPasses ( std : : move ( hlo_modules [ 0 ] ) , <nl> + stream_exec [ 0 ] [ 0 ] , options ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( auto executable , RunBackend ( std : : move ( module ) , <nl> + stream_exec [ 0 ] [ 0 ] , options ) ) ; <nl> std : : vector < std : : unique_ptr < Executable > > ret ; <nl> ret . push_back ( std : : move ( executable ) ) ; <nl> return std : : move ( ret ) ; <nl> mmm a / tensorflow / compiler / xla / service / interpreter / compiler . h <nl> ppp b / tensorflow / compiler / xla / service / interpreter / compiler . h <nl> class InterpreterCompiler : public Compiler { <nl> <nl> StatusOr < std : : unique_ptr < HloModule > > RunHloPasses ( <nl> std : : unique_ptr < HloModule > hlo_module , se : : StreamExecutor * stream_exec , <nl> - se : : DeviceMemoryAllocator * device_allocator ) override ; <nl> + const CompileOptions & options ) override ; <nl> StatusOr < std : : unique_ptr < Executable > > RunBackend ( <nl> std : : unique_ptr < HloModule > hlo_module , se : : StreamExecutor * stream_exec , <nl> - se : : DeviceMemoryAllocator * device_allocator ) override ; <nl> + const CompileOptions & options ) override ; <nl> StatusOr < std : : vector < std : : unique_ptr < Executable > > > Compile ( <nl> std : : unique_ptr < HloModuleGroup > module_group , <nl> std : : vector < std : : vector < se : : StreamExecutor * > > stream_exec , <nl> - se : : DeviceMemoryAllocator * device_allocator ) override ; <nl> + const CompileOptions & options ) override ; <nl> <nl> StatusOr < std : : vector < std : : unique_ptr < AotCompilationResult > > > <nl> CompileAheadOfTime ( std : : unique_ptr < HloModuleGroup > module_group , <nl> mmm a / tensorflow / compiler / xla / service / llvm_compiler . cc <nl> ppp b / tensorflow / compiler / xla / service / llvm_compiler . cc <nl> namespace xla { <nl> StatusOr < std : : vector < std : : unique_ptr < Executable > > > LLVMCompiler : : Compile ( <nl> std : : unique_ptr < HloModuleGroup > module_group , <nl> std : : vector < std : : vector < se : : StreamExecutor * > > stream_execs , <nl> - se : : DeviceMemoryAllocator * device_allocator ) { <nl> + const CompileOptions & options ) { <nl> / / Tensorflow tries to enable the following behaviors in all its threads : <nl> / / <nl> / / - Denormals are zero ( DAZ ) : roughly , operations treat denormal floats as <nl> StatusOr < std : : vector < std : : unique_ptr < Executable > > > LLVMCompiler : : Compile ( <nl> <nl> TF_ASSIGN_OR_RETURN ( modules [ i ] , <nl> RunHloPasses ( std : : move ( modules [ i ] ) , stream_execs [ i ] [ 0 ] , <nl> - device_allocator ) ) ; <nl> + options . device_allocator ) ) ; <nl> TF_ASSIGN_OR_RETURN ( std : : unique_ptr < Executable > executable , <nl> RunBackend ( std : : move ( modules [ i ] ) , stream_execs [ i ] [ 0 ] , <nl> - device_allocator ) ) ; <nl> + options . device_allocator ) ) ; <nl> result . push_back ( std : : move ( executable ) ) ; <nl> } <nl> <nl> mmm a / tensorflow / compiler / xla / service / llvm_compiler . h <nl> ppp b / tensorflow / compiler / xla / service / llvm_compiler . 
h <nl> class LLVMCompiler : public Compiler { <nl> / / std : : unique_ptr < HloModule > module , <nl> / / se : : StreamExecutor * stream_exec , <nl> / / se : : DeviceMemoryAllocator * device_allocator ) <nl> + using Compiler : : Compile ; <nl> using Compiler : : RunBackend ; <nl> using Compiler : : RunHloPasses ; <nl> <nl> StatusOr < std : : vector < std : : unique_ptr < Executable > > > Compile ( <nl> std : : unique_ptr < HloModuleGroup > module_group , <nl> std : : vector < std : : vector < se : : StreamExecutor * > > stream_execs , <nl> - se : : DeviceMemoryAllocator * device_allocator ) override ; <nl> + const CompileOptions & options ) override ; <nl> <nl> protected : <nl> ModuleHook user_pre_optimization_hook_ ; <nl> mmm a / tensorflow / compiler / xla / service / local_service . cc <nl> ppp b / tensorflow / compiler / xla / service / local_service . cc <nl> LocalService : : CompileExecutables ( <nl> / / single partition computations are built using ` BuildExecutables ` , fix it , <nl> / / and remove this special case ( provided the performance if similar ) . <nl> if ( build_options . num_partitions ( ) = = 1 ) { <nl> - TF_ASSIGN_OR_RETURN ( <nl> - std : : unique_ptr < Executable > executable , <nl> - BuildExecutable ( proto , std : : move ( module_config ) , execute_backend_ . get ( ) , <nl> - executor , build_options . device_allocator ( ) , <nl> - build_options . run_backend_only ( ) ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( std : : unique_ptr < Executable > executable , <nl> + BuildExecutable ( proto , std : : move ( module_config ) , <nl> + execute_backend_ . get ( ) , executor , <nl> + { build_options . device_allocator ( ) , <nl> + build_options . compile_thread_pool ( ) } , <nl> + build_options . run_backend_only ( ) ) ) ; <nl> std : : vector < std : : unique_ptr < Executable > > executables ; <nl> executables . push_back ( std : : move ( executable ) ) ; <nl> return executables ; <nl> LocalService : : CompileExecutables ( <nl> std : : vector < se : : StreamExecutor * > executors ( build_options . num_partitions ( ) , <nl> executor ) ; <nl> <nl> - return BuildExecutables ( { & proto } , std : : move ( module_configs ) , <nl> - execute_backend_ . get ( ) , { executors } , <nl> - build_options . device_allocator ( ) , <nl> - build_options . run_backend_only ( ) ) ; <nl> + return BuildExecutables ( <nl> + / * module_protos = * / { & proto } , std : : move ( module_configs ) , <nl> + execute_backend_ . get ( ) , { executors } , <nl> + Compiler : : CompileOptions { build_options . device_allocator ( ) , <nl> + build_options . compile_thread_pool ( ) } , <nl> + build_options . run_backend_only ( ) ) ; <nl> } <nl> } <nl> <nl> mmm a / tensorflow / compiler / xla / service / mlir_gpu / failover_compiler . cc <nl> ppp b / tensorflow / compiler / xla / service / mlir_gpu / failover_compiler . cc <nl> bool IsUnimplemented ( StatusOr < T > & result ) { <nl> <nl> StatusOr < std : : unique_ptr < HloModule > > FailoverCompiler : : RunHloPasses ( <nl> std : : unique_ptr < HloModule > module , se : : StreamExecutor * stream_exec , <nl> - se : : DeviceMemoryAllocator * device_allocator ) { <nl> - auto result = <nl> - primary_ - > RunHloPasses ( module - > Clone ( ) , stream_exec , device_allocator ) ; <nl> + const CompileOptions & options ) { <nl> + auto result = primary_ - > RunHloPasses ( module - > Clone ( ) , stream_exec , options ) ; <nl> if ( IsUnimplemented ( result ) ) { <nl> VLOG ( 2 ) < < " RunHloPasses resulted in " < < result . 
status ( ) <nl> < < " , falling back to secondary backend " ; <nl> - return secondary_ - > RunHloPasses ( std : : move ( module ) , stream_exec , <nl> - device_allocator ) ; <nl> + return secondary_ - > RunHloPasses ( std : : move ( module ) , stream_exec , options ) ; <nl> } <nl> return result ; <nl> } <nl> <nl> StatusOr < std : : unique_ptr < Executable > > FailoverCompiler : : RunBackend ( <nl> std : : unique_ptr < HloModule > module , se : : StreamExecutor * stream_exec , <nl> - se : : DeviceMemoryAllocator * device_allocator ) { <nl> - auto result = <nl> - primary_ - > RunBackend ( module - > Clone ( ) , stream_exec , device_allocator ) ; <nl> + const CompileOptions & options ) { <nl> + auto result = primary_ - > RunBackend ( module - > Clone ( ) , stream_exec , options ) ; <nl> if ( IsUnimplemented ( result ) ) { <nl> VLOG ( 2 ) < < " RunBackend resulted in " < < result . status ( ) <nl> < < " , falling back to secondary backend " ; <nl> - return secondary_ - > RunBackend ( std : : move ( module ) , stream_exec , <nl> - device_allocator ) ; <nl> + return secondary_ - > RunBackend ( std : : move ( module ) , stream_exec , options ) ; <nl> } <nl> return result ; <nl> } <nl> StatusOr < std : : unique_ptr < Executable > > FailoverCompiler : : RunBackend ( <nl> StatusOr < std : : vector < std : : unique_ptr < Executable > > > FailoverCompiler : : Compile ( <nl> std : : unique_ptr < HloModuleGroup > module_group , <nl> std : : vector < std : : vector < se : : StreamExecutor * > > stream_execs , <nl> - se : : DeviceMemoryAllocator * device_allocator ) { <nl> + const CompileOptions & options ) { <nl> std : : vector < std : : unique_ptr < Executable > > result ; <nl> std : : vector < std : : unique_ptr < HloModule > > modules = <nl> module_group - > ConsumeModules ( ) ; <nl> StatusOr < std : : vector < std : : unique_ptr < Executable > > > FailoverCompiler : : Compile ( <nl> return Unimplemented ( <nl> " Model partitioning not implemented for the failover compiler ! " ) ; <nl> } <nl> - auto executable = [ stream_execs , device_allocator , i , <nl> + auto executable = [ stream_execs , & options , i , <nl> this ] ( std : : unique_ptr < HloModule > module ) <nl> - > StatusOr < std : : unique_ptr < Executable > > { <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto processed_module , <nl> - primary_ - > RunHloPasses ( std : : move ( module ) , stream_execs [ i ] [ 0 ] , <nl> - device_allocator ) ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto result , <nl> - primary_ - > RunBackend ( std : : move ( processed_module ) , stream_execs [ i ] [ 0 ] , <nl> - device_allocator ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( auto processed_module , <nl> + primary_ - > RunHloPasses ( std : : move ( module ) , <nl> + stream_execs [ i ] [ 0 ] , options ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( auto result , <nl> + primary_ - > RunBackend ( std : : move ( processed_module ) , <nl> + stream_execs [ i ] [ 0 ] , options ) ) ; <nl> return result ; <nl> } ( modules [ i ] - > Clone ( ) ) ; <nl> <nl> StatusOr < std : : vector < std : : unique_ptr < Executable > > > FailoverCompiler : : Compile ( <nl> VLOG ( 2 ) < < " Compile resulted in " < < executable . 
status ( ) <nl> < < " , falling back to secondary backend " ; <nl> TF_ASSIGN_OR_RETURN ( <nl> - modules [ i ] , <nl> - secondary_ - > RunHloPasses ( std : : move ( modules [ i ] ) , stream_execs [ i ] [ 0 ] , <nl> - device_allocator ) ) ; <nl> - TF_ASSIGN_OR_RETURN ( <nl> - executable , <nl> - secondary_ - > RunBackend ( std : : move ( modules [ i ] ) , stream_execs [ i ] [ 0 ] , <nl> - device_allocator ) ) ; <nl> + modules [ i ] , secondary_ - > RunHloPasses ( std : : move ( modules [ i ] ) , <nl> + stream_execs [ i ] [ 0 ] , options ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( executable , <nl> + secondary_ - > RunBackend ( std : : move ( modules [ i ] ) , <nl> + stream_execs [ i ] [ 0 ] , options ) ) ; <nl> } <nl> <nl> if ( ! executable . ok ( ) ) { <nl> mmm a / tensorflow / compiler / xla / service / mlir_gpu / failover_compiler . h <nl> ppp b / tensorflow / compiler / xla / service / mlir_gpu / failover_compiler . h <nl> class FailoverCompiler final : public Compiler { <nl> <nl> StatusOr < std : : unique_ptr < HloModule > > RunHloPasses ( <nl> std : : unique_ptr < HloModule > module , se : : StreamExecutor * stream_exec , <nl> - se : : DeviceMemoryAllocator * device_allocator ) override ; <nl> + const CompileOptions & options ) override ; <nl> <nl> StatusOr < std : : unique_ptr < Executable > > RunBackend ( <nl> std : : unique_ptr < HloModule > module , se : : StreamExecutor * stream_exec , <nl> - se : : DeviceMemoryAllocator * device_allocator ) override ; <nl> + const CompileOptions & options ) override ; <nl> <nl> StatusOr < std : : vector < std : : unique_ptr < Executable > > > Compile ( <nl> std : : unique_ptr < HloModuleGroup > module_group , <nl> std : : vector < std : : vector < se : : StreamExecutor * > > stream_execs , <nl> - se : : DeviceMemoryAllocator * device_allocator ) override ; <nl> + const CompileOptions & options ) override ; <nl> <nl> StatusOr < std : : vector < std : : unique_ptr < AotCompilationResult > > > <nl> CompileAheadOfTime ( std : : unique_ptr < HloModuleGroup > module_group , <nl> mmm a / tensorflow / compiler / xla / service / mlir_gpu / mlir_compiler_impl . cc <nl> ppp b / tensorflow / compiler / xla / service / mlir_gpu / mlir_compiler_impl . 
cc <nl> class MlirCompilerImpl : public MlirCompiler { <nl> public : <nl> StatusOr < std : : unique_ptr < HloModule > > RunHloPasses ( <nl> std : : unique_ptr < HloModule > module , se : : StreamExecutor * stream_exec , <nl> - se : : DeviceMemoryAllocator * device_allocator ) override ; <nl> + const CompileOptions & options ) override ; <nl> <nl> StatusOr < std : : unique_ptr < Executable > > RunBackend ( <nl> std : : unique_ptr < HloModule > module , se : : StreamExecutor * stream_exec , <nl> - se : : DeviceMemoryAllocator * device_allocator ) override ; <nl> + const CompileOptions & options ) override ; <nl> <nl> StatusOr < std : : vector < std : : unique_ptr < Executable > > > Compile ( <nl> std : : unique_ptr < HloModuleGroup > module_group , <nl> std : : vector < std : : vector < se : : StreamExecutor * > > stream_execs , <nl> - se : : DeviceMemoryAllocator * device_allocator ) override ; <nl> + const CompileOptions & options ) override ; <nl> <nl> StatusOr < std : : vector < std : : unique_ptr < AotCompilationResult > > > <nl> CompileAheadOfTime ( std : : unique_ptr < HloModuleGroup > module_group , <nl> std : : string GetLibdeviceDir ( const HloModuleConfig & hlo_module_config ) { <nl> <nl> StatusOr < std : : unique_ptr < HloModule > > MlirCompilerImpl : : RunHloPasses ( <nl> std : : unique_ptr < HloModule > module , se : : StreamExecutor * stream_exec , <nl> - se : : DeviceMemoryAllocator * device_allocator ) { <nl> + const CompileOptions & options ) { <nl> / / Until we find a reason to do something different , run the same passes <nl> / / that the normal GPU backend runs . <nl> gpu : : NVPTXCompiler xla_compiler ; <nl> TF_RETURN_IF_ERROR ( xla_compiler . OptimizeHloModule ( module . get ( ) , stream_exec , <nl> - device_allocator ) ) ; <nl> + options . device_allocator ) ) ; <nl> TF_RETURN_IF_ERROR ( xla_compiler . PrepareHloModuleForIrEmitting ( module . get ( ) ) ) ; <nl> <nl> return std : : move ( module ) ; <nl> StatusOr < std : : unique_ptr < gpu : : KernelThunk > > TransformKernelToXlaThunk ( <nl> <nl> StatusOr < std : : unique_ptr < Executable > > MlirCompilerImpl : : RunBackend ( <nl> std : : unique_ptr < HloModule > module , se : : StreamExecutor * stream_exec , <nl> - se : : DeviceMemoryAllocator * device_allocator ) { <nl> + const CompileOptions & options ) { <nl> / / Determine the HLO schedule , which is an ordering of HLO instructions . This <nl> / / is used by buffer assignment to enable buffer reuse , and the same ordering <nl> / / must also be used to determine the thunk launch schedule . <nl> StatusOr < std : : unique_ptr < Executable > > MlirCompilerImpl : : RunBackend ( <nl> StatusOr < std : : vector < std : : unique_ptr < Executable > > > MlirCompilerImpl : : Compile ( <nl> std : : unique_ptr < HloModuleGroup > module_group , <nl> std : : vector < std : : vector < se : : StreamExecutor * > > stream_execs , <nl> - se : : DeviceMemoryAllocator * device_allocator ) { <nl> + const CompileOptions & options ) { <nl> return Unimplemented ( " Not yet implemented in MLIR compiler " ) ; <nl> } <nl> <nl> mmm a / tensorflow / compiler / xla / service / service . cc <nl> ppp b / tensorflow / compiler / xla / service / service . 
cc <nl> StatusOr < std : : vector < std : : unique_ptr < Executable > > > Service : : BuildExecutables ( <nl> const std : : vector < const HloModuleProto * > & module_protos , <nl> std : : vector < std : : unique_ptr < HloModuleConfig > > module_configs , <nl> Backend * backend , std : : vector < std : : vector < se : : StreamExecutor * > > executors , <nl> - se : : DeviceMemoryAllocator * device_allocator , bool run_backend_only ) { <nl> + const Compiler : : CompileOptions & options , bool run_backend_only ) { <nl> VLOG ( 1 ) < < StrFormat ( " BuildExecutable on service % p " , this ) ; <nl> <nl> / / Dump computation proto state if flag is set . <nl> StatusOr < std : : vector < std : : unique_ptr < Executable > > > Service : : BuildExecutables ( <nl> <nl> std : : vector < std : : unique_ptr < Executable > > executables ; <nl> if ( ! run_backend_only ) { <nl> - TF_ASSIGN_OR_RETURN ( <nl> - executables , <nl> - backend - > compiler ( ) - > Compile ( std : : move ( module_group ) , <nl> - std : : move ( executors ) , device_allocator ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( executables , backend - > compiler ( ) - > Compile ( <nl> + std : : move ( module_group ) , <nl> + std : : move ( executors ) , options ) ) ; <nl> } else { <nl> auto modules = module_group - > ConsumeModules ( ) ; <nl> for ( std : : unique_ptr < HloModule > & module : modules ) { <nl> - TF_ASSIGN_OR_RETURN ( <nl> - std : : unique_ptr < Executable > executable , <nl> - backend - > compiler ( ) - > RunBackend ( std : : move ( module ) , executors [ 0 ] [ 0 ] , <nl> - device_allocator ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( std : : unique_ptr < Executable > executable , <nl> + backend - > compiler ( ) - > RunBackend ( <nl> + std : : move ( module ) , executors [ 0 ] [ 0 ] , options ) ) ; <nl> executables . push_back ( std : : move ( executable ) ) ; <nl> } <nl> } <nl> Status Service : : ExecuteGraphParallel ( const ExecuteGraphParallelRequest * arg , <nl> TF_ASSIGN_OR_RETURN ( std : : vector < std : : unique_ptr < Executable > > executables , <nl> BuildExecutables ( module_protos , std : : move ( module_configs ) , <nl> execute_backend_ . get ( ) , all_executors , <nl> - / * device_allocator = * / nullptr ) ) ; <nl> + { / * device_allocator = * / nullptr } ) ) ; <nl> std : : vector < Executable * > executable_ptrs ; <nl> executable_ptrs . reserve ( executables . size ( ) ) ; <nl> for ( const auto & executable : executables ) { <nl> Status Service : : GetDeviceHandles ( const GetDeviceHandlesRequest * arg , <nl> StatusOr < std : : unique_ptr < Executable > > Service : : BuildExecutable ( <nl> const HloModuleProto & module_proto , <nl> std : : unique_ptr < HloModuleConfig > module_config , Backend * backend , <nl> - se : : StreamExecutor * executor , se : : DeviceMemoryAllocator * device_allocator , <nl> + se : : StreamExecutor * executor , const Compiler : : CompileOptions & options , <nl> bool run_backend_only ) { <nl> VLOG ( 1 ) < < StrFormat ( <nl> " BuildExecutable on service % p with serialized module proto : % s " , this , <nl> StatusOr < std : : unique_ptr < Executable > > Service : : BuildExecutable ( <nl> DumpHloModuleIfEnabled ( * module , kBeforeOptimizationsDumpName ) ; <nl> <nl> if ( ! 
run_backend_only ) { <nl> - TF_ASSIGN_OR_RETURN ( <nl> - module , backend - > compiler ( ) - > RunHloPasses ( std : : move ( module ) , executor , <nl> - device_allocator ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( module , backend - > compiler ( ) - > RunHloPasses ( <nl> + std : : move ( module ) , executor , options ) ) ; <nl> } <nl> <nl> - TF_ASSIGN_OR_RETURN ( std : : unique_ptr < Executable > executable , <nl> - backend - > compiler ( ) - > RunBackend ( <nl> - std : : move ( module ) , executor , device_allocator ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( <nl> + std : : unique_ptr < Executable > executable , <nl> + backend - > compiler ( ) - > RunBackend ( std : : move ( module ) , executor , options ) ) ; <nl> <nl> const auto & debug_opts = module_config - > debug_options ( ) ; <nl> if ( DumpingEnabledForHloModule ( module_proto . name ( ) , debug_opts ) & & <nl> Status Service : : Compile ( const CompileRequest * arg , CompileResponse * result ) { <nl> BuildExecutable ( arg - > computation ( ) , std : : move ( module_config ) , <nl> execute_backend_ . get ( ) , <nl> execute_backend_ - > default_stream_executor ( ) , <nl> - / * device_allocator = * / nullptr ) ) ; <nl> + { / * device_allocator = * / nullptr } ) ) ; <nl> <nl> * result - > mutable_handle ( ) = compilation_cache_ . Insert ( std : : move ( executable ) ) ; <nl> <nl> mmm a / tensorflow / compiler / xla / service / service . h <nl> ppp b / tensorflow / compiler / xla / service / service . h <nl> class Service : public ServiceInterface { <nl> StatusOr < std : : unique_ptr < Executable > > BuildExecutable ( <nl> const HloModuleProto & module_proto , <nl> std : : unique_ptr < HloModuleConfig > module_config , Backend * backend , <nl> - se : : StreamExecutor * executor , <nl> - se : : DeviceMemoryAllocator * device_allocator = nullptr , <nl> + se : : StreamExecutor * executor , const Compiler : : CompileOptions & options , <nl> bool run_backend_only = false ) ; <nl> <nl> / / Same as BuildExecutable ( ) above , but builds a list of Executables for the <nl> class Service : public ServiceInterface { <nl> const std : : vector < const HloModuleProto * > & module_protos , <nl> std : : vector < std : : unique_ptr < HloModuleConfig > > module_configs , <nl> Backend * backend , std : : vector < std : : vector < se : : StreamExecutor * > > executors , <nl> - se : : DeviceMemoryAllocator * device_allocator , <nl> - bool run_backend_only = false ) ; <nl> + const Compiler : : CompileOptions & options , bool run_backend_only = false ) ; <nl> <nl> / / Runs the given executable with the given arguments and register the result <nl> / / in the allocation tracker . The handle of the result from the tracker is <nl> mmm a / tensorflow / compiler / xla / service / sharding_propagation . cc <nl> ppp b / tensorflow / compiler / xla / service / sharding_propagation . cc <nl> bool IsShardingMoreSpecific ( const HloSharding & lhs , const HloSharding & rhs ) { <nl> } <nl> } <nl> <nl> - / / Returns a sharding where each tuple element is chosen as the more specific <nl> - / / one of the corresponding elements in a and b . Requires a an b to have the <nl> - / / same tuple nesting . <nl> - HloSharding MergeForMoreSpecificSharding ( const HloSharding & a , <nl> - const HloSharding & b ) { <nl> - if ( a . IsTuple ( ) ) { <nl> - HloSharding result = a ; <nl> - CHECK ( b . IsTuple ( ) ) ; <nl> - CHECK_EQ ( a . tuple_elements ( ) . size ( ) , b . tuple_elements ( ) . size ( ) ) ; <nl> - for ( int64 i = 0 ; i < result . tuple_elements ( ) . size ( ) ; + + i ) { <nl> - result . 
tuple_elements ( ) [ i ] = MergeForMoreSpecificSharding ( <nl> - a . tuple_elements ( ) [ i ] , b . tuple_elements ( ) [ i ] ) ; <nl> - } <nl> - return result ; <nl> - } <nl> - return IsShardingMoreSpecific ( a , b ) ? a : b ; <nl> - } <nl> - <nl> / / Tries to refine ` to_merge ` by combining with ` old ` . Returns if the final <nl> - / / ` to_merge ` is more specific than ` old ` . May combine partial sharding in <nl> - / / addition to MergeForMoreSpecificSharding ( ) . <nl> + / / ` to_merge ` is more specific than ` old ` . <nl> bool MergeSharding ( const HloSharding & old , HloSharding * to_merge , <nl> bool may_combine_partial_sharding ) { <nl> if ( old . IsTuple ( ) ) { <nl> bool InferShardingFromOperands ( HloInstruction * instruction , <nl> } <nl> auto sharding = instruction - > operand ( 0 ) - > sharding ( ) ; <nl> if ( instruction - > has_sharding ( ) ) { <nl> - sharding = <nl> - MergeForMoreSpecificSharding ( sharding , instruction - > sharding ( ) ) ; <nl> + MergeSharding ( instruction - > sharding ( ) , & sharding , <nl> + may_combine_partial_sharding ) ; <nl> } <nl> return MaybeImproveInstructionSharding ( std : : move ( sharding ) , instruction , <nl> may_combine_partial_sharding ) ; <nl> absl : : optional < HloSharding > GetShardingFromUser ( <nl> return hlo_sharding_util : : ReshapeSharding ( <nl> user . shape ( ) , instruction . shape ( ) , user . sharding ( ) ) ; <nl> } <nl> + case HloOpcode : : kPad : { <nl> + if ( & instruction ! = user . operand ( 0 ) ) { <nl> + return absl : : nullopt ; <nl> + } <nl> + return user . sharding ( ) ; <nl> + } <nl> case HloOpcode : : kSlice : { <nl> return user . sharding ( ) ; <nl> } <nl> StatusOr < bool > ShardingPropagation : : Run ( HloModule * module ) { <nl> / / If instruction is a while , or the root or a parameter of a while body , <nl> / / then propagate its sharding to the while instruction , to its body root , <nl> / / and to its condition parameter . <nl> - std : : function < void ( HloInstruction * ) > maybe_computation_propagation = <nl> - [ & ] ( HloInstruction * instruction ) { <nl> + std : : function < void ( HloInstruction * , absl : : flat_hash_set < HloInstruction * > * ) > <nl> + maybe_computation_propagation = [ & ] ( HloInstruction * instruction , <nl> + absl : : flat_hash_set < HloInstruction * > * <nl> + changed ) { <nl> auto propagate_to_instruction = [ & ] ( HloInstruction * search_inst ) { <nl> auto related_instructions = get_related_instructions ( search_inst ) ; <nl> if ( absl : : c_count ( related_instructions , instruction ) ) { <nl> StatusOr < bool > ShardingPropagation : : Run ( HloModule * module ) { <nl> inst - > sharding ( ) ! = instruction - > sharding ( ) ) { <nl> VLOG ( 2 ) < < " Add computation sharding : " < < inst - > name ( ) ; <nl> inst - > set_sharding ( instruction - > sharding ( ) ) ; <nl> - maybe_computation_propagation ( inst ) ; <nl> + changed - > insert ( inst ) ; <nl> + maybe_computation_propagation ( inst , changed ) ; <nl> } <nl> } <nl> } <nl> StatusOr < bool > ShardingPropagation : : Run ( HloModule * module ) { <nl> for ( const HloInstruction * instruction : instructions ) { <nl> already_sharded_counter + = ( instruction - > has_sharding ( ) ? 1 : 0 ) ; <nl> } <nl> + auto clear_cache = [ & ] ( HloInstruction * hlo ) { <nl> + for ( auto operand : hlo - > operands ( ) ) { <nl> + already_inferred_from_users . erase ( operand ) ; <nl> + } <nl> + for ( auto user : hlo - > users ( ) ) { <nl> + already_inferred_from_operands . 
erase ( user ) ; <nl> + } <nl> + } ; <nl> / / First iterate the HLO graph in post order taking shardings from <nl> / / operands . <nl> for ( HloInstruction * instruction : instructions ) { <nl> StatusOr < bool > ShardingPropagation : : Run ( HloModule * module ) { <nl> any_changed = true ; <nl> VLOG ( 2 ) < < " Add sharding ( forward - pass ) : " <nl> < < instruction - > ToString ( ) ; <nl> - maybe_computation_propagation ( instruction ) ; <nl> - for ( auto operand : instruction - > operands ( ) ) { <nl> - already_inferred_from_users . erase ( operand ) ; <nl> - } <nl> - for ( auto user : instruction - > users ( ) ) { <nl> - already_inferred_from_operands . erase ( user ) ; <nl> + absl : : flat_hash_set < HloInstruction * > changed_in_comp_prop ; <nl> + maybe_computation_propagation ( instruction , & changed_in_comp_prop ) ; <nl> + clear_cache ( instruction ) ; <nl> + for ( auto hlo : changed_in_comp_prop ) { <nl> + clear_cache ( hlo ) ; <nl> } <nl> changed_last_iter = true ; <nl> } <nl> StatusOr < bool > ShardingPropagation : : Run ( HloModule * module ) { <nl> + + inferred_from_user_counter ; <nl> any_changed = true ; <nl> VLOG ( 2 ) < < " Add sharding ( backward - pass ) : " < < ( * it ) - > ToString ( ) ; <nl> - maybe_computation_propagation ( * it ) ; <nl> - for ( auto operand : ( * it ) - > operands ( ) ) { <nl> - already_inferred_from_users . erase ( operand ) ; <nl> - } <nl> - for ( auto user : ( * it ) - > users ( ) ) { <nl> - already_inferred_from_operands . erase ( user ) ; <nl> + absl : : flat_hash_set < HloInstruction * > changed_in_comp_prop ; <nl> + maybe_computation_propagation ( * it , & changed_in_comp_prop ) ; <nl> + clear_cache ( * it ) ; <nl> + for ( auto hlo : changed_in_comp_prop ) { <nl> + clear_cache ( hlo ) ; <nl> } <nl> changed_last_iter = true ; <nl> } <nl> mmm a / tensorflow / compiler / xla / service / sharding_propagation_test . cc <nl> ppp b / tensorflow / compiler / xla / service / sharding_propagation_test . cc <nl> ENTRY % pad { <nl> op : : Sharding ( " { devices = [ 2 , 2 ] 0 , 1 , 2 , 3 } " ) ) ; <nl> } <nl> <nl> + TEST_F ( ShardingPropagationTest , PadBackwardPass ) { <nl> + const char * const hlo_string = R " ( <nl> + HloModule module <nl> + ENTRY % pad { <nl> + % input = f32 [ 11 , 17 ] { 1 , 0 } parameter ( 0 ) <nl> + % copy = f32 [ 11 , 17 ] { 1 , 0 } copy ( % input ) <nl> + % pad_value = f32 [ ] parameter ( 1 ) <nl> + % pad = f32 [ 27 , 51 ] { 1 , 0 } pad ( % copy , % pad_value ) , padding = 2_4_1x1_1_2 , <nl> + sharding = { devices = [ 2 , 2 ] 0 , 1 , 2 , 3 } <nl> + ROOT % result = f32 [ 27 , 51 ] { 1 , 0 } copy ( % pad ) <nl> + } ) " ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( auto module , <nl> + ParseAndReturnVerifiedModule ( hlo_string ) ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( bool changed , <nl> + ShardingPropagation ( ) . Run ( module . get ( ) ) ) ; <nl> + EXPECT_TRUE ( changed ) ; <nl> + EXPECT_THAT ( FindInstruction ( module . get ( ) , " copy " ) , <nl> + op : : Sharding ( " { devices = [ 2 , 2 ] 0 , 1 , 2 , 3 } " ) ) ; <nl> + } <nl> + <nl> TEST_F ( ShardingPropagationTest , PartialReplicatedPadForwardPass ) { <nl> const char * const hlo_string = R " ( <nl> HloModule module <nl> TEST_F ( ShardingPropagationTest , While ) { <nl> HloModule module <nl> <nl> % cond { <nl> - % vars . cond = ( u32 [ ] , f32 [ 10 ] { 0 } ) parameter ( 0 ) <nl> - % count . cond = u32 [ ] get - tuple - element ( ( u32 [ ] , f32 [ 10 ] { 0 } ) % vars . cond ) , index = 0 <nl> + % vars . cond = ( u32 [ ] , f32 [ 10 , 10 ] ) parameter ( 0 ) <nl> + % count . 
cond = u32 [ ] get - tuple - element ( ( u32 [ ] , f32 [ 10 , 10 ] ) % vars . cond ) , index = 0 <nl> % limit = u32 [ ] constant ( 10 ) <nl> ROOT % lt = pred [ ] compare ( u32 [ ] % count . cond , u32 [ ] % limit ) , direction = LT <nl> } <nl> <nl> % body { <nl> - % vars = ( u32 [ ] , f32 [ 10 ] { 0 } ) parameter ( 0 ) <nl> + % vars = ( u32 [ ] , f32 [ 10 , 10 ] ) parameter ( 0 ) <nl> % count = u32 [ ] get - tuple - element ( % vars ) , index = 0 <nl> - % acc = f32 [ 10 ] { 0 } get - tuple - element ( ( u32 [ ] , f32 [ 10 ] { 0 } ) % vars ) , index = 1 <nl> + % acc = f32 [ 10 , 10 ] get - tuple - element ( ( u32 [ ] , f32 [ 10 , 10 ] ) % vars ) , index = 1 <nl> <nl> % one = u32 [ ] constant ( 1 ) <nl> % count . 1 = u32 [ ] add ( u32 [ ] % count , u32 [ ] % one ) , sharding = { replicated } <nl> - % acc . 1 = f32 [ 10 ] { 0 } add ( f32 [ 10 ] { 0 } % acc , f32 [ 10 ] { 0 } % acc ) <nl> - ROOT % tuple = ( u32 [ ] , f32 [ 10 ] { 0 } ) tuple ( u32 [ ] % count . 1 , f32 [ 10 ] { 0 } % acc . 1 ) <nl> + % acc . 1 = f32 [ 10 , 10 ] add ( f32 [ 10 , 10 ] % acc , f32 [ 10 , 10 ] % acc ) <nl> + ROOT % tuple = ( u32 [ ] , f32 [ 10 , 10 ] ) tuple ( u32 [ ] % count . 1 , f32 [ 10 , 10 ] % acc . 1 ) <nl> } <nl> <nl> ENTRY % entry { <nl> - % p0 = f32 [ 10 ] { 0 } parameter ( 0 ) <nl> - % p0 . copy = f32 [ 10 ] { 0 } copy ( f32 [ 10 ] { 0 } % p0 ) <nl> - % p1 = f32 [ 10 ] { 0 } parameter ( 1 ) <nl> + % p0 = f32 [ 10 , 10 ] parameter ( 0 ) <nl> + % p0 . copy = f32 [ 10 , 10 ] copy ( f32 [ 10 , 10 ] % p0 ) <nl> + % p1 = f32 [ 10 , 10 ] parameter ( 1 ) <nl> % zero = u32 [ ] constant ( 0 ) <nl> - % init = ( u32 [ ] , f32 [ 10 ] { 0 } ) tuple ( u32 [ ] % zero , f32 [ 10 ] { 0 } % p0 . copy ) <nl> - % while = ( u32 [ ] , f32 [ 10 ] { 0 } ) while ( ( u32 [ ] , f32 [ 10 ] { 0 } ) % init ) , <nl> + % init = ( u32 [ ] , f32 [ 10 , 10 ] ) tuple ( u32 [ ] % zero , f32 [ 10 , 10 ] % p0 . copy ) <nl> + % while = ( u32 [ ] , f32 [ 10 , 10 ] ) while ( ( u32 [ ] , f32 [ 10 , 10 ] ) % init ) , <nl> body = % body , condition = % cond <nl> - % res = f32 [ 10 ] { 0 } get - tuple - element ( ( u32 [ ] , f32 [ 10 ] { 0 } ) % while ) , index = 1 <nl> - % prev = f32 [ 10 ] { 0 } get - tuple - element ( ( u32 [ ] , f32 [ 10 ] { 0 } ) % init ) , index = 1 <nl> - % res . 1 = f32 [ 10 ] { 0 } multiply ( f32 [ 10 ] { 0 } % res , % prev ) <nl> - ROOT % res_tuple = ( f32 [ 10 ] { 0 } ) tuple ( f32 [ 10 ] { 0 } % res . 1 ) <nl> + % res = f32 [ 10 , 10 ] get - tuple - element ( ( u32 [ ] , f32 [ 10 , 10 ] ) % while ) , index = 1 <nl> + % prev = f32 [ 10 , 10 ] get - tuple - element ( ( u32 [ ] , f32 [ 10 , 10 ] ) % init ) , index = 1 <nl> + % res . 1 = f32 [ 10 , 10 ] multiply ( f32 [ 10 , 10 ] % res , % prev ) <nl> + ROOT % res_tuple = ( f32 [ 10 , 10 ] ) tuple ( f32 [ 10 , 10 ] % res . 1 ) <nl> } ) " ; <nl> <nl> auto while_is_sharded = [ this ] ( HloModule * module , <nl> const HloSharding & sharding ) { <nl> - TF_ASSERT_OK_AND_ASSIGN ( bool changed , ShardingPropagation ( ) . Run ( module ) ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( bool changed , <nl> + ShardingPropagation ( / * is_spmd = * / true ) . Run ( module ) ) ; <nl> EXPECT_TRUE ( changed ) ; <nl> auto while_instr = FindInstruction ( module , " while " ) ; <nl> EXPECT_NE ( nullptr , while_instr ) ; <nl> ENTRY % entry { <nl> auto body_root = FindInstruction ( module . get ( ) , " tuple " ) ; <nl> EXPECT_NE ( nullptr , body_root ) ; <nl> auto sharding = <nl> - ParseSharding ( " { { replicated } , { devices = [ 2 ] 0 , 1 } } " ) . 
ConsumeValueOrDie ( ) ; <nl> + ParseSharding ( " { { replicated } , { devices = [ 2 , 1 ] 0 , 1 } } " ) . ConsumeValueOrDie ( ) ; <nl> body_root - > set_sharding ( sharding ) ; <nl> while_is_sharded ( module . get ( ) , sharding ) ; <nl> } <nl> ENTRY % entry { <nl> ParseAndReturnVerifiedModule ( hlo_string ) ) ; <nl> auto acc_1 = FindInstruction ( module . get ( ) , " acc . 1 " ) ; <nl> EXPECT_NE ( nullptr , acc_1 ) ; <nl> - acc_1 - > set_sharding ( ParseSharding ( " { devices = [ 2 ] 0 , 1 } " ) . ConsumeValueOrDie ( ) ) ; <nl> + acc_1 - > set_sharding ( <nl> + ParseSharding ( " { devices = [ 2 , 1 ] 0 , 1 } " ) . ConsumeValueOrDie ( ) ) ; <nl> <nl> - while_is_sharded ( <nl> - module . get ( ) , <nl> - ParseSharding ( " { { replicated } , { devices = [ 2 ] 0 , 1 } } " ) . ConsumeValueOrDie ( ) ) ; <nl> + while_is_sharded ( module . get ( ) , <nl> + ParseSharding ( " { { replicated } , { devices = [ 2 , 1 ] 0 , 1 } } " ) <nl> + . ConsumeValueOrDie ( ) ) ; <nl> + } <nl> + { <nl> + / / Merge partial sharding from operand and body . <nl> + TF_ASSERT_OK_AND_ASSIGN ( auto module , <nl> + ParseAndReturnVerifiedModule ( hlo_string ) ) ; <nl> + auto acc_1 = FindInstruction ( module . get ( ) , " acc . 1 " ) ; <nl> + EXPECT_NE ( nullptr , acc_1 ) ; <nl> + acc_1 - > set_sharding ( <nl> + ParseSharding ( " { devices = [ 2 , 1 , 2 ] 0 , 1 , 2 , 3 last_tile_dim_replicate } " ) <nl> + . ConsumeValueOrDie ( ) ) ; <nl> + auto p0 = FindInstruction ( module . get ( ) , " p0 " ) ; <nl> + p0 - > set_sharding ( <nl> + ParseSharding ( " { devices = [ 1 , 2 , 2 ] 0 , 2 , 1 , 3 last_tile_dim_replicate } " ) <nl> + . ConsumeValueOrDie ( ) ) ; <nl> + <nl> + while_is_sharded ( module . get ( ) , <nl> + ParseSharding ( " { { replicated } , { devices = [ 2 , 2 ] 0 , 1 , 2 , 3 } } " ) <nl> + . ConsumeValueOrDie ( ) ) ; <nl> } <nl> } <nl> <nl> mmm a / tensorflow / compiler / xla / service / space_to_batch_converter . cc <nl> ppp b / tensorflow / compiler / xla / service / space_to_batch_converter . cc <nl> class ConvolutionVisitor { <nl> return permute_dims [ id ] ; <nl> } <nl> <nl> + int64 ReverseDimLookUp ( absl : : Span < const int64 > permute_dims , int64 id ) { <nl> + return std : : distance ( permute_dims . begin ( ) , absl : : c_find ( permute_dims , id ) ) ; <nl> + } <nl> + <nl> HloInstruction * DoesConvolutionFeedReduceWindowOrSelectAndScatter ( <nl> HloInstruction * instr , int64 depth ) ; <nl> <nl> class ConvolutionVisitor { <nl> / / Limit on batch size to apply this technique on . <nl> int64 limit_on_batch_size_ ; <nl> <nl> - / / We choose the new batch size to be a constant so that space - to - batch <nl> - / / propagation through several convolutional layers is consistent . <nl> - static constexpr int64 kNewBatchSize = 8 ; <nl> + / / We choose the new batch size to be kNumSplits times that of the old batch <nl> + / / so that space - to - batch propagation through several convolutional layers is <nl> + / / consistent . <nl> + static constexpr int64 kNumSplits = 8 ; <nl> <nl> / / Depth for searching reduce window <nl> static constexpr int64 kReduceWindowSearchDepth = 10 ; <nl> bool ConvolutionVisitor : : IsConvSuitableForSpaceToBatch ( <nl> if ( old_batch_size > limit_on_batch_size_ ) { <nl> return false ; <nl> } <nl> - / / We currently only cater to evenly divisible cases . <nl> - if ( kNewBatchSize % old_batch_size ! = 0 ) { <nl> - return false ; <nl> - } <nl> <nl> VLOG ( 1 ) < < " spatial size " < < c . 
spatial_size ; <nl> <nl> - const int64 num_splits = kNewBatchSize / old_batch_size ; <nl> / / If the ratio is not within the 2X range , we can ' t Halo Pad from the next <nl> / / split . <nl> - if ( c . halo_size > CeilOfRatio ( c . spatial_size , num_splits ) ) { <nl> + if ( c . halo_size > CeilOfRatio ( c . spatial_size , kNumSplits ) ) { <nl> return false ; <nl> } <nl> VLOG ( 1 ) < < " Legal space - to - batch convolution " < < convolution - > ToString ( ) ; <nl> StatusOr < HloInstruction * > ConvolutionVisitor : : HaloDuplicateWithSlice ( <nl> int64 activations_batch_dim , int64 old_batch_size , int64 low_padding , <nl> int64 high_padding , int64 halo_size , int64 original_split_dim_size , <nl> HloInstruction * pad_val ) { <nl> + const int64 original_batch_size = <nl> + activations - > shape ( ) . dimensions ( activations_batch_dim ) / kNumSplits ; <nl> + <nl> + if ( original_batch_size > 1 ) { <nl> + std : : vector < int64 > new_dimensions ( activations - > shape ( ) . dimensions ( ) . begin ( ) , <nl> + activations - > shape ( ) . dimensions ( ) . end ( ) ) ; <nl> + new_dimensions [ activations_batch_dim ] = kNumSplits ; <nl> + new_dimensions . insert ( new_dimensions . begin ( ) + activations_batch_dim , <nl> + original_batch_size ) ; <nl> + <nl> + / / Reshape the output of the new conv into the old convolutions shape . <nl> + TF_ASSIGN_OR_RETURN ( activations , <nl> + MakeReshapeHlo ( new_dimensions , activations ) ) ; <nl> + <nl> + spatial_dimension_to_split + + ; <nl> + activations_batch_dim + + ; <nl> + } <nl> + <nl> const int64 rank = activations - > shape ( ) . rank ( ) ; <nl> const int64 spatial_split_size = <nl> activations - > shape ( ) . dimensions ( spatial_dimension_to_split ) ; <nl> StatusOr < HloInstruction * > ConvolutionVisitor : : HaloDuplicateWithSlice ( <nl> TF_ASSIGN_OR_RETURN ( activations , MakeConcatHlo ( { activations , halo_region } , <nl> spatial_dimension_to_split ) ) ; <nl> } <nl> + <nl> + if ( original_batch_size > 1 ) { <nl> + std : : vector < int64 > new_dimensions ( activations - > shape ( ) . dimensions ( ) . begin ( ) , <nl> + activations - > shape ( ) . dimensions ( ) . end ( ) ) ; <nl> + new_dimensions [ activations_batch_dim ] = original_batch_size * kNumSplits ; <nl> + new_dimensions . erase ( new_dimensions . begin ( ) + activations_batch_dim - 1 ) ; <nl> + <nl> + / / Reshape the output of the new conv into the old convolutions shape . <nl> + TF_ASSIGN_OR_RETURN ( activations , <nl> + MakeReshapeHlo ( new_dimensions , activations ) ) ; <nl> + <nl> + spatial_dimension_to_split + + ; <nl> + activations_batch_dim + + ; <nl> + } <nl> + <nl> VLOG ( 1 ) < < " HaloDuplicated activations " < < activations - > ToString ( ) ; <nl> return activations ; <nl> } <nl> ConvolutionVisitor : : BringSpaceNextToBatch ( <nl> HloInstruction * activations , ConvolutionDimensionNumbers & dim_numbers , <nl> int64 & spatial_dimension_to_split , int64 & activations_batch_dim , <nl> bool is_backprop ) { <nl> - std : : vector < int64 > transpose_dims ; <nl> - ConvolutionDimensionNumbers new_dim_numbers = dim_numbers ; <nl> - if ( spatial_dimension_to_split ! = activations_batch_dim + 1 ) { <nl> + std : : vector < int64 > transpose_dims ( activations - > shape ( ) . 
rank ( ) ) ; <nl> + if ( spatial_dimension_to_split = = activations_batch_dim + 1 ) { <nl> + absl : : c_iota ( transpose_dims , 0 ) ; <nl> + } else { <nl> + ConvolutionDimensionNumbers new_dim_numbers = dim_numbers ; <nl> int64 pushed_counter = 0 ; <nl> int64 new_batch_dim , new_spatial_dim ; <nl> + int64 dim_counter = 0 ; <nl> for ( int i = 0 ; i < activations - > shape ( ) . rank ( ) ; + + i ) { <nl> if ( i = = activations_batch_dim ) { <nl> continue ; <nl> } <nl> if ( i = = spatial_dimension_to_split ) { <nl> - transpose_dims . push_back ( activations_batch_dim ) ; <nl> + transpose_dims [ dim_counter + + ] = activations_batch_dim ; <nl> new_batch_dim = pushed_counter ; <nl> pushed_counter + + ; <nl> new_spatial_dim = pushed_counter ; <nl> ConvolutionVisitor : : BringSpaceNextToBatch ( <nl> } <nl> } <nl> } <nl> - transpose_dims . push_back ( i ) ; <nl> + transpose_dims [ dim_counter + + ] = i ; <nl> pushed_counter + + ; <nl> } <nl> <nl> ConvolutionVisitor : : BringSpaceNextToBatch ( <nl> spatial_dimension_to_split = new_spatial_dim ; <nl> TF_ASSIGN_OR_RETURN ( activations , <nl> MakeTransposeHlo ( activations , transpose_dims ) ) ; <nl> - } <nl> <nl> - if ( is_backprop ) { <nl> - new_dim_numbers . set_input_feature_dimension ( activations_batch_dim ) ; <nl> - } else { <nl> - new_dim_numbers . set_input_batch_dimension ( activations_batch_dim ) ; <nl> + if ( is_backprop ) { <nl> + new_dim_numbers . set_input_feature_dimension ( activations_batch_dim ) ; <nl> + } else { <nl> + new_dim_numbers . set_input_batch_dimension ( activations_batch_dim ) ; <nl> + } <nl> + dim_numbers = new_dim_numbers ; <nl> } <nl> - dim_numbers = new_dim_numbers ; <nl> <nl> return SpaceNextToBatchDetails { activations , transpose_dims } ; <nl> } <nl> bool ConvolutionVisitor : : CanPropagate ( HloInstruction * consumer , <nl> VLOG ( 1 ) < < " Checking if conv is supported for propagation " <nl> < < consumer - > ToString ( ) ; <nl> if ( IsConvSuitableForSpaceToBatch ( consumer ) ) { <nl> - for ( int64 i = 0 ; i < consumer - > operand_count ( ) ; + + i ) { <nl> - auto old_producer = consumer - > mutable_operand ( i ) ; <nl> - if ( i = = 0 & & ! old_to_new_instrs_ . contains ( old_producer ) ) { <nl> - return false ; <nl> - } <nl> + if ( ! old_to_new_instrs_ . contains ( consumer - > mutable_operand ( 0 ) ) ) { <nl> + return false ; <nl> } <nl> + auto dim_map_val_op_0 = instr_to_dim_map_ [ consumer - > mutable_operand ( 0 ) ] ; <nl> + / / Make sure that the space dimension is the same across the producer <nl> + / / and consumer . <nl> + if ( consumer - > convolution_dimension_numbers ( ) . input_spatial_dimensions ( <nl> + get_chosen_spatial_dim ( consumer ) ) ! = dim_map_val_op_0 . second ) { <nl> + return false ; <nl> + } <nl> + / / Make sure that the batch dimension is the same across the producer <nl> + / / and consumer . <nl> + if ( consumer - > convolution_dimension_numbers ( ) . input_batch_dimension ( ) ! = <nl> + dim_map_val_op_0 . first ) { <nl> + return false ; <nl> + } <nl> + <nl> return true ; <nl> } <nl> <nl> bool ConvolutionVisitor : : CanPropagate ( HloInstruction * consumer , <nl> VLOG ( 2 ) < < " Checking for backprop filter conv operands " <nl> < < consumer - > operand_count ( ) ; <nl> <nl> - if ( ! old_to_new_instrs_ . contains ( consumer - > mutable_operand ( 1 ) ) ) { <nl> + auto activations = consumer - > mutable_operand ( 0 ) ; <nl> + auto kernel = consumer - > mutable_operand ( 1 ) ; <nl> + <nl> + if ( ! old_to_new_instrs_ . 
contains ( kernel ) ) { <nl> VLOG ( 2 ) < < " Backprop filter conv not ready for propagation because of " <nl> " kernel is not space - to - batched " ; <nl> return false ; <nl> } <nl> <nl> - if ( ! old_to_new_instrs_ . contains ( consumer - > mutable_operand ( 0 ) ) ) { <nl> + if ( ! old_to_new_instrs_ . contains ( activations ) ) { <nl> + const int64 lhs_batch = activations - > shape ( ) . dimensions ( <nl> + consumer - > convolution_dimension_numbers ( ) . input_feature_dimension ( ) ) ; <nl> + auto dim_map_val_op_1 = instr_to_dim_map_ [ consumer - > mutable_operand ( 1 ) ] ; <nl> + const int64 old_batch_dim = dim_map_val_op_1 . first ; <nl> + auto second_operand = old_to_new_instrs_ [ kernel ] ; <nl> + auto permute_dims_second_operand = <nl> + instr_to_dim_permute_map_ [ second_operand ] ; <nl> + const int64 new_batch_dim = <nl> + DimLookUp ( permute_dims_second_operand , old_batch_dim ) ; <nl> + const int64 rhs_batch = second_operand - > shape ( ) . dimensions ( new_batch_dim ) ; <nl> + <nl> + / / Because we want to convert activations into a space - to - batched version <nl> + / / only for backprop filter convolutions , we want to make sure that the <nl> + / / batch dimensions ( feature dimensions , technically ) are same sized . <nl> + / / Since RHS is already space - to - batched , we need to account for it too . <nl> + if ( rhs_batch ! = kNumSplits * lhs_batch ) { <nl> + return false ; <nl> + } <nl> + <nl> / / If activations have not been propagated through , we can do <nl> / / space - to - batch on them provided kernel has been propagated . <nl> VLOG ( 2 ) < < " Backprop filter conv ready for propagation : kernel ready , " <nl> bool ConvolutionVisitor : : CanPropagate ( HloInstruction * consumer , <nl> return true ; <nl> } <nl> <nl> - auto first_operand = old_to_new_instrs_ [ consumer - > mutable_operand ( 0 ) ] ; <nl> - auto dim_map_val_op_0 = instr_to_dim_map_ [ consumer - > mutable_operand ( 0 ) ] ; <nl> - auto second_operand = old_to_new_instrs_ [ consumer - > mutable_operand ( 1 ) ] ; <nl> - auto dim_map_val_op_1 = instr_to_dim_map_ [ consumer - > mutable_operand ( 1 ) ] ; <nl> + auto first_operand = old_to_new_instrs_ [ activations ] ; <nl> + auto dim_map_val_op_0 = instr_to_dim_map_ [ activations ] ; <nl> + auto second_operand = old_to_new_instrs_ [ kernel ] ; <nl> + auto dim_map_val_op_1 = instr_to_dim_map_ [ kernel ] ; <nl> <nl> auto permute_dims_first_operand = instr_to_dim_permute_map_ [ first_operand ] ; <nl> auto permute_dims_second_operand = <nl> StatusOr < bool > ConvolutionVisitor : : Propagate ( HloInstruction * consumer , <nl> <nl> Window new_win ; <nl> for ( int64 i = 0 ; i < consumer - > window ( ) . dimensions ( ) . size ( ) ; + + i ) { <nl> - auto dim = DimLookUp ( permute_dims , i ) ; <nl> + auto dim = ReverseDimLookUp ( permute_dims , i ) ; <nl> new_win . add_dimensions ( ) ; <nl> new_win . mutable_dimensions ( i ) - > set_stride ( <nl> consumer - > window ( ) . dimensions ( dim ) . stride ( ) ) ; <nl> StatusOr < HloInstruction * > ConvolutionVisitor : : SelectValidPortion ( <nl> const int64 new_space_size = new_shape . dimensions ( new_space_dim ) ; <nl> const int64 old_batch_size = old_shape . dimensions ( old_batch_dim ) ; <nl> const int64 old_space_size = old_shape . 
dimensions ( old_space_dim ) ; <nl> - CHECK_EQ ( new_batch_size % old_batch_size , 0 ) ; <nl> + CHECK_EQ ( new_batch_size % old_batch_size , 0 ) <nl> + < < " New batch size " < < new_batch_size < < " old batch size " <nl> + < < old_batch_size ; <nl> const int64 num_splits = new_batch_size / old_batch_size ; <nl> / / Build a constant PRED to decide which elements in the split dimension <nl> / / are from halo . <nl> StatusOr < HloInstruction * > ConvolutionVisitor : : BatchToSpace ( <nl> CHECK ( old_to_new_instrs_ . contains ( old_instr ) ) ; <nl> auto new_instr = old_to_new_instrs_ [ old_instr ] ; <nl> VLOG ( 2 ) < < " old_batch_dim " < < old_batch_dim < < " old_space_dim " <nl> - < < old_space_dim < < " new_instr " < < new_instr - > ToString ( ) <nl> - < < " permute dims " < < instr_to_dim_permute_map_ . count ( new_instr ) ; <nl> + < < old_space_dim < < " old_instr " < < old_instr - > ToString ( ) <nl> + < < " \ n new_instr " < < new_instr - > ToString ( ) < < " permute dims " <nl> + < < instr_to_dim_permute_map_ . count ( new_instr ) < < " old_batch_size " <nl> + < < old_batch_size ; <nl> CHECK ( instr_to_dim_permute_map_ . contains ( new_instr ) ) ; <nl> auto permute_dims = instr_to_dim_permute_map_ [ new_instr ] ; <nl> const int64 batch_dim = DimLookUp ( permute_dims , old_batch_dim ) ; <nl> Status ConvolutionVisitor : : PropagateOnConv ( HloInstruction * convolution ) { <nl> c . spatial_dimension_to_split , activations_batch_dim ) ) ; <nl> activations_new = retval . instr ; <nl> std : : vector < int64 > trans_dims = retval . transpose_dims ; <nl> + CHECK ( ! trans_dims . empty ( ) ) ; <nl> auto select_val = computation_ - > AddInstruction ( HloInstruction : : CreateConstant ( <nl> LiteralUtil : : Zero ( activations_new - > shape ( ) . element_type ( ) ) ) ) ; <nl> <nl> Status ConvolutionVisitor : : PropagateOnConv ( HloInstruction * convolution ) { <nl> <nl> VLOG ( 1 ) < < " spatial size " < < c . spatial_size ; <nl> <nl> - const int64 num_splits = kNewBatchSize / old_batch_size ; <nl> - <nl> + const int64 num_splits = kNumSplits ; <nl> const int64 output_offsets = convolution - > shape ( ) . dimensions ( <nl> permuted_conv_dims_numbers . output_spatial_dimensions ( <nl> get_chosen_spatial_dim ( convolution ) ) ) ; <nl> Status ConvolutionVisitor : : PropagateOnConv ( HloInstruction * convolution ) { <nl> activations_new - > shape ( ) . dimensions ( ) . end ( ) ) ; <nl> const int64 reshaped_space_size = <nl> new_space_size * new_batch_size / old_batch_size ; <nl> + VLOG ( 3 ) < < " Increasing the spatial size while propagating new_batch_size " <nl> + < < new_batch_size < < " old_batch_size " < < old_batch_size ; <nl> new_dimensions [ c . spatial_dimension_to_split ] = reshaped_space_size ; <nl> new_dimensions [ activations_batch_dim ] = old_batch_size ; <nl> <nl> Status ConvolutionVisitor : : PropagateOnConv ( HloInstruction * convolution ) { <nl> TF_ASSIGN_OR_RETURN ( HloInstruction * reshaped_activations , <nl> MakeReshapeHlo ( new_dimensions , activations_new ) ) ; <nl> <nl> + VLOG ( 3 ) < < " First reshape done " ; <nl> PaddingConfig padding_config = <nl> MakeNoPaddingConfig ( reshaped_activations - > shape ( ) . dimensions_size ( ) ) ; <nl> padding_config . mutable_dimensions ( c . spatial_dimension_to_split ) <nl> - - > set_edge_padding_high ( spatial_split_size * new_batch_size - <nl> + - > set_edge_padding_high ( spatial_split_size * new_batch_size / <nl> + old_batch_size - <nl> reshaped_space_size ) ; <nl> padding_config . mutable_dimensions ( c . 
spatial_dimension_to_split ) <nl> - > set_edge_padding_low ( 0 ) ; <nl> Status ConvolutionVisitor : : PropagateOnConv ( HloInstruction * convolution ) { <nl> reshaped_activations , <nl> MakeReshapeHlo ( reshape_back_dims , reshaped_activations ) ) ; <nl> <nl> + VLOG ( 3 ) < < " Second reshape done " ; <nl> + <nl> TF_ASSIGN_OR_RETURN ( <nl> activations_new , <nl> HaloDuplicateWithSlice ( <nl> Status ConvolutionVisitor : : PropagateOnConv ( HloInstruction * convolution ) { <nl> / / additional space available , and adjust the required slice size ( and <nl> / / thereby the halo size ) . <nl> if ( spatial_split_size < new_space_size ) { <nl> + VLOG ( 3 ) < < " Decreasing the spatial size while propagating " ; <nl> const int64 additional_space_present = spatial_split_size % c . stride ; <nl> spatial_split_size = new_space_size ; <nl> slice_size = <nl> ConvolutionVisitor : : SplitSpace ( HloInstruction * activations , <nl> <nl> activations = retval . instr ; <nl> std : : vector < int64 > transpose_dims = retval . transpose_dims ; <nl> + CHECK ( ! transpose_dims . empty ( ) ) ; <nl> / / Because we are splitting the spatial dimension , if convolution needed <nl> / / padding in the spatial dimension , we materialize it . <nl> if ( high_padding | | low_padding ) { <nl> ConvolutionVisitor : : SplitSpace ( HloInstruction * activations , <nl> MakePadHlo ( activations , padding , padding_config ) ) ; <nl> } <nl> VLOG ( 1 ) < < " Initial padded activations shape " <nl> - < < activations - > shape ( ) . ToString ( ) ; <nl> + < < activations - > shape ( ) . ToString ( ) < < " old_batch_size " <nl> + < < old_batch_size < < " activations_batch_dim " <nl> + < < activations_batch_dim ; <nl> <nl> / / Now we reorganize the activations . E . g . if the shape [ B , SPACE ] was [ 1 , 16 ] <nl> / / and 4 splits were needed , we first create [ 4 , 4 ] . Next , to deal with halo <nl> Status ConvolutionVisitor : : PropagateOnBackpropFilterConv ( <nl> CHECK ( old_to_new_instrs_ . contains ( kernel_old ) ) ; <nl> auto kernel_new = old_to_new_instrs_ [ kernel_old ] ; <nl> <nl> + auto permute_dims_kernel = instr_to_dim_permute_map_ [ kernel_new ] ; <nl> + <nl> HloInstruction * activations_new = nullptr ; <nl> + bool activations_locally_space_to_batched = false ; <nl> / / If activations were no space - to - batched , we space - to - batch them below . <nl> if ( ! old_to_new_instrs_ . contains ( activations_old ) ) { <nl> VLOG ( 1 ) < < " Space - to - batching activations to enable space - to - depth " ; <nl> Status ConvolutionVisitor : : PropagateOnBackpropFilterConv ( <nl> instr_to_dim_map_ [ activations_old ] = <nl> std : : make_pair ( prev_feature_dim , prev_batch_dim ) ; <nl> <nl> - int64 activations_batch_dim = original_conv_dims . input_feature_dimension ( ) ; <nl> - const int64 old_batch_size = <nl> - activations_old - > shape ( ) . dimensions ( activations_batch_dim ) ; <nl> - const int64 num_splits = kNewBatchSize / old_batch_size ; <nl> + const int64 new_kernel_space_dim = <nl> + DimLookUp ( permute_dims_kernel , kernel_space_dim ) ; <nl> + <nl> const int64 new_kernel_split_dim_size = <nl> - kernel_new - > shape ( ) . dimensions ( kernel_space_dim ) ; <nl> + kernel_new - > shape ( ) . 
dimensions ( new_kernel_space_dim ) ; <nl> const int64 needed_spatial_size = rhs_dilation * new_kernel_split_dim_size ; <nl> const int64 pad_size = <nl> - needed_spatial_size * num_splits - old_split_dim_size ; <nl> + needed_spatial_size * kNumSplits - old_split_dim_size ; <nl> ConvolutionDimensionNumbers tmp_dim_numbers ; <nl> tmp_dim_numbers = original_conv_dims ; <nl> TF_ASSIGN_OR_RETURN ( <nl> auto retval , <nl> SplitSpace ( activations_old , tmp_dim_numbers , old_space_dim , <nl> - activations_batch_dim , <nl> + old_batch_dim , <nl> / * high_padding = * / pad_size , / * low_padding = * / 0 , <nl> - needed_spatial_size , num_splits , / * is_backprop = * / true ) ) ; <nl> + needed_spatial_size , kNumSplits , / * is_backprop = * / true ) ) ; <nl> <nl> old_to_new_instrs_ [ activations_old ] = retval . first ; <nl> - instr_to_dim_permute_map_ [ retval . first ] = retval . second ; <nl> <nl> - VLOG ( 3 ) < < " Edited conv dims " < < original_conv_dims . DebugString ( ) ; <nl> + std : : vector < int64 > reversed_transpose_dims ( retval . second . size ( ) ) ; <nl> + for ( int64 i = 0 ; i < retval . second . size ( ) ; + + i ) { <nl> + reversed_transpose_dims [ i ] = ReverseDimLookUp ( retval . second , i ) ; <nl> + } <nl> + instr_to_dim_permute_map_ [ retval . first ] = reversed_transpose_dims ; <nl> + <nl> + VLOG ( 3 ) < < " New Activations " < < retval . first - > ToString ( ) ; <nl> + <nl> + activations_locally_space_to_batched = true ; <nl> } <nl> <nl> CHECK ( old_to_new_instrs_ . contains ( activations_old ) ) ; <nl> Status ConvolutionVisitor : : PropagateOnBackpropFilterConv ( <nl> i , DimLookUp ( permute_dims , <nl> original_conv_dims . input_spatial_dimensions ( i ) ) ) ; <nl> permuted_conv_dims_numbers . set_kernel_spatial_dimensions ( <nl> - i , DimLookUp ( permute_dims , <nl> + i , DimLookUp ( permute_dims_kernel , <nl> original_conv_dims . kernel_spatial_dimensions ( i ) ) ) ; <nl> } <nl> <nl> Status ConvolutionVisitor : : PropagateOnBackpropFilterConv ( <nl> previous_spatial_dim_count , previous_chosen_spatial_dim_in_output ) ; <nl> <nl> const int64 kernel_input_feature_dim = DimLookUp ( <nl> - permute_dims , original_conv_dims . kernel_input_feature_dimension ( ) ) ; <nl> + permute_dims_kernel , original_conv_dims . kernel_input_feature_dimension ( ) ) ; <nl> <nl> - const int64 kernel_output_feature_dim = DimLookUp ( <nl> - permute_dims , original_conv_dims . kernel_output_feature_dimension ( ) ) ; <nl> + const int64 kernel_output_feature_dim = <nl> + DimLookUp ( permute_dims_kernel , <nl> + original_conv_dims . kernel_output_feature_dimension ( ) ) ; <nl> <nl> permuted_conv_dims_numbers . set_kernel_input_feature_dimension ( <nl> kernel_input_feature_dim ) ; <nl> Status ConvolutionVisitor : : PropagateOnBackpropFilterConv ( <nl> <nl> VLOG ( 1 ) < < " Propagating on conv activations_batch_dim " <nl> < < activations_batch_dim < < " spatial_dimension_to_split " <nl> - < < spatial_dimension_to_split < < " old_batch_size " < < old_batch_size ; <nl> + < < spatial_dimension_to_split < < " old_batch_size " < < old_batch_size <nl> + < < " new_split_dim_size " < < new_split_dim_size ; <nl> <nl> TF_ASSIGN_OR_RETURN ( <nl> auto retval , <nl> Status ConvolutionVisitor : : PropagateOnBackpropFilterConv ( <nl> spatial_dimension_to_split , activations_batch_dim , <nl> / * is_backprop = * / true ) ) ; <nl> std : : vector < int64 > transpose_dims = retval . transpose_dims ; <nl> + CHECK ( ! transpose_dims . empty ( ) ) ; <nl> activations_new = retval . 
instr ; <nl> <nl> VLOG ( 1 ) < < " Activations_new post BringSpaceNextToBatch " <nl> Status ConvolutionVisitor : : PropagateOnBackpropFilterConv ( <nl> auto select_val = computation_ - > AddInstruction ( HloInstruction : : CreateConstant ( <nl> LiteralUtil : : Zero ( activations_new - > shape ( ) . element_type ( ) ) ) ) ; <nl> <nl> - / / Select activations correctly by masking additional space . <nl> - TF_ASSIGN_OR_RETURN ( <nl> - activations_new , <nl> - SelectValidPortion ( activations_new , activations_old , select_val , <nl> - activations_batch_dim , spatial_dimension_to_split , <nl> - old_batch_dim , old_space_dim ) ) ; <nl> - <nl> + if ( ! activations_locally_space_to_batched ) { <nl> + / / Select activations correctly by masking additional space . <nl> + TF_ASSIGN_OR_RETURN ( <nl> + activations_new , <nl> + SelectValidPortion ( activations_new , activations_old , select_val , <nl> + activations_batch_dim , spatial_dimension_to_split , <nl> + old_batch_dim , old_space_dim ) ) ; <nl> + } <nl> + VLOG ( 3 ) < < " Selecting the valid kernel area " ; <nl> / / Select kernel correctly by masking additional space . <nl> TF_ASSIGN_OR_RETURN ( <nl> kernel_new , <nl> Status ConvolutionVisitor : : PerformSpaceToBatchOnConvolution ( <nl> <nl> VLOG ( 1 ) < < " spatial size " < < c . spatial_size ; <nl> <nl> - const int64 num_splits = kNewBatchSize / old_batch_size ; <nl> auto original_conv = convolution ; <nl> <nl> const int64 output_spatial_dim = dim_numbers . output_spatial_dimensions ( <nl> Status ConvolutionVisitor : : PerformSpaceToBatchOnConvolution ( <nl> const int64 output_offsets = <nl> convolution - > shape ( ) . dimensions ( output_spatial_dim ) ; <nl> const int64 output_offsets_per_split = <nl> - CeilOfRatio ( output_offsets , num_splits ) ; <nl> + CeilOfRatio ( output_offsets , kNumSplits ) ; <nl> <nl> int64 spatial_split_size = <nl> CeilOfRatio ( output_offsets_per_split , c . base_dilation_factor ) * c . stride ; <nl> / / Keep increasing the split size so that overall size isn ' t smaller than the <nl> / / original spatial dimension . <nl> - while ( spatial_split_size * num_splits - c . spatial_size < 0 ) { <nl> + while ( spatial_split_size * kNumSplits - c . spatial_size < 0 ) { <nl> spatial_split_size + = c . stride ; <nl> } <nl> <nl> Status ConvolutionVisitor : : PerformSpaceToBatchOnConvolution ( <nl> const int64 slice_size = spatial_split_size + c . halo_size ; <nl> <nl> / / Pad spatial dim . <nl> - const int64 pad_size = spatial_split_size * num_splits - c . spatial_size ; <nl> + const int64 pad_size = spatial_split_size * kNumSplits - c . spatial_size ; <nl> <nl> VLOG ( 1 ) < < " spatial_split_size " < < spatial_split_size < < " stride " <nl> < < c . stride < < " slice_size " < < slice_size ; <nl> VLOG ( 1 ) < < " spatial_dimension_to_split " < < c . spatial_dimension_to_split <nl> - < < " num_splits " < < num_splits < < " kernel_spatial_dim_size " <nl> + < < " num_splits " < < kNumSplits < < " kernel_spatial_dim_size " <nl> < < c . kernel_spatial_dim_size ; <nl> int64 spatial_dimension_to_split = c . spatial_dimension_to_split ; <nl> TF_ASSIGN_OR_RETURN ( <nl> Status ConvolutionVisitor : : PerformSpaceToBatchOnConvolution ( <nl> / * low_padding = * / c . base_dilation_factor = = 1 <nl> ? c . inherent_low_padding <nl> : 0 , <nl> - spatial_split_size , num_splits ) ) ; <nl> + spatial_split_size , kNumSplits ) ) ; <nl> HloInstruction * batch_increased_reshape = retval . 
first ; <nl> convolution - > SetupDerivedInstruction ( batch_increased_reshape ) ; <nl> <nl> mmm a / tensorflow / compiler / xla / tests / dynamism_inference_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / dynamism_inference_test . cc <nl> TEST_F ( DynamismInferenceTest , GatherWithSharedConstantParent ) { <nl> } <nl> } <nl> <nl> + TEST_F ( DynamismInferenceTest , InferThroughPad ) { <nl> + for ( ClientType client_type : client_types ) { <nl> + Client * client = ClientOrDie ( platform_ , client_type ) ; <nl> + XlaBuilder b ( TestName ( ) ) ; <nl> + / / Test the analysis on a pad . <nl> + auto operand1 = ConstantR1 < int32 > ( & b , { 1 , 2 } ) ; <nl> + auto parameter = Parameter ( & b , 0 , ShapeUtil : : MakeShape ( S32 , { } ) , " p0 " ) ; <nl> + PaddingConfig padding_config ; <nl> + padding_config . add_dimensions ( ) - > set_edge_padding_high ( 1 ) ; <nl> + / / After pad the value is [ constant , constant , parameter ] . <nl> + auto pad = Pad ( operand1 , parameter , padding_config ) ; <nl> + ASSERT_TRUE ( b . first_error ( ) . ok ( ) ) < < b . first_error ( ) . error_message ( ) ; <nl> + / / The first two elements come from the constant operand , so they are <nl> + / / static ; the last element is the dynamic pad value . <nl> + EXPECT_FALSE ( <nl> + ComputeDynamismLiteral ( client , pad , & b ) . ValueOrDie ( ) . Get < bool > ( { 0 } ) ) ; <nl> + EXPECT_FALSE ( <nl> + ComputeDynamismLiteral ( client , pad , & b ) . ValueOrDie ( ) . Get < bool > ( { 1 } ) ) ; <nl> + EXPECT_TRUE ( <nl> + ComputeDynamismLiteral ( client , pad , & b ) . ValueOrDie ( ) . Get < bool > ( { 2 } ) ) ; <nl> + } <nl> + } <nl> + <nl> } / / namespace <nl> } / / namespace xla <nl> mmm a / tensorflow / compiler / xla / tests / llvm_compiler_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / llvm_compiler_test . cc <nl> class GpuDummyCompiler : public GpuCompiler { <nl> <nl> StatusOr < std : : pair < std : : string , std : : vector < uint8 > > > CompileTargetBinary ( <nl> const HloModule * hlo_module , llvm : : Module * llvm_module , <nl> - GpuVersion gpu_version , se : : StreamExecutor * stream_exec ) { <nl> + GpuVersion gpu_version , se : : StreamExecutor * stream_exec , <nl> + bool relocatable ) { <nl> if ( user_post_optimization_hook_ ) { <nl> user_post_optimization_hook_ ( * llvm_module ) ; <nl> } <nl> mmm a / tensorflow / core / api_def / base_api / api_def_InplaceAdd . pbtxt <nl> ppp b / tensorflow / core / api_def / base_api / api_def_InplaceAdd . pbtxt <nl> op { <nl> " A ` Tensor ` of type T . An alias of ` x ` . The content " <nl> " of ` y ` is undefined if there are duplicates in ` i ` . " <nl> } <nl> - summary : < < END <nl> - Adds v into specified rows of x . <nl> - <nl> + summary : " Adds v into specified rows of x . " <nl> + description : < < END <nl> Computes y = x ; y [ i , : ] + = v ; return y . <nl> END <nl> } <nl> mmm a / tensorflow / core / api_def / base_api / api_def_TopKUnique . pbtxt <nl> ppp b / tensorflow / core / api_def / base_api / api_def_TopKUnique . pbtxt <nl> <nl> op { <nl> graph_op_name : " TopKUnique " <nl> - summary : " Returns the TopK unique values in the array in sorted order . The " <nl> + summary : " Returns the TopK unique values in the array in sorted order . " <nl> description : < < END <nl> - running time is proportional to the product of K and the input <nl> + The running time is proportional to the product of K and the input <nl> size . Sorting the whole array is more efficient for sufficiently large <nl> values of K . 
The median - of - medians algorithm is probably faster , but <nl> difficult to implement efficiently in XLA . If there are fewer than K <nl> mmm a / tensorflow / core / api_def / base_api / api_def_TopKWithUnique . pbtxt <nl> ppp b / tensorflow / core / api_def / base_api / api_def_TopKWithUnique . pbtxt <nl> <nl> op { <nl> graph_op_name : " TopKWithUnique " <nl> - summary : " Returns the TopK values in the array in sorted order . This is a combination " <nl> + summary : " Returns the TopK values in the array in sorted order . " <nl> description : < < END <nl> - of MakeUnique and TopKUnique . The returned top - K will have its lower bits <nl> - replaced by iota , thus it will be close to the original value but not exactly <nl> - the same . The running time is proportional to the product of K and the input <nl> - size . NaNs are never returned . Subnormal numbers are flushed to zero . <nl> + This is a combination of MakeUnique and TopKUnique . The returned top - K will <nl> + have its lower bits replaced by iota , thus it will be close to the original <nl> + value but not exactly the same . The running time is proportional to the product <nl> + of K and the input size . NaNs are never returned . Subnormal numbers are flushed <nl> + to zero . <nl> END <nl> } <nl> mmm a / tensorflow / core / common_runtime / eager / context . cc <nl> ppp b / tensorflow / core / common_runtime / eager / context . cc <nl> Status EagerContext : : RegisterExistingFunctionsOnRemoteWorkers ( <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - Status EagerContext : : AddFunctionDefWithDebugInfo ( <nl> - const FunctionDef & fdef , const Graph * graph_with_debug_info ) { <nl> + Status EagerContext : : AddFunctionDefWithStackTraces ( <nl> + const FunctionDef & fdef , const StackTracesMap & stack_traces ) { <nl> return AddFunctionDef ( fdef , FunctionDefLibrary ( ) , <nl> - / * add_to_local_only = * / false , graph_with_debug_info ) ; <nl> + / * add_to_local_only = * / false , stack_traces ) ; <nl> } <nl> <nl> Status EagerContext : : AddFunctionDef ( const FunctionDef & fdef ) { <nl> Status EagerContext : : AddFunctionDef ( const FunctionDef & fdef ) { <nl> Status EagerContext : : AddFunctionDef ( const FunctionDef & fdef , <nl> const FunctionDefLibrary & library , <nl> const bool add_to_local_only , <nl> - const Graph * graph_with_debug_info ) { <nl> + const StackTracesMap & stack_traces ) { <nl> bool is_first_ref = false ; <nl> { <nl> mutex_lock l ( cache_mu_ ) ; <nl> Status EagerContext : : AddFunctionDef ( const FunctionDef & fdef , <nl> is_first_ref = registered_function - > RefCountIsOne ( ) ; <nl> } <nl> if ( is_first_ref ) { <nl> - TF_RETURN_IF_ERROR ( <nl> - func_lib_def_ . AddFunctionDef ( fdef , graph_with_debug_info ) ) ; <nl> + TF_RETURN_IF_ERROR ( func_lib_def_ . AddFunctionDef ( fdef , stack_traces ) ) ; <nl> TF_RETURN_IF_ERROR ( func_lib_def_ . AddLibrary ( library ) ) ; <nl> if ( ! add_to_local_only ) { <nl> return MaybeRegisterFunctionRemotely ( fdef ) ; <nl> mmm a / tensorflow / core / common_runtime / eager / context . h <nl> ppp b / tensorflow / core / common_runtime / eager / context . h <nl> class EagerContext : public ImmediateExecutionContext , public core : : RefCounted { <nl> / / entry to the KernelAndDevice cache for it if it ' s not exist . 
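<nl> / / Illustrative sketch , not part of this change : stack traces travel as a <nl> / / StackTracesMap keyed by node name , e . g . <nl> / / StackTracesMap traces ; <nl> / / traces [ " my_node " ] = std : : make_shared < MyStackTrace > ( ) ; <nl> / / ctx - > AddFunctionDefWithStackTraces ( fdef , traces ) ; <nl> / / where ` MyStackTrace ` is a hypothetical AbstractStackTrace subclass ( see <nl> / / DummyStackTrace in function_test . cc ) and ` ctx ` a hypothetical <nl> / / EagerContext pointer .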
<nl> Status AddFunctionDef ( const FunctionDef & fdef ) override ; <nl> <nl> - Status AddFunctionDefWithDebugInfo ( <nl> - const FunctionDef & fdef , const Graph * graph_with_debug_info ) override ; <nl> + Status AddFunctionDefWithStackTraces ( <nl> + const FunctionDef & fdef , const StackTracesMap & stack_traces ) override ; <nl> <nl> / / ` library ` contains all FunctionDefs and GradientDefs to expand ` fdef ` . Add <nl> / / it to the local FunctionLibraryDefinition as well , but no need to add it <nl> class EagerContext : public ImmediateExecutionContext , public core : : RefCounted { <nl> Status AddFunctionDef ( const FunctionDef & fdef , <nl> const FunctionDefLibrary & library , <nl> const bool add_to_local_only = false , <nl> - const Graph * graph_with_debug_info = nullptr ) ; <nl> + const StackTracesMap & stack_traces = { } ) ; <nl> <nl> const FunctionDef * GetFunctionDef ( const string & function_name ) ; <nl> <nl> mmm a / tensorflow / core / common_runtime / function_def_utils . cc <nl> ppp b / tensorflow / core / common_runtime / function_def_utils . cc <nl> Status FunctionDefToBodyHelper ( <nl> InstantiationResult result ; <nl> TF_RETURN_IF_ERROR ( InstantiateFunction ( fdef , attrs , get_func_sig , & result ) ) ; <nl> <nl> - std : : unique_ptr < Graph > graph ( new Graph ( lib_def ) ) ; <nl> + auto graph = absl : : make_unique < Graph > ( lib_def ) ; <nl> graph - > SetConstructionContext ( ConstructionContext : : kFunctionDef ) ; <nl> - <nl> GraphConstructorOptions opts ; <nl> opts . allow_internal_ops = true ; <nl> opts . expect_device_spec = false ; <nl> TF_RETURN_IF_ERROR ( ConvertNodeDefsToGraph ( opts , result . nodes , graph . get ( ) ) ) ; <nl> <nl> + const StackTracesMap & stack_traces = <nl> + lib_def - > GetStackTraces ( fdef . signature ( ) . name ( ) ) ; <nl> + for ( Node * n : graph - > nodes ( ) ) { <nl> + auto it = stack_traces . find ( n - > name ( ) ) ; <nl> + if ( n & & it ! = stack_traces . end ( ) ) { <nl> + n - > SetStackTrace ( it - > second ) ; <nl> + } <nl> + } <nl> + <nl> / / Call BuildControlFlowInfo to validate that this function body has <nl> / / well - formed control flow . <nl> std : : vector < ControlFlowInfo > dummy ; <nl> mmm a / tensorflow / core / common_runtime / function_test . cc <nl> ppp b / tensorflow / core / common_runtime / function_test . 
cc <nl> TEST_F ( FunctionLibraryRuntimeTest , XTimesTwo ) { <nl> test : : ExpectTensorEqual < float > ( y , test : : AsTensor < float > ( { 2 , 4 , 6 , 8 } ) ) ; <nl> } <nl> <nl> + TEST_F ( FunctionLibraryRuntimeTest , InstantiationStackTraceCopying ) { <nl> + class DummyStackTrace : public AbstractStackTrace { <nl> + absl : : Span < StackFrame const > ToFrames ( ) const override { return { } ; } <nl> + <nl> + std : : string ToString ( const TracePrintingOptions & opts ) const override { <nl> + return " DummyStackTrace " ; <nl> + } <nl> + } ; <nl> + <nl> + FunctionDef func = test : : function : : XTimesTwo ( ) ; <nl> + Init ( { } ) ; <nl> + <nl> + StackTracesMap stack_traces ; <nl> + stack_traces [ " two " ] = std : : make_shared < DummyStackTrace > ( ) ; <nl> + <nl> + TF_CHECK_OK ( lib_def_ - > AddFunctionDef ( func , stack_traces ) ) ; <nl> + <nl> + FunctionLibraryRuntime : : Handle handle ; <nl> + TF_CHECK_OK ( Instantiate ( flr0_ , " XTimesTwo " , { { " T " , DT_FLOAT } } , { } , & handle ) ) ; <nl> + <nl> + const FunctionBody * func_body = flr0_ - > GetFunctionBody ( handle ) ; <nl> + for ( const Node * node : func_body - > graph - > nodes ( ) ) { <nl> + if ( node - > name ( ) = = " two " ) { <nl> + EXPECT_EQ ( node - > GetStackTrace ( ) - > ToString ( { } ) , " DummyStackTrace " ) ; <nl> + } <nl> + } <nl> + TF_CHECK_OK ( flr0_ - > ReleaseHandle ( handle ) ) ; <nl> + } <nl> + <nl> TEST_F ( FunctionLibraryRuntimeTest , XTimesTwo_MultiDeviceBacked ) { <nl> Init ( { test : : function : : XTimesTwo ( ) } ) ; <nl> auto x = test : : AsTensor < float > ( { 1 , 2 , 3 , 4 } ) ; <nl> mmm a / tensorflow / core / common_runtime / graph_constructor . cc <nl> ppp b / tensorflow / core / common_runtime / graph_constructor . cc <nl> limitations under the License . <nl> # include " tensorflow / core / lib / gtl / inlined_vector . h " <nl> # include " tensorflow / core / lib / strings / scanner . h " <nl> # include " tensorflow / core / lib / strings / str_util . h " <nl> + # include " tensorflow / core / platform / errors . h " <nl> # include " tensorflow / core / platform / logging . h " <nl> # include " tensorflow / core / platform / macros . h " <nl> # include " tensorflow / core / public / version . h " <nl> void GraphConstructor : : Undo ( ) { <nl> <nl> Status GraphConstructor : : MakeEdge ( Node * src , int output_index , Node * dst , <nl> int input_index ) { <nl> + if ( output_index > = src - > num_outputs ( ) ) { <nl> + return errors : : InvalidArgument ( <nl> + " Output " , output_index , " of node " , src - > name ( ) , <nl> + " does not exist . Node only has " , src - > num_outputs ( ) , " outputs . " ) ; <nl> + } <nl> + if ( input_index > = dst - > num_inputs ( ) ) { <nl> + return errors : : InvalidArgument ( <nl> + " Input " , input_index , " of node " , dst - > name ( ) , <nl> + " does not exist . Node only has " , dst - > num_inputs ( ) , " inputs . " ) ; <nl> + } <nl> + <nl> DataType src_out = src - > output_type ( output_index ) ; <nl> DataType dst_in = dst - > input_type ( input_index ) ; <nl> if ( ! TypesCompatible ( dst_in , src_out ) ) { <nl> mmm a / tensorflow / core / framework / function . cc <nl> ppp b / tensorflow / core / framework / function . 
cc <nl> Status FunctionCallFrame : : SetRetval ( int index , const Tensor & val ) { <nl> <nl> FunctionLibraryDefinition : : FunctionDefAndOpRegistration : : <nl> FunctionDefAndOpRegistration ( const FunctionDef & fdef_in , <nl> - const Graph * graph_with_debug_info ) <nl> + const StackTracesMap & stack_traces ) <nl> : fdef ( fdef_in ) , <nl> / / Exact shape inference for functions is handled by ShapeRefiner . <nl> / / Here we pass a dummy shape inference function for legacy code paths . <nl> op_registration_data ( fdef . signature ( ) , shape_inference : : UnknownShape , <nl> true / * is_function * / ) , <nl> - graph_with_debug_info ( graph_with_debug_info ) { } <nl> + stack_traces ( stack_traces ) { } <nl> <nl> FunctionLibraryDefinition : : FunctionLibraryDefinition ( <nl> const FunctionLibraryDefinition & other ) <nl> FunctionLibraryDefinition : : FindHelper ( const string & func ) const { <nl> } <nl> <nl> Status FunctionLibraryDefinition : : AddFunctionDef ( <nl> - const FunctionDef & fdef , const Graph * graph_with_debug_info ) { <nl> + const FunctionDef & fdef , const StackTracesMap & stack_traces ) { <nl> mutex_lock l ( mu_ ) ; <nl> bool added ; <nl> - return AddFunctionDefHelper ( fdef , graph_with_debug_info , & added ) ; <nl> + return AddFunctionDefHelper ( fdef , stack_traces , & added ) ; <nl> } <nl> <nl> Status FunctionLibraryDefinition : : AddFunctionDefHelper ( <nl> - const FunctionDef & fdef , const Graph * graph_with_debug_info , bool * added ) { <nl> + const FunctionDef & fdef , const StackTracesMap & stack_traces , bool * added ) { <nl> * added = false ; <nl> std : : shared_ptr < FunctionDefAndOpRegistration > & entry = <nl> function_defs_ [ fdef . signature ( ) . name ( ) ] ; <nl> Status FunctionLibraryDefinition : : AddFunctionDefHelper ( <nl> " Cannot add function ' " , fdef . signature ( ) . name ( ) , <nl> " ' because an op with the same name already exists . " ) ; <nl> } <nl> - entry = std : : make_shared < FunctionDefAndOpRegistration > ( fdef , <nl> - graph_with_debug_info ) ; <nl> + entry = std : : make_shared < FunctionDefAndOpRegistration > ( fdef , stack_traces ) ; <nl> * added = true ; <nl> return Status : : OK ( ) ; <nl> } <nl> Status FunctionLibraryDefinition : : AddLibrary ( <nl> Status s ; <nl> bool added ; <nl> for ( const FunctionDef & fdef : lib_def . function ( ) ) { <nl> - s = AddFunctionDefHelper ( fdef , / * graph_with_debug_info = * / nullptr , & added ) ; <nl> + s = AddFunctionDefHelper ( fdef , / * stack_traces = * / { } , & added ) ; <nl> if ( ! s . ok ( ) ) { <nl> Remove ( funcs , funcs_with_grads ) ; <nl> return s ; <nl> Status FunctionLibraryDefinition : : ReplaceFunction ( const string & func , <nl> mutex_lock l ( mu_ ) ; <nl> bool added ; <nl> TF_RETURN_IF_ERROR ( RemoveFunctionHelper ( func ) ) ; <nl> - TF_RETURN_IF_ERROR ( <nl> - AddFunctionDefHelper ( fdef , / * graph_with_debug_info = * / nullptr , & added ) ) ; <nl> + TF_RETURN_IF_ERROR ( AddFunctionDefHelper ( fdef , / * stack_traces = * / { } , & added ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> mmm a / tensorflow / core / framework / function . h <nl> ppp b / tensorflow / core / framework / function . 
h <nl> class AbstractStackTrace { <nl> virtual std : : string ToString ( const TracePrintingOptions & opts ) const = 0 ; <nl> } ; <nl> <nl> + using StackTracesMap = <nl> + std : : unordered_map < std : : string , <nl> + std : : shared_ptr < tensorflow : : AbstractStackTrace > > ; <nl> + <nl> / / Helper to maintain a map between function names in a given <nl> / / FunctionDefLibrary and function definitions . <nl> / / <nl> class FunctionLibraryDefinition : public OpRegistryInterface { <nl> - / / Associates ` graph ` with a function ` func_name ` . Lifetime assumption : <nl> - / / ` graph ` has to outlive all instantiated graphs . <nl> + / / Adds function definition ` fdef ` to this function library , optionally <nl> + / / associating per - node stack traces with it . <nl> Status AddFunctionDef ( const FunctionDef & fdef , <nl> - const Graph * graph_with_debug_info = nullptr ) <nl> + const StackTracesMap & stack_traces = { } ) <nl> TF_LOCKS_EXCLUDED ( mu_ ) ; <nl> <nl> / / Adds gradient definition ' grad ' to this function library . <nl> class FunctionLibraryDefinition : public OpRegistryInterface { <nl> <nl> - / / Returns graph with debug stack traces for the given function , or ` nullptr ` <nl> - / / if none found . <nl> + / / Returns the stack traces for the given function , or an empty map if none <nl> + / / found . <nl> - const Graph * GetGraphWithDebugInfo ( const std : : string & func_name ) const { <nl> + const StackTracesMap & GetStackTraces ( const std : : string & func_name ) const { <nl> tf_shared_lock l ( mu_ ) ; <nl> std : : shared_ptr < FunctionDefAndOpRegistration > entry = FindHelper ( func_name ) ; <nl> - return entry ? entry - > graph_with_debug_info : nullptr ; <nl> + if ( entry ) { <nl> + return entry - > stack_traces ; <nl> + } <nl> + static const auto * empty_map = new StackTracesMap ; <nl> + return * empty_map ; <nl> } <nl> <nl> private : <nl> class FunctionLibraryDefinition : public OpRegistryInterface { <nl> <nl> struct FunctionDefAndOpRegistration { <nl> explicit FunctionDefAndOpRegistration ( <nl> - const FunctionDef & fdef_in , <nl> - const Graph * graph_with_debug_info = nullptr ) ; <nl> + const FunctionDef & fdef_in , const StackTracesMap & stack_traces = { } ) ; <nl> <nl> const FunctionDef fdef ; <nl> const OpRegistrationData op_registration_data ; <nl> - const Graph * graph_with_debug_info ; <nl> + const StackTracesMap stack_traces ; <nl> } ; <nl> <nl> std : : shared_ptr < FunctionDefAndOpRegistration > FindHelper ( <nl> class FunctionLibraryDefinition : public OpRegistryInterface { <nl> / / Same as AddFunctionDef / AddGradientDef except these methods set <nl> / / ` added ` to true if the ` fdef ` / ` grad ` were actually added to this . <nl> Status AddFunctionDefHelper ( const FunctionDef & fdef , <nl> - const Graph * graph_with_debug_info , bool * added ) <nl> + const StackTracesMap & stack_traces , bool * added ) <nl> TF_EXCLUSIVE_LOCKS_REQUIRED ( mu_ ) ; <nl> Status AddGradientDefHelper ( const GradientDef & grad , bool * added ) <nl> TF_EXCLUSIVE_LOCKS_REQUIRED ( mu_ ) ; <nl> mmm a / tensorflow / core / framework / model . cc <nl> ppp b / tensorflow / core / framework / model . cc <nl> inline bool IsAutotuneNode ( const std : : shared_ptr < Node > node ) { <nl> / / Wrapper for the square function to reduce verbosity . <nl> inline double Square ( double x ) { return x * x ; } <nl> <nl> + / / Collects " essential " parallelism parameters and buffer size parameters in the <nl> + / / tree rooted in the given node . Which parallelism parameters are essential is <nl> + / / determined by the relative processing time spent in the corresponding <nl> + / / transformation . The collected parameters are returned via maps that map node <nl> + / / names to their respective parameters . 
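<nl> + / / For example , in a pipeline with four autotunable nodes , uniform_share is <nl> + / / a quarter of the total processing time , so a parallelism parameter is <nl> + / / collected as essential only if its node spends more than kEssentialRate <nl> + / / ( 0 . 3 ) times that share ; buffer size parameters are always collected .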
<nl> + inline void CollectParameters ( <nl> + std : : shared_ptr < Node > node , <nl> + const absl : : flat_hash_map < string , std : : shared_ptr < Parameter > > & parameters , <nl> + absl : : flat_hash_map < string , std : : shared_ptr < Parameter > > * <nl> + parallelism_parameters , <nl> + absl : : flat_hash_map < string , std : : shared_ptr < Parameter > > * <nl> + buffer_size_parameters ) { <nl> + / / A parallelism parameter is considered to be essential if the corresponding <nl> + / / transformation ' s processing time is greater than the essential rate times <nl> + / / the average transformation self processing time . <nl> + constexpr double kEssentialRate = 0 . 3L ; <nl> + <nl> + absl : : flat_hash_map < string , double > processing_times ; <nl> + double processing_time = node - > TotalProcessingTime ( & processing_times ) ; <nl> + double uniform_share = <nl> + processing_time / static_cast < double > ( processing_times . size ( ) ) ; <nl> + for ( auto & pair : parameters ) { <nl> + if ( pair . second - > name = = kParallelism & & <nl> + processing_times [ pair . first ] > kEssentialRate * uniform_share ) { <nl> + parallelism_parameters - > insert ( pair ) ; <nl> + } else if ( pair . second - > name = = kBufferSize ) { <nl> + buffer_size_parameters - > insert ( pair ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / / Applies the gradient descent method once and updates the parameter values . <nl> + / / If the new value is out of range , it is clamped to the interval between the <nl> + / / minimum and maximum values . <nl> + inline void UpdateParameterValues ( <nl> + const absl : : flat_hash_map < string , double > & gradients , <nl> + absl : : flat_hash_map < string , std : : shared_ptr < Parameter > > * parameters ) { <nl> + / / Gradient descent step size . <nl> + constexpr double kDescentStep = 0 . 1L ; <nl> + double new_value ; <nl> + <nl> + double max_abs_derivative = 1 . 0 ; <nl> + for ( auto & pair : * parameters ) { <nl> + if ( std : : round ( pair . second - > value ) ! = pair . second - > max ) { <nl> + auto * gradient = gtl : : FindOrNull ( gradients , pair . first ) ; <nl> + if ( gradient ) { <nl> + max_abs_derivative = std : : max ( max_abs_derivative , std : : abs ( * gradient ) ) ; <nl> + } <nl> + } <nl> + } <nl> + for ( auto & pair : * parameters ) { <nl> + auto * gradient = gtl : : FindOrNull ( gradients , pair . first ) ; <nl> + if ( gradient ) { <nl> + new_value = <nl> + pair . second - > value - kDescentStep * ( * gradient ) / max_abs_derivative ; <nl> + / / Projection on a feasible interval . <nl> + if ( new_value > pair . second - > max ) { <nl> + pair . second - > value = pair . second - > max ; <nl> + } else if ( new_value < pair . second - > min ) { <nl> + pair . second - > value = pair . second - > min ; <nl> + } else { <nl> + pair . second - > value = new_value ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + / / Copies the parameter values ( which are for optimization tuning ) and updates <nl> + / / the state values ( which are for the input pipeline to follow ) . <nl> + inline void UpdateStateValues ( <nl> + absl : : flat_hash_map < string , std : : shared_ptr < Parameter > > * parameters ) { <nl> + VLOG ( 2 ) < < " Number of tunable parameters : " < < parameters - > size ( ) ; <nl> + for ( auto & pair : * parameters ) { <nl> + auto & parameter = pair . second ; <nl> + VLOG ( 2 ) < < " Setting tunable parameter " < < pair . 
first < < " to " <nl> + < < parameter - > value ; <nl> + mutex_lock l ( * parameter - > state - > mu ) ; <nl> + parameter - > state - > value = parameter - > value ; <nl> + parameter - > state - > cond_var - > notify_all ( ) ; <nl> + } <nl> + } <nl> + <nl> / / The first input of InterleaveMany corresponds to the input dataset whose <nl> / / elements are used to create the ( derived ) input datasets whose elements are <nl> / / interleaved as output . <nl> Model : : CollectTunableParameters ( std : : shared_ptr < Node > node ) { <nl> return parameters ; <nl> } <nl> <nl> - absl : : flat_hash_map < string , std : : shared_ptr < Parameter > > <nl> - Model : : CollectEssentialParallelism ( <nl> - std : : shared_ptr < Node > node , <nl> - const absl : : flat_hash_map < string , std : : shared_ptr < Parameter > > & parameters ) { <nl> - / / Parallelism parameter is considered to be essential if the corresponding <nl> - / / transformations ' s processing time is greater than essential rate times the <nl> - / / average transformation self processing time . <nl> - constexpr double kEssentialRate = 0 . 3L ; <nl> + bool Model : : ShouldStop ( <nl> + int64 cpu_budget , int64 ram_budget , <nl> + const absl : : flat_hash_map < string , std : : shared_ptr < Parameter > > & parameters , <nl> + const absl : : flat_hash_map < string , std : : shared_ptr < Parameter > > & <nl> + parallelism_parameters , <nl> + const absl : : flat_hash_map < string , std : : shared_ptr < Parameter > > & <nl> + buffer_size_parameters , <nl> + std : : shared_ptr < Node > snapshot , bool * cpu_budget_reached ) { <nl> + if ( ! ( * cpu_budget_reached ) ) { <nl> + / / If those essential transformations ' parallelism reaches the CPU <nl> + / / budget , we will only tune the buffer size parameters in future <nl> + / / iterations . <nl> + int64 model_parallelism = 0 ; <nl> + for ( auto & pair : parallelism_parameters ) { <nl> + model_parallelism + = std : : round ( pair . second - > value ) ; <nl> + } <nl> + * cpu_budget_reached = ( model_parallelism > cpu_budget ) ; <nl> + } <nl> <nl> - absl : : flat_hash_map < string , double > processing_times ; <nl> - double processing_time = node - > TotalProcessingTime ( & processing_times ) ; <nl> - double uniform_share = <nl> - processing_time / static_cast < double > ( processing_times . size ( ) ) ; <nl> - absl : : flat_hash_map < string , std : : shared_ptr < Parameter > > essential_parameters ; <nl> - for ( auto & pair : parameters ) { <nl> - if ( pair . second - > name = = kParallelism & & <nl> - processing_times [ pair . first ] > kEssentialRate * uniform_share ) { <nl> - essential_parameters . insert ( pair ) ; <nl> + bool all_max = true ; <nl> + for ( auto & pair : <nl> + ( * cpu_budget_reached ? buffer_size_parameters : parameters ) ) { <nl> + if ( std : : round ( pair . second - > value ) < pair . second - > max ) { <nl> + all_max = false ; <nl> + break ; <nl> } <nl> } <nl> - return essential_parameters ; <nl> + <nl> + / / If all parameters have reached their maximum values or RAM budget is <nl> + / / reached , we stop the iterations . 
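<nl> + / / For instance , once the snapshot ' s worst - case buffered bytes exceed the <nl> + / / RAM budget , the search stops even if some parameters could still grow .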
<nl> + return all_max | | TotalMaximumBufferedBytes ( snapshot ) > ram_budget ; <nl> } <nl> <nl> void Model : : OptimizeGradientDescent ( int64 cpu_budget , int64 ram_budget , <nl> void Model : : OptimizeGradientDescent ( int64 cpu_budget , int64 ram_budget , <nl> } <nl> VLOG ( 2 ) < < " Starting optimization of tunable parameters with GradientDescent " ; <nl> auto parameters = CollectTunableParameters ( snapshot ) ; <nl> - auto essential_parameters = CollectEssentialParallelism ( snapshot , parameters ) ; <nl> + / / The maps of " essential " parallelism parameters and buffer size parameters . <nl> + absl : : flat_hash_map < string , std : : shared_ptr < Parameter > > <nl> + parallelism_parameters , buffer_size_parameters ; <nl> + CollectParameters ( snapshot , parameters , & parallelism_parameters , <nl> + & buffer_size_parameters ) ; <nl> + <nl> + / / Initialize the parameter values to minimal before tuning . <nl> for ( auto & pair : parameters ) { <nl> pair . second - > value = pair . second - > min ; <nl> } <nl> - / / Gradient descent step size . <nl> - constexpr double kDescentStep = 0 . 1L ; <nl> <nl> / / Optimization is stopped once the ` OutputTime ` improvement is smaller than <nl> / / this value . <nl> void Model : : OptimizeGradientDescent ( int64 cpu_budget , int64 ram_budget , <nl> <nl> double output_time = 0 ; <nl> double new_output_time ; <nl> - double new_value ; <nl> - for ( int i = 0 ; i < kMaxIterations ; + + i ) { <nl> + <nl> + / / When the CPU budget is reached , the parallelism parameter values are fixed <nl> + / / and we only increase the buffer size parameters . <nl> + bool cpu_budget_reached = false ; <nl> + <nl> + for ( int i = 0 ; <nl> + i < kMaxIterations & & <nl> + ! ShouldStop ( cpu_budget , ram_budget , parameters , parallelism_parameters , <nl> + buffer_size_parameters , snapshot , & cpu_budget_reached ) ; <nl> + + + i ) { <nl> absl : : flat_hash_map < string , double > gradients ; <nl> new_output_time = OutputTime ( snapshot , model_input_time , & gradients ) ; <nl> - int64 model_parallelism = 0 ; <nl> - for ( auto & pair : essential_parameters ) { <nl> - model_parallelism + = std : : round ( pair . second - > value ) ; <nl> - } <nl> / / We terminate once the improvement of the output latency is too small or <nl> / / the essential transformations ' parallelism reaches the CPU budget or the <nl> / / worst - case total buffer size exceeds the memory budget . <nl> - if ( std : : abs ( output_time - new_output_time ) < kOptimizationPrecision | | <nl> - model_parallelism > cpu_budget | | <nl> - TotalMaximumBufferedBytes ( snapshot ) > ram_budget ) { <nl> + if ( std : : abs ( output_time - new_output_time ) < kOptimizationPrecision ) { <nl> break ; <nl> } <nl> - double max_abs_derivative = 1 . 0 ; <nl> - for ( auto & pair : parameters ) { <nl> - if ( pair . second - > value ! = pair . second - > max ) { <nl> - max_abs_derivative = <nl> - std : : max ( max_abs_derivative , std : : abs ( gradients [ pair . first ] ) ) ; <nl> - } <nl> - } <nl> - for ( auto & pair : parameters ) { <nl> - new_value = pair . second - > value - <nl> - kDescentStep * gradients [ pair . first ] / max_abs_derivative ; <nl> - / / Projection on a feasible interval . <nl> - if ( new_value > pair . second - > max ) { <nl> - pair . second - > value = pair . second - > max ; <nl> - } else if ( new_value < pair . second - > min ) { <nl> - pair . second - > value = pair . second - > min ; <nl> - } else { <nl> - pair . 
second - > value = new_value ; <nl>
- } <nl>
- } <nl>
+ <nl>
+ UpdateParameterValues ( <nl>
+ gradients , & ( cpu_budget_reached ? buffer_size_parameters : parameters ) ) ; <nl>
output_time = new_output_time ; <nl>
} <nl>
- VLOG ( 2 ) < < " Number of tunable parameters : " < < parameters . size ( ) ; <nl>
+ <nl>
for ( auto & pair : parameters ) { <nl>
pair . second - > value = std : : round ( pair . second - > value ) ; <nl>
- auto & parameter = pair . second ; <nl>
- VLOG ( 2 ) < < " Setting tunable parameter " < < pair . first < < " to " <nl>
- < < parameter - > value ; <nl>
- mutex_lock l ( * parameter - > state - > mu ) ; <nl>
- parameter - > state - > value = parameter - > value ; <nl>
- parameter - > state - > cond_var - > notify_all ( ) ; <nl>
} <nl>
+ UpdateStateValues ( & parameters ) ; <nl>
} <nl>
 <nl>
void Model : : OptimizeHillClimb ( int64 cpu_budget , int64 ram_budget , <nl>
void Model : : OptimizeHillClimb ( int64 cpu_budget , int64 ram_budget , <nl>
/ / improvement is greater than this constant . <nl>
constexpr double kBufferSizeMinDelta = 1 . 0L ; <nl>
 <nl>
+ / / Initialize the parameter values to minimal before tuning . <nl>
for ( auto & pair : parameters ) { <nl>
pair . second - > value = pair . second - > min ; <nl>
} <nl>
void Model : : OptimizeHillClimb ( int64 cpu_budget , int64 ram_budget , <nl>
} <nl>
best_parameter - > value + + ; <nl>
} <nl>
- VLOG ( 2 ) < < " Number of tunable parameters : " < < parameters . size ( ) ; <nl>
- for ( auto & pair : parameters ) { <nl>
- auto & parameter = pair . second ; <nl>
- VLOG ( 2 ) < < " Setting tunable parameter " < < pair . first < < " to " <nl>
- < < parameter - > value ; <nl>
- mutex_lock l ( * parameter - > state - > mu ) ; <nl>
- parameter - > state - > value = parameter - > value ; <nl>
- parameter - > state - > cond_var - > notify_all ( ) ; <nl>
- } <nl>
+ UpdateStateValues ( & parameters ) ; <nl>
} <nl>
 <nl>
double Model : : OutputTime ( std : : shared_ptr < Node > node , double model_input_time , <nl>
mmm a / tensorflow / core / framework / model . h <nl>
ppp b / tensorflow / core / framework / model . h <nl>
class Model { <nl>
absl : : flat_hash_map < string , std : : shared_ptr < Parameter > > <nl>
CollectTunableParameters ( std : : shared_ptr < Node > node ) ; <nl>
 <nl>
- / / Collects " essential " parallelism parameters of transformations in the tree <nl>
- / / rooted in the given node . Which parameters are essential is determined by <nl>
- / / comparison the processing time spent in the corresponding transformation <nl>
- / / relative to other transformations . The collected parameters are returned <nl>
- / / as a mapping from a ( unique ) node name to a parallelism parameter . <nl>
- absl : : flat_hash_map < string , std : : shared_ptr < Parameter > > <nl>
- CollectEssentialParallelism ( <nl>
- std : : shared_ptr < Node > node , <nl>
+ / / Determines whether we should stop the gradient descent optimization <nl>
+ / / iterations based on the number of increasable parameters , the CPU budget , <nl>
+ / / the RAM budget and current resource usage .
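<nl> + / / Returns true when all increasable parameters have reached their maximum <nl>
+ / / values or when the worst - case total buffered bytes exceed the RAM budget . <nl>
+ / / Also latches * cpu_budget_reached to true once the total parallelism of the <nl>
+ / / essential transformations exceeds the CPU budget .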
<nl> + bool ShouldStop ( <nl> + int64 cpu_budget , int64 ram_budget , <nl> + const absl : : flat_hash_map < string , std : : shared_ptr < Parameter > > & parameters , <nl> + const absl : : flat_hash_map < string , std : : shared_ptr < Parameter > > & <nl> + parallelism_parameters , <nl> const absl : : flat_hash_map < string , std : : shared_ptr < Parameter > > & <nl> - parameters ) ; <nl> + buffer_size_parameters , <nl> + std : : shared_ptr < Node > snapshot , bool * cpu_budget_reached ) ; <nl> <nl> / / This optimization algorithm starts by setting all tunable parallelism <nl> / / parameters to the minimum value . It then repeatedly identifies the <nl> mmm a / tensorflow / core / framework / op_def . proto <nl> ppp b / tensorflow / core / framework / op_def . proto <nl> option java_package = " org . tensorflow . framework " ; <nl> option go_package = " github . com / tensorflow / tensorflow / tensorflow / go / core / framework / op_def_go_proto " ; <nl> import " tensorflow / core / framework / attr_value . proto " ; <nl> import " tensorflow / core / framework / types . proto " ; <nl> + import " tensorflow / core / framework / resource_handle . proto " ; <nl> <nl> / / Defines an operation . A NodeDef in a GraphDef specifies an Op by <nl> / / using the " op " field which should match the name of a OpDef . <nl> message OpDef { <nl> / / type , type_attr , and number_attr may be specified . <nl> string type_list_attr = 6 ; <nl> <nl> + / / The handle data for resource inputs . <nl> + repeated ResourceHandleProto . DtypeAndShape handle_data = 7 ; <nl> + <nl> / / For inputs : if true , the inputs are required to be refs . <nl> / / By default , inputs can be either refs or non - refs . <nl> / / For outputs : if true , outputs are refs , otherwise they are not . <nl> mmm a / tensorflow / core / grappler / optimizers / meta_optimizer . cc <nl> ppp b / tensorflow / core / grappler / optimizers / meta_optimizer . cc <nl> bool MemoryOptimizerEnabled ( <nl> # define MK_OPT ( NAME , VALUE ) \ <nl> if ( optimizer = = NAME ) return std : : unique_ptr < GraphOptimizer > ( VALUE ) <nl> <nl> - bool MetaOptimizer : : IsSingleThreadedExecutor ( ) const { <nl> - return config_proto_ . experimental ( ) . executor_type ( ) = = <nl> - " SINGLE_THREADED_EXECUTOR " ; <nl> + bool MetaOptimizer : : LowerControlFlow ( ) const { <nl> + if ( config_proto_ . experimental ( ) . executor_type ( ) = = <nl> + " SINGLE_THREADED_EXECUTOR " ) <nl> + return false ; <nl> + <nl> + if ( config_proto_ . experimental ( ) . use_tfrt ( ) ) return false ; <nl> + <nl> + return true ; <nl> } <nl> <nl> std : : unique_ptr < GraphOptimizer > MetaOptimizer : : MakeNewOptimizer ( <nl> const string & optimizer ) const { <nl> MK_OPT ( " pruning " , new ModelPruner ( ) ) ; <nl> - MK_OPT ( " function " , new FunctionOptimizer ( <nl> - cfg_ . function_optimization ( ) , <nl> - / * lower_control_flow = * / ! IsSingleThreadedExecutor ( ) ) ) ; <nl> + MK_OPT ( " function " , <nl> + new FunctionOptimizer ( cfg_ . function_optimization ( ) , <nl> + / * lower_control_flow = * / LowerControlFlow ( ) ) ) ; <nl> MK_OPT ( " constfold " , <nl> new ConstantFolding ( <nl> cpu_device_ , <nl> Status MetaOptimizer : : InitializeOptimizers ( <nl> if ( cfg_ . function_optimization ( ) ! = RewriterConfig : : OFF ) { <nl> optimizers - > push_back ( MakeUnique < FunctionOptimizer > ( <nl> cfg_ . function_optimization ( ) , <nl> - / * lower_control_flow = * / ! 
IsSingleThreadedExecutor ( ) ) ) ; <nl> + / * lower_control_flow = * / LowerControlFlow ( ) ) ) ; <nl> } <nl> if ( cfg_ . common_subgraph_elimination ( ) ! = RewriterConfig : : OFF & & <nl> cfg_ . arithmetic_optimization ( ) ! = RewriterConfig : : OFF ) { <nl> mmm a / tensorflow / core / grappler / optimizers / meta_optimizer . h <nl> ppp b / tensorflow / core / grappler / optimizers / meta_optimizer . h <nl> class MetaOptimizer : public GraphOptimizer { <nl> std : : unique_ptr < GraphOptimizer > MakeNewOptimizer ( <nl> const string & optimizer ) const ; <nl> <nl> - bool IsSingleThreadedExecutor ( ) const ; <nl> + / / When grappler should lower control flow to V1 switch / merge style nodes . <nl> + bool LowerControlFlow ( ) const ; <nl> <nl> / / Initialize active optimizers from RewriterConfig toggles . <nl> Status InitializeOptimizers ( <nl> mmm a / tensorflow / core / kernels / BUILD <nl> ppp b / tensorflow / core / kernels / BUILD <nl> exports_files ( [ <nl> " cwise_op_gpu_sigmoid . cu . cc " , <nl> " cwise_op_gpu_sin . cu . cc " , <nl> " cwise_op_gpu_sqrt . cu . cc " , <nl> + " cwise_op_gpu_square . cu . cc " , <nl> " cwise_op_gpu_squared_difference . cu . cc " , <nl> " cwise_op_gpu_sub . cu . cc " , <nl> " cwise_op_gpu_tanh . cu . cc " , <nl> mmm a / tensorflow / core / kernels / depthwise_conv_op . h <nl> ppp b / tensorflow / core / kernels / depthwise_conv_op . h <nl> struct DepthwiseInputCopyOp { <nl> const int64 padded_filter_inner_dim_size , const int64 out_r , <nl> const int64 out_c , const T * input , T * input_buffer ) { <nl> typedef typename Eigen : : internal : : packet_traits < T > : : type Packet ; <nl> - static const int64 kPacketSize = ( sizeof ( Packet ) / sizeof ( T ) ) ; <nl> + static const int64 kPacketSize = Eigen : : internal : : packet_traits < T > : : size ; <nl> <nl> + const int64 kDepth = args . depth_multiplier ; <nl> / / Calculate vectorized and scalar ( residual ) lengths for ' in_depth ' . <nl> const int64 input_vectorized_size = <nl> ( args . in_depth / kPacketSize ) * kPacketSize ; <nl> - const int64 input_scalar_size = args . in_depth % kPacketSize ; <nl> - <nl> - / / Calculate vectorized and scalar ( residual ) lengths for <nl> - / / ' depth_multiplier ' . This is used to efficiently replicate data for <nl> - / / when ' depth_multiplier ' > kPacketSize . <nl> - const int64 dm_vectorized_size = <nl> - ( args . depth_multiplier / kPacketSize ) * kPacketSize ; <nl> - const int64 dm_scalar_size = args . depth_multiplier % kPacketSize ; <nl> + const int64 input_scalar_size = args . in_depth - input_vectorized_size ; <nl> <nl> / / Calculate output padding length . <nl> const int64 output_scalar_size = args . out_depth % kPacketSize ; <nl> const int64 output_pad_size = <nl> output_scalar_size > 0 ? kPacketSize - output_scalar_size : 0 ; <nl> <nl> - const int64 replicated_packet_size = kPacketSize * args . depth_multiplier ; <nl> - <nl> / / Iterate through all rows x cols reading ' in_depth ' from ' input ' and <nl> / / replicating by ' depth_multiplier ' into ' input_buffer ' ( otherwise <nl> / / zero - padding input buffer as needed ) . <nl> struct DepthwiseInputCopyOp { <nl> const int64 in_r_start = out_r * args . stride - args . pad_rows ; <nl> const int64 in_c_start = out_c * args . stride - args . pad_cols ; <nl> <nl> - for ( int64 f_r = 0 ; f_r < args . filter_rows ; + + f_r ) { <nl> - const int64 in_r = in_r_start + f_r ; <nl> - <nl> - for ( int64 f_c = 0 ; f_c < args . 
filter_cols ; + + f_c ) { <nl> - const int64 in_c = in_c_start + f_c ; <nl> - <nl> - if ( in_r > = 0 & & in_r < args . in_rows & & in_c > = 0 & & <nl> - in_c < args . in_cols ) { <nl> - auto * in = input + ( in_r * args . in_cols + in_c ) * args . in_depth ; <nl> - / / Copy vectorized portion of inner dimension . <nl> - for ( int64 d = 0 ; d < input_vectorized_size ; d + = kPacketSize ) { <nl> - auto v = Eigen : : internal : : ploadu < Packet > ( in + d ) ; <nl> - for ( int dm = 0 ; dm < args . depth_multiplier ; + + dm ) { <nl> - Eigen : : internal : : pscatter < T , Packet > ( in_buf + dm , v , <nl> - args . depth_multiplier ) ; <nl> + / / TODO : add a ploaddup variant for depth = = 2 if needed . <nl> + if ( kDepth > 1 & & kDepth < = kPacketSize ) { <nl> + for ( int64 f_r = 0 ; f_r < args . filter_rows ; + + f_r ) { <nl> + const int64 in_r = in_r_start + f_r ; <nl> + <nl> + for ( int64 f_c = 0 ; f_c < args . filter_cols ; + + f_c ) { <nl> + const int64 in_c = in_c_start + f_c ; <nl> + <nl> + if ( in_r > = 0 & & in_r < args . in_rows & & in_c > = 0 & & <nl> + in_c < args . in_cols ) { <nl> + const auto * in = <nl> + input + ( in_r * args . in_cols + in_c ) * args . in_depth ; <nl> + int64 limit = args . in_depth ; <nl> + / / This will overwrite up to kPacketSize next elements , <nl> + / / this is ok on all iterations except the last one , since <nl> + / / we will write correct values on a next iteration . <nl> + if ( f_c = = args . filter_cols - 1 ) { <nl> + limit - = ( kPacketSize - kDepth ) / kDepth + 1 ; <nl> + if ( limit < 0 ) { <nl> + limit = 0 ; <nl> + } <nl> + } <nl> + / / Copy vectorized portion of inner dimension . <nl> + for ( int64 d = 0 ; d < limit ; d + + ) { <nl> + const auto p = Eigen : : internal : : pset1 < Packet > ( in [ d ] ) ; <nl> + Eigen : : internal : : pstoreu < T > ( in_buf , p ) ; <nl> + in_buf + = kDepth ; <nl> } <nl> - in_buf + = replicated_packet_size ; <nl> - } <nl> <nl> - / / Copy scalar portion of inner dimension . <nl> - for ( int64 d = 0 ; d < input_scalar_size ; + + d ) { <nl> - T v = in [ input_vectorized_size + d ] ; <nl> - const int64 base = d * args . depth_multiplier ; <nl> - if ( dm_vectorized_size > 0 ) { <nl> - / / Copy vectorized portion of replicated output . <nl> - / / This branch is only taken if ' args . depth_multiplier ' is <nl> - / / vectorizable ( i . e . args . depth_multiplier > = register width ) . <nl> - auto p = Eigen : : internal : : pset1 < Packet > ( v ) ; <nl> - for ( int64 dm = 0 ; dm < dm_vectorized_size ; dm + = kPacketSize ) { <nl> - Eigen : : internal : : pstoreu < T > ( in_buf + base + dm , p ) ; <nl> - } <nl> - / / Copy scalar portion of replicated output . <nl> - for ( int64 dm = 0 ; dm < dm_scalar_size ; + + dm ) { <nl> - in_buf [ base + dm_vectorized_size + dm ] = v ; <nl> + / / Copy the scalar portion . <nl> + for ( int64 d = limit ; d < args . in_depth ; d + + ) { <nl> + const auto value = in [ d ] ; <nl> + for ( int64 dm = 0 ; dm < kDepth ; dm + + ) { <nl> + in_buf [ dm ] = value ; <nl> } <nl> - } else { <nl> - / / Depth multiplier is less than one packet : scalar copy . <nl> - for ( int dm = 0 ; dm < args . depth_multiplier ; + + dm ) { <nl> - in_buf [ base + dm ] = v ; <nl> + in_buf + = kDepth ; <nl> + } <nl> + <nl> + / / Pad the remainder of the output to vector register boundary . <nl> + for ( int64 d = 0 ; d < output_pad_size ; + + d ) { <nl> + in_buf [ d ] = static_cast < T > ( 0 ) ; <nl> + } <nl> + in_buf + = output_pad_size ; <nl> + } else { <nl> + / / Zero pad . 
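<nl> + / / This ( in_r , in_c ) position lies outside the input , so the whole <nl>
+ / / padded_filter_inner_dim_size stripe of the input buffer is filled with <nl>
+ / / zeroes .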
<nl> + memset ( in_buf , 0 , sizeof ( T ) * padded_filter_inner_dim_size ) ; <nl> + in_buf + = padded_filter_inner_dim_size ; <nl> + } <nl> + } <nl> + } <nl> + } else if ( kDepth > kPacketSize ) { <nl> + / / Calculate vectorized and scalar ( residual ) lengths for <nl> + / / ' depth_multiplier ' . This is used to efficiently replicate data for <nl> + / / when ' depth_multiplier ' > kPacketSize . <nl> + const int64 dm_vectorized_size = ( kDepth / kPacketSize ) * kPacketSize ; <nl> + <nl> + for ( int64 f_r = 0 ; f_r < args . filter_rows ; + + f_r ) { <nl> + const int64 in_r = in_r_start + f_r ; <nl> + <nl> + for ( int64 f_c = 0 ; f_c < args . filter_cols ; + + f_c ) { <nl> + const int64 in_c = in_c_start + f_c ; <nl> + <nl> + if ( in_r > = 0 & & in_r < args . in_rows & & in_c > = 0 & & <nl> + in_c < args . in_cols ) { <nl> + const auto * in = <nl> + input + ( in_r * args . in_cols + in_c ) * args . in_depth ; <nl> + / / Copy vectorized portion of inner dimension . <nl> + for ( int64 d = 0 ; d < args . in_depth ; d + + ) { <nl> + const auto p = Eigen : : internal : : pset1 < Packet > ( in [ d ] ) ; <nl> + for ( int64 dm = 0 ; dm < dm_vectorized_size ; dm + = kPacketSize ) { <nl> + Eigen : : internal : : pstoreu < T > ( in_buf + dm , p ) ; <nl> } <nl> + / / Overlapping store for the remainder . <nl> + Eigen : : internal : : pstoreu < T > ( in_buf + kDepth - kPacketSize , p ) ; <nl> + in_buf + = kDepth ; <nl> + } <nl> + / / Pad the remainder of the output to vector register boundary . <nl> + for ( int64 d = 0 ; d < output_pad_size ; + + d ) { <nl> + in_buf [ d ] = static_cast < T > ( 0 ) ; <nl> } <nl> + in_buf + = output_pad_size ; <nl> + } else { <nl> + / / Zero pad . <nl> + memset ( in_buf , 0 , sizeof ( T ) * padded_filter_inner_dim_size ) ; <nl> + in_buf + = padded_filter_inner_dim_size ; <nl> } <nl> - in_buf + = input_scalar_size * args . depth_multiplier ; <nl> + } <nl> + } <nl> + } else if ( kDepth = = 1 ) { <nl> + for ( int64 f_r = 0 ; f_r < args . filter_rows ; + + f_r ) { <nl> + const int64 in_r = in_r_start + f_r ; <nl> + <nl> + for ( int64 f_c = 0 ; f_c < args . filter_cols ; + + f_c ) { <nl> + const int64 in_c = in_c_start + f_c ; <nl> + <nl> + if ( in_r > = 0 & & in_r < args . in_rows & & in_c > = 0 & & <nl> + in_c < args . in_cols ) { <nl> + const auto * in = <nl> + input + ( in_r * args . in_cols + in_c ) * args . in_depth ; <nl> + for ( int64 d = 0 ; d < input_vectorized_size ; d + = kPacketSize ) { <nl> + const auto p = Eigen : : internal : : ploadu < Packet > ( in + d ) ; <nl> + Eigen : : internal : : pstoreu < T > ( in_buf , p ) ; <nl> + in_buf + = kPacketSize ; <nl> + } <nl> + for ( int64 d = 0 ; d < input_scalar_size ; + + d ) { <nl> + T v = in [ input_vectorized_size + d ] ; <nl> + in_buf [ d ] = v ; <nl> + } <nl> + in_buf + = input_scalar_size ; <nl> <nl> - / / Pad the remainder of the output to vector register boundary . <nl> - for ( int64 d = 0 ; d < output_pad_size ; + + d ) { <nl> - in_buf [ d ] = static_cast < T > ( 0 ) ; <nl> + / / Pad the remainder of the output to vector register boundary . <nl> + for ( int64 d = 0 ; d < output_pad_size ; + + d ) { <nl> + in_buf [ d ] = static_cast < T > ( 0 ) ; <nl> + } <nl> + in_buf + = output_pad_size ; <nl> + } else { <nl> + / / Zero pad . <nl> + memset ( in_buf , 0 , sizeof ( T ) * padded_filter_inner_dim_size ) ; <nl> + in_buf + = padded_filter_inner_dim_size ; <nl> } <nl> - in_buf + = output_pad_size ; <nl> - <nl> - } else { <nl> - / / Zero pad . 
<nl> - memset ( in_buf , 0 , sizeof ( T ) * padded_filter_inner_dim_size ) ; <nl> - in_buf + = padded_filter_inner_dim_size ; <nl> } <nl> } <nl> } <nl> mmm a / tensorflow / core / kernels / mlir_generated / BUILD <nl> ppp b / tensorflow / core / kernels / mlir_generated / BUILD <nl> filegroup ( <nl> compatible_with = get_compatible_with_cloud ( ) , <nl> ) <nl> <nl> + filegroup ( <nl> + name = " unary_kernel_srcs " , <nl> + srcs = if_mlir_unranked_kernels_enabled ( <nl> + if_false = [ <nl> + " cwise_op_gpu_abs . cc " , <nl> + " cwise_op_gpu_base . cc " , <nl> + " cwise_op_gpu_base . h " , <nl> + " cwise_op_gpu_tanh . cc " , <nl> + ] , <nl> + if_true = [ " : unary_unranked_kernel_srcs " ] , <nl> + ) , <nl> + compatible_with = get_compatible_with_cloud ( ) , <nl> + ) <nl> + <nl> cc_library ( <nl> name = " unranked_op_gpu_base " , <nl> srcs = [ " unranked_op_gpu_base . cc " ] , <nl> cc_library ( <nl> <nl> tf_kernel_library ( <nl> name = " cwise_unary_op " , <nl> - srcs = [ " : unary_unranked_kernel_srcs " ] , <nl> + srcs = [ " : unary_kernel_srcs " ] , <nl> tags = [ <nl> " manual " , <nl> ] , <nl> - deps = [ <nl> - # Technically we only need to depend on the kernel libraries for the <nl> - # unranked kernels which are enabled by default . But this would <nl> - # make our BUILD target structure uglier . We already need to make <nl> - # sure that those targets can be built , so it should not hurt to <nl> - # link them in even if they are currently not needed yet . <nl> - " : abs_unranked_kernels " , <nl> - " : ceil_unranked_kernels " , <nl> - " : conj_unranked_kernels " , <nl> - " : cos_unranked_kernels " , <nl> - " : exp_unranked_kernels " , <nl> - " : floor_unranked_kernels " , <nl> - " : imag_unranked_kernels " , <nl> - " : is_inf_unranked_kernels " , <nl> - " : log_unranked_kernels " , <nl> - " : logical_not_unranked_kernels " , <nl> - " : real_unranked_kernels " , <nl> - " : rsqrt_unranked_kernels " , <nl> - " : sign_unranked_kernels " , <nl> - " : sin_unranked_kernels " , <nl> - " : sqrt_unranked_kernels " , <nl> - " : tanh_unranked_kernels " , <nl> - " : unranked_op_gpu_base " , <nl> - " / / third_party / eigen3 " , <nl> - ] , <nl> + deps = if_mlir_unranked_kernels_enabled ( <nl> + if_false = [ <nl> + " : abs_kernels " , <nl> + " : tanh_kernels " , <nl> + " @ com_google_absl / / absl / strings " , <nl> + " @ com_google_absl / / absl / synchronization " , <nl> + " @ com_google_absl / / absl / types : span " , <nl> + " / / third_party / eigen3 " , <nl> + " / / tensorflow / core : framework " , <nl> + " / / tensorflow / core : lib " , <nl> + " / / tensorflow / core / platform : stream_executor " , <nl> + ] , <nl> + if_true = [ <nl> + # Technically we only need to depend on the kernel libraries for the <nl> + # unranked kernels which are enabled by default . But this would <nl> + # make our BUILD target structure uglier . We already need to make <nl> + # sure that those targets can be built , so it should not hurt to <nl> + # link them in even if they are currently not needed yet . 
<nl> + " : abs_unranked_kernels " , <nl> + " : ceil_unranked_kernels " , <nl> + " : conj_unranked_kernels " , <nl> + " : cos_unranked_kernels " , <nl> + " : exp_unranked_kernels " , <nl> + " : floor_unranked_kernels " , <nl> + " : imag_unranked_kernels " , <nl> + " : is_inf_unranked_kernels " , <nl> + " : log_unranked_kernels " , <nl> + " : logical_not_unranked_kernels " , <nl> + " : real_unranked_kernels " , <nl> + " : rsqrt_unranked_kernels " , <nl> + " : sign_unranked_kernels " , <nl> + " : sin_unranked_kernels " , <nl> + " : sqrt_unranked_kernels " , <nl> + " : tanh_unranked_kernels " , <nl> + " : unranked_op_gpu_base " , <nl> + " / / third_party / eigen3 " , <nl> + ] , <nl> + ) , <nl> ) <nl> <nl> tf_kernel_library ( <nl> name = " cwise_binary_op " , <nl> - srcs = [ " unranked_gpu_add . cc " ] , <nl> + srcs = [ <nl> + " unranked_op_gpu_add . cc " , <nl> + ] , <nl> tags = [ <nl> " manual " , <nl> ] , <nl> deps = [ <nl> " : add_v2_unranked_kernels " , <nl> + " : equal_unranked_kernels " , <nl> + " : greater_equal_unranked_kernels " , <nl> + " : greater_unranked_kernels " , <nl> + " : less_equal_unranked_kernels " , <nl> + " : less_unranked_kernels " , <nl> + " : maximum_unranked_kernels " , <nl> + " : minimum_unranked_kernels " , <nl> + " : not_equal_unranked_kernels " , <nl> " : unranked_op_gpu_base " , <nl> " / / third_party / eigen3 " , <nl> ] , <nl> tf_cuda_cc_test ( <nl> ) <nl> <nl> tf_cuda_cc_test ( <nl> - name = " gpu_add_test " , <nl> + name = " gpu_binary_ops_test " , <nl> size = " small " , <nl> - srcs = if_mlir_generated_gpu_kernels_enabled ( [ " gpu_add_test . cc " ] ) , <nl> + srcs = if_mlir_generated_gpu_kernels_enabled ( [ " gpu_binary_ops_test . cc " ] ) , <nl> tags = tf_cuda_tests_tags ( ) + [ <nl> " no_cuda_asan " , # b / 173033461 <nl> ] , <nl> tf_cuda_cc_test ( <nl> " / / tensorflow / core : testlib " , <nl> " / / tensorflow / core / common_runtime : device " , <nl> " / / tensorflow / core / common_runtime : device_factory " , <nl> + " / / tensorflow / core / framework : types_proto_cc " , <nl> " / / tensorflow / core / kernels : cwise_op " , <nl> " / / tensorflow / core / kernels : ops_testutil " , <nl> + " @ com_google_absl / / absl / container : inlined_vector " , <nl> ] , <nl> ) <nl> <nl> gen_kernel_library ( <nl> " f32 " , <nl> " f64 " , <nl> ] , <nl> - unroll_factors = " 4 " , <nl> + unroll_factors = " 2 " , <nl> ) <nl> <nl> gen_kernel_library ( <nl> gen_kernel_library ( <nl> generate_unranked = True , <nl> tile_size = " 256 " , <nl> types = [ " i1 " ] , <nl> - unroll_factors = " 4 " , <nl> + unroll_factors = " 1 " , <nl> ) <nl> <nl> gen_kernel_library ( <nl> gen_kernel_library ( <nl> " f64 " , <nl> " i64 " , <nl> ] , <nl> - # TODO ( b / 174543802 ) : Enable once fusion heursitics is better . <nl> + # TODO ( b / 174543802 ) : Enable once fusion heuristics is better . <nl> # unroll_factors = " 4 " , <nl> ) <nl> <nl> - gen_kernel_library ( <nl> - name = " equal " , <nl> - generate_ranked = False , <nl> - generate_unranked = True , <nl> - tile_size = " 256 , 1 , 1 " , <nl> - types = [ <nl> - " f16 " , <nl> - " f32 " , <nl> - " f64 " , <nl> - " i1 " , <nl> - " i8 " , <nl> - " i16 " , <nl> - " i32 " , <nl> - " i64 " , <nl> - ] , <nl> - # TODO ( b / 174543802 ) : Enable once fusion heursitics is better . 
<nl> - # unroll_factors = " 4 " , <nl> - ) <nl> + [ <nl> + gen_kernel_library ( <nl> + name = name , <nl> + generate_ranked = False , <nl> + generate_unranked = True , <nl> + tile_size = " 256 , 1 , 1 " , <nl> + types = [ <nl> + " f16 " , <nl> + " f32 " , <nl> + " f64 " , <nl> + " i1 " , <nl> + " i8 " , <nl> + " i16 " , <nl> + " i32 " , <nl> + " i64 " , <nl> + ] , <nl> + # TODO ( b / 174543802 ) : Enable once fusion heuristics is better . <nl> + # unroll_factors = " 4 " , <nl> + ) <nl> + for name in [ <nl> + " equal " , <nl> + " not_equal " , <nl> + ] <nl> + ] <nl> + <nl> + [ <nl> + gen_kernel_library ( <nl> + name = name , <nl> + generate_ranked = False , <nl> + generate_unranked = True , <nl> + tile_size = " 256 , 1 , 1 " , <nl> + types = [ <nl> + " f16 " , <nl> + " f32 " , <nl> + " f64 " , <nl> + " i8 " , <nl> + " i16 " , <nl> + " i32 " , <nl> + " i64 " , <nl> + ] , <nl> + # TODO ( b / 174543802 ) : Enable once fusion heuristics is better . <nl> + # unroll_factors = " 4 " , <nl> + ) <nl> + for name in [ <nl> + " less " , <nl> + " less_equal " , <nl> + " greater " , <nl> + " greater_equal " , <nl> + ] <nl> + ] <nl> + <nl> + [ <nl> + gen_kernel_library ( <nl> + name = name , <nl> + generate_ranked = False , <nl> + generate_unranked = True , <nl> + tile_size = " 256 , 1 , 1 " , <nl> + types = [ <nl> + " f16 " , <nl> + " f32 " , <nl> + " f64 " , <nl> + " i16 " , <nl> + " i32 " , <nl> + " i64 " , <nl> + ] , <nl> + # TODO ( b / 174543802 ) : Enable once fusion heuristics is better . <nl> + # unroll_factors = " 4 " , <nl> + ) <nl> + for name in [ <nl> + " maximum " , <nl> + " minimum " , <nl> + ] <nl> + ] <nl> <nl> # Kernels that support all floating - point types . <nl> [ <nl> mmm a / tensorflow / core / kernels / mlir_generated / build_test . sh <nl> ppp b / tensorflow / core / kernels / mlir_generated / build_test . sh <nl> OUTPUT_FILE = " $ { TEST_TMPDIR } / output . mlir " <nl> INPUT = " $ 2 " <nl> <nl> # Do something <nl> - $ { TF_TO_KERNEL } - - input = $ { INPUT } - - output = $ { OUTPUT_FILE } - - unroll_factors = 4 - - tile_sizes = 256 - - arch = sm_70 , compute_75 | | die " Failed to generate kernel " <nl> + $ { TF_TO_KERNEL } - - input = $ { INPUT } - - output = $ { OUTPUT_FILE } - - unroll_factors = 4 - - tile_sizes = 256 - - arch = sm_70 , compute_75 " $ { @ : 3 } " | | die " Failed to generate kernel " <nl> <nl> # Check something <nl> [ - s $ { OUTPUT_FILE } ] | | die " output file was empty " <nl> deleted file mode 100644 <nl> index b518aff7a033c . . 0000000000000 <nl> mmm a / tensorflow / core / kernels / mlir_generated / gpu_add_test . cc <nl> ppp / dev / null <nl> <nl> - / * Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . 
<nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - <nl> - # include < cmath > <nl> - # include < limits > <nl> - # include < memory > <nl> - # include < vector > <nl> - <nl> - # include " tensorflow / core / common_runtime / device . h " <nl> - # include " tensorflow / core / common_runtime / device_factory . h " <nl> - # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / node_def_builder . h " <nl> - # include " tensorflow / core / framework / tensor . h " <nl> - # include " tensorflow / core / kernels / ops_testutil . h " <nl> - # include " tensorflow / core / lib / core / status_test_util . h " <nl> - # include " tensorflow / core / platform / test . h " <nl> - <nl> - namespace tensorflow { <nl> - namespace { <nl> - <nl> - class GpuAddTest : public OpsTestBase { <nl> - protected : <nl> - void SetUp ( ) override { <nl> - std : : unique_ptr < tensorflow : : Device > device_gpu ( <nl> - tensorflow : : DeviceFactory : : NewDevice ( " GPU " , { } , <nl> - " / job : a / replica : 0 / task : 0 " ) ) ; <nl> - SetDevice ( tensorflow : : DEVICE_GPU , std : : move ( device_gpu ) ) ; <nl> - } <nl> - <nl> - template < typename T , typename BaselineType = T > <nl> - void SetAddOp ( std : : vector < T > input_1 , TensorShape shape_1 , <nl> - std : : vector < T > input_2 , TensorShape shape_2 ) { <nl> - TF_ASSERT_OK ( NodeDefBuilder ( " add_op " , " AddV2 " ) <nl> - . Input ( FakeInput ( DataTypeToEnum < T > : : v ( ) ) ) <nl> - . Input ( FakeInput ( DataTypeToEnum < T > : : v ( ) ) ) <nl> - . Attr ( " T " , DataTypeToEnum < T > : : v ( ) ) <nl> - . Finalize ( node_def ( ) ) ) ; <nl> - <nl> - TF_ASSERT_OK ( InitOp ( ) ) ; <nl> - inputs_ . 
clear ( ) ; <nl> - AddInputFromArray < T > ( shape_1 , input_1 ) ; <nl> - AddInputFromArray < T > ( shape_2 , input_2 ) ; <nl> - } <nl> - <nl> - template < typename T , typename BaselineType = T > <nl> - void RunAndCompareAddOp ( std : : vector < T > input_1 , TensorShape shape_1 , <nl> - std : : vector < T > input_2 , TensorShape shape_2 , <nl> - std : : vector < T > output , TensorShape output_shape ) { <nl> - SetAddOp < T > ( input_1 , shape_1 , input_2 , shape_2 ) ; <nl> - TF_ASSERT_OK ( RunOpKernel ( ) ) ; <nl> - Tensor expected_tensor ( allocator ( ) , DataTypeToEnum < T > : : value , output_shape ) ; <nl> - test : : FillValues < T > ( & expected_tensor , output ) ; <nl> - test : : ExpectEqual ( expected_tensor , * GetOutput ( 0 ) ) ; <nl> - } <nl> - <nl> - template < typename T , typename BaselineType = T > <nl> - void TestBroadcastingExpandAddOp ( ) { <nl> - auto input_1 = { static_cast < T > ( 10 ) } ; <nl> - auto input_2 = { static_cast < T > ( 1 ) , static_cast < T > ( 2 ) , static_cast < T > ( 3 ) , <nl> - static_cast < T > ( 4 ) , static_cast < T > ( 5 ) , static_cast < T > ( 6 ) } ; <nl> - std : : vector < T > expected { <nl> - static_cast < T > ( 11 ) , static_cast < T > ( 12 ) , static_cast < T > ( 13 ) , <nl> - static_cast < T > ( 14 ) , static_cast < T > ( 15 ) , static_cast < T > ( 16 ) , <nl> - } ; <nl> - auto expected_shape = TensorShape ( { 6 } ) ; <nl> - RunAndCompareAddOp < T , BaselineType > ( input_1 , TensorShape ( { 1 } ) , input_2 , <nl> - TensorShape ( { 6 } ) , expected , <nl> - expected_shape ) ; <nl> - } <nl> - <nl> - template < typename T , typename BaselineType = T > <nl> - void TestBroadcastingInDimAddOp ( ) { <nl> - auto input_1 = { static_cast < T > ( 10 ) , static_cast < T > ( 20 ) , static_cast < T > ( 30 ) } ; <nl> - auto input_2 = { static_cast < T > ( 1 ) , static_cast < T > ( 2 ) , static_cast < T > ( 3 ) , <nl> - static_cast < T > ( 4 ) , static_cast < T > ( 5 ) , static_cast < T > ( 6 ) } ; <nl> - std : : vector < T > expected { <nl> - static_cast < T > ( 11 ) , static_cast < T > ( 22 ) , static_cast < T > ( 33 ) , <nl> - static_cast < T > ( 14 ) , static_cast < T > ( 25 ) , static_cast < T > ( 36 ) , <nl> - } ; <nl> - auto expected_shape = TensorShape ( { 2 , 3 } ) ; <nl> - RunAndCompareAddOp < T , BaselineType > ( input_1 , TensorShape ( { 3 } ) , input_2 , <nl> - TensorShape ( { 2 , 3 } ) , expected , <nl> - expected_shape ) ; <nl> - } <nl> - <nl> - template < typename T , typename BaselineType = T > <nl> - void TestBroadcastingAddOp ( ) { <nl> - auto input_1 = { static_cast < T > ( 10 ) , static_cast < T > ( 20 ) } ; <nl> - auto input_2 = { static_cast < T > ( 1 ) , static_cast < T > ( 2 ) , static_cast < T > ( 3 ) } ; <nl> - std : : vector < T > expected { <nl> - static_cast < T > ( 11 ) , static_cast < T > ( 12 ) , static_cast < T > ( 13 ) , <nl> - static_cast < T > ( 21 ) , static_cast < T > ( 22 ) , static_cast < T > ( 23 ) , <nl> - } ; <nl> - auto expected_shape = TensorShape ( { 2 , 3 } ) ; <nl> - RunAndCompareAddOp < T , BaselineType > ( input_1 , TensorShape ( { 2 , 1 } ) , input_2 , <nl> - TensorShape ( { 3 } ) , expected , <nl> - expected_shape ) ; <nl> - } <nl> - <nl> - template < typename T , typename BaselineType = T > <nl> - void RunAddOp ( ) { <nl> - auto input_1 = { <nl> - static_cast < T > ( - std : : numeric_limits < BaselineType > : : infinity ( ) ) , <nl> - static_cast < T > ( - 0 . 1 ) , <nl> - static_cast < T > ( - 0 . 0 ) , <nl> - static_cast < T > ( 0 . 0 ) , <nl> - static_cast < T > ( 0 . 
1 ) , <nl> - static_cast < T > ( std : : numeric_limits < BaselineType > : : infinity ( ) ) } ; <nl> - auto input_2 = { <nl> - static_cast < T > ( - std : : numeric_limits < BaselineType > : : infinity ( ) ) , <nl> - static_cast < T > ( - 0 . 1 ) , <nl> - static_cast < T > ( - 0 . 0 ) , <nl> - static_cast < T > ( 0 . 0 ) , <nl> - static_cast < T > ( 0 . 1 ) , <nl> - static_cast < T > ( std : : numeric_limits < BaselineType > : : infinity ( ) ) } ; <nl> - std : : vector < T > expected ; <nl> - for ( const T & inp : input_2 ) { <nl> - expected . push_back ( static_cast < T > ( static_cast < BaselineType > ( inp ) + <nl> - static_cast < BaselineType > ( inp ) ) ) ; <nl> - } <nl> - RunAndCompareAddOp < T , BaselineType > ( input_1 , TensorShape { 2 , 3 } , input_2 , <nl> - TensorShape { 2 , 3 } , expected , <nl> - TensorShape { 2 , 3 } ) ; <nl> - } <nl> - <nl> - template < typename T , typename BaselineType = T > <nl> - void TestEqualShapesAddOp ( ) { <nl> - auto input_1 = { <nl> - static_cast < T > ( - std : : numeric_limits < BaselineType > : : infinity ( ) ) , <nl> - static_cast < T > ( - 0 . 1 ) , <nl> - static_cast < T > ( - 0 . 0 ) , <nl> - static_cast < T > ( 0 . 0 ) , <nl> - static_cast < T > ( 0 . 1 ) , <nl> - static_cast < T > ( std : : numeric_limits < BaselineType > : : infinity ( ) ) } ; <nl> - auto input_2 = { <nl> - static_cast < T > ( - std : : numeric_limits < BaselineType > : : infinity ( ) ) , <nl> - static_cast < T > ( - 0 . 1 ) , <nl> - static_cast < T > ( - 0 . 0 ) , <nl> - static_cast < T > ( 0 . 0 ) , <nl> - static_cast < T > ( 0 . 1 ) , <nl> - static_cast < T > ( std : : numeric_limits < BaselineType > : : infinity ( ) ) } ; <nl> - std : : vector < T > expected ; <nl> - for ( const T & inp : input_2 ) { <nl> - expected . push_back ( static_cast < T > ( static_cast < BaselineType > ( inp ) + <nl> - static_cast < BaselineType > ( inp ) ) ) ; <nl> - } <nl> - RunAndCompareAddOp < T , BaselineType > ( input_1 , TensorShape { 2 , 3 } , input_2 , <nl> - TensorShape { 2 , 3 } , expected , <nl> - TensorShape { 2 , 3 } ) ; <nl> - } <nl> - <nl> - template < typename T , typename BaselineType = T > <nl> - void TestOneIsScalarAddOp ( ) { <nl> - auto input_1 = static_cast < T > ( 42 ) ; <nl> - auto input_2 = { <nl> - static_cast < T > ( - std : : numeric_limits < BaselineType > : : infinity ( ) ) , <nl> - static_cast < T > ( - 0 . 1 ) , <nl> - static_cast < T > ( - 0 . 0 ) , <nl> - static_cast < T > ( 0 . 0 ) , <nl> - static_cast < T > ( 0 . 1 ) , <nl> - static_cast < T > ( std : : numeric_limits < BaselineType > : : infinity ( ) ) } ; <nl> - std : : vector < T > expected ; <nl> - for ( const T & inp : input_2 ) { <nl> - expected . push_back ( static_cast < T > ( static_cast < BaselineType > ( input_1 ) + <nl> - static_cast < BaselineType > ( inp ) ) ) ; <nl> - } <nl> - RunAndCompareAddOp < T , BaselineType > ( { input_1 } , TensorShape { } , input_2 , <nl> - TensorShape { 2 , 3 } , expected , <nl> - TensorShape { 2 , 3 } ) ; <nl> - } <nl> - <nl> - template < typename T , typename RT = T > <nl> - void TestIncompatibleShapes ( ) { <nl> - auto input_1 = { static_cast < T > ( - 0 . 1 ) , static_cast < T > ( - 0 . 0 ) , <nl> - static_cast < T > ( 0 . 0 ) } ; <nl> - auto input_2 = { static_cast < T > ( - 0 . 1 ) , static_cast < T > ( 0 . 0 ) } ; <nl> - <nl> - SetAddOp < T > ( input_1 , TensorShape { 3 } , input_2 , TensorShape { 2 } ) ; <nl> - auto status = RunOpKernel ( ) ; <nl> - EXPECT_FALSE ( status . ok ( ) ) ; <nl> - EXPECT_EQ ( status . 
code ( ) , error : : INVALID_ARGUMENT ) ; <nl> - } <nl> - <nl> - template < typename T , typename BaselineType = T > <nl> - void TestEmptyShapeWithBroadcastingAddOp ( ) { <nl> - TensorShape input_shape_a { 2 , 0 , 1 } ; <nl> - TensorShape input_shape_b { 2 , 0 , 5 } ; <nl> - TensorShape expected_shape { 2 , 0 , 5 } ; <nl> - std : : vector < T > empty_input = { } ; <nl> - RunAndCompareAddOp < T , BaselineType > ( empty_input , input_shape_a , empty_input , <nl> - input_shape_b , empty_input , <nl> - expected_shape ) ; <nl> - RunAndCompareAddOp < T , BaselineType > ( empty_input , input_shape_b , empty_input , <nl> - input_shape_a , empty_input , <nl> - expected_shape ) ; <nl> - } <nl> - } ; <nl> - <nl> - TEST_F ( GpuAddTest , AddFloat ) { RunAddOp < float > ( ) ; } <nl> - TEST_F ( GpuAddTest , AddDouble ) { RunAddOp < double > ( ) ; } <nl> - TEST_F ( GpuAddTest , AddHalf ) { RunAddOp < Eigen : : half , float > ( ) ; } <nl> - TEST_F ( GpuAddTest , AddInt64 ) { RunAddOp < int64 , int64 > ( ) ; } <nl> - <nl> - TEST_F ( GpuAddTest , AddEqShapesFloat ) { TestEqualShapesAddOp < float > ( ) ; } <nl> - TEST_F ( GpuAddTest , AddEqShapesDouble ) { TestEqualShapesAddOp < double > ( ) ; } <nl> - TEST_F ( GpuAddTest , AddEqShapesHalf ) { <nl> - TestEqualShapesAddOp < Eigen : : half , float > ( ) ; <nl> - } <nl> - TEST_F ( GpuAddTest , AddEqShapesInt64 ) { TestEqualShapesAddOp < int64 > ( ) ; } <nl> - <nl> - TEST_F ( GpuAddTest , AddScalarFloat ) { TestOneIsScalarAddOp < float > ( ) ; } <nl> - TEST_F ( GpuAddTest , AddScalarDouble ) { TestOneIsScalarAddOp < double > ( ) ; } <nl> - TEST_F ( GpuAddTest , AddScalarHalf ) { <nl> - TestOneIsScalarAddOp < Eigen : : half , float > ( ) ; <nl> - } <nl> - TEST_F ( GpuAddTest , AddScalarInt64 ) { TestOneIsScalarAddOp < int64 > ( ) ; } <nl> - <nl> - TEST_F ( GpuAddTest , BCastExpandAddFloat ) { <nl> - TestBroadcastingExpandAddOp < float > ( ) ; <nl> - } <nl> - TEST_F ( GpuAddTest , BCastExpandAddDouble ) { <nl> - TestBroadcastingExpandAddOp < double > ( ) ; <nl> - } <nl> - TEST_F ( GpuAddTest , BCastExpandAddHalf ) { <nl> - TestBroadcastingExpandAddOp < Eigen : : half , float > ( ) ; <nl> - } <nl> - TEST_F ( GpuAddTest , BCastExpandAddInt64 ) { <nl> - TestBroadcastingExpandAddOp < int64 > ( ) ; <nl> - } <nl> - <nl> - TEST_F ( GpuAddTest , BCastInDimAddFloat ) { TestBroadcastingInDimAddOp < float > ( ) ; } <nl> - TEST_F ( GpuAddTest , BCastInDimAddDouble ) { <nl> - TestBroadcastingInDimAddOp < double > ( ) ; <nl> - } <nl> - TEST_F ( GpuAddTest , BCastInDimAddHalf ) { <nl> - TestBroadcastingInDimAddOp < Eigen : : half , float > ( ) ; <nl> - } <nl> - TEST_F ( GpuAddTest , BCastInDimAddInt64 ) { TestBroadcastingInDimAddOp < int64 > ( ) ; } <nl> - <nl> - TEST_F ( GpuAddTest , BCastAddFloat ) { TestBroadcastingAddOp < float > ( ) ; } <nl> - TEST_F ( GpuAddTest , BCastAddDouble ) { TestBroadcastingAddOp < double > ( ) ; } <nl> - TEST_F ( GpuAddTest , BCastAddHalf ) { <nl> - TestBroadcastingAddOp < Eigen : : half , float > ( ) ; <nl> - } <nl> - TEST_F ( GpuAddTest , BCastAddInt64 ) { TestBroadcastingAddOp < int64 > ( ) ; } <nl> - <nl> - TEST_F ( GpuAddTest , IncompatibleShapes ) { TestIncompatibleShapes < float > ( ) ; } <nl> - <nl> - TEST_F ( GpuAddTest , EmptyShapeBCastAddFloat ) { <nl> - TestEmptyShapeWithBroadcastingAddOp < float > ( ) ; <nl> - } <nl> - TEST_F ( GpuAddTest , EmptyShapeBCastAddDouble ) { <nl> - TestEmptyShapeWithBroadcastingAddOp < double > ( ) ; <nl> - } <nl> - <nl> - / / TEST_F ( GpuAddTest , AddV2Half ) { RunAddOp < Eigen : : half , float > ( ) ; } 
<nl> - } / / namespace <nl> - } / / end namespace tensorflow <nl> new file mode 100644 <nl> index 0000000000000 . . 1448a86322aea <nl> mmm / dev / null <nl> ppp b / tensorflow / core / kernels / mlir_generated / gpu_binary_ops_test . cc <nl> <nl> + / * Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include < cmath > <nl> + # include < limits > <nl> + # include < memory > <nl> + # include < vector > <nl> + <nl> + # include " absl / container / inlined_vector . h " <nl> + # include " tensorflow / core / common_runtime / device . h " <nl> + # include " tensorflow / core / common_runtime / device_factory . h " <nl> + # include " tensorflow / core / framework / fake_input . h " <nl> + # include " tensorflow / core / framework / node_def_builder . h " <nl> + # include " tensorflow / core / framework / tensor . h " <nl> + # include " tensorflow / core / framework / types . h " <nl> + # include " tensorflow / core / framework / types . pb . h " <nl> + # include " tensorflow / core / kernels / ops_testutil . h " <nl> + # include " tensorflow / core / lib / core / status_test_util . h " <nl> + # include " tensorflow / core / platform / test . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace { <nl> + <nl> + / / Tests are parametrized with the kernel name , the input data type and the <nl> + / / output data type . <nl> + struct BinaryTestParam { <nl> + std : : string op_name ; <nl> + DataType input_type ; <nl> + DataType output_type ; <nl> + BinaryTestParam ( const std : : string & name , DataType input , DataType output ) <nl> + : op_name ( name ) , input_type ( input ) , output_type ( output ) { } <nl> + } ; <nl> + <nl> + / / To add additional tests for other kernels , search for PLACEHOLDER in this <nl> + / / file . <nl> + <nl> + class ParametricGpuBinaryOpsTest <nl> + : public OpsTestBase , <nl> + public : : testing : : WithParamInterface < BinaryTestParam > { <nl> + protected : <nl> + void SetUp ( ) override { <nl> + std : : unique_ptr < tensorflow : : Device > device_gpu ( <nl> + tensorflow : : DeviceFactory : : NewDevice ( " GPU " , { } , <nl> + " / job : a / replica : 0 / task : 0 " ) ) ; <nl> + SetDevice ( tensorflow : : DEVICE_GPU , std : : move ( device_gpu ) ) ; <nl> + } <nl> + <nl> + template < typename T > <nl> + void SetOp ( const absl : : InlinedVector < T , 10 > & input_1 , <nl> + const TensorShape & shape_1 , <nl> + const absl : : InlinedVector < T , 10 > & input_2 , <nl> + const TensorShape & shape_2 ) { <nl> + TF_ASSERT_OK ( NodeDefBuilder ( " some_name " , GetParam ( ) . op_name ) <nl> + . Input ( FakeInput ( DataTypeToEnum < T > : : v ( ) ) ) <nl> + . Input ( FakeInput ( DataTypeToEnum < T > : : v ( ) ) ) <nl> + . 
Attr ( " T " , DataTypeToEnum < T > : : v ( ) ) <nl> + . Finalize ( node_def ( ) ) ) ; <nl> + <nl> + TF_ASSERT_OK ( InitOp ( ) ) ; <nl> + inputs_ . clear ( ) ; <nl> + AddInputFromArray < T > ( shape_1 , input_1 ) ; <nl> + AddInputFromArray < T > ( shape_2 , input_2 ) ; <nl> + } <nl> + <nl> + template < typename T , typename BaselineType , typename OutT > <nl> + void RunAndCompare ( const absl : : InlinedVector < T , 10 > & input_1 , <nl> + const TensorShape & shape_1 , <nl> + const absl : : InlinedVector < T , 10 > & input_2 , <nl> + const TensorShape & shape_2 , <nl> + const absl : : InlinedVector < OutT , 10 > & output , <nl> + const TensorShape & output_shape ) { <nl> + SetOp < T > ( input_1 , shape_1 , input_2 , shape_2 ) ; <nl> + TF_ASSERT_OK ( RunOpKernel ( ) ) ; <nl> + Tensor expected_tensor ( allocator ( ) , DataTypeToEnum < OutT > : : value , <nl> + output_shape ) ; <nl> + test : : FillValues < OutT > ( & expected_tensor , output ) ; <nl> + test : : ExpectEqual ( expected_tensor , * GetOutput ( 0 ) ) ; <nl> + } <nl> + <nl> + template < typename T , typename BaselineType , typename OutT , <nl> + typename BaselineOutT > <nl> + void TestBroadcastingExpand ( ) { <nl> + auto input_1 = absl : : InlinedVector < T , 10 > { static_cast < T > ( 10 ) } ; <nl> + auto input_2 = absl : : InlinedVector < T , 10 > { <nl> + static_cast < T > ( 1 ) , static_cast < T > ( 2 ) , static_cast < T > ( 3 ) , <nl> + static_cast < T > ( 4 ) , static_cast < T > ( 5 ) , static_cast < T > ( 6 ) } ; <nl> + absl : : InlinedVector < OutT , 10 > expected { <nl> + static_cast < OutT > ( Expected < BaselineType , BaselineOutT > ( <nl> + static_cast < BaselineType > ( input_1 [ 0 ] ) , <nl> + static_cast < BaselineType > ( input_2 [ 0 ] ) ) ) , <nl> + static_cast < OutT > ( Expected < BaselineType , BaselineOutT > ( <nl> + static_cast < BaselineType > ( input_1 [ 0 ] ) , <nl> + static_cast < BaselineType > ( input_2 [ 1 ] ) ) ) , <nl> + static_cast < OutT > ( Expected < BaselineType , BaselineOutT > ( <nl> + static_cast < BaselineType > ( input_1 [ 0 ] ) , <nl> + static_cast < BaselineType > ( input_2 [ 2 ] ) ) ) , <nl> + static_cast < OutT > ( Expected < BaselineType , BaselineOutT > ( <nl> + static_cast < BaselineType > ( input_1 [ 0 ] ) , <nl> + static_cast < BaselineType > ( input_2 [ 3 ] ) ) ) , <nl> + static_cast < OutT > ( Expected < BaselineType , BaselineOutT > ( <nl> + static_cast < BaselineType > ( input_1 [ 0 ] ) , <nl> + static_cast < BaselineType > ( input_2 [ 4 ] ) ) ) , <nl> + static_cast < OutT > ( Expected < BaselineType , BaselineOutT > ( <nl> + static_cast < BaselineType > ( input_1 [ 0 ] ) , <nl> + static_cast < BaselineType > ( input_2 [ 5 ] ) ) ) , <nl> + } ; <nl> + auto expected_shape = TensorShape ( { 6 } ) ; <nl> + RunAndCompare < T , BaselineType , OutT > ( input_1 , TensorShape ( { 1 } ) , input_2 , <nl> + TensorShape ( { 6 } ) , expected , <nl> + expected_shape ) ; <nl> + } <nl> + <nl> + template < typename T , typename BaselineType , typename OutT , <nl> + typename BaselineOutT > <nl> + void TestBroadcastingInDim ( ) { <nl> + auto input_1 = absl : : InlinedVector < T , 10 > { <nl> + static_cast < T > ( 10 ) , static_cast < T > ( 20 ) , static_cast < T > ( 30 ) } ; <nl> + auto input_2 = absl : : InlinedVector < T , 10 > { <nl> + static_cast < T > ( 1 ) , static_cast < T > ( 2 ) , static_cast < T > ( 3 ) , <nl> + static_cast < T > ( 4 ) , static_cast < T > ( 5 ) , static_cast < T > ( 6 ) } ; <nl> + absl : : InlinedVector < OutT , 10 > expected { <nl> + static_cast < OutT > ( Expected < 
BaselineType , BaselineOutT > ( <nl> + static_cast < BaselineType > ( input_1 [ 0 ] ) , <nl> + static_cast < BaselineType > ( input_2 [ 0 ] ) ) ) , <nl> + static_cast < OutT > ( Expected < BaselineType , BaselineOutT > ( <nl> + static_cast < BaselineType > ( input_1 [ 1 ] ) , <nl> + static_cast < BaselineType > ( input_2 [ 1 ] ) ) ) , <nl> + static_cast < OutT > ( Expected < BaselineType , BaselineOutT > ( <nl> + static_cast < BaselineType > ( input_1 [ 2 ] ) , <nl> + static_cast < BaselineType > ( input_2 [ 2 ] ) ) ) , <nl> + static_cast < OutT > ( Expected < BaselineType , BaselineOutT > ( <nl> + static_cast < BaselineType > ( input_1 [ 0 ] ) , <nl> + static_cast < BaselineType > ( input_2 [ 3 ] ) ) ) , <nl> + static_cast < OutT > ( Expected < BaselineType , BaselineOutT > ( <nl> + static_cast < BaselineType > ( input_1 [ 1 ] ) , <nl> + static_cast < BaselineType > ( input_2 [ 4 ] ) ) ) , <nl> + static_cast < OutT > ( Expected < BaselineType , BaselineOutT > ( <nl> + static_cast < BaselineType > ( input_1 [ 2 ] ) , <nl> + static_cast < BaselineType > ( input_2 [ 5 ] ) ) ) , <nl> + } ; <nl> + auto expected_shape = TensorShape ( { 2 , 3 } ) ; <nl> + RunAndCompare < T , BaselineType , OutT > ( input_1 , TensorShape ( { 3 } ) , input_2 , <nl> + TensorShape ( { 2 , 3 } ) , expected , <nl> + expected_shape ) ; <nl> + } <nl> + <nl> + template < typename T , typename BaselineType , typename OutT , <nl> + typename BaselineOutT > <nl> + void TestBroadcasting ( ) { <nl> + auto input_1 = <nl> + absl : : InlinedVector < T , 10 > { static_cast < T > ( 10 ) , static_cast < T > ( 20 ) } ; <nl> + auto input_2 = absl : : InlinedVector < T , 10 > { <nl> + static_cast < T > ( 1 ) , static_cast < T > ( 2 ) , static_cast < T > ( 3 ) } ; <nl> + absl : : InlinedVector < OutT , 10 > expected { <nl> + static_cast < OutT > ( Expected < BaselineType , BaselineOutT > ( <nl> + static_cast < BaselineType > ( input_1 [ 0 ] ) , <nl> + static_cast < BaselineType > ( input_2 [ 0 ] ) ) ) , <nl> + static_cast < OutT > ( Expected < BaselineType , BaselineOutT > ( <nl> + static_cast < BaselineType > ( input_1 [ 0 ] ) , <nl> + static_cast < BaselineType > ( input_2 [ 1 ] ) ) ) , <nl> + static_cast < OutT > ( Expected < BaselineType , BaselineOutT > ( <nl> + static_cast < BaselineType > ( input_1 [ 0 ] ) , <nl> + static_cast < BaselineType > ( input_2 [ 2 ] ) ) ) , <nl> + static_cast < OutT > ( Expected < BaselineType , BaselineOutT > ( <nl> + static_cast < BaselineType > ( input_1 [ 1 ] ) , <nl> + static_cast < BaselineType > ( input_2 [ 0 ] ) ) ) , <nl> + static_cast < OutT > ( Expected < BaselineType , BaselineOutT > ( <nl> + static_cast < BaselineType > ( input_1 [ 1 ] ) , <nl> + static_cast < BaselineType > ( input_2 [ 1 ] ) ) ) , <nl> + static_cast < OutT > ( Expected < BaselineType , BaselineOutT > ( <nl> + static_cast < BaselineType > ( input_1 [ 1 ] ) , <nl> + static_cast < BaselineType > ( input_2 [ 2 ] ) ) ) , <nl> + } ; <nl> + auto expected_shape = TensorShape ( { 2 , 3 } ) ; <nl> + RunAndCompare < T , BaselineType , OutT > ( input_1 , TensorShape ( { 2 , 1 } ) , input_2 , <nl> + TensorShape ( { 3 } ) , expected , <nl> + expected_shape ) ; <nl> + } <nl> + <nl> + template < typename T , typename BaselineType , typename OutT , <nl> + typename BaselineOutT > <nl> + void RunOp ( ) { <nl> + auto input_1 = { <nl> + static_cast < T > ( - std : : numeric_limits < BaselineType > : : infinity ( ) ) , <nl> + static_cast < T > ( - 0 . 1 ) , <nl> + static_cast < T > ( - 0 . 0 ) , <nl> + static_cast < T > ( 0 . 
0 ) , <nl> + static_cast < T > ( 0 . 1 ) , <nl> + static_cast < T > ( std : : numeric_limits < BaselineType > : : infinity ( ) ) } ; <nl> + auto input_2 = { <nl> + static_cast < T > ( - std : : numeric_limits < BaselineType > : : infinity ( ) ) , <nl> + static_cast < T > ( - 0 . 1 ) , <nl> + static_cast < T > ( - 0 . 0 ) , <nl> + static_cast < T > ( 0 . 0 ) , <nl> + static_cast < T > ( 0 . 1 ) , <nl> + static_cast < T > ( std : : numeric_limits < BaselineType > : : infinity ( ) ) } ; <nl> + absl : : InlinedVector < OutT , 10 > expected ; <nl> + for ( const T & inp : input_2 ) { <nl> + expected . push_back ( static_cast < OutT > ( Expected < BaselineType , BaselineOutT > ( <nl> + static_cast < BaselineType > ( inp ) , static_cast < BaselineType > ( inp ) ) ) ) ; <nl> + } <nl> + RunAndCompare < T , BaselineType , OutT > ( input_1 , TensorShape { 2 , 3 } , input_2 , <nl> + TensorShape { 2 , 3 } , expected , <nl> + TensorShape { 2 , 3 } ) ; <nl> + } <nl> + <nl> + template < typename T , typename BaselineType , typename OutT , <nl> + typename BaselineOutT > <nl> + void TestEqualShapes ( ) { <nl> + auto input_1 = { <nl> + static_cast < T > ( - std : : numeric_limits < BaselineType > : : infinity ( ) ) , <nl> + static_cast < T > ( - 0 . 1 ) , <nl> + static_cast < T > ( - 0 . 0 ) , <nl> + static_cast < T > ( 0 . 0 ) , <nl> + static_cast < T > ( 0 . 1 ) , <nl> + static_cast < T > ( std : : numeric_limits < BaselineType > : : infinity ( ) ) } ; <nl> + auto input_2 = { <nl> + static_cast < T > ( - std : : numeric_limits < BaselineType > : : infinity ( ) ) , <nl> + static_cast < T > ( - 0 . 1 ) , <nl> + static_cast < T > ( - 0 . 0 ) , <nl> + static_cast < T > ( 0 . 0 ) , <nl> + static_cast < T > ( 0 . 1 ) , <nl> + static_cast < T > ( std : : numeric_limits < BaselineType > : : infinity ( ) ) } ; <nl> + absl : : InlinedVector < OutT , 10 > expected ; <nl> + for ( const T & inp : input_2 ) { <nl> + expected . push_back ( static_cast < OutT > ( Expected < BaselineType , BaselineOutT > ( <nl> + static_cast < BaselineType > ( inp ) , static_cast < BaselineType > ( inp ) ) ) ) ; <nl> + } <nl> + RunAndCompare < T , BaselineType , OutT > ( input_1 , TensorShape { 2 , 3 } , input_2 , <nl> + TensorShape { 2 , 3 } , expected , <nl> + TensorShape { 2 , 3 } ) ; <nl> + } <nl> + <nl> + template < typename T , typename BaselineType , typename OutT , <nl> + typename BaselineOutT > <nl> + void TestOneIsScalar ( ) { <nl> + auto input_1 = static_cast < T > ( 42 ) ; <nl> + auto input_2 = { <nl> + static_cast < T > ( - std : : numeric_limits < BaselineType > : : infinity ( ) ) , <nl> + static_cast < T > ( - 0 . 1 ) , <nl> + static_cast < T > ( - 0 . 0 ) , <nl> + static_cast < T > ( 0 . 0 ) , <nl> + static_cast < T > ( 0 . 1 ) , <nl> + static_cast < T > ( std : : numeric_limits < BaselineType > : : infinity ( ) ) } ; <nl> + absl : : InlinedVector < OutT , 10 > expected ; <nl> + for ( const T & inp : input_2 ) { <nl> + expected . push_back ( static_cast < OutT > ( Expected < BaselineType , BaselineOutT > ( <nl> + static_cast < BaselineType > ( input_1 ) , static_cast < BaselineType > ( inp ) ) ) ) ; <nl> + } <nl> + RunAndCompare < T , BaselineType , OutT > ( { input_1 } , TensorShape { } , input_2 , <nl> + TensorShape { 2 , 3 } , expected , <nl> + TensorShape { 2 , 3 } ) ; <nl> + } <nl> + <nl> + template < typename T , typename BaselineType , typename OutT , <nl> + typename BaselineOutT > <nl> + void TestIncompatibleShapes ( ) { <nl> + auto input_1 = { static_cast < T > ( - 0 . 1 ) , static_cast < T > ( - 0 . 
0 ) , <nl> + static_cast < T > ( 0 . 0 ) } ; <nl> + auto input_2 = { static_cast < T > ( - 0 . 1 ) , static_cast < T > ( 0 . 0 ) } ; <nl> + <nl> + SetOp < T > ( input_1 , TensorShape { 3 } , input_2 , TensorShape { 2 } ) ; <nl> + auto status = RunOpKernel ( ) ; <nl> + EXPECT_FALSE ( status . ok ( ) ) ; <nl> + EXPECT_EQ ( status . code ( ) , error : : INVALID_ARGUMENT ) ; <nl> + } <nl> + <nl> + template < typename T , typename BaselineType , typename OutT , <nl> + typename BaselineOutT > <nl> + void TestEmptyShapeWithBroadcasting ( ) { <nl> + TensorShape input_shape_a { 2 , 0 , 1 } ; <nl> + TensorShape input_shape_b { 2 , 0 , 5 } ; <nl> + TensorShape expected_shape { 2 , 0 , 5 } ; <nl> + absl : : InlinedVector < T , 10 > empty_input = { } ; <nl> + absl : : InlinedVector < OutT , 10 > expected_result = { } ; <nl> + RunAndCompare < T , BaselineType , OutT > ( empty_input , input_shape_a , <nl> + empty_input , input_shape_b , <nl> + expected_result , expected_shape ) ; <nl> + RunAndCompare < T , BaselineType , OutT > ( empty_input , input_shape_b , <nl> + empty_input , input_shape_a , <nl> + expected_result , expected_shape ) ; <nl> + } <nl> + <nl> + template < typename BaselineType , typename BaselineOutT > <nl> + BaselineOutT Expected ( BaselineType lhs , BaselineType rhs ) { <nl> + if ( GetParam ( ) . op_name = = " AddV2 " ) { <nl> + return static_cast < BaselineOutT > ( lhs + rhs ) ; <nl> + } <nl> + / / Add the logic for creating expected values for the kernel you want to <nl> + / / test here . <nl> + / / < PLACEHOLDER > <nl> + LOG ( FATAL ) < < " Cannot generate expected result for op " <nl> + < < GetParam ( ) . op_name ; <nl> + return static_cast < BaselineOutT > ( lhs ) ; <nl> + } <nl> + } ; <nl> + <nl> + std : : vector < BinaryTestParam > GetBinaryTestParameters ( ) { <nl> + std : : vector < BinaryTestParam > parameters ; <nl> + for ( DataType dt : <nl> + std : : vector < DataType > { DT_FLOAT , DT_DOUBLE , DT_HALF , DT_INT64 } ) { <nl> + parameters . emplace_back ( " AddV2 " , dt , dt ) ; <nl> + } <nl> + / / Add the parameters ( kernel name and data types to test ) here . <nl> + / / < PLACEHOLDER > <nl> + return parameters ; <nl> + } <nl> + <nl> + # define GENERATE_DATA_TYPE_SWITCH_CASE ( dt , nt , code ) \ <nl> + switch ( dt ) { \ <nl> + case DT_FLOAT : { \ <nl> + using nt = EnumToDataType < DT_FLOAT > : : Type ; \ <nl> + code ; \ <nl> + break ; \ <nl> + } \ <nl> + case DT_DOUBLE : { \ <nl> + using nt = EnumToDataType < DT_DOUBLE > : : Type ; \ <nl> + code ; \ <nl> + break ; \ <nl> + } \ <nl> + case DT_HALF : { \ <nl> + using nt = EnumToDataType < DT_HALF > : : Type ; \ <nl> + code ; \ <nl> + break ; \ <nl> + } \ <nl> + case DT_INT64 : { \ <nl> + using nt = EnumToDataType < DT_INT64 > : : Type ; \ <nl> + code ; \ <nl> + break ; \ <nl> + } \ <nl> + case DT_BOOL : { \ <nl> + using nt = EnumToDataType < DT_BOOL > : : Type ; \ <nl> + code ; \ <nl> + break ; \ <nl> + } \ <nl> + default : \ <nl> + LOG ( FATAL ) < < " Unsupported type : " < < DataType_Name ( dt ) ; \ <nl> + } <nl> + <nl> + # define COMMA , <nl> + <nl> + # define GENERATE_TEST_CALL ( test_fn ) \ <nl> + GENERATE_DATA_TYPE_SWITCH_CASE ( \ <nl> + GetParam ( ) . input_type , NativeInT , \ <nl> + GENERATE_DATA_TYPE_SWITCH_CASE ( \ <nl> + GetParam ( ) . output_type , NativeOutT , \ <nl> + if ( GetParam ( ) . input_type = = DT_HALF ) { \ <nl> + if ( GetParam ( ) . 
output_type = = DT_HALF ) { \ <nl> + test_fn < NativeInT COMMA float COMMA NativeOutT COMMA float > ( ) ; \ <nl> + } else { \ <nl> + test_fn < \ <nl> + NativeInT COMMA float COMMA NativeOutT COMMA NativeOutT > ( ) ; \ <nl> + } \ <nl> + } else { \ <nl> + test_fn < NativeInT COMMA NativeInT COMMA NativeOutT COMMA \ <nl> + NativeOutT > ( ) ; \ <nl> + } ) ) <nl> + <nl> + TEST_P ( ParametricGpuBinaryOpsTest , RunOp ) { GENERATE_TEST_CALL ( RunOp ) ; } <nl> + <nl> + TEST_P ( ParametricGpuBinaryOpsTest , EqShapes ) { <nl> + GENERATE_TEST_CALL ( TestEqualShapes ) ; <nl> + } <nl> + <nl> + TEST_P ( ParametricGpuBinaryOpsTest , Scalar ) { <nl> + GENERATE_TEST_CALL ( TestOneIsScalar ) ; <nl> + } <nl> + <nl> + TEST_P ( ParametricGpuBinaryOpsTest , BCastExpand ) { <nl> + GENERATE_TEST_CALL ( TestBroadcastingExpand ) ; <nl> + } <nl> + <nl> + TEST_P ( ParametricGpuBinaryOpsTest , BCastInDim ) { <nl> + GENERATE_TEST_CALL ( TestBroadcastingInDim ) ; <nl> + } <nl> + <nl> + TEST_P ( ParametricGpuBinaryOpsTest , BCast ) { <nl> + GENERATE_TEST_CALL ( TestBroadcasting ) ; <nl> + } <nl> + <nl> + TEST_P ( ParametricGpuBinaryOpsTest , IncompatibleShapes ) { <nl> + GENERATE_TEST_CALL ( TestIncompatibleShapes ) ; <nl> + } <nl> + <nl> + TEST_P ( ParametricGpuBinaryOpsTest , EmptyShapeBCast ) { <nl> + GENERATE_TEST_CALL ( TestEmptyShapeWithBroadcasting ) ; <nl> + } <nl> + <nl> + INSTANTIATE_TEST_SUITE_P ( GpuBinaryOpsTests , ParametricGpuBinaryOpsTest , <nl> + : : testing : : ValuesIn ( GetBinaryTestParameters ( ) ) ) ; <nl> + } / / namespace <nl> + } / / end namespace tensorflow <nl> new file mode 100644 <nl> index 0000000000000 . . 47010eec80517 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / kernels / mlir_generated / op_definitions / greater . mlir . tmpl <nl> <nl> + func @ Greater_elem_type ( % arg0 : tensor < * xelem_type > , % arg1 : tensor < * xelem_type > ) <nl> + - > tensor < * xi1 > attributes { tf_entry , llvm . emit_c_interface } { <nl> + % 0 = " tf . Greater " ( % arg0 , % arg1 ) { T = elem_type , device = " " } <nl> + : ( tensor < * xelem_type > , tensor < * xelem_type > ) - > tensor < * xi1 > <nl> + return % 0 : tensor < * xi1 > <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . 63c0ce9caa2f1 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / kernels / mlir_generated / op_definitions / greater_equal . mlir . tmpl <nl> <nl> + func @ GreaterEqual_elem_type ( % arg0 : tensor < * xelem_type > , % arg1 : tensor < * xelem_type > ) <nl> + - > tensor < * xi1 > attributes { tf_entry , llvm . emit_c_interface } { <nl> + % 0 = " tf . GreaterEqual " ( % arg0 , % arg1 ) { T = elem_type , device = " " } <nl> + : ( tensor < * xelem_type > , tensor < * xelem_type > ) - > tensor < * xi1 > <nl> + return % 0 : tensor < * xi1 > <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . 59496dc7b1668 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / kernels / mlir_generated / op_definitions / less . mlir . tmpl <nl> <nl> + func @ Less_elem_type ( % arg0 : tensor < * xelem_type > , % arg1 : tensor < * xelem_type > ) <nl> + - > tensor < * xi1 > attributes { tf_entry , llvm . emit_c_interface } { <nl> + % 0 = " tf . Less " ( % arg0 , % arg1 ) { T = elem_type , device = " " } <nl> + : ( tensor < * xelem_type > , tensor < * xelem_type > ) - > tensor < * xi1 > <nl> + return % 0 : tensor < * xi1 > <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . 245f27abf9a0c <nl> mmm / dev / null <nl> ppp b / tensorflow / core / kernels / mlir_generated / op_definitions / less_equal . mlir . 
tmpl <nl> <nl> + func @ LessEqual_elem_type ( % arg0 : tensor < * xelem_type > , % arg1 : tensor < * xelem_type > ) <nl> + - > tensor < * xi1 > attributes { tf_entry , llvm . emit_c_interface } { <nl> + % 0 = " tf . LessEqual " ( % arg0 , % arg1 ) { T = elem_type , device = " " } <nl> + : ( tensor < * xelem_type > , tensor < * xelem_type > ) - > tensor < * xi1 > <nl> + return % 0 : tensor < * xi1 > <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . c917b9a6c0d03 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / kernels / mlir_generated / op_definitions / maximum . mlir . tmpl <nl> <nl> + func @ Maximum_elem_type ( % arg0 : tensor < * xelem_type > , % arg1 : tensor < * xelem_type > ) <nl> + - > tensor < * xelem_type > attributes { tf_entry , llvm . emit_c_interface } { <nl> + % 0 = " tf . Maximum " ( % arg0 , % arg1 ) { T = elem_type , device = " " } <nl> + : ( tensor < * xelem_type > , tensor < * xelem_type > ) - > tensor < * xelem_type > <nl> + return % 0 : tensor < * xelem_type > <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . 6d8987b0ce374 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / kernels / mlir_generated / op_definitions / minimum . mlir . tmpl <nl> <nl> + func @ Minimum_elem_type ( % arg0 : tensor < * xelem_type > , % arg1 : tensor < * xelem_type > ) <nl> + - > tensor < * xelem_type > attributes { tf_entry , llvm . emit_c_interface } { <nl> + % 0 = " tf . Minimum " ( % arg0 , % arg1 ) { T = elem_type , device = " " } <nl> + : ( tensor < * xelem_type > , tensor < * xelem_type > ) - > tensor < * xelem_type > <nl> + return % 0 : tensor < * xelem_type > <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . 8efef8bc2f20f <nl> mmm / dev / null <nl> ppp b / tensorflow / core / kernels / mlir_generated / op_definitions / not_equal . mlir . tmpl <nl> <nl> + func @ NotEqual_elem_type ( % arg0 : tensor < * xelem_type > , % arg1 : tensor < * xelem_type > ) <nl> + - > tensor < * xi1 > attributes { tf_entry , llvm . emit_c_interface } { <nl> + % 0 = " tf . NotEqual " ( % arg0 , % arg1 ) { T = elem_type , device = " " } <nl> + : ( tensor < * xelem_type > , tensor < * xelem_type > ) - > tensor < * xi1 > <nl> + return % 0 : tensor < * xi1 > <nl> + } <nl> similarity index 100 % <nl> rename from tensorflow / core / kernels / mlir_generated / unranked_gpu_add . cc <nl> rename to tensorflow / core / kernels / mlir_generated / unranked_op_gpu_add . cc <nl> mmm a / tensorflow / core / kernels / mlir_generated / unranked_op_gpu_base . h <nl> ppp b / tensorflow / core / kernels / mlir_generated / unranked_op_gpu_base . h <nl> limitations under the License . <nl> <nl> namespace tensorflow { <nl> <nl> + / / A type - erased version of the UnrankedMemRefType to allow it to be used <nl> + / / as the return type of an extern " C " function on windows . <nl> + struct UntypedUnrankedMemRefType { <nl> + int64_t rank ; <nl> + void * descriptor ; <nl> + } ; <nl> + <nl> + template < typename ElemType > <nl> + UnrankedMemRefType < ElemType > ConvertToTyped ( UntypedUnrankedMemRefType desc ) { <nl> + return { desc . rank , desc . descriptor } ; <nl> + } <nl> + <nl> / / Returns a pointer to an allocated MlirTensorBuffer that takes ownership of <nl> / / pre - allocated memory . 
<nl> TensorBuffer * GetMlirTensorBuffer ( const void * ptr , size_t size , <nl> class MlirUnrankedOp : public OpKernel { <nl> GENERATE_BINARY_KERNEL ( tf_op , mlir_type , tf_data_type , data_type ) \ <nl> REGISTER_KERNEL ( tf_op , mlir_type , data_type ) <nl> <nl> - # define GENERATE_BINARY_KERNEL ( tf_op , mlir_type , tf_data_type , data_type ) \ <nl> - extern " C " : : UnrankedMemRefType < data_type > MLIR_FUNCTION ( tf_op , mlir_type ) ( \ <nl> - tensorflow : : OpKernelContext * ctx , \ <nl> - const : : UnrankedMemRefType < data_type > * arg1 , \ <nl> - const : : UnrankedMemRefType < data_type > * arg2 ) ; \ <nl> - \ <nl> - namespace { \ <nl> - class MlirUnranked # # tf_op # # mlir_type # # Op \ <nl> - : public MlirUnrankedOp < tf_data_type , data_type , \ <nl> - MlirUnranked # # tf_op # # mlir_type # # Op > { \ <nl> - public : \ <nl> - using MlirUnrankedOp : : MlirUnrankedOp ; \ <nl> - \ <nl> - static : : UnrankedMemRefType < data_type > Invoke ( \ <nl> - OpKernelContext * ctx , \ <nl> - llvm : : ArrayRef < : : UnrankedMemRefType < data_type > > args ) { \ <nl> - return MLIR_FUNCTION ( tf_op , mlir_type ) ( ctx , & args [ 0 ] , & args [ 1 ] ) ; \ <nl> - } \ <nl> - } ; \ <nl> + # define GENERATE_BINARY_KERNEL ( tf_op , mlir_type , tf_data_type , data_type ) \ <nl> + extern " C " UntypedUnrankedMemRefType MLIR_FUNCTION ( tf_op , mlir_type ) ( \ <nl> + tensorflow : : OpKernelContext * ctx , \ <nl> + const : : UnrankedMemRefType < data_type > * arg1 , \ <nl> + const : : UnrankedMemRefType < data_type > * arg2 ) ; \ <nl> + \ <nl> + namespace { \ <nl> + class MlirUnranked # # tf_op # # mlir_type # # Op \ <nl> + : public MlirUnrankedOp < tf_data_type , data_type , \ <nl> + MlirUnranked # # tf_op # # mlir_type # # Op > { \ <nl> + public : \ <nl> + using MlirUnrankedOp : : MlirUnrankedOp ; \ <nl> + \ <nl> + static : : UnrankedMemRefType < data_type > Invoke ( \ <nl> + OpKernelContext * ctx , \ <nl> + llvm : : ArrayRef < : : UnrankedMemRefType < data_type > > args ) { \ <nl> + return ConvertToTyped < data_type > ( \ <nl> + MLIR_FUNCTION ( tf_op , mlir_type ) ( ctx , & args [ 0 ] , & args [ 1 ] ) ) ; \ <nl> + } \ <nl> + } ; \ <nl> } <nl> <nl> # define GENERATE_AND_REGISTER_UNARY_KERNEL ( tf_op , mlir_type , tf_data_type , \ <nl> class MlirUnrankedOp : public OpKernel { <nl> # define GENERATE_UNARY_KERNEL ( tf_op , mlir_type , tf_data_type , data_type ) \ <nl> GENERATE_UNARY_KERNEL2 ( tf_op , mlir_type , tf_data_type , data_type , data_type ) <nl> <nl> - # define GENERATE_UNARY_KERNEL2 ( tf_op , mlir_type , tf_data_type , data_type , \ <nl> - input_data_type ) \ <nl> - extern " C " : : UnrankedMemRefType < data_type > MLIR_FUNCTION ( tf_op , mlir_type ) ( \ <nl> - tensorflow : : OpKernelContext * ctx , \ <nl> - const : : UnrankedMemRefType < input_data_type > * arg ) ; \ <nl> - \ <nl> - namespace { \ <nl> - class MlirUnranked # # tf_op # # mlir_type # # Op \ <nl> - : public MlirUnrankedOp < tf_data_type , data_type , \ <nl> - MlirUnranked # # tf_op # # mlir_type # # Op , \ <nl> - input_data_type > { \ <nl> - public : \ <nl> - using MlirUnrankedOp : : MlirUnrankedOp ; \ <nl> - \ <nl> - static : : UnrankedMemRefType < data_type > Invoke ( \ <nl> - OpKernelContext * ctx , \ <nl> - llvm : : ArrayRef < : : UnrankedMemRefType < input_data_type > > args ) { \ <nl> - return MLIR_FUNCTION ( tf_op , mlir_type ) ( ctx , & args [ 0 ] ) ; \ <nl> - } \ <nl> - } ; \ <nl> + # define GENERATE_UNARY_KERNEL2 ( tf_op , mlir_type , tf_data_type , data_type , \ <nl> + input_data_type ) \ <nl> + extern " C " 
UntypedUnrankedMemRefType MLIR_FUNCTION ( tf_op , mlir_type ) ( \ <nl> + tensorflow : : OpKernelContext * ctx , \ <nl> + const : : UnrankedMemRefType < input_data_type > * arg ) ; \ <nl> + \ <nl> + namespace { \ <nl> + class MlirUnranked # # tf_op # # mlir_type # # Op \ <nl> + : public MlirUnrankedOp < tf_data_type , data_type , \ <nl> + MlirUnranked # # tf_op # # mlir_type # # Op , \ <nl> + input_data_type > { \ <nl> + public : \ <nl> + using MlirUnrankedOp : : MlirUnrankedOp ; \ <nl> + \ <nl> + static : : UnrankedMemRefType < data_type > Invoke ( \ <nl> + OpKernelContext * ctx , \ <nl> + llvm : : ArrayRef < : : UnrankedMemRefType < input_data_type > > args ) { \ <nl> + return ConvertToTyped < data_type > ( \ <nl> + MLIR_FUNCTION ( tf_op , mlir_type ) ( ctx , & args [ 0 ] ) ) ; \ <nl> + } \ <nl> + } ; \ <nl> } <nl> <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / kernels / sparse_xent_op . cc <nl> ppp b / tensorflow / core / kernels / sparse_xent_op . cc <nl> limitations under the License . <nl> # define EIGEN_USE_THREADS <nl> <nl> # include " tensorflow / core / kernels / sparse_xent_op . h " <nl> - <nl> # include " third_party / eigen3 / unsupported / Eigen / CXX11 / Tensor " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> REGISTER ( CPU , float , int32 ) <nl> REGISTER ( CPU , float , int64 ) <nl> REGISTER ( CPU , double , int32 ) <nl> REGISTER ( CPU , double , int64 ) <nl> - REGISTER ( CPU , bfloat16 , int32 ) <nl> - REGISTER ( CPU , bfloat16 , int64 ) <nl> REGISTER ( CPU , Eigen : : half , int32 ) <nl> REGISTER ( CPU , Eigen : : half , int64 ) <nl> <nl> mmm a / tensorflow / core / kernels / sparse_xent_op_test . cc <nl> ppp b / tensorflow / core / kernels / sparse_xent_op_test . cc <nl> limitations under the License . <nl> <nl> namespace tensorflow { <nl> <nl> - static Graph * SparseXent ( int batch_size , int num_classes , DataType value_type ) { <nl> + static Graph * SparseXent ( int batch_size , int num_classes ) { <nl> Graph * g = new Graph ( OpRegistry : : Global ( ) ) ; <nl> - Tensor logits ( value_type , TensorShape ( { batch_size , num_classes } ) ) ; <nl> + Tensor logits ( DT_FLOAT , TensorShape ( { batch_size , num_classes } ) ) ; <nl> logits . flat < float > ( ) . setRandom ( ) ; <nl> Tensor labels ( DT_INT64 , TensorShape ( { batch_size } ) ) ; <nl> std : : random_device rd ; <nl> static Graph * SparseXent ( int batch_size , int num_classes , DataType value_type ) { <nl> return g ; <nl> } <nl> <nl> - # define BM_SparseXentDev ( BATCH , CLASS , DEVICE , DTYPE ) \ <nl> - static void BM_SparseXent # # _ # # BATCH # # _ # # CLASS # # _ # # DEVICE # # _ # # DTYPE ( \ <nl> + # define BM_SparseXentDev ( BATCH , CLASS , DEVICE ) \ <nl> + static void BM_SparseXent # # _ # # BATCH # # _ # # CLASS # # _ # # DEVICE ( \ <nl> : : testing : : benchmark : : State & state ) { \ <nl> - test : : Benchmark ( # DEVICE , SparseXent ( BATCH , CLASS , DTYPE ) , \ <nl> + test : : Benchmark ( # DEVICE , SparseXent ( BATCH , CLASS ) , \ <nl> / * old_benchmark_api * / false ) \ <nl> . Run ( state ) ; \ <nl> state . SetItemsProcessed ( static_cast < int64 > ( state . 
iterations ( ) ) * BATCH * \ <nl> CLASS ) ; \ <nl> } \ <nl> - BENCHMARK ( BM_SparseXent # # _ # # BATCH # # _ # # CLASS # # _ # # DEVICE # # _ # # DTYPE ) ; <nl> - <nl> - # define BM_SPARSE_XENT_DEV_CPU ( DTYPE ) \ <nl> - BM_SparseXentDev ( 8 , 1000000 , cpu , DTYPE ) ; \ <nl> - BM_SparseXentDev ( 16 , 10000 , cpu , DTYPE ) ; \ <nl> - BM_SparseXentDev ( 16 , 100000 , cpu , DTYPE ) ; \ <nl> - BM_SparseXentDev ( 32 , 10000 , cpu , DTYPE ) ; \ <nl> - BM_SparseXentDev ( 32 , 100000 , cpu , DTYPE ) ; \ <nl> - BM_SparseXentDev ( 64 , 10000 , cpu , DTYPE ) ; \ <nl> - BM_SparseXentDev ( 64 , 100000 , cpu , DTYPE ) ; <nl> - <nl> - / / CPU <nl> - BM_SPARSE_XENT_DEV_CPU ( DT_FLOAT ) ; <nl> - BM_SPARSE_XENT_DEV_CPU ( DT_BFLOAT16 ) ; <nl> + BENCHMARK ( BM_SparseXent # # _ # # BATCH # # _ # # CLASS # # _ # # DEVICE ) ; <nl> <nl> / / / The representative tests for ptb_word on GPU <nl> # if GOOGLE_CUDA | | TENSORFLOW_USE_ROCM <nl> - BM_SparseXentDev ( 8 , 1000000 , gpu , DT_FLOAT ) ; <nl> + BM_SparseXentDev ( 8 , 1000000 , gpu ) ; <nl> <nl> - BM_SparseXentDev ( 16 , 10000 , gpu , DT_FLOAT ) ; <nl> - BM_SparseXentDev ( 16 , 30000 , gpu , DT_FLOAT ) ; <nl> - BM_SparseXentDev ( 16 , 100000 , gpu , DT_FLOAT ) ; <nl> + BM_SparseXentDev ( 16 , 10000 , gpu ) ; <nl> + BM_SparseXentDev ( 16 , 30000 , gpu ) ; <nl> + BM_SparseXentDev ( 16 , 100000 , gpu ) ; <nl> <nl> - BM_SparseXentDev ( 32 , 10000 , gpu , DT_FLOAT ) ; <nl> - BM_SparseXentDev ( 32 , 30000 , gpu , DT_FLOAT ) ; <nl> - BM_SparseXentDev ( 32 , 100000 , gpu , DT_FLOAT ) ; <nl> + BM_SparseXentDev ( 32 , 10000 , gpu ) ; <nl> + BM_SparseXentDev ( 32 , 30000 , gpu ) ; <nl> + BM_SparseXentDev ( 32 , 100000 , gpu ) ; <nl> <nl> - BM_SparseXentDev ( 64 , 10000 , gpu , DT_FLOAT ) ; <nl> - BM_SparseXentDev ( 64 , 30000 , gpu , DT_FLOAT ) ; <nl> - BM_SparseXentDev ( 64 , 100000 , gpu , DT_FLOAT ) ; <nl> + BM_SparseXentDev ( 64 , 10000 , gpu ) ; <nl> + BM_SparseXentDev ( 64 , 30000 , gpu ) ; <nl> + BM_SparseXentDev ( 64 , 100000 , gpu ) ; <nl> # endif / / GOOGLE_CUDA | | TENSORFLOW_USE_ROCM <nl> <nl> + / / CPU <nl> + BM_SparseXentDev ( 8 , 1000000 , cpu ) ; <nl> + <nl> + BM_SparseXentDev ( 16 , 10000 , cpu ) ; <nl> + BM_SparseXentDev ( 16 , 100000 , cpu ) ; <nl> + <nl> + BM_SparseXentDev ( 32 , 10000 , cpu ) ; <nl> + BM_SparseXentDev ( 32 , 100000 , cpu ) ; <nl> + <nl> + BM_SparseXentDev ( 64 , 10000 , cpu ) ; <nl> + BM_SparseXentDev ( 64 , 100000 , cpu ) ; <nl> + <nl> } / / end namespace tensorflow <nl> mmm a / tensorflow / core / platform / default / logging . cc <nl> ppp b / tensorflow / core / platform / default / logging . cc <nl> void TFDefaultLogSink : : Send ( const TFLogEntry & entry ) { <nl> __android_log_write ( android_log_level , " native " , ss . str ( ) . c_str ( ) ) ; <nl> <nl> / / Also log to stderr ( for standalone Android apps ) . <nl> - std : : cerr < < " native : " < < ss . str ( ) < < std : : endl ; <nl> + / / Don ' t use ' std : : cerr ' since it crashes on Android . <nl> + fprintf ( stderr , " native : % s \ n " , ss . str ( ) . c_str ( ) ) ; <nl> <nl> / / Android logging at level FATAL does not terminate execution , so abort ( ) <nl> / / is still required to stop the program . <nl> mmm a / tensorflow / core / profiler / convert / post_process_single_host_xplane . cc <nl> ppp b / tensorflow / core / profiler / convert / post_process_single_host_xplane . cc <nl> limitations under the License . 
<nl> <nl> namespace tensorflow { <nl> namespace profiler { <nl> - <nl> - void MergeHostPlanes ( XSpace * space ) { <nl> - const XPlane * cupti_driver_api_plane = <nl> - FindPlaneWithName ( * space , kCuptiDriverApiPlaneName ) ; <nl> - const XPlane * python_tracer_plane = <nl> - FindPlaneWithName ( * space , kPythonTracerPlaneName ) ; <nl> - if ( cupti_driver_api_plane | | python_tracer_plane ) { <nl> - XPlane * host_plane = <nl> - FindOrAddMutablePlaneWithName ( space , kHostThreadsPlaneName ) ; <nl> - if ( cupti_driver_api_plane ) { <nl> - MergePlanes ( * cupti_driver_api_plane , host_plane ) ; <nl> - } <nl> - if ( python_tracer_plane ) { <nl> - MergePlanes ( * python_tracer_plane , host_plane ) ; <nl> - } <nl> - SortXLinesBy ( host_plane , XLinesComparatorByName ( ) ) ; <nl> - if ( cupti_driver_api_plane ) { <nl> - RemovePlane ( space , cupti_driver_api_plane ) ; <nl> - } <nl> - if ( python_tracer_plane ) { <nl> - RemovePlane ( space , python_tracer_plane ) ; <nl> - } <nl> + namespace { <nl> + <nl> + / / Merges XPlanes generated by TraceMe , CUPTI API trace and Python tracer . <nl> + void MergeHostPlanesAndSortLines ( XSpace * space ) { <nl> + XPlane * host_plane = <nl> + FindOrAddMutablePlaneWithName ( space , kHostThreadsPlaneName ) ; <nl> + std : : vector < const XPlane * > additional_host_planes = FindPlanesWithNames ( <nl> + * space , { kCuptiDriverApiPlaneName , kPythonTracerPlaneName } ) ; <nl> + if ( ! additional_host_planes . empty ( ) ) { <nl> + MergePlanes ( additional_host_planes , host_plane ) ; <nl> + RemovePlanes ( space , additional_host_planes ) ; <nl> + } <nl> + SortXLinesBy ( host_plane , XLinesComparatorByName ( ) ) ; <nl> + } <nl> + <nl> + } / / namespace <nl> + <nl> void PostProcessSingleHostXSpace ( XSpace * space , uint64 start_time_ns ) { <nl> VLOG ( 3 ) < < " Post processing local profiler XSpace . " ; <nl> / / Post processing the collected XSpace without holding the profiler lock . <nl> - / / 1 . Merge plane of host events with plane of CUPTI driver api . <nl> - MergeHostPlanes ( space ) ; <nl> - <nl> + / / 1 . Merge all host planes and sort lines by name . <nl> + MergeHostPlanesAndSortLines ( space ) ; <nl> / / 2 . Normalize all timestamps by shifting timeline to profiling start time . <nl> / / NOTE : this has to be done before sorting XSpace due to timestamp overflow . <nl> NormalizeTimestamps ( space , start_time_ns ) ; <nl> mmm a / tensorflow / core / profiler / convert / post_process_single_host_xplane . h <nl> ppp b / tensorflow / core / profiler / convert / post_process_single_host_xplane . h <nl> limitations under the License . <nl> namespace tensorflow { <nl> namespace profiler { <nl> <nl> - / / Merges XPlanes generated by TraceMe , CUPTI API trace and Python tracer . <nl> - void MergeHostPlanes ( XSpace * space ) ; <nl> <nl> / / Post process XSpaces collected locally from multiple profilers . <nl> void PostProcessSingleHostXSpace ( XSpace * space , uint64 start_time_ns ) ; <nl> <nl> mmm a / tensorflow / core / profiler / utils / xplane_utils . cc <nl> ppp b / tensorflow / core / profiler / utils / xplane_utils . cc <nl> limitations under the License . <nl> # include < vector > <nl> <nl> # include " absl / container / flat_hash_map . h " <nl> + # include " absl / container / flat_hash_set . h " <nl> # include " absl / strings / match . h " <nl> # include " absl / strings / string_view . h " <nl> # include " tensorflow / core / platform / logging . h " <nl> namespace { <nl> / / Returns the index of the first element in array for which pred is true . 
<nl> / / Returns - 1 if no such element is found . <nl> template < typename T , typename Pred > <nl> - int FindIf ( const protobuf : : RepeatedPtrField < T > & array , Pred & & pred ) { <nl> + int Find ( const protobuf : : RepeatedPtrField < T > & array , const Pred & pred ) { <nl> for ( int i = 0 ; i < array . size ( ) ; + + i ) { <nl> if ( pred ( & array . Get ( i ) ) ) return i ; <nl> } <nl> return - 1 ; <nl> } <nl> <nl> + / / Returns the indices of all elements in array for which pred is true . <nl> + template < typename T , typename Pred > <nl> + std : : vector < int > FindAll ( const protobuf : : RepeatedPtrField < T > & array , <nl> + const Pred & pred ) { <nl> + std : : vector < int > indices ; <nl> + for ( int i = 0 ; i < array . size ( ) ; + + i ) { <nl> + if ( pred ( & array . Get ( i ) ) ) indices . push_back ( i ) ; <nl> + } <nl> + return indices ; <nl> + } <nl> + <nl> + template < typename T > <nl> + void RemoveAt ( protobuf : : RepeatedPtrField < T > * array , <nl> + const std : : vector < int > & indices ) { <nl> + if ( indices . empty ( ) ) return ; <nl> + if ( array - > size ( ) = = indices . size ( ) ) { <nl> + / / Assumes that ' indices ' consists of [ 0 . . . N - 1 ] . <nl> + array - > Clear ( ) ; <nl> + return ; <nl> + } <nl> + auto remove_iter = indices . begin ( ) ; <nl> + int i = * ( remove_iter + + ) ; <nl> + for ( int j = i + 1 ; j < array - > size ( ) ; + + j ) { <nl> + if ( remove_iter ! = indices . end ( ) & & * remove_iter = = j ) { <nl> + + + remove_iter ; <nl> + } else { <nl> + array - > SwapElements ( j , i + + ) ; <nl> + } <nl> + } <nl> + array - > DeleteSubrange ( i , array - > size ( ) - i ) ; <nl> + } <nl> + <nl> / / Removes the given element from array . <nl> template < typename T > <nl> void Remove ( protobuf : : RepeatedPtrField < T > * array , const T * elem ) { <nl> - int i = FindIf ( * array , [ elem ] ( const T * e ) { return elem = = e ; } ) ; <nl> - if ( i = = - 1 ) return ; <nl> - for ( ; i < array - > size ( ) - 1 ; + + i ) { <nl> - array - > SwapElements ( i + 1 , i ) ; <nl> - } <nl> - array - > RemoveLast ( ) ; <nl> + int i = Find ( * array , [ elem ] ( const T * e ) { return elem = = e ; } ) ; <nl> + RemoveAt ( array , { i } ) ; <nl> } <nl> <nl> template < typename T , typename Pred > <nl> void RemoveIf ( protobuf : : RepeatedPtrField < T > * array , Pred & & pred ) { <nl> - int i = FindIf ( * array , pred ) ; <nl> - if ( i = = - 1 ) return ; <nl> - for ( int j = i + 1 ; j < array - > size ( ) ; + + j ) { <nl> - if ( ! pred ( & array - > Get ( j ) ) ) array - > SwapElements ( j , i + + ) ; <nl> - } <nl> - array - > DeleteSubrange ( i , array - > size ( ) - i ) ; <nl> + std : : vector < int > indices = FindAll ( * array , pred ) ; <nl> + RemoveAt ( array , indices ) ; <nl> } <nl> <nl> } / / namespace <nl> <nl> const XPlane * FindPlaneWithName ( const XSpace & space , absl : : string_view name ) { <nl> - int i = FindIf ( space . planes ( ) , <nl> - [ name ] ( const XPlane * plane ) { return plane - > name ( ) = = name ; } ) ; <nl> + int i = Find ( space . planes ( ) , <nl> + [ name ] ( const XPlane * plane ) { return plane - > name ( ) = = name ; } ) ; <nl> return ( i ! = - 1 ) ? & space . planes ( i ) : nullptr ; <nl> } <nl> <nl> + std : : vector < const XPlane * > FindPlanesWithNames ( <nl> + const XSpace & space , const std : : vector < absl : : string_view > & names ) { <nl> + absl : : flat_hash_set < absl : : string_view > names_set ( names . begin ( ) , names . 
end ( ) ) ; <nl> + std : : vector < int > indices = <nl> + FindAll ( space . planes ( ) , [ & names_set ] ( const XPlane * plane ) { <nl> + return names_set . contains ( plane - > name ( ) ) ; <nl> + } ) ; <nl> + std : : vector < const XPlane * > planes ; <nl> + planes . reserve ( indices . size ( ) ) ; <nl> + for ( int i : indices ) { <nl> + planes . push_back ( & space . planes ( i ) ) ; <nl> + } <nl> + return planes ; <nl> + } <nl> + <nl> XPlane * FindMutablePlaneWithName ( XSpace * space , absl : : string_view name ) { <nl> - int i = FindIf ( space - > planes ( ) , <nl> - [ name ] ( const XPlane * plane ) { return plane - > name ( ) = = name ; } ) ; <nl> + int i = Find ( space - > planes ( ) , <nl> + [ name ] ( const XPlane * plane ) { return plane - > name ( ) = = name ; } ) ; <nl> return ( i ! = - 1 ) ? space - > mutable_planes ( i ) : nullptr ; <nl> } <nl> <nl> std : : vector < XPlane * > FindMutablePlanesWithPrefix ( XSpace * space , <nl> } <nl> <nl> const XLine * FindLineWithId ( const XPlane & plane , int64 id ) { <nl> - int i = FindIf ( plane . lines ( ) , <nl> - [ id ] ( const XLine * line ) { return line - > id ( ) = = id ; } ) ; <nl> + int i = <nl> + Find ( plane . lines ( ) , [ id ] ( const XLine * line ) { return line - > id ( ) = = id ; } ) ; <nl> return ( i ! = - 1 ) ? & plane . lines ( i ) : nullptr ; <nl> } <nl> <nl> void RemovePlane ( XSpace * space , const XPlane * plane ) { <nl> Remove ( space - > mutable_planes ( ) , plane ) ; <nl> } <nl> <nl> + void RemovePlanes ( XSpace * space , const std : : vector < const XPlane * > & planes ) { <nl> + absl : : flat_hash_set < const XPlane * > planes_set ( planes . begin ( ) , planes . end ( ) ) ; <nl> + RemoveIf ( space - > mutable_planes ( ) , [ & planes_set ] ( const XPlane * plane ) { <nl> + return planes_set . contains ( plane ) ; <nl> + } ) ; <nl> + } <nl> + <nl> void RemoveLine ( XPlane * plane , const XLine * line ) { <nl> DCHECK ( line ! = nullptr ) ; <nl> Remove ( plane - > mutable_lines ( ) , line ) ; <nl> void MergePlanes ( const XPlane & src_plane , XPlane * dst_plane ) { <nl> } ) ; <nl> } <nl> <nl> + void MergePlanes ( const std : : vector < const XPlane * > & src_planes , <nl> + XPlane * dst_plane ) { <nl> + for ( const XPlane * src_plane : src_planes ) { <nl> + MergePlanes ( * src_plane , dst_plane ) ; <nl> + } <nl> + } <nl> + <nl> uint64 GetStartTimestampNs ( const XPlane & plane ) { <nl> int64 plane_timestamp = 0 ; <nl> for ( const auto & line : plane . lines ( ) ) { <nl> mmm a / tensorflow / core / profiler / utils / xplane_utils . h <nl> ppp b / tensorflow / core / profiler / utils / xplane_utils . h <nl> inline Timespan XEventTimespan ( const XEvent & event ) { <nl> const XPlane * FindPlaneWithName ( const XSpace & space , absl : : string_view name ) ; <nl> XPlane * FindMutablePlaneWithName ( XSpace * space , absl : : string_view name ) ; <nl> <nl> + / / Returns the planes with the given names , if found . <nl> + std : : vector < const XPlane * > FindPlanesWithNames ( <nl> + const XSpace & space , const std : : vector < absl : : string_view > & names ) ; <nl> + <nl> / / Returns the plane with the given name in the container . If necessary , adds a <nl> / / new plane to the container . 
<nl> XPlane * FindOrAddMutablePlaneWithName ( XSpace * space , absl : : string_view name ) ; <nl> const XLine * FindLineWithId ( const XPlane & plane , int64 id ) ; <nl> XStat * FindOrAddMutableStat ( const XStatMetadata & stat_metadata , XEvent * event ) ; <nl> <nl> void RemovePlane ( XSpace * space , const XPlane * plane ) ; <nl> + void RemovePlanes ( XSpace * space , const std : : vector < const XPlane * > & planes ) ; <nl> void RemoveLine ( XPlane * plane , const XLine * line ) ; <nl> void RemoveEvents ( XLine * line , <nl> const absl : : flat_hash_set < const XEvent * > & events ) ; <nl> std : : vector < XEvent * > GetSortedEvents ( XPlane * plane , Compare comp , <nl> void NormalizeTimestamps ( XPlane * plane , uint64 start_time_ns ) ; <nl> void NormalizeTimestamps ( XSpace * space , uint64 start_time_ns ) ; <nl> <nl> - / / Merge Xplane src_plane into Xplane dst_plane , both plane level stats , lines , <nl> - / / events and event level stats are merged ; If src_plane and dst_plane both have <nl> - / / the same line , which have different start timestamps , we will normalize the <nl> - / / events offset timestamp correspondingly . <nl> + / / Merges src_plane into dst_plane . Both plane level stats , lines , events and <nl> + / / event level stats are merged . If src_plane and dst_plane both have the same <nl> + / / line , which have different start timestamps , we will normalize the events <nl> + / / offset timestamp correspondingly . <nl> void MergePlanes ( const XPlane & src_plane , XPlane * dst_plane ) ; <nl> <nl> + / / Merges each plane in src_planes into dst_plane . <nl> + void MergePlanes ( const std : : vector < const XPlane * > & src_planes , <nl> + XPlane * dst_plane ) ; <nl> + <nl> / / Plane ' s start timestamp is defined as the minimum of all lines ' start <nl> / / timestamps . If no line exists , returns 0 . <nl> uint64 GetStartTimestampNs ( const XPlane & plane ) ; <nl> mmm a / tensorflow / core / protobuf / config . proto <nl> ppp b / tensorflow / core / protobuf / config . proto <nl> message ConfigProto { <nl> / / The XLA fusion autotuner can improve performance by executing a heuristic <nl> / / search on the compiler parameters . <nl> int64 xla_fusion_autotuner_thresh = 15 ; <nl> + <nl> + / / Whether runtime execution uses TFRT . <nl> + bool use_tfrt = 18 ; <nl> + <nl> + / / Next : 19 <nl> + } <nl> <nl> Experimental experimental = 16 ; <nl> mmm a / tensorflow / core / public / version . h <nl> ppp b / tensorflow / core / public / version . h <nl> limitations under the License . <nl> <nl> # define TF_GRAPH_DEF_VERSION_MIN_PRODUCER 0 <nl> # define TF_GRAPH_DEF_VERSION_MIN_CONSUMER 0 <nl> - # define TF_GRAPH_DEF_VERSION 608 / / Updated : 2020 / 12 / 7 <nl> + # define TF_GRAPH_DEF_VERSION 609 / / Updated : 2020 / 12 / 8 <nl> <nl> / / Checkpoint compatibility versions ( the versions field in SavedSliceMeta ) . <nl> / / <nl> mmm a / tensorflow / core / tpu / tpu_on_demand_compiler . cc <nl> ppp b / tensorflow / core / tpu / tpu_on_demand_compiler . 
cc <nl> class TpuCompiler : public Compiler { <nl> StatusOr < std : : unique_ptr < HloModule > > RunHloPasses ( <nl> std : : unique_ptr < HloModule > module , <nl> stream_executor : : StreamExecutor * executor , <nl> - stream_executor : : DeviceMemoryAllocator * device_allocator ) override { <nl> + const CompileOptions & options ) override { <nl> XLA_HloModule hlo_module ; <nl> XLA_HloModule result ; <nl> auto cleanup = xla : : MakeCleanup ( [ & hlo_module , & result ] ( ) { <nl> class TpuCompiler : public Compiler { <nl> } ) ; <nl> hlo_module . module_config = HloModuleConfigToC ( module - > config ( ) ) ; <nl> hlo_module . proto = stream_executor : : tpu : : SerializeProto ( module - > ToProto ( ) ) ; <nl> - auto allocator = ApiConverter : : ToC ( device_allocator ) ; <nl> + auto allocator = ApiConverter : : ToC ( options . device_allocator ) ; <nl> StatusHelper status ; <nl> ExecutorApiFn ( ) - > TpuCompiler_RunHloPassesFn ( <nl> compiler_ , & hlo_module , <nl> class TpuCompiler : public Compiler { <nl> StatusOr < std : : unique_ptr < Executable > > RunBackend ( <nl> std : : unique_ptr < HloModule > module , <nl> stream_executor : : StreamExecutor * executor , <nl> - stream_executor : : DeviceMemoryAllocator * device_allocator ) override { <nl> + const CompileOptions & options ) override { <nl> XLA_HloModule hlo_module ; <nl> auto cleanup = xla : : MakeCleanup ( [ & hlo_module ] ( ) { <nl> stream_executor : : tpu : : SerializedProto_Free ( hlo_module . proto ) ; <nl> class TpuCompiler : public Compiler { <nl> SE_Executable * result ; <nl> hlo_module . module_config = HloModuleConfigToC ( module - > config ( ) ) ; <nl> hlo_module . proto = stream_executor : : tpu : : SerializeProto ( module - > ToProto ( ) ) ; <nl> - auto allocator = ApiConverter : : ToC ( device_allocator ) ; <nl> + auto allocator = ApiConverter : : ToC ( options . device_allocator ) ; <nl> <nl> StatusHelper status ; <nl> ExecutorApiFn ( ) - > TpuCompiler_RunBackendFn ( <nl> class TpuCompiler : public Compiler { <nl> StatusOr < std : : vector < std : : unique_ptr < Executable > > > Compile ( <nl> std : : unique_ptr < HloModuleGroup > module_group , <nl> std : : vector < std : : vector < stream_executor : : StreamExecutor * > > stream_exec , <nl> - stream_executor : : DeviceMemoryAllocator * device_allocator ) override { <nl> + const CompileOptions & options ) override { <nl> XLA_HloModuleGroup se_module_group ; <nl> se_module_group . proto = <nl> stream_executor : : tpu : : SerializeProto ( module_group - > ToProto ( ) ) ; <nl> class TpuCompiler : public Compiler { <nl> } <nl> } <nl> <nl> - SE_DeviceMemoryAllocator allocator = ApiConverter : : ToC ( device_allocator ) ; <nl> + SE_DeviceMemoryAllocator allocator = <nl> + ApiConverter : : ToC ( options . device_allocator ) ; <nl> <nl> SE_Executable * * se_executables = new SE_Executable * [ module_group - > size ( ) ] ; <nl> <nl> mmm a / tensorflow / go / op / wrappers . go <nl> ppp b / tensorflow / go / op / wrappers . go <nl> func ShapeN ( scope * Scope , input [ ] tf . Output , optional . . . ShapeNAttr ) ( output [ ] t <nl> return output <nl> } <nl> <nl> - / / Returns the TopK values in the array in sorted order . This is a combination <nl> + / / Returns the TopK values in the array in sorted order . <nl> / / <nl> - / / of MakeUnique and TopKUnique . The returned top - K will have its lower bits <nl> - / / replaced by iota , thus it will be close to the original value but not exactly <nl> - / / the same . 
The running time is proportional to the product of K and the input <nl> - / / size . NaNs are never returned . Subnormal numbers are flushed to zero . <nl> + / / This is a combination of MakeUnique and TopKUnique . The returned top - K will <nl> + / / have its lower bits replaced by iota , thus it will be close to the original <nl> + / / value but not exactly the same . The running time is proportional to the product <nl> + / / of K and the input size . NaNs are never returned . Subnormal numbers are flushed <nl> + / / to zero . <nl> func TopKWithUnique ( scope * Scope , input tf . Output , k int64 ) ( topk tf . Output , topk_indices tf . Output ) { <nl> if scope . Err ( ) ! = nil { <nl> return <nl> func QuantizedAvgPool ( scope * Scope , input tf . Output , min_input tf . Output , max_in <nl> return op . Output ( 0 ) , op . Output ( 1 ) , op . Output ( 2 ) <nl> } <nl> <nl> - / / Adds v into specified rows of x . <nl> + / / Adds v into specified rows of x . <nl> / / <nl> / / Computes y = x ; y [ i , : ] + = v ; return y . <nl> / / <nl> func TPUReplicateMetadata ( scope * Scope , num_replicas int64 , optional . . . TPURepli <nl> return scope . AddOperation ( opspec ) <nl> } <nl> <nl> - / / Returns the TopK unique values in the array in sorted order . The <nl> + / / Returns the TopK unique values in the array in sorted order . <nl> / / <nl> - / / running time is proportional to the product of K and the input <nl> + / / The running time is proportional to the product of K and the input <nl> / / size . Sorting the whole array is more efficient for sufficiently large <nl> / / values of K . The median - of - medians algorithm is probably faster , but <nl> / / difficult to implement efficiently in XLA . If there are fewer than K <nl> mmm a / tensorflow / go / tensor . go <nl> ppp b / tensorflow / go / tensor . go <nl> func ( t * Tensor ) DataType ( ) DataType { return DataType ( C . TF_TensorType ( t . c ) ) } <nl> func ( t * Tensor ) Shape ( ) [ ] int64 { return t . shape } <nl> <nl> / / Reshape updates tensor ' s shape in place if this is possible or returns an error otherwise . <nl> - func ( t * Tensor ) Reshape ( new_shape [ ] int64 ) error { <nl> - old_shape_size : = numElements ( t . shape ) <nl> - new_shape_size : = numElements ( new_shape ) <nl> + func ( t * Tensor ) Reshape ( newShape [ ] int64 ) error { <nl> + oldShapeSize : = numElements ( t . shape ) <nl> + newShapeSize : = numElements ( newShape ) <nl> <nl> - if old_shape_size ! = new_shape_size { <nl> - return fmt . Errorf ( " unable to convert shape % v ( num_elements : % d ) into shape % v ( num_elements : % d ) " , t . shape , old_shape_size , new_shape , new_shape_size ) <nl> + if oldShapeSize ! = newShapeSize { <nl> + return fmt . Errorf ( " unable to convert shape % v ( num_elements : % d ) into shape % v ( num_elements : % d ) " , t . shape , oldShapeSize , newShape , newShapeSize ) <nl> } <nl> <nl> - if len ( new_shape ) = = 0 { <nl> + if len ( newShape ) = = 0 { <nl> return nil <nl> } <nl> <nl> var shapePtr * C . int64_t <nl> - shapePtr = ( * C . int64_t ) ( unsafe . Pointer ( & new_shape [ 0 ] ) ) <nl> + shapePtr = ( * C . int64_t ) ( unsafe . Pointer ( & newShape [ 0 ] ) ) <nl> <nl> status : = newStatus ( ) <nl> - C . TF_TensorBitcastFrom ( t . c , C . TF_TensorType ( t . c ) , t . c , shapePtr , C . int ( len ( new_shape ) ) , status . c ) <nl> + C . TF_TensorBitcastFrom ( t . c , C . TF_TensorType ( t . c ) , t . c , shapePtr , C . int ( len ( newShape ) ) , status . c ) <nl> <nl> - return status . 
Err ( ) <nl> + if err : = status . Err ( ) ; err ! = nil { <nl> + return err <nl> + } <nl> + t . shape = newShape <nl> + return nil <nl> } <nl> <nl> / / Value converts the Tensor to a Go value . For now , not all Tensor types are <nl> mmm a / tensorflow / go / tensor_test . go <nl> ppp b / tensorflow / go / tensor_test . go <nl> func BenchmarkTensor ( b * testing . B ) { <nl> } ) <nl> <nl> } <nl> + <nl> + func TestReshape ( t * testing . T ) { <nl> + tensor , err : = NewTensor ( [ ] int64 { 1 , 2 } ) <nl> + if err ! = nil { <nl> + t . Fatalf ( " Unable to create new tensor : % v " , err ) <nl> + } <nl> + <nl> + if got , want : = len ( tensor . Shape ( ) ) , 1 ; got ! = want { <nl> + t . Fatalf ( " len ( tensor . Shape ( ) ) : got % d , want % d " , got , want ) <nl> + } <nl> + if got , want : = tensor . Shape ( ) [ 0 ] , int64 ( 2 ) ; got ! = want { <nl> + t . Errorf ( " tensor . Shape ( ) [ 0 ] : got % d , want % d " , got , want ) <nl> + } <nl> + <nl> + if err : = tensor . Reshape ( [ ] int64 { 1 , 2 } ) ; err ! = nil { <nl> + t . Fatalf ( " tensor . Reshape ( [ 1 , 2 ] ) failed : % v " , err ) <nl> + } <nl> + <nl> + if got , want : = len ( tensor . Shape ( ) ) , 2 ; got ! = want { <nl> + t . Fatalf ( " After reshape , len ( tensor . Shape ( ) ) : got % d , want % d " , got , want ) <nl> + } <nl> + if got , want : = tensor . Shape ( ) [ 0 ] , int64 ( 1 ) ; got ! = want { <nl> + t . Errorf ( " After reshape , tensor . Shape ( ) [ 0 ] : got % d , want % d " , got , want ) <nl> + } <nl> + if got , want : = tensor . Shape ( ) [ 1 ] , int64 ( 2 ) ; got ! = want { <nl> + t . Errorf ( " After reshape , tensor . Shape ( ) [ 1 ] : got % d , want % d " , got , want ) <nl> + } <nl> + } <nl> mmm a / tensorflow / lite / BUILD <nl> ppp b / tensorflow / lite / BUILD <nl> exports_files ( glob ( [ <nl> " testdata / * . tflite " , <nl> " testdata / * . csv " , <nl> " models / testdata / * " , <nl> - ] ) ) <nl> + ] ) + [ <nl> + " create_op_resolver . h " , <nl> + ] ) <nl> <nl> config_setting ( <nl> name = " gemmlowp_profiling " , <nl> cc_library ( <nl> ] , <nl> ) <nl> <nl> + # Defines CreateOpResolver with all builtin ops . <nl> + cc_library ( <nl> + name = " create_op_resolver_with_builtin_ops " , <nl> + srcs = [ " create_op_resolver_with_builtin_ops . cc " ] , <nl> + hdrs = [ " create_op_resolver . h " ] , <nl> + copts = tflite_copts ( ) , <nl> + deps = [ <nl> + " / / tensorflow / lite : op_resolver " , <nl> + " / / tensorflow / lite / core / api " , <nl> + " / / tensorflow / lite / kernels : builtin_ops " , <nl> + ] , <nl> + ) <nl> + <nl> + # This target is created for tflite_custom_cc_library build rule . It requires <nl> + # the header file generated from gen_selected_ops so should not be depended on <nl> + # directly . <nl> + # TODO ( b / 174972014 ) : Generate this target to give RegisterSelectedOps a custom namespace . <nl> + cc_library ( <nl> + name = " create_op_resolver_with_selected_ops " , <nl> + srcs = [ " create_op_resolver_with_selected_ops . cc " ] , <nl> + hdrs = [ " create_op_resolver . h " ] , <nl> + copts = tflite_copts ( ) , <nl> + deps = [ <nl> + " / / tensorflow / lite : mutable_op_resolver " , <nl> + " / / tensorflow / lite : op_resolver " , <nl> + ] , <nl> + ) <nl> + <nl> cc_test ( <nl> name = " util_test " , <nl> size = " small " , <nl> mmm a / tensorflow / lite / CMakeLists . txt <nl> ppp b / tensorflow / lite / CMakeLists . txt <nl> if ( TFLITE_ENABLE_GPU ) <nl> FILTER " ( _test ) \ \ . 
( cc | h ) $ " <nl> ) <nl> populate_tflite_source_vars ( <nl> - " delegates / gpu / cl / kernels / special " <nl> - TFLITE_DELEGATES_GPU_CL_KERNELS_SPECIAL_SRCS <nl> + " delegates / gpu / common / default " TFLITE_DELEGATES_GPU_COMMON_DEFAULT_SRCS <nl> FILTER " ( _test ) \ \ . ( cc | h ) $ " <nl> ) <nl> populate_tflite_source_vars ( <nl> - " delegates / gpu / cl / selectors " TFLITE_DELEGATES_GPU_CL_SELECTORS_SRCS <nl> + " delegates / gpu / common / memory_management " <nl> + TFLITE_DELEGATES_GPU_COMMON_MEMORY_MANAGEMENT_SRCS <nl> FILTER " ( _test ) \ \ . ( cc | h ) $ " <nl> ) <nl> populate_tflite_source_vars ( <nl> - " delegates / gpu / cl / selectors / default " TFLITE_DELEGATES_GPU_CL_SELECTORS_DEFAULT_SRCS <nl> + " delegates / gpu / common / selectors " TFLITE_DELEGATES_GPU_COMMON_SELECTORS_SRCS <nl> FILTER " ( _test ) \ \ . ( cc | h ) $ " <nl> ) <nl> populate_tflite_source_vars ( <nl> - " delegates / gpu / common " TFLITE_DELEGATES_GPU_COMMON_SRCS <nl> + " delegates / gpu / common / selectors / default " TFLITE_DELEGATES_GPU_COMMON_SELECTORS_DEFAULT_SRCS <nl> FILTER " ( _test ) \ \ . ( cc | h ) $ " <nl> ) <nl> populate_tflite_source_vars ( <nl> - " delegates / gpu / common / default " TFLITE_DELEGATES_GPU_COMMON_DEFAULT_SRCS <nl> - FILTER " ( _test ) \ \ . ( cc | h ) $ " <nl> - ) <nl> - populate_tflite_source_vars ( <nl> - " delegates / gpu / common / memory_management " <nl> - TFLITE_DELEGATES_GPU_COMMON_MEMORY_MANAGEMENT_SRCS <nl> + " delegates / gpu / common " TFLITE_DELEGATES_GPU_COMMON_SRCS <nl> FILTER " ( _test ) \ \ . ( cc | h ) $ " <nl> ) <nl> populate_tflite_source_vars ( <nl> if ( TFLITE_ENABLE_GPU ) <nl> $ { TFLITE_SOURCE_DIR } / delegates / gpu / delegate . cc <nl> $ { TFLITE_DELEGATES_GPU_CL_SRCS } <nl> $ { TFLITE_DELEGATES_GPU_CL_KERNELS_SRCS } <nl> - $ { TFLITE_DELEGATES_GPU_CL_KERNELS_SPECIAL_SRCS } <nl> - $ { TFLITE_DELEGATES_GPU_CL_SELECTORS_SRCS } <nl> - $ { TFLITE_DELEGATES_GPU_CL_SELECTORS_DEFAULT_SRCS } <nl> - $ { TFLITE_DELEGATES_GPU_COMMON_SRCS } <nl> $ { TFLITE_DELEGATES_GPU_COMMON_DEFAULT_SRCS } <nl> $ { TFLITE_DELEGATES_GPU_COMMON_MEMORY_MANAGEMENT_SRCS } <nl> + $ { TFLITE_DELEGATES_GPU_COMMON_SELECTORS_SRCS } <nl> + $ { TFLITE_DELEGATES_GPU_COMMON_SELECTORS_DEFAULT_SRCS } <nl> + $ { TFLITE_DELEGATES_GPU_COMMON_SRCS } <nl> $ { TFLITE_DELEGATES_GPU_COMMON_TASK_SRCS } <nl> $ { TFLITE_DELEGATES_GPU_COMMON_TASKS_SRCS } <nl> $ { TFLITE_DELEGATES_GPU_COMMON_TASKS_SPECIAL_SRCS } <nl> mmm a / tensorflow / lite / build_def . bzl <nl> ppp b / tensorflow / lite / build_def . bzl <nl> def tflite_custom_cc_library ( <nl> model = models , <nl> ) <nl> real_srcs . append ( " : % s_registration " % name ) <nl> - real_deps . append ( " / / tensorflow / lite / java / src / main / native : selected_ops_jni " ) <nl> + real_deps . append ( " / / tensorflow / lite : create_op_resolver_with_selected_ops " ) <nl> else : <nl> # Support all operators if ` models ` not specified . <nl> real_deps . append ( " / / tensorflow / lite / java / src / main / native " ) <nl> def tflite_custom_cc_library ( <nl> srcs = real_srcs , <nl> hdrs = [ <nl> # TODO ( b / 161323860 ) replace this by generated header . <nl> - " / / tensorflow / lite / java / src / main / native : op_resolver . h " , <nl> + " / / tensorflow / lite : create_op_resolver . h " , <nl> ] , <nl> copts = tflite_copts ( ) , <nl> linkopts = select ( { <nl> mmm a / tensorflow / lite / core / api / flatbuffer_conversions . cc <nl> ppp b / tensorflow / lite / core / api / flatbuffer_conversions . 
cc <nl> TfLiteStatus ParseOpDataTfLite ( const Operator * op , BuiltinOperator op_type , <nl> return ParseDequantize ( op , error_reporter , allocator , builtin_data ) ; <nl> } <nl> <nl> + case BuiltinOperator_EXP : { <nl> + return ParseExp ( op , error_reporter , allocator , builtin_data ) ; <nl> + } <nl> + <nl> case BuiltinOperator_FILL : { <nl> return ParseFill ( op , error_reporter , allocator , builtin_data ) ; <nl> } <nl> TfLiteStatus ParseOpDataTfLite ( const Operator * op , BuiltinOperator op_type , <nl> case BuiltinOperator_ELU : <nl> case BuiltinOperator_EMBEDDING_LOOKUP : <nl> case BuiltinOperator_EQUAL : <nl> - case BuiltinOperator_EXP : <nl> case BuiltinOperator_EXPAND_DIMS : <nl> case BuiltinOperator_LOG_SOFTMAX : <nl> case BuiltinOperator_MATRIX_DIAG : <nl> TfLiteStatus ParseEqual ( const Operator * , ErrorReporter * , BuiltinDataAllocator * , <nl> return kTfLiteOk ; <nl> } <nl> <nl> + / / We have this parse function instead of directly returning kTfLiteOk from the <nl> + / / switch - case in ParseOpData because this function is used as part of the <nl> + / / selective registration for the OpResolver implementation in micro . <nl> + TfLiteStatus ParseExp ( const Operator * , ErrorReporter * , BuiltinDataAllocator * , <nl> + void * * ) { <nl> + return kTfLiteOk ; <nl> + } <nl> + <nl> / / We have this parse function instead of directly returning kTfLiteOk from the <nl> / / switch - case in ParseOpData because this function is used as part of the <nl> / / selective registration for the OpResolver implementation in micro . <nl> mmm a / tensorflow / lite / core / api / flatbuffer_conversions . h <nl> ppp b / tensorflow / lite / core / api / flatbuffer_conversions . h <nl> TfLiteStatus ParseDequantize ( const Operator * op , ErrorReporter * error_reporter , <nl> TfLiteStatus ParseEqual ( const Operator * op , ErrorReporter * error_reporter , <nl> BuiltinDataAllocator * allocator , void * * builtin_data ) ; <nl> <nl> + TfLiteStatus ParseExp ( const Operator * op , ErrorReporter * error_reporter , <nl> + BuiltinDataAllocator * allocator , void * * builtin_data ) ; <nl> + <nl> TfLiteStatus ParseFill ( const Operator * op , ErrorReporter * error_reporter , <nl> BuiltinDataAllocator * allocator , void * * builtin_data ) ; <nl> <nl> similarity index 81 % <nl> rename from tensorflow / lite / java / src / main / native / op_resolver . h <nl> rename to tensorflow / lite / create_op_resolver . h <nl> mmm a / tensorflow / lite / java / src / main / native / op_resolver . h <nl> ppp b / tensorflow / lite / create_op_resolver . h <nl> WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - # ifndef TENSORFLOW_LITE_JAVA_SRC_MAIN_NATIVE_OP_RESOLVER_H_ <nl> - # define TENSORFLOW_LITE_JAVA_SRC_MAIN_NATIVE_OP_RESOLVER_H_ <nl> + # ifndef TENSORFLOW_LITE_CREATE_OP_RESOLVER_H_ <nl> + # define TENSORFLOW_LITE_CREATE_OP_RESOLVER_H_ <nl> <nl> # include < memory > <nl> <nl> std : : unique_ptr < OpResolver > CreateOpResolver ( ) ; <nl> <nl> } <nl> <nl> - # endif / / TENSORFLOW_LITE_JAVA_SRC_MAIN_NATIVE_OP_RESOLVER_H_ <nl> + # endif / / TENSORFLOW_LITE_CREATE_OP_RESOLVER_H_ <nl> similarity index 68 % <nl> rename from tensorflow / lite / java / src / main / native / builtin_ops_jni . 
cc <nl> rename to tensorflow / lite / create_op_resolver_with_builtin_ops . cc <nl> mmm a / tensorflow / lite / java / src / main / native / builtin_ops_jni . cc <nl> ppp b / tensorflow / lite / create_op_resolver_with_builtin_ops . cc <nl> limitations under the License . <nl> <nl> # include < memory > <nl> <nl> - # include " tensorflow / lite / core / api / op_resolver . h " <nl> + # include " tensorflow / lite / create_op_resolver . h " <nl> # include " tensorflow / lite / kernels / register . h " <nl> <nl> namespace tflite { <nl> <nl> - / / The JNI code in interpreter_jni . cc expects a CreateOpResolver ( ) function in <nl> - / / the tflite namespace . This one instantiates a <nl> - / / BuiltinOpResolverWithoutDefaultDelegates , with all the builtin ops but <nl> - / / without applying any TfLite delegates by default ( like the XNNPACK delegate ) . <nl> - / / For smaller binary sizes users should avoid linking this in , and should <nl> - / / provide a custom make CreateOpResolver ( ) instead . <nl> + / / This function instantiates a BuiltinOpResolverWithoutDefaultDelegates , with <nl> + / / all the builtin ops but without applying any TfLite delegates by default <nl> + / / ( like the XNNPACK delegate ) . For smaller binary sizes users should avoid <nl> + / / linking this in , and should provide a CreateOpResolver ( ) with selected ops <nl> + / / instead . <nl> std : : unique_ptr < OpResolver > CreateOpResolver ( ) { / / NOLINT <nl> return std : : unique_ptr < tflite : : ops : : builtin : : BuiltinOpResolver > ( <nl> new tflite : : ops : : builtin : : BuiltinOpResolverWithoutDefaultDelegates ( ) ) ; <nl> similarity index 91 % <nl> rename from tensorflow / lite / java / src / main / native / selected_ops_jni . cc <nl> rename to tensorflow / lite / create_op_resolver_with_selected_ops . cc <nl> mmm a / tensorflow / lite / java / src / main / native / selected_ops_jni . cc <nl> ppp b / tensorflow / lite / create_op_resolver_with_selected_ops . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " tensorflow / lite / java / src / main / native / op_resolver . h " <nl> + # include " tensorflow / lite / create_op_resolver . h " <nl> # include " tensorflow / lite / mutable_op_resolver . h " <nl> <nl> / / This method is generated by ` gen_selected_ops ` . <nl> - / / TODO ( b / 153652701 ) : Instead of relying on a global method , make <nl> + / / TODO ( b / 174972014 ) : Instead of relying on a global method , make <nl> / / ` gen_selected_ops ` generating a header file with custom namespace . 
<nl> void RegisterSelectedOps ( : : tflite : : MutableOpResolver * resolver ) ; <nl> <nl> mmm a / tensorflow / lite / delegates / gpu / BUILD <nl> ppp b / tensorflow / lite / delegates / gpu / BUILD <nl> objc_library ( <nl> " / / tensorflow / lite / delegates / gpu / common : model " , <nl> " / / tensorflow / lite / delegates / gpu / common : model_builder " , <nl> " / / tensorflow / lite / delegates / gpu / common : model_transformer " , <nl> + " / / tensorflow / lite / delegates / gpu / common : precision " , <nl> " / / tensorflow / lite / delegates / gpu / common : quantization_util " , <nl> " / / tensorflow / lite / delegates / gpu / common : shape " , <nl> " / / tensorflow / lite / delegates / gpu / common : status " , <nl> mmm a / tensorflow / lite / delegates / gpu / cl / BUILD <nl> ppp b / tensorflow / lite / delegates / gpu / cl / BUILD <nl> cc_library ( <nl> " : opencl_wrapper " , <nl> " : serialization_cc_fbs " , <nl> " : tensor " , <nl> - " / / tensorflow / lite / delegates / gpu / cl / selectors : operation_selector " , <nl> - " / / tensorflow / lite / delegates / gpu / cl / selectors : special_selector " , <nl> " / / tensorflow / lite / delegates / gpu / common : data_type " , <nl> " / / tensorflow / lite / delegates / gpu / common : memory_management " , <nl> " / / tensorflow / lite / delegates / gpu / common : model " , <nl> cc_library ( <nl> " / / tensorflow / lite / delegates / gpu / common : tensor " , <nl> " / / tensorflow / lite / delegates / gpu / common : types " , <nl> " / / tensorflow / lite / delegates / gpu / common : util " , <nl> + " / / tensorflow / lite / delegates / gpu / common / selectors : operation_selector " , <nl> + " / / tensorflow / lite / delegates / gpu / common / selectors : special_selector " , <nl> " / / tensorflow / lite / delegates / gpu / common / task : arguments " , <nl> " / / tensorflow / lite / delegates / gpu / common / task : buffer_desc " , <nl> " / / tensorflow / lite / delegates / gpu / common / task : gpu_object_desc " , <nl> mmm a / tensorflow / lite / delegates / gpu / cl / inference_context . cc <nl> ppp b / tensorflow / lite / delegates / gpu / cl / inference_context . cc <nl> limitations under the License . <nl> # include " absl / container / flat_hash_set . h " <nl> # include " tensorflow / lite / delegates / gpu / cl / buffer . h " <nl> # include " tensorflow / lite / delegates / gpu / cl / cl_device . h " <nl> - # include " tensorflow / lite / delegates / gpu / cl / selectors / operation_selector . h " <nl> - # include " tensorflow / lite / delegates / gpu / cl / selectors / special_selector . h " <nl> # include " tensorflow / lite / delegates / gpu / common / data_type . h " <nl> # include " tensorflow / lite / delegates / gpu / common / memory_management . h " <nl> # include " tensorflow / lite / delegates / gpu / common / model . h " <nl> # include " tensorflow / lite / delegates / gpu / common / model_transformer . h " <nl> # include " tensorflow / lite / delegates / gpu / common / operations . h " <nl> # include " tensorflow / lite / delegates / gpu / common / precision . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / selectors / operation_selector . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / selectors / special_selector . h " <nl> # include " tensorflow / lite / delegates / gpu / common / shape . h " <nl> # include " tensorflow / lite / delegates / gpu / common / task / gpu_operation . 
h " <nl> # include " tensorflow / lite / delegates / gpu / common / task / storage_type_util . h " <nl> mmm a / tensorflow / lite / delegates / gpu / common / model_builder . cc <nl> ppp b / tensorflow / lite / delegates / gpu / common / model_builder . cc <nl> class ReduceOperationParser : public TFLiteOperationParser { <nl> ReduceAttributes attr ; <nl> Tensor < Linear , DataType : : INT32 > axes ; <nl> RETURN_IF_ERROR ( reader - > ReadTensor ( 1 , & axes ) ) ; <nl> - const TfLiteTensor * output = reader - > GetOutputTensor ( 0 ) ; <nl> + const TfLiteTensor * input = reader - > GetInputTensor ( 0 ) ; <nl> for ( int i = 0 ; i < axes . data . size ( ) ; i + + ) { <nl> Axis axis ; <nl> - RETURN_IF_ERROR ( ExtractAxisFromIndex ( * output , axes . data [ i ] , & axis ) ) ; <nl> + RETURN_IF_ERROR ( ExtractAxisFromIndex ( * input , axes . data [ i ] , & axis ) ) ; <nl> attr . dims . insert ( axis ) ; <nl> } <nl> node - > operation . attributes = attr ; <nl> class MeanOperationParser : public TFLiteOperationParser { <nl> MeanAttributes attr ; <nl> Tensor < Linear , DataType : : INT32 > axes ; <nl> RETURN_IF_ERROR ( reader - > ReadTensor ( 1 , & axes ) ) ; <nl> - const TfLiteTensor * output = reader - > GetOutputTensor ( 0 ) ; <nl> + const TfLiteTensor * input = reader - > GetInputTensor ( 0 ) ; <nl> for ( int i = 0 ; i < axes . data . size ( ) ; i + + ) { <nl> Axis axis ; <nl> - RETURN_IF_ERROR ( ExtractAxisFromIndex ( * output , axes . data [ i ] , & axis ) ) ; <nl> + RETURN_IF_ERROR ( ExtractAxisFromIndex ( * input , axes . data [ i ] , & axis ) ) ; <nl> attr . dims . insert ( axis ) ; <nl> } <nl> node - > operation . attributes = attr ; <nl> similarity index 89 % <nl> rename from tensorflow / lite / delegates / gpu / cl / selectors / BUILD <nl> rename to tensorflow / lite / delegates / gpu / common / selectors / BUILD <nl> mmm a / tensorflow / lite / delegates / gpu / cl / selectors / BUILD <nl> ppp b / tensorflow / lite / delegates / gpu / common / selectors / BUILD <nl> cc_library ( <nl> name = " convolution_selector " , <nl> hdrs = [ " convolution_selector . h " ] , <nl> deps = [ <nl> - " / / tensorflow / lite / delegates / gpu / cl / selectors / default : convolution_selector " , # buildcleaner : keep <nl> " / / tensorflow / lite / delegates / gpu / common : model_hints " , <nl> " / / tensorflow / lite / delegates / gpu / common : operations " , <nl> " / / tensorflow / lite / delegates / gpu / common : shape " , <nl> " / / tensorflow / lite / delegates / gpu / common : status " , <nl> + " / / tensorflow / lite / delegates / gpu / common / selectors / default : convolution_selector " , # buildcleaner : keep <nl> " / / tensorflow / lite / delegates / gpu / common / task : gpu_operation " , <nl> " / / tensorflow / lite / delegates / gpu / common / task : weights_layout " , <nl> ] , <nl> cc_library ( <nl> name = " convolution_transposed_selector " , <nl> hdrs = [ " convolution_transposed_selector . 
h " ] , <nl> deps = [ <nl> - " / / tensorflow / lite / delegates / gpu / cl / selectors / default : convolution_transposed_selector " , # buildcleaner : keep <nl> " / / tensorflow / lite / delegates / gpu / common : operations " , <nl> " / / tensorflow / lite / delegates / gpu / common : status " , <nl> + " / / tensorflow / lite / delegates / gpu / common / selectors / default : convolution_transposed_selector " , # buildcleaner : keep <nl> " / / tensorflow / lite / delegates / gpu / common / task : gpu_operation " , <nl> " / / tensorflow / lite / delegates / gpu / common / task : weights_layout " , <nl> " @ com_google_absl / / absl / memory " , <nl> cc_library ( <nl> hdrs = [ " default_selector . h " ] , <nl> deps = [ <nl> " : subgraph " , <nl> - " / / tensorflow / lite / delegates / gpu / cl / selectors / default : default_selector " , # buildcleaner : keep <nl> " / / tensorflow / lite / delegates / gpu / common : model " , <nl> " / / tensorflow / lite / delegates / gpu / common : model_hints " , <nl> " / / tensorflow / lite / delegates / gpu / common : status " , <nl> + " / / tensorflow / lite / delegates / gpu / common / selectors / default : default_selector " , # buildcleaner : keep <nl> " / / tensorflow / lite / delegates / gpu / common / task : gpu_operation " , <nl> " / / tensorflow / lite / delegates / gpu / common / task : tensor_desc " , <nl> ] , <nl> cc_library ( <nl> name = " dw_convolution_selector " , <nl> hdrs = [ " dw_convolution_selector . h " ] , <nl> deps = [ <nl> - " / / tensorflow / lite / delegates / gpu / cl / selectors / default : dw_convolution_selector " , # buildcleaner : keep <nl> " / / tensorflow / lite / delegates / gpu / common : operations " , <nl> " / / tensorflow / lite / delegates / gpu / common : status " , <nl> + " / / tensorflow / lite / delegates / gpu / common / selectors / default : dw_convolution_selector " , # buildcleaner : keep <nl> " / / tensorflow / lite / delegates / gpu / common / task : gpu_operation " , <nl> " @ com_google_absl / / absl / memory " , <nl> ] , <nl> cc_library ( <nl> name = " fully_connected_selector " , <nl> hdrs = [ " fully_connected_selector . h " ] , <nl> deps = [ <nl> - " / / tensorflow / lite / delegates / gpu / cl / selectors / default : fully_connected_selector " , # buildcleaner : keep <nl> " / / tensorflow / lite / delegates / gpu / common : operations " , <nl> " / / tensorflow / lite / delegates / gpu / common : status " , <nl> + " / / tensorflow / lite / delegates / gpu / common / selectors / default : fully_connected_selector " , # buildcleaner : keep <nl> " / / tensorflow / lite / delegates / gpu / common / task : gpu_operation " , <nl> " @ com_google_absl / / absl / memory " , <nl> ] , <nl> cc_library ( <nl> " : fully_connected_selector " , <nl> " : simple_selectors " , <nl> " : subgraph " , <nl> - " / / tensorflow / lite / delegates / gpu / cl : cl_device " , <nl> " / / tensorflow / lite / delegates / gpu / common : data_type " , <nl> + " / / tensorflow / lite / delegates / gpu / common : gpu_info " , <nl> " / / tensorflow / lite / delegates / gpu / common : model " , <nl> " / / tensorflow / lite / delegates / gpu / common : model_hints " , <nl> " / / tensorflow / lite / delegates / gpu / common : operations " , <nl> cc_library ( <nl> srcs = [ " simple_selectors . cc " ] , <nl> hdrs = [ " simple_selectors . 
h " ] , <nl> deps = [ <nl> - " / / tensorflow / lite / delegates / gpu / cl : cl_device " , <nl> + " / / tensorflow / lite / delegates / gpu / common : gpu_info " , <nl> " / / tensorflow / lite / delegates / gpu / common : operations " , <nl> " / / tensorflow / lite / delegates / gpu / common : shape " , <nl> " / / tensorflow / lite / delegates / gpu / common : status " , <nl> cc_library ( <nl> hdrs = [ " special_selector . h " ] , <nl> deps = [ <nl> " : subgraph " , <nl> - " / / tensorflow / lite / delegates / gpu / cl : cl_device " , <nl> " / / tensorflow / lite / delegates / gpu / common : data_type " , <nl> + " / / tensorflow / lite / delegates / gpu / common : gpu_info " , <nl> " / / tensorflow / lite / delegates / gpu / common : model " , <nl> " / / tensorflow / lite / delegates / gpu / common : operations " , <nl> " / / tensorflow / lite / delegates / gpu / common : shape " , <nl> similarity index 88 % <nl> rename from tensorflow / lite / delegates / gpu / cl / selectors / convolution_selector . h <nl> rename to tensorflow / lite / delegates / gpu / common / selectors / convolution_selector . h <nl> mmm a / tensorflow / lite / delegates / gpu / cl / selectors / convolution_selector . h <nl> ppp b / tensorflow / lite / delegates / gpu / common / selectors / convolution_selector . h <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # ifndef TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_CONVOLUTION_SELECTOR_H_ <nl> - # define TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_CONVOLUTION_SELECTOR_H_ <nl> + # ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_CONVOLUTION_SELECTOR_H_ <nl> + # define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_CONVOLUTION_SELECTOR_H_ <nl> <nl> # include < memory > <nl> <nl> limitations under the License . <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> <nl> std : : unique_ptr < GPUOperation > SelectConvolution ( <nl> const Convolution2DAttributes & attr , const BHWC & dst_shape , <nl> std : : unique_ptr < GPUOperation > SelectConverterToConvWeights ( <nl> const WeightsDescription & weights_desc , const OperationDef & op_def , <nl> ModelHints hints ) ; <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> <nl> - # endif / / TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_CONVOLUTION_SELECTOR_H_ <nl> + # endif / / TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_CONVOLUTION_SELECTOR_H_ <nl> similarity index 82 % <nl> rename from tensorflow / lite / delegates / gpu / cl / selectors / convolution_transposed_selector . h <nl> rename to tensorflow / lite / delegates / gpu / common / selectors / convolution_transposed_selector . h <nl> mmm a / tensorflow / lite / delegates / gpu / cl / selectors / convolution_transposed_selector . h <nl> ppp b / tensorflow / lite / delegates / gpu / common / selectors / convolution_transposed_selector . h <nl> See the License for the specific language governing permissions and <nl> limitations under the License . 
<nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # ifndef TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_CONVOLUTION_TRANSPOSED_SELECTOR_H_ <nl> - # define TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_CONVOLUTION_TRANSPOSED_SELECTOR_H_ <nl> + # ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_CONVOLUTION_TRANSPOSED_SELECTOR_H_ <nl> + # define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_CONVOLUTION_TRANSPOSED_SELECTOR_H_ <nl> <nl> # include < memory > <nl> <nl> limitations under the License . <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> <nl> std : : unique_ptr < GPUOperation > SelectConvolutionTransposed ( <nl> const ConvolutionTransposedAttributes & attr , const GpuInfo & gpu_info , <nl> std : : unique_ptr < GPUOperation > SelectConvolutionTransposedWithDynamicWeights ( <nl> const ConvolutionTransposedAttributes & attr , const GpuInfo & gpu_info , <nl> const OperationDef & op_def , WeightsDescription * weights_desc ) ; <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> <nl> - # endif / / TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_CONVOLUTION_TRANSPOSED_SELECTOR_H_ <nl> + # endif / / TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_CONVOLUTION_TRANSPOSED_SELECTOR_H_ <nl> similarity index 96 % <nl> rename from tensorflow / lite / delegates / gpu / cl / selectors / default / BUILD <nl> rename to tensorflow / lite / delegates / gpu / common / selectors / default / BUILD <nl> mmm a / tensorflow / lite / delegates / gpu / cl / selectors / default / BUILD <nl> ppp b / tensorflow / lite / delegates / gpu / common / selectors / default / BUILD <nl> cc_library ( <nl> name = " default_selector " , <nl> srcs = [ " default_selector . cc " ] , <nl> deps = [ <nl> - " / / tensorflow / lite / delegates / gpu / cl / selectors : subgraph " , <nl> " / / tensorflow / lite / delegates / gpu / common : model " , <nl> " / / tensorflow / lite / delegates / gpu / common : model_hints " , <nl> " / / tensorflow / lite / delegates / gpu / common : operations " , <nl> " / / tensorflow / lite / delegates / gpu / common : status " , <nl> + " / / tensorflow / lite / delegates / gpu / common / selectors : subgraph " , <nl> " / / tensorflow / lite / delegates / gpu / common / task : gpu_operation " , <nl> " @ com_google_absl / / absl / strings " , <nl> ] , <nl> cc_library ( <nl> name = " dw_convolution_selector " , <nl> srcs = [ " dw_convolution_selector . cc " ] , <nl> deps = [ <nl> - " / / tensorflow / lite / delegates / gpu / cl : cl_device " , <nl> + " / / tensorflow / lite / delegates / gpu / common : gpu_info " , <nl> " / / tensorflow / lite / delegates / gpu / common : operations " , <nl> " / / tensorflow / lite / delegates / gpu / common : status " , <nl> " / / tensorflow / lite / delegates / gpu / common / task : gpu_operation " , <nl> similarity index 99 % <nl> rename from tensorflow / lite / delegates / gpu / cl / selectors / default / convolution_selector . cc <nl> rename to tensorflow / lite / delegates / gpu / common / selectors / default / convolution_selector . cc <nl> mmm a / tensorflow / lite / delegates / gpu / cl / selectors / default / convolution_selector . cc <nl> ppp b / tensorflow / lite / delegates / gpu / common / selectors / default / convolution_selector . cc <nl> limitations under the License . 
<nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> namespace { <nl> <nl> std : : unique_ptr < GPUOperation > SelectConvolutionAdreno ( <nl> std : : unique_ptr < GPUOperation > SelectConverterToConvWeights ( <nl> return absl : : make_unique < ConverterToConvWeights > ( std : : move ( converter ) ) ; <nl> } <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> similarity index 99 % <nl> rename from tensorflow / lite / delegates / gpu / cl / selectors / default / convolution_transposed_selector . cc <nl> rename to tensorflow / lite / delegates / gpu / common / selectors / default / convolution_transposed_selector . cc <nl> mmm a / tensorflow / lite / delegates / gpu / cl / selectors / default / convolution_transposed_selector . cc <nl> ppp b / tensorflow / lite / delegates / gpu / common / selectors / default / convolution_transposed_selector . cc <nl> limitations under the License . <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> namespace { <nl> <nl> std : : unique_ptr < GPUOperation > SelectConvolutionTransposedAdreno ( <nl> std : : unique_ptr < GPUOperation > SelectConvolutionTransposedWithDynamicWeights ( <nl> } <nl> } <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> similarity index 93 % <nl> rename from tensorflow / lite / delegates / gpu / cl / selectors / default / default_selector . cc <nl> rename to tensorflow / lite / delegates / gpu / common / selectors / default / default_selector . cc <nl> mmm a / tensorflow / lite / delegates / gpu / cl / selectors / default / default_selector . cc <nl> ppp b / tensorflow / lite / delegates / gpu / common / selectors / default / default_selector . cc <nl> limitations under the License . <nl> # include < memory > <nl> <nl> # include " absl / strings / str_cat . h " <nl> - # include " tensorflow / lite / delegates / gpu / cl / selectors / subgraph . h " <nl> # include " tensorflow / lite / delegates / gpu / common / model . h " <nl> # include " tensorflow / lite / delegates / gpu / common / model_hints . h " <nl> # include " tensorflow / lite / delegates / gpu / common / operations . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / selectors / subgraph . h " <nl> # include " tensorflow / lite / delegates / gpu / common / status . h " <nl> # include " tensorflow / lite / delegates / gpu / common / task / gpu_operation . h " <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> <nl> absl : : Status SelectDefault ( const GpuInfo & gpu_info , const OperationDef & op_def , <nl> ModelHints hints , const std : : vector < Value * > & inputs , <nl> absl : : Status SelectDefault ( const GpuInfo & gpu_info , const OperationDef & op_def , <nl> absl : : StrCat ( " No selector for " , node . operation . type ) ) ; <nl> } <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> similarity index 97 % <nl> rename from tensorflow / lite / delegates / gpu / cl / selectors / default / dw_convolution_selector . cc <nl> rename to tensorflow / lite / delegates / gpu / common / selectors / default / dw_convolution_selector . cc <nl> mmm a / tensorflow / lite / delegates / gpu / cl / selectors / default / dw_convolution_selector . cc <nl> ppp b / tensorflow / lite / delegates / gpu / common / selectors / default / dw_convolution_selector . cc <nl> limitations under the License . 
<nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> # include " absl / memory / memory . h " <nl> - # include " tensorflow / lite / delegates / gpu / cl / cl_device . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / gpu_info . h " <nl> # include " tensorflow / lite / delegates / gpu / common / tasks / depthwise_conv . h " <nl> # include " tensorflow / lite / delegates / gpu / common / tasks / depthwise_conv_3x3 . h " <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> namespace { <nl> <nl> std : : unique_ptr < GPUOperation > SelectDWConvolutionAdreno ( <nl> std : : unique_ptr < GPUOperation > SelectDWConvolution ( <nl> } <nl> } <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> similarity index 99 % <nl> rename from tensorflow / lite / delegates / gpu / cl / selectors / default / fully_connected_selector . cc <nl> rename to tensorflow / lite / delegates / gpu / common / selectors / default / fully_connected_selector . cc <nl> mmm a / tensorflow / lite / delegates / gpu / cl / selectors / default / fully_connected_selector . cc <nl> ppp b / tensorflow / lite / delegates / gpu / common / selectors / default / fully_connected_selector . cc <nl> limitations under the License . <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> <nl> std : : unique_ptr < GPUOperation > SelectFullyConnectedGeneric ( <nl> const FullyConnectedAttributes & attr , const GpuInfo & gpu_info , <nl> std : : unique_ptr < GPUOperation > SelectFullyConnected ( <nl> } <nl> } <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> similarity index 81 % <nl> rename from tensorflow / lite / delegates / gpu / cl / selectors / default_selector . h <nl> rename to tensorflow / lite / delegates / gpu / common / selectors / default_selector . h <nl> mmm a / tensorflow / lite / delegates / gpu / cl / selectors / default_selector . h <nl> ppp b / tensorflow / lite / delegates / gpu / common / selectors / default_selector . h <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # ifndef TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_DEFAULT_SELECTOR_H_ <nl> - # define TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_DEFAULT_SELECTOR_H_ <nl> + # ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_DEFAULT_SELECTOR_H_ <nl> + # define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_DEFAULT_SELECTOR_H_ <nl> <nl> # include < memory > <nl> <nl> - # include " tensorflow / lite / delegates / gpu / cl / selectors / subgraph . h " <nl> # include " tensorflow / lite / delegates / gpu / common / model . h " <nl> # include " tensorflow / lite / delegates / gpu / common / model_hints . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / selectors / subgraph . h " <nl> # include " tensorflow / lite / delegates / gpu / common / status . h " <nl> # include " tensorflow / lite / delegates / gpu / common / task / gpu_operation . h " <nl> # include " tensorflow / lite / delegates / gpu / common / task / tensor_desc . 
h " <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> <nl> absl : : Status SelectDefault ( const GpuInfo & gpu_info , const OperationDef & op_def , <nl> ModelHints hints , const std : : vector < Value * > & inputs , <nl> const std : : vector < Value * > & outputs , const Node & node , <nl> GPUOperationsSubgraph * gpu_subgraph ) ; <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> <nl> - # endif / / TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_DEFAULT_SELECTOR_H_ <nl> + # endif / / TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_DEFAULT_SELECTOR_H_ <nl> similarity index 80 % <nl> rename from tensorflow / lite / delegates / gpu / cl / selectors / dw_convolution_selector . h <nl> rename to tensorflow / lite / delegates / gpu / common / selectors / dw_convolution_selector . h <nl> mmm a / tensorflow / lite / delegates / gpu / cl / selectors / dw_convolution_selector . h <nl> ppp b / tensorflow / lite / delegates / gpu / common / selectors / dw_convolution_selector . h <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # ifndef TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_DW_CONVOLUTION_SELECTOR_H_ <nl> - # define TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_DW_CONVOLUTION_SELECTOR_H_ <nl> + # ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_DW_CONVOLUTION_SELECTOR_H_ <nl> + # define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_DW_CONVOLUTION_SELECTOR_H_ <nl> <nl> # include < memory > <nl> <nl> limitations under the License . <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> <nl> std : : unique_ptr < GPUOperation > SelectDWConvolution ( <nl> const DepthwiseConvolution2DAttributes & attr , const GpuInfo & gpu_info , <nl> const OperationDef & op_def ) ; <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> <nl> - # endif / / TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_DW_CONVOLUTION_SELECTOR_H_ <nl> + # endif / / TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_DW_CONVOLUTION_SELECTOR_H_ <nl> similarity index 80 % <nl> rename from tensorflow / lite / delegates / gpu / cl / selectors / fully_connected_selector . h <nl> rename to tensorflow / lite / delegates / gpu / common / selectors / fully_connected_selector . h <nl> mmm a / tensorflow / lite / delegates / gpu / cl / selectors / fully_connected_selector . h <nl> ppp b / tensorflow / lite / delegates / gpu / common / selectors / fully_connected_selector . h <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # ifndef TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_FULLY_CONNECTED_SELECTOR_H_ <nl> - # define TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_FULLY_CONNECTED_SELECTOR_H_ <nl> + # ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_FULLY_CONNECTED_SELECTOR_H_ <nl> + # define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_FULLY_CONNECTED_SELECTOR_H_ <nl> <nl> # include < memory > <nl> <nl> limitations under the License . 
<nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> <nl> std : : unique_ptr < GPUOperation > SelectFullyConnected ( <nl> const FullyConnectedAttributes & attr , const GpuInfo & gpu_info , <nl> const OperationDef & op_def , int batch_size ) ; <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> <nl> - # endif / / TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_FULLY_CONNECTED_SELECTOR_H_ <nl> + # endif / / TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_FULLY_CONNECTED_SELECTOR_H_ <nl> similarity index 97 % <nl> rename from tensorflow / lite / delegates / gpu / cl / selectors / operation_selector . cc <nl> rename to tensorflow / lite / delegates / gpu / common / selectors / operation_selector . cc <nl> mmm a / tensorflow / lite / delegates / gpu / cl / selectors / operation_selector . cc <nl> ppp b / tensorflow / lite / delegates / gpu / common / selectors / operation_selector . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " tensorflow / lite / delegates / gpu / cl / selectors / operation_selector . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / selectors / operation_selector . h " <nl> <nl> # include " absl / strings / str_cat . h " <nl> # include " absl / types / any . h " <nl> - # include " tensorflow / lite / delegates / gpu / cl / cl_device . h " <nl> - # include " tensorflow / lite / delegates / gpu / cl / selectors / convolution_selector . h " <nl> - # include " tensorflow / lite / delegates / gpu / cl / selectors / convolution_transposed_selector . h " <nl> - # include " tensorflow / lite / delegates / gpu / cl / selectors / default_selector . h " <nl> - # include " tensorflow / lite / delegates / gpu / cl / selectors / dw_convolution_selector . h " <nl> - # include " tensorflow / lite / delegates / gpu / cl / selectors / fully_connected_selector . h " <nl> - # include " tensorflow / lite / delegates / gpu / cl / selectors / simple_selectors . h " <nl> # include " tensorflow / lite / delegates / gpu / common / data_type . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / gpu_info . h " <nl> # include " tensorflow / lite / delegates / gpu / common / operations . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / selectors / convolution_selector . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / selectors / convolution_transposed_selector . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / selectors / default_selector . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / selectors / dw_convolution_selector . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / selectors / fully_connected_selector . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / selectors / simple_selectors . h " <nl> # include " tensorflow / lite / delegates / gpu / common / shape . h " <nl> # include " tensorflow / lite / delegates / gpu / common / status . h " <nl> # include " tensorflow / lite / delegates / gpu / common / task / storage_type_util . h " <nl> limitations under the License . 
<nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> namespace { <nl> bool IsRecommendedForWinograd4x4To6x6 ( const Convolution2DAttributes & attr , <nl> const GpuInfo & gpu_info , <nl> absl : : Status GPUOperationFromNode ( const GpuInfo & gpu_info , <nl> } <nl> } <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> similarity index 82 % <nl> rename from tensorflow / lite / delegates / gpu / cl / selectors / operation_selector . h <nl> rename to tensorflow / lite / delegates / gpu / common / selectors / operation_selector . h <nl> mmm a / tensorflow / lite / delegates / gpu / cl / selectors / operation_selector . h <nl> ppp b / tensorflow / lite / delegates / gpu / common / selectors / operation_selector . h <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # ifndef TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_OPERATION_SELECTOR_H_ <nl> - # define TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_OPERATION_SELECTOR_H_ <nl> + # ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_OPERATION_SELECTOR_H_ <nl> + # define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_OPERATION_SELECTOR_H_ <nl> <nl> # include < memory > <nl> <nl> - # include " tensorflow / lite / delegates / gpu / cl / selectors / subgraph . h " <nl> # include " tensorflow / lite / delegates / gpu / common / model . h " <nl> # include " tensorflow / lite / delegates / gpu / common / model_hints . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / selectors / subgraph . h " <nl> # include " tensorflow / lite / delegates / gpu / common / status . h " <nl> # include " tensorflow / lite / delegates / gpu / common / task / gpu_operation . h " <nl> # include " tensorflow / lite / delegates / gpu / common / task / tensor_desc . h " <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> <nl> absl : : Status GPUOperationFromNode ( const GpuInfo & gpu_info , <nl> const OperationDef & op_def , ModelHints hints , <nl> absl : : Status GPUOperationFromNode ( const GpuInfo & gpu_info , <nl> const Node & node , <nl> GPUOperationsSubgraph * gpu_subgraph ) ; <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> <nl> - # endif / / TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_OPERATION_SELECTOR_H_ <nl> + # endif / / TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_OPERATION_SELECTOR_H_ <nl> similarity index 98 % <nl> rename from tensorflow / lite / delegates / gpu / cl / selectors / simple_selectors . cc <nl> rename to tensorflow / lite / delegates / gpu / common / selectors / simple_selectors . cc <nl> mmm a / tensorflow / lite / delegates / gpu / cl / selectors / simple_selectors . cc <nl> ppp b / tensorflow / lite / delegates / gpu / common / selectors / simple_selectors . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " tensorflow / lite / delegates / gpu / cl / selectors / simple_selectors . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / selectors / simple_selectors . 
h " <nl> <nl> # include < memory > <nl> # include < set > <nl> limitations under the License . <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> <nl> std : : unique_ptr < GPUOperation > SelectLSTM ( const OperationDef & op_def , <nl> const GpuInfo & gpu_info ) { <nl> std : : unique_ptr < GPUOperation > SelectQuantizeAndDequantize ( <nl> CreateQuantizeAndDequantize ( op_def , attr ) ) ; <nl> } <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> similarity index 93 % <nl> rename from tensorflow / lite / delegates / gpu / cl / selectors / simple_selectors . h <nl> rename to tensorflow / lite / delegates / gpu / common / selectors / simple_selectors . h <nl> mmm a / tensorflow / lite / delegates / gpu / cl / selectors / simple_selectors . h <nl> ppp b / tensorflow / lite / delegates / gpu / common / selectors / simple_selectors . h <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # ifndef TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_SIMPLE_SELECTORS_H_ <nl> - # define TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_SIMPLE_SELECTORS_H_ <nl> + # ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_SIMPLE_SELECTORS_H_ <nl> + # define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_SIMPLE_SELECTORS_H_ <nl> <nl> # include < memory > <nl> <nl> - # include " tensorflow / lite / delegates / gpu / cl / cl_device . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / gpu_info . h " <nl> # include " tensorflow / lite / delegates / gpu / common / operations . h " <nl> # include " tensorflow / lite / delegates / gpu / common / shape . h " <nl> # include " tensorflow / lite / delegates / gpu / common / status . h " <nl> limitations under the License . <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> <nl> std : : unique_ptr < GPUOperation > SelectLSTM ( const OperationDef & op_def , <nl> const GpuInfo & gpu_info ) ; <nl> std : : unique_ptr < GPUOperation > SelectWinograd36To4x4 ( <nl> std : : unique_ptr < GPUOperation > SelectQuantizeAndDequantize ( <nl> const QuantizeAndDequantizeAttributes & attr , const OperationDef & op_def ) ; <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> <nl> - # endif / / TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_SIMPLE_SELECTORS_H_ <nl> + # endif / / TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_SIMPLE_SELECTORS_H_ <nl> similarity index 98 % <nl> rename from tensorflow / lite / delegates / gpu / cl / selectors / special_selector . cc <nl> rename to tensorflow / lite / delegates / gpu / common / selectors / special_selector . cc <nl> mmm a / tensorflow / lite / delegates / gpu / cl / selectors / special_selector . cc <nl> ppp b / tensorflow / lite / delegates / gpu / common / selectors / special_selector . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " tensorflow / lite / delegates / gpu / cl / selectors / special_selector . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / selectors / special_selector . 
h " <nl> <nl> # include " absl / types / any . h " <nl> - # include " tensorflow / lite / delegates / gpu / cl / cl_device . h " <nl> # include " tensorflow / lite / delegates / gpu / common / data_type . h " <nl> # include " tensorflow / lite / delegates / gpu / common / operations . h " <nl> # include " tensorflow / lite / delegates / gpu / common / shape . h " <nl> limitations under the License . <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> namespace { <nl> absl : : Status TryDepthwiseConvPlus1x1Conv ( <nl> CalculationsPrecision precision , const GraphFloat32 & graph , <nl> absl : : Status GPUSubgraphFromGraph ( <nl> return absl : : NotFoundError ( " No special combination . " ) ; <nl> } <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> similarity index 79 % <nl> rename from tensorflow / lite / delegates / gpu / cl / selectors / special_selector . h <nl> rename to tensorflow / lite / delegates / gpu / common / selectors / special_selector . h <nl> mmm a / tensorflow / lite / delegates / gpu / cl / selectors / special_selector . h <nl> ppp b / tensorflow / lite / delegates / gpu / common / selectors / special_selector . h <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # ifndef TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_SPECIAL_SELECTOR_H_ <nl> - # define TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_SPECIAL_SELECTOR_H_ <nl> + # ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_SPECIAL_SELECTOR_H_ <nl> + # define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_SPECIAL_SELECTOR_H_ <nl> <nl> # include < map > <nl> # include < set > <nl> # include < vector > <nl> <nl> - # include " tensorflow / lite / delegates / gpu / cl / selectors / subgraph . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / gpu_info . h " <nl> # include " tensorflow / lite / delegates / gpu / common / model . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / selectors / subgraph . h " <nl> # include " tensorflow / lite / delegates / gpu / common / status . h " <nl> # include " tensorflow / lite / delegates / gpu / common / task / gpu_operation . h " <nl> # include " tensorflow / lite / delegates / gpu / common / task / tensor_desc . h " <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> <nl> absl : : Status GPUSubgraphFromGraph ( <nl> const GpuInfo & gpu_info , CalculationsPrecision precision , <nl> absl : : Status GPUSubgraphFromGraph ( <nl> std : : set < NodeId > * consumed_nodes , GPUOperationsSubgraph * gpu_subgraph , <nl> std : : string * name ) ; <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> <nl> - # endif / / TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_SPECIAL_SELECTOR_H_ <nl> + # endif / / TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_SPECIAL_SELECTOR_H_ <nl> similarity index 93 % <nl> rename from tensorflow / lite / delegates / gpu / cl / selectors / subgraph . cc <nl> rename to tensorflow / lite / delegates / gpu / common / selectors / subgraph . cc <nl> mmm a / tensorflow / lite / delegates / gpu / cl / selectors / subgraph . cc <nl> ppp b / tensorflow / lite / delegates / gpu / common / selectors / subgraph . 
cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " tensorflow / lite / delegates / gpu / cl / selectors / subgraph . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / selectors / subgraph . h " <nl> <nl> # include < memory > <nl> <nl> limitations under the License . <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> <nl> std : : unique_ptr < GPUOperation > * InitSingleOpSubgraph ( <nl> const std : : vector < Value * > & inputs , const std : : vector < Value * > & outputs , <nl> std : : unique_ptr < GPUOperation > * InitSingleOpSubgraph ( <nl> return & gpu_subgraph - > operations [ 0 ] . operation ; <nl> } <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> similarity index 87 % <nl> rename from tensorflow / lite / delegates / gpu / cl / selectors / subgraph . h <nl> rename to tensorflow / lite / delegates / gpu / common / selectors / subgraph . h <nl> mmm a / tensorflow / lite / delegates / gpu / cl / selectors / subgraph . h <nl> ppp b / tensorflow / lite / delegates / gpu / common / selectors / subgraph . h <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # ifndef TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_SUBGRAPH_H_ <nl> - # define TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_SUBGRAPH_H_ <nl> + # ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_SUBGRAPH_H_ <nl> + # define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_SUBGRAPH_H_ <nl> <nl> # include < memory > <nl> # include < vector > <nl> limitations under the License . <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> <nl> struct GPUOperationWithRefs { <nl> std : : unique_ptr < GPUOperation > operation ; <nl> std : : unique_ptr < GPUOperation > * InitSingleOpSubgraph ( <nl> const std : : vector < Value * > & inputs , const std : : vector < Value * > & outputs , <nl> GPUOperationsSubgraph * gpu_subgraph ) ; <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> <nl> - # endif / / TENSORFLOW_LITE_DELEGATES_GPU_CL_SELECTORS_SUBGRAPH_H_ <nl> + # endif / / TENSORFLOW_LITE_DELEGATES_GPU_COMMON_SELECTORS_SUBGRAPH_H_ <nl> mmm a / tensorflow / lite / delegates / gpu / common / tasks / special / depthwise_conv_plus_1x1_conv . cc <nl> ppp b / tensorflow / lite / delegates / gpu / common / tasks / special / depthwise_conv_plus_1x1_conv . cc <nl> limitations under the License . <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> namespace { <nl> void UploadWeights ( const DepthwiseConvolution2DAttributes & dw_attr , <nl> const Convolution2DAttributes & conv_attr , <nl> GPUOperation CreateDepthwiseConvPlus1x1Conv ( <nl> return result ; <nl> } <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> mmm a / tensorflow / lite / delegates / gpu / common / tasks / special / depthwise_conv_plus_1x1_conv . h <nl> ppp b / tensorflow / lite / delegates / gpu / common / tasks / special / depthwise_conv_plus_1x1_conv . h <nl> limitations under the License . 
<nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> <nl> bool IsDepthwiseConvPlus1x1ConvSupported ( <nl> const OperationDef & definition , <nl> GPUOperation CreateDepthwiseConvPlus1x1Conv ( <nl> const DepthwiseConvolution2DAttributes & dw_attr , <nl> const Convolution2DAttributes & conv_attr ) ; <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> <nl> mmm a / tensorflow / lite / delegates / gpu / common / tasks / special / fc_fc_add . cc <nl> ppp b / tensorflow / lite / delegates / gpu / common / tasks / special / fc_fc_add . cc <nl> limitations under the License . <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> namespace { <nl> bool UseBufferForWeights ( const GpuInfo & gpu_info ) { <nl> return gpu_info . IsAdreno ( ) | | gpu_info . IsAMD ( ) | | gpu_info . IsMali ( ) ; <nl> FCFCAdd CreateFCFCAdd ( const GpuInfo & gpu_info , const OperationDef & definition , <nl> return result ; <nl> } <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> mmm a / tensorflow / lite / delegates / gpu / common / tasks / special / fc_fc_add . h <nl> ppp b / tensorflow / lite / delegates / gpu / common / tasks / special / fc_fc_add . h <nl> limitations under the License . <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> - namespace cl { <nl> <nl> template < DataType T , typename S > <nl> void RearrangeFCWeightsToIOO4I4 ( const tflite : : gpu : : Tensor < OHWI , T > & weights , <nl> FCFCAdd CreateFCFCAdd ( const GpuInfo & gpu_info , const OperationDef & definition , <nl> const FullyConnectedAttributes & attr0 , <nl> const FullyConnectedAttributes & attr1 ) ; <nl> <nl> - } / / namespace cl <nl> } / / namespace gpu <nl> } / / namespace tflite <nl> <nl> mmm a / tensorflow / lite / delegates / gpu / metal / BUILD <nl> ppp b / tensorflow / lite / delegates / gpu / metal / BUILD <nl> cc_library ( <nl> deps = [ <nl> " : compiled_model " , <nl> " : compute_task_descriptor " , <nl> - " : runtime_options " , <nl> " / / tensorflow / lite / delegates / gpu / common : gpu_info " , <nl> " / / tensorflow / lite / delegates / gpu / common : model " , <nl> " / / tensorflow / lite / delegates / gpu / common : operations " , <nl> + " / / tensorflow / lite / delegates / gpu / common : precision " , <nl> " / / tensorflow / lite / delegates / gpu / common : shape " , <nl> " / / tensorflow / lite / delegates / gpu / common : status " , <nl> " / / tensorflow / lite / delegates / gpu / common : util " , <nl> objc_library ( <nl> " : common " , <nl> " : compute_task_descriptor " , <nl> " : metal_arguments " , <nl> - " : runtime_options " , <nl> " / / tensorflow / lite / delegates / gpu / common : model " , <nl> + " / / tensorflow / lite / delegates / gpu / common : precision " , <nl> " / / tensorflow / lite / delegates / gpu / common : shape " , <nl> " / / tensorflow / lite / delegates / gpu / common : status " , <nl> " / / tensorflow / lite / delegates / gpu / common : types " , <nl> objc_library ( <nl> " : compiled_model " , <nl> " : compute_task " , <nl> " : compute_task_descriptor " , <nl> - " : runtime_options " , <nl> " / / tensorflow / lite / delegates / gpu / common : memory_management " , <nl> " / / tensorflow / lite / delegates / gpu / common : model " , <nl> + " / / tensorflow / lite / delegates / gpu / common : precision " , <nl> " / / tensorflow / lite / delegates / gpu / common : shape " , <nl> " / / tensorflow / lite / delegates / gpu / common : status " , <nl> " / / tensorflow / lite / 
delegates / gpu / common : util " , <nl> objc_library ( <nl> ] , <nl> ) <nl> <nl> - cc_library ( <nl> - name = " runtime_options " , <nl> - hdrs = [ " runtime_options . h " ] , <nl> - ) <nl> - <nl> objc_library ( <nl> name = " TestBinary " , <nl> testonly = 1 , <nl> objc_library ( <nl> " / / tensorflow / lite / delegates / gpu / metal : common " , <nl> " / / tensorflow / lite / delegates / gpu / metal : inference_context " , <nl> " / / tensorflow / lite / delegates / gpu / metal : metal_spatial_tensor " , <nl> - " / / tensorflow / lite / delegates / gpu / metal : runtime_options " , <nl> " / / tensorflow / lite / delegates / gpu / metal / kernels : test_util " , <nl> " @ com_google_absl / / absl / memory " , <nl> ] , <nl> mmm a / tensorflow / lite / delegates / gpu / metal / api . cc <nl> ppp b / tensorflow / lite / delegates / gpu / metal / api . cc <nl> limitations under the License . <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / space_to_depth . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / transpose_conv . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / winograd . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> absl : : Status RegisterPrimaryOps ( const GraphFloat32 & graph , const Node * node , <nl> const std : : vector < ValueId > & inputs , <nl> const std : : vector < ValueId > & outputs , <nl> const GpuInfo & gpu_info , <nl> - const RuntimeOptions & options , <nl> + CalculationsPrecision precision , <nl> int * last_value_id , <nl> std : : map < ValueId , BHWC > * tensor_shapes , <nl> std : : vector < NodeDescriptor > * nodes ) { <nl> absl : : Status RegisterPrimaryOps ( const GraphFloat32 & graph , const Node * node , <nl> node_desc . src_tensors_ids = inputs ; <nl> node_desc . dst_tensors_ids = outputs ; <nl> OperationDef op_def ; <nl> - if ( options . storage_precision = = RuntimeOptions : : Precision : : FP32 ) { <nl> - op_def . precision = CalculationsPrecision : : F32 ; <nl> - } else { <nl> - if ( options . accumulator_precision = = RuntimeOptions : : Precision : : FP32 ) { <nl> - op_def . precision = CalculationsPrecision : : F32_F16 ; <nl> - } else { <nl> - op_def . precision = CalculationsPrecision : : F16 ; <nl> - } <nl> - } <nl> + op_def . precision = precision ; <nl> DataType data_type = DeduceDataTypeFromPrecision ( op_def . precision ) ; <nl> TensorDescriptor tensor_descriptor = <nl> TensorDescriptor { data_type , TensorStorageType : : BUFFER , Layout : : HWC } ; <nl> absl : : Status RegisterPrimaryOps ( const GraphFloat32 & graph , const Node * node , <nl> } / / namespace <nl> <nl> absl : : Status Compile ( const GraphFloat32 & graph , const GpuInfo & gpu_info , <nl> - const RuntimeOptions & options , <nl> + CalculationsPrecision precision , <nl> CompiledModel * compiled_model ) { <nl> int last_value_id = 0 ; <nl> for ( const auto & value : graph . values ( ) ) { <nl> absl : : Status Compile ( const GraphFloat32 & graph , const GpuInfo & gpu_info , <nl> } <nl> std : : vector < NodeDescriptor > node_descs ; <nl> std : : vector < ComputeTaskDescriptorPtr > custom_tasks ; <nl> - auto custom_status = <nl> - RegisterCustomOps ( graph , node , inputs , outputs , options , & custom_tasks ) ; <nl> + auto custom_status = RegisterCustomOps ( graph , node , inputs , outputs , <nl> + precision , & custom_tasks ) ; <nl> if ( ! custom_status . 
ok ( ) ) { <nl> auto primary_status = RegisterPrimaryOps ( <nl> - graph , node , inputs , outputs , gpu_info , options , & last_value_id , <nl> + graph , node , inputs , outputs , gpu_info , precision , & last_value_id , <nl> & compiled_model - > tensor_shapes , & node_descs ) ; <nl> if ( ! primary_status . ok ( ) ) { <nl> return absl : : UnimplementedError ( <nl> mmm a / tensorflow / lite / delegates / gpu / metal / api . h <nl> ppp b / tensorflow / lite / delegates / gpu / metal / api . h <nl> limitations under the License . <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / gpu_info . h " <nl> # include " tensorflow / lite / delegates / gpu / common / model . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / precision . h " <nl> # include " tensorflow / lite / delegates / gpu / common / status . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compiled_model . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> namespace metal { <nl> <nl> - / / Builds CompiledModel out of GraphFloat32 graph using provided RuntimeOptions . <nl> + / / Builds CompiledModel out of GraphFloat32 graph using the provided calculations precision . <nl> absl : : Status Compile ( const GraphFloat32 & graph , const GpuInfo & gpu_info , <nl> - const RuntimeOptions & options , <nl> + CalculationsPrecision precision , <nl> CompiledModel * compiled_model ) ; <nl> <nl> } / / namespace metal <nl>
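With metal::RuntimeOptions removed, callers now hand Compile() a single CalculationsPrecision. A minimal sketch of how the old option pair folds into the new enum, mirroring the branch deleted from RegisterPrimaryOps in api.cc above; `OldRuntimeOptions` is a hypothetical stand-in for the deleted struct, while F32, F32_F16 and F16 are the real enum values from common/precision.h used throughout this change:

```cpp
#include "tensorflow/lite/delegates/gpu/common/precision.h"

namespace tflite {
namespace gpu {

// Hypothetical stand-in for the deleted metal::RuntimeOptions.
struct OldRuntimeOptions {
  enum class Precision { FP16, FP32 };
  Precision storage_precision = Precision::FP16;
  Precision accumulator_precision = Precision::FP32;
};

// Reproduces the selection RegisterPrimaryOps performed before this change:
// FP32 storage -> F32; FP16 storage with FP32 accumulation -> F32_F16;
// FP16 storage and accumulation -> F16.
CalculationsPrecision ToCalculationsPrecision(const OldRuntimeOptions& options) {
  if (options.storage_precision == OldRuntimeOptions::Precision::FP32) {
    return CalculationsPrecision::F32;
  }
  if (options.accumulator_precision == OldRuntimeOptions::Precision::FP32) {
    return CalculationsPrecision::F32_F16;
  }
  return CalculationsPrecision::F16;
}

}  // namespace gpu
}  // namespace tflite
```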
mmm a / tensorflow / lite / delegates / gpu / metal / compute_task . h <nl> ppp b / tensorflow / lite / delegates / gpu / metal / compute_task . h <nl> limitations under the License . <nl> # include < vector > <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / model . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / precision . h " <nl> # include " tensorflow / lite / delegates / gpu / common / shape . h " <nl> # include " tensorflow / lite / delegates / gpu / common / status . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> @ interface TFLComputeTask : NSObject <nl> <nl> / / / Returns empty string or error if shader can ' t be compiled . <nl> - ( absl : : Status ) compileWithDevice : ( id < MTLDevice > ) device <nl> taskDescriptor : ( const tflite : : gpu : : metal : : NodeDescriptor & ) desc <nl> - runtimeOptions : ( const : : tflite : : gpu : : metal : : RuntimeOptions & ) options ; <nl> + precision : ( tflite : : gpu : : CalculationsPrecision ) precision ; <nl> <nl> / / / Updates parameters for inputs / outputs / intermediate tensors <nl> - ( absl : : Status ) updateParamsWithDevice : ( id < MTLDevice > ) device <nl> mmm a / tensorflow / lite / delegates / gpu / metal / compute_task . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / compute_task . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / types . h " <nl> # include " tensorflow / lite / delegates / gpu / common / util . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / common . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> using : : tflite : : gpu : : AlignByN ; <nl> using : : tflite : : gpu : : BHWC ; <nl> <nl> using : : tflite : : gpu : : metal : : ComputeTaskDescriptorPtr ; <nl> using : : tflite : : gpu : : metal : : CreateComputeProgram ; <nl> using : : tflite : : gpu : : metal : : DispatchParamsFunction ; <nl> - using : : tflite : : gpu : : metal : : RuntimeOptions ; <nl> + using : : tflite : : gpu : : CalculationsPrecision ; <nl> using : : tflite : : gpu : : metal : : UniformsFunction ; <nl> using : : tflite : : gpu : : uint3 ; <nl> using : : tflite : : gpu : : ValueId ; <nl> @ implementation TFLComputeTask { <nl> <nl> - ( absl : : Status ) compileWithDevice : ( id < MTLDevice > ) device <nl> taskDescriptor : ( const tflite : : gpu : : metal : : NodeDescriptor & ) desc <nl> - runtimeOptions : ( const RuntimeOptions & ) options { <nl> + precision : ( CalculationsPrecision ) precision { <nl> size_t offset = desc . task - > src_tensors_names . size ( ) + desc . task - > uniform_buffers . size ( ) <nl> + desc . task - > immutable_buffers . size ( ) + 1 ; <nl> RETURN_IF_ERROR ( _metal_args . Init ( device , offset , & desc . task - > args , & desc . task - > shader_source ) ) ; <nl> @ implementation TFLComputeTask { <nl> NSString * toAccumulatorType2 = @ " " ; <nl> NSString * toAccumulatorType3 = @ " " ; <nl> NSString * toAccumulatorType4 = @ " " ; <nl> - if ( options . storage_precision = = RuntimeOptions : : Precision : : FP32 ) { <nl> + if ( precision = = CalculationsPrecision : : F32 ) { <nl> storageType = @ " float " ; <nl> accumulatorType = @ " float " ; <nl> } else { <nl> / / FP16 <nl> storageType = @ " half " ; <nl> - if ( options . accumulator_precision = = RuntimeOptions : : Precision : : FP32 ) { <nl> + if ( precision = = CalculationsPrecision : : F32_F16 ) { <nl> accumulatorType = @ " float " ; <nl> toAccumulatorType = @ " float " ; <nl> toAccumulatorType2 = @ " float2 " ; <nl> @ implementation TFLComputeTask { <nl> _uniformBuffers . emplace_back ( UniformBuffer { { } , uniform . data_function } ) ; <nl> } <nl> _outputBuffers . emplace_back ( OutputBuffer { desc . dst_tensors_ids [ 0 ] , nil } ) ; <nl> + const bool f32_storage = precision = = CalculationsPrecision : : F32 ; <nl> for ( auto & immutable : desc . task - > immutable_buffers ) { <nl> - int padding = <nl> - 4 * ( options . storage_precision = = RuntimeOptions : : Precision : : FP32 ? sizeof ( float ) <nl> - : sizeof ( HalfBits ) ) ; <nl> + int padding = 4 * ( f32_storage ? sizeof ( float ) : sizeof ( HalfBits ) ) ; <nl> int paddedSize = AlignByN ( immutable . data . size ( ) , padding ) ; <nl> immutable . data . resize ( paddedSize ) ; <nl> id < MTLBuffer > metalBuffer = [ device newBufferWithBytes : immutable . data . data ( ) <nl>
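Read together, the hunks above collapse the old storage / accumulator pair into one CalculationsPrecision value that drives the Metal shader type names. A small C++ summary of the mapping the .mm code implements; `ShaderTypes` and `GetShaderTypes` are illustrative names for this sketch, not part of the library:

```cpp
#include <string>

#include "tensorflow/lite/delegates/gpu/common/precision.h"

namespace tflite {
namespace gpu {

struct ShaderTypes {
  std::string storage_type;      // type used for tensors in memory
  std::string accumulator_type;  // type used for in-kernel accumulation
};

// Summary of the branches in compute_task.mm: F32 computes and stores in
// float, F32_F16 stores half but accumulates in float, F16 uses half
// everywhere.
ShaderTypes GetShaderTypes(CalculationsPrecision precision) {
  switch (precision) {
    case CalculationsPrecision::F32:
      return {"float", "float"};
    case CalculationsPrecision::F32_F16:
      return {"half", "float"};
    case CalculationsPrecision::F16:
    default:
      return {"half", "half"};
  }
}

}  // namespace gpu
}  // namespace tflite
```

F32_F16 is the middle ground the old accumulator_precision flag expressed: weights and activations stay in half precision while sums are kept in float to limit error growth.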
mmm a / tensorflow / lite / delegates / gpu / metal / inference_context . h <nl> ppp b / tensorflow / lite / delegates / gpu / metal / inference_context . h <nl> limitations under the License . <nl> # include < vector > <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / model . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / precision . h " <nl> # include " tensorflow / lite / delegates / gpu / common / shape . h " <nl> # include " tensorflow / lite / delegates / gpu / common / status . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compiled_model . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> / / / Stages of model preprocessing : <nl> / / / 1 . Operations ' initialization . All operations are initialized and added into <nl> limitations under the License . <nl> model : ( const tflite : : gpu : : metal : : CompiledModel & ) compiledModel <nl> inputBufferIDs : ( const std : : vector < tflite : : gpu : : ValueId > & ) inputBufferIDs <nl> outputBufferIDs : ( const std : : vector < tflite : : gpu : : ValueId > & ) outputBufferIDs <nl> - runtimeOptions : ( const tflite : : gpu : : metal : : RuntimeOptions & ) options ; <nl> + precision : ( tflite : : gpu : : CalculationsPrecision ) precision ; <nl> <nl> / / / Inserts all GPU compute tasks into the command encoder . <nl> / / / @ param inputOutputBuffers Must be created and passed into the method with pairs ID : buffer <nl> - / / / @ param encoderBlock User - defined block to take control over command encoder . Can be nil . <nl> - / / / The block can be used , for example , for fine - grained benchmarking where end encoding <nl> - / / / is performed and command buffer is committed with completion block . A new command <nl> - / / / buffer must be created and new command encoder must be returned by the block . <nl> - / / / The block is called after every dispatch encoding . <nl> / / / @ discussion No GPU synchronization functions are used inside . All GPU resources must be created <nl> / / / with the same device which has been used in compileModelWithDevice ( ) method . <nl> - ( void ) encodeWithEncoder : ( id < MTLComputeCommandEncoder > ) commandEncoder <nl> - inputOutputBuffers : ( const std : : map < : : tflite : : gpu : : ValueId , id < MTLBuffer > > & ) inputOutputBuffers <nl> - encoderBlock : ( id < MTLComputeCommandEncoder > ( ^ ) ( bool isLast ) ) encoderBlock ; <nl> + inputOutputBuffers : <nl> + ( const std : : map < : : tflite : : gpu : : ValueId , id < MTLBuffer > > & ) inputOutputBuffers ; <nl> + <nl> + / / / Inserts all GPU compute tasks into the command buffer . A separate encoder is used for <nl> + / / / every task . <nl> + / / / @ param inputOutputBuffers Must be created and passed into the method with pairs ID : buffer <nl> + / / / @ discussion No GPU synchronization functions are used inside . All GPU resources must be created <nl> + / / / with the same device which has been used in compileModelWithDevice ( ) method . <nl> + - ( void ) encodeWithCommandBuffer : ( id < MTLCommandBuffer > ) commandBuffer <nl> + inputOutputBuffers : <nl> + ( const std : : map < : : tflite : : gpu : : ValueId , id < MTLBuffer > > & ) inputOutputBuffers ; <nl> + <nl> + / / / Adds all GPU compute tasks to the command queue . A separate encoder is used for every <nl> + / / / task , and every flushPeriod encoders are batched into a command buffer that is committed for execution . <nl> + / / / @ param inputOutputBuffers Must be created and passed into the method with pairs ID : buffer <nl> + / / / @ discussion No GPU synchronization functions are used inside . All GPU resources must be created <nl> + / / / with the same device which has been used in compileModelWithDevice ( ) method . <nl> + - ( void ) encodeWithCommandQueue : ( id < MTLCommandQueue > ) commandQueue <nl> + inputOutputBuffers : <nl> + ( const std : : map < : : tflite : : gpu : : ValueId , id < MTLBuffer > > & ) inputOutputBuffers <nl> + flushPeriodically : ( int ) flushPeriod ; <nl> <nl> @ end <nl>
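The header now offers three encoding granularities: one caller-owned encoder, one encoder per task in a caller-owned command buffer, or queue-level batching with periodic commits. A hedged usage sketch of the queue path in Objective-C++; the `inferenceContext`, `device`, ID and MTLBuffer variables are assumed to be set up beforehand on the same device that was passed to compileModelWithDevice, as the comments above require:

```objc
// Encode and run one inference with periodic commits. All names on the
// right-hand sides are assumed to exist in the caller's setup code.
std::map<tflite::gpu::ValueId, id<MTLBuffer>> inOutBuffers = {
    {inputId, inputBuffer},
    {outputId, outputBuffer},
};
id<MTLCommandQueue> queue = [device newCommandQueue];
// Committing every 8 encoded tasks lets the GPU start executing early
// instead of waiting for the whole model to be encoded.
[inferenceContext encodeWithCommandQueue:queue
                      inputOutputBuffers:inOutBuffers
                       flushPeriodically:8];
```

The flush period trades a little CPU overhead for an earlier GPU start; the implementation in inference_context.mm below also commits the final, possibly partial, batch unconditionally.

<nl> mmm a / tensorflow / lite / delegates / gpu / metal / inference_context . 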
mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / memory_management . h " <nl> # include " tensorflow / lite / delegates / gpu / common / memory_management / types . h " <nl> # include " tensorflow / lite / delegates / gpu / common / model . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / precision . h " <nl> # include " tensorflow / lite / delegates / gpu / common / shape . h " <nl> # include " tensorflow / lite / delegates / gpu / common / status . h " <nl> # include " tensorflow / lite / delegates / gpu / common / util . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> using : : tflite : : gpu : : BHWC ; <nl> using : : tflite : : gpu : : metal : : ComputeTaskDescriptorPtr ; <nl> - using : : tflite : : gpu : : metal : : RuntimeOptions ; <nl> + using : : tflite : : gpu : : CalculationsPrecision ; <nl> using : : tflite : : gpu : : ValueId ; <nl> using : : tflite : : gpu : : AlignByN ; <nl> using : : tflite : : gpu : : HalfBits ; <nl> @ implementation TFLInferenceContext { <nl> std : : vector < ValueId > _inputIds ; <nl> std : : vector < ValueId > _outputIds ; <nl> id < MTLDevice > _device ; <nl> - RuntimeOptions _options ; <nl> + CalculationsPrecision _precision ; <nl> std : : map < ValueId , BHWC > _tensorShapes ; <nl> } <nl> <nl> @ implementation TFLInferenceContext { <nl> model : ( const tflite : : gpu : : metal : : CompiledModel & ) compiledModel <nl> inputBufferIDs : ( const std : : vector < tflite : : gpu : : ValueId > & ) inputBufferIDs <nl> outputBufferIDs : ( const std : : vector < tflite : : gpu : : ValueId > & ) outputBufferIDs <nl> - runtimeOptions : ( const RuntimeOptions & ) options { <nl> + precision : ( tflite : : gpu : : CalculationsPrecision ) precision { <nl> _device = device ; <nl> _inputIds = inputBufferIDs ; <nl> _outputIds = outputBufferIDs ; <nl> - _options = options ; <nl> + _precision = precision ; <nl> / / Metal resources are created here . <nl> for ( const auto & node : compiledModel . nodes ) { <nl> TFLComputeTask * task = [ [ TFLComputeTask alloc ] init ] ; <nl> RETURN_IF_ERROR ( [ task compileWithDevice : _device <nl> taskDescriptor : node <nl> - runtimeOptions : _options ] ) ; <nl> + precision : _precision ] ) ; <nl> [ task setDescription : node . description ] ; <nl> _computeTasks . emplace_back ( task ) ; <nl> } <nl> @ implementation TFLInferenceContext { <nl> RETURN_IF_ERROR ( AssignObjectsToTensors ( usageRecords , MemoryStrategy : : GREEDY_BEST , & assignment ) ) ; <nl> auto objectsCount = assignment . object_sizes . size ( ) ; <nl> std : : vector < id < MTLBuffer > > sharedBuffers ( objectsCount ) ; <nl> - size_t dataTypeSize = _options . storage_precision = = RuntimeOptions : : Precision : : FP32 <nl> - ? sizeof ( float ) <nl> - : sizeof ( HalfBits ) ; <nl> + const bool f32_storage = _precision = = CalculationsPrecision : : F32 ; <nl> + size_t dataTypeSize = f32_storage ? 
sizeof ( float ) : sizeof ( HalfBits ) ; <nl> <nl> / / allocate buffers for each shared object <nl> for ( size_t i = 0 ; i < objectsCount ; + + i ) { <nl> @ implementation TFLInferenceContext { <nl> } <nl> <nl> - ( void ) encodeWithEncoder : ( id < MTLComputeCommandEncoder > ) commandEncoder <nl> - inputOutputBuffers : ( const std : : map < ValueId , id < MTLBuffer > > & ) inputOutputBuffers <nl> - encoderBlock : ( id < MTLComputeCommandEncoder > ( ^ ) ( bool isLast ) ) encoderBlock { <nl> + inputOutputBuffers : <nl> + ( const std : : map < : : tflite : : gpu : : ValueId , id < MTLBuffer > > & ) inputOutputBuffers { <nl> for ( auto & task_index : _taskIdsWithInOutBuffers ) { <nl> auto & task = _computeTasks [ task_index ] ; <nl> [ task updateBuffers : inputOutputBuffers ] ; <nl> - ( void ) encodeWithEncoder : ( id < MTLComputeCommandEncoder > ) commandEncoder <nl> for ( int i = 0 ; i < _computeTasks . size ( ) ; + + i ) { <nl> auto & task = _computeTasks [ i ] ; <nl> [ task encodeWithEncoder : commandEncoder ] ; <nl> - if ( encoderBlock ! = nil ) { <nl> - commandEncoder = encoderBlock ( i = = _computeTasks . size ( ) - 1 ) ; <nl> + } <nl> + } <nl> + <nl> + - ( void ) encodeWithCommandBuffer : ( id < MTLCommandBuffer > ) commandBuffer <nl> + inputOutputBuffers : <nl> + ( const std : : map < : : tflite : : gpu : : ValueId , id < MTLBuffer > > & ) inputOutputBuffers { <nl> + for ( auto & task_index : _taskIdsWithInOutBuffers ) { <nl> + auto & task = _computeTasks [ task_index ] ; <nl> + [ task updateBuffers : inputOutputBuffers ] ; <nl> + } <nl> + for ( int i = 0 ; i < _computeTasks . size ( ) ; + + i ) { <nl> + id < MTLComputeCommandEncoder > encoder = [ commandBuffer computeCommandEncoder ] ; <nl> + auto & task = _computeTasks [ i ] ; <nl> + [ task encodeWithEncoder : encoder ] ; <nl> + [ encoder endEncoding ] ; <nl> + } <nl> + } <nl> + <nl> + - ( void ) encodeWithCommandQueue : ( id < MTLCommandQueue > ) commandQueue <nl> + inputOutputBuffers : <nl> + ( const std : : map < : : tflite : : gpu : : ValueId , id < MTLBuffer > > & ) inputOutputBuffers <nl> + flushPeriodically : ( int ) flushPeriod { <nl> + for ( auto & task_index : _taskIdsWithInOutBuffers ) { <nl> + auto & task = _computeTasks [ task_index ] ; <nl> + [ task updateBuffers : inputOutputBuffers ] ; <nl> + } <nl> + id < MTLCommandBuffer > commandBuffer = [ commandQueue commandBuffer ] ; <nl> + for ( int i = 0 ; i < _computeTasks . size ( ) ; + + i ) { <nl> + id < MTLComputeCommandEncoder > encoder = [ commandBuffer computeCommandEncoder ] ; <nl> + auto & task = _computeTasks [ i ] ; <nl> + [ task encodeWithEncoder : encoder ] ; <nl> + [ encoder endEncoding ] ; <nl> + if ( i % flushPeriod = = ( flushPeriod - 1 ) ) { <nl> + [ commandBuffer commit ] ; <nl> + commandBuffer = [ commandQueue commandBuffer ] ; <nl> } <nl> } <nl> + [ commandBuffer commit ] ; <nl> } <nl> <nl> @ end <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / BUILD <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / BUILD <nl> cc_library ( <nl> hdrs = [ " custom_registry . 
h " ] , <nl> deps = [ <nl> " / / tensorflow / lite / delegates / gpu / common : model " , <nl> + " / / tensorflow / lite / delegates / gpu / common : precision " , <nl> " / / tensorflow / lite / delegates / gpu / common : status " , <nl> " / / tensorflow / lite / delegates / gpu / metal : compute_task_descriptor " , <nl> - " / / tensorflow / lite / delegates / gpu / metal : runtime_options " , <nl> ] , <nl> ) <nl> <nl> objc_library ( <nl> " / / tensorflow / lite / delegates / gpu / common : gpu_info " , <nl> " / / tensorflow / lite / delegates / gpu / common : model " , <nl> " / / tensorflow / lite / delegates / gpu / common : operations " , <nl> + " / / tensorflow / lite / delegates / gpu / common : precision " , <nl> " / / tensorflow / lite / delegates / gpu / common : shape " , <nl> " / / tensorflow / lite / delegates / gpu / common : status " , <nl> " / / tensorflow / lite / delegates / gpu / common : tensor " , <nl> objc_library ( <nl> " / / tensorflow / lite / delegates / gpu / metal : common " , <nl> " / / tensorflow / lite / delegates / gpu / metal : compiled_model " , <nl> " / / tensorflow / lite / delegates / gpu / metal : inference_context " , <nl> - " / / tensorflow / lite / delegates / gpu / metal : runtime_options " , <nl> " @ FP16 " , <nl> " @ com_google_absl / / absl / memory " , <nl> ] , <nl> objc_library ( <nl> deps = [ <nl> " : test_util " , <nl> " / / tensorflow / lite / delegates / gpu / common : gpu_info " , <nl> + " / / tensorflow / lite / delegates / gpu / common : precision " , <nl> " / / tensorflow / lite / delegates / gpu / common : shape " , <nl> " / / tensorflow / lite / delegates / gpu / common : types " , <nl> " / / tensorflow / lite / delegates / gpu / common : util " , <nl> " / / tensorflow / lite / delegates / gpu / metal : common " , <nl> " / / tensorflow / lite / delegates / gpu / metal : inference_context " , <nl> - " / / tensorflow / lite / delegates / gpu / metal : runtime_options " , <nl> ] , <nl> ) <nl> <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / add_test . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / add_test . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / util . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / test_util . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> using : : tflite : : gpu : : ElementwiseAttributes ; <nl> using : : tflite : : gpu : : BHWC ; <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / concat_test . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / concat_test . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / util . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / test_util . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> using : : tflite : : gpu : : Axis ; <nl> using : : tflite : : gpu : : BHWC ; <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / conv_test . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / conv_test . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / util . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . 
h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / test_util . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> using : : tflite : : gpu : : Axis ; <nl> using : : tflite : : gpu : : BHWC ; <nl> - ( void ) testWinograd4x4To6x6 { <nl> } <nl> <nl> id < MTLDevice > device = MTLCreateSystemDefaultDevice ( ) ; <nl> - tflite : : gpu : : metal : : RuntimeOptions options ; <nl> - options . storage_precision = tflite : : gpu : : metal : : RuntimeOptions : : Precision : : FP32 ; <nl> - options . accumulator_precision = tflite : : gpu : : metal : : RuntimeOptions : : Precision : : FP32 ; <nl> <nl> std : : map < ValueId , TensorFloat32 > inputs_v0 ; <nl> inputs_v0 [ 0 ] = src_tensor ; <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / custom_registry . cc <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / custom_registry . cc <nl> limitations under the License . <nl> # include < vector > <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / model . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / precision . h " <nl> # include " tensorflow / lite / delegates / gpu / common / status . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> namespace metal { <nl> absl : : Status RegisterCustomOps ( const GraphFloat32 & graph , const Node * node , <nl> const std : : vector < ValueId > & inputs , <nl> const std : : vector < ValueId > & outputs , <nl> - const RuntimeOptions & options , <nl> + CalculationsPrecision precision , <nl> std : : vector < ComputeTaskDescriptorPtr > * tasks ) { <nl> return absl : : UnimplementedError ( " Unsupported op : " + node - > operation . type ) ; <nl> } <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / custom_registry . h <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / custom_registry . h <nl> limitations under the License . <nl> # include < vector > <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / model . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / precision . h " <nl> # include " tensorflow / lite / delegates / gpu / common / status . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> namespace metal { <nl> absl : : Status RegisterCustomOps ( const GraphFloat32 & graph , const Node * node , <nl> const std : : vector < ValueId > & inputs , <nl> const std : : vector < ValueId > & outputs , <nl> - const RuntimeOptions & options , <nl> + CalculationsPrecision precision , <nl> std : : vector < ComputeTaskDescriptorPtr > * tasks ) ; <nl> <nl> } / / namespace metal <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / depthwise_conv_test . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / depthwise_conv_test . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / util . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / test_util . 
h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> using : : tflite : : gpu : : Axis ; <nl> using : : tflite : : gpu : : BHWC ; <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / elementwise_test . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / elementwise_test . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / util . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / test_util . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> using : : tflite : : gpu : : DataType ; <nl> using : : tflite : : gpu : : HWC ; <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / fully_connected_test . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / fully_connected_test . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / util . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / test_util . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> using : : tflite : : gpu : : BHWC ; <nl> using : : tflite : : gpu : : DataType ; <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / max_unpooling_test . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / max_unpooling_test . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / util . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / test_util . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> using : : tflite : : gpu : : BHWC ; <nl> using : : tflite : : gpu : : DataType ; <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / mean_test . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / mean_test . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / util . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / test_util . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> using : : tflite : : gpu : : Axis ; <nl> using : : tflite : : gpu : : BHWC ; <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / padding_test . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / padding_test . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / util . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / test_util . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> using : : tflite : : gpu : : BHWC ; <nl> using : : tflite : : gpu : : DataType ; <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / pooling_test . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / pooling_test . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / util . 
h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / test_util . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> using : : tflite : : gpu : : BHWC ; <nl> using : : tflite : : gpu : : DataType ; <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / prelu_test . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / prelu_test . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / util . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / test_util . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> using : : tflite : : gpu : : BHWC ; <nl> using : : tflite : : gpu : : DataType ; <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / quantize_and_dequantize_test . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / quantize_and_dequantize_test . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / util . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / test_util . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> # include " tensorflow / lite / kernels / internal / quantization_util . h " <nl> <nl> using : : tflite : : NudgeQuantizationRange ; <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / relu_test . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / relu_test . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / util . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / test_util . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> using : : tflite : : gpu : : BHWC ; <nl> using : : tflite : : gpu : : DataType ; <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / reshape_test . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / reshape_test . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / util . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / test_util . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> using : : tflite : : gpu : : BHWC ; <nl> using : : tflite : : gpu : : DataType ; <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / resize_test . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / resize_test . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / util . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / test_util . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . 
h " <nl> <nl> using : : tflite : : gpu : : BHWC ; <nl> using : : tflite : : gpu : : DataType ; <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / slice_test . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / slice_test . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / util . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / test_util . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> using : : tflite : : gpu : : BHWC ; <nl> using : : tflite : : gpu : : DataType ; <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / softmax_test . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / softmax_test . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / util . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / test_util . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> using : : tflite : : gpu : : Axis ; <nl> using : : tflite : : gpu : : BHWC ; <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / space_to_depth_test . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / space_to_depth_test . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / tensor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / test_util . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> using : : tflite : : gpu : : BHWC ; <nl> using : : tflite : : gpu : : DataType ; <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / test_util . h <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / test_util . h <nl> limitations under the License . <nl> # include " tensorflow / lite / delegates / gpu / metal / compiled_model . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / inference_context . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> <nl> namespace tflite { <nl> namespace gpu { <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / test_util . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / test_util . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / metal / compiled_model . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / inference_context . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / precision . h " <nl> # include " tensorflow / lite / delegates / gpu / common / gpu_info . h " <nl> <nl> namespace tflite { <nl> <nl> std : : string device_name = std : : string ( [ [ device name ] UTF8String ] ) ; <nl> GpuInfo gpu_info ; <nl> GetGpuInfoFromDeviceDescription ( device_name , GpuApi : : kMetal , & gpu_info ) ; <nl> - RuntimeOptions options ; <nl> - options . 
storage_precision = RuntimeOptions : : Precision : : FP32 ; <nl> - options . accumulator_precision = RuntimeOptions : : Precision : : FP32 ; <nl> + CalculationsPrecision precision = CalculationsPrecision : : F32 ; <nl> CompiledModel compiled_model ; <nl> - RETURN_IF_ERROR ( Compile ( graph_ , gpu_info , options , & compiled_model ) ) ; <nl> + RETURN_IF_ERROR ( Compile ( graph_ , gpu_info , precision , & compiled_model ) ) ; <nl> CompiledModel optimized_model ; <nl> RETURN_IF_ERROR ( ValidateOptimizeModel ( input_ids , output_ids , compiled_model , & optimized_model ) ) ; <nl> <nl> <nl> model : optimized_model <nl> inputBufferIDs : input_ids <nl> outputBufferIDs : output_ids <nl> - runtimeOptions : options ] ) ; <nl> + precision : precision ] ) ; <nl> std : : map < ValueId , BHWC > input_dimensions ; <nl> std : : map < ValueId , id < MTLBuffer > > input_buffers ; <nl> for ( auto & input : inputs_ ) { <nl> <nl> id < MTLCommandQueue > command_queue = [ device newCommandQueue ] ; <nl> id < MTLCommandBuffer > command_buffer = [ command_queue commandBuffer ] ; <nl> id < MTLComputeCommandEncoder > command_encoder = [ command_buffer computeCommandEncoder ] ; <nl> - [ graph encodeWithEncoder : command_encoder inputOutputBuffers : inout_buffers encoderBlock : nil ] ; <nl> + [ graph encodeWithEncoder : command_encoder inputOutputBuffers : inout_buffers ] ; <nl> [ command_encoder endEncoding ] ; <nl> [ command_buffer commit ] ; <nl> [ command_buffer waitUntilCompleted ] ; <nl> <nl> RETURN_IF_ERROR ( <nl> ValidateOptimizeModel ( inputBufferIDs , outputBufferIDs , raw_model , & optimized_model ) ) ; <nl> <nl> - RuntimeOptions options ; <nl> - options . storage_precision = RuntimeOptions : : Precision : : FP32 ; <nl> - options . accumulator_precision = RuntimeOptions : : Precision : : FP32 ; <nl> + CalculationsPrecision precision = CalculationsPrecision : : F32 ; <nl> <nl> TFLInferenceContext * graph = [ [ TFLInferenceContext alloc ] init ] ; <nl> RETURN_IF_ERROR ( [ graph compileModelWithDevice : device <nl> model : optimized_model <nl> inputBufferIDs : inputBufferIDs <nl> outputBufferIDs : outputBufferIDs <nl> - runtimeOptions : options ] ) ; <nl> + precision : precision ] ) ; <nl> std : : map < ValueId , BHWC > inputDimensions ; <nl> std : : map < ValueId , std : : vector < float > > inputBuffersCPU ; <nl> std : : map < ValueId , id < MTLBuffer > > inputBuffersGPU ; <nl> <nl> id < MTLCommandQueue > commandQueue = [ device newCommandQueue ] ; <nl> id < MTLCommandBuffer > commandBuffer = [ commandQueue commandBuffer ] ; <nl> id < MTLComputeCommandEncoder > commandEncoder = [ commandBuffer computeCommandEncoder ] ; <nl> - [ graph encodeWithEncoder : commandEncoder inputOutputBuffers : inputOutputBuffers encoderBlock : nil ] ; <nl> + [ graph encodeWithEncoder : commandEncoder inputOutputBuffers : inputOutputBuffers ] ; <nl> [ commandEncoder endEncoding ] ; <nl> [ commandBuffer commit ] ; <nl> [ commandBuffer waitUntilCompleted ] ; <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / transpose_conv_test . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / transpose_conv_test . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / util . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / test_util . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . 
h " <nl> <nl> using : : tflite : : gpu : : ConvolutionTransposedAttributes ; <nl> using : : tflite : : gpu : : BHWC ; <nl> mmm a / tensorflow / lite / delegates / gpu / metal / kernels / winograd_test . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal / kernels / winograd_test . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / common / util . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / compute_task_descriptor . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / kernels / test_util . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> # include " tensorflow / lite / delegates / gpu / common / winograd_util . h " <nl> <nl> using : : tflite : : gpu : : BHWC ; <nl> - ( void ) testWinograd4x4To36TileX6 { <nl> } <nl> } <nl> <nl> - tflite : : gpu : : metal : : RuntimeOptions options ; <nl> - options . storage_precision = tflite : : gpu : : metal : : RuntimeOptions : : Precision : : FP32 ; <nl> - options . accumulator_precision = tflite : : gpu : : metal : : RuntimeOptions : : Precision : : FP32 ; <nl> - <nl> tflite : : gpu : : metal : : Winograd4x4To36Attributes attr ; <nl> attr . padding . prepended = tflite : : gpu : : HW ( 1 , 1 ) ; <nl> attr . padding . appended = tflite : : gpu : : HW ( 1 , 1 ) ; <nl> - ( void ) testWinograd36To4x4 { <nl> attr . biases . shape = tflite : : gpu : : Linear ( 1 ) ; <nl> attr . biases . data . resize ( 1 , 0 . 0f ) ; <nl> <nl> - tflite : : gpu : : metal : : RuntimeOptions options ; <nl> - options . storage_precision = tflite : : gpu : : metal : : RuntimeOptions : : Precision : : FP32 ; <nl> - options . accumulator_precision = tflite : : gpu : : metal : : RuntimeOptions : : Precision : : FP32 ; <nl> - <nl> tflite : : gpu : : OperationDef op_def ; <nl> op_def . precision = tflite : : gpu : : CalculationsPrecision : : F32 ; <nl> tflite : : gpu : : TensorDescriptor tensor_descriptor = tflite : : gpu : : TensorDescriptor { <nl> - ( void ) testWinograd36To4x4Tile4x1 { <nl> attr . biases . shape = tflite : : gpu : : Linear ( 1 ) ; <nl> attr . biases . data . resize ( 1 , 0 . 0f ) ; <nl> <nl> - tflite : : gpu : : metal : : RuntimeOptions options ; <nl> - options . storage_precision = tflite : : gpu : : metal : : RuntimeOptions : : Precision : : FP32 ; <nl> - options . accumulator_precision = tflite : : gpu : : metal : : RuntimeOptions : : Precision : : FP32 ; <nl> - <nl> tflite : : gpu : : OperationDef op_def ; <nl> op_def . precision = tflite : : gpu : : CalculationsPrecision : : F32 ; <nl> tflite : : gpu : : TensorDescriptor tensor_descriptor = tflite : : gpu : : TensorDescriptor { <nl> deleted file mode 100644 <nl> index d8e8fe3dd928f . . 0000000000000 <nl> mmm a / tensorflow / lite / delegates / gpu / metal / runtime_options . h <nl> ppp / dev / null <nl> <nl> - / * Copyright 2019 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . 
<nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - <nl> - # ifndef TENSORFLOW_LITE_DELEGATES_GPU_METAL_RUNTIME_OPTIONS_H_ <nl> - # define TENSORFLOW_LITE_DELEGATES_GPU_METAL_RUNTIME_OPTIONS_H_ <nl> - <nl> - namespace tflite { <nl> - namespace gpu { <nl> - namespace metal { <nl> - <nl> - struct RuntimeOptions { <nl> - enum class Precision { <nl> - FP16 , <nl> - FP32 , <nl> - } ; <nl> - / / Buffer storage format . If FP32 then accumulator must be FP32 . <nl> - Precision storage_precision = Precision : : FP32 ; <nl> - / / Accumulator precision . Defines the precision for convolutions . <nl> - Precision accumulator_precision = Precision : : FP32 ; <nl> - } ; <nl> - <nl> - } / / namespace metal <nl> - } / / namespace gpu <nl> - } / / namespace tflite <nl> - <nl> - # endif / / TENSORFLOW_LITE_DELEGATES_GPU_METAL_RUNTIME_OPTIONS_H_ <nl> mmm a / tensorflow / lite / delegates / gpu / metal_delegate . mm <nl> ppp b / tensorflow / lite / delegates / gpu / metal_delegate . mm <nl> <nl> # include " tensorflow / lite / delegates / gpu / metal / compiled_model . h " <nl> # include " tensorflow / lite / delegates / gpu / common / gpu_info . h " <nl> # include " tensorflow / lite / delegates / gpu / metal / inference_context . h " <nl> - # include " tensorflow / lite / delegates / gpu / metal / runtime_options . h " <nl> + # include " tensorflow / lite / delegates / gpu / common / precision . h " <nl> # include " tensorflow / lite / kernels / kernel_util . h " <nl> # include " tensorflow / lite / minimal_logging . h " <nl> <nl> + <nl> namespace tflite { <nl> namespace gpu { <nl> namespace metal { <nl> void SetCommandEncoder ( id < MTLComputeCommandEncoder > encoder ) { <nl> GpuInfo gpu_info ; <nl> GetGpuInfoFromDeviceDescription ( device_name , GpuApi : : kMetal , & gpu_info ) ; <nl> size_t storage_type_size ; <nl> - RuntimeOptions runtime_options ; <nl> + CalculationsPrecision precision ; <nl> if ( options_ . allow_precision_loss ) { <nl> storage_type_size = sizeof ( HalfBits ) ; <nl> - runtime_options . storage_precision = RuntimeOptions : : Precision : : FP16 ; <nl> if ( gpu_info . IsRoundToNearestSupported ( ) ) { <nl> - runtime_options . accumulator_precision = RuntimeOptions : : Precision : : FP16 ; <nl> + precision = CalculationsPrecision : : F16 ; <nl> } else { <nl> - runtime_options . accumulator_precision = RuntimeOptions : : Precision : : FP32 ; <nl> + precision = CalculationsPrecision : : F32_F16 ; <nl> } <nl> } else { <nl> storage_type_size = sizeof ( float ) ; <nl> - runtime_options . storage_precision = RuntimeOptions : : Precision : : FP32 ; <nl> - runtime_options . accumulator_precision = RuntimeOptions : : Precision : : FP32 ; <nl> + precision = CalculationsPrecision : : F32 ; <nl> } <nl> <nl> / / TODO ( impjdi ) : Merge logic with above . <nl> void SetCommandEncoder ( id < MTLComputeCommandEncoder > encoder ) { <nl> <nl> / / TODO ( impjdi ) : Merge these . 
<nl> CompiledModel compiled_model ; <nl> - RETURN_IF_ERROR ( Compile ( graph , gpu_info , runtime_options , & compiled_model ) ) ; <nl> + RETURN_IF_ERROR ( Compile ( graph , gpu_info , precision , & compiled_model ) ) ; <nl> CompiledModel optimized_model ; <nl> RETURN_IF_ERROR ( ValidateOptimizeModel ( input_ids , output_ids , compiled_model , & optimized_model ) ) ; <nl> <nl> void SetCommandEncoder ( id < MTLComputeCommandEncoder > encoder ) { <nl> model : optimized_model <nl> inputBufferIDs : input_ids <nl> outputBufferIDs : output_ids <nl> - runtimeOptions : runtime_options ] ) ; <nl> + precision : precision ] ) ; <nl> return absl : : OkStatus ( ) ; <nl> } <nl> <nl> void SetCommandEncoder ( id < MTLComputeCommandEncoder > encoder ) { <nl> / / We need only synchronization so volatile works better than atomic which reads from global <nl> / / memory each time . <nl> __block volatile bool buffer_completed = false ; <nl> - __block id < MTLCommandBuffer > command_buffer ; <nl> - __block id < MTLComputeCommandEncoder > encoder = external_command_encoder_ ; <nl> + id < MTLCommandBuffer > command_buffer ; <nl> + id < MTLComputeCommandEncoder > encoder = external_command_encoder_ ; <nl> if ( external_command_encoder_ = = nil ) { <nl> command_buffer = [ command_queue_ commandBuffer ] ; <nl> encoder = [ command_buffer computeCommandEncoder ] ; <nl> } <nl> + const bool flush = external_command_encoder_ = = nil & & <nl> + ( options_ . wait_type = = TFLGpuDelegateWaitType : : TFLGpuDelegateWaitTypeActive | | <nl> + options_ . wait_type = = TFLGpuDelegateWaitType : : TFLGpuDelegateWaitTypeAggressive ) ; <nl> + const int flush_period = 8 ; <nl> <nl> const bool is_quantized_model = ! quant_conversion_map_ . empty ( ) ; <nl> if ( is_quantized_model ) { <nl> void SetCommandEncoder ( id < MTLComputeCommandEncoder > encoder ) { <nl> shape : input . shape <nl> sourceBuffer : input_output_buffers_ [ input . id ] <nl> convertedBuffer : bphwc4_buffers_ [ input . id ] ] ; <nl> - if ( external_command_encoder_ = = nil ) { <nl> - [ encoder endEncoding ] ; <nl> - [ command_buffer commit ] ; <nl> + } <nl> + if ( flush ) { <nl> + [ encoder endEncoding ] ; <nl> + [ command_buffer commit ] ; <nl> + } <nl> + <nl> + if ( external_command_encoder_ ! = nil | | <nl> + options_ . wait_type = = TFLGpuDelegateWaitType : : TFLGpuDelegateWaitTypePassive ) { <nl> + / / encoder = = external_command_encoder_ <nl> + [ inference_context_ encodeWithEncoder : encoder <nl> + inputOutputBuffers : bphwc4_buffers_ ] ; <nl> + } else { <nl> + if ( flush ) { <nl> + [ inference_context_ encodeWithCommandQueue : command_queue_ <nl> + inputOutputBuffers : bphwc4_buffers_ <nl> + flushPeriodically : flush_period ] ; <nl> command_buffer = [ command_queue_ commandBuffer ] ; <nl> encoder = [ command_buffer computeCommandEncoder ] ; <nl> + } else { <nl> + [ encoder endEncoding ] ; <nl> + [ inference_context_ encodeWithCommandBuffer : command_buffer <nl> + inputOutputBuffers : bphwc4_buffers_ ] ; <nl> + encoder = [ command_buffer computeCommandEncoder ] ; <nl> } <nl> } <nl> <nl> - [ inference_context_ <nl> - encodeWithEncoder : encoder <nl> - inputOutputBuffers : bphwc4_buffers_ <nl> - encoderBlock : ^ ( bool isLast ) { <nl> - if ( external_command_encoder_ ! = nil | | <nl> - options_ . wait_type = = TFLGpuDelegateWaitType : : TFLGpuDelegateWaitTypePassive ) { <nl> - return encoder ; <nl> - } <nl> - if ( isLast ) { <nl> - if ( options_ . 
wait_type = = TFLGpuDelegateWaitType : : TFLGpuDelegateWaitTypeActive ) { <nl> - [ command_buffer addCompletedHandler : ^ ( id < MTLCommandBuffer > ) { <nl> - buffer_completed = true ; <nl> - } ] ; <nl> - } <nl> - } else { <nl> - [ encoder endEncoding ] ; <nl> - [ command_buffer commit ] ; <nl> - command_buffer = [ command_queue_ commandBuffer ] ; <nl> - encoder = [ command_buffer computeCommandEncoder ] ; <nl> - } <nl> - return encoder ; <nl> - } ] ; <nl> for ( const auto & output : graph_outputs_ ) { <nl> if ( output . set_externally ) continue ; <nl> if ( bphwc4_buffers_ [ output . id ] = = input_output_buffers_ [ output . id ] ) continue ; <nl> void SetCommandEncoder ( id < MTLComputeCommandEncoder > encoder ) { <nl> <nl> if ( external_command_encoder_ = = nil ) { <nl> [ encoder endEncoding ] ; <nl> + if ( options_ . wait_type = = TFLGpuDelegateWaitType : : TFLGpuDelegateWaitTypeActive ) { <nl> + [ command_buffer addCompletedHandler : ^ ( id < MTLCommandBuffer > ) { <nl> + buffer_completed = true ; <nl> + } ] ; <nl> + } <nl> [ command_buffer commit ] ; <nl> if ( options_ . wait_type = = TFLGpuDelegateWaitType : : TFLGpuDelegateWaitTypeActive ) { <nl> while ( ! buffer_completed ) { <nl> void SetCommandEncoder ( id < MTLComputeCommandEncoder > encoder ) { <nl> / / passive wait : this thread sleeps until GPU finishes . <nl> [ command_buffer waitUntilCompleted ] ; <nl> } else if ( options_ . wait_type = = TFLGpuDelegateWaitType : : TFLGpuDelegateWaitTypeAggressive ) { <nl> - command_buffer = [ command_queue_ commandBuffer ] ; <nl> - encoder = [ command_buffer computeCommandEncoder ] ; <nl> - [ encoder setComputePipelineState : signal_program_ ] ; <nl> - [ encoder setBuffer : signal_buffer_ offset : 0 atIndex : 0 ] ; <nl> + id < MTLCommandBuffer > signal_cb = [ command_queue_ commandBuffer ] ; <nl> + id < MTLComputeCommandEncoder > signal_encoder = [ signal_cb computeCommandEncoder ] ; <nl> + [ signal_encoder setComputePipelineState : signal_program_ ] ; <nl> + [ signal_encoder setBuffer : signal_buffer_ offset : 0 atIndex : 0 ] ; <nl> signal_value_ + + ; <nl> - [ encoder setBytes : & signal_value_ length : sizeof ( int ) atIndex : 1 ] ; <nl> - [ encoder dispatchThreadgroups : MTLSizeMake ( 1 , 1 , 1 ) <nl> + [ signal_encoder setBytes : & signal_value_ length : sizeof ( int ) atIndex : 1 ] ; <nl> + [ signal_encoder dispatchThreadgroups : MTLSizeMake ( 1 , 1 , 1 ) <nl> threadsPerThreadgroup : MTLSizeMake ( 1 , 1 , 1 ) ] ; <nl> - [ encoder endEncoding ] ; <nl> - [ command_buffer commit ] ; <nl> + [ signal_encoder endEncoding ] ; <nl> + [ signal_cb commit ] ; <nl> gpu_alarm_clock_ - > Start ( ) ; <nl> const int * signal_ptr = reinterpret_cast < const int * > ( [ signal_buffer_ contents ] ) ; <nl> while ( signal_ptr [ 0 ] ! = signal_value_ ) { <nl> bool TFLGpuDelegateBindMetalBufferToTensor ( TfLiteDelegate * delegate , int tensor_ <nl> <nl> / / Note : This function is not exposed in ` metal_delegate . h ` , but it ' s exposed in <nl> / / ` metal_delegate_internal . h ` . <nl> - bool TFLGpuDelegateSetCommandEncoder ( <nl> - TfLiteDelegate * delegate , id < MTLComputeCommandEncoder > encoder ) { <nl> - auto * metal_delegate = : : tflite : : gpu : : metal : : GetMetalDelegate ( delegate ) ; <nl> - if ( ! 
metal_delegate ) return false ; <nl> - metal_delegate - > SetCommandEncoder ( encoder ) ; <nl> - return true ; <nl> - } <nl> - <nl> bool TFLGpuDelegateSetCommandBuffer ( TfLiteDelegate * delegate , <nl> id < MTLCommandBuffer > command_buffer ) { <nl> auto * metal_delegate = : : tflite : : gpu : : metal : : GetMetalDelegate ( delegate ) ; <nl> mmm a / tensorflow / lite / delegates / gpu / metal_delegate_internal . h <nl> ppp b / tensorflow / lite / delegates / gpu / metal_delegate_internal . h <nl> bool TFLGpuDelegateBindMetalBufferToTensor ( TfLiteDelegate * delegate , <nl> int tensor_index , <nl> id < MTLBuffer > metal_buffer ) ; <nl> <nl> - / / Binds user - defined MTLComputeCommandEncoder . The delegate puts all GPU tasks <nl> - / / into this encoder instead of the internal encoder . <nl> - bool TFLGpuDelegateSetCommandEncoder ( TfLiteDelegate * delegate , <nl> - id < MTLComputeCommandEncoder > encoder ) ; <nl> - <nl> / / Binds user - defined MTLCommandBuffer . The delegate puts all GPU tasks <nl> / / into this buffer instead of the internal command buffer . <nl> bool TFLGpuDelegateSetCommandBuffer ( TfLiteDelegate * delegate , <nl> mmm a / tensorflow / lite / g3doc / performance / post_training_quantization . md <nl> ppp b / tensorflow / lite / g3doc / performance / post_training_quantization . md <nl> You can get further latency improvements , reductions in peak memory usage , and <nl> compatibility with integer only hardware devices or accelerators by making sure <nl> all model math is integer quantized . <nl> <nl> - For full integer quantization , you need to measure the dynamic range of <nl> - activations and inputs by supplying sample input data to the converter . Refer to <nl> - the ` representative_dataset_gen ( ) ` function used in the following code . <nl> + For full integer quantization , you need to calibrate or estimate the range , i . e . , <nl> + ( min , max ) of all floating - point tensors in the model . Unlike constant tensors <nl> + such as weights and biases , variable tensors such as model input , activations <nl> + ( outputs of intermediate layers ) and model output cannot be calibrated unless we <nl> + run a few inference cycles . As a result , the converter requires a representative <nl> + dataset to calibrate them . This dataset can be a small subset ( around 100 - 500 <nl> + samples ) of the training or validation data . Refer to the <nl> + ` representative_dataset ( ) ` function below . <nl> + <nl> + < pre > <nl> + def representative_dataset ( ) : <nl> + for data in tf . data . Dataset . from_tensor_slices ( ( images ) ) . batch ( 1 ) . take ( 100 ) : <nl> + yield [ tf . cast ( data , tf . float32 ) ] <nl> + < / pre > <nl> + <nl> + For testing purposes , you can use a dummy dataset as follows : <nl> + <nl> + < pre > <nl> + def representative_dataset ( ) : <nl> + for _ in range ( 100 ) : <nl> + data = np . random . rand ( 1 , 244 , 244 , 3 ) <nl> + yield [ data . astype ( np . float32 ) ] <nl> + < / pre > <nl> <nl> # # # # Integer with float fallback ( using default float input / output ) <nl> <nl> the following steps : <nl> import tensorflow as tf <nl> converter = tf . lite . TFLiteConverter . from_saved_model ( saved_model_dir ) <nl> < b > converter . optimizations = [ tf . lite . Optimize . DEFAULT ] <nl> - def representative_dataset_gen ( ) : <nl> - for _ in range ( num_calibration_steps ) : <nl> - # Get sample input data as a numpy array in a method of your choosing . <nl> - yield [ input ] <nl> - converter . 
representative_dataset = representative_dataset_gen < / b > <nl> + converter . representative_dataset = representative_dataset < / b > <nl> tflite_quant_model = converter . convert ( ) <nl> < / pre > <nl> <nl> the following steps : <nl> import tensorflow as tf <nl> converter = tf . lite . TFLiteConverter . from_saved_model ( saved_model_dir ) <nl> converter . optimizations = [ tf . lite . Optimize . DEFAULT ] <nl> - def representative_dataset_gen ( ) : <nl> - for _ in range ( num_calibration_steps ) : <nl> - # Get sample input data as a numpy array in a method of your choosing . <nl> - yield [ input ] <nl> - converter . representative_dataset = representative_dataset_gen <nl> + converter . representative_dataset = representative_dataset <nl> < b > converter . target_spec . supported_ops = [ tf . lite . OpsSet . TFLITE_BUILTINS_INT8 ] < / b > <nl> < b > converter . inference_input_type = tf . int8 < / b > # or tf . uint8 <nl> < b > converter . inference_output_type = tf . int8 < / b > # or tf . uint8 <nl> significantly , but only slightly increase model size . <nl> < pre > <nl> import tensorflow as tf <nl> converter = tf . lite . TFLiteConverter . from_saved_model ( saved_model_dir ) <nl> - def representative_dataset_gen ( ) : <nl> - for _ in range ( num_calibration_steps ) : <nl> - # Get sample input data as a numpy array in a method of your choosing . <nl> - yield [ input ] <nl> - converter . representative_dataset = representative_dataset_gen <nl> + converter . representative_dataset = representative_dataset <nl> < b > converter . optimizations = [ tf . lite . Optimize . DEFAULT ] <nl> converter . target_spec . supported_ops = [ tf . lite . OpsSet . EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 ] < / b > <nl> tflite_quant_model = converter . convert ( ) <nl> The following option should be added to the target_spec to allow this . <nl> < pre > <nl> import tensorflow as tf <nl> converter = tf . lite . TFLiteConverter . from_saved_model ( saved_model_dir ) <nl> - def representative_dataset_gen ( ) : <nl> - for _ in range ( num_calibration_steps ) : <nl> - # Get sample input data as a numpy array in a method of your choosing . <nl> - yield [ input ] <nl> - converter . representative_dataset = representative_dataset_gen <nl> + converter . representative_dataset = representative_dataset <nl> converter . optimizations = [ tf . lite . Optimize . DEFAULT ] <nl> converter . target_spec . supported_ops = [ tf . lite . OpsSet . EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 , <nl> < b > tf . lite . OpsSet . TFLITE_BUILTINS < / b > ] <nl> mmm a / tensorflow / lite / interpreter_builder . cc <nl> ppp b / tensorflow / lite / interpreter_builder . cc <nl> TFLITE_ATTRIBUTE_WEAK Interpreter : : TfLiteDelegatePtr AcquireFlexDelegate ( ) { <nl> # endif <nl> void * lib_tf_internal = <nl> SharedLibrary : : LoadLibrary ( filename_pywrap_tensorflow_internal ) ; <nl> + # if defined ( _WIN32 ) <nl> + if ( lib_tf_internal = = nullptr ) { <nl> + lib_tf_internal = SharedLibrary : : LoadLibrary ( <nl> + " _pywrap_tensorflow_interpreter_wrapper . pyd " ) ; <nl> + } <nl> + # endif <nl> if ( lib_tf_internal ) { <nl> acquire_flex_delegate_func = <nl> reinterpret_cast < Interpreter : : TfLiteDelegatePtr ( * ) ( ) > ( <nl> mmm a / tensorflow / lite / java / src / main / native / BUILD <nl> ppp b / tensorflow / lite / java / src / main / native / BUILD <nl> cc_library ( <nl> alwayslink = 1 , <nl> ) <nl> <nl> - # This includes all ops . 
If you want a smaller binary , you should copy and <nl> - # modify builtin_ops_jni . cc . You should then link your binary against both <nl> - # " : native_framework_only " and your own version of " : native_builtin_ops " . <nl> + # This includes all ops . If you want a smaller binary , you should use <nl> + # tflite_custom_cc_library or tflite_custom_android_library rules . <nl> cc_library ( <nl> name = " native " , <nl> - srcs = [ <nl> - " builtin_ops_jni . cc " , <nl> - ] , <nl> - hdrs = [ " op_resolver . h " ] , <nl> copts = tflite_copts ( ) , <nl> deps = [ <nl> " : native_framework_only " , <nl> + " / / tensorflow / lite : create_op_resolver_with_builtin_ops " , <nl> " / / tensorflow / lite : framework " , <nl> - " / / tensorflow / lite / core / api " , <nl> - " / / tensorflow / lite / kernels : builtin_ops " , <nl> ] , <nl> alwayslink = 1 , <nl> ) <nl> <nl> - # TODO ( b / 153652701 ) : Generate this target to give CreateOpResolver a custom namespace . <nl> - cc_library ( <nl> - name = " selected_ops_jni " , <nl> - srcs = [ " selected_ops_jni . cc " ] , <nl> - hdrs = [ " op_resolver . h " ] , <nl> - copts = tflite_copts ( ) , <nl> - deps = [ <nl> - " / / tensorflow / lite : framework " , <nl> - ] , <nl> - ) <nl> - <nl> exports_files ( <nl> [ <nl> " exported_symbols . lds " , <nl> " version_script . lds " , <nl> - " op_resolver . h " , <nl> ] , <nl> ) <nl> mmm a / tensorflow / lite / kernels / BUILD <nl> ppp b / tensorflow / lite / kernels / BUILD <nl> cc_test ( <nl> srcs = [ " numeric_verify_test . cc " ] , <nl> tags = [ " tflite_nnapi " ] , <nl> deps = [ <nl> + " : kernel_util " , <nl> " : test_main " , <nl> " : test_util " , <nl> " / / tensorflow / lite : framework " , <nl> + " / / tensorflow / lite / kernels / internal : reference " , <nl> " / / tensorflow / lite / kernels / internal : types " , <nl> " / / tensorflow / lite / schema : schema_fbs " , <nl> " / / third_party / eigen3 " , <nl> " @ com_google_absl / / absl / memory " , <nl> " @ com_google_googletest / / : gtest " , <nl> + " @ flatbuffers " , <nl> ] , <nl> ) <nl> <nl> mmm a / tensorflow / lite / kernels / internal / optimized / optimized_ops . h <nl> ppp b / tensorflow / lite / kernels / internal / optimized / optimized_ops . h <nl> inline void Conv ( const ConvParams & params , const RuntimeShape & input_shape , <nl> gemm_input_data = im2col_data ; <nl> gemm_input_shape = & im2col_shape ; <nl> } else { <nl> - / / TODO ( aselle ) : We need to make sure to not send im2col if it is not <nl> - / / needed . <nl> TFLITE_DCHECK ( ! im2col_data ) ; <nl> gemm_input_data = input_data ; <nl> gemm_input_shape = & input_shape ; <nl> inline void Transpose2D ( const RuntimeShape & input_shape , <nl> } <nl> } <nl> <nl> - / / TODO ( alanchiao ) : see if we can reduce the number <nl> + / / TODO ( b / 173718660 ) : see if we can reduce the number <nl> / / of lines of code in branching without affecting latency . <nl> template < typename T > <nl> inline void Transpose3D ( const TransposeParams & params , <nl> mmm a / tensorflow / lite / kernels / numeric_verify . cc <nl> ppp b / tensorflow / lite / kernels / numeric_verify . cc <nl> limitations under the License . <nl> # include < numeric > <nl> # include < vector > <nl> <nl> + # include " flatbuffers / flexbuffers . h " / / from @ flatbuffers <nl> # include " tensorflow / lite / c / common . h " <nl> # include " tensorflow / lite / kernels / dequantize . h " <nl> # include " tensorflow / lite / kernels / internal / optimized / neon_check . 
h " <nl> namespace ops { <nl> namespace custom { <nl> namespace numeric_verify { <nl> <nl> + static constexpr const char kToleranceStr [ ] = " tolerance " ; <nl> + static constexpr const char kDebugModeStr [ ] = " debug_mode " ; <nl> + static constexpr const int kTemporaryDequantizedTensor = 0 ; <nl> + <nl> struct OpContext { <nl> OpContext ( TfLiteContext * context , TfLiteNode * node ) { <nl> input = GetInput ( context , node , 0 ) ; <nl> ref = GetInput ( context , node , 1 ) ; <nl> + output = GetOutput ( context , node , 0 ) ; <nl> } <nl> const TfLiteTensor * input ; <nl> const TfLiteTensor * ref ; <nl> + TfLiteTensor * output ; <nl> } ; <nl> <nl> const int kTensorNotAllocated = - 1 ; <nl> const int kTensorNotAllocated = - 1 ; <nl> struct OpData { <nl> / / The percentage of the tensor value range . Must be a number less than 1 . 0 . <nl> float tolerance ; <nl> - / / The abstract value allowed for the floating - point value difference . <nl> - float max_diff ; <nl> / / This boolean value is only used when the input tensor is constant . <nl> bool float_input_initialized ; <nl> int cache_tensor_id = kTensorNotAllocated ; <nl> + / / This boolean value is for controlling the behavior of numeric verify op . <nl> + bool debug_mode ; <nl> } ; <nl> <nl> void * Init ( TfLiteContext * context , const char * buffer , size_t length ) { <nl> auto * op_data = new OpData ( ) ; <nl> op_data - > float_input_initialized = false ; <nl> <nl> - / / Get the tolerance parameter from the buffer . Use flexbuffers asMap if there <nl> - / / multiple custom options . <nl> - const float * buffer_t = reinterpret_cast < const float * > ( buffer ) ; <nl> - op_data - > tolerance = * buffer_t ; <nl> + const uint8_t * buffer_t = reinterpret_cast < const uint8_t * > ( buffer ) ; <nl> + const flexbuffers : : Map & m = flexbuffers : : GetRoot ( buffer_t , length ) . AsMap ( ) ; <nl> + const float tolerance = m [ kToleranceStr ] . AsFloat ( ) ; <nl> + const bool debug_mode = m [ kDebugModeStr ] . AsBool ( ) ; <nl> + op_data - > tolerance = tolerance ; <nl> + op_data - > debug_mode = debug_mode ; <nl> <nl> return op_data ; <nl> } <nl> void Free ( TfLiteContext * context , void * buffer ) { <nl> <nl> TfLiteStatus Prepare ( TfLiteContext * context , TfLiteNode * node ) { <nl> TF_LITE_ENSURE_EQ ( context , NumInputs ( node ) , 2 ) ; <nl> - TF_LITE_ENSURE_EQ ( context , NumOutputs ( node ) , 0 ) ; <nl> OpData * op_data = reinterpret_cast < OpData * > ( node - > user_data ) ; <nl> <nl> OpContext op_context ( context , node ) ; <nl> <nl> + const int num_output = ( op_data - > debug_mode ) ? 1 : 0 ; <nl> + TF_LITE_ENSURE_EQ ( context , NumOutputs ( node ) , num_output ) ; <nl> + <nl> TF_LITE_ENSURE ( context , op_context . input - > type = = kTfLiteUInt8 | | <nl> op_context . input - > type = = kTfLiteInt8 | | <nl> op_context . input - > type = = kTfLiteInt16 | | <nl> op_context . input - > type = = kTfLiteFloat16 ) ; <nl> TF_LITE_ENSURE ( context , op_context . ref - > type = = kTfLiteFloat32 ) ; <nl> <nl> - op_data - > max_diff = op_data - > tolerance * op_context . input - > params . scale ; <nl> - switch ( op_context . input - > type ) { <nl> - case kTfLiteUInt8 : <nl> - case kTfLiteInt8 : <nl> - op_data - > max_diff * = ( 1 < < 8 ) ; <nl> - break ; <nl> - case kTfLiteInt16 : <nl> - op_data - > max_diff * = ( 1 < < 16 ) ; <nl> - break ; <nl> - default : <nl> - break ; <nl> - } <nl> - <nl> / / Allocate tensor to store the dequantized inputs . 
<nl> if ( op_data - > cache_tensor_id = = kTensorNotAllocated ) { <nl> TF_LITE_ENSURE_OK ( <nl> TfLiteStatus Prepare ( TfLiteContext * context , TfLiteNode * node ) { <nl> <nl> TfLiteTensor * dequantized ; <nl> TF_LITE_ENSURE_OK ( context , <nl> - GetTemporarySafe ( context , node , / * index = * / 0 , & dequantized ) ) ; <nl> + GetTemporarySafe ( context , node , kTemporaryDequantizedTensor , <nl> + & dequantized ) ) ; <nl> dequantized - > type = op_context . ref - > type ; <nl> dequantized - > allocation_type = kTfLiteDynamic ; <nl> <nl> TfLiteStatus Prepare ( TfLiteContext * context , TfLiteNode * node ) { <nl> context , dequantized , <nl> TfLiteIntArrayCopy ( op_context . input - > dims ) ) ) ; <nl> <nl> + if ( op_data - > debug_mode ) { <nl> + TF_LITE_ENSURE_OK ( context , GetOutputSafe ( context , node , num_output - 1 , <nl> + & op_context . output ) ) ; <nl> + op_context . output - > type = kTfLiteFloat32 ; <nl> + op_context . output - > allocation_type = kTfLiteArenaRwPersistent ; <nl> + return context - > ResizeTensor ( context , op_context . output , <nl> + TfLiteIntArrayCopy ( op_context . input - > dims ) ) ; <nl> + } <nl> return kTfLiteOk ; <nl> } <nl> <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> / / Dequantize the input <nl> TfLiteTensor * dequantized ; <nl> TF_LITE_ENSURE_OK ( context , <nl> - GetTemporarySafe ( context , node , / * index = * / 0 , & dequantized ) ) ; <nl> + GetTemporarySafe ( context , node , kTemporaryDequantizedTensor , <nl> + & dequantized ) ) ; <nl> auto status = builtin : : dequantize : : DequantizeImpl < kernel_type > ( <nl> context , node , op_context . input , dequantized ) ; <nl> if ( status ! = kTfLiteOk ) { <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> op_data - > float_input_initialized = true ; <nl> } <nl> <nl> - / / If the tolerance is very small , we only display the stats of the diff . <nl> - if ( op_data - > tolerance < 0 . 1 ) { <nl> + / / If the debug_mode is on , we don ' t throw any errors . <nl> + / / We just calculate difference between float and quantized values , letting <nl> + / / python debugger deal with the information . <nl> + if ( op_data - > debug_mode | | op_data - > tolerance < 0 . 1 ) { <nl> + const int num_output = ( op_data - > debug_mode ) ? 1 : 0 ; <nl> + const int n = NumElements ( dequantized ) ; <nl> + if ( op_data - > debug_mode ) { <nl> + TF_LITE_ENSURE_OK ( context , GetOutputSafe ( context , node , num_output - 1 , <nl> + & op_context . output ) ) ; <nl> + auto output_data = GetTensorData < float > ( op_context . output ) ; <nl> + for ( int i = 0 ; i < n ; + + i ) { <nl> + float dequant = GetTensorData < float > ( dequantized ) [ i ] ; <nl> + float reference = GetTensorData < float > ( op_context . ref ) [ i ] ; <nl> + output_data [ i ] = dequant - reference ; <nl> + } <nl> + } <nl> + / / These statistics logging was added to identify some errors in practice . <nl> std : : vector < double > diffs , temp ; <nl> - diffs . reserve ( NumElements ( dequantized ) ) ; <nl> - temp . reserve ( NumElements ( dequantized ) ) ; <nl> - for ( int i = 0 ; i < NumElements ( op_context . ref ) ; + + i ) { <nl> + diffs . reserve ( n ) ; <nl> + temp . reserve ( n ) ; <nl> + diffs . resize ( n ) ; <nl> + temp . resize ( n ) ; <nl> + for ( int i = 0 ; i < n ; + + i ) { <nl> float dequant = GetTensorData < float > ( dequantized ) [ i ] ; <nl> float reference = GetTensorData < float > ( op_context . ref ) [ i ] ; <nl> - diffs . 
push_back ( dequant - reference ) ; <nl> + diffs [ i ] = static_cast < double > ( dequant - reference ) ; <nl> } <nl> double mean = <nl> std : : accumulate ( diffs . begin ( ) , diffs . end ( ) , 0 . 0 ) / diffs . size ( ) ; <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> mean , max_diff , op_context . input - > params . scale , <nl> op_context . input - > params . zero_point ) ; <nl> return kTfLiteOk ; <nl> - } <nl> - <nl> - / / Verify the dequantized output . <nl> - auto max_diff = op_data - > tolerance * op_context . input - > params . scale ; <nl> - for ( int i = 0 ; i < NumElements ( op_context . ref ) ; + + i ) { <nl> - int32_t value = GetQuantizedValue ( op_context , i ) ; <nl> - float dequant = GetTensorData < float > ( dequantized ) [ i ] ; <nl> - float reference = GetTensorData < float > ( op_context . ref ) [ i ] ; <nl> - float diff = std : : abs ( reference - dequant ) ; <nl> - if ( diff > max_diff ) { <nl> - TF_LITE_KERNEL_LOG ( <nl> - context , <nl> - " Mismatch : % f is quantized to % d with ( % f , % d ) . " <nl> - " abs ( % f - % f ) = % f > % f ( tolerance ) range percentage % f . \ n " , <nl> - reference , value , op_context . input - > params . scale , <nl> - op_context . input - > params . zero_point , reference , dequant , diff , <nl> - max_diff , op_data - > tolerance ) ; <nl> - return kTfLiteError ; <nl> + } else { <nl> + / / Verify the dequantized output . <nl> + auto max_diff = op_data - > tolerance * op_context . input - > params . scale ; <nl> + for ( int i = 0 ; i < NumElements ( op_context . ref ) ; + + i ) { <nl> + int32_t value = GetQuantizedValue ( op_context , i ) ; <nl> + float dequant = GetTensorData < float > ( dequantized ) [ i ] ; <nl> + float reference = GetTensorData < float > ( op_context . ref ) [ i ] ; <nl> + float diff = std : : abs ( reference - dequant ) ; <nl> + if ( diff > max_diff ) { <nl> + TF_LITE_KERNEL_LOG ( <nl> + context , <nl> + " Mismatch : % f is quantized to % d with ( % f , % d ) . " <nl> + " abs ( % f - % f ) = % f > % f ( tolerance ) range percentage % f . \ n " , <nl> + reference , value , op_context . input - > params . scale , <nl> + op_context . input - > params . zero_point , reference , dequant , diff , <nl> + max_diff , op_data - > tolerance ) ; <nl> + return kTfLiteError ; <nl> + } <nl> } <nl> } <nl> return kTfLiteOk ; <nl> mmm a / tensorflow / lite / kernels / numeric_verify_test . cc <nl> ppp b / tensorflow / lite / kernels / numeric_verify_test . cc <nl> limitations under the License . <nl> # include < gtest / gtest . h > <nl> # include " absl / memory / memory . h " <nl> # include " third_party / eigen3 / Eigen / Core " <nl> + # include " flatbuffers / flexbuffers . h " / / from @ flatbuffers <nl> # include " tensorflow / lite / interpreter . h " <nl> + # include " tensorflow / lite / kernels / internal / tensor_ctypes . h " <nl> # include " tensorflow / lite / kernels / internal / types . h " <nl> + # include " tensorflow / lite / kernels / kernel_util . h " <nl> # include " tensorflow / lite / kernels / test_util . h " <nl> # include " tensorflow / lite / schema / schema_generated . h " <nl> <nl> class NumericVerifyOpModel : public SingleOpModel { <nl> public : <nl> NumericVerifyOpModel ( TensorType type , std : : initializer_list < int > shape , <nl> float scale , int32_t zero_point , int version , <nl> - float tolerance = 5 . 0 ) { <nl> + float tolerance = 5 . 
0 , bool debug_mode = false ) { <nl> const TensorData input_tensor_data = { type , shape , 0 , 0 , scale , zero_point } ; <nl> input_ = AddInput ( input_tensor_data ) ; <nl> ref_ = AddInput ( { TensorType_FLOAT32 , shape } ) ; <nl> + if ( debug_mode ) { <nl> + / / The output tensor has the same shape as the input tensor . <nl> + output_ = AddOutput ( { TensorType_FLOAT32 , shape } ) ; <nl> + } <nl> <nl> std : : vector < uint8_t > custom_options ( sizeof ( float ) ) ; <nl> - memcpy ( custom_options . data ( ) , & tolerance , sizeof ( float ) ) ; <nl> <nl> - SetCustomOp ( " NUMERIC_VERIFY " , custom_options , <nl> + flexbuffers : : Builder fbb ; <nl> + fbb . Map ( [ & ] ( ) { <nl> + fbb . Float ( " tolerance " , tolerance ) ; <nl> + fbb . Bool ( " debug_mode " , debug_mode ) ; <nl> + } ) ; <nl> + fbb . Finish ( ) ; <nl> + <nl> + SetCustomOp ( " NUMERIC_VERIFY " , fbb . GetBuffer ( ) , <nl> ops : : custom : : Register_NUMERIC_VERIFY ) ; <nl> <nl> BuildInterpreter ( { GetShape ( input_ ) , GetShape ( ref_ ) } ) ; <nl> class NumericVerifyOpModel : public SingleOpModel { <nl> PopulateTensor ( ref_ , ref_data ) ; <nl> } <nl> <nl> + std : : vector < float > GetOutput ( ) { return ExtractVector < float > ( output_ ) ; } <nl> + <nl> private : <nl> int input_ ; <nl> int ref_ ; <nl> + int output_ ; <nl> } ; <nl> <nl> TEST ( NumericVerifyOpTest , Uint8 ) { <nl> TEST ( NumericVerifyOpFailedTest , Int8 ) { <nl> EXPECT_EQ ( m . InvokeUnchecked ( ) , kTfLiteError ) ; <nl> } <nl> <nl> + TEST ( NumericVerifyOpDebugModeTest , Int8 ) { <nl> + / / [ - 63 . 5 , 64 ] - > scale = 0 . 5 , zero_point = - 1 for INT8 <nl> + NumericVerifyOpModel m ( TensorType_INT8 , { 2 , 5 } , 0 . 5 , - 1 , 2 , 5 . 0 , true ) ; <nl> + <nl> + / / The element at index 5 is set to 0 . <nl> + m . SetInputs < int8_t > ( { - 128 , - 127 , - 126 , - 125 , - 124 , 0 , 124 , 125 , 126 , 127 } , <nl> + { - 63 . 5 , - 63 , - 62 . 5 , - 62 , - 61 . 5 , 62 , 62 . 5 , 63 , 63 . 5 , 64 } ) ; <nl> + EXPECT_EQ ( m . InvokeUnchecked ( ) , kTfLiteOk ) ; <nl> + / / The element at index 5 has discrepancy - 61 . 5 ( = dequantized - reference = 0 . 5 - 62 ) . <nl> + EXPECT_THAT ( <nl> + m . GetOutput ( ) , <nl> + ElementsAreArray ( ArrayFloatNear ( { 0 , 0 , 0 , 0 , 0 , - 61 . 5 , 0 , 0 , 0 , 0 } ) ) ) ; <nl> + } <nl> } / / namespace <nl> } / / namespace tflite <nl>
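The test above mirrors the kernel-side change: the NUMERIC_VERIFY custom op now takes its options as a FlexBuffer map rather than a single raw float. A hedged Python sketch of the same encoding (assuming the flatbuffers pip package ships its `flexbuffers` module with `Dumps`; the helper name is ours, not TensorFlow's):

```python
from flatbuffers import flexbuffers


def numeric_verify_options(tolerance=5.0, debug_mode=False):
  """Returns bytes to store in the NUMERIC_VERIFY op's custom_options."""
  # Key names mirror the C++ test: a map of {tolerance, debug_mode}.
  return bytes(flexbuffers.Dumps({'tolerance': tolerance,
                                  'debug_mode': debug_mode}))
```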
mmm a / tensorflow / lite / kernels / register . cc <nl> ppp b / tensorflow / lite / kernels / register . cc <nl> BuiltinOpResolver : : BuiltinOpResolver ( ) { <nl> AddBuiltin ( BuiltinOperator_SPACE_TO_DEPTH , Register_SPACE_TO_DEPTH ( ) , <nl> / * min_version = * / 1 , <nl> / * max_version = * / 2 ) ; <nl> - AddBuiltin ( BuiltinOperator_DEPTH_TO_SPACE , Register_DEPTH_TO_SPACE ( ) ) ; <nl> + AddBuiltin ( BuiltinOperator_DEPTH_TO_SPACE , Register_DEPTH_TO_SPACE ( ) , <nl> + / * min_version = * / 1 , <nl> + / * max_version = * / 2 ) ; <nl> AddBuiltin ( BuiltinOperator_GATHER , Register_GATHER ( ) , <nl> / * min_version = * / 1 , <nl> / * max_version = * / 4 ) ; <nl> mmm a / tensorflow / lite / micro / kernels / quantize_test . cc <nl> ppp b / tensorflow / lite / micro / kernels / quantize_test . cc <nl> TF_LITE_MICRO_TEST ( QuantizeOpTestInt16toInt32 ) { <nl> output_zero_point , output_quantized ) ; <nl> } <nl> <nl> + TF_LITE_MICRO_TEST ( QuantizeOpTestInt16toInt8 ) { <nl> + constexpr int length = 10 ; <nl> + const int dims [ ] = { 2 , 2 , 5 } ; <nl> + const float values [ ] = { - 32 , - 31 , - 30 , - 29 , - 28 , 27 , 28 , 29 , 30 , 31 } ; <nl> + / / TODO ( b / 155682734 ) : Input scale must be smaller than output scale for <nl> + / / xtensa . <nl> + const float input_scale = 0 . 4f ; <nl> + const int input_zero_point = 0 ; <nl> + const float output_scale = 1 . 0f ; <nl> + const int output_zero_point = 0 ; <nl> + int8_t output_quantized [ length ] ; <nl> + int8_t values_quantized [ length ] ; <nl> + int16_t input_quantized [ length ] ; <nl> + tflite : : testing : : TestRequantize ( dims , values , input_quantized , input_scale , <nl> + input_zero_point , dims , values , <nl> + values_quantized , output_scale , <nl> + output_zero_point , output_quantized ) ; <nl> + } <nl> + <nl> TF_LITE_MICRO_TESTS_END <nl> mmm a / tensorflow / lite / micro / tools / ci_build / test_all . sh <nl> ppp b / tensorflow / lite / micro / tools / ci_build / test_all . sh <nl> ROOT_DIR = $ { SCRIPT_DIR } / . . / . . / . . / . . / . . <nl> cd " $ { ROOT_DIR } " <nl> pwd <nl> <nl> - make - f tensorflow / lite / micro / tools / make / Makefile \ <nl> - clean clean_downloads <nl> + make - f tensorflow / lite / micro / tools / make / Makefile clean_downloads DISABLE_DOWNLOADS = true <nl> + <nl> + <nl> + make - f tensorflow / lite / micro / tools / make / Makefile TAGS = cmsis - nn clean DISABLE_DOWNLOADS = true <nl> + if [ - d tensorflow / lite / micro / tools / make / downloads ] ; then <nl> + echo " ERROR : Downloads directory should not exist , but it does . " <nl> + exit 1 <nl> + fi <nl> + <nl> <nl> # We are moving away from having the downloads and installations be part of the <nl> # Makefile . As a result , we need to manually add the downloads in this script . <nl> mmm a / tensorflow / lite / micro / tools / make / Makefile <nl> ppp b / tensorflow / lite / micro / tools / make / Makefile <nl> ALL_PROJECT_TARGETS : = <nl> ARDUINO_LIBRARY_TARGETS : = <nl> ARDUINO_LIBRARY_ZIPS : = <nl> <nl> - # The download scripts require that the downloads directory already exist for <nl> - # improved error checking . To accomodate that , we first create a downloads <nl> - # directory . <nl> - $ ( shell mkdir - p $ { MAKEFILE_DIR } / downloads ) <nl> - <nl> - # Directly download the flatbuffers library . <nl> - DOWNLOAD_RESULT : = $ ( shell $ ( MAKEFILE_DIR ) / flatbuffers_download . sh $ { MAKEFILE_DIR } / downloads ) <nl> - ifneq ( $ ( DOWNLOAD_RESULT ) , SUCCESS ) <nl> - $ ( error Something went wrong with the flatbuffers download : $ ( DOWNLOAD_RESULT ) ) <nl> - endif <nl> + # For some invocations of the makefile , it is useful to avoid downloads . This <nl> + # can be achieved by explicitly passing in DISABLE_DOWNLOADS = true on the command <nl> + # line . Note that for target - specific downloads ( e . g . CMSIS ) there will need to <nl> + # be corresponding checking in the respective included makefiles ( e . g . <nl> + # ext_libs / cmsis_nn . inc ) <nl> + DISABLE_DOWNLOADS : = <nl> + <nl> + ifneq ( $ ( DISABLE_DOWNLOADS ) , true ) <nl> + # The download scripts require that the downloads directory already exist for <nl> + # improved error checking . To accommodate that , we first create a downloads <nl> + # directory .
<nl> + $ ( shell mkdir - p $ { MAKEFILE_DIR } / downloads ) <nl> + <nl> + # Directly download the flatbuffers library . <nl> + DOWNLOAD_RESULT : = $ ( shell $ ( MAKEFILE_DIR ) / flatbuffers_download . sh $ { MAKEFILE_DIR } / downloads ) <nl> + ifneq ( $ ( DOWNLOAD_RESULT ) , SUCCESS ) <nl> + $ ( error Something went wrong with the flatbuffers download : $ ( DOWNLOAD_RESULT ) ) <nl> + endif <nl> <nl> - include $ ( MAKEFILE_DIR ) / third_party_downloads . inc <nl> - THIRD_PARTY_DOWNLOADS : = <nl> - $ ( eval $ ( call add_third_party_download , $ ( GEMMLOWP_URL ) , $ ( GEMMLOWP_MD5 ) , gemmlowp , ) ) <nl> - $ ( eval $ ( call add_third_party_download , $ ( RUY_URL ) , $ ( RUY_MD5 ) , ruy , ) ) <nl> - $ ( eval $ ( call add_third_party_download , $ ( PERSON_MODEL_URL ) , $ ( PERSON_MODEL_MD5 ) , person_model_grayscale , ) ) <nl> - $ ( eval $ ( call add_third_party_download , $ ( PERSON_MODEL_INT8_URL ) , $ ( PERSON_MODEL_INT8_MD5 ) , person_model_int8 , ) ) <nl> + include $ ( MAKEFILE_DIR ) / third_party_downloads . inc <nl> + THIRD_PARTY_DOWNLOADS : = <nl> + $ ( eval $ ( call add_third_party_download , $ ( GEMMLOWP_URL ) , $ ( GEMMLOWP_MD5 ) , gemmlowp , ) ) <nl> + $ ( eval $ ( call add_third_party_download , $ ( RUY_URL ) , $ ( RUY_MD5 ) , ruy , ) ) <nl> + $ ( eval $ ( call add_third_party_download , $ ( PERSON_MODEL_URL ) , $ ( PERSON_MODEL_MD5 ) , person_model_grayscale , ) ) <nl> + $ ( eval $ ( call add_third_party_download , $ ( PERSON_MODEL_INT8_URL ) , $ ( PERSON_MODEL_INT8_MD5 ) , person_model_int8 , ) ) <nl> + endif <nl> <nl> # The target - specific makefile must have a name that is exactly <nl> # TARGET_makefile . inc and is only needed for cross - compilation ( i . e . when TARGET <nl> mmm a / tensorflow / lite / micro / tools / make / ext_libs / cmsis_nn . inc <nl> ppp b / tensorflow / lite / micro / tools / make / ext_libs / cmsis_nn . inc <nl> ifneq ( $ ( filter cmsis - nn , $ ( ALL_TAGS ) ) , ) <nl> # CMSIS - NN optimizations not supported <nl> endif <nl> <nl> - # Setup CMSIS - NN lib and add required header files to microlite lib INCLUDE . <nl> - # Unless an external path is provided we force a download during the first phase of make so <nl> - # that the files exist prior to the call to recursive_find below . add_third_party_download <nl> - # prevents the use of wildcards and recursive_find in selecting which files to add to THIRD_PARTY_SRCS . <nl> - CMSIS_DEFAULT_DOWNLOAD_PATH : = $ ( MAKEFILE_DIR ) / downloads / cmsis <nl> - CMSIS_PATH : = $ ( CMSIS_DEFAULT_DOWNLOAD_PATH ) <nl> - ifeq ( $ ( CMSIS_PATH ) , $ ( CMSIS_DEFAULT_DOWNLOAD_PATH ) ) <nl> - $ ( call $ ( or $ ( shell $ ( DOWNLOAD_SCRIPT ) $ ( CMSIS_URL ) $ ( CMSIS_MD5 ) $ ( CMSIS_PATH ) > & 2 & & echo SUCCESS ) , $ ( error $ ( DOWNLOAD_SCRIPT ) failed ) ) ) <nl> + ifneq ( $ ( DISABLE_DOWNLOADS ) , true ) <nl> + # Setup CMSIS - NN lib and add required header files to microlite lib INCLUDE . <nl> + # Unless an external path is provided we force a download during the first phase of make so <nl> + # that the files exist prior to the call to recursive_find below . add_third_party_download <nl> + # prevents the use of wildcards and recursive_find in selecting which files to add to THIRD_PARTY_SRCS . 
<nl> + CMSIS_DEFAULT_DOWNLOAD_PATH : = $ ( MAKEFILE_DIR ) / downloads / cmsis <nl> + CMSIS_PATH : = $ ( CMSIS_DEFAULT_DOWNLOAD_PATH ) <nl> + ifeq ( $ ( CMSIS_PATH ) , $ ( CMSIS_DEFAULT_DOWNLOAD_PATH ) ) <nl> + $ ( call $ ( or $ ( shell $ ( DOWNLOAD_SCRIPT ) $ ( CMSIS_URL ) $ ( CMSIS_MD5 ) $ ( CMSIS_PATH ) > & 2 & & echo SUCCESS ) , $ ( error $ ( DOWNLOAD_SCRIPT ) failed ) ) ) <nl> + endif <nl> endif <nl> <nl> THIRD_PARTY_CC_SRCS + = \ <nl> mmm a / tensorflow / lite / micro / tools / make / ext_libs / xtensa . inc <nl> ppp b / tensorflow / lite / micro / tools / make / ext_libs / xtensa . inc <nl> <nl> ifeq ( $ ( TARGET_ARCH ) , hifi4 ) <nl> <nl> - DOWNLOAD_RESULT : = $ ( shell $ ( MAKEFILE_DIR ) / ext_libs / xtensa_download . sh $ { MAKEFILE_DIR } / downloads ) <nl> + DOWNLOAD_RESULT : = $ ( shell $ ( MAKEFILE_DIR ) / ext_libs / xtensa_download . sh $ { MAKEFILE_DIR } / downloads hifi4 ) <nl> ifneq ( $ ( DOWNLOAD_RESULT ) , SUCCESS ) <nl> $ ( error Something went wrong with the xtensa download : $ ( DOWNLOAD_RESULT ) ) <nl> endif <nl>
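The xtensa.inc hunk above now passes the variant name through to the download script, whose rewrite follows. For reference, a minimal Python sketch of the same fetch-and-verify convention the script implements (a hypothetical helper, not part of the build flow):

```python
import hashlib
import urllib.request


def fetch_verified(url, expected_md5, dest_path):
  """Downloads url to dest_path, failing loudly on an MD5 mismatch."""
  data = urllib.request.urlopen(url).read()
  got_md5 = hashlib.md5(data).hexdigest()
  if got_md5 != expected_md5:
    raise RuntimeError(
        'Bad checksum. Expected: %s, Got: %s' % (expected_md5, got_md5))
  with open(dest_path, 'wb') as f:
    f.write(data)
```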
mmm a / tensorflow / lite / micro / tools / make / ext_libs / xtensa_download . sh <nl> ppp b / tensorflow / lite / micro / tools / make / ext_libs / xtensa_download . sh <nl> <nl> # Called with two arguments : <nl> # 1 - Path to the downloads folder which is typically <nl> # tensorflow / lite / micro / tools / make / downloads <nl> + # 2 - Xtensa variant to download for ( e . g . hifi4 ) <nl> # <nl> # This script is called from the Makefile and uses the following convention to <nl> # enable determination of success / failure : <nl> if [ ! - d $ { DOWNLOADS_DIR } ] ; then <nl> exit 1 <nl> fi <nl> <nl> - # Name of the xa_nnlib directory once it is unzipped . <nl> - HIFI4_XA_NNLIB_DIRNAME = " xa_nnlib_hifi4 " <nl> - <nl> - HIFI4_PATH = $ { DOWNLOADS_DIR } / $ { HIFI4_XA_NNLIB_DIRNAME } <nl> - if [ - d $ { HIFI4_PATH } ] ; then <nl> - echo > & 2 " $ { HIFI4_PATH } already exists , skipping the download . " <nl> + if [ [ $ { 2 } = = " hifi4 " ] ] ; then <nl> + LIBRARY_URL = " http : / / mirror . tensorflow . org / github . com / foss - xtensa / nnlib - hifi4 / raw / master / archive / xa_nnlib_06_27 . zip " <nl> + LIBRARY_DIRNAME = " xa_nnlib_hifi4 " <nl> + LIBRARY_MD5 = " 45fdc1209a8da62ab568aa6040f7eabf " <nl> else <nl> + echo " Attempting to download an unsupported xtensa variant : $ { 2 } " <nl> + exit 1 <nl> + fi <nl> <nl> - ZIP_ARCHIVE_NAME = " xa_nnlib_06_27 . zip " <nl> - HIFI4_URL = " http : / / mirror . tensorflow . org / github . com / foss - xtensa / nnlib - hifi4 / raw / master / archive / $ { ZIP_ARCHIVE_NAME } " <nl> - HIFI4_MD5 = " 45fdc1209a8da62ab568aa6040f7eabf " <nl> + LIBRARY_INSTALL_PATH = $ { DOWNLOADS_DIR } / $ { LIBRARY_DIRNAME } <nl> <nl> - wget $ { HIFI4_URL } - O / tmp / $ { ZIP_ARCHIVE_NAME } > & 2 <nl> - MD5 = ` md5sum / tmp / $ { ZIP_ARCHIVE_NAME } | awk ' { print $ 1 } ' ` <nl> + if [ - d $ { LIBRARY_INSTALL_PATH } ] ; then <nl> + echo > & 2 " $ { LIBRARY_INSTALL_PATH } already exists , skipping the download . " <nl> + else <nl> + TMP_ZIP_ARCHIVE_NAME = " $ { LIBRARY_DIRNAME } . zip " <nl> + wget $ { LIBRARY_URL } - O / tmp / $ { TMP_ZIP_ARCHIVE_NAME } > & 2 <nl> + MD5 = ` md5sum / tmp / $ { TMP_ZIP_ARCHIVE_NAME } | awk ' { print $ 1 } ' ` <nl> <nl> - if [ [ $ { MD5 } ! = $ { HIFI4_MD5 } ] ] <nl> + if [ [ $ { MD5 } ! = $ { LIBRARY_MD5 } ] ] <nl> then <nl> - echo " Bad checksum . Expected : $ { HIFI4_MD5 } , Got : $ { MD5 } " <nl> + echo " Bad checksum . Expected : $ { LIBRARY_MD5 } , Got : $ { MD5 } " <nl> exit 1 <nl> fi <nl> <nl> - unzip - qo / tmp / $ { ZIP_ARCHIVE_NAME } - d $ { DOWNLOADS_DIR } > & 2 <nl> + unzip - qo / tmp / $ { TMP_ZIP_ARCHIVE_NAME } - d $ { DOWNLOADS_DIR } > & 2 <nl> fi <nl> <nl> echo " SUCCESS " <nl> mmm a / tensorflow / lite / micro / tools / make / flatbuffers_download . sh <nl> ppp b / tensorflow / lite / micro / tools / make / flatbuffers_download . sh <nl> function patch_to_avoid_strtod ( ) { <nl> mv $ { temp_flexbuffers_path } $ { input_flexbuffers_path } <nl> } <nl> <nl> + # The BUILD files in the downloaded folder result in an error with : <nl> + # bazel build tensorflow / lite / micro / . . . <nl> + # <nl> + # Parameters : <nl> + # $ 1 - path to the downloaded flatbuffers code . <nl> + function delete_build_files ( ) { <nl> + rm - f ` find $ { 1 } - name BUILD ` <nl> + } <nl> + <nl> DOWNLOADED_FLATBUFFERS_PATH = $ { DOWNLOADS_DIR } / flatbuffers <nl> <nl> if [ - d $ { DOWNLOADED_FLATBUFFERS_PATH } ] ; then <nl> else <nl> mv / tmp / flatbuffers - $ { ZIP_PREFIX } $ { DOWNLOADED_FLATBUFFERS_PATH } <nl> <nl> patch_to_avoid_strtod $ { DOWNLOADED_FLATBUFFERS_PATH } / include / flatbuffers / flexbuffers . h <nl> + delete_build_files $ { DOWNLOADED_FLATBUFFERS_PATH } <nl> + <nl> fi <nl> <nl> echo " SUCCESS " <nl> mmm a / tensorflow / lite / shared_library . h <nl> ppp b / tensorflow / lite / shared_library . h <nl> class SharedLibrary { <nl> return reinterpret_cast < void * > ( <nl> GetProcAddress ( static_cast < HMODULE > ( handle ) , symbol ) ) ; <nl> } <nl> + / / Warning : Unlike dlsym ( RTLD_DEFAULT ) , this does not search for the symbol <nl> + / / in dependent DLLs . <nl> static inline void * GetSymbol ( const char * symbol ) { <nl> return reinterpret_cast < void * > ( GetProcAddress ( nullptr , symbol ) ) ; <nl> } <nl> mmm a / tensorflow / lite / testing / op_tests / depth_to_space . py <nl> ppp b / tensorflow / lite / testing / op_tests / depth_to_space . py <nl> def make_depth_to_space_tests ( options ) : <nl> " " " Make a set of tests to do depth_to_space . " " " <nl> <nl> test_parameters = [ { <nl> - " dtype " : [ tf . float32 , tf . int32 , tf . uint8 , tf . int64 ] , <nl> + " dtype " : [ tf . int32 , tf . uint8 , tf . int64 ] , <nl> " input_shape " : [ [ 2 , 3 , 4 , 16 ] ] , <nl> " block_size " : [ 2 , 4 ] , <nl> + " fully_quantize " : [ False ] , <nl> + } , { <nl> + " dtype " : [ tf . float32 ] , <nl> + " input_shape " : [ [ 2 , 3 , 4 , 16 ] ] , <nl> + " block_size " : [ 2 , 4 ] , <nl> + " fully_quantize " : [ True , False ] , <nl> } ] <nl> <nl> def build_graph ( parameters ) : <nl> def build_graph ( parameters ) : <nl> return [ input_tensor ] , [ out ] <nl> <nl> def build_inputs ( parameters , sess , inputs , outputs ) : <nl> - input_values = create_tensor_data ( parameters [ " dtype " ] , <nl> - parameters [ " input_shape " ] ) <nl> + if not parameters [ " fully_quantize " ] : <nl> + input_values = create_tensor_data ( parameters [ " dtype " ] , <nl> + parameters [ " input_shape " ] ) <nl> + else : <nl> + input_values = create_tensor_data ( <nl> + parameters [ " dtype " ] , <nl> + parameters [ " input_shape " ] , <nl> + min_value = - 1 , <nl> + max_value = 1 ) <nl> return [ input_values ] , sess . run ( <nl> outputs , feed_dict = dict ( zip ( inputs , [ input_values ] ) ) ) <nl> <nl> mmm a / tensorflow / lite / testing / selective_build_test . cc <nl> ppp b / tensorflow / lite / testing / selective_build_test . cc <nl> limitations under the License . <nl> # include < gtest / gtest .
h > <nl> # include " tensorflow / core / platform / logging . h " <nl> # include " tensorflow / lite / c / common . h " <nl> + # include " tensorflow / lite / create_op_resolver . h " <nl> # include " tensorflow / lite / interpreter . h " <nl> - # include " tensorflow / lite / java / src / main / native / op_resolver . h " <nl> # include " tensorflow / lite / model . h " <nl> # include " tensorflow / lite / model_builder . h " <nl> <nl> mmm a / tensorflow / lite / tools / make / Makefile <nl> ppp b / tensorflow / lite / tools / make / Makefile <nl> $ ( wildcard tensorflow / lite / * / * / * / * / * / * tool . cc ) \ <nl> $ ( wildcard tensorflow / lite / kernels / * test_main . cc ) \ <nl> $ ( wildcard tensorflow / lite / kernels / * test_util * . cc ) \ <nl> $ ( wildcard tensorflow / lite / tools / make / downloads / cpuinfo / src / * / mock * . c ) \ <nl> + tensorflow / lite / create_op_resolver_with_selected_ops . cc \ <nl> tensorflow / lite / tflite_with_xnnpack . cc \ <nl> $ ( MINIMAL_SRCS ) <nl> <nl> mmm a / tensorflow / lite / tools / optimize / operator_property . cc <nl> ppp b / tensorflow / lite / tools / optimize / operator_property . cc <nl> OperatorProperty GetOperatorProperty ( OpVariant op_variant ) { <nl> property . version = 2 ; <nl> property . quantizable_int16 = false ; <nl> break ; <nl> + case BuiltinOperator_DEPTH_TO_SPACE : <nl> + property . inputs = { { 0 , { } } } ; <nl> + property . outputs = { { 0 , { } } } ; <nl> + property . restrict_same_input_output_scale = true ; <nl> + property . version = 2 ; <nl> + property . quantizable_int16 = false ; <nl> + break ; <nl> case BuiltinOperator_SPLIT : <nl> / / We skip input 0 since it is the split dim which is not real valued . <nl> property . inputs = { { 1 , { } } } ; <nl> mmm a / tensorflow / lite / tools / versioning / op_version . cc <nl> ppp b / tensorflow / lite / tools / versioning / op_version . cc <nl> int GetBuiltinOperatorVersion ( const OpSignature & op_sig ) { <nl> case BuiltinOperator_SELECT : <nl> case BuiltinOperator_RSQRT : <nl> case BuiltinOperator_SQUARED_DIFFERENCE : <nl> - if ( op_sig . input_types . at ( 0 ) = = TensorType_INT8 ) { <nl> - return 2 ; <nl> - } <nl> - return 1 ; <nl> + case BuiltinOperator_DEPTH_TO_SPACE : <nl> case BuiltinOperator_MIRROR_PAD : <nl> if ( op_sig . input_types . at ( 0 ) = = TensorType_INT8 ) { <nl> return 2 ; <nl> mmm a / tensorflow / lite / tools / versioning / runtime_version . cc <nl> ppp b / tensorflow / lite / tools / versioning / runtime_version . cc <nl> std : : string FindMinimumRuntimeVersionForOp ( tflite : : BuiltinOperator op_code , <nl> { { BuiltinOperator_CONCATENATION , 2 } , " 1 . 14 . 0 " } , <nl> { { BuiltinOperator_CONCATENATION , 3 } , " 2 . 3 . 0 " } , <nl> { { BuiltinOperator_DEPTH_TO_SPACE , 1 } , " 2 . 1 . 0 " } , <nl> + { { BuiltinOperator_DEPTH_TO_SPACE , 2 } , kPendingReleaseVersion } , <nl> { { BuiltinOperator_EMBEDDING_LOOKUP , 1 } , " 1 . 13 . 0 " } , <nl> { { BuiltinOperator_EMBEDDING_LOOKUP , 2 } , " 1 . 14 . 0 " } , <nl> { { BuiltinOperator_EMBEDDING_LOOKUP , 3 } , " 1 . 14 . 0 " } , <nl> mmm a / tensorflow / python / BUILD <nl> ppp b / tensorflow / python / BUILD <nl> cc_library ( <nl> ] , <nl> ) <nl> <nl> + # bfloat16_lib is shared with JAX , and must not depend on any other parts of <nl> + # TensorFlow . <nl> + # TODO ( phawkins ) : move bfloat16 into its own pip package . <nl> cc_library ( <nl> name = " bfloat16_lib " , <nl> srcs = [ " lib / core / bfloat16 . cc " ] , <nl> hdrs = [ " lib / core / bfloat16 . 
h " ] , <nl> deps = [ <nl> " : numpy_lib " , <nl> - " : safe_ptr " , <nl> - " / / tensorflow / core : framework " , <nl> - " / / tensorflow / core : lib " , <nl> + " / / tensorflow / core / platform : logging " , <nl> + " / / third_party / eigen3 " , <nl> " / / third_party / python_runtime : headers " , <nl> + " @ com_google_absl / / absl / strings " , <nl> ] , <nl> ) <nl> <nl> tf_python_pybind_extension ( <nl> hdrs = [ <nl> " / / tensorflow / c : headers " , <nl> " / / tensorflow / c / eager : headers " , <nl> + # Using header directly is required to avoid ODR violations . <nl> + " util / stack_trace . h " , <nl> ] , <nl> # TODO ( b / 138203821 ) : change to " util . _tf_stack " once the bug is fixed . <nl> module_name = " _tf_stack " , <nl> deps = [ <nl> - " : stack_trace " , <nl> - " / / tensorflow / c : pywrap_required_hdrs " , <nl> - " / / tensorflow / core / common_runtime : core_cpu_headers_lib " , <nl> - " / / tensorflow / core / framework : pywrap_required_hdrs " , <nl> - " / / tensorflow / core / platform : path " , <nl> - " / / third_party / python_runtime : headers " , # buildcleaner : keep <nl> " @ com_google_absl / / absl / algorithm : container " , <nl> - " @ com_google_absl / / absl / container : flat_hash_map " , <nl> - " @ com_google_absl / / absl / container : flat_hash_set " , <nl> - " @ com_google_absl / / absl / hash " , <nl> " @ com_google_absl / / absl / strings " , <nl> " @ com_google_absl / / absl / strings : str_format " , <nl> " @ com_google_absl / / absl / types : span " , <nl> " @ pybind11 " , <nl> - ] , <nl> + " / / third_party / python_runtime : headers " , # buildcleaner : keep <nl> + " / / tensorflow / c : pywrap_required_hdrs " , <nl> + " / / tensorflow / core / common_runtime : core_cpu_headers_lib " , <nl> + " / / tensorflow / core / framework : pywrap_required_hdrs " , <nl> + " / / tensorflow / core / platform : path " , <nl> + ] + if_static ( [ <nl> + " : stack_trace " , <nl> + ] ) , <nl> ) <nl> <nl> tf_py_test ( <nl> mmm a / tensorflow / python / __init__ . py <nl> ppp b / tensorflow / python / __init__ . py <nl> <nl> <nl> # go / tf - wildcard - import <nl> # pylint : disable = wildcard - import , g - bad - import - order , g - import - not - at - top <nl> - from tensorflow . python import pywrap_tensorflow as _pywrap_tensorflow <nl> <nl> from tensorflow . python . eager import context <nl> + from tensorflow . python import pywrap_tensorflow as _pywrap_tensorflow <nl> <nl> # pylint : enable = wildcard - import <nl> <nl> mmm a / tensorflow / python / autograph / impl / conversion . py <nl> ppp b / tensorflow / python / autograph / impl / conversion . py <nl> def is_allowlisted ( <nl> # The check for __code__ below is because isgeneratorfunction crashes <nl> # without one . <nl> if hasattr ( o , ' __code__ ' ) and tf_inspect . isgeneratorfunction ( o ) : <nl> - logging . warn ( <nl> - ' Entity % s appears to be a generator function . It will not be converted ' <nl> - ' by AutoGraph . ' , o ) <nl> logging . log ( 2 , ' Allowlisted : % s : generator functions are not converted ' , o ) <nl> return True <nl> <nl> mmm a / tensorflow / python / client / tf_session_wrapper . cc <nl> ppp b / tensorflow / python / client / tf_session_wrapper . cc <nl> PYBIND11_MODULE ( _pywrap_tf_session , m ) { <nl> } , <nl> py : : return_value_policy : : reference ) ; <nl> <nl> + m . 
def ( <nl> + " TF_LoadPluggableDeviceLibrary " , <nl> + [ ] ( const char * library_filename ) { <nl> + tensorflow : : Safe_TF_StatusPtr status = <nl> + tensorflow : : make_safe ( TF_NewStatus ( ) ) ; <nl> + auto output = <nl> + TF_LoadPluggableDeviceLibrary ( library_filename , status . get ( ) ) ; <nl> + tensorflow : : MaybeRaiseRegisteredFromTFStatus ( status . get ( ) ) ; <nl> + return output ; <nl> + } , <nl> + py : : return_value_policy : : reference ) ; <nl> + <nl> m . def ( " TF_GetOpList " , [ ] ( TF_Library * lib_handle ) { <nl> TF_Buffer output_buffer = TF_GetOpList ( lib_handle ) ; <nl> return tensorflow : : PyoOrThrow ( PyBytes_FromStringAndSize ( <nl> PYBIND11_MODULE ( _pywrap_tf_session , m ) { <nl> <nl> m . def ( " TF_DeleteLibraryHandle " , TF_DeleteLibraryHandle , <nl> py : : call_guard < py : : gil_scoped_release > ( ) ) ; <nl> + <nl> + m . def ( " TF_PluggableDeviceLibraryHandle " , <nl> + TF_DeletePluggableDeviceLibraryHandle , <nl> + py : : call_guard < py : : gil_scoped_release > ( ) ) ; <nl> + <nl> m . def ( " TF_AddControlInput " , TF_AddControlInput ) ; <nl> m . def ( <nl> " TF_AddInputList " , [ ] ( TF_OperationDescription * desc , py : : handle & inputs ) { <nl> mmm a / tensorflow / python / compat / compat . py <nl> ppp b / tensorflow / python / compat / compat . py <nl> <nl> # This value changes every day with an automatic CL . It can be modified in code <nl> # via ` forward_compatibility_horizon ( ) ` or with the environment variable <nl> # TF_FORWARD_COMPATIBILITY_DELTA_DAYS , which is added to the compatibility date . <nl> - _FORWARD_COMPATIBILITY_HORIZON = datetime . date ( 2020 , 12 , 7 ) <nl> + _FORWARD_COMPATIBILITY_HORIZON = datetime . date ( 2020 , 12 , 8 ) <nl> _FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = " TF_FORWARD_COMPATIBILITY_DELTA_DAYS " <nl> _FORWARD_COMPATIBILITY_DATE_NUMBER = None <nl> <nl> mmm a / tensorflow / python / eager / context . py <nl> ppp b / tensorflow / python / eager / context . py <nl> def invoking_op_callbacks ( self ) : <nl> def invoking_op_callbacks ( self , value ) : <nl> self . _thread_local_data . invoking_op_callbacks = value <nl> <nl> - def _initialize_physical_devices ( self ) : <nl> - " " " Get local devices visible to the system . " " " <nl> + def _initialize_physical_devices ( self , reinitialize = False ) : <nl> + " " " Gets local devices visible to the system . <nl> + <nl> + Args : <nl> + reinitialize : If True , reinitializes self . _physical_devices so that <nl> + dynamically registered devices will also be visible to the Python <nl> + front end . <nl> + " " " <nl> # We lazily initialize self . _physical_devices since we do not want to do <nl> # this in the constructor , since the backend may not be initialized yet . <nl> with self . _device_lock : <nl> - if self . _physical_devices is not None : <nl> + if not reinitialize and self . _physical_devices is not None : <nl> return <nl> <nl> devs = pywrap_tfe . TF_ListPhysicalDevices ( ) <nl> def _initialize_physical_devices ( self ) : <nl> # Import device settings that may have been passed into the constructor <nl> self . _import_config ( ) <nl> <nl> + def reinitialize_physical_devices ( self ) : <nl> + " " " Reinitializes the list of physical devices visible to the system . " " " <nl> + # Reinitialize the physical device list after registering <nl> + # the pluggable device . <nl> + self . _initialize_physical_devices ( True ) <nl> + <nl> def list_physical_devices ( self , device_type = None ) : <nl> " " " List local devices visible to the system .
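A hedged end-to-end usage sketch of this plumbing, combining the binding above, the context.py refresh, and `load_pluggable_device_library` from the load_library.py hunk further down (the plugin path is a placeholder, not a real artifact):

```python
import tensorflow as tf
from tensorflow.python.framework import load_library

# Loads the StreamExecutor-C-API plugin and, via
# context.reinitialize_physical_devices(), refreshes the device list.
load_library.load_pluggable_device_library('./libdemo_plugin.so')
print(tf.config.list_physical_devices())  # now includes the plugin's devices
```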
<nl> <nl> mmm a / tensorflow / python / eager / def_function . py <nl> ppp b / tensorflow / python / eager / def_function . py <nl> <nl> <nl> FREQUENT_TRACING_WARNING_MAX_CALL_HISTORY = 10 <nl> FREQUENT_TRACING_WARNING_THRESHOLD = 5 <nl> + FREQUENT_TRACING_WARNING_MAX_WARNING_PER_DETECTOR = 2 <nl> <nl> <nl> - class _CallCounter ( object ) : <nl> + class _FrequentTracingDetector ( object ) : <nl> " " " Class keeping track of how many recent calls triggered tracing . " " " <nl> <nl> - __slots__ = [ " _max_call_history " , " _calls_per_tracings " , " call_count " ] <nl> + __slots__ = [ " _calls_per_tracings " , " _call_count " , " _total_warning_count " ] <nl> <nl> - def __init__ ( self , max_call_history ) : <nl> - self . _max_call_history = max_call_history <nl> + def __init__ ( self ) : <nl> self . _calls_per_tracings = [ ] <nl> - self . call_count = 0 <nl> + self . _total_warning_count = 0 <nl> + self . _call_count = 0 <nl> + <nl> + def called_with_tracing ( self , function_name , omit_warning ) : <nl> + " " " Updates the list of most recent calls ' tracing information . <nl> <nl> - def called_with_tracing ( self ) : <nl> - self . call_count + = 1 <nl> + Warns the user when recent calls caused retracing too often . <nl> + <nl> + Args : <nl> + function_name : the python function being traced . <nl> + omit_warning : If ' True ' , this call will not warn the user even if <nl> + retracing happens too often . <nl> + " " " <nl> + self . _call_count + = 1 <nl> self . _calls_per_tracings . append ( 1 ) <nl> <nl> while self . _calls_per_tracings : <nl> - if self . call_count - self . _calls_per_tracings [ 0 ] > self . _max_call_history : <nl> - self . call_count - = self . _calls_per_tracings . pop ( 0 ) <nl> + if ( self . _call_count - self . _calls_per_tracings [ 0 ] > <nl> + FREQUENT_TRACING_WARNING_MAX_CALL_HISTORY ) : <nl> + self . _call_count - = self . _calls_per_tracings . pop ( 0 ) <nl> else : <nl> break <nl> <nl> + if ( omit_warning or self . _total_warning_count > = <nl> + FREQUENT_TRACING_WARNING_MAX_WARNING_PER_DETECTOR ) : <nl> + return <nl> + if len ( self . _calls_per_tracings ) > = FREQUENT_TRACING_WARNING_THRESHOLD : <nl> + self . _total_warning_count + = 1 <nl> + logging . warning ( <nl> + " { } out of the last { } calls to { } triggered tf . function " <nl> + " retracing . Tracing is expensive and the excessive number of " <nl> + " tracings could be due to ( 1 ) creating @ tf . function repeatedly in " <nl> + " a loop , ( 2 ) passing tensors with different shapes , ( 3 ) passing " <nl> + " Python objects instead of tensors . For ( 1 ) , please define your " <nl> + " @ tf . function outside of the loop . For ( 2 ) , @ tf . function has " <nl> + " experimental_relax_shapes = True option that relaxes argument " <nl> + " shapes that can avoid unnecessary retracing . For ( 3 ) , please " <nl> + " refer to " <nl> + " https : / / www . tensorflow . org / guide / function # controlling_retracing " <nl> + " and https : / / www . tensorflow . org / api_docs / python / tf / function for " <nl> + " more details . " . format ( <nl> + len ( self . _calls_per_tracings ) , self . _call_count , function_name ) ) <nl> + <nl> def called_without_tracing ( self ) : <nl> # We don ' t count tracing when users load a concrete function directly or <nl> # call get_concrete_function , so the first call can be not a tracing call . <nl> if not self . _calls_per_tracings : <nl> self . _calls_per_tracings = [ 0 ] <nl> self . _calls_per_tracings [ - 1 ] + = 1 <nl> - self . 
call_count + = 1 <nl> - <nl> - def get_tracing_count ( self ) : <nl> - return len ( self . _calls_per_tracings ) <nl> + self . _call_count + = 1 <nl> <nl> <nl> - class _FrequentTracingDetector ( object ) : <nl> - " " " Class for frequent retracing detection and warning . " " " <nl> + class _FrequentTracingDetectorManager ( object ) : <nl> + " " " Class for the management of all _FrequentTracingDetector objects . " " " <nl> <nl> - __slots__ = [ " _counters " , " _lock " ] <nl> + __slots__ = [ " _detectors " , " _lock " ] <nl> <nl> def __init__ ( self ) : <nl> - self . _counters = weakref . WeakKeyDictionary ( ) # GUARDED_BY ( self . _lock ) <nl> + self . _detectors = weakref . WeakKeyDictionary ( ) # GUARDED_BY ( self . _lock ) <nl> self . _lock = threading . Lock ( ) <nl> <nl> - def _get_counter ( self , key ) : <nl> - if key not in self . _counters : <nl> - self . _counters [ key ] = _CallCounter ( <nl> - FREQUENT_TRACING_WARNING_MAX_CALL_HISTORY ) <nl> - return self . _counters [ key ] <nl> + def _get_detector ( self , key ) : <nl> + if key not in self . _detectors : <nl> + self . _detectors [ key ] = _FrequentTracingDetector ( ) <nl> + return self . _detectors [ key ] <nl> <nl> def called_without_tracing ( self , key ) : <nl> with self . _lock : <nl> - counter = self . _get_counter ( key ) <nl> - counter . called_without_tracing ( ) <nl> + detector = self . _get_detector ( key ) <nl> + detector . called_without_tracing ( ) <nl> <nl> def called_with_tracing ( self , key , function_name , omit_warning ) : <nl> with self . _lock : <nl> - counter = self . _get_counter ( key ) <nl> - counter . called_with_tracing ( ) <nl> - if omit_warning : <nl> - return <nl> - if counter . get_tracing_count ( ) > = FREQUENT_TRACING_WARNING_THRESHOLD : <nl> - logging . warning ( <nl> - " { } out of the last { } calls to { } triggered tf . function " <nl> - " retracing . Tracing is expensive and the excessive number of " <nl> - " tracings could be due to ( 1 ) creating @ tf . function repeatedly in " <nl> - " a loop , ( 2 ) passing tensors with different shapes , ( 3 ) passing " <nl> - " Python objects instead of tensors . For ( 1 ) , please define your " <nl> - " @ tf . function outside of the loop . For ( 2 ) , @ tf . function has " <nl> - " experimental_relax_shapes = True option that relaxes argument " <nl> - " shapes that can avoid unnecessary retracing . For ( 3 ) , please " <nl> - " refer to " <nl> - " https : / / www . tensorflow . org / guide / function # controlling_retracing " <nl> - " and https : / / www . tensorflow . org / api_docs / python / tf / function for " <nl> - " more details . " . format ( counter . get_tracing_count ( ) , <nl> - counter . call_count , function_name ) ) <nl> - <nl> - <nl> - _frequent_tracing_detector = _FrequentTracingDetector ( ) <nl> + detector = self . _get_detector ( key ) <nl> + detector . called_with_tracing ( function_name , omit_warning ) <nl> + <nl> + <nl> + _frequent_tracing_detector_manager = _FrequentTracingDetectorManager ( ) <nl> <nl> <nl> class UnliftedInitializerVariable ( resource_variable_ops . UninitializedVariable ) : <nl> def __call__ ( self , * args , * * kwds ) : <nl> <nl> if context . executing_eagerly ( ) : <nl> if without_tracing : <nl> - _frequent_tracing_detector . called_without_tracing ( <nl> + _frequent_tracing_detector_manager . called_without_tracing ( <nl> self . _key_for_call_stats ) <nl> else : <nl> - _frequent_tracing_detector . called_with_tracing ( <nl> + _frequent_tracing_detector_manager . called_with_tracing ( <nl> self . 
_key_for_call_stats , self . _python_function , <nl> self . _omit_frequent_tracing_warning ) <nl> <nl> mmm a / tensorflow / python / eager / def_function_test . py <nl> ppp b / tensorflow / python / eager / def_function_test . py <nl> def __call__ ( self , x ) : <nl> self . assertLen ( logs . output , 1 ) <nl> self . assertIn ( ' Tracing is expensive ' , logs . output [ 0 ] ) <nl> <nl> + def test_retracing_warning_limits ( self ) : <nl> + <nl> + @ def_function . function <nl> + def my_func ( x ) : <nl> + return x <nl> + <nl> + with self . assertLogs ( level = ' WARN ' ) as logs : <nl> + for i in range ( 10 ) : <nl> + my_func ( i ) <nl> + <nl> + self . assertLen ( logs . output , 2 ) <nl> + <nl> def test_experimental_get_tracing_count_function ( self ) : <nl> <nl> @ def_function . function <nl>
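A hedged standalone illustration of the behavior the test above pins down: ten retracing calls now log exactly two warnings (the new FREQUENT_TRACING_WARNING_MAX_WARNING_PER_DETECTOR cap), not one warning per retrace:

```python
import tensorflow as tf


@tf.function
def my_func(x):
  return x


# Each new Python int is a new trace signature, so every call retraces;
# only the first two emit the "Tracing is expensive" warning.
for i in range(10):
  my_func(i)
```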
mmm a / tensorflow / python / framework / function . py <nl> ppp b / tensorflow / python / framework / function . py <nl> def _create_definition_if_needed_impl ( self ) : <nl> variable_keys . extend ( ops . GraphKeys . _VARIABLE_COLLECTIONS ) # pylint : disable = protected - access <nl> variable_keys . append ( vs . _VARSTORE_KEY ) # pylint : disable = protected - access <nl> <nl> - collections_ref = { } <nl> - parent_collections_ref = ops . get_default_graph ( ) . _collections # pylint : disable = protected - access <nl> - for key in variable_keys : <nl> - if key not in parent_collections_ref : <nl> - parent_collections_ref [ key ] = collections_ref [ key ] = [ ] <nl> - else : <nl> - collections_ref [ key ] = parent_collections_ref [ key ] <nl> + parent_graph = ops . get_default_graph ( ) <nl> + collections_ref = { <nl> + key : parent_graph . get_collection_ref ( key ) for key in variable_keys } <nl> <nl> temp_graph = func_graph_from_py_func ( <nl> self . _func , <nl> mmm a / tensorflow / python / framework / function_def_to_graph . py <nl> ppp b / tensorflow / python / framework / function_def_to_graph . py <nl> <nl> from __future__ import division <nl> from __future__ import print_function <nl> <nl> + import itertools <nl> + <nl> + <nl> from tensorflow . core . framework import function_pb2 <nl> from tensorflow . core . framework import graph_pb2 <nl> from tensorflow . core . framework import tensor_shape_pb2 <nl> from tensorflow . core . framework import types_pb2 <nl> from tensorflow . core . framework import versions_pb2 <nl> from tensorflow . python . eager import context <nl> + from tensorflow . python . framework import cpp_shape_inference_pb2 <nl> from tensorflow . python . framework import importer <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . framework import versions <nl> from tensorflow . python . framework . func_graph import FuncGraph <nl> + from tensorflow . python . ops import resource_variable_ops <nl> <nl> <nl> def function_def_to_graph ( fdef , input_shapes = None ) : <nl> def function_def_to_graph ( fdef , input_shapes = None ) : <nl> func_graph . get_operation_by_name ( fdef . control_ret [ ret_name ] ) <nl> for ret_name in fdef . signature . control_output <nl> ] <nl> + <nl> + _set_handle_data ( func_graph , fdef ) <nl> + <nl> for node in graph_def . node : <nl> output_shapes = node . attr . get ( " _output_shapes " , None ) <nl> if output_shapes is not None : <nl> def _get_num_args ( arg_def , node_def ) : <nl> return 1 <nl> else : <nl> raise ValueError ( " Invalid arg_def : \ n \ n { } " . format ( str ( arg_def ) ) ) <nl> + <nl> + <nl> + def _set_handle_data ( func_graph , fdef ) : <nl> + " " " Adds handle data for resource type inputs and outputs . " " " <nl> + for tensor , arg_def in itertools . chain ( <nl> + zip ( func_graph . inputs , fdef . signature . input_arg ) , <nl> + zip ( func_graph . outputs , fdef . signature . output_arg ) ) : <nl> + if arg_def . handle_data : <nl> + shape_and_dtype = arg_def . handle_data [ 0 ] <nl> + handle_data = cpp_shape_inference_pb2 . CppShapeInferenceResult . HandleData ( ) <nl> + handle_data . is_set = True <nl> + handle_data . shape_and_type . append ( <nl> + cpp_shape_inference_pb2 . CppShapeInferenceResult . HandleShapeAndType ( <nl> + shape = shape_and_dtype . shape , dtype = shape_and_dtype . dtype ) ) <nl> + resource_variable_ops . _set_handle_shapes_and_types ( # pylint : disable = protected - access <nl> + tensor , handle_data , True ) <nl> mmm a / tensorflow / python / framework / load_library . py <nl> ppp b / tensorflow / python / framework / load_library . py <nl> <nl> from tensorflow . python import _pywrap_python_op_gen <nl> from tensorflow . python . client import pywrap_tf_session as py_tf <nl> + from tensorflow . python . eager import context <nl> from tensorflow . python . util import deprecation <nl> from tensorflow . python . util . tf_export import tf_export <nl> <nl> def load_library ( library_location ) : <nl> library_location ) <nl> <nl> <nl> + def load_pluggable_device_library ( library_location ) : <nl> + " " " Loads a TensorFlow PluggableDevice plugin . <nl> + <nl> + " library_location " can be a path to a specific shared object , or a folder . <nl> + If it is a folder , all shared objects will be loaded . When the library is <nl> + loaded , devices / kernels registered in the library via the StreamExecutor <nl> + C API and the Kernel / Op Registration C API are made available in the <nl> + TensorFlow process . <nl> + <nl> + Args : <nl> + library_location : Path to the plugin or folder of plugins . Relative or <nl> + absolute filesystem path to a dynamic library file or folder . <nl> + <nl> + Raises : <nl> + OSError : When the file or folder to be loaded is not found . <nl> + RuntimeError : When unable to load the library . <nl> + " " " <nl> + if os . path . exists ( library_location ) : <nl> + if os . path . isdir ( library_location ) : <nl> + directory_contents = os . listdir ( library_location ) <nl> + <nl> + pluggable_device_libraries = [ <nl> + os . path . join ( library_location , f ) <nl> + for f in directory_contents <nl> + if _is_shared_object ( f ) <nl> + ] <nl> + else : <nl> + pluggable_device_libraries = [ library_location ] <nl> + <nl> + for lib in pluggable_device_libraries : <nl> + py_tf . TF_LoadPluggableDeviceLibrary ( lib ) <nl> + # Reinitialize the physical device list after plugin registration . <nl> + context . context ( ) . reinitialize_physical_devices ( ) <nl> + else : <nl> + raise OSError ( <nl> + errno . ENOENT , <nl> + ' The file or folder to load pluggable device libraries from does not ' <nl> + ' exist . ' , library_location ) <nl> + <nl> + <nl> @ tf_export ( ' experimental . register_filesystem_plugin ' ) <nl> def register_filesystem_plugin ( plugin_location ) : <nl> " " " Loads a TensorFlow FileSystem plugin . <nl> mmm a / tensorflow / python / framework / ops . py <nl> ppp b / tensorflow / python / framework / ops . py <nl> <nl> from tensorflow . python . eager import tape <nl> from tensorflow . python . framework import c_api_util <nl> from tensorflow . python .
framework import composite_tensor <nl> + from tensorflow . python . framework import cpp_shape_inference_pb2 <nl> from tensorflow . python . framework import device as pydev <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import errors <nl> def _as_graph_def ( self , from_version = None , add_shapes = False ) : <nl> continue <nl> # TODO ( b / 141471245 ) : Fix the inconsistency when inputs of func graph <nl> # are appended during gradient computation of while / cond . <nl> - for input_tensor , _ in zip ( func_graph_inputs , <nl> - function_def . signature . input_arg ) : <nl> + for input_tensor , arg_def in zip ( func_graph_inputs , <nl> + function_def . signature . input_arg ) : <nl> + input_shapes . list . shape . add ( ) . CopyFrom ( <nl> + input_tensor . get_shape ( ) . as_proto ( ) ) <nl> if input_tensor . dtype = = dtypes . resource : <nl> - # TODO ( allenl ) : Save and restore handle data , then save the <nl> - # resource placeholder ' s shape . Right now some shape functions get <nl> - # confused if we set the shape of the resource placeholder ( to a <nl> - # scalar of course ) and there isn ' t any handle data . <nl> - input_shapes . list . shape . add ( ) . CopyFrom ( <nl> - tensor_shape . TensorShape ( None ) . as_proto ( ) ) <nl> - else : <nl> - input_shapes . list . shape . add ( ) . CopyFrom ( <nl> - input_tensor . get_shape ( ) . as_proto ( ) ) <nl> + _copy_handle_data_to_arg_def ( input_tensor , arg_def ) <nl> + <nl> + for output_tensor , arg_def in zip ( func_graph . outputs , <nl> + function_def . signature . output_arg ) : <nl> + if output_tensor . dtype = = dtypes . resource : <nl> + _copy_handle_data_to_arg_def ( output_tensor , arg_def ) <nl> + <nl> for node in function_def . node_def : <nl> try : <nl> op = func_graph . get_operation_by_name ( node . name ) <nl> def _get_enclosing_context ( graph ) : <nl> <nl> if graph . building_function and hasattr ( graph , " outer_graph " ) : <nl> return _get_enclosing_context ( graph . outer_graph ) <nl> + <nl> + <nl> + def get_resource_handle_data ( graph_op ) : <nl> + assert type ( graph_op ) = = Tensor # pylint : disable = unidiomatic - typecheck <nl> + <nl> + handle_data = pywrap_tf_session . GetHandleShapeAndType ( <nl> + graph_op . graph . _c_graph , graph_op . _as_tf_output ( ) ) # pylint : disable = protected - access <nl> + <nl> + return cpp_shape_inference_pb2 . CppShapeInferenceResult . HandleData . FromString ( <nl> + compat . as_bytes ( handle_data ) ) <nl> + <nl> + <nl> + def _copy_handle_data_to_arg_def ( tensor , arg_def ) : <nl> + handle_data = get_resource_handle_data ( tensor ) <nl> + if handle_data . shape_and_type : <nl> + shape_and_type = handle_data . shape_and_type [ 0 ] <nl> + proto = arg_def . handle_data . add ( ) <nl> + proto . dtype = shape_and_type . dtype <nl> + proto . shape . CopyFrom ( handle_data . shape_and_type [ 0 ] . shape ) <nl> mmm a / tensorflow / python / framework / test_util . py <nl> ppp b / tensorflow / python / framework / test_util . py <nl> def _assertArrayLikeAllClose ( self , a , b , rtol = 1e - 6 , atol = 1e - 6 , msg = None ) : <nl> self . assertEqual ( a . shape , b . shape , shape_mismatch_msg ) <nl> <nl> msgs = [ msg ] <nl> + # np . allclose does not always work for our custom bfloat16 extension type <nl> + # when type promotions are involved , so we first cast any bfloat16 arrays <nl> + # to float32 . <nl> + a_dtype = a . dtype <nl> + a = a . astype ( np . float32 ) if a . dtype = = dtypes . bfloat16 . 
as_numpy_dtype else a <nl> + b = b . astype ( np . float32 ) if b . dtype = = dtypes . bfloat16 . as_numpy_dtype else b <nl> if not np . allclose ( a , b , rtol = rtol , atol = atol ) : <nl> # Adds more details to np . testing . assert_allclose . <nl> # <nl> def _assertArrayLikeAllClose ( self , a , b , rtol = 1e - 6 , atol = 1e - 6 , msg = None ) : <nl> msgs . append ( " not close rhs = { } " . format ( y ) ) <nl> msgs . append ( " not close dif = { } " . format ( np . abs ( x - y ) ) ) <nl> msgs . append ( " not close tol = { } " . format ( atol + rtol * np . abs ( y ) ) ) <nl> - msgs . append ( " dtype = { } , shape = { } " . format ( a . dtype , a . shape ) ) <nl> + msgs . append ( " dtype = { } , shape = { } " . format ( a_dtype , a . shape ) ) <nl> # TODO ( xpan ) : There seems to be a bug : <nl> # tensorflow / compiler / tests : binary_ops_test pass with float32 <nl> # nan even though the equal_nan is False by default internally . <nl> mmm a / tensorflow / python / keras / BUILD <nl> ppp b / tensorflow / python / keras / BUILD <nl> py_library ( <nl> ] , <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> + " : activations " , <nl> " : backend " , <nl> " : losses " , <nl> " / / tensorflow / python : array_ops " , <nl> mmm a / tensorflow / python / keras / applications / mobilenet_v3 . py <nl> ppp b / tensorflow / python / keras / applications / mobilenet_v3 . py <nl> <nl> The following table describes the performance of MobileNets : <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> MACs stands for Multiply Adds <nl> - <nl> + <nl> | Classification Checkpoint | MACs ( M ) | Parameters ( M ) | Top1 Accuracy | Pixel1 CPU ( ms ) | <nl> | mmm | mmm | mmm | mmm | mmm | <nl> | mobilenet_v3_large_1 . 0_224 | 217 | 5 . 4 | 75 . 6 | 51 . 2 | <nl> <nl> <nl> Optionally loads weights pre - trained on ImageNet . <nl> <nl> - Note : each Keras Application expects a specific kind of input preprocessing . <nl> - For MobileNetV3 , call <nl> - ` tf . keras . applications . mobilenet_v3 . preprocess_input ` on your <nl> - inputs before passing them to the model . <nl> - <nl> Arguments : <nl> input_shape : Optional shape tuple , to be specified if you would <nl> like to use a model with an input image resolution that is not <nl> <nl> on the " top " layer . Ignored unless ` include_top = True ` . Set <nl> ` classifier_activation = None ` to return the logits of the " top " layer . <nl> <nl> + Call arguments : <nl> + inputs : A floating point ` numpy . array ` or a ` tf . Tensor ` , 4D with 3 color <nl> + channels , with values in the range [ 0 , 255 ] . <nl> + <nl> Returns : <nl> A ` keras . Model ` instance . <nl> <nl> def _inverted_res_block ( x , expansion , filters , kernel_size , stride , se_ratio , <nl> <nl> @ keras_export ( ' keras . applications . mobilenet_v3 . preprocess_input ' ) <nl> def preprocess_input ( x , data_format = None ) : # pylint : disable = unused - argument <nl> + " " " A placeholder method for backward compatibility . <nl> + <nl> + The preprocessing logic has been included in the mobilenet_v3 model <nl> + implementation . Users are no longer required to call this method to normalize <nl> + the input data . This method does nothing and is only kept as a placeholder to <nl> + align the API surface between the old and new versions of the model . <nl> + <nl> + Args : <nl> + x : A floating point ` numpy . array ` or a ` tf . Tensor ` . <nl> + data_format : Optional data format of the image tensor / array . Defaults to <nl> + None , in which case the global setting <nl> + ` tf . keras . backend . image_data_format ( ) ` is used ( unless you changed it , <nl> + it defaults to " channels_last " ) . <nl> + <nl> + Returns : <nl> + Unchanged ` numpy . array ` or ` tf . Tensor ` . <nl> + " " " <nl> + <nl> return x <nl> <nl> <nl> def decode_predictions ( preds , top = 5 ) : <nl> return imagenet_utils . decode_predictions ( preds , top = top ) <nl> <nl> <nl> - preprocess_input . __doc__ = imagenet_utils . PREPROCESS_INPUT_DOC . format ( <nl> - mode = ' ' , <nl> - ret = imagenet_utils . PREPROCESS_INPUT_RET_DOC_TF , <nl> - error = imagenet_utils . PREPROCESS_INPUT_ERROR_DOC ) <nl> decode_predictions . __doc__ = imagenet_utils . decode_predictions . __doc__ <nl>
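A hedged usage sketch of the placeholder semantics documented above (`weights=None` keeps the sketch offline; the random array stands in for real images):

```python
import numpy as np
import tensorflow as tf

# Preprocessing now lives inside the model, so raw [0, 255] images are fed
# directly and preprocess_input is an identity.
model = tf.keras.applications.MobileNetV3Small(weights=None)
images = np.random.uniform(0, 255, size=(1, 224, 224, 3)).astype('float32')
x = tf.keras.applications.mobilenet_v3.preprocess_input(images)  # unchanged
preds = model(x)
```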
mmm a / tensorflow / python / keras / benchmarks / layer_benchmarks / layer_benchmarks_test . py <nl> ppp b / tensorflow / python / keras / benchmarks / layer_benchmarks / layer_benchmarks_test . py <nl> <nl> from __future__ import print_function <nl> <nl> import functools <nl> + import numpy as np <nl> import six <nl> <nl> import tensorflow as tf <nl> def _get_metadata ( name ) : <nl> } <nl> <nl> <nl> + def _get_input_data ( inputs ) : <nl> + if " input_shape " in inputs : <nl> + return tf . ones ( inputs [ " input_shape " ] ) <nl> + elif " input " in inputs : <nl> + return inputs [ " input " ] <nl> + else : <nl> + raise ValueError ( " Please specify either ` input_shape ` or ` input ` " <nl> + " for the benchmark test " ) <nl> + <nl> + <nl> def _generate_benchmark_params ( * params_list ) : <nl> benchmark_params = [ ] <nl> for params in params_list : <nl> class KerasLayerBenchmarks ( six . with_metaclass ( <nl> _benchmark_parameters = _generate_benchmark_params ( [ <nl> ( " Conv2D_small_shape " , tf . keras . layers . Conv2D , <nl> { " filters " : 1 , " kernel_size " : 1 , " activation " : " relu " } , <nl> - ( 1 , 1 , 1 , 1 ) , 10000 ) , <nl> + { " input_shape " : ( 1 , 1 , 1 , 1 ) } , 10 ) , <nl> ( " Conv2D_normal_shape " , tf . keras . layers . Conv2D , <nl> { " filters " : 1 , " kernel_size " : 1 , " activation " : " relu " } , <nl> - ( 64 , 28 , 28 , 3 ) , 10000 ) , <nl> + { " input_shape " : ( 64 , 28 , 28 , 3 ) } , 10 ) , <nl> ( " LSTM_small_shape " , tf . keras . layers . LSTM , <nl> - { " units " : 1 } , ( 1 , 1 , 1 ) , 10000 ) , <nl> + { " units " : 1 } , { " input_shape " : ( 1 , 1 , 1 ) } , 10 ) , <nl> ( " LSTM_normal_shape " , tf . keras . layers . LSTM , <nl> - { " units " : 4 } , ( 32 , 10 , 8 ) , 10000 ) , <nl> + { " units " : 4 } , { " input_shape " : ( 32 , 10 , 8 ) } , 10 ) , <nl> + ( " Embedding_small_shape " , tf . keras . layers . Embedding , <nl> + { " input_dim " : 1 , " output_dim " : 1 , " input_length " : 1 } , <nl> + { " input " : np . random . randint ( 1 , size = ( 1 , 1 ) ) } , 10 ) , <nl> + ( " Embedding_normal_shape " , tf . keras . layers . Embedding , <nl> + { " input_dim " : 1000 , " output_dim " : 64 , " input_length " : 10 } , <nl> + { " input " : np . random . randint ( 1000 , size = ( 32 , 10 ) ) } , 10 ) , <nl> ] ) <nl> <nl> - def benchmark_layer_call ( self , layer_cls , layer_args , input_shape , num_iters ) : <nl> + def benchmark_layer_call ( self , layer_cls , layer_args , inputs , num_iters ) : <nl> layer = layer_cls ( * * layer_args ) <nl> - x = tf . ones ( input_shape ) <nl> + x = _get_input_data ( inputs ) <nl> <nl> fn = functools . partial ( layer , x ) <nl> name = _get_benchmark_name ( self .
_get_name ( ) ) <nl> def benchmark_layer_call ( self , layer_cls , layer_args , input_shape , num_iters ) : <nl> self . run_report ( fn , num_iters , metadata ) <nl> <nl> def benchmark_layer_call_with_function ( <nl> - self , layer_cls , layer_args , input_shape , num_iters ) : <nl> + self , layer_cls , layer_args , inputs , num_iters ) : <nl> layer = layer_cls ( * * layer_args ) <nl> - x = tf . ones ( input_shape ) <nl> + x = _get_input_data ( inputs ) <nl> layer . call = tf . function ( layer . call ) <nl> <nl> fn = functools . partial ( layer , x ) <nl> def benchmark_layer_call_with_function ( <nl> self . run_report ( fn , num_iters , metadata ) <nl> <nl> def benchmark_layer_call_with_xla ( <nl> - self , layer_cls , layer_args , input_shape , num_iters ) : <nl> + self , layer_cls , layer_args , inputs , num_iters ) : <nl> + name = _get_benchmark_name ( self . _get_name ( ) ) <nl> + # TODO ( b / 173461426 ) <nl> + if layer_cls is tf . keras . layers . Embedding and name [ - 1 ] = = " GPU " : <nl> + return <nl> layer = layer_cls ( * * layer_args ) <nl> - x = tf . ones ( input_shape ) <nl> + x = _get_input_data ( inputs ) <nl> layer . call = tf . function ( <nl> layer . call , jit_compile = True ) <nl> <nl> fn = functools . partial ( layer , x ) <nl> - name = _get_benchmark_name ( self . _get_name ( ) ) <nl> metadata = { " implementation " : name [ 0 ] + " . layer . call . xla " } <nl> metadata . update ( _get_metadata ( name ) ) <nl> self . run_report ( fn , num_iters , metadata ) <nl> <nl> def benchmark_layer_call_backward ( <nl> - self , layer_cls , layer_args , input_shape , num_iters ) : <nl> + self , layer_cls , layer_args , inputs , num_iters ) : <nl> layer = layer_cls ( * * layer_args ) <nl> - x = tf . ones ( input_shape ) <nl> + x = _get_input_data ( inputs ) <nl> <nl> fn = functools . partial ( _layer_call_backward , layer , x ) <nl> name = _get_benchmark_name ( self . _get_name ( ) ) <nl> def benchmark_layer_call_backward ( <nl> self . run_report ( fn , num_iters , metadata ) <nl> <nl> def benchmark_layer_call_backward_with_function ( <nl> - self , layer_cls , layer_args , input_shape , num_iters ) : <nl> + self , layer_cls , layer_args , inputs , num_iters ) : <nl> layer = layer_cls ( * * layer_args ) <nl> - x = tf . ones ( input_shape ) <nl> + x = _get_input_data ( inputs ) <nl> layer . call = tf . function ( layer . call ) <nl> <nl> fn = functools . partial ( _layer_call_backward , layer , x ) <nl> class KerasLayerBenchmarksBackwardXLA ( six . with_metaclass ( <nl> # { " units " : 1 } , ( 1 , 1 , 1 ) , 10000 ) , <nl> # ( " LSTM_normal_shape " , tf . keras . layers . LSTM , <nl> # { " units " : 4 } , ( 32 , 10 , 8 ) , 10000 ) , <nl> + ( " Embedding_small_shape " , tf . keras . layers . Embedding , <nl> + { " input_dim " : 1 , " output_dim " : 1 , " input_length " : 1 } , <nl> + { " input " : np . random . randint ( 1 , size = ( 1 , 1 ) ) } , 10 ) , <nl> + ( " Embedding_normal_shape " , tf . keras . layers . Embedding , <nl> + { " input_dim " : 1000 , " output_dim " : 64 , " input_length " : 10 } , <nl> + { " input " : np . random . randint ( 1000 , size = ( 32 , 10 ) ) } , 10 ) , <nl> ] ) <nl> <nl> def benchmark_layer_call_backward_with_xla ( <nl> - self , layer_cls , layer_args , input_shape , num_iters ) : <nl> + self , layer_cls , layer_args , inputs , num_iters ) : <nl> + name = _get_benchmark_name ( self . _get_name ( ) ) <nl> + # TODO ( b / 173461426 ) <nl> + if layer_cls is tf . keras . layers . 
Embedding and name [ - 1 ] = = " GPU " : <nl> + return <nl> layer = layer_cls ( * * layer_args ) <nl> - x = tf . ones ( input_shape ) <nl> + x = _get_input_data ( inputs ) <nl> layer . call = tf . function ( <nl> layer . call , jit_compile = True ) <nl> <nl> fn = functools . partial ( _layer_call_backward , layer , x ) <nl> - name = _get_benchmark_name ( self . _get_name ( ) ) <nl> metadata = { " implementation " : name [ 0 ] + " . layer . call . backward . xla " } <nl> metadata . update ( _get_metadata ( name ) ) <nl> self . run_report ( fn , num_iters , metadata ) <nl> mmm a / tensorflow / python / keras / metrics . py <nl> ppp b / tensorflow / python / keras / metrics . py <nl> <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . framework import tensor_shape <nl> from tensorflow . python . framework import tensor_spec <nl> + from tensorflow . python . keras import activations <nl> from tensorflow . python . keras import backend as K <nl> - from tensorflow . python . keras . activations import sigmoid <nl> from tensorflow . python . keras . engine import base_layer <nl> from tensorflow . python . keras . engine import base_layer_utils <nl> from tensorflow . python . keras . engine import keras_tensor <nl> def update_state ( self , y_true , y_pred , sample_weight = None ) : <nl> label_weights = None if self . multi_label else self . label_weights <nl> <nl> if self . _from_logits : <nl> - y_pred = sigmoid ( y_pred ) <nl> + y_pred = activations . sigmoid ( y_pred ) <nl> <nl> with ops . control_dependencies ( deps ) : <nl> return metrics_utils . update_confusion_matrix_variables ( <nl> mmm a / tensorflow / python / kernel_tests / sparse_xent_op_test . py <nl> ppp b / tensorflow / python / kernel_tests / sparse_xent_op_test . py <nl> def testDouble ( self ) : <nl> np . array ( [ [ 1 . , 1 . , 1 . , 1 . ] , [ 1 . , 2 . , 3 . , 4 . ] ] ) . astype ( np . float64 ) , <nl> np . array ( [ 0 , 3 ] ) . astype ( label_dtype ) ) <nl> <nl> - def testBfloat16 ( self ) : <nl> - for label_dtype in np . int32 , np . int64 : <nl> - np_features = np . array ( [ [ 1 . , 1 . , 1 . , 1 . ] , [ 1 . , 2 . , 3 . , <nl> - 4 . ] ] ) . astype ( np . float32 ) <nl> - np_labels = np . array ( [ 0 , 3 ] ) . astype ( label_dtype ) <nl> - np_loss , np_backprop = self . _npXent ( np_features , np_labels ) <nl> - <nl> - np_features_bf16 = math_ops . cast ( np_features , dtypes . bfloat16 ) <nl> - np_loss_bf16 = math_ops . cast ( np_loss , dtypes . bfloat16 ) <nl> - np_backprop_bf16 = math_ops . cast ( np_backprop , dtypes . bfloat16 ) <nl> - with self . cached_session ( use_gpu = False ) : <nl> - loss , backprop = gen_nn_ops . sparse_softmax_cross_entropy_with_logits ( <nl> - np_features_bf16 , np_labels ) <nl> - tf_loss , tf_backprop = self . evaluate ( [ loss , backprop ] ) <nl> - self . assertAllCloseAccordingToType ( np_loss_bf16 , tf_loss ) <nl> - self . assertAllCloseAccordingToType ( np_backprop_bf16 , tf_backprop ) <nl> - <nl> def testHalf ( self ) : <nl> for label_dtype in np . int32 , np . int64 : <nl> self . _testXent ( <nl> mmm a / tensorflow / python / lib / core / bfloat16 . cc <nl> ppp b / tensorflow / python / lib / core / bfloat16 . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . 
<nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include < array > <nl> - <nl> # include " tensorflow / python / lib / core / bfloat16 . h " <nl> <nl> - # include " tensorflow / core / framework / numeric_types . h " <nl> - # include " tensorflow / core / lib / strings / strcat . h " <nl> + # include < array > <nl> + # include < locale > <nl> + / / Place ` < locale > ` before < Python . h > to avoid a build failure in macOS . <nl> + # include < Python . h > <nl> + <nl> + # include " absl / strings / str_cat . h " <nl> + # include " third_party / eigen3 / Eigen / Core " <nl> # include " tensorflow / core / platform / logging . h " <nl> # include " tensorflow / python / lib / core / numpy . h " <nl> - # include " tensorflow / python / lib / core / safe_ptr . h " <nl> <nl> namespace tensorflow { <nl> namespace { <nl> <nl> - / / Workarounds for Python 2 vs 3 API differences . <nl> - # if PY_MAJOR_VERSION < 3 <nl> - <nl> - PyObject * MakePyString ( const string & s ) { <nl> - return PyString_FromString ( s . c_str ( ) ) ; <nl> - } <nl> - <nl> - typedef long HashType ; / / NOLINT <nl> - <nl> - bool TfPyInt_Check ( PyObject * object ) { return PyInt_Check ( object ) ; } <nl> - <nl> - PyObject * TfPyInt_FromLong ( long x ) { / / NOLINT <nl> - return PyInt_FromLong ( x ) ; <nl> - } <nl> - <nl> - long TfPyInt_AsLong ( PyObject * x ) { / / NOLINT <nl> - return PyInt_AsLong ( x ) ; <nl> - } <nl> + using bfloat16 = Eigen : : bfloat16 ; <nl> <nl> - # else / / PY_MAJOR_VERSION < 3 <nl> + struct PyDecrefDeleter { <nl> + void operator ( ) ( PyObject * p ) const { Py_DECREF ( p ) ; } <nl> + } ; <nl> <nl> - PyObject * MakePyString ( const string & s ) { <nl> - return PyUnicode_FromString ( s . c_str ( ) ) ; <nl> + / / Safe container for an owned PyObject . On destruction , the reference count of <nl> + / / the contained object will be decremented . <nl> + using Safe_PyObjectPtr = std : : unique_ptr < PyObject , PyDecrefDeleter > ; <nl> + Safe_PyObjectPtr make_safe ( PyObject * object ) { <nl> + return Safe_PyObjectPtr ( object ) ; <nl> } <nl> <nl> - bool TfPyInt_Check ( PyObject * object ) { <nl> + bool PyLong_CheckNoOverflow ( PyObject * object ) { <nl> if ( ! PyLong_Check ( object ) ) { <nl> - return 0 ; <nl> + return false ; <nl> } <nl> int overflow = 0 ; <nl> PyLong_AsLongAndOverflow ( object , & overflow ) ; <nl> return ( overflow = = 0 ) ; <nl> } <nl> <nl> - PyObject * TfPyInt_FromLong ( long x ) { / / NOLINT <nl> - return PyLong_FromLong ( x ) ; <nl> - } <nl> - <nl> - long TfPyInt_AsLong ( PyObject * x ) { / / NOLINT <nl> - return PyLong_AsLong ( x ) ; <nl> - } <nl> - <nl> - typedef Py_hash_t HashType ; <nl> - <nl> - # endif / / PY_MAJOR_VERSION < 3 <nl> + / / Registered numpy type ID . Global variable populated by the registration code . <nl> + / / Protected by the GIL . <nl> + int npy_bfloat16 = - 1 ; <nl> <nl> / / Forward declaration . <nl> extern PyTypeObject PyBfloat16_Type ; <nl> Safe_PyObjectPtr PyBfloat16_FromBfloat16 ( bfloat16 x ) { <nl> <nl> / / Converts a Python object to a bfloat16 value . Returns true on success , <nl> / / returns false and reports a Python error on failure . 
<nl> - bool AsBfloat16 ( PyObject * arg , bfloat16 * output ) { <nl> + bool CastToBfloat16 ( PyObject * arg , bfloat16 * output ) { <nl> if ( PyBfloat16_Check ( arg ) ) { <nl> * output = PyBfloat16_Bfloat16 ( arg ) ; <nl> return true ; <nl> bool AsBfloat16 ( PyObject * arg , bfloat16 * output ) { <nl> * output = bfloat16 ( d ) ; <nl> return true ; <nl> } <nl> - if ( TfPyInt_Check ( arg ) ) { <nl> - long l = TfPyInt_AsLong ( arg ) ; / / NOLINT <nl> + if ( PyLong_CheckNoOverflow ( arg ) ) { <nl> + long l = PyLong_AsLong ( arg ) ; / / NOLINT <nl> if ( PyErr_Occurred ( ) ) { <nl> return false ; <nl> } <nl> bool AsBfloat16 ( PyObject * arg , bfloat16 * output ) { <nl> * output = bfloat16 ( static_cast < float > ( l ) ) ; <nl> return true ; <nl> } <nl> + if ( PyArray_IsScalar ( arg , Half ) ) { <nl> + Eigen : : half f ; <nl> + PyArray_ScalarAsCtype ( arg , & f ) ; <nl> + * output = bfloat16 ( f ) ; <nl> + return true ; <nl> + } <nl> if ( PyArray_IsScalar ( arg , Float ) ) { <nl> float f ; <nl> PyArray_ScalarAsCtype ( arg , & f ) ; <nl> * output = bfloat16 ( f ) ; <nl> return true ; <nl> } <nl> - PyErr_Format ( PyExc_TypeError , " expected number , got % s " , <nl> - arg - > ob_type - > tp_name ) ; <nl> + if ( PyArray_IsScalar ( arg , Double ) ) { <nl> + double f ; <nl> + PyArray_ScalarAsCtype ( arg , & f ) ; <nl> + * output = bfloat16 ( f ) ; <nl> + return true ; <nl> + } <nl> + if ( PyArray_IsZeroDim ( arg ) ) { <nl> + Safe_PyObjectPtr ref ; <nl> + PyArrayObject * arr = reinterpret_cast < PyArrayObject * > ( arg ) ; <nl> + if ( PyArray_TYPE ( arr ) ! = npy_bfloat16 ) { <nl> + ref = make_safe ( PyArray_Cast ( arr , npy_bfloat16 ) ) ; <nl> + if ( PyErr_Occurred ( ) ) { <nl> + return false ; <nl> + } <nl> + arg = ref . get ( ) ; <nl> + arr = reinterpret_cast < PyArrayObject * > ( arg ) ; <nl> + } <nl> + * output = * reinterpret_cast < bfloat16 * > ( PyArray_DATA ( arr ) ) ; <nl> + return true ; <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> + bool SafeCastToBfloat16 ( PyObject * arg , bfloat16 * output ) { <nl> + if ( PyBfloat16_Check ( arg ) ) { <nl> + * output = PyBfloat16_Bfloat16 ( arg ) ; <nl> + return true ; <nl> + } <nl> return false ; <nl> } <nl> <nl> PyObject * PyBfloat16_Float ( PyObject * self ) { <nl> PyObject * PyBfloat16_Int ( PyObject * self ) { <nl> bfloat16 x = PyBfloat16_Bfloat16 ( self ) ; <nl> long y = static_cast < long > ( x ) ; / / NOLINT <nl> - return TfPyInt_FromLong ( y ) ; <nl> + return PyLong_FromLong ( y ) ; <nl> } <nl> <nl> / / Negates a PyBfloat16 . <nl> PyObject * PyBfloat16_Negative ( PyObject * self ) { <nl> return PyBfloat16_FromBfloat16 ( - x ) . release ( ) ; <nl> } <nl> <nl> - / / Binary arithmetic operators on PyBfloat16 values . <nl> - # define BFLOAT16_BINOP ( name , op ) \ <nl> - PyObject * PyBfloat16_ # # name ( PyObject * a , PyObject * b ) { \ <nl> - bfloat16 x , y ; \ <nl> - if ( ! AsBfloat16 ( a , & x ) | | ! AsBfloat16 ( b , & y ) ) return nullptr ; \ <nl> - bfloat16 z = x op y ; \ <nl> - return PyBfloat16_FromBfloat16 ( z ) . release ( ) ; \ <nl> + PyObject * PyBfloat16_Add ( PyObject * a , PyObject * b ) { <nl> + bfloat16 x , y ; <nl> + if ( SafeCastToBfloat16 ( a , & x ) & & SafeCastToBfloat16 ( b , & y ) ) { <nl> + return PyBfloat16_FromBfloat16 ( x + y ) . release ( ) ; <nl> + } <nl> + return PyArray_Type . 
tp_as_number - > nb_add ( a , b ) ; <nl> + } <nl> + <nl> + PyObject * PyBfloat16_Subtract ( PyObject * a , PyObject * b ) { <nl> + bfloat16 x , y ; <nl> + if ( SafeCastToBfloat16 ( a , & x ) & & SafeCastToBfloat16 ( b , & y ) ) { <nl> + return PyBfloat16_FromBfloat16 ( x - y ) . release ( ) ; <nl> + } <nl> + return PyArray_Type . tp_as_number - > nb_subtract ( a , b ) ; <nl> + } <nl> + <nl> + PyObject * PyBfloat16_Multiply ( PyObject * a , PyObject * b ) { <nl> + bfloat16 x , y ; <nl> + if ( SafeCastToBfloat16 ( a , & x ) & & SafeCastToBfloat16 ( b , & y ) ) { <nl> + return PyBfloat16_FromBfloat16 ( x * y ) . release ( ) ; <nl> } <nl> - BFLOAT16_BINOP ( Add , + ) <nl> - BFLOAT16_BINOP ( Subtract , - ) <nl> - BFLOAT16_BINOP ( Multiply , * ) <nl> - BFLOAT16_BINOP ( Divide , / ) <nl> - # undef BFLOAT16_BINOP <nl> + return PyArray_Type . tp_as_number - > nb_multiply ( a , b ) ; <nl> + } <nl> + <nl> + PyObject * PyBfloat16_TrueDivide ( PyObject * a , PyObject * b ) { <nl> + bfloat16 x , y ; <nl> + if ( SafeCastToBfloat16 ( a , & x ) & & SafeCastToBfloat16 ( b , & y ) ) { <nl> + return PyBfloat16_FromBfloat16 ( x / y ) . release ( ) ; <nl> + } <nl> + return PyArray_Type . tp_as_number - > nb_true_divide ( a , b ) ; <nl> + } <nl> <nl> / / Python number methods for PyBfloat16 objects . <nl> PyNumberMethods PyBfloat16_AsNumber = { <nl> PyBfloat16_Add , / / nb_add <nl> PyBfloat16_Subtract , / / nb_subtract <nl> PyBfloat16_Multiply , / / nb_multiply <nl> - # if PY_MAJOR_VERSION < 3 <nl> - PyBfloat16_Divide , / / nb_divide <nl> - # endif <nl> nullptr , / / nb_remainder <nl> nullptr , / / nb_divmod <nl> nullptr , / / nb_power <nl> PyNumberMethods PyBfloat16_AsNumber = { <nl> nullptr , / / nb_and <nl> nullptr , / / nb_xor <nl> nullptr , / / nb_or <nl> - # if PY_MAJOR_VERSION < 3 <nl> - nullptr , / / nb_coerce <nl> - # endif <nl> - PyBfloat16_Int , / / nb_int <nl> - # if PY_MAJOR_VERSION < 3 <nl> - PyBfloat16_Int , / / nb_long <nl> - # else <nl> - nullptr , / / reserved <nl> - # endif <nl> - PyBfloat16_Float , / / nb_float <nl> - # if PY_MAJOR_VERSION < 3 <nl> - nullptr , / / nb_oct <nl> - nullptr , / / nb_hex <nl> - # endif <nl> + PyBfloat16_Int , / / nb_int <nl> + nullptr , / / reserved <nl> + PyBfloat16_Float , / / nb_float <nl> <nl> nullptr , / / nb_inplace_add <nl> nullptr , / / nb_inplace_subtract <nl> nullptr , / / nb_inplace_multiply <nl> - # if PY_MAJOR_VERSION < 3 <nl> - nullptr , / / nb_inplace_divide <nl> - # endif <nl> nullptr , / / nb_inplace_remainder <nl> nullptr , / / nb_inplace_power <nl> nullptr , / / nb_inplace_lshift <nl> PyNumberMethods PyBfloat16_AsNumber = { <nl> nullptr , / / nb_inplace_xor <nl> nullptr , / / nb_inplace_or <nl> <nl> - nullptr , / / nb_floor_divide <nl> - PyBfloat16_Divide , / / nb_true_divide <nl> - nullptr , / / nb_inplace_floor_divide <nl> - nullptr , / / nb_inplace_true_divide <nl> - nullptr , / / nb_index <nl> + nullptr , / / nb_floor_divide <nl> + PyBfloat16_TrueDivide , / / nb_true_divide <nl> + nullptr , / / nb_inplace_floor_divide <nl> + nullptr , / / nb_inplace_true_divide <nl> + nullptr , / / nb_index <nl> } ; <nl> <nl> / / Constructs a new PyBfloat16 . <nl> PyObject * PyBfloat16_New ( PyTypeObject * type , PyObject * args , PyObject * kwds ) { <nl> } <nl> PyObject * arg = PyTuple_GetItem ( args , 0 ) ; <nl> <nl> + bfloat16 value ; <nl> if ( PyBfloat16_Check ( arg ) ) { <nl> Py_INCREF ( arg ) ; <nl> return arg ; <nl> - } else { <nl> - bfloat16 value ; <nl> - if ( ! 
AsBfloat16 ( arg , & value ) ) { <nl> - return nullptr ; <nl> - } <nl> + } else if ( CastToBfloat16 ( arg , & value ) ) { <nl> return PyBfloat16_FromBfloat16 ( value ) . release ( ) ; <nl> + } else if ( PyArray_Check ( arg ) ) { <nl> + PyArrayObject * arr = reinterpret_cast < PyArrayObject * > ( arg ) ; <nl> + if ( PyArray_TYPE ( arr ) ! = npy_bfloat16 ) { <nl> + return PyArray_Cast ( arr , npy_bfloat16 ) ; <nl> + } else { <nl> + Py_INCREF ( arg ) ; <nl> + return arg ; <nl> + } <nl> } <nl> + PyErr_Format ( PyExc_TypeError , " expected number , got % s " , <nl> + arg - > ob_type - > tp_name ) ; <nl> + return nullptr ; <nl> } <nl> <nl> / / Comparisons on PyBfloat16s . <nl> PyObject * PyBfloat16_RichCompare ( PyObject * a , PyObject * b , int op ) { <nl> bfloat16 x , y ; <nl> - if ( ! AsBfloat16 ( a , & x ) | | ! AsBfloat16 ( b , & y ) ) return nullptr ; <nl> + if ( ! SafeCastToBfloat16 ( a , & x ) | | ! SafeCastToBfloat16 ( b , & y ) ) { <nl> + return PyGenericArrType_Type . tp_richcompare ( a , b , op ) ; <nl> + } <nl> bool result ; <nl> switch ( op ) { <nl> case Py_LT : <nl> PyObject * PyBfloat16_RichCompare ( PyObject * a , PyObject * b , int op ) { <nl> / / Implementation of repr ( ) for PyBfloat16 . <nl> PyObject * PyBfloat16_Repr ( PyObject * self ) { <nl> bfloat16 x = reinterpret_cast < PyBfloat16 * > ( self ) - > value ; <nl> - string v = strings : : StrCat ( " bfloat16 ( " , static_cast < float > ( x ) , " ) " ) ; <nl> - return MakePyString ( v ) ; <nl> + std : : string v = absl : : StrCat ( static_cast < float > ( x ) ) ; <nl> + return PyUnicode_FromString ( v . c_str ( ) ) ; <nl> } <nl> <nl> / / Implementation of str ( ) for PyBfloat16 . <nl> PyObject * PyBfloat16_Str ( PyObject * self ) { <nl> bfloat16 x = reinterpret_cast < PyBfloat16 * > ( self ) - > value ; <nl> - string v = strings : : StrCat ( static_cast < float > ( x ) ) ; <nl> - return MakePyString ( v ) ; <nl> + std : : string v = absl : : StrCat ( static_cast < float > ( x ) ) ; <nl> + return PyUnicode_FromString ( v . c_str ( ) ) ; <nl> } <nl> <nl> / / Hash function for PyBfloat16 . We use the identity function , which is a weak <nl> / / hash function . <nl> - HashType PyBfloat16_Hash ( PyObject * self ) { <nl> + Py_hash_t PyBfloat16_Hash ( PyObject * self ) { <nl> bfloat16 x = reinterpret_cast < PyBfloat16 * > ( self ) - > value ; <nl> return x . value ; <nl> } <nl> <nl> / / Python type for PyBfloat16 objects . 
<nl> PyTypeObject PyBfloat16_Type = { <nl> - # if PY_MAJOR_VERSION < 3 <nl> - PyObject_HEAD_INIT ( nullptr ) 0 , / / ob_size <nl> - # else <nl> - PyVarObject_HEAD_INIT ( nullptr , 0 ) <nl> - # endif <nl> - " bfloat16 " , / / tp_name <nl> - sizeof ( PyBfloat16 ) , / / tp_basicsize <nl> - 0 , / / tp_itemsize <nl> - nullptr , / / tp_dealloc <nl> + PyVarObject_HEAD_INIT ( nullptr , 0 ) " bfloat16 " , / / tp_name <nl> + sizeof ( PyBfloat16 ) , / / tp_basicsize <nl> + 0 , / / tp_itemsize <nl> + nullptr , / / tp_dealloc <nl> # if PY_VERSION_HEX < 0x03080000 <nl> nullptr , / / tp_print <nl> # else <nl> 0 , / / tp_vectorcall_offset <nl> # endif <nl> - nullptr , / / tp_getattr <nl> - nullptr , / / tp_setattr <nl> - nullptr , / / tp_compare / tp_reserved <nl> - PyBfloat16_Repr , / / tp_repr <nl> - & PyBfloat16_AsNumber , / / tp_as_number <nl> - nullptr , / / tp_as_sequence <nl> - nullptr , / / tp_as_mapping <nl> - PyBfloat16_Hash , / / tp_hash <nl> - nullptr , / / tp_call <nl> - PyBfloat16_Str , / / tp_str <nl> - nullptr , / / tp_getattro <nl> - nullptr , / / tp_setattro <nl> - nullptr , / / tp_as_buffer <nl> - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE , / / tp_flags <nl> - " bfloat16 floating - point values " , / / tp_doc <nl> - nullptr , / / tp_traverse <nl> - nullptr , / / tp_clear <nl> - PyBfloat16_RichCompare , / / tp_richcompare <nl> - 0 , / / tp_weaklistoffset <nl> - nullptr , / / tp_iter <nl> - nullptr , / / tp_iternext <nl> - nullptr , / / tp_methods <nl> - nullptr , / / tp_members <nl> - nullptr , / / tp_getset <nl> - nullptr , / / tp_base <nl> - nullptr , / / tp_dict <nl> - nullptr , / / tp_descr_get <nl> - nullptr , / / tp_descr_set <nl> - 0 , / / tp_dictoffset <nl> - nullptr , / / tp_init <nl> - nullptr , / / tp_alloc <nl> - PyBfloat16_New , / / tp_new <nl> - nullptr , / / tp_free <nl> - nullptr , / / tp_is_gc <nl> - nullptr , / / tp_bases <nl> - nullptr , / / tp_mro <nl> - nullptr , / / tp_cache <nl> - nullptr , / / tp_subclasses <nl> - nullptr , / / tp_weaklist <nl> - nullptr , / / tp_del <nl> - 0 , / / tp_version_tag <nl> + nullptr , / / tp_getattr <nl> + nullptr , / / tp_setattr <nl> + nullptr , / / tp_compare / tp_reserved <nl> + PyBfloat16_Repr , / / tp_repr <nl> + & PyBfloat16_AsNumber , / / tp_as_number <nl> + nullptr , / / tp_as_sequence <nl> + nullptr , / / tp_as_mapping <nl> + PyBfloat16_Hash , / / tp_hash <nl> + nullptr , / / tp_call <nl> + PyBfloat16_Str , / / tp_str <nl> + nullptr , / / tp_getattro <nl> + nullptr , / / tp_setattro <nl> + nullptr , / / tp_as_buffer <nl> + / / tp_flags <nl> + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE , <nl> + " bfloat16 floating - point values " , / / tp_doc <nl> + nullptr , / / tp_traverse <nl> + nullptr , / / tp_clear <nl> + PyBfloat16_RichCompare , / / tp_richcompare <nl> + 0 , / / tp_weaklistoffset <nl> + nullptr , / / tp_iter <nl> + nullptr , / / tp_iternext <nl> + nullptr , / / tp_methods <nl> + nullptr , / / tp_members <nl> + nullptr , / / tp_getset <nl> + nullptr , / / tp_base <nl> + nullptr , / / tp_dict <nl> + nullptr , / / tp_descr_get <nl> + nullptr , / / tp_descr_set <nl> + 0 , / / tp_dictoffset <nl> + nullptr , / / tp_init <nl> + nullptr , / / tp_alloc <nl> + PyBfloat16_New , / / tp_new <nl> + nullptr , / / tp_free <nl> + nullptr , / / tp_is_gc <nl> + nullptr , / / tp_bases <nl> + nullptr , / / tp_mro <nl> + nullptr , / / tp_cache <nl> + nullptr , / / tp_subclasses <nl> + nullptr , / / tp_weaklist <nl> + nullptr , / / tp_del <nl> + 0 , / / tp_version_tag <nl> } ; <nl> <nl> / / Numpy support <nl> PyTypeObject PyBfloat16_Type = { 
<nl> PyArray_ArrFuncs NPyBfloat16_ArrFuncs ; <nl> <nl> PyArray_Descr NPyBfloat16_Descr = { <nl> - PyObject_HEAD_INIT ( nullptr ) & PyBfloat16_Type , / / typeobj <nl> + PyObject_HEAD_INIT ( nullptr ) / / <nl> + / * typeobj = * / <nl> + ( & PyBfloat16_Type ) , <nl> / / We must register bfloat16 with a kind other than " f " , because numpy <nl> / / considers two types with the same kind and size to be equal , but <nl> / / float16 ! = bfloat16 . <nl> - ' V ' , / / kind <nl> + / / The downside of this is that NumPy scalar promotion does not work with <nl> + / / bfloat16 values . <nl> + / * kind = * / ' V ' , <nl> / / TODO ( phawkins ) : there doesn ' t seem to be a way of guaranteeing a type <nl> / / character is unique . <nl> - ' E ' , / / type <nl> - ' = ' , / / byteorder <nl> - NPY_NEEDS_PYAPI | NPY_USE_GETITEM | NPY_USE_SETITEM , / / hasobject <nl> - 0 , / / type_num <nl> - sizeof ( bfloat16 ) , / / elsize <nl> - alignof ( bfloat16 ) , / / alignment <nl> - nullptr , / / subarray <nl> - nullptr , / / fields <nl> - nullptr , / / names <nl> - & NPyBfloat16_ArrFuncs , / / f <nl> - nullptr , / / metadata <nl> - nullptr , / / c_metadata <nl> - - 1 , / / hash <nl> + / * type = * / ' E ' , <nl> + / * byteorder = * / ' = ' , <nl> + / * flags = * / NPY_NEEDS_PYAPI | NPY_USE_GETITEM | NPY_USE_SETITEM , <nl> + / * type_num = * / 0 , <nl> + / * elsize = * / sizeof ( bfloat16 ) , <nl> + / * alignment = * / alignof ( bfloat16 ) , <nl> + / * subarray = * / nullptr , <nl> + / * fields = * / nullptr , <nl> + / * names = * / nullptr , <nl> + / * f = * / & NPyBfloat16_ArrFuncs , <nl> + / * metadata = * / nullptr , <nl> + / * c_metadata = * / nullptr , <nl> + / * hash = * / - 1 , / / - 1 means " not computed yet " . <nl> } ; <nl> <nl> - / / Registered numpy type ID . Global variable populated by the registration code . <nl> - int npy_bfloat16_ = - 1 ; <nl> - <nl> / / Implementations of NumPy array methods . <nl> <nl> PyObject * NPyBfloat16_GetItem ( void * data , void * arr ) { <nl> PyObject * NPyBfloat16_GetItem ( void * data , void * arr ) { <nl> <nl> int NPyBfloat16_SetItem ( PyObject * item , void * data , void * arr ) { <nl> bfloat16 x ; <nl> - if ( ! AsBfloat16 ( item , & x ) ) return - 1 ; <nl> + if ( ! CastToBfloat16 ( item , & x ) ) { <nl> + PyErr_Format ( PyExc_TypeError , " expected number , got % s " , <nl> + item - > ob_type - > tp_name ) ; <nl> + return - 1 ; <nl> + } <nl> memcpy ( data , & x , sizeof ( bfloat16 ) ) ; <nl> return 0 ; <nl> } <nl> int NPyBfloat16_Fill ( void * buffer_raw , npy_intp length , void * ignored ) { <nl> return 0 ; <nl> } <nl> <nl> + void NPyBfloat16_DotFunc ( void * ip1 , npy_intp is1 , void * ip2 , npy_intp is2 , <nl> + void * op , npy_intp n , void * arr ) { <nl> + char * c1 = reinterpret_cast < char * > ( ip1 ) ; <nl> + char * c2 = reinterpret_cast < char * > ( ip2 ) ; <nl> + float acc = 0 . 
0f ; <nl> + for ( npy_intp i = 0 ; i < n ; + + i ) { <nl> + bfloat16 * const b1 = reinterpret_cast < bfloat16 * > ( c1 ) ; <nl> + bfloat16 * const b2 = reinterpret_cast < bfloat16 * > ( c2 ) ; <nl> + acc + = static_cast < float > ( * b1 ) * static_cast < float > ( * b2 ) ; <nl> + c1 + = is1 ; <nl> + c2 + = is2 ; <nl> + } <nl> + bfloat16 * out = reinterpret_cast < bfloat16 * > ( op ) ; <nl> + * out = static_cast < bfloat16 > ( acc ) ; <nl> + } <nl> + <nl> + int NPyBfloat16_CompareFunc ( const void * v1 , const void * v2 , void * arr ) { <nl> + bfloat16 b1 = * reinterpret_cast < const bfloat16 * > ( v1 ) ; <nl> + bfloat16 b2 = * reinterpret_cast < const bfloat16 * > ( v2 ) ; <nl> + if ( b1 < b2 ) { <nl> + return - 1 ; <nl> + } <nl> + if ( b1 > b2 ) { <nl> + return 1 ; <nl> + } <nl> + return 0 ; <nl> + } <nl> + <nl> + int NPyBfloat16_ArgMaxFunc ( void * data , npy_intp n , npy_intp * max_ind , <nl> + void * arr ) { <nl> + const bfloat16 * bdata = reinterpret_cast < const bfloat16 * > ( data ) ; <nl> + float max_val = - std : : numeric_limits < float > : : infinity ( ) ; <nl> + for ( npy_intp i = 0 ; i < n ; + + i ) { <nl> + if ( static_cast < float > ( bdata [ i ] ) > max_val ) { <nl> + max_val = static_cast < float > ( bdata [ i ] ) ; <nl> + * max_ind = i ; <nl> + } <nl> + } <nl> + return 0 ; <nl> + } <nl> + <nl> + int NPyBfloat16_ArgMinFunc ( void * data , npy_intp n , npy_intp * min_ind , <nl> + void * arr ) { <nl> + const bfloat16 * bdata = reinterpret_cast < const bfloat16 * > ( data ) ; <nl> + float min_val = std : : numeric_limits < float > : : infinity ( ) ; <nl> + for ( npy_intp i = 0 ; i < n ; + + i ) { <nl> + if ( static_cast < float > ( bdata [ i ] ) < min_val ) { <nl> + min_val = static_cast < float > ( bdata [ i ] ) ; <nl> + * min_ind = i ; <nl> + } <nl> + } <nl> + return 0 ; <nl> + } <nl> + <nl> / / NumPy casts <nl> <nl> + template < typename T , typename Enable = void > <nl> + struct TypeDescriptor { <nl> + / / typedef . . . T ; / / Representation type in memory for NumPy values of type <nl> + / / static int Dtype ( ) { return NPY_ . . . ; } / / Numpy type number for T . <nl> + } ; <nl> + <nl> + template < > <nl> + struct TypeDescriptor < bfloat16 > { <nl> + typedef bfloat16 T ; <nl> + static int Dtype ( ) { return npy_bfloat16 ; } <nl> + } ; <nl> + <nl> + template < > <nl> + struct TypeDescriptor < uint8 > { <nl> + typedef uint8 T ; <nl> + static int Dtype ( ) { return NPY_UINT8 ; } <nl> + } ; <nl> + <nl> + template < > <nl> + struct TypeDescriptor < uint16 > { <nl> + typedef uint16 T ; <nl> + static int Dtype ( ) { return NPY_UINT16 ; } <nl> + } ; <nl> + <nl> + / / We register " int " , " long " , and " long long " types for portability across <nl> + / / Linux , where " int " and " long " are the same type , and Windows , where " long " <nl> + / / and " longlong " are the same type . 
<nl> + template < > <nl> + struct TypeDescriptor < unsigned int > { <nl> + typedef unsigned int T ; <nl> + static int Dtype ( ) { return NPY_UINT ; } <nl> + } ; <nl> + <nl> + template < > <nl> + struct TypeDescriptor < unsigned long > { / / NOLINT <nl> + typedef unsigned long T ; / / NOLINT <nl> + static int Dtype ( ) { return NPY_ULONG ; } <nl> + } ; <nl> + <nl> + template < > <nl> + struct TypeDescriptor < unsigned long long > { / / NOLINT <nl> + typedef unsigned long long T ; / / NOLINT <nl> + static int Dtype ( ) { return NPY_ULONGLONG ; } <nl> + } ; <nl> + <nl> + template < > <nl> + struct TypeDescriptor < int8 > { <nl> + typedef int8 T ; <nl> + static int Dtype ( ) { return NPY_INT8 ; } <nl> + } ; <nl> + <nl> + template < > <nl> + struct TypeDescriptor < int16 > { <nl> + typedef int16 T ; <nl> + static int Dtype ( ) { return NPY_INT16 ; } <nl> + } ; <nl> + <nl> + template < > <nl> + struct TypeDescriptor < int > { <nl> + typedef int T ; <nl> + static int Dtype ( ) { return NPY_INT ; } <nl> + } ; <nl> + <nl> + template < > <nl> + struct TypeDescriptor < long > { / / NOLINT <nl> + typedef long T ; / / NOLINT <nl> + static int Dtype ( ) { return NPY_LONG ; } <nl> + } ; <nl> + <nl> + template < > <nl> + struct TypeDescriptor < long long > { / / NOLINT <nl> + typedef long long T ; / / NOLINT <nl> + static int Dtype ( ) { return NPY_LONGLONG ; } <nl> + } ; <nl> + <nl> + template < > <nl> + struct TypeDescriptor < bool > { <nl> + typedef int8 T ; <nl> + static int Dtype ( ) { return NPY_BOOL ; } <nl> + } ; <nl> + <nl> + template < > <nl> + struct TypeDescriptor < Eigen : : half > { <nl> + typedef Eigen : : half T ; <nl> + static int Dtype ( ) { return NPY_HALF ; } <nl> + } ; <nl> + <nl> + template < > <nl> + struct TypeDescriptor < float > { <nl> + typedef float T ; <nl> + static int Dtype ( ) { return NPY_FLOAT ; } <nl> + } ; <nl> + <nl> + template < > <nl> + struct TypeDescriptor < double > { <nl> + typedef double T ; <nl> + static int Dtype ( ) { return NPY_DOUBLE ; } <nl> + } ; <nl> + <nl> + template < > <nl> + struct TypeDescriptor < std : : complex < float > > { <nl> + typedef std : : complex < float > T ; <nl> + static int Dtype ( ) { return NPY_COMPLEX64 ; } <nl> + } ; <nl> + <nl> + template < > <nl> + struct TypeDescriptor < std : : complex < double > > { <nl> + typedef std : : complex < double > T ; <nl> + static int Dtype ( ) { return NPY_COMPLEX128 ; } <nl> + } ; <nl> + <nl> / / Performs a NumPy array cast from type ' From ' to ' To ' . <nl> template < typename From , typename To > <nl> void NPyCast ( void * from_void , void * to_void , npy_intp n , void * fromarr , <nl> void * toarr ) { <nl> - const From * from = reinterpret_cast < From * > ( from_void ) ; <nl> - To * to = reinterpret_cast < To * > ( to_void ) ; <nl> + const auto * from = <nl> + reinterpret_cast < typename TypeDescriptor < From > : : T * > ( from_void ) ; <nl> + auto * to = reinterpret_cast < typename TypeDescriptor < To > : : T * > ( to_void ) ; <nl> for ( npy_intp i = 0 ; i < n ; + + i ) { <nl> - to [ i ] = static_cast < To > ( from [ i ] ) ; <nl> + to [ i ] = <nl> + static_cast < typename TypeDescriptor < To > : : T > ( static_cast < To > ( from [ i ] ) ) ; <nl> } <nl> } <nl> <nl> void NPyCast ( void * from_void , void * to_void , npy_intp n , void * fromarr , <nl> / / safely coerced to T . 
<nl> template < typename T > <nl> bool RegisterBfloat16Cast ( int numpy_type , bool cast_is_safe ) { <nl> - if ( PyArray_RegisterCastFunc ( PyArray_DescrFromType ( numpy_type ) , npy_bfloat16_ , <nl> + if ( PyArray_RegisterCastFunc ( PyArray_DescrFromType ( numpy_type ) , npy_bfloat16 , <nl> NPyCast < T , bfloat16 > ) < 0 ) { <nl> return false ; <nl> } <nl> bool RegisterBfloat16Cast ( int numpy_type , bool cast_is_safe ) { <nl> } <nl> <nl> template < typename InType , typename OutType , typename Functor > <nl> - void BinaryUFunc ( char * * args , const npy_intp * dimensions , const npy_intp * steps , <nl> - void * data ) { <nl> - const char * i0 = args [ 0 ] ; <nl> - const char * i1 = args [ 1 ] ; <nl> - char * o = args [ 2 ] ; <nl> - for ( npy_intp k = 0 ; k < * dimensions ; k + + ) { <nl> - InType x = * reinterpret_cast < const InType * > ( i0 ) ; <nl> - InType y = * reinterpret_cast < const InType * > ( i1 ) ; <nl> - * reinterpret_cast < OutType * > ( o ) = Functor ( ) ( x , y ) ; <nl> - i0 + = steps [ 0 ] ; <nl> - i1 + = steps [ 1 ] ; <nl> - o + = steps [ 2 ] ; <nl> - } <nl> - } <nl> - <nl> - / / Numpy changed const - ness of PyUFuncGenericFunction , provide overload . <nl> - template < typename Functor > <nl> - void CompareUFunc ( char * * args , npy_intp * dimensions , npy_intp * steps , <nl> - void * data ) { <nl> - BinaryUFunc < bfloat16 , npy_bool , Functor > ( args , dimensions , steps , data ) ; <nl> - } <nl> - template < typename Functor > <nl> - void CompareUFunc ( char * * args , const npy_intp * dimensions , <nl> - const npy_intp * steps , void * data ) { <nl> - BinaryUFunc < bfloat16 , npy_bool , Functor > ( args , dimensions , steps , data ) ; <nl> - } <nl> - <nl> - struct Bfloat16EqFunctor { <nl> + struct UnaryUFunc { <nl> + static std : : vector < int > Types ( ) { <nl> + return { TypeDescriptor < InType > : : Dtype ( ) , TypeDescriptor < OutType > : : Dtype ( ) } ; <nl> + } <nl> + static void Call ( char * * args , const npy_intp * dimensions , <nl> + const npy_intp * steps , void * data ) { <nl> + const char * i0 = args [ 0 ] ; <nl> + char * o = args [ 1 ] ; <nl> + for ( npy_intp k = 0 ; k < * dimensions ; k + + ) { <nl> + auto x = * reinterpret_cast < const typename TypeDescriptor < InType > : : T * > ( i0 ) ; <nl> + * reinterpret_cast < typename TypeDescriptor < OutType > : : T * > ( o ) = Functor ( ) ( x ) ; <nl> + i0 + = steps [ 0 ] ; <nl> + o + = steps [ 1 ] ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + template < typename InType , typename OutType , typename OutType2 , <nl> + typename Functor > <nl> + struct UnaryUFunc2 { <nl> + static std : : vector < int > Types ( ) { <nl> + return { TypeDescriptor < InType > : : Dtype ( ) , TypeDescriptor < OutType > : : Dtype ( ) , <nl> + TypeDescriptor < OutType2 > : : Dtype ( ) } ; <nl> + } <nl> + static void Call ( char * * args , const npy_intp * dimensions , <nl> + const npy_intp * steps , void * data ) { <nl> + const char * i0 = args [ 0 ] ; <nl> + char * o0 = args [ 1 ] ; <nl> + char * o1 = args [ 2 ] ; <nl> + for ( npy_intp k = 0 ; k < * dimensions ; k + + ) { <nl> + auto x = * reinterpret_cast < const typename TypeDescriptor < InType > : : T * > ( i0 ) ; <nl> + std : : tie ( * reinterpret_cast < typename TypeDescriptor < OutType > : : T * > ( o0 ) , <nl> + * reinterpret_cast < typename TypeDescriptor < OutType2 > : : T * > ( o1 ) ) = <nl> + Functor ( ) ( x ) ; <nl> + i0 + = steps [ 0 ] ; <nl> + o0 + = steps [ 1 ] ; <nl> + o1 + = steps [ 2 ] ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + template < typename InType , 
typename OutType , typename Functor > <nl> + struct BinaryUFunc { <nl> + static std : : vector < int > Types ( ) { <nl> + return { TypeDescriptor < InType > : : Dtype ( ) , TypeDescriptor < InType > : : Dtype ( ) , <nl> + TypeDescriptor < OutType > : : Dtype ( ) } ; <nl> + } <nl> + static void Call ( char * * args , const npy_intp * dimensions , <nl> + const npy_intp * steps , void * data ) { <nl> + const char * i0 = args [ 0 ] ; <nl> + const char * i1 = args [ 1 ] ; <nl> + char * o = args [ 2 ] ; <nl> + for ( npy_intp k = 0 ; k < * dimensions ; k + + ) { <nl> + auto x = * reinterpret_cast < const typename TypeDescriptor < InType > : : T * > ( i0 ) ; <nl> + auto y = * reinterpret_cast < const typename TypeDescriptor < InType > : : T * > ( i1 ) ; <nl> + * reinterpret_cast < typename TypeDescriptor < OutType > : : T * > ( o ) = <nl> + Functor ( ) ( x , y ) ; <nl> + i0 + = steps [ 0 ] ; <nl> + i1 + = steps [ 1 ] ; <nl> + o + = steps [ 2 ] ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + template < typename InType , typename InType2 , typename OutType , typename Functor > <nl> + struct BinaryUFunc2 { <nl> + static std : : vector < int > Types ( ) { <nl> + return { TypeDescriptor < InType > : : Dtype ( ) , TypeDescriptor < InType2 > : : Dtype ( ) , <nl> + TypeDescriptor < OutType > : : Dtype ( ) } ; <nl> + } <nl> + static void Call ( char * * args , const npy_intp * dimensions , <nl> + const npy_intp * steps , void * data ) { <nl> + const char * i0 = args [ 0 ] ; <nl> + const char * i1 = args [ 1 ] ; <nl> + char * o = args [ 2 ] ; <nl> + for ( npy_intp k = 0 ; k < * dimensions ; k + + ) { <nl> + auto x = * reinterpret_cast < const typename TypeDescriptor < InType > : : T * > ( i0 ) ; <nl> + auto y = <nl> + * reinterpret_cast < const typename TypeDescriptor < InType2 > : : T * > ( i1 ) ; <nl> + * reinterpret_cast < typename TypeDescriptor < OutType > : : T * > ( o ) = <nl> + Functor ( ) ( x , y ) ; <nl> + i0 + = steps [ 0 ] ; <nl> + i1 + = steps [ 1 ] ; <nl> + o + = steps [ 2 ] ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + template < typename UFunc > <nl> + bool RegisterUFunc ( PyObject * numpy , const char * name ) { <nl> + std : : vector < int > types = UFunc : : Types ( ) ; <nl> + PyUFuncGenericFunction fn = <nl> + reinterpret_cast < PyUFuncGenericFunction > ( UFunc : : Call ) ; <nl> + Safe_PyObjectPtr ufunc_obj = make_safe ( PyObject_GetAttrString ( numpy , name ) ) ; <nl> + if ( ! ufunc_obj ) { <nl> + return false ; <nl> + } <nl> + PyUFuncObject * ufunc = reinterpret_cast < PyUFuncObject * > ( ufunc_obj . get ( ) ) ; <nl> + if ( static_cast < int > ( types . size ( ) ) ! = ufunc - > nargs ) { <nl> + PyErr_Format ( PyExc_AssertionError , <nl> + " ufunc % s takes % d arguments , loop takes % lu " , name , <nl> + ufunc - > nargs , types . size ( ) ) ; <nl> + return false ; <nl> + } <nl> + if ( PyUFunc_RegisterLoopForType ( ufunc , npy_bfloat16 , fn , <nl> + const_cast < int * > ( types . 
data ( ) ) , <nl> + nullptr ) < 0 ) { <nl> + return false ; <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + namespace ufuncs { <nl> + <nl> + struct Add { <nl> + bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { return a + b ; } <nl> + } ; <nl> + struct Subtract { <nl> + bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { return a - b ; } <nl> + } ; <nl> + struct Multiply { <nl> + bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { return a * b ; } <nl> + } ; <nl> + struct TrueDivide { <nl> + bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { return a / b ; } <nl> + } ; <nl> + <nl> + std : : pair < float , float > divmod ( float a , float b ) { <nl> + if ( b = = 0 . 0f ) { <nl> + float nan = std : : numeric_limits < float > : : quiet_NaN ( ) ; <nl> + return { nan , nan } ; <nl> + } <nl> + float mod = std : : fmod ( a , b ) ; <nl> + float div = ( a - mod ) / b ; <nl> + if ( mod ! = 0 . 0f ) { <nl> + if ( ( b < 0 . 0f ) ! = ( mod < 0 . 0f ) ) { <nl> + mod + = b ; <nl> + div - = 1 . 0f ; <nl> + } <nl> + } else { <nl> + mod = std : : copysign ( 0 . 0f , b ) ; <nl> + } <nl> + <nl> + float floordiv ; <nl> + if ( div ! = 0 . 0f ) { <nl> + floordiv = std : : floor ( div ) ; <nl> + if ( div - floordiv > 0 . 5f ) { <nl> + floordiv + = 1 . 0f ; <nl> + } <nl> + } else { <nl> + floordiv = std : : copysign ( 0 . 0f , a / b ) ; <nl> + } <nl> + return { floordiv , mod } ; <nl> + } <nl> + <nl> + struct FloorDivide { <nl> + bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> + return bfloat16 ( divmod ( static_cast < float > ( a ) , static_cast < float > ( b ) ) . first ) ; <nl> + } <nl> + } ; <nl> + struct Remainder { <nl> + bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> + return bfloat16 ( <nl> + divmod ( static_cast < float > ( a ) , static_cast < float > ( b ) ) . 
second ) ; <nl> + } <nl> + } ; <nl> + struct DivmodUFunc { <nl> + static std : : vector < int > Types ( ) { <nl> + return { npy_bfloat16 , npy_bfloat16 , npy_bfloat16 , npy_bfloat16 } ; <nl> + } <nl> + static void Call ( char * * args , npy_intp * dimensions , npy_intp * steps , <nl> + void * data ) { <nl> + const char * i0 = args [ 0 ] ; <nl> + const char * i1 = args [ 1 ] ; <nl> + char * o0 = args [ 2 ] ; <nl> + char * o1 = args [ 3 ] ; <nl> + for ( npy_intp k = 0 ; k < * dimensions ; k + + ) { <nl> + bfloat16 x = * reinterpret_cast < const bfloat16 * > ( i0 ) ; <nl> + bfloat16 y = * reinterpret_cast < const bfloat16 * > ( i1 ) ; <nl> + float floordiv , mod ; <nl> + std : : tie ( floordiv , mod ) = <nl> + divmod ( static_cast < float > ( x ) , static_cast < float > ( y ) ) ; <nl> + * reinterpret_cast < bfloat16 * > ( o0 ) = bfloat16 ( floordiv ) ; <nl> + * reinterpret_cast < bfloat16 * > ( o1 ) = bfloat16 ( mod ) ; <nl> + i0 + = steps [ 0 ] ; <nl> + i1 + = steps [ 1 ] ; <nl> + o0 + = steps [ 2 ] ; <nl> + o1 + = steps [ 3 ] ; <nl> + } <nl> + } <nl> + } ; <nl> + struct Fmod { <nl> + bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> + return bfloat16 ( std : : fmod ( static_cast < float > ( a ) , static_cast < float > ( b ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Negative { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { return - a ; } <nl> + } ; <nl> + struct Positive { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { return a ; } <nl> + } ; <nl> + struct Power { <nl> + bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> + return bfloat16 ( std : : pow ( static_cast < float > ( a ) , static_cast < float > ( b ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Abs { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : abs ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Cbrt { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : cbrt ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Ceil { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : ceil ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct CopySign { <nl> + bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> + return bfloat16 ( <nl> + std : : copysign ( static_cast < float > ( a ) , static_cast < float > ( b ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Exp { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : exp ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Exp2 { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : exp2 ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Expm1 { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : expm1 ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Floor { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : floor ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Frexp { <nl> + std : : pair < bfloat16 , int > operator ( ) ( bfloat16 a ) { <nl> + int exp ; <nl> + float f = std : : frexp ( static_cast < float > ( a ) , & exp ) ; <nl> + return { bfloat16 ( f ) , exp } ; <nl> + } <nl> + } ; <nl> + struct Heaviside { <nl> + bfloat16 operator ( ) ( bfloat16 bx , bfloat16 h0 ) { <nl> + float x = static_cast < float > ( bx ) ; <nl> + if ( Eigen : : numext : : isnan ( x ) ) { <nl> + return bx ; <nl> + } <nl> + if ( x < 0 ) { <nl> + return bfloat16 ( 0 . 
0f ) ; <nl> + } <nl> + if ( x > 0 ) { <nl> + return bfloat16 ( 1 . 0f ) ; <nl> + } <nl> + return h0 ; / / x = = 0 <nl> + } <nl> + } ; <nl> + struct Conjugate { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { return a ; } <nl> + } ; <nl> + struct IsFinite { <nl> + bool operator ( ) ( bfloat16 a ) { return std : : isfinite ( static_cast < float > ( a ) ) ; } <nl> + } ; <nl> + struct IsInf { <nl> + bool operator ( ) ( bfloat16 a ) { return std : : isinf ( static_cast < float > ( a ) ) ; } <nl> + } ; <nl> + struct IsNan { <nl> + bool operator ( ) ( bfloat16 a ) { <nl> + return Eigen : : numext : : isnan ( static_cast < float > ( a ) ) ; <nl> + } <nl> + } ; <nl> + struct Ldexp { <nl> + bfloat16 operator ( ) ( bfloat16 a , int exp ) { <nl> + return bfloat16 ( std : : ldexp ( static_cast < float > ( a ) , exp ) ) ; <nl> + } <nl> + } ; <nl> + struct Log { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : log ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Log2 { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : log2 ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Log10 { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : log10 ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Log1p { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : log1p ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct LogAddExp { <nl> + bfloat16 operator ( ) ( bfloat16 bx , bfloat16 by ) { <nl> + float x = static_cast < float > ( bx ) ; <nl> + float y = static_cast < float > ( by ) ; <nl> + if ( x = = y ) { <nl> + / / Handles infinities of the same sign . <nl> + return bfloat16 ( x + std : : log ( 2 . 0f ) ) ; <nl> + } <nl> + float out = std : : numeric_limits < float > : : quiet_NaN ( ) ; <nl> + if ( x > y ) { <nl> + out = x + std : : log1p ( std : : exp ( y - x ) ) ; <nl> + } else if ( x < y ) { <nl> + out = y + std : : log1p ( std : : exp ( x - y ) ) ; <nl> + } <nl> + return bfloat16 ( out ) ; <nl> + } <nl> + } ; <nl> + struct LogAddExp2 { <nl> + bfloat16 operator ( ) ( bfloat16 bx , bfloat16 by ) { <nl> + float x = static_cast < float > ( bx ) ; <nl> + float y = static_cast < float > ( by ) ; <nl> + if ( x = = y ) { <nl> + / / Handles infinities of the same sign . <nl> + return bfloat16 ( x + 1 . 0f ) ; <nl> + } <nl> + float out = std : : numeric_limits < float > : : quiet_NaN ( ) ; <nl> + if ( x > y ) { <nl> + out = x + std : : log1p ( std : : exp2 ( y - x ) ) / std : : log ( 2 . 0f ) ; <nl> + } else if ( x < y ) { <nl> + out = y + std : : log1p ( std : : exp2 ( x - y ) ) / std : : log ( 2 . 0f ) ; <nl> + } <nl> + return bfloat16 ( out ) ; <nl> + } <nl> + } ; <nl> + struct Modf { <nl> + std : : pair < bfloat16 , bfloat16 > operator ( ) ( bfloat16 a ) { <nl> + float integral ; <nl> + float f = std : : modf ( static_cast < float > ( a ) , & integral ) ; <nl> + return { bfloat16 ( f ) , bfloat16 ( integral ) } ; <nl> + } <nl> + } ; <nl> + <nl> + struct Reciprocal { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( 1 . 
f / static_cast < float > ( a ) ) ; <nl> + } <nl> + } ; <nl> + struct Rint { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : rint ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Sign { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + float f ( a ) ; <nl> + if ( f < 0 ) { <nl> + return bfloat16 ( - 1 ) ; <nl> + } <nl> + if ( f > 0 ) { <nl> + return bfloat16 ( 1 ) ; <nl> + } <nl> + return a ; <nl> + } <nl> + } ; <nl> + struct SignBit { <nl> + bool operator ( ) ( bfloat16 a ) { return std : : signbit ( static_cast < float > ( a ) ) ; } <nl> + } ; <nl> + struct Sqrt { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : sqrt ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Square { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + float f ( a ) ; <nl> + return bfloat16 ( f * f ) ; <nl> + } <nl> + } ; <nl> + struct Trunc { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : trunc ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + <nl> + / / Trigonometric functions <nl> + struct Sin { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : sin ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Cos { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : cos ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Tan { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : tan ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Arcsin { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : asin ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Arccos { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : acos ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Arctan { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : atan ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Arctan2 { <nl> + bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> + return bfloat16 ( std : : atan2 ( static_cast < float > ( a ) , static_cast < float > ( b ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Hypot { <nl> + bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> + return bfloat16 ( std : : hypot ( static_cast < float > ( a ) , static_cast < float > ( b ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Sinh { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : sinh ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Cosh { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : cosh ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Tanh { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : tanh ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Arcsinh { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : asinh ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Arccosh { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : acosh ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Arctanh { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + return bfloat16 ( std : : atanh ( static_cast < float > ( a ) ) ) ; <nl> + } <nl> + } ; <nl> + struct Deg2rad { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + 
static constexpr float radians_per_degree = M_PI / 180 . 0f ; <nl> + return bfloat16 ( static_cast < float > ( a ) * radians_per_degree ) ; <nl> + } <nl> + } ; <nl> + struct Rad2deg { <nl> + bfloat16 operator ( ) ( bfloat16 a ) { <nl> + static constexpr float degrees_per_radian = 180 . 0f / M_PI ; <nl> + return bfloat16 ( static_cast < float > ( a ) * degrees_per_radian ) ; <nl> + } <nl> + } ; <nl> + <nl> + struct Eq { <nl> npy_bool operator ( ) ( bfloat16 a , bfloat16 b ) { return a = = b ; } <nl> } ; <nl> - struct Bfloat16NeFunctor { <nl> + struct Ne { <nl> npy_bool operator ( ) ( bfloat16 a , bfloat16 b ) { return a ! = b ; } <nl> } ; <nl> - struct Bfloat16LtFunctor { <nl> + struct Lt { <nl> npy_bool operator ( ) ( bfloat16 a , bfloat16 b ) { return a < b ; } <nl> } ; <nl> - struct Bfloat16GtFunctor { <nl> + struct Gt { <nl> npy_bool operator ( ) ( bfloat16 a , bfloat16 b ) { return a > b ; } <nl> } ; <nl> - struct Bfloat16LeFunctor { <nl> + struct Le { <nl> npy_bool operator ( ) ( bfloat16 a , bfloat16 b ) { return a < = b ; } <nl> } ; <nl> - struct Bfloat16GeFunctor { <nl> + struct Ge { <nl> npy_bool operator ( ) ( bfloat16 a , bfloat16 b ) { return a > = b ; } <nl> } ; <nl> + struct Maximum { <nl> + bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> + float fa ( a ) , fb ( b ) ; <nl> + return Eigen : : numext : : isnan ( fa ) | | fa > fb ? a : b ; <nl> + } <nl> + } ; <nl> + struct Minimum { <nl> + bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> + float fa ( a ) , fb ( b ) ; <nl> + return Eigen : : numext : : isnan ( fa ) | | fa < fb ? a : b ; <nl> + } <nl> + } ; <nl> + struct Fmax { <nl> + bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> + float fa ( a ) , fb ( b ) ; <nl> + return Eigen : : numext : : isnan ( fb ) | | fa > fb ? a : b ; <nl> + } <nl> + } ; <nl> + struct Fmin { <nl> + bfloat16 operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> + float fa ( a ) , fb ( b ) ; <nl> + return Eigen : : numext : : isnan ( fb ) | | fa < fb ? a : b ; <nl> + } <nl> + } ; <nl> + <nl> + struct LogicalNot { <nl> + npy_bool operator ( ) ( bfloat16 a ) { return ! a ; } <nl> + } ; <nl> + struct LogicalAnd { <nl> + npy_bool operator ( ) ( bfloat16 a , bfloat16 b ) { return a & & b ; } <nl> + } ; <nl> + struct LogicalOr { <nl> + npy_bool operator ( ) ( bfloat16 a , bfloat16 b ) { return a | | b ; } <nl> + } ; <nl> + struct LogicalXor { <nl> + npy_bool operator ( ) ( bfloat16 a , bfloat16 b ) { <nl> + return static_cast < bool > ( a ) ^ static_cast < bool > ( b ) ; <nl> + } <nl> + } ; <nl> + <nl> + struct NextAfter { <nl> + bfloat16 operator ( ) ( bfloat16 from , bfloat16 to ) { <nl> + uint16_t from_as_int , to_as_int ; <nl> + const uint16_t sign_mask = 1 < < 15 ; <nl> + float from_as_float ( from ) , to_as_float ( to ) ; <nl> + memcpy ( & from_as_int , & from , sizeof ( bfloat16 ) ) ; <nl> + memcpy ( & to_as_int , & to , sizeof ( bfloat16 ) ) ; <nl> + if ( Eigen : : numext : : isnan ( from_as_float ) | | <nl> + Eigen : : numext : : isnan ( to_as_float ) ) { <nl> + return bfloat16 ( std : : numeric_limits < float > : : quiet_NaN ( ) ) ; <nl> + } <nl> + if ( from_as_int = = to_as_int ) { <nl> + return to ; <nl> + } <nl> + if ( from_as_float = = 0 ) { <nl> + if ( to_as_float = = 0 ) { <nl> + return to ; <nl> + } else { <nl> + / / Smallest subnormal signed like ` to ` . 
<nl> + uint16_t out_int = ( to_as_int & sign_mask ) | 1 ; <nl> + bfloat16 out ; <nl> + memcpy ( & out , & out_int , sizeof ( bfloat16 ) ) ; <nl> + return out ; <nl> + } <nl> + } <nl> + uint16_t from_sign = from_as_int & sign_mask ; <nl> + uint16_t to_sign = to_as_int & sign_mask ; <nl> + uint16_t from_abs = from_as_int & ~ sign_mask ; <nl> + uint16_t to_abs = to_as_int & ~ sign_mask ; <nl> + uint16_t magnitude_adjustment = <nl> + ( from_abs > to_abs | | from_sign ! = to_sign ) ? 0xFFFF : 0x0001 ; <nl> + uint16_t out_int = from_as_int + magnitude_adjustment ; <nl> + bfloat16 out ; <nl> + memcpy ( & out , & out_int , sizeof ( bfloat16 ) ) ; <nl> + return out ; <nl> + } <nl> + } ; <nl> + <nl> + / / TODO ( phawkins ) : implement spacing <nl> + <nl> + } / / namespace ufuncs <nl> + <nl> + } / / namespace <nl> <nl> / / Initializes the module . <nl> bool Initialize ( ) { <nl> - / / It ' s critical to ImportNumpy and import umath <nl> - / / to avoid crash in open source build . <nl> ImportNumpy ( ) ; <nl> import_umath1 ( false ) ; <nl> <nl> - Safe_PyObjectPtr numpy_str = make_safe ( MakePyString ( " numpy " ) ) ; <nl> + Safe_PyObjectPtr numpy_str = make_safe ( PyUnicode_FromString ( " numpy " ) ) ; <nl> if ( ! numpy_str ) { <nl> return false ; <nl> } <nl> bool Initialize ( ) { <nl> return false ; <nl> } <nl> <nl> - / / We hit a mysterious crash if we haven ' t initialized numpy before this : <nl> PyBfloat16_Type . tp_base = & PyGenericArrType_Type ; <nl> <nl> if ( PyType_Ready ( & PyBfloat16_Type ) < 0 ) { <nl> bool Initialize ( ) { <nl> NPyBfloat16_ArrFuncs . copyswap = NPyBfloat16_CopySwap ; <nl> NPyBfloat16_ArrFuncs . nonzero = NPyBfloat16_NonZero ; <nl> NPyBfloat16_ArrFuncs . fill = NPyBfloat16_Fill ; <nl> + NPyBfloat16_ArrFuncs . dotfunc = NPyBfloat16_DotFunc ; <nl> + NPyBfloat16_ArrFuncs . compare = NPyBfloat16_CompareFunc ; <nl> + NPyBfloat16_ArrFuncs . argmax = NPyBfloat16_ArgMaxFunc ; <nl> + NPyBfloat16_ArrFuncs . argmin = NPyBfloat16_ArgMinFunc ; <nl> <nl> Py_TYPE ( & NPyBfloat16_Descr ) = & PyArrayDescr_Type ; <nl> - npy_bfloat16_ = PyArray_RegisterDataType ( & NPyBfloat16_Descr ) ; <nl> - if ( npy_bfloat16_ < 0 ) return false ; <nl> + npy_bfloat16 = PyArray_RegisterDataType ( & NPyBfloat16_Descr ) ; <nl> + if ( npy_bfloat16 < 0 ) { <nl> + return false ; <nl> + } <nl> <nl> / / Support dtype ( bfloat16 ) <nl> if ( PyDict_SetItemString ( PyBfloat16_Type . tp_dict , " dtype " , <nl> bool Initialize ( ) { <nl> } <nl> <nl> / / Register casts <nl> - <nl> - / / We lie shamelessly and say that a cast from half to bfloat16 is safe . <nl> - / / Numpy frequently uses the smallest legal representation type for small <nl> - / / float constants ( e . g . , 1 . 0 ) , which is often float16 . Things break if these <nl> - / / cannot be converted transparently to bfloat16 . <nl> - if ( ! RegisterBfloat16Cast < Eigen : : half > ( NPY_HALF , / * cast_is_safe = * / true ) ) { <nl> + if ( ! RegisterBfloat16Cast < Eigen : : half > ( NPY_HALF , / * cast_is_safe = * / false ) ) { <nl> return false ; <nl> } <nl> - <nl> if ( ! RegisterBfloat16Cast < float > ( NPY_FLOAT , / * cast_is_safe = * / true ) ) { <nl> return false ; <nl> } <nl> if ( ! RegisterBfloat16Cast < double > ( NPY_DOUBLE , / * cast_is_safe = * / true ) ) { <nl> return false ; <nl> } <nl> - if ( ! RegisterBfloat16Cast < int32 > ( NPY_INT32 , / * cast_is_safe = * / false ) ) { <nl> + if ( ! RegisterBfloat16Cast < bool > ( NPY_BOOL , / * cast_is_safe = * / false ) ) { <nl> return false ; <nl> } <nl> - if ( ! 
RegisterBfloat16Cast < int64 > ( NPY_INT64 , / * cast_is_safe = * / false ) ) { <nl> + if ( ! RegisterBfloat16Cast < uint8 > ( NPY_UINT8 , / * cast_is_safe = * / false ) ) { <nl> return false ; <nl> } <nl> - / / Following the numpy convention . imag part is dropped when converting to <nl> - / / float . <nl> - if ( ! RegisterBfloat16Cast < complex64 > ( NPY_COMPLEX64 , / * cast_is_safe = * / true ) ) { <nl> + if ( ! RegisterBfloat16Cast < uint16 > ( NPY_UINT16 , / * cast_is_safe = * / false ) ) { <nl> return false ; <nl> } <nl> - if ( ! RegisterBfloat16Cast < complex128 > ( NPY_COMPLEX128 , <nl> - / * cast_is_safe = * / true ) ) { <nl> + if ( ! RegisterBfloat16Cast < unsigned int > ( NPY_UINT , / * cast_is_safe = * / false ) ) { <nl> return false ; <nl> } <nl> - <nl> - / / Register ufuncs <nl> - auto register_ufunc = [ & ] ( const char * name , PyUFuncGenericFunction fn , <nl> - const std : : array < int , 3 > & types ) { <nl> - Safe_PyObjectPtr ufunc_obj = <nl> - make_safe ( PyObject_GetAttrString ( numpy . get ( ) , name ) ) ; <nl> - if ( ! ufunc_obj ) { <nl> - return false ; <nl> - } <nl> - PyUFuncObject * ufunc = reinterpret_cast < PyUFuncObject * > ( ufunc_obj . get ( ) ) ; <nl> - if ( types . size ( ) ! = ufunc - > nargs ) { <nl> - PyErr_Format ( PyExc_AssertionError , <nl> - " ufunc % s takes % d arguments , loop takes % lu " , name , <nl> - ufunc - > nargs , types . size ( ) ) ; <nl> - return false ; <nl> - } <nl> - if ( PyUFunc_RegisterLoopForType ( ufunc , npy_bfloat16_ , fn , <nl> - const_cast < int * > ( types . data ( ) ) , <nl> - nullptr ) < 0 ) { <nl> - return false ; <nl> - } <nl> - return true ; <nl> - } ; <nl> - <nl> - / / Comparisons <nl> - const std : : array < int , 3 > compare_types = { <nl> - { npy_bfloat16_ , npy_bfloat16_ , NPY_BOOL } } ; <nl> - <nl> - if ( ! register_ufunc ( " equal " , CompareUFunc < Bfloat16EqFunctor > , <nl> - compare_types ) ) { <nl> + if ( ! RegisterBfloat16Cast < unsigned long > ( NPY_ULONG , / / NOLINT <nl> + / * cast_is_safe = * / false ) ) { <nl> return false ; <nl> } <nl> - if ( ! register_ufunc ( " not_equal " , CompareUFunc < Bfloat16NeFunctor > , <nl> - compare_types ) ) { <nl> + if ( ! RegisterBfloat16Cast < unsigned long long > ( / / NOLINT <nl> + NPY_ULONGLONG , / * cast_is_safe = * / false ) ) { <nl> return false ; <nl> } <nl> - if ( ! register_ufunc ( " less " , CompareUFunc < Bfloat16LtFunctor > , compare_types ) ) { <nl> + if ( ! RegisterBfloat16Cast < uint64 > ( NPY_UINT64 , / * cast_is_safe = * / false ) ) { <nl> return false ; <nl> } <nl> - if ( ! register_ufunc ( " greater " , CompareUFunc < Bfloat16GtFunctor > , <nl> - compare_types ) ) { <nl> + if ( ! RegisterBfloat16Cast < int8 > ( NPY_INT8 , / * cast_is_safe = * / false ) ) { <nl> return false ; <nl> } <nl> - if ( ! register_ufunc ( " less_equal " , CompareUFunc < Bfloat16LeFunctor > , <nl> - compare_types ) ) { <nl> + if ( ! RegisterBfloat16Cast < int16 > ( NPY_INT16 , / * cast_is_safe = * / false ) ) { <nl> return false ; <nl> } <nl> - if ( ! register_ufunc ( " greater_equal " , CompareUFunc < Bfloat16GeFunctor > , <nl> - compare_types ) ) { <nl> + if ( ! RegisterBfloat16Cast < int > ( NPY_INT , / * cast_is_safe = * / false ) ) { <nl> + return false ; <nl> + } <nl> + if ( ! RegisterBfloat16Cast < long > ( NPY_LONG , / / NOLINT <nl> + / * cast_is_safe = * / false ) ) { <nl> + return false ; <nl> + } <nl> + if ( ! 
RegisterBfloat16Cast < long long > ( / / NOLINT <nl> + NPY_LONGLONG , / * cast_is_safe = * / false ) ) { <nl> + return false ; <nl> + } <nl> + / / Following the numpy convention . imag part is dropped when converting to <nl> + / / float . <nl> + if ( ! RegisterBfloat16Cast < std : : complex < float > > ( NPY_COMPLEX64 , <nl> + / * cast_is_safe = * / true ) ) { <nl> + return false ; <nl> + } <nl> + if ( ! RegisterBfloat16Cast < std : : complex < double > > ( NPY_COMPLEX128 , <nl> + / * cast_is_safe = * / true ) ) { <nl> return false ; <nl> } <nl> - return true ; <nl> - } <nl> <nl> - } / / namespace <nl> + bool ok = <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Add > > ( numpy . get ( ) , <nl> + " add " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Subtract > > ( <nl> + numpy . get ( ) , " subtract " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Multiply > > ( <nl> + numpy . get ( ) , " multiply " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : TrueDivide > > ( <nl> + numpy . get ( ) , " divide " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : LogAddExp > > ( <nl> + numpy . get ( ) , " logaddexp " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : LogAddExp2 > > ( <nl> + numpy . get ( ) , " logaddexp2 " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Negative > > ( <nl> + numpy . get ( ) , " negative " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Positive > > ( <nl> + numpy . get ( ) , " positive " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : TrueDivide > > ( <nl> + numpy . get ( ) , " true_divide " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : FloorDivide > > ( <nl> + numpy . get ( ) , " floor_divide " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Power > > ( numpy . get ( ) , <nl> + " power " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Remainder > > ( <nl> + numpy . get ( ) , " remainder " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Remainder > > ( <nl> + numpy . get ( ) , " mod " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Fmod > > ( numpy . get ( ) , <nl> + " fmod " ) & & <nl> + RegisterUFunc < ufuncs : : DivmodUFunc > ( numpy . get ( ) , " divmod " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Abs > > ( numpy . get ( ) , <nl> + " absolute " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Abs > > ( numpy . get ( ) , <nl> + " fabs " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Rint > > ( numpy . get ( ) , <nl> + " rint " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Sign > > ( numpy . get ( ) , <nl> + " sign " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Heaviside > > ( <nl> + numpy . get ( ) , " heaviside " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Conjugate > > ( <nl> + numpy . get ( ) , " conjugate " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Exp > > ( numpy . get ( ) , <nl> + " exp " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Exp2 > > ( numpy . 
get ( ) , <nl> + " exp2 " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Expm1 > > ( numpy . get ( ) , <nl> + " expm1 " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Log > > ( numpy . get ( ) , <nl> + " log " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Log2 > > ( numpy . get ( ) , <nl> + " log2 " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Log10 > > ( numpy . get ( ) , <nl> + " log10 " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Log1p > > ( numpy . get ( ) , <nl> + " log1p " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Sqrt > > ( numpy . get ( ) , <nl> + " sqrt " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Square > > ( numpy . get ( ) , <nl> + " square " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Cbrt > > ( numpy . get ( ) , <nl> + " cbrt " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Reciprocal > > ( <nl> + numpy . get ( ) , " reciprocal " ) & & <nl> + <nl> + / / Trigonometric functions <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Sin > > ( numpy . get ( ) , <nl> + " sin " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Cos > > ( numpy . get ( ) , <nl> + " cos " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Tan > > ( numpy . get ( ) , <nl> + " tan " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Arcsin > > ( numpy . get ( ) , <nl> + " arcsin " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Arccos > > ( numpy . get ( ) , <nl> + " arccos " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Arctan > > ( numpy . get ( ) , <nl> + " arctan " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Arctan2 > > ( <nl> + numpy . get ( ) , " arctan2 " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Hypot > > ( numpy . get ( ) , <nl> + " hypot " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Sinh > > ( numpy . get ( ) , <nl> + " sinh " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Cosh > > ( numpy . get ( ) , <nl> + " cosh " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Tanh > > ( numpy . get ( ) , <nl> + " tanh " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Arcsinh > > ( <nl> + numpy . get ( ) , " arcsinh " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Arccosh > > ( <nl> + numpy . get ( ) , " arccosh " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Arctanh > > ( <nl> + numpy . get ( ) , " arctanh " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Deg2rad > > ( <nl> + numpy . get ( ) , " deg2rad " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Rad2deg > > ( <nl> + numpy . get ( ) , " rad2deg " ) & & <nl> + <nl> + / / Comparison functions <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bool , ufuncs : : Eq > > ( numpy . get ( ) , <nl> + " equal " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bool , ufuncs : : Ne > > ( numpy . get ( ) , <nl> + " not_equal " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bool , ufuncs : : Lt > > ( numpy . 
get ( ) , <nl> + " less " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bool , ufuncs : : Gt > > ( numpy . get ( ) , <nl> + " greater " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bool , ufuncs : : Le > > ( numpy . get ( ) , <nl> + " less_equal " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bool , ufuncs : : Ge > > ( numpy . get ( ) , <nl> + " greater_equal " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Maximum > > ( <nl> + numpy . get ( ) , " maximum " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Minimum > > ( <nl> + numpy . get ( ) , " minimum " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Fmax > > ( numpy . get ( ) , <nl> + " fmax " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : Fmin > > ( numpy . get ( ) , <nl> + " fmin " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bool , ufuncs : : LogicalAnd > > ( <nl> + numpy . get ( ) , " logical_and " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bool , ufuncs : : LogicalOr > > ( <nl> + numpy . get ( ) , " logical_or " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bool , ufuncs : : LogicalXor > > ( <nl> + numpy . get ( ) , " logical_xor " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bool , ufuncs : : LogicalNot > > ( <nl> + numpy . get ( ) , " logical_not " ) & & <nl> + <nl> + / / Floating point functions <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bool , ufuncs : : IsFinite > > ( numpy . get ( ) , <nl> + " isfinite " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bool , ufuncs : : IsInf > > ( numpy . get ( ) , <nl> + " isinf " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bool , ufuncs : : IsNan > > ( numpy . get ( ) , <nl> + " isnan " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bool , ufuncs : : SignBit > > ( numpy . get ( ) , <nl> + " signbit " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : CopySign > > ( <nl> + numpy . get ( ) , " copysign " ) & & <nl> + RegisterUFunc < UnaryUFunc2 < bfloat16 , bfloat16 , bfloat16 , ufuncs : : Modf > > ( <nl> + numpy . get ( ) , " modf " ) & & <nl> + RegisterUFunc < BinaryUFunc2 < bfloat16 , int , bfloat16 , ufuncs : : Ldexp > > ( <nl> + numpy . get ( ) , " ldexp " ) & & <nl> + RegisterUFunc < UnaryUFunc2 < bfloat16 , bfloat16 , int , ufuncs : : Frexp > > ( <nl> + numpy . get ( ) , " frexp " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Floor > > ( numpy . get ( ) , <nl> + " floor " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Ceil > > ( numpy . get ( ) , <nl> + " ceil " ) & & <nl> + RegisterUFunc < UnaryUFunc < bfloat16 , bfloat16 , ufuncs : : Trunc > > ( numpy . get ( ) , <nl> + " trunc " ) & & <nl> + RegisterUFunc < BinaryUFunc < bfloat16 , bfloat16 , ufuncs : : NextAfter > > ( <nl> + numpy . get ( ) , " nextafter " ) ; <nl> + <nl> + return ok ; <nl> + } <nl> <nl> - void RegisterNumpyBfloat16 ( ) { <nl> - if ( npy_bfloat16_ > = 0 ) { <nl> + bool RegisterNumpyBfloat16 ( ) { <nl> + if ( npy_bfloat16 > = 0 ) { <nl> / / Already initialized . <nl> - return ; <nl> + return true ; <nl> } <nl> if ( ! Initialize ( ) ) { <nl> if ( ! PyErr_Occurred ( ) ) { <nl> PyErr_SetString ( PyExc_RuntimeError , " cannot load bfloat16 module . " ) ; <nl> } <nl> PyErr_Print ( ) ; <nl> + return false ; <nl> } <nl> + return true ; <nl> } <nl> <nl> - PyObject * Bfloat16PyType ( ) { <nl> - CHECK ( PyBfloat16_Type . tp_base ! 
= nullptr ) ; <nl> - Py_INCREF ( & PyBfloat16_Type ) ; <nl> + PyObject * Bfloat16Dtype ( ) { <nl> return reinterpret_cast < PyObject * > ( & PyBfloat16_Type ) ; <nl> } <nl> <nl> - int Bfloat16NumpyType ( ) { <nl> - CHECK_GE ( npy_bfloat16_ , 0 ) ; <nl> - return npy_bfloat16_ ; <nl> - } <nl> + int Bfloat16NumpyType ( ) { return npy_bfloat16 ; } <nl> <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / python / lib / core / bfloat16 . h <nl> ppp b / tensorflow / python / lib / core / bfloat16 . h <nl> limitations under the License . <nl> <nl> namespace tensorflow { <nl> <nl> - / / Register the bfloat16 numpy type . <nl> - void RegisterNumpyBfloat16 ( ) ; <nl> + / / Register the bfloat16 numpy type . Returns true on success . <nl> + bool RegisterNumpyBfloat16 ( ) ; <nl> <nl> - / / Returns the PyObject for the bfloat16 type . <nl> - PyObject * Bfloat16PyType ( ) ; <nl> + / / Returns a pointer to the bfloat16 dtype object . <nl> + PyObject * Bfloat16Dtype ( ) ; <nl> <nl> / / Returns the id number of the bfloat16 numpy type . <nl> int Bfloat16NumpyType ( ) ; <nl> mmm a / tensorflow / python / lib / core / bfloat16_test . py <nl> ppp b / tensorflow / python / lib / core / bfloat16_test . py <nl> <nl> # See the License for the specific language governing permissions and <nl> # limitations under the License . <nl> # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> - <nl> " " " Test cases for the bfloat16 Python type . " " " <nl> <nl> from __future__ import absolute_import <nl> from __future__ import division <nl> from __future__ import print_function <nl> <nl> + import collections <nl> + import copy <nl> + import itertools <nl> import math <nl> <nl> + from absl . testing import absltest <nl> + from absl . testing import parameterized <nl> + <nl> import numpy as np <nl> <nl> # pylint : disable = unused - import , g - bad - import - order <nl> from tensorflow . python import _pywrap_bfloat16 <nl> - from tensorflow . python . framework import dtypes <nl> - from tensorflow . python . platform import test <nl> - <nl> <nl> bfloat16 = _pywrap_bfloat16 . TF_bfloat16_type ( ) <nl> <nl> <nl> - def float_values ( ) : <nl> - " " " Returns values that should round trip exactly to float and back . " " " <nl> - epsilon = float . fromhex ( " 1 . 0p - 7 " ) <nl> - return [ <nl> - 0 . 0 , 1 . 0 , - 1 , 0 . 5 , - 0 . 5 , epsilon , 1 . 0 + epsilon , 1 . 0 - epsilon , <nl> - - 1 . 0 - epsilon , - 1 . 0 + epsilon , 3 . 5 , 42 . 0 , 255 . 0 , 256 . 0 , <nl> - float ( " inf " ) , <nl> - float ( " - inf " ) , <nl> - float ( " nan " ) <nl> - ] <nl> + def numpy_assert_allclose ( a , b , * * kwargs ) : <nl> + a = a . astype ( np . float32 ) if a . dtype = = bfloat16 else a <nl> + b = b . astype ( np . float32 ) if b . dtype = = bfloat16 else b <nl> + return np . testing . assert_allclose ( a , b , * * kwargs ) <nl> + <nl> <nl> + epsilon = float . fromhex ( " 1 . 0p - 7 " ) <nl> <nl> - class Bfloat16Test ( test . TestCase ) : <nl> + # Values that should round trip exactly to float and back . <nl> + FLOAT_VALUES = [ <nl> + 0 . 0 , 1 . 0 , - 1 , 0 . 5 , - 0 . 5 , epsilon , 1 . 0 + epsilon , 1 . 0 - epsilon , <nl> + - 1 . 0 - epsilon , - 1 . 0 + epsilon , 3 . 5 , 42 . 0 , 255 . 0 , 256 . 0 , <nl> + float ( " inf " ) , <nl> + float ( " - inf " ) , <nl> + float ( " nan " ) <nl> + ] <nl> <nl> - def _assertFloatIdentical ( self , v , w ) : <nl> - if math . isnan ( v ) : <nl> - self . assertTrue ( math . 
isnan ( w ) ) <nl> - else : <nl> - self . assertEqual ( v , w ) <nl> + <nl> + class Bfloat16Test ( parameterized . TestCase ) : <nl> + " " " Tests the non - numpy Python methods of the bfloat16 type . " " " <nl> <nl> def testRoundTripToFloat ( self ) : <nl> - for v in float_values ( ) : <nl> - self . _assertFloatIdentical ( v , float ( bfloat16 ( v ) ) ) <nl> + for v in FLOAT_VALUES : <nl> + np . testing . assert_equal ( v , float ( bfloat16 ( v ) ) ) <nl> + <nl> + def testRoundTripNumpyTypes ( self ) : <nl> + for dtype in [ np . float16 , np . float32 , np . float64 ] : <nl> + np . testing . assert_equal ( - 3 . 75 , dtype ( bfloat16 ( dtype ( - 3 . 75 ) ) ) ) <nl> + np . testing . assert_equal ( 1 . 5 , float ( bfloat16 ( dtype ( 1 . 5 ) ) ) ) <nl> + np . testing . assert_equal ( 4 . 5 , dtype ( bfloat16 ( np . array ( 4 . 5 , dtype ) ) ) ) <nl> + np . testing . assert_equal ( <nl> + np . array ( [ 2 , 5 , - 1 ] , bfloat16 ) , bfloat16 ( np . array ( [ 2 , 5 , - 1 ] , dtype ) ) ) <nl> <nl> def testRoundTripToInt ( self ) : <nl> for v in [ - 256 , - 255 , - 34 , - 2 , - 1 , 0 , 1 , 2 , 10 , 47 , 128 , 255 , 256 , 512 ] : <nl> self . assertEqual ( v , int ( bfloat16 ( v ) ) ) <nl> <nl> + # pylint : disable = g - complex - comprehension <nl> + @ parameterized . named_parameters ( ( { <nl> + " testcase_name " : " _ " + dtype . __name__ , <nl> + " dtype " : dtype <nl> + } for dtype in [ bfloat16 , np . float16 , np . float32 , np . float64 ] ) ) <nl> + def testRoundTripToNumpy ( self , dtype ) : <nl> + for v in FLOAT_VALUES : <nl> + np . testing . assert_equal ( v , bfloat16 ( dtype ( v ) ) ) <nl> + np . testing . assert_equal ( v , dtype ( bfloat16 ( dtype ( v ) ) ) ) <nl> + np . testing . assert_equal ( v , dtype ( bfloat16 ( np . array ( v , dtype ) ) ) ) <nl> + if dtype ! = bfloat16 : <nl> + np . testing . assert_equal ( <nl> + np . array ( FLOAT_VALUES , dtype ) , <nl> + bfloat16 ( np . array ( FLOAT_VALUES , dtype ) ) . astype ( dtype ) ) <nl> + <nl> def testStr ( self ) : <nl> self . assertEqual ( " 0 " , str ( bfloat16 ( 0 . 0 ) ) ) <nl> self . assertEqual ( " 1 " , str ( bfloat16 ( 1 . 0 ) ) ) <nl> def testStr ( self ) : <nl> self . assertEqual ( " nan " , str ( bfloat16 ( float ( " nan " ) ) ) ) <nl> <nl> def testRepr ( self ) : <nl> - self . assertEqual ( " bfloat16 ( 0 ) " , repr ( bfloat16 ( 0 ) ) ) <nl> - self . assertEqual ( " bfloat16 ( 1 ) " , repr ( bfloat16 ( 1 ) ) ) <nl> - self . assertEqual ( " bfloat16 ( - 3 . 5 ) " , repr ( bfloat16 ( - 3 . 5 ) ) ) <nl> - self . assertEqual ( " bfloat16 ( 0 . 0078125 ) " , <nl> - repr ( bfloat16 ( float . fromhex ( " 1 . 0p - 7 " ) ) ) ) <nl> - self . assertEqual ( " bfloat16 ( inf ) " , repr ( bfloat16 ( float ( " inf " ) ) ) ) <nl> - self . assertEqual ( " bfloat16 ( - inf ) " , repr ( bfloat16 ( float ( " - inf " ) ) ) ) <nl> - self . assertEqual ( " bfloat16 ( nan ) " , repr ( bfloat16 ( float ( " nan " ) ) ) ) <nl> + self . assertEqual ( " 0 " , repr ( bfloat16 ( 0 ) ) ) <nl> + self . assertEqual ( " 1 " , repr ( bfloat16 ( 1 ) ) ) <nl> + self . assertEqual ( " - 3 . 5 " , repr ( bfloat16 ( - 3 . 5 ) ) ) <nl> + self . assertEqual ( " 0 . 0078125 " , repr ( bfloat16 ( float . fromhex ( " 1 . 0p - 7 " ) ) ) ) <nl> + self . assertEqual ( " inf " , repr ( bfloat16 ( float ( " inf " ) ) ) ) <nl> + self . assertEqual ( " - inf " , repr ( bfloat16 ( float ( " - inf " ) ) ) ) <nl> + self . assertEqual ( " nan " , repr ( bfloat16 ( float ( " nan " ) ) ) ) <nl> <nl> def testHash ( self ) : <nl> self . assertEqual ( 0 , hash ( bfloat16 ( 0 . 
0 ) ) ) <nl> def testHash ( self ) : <nl> <nl> # Tests for Python operations <nl> def testNegate ( self ) : <nl> - for v in float_values ( ) : <nl> - self . _assertFloatIdentical ( - v , float ( - bfloat16 ( v ) ) ) <nl> + for v in FLOAT_VALUES : <nl> + np . testing . assert_equal ( - v , float ( - bfloat16 ( v ) ) ) <nl> <nl> def testAdd ( self ) : <nl> - self . _assertFloatIdentical ( 0 , float ( bfloat16 ( 0 ) + bfloat16 ( 0 ) ) ) <nl> - self . _assertFloatIdentical ( 1 , float ( bfloat16 ( 1 ) + bfloat16 ( 0 ) ) ) <nl> - self . _assertFloatIdentical ( 0 , float ( bfloat16 ( 1 ) + bfloat16 ( - 1 ) ) ) <nl> - self . _assertFloatIdentical ( 5 . 5 , float ( bfloat16 ( 2 ) + bfloat16 ( 3 . 5 ) ) ) <nl> - self . _assertFloatIdentical ( 1 . 25 , float ( bfloat16 ( 3 . 5 ) + bfloat16 ( - 2 . 25 ) ) ) <nl> - self . _assertFloatIdentical ( float ( " inf " ) , <nl> - float ( bfloat16 ( float ( " inf " ) ) + bfloat16 ( - 2 . 25 ) ) ) <nl> - self . _assertFloatIdentical ( float ( " - inf " ) , <nl> - float ( bfloat16 ( float ( " - inf " ) ) + bfloat16 ( - 2 . 25 ) ) ) <nl> + np . testing . assert_equal ( 0 , float ( bfloat16 ( 0 ) + bfloat16 ( 0 ) ) ) <nl> + np . testing . assert_equal ( 1 , float ( bfloat16 ( 1 ) + bfloat16 ( 0 ) ) ) <nl> + np . testing . assert_equal ( 0 , float ( bfloat16 ( 1 ) + bfloat16 ( - 1 ) ) ) <nl> + np . testing . assert_equal ( 5 . 5 , float ( bfloat16 ( 2 ) + bfloat16 ( 3 . 5 ) ) ) <nl> + np . testing . assert_equal ( 1 . 25 , float ( bfloat16 ( 3 . 5 ) + bfloat16 ( - 2 . 25 ) ) ) <nl> + np . testing . assert_equal ( <nl> + float ( " inf " ) , float ( bfloat16 ( float ( " inf " ) ) + bfloat16 ( - 2 . 25 ) ) ) <nl> + np . testing . assert_equal ( <nl> + float ( " - inf " ) , float ( bfloat16 ( float ( " - inf " ) ) + bfloat16 ( - 2 . 25 ) ) ) <nl> self . assertTrue ( math . isnan ( float ( bfloat16 ( 3 . 5 ) + bfloat16 ( float ( " nan " ) ) ) ) ) <nl> <nl> + # Test type promotion against Numpy scalar values . <nl> + self . assertEqual ( np . float32 , type ( bfloat16 ( 3 . 5 ) + np . float16 ( 2 . 25 ) ) ) <nl> + self . assertEqual ( np . float32 , type ( np . float16 ( 3 . 5 ) + bfloat16 ( 2 . 25 ) ) ) <nl> + self . assertEqual ( np . float32 , type ( bfloat16 ( 3 . 5 ) + np . float32 ( 2 . 25 ) ) ) <nl> + self . assertEqual ( np . float32 , type ( np . float32 ( 3 . 5 ) + bfloat16 ( 2 . 25 ) ) ) <nl> + self . assertEqual ( np . float64 , type ( bfloat16 ( 3 . 5 ) + np . float64 ( 2 . 25 ) ) ) <nl> + self . assertEqual ( np . float64 , type ( np . float64 ( 3 . 5 ) + bfloat16 ( 2 . 25 ) ) ) <nl> + self . assertEqual ( np . float64 , type ( bfloat16 ( 3 . 5 ) + float ( 2 . 25 ) ) ) <nl> + self . assertEqual ( np . float64 , type ( float ( 3 . 5 ) + bfloat16 ( 2 . 25 ) ) ) <nl> + self . assertEqual ( np . float32 , <nl> + type ( bfloat16 ( 3 . 5 ) + np . array ( 2 . 25 , np . float32 ) ) ) <nl> + self . assertEqual ( np . float32 , <nl> + type ( np . array ( 3 . 5 , np . float32 ) + bfloat16 ( 2 . 25 ) ) ) <nl> + <nl> def testSub ( self ) : <nl> - self . _assertFloatIdentical ( 0 , float ( bfloat16 ( 0 ) - bfloat16 ( 0 ) ) ) <nl> - self . _assertFloatIdentical ( 1 , float ( bfloat16 ( 1 ) - bfloat16 ( 0 ) ) ) <nl> - self . _assertFloatIdentical ( 2 , float ( bfloat16 ( 1 ) - bfloat16 ( - 1 ) ) ) <nl> - self . _assertFloatIdentical ( - 1 . 5 , float ( bfloat16 ( 2 ) - bfloat16 ( 3 . 5 ) ) ) <nl> - self . _assertFloatIdentical ( 5 . 75 , float ( bfloat16 ( 3 . 5 ) - bfloat16 ( - 2 . 25 ) ) ) <nl> - self . 
_assertFloatIdentical ( float ( " - inf " ) , <nl> - float ( bfloat16 ( - 2 . 25 ) - bfloat16 ( float ( " inf " ) ) ) ) <nl> - self . _assertFloatIdentical ( float ( " inf " ) , <nl> - float ( bfloat16 ( - 2 . 25 ) - bfloat16 ( float ( " - inf " ) ) ) ) <nl> + np . testing . assert_equal ( 0 , float ( bfloat16 ( 0 ) - bfloat16 ( 0 ) ) ) <nl> + np . testing . assert_equal ( 1 , float ( bfloat16 ( 1 ) - bfloat16 ( 0 ) ) ) <nl> + np . testing . assert_equal ( 2 , float ( bfloat16 ( 1 ) - bfloat16 ( - 1 ) ) ) <nl> + np . testing . assert_equal ( - 1 . 5 , float ( bfloat16 ( 2 ) - bfloat16 ( 3 . 5 ) ) ) <nl> + np . testing . assert_equal ( 5 . 75 , float ( bfloat16 ( 3 . 5 ) - bfloat16 ( - 2 . 25 ) ) ) <nl> + np . testing . assert_equal ( <nl> + float ( " - inf " ) , float ( bfloat16 ( - 2 . 25 ) - bfloat16 ( float ( " inf " ) ) ) ) <nl> + np . testing . assert_equal ( <nl> + float ( " inf " ) , float ( bfloat16 ( - 2 . 25 ) - bfloat16 ( float ( " - inf " ) ) ) ) <nl> self . assertTrue ( math . isnan ( float ( bfloat16 ( 3 . 5 ) - bfloat16 ( float ( " nan " ) ) ) ) ) <nl> <nl> def testMul ( self ) : <nl> - self . _assertFloatIdentical ( 0 , float ( bfloat16 ( 0 ) * bfloat16 ( 0 ) ) ) <nl> - self . _assertFloatIdentical ( 0 , float ( bfloat16 ( 1 ) * bfloat16 ( 0 ) ) ) <nl> - self . _assertFloatIdentical ( - 1 , float ( bfloat16 ( 1 ) * bfloat16 ( - 1 ) ) ) <nl> - self . _assertFloatIdentical ( - 7 . 875 , float ( bfloat16 ( 3 . 5 ) * bfloat16 ( - 2 . 25 ) ) ) <nl> - self . _assertFloatIdentical ( float ( " - inf " ) , <nl> - float ( bfloat16 ( float ( " inf " ) ) * bfloat16 ( - 2 . 25 ) ) ) <nl> - self . _assertFloatIdentical ( float ( " inf " ) , <nl> - float ( bfloat16 ( float ( " - inf " ) ) * bfloat16 ( - 2 . 25 ) ) ) <nl> + np . testing . assert_equal ( 0 , float ( bfloat16 ( 0 ) * bfloat16 ( 0 ) ) ) <nl> + np . testing . assert_equal ( 0 , float ( bfloat16 ( 1 ) * bfloat16 ( 0 ) ) ) <nl> + np . testing . assert_equal ( - 1 , float ( bfloat16 ( 1 ) * bfloat16 ( - 1 ) ) ) <nl> + np . testing . assert_equal ( - 7 . 875 , float ( bfloat16 ( 3 . 5 ) * bfloat16 ( - 2 . 25 ) ) ) <nl> + np . testing . assert_equal ( <nl> + float ( " - inf " ) , float ( bfloat16 ( float ( " inf " ) ) * bfloat16 ( - 2 . 25 ) ) ) <nl> + np . testing . assert_equal ( <nl> + float ( " inf " ) , float ( bfloat16 ( float ( " - inf " ) ) * bfloat16 ( - 2 . 25 ) ) ) <nl> self . assertTrue ( math . isnan ( float ( bfloat16 ( 3 . 5 ) * bfloat16 ( float ( " nan " ) ) ) ) ) <nl> <nl> def testDiv ( self ) : <nl> self . assertTrue ( math . isnan ( float ( bfloat16 ( 0 ) / bfloat16 ( 0 ) ) ) ) <nl> - self . _assertFloatIdentical ( float ( " inf " ) , float ( bfloat16 ( 1 ) / bfloat16 ( 0 ) ) ) <nl> - self . _assertFloatIdentical ( - 1 , float ( bfloat16 ( 1 ) / bfloat16 ( - 1 ) ) ) <nl> - self . _assertFloatIdentical ( - 1 . 75 , float ( bfloat16 ( 3 . 5 ) / bfloat16 ( - 2 ) ) ) <nl> - self . _assertFloatIdentical ( float ( " - inf " ) , <nl> - float ( bfloat16 ( float ( " inf " ) ) / bfloat16 ( - 2 . 25 ) ) ) <nl> - self . _assertFloatIdentical ( float ( " inf " ) , <nl> - float ( bfloat16 ( float ( " - inf " ) ) / bfloat16 ( - 2 . 25 ) ) ) <nl> + np . testing . assert_equal ( float ( " inf " ) , float ( bfloat16 ( 1 ) / bfloat16 ( 0 ) ) ) <nl> + np . testing . assert_equal ( - 1 , float ( bfloat16 ( 1 ) / bfloat16 ( - 1 ) ) ) <nl> + np . testing . assert_equal ( - 1 . 75 , float ( bfloat16 ( 3 . 5 ) / bfloat16 ( - 2 ) ) ) <nl> + np . testing . 
assert_equal ( <nl> + float ( " - inf " ) , float ( bfloat16 ( float ( " inf " ) ) / bfloat16 ( - 2 . 25 ) ) ) <nl> + np . testing . assert_equal ( <nl> + float ( " inf " ) , float ( bfloat16 ( float ( " - inf " ) ) / bfloat16 ( - 2 . 25 ) ) ) <nl> self . assertTrue ( math . isnan ( float ( bfloat16 ( 3 . 5 ) / bfloat16 ( float ( " nan " ) ) ) ) ) <nl> <nl> def testLess ( self ) : <nl> - for v in float_values ( ) : <nl> - for w in float_values ( ) : <nl> + for v in FLOAT_VALUES : <nl> + for w in FLOAT_VALUES : <nl> self . assertEqual ( v < w , bfloat16 ( v ) < bfloat16 ( w ) ) <nl> <nl> def testLessEqual ( self ) : <nl> - for v in float_values ( ) : <nl> - for w in float_values ( ) : <nl> + for v in FLOAT_VALUES : <nl> + for w in FLOAT_VALUES : <nl> self . assertEqual ( v < = w , bfloat16 ( v ) < = bfloat16 ( w ) ) <nl> <nl> def testGreater ( self ) : <nl> - for v in float_values ( ) : <nl> - for w in float_values ( ) : <nl> + for v in FLOAT_VALUES : <nl> + for w in FLOAT_VALUES : <nl> self . assertEqual ( v > w , bfloat16 ( v ) > bfloat16 ( w ) ) <nl> <nl> def testGreaterEqual ( self ) : <nl> - for v in float_values ( ) : <nl> - for w in float_values ( ) : <nl> + for v in FLOAT_VALUES : <nl> + for w in FLOAT_VALUES : <nl> self . assertEqual ( v > = w , bfloat16 ( v ) > = bfloat16 ( w ) ) <nl> <nl> def testEqual ( self ) : <nl> - for v in float_values ( ) : <nl> - for w in float_values ( ) : <nl> + for v in FLOAT_VALUES : <nl> + for w in FLOAT_VALUES : <nl> self . assertEqual ( v = = w , bfloat16 ( v ) = = bfloat16 ( w ) ) <nl> <nl> def testNotEqual ( self ) : <nl> - for v in float_values ( ) : <nl> - for w in float_values ( ) : <nl> + for v in FLOAT_VALUES : <nl> + for w in FLOAT_VALUES : <nl> self . assertEqual ( v ! = w , bfloat16 ( v ) ! = bfloat16 ( w ) ) <nl> <nl> def testNan ( self ) : <nl> a = np . isnan ( bfloat16 ( float ( " nan " ) ) ) <nl> self . assertTrue ( a ) <nl> - np . testing . assert_allclose ( np . array ( [ 1 . 0 , a ] ) , np . array ( [ 1 . 0 , a ] ) ) <nl> + numpy_assert_allclose ( np . array ( [ 1 . 0 , a ] ) , np . array ( [ 1 . 0 , a ] ) ) <nl> <nl> - a = np . array ( <nl> - [ bfloat16 ( 1 . 34375 ) , <nl> - bfloat16 ( 1 . 4375 ) , <nl> - bfloat16 ( float ( " nan " ) ) ] , <nl> - dtype = dtypes . bfloat16 . as_numpy_dtype ) <nl> + a = np . array ( [ bfloat16 ( 1 . 34375 ) , <nl> + bfloat16 ( 1 . 4375 ) , <nl> + bfloat16 ( float ( " nan " ) ) ] , <nl> + dtype = bfloat16 ) <nl> b = np . array ( <nl> [ bfloat16 ( 1 . 3359375 ) , <nl> bfloat16 ( 1 . 4375 ) , <nl> bfloat16 ( float ( " nan " ) ) ] , <nl> - dtype = dtypes . bfloat16 . as_numpy_dtype ) <nl> - np . testing . assert_allclose ( <nl> + dtype = bfloat16 ) <nl> + numpy_assert_allclose ( <nl> a , b , rtol = 0 . 1 , atol = 0 . 1 , equal_nan = True , err_msg = " " , verbose = True ) <nl> <nl> + def testSort ( self ) : <nl> + values_to_sort = np . float32 ( FLOAT_VALUES ) <nl> + sorted_f32 = np . sort ( values_to_sort ) <nl> + sorted_bf16 = np . sort ( values_to_sort . astype ( bfloat16 ) ) <nl> + np . testing . assert_equal ( sorted_f32 , np . float32 ( sorted_bf16 ) ) <nl> + <nl> <nl> - class Bfloat16NumPyTest ( test . TestCase ) : <nl> + BinaryOp = collections . namedtuple ( " BinaryOp " , [ " op " ] ) <nl> + <nl> + UNARY_UFUNCS = [ <nl> + np . negative , np . positive , np . absolute , np . fabs , np . rint , np . sign , <nl> + np . conjugate , np . exp , np . exp2 , np . expm1 , np . log , np . log10 , np . log1p , <nl> + np . log2 , np . sqrt , np . square , np . cbrt , np . reciprocal , np . sin , np . 
cos , np . tan , <nl> + np . arcsin , np . arccos , np . arctan , np . sinh , np . cosh , np . tanh , np . arcsinh , <nl> + np . arccosh , np . arctanh , np . deg2rad , np . rad2deg , np . floor , np . ceil , np . trunc <nl> + ] <nl> + <nl> + BINARY_UFUNCS = [ <nl> + np . add , np . subtract , np . multiply , np . divide , np . logaddexp , np . logaddexp2 , <nl> + np . floor_divide , np . power , np . remainder , np . fmod , np . heaviside , np . arctan2 , <nl> + np . hypot , np . maximum , np . minimum , np . fmax , np . fmin , np . copysign <nl> + ] <nl> + <nl> + BINARY_PREDICATE_UFUNCS = [ <nl> + np . equal , np . not_equal , np . less , np . greater , np . less_equal , <nl> + np . greater_equal , np . logical_and , np . logical_or , np . logical_xor <nl> + ] <nl> + <nl> + <nl> + class Bfloat16NumPyTest ( parameterized . TestCase ) : <nl> + " " " Tests the NumPy integration of the bfloat16 type . " " " <nl> <nl> def testDtype ( self ) : <nl> self . assertEqual ( bfloat16 , np . dtype ( bfloat16 ) ) <nl> <nl> + def testDeepCopyDoesNotAlterHash ( self ) : <nl> + # For context , see https : / / github . com / google / jax / issues / 4651 . If the hash <nl> + # value of the type descriptor is not initialized correctly , a deep copy <nl> + # can change the type hash . <nl> + dtype = np . dtype ( bfloat16 ) <nl> + h = hash ( dtype ) <nl> + _ = copy . deepcopy ( dtype ) <nl> + self . assertEqual ( h , hash ( dtype ) ) <nl> + <nl> def testArray ( self ) : <nl> x = np . array ( [ [ 1 , 2 , 3 ] ] , dtype = bfloat16 ) <nl> self . assertEqual ( bfloat16 , x . dtype ) <nl> - self . assertEqual ( " [ [ bfloat16 ( 1 ) bfloat16 ( 2 ) bfloat16 ( 3 ) ] ] " , str ( x ) ) <nl> - self . assertAllEqual ( x , x ) <nl> - self . assertAllClose ( x , x ) <nl> + self . assertEqual ( " [ [ 1 2 3 ] ] " , str ( x ) ) <nl> + np . testing . assert_equal ( x , x ) <nl> + numpy_assert_allclose ( x , x ) <nl> self . assertTrue ( ( x = = x ) . all ( ) ) <nl> <nl> def testComparisons ( self ) : <nl> def testComparisons ( self ) : <nl> bx = x . astype ( bfloat16 ) <nl> y = np . array ( [ 82432 , 7 , 0 ] , dtype = np . float32 ) <nl> by = y . astype ( bfloat16 ) <nl> - self . assertAllEqual ( x = = y , bx = = by ) <nl> - self . assertAllEqual ( x ! = y , bx ! = by ) <nl> - self . assertAllEqual ( x < y , bx < by ) <nl> - self . assertAllEqual ( x > y , bx > by ) <nl> - self . assertAllEqual ( x < = y , bx < = by ) <nl> - self . assertAllEqual ( x > = y , bx > = by ) <nl> + np . testing . assert_equal ( x = = y , bx = = by ) <nl> + np . testing . assert_equal ( x ! = y , bx ! = by ) <nl> + np . testing . assert_equal ( x < y , bx < by ) <nl> + np . testing . assert_equal ( x > y , bx > by ) <nl> + np . testing . assert_equal ( x < = y , bx < = by ) <nl> + np . testing . assert_equal ( x > = y , bx > = by ) <nl> <nl> def testEqual2 ( self ) : <nl> a = np . array ( [ 401408 ] , bfloat16 ) <nl> def testEqual2 ( self ) : <nl> <nl> def testCasts ( self ) : <nl> for dtype in [ <nl> - np . float16 , np . float32 , np . float64 , np . int32 , np . int64 , <nl> - np . complex64 , np . complex128 ] : <nl> + np . float16 , np . float32 , np . float64 , np . int8 , np . int16 , np . int32 , <nl> + np . int64 , np . complex64 , np . complex128 , np . uint8 , np . uint16 , np . uint32 , <nl> + np . uint64 , np . intc , np . int_ , np . longlong , np . uintc , np . ulonglong <nl> + ] : <nl> x = np . array ( [ [ 1 , 2 , 3 ] ] , dtype = dtype ) <nl> y = x . astype ( bfloat16 ) <nl> z = y . 
astype ( dtype ) <nl> def testConformNumpyComplex ( self ) : <nl> x = np . array ( [ 1 . 1 , 2 . 2 + 2 . 2j , 3 . 3 ] , dtype = dtype ) <nl> y_np = x . astype ( np . float32 ) <nl> y_tf = x . astype ( bfloat16 ) <nl> - self . assertAllClose ( y_np , y_tf , atol = 2e - 2 ) <nl> + numpy_assert_allclose ( y_np , y_tf , atol = 2e - 2 ) <nl> <nl> z_np = y_np . astype ( dtype ) <nl> z_tf = y_tf . astype ( dtype ) <nl> - self . assertAllClose ( z_np , z_tf , atol = 2e - 2 ) <nl> - <nl> - def testAdd ( self ) : <nl> - x = np . array ( [ [ 1 , 2 , 3 ] ] , dtype = bfloat16 ) <nl> - y = np . array ( [ [ 4 , 5 , 6 ] ] , dtype = bfloat16 ) <nl> - self . assertAllClose ( np . array ( [ [ 5 , 7 , 9 ] ] ) , x + y ) <nl> - <nl> - def testLogSumExp ( self ) : <nl> - x = np . array ( [ [ 1 , 2 , 3 ] ] , dtype = np . float32 ) <nl> - y = np . array ( [ [ 4 , 5 , 6 ] ] , dtype = np . float32 ) <nl> - self . assertAllClose ( np . logaddexp ( x , y ) , <nl> - np . logaddexp ( x . astype ( bfloat16 ) , y . astype ( bfloat16 ) ) , <nl> - atol = 2e - 2 ) <nl> + numpy_assert_allclose ( z_np , z_tf , atol = 2e - 2 ) <nl> <nl> def testArange ( self ) : <nl> - self . assertAllEqual ( <nl> + np . testing . assert_equal ( <nl> np . arange ( 100 , dtype = np . float32 ) . astype ( bfloat16 ) , <nl> np . arange ( 100 , dtype = bfloat16 ) ) <nl> - self . assertAllEqual ( <nl> + np . testing . assert_equal ( <nl> np . arange ( - 10 . 5 , 7 . 8 , 0 . 5 , dtype = np . float32 ) . astype ( bfloat16 ) , <nl> np . arange ( - 10 . 5 , 7 . 8 , 0 . 5 , dtype = bfloat16 ) ) <nl> - self . assertAllEqual ( <nl> + np . testing . assert_equal ( <nl> np . arange ( - 0 . , - 7 . , - 0 . 25 , dtype = np . float32 ) . astype ( bfloat16 ) , <nl> np . arange ( - 0 . , - 7 . , - 0 . 25 , dtype = bfloat16 ) ) <nl> - self . assertAllEqual ( <nl> + np . testing . assert_equal ( <nl> np . arange ( - 16384 . , 16384 . , 64 . , dtype = np . float32 ) . astype ( bfloat16 ) , <nl> np . arange ( - 16384 . , 16384 . , 64 . , dtype = bfloat16 ) ) <nl> <nl> - def testSort ( self ) : <nl> - values_to_sort = np . float32 ( float_values ( ) ) <nl> - sorted_f32 = np . sort ( values_to_sort ) <nl> - sorted_bf16 = np . sort ( values_to_sort . astype ( bfloat16 ) ) <nl> - self . assertAllEqual ( sorted_f32 , np . float32 ( sorted_bf16 ) ) <nl> + # pylint : disable = g - complex - comprehension <nl> + @ parameterized . named_parameters ( ( { <nl> + " testcase_name " : " _ " + op . __name__ , <nl> + " op " : op <nl> + } for op in UNARY_UFUNCS ) ) <nl> + def testUnaryUfunc ( self , op ) : <nl> + rng = np . random . RandomState ( seed = 42 ) <nl> + x = rng . randn ( 3 , 7 , 10 ) . astype ( bfloat16 ) <nl> + numpy_assert_allclose ( <nl> + op ( x ) . astype ( np . float32 ) , op ( x . astype ( np . float32 ) ) , rtol = 1e - 2 ) <nl> + <nl> + @ parameterized . named_parameters ( ( { <nl> + " testcase_name " : " _ " + op . __name__ , <nl> + " op " : op <nl> + } for op in BINARY_UFUNCS ) ) <nl> + def testBinaryUfunc ( self , op ) : <nl> + rng = np . random . RandomState ( seed = 42 ) <nl> + x = rng . randn ( 3 , 7 , 10 ) . astype ( bfloat16 ) <nl> + y = rng . randn ( 4 , 1 , 7 , 10 ) . astype ( bfloat16 ) <nl> + numpy_assert_allclose ( <nl> + op ( x , y ) . astype ( np . float32 ) , <nl> + op ( x . astype ( np . float32 ) , y . astype ( np . float32 ) ) , <nl> + rtol = 1e - 2 ) <nl> + <nl> + @ parameterized . named_parameters ( ( { <nl> + " testcase_name " : " _ " + op . 
__name__ , <nl> + " op " : op <nl> + } for op in BINARY_PREDICATE_UFUNCS ) ) <nl> + def testBinaryPredicateUfunc ( self , op ) : <nl> + rng = np . random . RandomState ( seed = 42 ) <nl> + x = rng . randn ( 3 , 7 ) . astype ( bfloat16 ) <nl> + y = rng . randn ( 4 , 1 , 7 ) . astype ( bfloat16 ) <nl> + np . testing . assert_equal ( <nl> + op ( x , y ) , op ( x . astype ( np . float32 ) , y . astype ( np . float32 ) ) ) <nl> + <nl> + @ parameterized . named_parameters ( ( { <nl> + " testcase_name " : " _ " + op . __name__ , <nl> + " op " : op <nl> + } for op in [ np . isfinite , np . isinf , np . isnan , np . signbit , np . logical_not ] ) ) <nl> + def testPredicateUfunc ( self , op ) : <nl> + rng = np . random . RandomState ( seed = 42 ) <nl> + shape = ( 3 , 7 , 10 ) <nl> + posinf_flips = rng . rand ( * shape ) < 0 . 1 <nl> + neginf_flips = rng . rand ( * shape ) < 0 . 1 <nl> + nan_flips = rng . rand ( * shape ) < 0 . 1 <nl> + vals = rng . randn ( * shape ) <nl> + vals = np . where ( posinf_flips , np . inf , vals ) <nl> + vals = np . where ( neginf_flips , - np . inf , vals ) <nl> + vals = np . where ( nan_flips , np . nan , vals ) <nl> + vals = vals . astype ( bfloat16 ) <nl> + np . testing . assert_equal ( op ( vals ) , op ( vals . astype ( np . float32 ) ) ) <nl> + <nl> + def testDivmod ( self ) : <nl> + rng = np . random . RandomState ( seed = 42 ) <nl> + x = rng . randn ( 3 , 7 ) . astype ( bfloat16 ) <nl> + y = rng . randn ( 4 , 1 , 7 ) . astype ( bfloat16 ) <nl> + o1 , o2 = np . divmod ( x , y ) <nl> + e1 , e2 = np . divmod ( x . astype ( np . float32 ) , y . astype ( np . float32 ) ) <nl> + numpy_assert_allclose ( o1 , e1 , rtol = 1e - 2 ) <nl> + numpy_assert_allclose ( o2 , e2 , rtol = 1e - 2 ) <nl> + <nl> + def testModf ( self ) : <nl> + rng = np . random . RandomState ( seed = 42 ) <nl> + x = rng . randn ( 3 , 7 ) . astype ( bfloat16 ) <nl> + o1 , o2 = np . modf ( x ) <nl> + e1 , e2 = np . modf ( x . astype ( np . float32 ) ) <nl> + numpy_assert_allclose ( o1 . astype ( np . float32 ) , e1 , rtol = 1e - 2 ) <nl> + numpy_assert_allclose ( o2 . astype ( np . float32 ) , e2 , rtol = 1e - 2 ) <nl> + <nl> + def testLdexp ( self ) : <nl> + rng = np . random . RandomState ( seed = 42 ) <nl> + x = rng . randn ( 3 , 7 ) . astype ( bfloat16 ) <nl> + y = rng . randint ( - 50 , 50 , ( 1 , 7 ) ) <nl> + numpy_assert_allclose ( <nl> + np . ldexp ( x , y ) . astype ( np . float32 ) , <nl> + np . ldexp ( x . astype ( np . float32 ) , y ) , <nl> + rtol = 1e - 2 , <nl> + atol = 1e - 6 ) <nl> + <nl> + def testFrexp ( self ) : <nl> + rng = np . random . RandomState ( seed = 42 ) <nl> + x = rng . randn ( 3 , 7 ) . astype ( bfloat16 ) <nl> + mant1 , exp1 = np . frexp ( x ) <nl> + mant2 , exp2 = np . frexp ( x . astype ( np . float32 ) ) <nl> + np . testing . assert_equal ( exp1 , exp2 ) <nl> + numpy_assert_allclose ( mant1 , mant2 , rtol = 1e - 2 ) <nl> + <nl> + def testNextAfter ( self ) : <nl> + one = np . array ( 1 . , dtype = bfloat16 ) <nl> + two = np . array ( 2 . , dtype = bfloat16 ) <nl> + zero = np . array ( 0 . , dtype = bfloat16 ) <nl> + nan = np . array ( np . nan , dtype = bfloat16 ) <nl> + np . testing . assert_equal ( np . nextafter ( one , two ) - one , epsilon ) <nl> + np . testing . assert_equal ( np . nextafter ( one , zero ) - one , - epsilon / 2 ) <nl> + np . testing . assert_equal ( np . isnan ( np . nextafter ( nan , one ) ) , True ) <nl> + np . testing . assert_equal ( np . isnan ( np . nextafter ( one , nan ) ) , True ) <nl> + np . testing . assert_equal ( np . 
nextafter ( one , one ) , one ) <nl> + smallest_denormal = float . fromhex ( " 1 . 0p - 133 " ) <nl> + np . testing . assert_equal ( np . nextafter ( zero , one ) , smallest_denormal ) <nl> + np . testing . assert_equal ( np . nextafter ( zero , - one ) , - smallest_denormal ) <nl> + for a , b in itertools . permutations ( [ 0 . , - 0 . , nan ] , 2 ) : <nl> + np . testing . assert_equal ( <nl> + np . nextafter ( <nl> + np . array ( a , dtype = np . float32 ) , np . array ( b , dtype = np . float32 ) ) , <nl> + np . nextafter ( <nl> + np . array ( a , dtype = bfloat16 ) , np . array ( b , dtype = bfloat16 ) ) ) <nl> <nl> <nl> if __name__ = = " __main__ " : <nl> - test . main ( ) <nl> + absltest . main ( ) <nl> mmm a / tensorflow / python / lib / core / bfloat16_wrapper . cc <nl> ppp b / tensorflow / python / lib / core / bfloat16_wrapper . cc <nl> PYBIND11_MODULE ( _pywrap_bfloat16 , m ) { <nl> tensorflow : : RegisterNumpyBfloat16 ( ) ; <nl> <nl> m . def ( " TF_bfloat16_type " , <nl> - [ ] { return pybind11 : : handle ( tensorflow : : Bfloat16PyType ( ) ) ; } ) ; <nl> + [ ] { return pybind11 : : handle ( tensorflow : : Bfloat16Dtype ( ) ) ; } ) ; <nl> } <nl> mmm a / tensorflow / python / ops / handle_data_util . py <nl> ppp b / tensorflow / python / ops / handle_data_util . py <nl> <nl> from __future__ import print_function <nl> <nl> from tensorflow . python . client import pywrap_tf_session <nl> - from tensorflow . python . framework import cpp_shape_inference_pb2 <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import ops <nl> - from tensorflow . python . util import compat <nl> <nl> <nl> - def get_resource_handle_data ( graph_op ) : <nl> - assert type ( graph_op ) = = ops . Tensor # pylint : disable = unidiomatic - typecheck <nl> - <nl> - handle_data = pywrap_tf_session . GetHandleShapeAndType ( <nl> - graph_op . graph . _c_graph , graph_op . _as_tf_output ( ) ) # pylint : disable = protected - access <nl> - <nl> - return cpp_shape_inference_pb2 . CppShapeInferenceResult . HandleData . FromString ( <nl> - compat . as_bytes ( handle_data ) ) <nl> + get_resource_handle_data = ops . get_resource_handle_data <nl> <nl> <nl> def copy_handle_data ( source_t , target_t ) : <nl> mmm a / tensorflow / python / ops / image_ops_impl . py <nl> ppp b / tensorflow / python / ops / image_ops_impl . py <nl> def image_gradients ( image ) : <nl> image = tf . reshape ( tf . range ( IMAGE_HEIGHT * IMAGE_WIDTH * CHANNELS , <nl> delta = 1 , dtype = tf . float32 ) , <nl> shape = ( BATCH_SIZE , IMAGE_HEIGHT , IMAGE_WIDTH , CHANNELS ) ) <nl> - dx , dy = tf . image . image_gradients ( image ) <nl> + dy , dx = tf . image . image_gradients ( image ) <nl> print ( image [ 0 , : , : , 0 ] ) <nl> tf . Tensor ( <nl> [ [ 0 . 1 . 2 . 3 . 4 . ] <nl> def image_gradients ( image ) : <nl> [ 10 . 11 . 12 . 13 . 14 . ] <nl> [ 15 . 16 . 17 . 18 . 19 . ] <nl> [ 20 . 21 . 22 . 23 . 24 . ] ] , shape = ( 5 , 5 ) , dtype = float32 ) <nl> - print ( dx [ 0 , : , : , 0 ] ) <nl> + print ( dy [ 0 , : , : , 0 ] ) <nl> tf . Tensor ( <nl> [ [ 5 . 5 . 5 . 5 . 5 . ] <nl> [ 5 . 5 . 5 . 5 . 5 . ] <nl> [ 5 . 5 . 5 . 5 . 5 . ] <nl> [ 5 . 5 . 5 . 5 . 5 . ] <nl> [ 0 . 0 . 0 . 0 . 0 . ] ] , shape = ( 5 , 5 ) , dtype = float32 ) <nl> - print ( dy [ 0 , : , : , 0 ] ) <nl> + print ( dx [ 0 , : , : , 0 ] ) <nl> tf . Tensor ( <nl> [ [ 1 . 1 . 1 . 1 . 0 . ] <nl> [ 1 . 1 . 1 . 1 . 0 . ] <nl> mmm a / tensorflow / python / saved_model / load_test . py <nl> ppp b / tensorflow / python / saved_model / load_test . 
py <nl> def get_gradient ( obj ) : <nl> self . assertIsNotNone ( imported_gradient ) <nl> self . assertAllClose ( imported_gradient , 2 . ) <nl> <nl> + def test_nested_fn_backprop ( self , cycles ) : <nl> + weight = variables . Variable ( 2 . , trainable = True ) <nl> + <nl> + @ def_function . function ( input_signature = [ <nl> + tensor_spec . TensorSpec ( dtype = dtypes . float32 , shape = ( None , None ) ) ] ) <nl> + def g ( x ) : <nl> + weight . read_value ( ) # Just get the tape to watch the variable <nl> + handle = array_ops . identity ( weight . handle ) <nl> + @ def_function . function <nl> + def launder_var_handle ( ) : <nl> + return array_ops . identity ( handle ) <nl> + return x + resource_variable_ops . read_variable_op ( <nl> + launder_var_handle ( ) , dtypes . float32 ) <nl> + <nl> + root = tracking . AutoTrackable ( ) <nl> + root . weight = weight <nl> + root . g = g <nl> + imported = cycle ( root , cycles ) <nl> + def get_gradient ( obj , persistent ) : <nl> + with backprop . GradientTape ( persistent = persistent ) as t : <nl> + x = constant_op . constant ( [ [ 1 . , 2 . , 3 . ] , [ 1 . , - 2 , 3 . ] ] ) <nl> + y = obj . g ( x ) <nl> + self . assertAllClose ( y , obj . weight + x ) <nl> + loss = math_ops . reduce_sum ( y ) <nl> + return t . gradient ( loss , obj . weight ) <nl> + <nl> + imported_gradient = get_gradient ( imported , persistent = False ) <nl> + original_gradient = get_gradient ( root , persistent = False ) <nl> + self . assertIsNotNone ( original_gradient ) <nl> + self . assertAllClose ( original_gradient , 6 . ) <nl> + self . assertIsNotNone ( imported_gradient ) <nl> + self . assertAllClose ( imported_gradient , 6 . ) <nl> + <nl> def test_restored_func_with_captured_var_backprop_float32 ( self , cycles ) : <nl> self . _test_restored_func_with_captured_var_backprop ( cycles , dtypes . float32 ) <nl> <nl> def increment_v ( x ) : <nl> # allocations at a lower level . <nl> @ test_util . assert_no_new_pyobjects_executing_eagerly <nl> def test_functions_cleaned ( self ) : <nl> + self . skipTest ( " TODO ( b / 175152958 ) : The test is leaking function definitions " ) <nl> if sys . version_info . major < 3 : <nl> self . skipTest ( " Not working in Python 2 " ) <nl> root = module . Module ( ) <nl> mmm a / tensorflow / python / tpu / tpu_embedding_v2_correctness_test . py <nl> ppp b / tensorflow / python / tpu / tpu_embedding_v2_correctness_test . py <nl> def setUp ( self ) : <nl> self . feature_friends_row_lengths = [ 1 , 3 , 1 , 3 ] <nl> self . resolver = None <nl> <nl> - def tearDown ( self ) : <nl> - if self . resolver : <nl> - tpu_strategy_util . shutdown_tpu_system ( self . resolver ) <nl> - super ( TPUEmbeddingCorrectness , self ) . tearDown ( ) <nl> - <nl> def _get_strategy ( self ) : <nl> self . resolver = tpu_cluster_resolver . TPUClusterResolver ( <nl> tpu = FLAGS . tpu , zone = FLAGS . zone , project = FLAGS . project ) <nl> mmm a / tensorflow / python / tpu / tpu_embedding_v2_test . py <nl> ppp b / tensorflow / python / tpu / tpu_embedding_v2_test . py <nl> def setUp ( self ) : <nl> self . cpu_mid_level = self . build_mid_level ( <nl> self . second_mid_level_contents , self . cpu_mid_level_optimizer ) <nl> <nl> - def tearDown ( self ) : <nl> - tpu_strategy_util . shutdown_tpu_system ( self . resolver ) <nl> - super ( TPUEmbeddingCheckpointTest , self ) . tearDown ( ) <nl> - <nl> def test_checkpoint_save_retrieves ( self ) : <nl> # Ensure that the variables from the first model are loaded . <nl> self . first_mid_level . 
_load_variables ( ) <nl> def setUp ( self ) : <nl> self . feature_friends_row_lengths = [ 1 , 3 , 1 , 3 ] <nl> self . resolver = None <nl> <nl> - def tearDown ( self ) : <nl> - if self . resolver : <nl> - tpu_strategy_util . shutdown_tpu_system ( self . resolver ) <nl> - super ( TPUEmbeddingTest , self ) . tearDown ( ) <nl> - <nl> def test_tables_with_same_name ( self ) : <nl> with self . assertRaisesRegex ( <nl> ValueError , ' Multiple tables with name table found . ' ) : <nl> mmm a / tensorflow / python / util / nest . py <nl> ppp b / tensorflow / python / util / nest . py <nl> def assert_same_structure ( nest1 , nest2 , check_types = True , <nl> expand_composites = False ) : <nl> " " " Asserts that two structures are nested in the same way . <nl> <nl> - Note that namedtuples with identical name and fields are always considered <nl> - to have the same shallow structure ( even with ` check_types = True ` ) . <nl> - For instance , this code will print ` True ` : <nl> + Note the method does not check the types of data inside the structures . <nl> <nl> - ` ` ` python <nl> - def nt ( a , b ) : <nl> - return collections . namedtuple ( ' foo ' , ' a b ' ) ( a , b ) <nl> - print ( assert_same_structure ( nt ( 0 , 1 ) , nt ( 2 , 3 ) ) ) <nl> - ` ` ` <nl> + Examples : <nl> + <nl> + * These scalar vs . scalar comparisons will pass : <nl> + <nl> + > > > tf . nest . assert_same_structure ( 1 . 5 , tf . Variable ( 1 , tf . uint32 ) ) <nl> + > > > tf . nest . assert_same_structure ( " abc " , np . array ( [ 1 , 2 ] ) ) <nl> + <nl> + * These sequence vs . sequence comparisons will pass : <nl> + <nl> + > > > structure1 = ( ( ( 1 , 2 ) , 3 ) , 4 , ( 5 , 6 ) ) <nl> + > > > structure2 = ( ( ( " foo1 " , " foo2 " ) , " foo3 " ) , " foo4 " , ( " foo5 " , " foo6 " ) ) <nl> + > > > structure3 = [ ( ( " a " , " b " ) , " c " ) , " d " , [ " e " , " f " ] ] <nl> + > > > tf . nest . assert_same_structure ( structure1 , structure2 ) <nl> + > > > tf . nest . assert_same_structure ( structure1 , structure3 , check_types = False ) <nl> + <nl> + > > > import collections <nl> + > > > tf . nest . assert_same_structure ( <nl> + . . . collections . namedtuple ( " bar " , " a b " ) ( 1 , 2 ) , <nl> + . . . collections . namedtuple ( " foo " , " a b " ) ( 2 , 3 ) , <nl> + . . . check_types = False ) <nl> + <nl> + > > > tf . nest . assert_same_structure ( <nl> + . . . collections . namedtuple ( " bar " , " a b " ) ( 1 , 2 ) , <nl> + . . . { " a " : 1 , " b " : 2 } , <nl> + . . . check_types = False ) <nl> + <nl> + > > > tf . nest . assert_same_structure ( <nl> + . . . { " a " : 1 , " b " : 2 , " c " : 3 } , <nl> + . . . { " c " : 6 , " b " : 5 , " a " : 4 } ) <nl> + <nl> + > > > ragged_tensor1 = tf . RaggedTensor . from_row_splits ( <nl> + . . . values = [ 3 , 1 , 4 , 1 , 5 , 9 , 2 , 6 ] , <nl> + . . . row_splits = [ 0 , 4 , 4 , 7 , 8 , 8 ] ) <nl> + > > > ragged_tensor2 = tf . RaggedTensor . from_row_splits ( <nl> + . . . values = [ 3 , 1 , 4 ] , <nl> + . . . row_splits = [ 0 , 3 ] ) <nl> + > > > tf . nest . assert_same_structure ( <nl> + . . . ragged_tensor1 , <nl> + . . . ragged_tensor2 , <nl> + . . . expand_composites = True ) <nl> + <nl> + * These examples will raise exceptions : <nl> + <nl> + > > > tf . nest . assert_same_structure ( [ 0 , 1 ] , np . array ( [ 0 , 1 ] ) ) <nl> + Traceback ( most recent call last ) : <nl> + . . . <nl> + ValueError : The two structures don ' t have the same nested structure <nl> + <nl> + > > > tf . nest . assert_same_structure ( <nl> + . . . collections . 
namedtuple ( ' bar ' , ' a b ' ) ( 1 , 2 ) , <nl> + . . . collections . namedtuple ( ' foo ' , ' a b ' ) ( 2 , 3 ) ) <nl> + Traceback ( most recent call last ) : <nl> + . . . <nl> + TypeError : The two structures don ' t have the same nested structure <nl> <nl> Args : <nl> nest1 : an arbitrarily nested structure . <nl> mmm a / tensorflow / python / util / tf_stack . cc <nl> ppp b / tensorflow / python / util / tf_stack . cc <nl> limitations under the License . <nl> / / We store the retrieved stack trace within the Node object directly . Then <nl> / / whenever the graph is instantiated / copies , we copy the stack trace with it . <nl> / / Since the graph instantiation goes through the protobuf roundtrip , we store <nl> - / / the original Graph with stack traces attached in FunctionLibraryDefinition . <nl> + / / the original stack traces mapping attached in FunctionLibraryDefinition . <nl> <nl> # include < Python . h > <nl> # include < frameobject . h > <nl> mmm a / tensorflow / stream_executor / cuda / BUILD <nl> ppp b / tensorflow / stream_executor / cuda / BUILD <nl> cc_library ( <nl> ] ) , <nl> ) <nl> <nl> + cc_library ( <nl> + name = " cuda_asm_compiler " , <nl> + srcs = if_cuda_is_configured ( [ " cuda_asm_compiler . cc " ] ) , <nl> + deps = if_cuda_is_configured ( [ <nl> + " / / tensorflow / core : lib_proto_parsing " , <nl> + " / / tensorflow / stream_executor / gpu : asm_compiler " , <nl> + " / / tensorflow / stream_executor / gpu : gpu_driver_header " , <nl> + ] ) , <nl> + ) <nl> + <nl> cc_library ( <nl> name = " cuda_gpu_executor " , <nl> srcs = if_cuda_is_configured ( [ " cuda_gpu_executor . cc " ] ) , <nl> cc_library ( <nl> " : cuda_platform_id " , <nl> " : cuda_stream " , <nl> " : cuda_timer " , <nl> + " : cuda_asm_compiler " , <nl> " @ com_google_absl / / absl / strings " , <nl> " / / tensorflow / stream_executor : event " , <nl> " / / tensorflow / stream_executor : plugin_registry " , <nl> new file mode 100644 <nl> index 0000000000000 . . f92d3c487d057 <nl> mmm / dev / null <nl> ppp b / tensorflow / stream_executor / cuda / cuda_asm_compiler . cc <nl> <nl> + / * Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / core / lib / core / errors . h " <nl> + # include " tensorflow / stream_executor / gpu / asm_compiler . h " <nl> + # include " tensorflow / stream_executor / gpu / gpu_driver . h " <nl> + <nl> + namespace stream_executor { <nl> + <nl> + # define RETURN_IF_CUDA_ERROR ( expr ) \ <nl> + do { \ <nl> + CUresult _status = expr ; \ <nl> + if ( ! 
SE_PREDICT_TRUE ( _status = = CUDA_SUCCESS ) ) { \ <nl> + const char * error_string ; \ <nl> + cuGetErrorString ( _status , & error_string ) ; \ <nl> + std : : ostringstream oss ; \ <nl> + oss < < error_string < < " \ nin " < < __FILE__ < < " ( " < < __LINE__ < < " ) : ' " \ <nl> + < < # expr < < " ' " ; \ <nl> + return port : : Status ( port : : error : : UNKNOWN , oss . str ( ) . c_str ( ) ) ; \ <nl> + } \ <nl> + } while ( false ) <nl> + <nl> + port : : StatusOr < std : : vector < uint8 > > LinkGpuAsm ( <nl> + gpu : : GpuContext * context , std : : vector < CubinOrPTXImage > images ) { <nl> + gpu : : ScopedActivateContext activation ( context ) ; <nl> + <nl> + CUlinkState link_state ; <nl> + RETURN_IF_CUDA_ERROR ( cuLinkCreate ( 0 , nullptr , nullptr , & link_state ) ) ; <nl> + for ( auto & image : images ) { <nl> + RETURN_IF_CUDA_ERROR ( cuLinkAddData ( <nl> + link_state , CU_JIT_INPUT_CUBIN , static_cast < void * > ( image . bytes . data ( ) ) , <nl> + image . bytes . size ( ) , " " , 0 , nullptr , nullptr ) ) ; <nl> + } <nl> + void * cubin_out ; <nl> + size_t cubin_size ; <nl> + RETURN_IF_CUDA_ERROR ( cuLinkComplete ( link_state , & cubin_out , & cubin_size ) ) ; <nl> + std : : vector < uint8 > cubin ( static_cast < uint8 * > ( cubin_out ) , <nl> + static_cast < uint8 * > ( cubin_out ) + cubin_size ) ; <nl> + RETURN_IF_CUDA_ERROR ( cuLinkDestroy ( link_state ) ) ; <nl> + return std : : move ( cubin ) ; <nl> + } <nl> + <nl> + } / / namespace stream_executor <nl> mmm a / tensorflow / stream_executor / cuda / cuda_dnn . cc <nl> ppp b / tensorflow / stream_executor / cuda / cuda_dnn . cc <nl> class CudnnRnnSequenceTensorDescriptor <nl> static port : : StatusOr < CudnnRnnSequenceTensorDescriptor > Create ( <nl> GpuExecutor * parent , int max_seq_length , int batch_size , int data_size , <nl> cudnnDataType_t data_type ) { <nl> - CHECK_GT ( max_seq_length , 0 ) ; <nl> + if ( max_seq_length < = 0 ) { <nl> + return port : : Status ( port : : error : : INVALID_ARGUMENT , " max_seq_length < = 0 " ) ; <nl> + } <nl> int dims [ ] = { batch_size , data_size , 1 } ; <nl> int strides [ ] = { dims [ 1 ] * dims [ 2 ] , dims [ 2 ] , 1 } ; <nl> TensorDescriptor tensor_desc = CreateTensorDescriptor ( ) ; <nl> class CudnnRnnSequenceTensorDescriptor <nl> GpuExecutor * parent , int max_seq_length , int batch_size , int data_size , <nl> const absl : : Span < const int > & seq_lengths , bool time_major , <nl> cudnnDataType_t data_type ) { <nl> - CHECK_GT ( max_seq_length , 0 ) ; <nl> + if ( max_seq_length < = 0 ) { <nl> + return port : : Status ( port : : error : : INVALID_ARGUMENT , " max_seq_length < = 0 " ) ; <nl> + } <nl> int dims [ ] = { batch_size , data_size , 1 } ; <nl> int strides [ ] = { dims [ 1 ] * dims [ 2 ] , dims [ 2 ] , 1 } ; <nl> TensorDescriptor tensor_desc = CreateTensorDescriptor ( ) ; <nl> mmm a / tensorflow / stream_executor / gpu / asm_compiler . h <nl> ppp b / tensorflow / stream_executor / gpu / asm_compiler . h <nl> limitations under the License . <nl> # include " tensorflow / stream_executor / platform / port . h " <nl> <nl> namespace stream_executor { <nl> + namespace gpu { <nl> + class GpuContext ; <nl> + } <nl> <nl> / / Compiles the given PTX string using ptxas and returns the resulting machine <nl> / / code ( i . e . a cubin ) as a byte array . 
The generated cubin matches the compute <nl> struct HsacoImage { <nl> port : : StatusOr < std : : vector < uint8 > > BundleGpuAsm ( <nl> std : : vector < HsacoImage > images , const std : : string rocm_root_dir ) ; <nl> <nl> + / / Links multiple relocatable GPU images ( e . g . results of ptxas - c ) into a <nl> + / / single image . <nl> + port : : StatusOr < std : : vector < uint8 > > LinkGpuAsm ( <nl> + gpu : : GpuContext * context , std : : vector < CubinOrPTXImage > images ) ; <nl> + <nl> } / / namespace stream_executor <nl> <nl> # endif / / TENSORFLOW_STREAM_EXECUTOR_GPU_ASM_COMPILER_H_ <nl> mmm a / tensorflow / tools / api / golden / v1 / tensorflow . - config - proto . - experimental . pbtxt <nl> ppp b / tensorflow / tools / api / golden / v1 / tensorflow . - config - proto . - experimental . pbtxt <nl> tf_proto { <nl> label : LABEL_OPTIONAL <nl> type : TYPE_INT64 <nl> } <nl> + field { <nl> + name : " use_tfrt " <nl> + number : 18 <nl> + label : LABEL_OPTIONAL <nl> + type : TYPE_BOOL <nl> + } <nl> enum_type { <nl> name : " MlirBridgeRollout " <nl> value : { <nl> mmm a / tensorflow / tools / api / golden / v1 / tensorflow . - config - proto . pbtxt <nl> ppp b / tensorflow / tools / api / golden / v1 / tensorflow . - config - proto . pbtxt <nl> tf_proto { <nl> label : LABEL_OPTIONAL <nl> type : TYPE_INT64 <nl> } <nl> + field { <nl> + name : " use_tfrt " <nl> + number : 18 <nl> + label : LABEL_OPTIONAL <nl> + type : TYPE_BOOL <nl> + } <nl> enum_type { <nl> name : " MlirBridgeRollout " <nl> value : { <nl> mmm a / tensorflow / tools / ci_build / builds / libtensorflow . sh <nl> ppp b / tensorflow / tools / ci_build / builds / libtensorflow . sh <nl> function build_libtensorflow_tarball ( ) { <nl> BAZEL_OPTS = " $ { BAZEL_OPTS } - - config = cuda - - crosstool_top = / / third_party / toolchains / preconfig / ubuntu16 . 04 / gcc7_manylinux2010 - nvcc - cuda11 : toolchain " <nl> export TF_NEED_ROCM = 0 <nl> export TF_CUDA_COMPUTE_CAPABILITIES = " sm_35 , sm_50 , sm_60 , sm_70 , sm_75 , compute_80 " <nl> + else <nl> + BAZEL_OPTS = " $ { BAZEL_OPTS } - - crosstool_top = / / third_party / toolchains / preconfig / ubuntu16 . 04 / gcc7_manylinux2010 : toolchain " <nl> fi <nl> bazel clean - - expunge <nl> yes " " | . / configure <nl> mmm a / tensorflow / tools / ci_build / install / install_centos_pip_packages . sh <nl> ppp b / tensorflow / tools / ci_build / install / install_centos_pip_packages . sh <nl> pip3 install - - upgrade termcolor <nl> pip2 install keras_preprocessing = = 1 . 0 . 5 - - no - deps <nl> pip3 install keras_preprocessing = = 1 . 0 . 5 - - no - deps <nl> pip2 install - - upgrade h5py = = 2 . 8 . 0 <nl> - pip3 install - - upgrade h5py = = 3 . 1 . 0 <nl> + pip3 install - - upgrade h5py = = 2 . 8 . 0 <nl> <nl> # Estimator <nl> pip2 install tf - estimator - nightly - - no - deps <nl> mmm a / tensorflow / tools / ci_build / install / install_pip_packages . sh <nl> ppp b / tensorflow / tools / ci_build / install / install_pip_packages . sh <nl> pip3 install - - upgrade termcolor <nl> pip2 install keras_preprocessing = = 1 . 1 . 0 - - no - deps <nl> pip3 install keras_preprocessing = = 1 . 1 . 0 - - no - deps <nl> pip2 install - - upgrade h5py = = 2 . 8 . 0 <nl> - pip3 install - - upgrade h5py = = 3 . 1 . 0 <nl> + pip3 install - - upgrade h5py = = 2 . 8 . 0 <nl> <nl> # Estimator <nl> pip2 install tf - estimator - nightly - - no - deps <nl> mmm a / tensorflow / tools / ci_build / install / install_python3 . 5_pip_packages . 
sh <nl> ppp b / tensorflow / tools / ci_build / install / install_python3 . 5_pip_packages . sh <nl> pip3 . 5 install - - upgrade termcolor <nl> <nl> # Keras <nl> pip3 . 5 install keras_preprocessing = = 1 . 0 . 5 <nl> - pip3 . 5 install - - upgrade h5py = = 3 . 1 . 0 <nl> + pip3 . 5 install - - upgrade h5py = = 2 . 8 . 0 <nl> <nl> # Estimator <nl> pip3 . 5 install tf - estimator - nightly = = 1 . 12 . 0 . dev20181203 - - no - deps <nl> mmm a / tensorflow / tools / ci_build / install / install_python3 . 6_pip_packages . sh <nl> ppp b / tensorflow / tools / ci_build / install / install_python3 . 6_pip_packages . sh <nl> pip3 install - - upgrade astor <nl> pip3 install - - upgrade gast <nl> pip3 install - - upgrade termcolor <nl> <nl> - pip3 install - - upgrade h5py = = 3 . 1 . 0 <nl> + pip3 install - - upgrade h5py = = 2 . 8 . 0 <nl> <nl> # Keras <nl> pip3 install keras_preprocessing = = 1 . 0 . 5 <nl> mmm a / tensorflow / tools / ci_build / linux / libtensorflow_docker . sh <nl> ppp b / tensorflow / tools / ci_build / linux / libtensorflow_docker . sh <nl> DOCKER_CONTEXT_PATH = " $ ( realpath $ { SCRIPT_DIR } / . . ) " <nl> ROOT_DIR = " $ ( realpath $ { SCRIPT_DIR } / . . / . . / . . / . . / ) " <nl> <nl> DOCKER_IMAGE = " tf - libtensorflow - cpu " <nl> - DOCKER_FILE = " Dockerfile . cpu " <nl> + DOCKER_FILE = " Dockerfile . rbe . ubuntu16 . 04 - manylinux2010 " <nl> DOCKER_BINARY = " docker " <nl> if [ " $ { TF_NEED_CUDA } " = = " 1 " ] ; then <nl> DOCKER_IMAGE = " tf - tensorflow - gpu " <nl> mmm a / tensorflow / tools / ci_build / rel / macos / cpu_py36_nonpip . sh <nl> ppp b / tensorflow / tools / ci_build / rel / macos / cpu_py36_nonpip . sh <nl> python3 . 6 - m virtualenv tf_build_env - - system - site - packages <nl> source tf_build_env / bin / activate <nl> <nl> # Install macos pip dependencies <nl> - install_macos_pip_deps sudo pip3 . 6 <nl> + install_macos_pip_deps virtualenv <nl> <nl> # Run configure . <nl> export TF_NEED_CUDA = 0 <nl> mmm a / tensorflow / tools / ci_build / rel / macos / cpu_py37_nonpip . sh <nl> ppp b / tensorflow / tools / ci_build / rel / macos / cpu_py37_nonpip . sh <nl> install_bazelisk <nl> # Pick a more recent version of xcode <nl> export DEVELOPER_DIR = / Applications / Xcode_11 . 3 . app / Contents / Developer <nl> sudo xcode - select - s " $ { DEVELOPER_DIR } " <nl> - python - m virtualenv tf_build_env - - system - site - packages <nl> + python3 . 7 - m virtualenv tf_build_env - - system - site - packages <nl> source tf_build_env / bin / activate <nl> <nl> # Install macos pip dependencies <nl> - install_macos_pip_deps sudo pip3 . 7 <nl> + install_macos_pip_deps virtualenv <nl> <nl> # Run configure . <nl> export TF_NEED_CUDA = 0 <nl> mmm a / tensorflow / tools / ci_build / rel / macos / cpu_py38_nonpip . sh <nl> ppp b / tensorflow / tools / ci_build / rel / macos / cpu_py38_nonpip . sh <nl> install_bazelisk <nl> export DEVELOPER_DIR = / Applications / Xcode_10 . 3 . app / Contents / Developer <nl> export MACOSX_DEPLOYMENT_TARGET = 10 . 10 <nl> sudo xcode - select - s " $ { DEVELOPER_DIR } " <nl> - python - m virtualenv tf_build_env - - system - site - packages <nl> + python3 . 8 - m virtualenv tf_build_env - - system - site - packages <nl> source tf_build_env / bin / activate <nl> <nl> # Install macos pip dependencies <nl> - install_macos_pip_deps sudo pip3 . 8 <nl> + install_macos_pip_deps virtualenv <nl> <nl> # Run configure . <nl> export TF_NEED_CUDA = 0 <nl> mmm a / tensorflow / tools / ci_build / release / common . 
sh <nl> ppp b / tensorflow / tools / ci_build / release / common . sh <nl> function install_ubuntu_16_pip_deps { <nl> " $ { PIP_CMD } " install - - user ' astunparse ~ = 1 . 6 . 3 ' <nl> " $ { PIP_CMD } " install - - user ' flatbuffers ~ = 1 . 12 . 0 ' <nl> " $ { PIP_CMD } " install - - user ' google_pasta ~ = 0 . 2 ' <nl> - " $ { PIP_CMD } " install - - user ' h5py ~ = 3 . 1 . 0 ' <nl> + " $ { PIP_CMD } " install - - user ' h5py ~ = 2 . 10 . 0 ' <nl> " $ { PIP_CMD } " install - - user ' keras_preprocessing ~ = 1 . 1 . 2 ' <nl> " $ { PIP_CMD } " install - - user ' numpy ~ = 1 . 19 . 2 ' <nl> " $ { PIP_CMD } " install - - user ' opt_einsum ~ = 3 . 3 . 0 ' <nl> function install_macos_pip_deps { <nl> $ { PIP_CMD } install $ USER_FLAG ' astunparse ~ = 1 . 6 . 3 ' <nl> $ { PIP_CMD } install $ USER_FLAG ' flatbuffers ~ = 1 . 12 . 0 ' <nl> $ { PIP_CMD } install $ USER_FLAG ' google_pasta ~ = 0 . 2 ' <nl> - $ { PIP_CMD } install $ USER_FLAG ' h5py ~ = 3 . 1 . 0 ' <nl> + $ { PIP_CMD } install $ USER_FLAG ' h5py ~ = 2 . 10 . 0 ' <nl> $ { PIP_CMD } install $ USER_FLAG ' keras_preprocessing ~ = 1 . 1 . 2 ' <nl> $ { PIP_CMD } install $ USER_FLAG ' numpy ~ = 1 . 19 . 2 ' <nl> $ { PIP_CMD } install $ USER_FLAG ' opt_einsum ~ = 3 . 3 . 0 ' <nl> mmm a / tensorflow / tools / ci_build / release / common_win . bat <nl> ppp b / tensorflow / tools / ci_build / release / common_win . bat <nl> echo on <nl> @ REM Set Environment Variables <nl> @ REM <nl> IF NOT DEFINED PYTHON_DIRECTORY ( <nl> - SET PYTHON_DIRECTORY = Python37 <nl> + SET PYTHON_DIRECTORY = Python36 <nl> ) <nl> SET PY_EXE = C : \ % PYTHON_DIRECTORY % \ python . exe <nl> SET PATH = % PATH % ; C : \ % PYTHON_DIRECTORY % <nl> SET PATH = % PATH % ; C : \ % PYTHON_DIRECTORY % <nl> % PY_EXE % - m pip install " astunparse ~ = 1 . 6 . 3 " <nl> % PY_EXE % - m pip install " flatbuffers ~ = 1 . 12 . 0 " <nl> % PY_EXE % - m pip install " google_pasta ~ = 0 . 2 " <nl> - % PY_EXE % - m pip install " h5py ~ = 3 . 1 . 0 " <nl> + % PY_EXE % - m pip install " h5py ~ = 2 . 10 . 0 " <nl> % PY_EXE % - m pip install " keras_preprocessing ~ = 1 . 1 . 2 " <nl> % PY_EXE % - m pip install " numpy ~ = 1 . 19 . 2 " <nl> % PY_EXE % - m pip install " opt_einsum ~ = 3 . 3 . 0 " <nl> mmm a / tensorflow / tools / def_file_filter / symbols_pybind . txt <nl> ppp b / tensorflow / tools / def_file_filter / symbols_pybind . txt <nl> tensorflow : : grappler : : graph_analyzer : : GraphAnalyzerTool <nl> <nl> [ / / tensorflow / python : bfloat16_lib ] # bfloat16 <nl> tensorflow : : RegisterNumpyBfloat16 <nl> - tensorflow : : Bfloat16PyType <nl> + tensorflow : : Bfloat16Dtype <nl> <nl> [ / / tensorflow / python : py_func_lib ] # py_func <nl> tensorflow : : InitializePyTrampoline <nl> mmm a / tensorflow / tools / pip_package / setup . py <nl> ppp b / tensorflow / tools / pip_package / setup . py <nl> <nl> ' astunparse ~ = 1 . 6 . 3 ' , <nl> ' flatbuffers ~ = 1 . 12 . 0 ' , <nl> ' google_pasta ~ = 0 . 2 ' , <nl> - ' h5py ~ = 3 . 1 . 0 ' , <nl> + ' h5py ~ = 2 . 10 . 0 ' , <nl> ' keras_preprocessing ~ = 1 . 1 . 2 ' , <nl> ' numpy ~ = 1 . 19 . 2 ' , <nl> ' opt_einsum ~ = 3 . 3 . 0 ' , <nl> mmm a / tensorflow / workspace . bzl <nl> ppp b / tensorflow / workspace . bzl <nl> def tf_repositories ( path_prefix = " " , tf_repo_name = " " ) : <nl> ) <nl> <nl> # Check out LLVM and MLIR from llvm - project . 
<nl> - LLVM_COMMIT = " ecaff13fc0bc1105ad910a72a5d0dcd164b35191 " <nl> - LLVM_SHA256 = " d0178d6f6a23ce60752d11ee8b1d64784d8ce9625f03d76943b0e40a0043211a " <nl> + LLVM_COMMIT = " d553243fe4b5e1992c07aff7b54b16160a4d5e97 " <nl> + LLVM_SHA256 = " 46b06b63414c21d86d8a91e9011f07dd974e976bbda767af66ec77c7d764f091 " <nl> LLVM_URLS = [ <nl> " https : / / storage . googleapis . com / mirror . tensorflow . org / github . com / llvm / llvm - project / archive / { commit } . tar . gz " . format ( commit = LLVM_COMMIT ) , <nl> " https : / / github . com / llvm / llvm - project / archive / { commit } . tar . gz " . format ( commit = LLVM_COMMIT ) , <nl>
Merge remote-tracking branch 'upstream/master' into xtensa-fusion-f1
tensorflow/tensorflow
6b24408403c8befb7615a6192ec33a9d36122b70
2020-12-09T05:45:17Z
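For the tensorflow record above: the diff declares a new `LinkGpuAsm` entry point in the GPU asm compiler header. Below is a minimal C++ sketch of how a caller might use it to link two relocatable cubins (the `ptxas -c` outputs its comment mentions) into one image. Only the `LinkGpuAsm` signature comes from the diff; the include path is inferred from the header guard shown there, while the `CubinOrPTXImage` field names (`profile`, `bytes`), the `se::uint8` alias, and the source of the `GpuContext` are assumptions for illustration.

```cpp
#include <string>
#include <utility>
#include <vector>

#include "tensorflow/stream_executor/gpu/asm_compiler.h"  // path inferred from header guard

namespace se = stream_executor;

// Hypothetical helper: links two relocatable cubins (e.g. produced by
// `ptxas -c`) into a single image via the LinkGpuAsm API added in this
// commit. The caller supplies a live GpuContext, assumed to come from the
// GpuExecutor that owns the target device.
se::port::StatusOr<std::vector<se::uint8>> LinkTwoCubins(
    se::gpu::GpuContext* context,
    std::vector<se::uint8> cubin_a, std::vector<se::uint8> cubin_b) {
  std::vector<se::CubinOrPTXImage> images(2);
  images[0].profile = "sm_70";           // assumed field: target GPU profile
  images[0].bytes = std::move(cubin_a);  // first relocatable cubin
  images[1].profile = "sm_70";
  images[1].bytes = std::move(cubin_b);  // second relocatable cubin
  // Returns the fused image (or an error status) as a byte vector.
  return se::LinkGpuAsm(context, std::move(images));
}
```

The implementation presumably drives the CUDA driver's linker (the same machinery `nvlink` exposes offline), which would explain why a live `GpuContext` is part of the signature while the bundling APIs above it take none.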
deleted file mode 100644 <nl> index be3cd9f5cfa3 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlButton . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIControlButton . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIControlButton . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnControl_Button : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Control . Button . SetVisible = V2 : : KodiAPI : : GUI : : CAddOnControl_Button : : SetVisible ; <nl> - interfaces - > GUI . Control . Button . SetEnabled = V2 : : KodiAPI : : GUI : : CAddOnControl_Button : : SetEnabled ; <nl> - <nl> - interfaces - > GUI . Control . Button . SetLabel = V2 : : KodiAPI : : GUI : : CAddOnControl_Button : : SetLabel ; <nl> - interfaces - > GUI . Control . Button . GetLabel = V2 : : KodiAPI : : GUI : : CAddOnControl_Button : : GetLabel ; <nl> - <nl> - interfaces - > GUI . Control . Button . SetLabel2 = V2 : : KodiAPI : : GUI : : CAddOnControl_Button : : SetLabel2 ; <nl> - interfaces - > GUI . Control . Button . GetLabel2 = V2 : : KodiAPI : : GUI : : CAddOnControl_Button : : GetLabel2 ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index b68ac499879a . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlButton . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . 
If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnControl_Button <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index b77a46d2df19 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlEdit . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIControlEdit . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIControlEdit . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnControl_Edit : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Control . Edit . SetVisible = V2 : : KodiAPI : : GUI : : CAddOnControl_Edit : : SetVisible ; <nl> - interfaces - > GUI . Control . Edit . SetEnabled = V2 : : KodiAPI : : GUI : : CAddOnControl_Edit : : SetEnabled ; <nl> - interfaces - > GUI . Control . Edit . SetLabel = V2 : : KodiAPI : : GUI : : CAddOnControl_Edit : : SetLabel ; <nl> - interfaces - > GUI . Control . Edit . GetLabel = V2 : : KodiAPI : : GUI : : CAddOnControl_Edit : : GetLabel ; <nl> - interfaces - > GUI . Control . Edit . SetText = V2 : : KodiAPI : : GUI : : CAddOnControl_Edit : : SetText ; <nl> - interfaces - > GUI . Control . Edit . GetText = V2 : : KodiAPI : : GUI : : CAddOnControl_Edit : : GetText ; <nl> - interfaces - > GUI . Control . Edit . SetCursorPosition = V2 : : KodiAPI : : GUI : : CAddOnControl_Edit : : SetCursorPosition ; <nl> - interfaces - > GUI . Control . Edit . GetCursorPosition = V2 : : KodiAPI : : GUI : : CAddOnControl_Edit : : GetCursorPosition ; <nl> - interfaces - > GUI . Control . Edit . SetInputType = V2 : : KodiAPI : : GUI : : CAddOnControl_Edit : : SetInputType ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 06418a069b25 . . 
000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlEdit . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnControl_Edit <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index d571354a1461 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlFadeLabel . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIControlFadeLabel . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIControlFadeLabel . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnControl_FadeLabel : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Control . FadeLabel . SetVisible = V2 : : KodiAPI : : GUI : : CAddOnControl_FadeLabel : : SetVisible ; <nl> - interfaces - > GUI . Control . FadeLabel . AddLabel = V2 : : KodiAPI : : GUI : : CAddOnControl_FadeLabel : : AddLabel ; <nl> - interfaces - > GUI . Control . FadeLabel . GetLabel = V2 : : KodiAPI : : GUI : : CAddOnControl_FadeLabel : : GetLabel ; <nl> - interfaces - > GUI . Control . FadeLabel . 
SetScrolling = V2 : : KodiAPI : : GUI : : CAddOnControl_FadeLabel : : SetScrolling ; <nl> - interfaces - > GUI . Control . FadeLabel . Reset = V2 : : KodiAPI : : GUI : : CAddOnControl_FadeLabel : : Reset ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 2659fe5b84fc . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlFadeLabel . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnControl_FadeLabel <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 0db0beff119b . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlImage . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIControlImage . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIControlImage . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnControl_Image : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - <nl> - interfaces - > GUI . Control . 
Image . SetVisible = V2 : : KodiAPI : : GUI : : CAddOnControl_Image : : SetVisible ; <nl> - interfaces - > GUI . Control . Image . SetFileName = V2 : : KodiAPI : : GUI : : CAddOnControl_Image : : SetFileName ; <nl> - interfaces - > GUI . Control . Image . SetColorDiffuse = V2 : : KodiAPI : : GUI : : CAddOnControl_Image : : SetColorDiffuse ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 2d8b7a8b88b9 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlImage . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnControl_Image <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index ce9e12f6330d . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlLabel . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIControlLabel . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIControlLabel . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . 
hpp " <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnControl_Label : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Control . Label . SetVisible = V2 : : KodiAPI : : GUI : : CAddOnControl_Label : : SetVisible ; <nl> - interfaces - > GUI . Control . Label . SetLabel = V2 : : KodiAPI : : GUI : : CAddOnControl_Label : : SetLabel ; <nl> - interfaces - > GUI . Control . Label . GetLabel = V2 : : KodiAPI : : GUI : : CAddOnControl_Label : : GetLabel ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index d9601278101f . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlLabel . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnControl_Label <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 6ae8babcf6c2 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlProgress . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIControlProgress . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIControlProgress . 
h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnControl_Progress : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Control . Progress . SetVisible = V2 : : KodiAPI : : GUI : : CAddOnControl_Progress : : SetVisible ; <nl> - interfaces - > GUI . Control . Progress . SetPercentage = V2 : : KodiAPI : : GUI : : CAddOnControl_Progress : : SetPercentage ; <nl> - interfaces - > GUI . Control . Progress . GetPercentage = V2 : : KodiAPI : : GUI : : CAddOnControl_Progress : : GetPercentage ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 98ed0b95f01e . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlProgress . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - class CAddOnControl_Progress <nl> - { <nl> - public : <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 05782c0a0e03 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlRadioButton . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . 
<nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIControlRadioButton . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIControlRadioButton . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnControl_RadioButton : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Control . RadioButton . SetVisible = V2 : : KodiAPI : : GUI : : CAddOnControl_RadioButton : : SetVisible ; <nl> - interfaces - > GUI . Control . RadioButton . SetEnabled = V2 : : KodiAPI : : GUI : : CAddOnControl_RadioButton : : SetEnabled ; <nl> - <nl> - interfaces - > GUI . Control . RadioButton . SetLabel = V2 : : KodiAPI : : GUI : : CAddOnControl_RadioButton : : SetLabel ; <nl> - interfaces - > GUI . Control . RadioButton . GetLabel = V2 : : KodiAPI : : GUI : : CAddOnControl_RadioButton : : GetLabel ; <nl> - <nl> - interfaces - > GUI . Control . RadioButton . SetSelected = V2 : : KodiAPI : : GUI : : CAddOnControl_RadioButton : : SetSelected ; <nl> - interfaces - > GUI . Control . RadioButton . IsSelected = V2 : : KodiAPI : : GUI : : CAddOnControl_RadioButton : : IsSelected ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 802f5fe9b866 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlRadioButton . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnControl_RadioButton <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index a5a55e9c176b . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlRendering . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . 
tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIControlRendering . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIControlRendering . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - using namespace ADDON ; <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnControl_Rendering : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Control . Rendering . SetCallbacks = V2 : : KodiAPI : : GUI : : CAddOnControl_Rendering : : SetCallbacks ; <nl> - interfaces - > GUI . Control . Rendering . Delete = V2 : : KodiAPI : : GUI : : CAddOnControl_Rendering : : Delete ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index d2e32eada81a . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlRendering . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - class CAddOnControl_Rendering <nl> - { <nl> - public : <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 208f6ee7480d . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlSettingsSlider . 
cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIControlSettingsSlider . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIControlSettingsSlider . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - using namespace ADDON ; <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnControl_SettingsSlider : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Control . SettingsSlider . SetVisible = V2 : : KodiAPI : : GUI : : CAddOnControl_SettingsSlider : : SetVisible ; <nl> - interfaces - > GUI . Control . SettingsSlider . SetEnabled = V2 : : KodiAPI : : GUI : : CAddOnControl_SettingsSlider : : SetEnabled ; <nl> - <nl> - interfaces - > GUI . Control . SettingsSlider . SetText = V2 : : KodiAPI : : GUI : : CAddOnControl_SettingsSlider : : SetText ; <nl> - interfaces - > GUI . Control . SettingsSlider . Reset = V2 : : KodiAPI : : GUI : : CAddOnControl_SettingsSlider : : Reset ; <nl> - <nl> - interfaces - > GUI . Control . SettingsSlider . SetIntRange = V2 : : KodiAPI : : GUI : : CAddOnControl_SettingsSlider : : SetIntRange ; <nl> - interfaces - > GUI . Control . SettingsSlider . SetIntValue = V2 : : KodiAPI : : GUI : : CAddOnControl_SettingsSlider : : SetIntValue ; <nl> - interfaces - > GUI . Control . SettingsSlider . GetIntValue = V2 : : KodiAPI : : GUI : : CAddOnControl_SettingsSlider : : GetIntValue ; <nl> - interfaces - > GUI . Control . SettingsSlider . SetIntInterval = V2 : : KodiAPI : : GUI : : CAddOnControl_SettingsSlider : : SetIntInterval ; <nl> - <nl> - interfaces - > GUI . Control . SettingsSlider . SetPercentage = V2 : : KodiAPI : : GUI : : CAddOnControl_SettingsSlider : : SetPercentage ; <nl> - interfaces - > GUI . Control . SettingsSlider . GetPercentage = V2 : : KodiAPI : : GUI : : CAddOnControl_SettingsSlider : : GetPercentage ; <nl> - <nl> - interfaces - > GUI . Control . SettingsSlider . SetFloatRange = V2 : : KodiAPI : : GUI : : CAddOnControl_SettingsSlider : : SetFloatRange ; <nl> - interfaces - > GUI . Control . SettingsSlider . SetFloatValue = V2 : : KodiAPI : : GUI : : CAddOnControl_SettingsSlider : : SetFloatValue ; <nl> - interfaces - > GUI . Control . SettingsSlider . GetFloatValue = V2 : : KodiAPI : : GUI : : CAddOnControl_SettingsSlider : : GetFloatValue ; <nl> - interfaces - > GUI . Control . SettingsSlider . 
SetFloatInterval = V2 : : KodiAPI : : GUI : : CAddOnControl_SettingsSlider : : SetFloatInterval ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index ba47d36a8e13 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlSettingsSlider . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnControl_SettingsSlider <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 4353a6c469f5 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlSlider . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIControlSlider . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIControlSlider . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - using namespace ADDON ; <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnControl_Slider : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Control . Slider . 
SetVisible = V2 : : KodiAPI : : GUI : : CAddOnControl_Slider : : SetVisible ; <nl> - interfaces - > GUI . Control . Slider . SetEnabled = V2 : : KodiAPI : : GUI : : CAddOnControl_Slider : : SetEnabled ; <nl> - <nl> - interfaces - > GUI . Control . Slider . Reset = V2 : : KodiAPI : : GUI : : CAddOnControl_Slider : : Reset ; <nl> - interfaces - > GUI . Control . Slider . GetDescription = V2 : : KodiAPI : : GUI : : CAddOnControl_Slider : : GetDescription ; <nl> - <nl> - interfaces - > GUI . Control . Slider . SetIntRange = V2 : : KodiAPI : : GUI : : CAddOnControl_Slider : : SetIntRange ; <nl> - interfaces - > GUI . Control . Slider . SetIntValue = V2 : : KodiAPI : : GUI : : CAddOnControl_Slider : : SetIntValue ; <nl> - interfaces - > GUI . Control . Slider . GetIntValue = V2 : : KodiAPI : : GUI : : CAddOnControl_Slider : : GetIntValue ; <nl> - interfaces - > GUI . Control . Slider . SetIntInterval = V2 : : KodiAPI : : GUI : : CAddOnControl_Slider : : SetIntInterval ; <nl> - <nl> - interfaces - > GUI . Control . Slider . SetPercentage = V2 : : KodiAPI : : GUI : : CAddOnControl_Slider : : SetPercentage ; <nl> - interfaces - > GUI . Control . Slider . GetPercentage = V2 : : KodiAPI : : GUI : : CAddOnControl_Slider : : GetPercentage ; <nl> - <nl> - interfaces - > GUI . Control . Slider . SetFloatRange = V2 : : KodiAPI : : GUI : : CAddOnControl_Slider : : SetFloatRange ; <nl> - interfaces - > GUI . Control . Slider . SetFloatValue = V2 : : KodiAPI : : GUI : : CAddOnControl_Slider : : SetFloatValue ; <nl> - interfaces - > GUI . Control . Slider . GetFloatValue = V2 : : KodiAPI : : GUI : : CAddOnControl_Slider : : GetFloatValue ; <nl> - interfaces - > GUI . Control . Slider . SetFloatInterval = V2 : : KodiAPI : : GUI : : CAddOnControl_Slider : : SetFloatInterval ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index c3a069ff93f4 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlSlider . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnControl_Slider <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 202a2b61c6da . . 
000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlSpin . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIControlSpin . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIControlSpin . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - using namespace ADDON ; <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnControl_Spin : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Control . Spin . SetVisible = V2 : : KodiAPI : : GUI : : CAddOnControl_Spin : : SetVisible ; <nl> - interfaces - > GUI . Control . Spin . SetEnabled = V2 : : KodiAPI : : GUI : : CAddOnControl_Spin : : SetEnabled ; <nl> - <nl> - interfaces - > GUI . Control . Spin . SetText = V2 : : KodiAPI : : GUI : : CAddOnControl_Spin : : SetText ; <nl> - interfaces - > GUI . Control . Spin . Reset = V2 : : KodiAPI : : GUI : : CAddOnControl_Spin : : Reset ; <nl> - interfaces - > GUI . Control . Spin . SetType = V2 : : KodiAPI : : GUI : : CAddOnControl_Spin : : SetType ; <nl> - <nl> - interfaces - > GUI . Control . Spin . AddStringLabel = V2 : : KodiAPI : : GUI : : CAddOnControl_Spin : : AddStringLabel ; <nl> - interfaces - > GUI . Control . Spin . SetStringValue = V2 : : KodiAPI : : GUI : : CAddOnControl_Spin : : SetStringValue ; <nl> - interfaces - > GUI . Control . Spin . GetStringValue = V2 : : KodiAPI : : GUI : : CAddOnControl_Spin : : GetStringValue ; <nl> - <nl> - interfaces - > GUI . Control . Spin . AddIntLabel = V2 : : KodiAPI : : GUI : : CAddOnControl_Spin : : AddIntLabel ; <nl> - interfaces - > GUI . Control . Spin . SetIntRange = V2 : : KodiAPI : : GUI : : CAddOnControl_Spin : : SetIntRange ; <nl> - interfaces - > GUI . Control . Spin . SetIntValue = V2 : : KodiAPI : : GUI : : CAddOnControl_Spin : : SetIntValue ; <nl> - interfaces - > GUI . Control . Spin . GetIntValue = V2 : : KodiAPI : : GUI : : CAddOnControl_Spin : : GetIntValue ; <nl> - <nl> - interfaces - > GUI . Control . Spin . SetFloatRange = V2 : : KodiAPI : : GUI : : CAddOnControl_Spin : : SetFloatRange ; <nl> - interfaces - > GUI . Control . Spin . SetFloatValue = V2 : : KodiAPI : : GUI : : CAddOnControl_Spin : : SetFloatValue ; <nl> - interfaces - > GUI . Control . Spin . GetFloatValue = V2 : : KodiAPI : : GUI : : CAddOnControl_Spin : : GetFloatValue ; <nl> - interfaces - > GUI . Control . Spin . 
SetFloatInterval = V2 : : KodiAPI : : GUI : : CAddOnControl_Spin : : SetFloatInterval ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 94a16dff8991 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlSpin . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - class CAddOnControl_Spin <nl> - { <nl> - public : <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 5f4a56e15465 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlTextBox . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIControlTextBox . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIControlTextBox . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - using namespace ADDON ; <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnControl_TextBox : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Control . TextBox . 
SetVisible = V2 : : KodiAPI : : GUI : : CAddOnControl_TextBox : : SetVisible ; <nl> - interfaces - > GUI . Control . TextBox . Reset = V2 : : KodiAPI : : GUI : : CAddOnControl_TextBox : : Reset ; <nl> - interfaces - > GUI . Control . TextBox . SetText = V2 : : KodiAPI : : GUI : : CAddOnControl_TextBox : : SetText ; <nl> - interfaces - > GUI . Control . TextBox . GetText = V2 : : KodiAPI : : GUI : : CAddOnControl_TextBox : : GetText ; <nl> - interfaces - > GUI . Control . TextBox . Scroll = V2 : : KodiAPI : : GUI : : CAddOnControl_TextBox : : Scroll ; <nl> - interfaces - > GUI . Control . TextBox . SetAutoScrolling = V2 : : KodiAPI : : GUI : : CAddOnControl_TextBox : : SetAutoScrolling ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 4c517adeeae7 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIControlTextBox . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnControl_TextBox <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index bf8e2ecf24af . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIDialogContextMenu . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIDialogContextMenu . 
h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIDialogContextMenu . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - using namespace ADDON ; <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnDialog_ContextMenu : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Dialogs . ContextMenu . Open = V2 : : KodiAPI : : GUI : : CAddOnDialog_ContextMenu : : Open ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 8893a748274f . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIDialogContextMenu . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnDialog_ContextMenu <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - <nl> - static int Open ( const char * heading , const char * entries [ ] , unsigned int size ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 2e3a76181fe8 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIDialogExtendedProgressBar . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . 
<nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIDialogExtendedProgressBar . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIDialogExtendedProgressBar . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - using namespace ADDON ; <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnDialog_ExtendedProgress : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Dialogs . ExtendedProgress . New = V2 : : KodiAPI : : GUI : : CAddOnDialog_ExtendedProgress : : New ; <nl> - interfaces - > GUI . Dialogs . ExtendedProgress . Delete = V2 : : KodiAPI : : GUI : : CAddOnDialog_ExtendedProgress : : Delete ; <nl> - interfaces - > GUI . Dialogs . ExtendedProgress . Title = V2 : : KodiAPI : : GUI : : CAddOnDialog_ExtendedProgress : : Title ; <nl> - interfaces - > GUI . Dialogs . ExtendedProgress . SetTitle = V2 : : KodiAPI : : GUI : : CAddOnDialog_ExtendedProgress : : SetTitle ; <nl> - interfaces - > GUI . Dialogs . ExtendedProgress . Text = V2 : : KodiAPI : : GUI : : CAddOnDialog_ExtendedProgress : : Text ; <nl> - interfaces - > GUI . Dialogs . ExtendedProgress . SetText = V2 : : KodiAPI : : GUI : : CAddOnDialog_ExtendedProgress : : SetText ; <nl> - interfaces - > GUI . Dialogs . ExtendedProgress . IsFinished = V2 : : KodiAPI : : GUI : : CAddOnDialog_ExtendedProgress : : IsFinished ; <nl> - interfaces - > GUI . Dialogs . ExtendedProgress . MarkFinished = V2 : : KodiAPI : : GUI : : CAddOnDialog_ExtendedProgress : : MarkFinished ; <nl> - interfaces - > GUI . Dialogs . ExtendedProgress . Percentage = V2 : : KodiAPI : : GUI : : CAddOnDialog_ExtendedProgress : : Percentage ; <nl> - interfaces - > GUI . Dialogs . ExtendedProgress . SetPercentage = V2 : : KodiAPI : : GUI : : CAddOnDialog_ExtendedProgress : : SetPercentage ; <nl> - interfaces - > GUI . Dialogs . ExtendedProgress . SetProgress = V2 : : KodiAPI : : GUI : : CAddOnDialog_ExtendedProgress : : SetProgress ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index f6a868fc7312 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIDialogExtendedProgressBar . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . 
<nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnDialog_ExtendedProgress <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 94addd9048b6 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIDialogFileBrowser . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIDialogFileBrowser . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIDialogFileBrowser . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - using namespace ADDON ; <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnDialog_FileBrowser : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Dialogs . FileBrowser . ShowAndGetDirectory = V2 : : KodiAPI : : GUI : : CAddOnDialog_FileBrowser : : ShowAndGetDirectory ; <nl> - interfaces - > GUI . Dialogs . FileBrowser . ShowAndGetFile = V2 : : KodiAPI : : GUI : : CAddOnDialog_FileBrowser : : ShowAndGetFile ; <nl> - interfaces - > GUI . Dialogs . FileBrowser . ShowAndGetFileFromDir = V2 : : KodiAPI : : GUI : : CAddOnDialog_FileBrowser : : ShowAndGetFileFromDir ; <nl> - interfaces - > GUI . Dialogs . FileBrowser . ShowAndGetFileList = V2 : : KodiAPI : : GUI : : CAddOnDialog_FileBrowser : : ShowAndGetFileList ; <nl> - interfaces - > GUI . Dialogs . FileBrowser . ShowAndGetSource = V2 : : KodiAPI : : GUI : : CAddOnDialog_FileBrowser : : ShowAndGetSource ; <nl> - interfaces - > GUI . Dialogs . FileBrowser . ShowAndGetImage = V2 : : KodiAPI : : GUI : : CAddOnDialog_FileBrowser : : ShowAndGetImage ; <nl> - interfaces - > GUI . Dialogs . FileBrowser . ShowAndGetImageList = V2 : : KodiAPI : : GUI : : CAddOnDialog_FileBrowser : : ShowAndGetImageList ; <nl> - interfaces - > GUI . Dialogs . FileBrowser . ClearList = V2 : : KodiAPI : : GUI : : CAddOnDialog_FileBrowser : : ClearList ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 0a85dd89c10a . . 
000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIDialogFileBrowser . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnDialog_FileBrowser <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 885747fdb164 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIDialogKeyboard . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIDialogKeyboard . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIDialogKeyboard . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - using namespace ADDON ; <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnDialog_Keyboard : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Dialogs . Keyboard . ShowAndGetInputWithHead = V2 : : KodiAPI : : GUI : : CAddOnDialog_Keyboard : : ShowAndGetInputWithHead ; <nl> - interfaces - > GUI . Dialogs . Keyboard . ShowAndGetInput = V2 : : KodiAPI : : GUI : : CAddOnDialog_Keyboard : : ShowAndGetInput ; <nl> - interfaces - > GUI . Dialogs . Keyboard . 
ShowAndGetNewPasswordWithHead = V2 : : KodiAPI : : GUI : : CAddOnDialog_Keyboard : : ShowAndGetNewPasswordWithHead ; <nl> - interfaces - > GUI . Dialogs . Keyboard . ShowAndGetNewPassword = V2 : : KodiAPI : : GUI : : CAddOnDialog_Keyboard : : ShowAndGetNewPassword ; <nl> - interfaces - > GUI . Dialogs . Keyboard . ShowAndVerifyNewPasswordWithHead = V2 : : KodiAPI : : GUI : : CAddOnDialog_Keyboard : : ShowAndVerifyNewPasswordWithHead ; <nl> - interfaces - > GUI . Dialogs . Keyboard . ShowAndVerifyNewPassword = V2 : : KodiAPI : : GUI : : CAddOnDialog_Keyboard : : ShowAndVerifyNewPassword ; <nl> - interfaces - > GUI . Dialogs . Keyboard . ShowAndVerifyPassword = V2 : : KodiAPI : : GUI : : CAddOnDialog_Keyboard : : ShowAndVerifyPassword ; <nl> - interfaces - > GUI . Dialogs . Keyboard . ShowAndGetFilter = V2 : : KodiAPI : : GUI : : CAddOnDialog_Keyboard : : ShowAndGetFilter ; <nl> - interfaces - > GUI . Dialogs . Keyboard . SendTextToActiveKeyboard = V2 : : KodiAPI : : GUI : : CAddOnDialog_Keyboard : : SendTextToActiveKeyboard ; <nl> - interfaces - > GUI . Dialogs . Keyboard . isKeyboardActivated = V2 : : KodiAPI : : GUI : : CAddOnDialog_Keyboard : : isKeyboardActivated ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 7d0fff27bdf5 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIDialogKeyboard . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnDialog_Keyboard <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 2a2176e80624 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIDialogNumeric . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . 
<nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIDialogNumeric . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIDialogNumeric . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - using namespace ADDON ; <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnDialog_Numeric : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Dialogs . Numeric . ShowAndVerifyNewPassword = V2 : : KodiAPI : : GUI : : CAddOnDialog_Numeric : : ShowAndVerifyNewPassword ; <nl> - interfaces - > GUI . Dialogs . Numeric . ShowAndVerifyPassword = V2 : : KodiAPI : : GUI : : CAddOnDialog_Numeric : : ShowAndVerifyPassword ; <nl> - interfaces - > GUI . Dialogs . Numeric . ShowAndVerifyInput = V2 : : KodiAPI : : GUI : : CAddOnDialog_Numeric : : ShowAndVerifyInput ; <nl> - interfaces - > GUI . Dialogs . Numeric . ShowAndGetTime = V2 : : KodiAPI : : GUI : : CAddOnDialog_Numeric : : ShowAndGetTime ; <nl> - interfaces - > GUI . Dialogs . Numeric . ShowAndGetDate = V2 : : KodiAPI : : GUI : : CAddOnDialog_Numeric : : ShowAndGetDate ; <nl> - interfaces - > GUI . Dialogs . Numeric . ShowAndGetIPAddress = V2 : : KodiAPI : : GUI : : CAddOnDialog_Numeric : : ShowAndGetIPAddress ; <nl> - interfaces - > GUI . Dialogs . Numeric . ShowAndGetNumber = V2 : : KodiAPI : : GUI : : CAddOnDialog_Numeric : : ShowAndGetNumber ; <nl> - interfaces - > GUI . Dialogs . Numeric . ShowAndGetSeconds = V2 : : KodiAPI : : GUI : : CAddOnDialog_Numeric : : ShowAndGetSeconds ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 855664a461a2 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIDialogNumeric . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . 
<nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnDialog_Numeric <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index b248af298110 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIDialogOK . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIDialogOK . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIDialogOK . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - using namespace ADDON ; <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnDialog_OK : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Dialogs . OK . ShowAndGetInputSingleText = V2 : : KodiAPI : : GUI : : CAddOnDialog_OK : : ShowAndGetInputSingleText ; <nl> - interfaces - > GUI . Dialogs . OK . ShowAndGetInputLineText = V2 : : KodiAPI : : GUI : : CAddOnDialog_OK : : ShowAndGetInputLineText ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index a04c49a952f7 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIDialogOK . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . 
If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnDialog_OK <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index d7bb4a0d9a95 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIDialogProgress . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIDialogProgress . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIDialogProgress . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - using namespace ADDON ; <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnDialog_Progress : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Dialogs . Progress . New = V2 : : KodiAPI : : GUI : : CAddOnDialog_Progress : : New ; <nl> - interfaces - > GUI . Dialogs . Progress . Delete = V2 : : KodiAPI : : GUI : : CAddOnDialog_Progress : : Delete ; <nl> - interfaces - > GUI . Dialogs . Progress . Open = V2 : : KodiAPI : : GUI : : CAddOnDialog_Progress : : Open ; <nl> - interfaces - > GUI . Dialogs . Progress . SetHeading = V2 : : KodiAPI : : GUI : : CAddOnDialog_Progress : : SetHeading ; <nl> - interfaces - > GUI . Dialogs . Progress . SetLine = V2 : : KodiAPI : : GUI : : CAddOnDialog_Progress : : SetLine ; <nl> - interfaces - > GUI . Dialogs . Progress . SetCanCancel = V2 : : KodiAPI : : GUI : : CAddOnDialog_Progress : : SetCanCancel ; <nl> - interfaces - > GUI . Dialogs . Progress . IsCanceled = V2 : : KodiAPI : : GUI : : CAddOnDialog_Progress : : IsCanceled ; <nl> - interfaces - > GUI . Dialogs . Progress . SetPercentage = V2 : : KodiAPI : : GUI : : CAddOnDialog_Progress : : SetPercentage ; <nl> - interfaces - > GUI . Dialogs . Progress . GetPercentage = V2 : : KodiAPI : : GUI : : CAddOnDialog_Progress : : GetPercentage ; <nl> - interfaces - > GUI . Dialogs . Progress . ShowProgressBar = V2 : : KodiAPI : : GUI : : CAddOnDialog_Progress : : ShowProgressBar ; <nl> - interfaces - > GUI . Dialogs . Progress . 
SetProgressMax = V2 : : KodiAPI : : GUI : : CAddOnDialog_Progress : : SetProgressMax ; <nl> - interfaces - > GUI . Dialogs . Progress . SetProgressAdvance = V2 : : KodiAPI : : GUI : : CAddOnDialog_Progress : : SetProgressAdvance ; <nl> - interfaces - > GUI . Dialogs . Progress . Abort = V2 : : KodiAPI : : GUI : : CAddOnDialog_Progress : : Abort ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 3d243322d9d6 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIDialogProgress . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnDialog_Progress <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index e7cd2cb524f9 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIDialogSelect . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIDialogSelect . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIDialogSelect . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . 
hpp " <nl> - <nl> - using namespace ADDON ; <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnDialog_Select : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Dialogs . Select . Open = V2 : : KodiAPI : : GUI : : CAddOnDialog_Select : : Open ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index aa7b3a2e763d . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIDialogSelect . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnDialog_Select <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 272b768f8f68 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIDialogTextViewer . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIDialogTextViewer . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIDialogTextViewer . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . 
hpp " <nl> - <nl> - using namespace ADDON ; <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnDialog_TextViewer : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Dialogs . TextViewer . Open = V2 : : KodiAPI : : GUI : : CAddOnDialog_TextViewer : : Open ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 6474e1ce4981 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIDialogTextViewer . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnDialog_TextViewer <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 77c81c80343f . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIDialogYesNo . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIDialogYesNo . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIDialogYesNo . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . 
hpp " <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnDialog_YesNo : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Dialogs . YesNo . ShowAndGetInputSingleText = V2 : : KodiAPI : : GUI : : CAddOnDialog_YesNo : : ShowAndGetInputSingleText ; <nl> - interfaces - > GUI . Dialogs . YesNo . ShowAndGetInputLineText = V2 : : KodiAPI : : GUI : : CAddOnDialog_YesNo : : ShowAndGetInputLineText ; <nl> - interfaces - > GUI . Dialogs . YesNo . ShowAndGetInputLineButtonText = V2 : : KodiAPI : : GUI : : CAddOnDialog_YesNo : : ShowAndGetInputLineButtonText ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 606f649cf27f . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIDialogYesNo . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnDialog_YesNo <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 228541b89622 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIGeneral . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIGeneral . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . 
h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIGeneral . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnGUIGeneral : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . General . Lock = V2 : : KodiAPI : : GUI : : CAddOnGUIGeneral : : Lock ; <nl> - interfaces - > GUI . General . Unlock = V2 : : KodiAPI : : GUI : : CAddOnGUIGeneral : : Unlock ; <nl> - interfaces - > GUI . General . GetScreenHeight = V2 : : KodiAPI : : GUI : : CAddOnGUIGeneral : : GetScreenHeight ; <nl> - interfaces - > GUI . General . GetScreenWidth = V2 : : KodiAPI : : GUI : : CAddOnGUIGeneral : : GetScreenWidth ; <nl> - interfaces - > GUI . General . GetVideoResolution = V2 : : KodiAPI : : GUI : : CAddOnGUIGeneral : : GetVideoResolution ; <nl> - interfaces - > GUI . General . GetCurrentWindowDialogId = V2 : : KodiAPI : : GUI : : CAddOnGUIGeneral : : GetCurrentWindowDialogId ; <nl> - interfaces - > GUI . General . GetCurrentWindowId = V2 : : KodiAPI : : GUI : : CAddOnGUIGeneral : : GetCurrentWindowId ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 9946db471e66 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIGeneral . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnGUIGeneral <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index dd14ef5af533 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIListItem . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . 
<nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIListItem . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIListItem . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnListItem : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . ListItem . Create = V2 : : KodiAPI : : GUI : : CAddOnListItem : : Create ; <nl> - interfaces - > GUI . ListItem . Destroy = V2 : : KodiAPI : : GUI : : CAddOnListItem : : Destroy ; <nl> - interfaces - > GUI . ListItem . GetLabel = V2 : : KodiAPI : : GUI : : CAddOnListItem : : GetLabel ; <nl> - interfaces - > GUI . ListItem . SetLabel = V2 : : KodiAPI : : GUI : : CAddOnListItem : : SetLabel ; <nl> - interfaces - > GUI . ListItem . GetLabel2 = V2 : : KodiAPI : : GUI : : CAddOnListItem : : GetLabel2 ; <nl> - interfaces - > GUI . ListItem . SetLabel2 = V2 : : KodiAPI : : GUI : : CAddOnListItem : : SetLabel2 ; <nl> - interfaces - > GUI . ListItem . GetIconImage = V2 : : KodiAPI : : GUI : : CAddOnListItem : : GetIconImage ; <nl> - interfaces - > GUI . ListItem . SetIconImage = V2 : : KodiAPI : : GUI : : CAddOnListItem : : SetIconImage ; <nl> - interfaces - > GUI . ListItem . GetOverlayImage = V2 : : KodiAPI : : GUI : : CAddOnListItem : : GetOverlayImage ; <nl> - interfaces - > GUI . ListItem . SetOverlayImage = V2 : : KodiAPI : : GUI : : CAddOnListItem : : SetOverlayImage ; <nl> - interfaces - > GUI . ListItem . SetThumbnailImage = V2 : : KodiAPI : : GUI : : CAddOnListItem : : SetThumbnailImage ; <nl> - interfaces - > GUI . ListItem . SetArt = V2 : : KodiAPI : : GUI : : CAddOnListItem : : SetArt ; <nl> - interfaces - > GUI . ListItem . GetArt = V2 : : KodiAPI : : GUI : : CAddOnListItem : : GetArt ; <nl> - interfaces - > GUI . ListItem . SetArtFallback = V2 : : KodiAPI : : GUI : : CAddOnListItem : : SetArtFallback ; <nl> - interfaces - > GUI . ListItem . HasArt = V2 : : KodiAPI : : GUI : : CAddOnListItem : : HasArt ; <nl> - interfaces - > GUI . ListItem . Select = V2 : : KodiAPI : : GUI : : CAddOnListItem : : Select ; <nl> - interfaces - > GUI . ListItem . IsSelected = V2 : : KodiAPI : : GUI : : CAddOnListItem : : IsSelected ; <nl> - interfaces - > GUI . ListItem . HasIcon = V2 : : KodiAPI : : GUI : : CAddOnListItem : : HasIcon ; <nl> - interfaces - > GUI . ListItem . HasOverlay = V2 : : KodiAPI : : GUI : : CAddOnListItem : : HasOverlay ; <nl> - interfaces - > GUI . ListItem . IsFileItem = V2 : : KodiAPI : : GUI : : CAddOnListItem : : IsFileItem ; <nl> - interfaces - > GUI . ListItem . IsFolder = V2 : : KodiAPI : : GUI : : CAddOnListItem : : IsFolder ; <nl> - interfaces - > GUI . ListItem . SetProperty = V2 : : KodiAPI : : GUI : : CAddOnListItem : : SetProperty ; <nl> - interfaces - > GUI . ListItem . 
GetProperty = V2 : : KodiAPI : : GUI : : CAddOnListItem : : GetProperty ; <nl> - interfaces - > GUI . ListItem . ClearProperty = V2 : : KodiAPI : : GUI : : CAddOnListItem : : ClearProperty ; <nl> - interfaces - > GUI . ListItem . ClearProperties = V2 : : KodiAPI : : GUI : : CAddOnListItem : : ClearProperties ; <nl> - interfaces - > GUI . ListItem . HasProperties = V2 : : KodiAPI : : GUI : : CAddOnListItem : : HasProperties ; <nl> - interfaces - > GUI . ListItem . HasProperty = V2 : : KodiAPI : : GUI : : CAddOnListItem : : HasProperty ; <nl> - interfaces - > GUI . ListItem . SetPath = V2 : : KodiAPI : : GUI : : CAddOnListItem : : SetPath ; <nl> - interfaces - > GUI . ListItem . GetPath = V2 : : KodiAPI : : GUI : : CAddOnListItem : : GetPath ; <nl> - interfaces - > GUI . ListItem . GetDuration = V2 : : KodiAPI : : GUI : : CAddOnListItem : : GetDuration ; <nl> - interfaces - > GUI . ListItem . SetSubtitles = V2 : : KodiAPI : : GUI : : CAddOnListItem : : SetSubtitles ; <nl> - interfaces - > GUI . ListItem . SetMimeType = V2 : : KodiAPI : : GUI : : CAddOnListItem : : SetMimeType ; <nl> - interfaces - > GUI . ListItem . SetContentLookup = V2 : : KodiAPI : : GUI : : CAddOnListItem : : SetContentLookup ; <nl> - interfaces - > GUI . ListItem . AddContextMenuItems = V2 : : KodiAPI : : GUI : : CAddOnListItem : : AddContextMenuItems ; <nl> - interfaces - > GUI . ListItem . AddStreamInfo = V2 : : KodiAPI : : GUI : : CAddOnListItem : : AddStreamInfo ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 08acd8e4b189 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIListItem . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnListItem <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 4e0db287eea7 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIWindow . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . 
tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_GUIWindow . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / GUI / Addon_GUIWindow . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnWindow : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > GUI . Window . New = V2 : : KodiAPI : : GUI : : CAddOnWindow : : New ; <nl> - interfaces - > GUI . Window . Delete = V2 : : KodiAPI : : GUI : : CAddOnWindow : : Delete ; <nl> - interfaces - > GUI . Window . SetCallbacks = V2 : : KodiAPI : : GUI : : CAddOnWindow : : SetCallbacks ; <nl> - interfaces - > GUI . Window . Show = V2 : : KodiAPI : : GUI : : CAddOnWindow : : Show ; <nl> - interfaces - > GUI . Window . Close = V2 : : KodiAPI : : GUI : : CAddOnWindow : : Close ; <nl> - interfaces - > GUI . Window . DoModal = V2 : : KodiAPI : : GUI : : CAddOnWindow : : DoModal ; <nl> - interfaces - > GUI . Window . SetFocusId = V2 : : KodiAPI : : GUI : : CAddOnWindow : : SetFocusId ; <nl> - interfaces - > GUI . Window . GetFocusId = V2 : : KodiAPI : : GUI : : CAddOnWindow : : GetFocusId ; <nl> - interfaces - > GUI . Window . SetProperty = V2 : : KodiAPI : : GUI : : CAddOnWindow : : SetProperty ; <nl> - interfaces - > GUI . Window . SetPropertyInt = V2 : : KodiAPI : : GUI : : CAddOnWindow : : SetPropertyInt ; <nl> - interfaces - > GUI . Window . SetPropertyBool = V2 : : KodiAPI : : GUI : : CAddOnWindow : : SetPropertyBool ; <nl> - interfaces - > GUI . Window . SetPropertyDouble = V2 : : KodiAPI : : GUI : : CAddOnWindow : : SetPropertyDouble ; <nl> - interfaces - > GUI . Window . GetProperty = V2 : : KodiAPI : : GUI : : CAddOnWindow : : GetProperty ; <nl> - interfaces - > GUI . Window . GetPropertyInt = V2 : : KodiAPI : : GUI : : CAddOnWindow : : GetPropertyInt ; <nl> - interfaces - > GUI . Window . GetPropertyBool = V2 : : KodiAPI : : GUI : : CAddOnWindow : : GetPropertyBool ; <nl> - interfaces - > GUI . Window . GetPropertyDouble = V2 : : KodiAPI : : GUI : : CAddOnWindow : : GetPropertyDouble ; <nl> - interfaces - > GUI . Window . ClearProperties = V2 : : KodiAPI : : GUI : : CAddOnWindow : : ClearProperties ; <nl> - interfaces - > GUI . Window . ClearProperty = V2 : : KodiAPI : : GUI : : CAddOnWindow : : ClearProperty ; <nl> - interfaces - > GUI . Window . GetListSize = V2 : : KodiAPI : : GUI : : CAddOnWindow : : GetListSize ; <nl> - interfaces - > GUI . Window . ClearList = V2 : : KodiAPI : : GUI : : CAddOnWindow : : ClearList ; <nl> - interfaces - > GUI . Window . 
AddItem = V2 : : KodiAPI : : GUI : : CAddOnWindow : : AddItem ; <nl> - interfaces - > GUI . Window . AddStringItem = V2 : : KodiAPI : : GUI : : CAddOnWindow : : AddStringItem ; <nl> - interfaces - > GUI . Window . RemoveItem = V2 : : KodiAPI : : GUI : : CAddOnWindow : : RemoveItem ; <nl> - interfaces - > GUI . Window . RemoveItemFile = V2 : : KodiAPI : : GUI : : CAddOnWindow : : RemoveItemFile ; <nl> - interfaces - > GUI . Window . GetListItem = V2 : : KodiAPI : : GUI : : CAddOnWindow : : GetListItem ; <nl> - interfaces - > GUI . Window . SetCurrentListPosition = V2 : : KodiAPI : : GUI : : CAddOnWindow : : SetCurrentListPosition ; <nl> - interfaces - > GUI . Window . GetCurrentListPosition = V2 : : KodiAPI : : GUI : : CAddOnWindow : : GetCurrentListPosition ; <nl> - interfaces - > GUI . Window . SetControlLabel = V2 : : KodiAPI : : GUI : : CAddOnWindow : : SetControlLabel ; <nl> - interfaces - > GUI . Window . MarkDirtyRegion = V2 : : KodiAPI : : GUI : : CAddOnWindow : : MarkDirtyRegion ; <nl> - <nl> - interfaces - > GUI . Window . GetControl_Button = V2 : : KodiAPI : : GUI : : CAddOnWindow : : GetControl_Button ; <nl> - interfaces - > GUI . Window . GetControl_Edit = V2 : : KodiAPI : : GUI : : CAddOnWindow : : GetControl_Edit ; <nl> - interfaces - > GUI . Window . GetControl_FadeLabel = V2 : : KodiAPI : : GUI : : CAddOnWindow : : GetControl_FadeLabel ; <nl> - interfaces - > GUI . Window . GetControl_Image = V2 : : KodiAPI : : GUI : : CAddOnWindow : : GetControl_Image ; <nl> - interfaces - > GUI . Window . GetControl_Label = V2 : : KodiAPI : : GUI : : CAddOnWindow : : GetControl_Label ; <nl> - interfaces - > GUI . Window . GetControl_Spin = V2 : : KodiAPI : : GUI : : CAddOnWindow : : GetControl_Spin ; <nl> - interfaces - > GUI . Window . GetControl_RadioButton = V2 : : KodiAPI : : GUI : : CAddOnWindow : : GetControl_RadioButton ; <nl> - interfaces - > GUI . Window . GetControl_Progress = V2 : : KodiAPI : : GUI : : CAddOnWindow : : GetControl_Progress ; <nl> - interfaces - > GUI . Window . GetControl_RenderAddon = V2 : : KodiAPI : : GUI : : CAddOnWindow : : GetControl_RenderAddon ; <nl> - interfaces - > GUI . Window . GetControl_Slider = V2 : : KodiAPI : : GUI : : CAddOnWindow : : GetControl_Slider ; <nl> - interfaces - > GUI . Window . GetControl_SettingsSlider = V2 : : KodiAPI : : GUI : : CAddOnWindow : : GetControl_SettingsSlider ; <nl> - interfaces - > GUI . Window . GetControl_TextBox = V2 : : KodiAPI : : GUI : : CAddOnWindow : : GetControl_TextBox ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V2 * / <nl> deleted file mode 100644 <nl> index 3aa1d5a2a60a . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Addon_GUIWindow . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2015 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . 
<nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace GUI <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - struct CAddOnWindow <nl> - { <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace GUI * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 5a21fece94f5 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / CMakeLists . txt <nl> ppp / dev / null <nl> <nl> - set ( SOURCES Addon_GUIControlButton . cpp <nl> - Addon_GUIControlEdit . cpp <nl> - Addon_GUIControlFadeLabel . cpp <nl> - Addon_GUIControlImage . cpp <nl> - Addon_GUIControlLabel . cpp <nl> - Addon_GUIControlProgress . cpp <nl> - Addon_GUIControlRadioButton . cpp <nl> - Addon_GUIControlRendering . cpp <nl> - Addon_GUIControlSettingsSlider . cpp <nl> - Addon_GUIControlSlider . cpp <nl> - Addon_GUIControlSpin . cpp <nl> - Addon_GUIControlTextBox . cpp <nl> - Addon_GUIDialogContextMenu . cpp <nl> - Addon_GUIDialogExtendedProgressBar . cpp <nl> - Addon_GUIDialogFileBrowser . cpp <nl> - Addon_GUIDialogKeyboard . cpp <nl> - Addon_GUIDialogNumeric . cpp <nl> - Addon_GUIDialogOK . cpp <nl> - Addon_GUIDialogProgress . cpp <nl> - Addon_GUIDialogSelect . cpp <nl> - Addon_GUIDialogTextViewer . cpp <nl> - Addon_GUIDialogYesNo . cpp <nl> - Addon_GUIGeneral . cpp <nl> - Addon_GUIListItem . cpp <nl> - Addon_GUIWindow . cpp ) <nl> - <nl> - set ( HEADERS Addon_GUIControlButton . h <nl> - Addon_GUIControlEdit . h <nl> - Addon_GUIControlFadeLabel . h <nl> - Addon_GUIControlImage . h <nl> - Addon_GUIControlLabel . h <nl> - Addon_GUIControlProgress . h <nl> - Addon_GUIControlRadioButton . h <nl> - Addon_GUIControlRendering . h <nl> - Addon_GUIControlSettingsSlider . h <nl> - Addon_GUIControlSlider . h <nl> - Addon_GUIControlSpin . h <nl> - Addon_GUIControlTextBox . h <nl> - Addon_GUIDialogContextMenu . h <nl> - Addon_GUIDialogExtendedProgressBar . h <nl> - Addon_GUIDialogFileBrowser . h <nl> - Addon_GUIDialogKeyboard . h <nl> - Addon_GUIDialogNumeric . h <nl> - Addon_GUIDialogOK . h <nl> - Addon_GUIDialogProgress . h <nl> - Addon_GUIDialogSelect . h <nl> - Addon_GUIDialogTextViewer . h <nl> - Addon_GUIDialogYesNo . h <nl> - Addon_GUIGeneral . h <nl> - Addon_GUIListItem . h <nl> - Addon_GUIWindow . h <nl> - . . / . . / . . / . . / kodi - addon - dev - kit / src / api3 / version . h ) <nl> - <nl> - include_directories ( $ { CORE_SOURCE_DIR } / xbmc / addons / kodi - addon - dev - kit / include ) <nl> - <nl> - core_add_library ( api3AddonInterfaces_GUI ) <nl> - <nl> - if ( ENABLE_INTERNAL_FFMPEG ) <nl> - add_dependencies ( api3AddonInterfaces_GUI ffmpeg ) <nl> - endif ( ) <nl> deleted file mode 100644 <nl> index 6f78e0969787 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / GUI / Makefile . in <nl> ppp / dev / null <nl> <nl> - SRCS = Addon_GUIControlButton . cpp \ <nl> - Addon_GUIControlEdit . cpp \ <nl> - Addon_GUIControlFadeLabel . cpp \ <nl> - Addon_GUIControlImage . cpp \ <nl> - Addon_GUIControlLabel . cpp \ <nl> - Addon_GUIControlProgress . cpp \ <nl> - Addon_GUIControlRadioButton . cpp \ <nl> - Addon_GUIControlRendering . 
cpp \ <nl> - Addon_GUIControlSettingsSlider . cpp \ <nl> - Addon_GUIControlSlider . cpp \ <nl> - Addon_GUIControlSpin . cpp \ <nl> - Addon_GUIControlTextBox . cpp \ <nl> - Addon_GUIDialogContextMenu . cpp \ <nl> - Addon_GUIDialogExtendedProgressBar . cpp \ <nl> - Addon_GUIDialogFileBrowser . cpp \ <nl> - Addon_GUIDialogKeyboard . cpp \ <nl> - Addon_GUIDialogNumeric . cpp \ <nl> - Addon_GUIDialogOK . cpp \ <nl> - Addon_GUIDialogProgress . cpp \ <nl> - Addon_GUIDialogSelect . cpp \ <nl> - Addon_GUIDialogTextViewer . cpp \ <nl> - Addon_GUIDialogYesNo . cpp \ <nl> - Addon_GUIGeneral . cpp \ <nl> - Addon_GUIListItem . cpp \ <nl> - Addon_GUIWindow . cpp \ <nl> - <nl> - LIB = addon - interface - gui . a <nl> - <nl> - include @ abs_top_srcdir @ / Makefile . include <nl> - - include $ ( patsubst % . cpp , % . P , $ ( patsubst % . c , % . P , $ ( SRCS ) ) ) <nl> deleted file mode 100644 <nl> index 5ee80b74127c . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / PVR / Addon_PVR . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2012 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_PVR . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / PVR / Addon_PVR . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace PVR <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddonInterfacesPVR : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - / * write Kodi PVR specific add - on function addresses to callback table * / <nl> - interfaces - > PVR . add_menu_hook = V2 : : KodiAPI : : PVR : : CAddonInterfacesPVR : : add_menu_hook ; <nl> - interfaces - > PVR . recording = V2 : : KodiAPI : : PVR : : CAddonInterfacesPVR : : recording ; <nl> - interfaces - > PVR . connection_state_change = V2 : : KodiAPI : : PVR : : CAddonInterfacesPVR : : connection_state_change ; <nl> - interfaces - > PVR . epg_event_state_change = V2 : : KodiAPI : : PVR : : CAddonInterfacesPVR : : epg_event_state_change ; <nl> - <nl> - interfaces - > PVR . transfer_epg_entry = V2 : : KodiAPI : : PVR : : CAddonInterfacesPVR : : transfer_epg_entry ; <nl> - interfaces - > PVR . transfer_channel_entry = V2 : : KodiAPI : : PVR : : CAddonInterfacesPVR : : transfer_channel_entry ; <nl> - interfaces - > PVR . transfer_channel_group = V2 : : KodiAPI : : PVR : : CAddonInterfacesPVR : : transfer_channel_group ; <nl> - interfaces - > PVR . 
transfer_channel_group_member = V2 : : KodiAPI : : PVR : : CAddonInterfacesPVR : : transfer_channel_group_member ; <nl> - interfaces - > PVR . transfer_timer_entry = V2 : : KodiAPI : : PVR : : CAddonInterfacesPVR : : transfer_timer_entry ; <nl> - interfaces - > PVR . transfer_recording_entry = V2 : : KodiAPI : : PVR : : CAddonInterfacesPVR : : transfer_recording_entry ; <nl> - <nl> - interfaces - > PVR . trigger_channel_update = V2 : : KodiAPI : : PVR : : CAddonInterfacesPVR : : trigger_channel_update ; <nl> - interfaces - > PVR . trigger_channel_groups_update = V2 : : KodiAPI : : PVR : : CAddonInterfacesPVR : : trigger_channel_groups_update ; <nl> - interfaces - > PVR . trigger_timer_update = V2 : : KodiAPI : : PVR : : CAddonInterfacesPVR : : trigger_timer_update ; <nl> - interfaces - > PVR . trigger_recording_update = V2 : : KodiAPI : : PVR : : CAddonInterfacesPVR : : trigger_recording_update ; <nl> - interfaces - > PVR . trigger_epg_update = V2 : : KodiAPI : : PVR : : CAddonInterfacesPVR : : trigger_epg_update ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace PVR * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 4e44ca2df907 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / PVR / Addon_PVR . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2012 - 2016 Team KODI <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with KODI ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace PVR <nl> - { <nl> - extern " C " <nl> - { <nl> - class CAddonInterfacesPVR <nl> - { <nl> - public : <nl> - static void Init ( struct CB_AddOnLib * callbacks ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace PVR * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 82d593e14379 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / PVR / CMakeLists . txt <nl> ppp / dev / null <nl> <nl> - set ( SOURCES Addon_PVR . cpp ) <nl> - <nl> - set ( HEADERS Addon_PVR . h <nl> - . . / . . / . . / . . / kodi - addon - dev - kit / src / api3 / version . h ) <nl> - <nl> - include_directories ( $ { CORE_SOURCE_DIR } / xbmc / addons / kodi - addon - dev - kit / include ) <nl> - <nl> - core_add_library ( api3AddonInterfaces_PVR ) <nl> - <nl> - if ( ENABLE_INTERNAL_FFMPEG ) <nl> - add_dependencies ( api3AddonInterfaces_PVR ffmpeg ) <nl> - endif ( ) <nl> deleted file mode 100644 <nl> index 3510a0d6cfac . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / PVR / Makefile . in <nl> ppp / dev / null <nl> <nl> - SRCS = Addon_PVR . 
cpp \ <nl> - <nl> - LIB = addon - interface - pvr . a <nl> - <nl> - include @ abs_top_srcdir @ / Makefile . include <nl> - - include $ ( patsubst % . cpp , % . P , $ ( patsubst % . c , % . P , $ ( SRCS ) ) ) <nl> deleted file mode 100644 <nl> index 04e21ca3eccc . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / Peripheral / Addon_Peripheral . cpp <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2014 - 2016 Team Kodi <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with this Program ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - # include " Addon_Peripheral . h " <nl> - # include " addons / binary / interfaces / api3 / AddonInterfaceBase . h " <nl> - # include " addons / binary / interfaces / api2 / Peripheral / Addon_Peripheral . h " <nl> - # include " addons / kodi - addon - dev - kit / include / kodi / api3 / . internal / AddonLib_internal . hpp " <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - namespace Peripheral <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - void CAddOnPeripheral : : Init ( struct CB_AddOnLib * interfaces ) <nl> - { <nl> - interfaces - > Peripheral . trigger_scan = V2 : : KodiAPI : : Peripheral : : CAddOnPeripheral : : trigger_scan ; <nl> - interfaces - > Peripheral . refresh_button_maps = V2 : : KodiAPI : : Peripheral : : CAddOnPeripheral : : refresh_button_maps ; <nl> - } <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace Peripheral * / <nl> - <nl> - } / * namespace KodiAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 486aef83b20f . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / Peripheral / Addon_Peripheral . h <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - / * <nl> - * Copyright ( C ) 2014 - 2016 Team Kodi <nl> - * http : / / kodi . tv <nl> - * <nl> - * This Program is free software ; you can redistribute it and / or modify <nl> - * it under the terms of the GNU General Public License as published by <nl> - * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> - * any later version . <nl> - * <nl> - * This Program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU General Public License <nl> - * along with this Program ; see the file COPYING . If not , see <nl> - * < http : / / www . gnu . org / licenses / > . <nl> - * <nl> - * / <nl> - <nl> - namespace V3 <nl> - { <nl> - namespace KodiAPI <nl> - { <nl> - <nl> - struct CB_AddOnLib ; <nl> - <nl> - namespace Peripheral <nl> - { <nl> - extern " C " <nl> - { <nl> - <nl> - / * ! 
<nl> - * Callbacks for a peripheral add - on to Kodi <nl> - * / <nl> - class CAddOnPeripheral <nl> - { <nl> - public : <nl> - static void Init ( struct CB_AddOnLib * interfaces ) ; <nl> - } ; <nl> - <nl> - } / * extern " C " * / <nl> - } / * namespace InputStream * / <nl> - <nl> - } / * namespace KoidAPI * / <nl> - } / * namespace V3 * / <nl> deleted file mode 100644 <nl> index 53404dbeeaa2 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / Peripheral / CMakeLists . txt <nl> ppp / dev / null <nl> <nl> - set ( SOURCES Addon_Peripheral . cpp ) <nl> - <nl> - set ( HEADERS Addon_Peripheral . h <nl> - . . / . . / . . / . . / kodi - addon - dev - kit / src / api3 / version . h ) <nl> - <nl> - include_directories ( $ { CORE_SOURCE_DIR } / xbmc / addons / kodi - addon - dev - kit / include ) <nl> - <nl> - core_add_library ( api3AddonInterfaces_Peripheral ) <nl> - <nl> - if ( ENABLE_INTERNAL_FFMPEG ) <nl> - add_dependencies ( api3AddonInterfaces_Peripheral ffmpeg ) <nl> - endif ( ) <nl> deleted file mode 100644 <nl> index 6ce6179b7794 . . 000000000000 <nl> mmm a / xbmc / addons / binary / interfaces / api3 / Peripheral / Makefile . in <nl> ppp / dev / null <nl> <nl> - SRCS = Addon_Peripheral . cpp \ <nl> - <nl> - LIB = addon - interface - peripheral . a <nl> - <nl> - include @ abs_top_srcdir @ / Makefile . include <nl> - - include $ ( patsubst % . cpp , % . P , $ ( patsubst % . c , % . P , $ ( SRCS ) ) ) <nl>
Revert " [ addon ] add API level 3 on kodi side for future development - Part 2 "
xbmc/xbmc
d987ee2ae0665f793f474d353513d6d49ab61598
2016-05-20T04:32:16Z
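The reverted files above all follow a single forwarding pattern: a v3 `Init()` that fills a shared callback table with the v2 implementations. Below is a minimal standalone sketch of that shim, with a hypothetical one-entry `CB_AddOnLib` standing in for the real, much larger table:

```cpp
#include <cstdio>

// Hypothetical, much-reduced callback table; the real CB_AddOnLib
// carries many entries per interface group (GUI, PVR, Peripheral, ...).
struct CB_AddOnLib {
  void (*trigger_scan)();
};

namespace V2 {
void trigger_scan() { std::puts("v2 trigger_scan"); }
}

namespace V3 {
struct CAddOnPeripheral {
  // The v3 Init() owns no logic of its own: it re-exports the v2
  // entry points into the table handed to the add-on.
  static void Init(CB_AddOnLib* interfaces) {
    interfaces->trigger_scan = V2::trigger_scan;
  }
};
}

int main() {
  CB_AddOnLib cb{};
  V3::CAddOnPeripheral::Init(&cb);
  cb.trigger_scan();  // dispatches to the v2 implementation
}
```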
mmm a / src / wasm / wasm - memory . cc <nl> ppp b / src / wasm / wasm - memory . cc <nl> bool WasmMemoryTracker : : ReserveAddressSpace ( size_t num_bytes ) { <nl> constexpr size_t kAddressSpaceLimit = 0x80000000 ; / / 2 GiB <nl> # endif <nl> <nl> - int retries = 5 ; / / cmpxchng can fail , retry some number of times . <nl> - do { <nl> - size_t old_count = reserved_address_space_ ; <nl> - if ( ( kAddressSpaceLimit - old_count ) < num_bytes ) return false ; <nl> + while ( true ) { <nl> + size_t old_count = reserved_address_space_ . load ( ) ; <nl> + if ( kAddressSpaceLimit - old_count < num_bytes ) return false ; <nl> if ( reserved_address_space_ . compare_exchange_weak ( old_count , <nl> old_count + num_bytes ) ) { <nl> return true ; <nl> } <nl> - } while ( retries - - > 0 ) ; <nl> - <nl> - return false ; <nl> + } <nl> } <nl> <nl> void WasmMemoryTracker : : ReleaseReservation ( size_t num_bytes ) { <nl>
[wasm] Remove limit on cmpxchg loop
v8/v8
6549dffab64facc85daa50a1160b7a16b42a1b3e
2018-09-10T10:28:27Z
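The change above replaces a bounded retry with the standard unbounded CAS loop: `compare_exchange_weak` may fail spuriously (and under contention), so the idiomatic form retries until it either succeeds or the limit check fails. A self-contained sketch of that idiom, with `kAddressSpaceLimit` and `reserved_address_space` as stand-ins for the tracker's members:

```cpp
#include <atomic>
#include <cstddef>
#include <cstdio>

// Stand-ins for the tracker state in the entry above.
constexpr std::size_t kAddressSpaceLimit = 0x80000000;  // 2 GiB
std::atomic<std::size_t> reserved_address_space{0};

// Reserve num_bytes against the cap, looping until the CAS succeeds
// or the cap would be exceeded. No retry counter is needed: a weak
// CAS failure just means another thread won the race (or the CAS
// failed spuriously), so we reload and try again.
bool ReserveAddressSpace(std::size_t num_bytes) {
  while (true) {
    std::size_t old_count = reserved_address_space.load();
    if (kAddressSpaceLimit - old_count < num_bytes) return false;
    if (reserved_address_space.compare_exchange_weak(old_count,
                                                     old_count + num_bytes)) {
      return true;
    }
  }
}

int main() {
  std::printf("reserved: %d\n", ReserveAddressSpace(1u << 20) ? 1 : 0);
}
```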
mmm a / test / lint / extended - lint - cppcheck . sh <nl> ppp b / test / lint / extended - lint - cppcheck . sh <nl> function join_array { <nl> ENABLED_CHECKS_REGEXP = $ ( join_array " | " " $ { ENABLED_CHECKS [ @ ] } " ) <nl> IGNORED_WARNINGS_REGEXP = $ ( join_array " | " " $ { IGNORED_WARNINGS [ @ ] } " ) <nl> WARNINGS = $ ( git ls - files - - " * . cpp " " * . h " " : ( exclude ) src / leveldb / " " : ( exclude ) src / crc32c / " " : ( exclude ) src / secp256k1 / " " : ( exclude ) src / univalue / " | \ <nl> - xargs cppcheck - - enable = all - j " $ ( getconf _NPROCESSORS_ONLN ) " - - language = c + + - - std = c + + 11 - - template = gcc - D__cplusplus - DCLIENT_VERSION_BUILD - DCLIENT_VERSION_IS_RELEASE - DCLIENT_VERSION_MAJOR - DCLIENT_VERSION_MINOR - DCOPYRIGHT_YEAR - DDEBUG - I src / - q 2 > & 1 | sort - u | \ <nl> + xargs cppcheck - - enable = all - j " $ ( getconf _NPROCESSORS_ONLN ) " - - language = c + + - - std = c + + 17 - - template = gcc - D__cplusplus - DCLIENT_VERSION_BUILD - DCLIENT_VERSION_IS_RELEASE - DCLIENT_VERSION_MAJOR - DCLIENT_VERSION_MINOR - DCOPYRIGHT_YEAR - DDEBUG - I src / - q 2 > & 1 | sort - u | \ <nl> grep - E " $ { ENABLED_CHECKS_REGEXP } " | \ <nl> grep - vE " $ { IGNORED_WARNINGS_REGEXP } " ) <nl> if [ [ $ { WARNINGS } ! = " " ] ] ; then <nl>
lint: Use c++17 std in cppcheck linter
bitcoin/bitcoin
c502a6dbfb854ca827a5a3925394f9e09d29b898
2020-12-01T17:32:26Z
mmm a / drivers / rtaudio / audio_driver_rtaudio . cpp <nl> ppp b / drivers / rtaudio / audio_driver_rtaudio . cpp <nl> Error AudioDriverRtAudio : : init ( ) { <nl> RtAudio : : StreamOptions options ; <nl> <nl> / / set the desired numberOfBuffers <nl> - unsigned int target_number_of_buffers = 4 ; <nl> - options . numberOfBuffers = target_number_of_buffers ; <nl> - <nl> - / / options . <nl> - / / RtAudioStreamFlags flags ; / * ! < A bit - mask of stream flags ( RTAUDIO_NONINTERLEAVED , RTAUDIO_MINIMIZE_LATENCY , RTAUDIO_HOG_DEVICE ) . * / / / <nl> - / / unsigned int numberOfBuffers ; / * ! < Number of stream buffers . * / <nl> - / / std : : string streamName ; / * ! < A stream name ( currently used only in Jack ) . * / <nl> - / / int priority ; / * ! < Scheduling priority of callback thread ( only used with flag RTAUDIO_SCHEDULE_REALTIME ) . * / <nl> + options . numberOfBuffers = 4 ; <nl> <nl> parameters . firstChannel = 0 ; <nl> mix_rate = GLOBAL_DEF ( " audio / mix_rate " , 44100 ) ; <nl> <nl> int latency = GLOBAL_DEF ( " audio / output_latency " , 25 ) ; <nl> - / / calculate desired buffer_size , taking the desired numberOfBuffers into account ( latency depends on numberOfBuffers * buffer_size ) <nl> - unsigned int buffer_size = closest_power_of_2 ( latency * mix_rate / 1000 / target_number_of_buffers ) ; <nl> + / / calculate desired buffer_size <nl> + unsigned int buffer_size = closest_power_of_2 ( latency * mix_rate / 1000 ) ; <nl> <nl> if ( OS : : get_singleton ( ) - > is_stdout_verbose ( ) ) { <nl> print_line ( " audio buffer size : " + itos ( buffer_size ) ) ; <nl> Error AudioDriverRtAudio : : init ( ) { <nl> <nl> short int tries = 2 ; <nl> <nl> - while ( true ) { <nl> - while ( true ) { <nl> - switch ( speaker_mode ) { <nl> - case SPEAKER_MODE_STEREO : parameters . nChannels = 2 ; break ; <nl> - case SPEAKER_SURROUND_51 : parameters . nChannels = 6 ; break ; <nl> - case SPEAKER_SURROUND_71 : parameters . nChannels = 8 ; break ; <nl> - } ; <nl> - <nl> - try { <nl> - dac - > openStream ( & parameters , NULL , RTAUDIO_SINT32 , mix_rate , & buffer_size , & callback , this , & options ) ; <nl> - active = true ; <nl> - <nl> - break ; <nl> - } catch ( RtAudioError & e ) { <nl> - / / try with less channels <nl> - ERR_PRINT ( " Unable to open audio , retrying with fewer channels . . " ) ; <nl> - <nl> - switch ( speaker_mode ) { <nl> - case SPEAKER_MODE_STEREO : speaker_mode = SPEAKER_MODE_STEREO ; break ; <nl> - case SPEAKER_SURROUND_51 : speaker_mode = SPEAKER_SURROUND_51 ; break ; <nl> - case SPEAKER_SURROUND_71 : speaker_mode = SPEAKER_SURROUND_71 ; break ; <nl> - } ; <nl> - } <nl> - } <nl> + while ( tries > = 0 ) { <nl> + switch ( speaker_mode ) { <nl> + case SPEAKER_MODE_STEREO : parameters . nChannels = 2 ; break ; <nl> + case SPEAKER_SURROUND_51 : parameters . nChannels = 6 ; break ; <nl> + case SPEAKER_SURROUND_71 : parameters . nChannels = 8 ; break ; <nl> + } ; <nl> <nl> - / / compare actual numberOfBuffers with the desired one . If not equal , close and reopen the stream with adjusted buffer size , so the desired output_latency is still correct <nl> - if ( target_number_of_buffers ! = options . numberOfBuffers ) { <nl> - if ( tries < = 0 ) { <nl> - ERR_EXPLAIN ( " RtAudio : Unable to set correct number of buffers . 
" ) ; <nl> - ERR_FAIL_V ( ERR_UNAVAILABLE ) ; <nl> - break ; <nl> - } <nl> + try { <nl> + dac - > openStream ( & parameters , NULL , RTAUDIO_SINT32 , mix_rate , & buffer_size , & callback , this , & options ) ; <nl> + active = true ; <nl> <nl> - try { <nl> - dac - > closeStream ( ) ; <nl> - active = false ; <nl> - } catch ( RtAudioError & e ) { <nl> - ERR_PRINT ( e . what ( ) ) ; <nl> - ERR_FAIL_V ( ERR_UNAVAILABLE ) ; <nl> - break ; <nl> + break ; <nl> + } catch ( RtAudioError & e ) { <nl> + / / try with less channels <nl> + ERR_PRINT ( " Unable to open audio , retrying with fewer channels . . " ) ; <nl> + <nl> + switch ( speaker_mode ) { <nl> + case SPEAKER_SURROUND_51 : speaker_mode = SPEAKER_MODE_STEREO ; break ; <nl> + case SPEAKER_SURROUND_71 : speaker_mode = SPEAKER_SURROUND_51 ; break ; <nl> } <nl> - if ( OS : : get_singleton ( ) - > is_stdout_verbose ( ) ) <nl> - print_line ( " RtAudio : Desired number of buffers ( " + itos ( target_number_of_buffers ) + " ) not available . Using " + itos ( options . numberOfBuffers ) + " instead . Reopening stream with adjusted buffer_size . " ) ; <nl> <nl> - / / new buffer size dependent on the ratio between set and actual numberOfBuffers <nl> - buffer_size = buffer_size / ( options . numberOfBuffers / target_number_of_buffers ) ; <nl> - target_number_of_buffers = options . numberOfBuffers ; <nl> tries - - ; <nl> - } else { <nl> - break ; <nl> } <nl> } <nl> <nl>
Merge pull request from marcelofg55/rtaudio_buffer_fix
godotengine/godot
ddbd1330973bb8e91fc765c6fdd0470aab44fde4
2017-08-28T21:03:04Z
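The merged fix turns two nested infinite loops into one bounded retry that genuinely downgrades the channel layout; note that the replaced fallback switch mapped each mode back to itself, so the "retry with fewer channels" branch never actually changed anything. A toy sketch of the fixed pattern, with a hypothetical `openStream` standing in for the RtAudio call:

```cpp
#include <cstdio>
#include <stdexcept>

// Toy stand-in for dac->openStream(): pretend the device only
// accepts stereo, so wider layouts throw.
void openStream(int channels) {
  if (channels > 2) throw std::runtime_error("unsupported channel count");
}

int main() {
  // Bounded fallback loop in the shape the fix uses: try the preferred
  // layout and, on failure, step down 7.1 -> 5.1 -> stereo.
  int channels = 8;
  int tries = 2;
  bool active = false;
  while (tries >= 0) {
    try {
      openStream(channels);
      active = true;
      break;
    } catch (const std::runtime_error& e) {
      std::fprintf(stderr, "retrying with fewer channels: %s\n", e.what());
      channels = (channels == 8) ? 6 : 2;
      --tries;
    }
  }
  std::printf("active=%d channels=%d\n", active ? 1 : 0, channels);
}
```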
mmm a / src / library_async . js <nl> ppp b / src / library_async . js <nl> mergeInto ( LibraryManager . library , { <nl> asm . setAsyncState ( s ) ; <nl> } , <nl> handle : function ( doAsyncOp ) { <nl> + Module [ ' noExitRuntime ' ] = true ; <nl> if ( EmterpreterAsync . state = = = 0 ) { <nl> / / save the stack we want to resume . this lets other code run in between <nl> / / XXX this assumes that this stack top never ever leak ! exceptions might violate that <nl> mmm a / tests / idbstore . c <nl> ppp b / tests / idbstore . c <nl> <nl> # include < stdio . h > <nl> # include < string . h > <nl> # include < assert . h > <nl> + # include < stdlib . h > <nl> <nl> # include < emscripten . h > <nl> <nl> void test ( ) { <nl> # endif <nl> } <nl> <nl> + void never ( ) { <nl> + EM_ASM ( { alert ( ' this should never be reached ! runtime must not be shut down ! ' ) } ) ; <nl> + assert ( 0 ) ; <nl> + while ( 1 ) { } <nl> + } <nl> + <nl> int main ( ) { <nl> + atexit ( never ) ; <nl> test ( ) ; <nl> emscripten_exit_with_live_runtime ( ) ; <nl> return 0 ; <nl> mmm a / tests / idbstore_sync . c <nl> ppp b / tests / idbstore_sync . c <nl> <nl> # include < stdio . h > <nl> # include < string . h > <nl> # include < assert . h > <nl> + # include < stdlib . h > <nl> <nl> # include < emscripten . h > <nl> <nl> void test ( ) { <nl> REPORT_RESULT ( ) ; <nl> } <nl> <nl> + void never ( ) { <nl> + EM_ASM ( { alert ( ' this should never be reached ! runtime must not be shut down ! ' ) } ) ; <nl> + assert ( 0 ) ; <nl> + while ( 1 ) { } <nl> + } <nl> + <nl> int main ( ) { <nl> + atexit ( never ) ; <nl> test ( ) ; <nl> return 0 ; <nl> } <nl>
do not exit runtime when doing async in emterpreter
emscripten-core/emscripten
9e7bb10256166e1bc5b3eed7fca2bc9a1e0feaf9
2015-02-05T18:14:25Z
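The `never()` sentinel added to both tests relies on `atexit` handlers not running while the runtime stays alive. A plain-C++ approximation of that shape, using `std::_Exit` to stand in for `emscripten_exit_with_live_runtime()` — an assumption for illustration only, since outside Emscripten there is no way to keep a "live runtime" after leaving `main`:

```cpp
#include <cstdio>
#include <cstdlib>

// Sentinel in the shape the tests add: this handler must never run.
void never() {
  std::fprintf(stderr, "this should never be reached!\n");
  std::abort();
}

int main() {
  std::atexit(never);
  std::puts("async work would happen here");
  // _Exit() terminates without running atexit handlers -- loosely
  // analogous to the Emscripten call, which returns to the browser
  // with the runtime (and its unrun atexit queue) intact instead of
  // shutting down. A normal `return 0;` here would fire never().
  std::_Exit(0);
}
```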
mmm a / src / google / protobuf / compiler / js / js_generator . cc <nl> ppp b / src / google / protobuf / compiler / js / js_generator . cc <nl> void Generator : : GenerateProvides ( const GeneratorOptions & options , <nl> if ( options . import_style = = GeneratorOptions : : kImportCommonJsStrict ) { <nl> string namespaceObject = * it ; <nl> / / Remove " proto . " from the namespace object <nl> - GOOGLE_CHECK ( namespaceObject . compare ( 0 , 6 , " proto . " ) ) ; <nl> + GOOGLE_CHECK ( namespaceObject . compare ( 0 , 6 , " proto . " ) = = 0 ) ; <nl> namespaceObject . erase ( 0 , 6 ) ; <nl> printer - > Print ( " goog . exportSymbol ( ' $ name $ ' , null , proto ) ; \ n " , " name " , <nl> namespaceObject ) ; <nl>
fix commonjs js provide GOOGLE_CHECK
protocolbuffers/protobuf
f42ddff0ddc20184c4436ae274fcd19231142cdf
2018-07-12T20:47:16Z
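The one-line fix works because `std::string::compare` returns 0 on a match, so the unadorned call is falsy exactly when the prefix is present — the check was inverted. A tiny repro of the pitfall:

```cpp
#include <cassert>
#include <string>

int main() {
  std::string ns = "proto.foo.Bar";

  // std::string::compare returns 0 on a match, so treating the raw
  // return value as "true means equal" inverts the test -- exactly
  // the bug the one-line fix above addresses.
  assert(ns.compare(0, 6, "proto.") == 0);  // correct: prefix matches

  // The pre-fix form, GOOGLE_CHECK(ns.compare(0, 6, "proto.")),
  // would only pass when the prefix did NOT match.

  ns.erase(0, 6);
  assert(ns == "foo.Bar");
  return 0;
}
```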
mmm a / xbmc / CueDocument . cpp <nl> ppp b / xbmc / CueDocument . cpp <nl> int CCueDocument : : ExtractTimeFromIndex ( const CStdString & index ) <nl> } <nl> StringUtils : : TrimLeft ( numberTime ) ; <nl> / / split the resulting string <nl> - CStdStringArray time ; <nl> - StringUtils : : SplitString ( numberTime , " : " , time ) ; <nl> + vector < string > time = StringUtils : : Split ( numberTime , " : " ) ; <nl> if ( time . size ( ) ! = 3 ) <nl> return - 1 ; <nl> <nl> mmm a / xbmc / FileItem . cpp <nl> ppp b / xbmc / FileItem . cpp <nl> void CFileItemList : : FilterCueItems ( ) <nl> } <nl> else <nl> { / / try replacing the extension with one of our allowed ones . <nl> - CStdStringArray extensions ; <nl> - StringUtils : : SplitString ( g_advancedSettings . m_musicExtensions , " | " , extensions ) ; <nl> - for ( unsigned int i = 0 ; i < extensions . size ( ) ; i + + ) <nl> + vector < string > extensions = StringUtils : : Split ( g_advancedSettings . m_musicExtensions , " | " ) ; <nl> + for ( vector < string > : : const_iterator i = extensions . begin ( ) ; i ! = extensions . end ( ) ; + + i ) <nl> { <nl> - strMediaFile = URIUtils : : ReplaceExtension ( pItem - > GetPath ( ) , extensions [ i ] ) ; <nl> + strMediaFile = URIUtils : : ReplaceExtension ( pItem - > GetPath ( ) , * i ) ; <nl> CFileItem item ( strMediaFile , false ) ; <nl> if ( ! item . IsCUESheet ( ) & & ! item . IsPlayList ( ) & & Contains ( strMediaFile ) ) <nl> { <nl> CStdString CFileItem : : GetUserMusicThumb ( bool alwaysCheckRemote / * = false * / , bo <nl> / / if a folder , check for folder . jpg <nl> if ( m_bIsFolder & & ! IsFileFolder ( ) & & ( ! IsRemote ( ) | | alwaysCheckRemote | | CSettings : : Get ( ) . GetBool ( " musicfiles . findremotethumbs " ) ) ) <nl> { <nl> - CStdStringArray thumbs ; <nl> - StringUtils : : SplitString ( g_advancedSettings . m_musicThumbs , " | " , thumbs ) ; <nl> - for ( unsigned int i = 0 ; i < thumbs . size ( ) ; + + i ) <nl> + vector < string > thumbs = StringUtils : : Split ( g_advancedSettings . m_musicThumbs , " | " ) ; <nl> + for ( vector < string > : : const_iterator i = thumbs . begin ( ) ; i ! = thumbs . end ( ) ; + + i ) <nl> { <nl> - CStdString folderThumb ( GetFolderThumb ( thumbs [ i ] ) ) ; <nl> + CStdString folderThumb ( GetFolderThumb ( * i ) ) ; <nl> if ( CFile : : Exists ( folderThumb ) ) <nl> { <nl> return folderThumb ; <nl> CStdString CFileItem : : GetLocalFanart ( ) const <nl> items . Append ( moreItems ) ; <nl> } <nl> <nl> - CStdStringArray fanarts ; <nl> - StringUtils : : SplitString ( g_advancedSettings . m_fanartImages , " | " , fanarts ) ; <nl> + vector < string > fanarts = StringUtils : : Split ( g_advancedSettings . m_fanartImages , " | " ) ; <nl> <nl> strFile = URIUtils : : ReplaceExtension ( strFile , " - fanart " ) ; <nl> fanarts . insert ( m_bIsFolder ? fanarts . end ( ) : fanarts . begin ( ) , URIUtils : : GetFileName ( strFile ) ) ; <nl> CStdString CFileItem : : GetLocalFanart ( ) const <nl> if ( ! strFile2 . empty ( ) ) <nl> fanarts . insert ( m_bIsFolder ? fanarts . end ( ) : fanarts . begin ( ) , URIUtils : : GetFileName ( strFile2 ) ) ; <nl> <nl> - for ( unsigned int i = 0 ; i < fanarts . size ( ) ; + + i ) <nl> + for ( vector < string > : : const_iterator i = fanarts . begin ( ) ; i ! = fanarts . end ( ) ; + + i ) <nl> { <nl> for ( int j = 0 ; j < items . 
Size ( ) ; j + + ) <nl> { <nl> CStdString strCandidate = URIUtils : : GetFileName ( items [ j ] - > m_strPath ) ; <nl> URIUtils : : RemoveExtension ( strCandidate ) ; <nl> - CStdString strFanart = fanarts [ i ] ; <nl> + CStdString strFanart = * i ; <nl> URIUtils : : RemoveExtension ( strFanart ) ; <nl> if ( StringUtils : : EqualsNoCase ( strCandidate , strFanart ) ) <nl> return items [ j ] - > m_strPath ; <nl> mmm a / xbmc / Util . cpp <nl> ppp b / xbmc / Util . cpp <nl> void CUtil : : ScanForExternalSubtitles ( const CStdString & strMovie , std : : vector < CSt <nl> int iSize = strLookInPaths . size ( ) ; <nl> for ( int i = 0 ; i < iSize ; + + i ) <nl> { <nl> - CStdStringArray directories ; <nl> - int nTokens = StringUtils : : SplitString ( strLookInPaths [ i ] , " / " , directories ) ; <nl> - if ( nTokens = = 1 ) <nl> - StringUtils : : SplitString ( strLookInPaths [ i ] , " \ \ " , directories ) ; <nl> + vector < string > directories = StringUtils : : Split ( strLookInPaths [ i ] , " / " ) ; <nl> + if ( directories . size ( ) = = 1 ) <nl> + directories = StringUtils : : Split ( strLookInPaths [ i ] , " \ \ " ) ; <nl> <nl> / / if it ' s inside a cdX dir , add parent path <nl> if ( directories . size ( ) > = 2 & & directories [ directories . size ( ) - 2 ] . size ( ) = = 3 & & StringUtils : : StartsWithNoCase ( directories [ directories . size ( ) - 2 ] , " cd " ) ) / / SplitString returns empty token as last item , hence size - 2 <nl> mmm a / xbmc / addons / GUIDialogAddonSettings . cpp <nl> ppp b / xbmc / addons / GUIDialogAddonSettings . cpp <nl> bool CGUIDialogAddonSettings : : ShowVirtualKeyboard ( int iControl ) <nl> bool bUseFileDirectories = false ; <nl> if ( option ) <nl> { <nl> - vector < CStdString > options ; <nl> - StringUtils : : SplitString ( option , " | " , options ) ; <nl> + vector < string > options = StringUtils : : Split ( option , " | " ) ; <nl> bUseThumbs = find ( options . begin ( ) , options . end ( ) , " usethumbs " ) ! = options . end ( ) ; <nl> bUseFileDirectories = find ( options . begin ( ) , options . end ( ) , " treatasfolder " ) ! = options . end ( ) ; <nl> } <nl> bool CGUIDialogAddonSettings : : ShowVirtualKeyboard ( int iControl ) <nl> const char * strType = setting - > Attribute ( " addontype " ) ; <nl> if ( strType ) <nl> { <nl> - CStdStringArray addonTypes ; <nl> - StringUtils : : SplitString ( strType , " , " , addonTypes ) ; <nl> + vector < string > addonTypes = StringUtils : : Split ( strType , " , " ) ; <nl> vector < ADDON : : TYPE > types ; <nl> - for ( unsigned int i = 0 ; i < addonTypes . size ( ) ; i + + ) <nl> + for ( vector < string > : : iterator i = addonTypes . begin ( ) ; i ! = addonTypes . end ( ) ; + + i ) <nl> { <nl> - StringUtils : : Trim ( addonTypes [ i ] ) ; <nl> - ADDON : : TYPE type = TranslateType ( addonTypes [ i ] ) ; <nl> + StringUtils : : Trim ( * i ) ; <nl> + ADDON : : TYPE type = TranslateType ( * i ) ; <nl> if ( type ! = ADDON_UNKNOWN ) <nl> types . push_back ( type ) ; <nl> } <nl> void CGUIDialogAddonSettings : : CreateControls ( ) <nl> float fMin = 0 . 0f ; <nl> float fMax = 100 . 0f ; <nl> float fInc = 1 . 0f ; <nl> - vector < CStdString > range ; <nl> - StringUtils : : SplitString ( setting - > Attribute ( " range " ) , " , " , range ) ; <nl> + vector < std : : string > range = StringUtils : : Split ( setting - > Attribute ( " range " ) , " , " ) ; <nl> if ( range . size ( ) > 1 ) <nl> { <nl> - fMin = ( float ) atof ( range [ 0 ] ) ; <nl> + fMin = ( float ) atof ( range [ 0 ] . c_str ( ) ) ; <nl> if ( range . 
size ( ) > 2 ) <nl> { <nl> - fMax = ( float ) atof ( range [ 2 ] ) ; <nl> - fInc = ( float ) atof ( range [ 1 ] ) ; <nl> + fMax = ( float ) atof ( range [ 2 ] . c_str ( ) ) ; <nl> + fInc = ( float ) atof ( range [ 1 ] . c_str ( ) ) ; <nl> } <nl> else <nl> - fMax = ( float ) atof ( range [ 1 ] ) ; <nl> + fMax = ( float ) atof ( range [ 1 ] . c_str ( ) ) ; <nl> } <nl> <nl> CStdString option = setting - > Attribute ( " option " ) ; <nl> void CGUIDialogAddonSettings : : CreateControls ( ) <nl> CStdString CGUIDialogAddonSettings : : GetAddonNames ( const CStdString & addonIDslist ) const <nl> { <nl> CStdString retVal ; <nl> - CStdStringArray addons ; <nl> - StringUtils : : SplitString ( addonIDslist , " , " , addons ) ; <nl> - for ( CStdStringArray : : const_iterator it = addons . begin ( ) ; it ! = addons . end ( ) ; it + + ) <nl> + vector < string > addons = StringUtils : : Split ( addonIDslist , " , " ) ; <nl> + for ( vector < string > : : const_iterator it = addons . begin ( ) ; it ! = addons . end ( ) ; it + + ) <nl> { <nl> if ( ! retVal . empty ( ) ) <nl> retVal + = " , " ; <nl> mmm a / xbmc / addons / PluginSource . cpp <nl> ppp b / xbmc / addons / PluginSource . cpp <nl> void CPluginSource : : SetProvides ( const CStdString & content ) <nl> vector < CStdString > provides ; <nl> if ( ! content . empty ( ) ) <nl> { <nl> - StringUtils : : SplitString ( content , " " , provides ) ; <nl> - for ( unsigned int i = 0 ; i < provides . size ( ) ; + + i ) <nl> + vector < string > provides = StringUtils : : Split ( content , " " ) ; <nl> + for ( vector < string > : : const_iterator i = provides . begin ( ) ; i ! = provides . end ( ) ; + + i ) <nl> { <nl> - Content content = Translate ( provides [ i ] ) ; <nl> + Content content = Translate ( * i ) ; <nl> if ( content ! = UNKNOWN ) <nl> m_providedContent . insert ( content ) ; <nl> } <nl> mmm a / xbmc / addons / Skin . cpp <nl> ppp b / xbmc / addons / Skin . cpp <nl> CSkinInfo : : CSkinInfo ( const cp_extension_t * ext ) <nl> bool defRes = CAddonMgr : : Get ( ) . GetExtValue ( * i , " @ default " ) . Equals ( " true " ) ; <nl> CStdString folder = CAddonMgr : : Get ( ) . GetExtValue ( * i , " @ folder " ) ; <nl> float aspect = 0 ; <nl> - CStdStringArray fracs ; <nl> CStdString strAspect = CAddonMgr : : Get ( ) . GetExtValue ( * i , " @ aspect " ) ; <nl> - StringUtils : : SplitString ( strAspect , " : " , fracs ) ; <nl> + vector < string > fracs = StringUtils : : Split ( strAspect , " : " ) ; <nl> if ( fracs . size ( ) = = 2 ) <nl> aspect = ( float ) ( atof ( fracs [ 0 ] . c_str ( ) ) / atof ( fracs [ 1 ] . c_str ( ) ) ) ; <nl> if ( width > 0 & & height > 0 ) <nl> mmm a / xbmc / cores / DllLoader / DllLoaderContainer . cpp <nl> ppp b / xbmc / cores / DllLoader / DllLoaderContainer . cpp <nl> LibraryLoader * DllLoaderContainer : : FindModule ( const char * sName , const char * sCu <nl> } <nl> <nl> / / in environment variable ? <nl> - CStdStringArray vecEnv ; <nl> + std : : vector < std : : string > vecEnv ; <nl> <nl> # if defined ( TARGET_ANDROID ) <nl> CStdString systemLibs = getenv ( " XBMC_ANDROID_SYSTEM_LIBS " ) ; <nl> - StringUtils : : SplitString ( systemLibs , " : " , vecEnv ) ; <nl> + vecEnv = StringUtils : : Split ( systemLibs , " : " ) ; <nl> CStdString localLibs = getenv ( " XBMC_ANDROID_LIBS " ) ; <nl> vecEnv . insert ( vecEnv . 
begin ( ) , localLibs ) ; <nl> # else <nl> - StringUtils : : SplitString ( ENV_PATH , " ; " , vecEnv ) ; <nl> + vecEnv = StringUtils : : Split ( ENV_PATH , " ; " ) ; <nl> # endif <nl> LibraryLoader * pDll = NULL ; <nl> <nl> - for ( int i = 0 ; i < ( int ) vecEnv . size ( ) ; + + i ) <nl> + for ( std : : vector < std : : string > : : const_iterator i = vecEnv . begin ( ) ; i ! = vecEnv . end ( ) ; + + i ) <nl> { <nl> - CStdString strPath = vecEnv [ i ] ; <nl> + CStdString strPath = * i ; <nl> URIUtils : : AddSlashAtEnd ( strPath ) ; <nl> <nl> # ifdef LOGALL <nl> mmm a / xbmc / cores / ExternalPlayer / ExternalPlayer . cpp <nl> ppp b / xbmc / cores / ExternalPlayer / ExternalPlayer . cpp <nl> void CExternalPlayer : : Process ( ) <nl> { <nl> for ( unsigned int i = 0 ; i < m_filenameReplacers . size ( ) ; i + + ) <nl> { <nl> - std : : vector < CStdString > vecSplit ; <nl> - StringUtils : : SplitString ( m_filenameReplacers [ i ] , " , " , vecSplit ) ; <nl> + std : : vector < std : : string > vecSplit = StringUtils : : Split ( m_filenameReplacers [ i ] , " , " ) ; <nl> <nl> / / something is wrong , go to next substitution <nl> if ( vecSplit . size ( ) ! = 4 ) <nl> mmm a / xbmc / cores / dvdplayer / Edl . cpp <nl> ppp b / xbmc / cores / dvdplayer / Edl . cpp <nl> bool CEdl : : ReadEdl ( const CStdString & strMovie , const float fFramesPerSecond ) <nl> { <nl> if ( strFields [ i ] . find ( " : " ) ! = std : : string : : npos ) / / HH : MM : SS . sss format <nl> { <nl> - CStdStringArray fieldParts ; <nl> - StringUtils : : SplitString ( strFields [ i ] , " . " , fieldParts ) ; <nl> + vector < string > fieldParts = StringUtils : : Split ( strFields [ i ] , " . " ) ; <nl> if ( fieldParts . size ( ) = = 1 ) / / No ms <nl> { <nl> iCutStartEnd [ i ] = StringUtils : : TimeStringToSeconds ( fieldParts [ 0 ] ) * ( int64_t ) 1000 ; / / seconds to ms <nl> bool CEdl : : ReadEdl ( const CStdString & strMovie , const float fFramesPerSecond ) <nl> { <nl> fieldParts [ 1 ] = fieldParts [ 1 ] . substr ( 0 , 3 ) ; <nl> } <nl> - iCutStartEnd [ i ] = ( int64_t ) StringUtils : : TimeStringToSeconds ( fieldParts [ 0 ] ) * 1000 + atoi ( fieldParts [ 1 ] ) ; / / seconds to ms <nl> + iCutStartEnd [ i ] = ( int64_t ) StringUtils : : TimeStringToSeconds ( fieldParts [ 0 ] ) * 1000 + atoi ( fieldParts [ 1 ] . c_str ( ) ) ; / / seconds to ms <nl> } <nl> else <nl> { <nl> mmm a / xbmc / cores / paplayer / PCMCodec . cpp <nl> ppp b / xbmc / cores / paplayer / PCMCodec . cpp <nl> bool PCMCodec : : CanInit ( ) <nl> <nl> void PCMCodec : : SetMimeParams ( const CStdString & strMimeParams ) <nl> { <nl> - CStdStringArray mimeParams ; <nl> - <nl> / / if there are no parameters , the default is 2 channels , 44100 samples / sec <nl> m_Channels = 2 ; <nl> m_SampleRate = 44100 ; <nl> <nl> - StringUtils : : SplitString ( strMimeParams , " ; " , mimeParams ) ; <nl> - for ( size_t i = 0 ; i < mimeParams . size ( ) ; i + + ) <nl> + std : : vector < std : : string > mimeParams = StringUtils : : Split ( strMimeParams , " ; " ) ; <nl> + for ( std : : vector < std : : string > : : const_iterator i = mimeParams . begin ( ) ; i ! = mimeParams . end ( ) ; + + i ) <nl> { <nl> - CStdStringArray thisParam ; <nl> - StringUtils : : SplitString ( mimeParams [ i ] , " = " , thisParam , 2 ) ; <nl> + std : : vector < std : : string > thisParam = StringUtils : : Split ( * i , " = " , 2 ) ; <nl> if ( thisParam . 
size ( ) > 1 ) <nl> { <nl> if ( thisParam [ 0 ] = = " rate " ) <nl> { <nl> StringUtils : : Trim ( thisParam [ 1 ] ) ; <nl> - m_SampleRate = atoi ( thisParam [ 1 ] ) ; <nl> + m_SampleRate = atoi ( thisParam [ 1 ] . c_str ( ) ) ; <nl> } <nl> else if ( thisParam [ 0 ] = = " channels " ) <nl> { <nl> StringUtils : : Trim ( thisParam [ 1 ] ) ; <nl> - m_Channels = atoi ( thisParam [ 1 ] ) ; <nl> + m_Channels = atoi ( thisParam [ 1 ] . c_str ( ) ) ; <nl> } <nl> } <nl> } <nl> mmm a / xbmc / filesystem / DAVCommon . cpp <nl> ppp b / xbmc / filesystem / DAVCommon . cpp <nl> using namespace XFILE ; <nl> * / <nl> bool CDAVCommon : : ValueWithoutNamespace ( const TiXmlNode * pNode , const std : : string & value ) <nl> { <nl> - CStdStringArray tag ; <nl> const TiXmlElement * pElement ; <nl> <nl> if ( ! pNode ) <nl> bool CDAVCommon : : ValueWithoutNamespace ( const TiXmlNode * pNode , const std : : string <nl> return false ; <nl> } <nl> <nl> - StringUtils : : SplitString ( pElement - > Value ( ) , " : " , tag , 2 ) ; <nl> + std : : vector < std : : string > tag = StringUtils : : Split ( pElement - > ValueStr ( ) , " : " , 2 ) ; <nl> <nl> if ( tag . size ( ) = = 1 & & tag [ 0 ] = = value ) <nl> { <nl> mmm a / xbmc / filesystem / MultiPathDirectory . cpp <nl> ppp b / xbmc / filesystem / MultiPathDirectory . cpp <nl> bool CMultiPathDirectory : : GetPaths ( const CStdString & strPath , vector < CStdString > <nl> URIUtils : : RemoveSlashAtEnd ( strPath1 ) ; <nl> <nl> / / split on " / " <nl> - vector < CStdString > vecTemp ; <nl> - StringUtils : : SplitString ( strPath1 , " / " , vecTemp ) ; <nl> - if ( vecTemp . size ( ) = = 0 ) <nl> + vector < string > vecTemp = StringUtils : : Split ( strPath1 , " / " ) ; <nl> + if ( vecTemp . empty ( ) ) <nl> return false ; <nl> <nl> / / check each item <nl> bool CMultiPathDirectory : : HasPath ( const CStdString & strPath , const CStdString & s <nl> URIUtils : : RemoveSlashAtEnd ( strPath1 ) ; <nl> <nl> / / split on " / " <nl> - vector < CStdString > vecTemp ; <nl> - StringUtils : : SplitString ( strPath1 , " / " , vecTemp ) ; <nl> - if ( vecTemp . size ( ) = = 0 ) <nl> + vector < string > vecTemp = StringUtils : : Split ( strPath1 , " / " ) ; <nl> + if ( vecTemp . empty ( ) ) <nl> return false ; <nl> <nl> / / check each item <nl> mmm a / xbmc / guilib / GUIControlFactory . cpp <nl> ppp b / xbmc / guilib / GUIControlFactory . cpp <nl> bool CGUIControlFactory : : GetTexture ( const TiXmlNode * pRootNode , const char * strT <nl> void CGUIControlFactory : : GetRectFromString ( const CStdString & string , CRect & rect ) <nl> { <nl> / / format is rect = " left [ , top , right , bottom ] " <nl> - CStdStringArray strRect ; <nl> - StringUtils : : SplitString ( string , " , " , strRect ) ; <nl> + std : : vector < std : : string > strRect = StringUtils : : Split ( string , " , " ) ; <nl> if ( strRect . size ( ) = = 1 ) <nl> { <nl> rect . x1 = ( float ) atof ( strRect [ 0 ] . c_str ( ) ) ; <nl> mmm a / xbmc / guilib / GUIRSSControl . cpp <nl> ppp b / xbmc / guilib / GUIRSSControl . cpp <nl> void CGUIRSSControl : : Process ( unsigned int currentTime , CDirtyRegionList & dirtyre <nl> { <nl> if ( m_strRSSTags ! = " " ) <nl> { <nl> - CStdStringArray vecSplitTags ; <nl> - <nl> - StringUtils : : SplitString ( m_strRSSTags , " , " , vecSplitTags ) ; <nl> - <nl> - for ( unsigned int i = 0 ; i < vecSplitTags . 
size ( ) ; i + + ) <nl> - m_pReader - > AddTag ( vecSplitTags [ i ] ) ; <nl> + vector < string > tags = StringUtils : : Split ( m_strRSSTags , " , " ) ; <nl> + for ( vector < string > : : const_iterator i = tags . begin ( ) ; i ! = tags . end ( ) ; + + i ) <nl> + m_pReader - > AddTag ( * i ) ; <nl> } <nl> / / use half the width of the control as spacing between feeds , and double this between feed sets <nl> float spaceWidth = ( m_label . font ) ? m_label . font - > GetCharWidth ( L ' ' ) : 15 ; <nl> mmm a / xbmc / guilib / VisibleEffect . cpp <nl> ppp b / xbmc / guilib / VisibleEffect . cpp <nl> CSlideEffect : : CSlideEffect ( const TiXmlElement * node ) : CAnimEffect ( node , EFFECT_ <nl> const char * startPos = node - > Attribute ( " start " ) ; <nl> if ( startPos ) <nl> { <nl> - vector < CStdString > commaSeparated ; <nl> - StringUtils : : SplitString ( startPos , " , " , commaSeparated ) ; <nl> + vector < string > commaSeparated = StringUtils : : Split ( startPos , " , " ) ; <nl> if ( commaSeparated . size ( ) > 1 ) <nl> m_startY = ( float ) atof ( commaSeparated [ 1 ] . c_str ( ) ) ; <nl> m_startX = ( float ) atof ( commaSeparated [ 0 ] . c_str ( ) ) ; <nl> CSlideEffect : : CSlideEffect ( const TiXmlElement * node ) : CAnimEffect ( node , EFFECT_ <nl> const char * endPos = node - > Attribute ( " end " ) ; <nl> if ( endPos ) <nl> { <nl> - vector < CStdString > commaSeparated ; <nl> - StringUtils : : SplitString ( endPos , " , " , commaSeparated ) ; <nl> + vector < string > commaSeparated = StringUtils : : Split ( endPos , " , " ) ; <nl> if ( commaSeparated . size ( ) > 1 ) <nl> m_endY = ( float ) atof ( commaSeparated [ 1 ] . c_str ( ) ) ; <nl> m_endX = ( float ) atof ( commaSeparated [ 0 ] . c_str ( ) ) ; <nl> CRotateEffect : : CRotateEffect ( const TiXmlElement * node , EFFECT_TYPE effect ) : CAn <nl> m_autoCenter = true ; <nl> else <nl> { <nl> - vector < CStdString > commaSeparated ; <nl> - StringUtils : : SplitString ( centerPos , " , " , commaSeparated ) ; <nl> + vector < string > commaSeparated = StringUtils : : Split ( centerPos , " , " ) ; <nl> if ( commaSeparated . size ( ) > 1 ) <nl> m_center . y = ( float ) atof ( commaSeparated [ 1 ] . c_str ( ) ) ; <nl> m_center . x = ( float ) atof ( commaSeparated [ 0 ] . c_str ( ) ) ; <nl> CZoomEffect : : CZoomEffect ( const TiXmlElement * node , const CRect & rect ) : CAnimEff <nl> const char * start = node - > Attribute ( " start " ) ; <nl> if ( start ) <nl> { <nl> - CStdStringArray params ; <nl> - StringUtils : : SplitString ( start , " , " , params ) ; <nl> + vector < string > params = StringUtils : : Split ( start , " , " ) ; <nl> if ( params . size ( ) = = 1 ) <nl> { <nl> m_startX = ( float ) atof ( params [ 0 ] . c_str ( ) ) ; <nl> CZoomEffect : : CZoomEffect ( const TiXmlElement * node , const CRect & rect ) : CAnimEff <nl> const char * end = node - > Attribute ( " end " ) ; <nl> if ( end ) <nl> { <nl> - CStdStringArray params ; <nl> - StringUtils : : SplitString ( end , " , " , params ) ; <nl> + vector < string > params = StringUtils : : Split ( end , " , " ) ; <nl> if ( params . size ( ) = = 1 ) <nl> { <nl> m_endX = ( float ) atof ( params [ 0 ] . 
c_str ( ) ) ; <nl> CZoomEffect : : CZoomEffect ( const TiXmlElement * node , const CRect & rect ) : CAnimEff <nl> m_autoCenter = true ; <nl> else <nl> { <nl> - vector < CStdString > commaSeparated ; <nl> - StringUtils : : SplitString ( centerPos , " , " , commaSeparated ) ; <nl> + vector < string > commaSeparated = StringUtils : : Split ( centerPos , " , " ) ; <nl> if ( commaSeparated . size ( ) > 1 ) <nl> m_center . y = ( float ) atof ( commaSeparated [ 1 ] . c_str ( ) ) ; <nl> m_center . x = ( float ) atof ( commaSeparated [ 0 ] . c_str ( ) ) ; <nl> mmm a / xbmc / input / ButtonTranslator . cpp <nl> ppp b / xbmc / input / ButtonTranslator . cpp <nl> uint32_t CButtonTranslator : : TranslateKeyboardButton ( TiXmlElement * pButton ) <nl> { <nl> StringUtils : : ToLower ( strMod ) ; <nl> <nl> - CStdStringArray modArray ; <nl> - StringUtils : : SplitString ( strMod , " , " , modArray ) ; <nl> - for ( unsigned int i = 0 ; i < modArray . size ( ) ; i + + ) <nl> + vector < string > modArray = StringUtils : : Split ( strMod , " , " ) ; <nl> + for ( vector < string > : : const_iterator i = modArray . begin ( ) ; i ! = modArray . end ( ) ; + + i ) <nl> { <nl> - CStdString & substr = modArray [ i ] ; <nl> + string substr = * i ; <nl> StringUtils : : Trim ( substr ) ; <nl> <nl> if ( substr = = " ctrl " | | substr = = " control " ) <nl> mmm a / xbmc / interfaces / legacy / Control . cpp <nl> ppp b / xbmc / interfaces / legacy / Control . cpp <nl> namespace XBMCAddon <nl> const String & cAttr = pTuple . second ( ) ; <nl> <nl> TiXmlElement pNode ( " animation " ) ; <nl> - CStdStringArray attrs ; <nl> - StringUtils : : SplitString ( cAttr . c_str ( ) , " " , attrs ) ; <nl> - for ( unsigned int i = 0 ; i < attrs . size ( ) ; i + + ) <nl> + std : : vector < std : : string > attrs = StringUtils : : Split ( cAttr , " " ) ; <nl> + for ( std : : vector < std : : string > : : const_iterator i = attrs . begin ( ) ; i ! = attrs . end ( ) ; + + i ) <nl> { <nl> - CStdStringArray attrs2 ; <nl> - StringUtils : : SplitString ( attrs [ i ] , " = " , attrs2 ) ; <nl> + std : : vector < std : : string > attrs2 = StringUtils : : Split ( * i , " = " ) ; <nl> if ( attrs2 . size ( ) = = 2 ) <nl> pNode . SetAttribute ( attrs2 [ 0 ] , attrs2 [ 1 ] ) ; <nl> } <nl> mmm a / xbmc / network / AirPlayServer . cpp <nl> ppp b / xbmc / network / AirPlayServer . cpp <nl> <nl> # endif / / HAS_ZEROCONF <nl> <nl> using namespace ANNOUNCEMENT ; <nl> + using namespace std ; <nl> <nl> # ifdef TARGET_WINDOWS <nl> # define close closesocket <nl> CStdString calcResponse ( const CStdString & username , <nl> / / from a string field1 = " value1 " , field2 = " value2 " it parses the value to a field <nl> CStdString getFieldFromString ( const CStdString & str , const char * field ) <nl> { <nl> - CStdString tmpStr ; <nl> - CStdStringArray tmpAr1 ; <nl> - CStdStringArray tmpAr2 ; <nl> - <nl> - StringUtils : : SplitString ( str , " , " , tmpAr1 ) ; <nl> - <nl> - for ( unsigned int i = 0 ; i < tmpAr1 . size ( ) ; i + + ) <nl> + vector < string > tmpAr1 = StringUtils : : Split ( str , " , " ) ; <nl> + for ( vector < string > : : const_iterator i = tmpAr1 . begin ( ) ; i ! = tmpAr1 . end ( ) ; + + i ) <nl> { <nl> - if ( tmpAr1 [ i ] . find ( field ) ! = std : : string : : npos ) <nl> + if ( i - > find ( field ) ! = std : : string : : npos ) <nl> { <nl> - if ( StringUtils : : SplitString ( tmpAr1 [ i ] , " = " , tmpAr2 ) = = 2 ) <nl> + vector < string > tmpAr2 = StringUtils : : Split ( * i , " = " ) ; <nl> + if ( tmpAr2 . 
size ( ) = = 2 ) <nl> { <nl> StringUtils : : Replace ( tmpAr2 [ 1 ] , " \ " " , " " ) ; / / remove quotes <nl> return tmpAr2 [ 1 ] ; <nl> mmm a / xbmc / network / cddb . cpp <nl> ppp b / xbmc / network / cddb . cpp <nl> void Xcddb : : addTitle ( const char * buffer ) <nl> } <nl> <nl> / / track artist " / " track title <nl> - CStdString strValue = value ; <nl> - CStdStringArray values ; <nl> - StringUtils : : SplitString ( value , " / " , values ) ; <nl> + vector < string > values = StringUtils : : Split ( value , " / " ) ; <nl> if ( values . size ( ) > 1 ) <nl> { <nl> g_charsetConverter . unknownToUTF8 ( values [ 0 ] ) ; <nl> void Xcddb : : addTitle ( const char * buffer ) <nl> g_charsetConverter . unknownToUTF8 ( values [ 1 ] ) ; <nl> m_mapTitles [ trk_nr ] + = values [ 1 ] ; <nl> } <nl> - else <nl> + else if ( ! values . empty ( ) ) <nl> { <nl> g_charsetConverter . unknownToUTF8 ( values [ 0 ] ) ; <nl> m_mapTitles [ trk_nr ] + = values [ 0 ] ; <nl> mmm a / xbmc / network / upnp / UPnPServer . cpp <nl> ppp b / xbmc / network / upnp / UPnPServer . cpp <nl> CUPnPServer : : SortItems ( CFileItemList & items , const char * sort_criteria ) <nl> } <nl> <nl> bool sorted = false ; <nl> - CStdStringArray tokens = StringUtils : : SplitString ( criteria , " , " ) ; <nl> - for ( vector < CStdString > : : reverse_iterator itr = tokens . rbegin ( ) ; itr ! = tokens . rend ( ) ; itr + + ) { <nl> + vector < string > tokens = StringUtils : : Split ( criteria , " , " ) ; <nl> + for ( vector < string > : : reverse_iterator itr = tokens . rbegin ( ) ; itr ! = tokens . rend ( ) ; itr + + ) { <nl> SortDescription sorting ; <nl> / * Platinum guarantees 1st char is - or + * / <nl> sorting . sortOrder = StringUtils : : StartsWith ( * itr , " + " ) ? SortOrderAscending : SortOrderDescending ; <nl> mmm a / xbmc / network / websocket / WebSocketV13 . cpp <nl> ppp b / xbmc / network / websocket / WebSocketV13 . cpp <nl> bool CWebSocketV13 : : Handshake ( const char * data , size_t length , std : : string & resp <nl> value = header . getValue ( WS_HEADER_PROTOCOL_LC ) ; <nl> if ( value & & strlen ( value ) > 0 ) <nl> { <nl> - CStdStringArray protocols ; <nl> - StringUtils : : SplitString ( value , " , " , protocols ) ; <nl> - for ( CStdStringArray : : iterator protocol = protocols . begin ( ) ; protocol ! = protocols . end ( ) ; + + protocol ) <nl> + vector < string > protocols = StringUtils : : Split ( value , " , " ) ; <nl> + for ( vector < string > : : iterator protocol = protocols . begin ( ) ; protocol ! = protocols . end ( ) ; + + protocol ) <nl> { <nl> StringUtils : : Trim ( * protocol ) ; <nl> if ( * protocol = = WS_PROTOCOL_JSONRPC ) <nl> mmm a / xbmc / network / websocket / WebSocketV8 . cpp <nl> ppp b / xbmc / network / websocket / WebSocketV8 . cpp <nl> bool CWebSocketV8 : : Handshake ( const char * data , size_t length , std : : string & respo <nl> value = header . getValue ( WS_HEADER_PROTOCOL_LC ) ; <nl> if ( value & & strlen ( value ) > 0 ) <nl> { <nl> - CStdStringArray protocols ; <nl> - StringUtils : : SplitString ( value , " , " , protocols ) ; <nl> - for ( CStdStringArray : : iterator protocol = protocols . begin ( ) ; protocol ! = protocols . end ( ) ; + + protocol ) <nl> + vector < string > protocols = StringUtils : : Split ( value , " , " ) ; <nl> + for ( vector < string > : : iterator protocol = protocols . begin ( ) ; protocol ! = protocols . 
end ( ) ; + + protocol ) <nl> { <nl> StringUtils : : Trim ( * protocol ) ; <nl> if ( * protocol = = WS_PROTOCOL_JSONRPC ) <nl> mmm a / xbmc / peripherals / Peripherals . cpp <nl> ppp b / xbmc / peripherals / Peripherals . cpp <nl> bool CPeripherals : : LoadMappings ( void ) <nl> <nl> for ( TiXmlElement * currentNode = pRootElement - > FirstChildElement ( " peripheral " ) ; currentNode ; currentNode = currentNode - > NextSiblingElement ( " peripheral " ) ) <nl> { <nl> - CStdStringArray vpArray , idArray ; <nl> PeripheralID id ; <nl> PeripheralDeviceMapping mapping ; <nl> <nl> bool CPeripherals : : LoadMappings ( void ) <nl> if ( currentNode - > Attribute ( " vendor_product " ) ) <nl> { <nl> / / The vendor_product attribute is a list of comma separated vendor : product pairs <nl> - StringUtils : : SplitString ( currentNode - > Attribute ( " vendor_product " ) , " , " , vpArray ) ; <nl> - for ( unsigned int i = 0 ; i < vpArray . size ( ) ; i + + ) <nl> + vector < string > vpArray = StringUtils : : Split ( currentNode - > Attribute ( " vendor_product " ) , " , " ) ; <nl> + for ( vector < string > : : const_iterator i = vpArray . begin ( ) ; i ! = vpArray . end ( ) ; + + i ) <nl> { <nl> - StringUtils : : SplitString ( vpArray [ i ] , " : " , idArray ) ; <nl> + vector < string > idArray = StringUtils : : Split ( * i , " : " ) ; <nl> if ( idArray . size ( ) ! = 2 ) <nl> { <nl> CLog : : Log ( LOGERROR , " % s - ignoring node \ " % s \ " with invalid vendor_product attribute " , __FUNCTION__ , mapping . m_strDeviceName . c_str ( ) ) ; <nl> continue ; <nl> } <nl> <nl> - id . m_iVendorId = PeripheralTypeTranslator : : HexStringToInt ( idArray [ 0 ] ) ; <nl> - id . m_iProductId = PeripheralTypeTranslator : : HexStringToInt ( idArray [ 1 ] ) ; <nl> + id . m_iVendorId = PeripheralTypeTranslator : : HexStringToInt ( idArray [ 0 ] . c_str ( ) ) ; <nl> + id . m_iProductId = PeripheralTypeTranslator : : HexStringToInt ( idArray [ 1 ] . c_str ( ) ) ; <nl> mapping . m_PeripheralID . push_back ( id ) ; <nl> } <nl> } <nl> mmm a / xbmc / playlists / PlayListM3U . cpp <nl> ppp b / xbmc / playlists / PlayListM3U . cpp <nl> std : : map < CStdString , CStdString > CPlayListM3U : : ParseStreamLine ( const CStdStrin <nl> CStdString strParams ( streamLine . substr ( strlen ( M3U_STREAM_MARKER ) + 1 ) ) ; <nl> <nl> / / separate the parameters <nl> - CStdStringArray vecParams = StringUtils : : SplitString ( strParams , " , " ) ; <nl> - for ( size_t i = 0 ; i < vecParams . size ( ) ; i + + ) <nl> + std : : vector < std : : string > vecParams = StringUtils : : Split ( strParams , " , " ) ; <nl> + for ( std : : vector < std : : string > : : iterator i = vecParams . begin ( ) ; i ! = vecParams . end ( ) ; + + i ) <nl> { <nl> / / split the param , ensure there was an = <nl> - StringUtils : : Trim ( vecParams [ i ] ) ; <nl> - CStdStringArray vecTuple = StringUtils : : SplitString ( vecParams [ i ] , " = " ) ; <nl> + StringUtils : : Trim ( * i ) ; <nl> + std : : vector < std : : string > vecTuple = StringUtils : : Split ( * i , " = " ) ; <nl> if ( vecTuple . size ( ) < 2 ) <nl> continue ; <nl> <nl> mmm a / xbmc / settings / AdvancedSettings . cpp <nl> ppp b / xbmc / settings / AdvancedSettings . cpp <nl> void CAdvancedSettings : : GetCustomExtensions ( TiXmlElement * pRootElement , CStdStri <nl> extensions + = " | " + extraExtensions ; <nl> if ( XMLUtils : : GetString ( pRootElement , " remove " , extraExtensions ) & & ! extraExtensions . 
empty ( ) ) <nl> { <nl> - CStdStringArray exts ; <nl> - StringUtils : : SplitString ( extraExtensions , " | " , exts ) ; <nl> - for ( unsigned int i = 0 ; i < exts . size ( ) ; + + i ) <nl> + vector < string > exts = StringUtils : : Split ( extraExtensions , " | " ) ; <nl> + for ( vector < string > : : const_iterator i = exts . begin ( ) ; i ! = exts . end ( ) ; + + i ) <nl> { <nl> - size_t iPos = extensions . find ( exts [ i ] ) ; <nl> - if ( iPos = = std : : string : : npos ) <nl> - continue ; <nl> - extensions . erase ( iPos , exts [ i ] . size ( ) + 1 ) ; <nl> + size_t iPos = extensions . find ( * i ) ; <nl> + if ( iPos ! = std : : string : : npos ) <nl> + extensions . erase ( iPos , i - > size ( ) + 1 ) ; <nl> } <nl> } <nl> } <nl> mmm a / xbmc / utils / Fanart . cpp <nl> ppp b / xbmc / utils / Fanart . cpp <nl> bool CFanart : : ParseColors ( const CStdString & colorsIn , CStdString & colorsOut ) <nl> if ( colorsIn [ 0 ] = = ' | ' ) <nl> { / / need conversion <nl> colorsOut . clear ( ) ; <nl> - CStdStringArray strColors ; <nl> - StringUtils : : SplitString ( colorsIn , " | " , strColors ) ; <nl> + std : : vector < std : : string > strColors = StringUtils : : Split ( colorsIn , " | " ) ; <nl> for ( int i = 0 ; i < std : : min ( ( int ) strColors . size ( ) - 1 , ( int ) max_fanart_colors ) ; i + + ) <nl> { / / split up each color <nl> - CStdStringArray strTriplets ; <nl> - StringUtils : : SplitString ( strColors [ i + 1 ] , " , " , strTriplets ) ; <nl> + std : : vector < std : : string > strTriplets = StringUtils : : Split ( strColors [ i + 1 ] , " , " ) ; <nl> if ( strTriplets . size ( ) = = 3 ) <nl> { / / convert <nl> if ( colorsOut . size ( ) ) <nl> mmm a / xbmc / utils / StringUtils . cpp <nl> ppp b / xbmc / utils / StringUtils . cpp <nl> int64_t StringUtils : : AlphaNumericCompare ( const wchar_t * left , const wchar_t * rig <nl> <nl> int StringUtils : : DateStringToYYYYMMDD ( const CStdString & dateString ) <nl> { <nl> - CStdStringArray days ; <nl> - int splitCount = StringUtils : : SplitString ( dateString , " - " , days ) ; <nl> - if ( splitCount = = 1 ) <nl> + vector < string > days = StringUtils : : Split ( dateString , " - " ) ; <nl> + if ( days . size ( ) = = 1 ) <nl> return atoi ( days [ 0 ] . c_str ( ) ) ; <nl> - else if ( splitCount = = 2 ) <nl> + else if ( days . size ( ) = = 2 ) <nl> return atoi ( days [ 0 ] . c_str ( ) ) * 100 + atoi ( days [ 1 ] . c_str ( ) ) ; <nl> - else if ( splitCount = = 3 ) <nl> + else if ( days . size ( ) = = 3 ) <nl> return atoi ( days [ 0 ] . c_str ( ) ) * 10000 + atoi ( days [ 1 ] . c_str ( ) ) * 100 + atoi ( days [ 2 ] . c_str ( ) ) ; <nl> else <nl> return - 1 ; <nl> long StringUtils : : TimeStringToSeconds ( const CStdString & timeString ) <nl> } <nl> else <nl> { <nl> - CStdStringArray secs ; <nl> - StringUtils : : SplitString ( strCopy , " : " , secs ) ; <nl> + vector < string > secs = StringUtils : : Split ( strCopy , " : " ) ; <nl> int timeInSecs = 0 ; <nl> for ( unsigned int i = 0 ; i < 3 & & i < secs . size ( ) ; i + + ) <nl> { <nl> timeInSecs * = 60 ; <nl> - timeInSecs + = atoi ( secs [ i ] ) ; <nl> + timeInSecs + = atoi ( secs [ i ] . c_str ( ) ) ; <nl> } <nl> return timeInSecs ; <nl> } <nl> mmm a / xbmc / utils / XMLUtils . cpp <nl> ppp b / xbmc / utils / XMLUtils . 
cpp <nl> bool XMLUtils : : GetDateTime ( const TiXmlNode * pRootNode , const char * strTag , CDate <nl> <nl> void XMLUtils : : SetAdditiveString ( TiXmlNode * pRootNode , const char * strTag , const CStdString & strSeparator , const CStdString & strValue ) <nl> { <nl> - CStdStringArray list ; <nl> - StringUtils : : SplitString ( strValue , strSeparator , list ) ; <nl> - for ( unsigned int i = 0 ; i < list . size ( ) & & ! list [ i ] . empty ( ) ; + + i ) <nl> - SetString ( pRootNode , strTag , list [ i ] ) ; <nl> + std : : vector < std : : string > list = StringUtils : : Split ( strValue , strSeparator ) ; <nl> + for ( std : : vector < std : : string > : : const_iterator i = list . begin ( ) ; i ! = list . end ( ) ; + + i ) <nl> + SetString ( pRootNode , strTag , * i ) ; <nl> } <nl> <nl> void XMLUtils : : SetStringArray ( TiXmlNode * pRootNode , const char * strTag , const std : : vector < std : : string > & arrayValue ) <nl> mmm a / xbmc / video / VideoDatabase . cpp <nl> ppp b / xbmc / video / VideoDatabase . cpp <nl> bool CVideoDatabase : : GetStackTimes ( const CStdString & filePath , vector < int > & time <nl> m_pDS - > query ( strSQL . c_str ( ) ) ; <nl> if ( m_pDS - > num_rows ( ) > 0 ) <nl> { / / get the video settings info <nl> - CStdStringArray timeString ; <nl> int timeTotal = 0 ; <nl> - StringUtils : : SplitString ( m_pDS - > fv ( " times " ) . get_asString ( ) , " , " , timeString ) ; <nl> + vector < string > timeString = StringUtils : : Split ( m_pDS - > fv ( " times " ) . get_asString ( ) , " , " ) ; <nl> times . clear ( ) ; <nl> - for ( unsigned int i = 0 ; i < timeString . size ( ) ; i + + ) <nl> + for ( vector < string > : : const_iterator i = timeString . begin ( ) ; i ! = timeString . end ( ) ; + + i ) <nl> { <nl> - times . push_back ( atoi ( timeString [ i ] . c_str ( ) ) ) ; <nl> - timeTotal + = atoi ( timeString [ i ] . c_str ( ) ) ; <nl> + times . push_back ( atoi ( i - > c_str ( ) ) ) ; <nl> + timeTotal + = atoi ( i - > c_str ( ) ) ; <nl> } <nl> m_pDS - > close ( ) ; <nl> return ( timeTotal > 0 ) ; <nl> mmm a / xbmc / windowing / egl / EGLNativeTypeAmlogic . cpp <nl> ppp b / xbmc / windowing / egl / EGLNativeTypeAmlogic . cpp <nl> bool CEGLNativeTypeAmlogic : : ProbeResolutions ( std : : vector < RESOLUTION_INFO > & resol <nl> { <nl> char valstr [ 256 ] = { 0 } ; <nl> aml_get_sysfs_str ( " / sys / class / amhdmitx / amhdmitx0 / disp_cap " , valstr , 255 ) ; <nl> - std : : vector < CStdString > probe_str ; <nl> - StringUtils : : SplitString ( valstr , " \ n " , probe_str ) ; <nl> + std : : vector < std : : string > probe_str = StringUtils : : Split ( valstr , " \ n " ) ; <nl> <nl> resolutions . clear ( ) ; <nl> RESOLUTION_INFO res ; <nl> - for ( size_t i = 0 ; i < probe_str . size ( ) ; i + + ) <nl> + for ( std : : vector < std : : string > : : const_iterator i = probe_str . begin ( ) ; i ! = probe_str . end ( ) ; + + i ) <nl> { <nl> - if ( ModeToResolution ( probe_str [ i ] . c_str ( ) , & res ) ) <nl> + if ( ModeToResolution ( i - > c_str ( ) , & res ) ) <nl> resolutions . push_back ( res ) ; <nl> } <nl> return resolutions . size ( ) > 0 ; <nl> mmm a / xbmc / windows / GUIMediaWindow . cpp <nl> ppp b / xbmc / windows / GUIMediaWindow . 
cpp <nl> void CGUIMediaWindow : : LoadAdditionalTags ( TiXmlElement * root ) <nl> if ( element & & element - > FirstChild ( ) ) <nl> { / / format is < views > 50 , 29 , 51 , 95 < / views > <nl> CStdString allViews = element - > FirstChild ( ) - > Value ( ) ; <nl> - CStdStringArray views ; <nl> - StringUtils : : SplitString ( allViews , " , " , views ) ; <nl> - for ( unsigned int i = 0 ; i < views . size ( ) ; i + + ) <nl> + vector < string > views = StringUtils : : Split ( allViews , " , " ) ; <nl> + for ( vector < string > : : const_iterator i = views . begin ( ) ; i ! = views . end ( ) ; + + i ) <nl> { <nl> - int controlID = atol ( views [ i ] . c_str ( ) ) ; <nl> + int controlID = atol ( i - > c_str ( ) ) ; <nl> CGUIControl * control = GetControl ( controlID ) ; <nl> if ( control & & control - > IsContainer ( ) ) <nl> m_viewControl . AddView ( control ) ; <nl>
[ stdstring ] switch StringUtils : : SplitString to StringUtils : : Split
xbmc/xbmc
74f3f02e798b45bd2e43f66d5fb2462703f41489
2014-06-12T05:21:05Z
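The record above migrates call sites from the output-parameter `StringUtils::SplitString` to a `StringUtils::Split` that returns the pieces by value. A minimal sketch of that pattern, with a stand-in `Split` helper rather than XBMC's actual implementation:

```cpp
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>

// Stand-in for StringUtils::Split: returns the pieces by value instead of
// filling a caller-supplied CStdStringArray, so call sites iterate directly.
static std::vector<std::string> Split(const std::string& input, const std::string& delimiter)
{
  std::vector<std::string> result;
  if (delimiter.empty())
  {
    result.push_back(input);
    return result;
  }
  std::string::size_type start = 0, pos;
  while ((pos = input.find(delimiter, start)) != std::string::npos)
  {
    result.push_back(input.substr(start, pos - start));
    start = pos + delimiter.size();
  }
  result.push_back(input.substr(start));
  return result;
}

int main()
{
  // Same shape as the GetStackTimes() hunk: split once, walk by const_iterator.
  std::vector<std::string> times = Split("120,240,360", ",");
  int total = 0;
  for (std::vector<std::string>::const_iterator i = times.begin(); i != times.end(); ++i)
    total += atoi(i->c_str());
  std::cout << total << std::endl; // 720
  return 0;
}
```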
new file mode 100644 <nl> index 00000000000 . . 4bf9f341af5 <nl> mmm / dev / null <nl> ppp b / samples / cpp / delaunay2 . cpp <nl> <nl> + # include < opencv2 / opencv . hpp > <nl> + # include < iostream > <nl> + <nl> + namespace cv <nl> + { <nl> + <nl> + class CV_EXPORTS_W Subdiv2D <nl> + { <nl> + public : <nl> + <nl> + enum <nl> + { <nl> + PTLOC_ERROR = - 2 , <nl> + PTLOC_OUTSIDE_RECT = - 1 , <nl> + PTLOC_INSIDE = 0 , <nl> + PTLOC_VERTEX = 1 , <nl> + PTLOC_ON_EDGE = 2 <nl> + } ; <nl> + <nl> + enum <nl> + { <nl> + NEXT_AROUND_ORG = 0x00 , <nl> + NEXT_AROUND_DST = 0x22 , <nl> + PREV_AROUND_ORG = 0x11 , <nl> + PREV_AROUND_DST = 0x33 , <nl> + NEXT_AROUND_LEFT = 0x13 , <nl> + NEXT_AROUND_RIGHT = 0x31 , <nl> + PREV_AROUND_LEFT = 0x20 , <nl> + PREV_AROUND_RIGHT = 0x02 <nl> + } ; <nl> + <nl> + CV_WRAP Subdiv2D ( ) ; <nl> + CV_WRAP Subdiv2D ( Rect rect ) ; <nl> + CV_WRAP void initDelaunay ( Rect rect ) ; <nl> + <nl> + CV_WRAP int insert ( Point2f pt ) ; <nl> + CV_WRAP void insert ( const vector < Point2f > & ptvec ) ; <nl> + CV_WRAP int locate ( Point2f pt , CV_OUT int & edge , CV_OUT int & vertex ) ; <nl> + <nl> + CV_WRAP int findNearest ( Point2f pt , CV_OUT Point2f * nearestPt = 0 ) ; <nl> + CV_WRAP void getTriangleList ( CV_OUT vector < Vec6f > & triangleList ) ; <nl> + CV_WRAP void getVoronoiFacetList ( const vector < int > & idx , CV_OUT vector < vector < Point2f > > & facetList ) ; <nl> + <nl> + CV_WRAP Point2f getVertex ( int vertex , CV_OUT int * firstEdge = 0 ) const ; <nl> + <nl> + CV_WRAP int getEdge ( int edge , int nextEdgeType ) const ; <nl> + CV_WRAP int nextEdge ( int edge ) const ; <nl> + CV_WRAP int rotateEdge ( int edge , int rotate ) const ; <nl> + CV_WRAP int symEdge ( int edge ) const ; <nl> + CV_WRAP int edgeOrg ( int edge , CV_OUT Point2f * orgpt = 0 ) const ; <nl> + CV_WRAP int edgeDst ( int edge , CV_OUT Point2f * dstpt = 0 ) const ; <nl> + <nl> + protected : <nl> + int newEdge ( ) ; <nl> + void deleteEdge ( int edge ) ; <nl> + int newPoint ( Point2f pt , bool isvirtual , int firstEdge = 0 ) ; <nl> + void deletePoint ( int vtx ) ; <nl> + void setEdgePoints ( int edge , int orgPt , int dstPt ) ; <nl> + void splice ( int edgeA , int edgeB ) ; <nl> + int connectEdges ( int edgeA , int edgeB ) ; <nl> + void swapEdges ( int edge ) ; <nl> + int isRightOf ( Point2f pt , int edge ) const ; <nl> + void calcVoronoi ( ) ; <nl> + void clearVoronoi ( ) ; <nl> + <nl> + struct CV_EXPORTS Vertex <nl> + { <nl> + Vertex ( ) ; <nl> + Vertex ( Point2f pt , bool _isvirtual , int _firstEdge = 0 ) ; <nl> + bool isvirtual ( ) const ; <nl> + bool isfree ( ) const ; <nl> + int firstEdge ; <nl> + int type ; <nl> + Point2f pt ; <nl> + } ; <nl> + struct CV_EXPORTS QuadEdge <nl> + { <nl> + QuadEdge ( ) ; <nl> + QuadEdge ( int edgeidx ) ; <nl> + bool isfree ( ) const ; <nl> + int next [ 4 ] ; <nl> + int pt [ 4 ] ; <nl> + } ; <nl> + <nl> + vector < Vertex > vtx ; <nl> + vector < QuadEdge > qedges ; <nl> + int freeQEdge ; <nl> + int freePoint ; <nl> + bool validGeometry ; <nl> + <nl> + int recentEdge ; <nl> + Point2f topLeft ; <nl> + Point2f bottomRight ; <nl> + } ; <nl> + <nl> + <nl> + int Subdiv2D : : nextEdge ( int edge ) const <nl> + { <nl> + CV_DbgAssert ( ( size_t ) ( edge > > 2 ) < qedges . size ( ) ) ; <nl> + return qedges [ edge > > 2 ] . next [ edge & 3 ] ; <nl> + } <nl> + <nl> + int Subdiv2D : : rotateEdge ( int edge , int rotate ) const <nl> + { <nl> + CV_DbgAssert ( ( size_t ) ( edge > > 2 ) < qedges . 
size ( ) ) ; <nl> + return ( edge & ~ 3 ) + ( ( edge + rotate ) & 3 ) ; <nl> + } <nl> + <nl> + int Subdiv2D : : symEdge ( int edge ) const <nl> + { <nl> + CV_DbgAssert ( ( size_t ) ( edge > > 2 ) < qedges . size ( ) ) ; <nl> + return edge ^ 2 ; <nl> + } <nl> + <nl> + int Subdiv2D : : getEdge ( int edge , int nextEdgeType ) const <nl> + { <nl> + CV_DbgAssert ( ( size_t ) ( edge > > 2 ) < qedges . size ( ) ) ; <nl> + edge = qedges [ edge > > 2 ] . next [ ( edge + nextEdgeType ) & 3 ] ; <nl> + return ( edge & ~ 3 ) + ( ( edge + ( nextEdgeType > > 4 ) ) & 3 ) ; <nl> + } <nl> + <nl> + int Subdiv2D : : edgeOrg ( int edge , CV_OUT Point2f * orgpt ) const <nl> + { <nl> + CV_DbgAssert ( ( size_t ) ( edge > > 2 ) < qedges . size ( ) ) ; <nl> + int vidx = qedges [ edge > > 2 ] . pt [ edge & 3 ] ; <nl> + if ( orgpt ) <nl> + { <nl> + CV_DbgAssert ( ( size_t ) vidx < vtx . size ( ) ) ; <nl> + * orgpt = vtx [ vidx ] . pt ; <nl> + } <nl> + return vidx ; <nl> + } <nl> + <nl> + int Subdiv2D : : edgeDst ( int edge , CV_OUT Point2f * dstpt ) const <nl> + { <nl> + CV_DbgAssert ( ( size_t ) ( edge > > 2 ) < qedges . size ( ) ) ; <nl> + int vidx = qedges [ edge > > 2 ] . pt [ ( edge + 2 ) & 3 ] ; <nl> + if ( dstpt ) <nl> + { <nl> + CV_DbgAssert ( ( size_t ) vidx < vtx . size ( ) ) ; <nl> + * dstpt = vtx [ vidx ] . pt ; <nl> + } <nl> + return vidx ; <nl> + } <nl> + <nl> + <nl> + Point2f Subdiv2D : : getVertex ( int vertex , CV_OUT int * firstEdge ) const <nl> + { <nl> + CV_DbgAssert ( ( size_t ) vertex < vtx . size ( ) ) ; <nl> + if ( firstEdge ) <nl> + * firstEdge = vtx [ vertex ] . firstEdge ; <nl> + return vtx [ vertex ] . pt ; <nl> + } <nl> + <nl> + <nl> + Subdiv2D : : Subdiv2D ( ) <nl> + { <nl> + validGeometry = false ; <nl> + freeQEdge = 0 ; <nl> + freePoint = 0 ; <nl> + recentEdge = 0 ; <nl> + } <nl> + <nl> + Subdiv2D : : Subdiv2D ( Rect rect ) <nl> + { <nl> + validGeometry = false ; <nl> + freeQEdge = 0 ; <nl> + freePoint = 0 ; <nl> + recentEdge = 0 ; <nl> + <nl> + initDelaunay ( rect ) ; <nl> + } <nl> + <nl> + <nl> + Subdiv2D : : QuadEdge : : QuadEdge ( ) <nl> + { <nl> + next [ 0 ] = next [ 1 ] = next [ 2 ] = next [ 3 ] = 0 ; <nl> + pt [ 0 ] = pt [ 1 ] = pt [ 2 ] = pt [ 3 ] = 0 ; <nl> + } <nl> + <nl> + Subdiv2D : : QuadEdge : : QuadEdge ( int edgeidx ) <nl> + { <nl> + next [ 0 ] = edgeidx ; <nl> + next [ 1 ] = edgeidx + 3 ; <nl> + next [ 2 ] = edgeidx + 2 ; <nl> + next [ 3 ] = edgeidx + 1 ; <nl> + <nl> + pt [ 0 ] = pt [ 1 ] = pt [ 2 ] = pt [ 3 ] = 0 ; <nl> + } <nl> + <nl> + bool Subdiv2D : : QuadEdge : : isfree ( ) const <nl> + { <nl> + return next [ 0 ] < = 0 ; <nl> + } <nl> + <nl> + Subdiv2D : : Vertex : : Vertex ( ) <nl> + { <nl> + firstEdge = 0 ; <nl> + type = - 1 ; <nl> + } <nl> + <nl> + Subdiv2D : : Vertex : : Vertex ( Point2f _pt , bool _isvirtual , int _firstEdge ) <nl> + { <nl> + firstEdge = _firstEdge ; <nl> + type = ( int ) _isvirtual ; <nl> + pt = _pt ; <nl> + } <nl> + <nl> + bool Subdiv2D : : Vertex : : isvirtual ( ) const <nl> + { <nl> + return type > 0 ; <nl> + } <nl> + <nl> + bool Subdiv2D : : Vertex : : isfree ( ) const <nl> + { <nl> + return firstEdge < = 0 ; <nl> + } <nl> + <nl> + void Subdiv2D : : splice ( int edgeA , int edgeB ) <nl> + { <nl> + int & a_next = qedges [ edgeA > > 2 ] . next [ edgeA & 3 ] ; <nl> + int & b_next = qedges [ edgeB > > 2 ] . next [ edgeB & 3 ] ; <nl> + int a_rot = rotateEdge ( a_next , 1 ) ; <nl> + int b_rot = rotateEdge ( b_next , 1 ) ; <nl> + int & a_rot_next = qedges [ a_rot > > 2 ] . next [ a_rot & 3 ] ; <nl> + int & b_rot_next = qedges [ b_rot > > 2 ] . 
next [ b_rot & 3 ] ; <nl> + std : : swap ( a_next , b_next ) ; <nl> + std : : swap ( a_rot_next , b_rot_next ) ; <nl> + } <nl> + <nl> + void Subdiv2D : : setEdgePoints ( int edge , int orgPt , int dstPt ) <nl> + { <nl> + qedges [ edge > > 2 ] . pt [ edge & 3 ] = orgPt ; <nl> + qedges [ edge > > 2 ] . pt [ ( edge + 2 ) & 3 ] = dstPt ; <nl> + } <nl> + <nl> + int Subdiv2D : : connectEdges ( int edgeA , int edgeB ) <nl> + { <nl> + int edge = newEdge ( ) ; <nl> + <nl> + splice ( edge , getEdge ( edgeA , NEXT_AROUND_LEFT ) ) ; <nl> + splice ( symEdge ( edge ) , edgeB ) ; <nl> + <nl> + setEdgePoints ( edge , edgeDst ( edgeA ) , edgeOrg ( edgeB ) ) ; <nl> + return edge ; <nl> + } <nl> + <nl> + void Subdiv2D : : swapEdges ( int edge ) <nl> + { <nl> + int sedge = symEdge ( edge ) ; <nl> + int a = getEdge ( edge , PREV_AROUND_ORG ) ; <nl> + int b = getEdge ( sedge , PREV_AROUND_ORG ) ; <nl> + <nl> + splice ( edge , a ) ; <nl> + splice ( sedge , b ) ; <nl> + <nl> + setEdgePoints ( edge , edgeDst ( a ) , edgeDst ( b ) ) ; <nl> + <nl> + splice ( edge , getEdge ( a , NEXT_AROUND_LEFT ) ) ; <nl> + splice ( sedge , getEdge ( b , NEXT_AROUND_LEFT ) ) ; <nl> + } <nl> + <nl> + int Subdiv2D : : isRightOf ( Point2f pt , int edge ) const <nl> + { <nl> + Point2f org , dst ; <nl> + edgeOrg ( edge , & org ) ; <nl> + edgeDst ( edge , & dst ) ; <nl> + double cw_area = cvTriangleArea ( pt , dst , org ) ; <nl> + <nl> + return ( cw_area > 0 ) - ( cw_area < 0 ) ; <nl> + } <nl> + <nl> + <nl> + int Subdiv2D : : newEdge ( ) <nl> + { <nl> + if ( freeQEdge = = 0 ) <nl> + { <nl> + qedges . push_back ( QuadEdge ( ) ) ; <nl> + freeQEdge = ( int ) ( qedges . size ( ) - 1 ) ; <nl> + } <nl> + int edge = freeQEdge * 4 ; <nl> + freeQEdge = qedges [ edge > > 2 ] . next [ 1 ] ; <nl> + qedges [ edge > > 2 ] = QuadEdge ( edge ) ; <nl> + return edge ; <nl> + } <nl> + <nl> + void Subdiv2D : : deleteEdge ( int edge ) <nl> + { <nl> + CV_DbgAssert ( ( size_t ) ( edge > > 2 ) < ( size_t ) qedges . size ( ) ) ; <nl> + splice ( edge , getEdge ( edge , PREV_AROUND_ORG ) ) ; <nl> + int sedge = symEdge ( edge ) ; <nl> + splice ( sedge , getEdge ( sedge , PREV_AROUND_ORG ) ) ; <nl> + <nl> + edge > > = 2 ; <nl> + qedges [ edge ] . next [ 0 ] = - 1 ; <nl> + qedges [ edge ] . next [ 1 ] = freeQEdge ; <nl> + freeQEdge = edge ; <nl> + } <nl> + <nl> + int Subdiv2D : : newPoint ( Point2f pt , bool isvirtual , int firstEdge ) <nl> + { <nl> + if ( freePoint = = 0 ) <nl> + { <nl> + vtx . push_back ( Vertex ( ) ) ; <nl> + freePoint = ( int ) ( vtx . size ( ) - 1 ) ; <nl> + vtx [ freePoint ] . type = - 1 ; <nl> + vtx [ freePoint ] . firstEdge = 0 ; <nl> + } <nl> + int vidx = freePoint ; <nl> + freePoint = vtx [ vidx ] . firstEdge ; <nl> + vtx [ vidx ] = Vertex ( pt , isvirtual , firstEdge ) ; <nl> + <nl> + return vidx ; <nl> + } <nl> + <nl> + void Subdiv2D : : deletePoint ( int vidx ) <nl> + { <nl> + CV_DbgAssert ( ( size_t ) vidx < vtx . size ( ) ) ; <nl> + vtx [ vidx ] . firstEdge = freePoint ; <nl> + vtx [ vidx ] . type = - 1 ; <nl> + freePoint = vidx ; <nl> + } <nl> + <nl> + int Subdiv2D : : locate ( Point2f pt , int & _edge , int & _vertex ) <nl> + { <nl> + int vertex = 0 ; <nl> + <nl> + int i , maxEdges = ( int ) ( qedges . size ( ) * 4 ) ; <nl> + int edge = recentEdge ; <nl> + <nl> + CV_Assert ( edge > 0 ) ; <nl> + <nl> + if ( pt . x < topLeft . x | | pt . y < topLeft . y | | pt . x > = bottomRight . x | | pt . y > = bottomRight . 
y ) <nl> + CV_Error ( CV_StsOutOfRange , " " ) ; <nl> + <nl> + int location = PTLOC_ERROR ; <nl> + <nl> + int right_of_curr = isRightOf ( pt , edge ) ; <nl> + if ( right_of_curr > 0 ) <nl> + { <nl> + edge = symEdge ( edge ) ; <nl> + right_of_curr = - right_of_curr ; <nl> + } <nl> + <nl> + for ( i = 0 ; i < maxEdges ; i + + ) <nl> + { <nl> + int onext_edge = nextEdge ( edge ) ; <nl> + int dprev_edge = getEdge ( edge , PREV_AROUND_DST ) ; <nl> + <nl> + int right_of_onext = isRightOf ( pt , onext_edge ) ; <nl> + int right_of_dprev = isRightOf ( pt , dprev_edge ) ; <nl> + <nl> + if ( right_of_dprev > 0 ) <nl> + { <nl> + if ( right_of_onext > 0 | | ( right_of_onext = = 0 & & right_of_curr = = 0 ) ) <nl> + { <nl> + location = PTLOC_INSIDE ; <nl> + break ; <nl> + } <nl> + else <nl> + { <nl> + right_of_curr = right_of_onext ; <nl> + edge = onext_edge ; <nl> + } <nl> + } <nl> + else <nl> + { <nl> + if ( right_of_onext > 0 ) <nl> + { <nl> + if ( right_of_dprev = = 0 & & right_of_curr = = 0 ) <nl> + { <nl> + location = PTLOC_INSIDE ; <nl> + break ; <nl> + } <nl> + else <nl> + { <nl> + right_of_curr = right_of_dprev ; <nl> + edge = dprev_edge ; <nl> + } <nl> + } <nl> + else if ( right_of_curr = = 0 & & <nl> + isRightOf ( vtx [ edgeDst ( onext_edge ) ] . pt , edge ) > = 0 ) <nl> + { <nl> + edge = symEdge ( edge ) ; <nl> + } <nl> + else <nl> + { <nl> + right_of_curr = right_of_onext ; <nl> + edge = onext_edge ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + recentEdge = edge ; <nl> + <nl> + if ( location = = PTLOC_INSIDE ) <nl> + { <nl> + Point2f org_pt , dst_pt ; <nl> + edgeOrg ( edge , & org_pt ) ; <nl> + edgeDst ( edge , & dst_pt ) ; <nl> + <nl> + double t1 = fabs ( pt . x - org_pt . x ) ; <nl> + t1 + = fabs ( pt . y - org_pt . y ) ; <nl> + double t2 = fabs ( pt . x - dst_pt . x ) ; <nl> + t2 + = fabs ( pt . y - dst_pt . y ) ; <nl> + double t3 = fabs ( org_pt . x - dst_pt . x ) ; <nl> + t3 + = fabs ( org_pt . y - dst_pt . y ) ; <nl> + <nl> + if ( t1 < FLT_EPSILON ) <nl> + { <nl> + location = PTLOC_VERTEX ; <nl> + vertex = edgeOrg ( edge ) ; <nl> + edge = 0 ; <nl> + } <nl> + else if ( t2 < FLT_EPSILON ) <nl> + { <nl> + location = PTLOC_VERTEX ; <nl> + vertex = edgeDst ( edge ) ; <nl> + edge = 0 ; <nl> + } <nl> + else if ( ( t1 < t3 | | t2 < t3 ) & & <nl> + fabs ( cvTriangleArea ( pt , org_pt , dst_pt ) ) < FLT_EPSILON ) <nl> + { <nl> + location = PTLOC_ON_EDGE ; <nl> + vertex = 0 ; <nl> + } <nl> + } <nl> + <nl> + if ( location = = PTLOC_ERROR ) <nl> + { <nl> + edge = 0 ; <nl> + vertex = 0 ; <nl> + } <nl> + <nl> + _edge = edge ; <nl> + _vertex = vertex ; <nl> + <nl> + return location ; <nl> + } <nl> + <nl> + <nl> + inline bool <nl> + isPtInCircle3 ( Point2f pt , Point2f a , Point2f b , Point2f c ) <nl> + { <nl> + const double eps = FLT_EPSILON * 0 . 125 ; <nl> + double val = ( ( double ) a . x * a . x + ( double ) a . y * a . y ) * cvTriangleArea ( b , c , pt ) ; <nl> + val - = ( ( double ) b . x * b . x + ( double ) b . y * b . y ) * cvTriangleArea ( a , c , pt ) ; <nl> + val + = ( ( double ) c . x * c . x + ( double ) c . y * c . y ) * cvTriangleArea ( a , b , pt ) ; <nl> + val - = ( ( double ) pt . x * pt . x + ( double ) pt . y * pt . y ) * cvTriangleArea ( a , b , c ) ; <nl> + <nl> + return val > eps ? 1 : val < - eps ? 
- 1 : 0 ; <nl> + } <nl> + <nl> + <nl> + int Subdiv2D : : insert ( Point2f pt ) <nl> + { <nl> + int curr_point = 0 , curr_edge = 0 , deleted_edge = 0 ; <nl> + int location = locate ( pt , curr_edge , curr_point ) ; <nl> + <nl> + if ( location = = PTLOC_ERROR ) <nl> + CV_Error ( CV_StsBadSize , " " ) ; <nl> + <nl> + if ( location = = PTLOC_OUTSIDE_RECT ) <nl> + CV_Error ( CV_StsOutOfRange , " " ) ; <nl> + <nl> + if ( location = = PTLOC_VERTEX ) <nl> + return curr_point ; <nl> + <nl> + if ( location = = PTLOC_ON_EDGE ) <nl> + { <nl> + deleted_edge = curr_edge ; <nl> + recentEdge = curr_edge = getEdge ( curr_edge , PREV_AROUND_ORG ) ; <nl> + deleteEdge ( deleted_edge ) ; <nl> + } <nl> + else if ( location = = PTLOC_INSIDE ) <nl> + ; <nl> + else <nl> + CV_Error_ ( CV_StsError , ( " Subdiv2D : : locate returned invalid location = % d " , location ) ) ; <nl> + <nl> + assert ( curr_edge ! = 0 ) ; <nl> + validGeometry = false ; <nl> + <nl> + curr_point = newPoint ( pt , false ) ; <nl> + int base_edge = newEdge ( ) ; <nl> + int first_point = edgeOrg ( curr_edge ) ; <nl> + setEdgePoints ( base_edge , first_point , curr_point ) ; <nl> + splice ( base_edge , curr_edge ) ; <nl> + <nl> + do <nl> + { <nl> + base_edge = connectEdges ( curr_edge , symEdge ( base_edge ) ) ; <nl> + curr_edge = getEdge ( base_edge , PREV_AROUND_ORG ) ; <nl> + } <nl> + while ( edgeDst ( curr_edge ) ! = first_point ) ; <nl> + <nl> + curr_edge = getEdge ( base_edge , PREV_AROUND_ORG ) ; <nl> + <nl> + int i , max_edges = qedges . size ( ) * 4 ; <nl> + <nl> + for ( i = 0 ; i < max_edges ; i + + ) <nl> + { <nl> + int temp_dst = 0 , curr_org = 0 , curr_dst = 0 ; <nl> + int temp_edge = getEdge ( curr_edge , PREV_AROUND_ORG ) ; <nl> + <nl> + temp_dst = edgeDst ( temp_edge ) ; <nl> + curr_org = edgeOrg ( curr_edge ) ; <nl> + curr_dst = edgeDst ( curr_edge ) ; <nl> + <nl> + if ( isRightOf ( vtx [ temp_dst ] . pt , curr_edge ) > 0 & & <nl> + isPtInCircle3 ( vtx [ curr_org ] . pt , vtx [ temp_dst ] . pt , <nl> + vtx [ curr_dst ] . pt , vtx [ curr_point ] . pt ) < 0 ) <nl> + { <nl> + swapEdges ( curr_edge ) ; <nl> + curr_edge = getEdge ( curr_edge , PREV_AROUND_ORG ) ; <nl> + } <nl> + else if ( curr_org = = first_point ) <nl> + break ; <nl> + else <nl> + curr_edge = getEdge ( nextEdge ( curr_edge ) , PREV_AROUND_LEFT ) ; <nl> + } <nl> + <nl> + return curr_point ; <nl> + } <nl> + <nl> + void Subdiv2D : : insert ( const vector < Point2f > & ptvec ) <nl> + { <nl> + for ( size_t i = 0 ; i < ptvec . size ( ) ; i + + ) <nl> + insert ( ptvec [ i ] ) ; <nl> + } <nl> + <nl> + void Subdiv2D : : initDelaunay ( Rect rect ) <nl> + { <nl> + float big_coord = 3 . f * MAX ( rect . width , rect . height ) ; <nl> + float rx = ( float ) rect . x ; <nl> + float ry = ( float ) rect . y ; <nl> + <nl> + vtx . clear ( ) ; <nl> + qedges . clear ( ) ; <nl> + <nl> + recentEdge = 0 ; <nl> + validGeometry = false ; <nl> + <nl> + topLeft = Point2f ( rx , ry ) ; <nl> + bottomRight = Point2f ( rx + rect . width , ry + rect . height ) ; <nl> + <nl> + Point2f ppA ( rx + big_coord , ry ) ; <nl> + Point2f ppB ( rx , ry + big_coord ) ; <nl> + Point2f ppC ( rx - big_coord , ry - big_coord ) ; <nl> + <nl> + vtx . push_back ( Vertex ( ) ) ; <nl> + qedges . 
push_back ( QuadEdge ( ) ) ; <nl> + <nl> + freeQEdge = 0 ; <nl> + freePoint = 0 ; <nl> + <nl> + int pA = newPoint ( ppA , false ) ; <nl> + int pB = newPoint ( ppB , false ) ; <nl> + int pC = newPoint ( ppC , false ) ; <nl> + <nl> + int edge_AB = newEdge ( ) ; <nl> + int edge_BC = newEdge ( ) ; <nl> + int edge_CA = newEdge ( ) ; <nl> + <nl> + setEdgePoints ( edge_AB , pA , pB ) ; <nl> + setEdgePoints ( edge_BC , pB , pC ) ; <nl> + setEdgePoints ( edge_CA , pC , pA ) ; <nl> + <nl> + splice ( edge_AB , symEdge ( edge_CA ) ) ; <nl> + splice ( edge_BC , symEdge ( edge_AB ) ) ; <nl> + splice ( edge_CA , symEdge ( edge_BC ) ) ; <nl> + <nl> + recentEdge = edge_AB ; <nl> + } <nl> + <nl> + <nl> + void Subdiv2D : : clearVoronoi ( ) <nl> + { <nl> + size_t i , total = qedges . size ( ) ; <nl> + <nl> + for ( i = 0 ; i < total ; i + + ) <nl> + qedges [ i ] . pt [ 1 ] = qedges [ i ] . pt [ 3 ] = 0 ; <nl> + <nl> + total = vtx . size ( ) ; <nl> + for ( i = 0 ; i < total ; i + + ) <nl> + { <nl> + if ( vtx [ i ] . isvirtual ( ) ) <nl> + deletePoint ( ( int ) i ) ; <nl> + } <nl> + <nl> + validGeometry = false ; <nl> + } <nl> + <nl> + <nl> + static Point2f computeVoronoiPoint ( Point2f org0 , Point2f dst0 , Point2f org1 , Point2f dst1 ) <nl> + { <nl> + double a0 = dst0 . x - org0 . x ; <nl> + double b0 = dst0 . y - org0 . y ; <nl> + double c0 = - 0 . 5 * ( a0 * ( dst0 . x + org0 . x ) + b0 * ( dst0 . y + org0 . y ) ) ; <nl> + <nl> + double a1 = dst1 . x - org1 . x ; <nl> + double b1 = dst1 . y - org1 . y ; <nl> + double c1 = - 0 . 5 * ( a1 * ( dst1 . x + org1 . x ) + b1 * ( dst1 . y + org1 . y ) ) ; <nl> + <nl> + double det = a0 * b1 - a1 * b0 ; <nl> + <nl> + if ( det ! = 0 ) <nl> + { <nl> + det = 1 . / det ; <nl> + return Point2f ( ( float ) ( ( b0 * c1 - b1 * c0 ) * det ) , <nl> + ( float ) ( ( a1 * c0 - a0 * c1 ) * det ) ) ; <nl> + } <nl> + <nl> + return Point2f ( FLT_MAX , FLT_MAX ) ; <nl> + } <nl> + <nl> + <nl> + void Subdiv2D : : calcVoronoi ( ) <nl> + { <nl> + / / check if it is already calculated <nl> + if ( validGeometry ) <nl> + return ; <nl> + <nl> + clearVoronoi ( ) ; <nl> + int i , total = ( int ) qedges . size ( ) ; <nl> + <nl> + / / loop through all quad - edges , except for the first 3 ( # 1 , # 2 , # 3 - 0 is reserved for " NULL " pointer ) <nl> + for ( i = 4 ; i < total ; i + + ) <nl> + { <nl> + QuadEdge & quadedge = qedges [ i ] ; <nl> + <nl> + if ( quadedge . isfree ( ) ) <nl> + continue ; <nl> + <nl> + int edge0 = ( int ) ( i * 4 ) ; <nl> + Point2f org0 , dst0 , org1 , dst1 ; <nl> + <nl> + if ( ! quadedge . pt [ 3 ] ) <nl> + { <nl> + int edge1 = getEdge ( edge0 , NEXT_AROUND_LEFT ) ; <nl> + int edge2 = getEdge ( edge1 , NEXT_AROUND_LEFT ) ; <nl> + <nl> + edgeOrg ( edge0 , & org0 ) ; <nl> + edgeDst ( edge0 , & dst0 ) ; <nl> + edgeOrg ( edge1 , & org1 ) ; <nl> + edgeDst ( edge1 , & dst1 ) ; <nl> + <nl> + Point2f virt_point = computeVoronoiPoint ( org0 , dst0 , org1 , dst1 ) ; <nl> + <nl> + if ( fabs ( virt_point . x ) < FLT_MAX * 0 . 5 & & <nl> + fabs ( virt_point . y ) < FLT_MAX * 0 . 5 ) <nl> + { <nl> + quadedge . pt [ 3 ] = qedges [ edge1 > > 2 ] . pt [ 3 - ( edge1 & 2 ) ] = <nl> + qedges [ edge2 > > 2 ] . pt [ 3 - ( edge2 & 2 ) ] = newPoint ( virt_point , true ) ; <nl> + } <nl> + } <nl> + <nl> + if ( ! quadedge . 
pt [ 1 ] ) <nl> + { <nl> + int edge1 = getEdge ( edge0 , NEXT_AROUND_RIGHT ) ; <nl> + int edge2 = getEdge ( edge1 , NEXT_AROUND_RIGHT ) ; <nl> + <nl> + edgeOrg ( edge0 , & org0 ) ; <nl> + edgeDst ( edge0 , & dst0 ) ; <nl> + edgeOrg ( edge1 , & org1 ) ; <nl> + edgeDst ( edge1 , & dst1 ) ; <nl> + <nl> + Point2f virt_point = computeVoronoiPoint ( org0 , dst0 , org1 , dst1 ) ; <nl> + <nl> + if ( fabs ( virt_point . x ) < FLT_MAX * 0 . 5 & & <nl> + fabs ( virt_point . y ) < FLT_MAX * 0 . 5 ) <nl> + { <nl> + quadedge . pt [ 1 ] = qedges [ edge1 > > 2 ] . pt [ 1 + ( edge1 & 2 ) ] = <nl> + qedges [ edge2 > > 2 ] . pt [ 1 + ( edge2 & 2 ) ] = newPoint ( virt_point , true ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + validGeometry = true ; <nl> + } <nl> + <nl> + <nl> + static int <nl> + isRightOf2 ( const Point2f & pt , const Point2f & org , const Point2f & diff ) <nl> + { <nl> + double cw_area = ( ( double ) org . x - pt . x ) * diff . y - ( ( double ) org . y - pt . y ) * diff . x ; <nl> + return ( cw_area > 0 ) - ( cw_area < 0 ) ; <nl> + } <nl> + <nl> + <nl> + int Subdiv2D : : findNearest ( Point2f pt , Point2f * nearestPt ) <nl> + { <nl> + if ( ! validGeometry ) <nl> + calcVoronoi ( ) ; <nl> + <nl> + int vertex = 0 , edge = 0 ; <nl> + int loc = locate ( pt , edge , vertex ) ; <nl> + <nl> + if ( loc ! = PTLOC_ON_EDGE & & loc ! = PTLOC_INSIDE ) <nl> + return vertex ; <nl> + <nl> + vertex = 0 ; <nl> + <nl> + Point2f start ; <nl> + edgeOrg ( edge , & start ) ; <nl> + Point2f diff = pt - start ; <nl> + <nl> + edge = rotateEdge ( edge , 1 ) ; <nl> + <nl> + int i , total = ( int ) vtx . size ( ) ; <nl> + <nl> + for ( i = 0 ; i < total ; i + + ) <nl> + { <nl> + Point2f t ; <nl> + <nl> + for ( ; ; ) <nl> + { <nl> + CV_Assert ( edgeDst ( edge , & t ) > 0 ) ; <nl> + if ( isRightOf2 ( t , start , diff ) > = 0 ) <nl> + break ; <nl> + <nl> + edge = getEdge ( edge , NEXT_AROUND_LEFT ) ; <nl> + } <nl> + <nl> + for ( ; ; ) <nl> + { <nl> + CV_Assert ( edgeOrg ( edge , & t ) > 0 ) ; <nl> + <nl> + if ( isRightOf2 ( t , start , diff ) < 0 ) <nl> + break ; <nl> + <nl> + edge = getEdge ( edge , PREV_AROUND_LEFT ) ; <nl> + } <nl> + <nl> + Point2f tempDiff ; <nl> + edgeDst ( edge , & tempDiff ) ; <nl> + edgeOrg ( edge , & t ) ; <nl> + tempDiff - = t ; <nl> + <nl> + if ( isRightOf2 ( pt , t , tempDiff ) > = 0 ) <nl> + { <nl> + vertex = edgeOrg ( rotateEdge ( edge , 3 ) ) ; <nl> + break ; <nl> + } <nl> + <nl> + edge = symEdge ( edge ) ; <nl> + } <nl> + <nl> + if ( nearestPt & & vertex > 0 ) <nl> + * nearestPt = vtx [ vertex ] . pt ; <nl> + <nl> + return vertex ; <nl> + } <nl> + <nl> + void Subdiv2D : : getTriangleList ( vector < Vec6f > & triangleList ) <nl> + { <nl> + vector < bool > processed ( vtx . size ( ) , false ) ; <nl> + processed [ 0 ] = true ; <nl> + <nl> + calcVoronoi ( ) ; <nl> + triangleList . clear ( ) ; <nl> + <nl> + for ( size_t i = 4 ; i < qedges . size ( ) ; i + + ) <nl> + { <nl> + if ( qedges [ i ] . isfree ( ) ) <nl> + continue ; <nl> + int e0 = ( int ) ( i * 4 ) , e1 = rotateEdge ( e0 , 1 ) , e ; <nl> + int vidx0 = edgeOrg ( e1 ) , vidx1 = edgeDst ( e1 ) ; <nl> + Point2f a , b , c ; <nl> + if ( ! processed [ vidx0 ] ) <nl> + { <nl> + edgeOrg ( e0 , & a ) ; <nl> + edgeDst ( e0 , & b ) ; <nl> + e = getEdge ( e0 , NEXT_AROUND_LEFT ) ; <nl> + edgeDst ( e , & c ) ; <nl> + triangleList . push_back ( Vec6f ( a . x , a . y , b . x , b . y , c . x , c . y ) ) ; <nl> + processed [ vidx0 ] = true ; <nl> + } <nl> + if ( ! 
processed [ vidx1 ] ) <nl> + { <nl> + edgeDst ( e0 , & a ) ; <nl> + edgeOrg ( e0 , & b ) ; <nl> + e = getEdge ( e0 , PREV_AROUND_RIGHT ) ; <nl> + edgeOrg ( e , & c ) ; <nl> + triangleList . push_back ( Vec6f ( a . x , a . y , b . x , b . y , c . x , c . y ) ) ; <nl> + processed [ vidx1 ] = true ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void Subdiv2D : : getVoronoiFacetList ( const vector < int > & idx , CV_OUT vector < vector < Point2f > > & facetList ) <nl> + { <nl> + calcVoronoi ( ) ; <nl> + facetList . clear ( ) ; <nl> + <nl> + vector < Point2f > buf ; <nl> + <nl> + size_t i , total ; <nl> + if ( idx . empty ( ) ) <nl> + i = 4 , total = vtx . size ( ) ; <nl> + else <nl> + i = 0 , total = idx . size ( ) ; <nl> + <nl> + for ( ; i < total ; i + + ) <nl> + { <nl> + int k = idx . empty ( ) ? ( int ) i : idx [ i ] ; <nl> + <nl> + if ( vtx [ k ] . isvirtual ( ) ) <nl> + continue ; <nl> + int edge = rotateEdge ( vtx [ k ] . firstEdge , 1 ) , t = edge ; <nl> + <nl> + / / gather points <nl> + buf . clear ( ) ; <nl> + do <nl> + { <nl> + buf . push_back ( vtx [ edgeOrg ( t ) ] . pt ) ; <nl> + t = getEdge ( t , NEXT_AROUND_LEFT ) ; <nl> + } <nl> + while ( t ! = edge ) ; <nl> + <nl> + facetList . push_back ( buf ) ; <nl> + } <nl> + } <nl> + <nl> + } <nl> + <nl> + using namespace cv ; <nl> + using namespace std ; <nl> + <nl> + static void help ( ) <nl> + { <nl> + cout < < " \ nThis program demostrates iterative construction of \ n " <nl> + " delaunay triangulation and voronoi tesselation . \ n " <nl> + " It draws a random set of points in an image and then delaunay triangulates them . \ n " <nl> + " Usage : \ n " <nl> + " . / delaunay \ n " <nl> + " \ nThis program builds the traingulation interactively , you may stop this process by \ n " <nl> + " hitting any key . \ n " ; <nl> + } <nl> + <nl> + <nl> + static void draw_subdiv_point ( Mat & img , Point2f fp , Scalar color ) <nl> + { <nl> + circle ( img , fp , 3 , color , CV_FILLED , 8 , 0 ) ; <nl> + } <nl> + <nl> + static void draw_subdiv ( Mat & img , Subdiv2D & subdiv , Scalar delaunay_color ) <nl> + { <nl> + vector < Vec6f > triangleList ; <nl> + subdiv . getTriangleList ( triangleList ) ; <nl> + vector < Point > pt ( 3 ) ; <nl> + <nl> + for ( size_t i = 0 ; i < triangleList . size ( ) ; i + + ) <nl> + { <nl> + Vec6f t = triangleList [ i ] ; <nl> + pt [ 0 ] = Point ( cvRound ( t [ 0 ] ) , cvRound ( t [ 1 ] ) ) ; <nl> + pt [ 1 ] = Point ( cvRound ( t [ 2 ] ) , cvRound ( t [ 3 ] ) ) ; <nl> + pt [ 2 ] = Point ( cvRound ( t [ 4 ] ) , cvRound ( t [ 5 ] ) ) ; <nl> + line ( img , pt [ 0 ] , pt [ 1 ] , delaunay_color , 1 , CV_AA , 0 ) ; <nl> + line ( img , pt [ 1 ] , pt [ 2 ] , delaunay_color , 1 , CV_AA , 0 ) ; <nl> + line ( img , pt [ 2 ] , pt [ 0 ] , delaunay_color , 1 , CV_AA , 0 ) ; <nl> + } <nl> + } <nl> + <nl> + static void locate_point ( Mat & img , Subdiv2D & subdiv , Point2f fp , Scalar active_color ) <nl> + { <nl> + int e0 = 0 , vertex = 0 ; <nl> + <nl> + subdiv . locate ( fp , e0 , vertex ) ; <nl> + <nl> + if ( e0 > 0 ) <nl> + { <nl> + int e = e0 ; <nl> + do <nl> + { <nl> + Point2f org , dst ; <nl> + if ( subdiv . edgeOrg ( e , & org ) > 0 & & subdiv . edgeDst ( e , & dst ) > 0 ) <nl> + line ( img , org , dst , active_color , 3 , CV_AA , 0 ) ; <nl> + <nl> + e = subdiv . getEdge ( e , Subdiv2D : : NEXT_AROUND_LEFT ) ; <nl> + } <nl> + while ( e ! 
= e0 ) ; <nl> + } <nl> + <nl> + draw_subdiv_point ( img , fp , active_color ) ; <nl> + } <nl> + <nl> + <nl> + void paint_voronoi ( Mat & img , Subdiv2D & subdiv ) <nl> + { <nl> + vector < vector < Point2f > > facets ; <nl> + subdiv . getVoronoiFacetList ( vector < int > ( ) , facets ) ; <nl> + <nl> + vector < Point > ifacet ; <nl> + vector < vector < Point > > ifacets ( 1 ) ; <nl> + <nl> + for ( size_t i = 0 ; i < facets . size ( ) ; i + + ) <nl> + { <nl> + ifacet . resize ( facets [ i ] . size ( ) ) ; <nl> + for ( size_t j = 0 ; j < facets [ i ] . size ( ) ; j + + ) <nl> + ifacet [ j ] = facets [ i ] [ j ] ; <nl> + <nl> + Scalar color ; <nl> + color [ 0 ] = rand ( ) & 256 ; <nl> + color [ 1 ] = rand ( ) & 256 ; <nl> + color [ 2 ] = rand ( ) & 256 ; <nl> + fillConvexPoly ( img , ifacet , color , 8 , 0 ) ; <nl> + <nl> + ifacets [ 0 ] = ifacet ; <nl> + polylines ( img , ifacets , true , Scalar ( ) , 1 , CV_AA , 0 ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + int main ( int , char * * ) <nl> + { <nl> + help ( ) ; <nl> + <nl> + Scalar active_facet_color ( 0 , 0 , 255 ) , delaunay_color ( 255 , 255 , 255 ) ; <nl> + Rect rect ( 0 , 0 , 600 , 600 ) ; <nl> + <nl> + Subdiv2D subdiv ( rect ) ; <nl> + Mat img ( rect . size ( ) , CV_8UC3 ) ; <nl> + <nl> + img = Scalar : : all ( 0 ) ; <nl> + string win = " Delaunay Demo " ; <nl> + imshow ( win , img ) ; <nl> + <nl> + for ( int i = 0 ; i < 200 ; i + + ) <nl> + { <nl> + Point2f fp ( ( float ) ( rand ( ) % ( rect . width - 10 ) + 5 ) , <nl> + ( float ) ( rand ( ) % ( rect . height - 10 ) + 5 ) ) ; <nl> + <nl> + locate_point ( img , subdiv , fp , active_facet_color ) ; <nl> + imshow ( win , img ) ; <nl> + <nl> + if ( waitKey ( 100 ) > = 0 ) <nl> + break ; <nl> + <nl> + subdiv . insert ( fp ) ; <nl> + img = Scalar : : all ( 0 ) ; <nl> + draw_subdiv ( img , subdiv , delaunay_color ) ; <nl> + imshow ( win , img ) ; <nl> + <nl> + if ( waitKey ( 100 ) > = 0 ) <nl> + break ; <nl> + } <nl> + <nl> + img = Scalar : : all ( 0 ) ; <nl> + paint_voronoi ( img , subdiv ) ; <nl> + imshow ( win , img ) ; <nl> + <nl> + waitKey ( 0 ) ; <nl> + <nl> + return 0 ; <nl> + } <nl>
initial ( buggy ) C + + version of Delaunay triangulation
opencv/opencv
cfdf4640527de775167e187859f8159227193700
2011-08-09T15:11:32Z
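The sample above bundles its own copy of the `Subdiv2D` class, and the commit message itself flags it as an initial, buggy cut. For reference, a short sketch of driving the same interface as it later shipped in OpenCV's imgproc module (assuming that released API, not this in-sample copy):

```cpp
#include <opencv2/imgproc.hpp>
#include <cstdio>
#include <vector>

int main()
{
  cv::Rect rect(0, 0, 100, 100);
  cv::Subdiv2D subdiv(rect);         // initDelaunay: seeds the outer virtual triangle

  subdiv.insert(cv::Point2f(10, 10));
  subdiv.insert(cv::Point2f(80, 20));
  subdiv.insert(cv::Point2f(50, 90));
  subdiv.insert(cv::Point2f(40, 40));

  std::vector<cv::Vec6f> triangles;
  subdiv.getTriangleList(triangles); // each Vec6f packs (x1,y1,x2,y2,x3,y3)

  for (size_t i = 0; i < triangles.size(); i++)
    std::printf("(%g,%g) (%g,%g) (%g,%g)\n",
                triangles[i][0], triangles[i][1],
                triangles[i][2], triangles[i][3],
                triangles[i][4], triangles[i][5]);
  return 0;
}
```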
mmm a / src / objective - c / GRPCClient / GRPCCall . m <nl> ppp b / src / objective - c / GRPCClient / GRPCCall . m <nl> @ implementation GRPCCall { <nl> <nl> GRPCRequestHeaders * _requestHeaders ; <nl> <nl> + / / In the case that the call is a unary call ( i . e . the writer to GRPCCall is of type <nl> + / / GRXImmediateSingleWriter ) , GRPCCall will delay sending ops ( not send them to C core <nl> + / / immediately ) and buffer them into a batch _unaryOpBatch . The batch is sent to C core when <nl> + / / the SendClose op is added . <nl> BOOL _unaryCall ; <nl> - <nl> NSMutableArray * _unaryOpBatch ; <nl> } <nl> <nl> mmm a / src / objective - c / GRPCClient / private / GRPCOpBatchLog . h <nl> ppp b / src / objective - c / GRPCClient / private / GRPCOpBatchLog . h <nl> <nl> * <nl> * / <nl> <nl> + <nl> + # ifdef GRPC_TEST_OBJC <nl> + <nl> / * * <nl> * Logs the op batches of a client . Used for testing . <nl> * / <nl> <nl> + ( NSArray * ) obtainAndCleanOpBatchLog ; <nl> <nl> @ end <nl> + <nl> + # endif <nl> mmm a / src / objective - c / GRPCClient / private / GRPCOpBatchLog . m <nl> ppp b / src / objective - c / GRPCClient / private / GRPCOpBatchLog . m <nl> <nl> * <nl> * / <nl> <nl> + # ifdef GRPC_TEST_OBJC <nl> + <nl> # import " GRPCOpBatchLog . h " <nl> <nl> - @ implementation GRPCOpBatchLog <nl> + static NSMutableArray * opBatchLog = nil ; <nl> <nl> - NSMutableArray * opBatchLog = nil ; <nl> + @ implementation GRPCOpBatchLog <nl> <nl> + ( void ) enableOpBatchLog : ( BOOL ) enabled { <nl> @ synchronized ( opBatchLog ) { <nl> + ( NSArray * ) obtainAndCleanOpBatchLog { <nl> } <nl> } <nl> <nl> - @ end <nl> \ No newline at end of file <nl> + @ end <nl> + <nl> + # endif <nl> mmm a / src / objective - c / RxLibrary / GRXImmediateSingleWriter . m <nl> ppp b / src / objective - c / RxLibrary / GRXImmediateSingleWriter . m <nl> <nl> <nl> @ implementation GRXImmediateSingleWriter { <nl> id _value ; <nl> - NSError * _errorOrNil ; <nl> id < GRXWriteable > _writeable ; <nl> } <nl> <nl> @ synthesize state = _state ; <nl> <nl> - - ( instancetype ) initWithValue : ( id ) value error : ( NSError * ) errorOrNil { <nl> + - ( instancetype ) initWithValue : ( id ) value { <nl> if ( self = [ super init ] ) { <nl> _value = value ; <nl> - _errorOrNil = errorOrNil ; <nl> _state = GRXWriterStateNotStarted ; <nl> } <nl> return self ; <nl> } <nl> <nl> + ( GRXWriter * ) writerWithValue : ( id ) value { <nl> - return [ [ self alloc ] initWithValue : value error : nil ] ; <nl> + return [ [ self alloc ] initWithValue : value ] ; <nl> } <nl> <nl> - ( void ) startWithWriteable : ( id < GRXWriteable > ) writeable { <nl> _state = GRXWriterStateStarted ; <nl> _writeable = writeable ; <nl> [ writeable writeValue : _value ] ; <nl> - [ self finishWithError : _errorOrNil ] ; <nl> + [ self finish ] ; <nl> } <nl> <nl> - - ( void ) finishWithError : ( NSError * ) errorOrNil { <nl> + - ( void ) finish { <nl> _state = GRXWriterStateFinished ; <nl> - _errorOrNil = nil ; <nl> _value = nil ; <nl> id < GRXWriteable > writeable = _writeable ; <nl> _writeable = nil ; <nl> - [ writeable writesFinishedWithError : errorOrNil ] ; <nl> + [ writeable writesFinishedWithError : nil ] ; <nl> } <nl> <nl> + / / Overwrite the setter to disallow manual state transition . The getter <nl> + / / of _state is synthesized . <nl> - ( void ) setState : ( GRXWriterState ) newState { <nl> / / Manual state transition is not allowed <nl> return ; <nl>
Addressed the comments
grpc/grpc
c2e53b5af4830c14d5cda72bba20df2855789300
2017-03-22T21:30:16Z
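The comment added to `GRPCCall.m` describes the unary-call rule: ops are buffered rather than sent to the C core one by one, and the whole batch is flushed when the SendClose op arrives. A hedged C++ sketch of that buffer-until-close shape — names here are hypothetical, not grpc's actual core API:

```cpp
#include <iostream>
#include <string>
#include <vector>

// Hypothetical names throughout; only the batching rule mirrors the comment.
class OpBatcher
{
public:
  explicit OpBatcher(bool unaryCall) : unary_(unaryCall) {}

  void Enqueue(const std::string& op, bool isClose = false)
  {
    if (!unary_)                    // streaming call: every op goes out at once
    {
      Send(std::vector<std::string>(1, op));
      return;
    }
    batch_.push_back(op);           // unary call: buffer...
    if (isClose)
    {
      Send(batch_);                 // ...and flush the whole batch on SendClose
      batch_.clear();
    }
  }

private:
  static void Send(const std::vector<std::string>& ops)
  {
    std::cout << "sending batch of " << ops.size() << " op(s)\n";
  }
  bool unary_;
  std::vector<std::string> batch_;
};

int main()
{
  OpBatcher unary(true);
  unary.Enqueue("SendInitialMetadata");
  unary.Enqueue("SendMessage");
  unary.Enqueue("SendClose", true); // one batch of three ops reaches the core
  return 0;
}
```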
mmm a / stdlib / public / Windows / WinSDK . swift <nl> ppp b / stdlib / public / Windows / WinSDK . swift <nl> public let TRUE : BOOL = 1 <nl> public let INVALID_HANDLE_VALUE : HANDLE = HANDLE ( bitPattern : - 1 ) ! <nl> <nl> / / shellapi . h <nl> - public let FOF_NO_UI : DWORD = <nl> - DWORD ( FOF_SILENT | FOF_NOCONFIRMATION | FOF_NOERRORUI | FOF_NOCONFIRMMKDIR ) <nl> + public let FOF_NO_UI : FILEOP_FLAGS = <nl> + FILEOP_FLAGS ( FOF_SILENT | FOF_NOCONFIRMATION | FOF_NOERRORUI | FOF_NOCONFIRMMKDIR ) <nl> <nl>
stdlib : better type for WinSDK constant
apple/swift
6c642859ce20035e8b98c37f800b5e559b78eee3
2019-01-09T06:40:26Z
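The one-line fix above retypes `FOF_NO_UI` from `DWORD` to `FILEOP_FLAGS`, the 16-bit type shellapi.h gives `SHFILEOPSTRUCT::fFlags`. A small C++ sketch of why the constant should carry the field's type — flag values as in shellapi.h, struct pared down to the one relevant field:

```cpp
#include <cstdint>
#include <cstdio>

// FILEOP_FLAGS is a 16-bit WORD in shellapi.h; the flag values below are the
// shellapi.h ones.
typedef uint16_t FILEOP_FLAGS;

const FILEOP_FLAGS FOF_SILENT         = 0x0004;
const FILEOP_FLAGS FOF_NOCONFIRMATION = 0x0010;
const FILEOP_FLAGS FOF_NOCONFIRMMKDIR = 0x0200;
const FILEOP_FLAGS FOF_NOERRORUI      = 0x0400;

// Typed like the field it targets, so no narrowing happens at the use site.
const FILEOP_FLAGS FOF_NO_UI =
    FILEOP_FLAGS(FOF_SILENT | FOF_NOCONFIRMATION | FOF_NOERRORUI | FOF_NOCONFIRMMKDIR);

struct SHFILEOPSTRUCT_slim { FILEOP_FLAGS fFlags; };

int main()
{
  SHFILEOPSTRUCT_slim op;
  op.fFlags = FOF_NO_UI;  // a DWORD-typed constant would narrow here
  std::printf("0x%04X\n", (unsigned)op.fFlags); // 0x0614
  return 0;
}
```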
mmm a / tensorflow / go / op / wrappers . go <nl> ppp b / tensorflow / go / op / wrappers . go <nl> func DepthwiseConv2dNativeBackpropFilterDataFormat ( value string ) DepthwiseConv2d <nl> / / element on that dimension . The dimension order is determined by the value of <nl> / / ` data_format ` , see above for details . Dilations in the batch and depth <nl> / / dimensions must be 1 . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func DepthwiseConv2dNativeBackpropFilterDilations ( value [ ] int64 ) DepthwiseConv2dNativeBackpropFilterAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func SampleDistortedBoundingBoxV2Seed2 ( value int64 ) SampleDistortedBoundingBoxV2 <nl> / / <nl> / / value : The cropped area of the image must have an aspect ratio = <nl> / / width / height within this range . <nl> - / / If not specified , defaults to { f : 0 . 75 f : 1 . 33 } <nl> + / / If not specified , defaults to { f : 0 . 75 f : 1 . 33 } <nl> func SampleDistortedBoundingBoxV2AspectRatioRange ( value [ ] float32 ) SampleDistortedBoundingBoxV2Attr { <nl> return func ( m optionalAttr ) { <nl> m [ " aspect_ratio_range " ] = value <nl> func SampleDistortedBoundingBoxV2AspectRatioRange ( value [ ] float32 ) SampleDistort <nl> / / <nl> / / value : The cropped area of the image must contain a fraction of the <nl> / / supplied image within this range . <nl> - / / If not specified , defaults to { f : 0 . 05 f : 1 } <nl> + / / If not specified , defaults to { f : 0 . 05 f : 1 } <nl> func SampleDistortedBoundingBoxV2AreaRange ( value [ ] float32 ) SampleDistortedBoundingBoxV2Attr { <nl> return func ( m optionalAttr ) { <nl> m [ " area_range " ] = value <nl> func SampleDistortedBoundingBoxMinObjectCovered ( value float32 ) SampleDistortedBo <nl> / / <nl> / / value : The cropped area of the image must have an aspect ratio = <nl> / / width / height within this range . <nl> - / / If not specified , defaults to { f : 0 . 75 f : 1 . 33 } <nl> + / / If not specified , defaults to { f : 0 . 75 f : 1 . 33 } <nl> func SampleDistortedBoundingBoxAspectRatioRange ( value [ ] float32 ) SampleDistortedBoundingBoxAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " aspect_ratio_range " ] = value <nl> func SampleDistortedBoundingBoxAspectRatioRange ( value [ ] float32 ) SampleDistorted <nl> / / <nl> / / value : The cropped area of the image must contain a fraction of the <nl> / / supplied image within this range . <nl> - / / If not specified , defaults to { f : 0 . 05 f : 1 } <nl> + / / If not specified , defaults to { f : 0 . 05 f : 1 } <nl> func SampleDistortedBoundingBoxAreaRange ( value [ ] float32 ) SampleDistortedBoundingBoxAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " area_range " ] = value <nl> func ImageSummaryMaxImages ( value int64 ) ImageSummaryAttr { <nl> / / ImageSummaryBadColor sets the optional bad_color attribute to value . <nl> / / <nl> / / value : Color to use for pixels with non - finite values . <nl> - / / If not specified , defaults to { dtype : DT_UINT8 tensor_shape : { dim : { size : 4 } } int_val : 255 int_val : 0 int_val : 0 int_val : 255 } <nl> + / / If not specified , defaults to { dtype : DT_UINT8 tensor_shape : { dim : { size : 4 } } int_val : 255 int_val : 0 int_val : 0 int_val : 255 } <nl> func ImageSummaryBadColor ( value tf . 
Tensor ) ImageSummaryAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " bad_color " ] = value <nl> func Conv3DBackpropFilterV2DataFormat ( value string ) Conv3DBackpropFilterV2Attr { <nl> / / filter element on that dimension . The dimension order is determined by the <nl> / / value of ` data_format ` , see above for details . Dilations in the batch and <nl> / / depth dimensions must be 1 . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 i : 1 } <nl> func Conv3DBackpropFilterV2Dilations ( value [ ] int64 ) Conv3DBackpropFilterV2Attr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func Conv2DBackpropInputDataFormat ( value string ) Conv2DBackpropInputAttr { <nl> / / element on that dimension . The dimension order is determined by the value of <nl> / / ` data_format ` , see above for details . Dilations in the batch and depth <nl> / / dimensions must be 1 . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func Conv2DBackpropInputDilations ( value [ ] int64 ) Conv2DBackpropInputAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func Conv2DDataFormat ( value string ) Conv2DAttr { <nl> / / filter element on that dimension . The dimension order is determined by the <nl> / / value of ` data_format ` , see above for details . Dilations in the batch and <nl> / / depth dimensions must be 1 . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func Conv2DDilations ( value [ ] int64 ) Conv2DAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutType ( value tf . DataTy <nl> / / QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeDilations sets the optional dilations attribute to value . <nl> / / <nl> / / value : List of dilation values . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeDilations ( value [ ] int64 ) QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func QuantizedDepthwiseConv2DWithBiasAndReluOutType ( value tf . DataType ) Quantized <nl> / / QuantizedDepthwiseConv2DWithBiasAndReluDilations sets the optional dilations attribute to value . <nl> / / <nl> / / value : List of dilation values . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func QuantizedDepthwiseConv2DWithBiasAndReluDilations ( value [ ] int64 ) QuantizedDepthwiseConv2DWithBiasAndReluAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func QuantizedDepthwiseConv2DWithBiasOutType ( value tf . DataType ) QuantizedDepthwi <nl> / / QuantizedDepthwiseConv2DWithBiasDilations sets the optional dilations attribute to value . <nl> / / <nl> / / value : List of dilation values . 
<nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func QuantizedDepthwiseConv2DWithBiasDilations ( value [ ] int64 ) QuantizedDepthwiseConv2DWithBiasAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func QuantizedDepthwiseConv2DOutType ( value tf . DataType ) QuantizedDepthwiseConv2D <nl> / / QuantizedDepthwiseConv2DDilations sets the optional dilations attribute to value . <nl> / / <nl> / / value : List of dilation values . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func QuantizedDepthwiseConv2DDilations ( value [ ] int64 ) QuantizedDepthwiseConv2DAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func QuantizedConv2DPerChannelOutType ( value tf . DataType ) QuantizedConv2DPerChann <nl> / / QuantizedConv2DPerChannelDilations sets the optional dilations attribute to value . <nl> / / <nl> / / value : list of dilation values . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func QuantizedConv2DPerChannelDilations ( value [ ] int64 ) QuantizedConv2DPerChannelAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func Conv3DBackpropInputV2DataFormat ( value string ) Conv3DBackpropInputV2Attr { <nl> / / filter element on that dimension . The dimension order is determined by the <nl> / / value of ` data_format ` , see above for details . Dilations in the batch and <nl> / / depth dimensions must be 1 . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 i : 1 } <nl> func Conv3DBackpropInputV2Dilations ( value [ ] int64 ) Conv3DBackpropInputV2Attr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func AvgPool3DGrad ( scope * Scope , orig_input_shape tf . Output , grad tf . Output , ksi <nl> type Conv3DBackpropFilterAttr func ( optionalAttr ) <nl> <nl> / / Conv3DBackpropFilterDilations sets the optional dilations attribute to value . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 i : 1 } <nl> func Conv3DBackpropFilterDilations ( value [ ] int64 ) Conv3DBackpropFilterAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func DepthwiseConv2dNativeBackpropInputDataFormat ( value string ) DepthwiseConv2dN <nl> / / element on that dimension . The dimension order is determined by the value of <nl> / / ` data_format ` , see above for details . Dilations in the batch and depth <nl> / / dimensions must be 1 . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func DepthwiseConv2dNativeBackpropInputDilations ( value [ ] int64 ) DepthwiseConv2dNativeBackpropInputAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func DepthwiseConv2dNativeBackpropInput ( scope * Scope , input_sizes tf . Output , fil <nl> type Conv3DBackpropInputAttr func ( optionalAttr ) <nl> <nl> / / Conv3DBackpropInputDilations sets the optional dilations attribute to value . 
<nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 i : 1 } <nl> func Conv3DBackpropInputDilations ( value [ ] int64 ) Conv3DBackpropInputAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func DepthwiseConv2dNativeDataFormat ( value string ) DepthwiseConv2dNativeAttr { <nl> / / element on that dimension . The dimension order is determined by the value of <nl> / / ` data_format ` , see above for details . Dilations in the batch and depth <nl> / / dimensions must be 1 . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func DepthwiseConv2dNativeDilations ( value [ ] int64 ) DepthwiseConv2dNativeAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func QuantizedConv2DOutType ( value tf . DataType ) QuantizedConv2DAttr { <nl> / / filter element on that dimension . The dimension order is determined by the <nl> / / value of ` data_format ` , see above for details . Dilations in the batch and <nl> / / depth dimensions must be 1 . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func QuantizedConv2DDilations ( value [ ] int64 ) QuantizedConv2DAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func Conv3DDataFormat ( value string ) Conv3DAttr { <nl> / / filter element on that dimension . The dimension order is determined by the <nl> / / value of ` data_format ` , see above for details . Dilations in the batch and <nl> / / depth dimensions must be 1 . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 i : 1 } <nl> func Conv3DDilations ( value [ ] int64 ) Conv3DAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl> func Conv2DBackpropFilterDataFormat ( value string ) Conv2DBackpropFilterAttr { <nl> / / element on that dimension . The dimension order is determined by the value of <nl> / / ` data_format ` , see above for details . Dilations in the batch and depth <nl> / / dimensions must be 1 . <nl> - / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> + / / If not specified , defaults to { i : 1 i : 1 i : 1 i : 1 } <nl> func Conv2DBackpropFilterDilations ( value [ ] int64 ) Conv2DBackpropFilterAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " dilations " ] = value <nl>
Go : Update generated wrapper functions for TensorFlow ops .
tensorflow/tensorflow
c099a62118d4854e77dcfdf8c17308428237fbed
2020-03-02T12:49:13Z
mmm a / src / library_sdl . js <nl> ppp b / src / library_sdl . js <nl> var LibrarySDL = { <nl> SDL . surfaces [ surf ] = null ; <nl> } , <nl> <nl> - touchX : 0 , touchY : 0 , <nl> + touchX : 0 , touchY : 0 , <nl> + savedKeydown : null , <nl> <nl> receiveEvent : function ( event ) { <nl> switch ( event . type ) { <nl> var LibrarySDL = { <nl> SDL . DOMButtons [ event . button ] = 0 ; <nl> } <nl> <nl> - if ( event . type = = ' keypress ' & & ! SDL . textInput ) { <nl> - break ; <nl> + / / SDL expects a unicode character to be passed to its keydown events . <nl> + / / Unfortunately , the browser APIs only provide a charCode property on <nl> + / / keypress events , so we must backfill in keydown events with their <nl> + / / subsequent keypress event ' s charCode . <nl> + if ( event . type = = = ' keypress ' & & SDL . savedKeydown ) { <nl> + / / charCode is read - only <nl> + SDL . savedKeydown . keypressCharCode = event . charCode ; <nl> + SDL . savedKeydown = null ; <nl> + } else if ( event . type = = = ' keydown ' ) { <nl> + SDL . savedKeydown = event ; <nl> + } <nl> + <nl> + / / If we preventDefault on keydown events , the subsequent keypress events <nl> + / / won ' t fire . However , it ' s fine ( and in some cases necessary ) to <nl> + / / preventDefault for keys that don ' t generate a character . <nl> + if ( event . type ! = = ' keydown ' | | ( event . keyCode = = = 8 / * backspace * / | | event . keyCode = = = 9 / * tab * / ) ) { <nl> + event . preventDefault ( ) ; <nl> + } <nl> + <nl> + / / Don ' t push keypress events unless SDL_StartTextInput has been called . <nl> + if ( event . type ! = = ' keypress ' | | SDL . textInput ) { <nl> + SDL . events . push ( event ) ; <nl> } <nl> - <nl> - SDL . events . push ( event ) ; <nl> break ; <nl> case ' mouseout ' : <nl> / / Un - press all pressed mouse buttons , because we might miss the release outside of the canvas <nl> var LibrarySDL = { <nl> SDL . DOMButtons [ i ] = 0 ; <nl> } <nl> } <nl> + event . preventDefault ( ) ; <nl> break ; <nl> case ' blur ' : <nl> case ' visibilitychange ' : { <nl> var LibrarySDL = { <nl> keyCode : SDL . keyboardMap [ code ] <nl> } ) ; <nl> } <nl> + event . preventDefault ( ) ; <nl> break ; <nl> } <nl> case ' unload ' : <nl> var LibrarySDL = { <nl> return ; <nl> case ' resize ' : <nl> SDL . events . push ( event ) ; <nl> + / / manually triggered resize event doesn ' t have a preventDefault member <nl> + if ( event . preventDefault ) { <nl> + event . preventDefault ( ) ; <nl> + } <nl> break ; <nl> } <nl> if ( SDL . events . length > = 10000 ) { <nl> Module . printErr ( ' SDL event queue full , dropping events ' ) ; <nl> SDL . events = SDL . events . slice ( 0 , 10000 ) ; <nl> } <nl> - / / manually triggered resize event doesn ' t have a preventDefault member <nl> - if ( event . preventDefault ) { <nl> - event . preventDefault ( ) ; <nl> - } <nl> return ; <nl> } , <nl> <nl> var LibrarySDL = { <nl> { { { makeSetValue ( ' ptr ' , ' SDL . structs . KeyboardEvent . repeat ' , ' 0 ' , ' i8 ' ) } } } / / TODO <nl> { { { makeSetValue ( ' ptr ' , ' SDL . structs . KeyboardEvent . keysym + SDL . structs . keysym . scancode ' , ' scan ' , ' i32 ' ) } } } <nl> { { { makeSetValue ( ' ptr ' , ' SDL . structs . KeyboardEvent . keysym + SDL . structs . keysym . sym ' , ' key ' , ' i32 ' ) } } } <nl> - { { { makeSetValue ( ' ptr ' , ' SDL . structs . KeyboardEvent . keysym + SDL . structs . keysym . mod ' , ' SDL . modState ' , ' i32 ' ) } } } <nl> - { { { makeSetValue ( ' ptr ' , ' SDL . structs . KeyboardEvent . keysym + SDL . 
structs . keysym . unicode ' , ' key ' , ' i32 ' ) } } } <nl> + { { { makeSetValue ( ' ptr ' , ' SDL . structs . KeyboardEvent . keysym + SDL . structs . keysym . mod ' , ' SDL . modState ' , ' i16 ' ) } } } <nl> + / / some non - character keys ( e . g . backspace and tab ) won ' t have keypressCharCode set , fill in with the keyCode . <nl> + { { { makeSetValue ( ' ptr ' , ' SDL . structs . KeyboardEvent . keysym + SDL . structs . keysym . unicode ' , ' event . keypressCharCode | | key ' , ' i32 ' ) } } } <nl> <nl> break ; <nl> } <nl>
Merge pull request from inolen / sdl_unicode
emscripten-core/emscripten
adb38d0e59d87586c4ea283bf59052f0a0f9f016
2013-08-19T19:46:51Z
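The long comment added to `library_sdl.js` explains the core trick: browsers expose `charCode` only on keypress, so each keydown is parked and backfilled from the keypress that follows it. The same save-and-backfill idea as a self-contained C++ sketch, with simplified stand-in event structs:

```cpp
#include <cstdio>

// Simplified stand-ins for browser events; only the backfill logic matters.
struct KeyEvent
{
  int keyCode;   // set on keydown
  int charCode;  // only meaningful on keypress
  int unicode;   // what the SDL-side consumer wants filled in
};

static KeyEvent* savedKeydown = nullptr;

void OnKeydown(KeyEvent& e)
{
  savedKeydown = &e;               // park it until its keypress arrives
}

void OnKeypress(const KeyEvent& e)
{
  if (savedKeydown)
  {
    savedKeydown->unicode = e.charCode;  // backfill the earlier keydown
    savedKeydown = nullptr;
  }
}

int main()
{
  KeyEvent down  = {65 /* 'A' key */, 0, 0};
  KeyEvent press = {0, 97 /* 'a' */, 0};
  OnKeydown(down);
  OnKeypress(press);
  std::printf("keydown unicode = %d\n", down.unicode); // 97
  return 0;
}
```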
mmm a / xbmc / cores / dvdplayer / DVDPlayer . cpp <nl> ppp b / xbmc / cores / dvdplayer / DVDPlayer . cpp <nl> void CDVDPlayer : : HandleMessages ( ) <nl> { <nl> CDVDMsgPlayerSeek & msg ( * ( ( CDVDMsgPlayerSeek * ) pMsg ) ) ; <nl> <nl> + if ( ! m_State . canseek ) <nl> + { <nl> + pMsg - > Release ( ) ; <nl> + continue ; <nl> + } <nl> + <nl> if ( ! msg . GetTrickPlay ( ) ) <nl> { <nl> g_infoManager . SetDisplayAfterSeek ( 100000 ) ; <nl> void CDVDPlayer : : HandleMessages ( ) <nl> <nl> double start = DVD_NOPTS_VALUE ; <nl> <nl> - if ( m_pInputStream - > IsStreamType ( DVDSTREAM_TYPE_PVRMANAGER ) & & ! m_State . canseek ) <nl> - break ; <nl> - <nl> int time = msg . GetRestore ( ) ? ( int ) m_Edl . RestoreCutTime ( msg . GetTime ( ) ) : msg . GetTime ( ) ; <nl> CLog : : Log ( LOGDEBUG , " demuxer seek to : % d " , time ) ; <nl> if ( m_pDemuxer & & m_pDemuxer - > SeekTime ( time , msg . GetBackward ( ) , & start ) ) <nl> bool CDVDPlayer : : CanPause ( ) <nl> <nl> void CDVDPlayer : : Pause ( ) <nl> { <nl> - if ( m_pInputStream - > IsStreamType ( DVDSTREAM_TYPE_PVRMANAGER ) & & ! m_State . canpause ) <nl> + CSingleLock lock ( m_StateSection ) ; <nl> + if ( ! m_State . canpause ) <nl> return ; <nl> + lock . Leave ( ) ; <nl> <nl> if ( m_playSpeed ! = DVD_PLAYSPEED_PAUSE & & ( m_caching = = CACHESTATE_FULL | | m_caching = = CACHESTATE_PVR ) ) <nl> { <nl> bool CDVDPlayer : : IsPassthrough ( ) const <nl> <nl> bool CDVDPlayer : : CanSeek ( ) <nl> { <nl> - if ( m_pInputStream - > IsStreamType ( DVDSTREAM_TYPE_PVRMANAGER ) ) <nl> - { <nl> - CSingleLock lock ( m_StateSection ) ; <nl> - return m_State . canseek ; <nl> - } <nl> - else <nl> - return GetTotalTime ( ) > 0 ; <nl> + CSingleLock lock ( m_StateSection ) ; <nl> + return m_State . canseek ; <nl> } <nl> <nl> void CDVDPlayer : : Seek ( bool bPlus , bool bLargeStep ) <nl> void CDVDPlayer : : Seek ( bool bPlus , bool bLargeStep ) <nl> return ; <nl> } <nl> # endif <nl> - if ( m_pInputStream - > IsStreamType ( DVDSTREAM_TYPE_PVRMANAGER ) & & ! m_State . canseek ) <nl> + if ( ! m_State . canseek ) <nl> return ; <nl> <nl> if ( ( ( bPlus & & GetChapter ( ) < GetChapterCount ( ) ) <nl> void CDVDPlayer : : UpdatePlayState ( double timeout ) <nl> state . canpause = pvrinputstream - > CanPause ( ) ; <nl> state . canseek = pvrinputstream - > CanSeek ( ) ; <nl> } <nl> + else <nl> + { <nl> + state . canseek = GetTotalTime ( ) > 0 ? true : false ; <nl> + state . canpause = true ; <nl> + } <nl> <nl> CDVDInputStream : : IDisplayTime * pDisplayTime = dynamic_cast < CDVDInputStream : : IDisplayTime * > ( m_pInputStream ) ; <nl> if ( pDisplayTime & & pDisplayTime - > GetTotalTime ( ) > 0 ) <nl>
Merge pull request from FernetMenta / pvrseek
xbmc/xbmc
adefdd64943216a57317f3ce7fd2cf34754b5b6a
2012-10-13T13:14:35Z
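The DVDPlayer patch centralizes `canseek`/`canpause` in the shared state struct, reads them under `m_StateSection`, and bails out of requests the stream cannot honor. A reduced sketch of that lock-guarded capability pattern, with illustrative names:

```cpp
#include <iostream>
#include <mutex>

class Player
{
public:
  void UpdateState(bool canSeek, bool canPause)
  {
    std::lock_guard<std::mutex> lock(stateMutex_);
    state_.canseek  = canSeek;
    state_.canpause = canPause;
  }

  bool CanSeek()
  {
    std::lock_guard<std::mutex> lock(stateMutex_);  // every reader takes the lock
    return state_.canseek;
  }

  void Pause()
  {
    {
      std::lock_guard<std::mutex> lock(stateMutex_);
      if (!state_.canpause)
        return;                    // silently drop the request, as the patch does
    }                              // release the lock before the real work
    std::cout << "toggling pause\n";
  }

private:
  struct State { bool canseek = false; bool canpause = false; };
  State state_;
  std::mutex stateMutex_;
};

int main()
{
  Player p;
  p.Pause();                       // ignored: capabilities not yet known
  p.UpdateState(true, true);
  p.Pause();                       // now allowed
  std::cout << std::boolalpha << p.CanSeek() << "\n";
  return 0;
}
```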
mmm a / Tests / EndToEndTests / Speech / LSTM / cntk . cntk <nl> ppp b / Tests / EndToEndTests / Speech / LSTM / cntk . cntk <nl> speechTrain = [ <nl> / / parameter macros - - these carry their own weight matrices <nl> B ( ) = BS . Parameters . BiasParam ( cellDim ) <nl> <nl> - W ( v ) = BS . Parameters . WeightParam ( cellDim , inputDim ) * BS . Parameters . Stabilize ( v , enabled = enableSelfStabilization ) / / input - to - hidden <nl> - H ( h ) = BS . Parameters . WeightParam ( cellDim , outputDim ) * BS . Parameters . Stabilize ( h , enabled = enableSelfStabilization ) / / hidden - to - hidden <nl> - C ( c ) = BS . Parameters . DiagWeightParam ( cellDim ) . * BS . Parameters . Stabilize ( c , enabled = enableSelfStabilization ) / / cell - to - hiddden ( note : applied elementwise ) <nl> + W ( v ) = BS . Parameters . WeightParam ( cellDim , inputDim ) * Stabilize ( v , enabled = enableSelfStabilization ) / / input - to - hidden <nl> + H ( h ) = BS . Parameters . WeightParam ( cellDim , outputDim ) * Stabilize ( h , enabled = enableSelfStabilization ) / / hidden - to - hidden <nl> + C ( c ) = BS . Parameters . DiagWeightParam ( cellDim ) . * Stabilize ( c , enabled = enableSelfStabilization ) / / cell - to - hiddden ( note : applied elementwise ) <nl> <nl> / / note : the W ( x ) here are all different , they all come with their own set of weights ; same for H ( dh ) , C ( dc ) , and B ( ) <nl> it = Sigmoid ( W ( x ) + B ( ) + H ( dh ) + C ( dc ) ) / / input gate ( t ) <nl> speechTrain = [ <nl> h = if outputDim ! = cellDim / / output / hidden state <nl> then [ / / project <nl> Wmr = BS . Parameters . WeightParam ( outputDim , cellDim ) ; <nl> - htp = Wmr * BS . Parameters . Stabilize ( _privateInnards . ht , enabled = enableSelfStabilization ) <nl> + htp = Wmr * Stabilize ( _privateInnards . ht , enabled = enableSelfStabilization ) <nl> ] . htp / / TODO : ^ ^ extend BS syntax to allow to say : then [ Wmr = WeightParam ( outputDim , cellDim ) ] in Wmr * Stabilize ( . . . ) <nl> else _privateInnards . ht / / no projection <nl> dim = outputDim <nl> speechTrain = [ <nl> ] . lstmState / / we return the state record ( h , c ) <nl> <nl> / / define the stack of hidden LSTM layers - - TODO : change to RecurrentLSTMPStack ( ) , change stabilizer config <nl> - S ( x ) = BS . Parameters . Stabilize ( x , enabled = useSelfStabilization ) <nl> + S ( x ) = Stabilize ( x , enabled = useSelfStabilization ) <nl> LSTMoutput [ k : 1 . . numLSTMLayers ] = <nl> if k = = 1 <nl> then / * BS . RNNs . * / RecurrentLSTMP ( hiddenDim , cellDim = innerCellDim , / * S * / ( featNorm ) , inputDim = baseFeatDim , enableSelfStabilization = useSelfStabilization ) . h <nl>
fixed the last fix
microsoft/CNTK
395d394ed423a2b93f5f673317bf932826f023a5
2016-05-05T23:45:38Z
mmm a / src / mainwindow . cpp <nl> ppp b / src / mainwindow . cpp <nl> void MainWindow : : setupUi ( ) <nl> dataTable - > setRowCount ( 0 ) ; <nl> dataTable - > setColumnCount ( 0 ) ; <nl> dataTable - > setSelectionMode ( QTableWidget : : SingleSelection ) ; <nl> + dataTable - > setHorizontalScrollMode ( QAbstractItemView : : ScrollPerPixel ) ; <nl> <nl> vboxLayout2 - > addWidget ( dataTable ) ; <nl> <nl> void MainWindow : : setupUi ( ) <nl> queryResultTableView - > setSelectionMode ( QTreeView : : NoSelection ) ; <nl> queryResultTableView - > setModel ( queryResultListModel ) ; <nl> queryResultTableView - > setEditTriggers ( QAbstractItemView : : NoEditTriggers ) ; <nl> + queryResultTableView - > setHorizontalScrollMode ( QAbstractItemView : : ScrollPerPixel ) ; <nl> <nl> vboxLayout3 - > addWidget ( queryResultTableView ) ; <nl> <nl>
set horizontal scrollmode of result widgets to per pixel
sqlitebrowser/sqlitebrowser
1f760aac1417c39f7366850a79576c4ce7eddb5e
2012-04-17T14:37:21Z
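Note on the sqlitebrowser record above: setHorizontalScrollMode(QAbstractItemView::ScrollPerPixel) is a stock QAbstractItemView setter that switches horizontal scrolling from the default per-item steps to smooth per-pixel movement, which matters for tables with wide columns. A minimal self-contained sketch (the 4x8 table is hypothetical, standing in for the project's dataTable):

```cpp
#include <QAbstractItemView>
#include <QApplication>
#include <QTableWidget>

// Sketch: switch an item view's horizontal scrolling from the default
// per-item steps to smooth per-pixel movement, as the commit does for the
// browse and query-result views.
int main(int argc, char *argv[]) {
  QApplication app(argc, argv);

  QTableWidget table(4, 8);  // hypothetical table standing in for dataTable
  table.setHorizontalScrollMode(QAbstractItemView::ScrollPerPixel);
  table.show();

  return app.exec();
}
```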
mmm a / tests / python / test_loop_grad . py <nl> ppp b / tests / python / test_loop_grad . py <nl> def func ( ) : <nl> assert x [ k , i ] = = 2 * * i * k <nl> assert x . grad [ k , i ] = = 2 * * ( m - 1 - i ) <nl> <nl> + <nl> + def test_loop_grad_complex ( ) : <nl> + return <nl> + for arch in [ ti . x86_64 , ti . cuda ] : <nl> + ti . reset ( ) <nl> + ti . cfg . arch = arch <nl> + x = ti . var ( ti . f32 ) <nl> + <nl> + n = 16 <nl> + m = 8 <nl> + @ ti . layout <nl> + def place ( ) : <nl> + ti . root . dense ( ti . ij , ( n , m ) ) . place ( x ) <nl> + ti . root . lazy_grad ( ) <nl> + <nl> + @ ti . kernel <nl> + def func ( ) : <nl> + for k in range ( n ) : <nl> + t = k * k <nl> + tt = t * 2 <nl> + for i in range ( m - 1 ) : <nl> + x [ k , i + 1 ] = x [ k , i ] * 2 + tt <nl> + <nl> + <nl> + for k in range ( n ) : <nl> + x [ k , 0 ] = k <nl> + func ( ) <nl> + <nl> + for k in range ( n ) : <nl> + x . grad [ k , m - 1 ] = 1 <nl> + func . grad ( ) <nl> + <nl> + for k in range ( n ) : <nl> + for i in range ( m ) : <nl> + assert x [ k , i ] = = i * * 2 + 2 * k * * 2 <nl> + assert x . grad [ k , i ] = = 2 * * ( m - 1 - i ) <nl> + <nl>
test_loop_grad_complex WIP
taichi-dev/taichi
de171870d2c81b23a8c982c12a53d288b6931406
2019-10-21T21:46:30Z
mmm a / cocos / platform / CCImage . cpp <nl> ppp b / cocos / platform / CCImage . cpp <nl> bool Image : : initWithJpgData ( const unsigned char * data , ssize_t dataLen ) <nl> / * libjpeg data structure for storing one row , that is , scanline of an image * / <nl> JSAMPROW row_pointer [ 1 ] = { 0 } ; <nl> unsigned long location = 0 ; <nl> - unsigned int i = 0 ; <nl> <nl> bool ret = false ; <nl> do <nl> bool Image : : initWithJpgData ( const unsigned char * data , ssize_t dataLen ) <nl> _width = cinfo . output_width ; <nl> _height = cinfo . output_height ; <nl> _hasPremultipliedAlpha = false ; <nl> - / / row_pointer [ 0 ] = static_cast < unsigned char * > ( malloc ( cinfo . output_width * cinfo . output_components * sizeof ( unsigned char ) ) ) ; <nl> - / / CC_BREAK_IF ( ! row_pointer [ 0 ] ) ; <nl> <nl> _dataLen = cinfo . output_width * cinfo . output_height * cinfo . output_components ; <nl> _data = static_cast < unsigned char * > ( malloc ( _dataLen * sizeof ( unsigned char ) ) ) ; <nl> bool Image : : initWithJpgData ( const unsigned char * data , ssize_t dataLen ) <nl> location + = cinfo . output_width * cinfo . output_components ; <nl> jpeg_read_scanlines ( & cinfo , row_pointer , 1 ) ; <nl> } <nl> - <nl> - row_pointer [ 0 ] = nullptr ; <nl> - <nl> - / * When read image file with broken data , jpeg_finish_decompress ( ) may cause error . <nl> - * Besides , jpeg_destroy_decompress ( ) shall deallocate and release all memory associated <nl> - * with the decompression object . <nl> - * So it doesn ' t need to call jpeg_finish_decompress ( ) . <nl> - * / <nl> - / / jpeg_finish_decompress ( & cinfo ) ; <nl> + <nl> + / * When read image file with broken data , jpeg_finish_decompress ( ) may cause error . <nl> + * Besides , jpeg_destroy_decompress ( ) shall deallocate and release all memory associated <nl> + * with the decompression object . <nl> + * So it doesn ' t need to call jpeg_finish_decompress ( ) . <nl> + * / <nl> + / / jpeg_finish_decompress ( & cinfo ) ; <nl> jpeg_destroy_decompress ( & cinfo ) ; <nl> / * wrap up decompression , destroy objects , free pointers and close open files * / <nl> ret = true ; <nl> } while ( 0 ) ; <nl> <nl> - if ( row_pointer [ 0 ] ! = nullptr ) <nl> - { <nl> - free ( row_pointer [ 0 ] ) ; <nl> - } ; <nl> return ret ; <nl> # else <nl> return false ; <nl>
optimize decompress jpg data and fix warnings
cocos2d/cocos2d-x
ae5f6e63e57c9d956bd62ab16e05354e79d6b7a4
2014-09-16T13:55:05Z
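Note on the cocos2d-x record above: the row buffer is no longer heap-allocated; each scanline is decoded directly into the destination image buffer, so there is nothing extra to free on the error path. A condensed sketch of that decode loop follows, assuming a libjpeg that provides jpeg_mem_src (libjpeg 8 or libjpeg-turbo) and omitting error handling for brevity.

```cpp
#include <cstddef>
#include <cstdio>   // jpeglib.h historically expects stdio declarations
#include <cstdlib>
#include <jpeglib.h>

// Condensed sketch of the decode loop after the cleanup: scanlines land
// directly in the destination buffer, so no separate row buffer is ever
// allocated or freed.
unsigned char *decodeJpeg(unsigned char *data, unsigned long len,
                          JDIMENSION *w, JDIMENSION *h, int *comps) {
  jpeg_decompress_struct cinfo;
  jpeg_error_mgr jerr;
  cinfo.err = jpeg_std_error(&jerr);

  jpeg_create_decompress(&cinfo);
  jpeg_mem_src(&cinfo, data, len);
  jpeg_read_header(&cinfo, TRUE);
  jpeg_start_decompress(&cinfo);

  *w = cinfo.output_width;
  *h = cinfo.output_height;
  *comps = cinfo.output_components;

  const std::size_t rowStride =
      static_cast<std::size_t>(cinfo.output_width) * cinfo.output_components;
  unsigned char *out = static_cast<unsigned char *>(
      std::malloc(rowStride * cinfo.output_height));

  while (cinfo.output_scanline < cinfo.output_height) {
    // Aim the single row pointer at the next row of the output buffer.
    JSAMPROW row = out + cinfo.output_scanline * rowStride;
    jpeg_read_scanlines(&cinfo, &row, 1);
  }

  // As the retained comment in the diff says, jpeg_destroy_decompress()
  // releases everything owned by the decompression object, so
  // jpeg_finish_decompress() is not required (and can fail on broken data).
  jpeg_destroy_decompress(&cinfo);
  return out;
}
```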
mmm a / folly / Range . h <nl> ppp b / folly / Range . h <nl> class Range { <nl> template < <nl> typename Tgt , <nl> std : : enable_if_t < <nl> - std : : is_same < Tgt , StringViewType > : : value & & <nl> - std : : is_constructible < StringViewType , Iter const & , size_type > : : <nl> - value , <nl> + StrictConjunction < <nl> + std : : is_same < Tgt , StringViewType > , <nl> + std : : is_constructible < StringViewType , Iter const & , size_type > > : : <nl> + value , <nl> int > = 0 > <nl> constexpr operator Tgt ( ) const noexcept ( <nl> std : : is_nothrow_constructible < Tgt , Iter const & , size_type > : : value ) { <nl> mmm a / folly / test / RangeTest . cpp <nl> ppp b / folly / test / RangeTest . cpp <nl> TEST ( StringPiece , StringViewConversion ) { <nl> TrickierTarget tt3 ( deqRange ) ; <nl> EXPECT_EQ ( tt3 . which , 1 ) ; <nl> } <nl> + <nl> + namespace { <nl> + <nl> + / / Range with non - pod value type should not cause compile errors . <nl> + class NonPOD { <nl> + public : <nl> + NonPOD ( ) { } <nl> + } ; <nl> + void test_func ( Range < const NonPOD * > ) { } <nl> + <nl> + } / / anonymous namespace <nl> + <nl> # endif <nl>
Fix folly : : Range compile error when using non - POD value type , c + + 17 , and libc + +
facebook/folly
e3ed6d7c878e02157af67b3e037a7248398492aa
2018-10-20T07:09:42Z
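Note on the folly record above: the fix folds the two traits in the enable_if into a single conjunction trait instead of `&&`-ing their ::value members. The sketch below demonstrates the underlying hazard with std::conjunction, a standard-library relative of folly's StrictConjunction (the folly variant evaluates its operands differently, so treat this strictly as an analogy): naming a trait's ::value forces its instantiation, while a conjunction can avoid forming the later operands at all.

```cpp
#include <type_traits>

// A trait that is a hard error to instantiate for char.
template <typename T>
struct LoudTrait : std::true_type {
  static_assert(!std::is_same<T, char>::value, "instantiated with char!");
};

// std::conjunction stops at the first false operand, so LoudTrait<char> is
// never completed here and this translation unit compiles:
static_assert(!std::conjunction<std::false_type, LoudTrait<char>>::value,
              "short-circuited");

// By contrast, `std::false_type::value && LoudTrait<char>::value` would have
// to complete LoudTrait<char> to read its ::value, tripping the static_assert
// even though the overall result could only be false.

int main() {}
```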
mmm a / tensorflow / compiler / mlir / hlo / lib / Dialect / mhlo / transforms / hlo_legalize_to_lhlo . cc <nl> ppp b / tensorflow / compiler / mlir / hlo / lib / Dialect / mhlo / transforms / hlo_legalize_to_lhlo . cc <nl> using BaseOpConversion = BufferAssignmentOpConversionPattern < T > ; <nl> Value InsertDynamicAllocAndDealloc ( Location loc , Value result , <nl> Value shape_operand , <nl> ConversionPatternRewriter * rewriter ) { <nl> - auto result_type = result . getType ( ) . dyn_cast < ShapedType > ( ) ; <nl> + auto result_type = result . getType ( ) . dyn_cast < RankedTensorType > ( ) ; <nl> if ( ! result_type ) { <nl> result . getDefiningOp ( ) - > emitOpError ( ) <nl> < < " tensor to buffer conversion expects ranked results " ; <nl> Value InsertDynamicAllocAndDealloc ( Location loc , Value result , <nl> auto memref_type = <nl> MemRefType : : get ( result_type . getShape ( ) , result_type . getElementType ( ) ) ; <nl> <nl> - Operation * op = result . getDefiningOp ( ) ; <nl> - <nl> / / Extract the required element out of the vector . <nl> SmallVector < Value , 4 > dynamic_operands ; <nl> for ( auto shape_element : llvm : : enumerate ( result_type . getShape ( ) ) ) { <nl> if ( shape_element . value ( ) ! = ShapedType : : kDynamicSize ) continue ; <nl> - Value index = rewriter - > create < ConstantOp > ( <nl> - loc , rewriter - > getIntegerAttr ( rewriter - > getIndexType ( ) , <nl> - shape_element . index ( ) ) ) ; <nl> - Value alloc_operand = rewriter - > create < ExtractElementOp > ( loc , shape_operand , <nl> - ValueRange { index } ) ; <nl> + Value index = rewriter - > create < ConstantIndexOp > ( loc , shape_element . index ( ) ) ; <nl> + Value alloc_operand = <nl> + rewriter - > create < ExtractElementOp > ( loc , shape_operand , index ) ; <nl> if ( ! alloc_operand . getType ( ) . isIndex ( ) ) { <nl> alloc_operand = rewriter - > create < IndexCastOp > ( loc , alloc_operand , <nl> rewriter - > getIndexType ( ) ) ; <nl> Value InsertDynamicAllocAndDealloc ( Location loc , Value result , <nl> dynamic_operands . push_back ( alloc_operand ) ; <nl> } <nl> <nl> - / / Insert in front of op to ensure sizes are available . <nl> - OpBuilder allocBuilder ( op ) ; <nl> - auto alloc = allocBuilder . create < AllocOp > ( loc , memref_type , dynamic_operands ) ; <nl> - return alloc ; <nl> + return rewriter - > create < AllocOp > ( loc , memref_type , dynamic_operands ) ; <nl> } <nl> <nl> Value InsertAlloc ( Location loc , OpResult result , <nl> ConversionPatternRewriter * rewriter ) { <nl> - auto result_type = result . getType ( ) . dyn_cast < ShapedType > ( ) ; <nl> + auto result_type = result . getType ( ) . dyn_cast < RankedTensorType > ( ) ; <nl> if ( ! result_type | | ! result_type . hasStaticShape ( ) ) { <nl> result . getDefiningOp ( ) - > emitOpError ( ) <nl> < < " tensor to buffer conversion expects statically shaped results " ; <nl> class HloToLhloOpConverter : public BaseOpConversion < HloOpTy > { <nl> buffer_args . push_back ( <nl> InsertAlloc ( op - > getLoc ( ) , result . value ( ) , & rewriter ) ) ; <nl> } else { <nl> - SmallVector < Value , 1 > results_shape ; <nl> auto shape_type_op = dyn_cast < InferShapedTypeOpInterface > ( op ) ; <nl> if ( ! shape_type_op ) return failure ( ) ; <nl> - if ( failed ( <nl> - shape_type_op . reifyReturnTypeShapes ( rewriter , results_shape ) ) ) <nl> - return failure ( ) ; <nl> + <nl> + SmallVector < Value , 1 > results_shape ; <nl> + auto status = <nl> + shape_type_op . 
reifyReturnTypeShapes ( rewriter , results_shape ) ; <nl> + if ( failed ( status ) ) return failure ( ) ; <nl> buffer_args . push_back ( InsertDynamicAllocAndDealloc ( <nl> op - > getLoc ( ) , result . value ( ) , results_shape . front ( ) , & rewriter ) ) ; <nl> } <nl> } <nl> rewriter . create < mhlo : : HloToLhloOp < HloOpTy > > ( op - > getLoc ( ) , llvm : : None , <nl> buffer_args , op - > getAttrs ( ) ) ; <nl> - rewriter . replaceOp ( op , ArrayRef < Value > ( buffer_args ) . slice ( operands . size ( ) ) ) ; <nl> + rewriter . replaceOp ( <nl> + op , llvm : : makeArrayRef ( buffer_args ) . drop_front ( operands . size ( ) ) ) ; <nl> return success ( ) ; <nl> } <nl> } ; <nl>
[ HLO ] Clean - up dynamic allocation in hlo - legalize - to - lhlo pass .
tensorflow/tensorflow
e8d84bd57a45dce49cebd5afe09875425813695f
2020-10-05T11:00:36Z
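Note on the tensorflow record above: dyn_cast<RankedTensorType> tightens the earlier dyn_cast<ShapedType>, since ShapedType also admits unranked tensors while the allocation logic needs a shape of known rank; and the final replaceOp now takes the tail of buffer_args via drop_front(operands.size()). A tiny sketch of that tail-slice invariant with plain std::vector (names are illustrative):

```cpp
#include <cassert>
#include <vector>

// Sketch of the layout invariant behind the new replaceOp call: buffer_args
// holds the (converted) operands first, then one freshly allocated buffer per
// result, so the replacement values are simply the tail of that sequence --
// what the diff spells makeArrayRef(buffer_args).drop_front(operands.size()).
std::vector<int> resultBuffers(const std::vector<int> &operands,
                               const std::vector<int> &bufferArgs) {
  assert(bufferArgs.size() >= operands.size());
  return {bufferArgs.begin() + operands.size(), bufferArgs.end()};
}

int main() {
  // Two operands, two results: the last two entries are the result buffers.
  assert(resultBuffers({7, 8}, {7, 8, 100, 101}) ==
         (std::vector<int>{100, 101}));
}
```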
new file mode 100644 <nl> index 000000000 . . 4f74ee70a <nl> mmm / dev / null <nl> ppp b / examples / qwerty_to_workman . json <nl> <nl> + { <nl> + " profiles " : [ <nl> + { <nl> + " name " : " Default profile " , <nl> + " selected " : true , <nl> + " simple_modifications " : { <nl> + " caps_lock " : " delete_or_backspace " , <nl> + " w " : " d " , <nl> + " e " : " r " , <nl> + " r " : " w " , <nl> + " t " : " b " , <nl> + " y " : " j " , <nl> + " u " : " f " , <nl> + " i " : " u " , <nl> + " o " : " p " , <nl> + " p " : " semicolon " , <nl> + " d " : " h " , <nl> + " f " : " t " , <nl> + " h " : " y " , <nl> + " j " : " n " , <nl> + " k " : " e " , <nl> + " l " : " o " , <nl> + " semicolon " : " i " , <nl> + " c " : " m " , <nl> + " v " : " c " , <nl> + " b " : " v " , <nl> + " n " : " k " , <nl> + " m " : " l " <nl> + } <nl> + } <nl> + ] <nl> + } <nl>
Add qwerty_to_workman . json to examples
pqrs-org/Karabiner-Elements
236732f7db3a8dcdc13d5b62b0cacb6fae33d827
2017-07-16T19:05:13Z
mmm a / src / idl_gen_csharp . cpp <nl> ppp b / src / idl_gen_csharp . cpp <nl> static CommentConfig comment_config = { <nl> <nl> namespace csharp { <nl> class CSharpGenerator : public BaseGenerator { <nl> + struct FieldArrayLength { <nl> + std : : string name ; <nl> + int length ; <nl> + } ; <nl> + <nl> public : <nl> CSharpGenerator ( const Parser & parser , const std : : string & path , <nl> const std : : string & file_name ) <nl> class CSharpGenerator : public BaseGenerator { <nl> GenOffsetType ( * field . value . type . struct_def ) + <nl> " ) : " + GenTypeGet ( field . value . type ) + <nl> " . Pack ( builder , _o . " + camel_name + " ) ; \ n " ; <nl> + } else if ( struct_def . fixed & & struct_has_create ) { <nl> + std : : vector < FieldArrayLength > array_lengths ; <nl> + FieldArrayLength tmp_array_length = { <nl> + field . name , <nl> + field . value . type . fixed_length , <nl> + } ; <nl> + array_lengths . push_back ( tmp_array_length ) ; <nl> + GenStructPackDecl_ObjectAPI ( * field . value . type . struct_def , code_ptr , <nl> + array_lengths ) ; <nl> } <nl> break ; <nl> } <nl> class CSharpGenerator : public BaseGenerator { <nl> } <nl> case BASE_TYPE_ARRAY : { <nl> if ( field . value . type . struct_def ! = nullptr ) { <nl> - std : : vector < std : : string > name_vec ; <nl> - name_vec . push_back ( field . name ) ; <nl> - std : : vector < int > array_length_vec ; <nl> - array_length_vec . push_back ( field . value . type . fixed_length ) ; <nl> - GenArrayPackDecl_ObjectAPI ( * field . value . type . struct_def , code_ptr , <nl> - name_vec , array_length_vec ) ; <nl> + std : : vector < FieldArrayLength > array_lengths ; <nl> + FieldArrayLength tmp_array_length = { <nl> + field . name , <nl> + field . value . type . fixed_length , <nl> + } ; <nl> + array_lengths . push_back ( tmp_array_length ) ; <nl> + GenStructPackDecl_ObjectAPI ( * field . value . type . struct_def , code_ptr , <nl> + array_lengths ) ; <nl> } else { <nl> code + = " var _ " + field . name + " = _o . " + camel_name + " ; \ n " ; <nl> } <nl> class CSharpGenerator : public BaseGenerator { <nl> switch ( field . value . type . base_type ) { <nl> case BASE_TYPE_STRUCT : { <nl> if ( struct_def . fixed ) { <nl> - GenStructArgs_ObjectAPI ( * field . value . type . struct_def , code_ptr , <nl> - " _o . " + camel_name + " . " ) ; <nl> + GenStructPackCall_ObjectAPI ( * field . value . type . struct_def , <nl> + code_ptr , <nl> + " _ " + field . name + " _ " ) ; <nl> } else { <nl> code + = " , \ n " ; <nl> if ( field . value . type . struct_def - > fixed ) { <nl> class CSharpGenerator : public BaseGenerator { <nl> } <nl> case BASE_TYPE_ARRAY : { <nl> if ( field . value . type . struct_def ! = nullptr ) { <nl> - GenArrayPackCall_ObjectAPI ( * field . value . type . struct_def , code_ptr , <nl> - " _ " + field . name + " _ " ) ; <nl> + GenStructPackCall_ObjectAPI ( * field . value . type . struct_def , <nl> + code_ptr , <nl> + " _ " + field . name + " _ " ) ; <nl> } else { <nl> code + = " , \ n " ; <nl> code + = " _ " + field . name ; <nl> class CSharpGenerator : public BaseGenerator { <nl> code + = " } \ n " ; <nl> } <nl> <nl> - void GenStructArgs_ObjectAPI ( const StructDef & struct_def , <nl> - std : : string * code_ptr , <nl> - std : : string prefix ) const { <nl> - auto & code = * code_ptr ; <nl> - for ( auto it = struct_def . fields . vec . begin ( ) ; <nl> - it ! = struct_def . fields . vec . end ( ) ; + + it ) { <nl> - auto & field = * * it ; <nl> - const auto & field_type = field . value . 
type ; <nl> - if ( IsStruct ( field_type ) ) { <nl> - GenStructArgs_ObjectAPI ( * field_type . struct_def , code_ptr , <nl> - prefix + " . " + MakeCamel ( field . name ) + " . " ) ; <nl> - } else { <nl> - code + = " , \ n " ; <nl> - code + = prefix + MakeCamel ( field . name ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - void GenArrayPackDecl_ObjectAPI ( const StructDef & struct_def , <nl> - std : : string * code_ptr , <nl> - std : : vector < std : : string > name_vec , <nl> - std : : vector < int > array_length_vec ) const { <nl> + void GenStructPackDecl_ObjectAPI ( <nl> + const StructDef & struct_def , std : : string * code_ptr , <nl> + std : : vector < FieldArrayLength > & array_lengths ) const { <nl> auto & code = * code_ptr ; <nl> for ( auto it = struct_def . fields . vec . begin ( ) ; <nl> it ! = struct_def . fields . vec . end ( ) ; + + it ) { <nl> class CSharpGenerator : public BaseGenerator { <nl> auto is_array = IsArray ( field . value . type ) ; <nl> const auto & field_type = <nl> is_array ? field . value . type . VectorType ( ) : field . value . type ; <nl> - if ( ! IsStruct ( field_type ) ) { <nl> - auto tmp_name_vec = name_vec ; <nl> - tmp_name_vec . push_back ( field . name ) ; <nl> - auto tmp_array_length_vec = array_length_vec ; <nl> - if ( is_array ) { <nl> - tmp_array_length_vec . push_back ( field_type . fixed_length ) ; <nl> + FieldArrayLength tmp_array_length = { <nl> + field . name , <nl> + field_type . fixed_length , <nl> + } ; <nl> + array_lengths . push_back ( tmp_array_length ) ; <nl> + if ( field_type . struct_def ! = nullptr ) { <nl> + GenStructPackDecl_ObjectAPI ( * field_type . struct_def , code_ptr , <nl> + array_lengths ) ; <nl> + } else { <nl> + std : : vector < FieldArrayLength > array_only_lengths ; <nl> + for ( size_t i = 0 ; i < array_lengths . size ( ) ; + + i ) { <nl> + if ( array_lengths [ i ] . length > 0 ) { <nl> + array_only_lengths . push_back ( array_lengths [ i ] ) ; <nl> + } <nl> } <nl> std : : string name ; <nl> - for ( size_t tmp_name_index = 0 ; tmp_name_index < tmp_name_vec . size ( ) ; <nl> - + + tmp_name_index ) { <nl> - name + = " _ " + tmp_name_vec [ tmp_name_index ] ; <nl> - } <nl> - code + = " var " + name + " = new " + GenTypeBasic ( field_type ) + " [ " ; <nl> - code + = NumToString ( tmp_array_length_vec [ 0 ] ) ; <nl> - for ( size_t i = 1 ; i < tmp_array_length_vec . size ( ) ; + + i ) { <nl> - auto array_length = tmp_array_length_vec [ i ] ; <nl> - code + = " , " + NumToString ( array_length ) ; <nl> - } <nl> - code + = " ] ; \ n " ; <nl> - code + = " " ; <nl> - / / initialize array <nl> - for ( size_t i = 0 ; i < tmp_array_length_vec . size ( ) ; + + i ) { <nl> - auto array_length = tmp_array_length_vec [ i ] ; <nl> - auto idx = " idx " + NumToString ( i ) ; <nl> - code + = " for ( var " + idx + " = 0 ; " + idx + " < " + <nl> - NumToString ( array_length ) + " ; + + " + idx + " ) { " ; <nl> - } <nl> - code + = name + " [ idx0 " ; <nl> - for ( size_t i = 1 ; i < tmp_array_length_vec . size ( ) ; + + i ) { <nl> - auto idx = " idx " + NumToString ( i ) ; <nl> - code + = " , " + idx ; <nl> + for ( size_t i = 0 ; i < array_lengths . size ( ) ; + + i ) { <nl> + name + = " _ " + array_lengths [ i ] . name ; <nl> } <nl> - code + = " ] = _o " ; <nl> - for ( size_t i = 0 ; i < tmp_array_length_vec . size ( ) ; + + i ) { <nl> - auto idx = " idx " + NumToString ( i ) ; <nl> - code + = " . " + MakeCamel ( tmp_name_vec [ i ] ) + " [ " + idx + " ] " ; <nl> - } <nl> - if ( ! is_array ) { code + = " . " + MakeCamel ( field . 
name ) ; } <nl> - code + = " ; " ; <nl> - for ( size_t i = 0 ; i < tmp_array_length_vec . size ( ) ; + + i ) { <nl> - code + = " } " ; <nl> + code + = " var " + name + " = " ; <nl> + if ( array_only_lengths . size ( ) > 0 ) { <nl> + code + = " new " + GenTypeBasic ( field_type ) + " [ " ; <nl> + for ( size_t i = 0 ; i < array_only_lengths . size ( ) ; + + i ) { <nl> + if ( i ! = 0 ) { code + = " , " ; } <nl> + code + = NumToString ( array_only_lengths [ i ] . length ) ; <nl> + } <nl> + code + = " ] ; \ n " ; <nl> + code + = " " ; <nl> + / / initialize array <nl> + for ( size_t i = 0 ; i < array_only_lengths . size ( ) ; + + i ) { <nl> + auto idx = " idx " + NumToString ( i ) ; <nl> + code + = " for ( var " + idx + " = 0 ; " + idx + " < " + <nl> + NumToString ( array_only_lengths [ i ] . length ) + " ; + + " + idx + <nl> + " ) { " ; <nl> + } <nl> + for ( size_t i = 0 ; i < array_only_lengths . size ( ) ; + + i ) { <nl> + auto idx = " idx " + NumToString ( i ) ; <nl> + if ( i = = 0 ) { <nl> + code + = name + " [ " + idx ; <nl> + } else { <nl> + code + = " , " + idx ; <nl> + } <nl> + } <nl> + code + = " ] = _o " ; <nl> + for ( size_t i = 0 , j = 0 ; i < array_lengths . size ( ) ; + + i ) { <nl> + code + = " . " + MakeCamel ( array_lengths [ i ] . name ) ; <nl> + if ( array_lengths [ i ] . length < = 0 ) continue ; <nl> + code + = " [ idx " + NumToString ( j + + ) + " ] " ; <nl> + } <nl> + code + = " ; " ; <nl> + for ( size_t i = 0 ; i < array_only_lengths . size ( ) ; + + i ) { code + = " } " ; } <nl> + } else { <nl> + code + = " _o " ; <nl> + for ( size_t i = 0 ; i < array_lengths . size ( ) ; + + i ) { <nl> + code + = " . " + MakeCamel ( array_lengths [ i ] . name ) ; <nl> + } <nl> + code + = " ; " ; <nl> } <nl> code + = " \ n " ; <nl> } <nl> + array_lengths . pop_back ( ) ; <nl> } <nl> } <nl> <nl> - void GenArrayPackCall_ObjectAPI ( const StructDef & struct_def , <nl> - std : : string * code_ptr , <nl> - std : : string prefix ) const { <nl> + void GenStructPackCall_ObjectAPI ( const StructDef & struct_def , <nl> + std : : string * code_ptr , <nl> + std : : string prefix ) const { <nl> auto & code = * code_ptr ; <nl> for ( auto it = struct_def . fields . vec . begin ( ) ; <nl> it ! = struct_def . fields . vec . end ( ) ; + + it ) { <nl> auto & field = * * it ; <nl> const auto & field_type = field . value . type ; <nl> - if ( IsStruct ( field_type ) ) { <nl> - GenArrayPackCall_ObjectAPI ( * field_type . struct_def , code_ptr , <nl> - prefix + field . name + " _ " ) ; <nl> + if ( field_type . struct_def ! = nullptr ) { <nl> + GenStructPackCall_ObjectAPI ( * field_type . struct_def , code_ptr , <nl> + prefix + field . name + " _ " ) ; <nl> } else { <nl> code + = " , \ n " ; <nl> code + = prefix + field . name ; <nl> mmm a / tests / MyGame / Example / Vec3 . cs <nl> ppp b / tests / MyGame / Example / Vec3 . cs <nl> public struct Vec3 : IFlatbufferObject <nl> } <nl> public static Offset < MyGame . Example . Vec3 > Pack ( FlatBufferBuilder builder , Vec3T _o ) { <nl> if ( _o = = null ) return default ( Offset < MyGame . Example . Vec3 > ) ; <nl> + var _test3_a = _o . Test3 . A ; <nl> + var _test3_b = _o . Test3 . B ; <nl> return CreateVec3 ( <nl> builder , <nl> _o . X , <nl> public struct Vec3 : IFlatbufferObject <nl> _o . Z , <nl> _o . Test1 , <nl> _o . Test2 , <nl> - _o . Test3 . A , <nl> - _o . Test3 . B ) ; <nl> + _test3_a , <nl> + _test3_b ) ; <nl> } <nl> } ; <nl> <nl>
[ C # ] Fix nested structs and arrays in Object API ( )
google/flatbuffers
fb96fadc20a78da264b1878ff95f12838c3b22c8
2020-04-06T16:27:13Z
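Note on the flatbuffers record above: the generator refactor replaces parallel name/length vectors with one stack of FieldArrayLength entries that is pushed before recursing into a nested struct and popped afterwards, so the full field path (e.g. `_test3_a` in the regenerated Vec3.cs) is always in scope. A minimal sketch of that push/recurse/pop walk over a toy schema (Field and the example data are illustrative, not the generator's types):

```cpp
#include <iostream>
#include <string>
#include <vector>

// Sketch of the refactor's shape: one stack of {name, length} entries,
// pushed before descending into a nested struct and popped afterwards.
// length <= 0 marks a plain (non-array) path segment, matching the new
// FieldArrayLength struct in the diff.
struct FieldArrayLength {
  std::string name;
  int length;
};

struct Field {                  // illustrative stand-in for a schema field
  std::string name;
  int length;                   // > 0 only for fixed-size arrays
  std::vector<Field> children;  // nested struct fields; empty for scalars
};

void Walk(const Field &field, std::vector<FieldArrayLength> &path) {
  path.push_back({field.name, field.length});
  if (!field.children.empty()) {
    for (const Field &child : field.children) Walk(child, path);
  } else {
    for (const FieldArrayLength &seg : path) std::cout << '_' << seg.name;
    std::cout << '\n';
  }
  path.pop_back();  // restore the path for the caller's next sibling
}

int main() {
  // Mirrors the regenerated Vec3.cs: prints _test3_a then _test3_b.
  Field test3{"test3", 0, {{"a", 0, {}}, {"b", 0, {}}}};
  std::vector<FieldArrayLength> path;
  Walk(test3, path);
}
```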
mmm a / doc / modules / navigator . md <nl> ppp b / doc / modules / navigator . md <nl> A series of navigator operation apis like ` push ` , ` pop ` . <nl> # # # # Arguments <nl> <nl> * ` options ` * ( object ) * : some options . <nl> - * ` url ` * ( stirng ) * : The URL of the weex page to push . <nl> + * ` url ` * ( string ) * : The URL of the weex page to push . <nl> * ` animated ` * ( string ) * : ` true ` if the weex page is push through animation , otherwise , ` false ` . <nl> * ` callback ` * ( object ) * : the callback function after executing this action . <nl> <nl> mmm a / ios / sdk / WeexSDK / Sources / Component / WXTextAreaComponent . m <nl> ppp b / ios / sdk / WeexSDK / Sources / Component / WXTextAreaComponent . m <nl> - ( void ) updateStyles : ( NSDictionary * ) styles <nl> } <nl> <nl> if ( ! isnan ( weakSelf . cssNode - > style . minDimensions [ CSS_HEIGHT ] ) ) { <nl> - computedSize . width = MAX ( computedSize . height , weakSelf . cssNode - > style . minDimensions [ CSS_HEIGHT ] ) ; <nl> + computedSize . height = MAX ( computedSize . height , weakSelf . cssNode - > style . minDimensions [ CSS_HEIGHT ] ) ; <nl> } <nl> <nl> if ( ! isnan ( weakSelf . cssNode - > style . maxDimensions [ CSS_HEIGHT ] ) ) { <nl> - computedSize . width = MIN ( computedSize . height , weakSelf . cssNode - > style . maxDimensions [ CSS_HEIGHT ] ) ; <nl> + computedSize . height = MIN ( computedSize . height , weakSelf . cssNode - > style . maxDimensions [ CSS_HEIGHT ] ) ; <nl> } <nl> <nl> return ( CGSize ) { <nl> mmm a / ios / sdk / WeexSDK / Sources / Component / WXTextComponent . m <nl> ppp b / ios / sdk / WeexSDK / Sources / Component / WXTextComponent . m <nl> - ( WXDisplayBlock ) displayBlock <nl> } <nl> <nl> if ( ! isnan ( weakSelf . cssNode - > style . minDimensions [ CSS_HEIGHT ] ) ) { <nl> - computedSize . width = MAX ( computedSize . height , weakSelf . cssNode - > style . minDimensions [ CSS_HEIGHT ] ) ; <nl> + computedSize . height = MAX ( computedSize . height , weakSelf . cssNode - > style . minDimensions [ CSS_HEIGHT ] ) ; <nl> } <nl> <nl> if ( ! isnan ( weakSelf . cssNode - > style . maxDimensions [ CSS_HEIGHT ] ) ) { <nl> - computedSize . width = MIN ( computedSize . height , weakSelf . cssNode - > style . maxDimensions [ CSS_HEIGHT ] ) ; <nl> + computedSize . height = MIN ( computedSize . height , weakSelf . cssNode - > style . maxDimensions [ CSS_HEIGHT ] ) ; <nl> } <nl> <nl> return ( CGSize ) { <nl> mmm a / ios / sdk / WeexSDK / Sources / Component / WXTextInputComponent . m <nl> ppp b / ios / sdk / WeexSDK / Sources / Component / WXTextInputComponent . m <nl> - ( void ) updateStyles : ( NSDictionary * ) styles <nl> } <nl> <nl> if ( ! isnan ( weakSelf . cssNode - > style . minDimensions [ CSS_HEIGHT ] ) ) { <nl> - computedSize . width = MAX ( computedSize . height , weakSelf . cssNode - > style . minDimensions [ CSS_HEIGHT ] ) ; <nl> + computedSize . height = MAX ( computedSize . height , weakSelf . cssNode - > style . minDimensions [ CSS_HEIGHT ] ) ; <nl> } <nl> <nl> if ( ! isnan ( weakSelf . cssNode - > style . maxDimensions [ CSS_HEIGHT ] ) ) { <nl> - computedSize . width = MIN ( computedSize . height , weakSelf . cssNode - > style . maxDimensions [ CSS_HEIGHT ] ) ; <nl> + computedSize . height = MIN ( computedSize . height , weakSelf . cssNode - > style . maxDimensions [ CSS_HEIGHT ] ) ; <nl> } <nl> <nl> return ( CGSize ) { <nl> mmm a / ios / sdk / WeexSDKTests / TestSupportUtils . h <nl> ppp b / ios / sdk / WeexSDKTests / TestSupportUtils . 
h <nl> <nl> * / <nl> <nl> # import < Foundation / Foundation . h > <nl> + # import < UIKit / UIKit . h > <nl> + <nl> + extern bool WXTransform3DApproximateToTransform ( CATransform3D a , CATransform3D b ) ; <nl> + <nl> + extern bool WXRectApproximateToRect ( CGRect a , CGRect b ) ; <nl> <nl> @ interface TestSupportUtils : NSObject <nl> / * * <nl> mmm a / ios / sdk / WeexSDKTests / TestSupportUtils . m <nl> ppp b / ios / sdk / WeexSDKTests / TestSupportUtils . m <nl> <nl> <nl> # import " TestSupportUtils . h " <nl> <nl> - @ implementation TestSupportUtils <nl> <nl> - static dispatch_once_t onceToken ; <nl> + bool WXIsDoubleApproximate ( double x , double y ) { <nl> + return fabs ( x - y ) < 0 . 001 ; <nl> + } <nl> + <nl> + bool WXTransform3DApproximateToTransform ( CATransform3D a , CATransform3D b ) <nl> + { <nl> + return <nl> + WXIsDoubleApproximate ( a . m11 , b . m11 ) & & <nl> + WXIsDoubleApproximate ( a . m12 , b . m12 ) & & <nl> + WXIsDoubleApproximate ( a . m13 , b . m13 ) & & <nl> + WXIsDoubleApproximate ( a . m14 , b . m14 ) & & <nl> + WXIsDoubleApproximate ( a . m21 , b . m21 ) & & <nl> + WXIsDoubleApproximate ( a . m22 , b . m22 ) & & <nl> + WXIsDoubleApproximate ( a . m23 , b . m23 ) & & <nl> + WXIsDoubleApproximate ( a . m24 , b . m24 ) & & <nl> + WXIsDoubleApproximate ( a . m31 , b . m31 ) & & <nl> + WXIsDoubleApproximate ( a . m32 , b . m32 ) & & <nl> + WXIsDoubleApproximate ( a . m33 , b . m33 ) & & <nl> + WXIsDoubleApproximate ( a . m34 , b . m34 ) & & <nl> + WXIsDoubleApproximate ( a . m41 , b . m41 ) & & <nl> + WXIsDoubleApproximate ( a . m42 , b . m42 ) & & <nl> + WXIsDoubleApproximate ( a . m43 , b . m43 ) & & <nl> + WXIsDoubleApproximate ( a . m44 , b . m44 ) ; <nl> + } <nl> + <nl> + bool WXRectApproximateToRect ( CGRect a , CGRect b ) <nl> + { <nl> + return <nl> + WXIsDoubleApproximate ( a . origin . x , b . origin . x ) & & <nl> + WXIsDoubleApproximate ( a . origin . y , b . origin . y ) & & <nl> + WXIsDoubleApproximate ( a . size . width , b . size . width ) & & <nl> + WXIsDoubleApproximate ( a . size . height , b . size . height ) ; <nl> + } <nl> + <nl> + <nl> + @ implementation TestSupportUtils <nl> <nl> + ( void ) waitSecs : ( NSTimeInterval ) secs { <nl> NSDate * timeoutDate = [ NSDate dateWithTimeIntervalSinceNow : secs ] ; <nl> mmm a / ios / sdk / WeexSDKTests / WXAnimationModuleTests . m <nl> ppp b / ios / sdk / WeexSDKTests / WXAnimationModuleTests . m <nl> - ( void ) testExample { <nl> <nl> } <nl> <nl> - - ( void ) testPerformanceExample { <nl> - / / This is an example of a performance test case . <nl> - [ self measureBlock : ^ { <nl> - / / Put the code you want to measure the time of here . <nl> - } ] ; <nl> - } <nl> - <nl> - ( void ) testAnimationRotate { <nl> WXComponent * component = [ self component ] ; <nl> WXAnimationModule * object = [ [ WXAnimationModule alloc ] init ] ; <nl> [ object animation : component args : @ { @ " duration " : @ 500 , @ " timingFunction " : @ " ease - in - out " , @ " styles " : @ { @ " transform " : @ " rotate ( 90deg ) " } } callback : nil ] ; <nl> [ TestSupportUtils waitSecs : 1 ] ; <nl> <nl> - CGFloat angle = [ ( NSNumber * ) [ component . layer valueForKeyPath : @ " transform . rotation . z " ] floatValue ] ; <nl> - <nl> - XCTAssert ( fabs ( angle - M_PI_2 ) < 0 . 00001 ) ; <nl> + CATransform3D transformToVerify = CATransform3DMakeAffineTransform ( CGAffineTransformRotate ( CGAffineTransformIdentity , M_PI / 2 ) ) ; <nl> + XCTAssert ( WXTransform3DApproximateToTransform ( component . layer . 
transform , transformToVerify ) ) ; <nl> } <nl> <nl> - ( void ) testAnimationTranslate { <nl> mmm a / ios / sdk / WeexSDKTests / WXBridgeMethodTests . m <nl> ppp b / ios / sdk / WeexSDKTests / WXBridgeMethodTests . m <nl> - ( void ) testExample { <nl> XCTAssertTrue ( args . count = = 0 ) ; <nl> } <nl> <nl> - - ( void ) testPerformanceExample { <nl> - / / This is an example of a performance test case . <nl> - [ self measureBlock : ^ { <nl> - / / Put the code you want to measure the time of here . <nl> - } ] ; <nl> - } <nl> - <nl> @ end <nl> mmm a / ios / sdk / WeexSDKTests / WXConvertTests . m <nl> ppp b / ios / sdk / WeexSDKTests / WXConvertTests . m <nl> - ( void ) testNSUInteger { <nl> <nl> } <nl> <nl> - - ( void ) testPerformanceExample { <nl> - / / This is an example of a performance test case . <nl> - [ self measureBlock : ^ { <nl> - / / Put the code you want to measure the time of here . <nl> - } ] ; <nl> - } <nl> - <nl> @ end <nl> mmm a / ios / sdk / WeexSDKTests / WXInstanceWrapTests . m <nl> ppp b / ios / sdk / WeexSDKTests / WXInstanceWrapTests . m <nl> - ( void ) testExample { <nl> / / Use XCTAssert and related functions to verify your tests produce the correct results . <nl> } <nl> <nl> - - ( void ) testPerformanceExample { <nl> - / / This is an example of a performance test case . <nl> - [ self measureBlock : ^ { <nl> - / / Put the code you want to measure the time of here . <nl> - } ] ; <nl> - } <nl> - <nl> - ( void ) testErrorCodeInfo { <nl> <nl> self . exp = [ self expectationWithDescription : @ " Error ! " ] ; <nl> mmm a / ios / sdk / WeexSDKTests / WXRootViewTests . m <nl> ppp b / ios / sdk / WeexSDKTests / WXRootViewTests . m <nl> - ( void ) testInstanceAndWeexRootFrame { <nl> NSBundle * bundle = [ NSBundle bundleForClass : [ self class ] ] ; <nl> NSString * path = [ bundle pathForResource : @ " main " ofType : @ " js " ] ; <nl> NSString * script = [ NSString stringWithContentsOfFile : path encoding : NSUTF8StringEncoding error : nil ] ; <nl> - [ WXSDKEngine initSDKEnviroment : script ] ; <nl> + [ WXSDKEngine initSDKEnvironment : script ] ; <nl> [ WXLog setLogLevel : WXLogLevelDebug ] ; <nl> <nl> NSString * jsPath = [ bundle pathForResource : @ " testRootView " ofType : @ " js " ] ; <nl> - ( void ) testInstanceAndWeexRootFrame { <nl> [ instance1 renderView : jsScript options : nil data : nil ] ; <nl> XCTestExpectation * expectation1 = [ self expectationWithDescription : @ " instance 1 " ] ; <nl> instance1 . renderFinish = ^ ( UIView * view ) { <nl> - XCTAssert ( CGRectEqualToRect ( view . frame , instanceFrame ) ) ; <nl> - XCTAssert ( CGRectEqualToRect ( view . subviews [ 0 ] . frame , CGRectMake ( 0 , 0 , instanceFrame . size . width , instanceFrame . size . height ) ) ) ; <nl> + XCTAssert ( WXRectApproximateToRect ( view . frame , instanceFrame ) ) ; <nl> + XCTAssert ( WXRectApproximateToRect ( view . subviews [ 0 ] . frame , CGRectMake ( 0 , 0 , instanceFrame . size . width , instanceFrame . size . height ) ) ) ; <nl> [ expectation1 fulfill ] ; <nl> } ; <nl> <nl> - ( void ) testInstanceAndWeexRootFrame { <nl> [ instance2 renderView : jsScript options : nil data : templateRootFrameData ] ; <nl> XCTestExpectation * expectation2 = [ self expectationWithDescription : @ " instance 2 " ] ; <nl> instance2 . renderFinish = ^ ( UIView * view ) { <nl> - XCTAssert ( CGRectEqualToRect ( view . frame , instanceFrame ) ) ; <nl> - XCTAssert ( CGRectEqualToRect ( view . subviews [ 0 ] . frame , <nl> + XCTAssert ( WXRectApproximateToRect ( view . 
frame , instanceFrame ) ) ; <nl> + XCTAssert ( WXRectApproximateToRect ( view . subviews [ 0 ] . frame , <nl> CGRectMake ( <nl> WXPixelResize ( templateRootFrame . origin . x ) , <nl> WXPixelResize ( templateRootFrame . origin . y ) , <nl> - ( void ) testInstanceAndWeexRootFrame { <nl> XCTestExpectation * expectation3 = [ self expectationWithDescription : @ " instance 3 " ] ; <nl> XCTestExpectation * expectation31 = [ self expectationWithDescription : @ " instance 3 onLayoutChange " ] ; <nl> instance3 . renderFinish = ^ ( UIView * view ) { <nl> - XCTAssert ( CGRectEqualToRect ( view . frame , <nl> + XCTAssert ( WXRectApproximateToRect ( view . frame , <nl> CGRectMake ( 0 , 0 , <nl> WXPixelResize ( templateRootFrame . size . width ) , <nl> WXPixelResize ( templateRootFrame . size . height ) ) ) ) ; <nl> - XCTAssert ( CGRectEqualToRect ( view . subviews [ 0 ] . frame , <nl> + XCTAssert ( WXRectApproximateToRect ( view . subviews [ 0 ] . frame , <nl> CGRectMake ( <nl> WXPixelResize ( templateRootFrame . origin . x ) , <nl> WXPixelResize ( templateRootFrame . origin . y ) , <nl> - ( void ) testInstanceAndWeexRootFrame { <nl> <nl> [ instance3 refreshInstance : changedFrameData ] ; <nl> instance3 . onLayoutChange = ^ ( UIView * view ) { <nl> - XCTAssert ( CGRectEqualToRect ( view . frame , <nl> + XCTAssert ( WXRectApproximateToRect ( view . frame , <nl> CGRectMake ( 0 , 0 , <nl> WXPixelResize ( templateRootFrame . size . width ) , <nl> WXPixelResize ( 400 ) ) ) ) ; <nl> - ( void ) testInstanceAndWeexRootFrame { <nl> [ instance4 renderView : jsScript options : nil data : nil ] ; <nl> XCTestExpectation * expectation4 = [ self expectationWithDescription : @ " instance 4 " ] ; <nl> instance4 . renderFinish = ^ ( UIView * view ) { <nl> - XCTAssert ( CGRectEqualToRect ( view . frame , <nl> + XCTAssert ( WXRectApproximateToRect ( view . frame , <nl> CGRectMake ( 0 , 0 , WXPixelResize ( 100 ) , WXPixelResize ( 200 ) ) ) ) ; <nl> - XCTAssert ( CGRectEqualToRect ( view . subviews [ 0 ] . frame , <nl> + XCTAssert ( WXRectApproximateToRect ( view . subviews [ 0 ] . frame , <nl> CGRectMake ( 0 , 0 , WXPixelResize ( 100 ) , WXPixelResize ( 200 ) ) ) ) ; <nl> [ expectation4 fulfill ] ; <nl> } ; <nl> <nl> - [ self waitForExpectationsWithTimeout : 5 . 0 handler : ^ ( NSError * error ) { <nl> + [ self waitForExpectationsWithTimeout : 10 . 0 handler : ^ ( NSError * error ) { <nl> if ( error ) { <nl> NSLog ( @ " Timeout Error : % @ " , error ) ; <nl> } <nl> mmm a / ios / sdk / WeexSDKTests / WXSDKEngineTests . m <nl> ppp b / ios / sdk / WeexSDKTests / WXSDKEngineTests . m <nl> - ( void ) testExample { <nl> / / Use XCTAssert and related functions to verify your tests produce the correct results . <nl> } <nl> <nl> - - ( void ) testPerformanceExample { <nl> - / / This is an example of a performance test case . <nl> - [ self measureBlock : ^ { <nl> - / / Put the code you want to measure the time of here . <nl> - } ] ; <nl> - } <nl> - <nl> - ( void ) testRegisterModule { <nl> <nl> [ WXSDKEngine registerModule : @ " stream " withClass : NSClassFromString ( @ " WXStreamModule " ) ] ; <nl> mmm a / ios / sdk / WeexSDKTests / WXSDKManagerTests . m <nl> ppp b / ios / sdk / WeexSDKTests / WXSDKManagerTests . m <nl> - ( void ) testExample { <nl> / / Use XCTAssert and related functions to verify your tests produce the correct results . <nl> } <nl> <nl> - <nl> - ( void ) testWXSDKManager { <nl> id bridgeMgr = [ WXSDKManager bridgeMgr ] ; <nl> XCTAssertNotNil ( bridgeMgr ) ; <nl> mmm a / ios / sdk / WeexSDKTests / WXStorageTests . 
m <nl> ppp b / ios / sdk / WeexSDKTests / WXStorageTests . m <nl> - ( void ) testGetItemold { <nl> } ] ; <nl> } <nl> <nl> - - ( void ) testPerformanceExample { <nl> - / / This is an example of a performance test case . <nl> - [ self measureBlock : ^ { <nl> - / / Put the code you want to measure the time of here . <nl> - } ] ; <nl> - } <nl> - <nl> @ end <nl> mmm a / ios / sdk / WeexSDKTests / WXStreamModuleTests . m <nl> ppp b / ios / sdk / WeexSDKTests / WXStreamModuleTests . m <nl> - ( void ) sendHttp : ( NSDictionary * ) options { <nl> callbackResult = result ; <nl> [ _exp fulfill ] ; <nl> } ] ; <nl> - [ self waitForExpectationsWithTimeout : 10 handler : ^ ( NSError * error ) { <nl> + [ self waitForExpectationsWithTimeout : 20 handler : ^ ( NSError * error ) { <nl> XCTAssertNotNil ( callbackResult ) ; <nl> } ] ; <nl> <nl> mmm a / ios / sdk / WeexSDKTests / WXTimerModuleTests . m <nl> ppp b / ios / sdk / WeexSDKTests / WXTimerModuleTests . m <nl> - ( void ) testExample { <nl> / / Use XCTAssert and related functions to verify your tests produce the correct results . <nl> } <nl> <nl> - - ( void ) testPerformanceExample { <nl> - / / This is an example of a performance test case . <nl> - [ self measureBlock : ^ { <nl> - / / Put the code you want to measure the time of here . <nl> - } ] ; <nl> - } <nl> - <nl> - ( void ) testSetTimeout { <nl> <nl> self . exp = [ self expectationWithDescription : @ " Set Timeout Unit Test Error ! " ] ; <nl> mmm a / ios / sdk / WeexSDKTests / WeexSDKTests . m <nl> ppp b / ios / sdk / WeexSDKTests / WeexSDKTests . m <nl> - ( void ) testExample { <nl> / / Use XCTAssert and related functions to verify your tests produce the correct results . <nl> } <nl> <nl> - - ( void ) testPerformanceExample { <nl> - / / This is an example of a performance test case . <nl> - [ self measureBlock : ^ { <nl> - / / Put the code you want to measure the time of here . <nl> - } ] ; <nl> - } <nl> <nl> @ end <nl>
Merge commit ' 4abcd6cf0070c6301ed64d6bbcdef727365f347e ' into ios - feature - 20170118
apache/incubator-weex
44f3f0a0189b3d3facc2d0b44fe51ea851d2bbfe
2016-12-29T06:26:46Z
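Note on the incubator-weex record above: the tests replace exact CGRect/CATransform3D equality with component-wise comparison inside an absolute tolerance of 0.001, since values that pass through layout and animation pipelines rarely round-trip bit-exactly. A sketch of the same helpers in portable C++ (Rect is a stand-in for CGRect):

```cpp
#include <cassert>
#include <cmath>

// Sketch of the helpers the tests switch to: component-wise comparison within
// a small absolute tolerance (0.001, as in the diff) instead of exact float
// equality.
constexpr double kEpsilon = 0.001;

bool ApproxEqual(double x, double y) { return std::fabs(x - y) < kEpsilon; }

struct Rect {
  double x, y, w, h;
};

bool ApproxEqual(const Rect &a, const Rect &b) {
  return ApproxEqual(a.x, b.x) && ApproxEqual(a.y, b.y) &&
         ApproxEqual(a.w, b.w) && ApproxEqual(a.h, b.h);
}

int main() {
  // A sub-millipoint layout wobble no longer fails the comparison.
  assert(ApproxEqual(Rect{0, 0, 100.0000004, 200}, Rect{0, 0, 100, 200}));
}
```

An absolute epsilon is adequate for screen-coordinate magnitudes like these; for values spanning many orders of magnitude a relative tolerance would be the safer choice.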
mmm a / include / swift / AST / Attr . def <nl> ppp b / include / swift / AST / Attr . def <nl> DECL_ATTR ( differentiable , Differentiable , <nl> ABIStableToAdd | ABIBreakingToRemove | APIStableToAdd | APIBreakingToRemove , <nl> 91 ) <nl> <nl> + SIMPLE_DECL_ATTR ( _hasMissingDesignatedInitializers , <nl> + HasMissingDesignatedInitializers , OnClass | UserInaccessible | NotSerialized | <nl> + APIBreakingToAdd | ABIBreakingToAdd | APIStableToRemove | ABIStableToRemove , <nl> + 92 ) <nl> + <nl> + SIMPLE_DECL_ATTR ( _inheritsConvenienceInitializers , <nl> + InheritsConvenienceInitializers , OnClass | UserInaccessible | NotSerialized | <nl> + APIStableToAdd | ABIStableToAdd | APIBreakingToRemove | ABIBreakingToRemove , <nl> + 93 ) <nl> + <nl> SIMPLE_DECL_ATTR ( IBSegueAction , IBSegueAction , <nl> OnFunc | <nl> ABIStableToAdd | ABIStableToRemove | APIStableToAdd | APIStableToRemove , <nl> mmm a / lib / Sema / TypeCheckAttr . cpp <nl> ppp b / lib / Sema / TypeCheckAttr . cpp <nl> class AttributeChecker : public AttributeVisitor < AttributeChecker > { <nl> IGNORED_ATTR ( Exported ) <nl> IGNORED_ATTR ( ForbidSerializingReference ) <nl> IGNORED_ATTR ( HasStorage ) <nl> + IGNORED_ATTR ( HasMissingDesignatedInitializers ) <nl> + IGNORED_ATTR ( InheritsConvenienceInitializers ) <nl> IGNORED_ATTR ( Inline ) <nl> IGNORED_ATTR ( ObjCBridged ) <nl> IGNORED_ATTR ( ObjCNonLazyRealization ) <nl> mmm a / lib / Sema / TypeCheckDeclOverride . cpp <nl> ppp b / lib / Sema / TypeCheckDeclOverride . cpp <nl> namespace { <nl> UNINTERESTING_ATTR ( Exported ) <nl> UNINTERESTING_ATTR ( ForbidSerializingReference ) <nl> UNINTERESTING_ATTR ( GKInspectable ) <nl> + UNINTERESTING_ATTR ( HasMissingDesignatedInitializers ) <nl> UNINTERESTING_ATTR ( IBAction ) <nl> UNINTERESTING_ATTR ( IBDesignable ) <nl> UNINTERESTING_ATTR ( IBInspectable ) <nl> UNINTERESTING_ATTR ( IBOutlet ) <nl> UNINTERESTING_ATTR ( IBSegueAction ) <nl> UNINTERESTING_ATTR ( Indirect ) <nl> + UNINTERESTING_ATTR ( InheritsConvenienceInitializers ) <nl> UNINTERESTING_ATTR ( Inline ) <nl> UNINTERESTING_ATTR ( Optimize ) <nl> UNINTERESTING_ATTR ( Inlinable ) <nl> new file mode 100644 <nl> index 000000000000 . . 12b8171ccd3d <nl> mmm / dev / null <nl> ppp b / test / attr / attr_hasMissingDesignatedInits . swift <nl> <nl> + / / RUN : % target - swift - frontend - typecheck % s - verify <nl> + <nl> + / / This test just makes sure we don ' t error if we see either of these attributes . <nl> + <nl> + @ _hasMissingDesignatedInitializers / / no - error <nl> + class MyClass { } <nl> + <nl> + @ _inheritsConvenienceInitializers / / no - error <nl> + class MyOtherClass { } <nl>
Merge pull request from harlanhaskins / an - inconenient - init - parsing
apple/swift
6f349036a9f97ecd5fc3b19e1748d762d162584a
2019-11-22T01:28:09Z
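Note on the swift record above: Attr.def is an X-macro table, so declaring the two new attributes is one SIMPLE_DECL_ATTR entry each, and every includer (the attribute-kind enum, the checkers, and so on) picks the change up by redefining the macro. A generic sketch of the technique with made-up attribute names and IDs (only 92 echoes the commit):

```cpp
#include <cstdio>

// Generic sketch of the Attr.def technique: the table is a list of macro
// invocations, and each consumer redefines the macro to extract the column it
// needs, so one added line updates every consumer.
#define MY_ATTRS(ATTR)                                                        \
  ATTR(inlinable, Inlinable, 1)                                               \
  ATTR(hasStorage, HasStorage, 2)                                             \
  ATTR(hasMissingDesignatedInitializers, HasMissingDesignatedInitializers, 92)

// Expansion 1: an enum of attribute kinds.
enum AttrKind {
#define ATTR_ENUM(NAME, CLASS, ID) AK_##CLASS = ID,
  MY_ATTRS(ATTR_ENUM)
#undef ATTR_ENUM
};

// Expansion 2: a name/ID dump generated from the very same table.
int main() {
#define ATTR_PRINT(NAME, CLASS, ID) std::printf("@%s -> %d\n", #NAME, ID);
  MY_ATTRS(ATTR_PRINT)
#undef ATTR_PRINT
}
```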
mmm a / xbmc / addons / AddonCallbacksGUI . cpp <nl> ppp b / xbmc / addons / AddonCallbacksGUI . cpp <nl> void CGUIAddonWindowDialog : : Show_Internal ( bool show / * = true * / ) <nl> } <nl> <nl> CGUIAddonRenderingControl : : CGUIAddonRenderingControl ( CGUIRenderingControl * pControl ) <nl> - { <nl> - m_pControl = pControl ; <nl> - m_refCount = 1 ; <nl> - } <nl> + : m_pControl { pControl } , <nl> + m_clientHandle { nullptr } , <nl> + m_refCount { 1 } , <nl> + CBCreate { nullptr } , <nl> + CBDirty { nullptr } , <nl> + CBRender { nullptr } , <nl> + CBStop { nullptr } <nl> + { } <nl> <nl> bool CGUIAddonRenderingControl : : Create ( int x , int y , int w , int h , void * device ) <nl> { <nl> mmm a / xbmc / addons / AddonDatabase . h <nl> ppp b / xbmc / addons / AddonDatabase . h <nl> class CAddonDatabase : public CDatabase <nl> bool GetAddon ( int id , ADDON : : AddonPtr & addon ) ; <nl> <nl> / * keep in sync with the select in GetAddon * / <nl> - enum _AddonFields <nl> + enum AddonFields <nl> { <nl> addon_id = 0 , <nl> addon_type , <nl> class CAddonDatabase : public CDatabase <nl> dependencies_addon , <nl> dependencies_version , <nl> dependencies_optional <nl> - } AddonFields ; <nl> + } ; <nl> } ; <nl> <nl> mmm a / xbmc / addons / AddonManager . cpp <nl> ppp b / xbmc / addons / AddonManager . cpp <nl> bool CAddonMgr : : CheckUserDirs ( const cp_cfg_element_t * settings ) <nl> } <nl> <nl> CAddonMgr : : CAddonMgr ( ) <nl> - { <nl> - m_cpluff = NULL ; <nl> - } <nl> + : m_cpluff ( nullptr ) , <nl> + m_cp_context ( nullptr ) <nl> + { } <nl> <nl> CAddonMgr : : ~ CAddonMgr ( ) <nl> { <nl> mmm a / xbmc / addons / GUIDialogAddonInfo . cpp <nl> ppp b / xbmc / addons / GUIDialogAddonInfo . cpp <nl> using namespace ADDON ; <nl> using namespace XFILE ; <nl> <nl> CGUIDialogAddonInfo : : CGUIDialogAddonInfo ( void ) <nl> - : CGUIDialog ( WINDOW_DIALOG_ADDON_INFO , " DialogAddonInfo . xml " ) , m_jobid ( 0 ) <nl> + : CGUIDialog ( WINDOW_DIALOG_ADDON_INFO , " DialogAddonInfo . xml " ) , <nl> + m_jobid ( 0 ) , <nl> + m_changelog ( false ) <nl> { <nl> m_item = CFileItemPtr ( new CFileItem ) ; <nl> m_loadType = KEEP_IN_MEMORY ; <nl> mmm a / xbmc / addons / Scraper . h <nl> ppp b / xbmc / addons / Scraper . h <nl> class CScraperError <nl> class CScraper : public CAddon <nl> { <nl> public : <nl> - CScraper ( const AddonProps & props ) : CAddon ( props ) , m_fLoaded ( false ) { } <nl> + CScraper ( const AddonProps & props ) : <nl> + CAddon ( props ) , m_fLoaded ( false ) , m_requiressettings ( false ) , <nl> + m_pathContent ( CONTENT_NONE ) { } <nl> + <nl> CScraper ( const cp_extension_t * ext ) ; <nl> virtual ~ CScraper ( ) { } <nl> virtual AddonPtr Clone ( ) const ; <nl>
fixes 719099 , 719101 , 719105 , 1021008 , 1194442
xbmc/xbmc
cd355db0d23caf0976819249b11c24ac1f717f64
2015-03-07T13:52:10Z
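Note on the xbmc/xbmc record above: each touched constructor gains a full member-initializer list so that no pointer or flag is left indeterminate, which is the class of defect the referenced tracker IDs point at. A stripped-down sketch of the pattern (class and member names are illustrative):

```cpp
// Sketch: every pointer and counter gets a value in the constructor's
// member-initializer list, so nothing is left indeterminate.
class RenderingControl {
public:
  explicit RenderingControl(void *control)
      : m_control(control),
        m_clientHandle(nullptr),  // the kind of member previously left unset
        m_refCount(1) {}

private:
  void *m_control;       // members are initialized in this declaration order,
  void *m_clientHandle;  // regardless of the order written in the list
  int m_refCount;
};

int main() {
  RenderingControl ctl(nullptr);
  (void)ctl;
}
```

Worth remembering when writing such lists: members are initialized in declaration order, not list order, so keeping the two in sync avoids -Wreorder warnings.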
mmm a / grpc . def <nl> ppp b / grpc . def <nl> EXPORTS <nl> grpc_resource_quota_unref <nl> grpc_resource_quota_resize <nl> grpc_resource_quota_arg_vtable <nl> + grpc_channelz_get_top_channels <nl> + grpc_channelz_get_channel <nl> grpc_insecure_channel_create_from_fd <nl> grpc_server_add_insecure_channel_from_fd <nl> grpc_use_signal <nl> mmm a / include / grpc / grpc . h <nl> ppp b / include / grpc / grpc . h <nl> GRPCAPI void grpc_resource_quota_resize ( grpc_resource_quota * resource_quota , <nl> * / <nl> GRPCAPI const grpc_arg_pointer_vtable * grpc_resource_quota_arg_vtable ( void ) ; <nl> <nl> + / * * * * * * * * * * * * * CHANNELZ API * * * * * * * * * * * * * / <nl> + / * * Channelz is under active development . The following APIs will see some <nl> + churn as the feature is implemented . This comment will be removed once <nl> + channelz is officially supported , and these APIs become stable . For now <nl> + you may track the progress by following this github issue : <nl> + https : / / github . com / grpc / grpc / issues / 15340 <nl> + <nl> + the following APIs return allocated JSON strings that match the response <nl> + objects from the channelz proto , found here : <nl> + https : / / github . com / grpc / grpc / blob / master / src / proto / grpc / channelz / channelz . proto . <nl> + <nl> + For easy conversion to protobuf , The JSON is formatted according to : <nl> + https : / / developers . google . com / protocol - buffers / docs / proto3 # json . * / <nl> + <nl> + / * Gets all root channels ( i . e . channels the application has directly <nl> + created ) . This does not include subchannels nor non - top level channels . <nl> + The returned string is allocated and must be freed by the application . * / <nl> + GRPCAPI char * grpc_channelz_get_top_channels ( intptr_t start_channel_id ) ; <nl> + <nl> + / * Returns a single Channel , or else a NOT_FOUND code . The returned string <nl> + is allocated and must be freed by the application . * / <nl> + GRPCAPI char * grpc_channelz_get_channel ( intptr_t channel_id ) ; <nl> + <nl> # ifdef __cplusplus <nl> } <nl> # endif <nl> mmm a / src / core / lib / channel / channelz_registry . cc <nl> ppp b / src / core / lib / channel / channelz_registry . cc <nl> char * ChannelzRegistry : : InternalGetTopChannels ( intptr_t start_channel_id ) { <nl> <nl> } / / namespace channelz <nl> } / / namespace grpc_core <nl> + <nl> + char * grpc_channelz_get_top_channels ( intptr_t start_channel_id ) { <nl> + return grpc_core : : channelz : : ChannelzRegistry : : GetTopChannels ( <nl> + start_channel_id ) ; <nl> + } <nl> + <nl> + char * grpc_channelz_get_channel ( intptr_t channel_id ) { <nl> + grpc_core : : channelz : : ChannelNode * channel_node = <nl> + grpc_core : : channelz : : ChannelzRegistry : : GetChannelNode ( channel_id ) ; <nl> + if ( channel_node = = nullptr ) { <nl> + return nullptr ; <nl> + } <nl> + grpc_json * top_level_json = grpc_json_create ( GRPC_JSON_OBJECT ) ; <nl> + grpc_json * json = top_level_json ; <nl> + grpc_json * channel_json = channel_node - > RenderJson ( ) ; <nl> + channel_json - > key = " channel " ; <nl> + grpc_json_link_child ( json , channel_json , nullptr ) ; <nl> + char * json_str = grpc_json_dump_to_string ( top_level_json , 0 ) ; <nl> + grpc_json_destroy ( top_level_json ) ; <nl> + return json_str ; <nl> + } <nl> mmm a / src / ruby / ext / grpc / rb_grpc_imports . generated . c <nl> ppp b / src / ruby / ext / grpc / rb_grpc_imports . generated . 
c <nl> grpc_resource_quota_ref_type grpc_resource_quota_ref_import ; <nl> grpc_resource_quota_unref_type grpc_resource_quota_unref_import ; <nl> grpc_resource_quota_resize_type grpc_resource_quota_resize_import ; <nl> grpc_resource_quota_arg_vtable_type grpc_resource_quota_arg_vtable_import ; <nl> + grpc_channelz_get_top_channels_type grpc_channelz_get_top_channels_import ; <nl> + grpc_channelz_get_channel_type grpc_channelz_get_channel_import ; <nl> grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_from_fd_import ; <nl> grpc_server_add_insecure_channel_from_fd_type grpc_server_add_insecure_channel_from_fd_import ; <nl> grpc_use_signal_type grpc_use_signal_import ; <nl> void grpc_rb_load_imports ( HMODULE library ) { <nl> grpc_resource_quota_unref_import = ( grpc_resource_quota_unref_type ) GetProcAddress ( library , " grpc_resource_quota_unref " ) ; <nl> grpc_resource_quota_resize_import = ( grpc_resource_quota_resize_type ) GetProcAddress ( library , " grpc_resource_quota_resize " ) ; <nl> grpc_resource_quota_arg_vtable_import = ( grpc_resource_quota_arg_vtable_type ) GetProcAddress ( library , " grpc_resource_quota_arg_vtable " ) ; <nl> + grpc_channelz_get_top_channels_import = ( grpc_channelz_get_top_channels_type ) GetProcAddress ( library , " grpc_channelz_get_top_channels " ) ; <nl> + grpc_channelz_get_channel_import = ( grpc_channelz_get_channel_type ) GetProcAddress ( library , " grpc_channelz_get_channel " ) ; <nl> grpc_insecure_channel_create_from_fd_import = ( grpc_insecure_channel_create_from_fd_type ) GetProcAddress ( library , " grpc_insecure_channel_create_from_fd " ) ; <nl> grpc_server_add_insecure_channel_from_fd_import = ( grpc_server_add_insecure_channel_from_fd_type ) GetProcAddress ( library , " grpc_server_add_insecure_channel_from_fd " ) ; <nl> grpc_use_signal_import = ( grpc_use_signal_type ) GetProcAddress ( library , " grpc_use_signal " ) ; <nl> mmm a / src / ruby / ext / grpc / rb_grpc_imports . generated . h <nl> ppp b / src / ruby / ext / grpc / rb_grpc_imports . generated . h <nl> extern grpc_resource_quota_resize_type grpc_resource_quota_resize_import ; <nl> typedef const grpc_arg_pointer_vtable * ( * grpc_resource_quota_arg_vtable_type ) ( void ) ; <nl> extern grpc_resource_quota_arg_vtable_type grpc_resource_quota_arg_vtable_import ; <nl> # define grpc_resource_quota_arg_vtable grpc_resource_quota_arg_vtable_import <nl> + typedef char * ( * grpc_channelz_get_top_channels_type ) ( intptr_t start_channel_id ) ; <nl> + extern grpc_channelz_get_top_channels_type grpc_channelz_get_top_channels_import ; <nl> + # define grpc_channelz_get_top_channels grpc_channelz_get_top_channels_import <nl> + typedef char * ( * grpc_channelz_get_channel_type ) ( intptr_t channel_id ) ; <nl> + extern grpc_channelz_get_channel_type grpc_channelz_get_channel_import ; <nl> + # define grpc_channelz_get_channel grpc_channelz_get_channel_import <nl> typedef grpc_channel * ( * grpc_insecure_channel_create_from_fd_type ) ( const char * target , int fd , const grpc_channel_args * args ) ; <nl> extern grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_from_fd_import ; <nl> # define grpc_insecure_channel_create_from_fd grpc_insecure_channel_create_from_fd_import <nl> mmm a / test / core / channel / channelz_test . cc <nl> ppp b / test / core / channel / channelz_test . 
cc <nl> void ValidateGetTopChannels ( size_t expected_channels ) { <nl> EXPECT_EQ ( end - > type , GRPC_JSON_TRUE ) ; <nl> grpc_json_destroy ( parsed_json ) ; <nl> gpr_free ( json_str ) ; <nl> + / / also check that the core API formats this correctly <nl> + char * core_api_json_str = grpc_channelz_get_top_channels ( 0 ) ; <nl> + grpc : : testing : : ValidateGetTopChannelsResponseProtoJsonTranslation ( <nl> + core_api_json_str ) ; <nl> + gpr_free ( core_api_json_str ) ; <nl> } <nl> <nl> class ChannelFixture { <nl> void ValidateChannel ( ChannelNode * channel , validate_channel_data_args args ) { <nl> grpc : : testing : : ValidateChannelProtoJsonTranslation ( json_str ) ; <nl> ValidateCounters ( json_str , args ) ; <nl> gpr_free ( json_str ) ; <nl> + / / also check that the core API formats this the correct way <nl> + char * core_api_json_str = grpc_channelz_get_channel ( channel - > channel_uuid ( ) ) ; <nl> + grpc : : testing : : ValidateGetChannelResponseProtoJsonTranslation ( <nl> + core_api_json_str ) ; <nl> + gpr_free ( core_api_json_str ) ; <nl> } <nl> <nl> grpc_millis GetLastCallStartedMillis ( ChannelNode * channel ) { <nl> mmm a / test / core / surface / public_headers_must_be_c89 . c <nl> ppp b / test / core / surface / public_headers_must_be_c89 . c <nl> int main ( int argc , char * * argv ) { <nl> printf ( " % lx " , ( unsigned long ) grpc_resource_quota_unref ) ; <nl> printf ( " % lx " , ( unsigned long ) grpc_resource_quota_resize ) ; <nl> printf ( " % lx " , ( unsigned long ) grpc_resource_quota_arg_vtable ) ; <nl> + printf ( " % lx " , ( unsigned long ) grpc_channelz_get_top_channels ) ; <nl> + printf ( " % lx " , ( unsigned long ) grpc_channelz_get_channel ) ; <nl> printf ( " % lx " , ( unsigned long ) grpc_auth_property_iterator_next ) ; <nl> printf ( " % lx " , ( unsigned long ) grpc_auth_context_property_iterator ) ; <nl> printf ( " % lx " , ( unsigned long ) grpc_auth_context_peer_identity ) ; <nl> mmm a / test / cpp / util / channel_trace_proto_helper . cc <nl> ppp b / test / cpp / util / channel_trace_proto_helper . cc <nl> void ValidateGetTopChannelsResponseProtoJsonTranslation ( char * json_c_str ) { <nl> json_c_str ) ; <nl> } <nl> <nl> + void ValidateGetChannelResponseProtoJsonTranslation ( char * json_c_str ) { <nl> + VaidateProtoJsonTranslation < grpc : : channelz : : v1 : : GetChannelResponse > ( <nl> + json_c_str ) ; <nl> + } <nl> + <nl> } / / namespace testing <nl> } / / namespace grpc <nl> mmm a / test / cpp / util / channel_trace_proto_helper . h <nl> ppp b / test / cpp / util / channel_trace_proto_helper . h <nl> namespace testing { <nl> void ValidateChannelTraceProtoJsonTranslation ( char * json_c_str ) ; <nl> void ValidateChannelProtoJsonTranslation ( char * json_c_str ) ; <nl> void ValidateGetTopChannelsResponseProtoJsonTranslation ( char * json_c_str ) ; <nl> + void ValidateGetChannelResponseProtoJsonTranslation ( char * json_c_str ) ; <nl> <nl> } / / namespace testing <nl> } / / namespace grpc <nl>
Merge pull request from ncteisen / channelz - expose - get - channel
grpc/grpc
481c1d57e7fc92f74c79b9aa0f7c8f0eaa51000b
2018-07-18T23:21:51Z
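Note on the grpc record above: the two new entry points return heap-allocated JSON strings matching the channelz proto, and ownership passes to the caller; the updated tests release them with gpr_free. A minimal caller sketch using only APIs shown in the diff:

```cpp
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>

#include <cstdio>

// Minimal caller sketch for the two new entry points; both return an
// allocated JSON string whose ownership passes to the caller, released here
// with gpr_free as in the updated tests.
int main() {
  grpc_init();

  char *top = grpc_channelz_get_top_channels(0 /* start_channel_id */);
  if (top != nullptr) {
    std::printf("%s\n", top);
    gpr_free(top);
  }

  // grpc_channelz_get_channel(id) follows the same contract and yields
  // nullptr for an unknown channel id, per the registry code in the diff.

  grpc_shutdown();
  return 0;
}
```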
mmm a / src / arm / full - codegen - arm . cc <nl> ppp b / src / arm / full - codegen - arm . cc <nl> void FullCodeGenerator : : VisitForInStatement ( ForInStatement * stmt ) { <nl> __ mov ( result_register ( ) , r3 ) ; <nl> / / Perform the assignment as if via ' = ' . <nl> { EffectContext context ( this ) ; <nl> - EmitAssignment ( stmt - > each ( ) ) ; <nl> + EmitAssignment ( stmt - > each ( ) , stmt - > EachFeedbackSlot ( ) ) ; <nl> PrepareForBailoutForId ( stmt - > AssignmentId ( ) , NO_REGISTERS ) ; <nl> } <nl> <nl> void FullCodeGenerator : : VisitVariableProxy ( VariableProxy * expr ) { <nl> <nl> <nl> void FullCodeGenerator : : EmitSetHomeObjectIfNeeded ( Expression * initializer , <nl> - int offset ) { <nl> + int offset , <nl> + FeedbackVectorICSlot slot ) { <nl> if ( NeedsHomeObject ( initializer ) ) { <nl> __ ldr ( StoreDescriptor : : ReceiverRegister ( ) , MemOperand ( sp ) ) ; <nl> __ mov ( StoreDescriptor : : NameRegister ( ) , <nl> Operand ( isolate ( ) - > factory ( ) - > home_object_symbol ( ) ) ) ; <nl> __ ldr ( StoreDescriptor : : ValueRegister ( ) , <nl> MemOperand ( sp , offset * kPointerSize ) ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> CallStoreIC ( ) ; <nl> } <nl> } <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> <nl> AccessorTable accessor_table ( zone ( ) ) ; <nl> int property_index = 0 ; <nl> + / / store_slot_index points to the vector ic slot for the next store ic used . <nl> + / / ObjectLiteral : : ComputeFeedbackRequirements controls the allocation of slots <nl> + / / and must be updated if the number of store ics emitted here changes . <nl> + int store_slot_index = 0 ; <nl> for ( ; property_index < expr - > properties ( ) - > length ( ) ; property_index + + ) { <nl> ObjectLiteral : : Property * property = expr - > properties ( ) - > at ( property_index ) ; <nl> if ( property - > is_computed_name ( ) ) break ; <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> DCHECK ( StoreDescriptor : : ValueRegister ( ) . 
is ( r0 ) ) ; <nl> __ mov ( StoreDescriptor : : NameRegister ( ) , Operand ( key - > value ( ) ) ) ; <nl> __ ldr ( StoreDescriptor : : ReceiverRegister ( ) , MemOperand ( sp ) ) ; <nl> - CallStoreIC ( key - > LiteralFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > GetNthSlot ( store_slot_index + + ) ) ; <nl> + CallStoreIC ( ) ; <nl> + } else { <nl> + CallStoreIC ( key - > LiteralFeedbackId ( ) ) ; <nl> + } <nl> PrepareForBailoutForId ( key - > id ( ) , NO_REGISTERS ) ; <nl> <nl> if ( NeedsHomeObject ( value ) ) { <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> __ mov ( StoreDescriptor : : NameRegister ( ) , <nl> Operand ( isolate ( ) - > factory ( ) - > home_object_symbol ( ) ) ) ; <nl> __ ldr ( StoreDescriptor : : ValueRegister ( ) , MemOperand ( sp ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > GetNthSlot ( store_slot_index + + ) ) ; <nl> + } <nl> CallStoreIC ( ) ; <nl> } <nl> } else { <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> VisitForStackValue ( key ) ; <nl> VisitForStackValue ( value ) ; <nl> if ( property - > emit_store ( ) ) { <nl> - EmitSetHomeObjectIfNeeded ( value , 2 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + value , 2 , expr - > SlotForHomeObject ( value , & store_slot_index ) ) ; <nl> __ mov ( r0 , Operand ( Smi : : FromInt ( SLOPPY ) ) ) ; / / PropertyAttributes <nl> __ push ( r0 ) ; <nl> __ CallRuntime ( Runtime : : kSetProperty , 4 ) ; <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> __ push ( r0 ) ; <nl> VisitForStackValue ( it - > first ) ; <nl> EmitAccessor ( it - > second - > getter ) ; <nl> - EmitSetHomeObjectIfNeeded ( it - > second - > getter , 2 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + it - > second - > getter , 2 , <nl> + expr - > SlotForHomeObject ( it - > second - > getter , & store_slot_index ) ) ; <nl> EmitAccessor ( it - > second - > setter ) ; <nl> - EmitSetHomeObjectIfNeeded ( it - > second - > setter , 3 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + it - > second - > setter , 3 , <nl> + expr - > SlotForHomeObject ( it - > second - > setter , & store_slot_index ) ) ; <nl> __ mov ( r0 , Operand ( Smi : : FromInt ( NONE ) ) ) ; <nl> __ push ( r0 ) ; <nl> __ CallRuntime ( Runtime : : kDefineAccessorPropertyUnchecked , 5 ) ; <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> } else { <nl> EmitPropertyKey ( property , expr - > GetIdForProperty ( property_index ) ) ; <nl> VisitForStackValue ( value ) ; <nl> - EmitSetHomeObjectIfNeeded ( value , 2 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + value , 2 , expr - > SlotForHomeObject ( value , & store_slot_index ) ) ; <nl> <nl> switch ( property - > kind ( ) ) { <nl> case ObjectLiteral : : Property : : CONSTANT : <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> } else { <nl> context ( ) - > Plug ( r0 ) ; <nl> } <nl> + <nl> + / / Verify that compilation exactly consumed the number of store ic slots that <nl> + / / the ObjectLiteral node had to offer . <nl> + DCHECK ( ! 
FLAG_vector_stores | | store_slot_index = = expr - > slot_count ( ) ) ; <nl> } <nl> <nl> <nl> void FullCodeGenerator : : VisitAssignment ( Assignment * expr ) { <nl> Comment cmnt ( masm_ , " [ Assignment " ) ; <nl> <nl> Property * property = expr - > target ( ) - > AsProperty ( ) ; <nl> - LhsKind assign_type = GetAssignType ( property ) ; <nl> + LhsKind assign_type = Property : : GetAssignType ( property ) ; <nl> <nl> / / Evaluate LHS expression . <nl> switch ( assign_type ) { <nl> void FullCodeGenerator : : VisitAssignment ( Assignment * expr ) { <nl> switch ( assign_type ) { <nl> case VARIABLE : <nl> EmitVariableAssignment ( expr - > target ( ) - > AsVariableProxy ( ) - > var ( ) , <nl> - expr - > op ( ) ) ; <nl> + expr - > op ( ) , expr - > AssignmentSlot ( ) ) ; <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( r0 ) ; <nl> break ; <nl> void FullCodeGenerator : : EmitBinaryOp ( BinaryOperation * expr , Token : : Value op ) { <nl> } <nl> <nl> <nl> - void FullCodeGenerator : : EmitAssignment ( Expression * expr ) { <nl> + void FullCodeGenerator : : EmitAssignment ( Expression * expr , <nl> + FeedbackVectorICSlot slot ) { <nl> DCHECK ( expr - > IsValidReferenceExpression ( ) ) ; <nl> <nl> Property * prop = expr - > AsProperty ( ) ; <nl> - LhsKind assign_type = GetAssignType ( prop ) ; <nl> + LhsKind assign_type = Property : : GetAssignType ( prop ) ; <nl> <nl> switch ( assign_type ) { <nl> case VARIABLE : { <nl> Variable * var = expr - > AsVariableProxy ( ) - > var ( ) ; <nl> EffectContext context ( this ) ; <nl> - EmitVariableAssignment ( var , Token : : ASSIGN ) ; <nl> + EmitVariableAssignment ( var , Token : : ASSIGN , slot ) ; <nl> break ; <nl> } <nl> case NAMED_PROPERTY : { <nl> void FullCodeGenerator : : EmitAssignment ( Expression * expr ) { <nl> __ pop ( StoreDescriptor : : ValueRegister ( ) ) ; / / Restore value . <nl> __ mov ( StoreDescriptor : : NameRegister ( ) , <nl> Operand ( prop - > key ( ) - > AsLiteral ( ) - > value ( ) ) ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> CallStoreIC ( ) ; <nl> break ; <nl> } <nl> void FullCodeGenerator : : EmitAssignment ( Expression * expr ) { <nl> __ Move ( StoreDescriptor : : NameRegister ( ) , r0 ) ; <nl> __ Pop ( StoreDescriptor : : ValueRegister ( ) , <nl> StoreDescriptor : : ReceiverRegister ( ) ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> Handle < Code > ic = <nl> CodeFactory : : KeyedStoreIC ( isolate ( ) , language_mode ( ) ) . code ( ) ; <nl> CallIC ( ic ) ; <nl> void FullCodeGenerator : : EmitStoreToStackLocalOrContextSlot ( <nl> } <nl> <nl> <nl> - void FullCodeGenerator : : EmitVariableAssignment ( Variable * var , Token : : Value op ) { <nl> + void FullCodeGenerator : : EmitVariableAssignment ( Variable * var , Token : : Value op , <nl> + FeedbackVectorICSlot slot ) { <nl> if ( var - > IsUnallocated ( ) ) { <nl> / / Global var , const , or let . <nl> __ mov ( StoreDescriptor : : NameRegister ( ) , Operand ( var - > name ( ) ) ) ; <nl> __ ldr ( StoreDescriptor : : ReceiverRegister ( ) , GlobalObjectOperand ( ) ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> CallStoreIC ( ) ; <nl> <nl> } else if ( var - > mode ( ) = = LET & & op ! 
= Token : : INIT_LET ) { <nl> void FullCodeGenerator : : EmitNamedPropertyAssignment ( Assignment * expr ) { <nl> __ mov ( StoreDescriptor : : NameRegister ( ) , <nl> Operand ( prop - > key ( ) - > AsLiteral ( ) - > value ( ) ) ) ; <nl> __ pop ( StoreDescriptor : : ReceiverRegister ( ) ) ; <nl> - CallStoreIC ( expr - > AssignmentFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > AssignmentSlot ( ) ) ; <nl> + CallStoreIC ( ) ; <nl> + } else { <nl> + CallStoreIC ( expr - > AssignmentFeedbackId ( ) ) ; <nl> + } <nl> <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( r0 ) ; <nl> void FullCodeGenerator : : EmitKeyedPropertyAssignment ( Assignment * expr ) { <nl> <nl> Handle < Code > ic = <nl> CodeFactory : : KeyedStoreIC ( isolate ( ) , language_mode ( ) ) . code ( ) ; <nl> - CallIC ( ic , expr - > AssignmentFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > AssignmentSlot ( ) ) ; <nl> + CallIC ( ic ) ; <nl> + } else { <nl> + CallIC ( ic , expr - > AssignmentFeedbackId ( ) ) ; <nl> + } <nl> <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( r0 ) ; <nl> void FullCodeGenerator : : EmitLoadSuperConstructor ( ) { <nl> <nl> <nl> void FullCodeGenerator : : EmitInitializeThisAfterSuper ( <nl> - SuperReference * super_ref ) { <nl> + SuperReference * super_ref , FeedbackVectorICSlot slot ) { <nl> Variable * this_var = super_ref - > this_var ( ) - > var ( ) ; <nl> GetVar ( r1 , this_var ) ; <nl> __ CompareRoot ( r1 , Heap : : kTheHoleValueRootIndex ) ; <nl> void FullCodeGenerator : : EmitInitializeThisAfterSuper ( <nl> __ CallRuntime ( Runtime : : kThrowReferenceError , 1 ) ; <nl> __ bind ( & uninitialized_this ) ; <nl> <nl> - EmitVariableAssignment ( this_var , Token : : INIT_CONST ) ; <nl> + EmitVariableAssignment ( this_var , Token : : INIT_CONST , slot ) ; <nl> } <nl> <nl> <nl> void FullCodeGenerator : : EmitSuperConstructorCall ( Call * expr ) { <nl> <nl> RecordJSReturnSite ( expr ) ; <nl> <nl> - EmitInitializeThisAfterSuper ( expr - > expression ( ) - > AsSuperReference ( ) ) ; <nl> + EmitInitializeThisAfterSuper ( expr - > expression ( ) - > AsSuperReference ( ) , <nl> + expr - > CallFeedbackICSlot ( ) ) ; <nl> context ( ) - > Plug ( r0 ) ; <nl> } <nl> <nl> void FullCodeGenerator : : EmitCallSuperWithSpread ( CallRuntime * expr ) { <nl> __ ldr ( cp , MemOperand ( fp , StandardFrameConstants : : kContextOffset ) ) ; <nl> context ( ) - > DropAndPlug ( 1 , r0 ) ; <nl> <nl> + / / TODO ( mvstanton ) : with FLAG_vector_stores this needs a slot id . <nl> EmitInitializeThisAfterSuper ( super_reference ) ; <nl> } <nl> <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> SetSourcePosition ( expr - > position ( ) ) ; <nl> <nl> Property * prop = expr - > expression ( ) - > AsProperty ( ) ; <nl> - LhsKind assign_type = GetAssignType ( prop ) ; <nl> + LhsKind assign_type = Property : : GetAssignType ( prop ) ; <nl> <nl> / / Evaluate expression and get value . <nl> if ( assign_type = = VARIABLE ) { <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> if ( expr - > is_postfix ( ) ) { <nl> { EffectContext context ( this ) ; <nl> EmitVariableAssignment ( expr - > expression ( ) - > AsVariableProxy ( ) - > var ( ) , <nl> - Token : : ASSIGN ) ; <nl> + Token : : ASSIGN , expr - > CountSlot ( ) ) ; <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context . 
Plug ( r0 ) ; <nl> } <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> } <nl> } else { <nl> EmitVariableAssignment ( expr - > expression ( ) - > AsVariableProxy ( ) - > var ( ) , <nl> - Token : : ASSIGN ) ; <nl> + Token : : ASSIGN , expr - > CountSlot ( ) ) ; <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( r0 ) ; <nl> } <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> __ mov ( StoreDescriptor : : NameRegister ( ) , <nl> Operand ( prop - > key ( ) - > AsLiteral ( ) - > value ( ) ) ) ; <nl> __ pop ( StoreDescriptor : : ReceiverRegister ( ) ) ; <nl> - CallStoreIC ( expr - > CountStoreFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > CountSlot ( ) ) ; <nl> + CallStoreIC ( ) ; <nl> + } else { <nl> + CallStoreIC ( expr - > CountStoreFeedbackId ( ) ) ; <nl> + } <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> if ( expr - > is_postfix ( ) ) { <nl> if ( ! context ( ) - > IsEffect ( ) ) { <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> StoreDescriptor : : NameRegister ( ) ) ; <nl> Handle < Code > ic = <nl> CodeFactory : : KeyedStoreIC ( isolate ( ) , language_mode ( ) ) . code ( ) ; <nl> - CallIC ( ic , expr - > CountStoreFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > CountSlot ( ) ) ; <nl> + CallIC ( ic ) ; <nl> + } else { <nl> + CallIC ( ic , expr - > CountStoreFeedbackId ( ) ) ; <nl> + } <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> if ( expr - > is_postfix ( ) ) { <nl> if ( ! context ( ) - > IsEffect ( ) ) { <nl> void FullCodeGenerator : : ClearPendingMessage ( ) { <nl> } <nl> <nl> <nl> + void FullCodeGenerator : : EmitLoadStoreICSlot ( FeedbackVectorICSlot slot ) { <nl> + DCHECK ( FLAG_vector_stores & & ! slot . IsInvalid ( ) ) ; <nl> + __ mov ( VectorStoreICTrampolineDescriptor : : SlotRegister ( ) , <nl> + Operand ( SmiFromSlot ( slot ) ) ) ; <nl> + } <nl> + <nl> + <nl> # undef __ <nl> <nl> <nl> mmm a / src / arm64 / full - codegen - arm64 . cc <nl> ppp b / src / arm64 / full - codegen - arm64 . cc <nl> void FullCodeGenerator : : VisitForInStatement ( ForInStatement * stmt ) { <nl> __ Mov ( result_register ( ) , x3 ) ; <nl> / / Perform the assignment as if via ' = ' . 
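<nl> + / / Note : with FLAG_vector_stores enabled , the assignment below also <nl> + / / passes EachFeedbackSlot ( ) from the statement so the store ic that <nl> + / / writes the loop variable can record feedback in the vector .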
<nl> { EffectContext context ( this ) ; <nl> - EmitAssignment ( stmt - > each ( ) ) ; <nl> + EmitAssignment ( stmt - > each ( ) , stmt - > EachFeedbackSlot ( ) ) ; <nl> PrepareForBailoutForId ( stmt - > AssignmentId ( ) , NO_REGISTERS ) ; <nl> } <nl> <nl> void FullCodeGenerator : : VisitVariableProxy ( VariableProxy * expr ) { <nl> <nl> <nl> void FullCodeGenerator : : EmitSetHomeObjectIfNeeded ( Expression * initializer , <nl> - int offset ) { <nl> + int offset , <nl> + FeedbackVectorICSlot slot ) { <nl> if ( NeedsHomeObject ( initializer ) ) { <nl> __ Peek ( StoreDescriptor : : ReceiverRegister ( ) , 0 ) ; <nl> __ Mov ( StoreDescriptor : : NameRegister ( ) , <nl> Operand ( isolate ( ) - > factory ( ) - > home_object_symbol ( ) ) ) ; <nl> __ Peek ( StoreDescriptor : : ValueRegister ( ) , offset * kPointerSize ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> CallStoreIC ( ) ; <nl> } <nl> } <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> <nl> AccessorTable accessor_table ( zone ( ) ) ; <nl> int property_index = 0 ; <nl> + / / store_slot_index points to the vector ic slot for the next store ic used . <nl> + / / ObjectLiteral : : ComputeFeedbackRequirements controls the allocation of slots <nl> + / / and must be updated if the number of store ics emitted here changes . <nl> + int store_slot_index = 0 ; <nl> for ( ; property_index < expr - > properties ( ) - > length ( ) ; property_index + + ) { <nl> ObjectLiteral : : Property * property = expr - > properties ( ) - > at ( property_index ) ; <nl> if ( property - > is_computed_name ( ) ) break ; <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> DCHECK ( StoreDescriptor : : ValueRegister ( ) . is ( x0 ) ) ; <nl> __ Mov ( StoreDescriptor : : NameRegister ( ) , Operand ( key - > value ( ) ) ) ; <nl> __ Peek ( StoreDescriptor : : ReceiverRegister ( ) , 0 ) ; <nl> - CallStoreIC ( key - > LiteralFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > GetNthSlot ( store_slot_index + + ) ) ; <nl> + CallStoreIC ( ) ; <nl> + } else { <nl> + CallStoreIC ( key - > LiteralFeedbackId ( ) ) ; <nl> + } <nl> PrepareForBailoutForId ( key - > id ( ) , NO_REGISTERS ) ; <nl> <nl> if ( NeedsHomeObject ( value ) ) { <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> __ Mov ( StoreDescriptor : : NameRegister ( ) , <nl> Operand ( isolate ( ) - > factory ( ) - > home_object_symbol ( ) ) ) ; <nl> __ Peek ( StoreDescriptor : : ValueRegister ( ) , 0 ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > GetNthSlot ( store_slot_index + + ) ) ; <nl> + } <nl> CallStoreIC ( ) ; <nl> } <nl> } else { <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> VisitForStackValue ( key ) ; <nl> VisitForStackValue ( value ) ; <nl> if ( property - > emit_store ( ) ) { <nl> - EmitSetHomeObjectIfNeeded ( value , 2 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + value , 2 , expr - > SlotForHomeObject ( value , & store_slot_index ) ) ; <nl> __ Mov ( x0 , Smi : : FromInt ( SLOPPY ) ) ; / / Language mode <nl> __ Push ( x0 ) ; <nl> __ CallRuntime ( Runtime : : kSetProperty , 4 ) ; <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> __ Push ( x10 ) ; <nl> VisitForStackValue ( it - > first ) ; <nl> EmitAccessor ( it - > second - > getter ) ; <nl> - EmitSetHomeObjectIfNeeded ( it - > second - > getter , 2 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + it - > second 
- > getter , 2 , <nl> + expr - > SlotForHomeObject ( it - > second - > getter , & store_slot_index ) ) ; <nl> EmitAccessor ( it - > second - > setter ) ; <nl> - EmitSetHomeObjectIfNeeded ( it - > second - > setter , 3 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + it - > second - > setter , 3 , <nl> + expr - > SlotForHomeObject ( it - > second - > setter , & store_slot_index ) ) ; <nl> __ Mov ( x10 , Smi : : FromInt ( NONE ) ) ; <nl> __ Push ( x10 ) ; <nl> __ CallRuntime ( Runtime : : kDefineAccessorPropertyUnchecked , 5 ) ; <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> } else { <nl> EmitPropertyKey ( property , expr - > GetIdForProperty ( property_index ) ) ; <nl> VisitForStackValue ( value ) ; <nl> - EmitSetHomeObjectIfNeeded ( value , 2 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + value , 2 , expr - > SlotForHomeObject ( value , & store_slot_index ) ) ; <nl> <nl> switch ( property - > kind ( ) ) { <nl> case ObjectLiteral : : Property : : CONSTANT : <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> } else { <nl> context ( ) - > Plug ( x0 ) ; <nl> } <nl> + <nl> + / / Verify that compilation exactly consumed the number of store ic slots that <nl> + / / the ObjectLiteral node had to offer . <nl> + DCHECK ( ! FLAG_vector_stores | | store_slot_index = = expr - > slot_count ( ) ) ; <nl> } <nl> <nl> <nl> void FullCodeGenerator : : VisitAssignment ( Assignment * expr ) { <nl> Comment cmnt ( masm_ , " [ Assignment " ) ; <nl> <nl> Property * property = expr - > target ( ) - > AsProperty ( ) ; <nl> - LhsKind assign_type = GetAssignType ( property ) ; <nl> + LhsKind assign_type = Property : : GetAssignType ( property ) ; <nl> <nl> / / Evaluate LHS expression . <nl> switch ( assign_type ) { <nl> void FullCodeGenerator : : VisitAssignment ( Assignment * expr ) { <nl> switch ( assign_type ) { <nl> case VARIABLE : <nl> EmitVariableAssignment ( expr - > target ( ) - > AsVariableProxy ( ) - > var ( ) , <nl> - expr - > op ( ) ) ; <nl> + expr - > op ( ) , expr - > AssignmentSlot ( ) ) ; <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( x0 ) ; <nl> break ; <nl> void FullCodeGenerator : : EmitClassDefineProperties ( ClassLiteral * lit ) { <nl> } <nl> <nl> <nl> - void FullCodeGenerator : : EmitAssignment ( Expression * expr ) { <nl> + void FullCodeGenerator : : EmitAssignment ( Expression * expr , <nl> + FeedbackVectorICSlot slot ) { <nl> DCHECK ( expr - > IsValidReferenceExpression ( ) ) ; <nl> <nl> Property * prop = expr - > AsProperty ( ) ; <nl> - LhsKind assign_type = GetAssignType ( prop ) ; <nl> + LhsKind assign_type = Property : : GetAssignType ( prop ) ; <nl> <nl> switch ( assign_type ) { <nl> case VARIABLE : { <nl> Variable * var = expr - > AsVariableProxy ( ) - > var ( ) ; <nl> EffectContext context ( this ) ; <nl> - EmitVariableAssignment ( var , Token : : ASSIGN ) ; <nl> + EmitVariableAssignment ( var , Token : : ASSIGN , slot ) ; <nl> break ; <nl> } <nl> case NAMED_PROPERTY : { <nl> void FullCodeGenerator : : EmitAssignment ( Expression * expr ) { <nl> __ Pop ( StoreDescriptor : : ValueRegister ( ) ) ; / / Restore value . 
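<nl> + / / Note : when FLAG_vector_stores is on , EmitLoadStoreICSlot must run <nl> + / / before CallStoreIC so the slot register is loaded for the ic below .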
<nl> __ Mov ( StoreDescriptor : : NameRegister ( ) , <nl> Operand ( prop - > key ( ) - > AsLiteral ( ) - > value ( ) ) ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> CallStoreIC ( ) ; <nl> break ; <nl> } <nl> void FullCodeGenerator : : EmitAssignment ( Expression * expr ) { <nl> __ Mov ( StoreDescriptor : : NameRegister ( ) , x0 ) ; <nl> __ Pop ( StoreDescriptor : : ReceiverRegister ( ) , <nl> StoreDescriptor : : ValueRegister ( ) ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> Handle < Code > ic = <nl> CodeFactory : : KeyedStoreIC ( isolate ( ) , language_mode ( ) ) . code ( ) ; <nl> CallIC ( ic ) ; <nl> void FullCodeGenerator : : EmitStoreToStackLocalOrContextSlot ( <nl> } <nl> <nl> <nl> - void FullCodeGenerator : : EmitVariableAssignment ( Variable * var , <nl> - Token : : Value op ) { <nl> + void FullCodeGenerator : : EmitVariableAssignment ( Variable * var , Token : : Value op , <nl> + FeedbackVectorICSlot slot ) { <nl> ASM_LOCATION ( " FullCodeGenerator : : EmitVariableAssignment " ) ; <nl> if ( var - > IsUnallocated ( ) ) { <nl> / / Global var , const , or let . <nl> __ Mov ( StoreDescriptor : : NameRegister ( ) , Operand ( var - > name ( ) ) ) ; <nl> __ Ldr ( StoreDescriptor : : ReceiverRegister ( ) , GlobalObjectMemOperand ( ) ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> CallStoreIC ( ) ; <nl> <nl> } else if ( var - > mode ( ) = = LET & & op ! = Token : : INIT_LET ) { <nl> void FullCodeGenerator : : EmitNamedPropertyAssignment ( Assignment * expr ) { <nl> __ Mov ( StoreDescriptor : : NameRegister ( ) , <nl> Operand ( prop - > key ( ) - > AsLiteral ( ) - > value ( ) ) ) ; <nl> __ Pop ( StoreDescriptor : : ReceiverRegister ( ) ) ; <nl> - CallStoreIC ( expr - > AssignmentFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > AssignmentSlot ( ) ) ; <nl> + CallStoreIC ( ) ; <nl> + } else { <nl> + CallStoreIC ( expr - > AssignmentFeedbackId ( ) ) ; <nl> + } <nl> <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( x0 ) ; <nl> void FullCodeGenerator : : EmitKeyedPropertyAssignment ( Assignment * expr ) { <nl> <nl> Handle < Code > ic = <nl> CodeFactory : : KeyedStoreIC ( isolate ( ) , language_mode ( ) ) . 
code ( ) ; <nl> - CallIC ( ic , expr - > AssignmentFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > AssignmentSlot ( ) ) ; <nl> + CallIC ( ic ) ; <nl> + } else { <nl> + CallIC ( ic , expr - > AssignmentFeedbackId ( ) ) ; <nl> + } <nl> <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( x0 ) ; <nl> void FullCodeGenerator : : EmitLoadSuperConstructor ( ) { <nl> <nl> <nl> void FullCodeGenerator : : EmitInitializeThisAfterSuper ( <nl> - SuperReference * super_ref ) { <nl> + SuperReference * super_ref , FeedbackVectorICSlot slot ) { <nl> Variable * this_var = super_ref - > this_var ( ) - > var ( ) ; <nl> GetVar ( x1 , this_var ) ; <nl> Label uninitialized_this ; <nl> void FullCodeGenerator : : EmitInitializeThisAfterSuper ( <nl> __ CallRuntime ( Runtime : : kThrowReferenceError , 1 ) ; <nl> __ bind ( & uninitialized_this ) ; <nl> <nl> - EmitVariableAssignment ( this_var , Token : : INIT_CONST ) ; <nl> + EmitVariableAssignment ( this_var , Token : : INIT_CONST , slot ) ; <nl> } <nl> <nl> <nl> void FullCodeGenerator : : EmitSuperConstructorCall ( Call * expr ) { <nl> <nl> RecordJSReturnSite ( expr ) ; <nl> <nl> - EmitInitializeThisAfterSuper ( expr - > expression ( ) - > AsSuperReference ( ) ) ; <nl> + EmitInitializeThisAfterSuper ( expr - > expression ( ) - > AsSuperReference ( ) , <nl> + expr - > CallFeedbackICSlot ( ) ) ; <nl> context ( ) - > Plug ( x0 ) ; <nl> } <nl> <nl> void FullCodeGenerator : : EmitCallSuperWithSpread ( CallRuntime * expr ) { <nl> __ Ldr ( cp , MemOperand ( fp , StandardFrameConstants : : kContextOffset ) ) ; <nl> context ( ) - > DropAndPlug ( 1 , x0 ) ; <nl> <nl> + / / TODO ( mvstanton ) : with FLAG_vector_stores this needs a slot id . <nl> EmitInitializeThisAfterSuper ( super_reference ) ; <nl> } <nl> <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> SetSourcePosition ( expr - > position ( ) ) ; <nl> <nl> Property * prop = expr - > expression ( ) - > AsProperty ( ) ; <nl> - LhsKind assign_type = GetAssignType ( prop ) ; <nl> + LhsKind assign_type = Property : : GetAssignType ( prop ) ; <nl> <nl> / / Evaluate expression and get value . <nl> if ( assign_type = = VARIABLE ) { <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> if ( expr - > is_postfix ( ) ) { <nl> { EffectContext context ( this ) ; <nl> EmitVariableAssignment ( expr - > expression ( ) - > AsVariableProxy ( ) - > var ( ) , <nl> - Token : : ASSIGN ) ; <nl> + Token : : ASSIGN , expr - > CountSlot ( ) ) ; <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context . 
Plug ( x0 ) ; <nl> } <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> } <nl> } else { <nl> EmitVariableAssignment ( expr - > expression ( ) - > AsVariableProxy ( ) - > var ( ) , <nl> - Token : : ASSIGN ) ; <nl> + Token : : ASSIGN , expr - > CountSlot ( ) ) ; <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( x0 ) ; <nl> } <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> __ Mov ( StoreDescriptor : : NameRegister ( ) , <nl> Operand ( prop - > key ( ) - > AsLiteral ( ) - > value ( ) ) ) ; <nl> __ Pop ( StoreDescriptor : : ReceiverRegister ( ) ) ; <nl> - CallStoreIC ( expr - > CountStoreFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > CountSlot ( ) ) ; <nl> + CallStoreIC ( ) ; <nl> + } else { <nl> + CallStoreIC ( expr - > CountStoreFeedbackId ( ) ) ; <nl> + } <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> if ( expr - > is_postfix ( ) ) { <nl> if ( ! context ( ) - > IsEffect ( ) ) { <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> __ Pop ( StoreDescriptor : : ReceiverRegister ( ) ) ; <nl> Handle < Code > ic = <nl> CodeFactory : : KeyedStoreIC ( isolate ( ) , language_mode ( ) ) . code ( ) ; <nl> - CallIC ( ic , expr - > CountStoreFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > CountSlot ( ) ) ; <nl> + CallIC ( ic ) ; <nl> + } else { <nl> + CallIC ( ic , expr - > CountStoreFeedbackId ( ) ) ; <nl> + } <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> if ( expr - > is_postfix ( ) ) { <nl> if ( ! context ( ) - > IsEffect ( ) ) { <nl> void FullCodeGenerator : : ClearPendingMessage ( ) { <nl> } <nl> <nl> <nl> + void FullCodeGenerator : : EmitLoadStoreICSlot ( FeedbackVectorICSlot slot ) { <nl> + DCHECK ( FLAG_vector_stores & & ! slot . IsInvalid ( ) ) ; <nl> + __ Mov ( VectorStoreICTrampolineDescriptor : : SlotRegister ( ) , SmiFromSlot ( slot ) ) ; <nl> + } <nl> + <nl> + <nl> # undef __ <nl> <nl> <nl> mmm a / src / ast - numbering . cc <nl> ppp b / src / ast - numbering . cc <nl> void AstNumberingVisitor : : VisitAssignment ( Assignment * node ) { <nl> if ( node - > is_compound ( ) ) VisitBinaryOperation ( node - > binary_operation ( ) ) ; <nl> Visit ( node - > target ( ) ) ; <nl> Visit ( node - > value ( ) ) ; <nl> + ReserveFeedbackSlots ( node ) ; <nl> } <nl> <nl> <nl> void AstNumberingVisitor : : VisitSpread ( Spread * node ) { <nl> void AstNumberingVisitor : : VisitForInStatement ( ForInStatement * node ) { <nl> IncrementNodeCount ( ) ; <nl> DisableSelfOptimization ( ) ; <nl> - ReserveFeedbackSlots ( node ) ; <nl> node - > set_base_id ( ReserveIdRange ( ForInStatement : : num_ids ( ) ) ) ; <nl> Visit ( node - > each ( ) ) ; <nl> Visit ( node - > enumerable ( ) ) ; <nl> Visit ( node - > body ( ) ) ; <nl> + ReserveFeedbackSlots ( node ) ; <nl> } <nl> <nl> <nl> void AstNumberingVisitor : : VisitForOfStatement ( ForOfStatement * node ) { <nl> Visit ( node - > result_done ( ) ) ; <nl> Visit ( node - > assign_each ( ) ) ; <nl> Visit ( node - > body ( ) ) ; <nl> + ReserveFeedbackSlots ( node ) ; <nl> } <nl> <nl> <nl> void AstNumberingVisitor : : VisitObjectLiteral ( ObjectLiteral * node ) { <nl> / / is shadowed by a later occurrence of the same key . For the <nl> / / marked expressions , no store code is emitted . 
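<nl> + / / Note : ReserveFeedbackSlots runs after CalculateEmitStore , since the <nl> + / / slot count in ObjectLiteral : : ComputeFeedbackRequirements depends on <nl> + / / emit_store ( ) .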
<nl> node - > CalculateEmitStore ( zone ( ) ) ; <nl> + ReserveFeedbackSlots ( node ) ; <nl> } <nl> <nl> <nl> mmm a / src / ast . cc <nl> ppp b / src / ast . cc <nl> FeedbackVectorRequirements VariableProxy : : ComputeFeedbackRequirements ( <nl> } <nl> <nl> <nl> + static int GetStoreICSlots ( Expression * expr ) { <nl> + int ic_slots = 0 ; <nl> + if ( FLAG_vector_stores ) { <nl> + Property * property = expr - > AsProperty ( ) ; <nl> + LhsKind assign_type = Property : : GetAssignType ( property ) ; <nl> + if ( ( assign_type = = VARIABLE & & <nl> + expr - > AsVariableProxy ( ) - > var ( ) - > IsUnallocated ( ) ) | | <nl> + assign_type = = NAMED_PROPERTY | | assign_type = = KEYED_PROPERTY ) { <nl> + ic_slots + + ; <nl> + } <nl> + } <nl> + return ic_slots ; <nl> + } <nl> + <nl> + <nl> + static Code : : Kind GetStoreICKind ( Expression * expr ) { <nl> + LhsKind assign_type = Property : : GetAssignType ( expr - > AsProperty ( ) ) ; <nl> + return assign_type = = KEYED_PROPERTY ? Code : : KEYED_STORE_IC : Code : : STORE_IC ; <nl> + } <nl> + <nl> + <nl> + FeedbackVectorRequirements ForEachStatement : : ComputeFeedbackRequirements ( <nl> + Isolate * isolate , const ICSlotCache * cache ) { <nl> + int ic_slots = GetStoreICSlots ( each ( ) ) ; <nl> + return FeedbackVectorRequirements ( 0 , ic_slots ) ; <nl> + } <nl> + <nl> + <nl> + Code : : Kind ForEachStatement : : FeedbackICSlotKind ( int index ) { <nl> + return GetStoreICKind ( each ( ) ) ; <nl> + } <nl> + <nl> + <nl> Assignment : : Assignment ( Zone * zone , Token : : Value op , Expression * target , <nl> Expression * value , int pos ) <nl> : Expression ( zone , pos ) , <nl> - bit_field_ ( IsUninitializedField : : encode ( false ) | <nl> - KeyTypeField : : encode ( ELEMENT ) | <nl> - StoreModeField : : encode ( STANDARD_STORE ) | <nl> - TokenField : : encode ( op ) ) , <nl> + bit_field_ ( <nl> + IsUninitializedField : : encode ( false ) | KeyTypeField : : encode ( ELEMENT ) | <nl> + StoreModeField : : encode ( STANDARD_STORE ) | TokenField : : encode ( op ) ) , <nl> target_ ( target ) , <nl> value_ ( value ) , <nl> - binary_operation_ ( NULL ) { } <nl> + binary_operation_ ( NULL ) , <nl> + slot_ ( FeedbackVectorICSlot : : Invalid ( ) ) { } <nl> + <nl> + <nl> + FeedbackVectorRequirements Assignment : : ComputeFeedbackRequirements ( <nl> + Isolate * isolate , const ICSlotCache * cache ) { <nl> + int ic_slots = GetStoreICSlots ( target ( ) ) ; <nl> + return FeedbackVectorRequirements ( 0 , ic_slots ) ; <nl> + } <nl> + <nl> + <nl> + Code : : Kind Assignment : : FeedbackICSlotKind ( int index ) { <nl> + return GetStoreICKind ( target ( ) ) ; <nl> + } <nl> + <nl> + <nl> + FeedbackVectorRequirements CountOperation : : ComputeFeedbackRequirements ( <nl> + Isolate * isolate , const ICSlotCache * cache ) { <nl> + int ic_slots = GetStoreICSlots ( expression ( ) ) ; <nl> + return FeedbackVectorRequirements ( 0 , ic_slots ) ; <nl> + } <nl> + <nl> + <nl> + Code : : Kind CountOperation : : FeedbackICSlotKind ( int index ) { <nl> + return GetStoreICKind ( expression ( ) ) ; <nl> + } <nl> <nl> <nl> Token : : Value Assignment : : binary_op ( ) const { <nl> bool ObjectLiteral : : Property : : emit_store ( ) { <nl> } <nl> <nl> <nl> + FeedbackVectorRequirements ObjectLiteral : : ComputeFeedbackRequirements ( <nl> + Isolate * isolate , const ICSlotCache * cache ) { <nl> + if ( ! 
FLAG_vector_stores ) return FeedbackVectorRequirements ( 0 , 0 ) ; <nl> + <nl> + / / This logic that computes the number of slots needed for vector store <nl> + / / ics must mirror FullCodeGenerator : : VisitObjectLiteral . <nl> + int ic_slots = 0 ; <nl> + for ( int i = 0 ; i < properties ( ) - > length ( ) ; i + + ) { <nl> + ObjectLiteral : : Property * property = properties ( ) - > at ( i ) ; <nl> + if ( property - > IsCompileTimeValue ( ) ) continue ; <nl> + <nl> + Expression * value = property - > value ( ) ; <nl> + if ( property - > is_computed_name ( ) & & <nl> + property - > kind ( ) ! = ObjectLiteral : : Property : : PROTOTYPE ) { <nl> + if ( FunctionLiteral : : NeedsHomeObject ( value ) ) ic_slots + + ; <nl> + } else if ( property - > emit_store ( ) ) { <nl> + if ( property - > kind ( ) = = ObjectLiteral : : Property : : MATERIALIZED_LITERAL | | <nl> + property - > kind ( ) = = ObjectLiteral : : Property : : COMPUTED ) { <nl> + Literal * key = property - > key ( ) - > AsLiteral ( ) ; <nl> + if ( key - > value ( ) - > IsInternalizedString ( ) ) ic_slots + + ; <nl> + if ( FunctionLiteral : : NeedsHomeObject ( value ) ) ic_slots + + ; <nl> + } else if ( property - > kind ( ) = = ObjectLiteral : : Property : : GETTER | | <nl> + property - > kind ( ) = = ObjectLiteral : : Property : : SETTER ) { <nl> + / / We might need a slot for the home object . <nl> + if ( FunctionLiteral : : NeedsHomeObject ( value ) ) ic_slots + + ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + # ifdef DEBUG <nl> + / / FullCodeGenerator : : VisitObjectLiteral verifies that it consumes slot_count_ <nl> + / / slots . <nl> + slot_count_ = ic_slots ; <nl> + # endif <nl> + return FeedbackVectorRequirements ( 0 , ic_slots ) ; <nl> + } <nl> + <nl> + <nl> + FeedbackVectorICSlot ObjectLiteral : : SlotForHomeObject ( Expression * value , <nl> + int * slot_index ) const { <nl> + if ( FLAG_vector_stores & & FunctionLiteral : : NeedsHomeObject ( value ) ) { <nl> + DCHECK ( slot_index ! = NULL & & * slot_index > = 0 & & * slot_index < slot_count_ ) ; <nl> + FeedbackVectorICSlot slot = GetNthSlot ( * slot_index ) ; <nl> + * slot_index + = 1 ; <nl> + return slot ; <nl> + } <nl> + return FeedbackVectorICSlot : : Invalid ( ) ; <nl> + } <nl> + <nl> + <nl> void ObjectLiteral : : CalculateEmitStore ( Zone * zone ) { <nl> const auto GETTER = ObjectLiteral : : Property : : GETTER ; <nl> const auto SETTER = ObjectLiteral : : Property : : SETTER ; <nl> void Expression : : RecordToBooleanTypeFeedback ( TypeFeedbackOracle * oracle ) { <nl> <nl> bool Call : : IsUsingCallFeedbackICSlot ( Isolate * isolate ) const { <nl> CallType call_type = GetCallType ( isolate ) ; <nl> - if ( IsUsingCallFeedbackSlot ( isolate ) | | call_type = = POSSIBLY_EVAL_CALL ) { <nl> + if ( call_type = = POSSIBLY_EVAL_CALL ) { <nl> + return false ; <nl> + } <nl> + if ( call_type = = SUPER_CALL & & ! FLAG_vector_stores ) { <nl> return false ; <nl> } <nl> return true ; <nl> bool Call : : IsUsingCallFeedbackICSlot ( Isolate * isolate ) const { <nl> <nl> bool Call : : IsUsingCallFeedbackSlot ( Isolate * isolate ) const { <nl> / / SuperConstructorCall uses a CallConstructStub , which wants <nl> - / / a Slot , not an IC slot . <nl> + / / a Slot , in addition to any IC slots requested elsewhere . <nl> return GetCallType ( isolate ) = = SUPER_CALL ; <nl> } <nl> <nl> FeedbackVectorRequirements Call : : ComputeFeedbackRequirements ( <nl> Isolate * isolate , const ICSlotCache * cache ) { <nl> int ic_slots = IsUsingCallFeedbackICSlot ( isolate ) ? 
1 : 0 ; <nl> int slots = IsUsingCallFeedbackSlot ( isolate ) ? 1 : 0 ; <nl> - / / A Call uses either a slot or an IC slot . <nl> - DCHECK ( ( ic_slots & slots ) = = 0 ) ; <nl> return FeedbackVectorRequirements ( slots , ic_slots ) ; <nl> } <nl> <nl> mmm a / src / ast . h <nl> ppp b / src / ast . h <nl> class ForEachStatement : public IterationStatement { <nl> Expression * each ( ) const { return each_ ; } <nl> Expression * subject ( ) const { return subject_ ; } <nl> <nl> + FeedbackVectorRequirements ComputeFeedbackRequirements ( <nl> + Isolate * isolate , const ICSlotCache * cache ) override ; <nl> + void SetFirstFeedbackICSlot ( FeedbackVectorICSlot slot , <nl> + ICSlotCache * cache ) override { <nl> + each_slot_ = slot ; <nl> + } <nl> + Code : : Kind FeedbackICSlotKind ( int index ) override ; <nl> + FeedbackVectorICSlot EachFeedbackSlot ( ) const { return each_slot_ ; } <nl> + <nl> protected : <nl> ForEachStatement ( Zone * zone , ZoneList < const AstRawString * > * labels , int pos ) <nl> - : IterationStatement ( zone , labels , pos ) , each_ ( NULL ) , subject_ ( NULL ) { } <nl> + : IterationStatement ( zone , labels , pos ) , <nl> + each_ ( NULL ) , <nl> + subject_ ( NULL ) , <nl> + each_slot_ ( FeedbackVectorICSlot : : Invalid ( ) ) { } <nl> <nl> private : <nl> Expression * each_ ; <nl> Expression * subject_ ; <nl> + FeedbackVectorICSlot each_slot_ ; <nl> } ; <nl> <nl> <nl> class ForInStatement final : public ForEachStatement { <nl> } <nl> <nl> / / Type feedback information . <nl> - virtual FeedbackVectorRequirements ComputeFeedbackRequirements ( <nl> + FeedbackVectorRequirements ComputeFeedbackRequirements ( <nl> Isolate * isolate , const ICSlotCache * cache ) override { <nl> - return FeedbackVectorRequirements ( 1 , 0 ) ; <nl> + FeedbackVectorRequirements base = <nl> + ForEachStatement : : ComputeFeedbackRequirements ( isolate , cache ) ; <nl> + DCHECK ( base . slots ( ) = = 0 & & base . ic_slots ( ) < = 1 ) ; <nl> + return FeedbackVectorRequirements ( 1 , base . ic_slots ( ) ) ; <nl> } <nl> void SetFirstFeedbackSlot ( FeedbackVectorSlot slot ) override { <nl> for_in_feedback_slot_ = slot ; <nl> class ObjectLiteral final : public MaterializedLiteral { <nl> / / ObjectLiteral can vary , so num_ids ( ) is not a static method . <nl> int num_ids ( ) const { return parent_num_ids ( ) + 1 + properties ( ) - > length ( ) ; } <nl> <nl> + / / Object literals need one feedback slot for each non - trivial value , as well <nl> + / / as some slots for home objects . <nl> + FeedbackVectorRequirements ComputeFeedbackRequirements ( <nl> + Isolate * isolate , const ICSlotCache * cache ) override ; <nl> + void SetFirstFeedbackICSlot ( FeedbackVectorICSlot slot , <nl> + ICSlotCache * cache ) override { <nl> + slot_ = slot ; <nl> + } <nl> + Code : : Kind FeedbackICSlotKind ( int index ) override { return Code : : STORE_IC ; } <nl> + FeedbackVectorICSlot GetNthSlot ( int n ) const { <nl> + return FeedbackVectorICSlot ( slot_ . ToInt ( ) + n ) ; <nl> + } <nl> + <nl> + / / If value needs a home object , returns a valid feedback vector ic slot <nl> + / / given by slot_index , and increments slot_index . 
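<nl> + / / A minimal usage sketch ( mirrors FullCodeGenerator : : VisitObjectLiteral ) : <nl> + / / int store_slot_index = 0 ; <nl> + / / FeedbackVectorICSlot slot = SlotForHomeObject ( value , & store_slot_index ) ; <nl> + / / The slot is invalid , and the index is untouched , when no home object <nl> + / / store is needed .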
<nl> + FeedbackVectorICSlot SlotForHomeObject ( Expression * value , <nl> + int * slot_index ) const ; <nl> + <nl> + # ifdef DEBUG <nl> + int slot_count ( ) const { return slot_count_ ; } <nl> + # endif <nl> + <nl> protected : <nl> ObjectLiteral ( Zone * zone , ZoneList < Property * > * properties , int literal_index , <nl> - int boilerplate_properties , bool has_function , <nl> - bool is_strong , int pos ) <nl> + int boilerplate_properties , bool has_function , bool is_strong , <nl> + int pos ) <nl> : MaterializedLiteral ( zone , literal_index , is_strong , pos ) , <nl> properties_ ( properties ) , <nl> boilerplate_properties_ ( boilerplate_properties ) , <nl> fast_elements_ ( false ) , <nl> has_elements_ ( false ) , <nl> may_store_doubles_ ( false ) , <nl> - has_function_ ( has_function ) { } <nl> + has_function_ ( has_function ) , <nl> + # ifdef DEBUG <nl> + slot_count_ ( 0 ) , <nl> + # endif <nl> + slot_ ( FeedbackVectorICSlot : : Invalid ( ) ) { <nl> + } <nl> static int parent_num_ids ( ) { return MaterializedLiteral : : num_ids ( ) ; } <nl> <nl> private : <nl> class ObjectLiteral final : public MaterializedLiteral { <nl> bool has_elements_ ; <nl> bool may_store_doubles_ ; <nl> bool has_function_ ; <nl> + # ifdef DEBUG <nl> + / / slot_count_ helps validate that the logic to allocate ic slots and the <nl> + / / logic to use them are in sync . <nl> + int slot_count_ ; <nl> + # endif <nl> + FeedbackVectorICSlot slot_ ; <nl> } ; <nl> <nl> <nl> class VariableProxy final : public Expression { <nl> } ; <nl> <nl> <nl> + / / Left - hand side can only be a property , a global or a ( parameter or local ) <nl> + / / slot . <nl> + enum LhsKind { <nl> + VARIABLE , <nl> + NAMED_PROPERTY , <nl> + KEYED_PROPERTY , <nl> + NAMED_SUPER_PROPERTY , <nl> + KEYED_SUPER_PROPERTY <nl> + } ; <nl> + <nl> + <nl> class Property final : public Expression { <nl> public : <nl> DECLARE_NODE_TYPE ( Property ) <nl> class Property final : public Expression { <nl> return property_feedback_slot_ ; <nl> } <nl> <nl> + static LhsKind GetAssignType ( Property * property ) { <nl> + if ( property = = NULL ) return VARIABLE ; <nl> + bool super_access = property - > IsSuperAccess ( ) ; <nl> + return ( property - > key ( ) - > IsPropertyName ( ) ) <nl> + ? ( super_access ? NAMED_SUPER_PROPERTY : NAMED_PROPERTY ) <nl> + : ( super_access ? KEYED_SUPER_PROPERTY : KEYED_PROPERTY ) ; <nl> + } <nl> + <nl> protected : <nl> Property ( Zone * zone , Expression * obj , Expression * key , int pos ) <nl> : Expression ( zone , pos ) , <nl> class Call final : public Expression { <nl> Isolate * isolate , const ICSlotCache * cache ) override ; <nl> void SetFirstFeedbackICSlot ( FeedbackVectorICSlot slot , <nl> ICSlotCache * cache ) override { <nl> - ic_slot_or_slot_ = slot . ToInt ( ) ; <nl> - } <nl> - void SetFirstFeedbackSlot ( FeedbackVectorSlot slot ) override { <nl> - ic_slot_or_slot_ = slot . ToInt ( ) ; <nl> + ic_slot_ = slot ; <nl> } <nl> + void SetFirstFeedbackSlot ( FeedbackVectorSlot slot ) override { slot_ = slot ; } <nl> Code : : Kind FeedbackICSlotKind ( int index ) override { return Code : : CALL_IC ; } <nl> <nl> - FeedbackVectorSlot CallFeedbackSlot ( ) const { <nl> - DCHECK ( ic_slot_or_slot_ ! = FeedbackVectorSlot : : Invalid ( ) . ToInt ( ) ) ; <nl> - return FeedbackVectorSlot ( ic_slot_or_slot_ ) ; <nl> - } <nl> + FeedbackVectorSlot CallFeedbackSlot ( ) const { return slot_ ; } <nl> <nl> - FeedbackVectorICSlot CallFeedbackICSlot ( ) const { <nl> - DCHECK ( ic_slot_or_slot_ ! = FeedbackVectorICSlot : : Invalid ( ) . 
ToInt ( ) ) ; <nl> - return FeedbackVectorICSlot ( ic_slot_or_slot_ ) ; <nl> - } <nl> + FeedbackVectorICSlot CallFeedbackICSlot ( ) const { return ic_slot_ ; } <nl> <nl> SmallMapList * GetReceiverTypes ( ) override { <nl> if ( expression ( ) - > IsProperty ( ) ) { <nl> class Call final : public Expression { <nl> Call ( Zone * zone , Expression * expression , ZoneList < Expression * > * arguments , <nl> int pos ) <nl> : Expression ( zone , pos ) , <nl> - ic_slot_or_slot_ ( FeedbackVectorICSlot : : Invalid ( ) . ToInt ( ) ) , <nl> + ic_slot_ ( FeedbackVectorICSlot : : Invalid ( ) ) , <nl> + slot_ ( FeedbackVectorSlot : : Invalid ( ) ) , <nl> expression_ ( expression ) , <nl> arguments_ ( arguments ) , <nl> bit_field_ ( IsUninitializedField : : encode ( false ) ) { <nl> class Call final : public Expression { <nl> private : <nl> int local_id ( int n ) const { return base_id ( ) + parent_num_ids ( ) + n ; } <nl> <nl> - / / We store this as an integer because we don ' t know if we have a slot or <nl> - / / an ic slot until scoping time . <nl> - int ic_slot_or_slot_ ; <nl> + FeedbackVectorICSlot ic_slot_ ; <nl> + FeedbackVectorSlot slot_ ; <nl> Expression * expression_ ; <nl> ZoneList < Expression * > * arguments_ ; <nl> Handle < JSFunction > target_ ; <nl> class CountOperation final : public Expression { <nl> return TypeFeedbackId ( local_id ( 3 ) ) ; <nl> } <nl> <nl> + FeedbackVectorRequirements ComputeFeedbackRequirements ( <nl> + Isolate * isolate , const ICSlotCache * cache ) override ; <nl> + void SetFirstFeedbackICSlot ( FeedbackVectorICSlot slot , <nl> + ICSlotCache * cache ) override { <nl> + slot_ = slot ; <nl> + } <nl> + Code : : Kind FeedbackICSlotKind ( int index ) override ; <nl> + FeedbackVectorICSlot CountSlot ( ) const { return slot_ ; } <nl> + <nl> protected : <nl> CountOperation ( Zone * zone , Token : : Value op , bool is_prefix , Expression * expr , <nl> int pos ) <nl> : Expression ( zone , pos ) , <nl> - bit_field_ ( IsPrefixField : : encode ( is_prefix ) | <nl> - KeyTypeField : : encode ( ELEMENT ) | <nl> - StoreModeField : : encode ( STANDARD_STORE ) | <nl> - TokenField : : encode ( op ) ) , <nl> + bit_field_ ( <nl> + IsPrefixField : : encode ( is_prefix ) | KeyTypeField : : encode ( ELEMENT ) | <nl> + StoreModeField : : encode ( STANDARD_STORE ) | TokenField : : encode ( op ) ) , <nl> type_ ( NULL ) , <nl> - expression_ ( expr ) { } <nl> + expression_ ( expr ) , <nl> + slot_ ( FeedbackVectorICSlot : : Invalid ( ) ) { } <nl> static int parent_num_ids ( ) { return Expression : : num_ids ( ) ; } <nl> <nl> private : <nl> class CountOperation final : public Expression { <nl> Type * type_ ; <nl> Expression * expression_ ; <nl> SmallMapList receiver_types_ ; <nl> + FeedbackVectorICSlot slot_ ; <nl> } ; <nl> <nl> <nl> class Assignment final : public Expression { <nl> bit_field_ = StoreModeField : : update ( bit_field_ , mode ) ; <nl> } <nl> <nl> + FeedbackVectorRequirements ComputeFeedbackRequirements ( <nl> + Isolate * isolate , const ICSlotCache * cache ) override ; <nl> + void SetFirstFeedbackICSlot ( FeedbackVectorICSlot slot , <nl> + ICSlotCache * cache ) override { <nl> + slot_ = slot ; <nl> + } <nl> + Code : : Kind FeedbackICSlotKind ( int index ) override ; <nl> + FeedbackVectorICSlot AssignmentSlot ( ) const { return slot_ ; } <nl> + <nl> protected : <nl> Assignment ( Zone * zone , Token : : Value op , Expression * target , Expression * value , <nl> int pos ) ; <nl> class Assignment final : public Expression { <nl> Expression * value_ ; <nl> BinaryOperation * 
binary_operation_ ; <nl> SmallMapList receiver_types_ ; <nl> + FeedbackVectorICSlot slot_ ; <nl> } ; <nl> <nl> <nl> mmm a / src / full - codegen . h <nl> ppp b / src / full - codegen . h <nl> class FullCodeGenerator : public AstVisitor { <nl> void EmitLoadJSRuntimeFunction ( CallRuntime * expr ) ; <nl> void EmitCallJSRuntimeFunction ( CallRuntime * expr ) ; <nl> <nl> - / / Platform - specific support for compiling assignments . <nl> - <nl> - / / Left - hand side can only be a property , a global or a ( parameter or local ) <nl> - / / slot . <nl> - enum LhsKind { <nl> - VARIABLE , <nl> - NAMED_PROPERTY , <nl> - KEYED_PROPERTY , <nl> - NAMED_SUPER_PROPERTY , <nl> - KEYED_SUPER_PROPERTY <nl> - } ; <nl> - <nl> - static LhsKind GetAssignType ( Property * property ) { <nl> - if ( property = = NULL ) return VARIABLE ; <nl> - bool super_access = property - > IsSuperAccess ( ) ; <nl> - return ( property - > key ( ) - > IsPropertyName ( ) ) <nl> - ? ( super_access ? NAMED_SUPER_PROPERTY : NAMED_PROPERTY ) <nl> - : ( super_access ? KEYED_SUPER_PROPERTY : KEYED_PROPERTY ) ; <nl> - } <nl> - <nl> / / Load a value from a named property . <nl> / / The receiver is left on the stack by the IC . <nl> void EmitNamedPropertyLoad ( Property * expr ) ; <nl> class FullCodeGenerator : public AstVisitor { <nl> Expression * right ) ; <nl> <nl> / / Assign to the given expression as if via ' = ' . The right - hand - side value <nl> - / / is expected in the accumulator . <nl> - void EmitAssignment ( Expression * expr ) ; <nl> + / / is expected in the accumulator . slot is only used if FLAG_vector_stores <nl> + / / is true . <nl> + void EmitAssignment ( Expression * expr , FeedbackVectorICSlot slot = <nl> + FeedbackVectorICSlot : : Invalid ( ) ) ; <nl> <nl> / / Complete a variable assignment . The right - hand - side value is expected <nl> / / in the accumulator . <nl> - void EmitVariableAssignment ( Variable * var , <nl> - Token : : Value op ) ; <nl> + void EmitVariableAssignment ( <nl> + Variable * var , Token : : Value op , <nl> + FeedbackVectorICSlot slot = FeedbackVectorICSlot : : Invalid ( ) ) ; <nl> <nl> / / Helper functions to EmitVariableAssignment <nl> void EmitStoreToStackLocalOrContextSlot ( Variable * var , <nl> class FullCodeGenerator : public AstVisitor { <nl> / / Adds the [ [ HomeObject ] ] to | initializer | if it is a FunctionLiteral . <nl> / / The value of the initializer is expected to be at the top of the stack . <nl> / / | offset | is the offset in the stack where the home object can be found . 
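<nl> + / / | slot | is consulted only when FLAG_vector_stores is on ; it names the <nl> + / / store ic slot for the [ [ HomeObject ] ] write and defaults to invalid .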
<nl> - void EmitSetHomeObjectIfNeeded ( Expression * initializer , int offset ) ; <nl> + void EmitSetHomeObjectIfNeeded ( <nl> + Expression * initializer , int offset , <nl> + FeedbackVectorICSlot slot = FeedbackVectorICSlot : : Invalid ( ) ) ; <nl> <nl> void EmitLoadSuperConstructor ( ) ; <nl> - void EmitInitializeThisAfterSuper ( SuperReference * super_ref ) ; <nl> + void EmitInitializeThisAfterSuper ( <nl> + SuperReference * super_ref , <nl> + FeedbackVectorICSlot slot = FeedbackVectorICSlot : : Invalid ( ) ) ; <nl> <nl> void CallIC ( Handle < Code > code , <nl> TypeFeedbackId id = TypeFeedbackId : : None ( ) ) ; <nl> class FullCodeGenerator : public AstVisitor { <nl> bool MustCreateObjectLiteralWithRuntime ( ObjectLiteral * expr ) const ; <nl> bool MustCreateArrayLiteralWithRuntime ( ArrayLiteral * expr ) const ; <nl> <nl> + void EmitLoadStoreICSlot ( FeedbackVectorICSlot slot ) ; <nl> + <nl> Handle < HandlerTable > handler_table ( ) { return handler_table_ ; } <nl> <nl> struct BailoutEntry { <nl> mmm a / src / ia32 / full - codegen - ia32 . cc <nl> ppp b / src / ia32 / full - codegen - ia32 . cc <nl> void FullCodeGenerator : : VisitForInStatement ( ForInStatement * stmt ) { <nl> __ mov ( result_register ( ) , ebx ) ; <nl> / / Perform the assignment as if via ' = ' . <nl> { EffectContext context ( this ) ; <nl> - EmitAssignment ( stmt - > each ( ) ) ; <nl> + EmitAssignment ( stmt - > each ( ) , stmt - > EachFeedbackSlot ( ) ) ; <nl> PrepareForBailoutForId ( stmt - > AssignmentId ( ) , NO_REGISTERS ) ; <nl> } <nl> <nl> void FullCodeGenerator : : VisitVariableProxy ( VariableProxy * expr ) { <nl> <nl> <nl> void FullCodeGenerator : : EmitSetHomeObjectIfNeeded ( Expression * initializer , <nl> - int offset ) { <nl> + int offset , <nl> + FeedbackVectorICSlot slot ) { <nl> if ( NeedsHomeObject ( initializer ) ) { <nl> __ mov ( StoreDescriptor : : ReceiverRegister ( ) , Operand ( esp , 0 ) ) ; <nl> __ mov ( StoreDescriptor : : NameRegister ( ) , <nl> Immediate ( isolate ( ) - > factory ( ) - > home_object_symbol ( ) ) ) ; <nl> __ mov ( StoreDescriptor : : ValueRegister ( ) , <nl> Operand ( esp , offset * kPointerSize ) ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> CallStoreIC ( ) ; <nl> } <nl> } <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> <nl> AccessorTable accessor_table ( zone ( ) ) ; <nl> int property_index = 0 ; <nl> + / / store_slot_index points to the vector ic slot for the next store ic used . <nl> + / / ObjectLiteral : : ComputeFeedbackRequirements controls the allocation of slots <nl> + / / and must be updated if the number of store ics emitted here changes . <nl> + int store_slot_index = 0 ; <nl> for ( ; property_index < expr - > properties ( ) - > length ( ) ; property_index + + ) { <nl> ObjectLiteral : : Property * property = expr - > properties ( ) - > at ( property_index ) ; <nl> if ( property - > is_computed_name ( ) ) break ; <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> DCHECK ( StoreDescriptor : : ValueRegister ( ) . 
is ( eax ) ) ; <nl> __ mov ( StoreDescriptor : : NameRegister ( ) , Immediate ( key - > value ( ) ) ) ; <nl> __ mov ( StoreDescriptor : : ReceiverRegister ( ) , Operand ( esp , 0 ) ) ; <nl> - CallStoreIC ( key - > LiteralFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > GetNthSlot ( store_slot_index + + ) ) ; <nl> + CallStoreIC ( ) ; <nl> + } else { <nl> + CallStoreIC ( key - > LiteralFeedbackId ( ) ) ; <nl> + } <nl> PrepareForBailoutForId ( key - > id ( ) , NO_REGISTERS ) ; <nl> <nl> if ( NeedsHomeObject ( value ) ) { <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> __ mov ( StoreDescriptor : : NameRegister ( ) , <nl> Immediate ( isolate ( ) - > factory ( ) - > home_object_symbol ( ) ) ) ; <nl> __ mov ( StoreDescriptor : : ValueRegister ( ) , Operand ( esp , 0 ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > GetNthSlot ( store_slot_index + + ) ) ; <nl> + } <nl> CallStoreIC ( ) ; <nl> } <nl> } else { <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> VisitForStackValue ( key ) ; <nl> VisitForStackValue ( value ) ; <nl> if ( property - > emit_store ( ) ) { <nl> - EmitSetHomeObjectIfNeeded ( value , 2 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + value , 2 , expr - > SlotForHomeObject ( value , & store_slot_index ) ) ; <nl> __ push ( Immediate ( Smi : : FromInt ( SLOPPY ) ) ) ; / / Language mode <nl> __ CallRuntime ( Runtime : : kSetProperty , 4 ) ; <nl> } else { <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> __ push ( Operand ( esp , 0 ) ) ; / / Duplicate receiver . <nl> VisitForStackValue ( it - > first ) ; <nl> EmitAccessor ( it - > second - > getter ) ; <nl> - EmitSetHomeObjectIfNeeded ( it - > second - > getter , 2 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + it - > second - > getter , 2 , <nl> + expr - > SlotForHomeObject ( it - > second - > getter , & store_slot_index ) ) ; <nl> + <nl> EmitAccessor ( it - > second - > setter ) ; <nl> - EmitSetHomeObjectIfNeeded ( it - > second - > setter , 3 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + it - > second - > setter , 3 , <nl> + expr - > SlotForHomeObject ( it - > second - > setter , & store_slot_index ) ) ; <nl> + <nl> __ push ( Immediate ( Smi : : FromInt ( NONE ) ) ) ; <nl> __ CallRuntime ( Runtime : : kDefineAccessorPropertyUnchecked , 5 ) ; <nl> } <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> } else { <nl> EmitPropertyKey ( property , expr - > GetIdForProperty ( property_index ) ) ; <nl> VisitForStackValue ( value ) ; <nl> - EmitSetHomeObjectIfNeeded ( value , 2 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + value , 2 , expr - > SlotForHomeObject ( value , & store_slot_index ) ) ; <nl> <nl> switch ( property - > kind ( ) ) { <nl> case ObjectLiteral : : Property : : CONSTANT : <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> } else { <nl> context ( ) - > Plug ( eax ) ; <nl> } <nl> + <nl> + / / Verify that compilation exactly consumed the number of store ic slots that <nl> + / / the ObjectLiteral node had to offer . <nl> + DCHECK ( ! 
FLAG_vector_stores | | store_slot_index = = expr - > slot_count ( ) ) ; <nl> } <nl> <nl> <nl> void FullCodeGenerator : : VisitAssignment ( Assignment * expr ) { <nl> Comment cmnt ( masm_ , " [ Assignment " ) ; <nl> <nl> Property * property = expr - > target ( ) - > AsProperty ( ) ; <nl> - LhsKind assign_type = GetAssignType ( property ) ; <nl> + LhsKind assign_type = Property : : GetAssignType ( property ) ; <nl> <nl> / / Evaluate LHS expression . <nl> switch ( assign_type ) { <nl> void FullCodeGenerator : : VisitAssignment ( Assignment * expr ) { <nl> switch ( assign_type ) { <nl> case VARIABLE : <nl> EmitVariableAssignment ( expr - > target ( ) - > AsVariableProxy ( ) - > var ( ) , <nl> - expr - > op ( ) ) ; <nl> + expr - > op ( ) , expr - > AssignmentSlot ( ) ) ; <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( eax ) ; <nl> break ; <nl> void FullCodeGenerator : : EmitBinaryOp ( BinaryOperation * expr , Token : : Value op ) { <nl> } <nl> <nl> <nl> - void FullCodeGenerator : : EmitAssignment ( Expression * expr ) { <nl> + void FullCodeGenerator : : EmitAssignment ( Expression * expr , <nl> + FeedbackVectorICSlot slot ) { <nl> DCHECK ( expr - > IsValidReferenceExpression ( ) ) ; <nl> <nl> Property * prop = expr - > AsProperty ( ) ; <nl> - LhsKind assign_type = GetAssignType ( prop ) ; <nl> + LhsKind assign_type = Property : : GetAssignType ( prop ) ; <nl> <nl> switch ( assign_type ) { <nl> case VARIABLE : { <nl> Variable * var = expr - > AsVariableProxy ( ) - > var ( ) ; <nl> EffectContext context ( this ) ; <nl> - EmitVariableAssignment ( var , Token : : ASSIGN ) ; <nl> + EmitVariableAssignment ( var , Token : : ASSIGN , slot ) ; <nl> break ; <nl> } <nl> case NAMED_PROPERTY : { <nl> void FullCodeGenerator : : EmitAssignment ( Expression * expr ) { <nl> __ pop ( StoreDescriptor : : ValueRegister ( ) ) ; / / Restore value . <nl> __ mov ( StoreDescriptor : : NameRegister ( ) , <nl> prop - > key ( ) - > AsLiteral ( ) - > value ( ) ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> CallStoreIC ( ) ; <nl> break ; <nl> } <nl> void FullCodeGenerator : : EmitAssignment ( Expression * expr ) { <nl> __ Move ( StoreDescriptor : : NameRegister ( ) , eax ) ; <nl> __ pop ( StoreDescriptor : : ReceiverRegister ( ) ) ; / / Receiver . <nl> __ pop ( StoreDescriptor : : ValueRegister ( ) ) ; / / Restore value . <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> Handle < Code > ic = <nl> CodeFactory : : KeyedStoreIC ( isolate ( ) , language_mode ( ) ) . code ( ) ; <nl> CallIC ( ic ) ; <nl> void FullCodeGenerator : : EmitStoreToStackLocalOrContextSlot ( <nl> } <nl> <nl> <nl> - void FullCodeGenerator : : EmitVariableAssignment ( Variable * var , <nl> - Token : : Value op ) { <nl> + void FullCodeGenerator : : EmitVariableAssignment ( Variable * var , Token : : Value op , <nl> + FeedbackVectorICSlot slot ) { <nl> if ( var - > IsUnallocated ( ) ) { <nl> / / Global var , const , or let . <nl> __ mov ( StoreDescriptor : : NameRegister ( ) , var - > name ( ) ) ; <nl> __ mov ( StoreDescriptor : : ReceiverRegister ( ) , GlobalObjectOperand ( ) ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> CallStoreIC ( ) ; <nl> <nl> } else if ( var - > mode ( ) = = LET & & op ! 
= Token : : INIT_LET ) { <nl> void FullCodeGenerator : : EmitNamedPropertyAssignment ( Assignment * expr ) { <nl> SetSourcePosition ( expr - > position ( ) ) ; <nl> __ mov ( StoreDescriptor : : NameRegister ( ) , prop - > key ( ) - > AsLiteral ( ) - > value ( ) ) ; <nl> __ pop ( StoreDescriptor : : ReceiverRegister ( ) ) ; <nl> - CallStoreIC ( expr - > AssignmentFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > AssignmentSlot ( ) ) ; <nl> + CallStoreIC ( ) ; <nl> + } else { <nl> + CallStoreIC ( expr - > AssignmentFeedbackId ( ) ) ; <nl> + } <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( eax ) ; <nl> } <nl> void FullCodeGenerator : : EmitKeyedPropertyAssignment ( Assignment * expr ) { <nl> SetSourcePosition ( expr - > position ( ) ) ; <nl> Handle < Code > ic = <nl> CodeFactory : : KeyedStoreIC ( isolate ( ) , language_mode ( ) ) . code ( ) ; <nl> - CallIC ( ic , expr - > AssignmentFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > AssignmentSlot ( ) ) ; <nl> + CallIC ( ic ) ; <nl> + } else { <nl> + CallIC ( ic , expr - > AssignmentFeedbackId ( ) ) ; <nl> + } <nl> <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( eax ) ; <nl> void FullCodeGenerator : : EmitLoadSuperConstructor ( ) { <nl> <nl> <nl> void FullCodeGenerator : : EmitInitializeThisAfterSuper ( <nl> - SuperReference * super_ref ) { <nl> + SuperReference * super_ref , FeedbackVectorICSlot slot ) { <nl> Variable * this_var = super_ref - > this_var ( ) - > var ( ) ; <nl> GetVar ( ecx , this_var ) ; <nl> __ cmp ( ecx , isolate ( ) - > factory ( ) - > the_hole_value ( ) ) ; <nl> void FullCodeGenerator : : EmitInitializeThisAfterSuper ( <nl> __ CallRuntime ( Runtime : : kThrowReferenceError , 1 ) ; <nl> __ bind ( & uninitialized_this ) ; <nl> <nl> - EmitVariableAssignment ( this_var , Token : : INIT_CONST ) ; <nl> + EmitVariableAssignment ( this_var , Token : : INIT_CONST , slot ) ; <nl> } <nl> <nl> <nl> void FullCodeGenerator : : EmitSuperConstructorCall ( Call * expr ) { <nl> <nl> RecordJSReturnSite ( expr ) ; <nl> <nl> - EmitInitializeThisAfterSuper ( expr - > expression ( ) - > AsSuperReference ( ) ) ; <nl> + EmitInitializeThisAfterSuper ( expr - > expression ( ) - > AsSuperReference ( ) , <nl> + expr - > CallFeedbackICSlot ( ) ) ; <nl> context ( ) - > Plug ( eax ) ; <nl> } <nl> <nl> void FullCodeGenerator : : EmitCallSuperWithSpread ( CallRuntime * expr ) { <nl> __ mov ( esi , Operand ( ebp , StandardFrameConstants : : kContextOffset ) ) ; <nl> context ( ) - > DropAndPlug ( 1 , eax ) ; <nl> <nl> + / / TODO ( mvstanton ) : with FLAG_vector_stores this needs a slot id . <nl> EmitInitializeThisAfterSuper ( super_reference ) ; <nl> } <nl> <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> SetSourcePosition ( expr - > position ( ) ) ; <nl> <nl> Property * prop = expr - > expression ( ) - > AsProperty ( ) ; <nl> - LhsKind assign_type = GetAssignType ( prop ) ; <nl> + LhsKind assign_type = Property : : GetAssignType ( prop ) ; <nl> <nl> / / Evaluate expression and get value . <nl> if ( assign_type = = VARIABLE ) { <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> / / Perform the assignment as if via ' = ' . 
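<nl> + / / Note : with FLAG_vector_stores , the write - back below also passes <nl> + / / CountSlot ( ) so the variable store has an ic slot for feedback .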
<nl> { EffectContext context ( this ) ; <nl> EmitVariableAssignment ( expr - > expression ( ) - > AsVariableProxy ( ) - > var ( ) , <nl> - Token : : ASSIGN ) ; <nl> + Token : : ASSIGN , expr - > CountSlot ( ) ) ; <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context . Plug ( eax ) ; <nl> } <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> } else { <nl> / / Perform the assignment as if via ' = ' . <nl> EmitVariableAssignment ( expr - > expression ( ) - > AsVariableProxy ( ) - > var ( ) , <nl> - Token : : ASSIGN ) ; <nl> + Token : : ASSIGN , expr - > CountSlot ( ) ) ; <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( eax ) ; <nl> } <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> __ mov ( StoreDescriptor : : NameRegister ( ) , <nl> prop - > key ( ) - > AsLiteral ( ) - > value ( ) ) ; <nl> __ pop ( StoreDescriptor : : ReceiverRegister ( ) ) ; <nl> - CallStoreIC ( expr - > CountStoreFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > CountSlot ( ) ) ; <nl> + CallStoreIC ( ) ; <nl> + } else { <nl> + CallStoreIC ( expr - > CountStoreFeedbackId ( ) ) ; <nl> + } <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> if ( expr - > is_postfix ( ) ) { <nl> if ( ! context ( ) - > IsEffect ( ) ) { <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> __ pop ( StoreDescriptor : : ReceiverRegister ( ) ) ; <nl> Handle < Code > ic = <nl> CodeFactory : : KeyedStoreIC ( isolate ( ) , language_mode ( ) ) . code ( ) ; <nl> - CallIC ( ic , expr - > CountStoreFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > CountSlot ( ) ) ; <nl> + CallIC ( ic ) ; <nl> + } else { <nl> + CallIC ( ic , expr - > CountStoreFeedbackId ( ) ) ; <nl> + } <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> if ( expr - > is_postfix ( ) ) { <nl> / / Result is on the stack <nl> void FullCodeGenerator : : ClearPendingMessage ( ) { <nl> } <nl> <nl> <nl> + void FullCodeGenerator : : EmitLoadStoreICSlot ( FeedbackVectorICSlot slot ) { <nl> + DCHECK ( FLAG_vector_stores & & ! slot . IsInvalid ( ) ) ; <nl> + __ mov ( VectorStoreICTrampolineDescriptor : : SlotRegister ( ) , <nl> + Immediate ( SmiFromSlot ( slot ) ) ) ; <nl> + } <nl> + <nl> + <nl> # undef __ <nl> <nl> <nl> mmm a / src / mips / full - codegen - mips . cc <nl> ppp b / src / mips / full - codegen - mips . cc <nl> void FullCodeGenerator : : VisitForInStatement ( ForInStatement * stmt ) { <nl> __ mov ( result_register ( ) , a3 ) ; <nl> / / Perform the assignment as if via ' = ' . 
<nl> { EffectContext context ( this ) ; <nl> - EmitAssignment ( stmt - > each ( ) ) ; <nl> + EmitAssignment ( stmt - > each ( ) , stmt - > EachFeedbackSlot ( ) ) ; <nl> PrepareForBailoutForId ( stmt - > AssignmentId ( ) , NO_REGISTERS ) ; <nl> } <nl> <nl> void FullCodeGenerator : : VisitVariableProxy ( VariableProxy * expr ) { <nl> <nl> <nl> void FullCodeGenerator : : EmitSetHomeObjectIfNeeded ( Expression * initializer , <nl> - int offset ) { <nl> + int offset , <nl> + FeedbackVectorICSlot slot ) { <nl> if ( NeedsHomeObject ( initializer ) ) { <nl> __ lw ( StoreDescriptor : : ReceiverRegister ( ) , MemOperand ( sp ) ) ; <nl> __ li ( StoreDescriptor : : NameRegister ( ) , <nl> Operand ( isolate ( ) - > factory ( ) - > home_object_symbol ( ) ) ) ; <nl> __ lw ( StoreDescriptor : : ValueRegister ( ) , <nl> MemOperand ( sp , offset * kPointerSize ) ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> CallStoreIC ( ) ; <nl> } <nl> } <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> <nl> AccessorTable accessor_table ( zone ( ) ) ; <nl> int property_index = 0 ; <nl> + / / store_slot_index points to the vector ic slot for the next store ic used . <nl> + / / ObjectLiteral : : ComputeFeedbackRequirements controls the allocation of slots <nl> + / / and must be updated if the number of store ics emitted here changes . <nl> + int store_slot_index = 0 ; <nl> for ( ; property_index < expr - > properties ( ) - > length ( ) ; property_index + + ) { <nl> ObjectLiteral : : Property * property = expr - > properties ( ) - > at ( property_index ) ; <nl> if ( property - > is_computed_name ( ) ) break ; <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> DCHECK ( StoreDescriptor : : ValueRegister ( ) . is ( a0 ) ) ; <nl> __ li ( StoreDescriptor : : NameRegister ( ) , Operand ( key - > value ( ) ) ) ; <nl> __ lw ( StoreDescriptor : : ReceiverRegister ( ) , MemOperand ( sp ) ) ; <nl> - CallStoreIC ( key - > LiteralFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > GetNthSlot ( store_slot_index + + ) ) ; <nl> + CallStoreIC ( ) ; <nl> + } else { <nl> + CallStoreIC ( key - > LiteralFeedbackId ( ) ) ; <nl> + } <nl> PrepareForBailoutForId ( key - > id ( ) , NO_REGISTERS ) ; <nl> <nl> if ( NeedsHomeObject ( value ) ) { <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> __ li ( StoreDescriptor : : NameRegister ( ) , <nl> Operand ( isolate ( ) - > factory ( ) - > home_object_symbol ( ) ) ) ; <nl> __ lw ( StoreDescriptor : : ValueRegister ( ) , MemOperand ( sp ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > GetNthSlot ( store_slot_index + + ) ) ; <nl> + } <nl> CallStoreIC ( ) ; <nl> } <nl> } else { <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> VisitForStackValue ( key ) ; <nl> VisitForStackValue ( value ) ; <nl> if ( property - > emit_store ( ) ) { <nl> - EmitSetHomeObjectIfNeeded ( value , 2 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + value , 2 , expr - > SlotForHomeObject ( value , & store_slot_index ) ) ; <nl> __ li ( a0 , Operand ( Smi : : FromInt ( SLOPPY ) ) ) ; / / PropertyAttributes . 
<nl> __ push ( a0 ) ; <nl> __ CallRuntime ( Runtime : : kSetProperty , 4 ) ; <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> __ push ( a0 ) ; <nl> VisitForStackValue ( it - > first ) ; <nl> EmitAccessor ( it - > second - > getter ) ; <nl> - EmitSetHomeObjectIfNeeded ( it - > second - > getter , 2 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + it - > second - > getter , 2 , <nl> + expr - > SlotForHomeObject ( it - > second - > getter , & store_slot_index ) ) ; <nl> EmitAccessor ( it - > second - > setter ) ; <nl> - EmitSetHomeObjectIfNeeded ( it - > second - > setter , 3 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + it - > second - > setter , 3 , <nl> + expr - > SlotForHomeObject ( it - > second - > setter , & store_slot_index ) ) ; <nl> __ li ( a0 , Operand ( Smi : : FromInt ( NONE ) ) ) ; <nl> __ push ( a0 ) ; <nl> __ CallRuntime ( Runtime : : kDefineAccessorPropertyUnchecked , 5 ) ; <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> } else { <nl> EmitPropertyKey ( property , expr - > GetIdForProperty ( property_index ) ) ; <nl> VisitForStackValue ( value ) ; <nl> - EmitSetHomeObjectIfNeeded ( value , 2 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + value , 2 , expr - > SlotForHomeObject ( value , & store_slot_index ) ) ; <nl> <nl> switch ( property - > kind ( ) ) { <nl> case ObjectLiteral : : Property : : CONSTANT : <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> } else { <nl> context ( ) - > Plug ( v0 ) ; <nl> } <nl> + <nl> + / / Verify that compilation exactly consumed the number of store ic slots that <nl> + / / the ObjectLiteral node had to offer . <nl> + DCHECK ( ! FLAG_vector_stores | | store_slot_index = = expr - > slot_count ( ) ) ; <nl> } <nl> <nl> <nl> void FullCodeGenerator : : VisitAssignment ( Assignment * expr ) { <nl> Comment cmnt ( masm_ , " [ Assignment " ) ; <nl> <nl> Property * property = expr - > target ( ) - > AsProperty ( ) ; <nl> - LhsKind assign_type = GetAssignType ( property ) ; <nl> + LhsKind assign_type = Property : : GetAssignType ( property ) ; <nl> <nl> / / Evaluate LHS expression . 
<nl> switch ( assign_type ) { <nl> void FullCodeGenerator : : VisitAssignment ( Assignment * expr ) { <nl> switch ( assign_type ) { <nl> case VARIABLE : <nl> EmitVariableAssignment ( expr - > target ( ) - > AsVariableProxy ( ) - > var ( ) , <nl> - expr - > op ( ) ) ; <nl> + expr - > op ( ) , expr - > AssignmentSlot ( ) ) ; <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( v0 ) ; <nl> break ; <nl> void FullCodeGenerator : : EmitBinaryOp ( BinaryOperation * expr , Token : : Value op ) { <nl> } <nl> <nl> <nl> - void FullCodeGenerator : : EmitAssignment ( Expression * expr ) { <nl> + void FullCodeGenerator : : EmitAssignment ( Expression * expr , <nl> + FeedbackVectorICSlot slot ) { <nl> DCHECK ( expr - > IsValidReferenceExpression ( ) ) ; <nl> <nl> Property * prop = expr - > AsProperty ( ) ; <nl> - LhsKind assign_type = GetAssignType ( prop ) ; <nl> + LhsKind assign_type = Property : : GetAssignType ( prop ) ; <nl> <nl> switch ( assign_type ) { <nl> case VARIABLE : { <nl> Variable * var = expr - > AsVariableProxy ( ) - > var ( ) ; <nl> EffectContext context ( this ) ; <nl> - EmitVariableAssignment ( var , Token : : ASSIGN ) ; <nl> + EmitVariableAssignment ( var , Token : : ASSIGN , slot ) ; <nl> break ; <nl> } <nl> case NAMED_PROPERTY : { <nl> void FullCodeGenerator : : EmitAssignment ( Expression * expr ) { <nl> __ pop ( StoreDescriptor : : ValueRegister ( ) ) ; / / Restore value . <nl> __ li ( StoreDescriptor : : NameRegister ( ) , <nl> Operand ( prop - > key ( ) - > AsLiteral ( ) - > value ( ) ) ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> CallStoreIC ( ) ; <nl> break ; <nl> } <nl> void FullCodeGenerator : : EmitAssignment ( Expression * expr ) { <nl> __ mov ( StoreDescriptor : : NameRegister ( ) , result_register ( ) ) ; <nl> __ Pop ( StoreDescriptor : : ValueRegister ( ) , <nl> StoreDescriptor : : ReceiverRegister ( ) ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> Handle < Code > ic = <nl> CodeFactory : : KeyedStoreIC ( isolate ( ) , language_mode ( ) ) . code ( ) ; <nl> CallIC ( ic ) ; <nl> void FullCodeGenerator : : EmitStoreToStackLocalOrContextSlot ( <nl> } <nl> <nl> <nl> - void FullCodeGenerator : : EmitVariableAssignment ( Variable * var , Token : : Value op ) { <nl> + void FullCodeGenerator : : EmitVariableAssignment ( Variable * var , Token : : Value op , <nl> + FeedbackVectorICSlot slot ) { <nl> if ( var - > IsUnallocated ( ) ) { <nl> / / Global var , const , or let . <nl> __ mov ( StoreDescriptor : : ValueRegister ( ) , result_register ( ) ) ; <nl> __ li ( StoreDescriptor : : NameRegister ( ) , Operand ( var - > name ( ) ) ) ; <nl> __ lw ( StoreDescriptor : : ReceiverRegister ( ) , GlobalObjectOperand ( ) ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> CallStoreIC ( ) ; <nl> <nl> } else if ( var - > mode ( ) = = LET & & op ! 
= Token : : INIT_LET ) { <nl> void FullCodeGenerator : : EmitNamedPropertyAssignment ( Assignment * expr ) { <nl> __ li ( StoreDescriptor : : NameRegister ( ) , <nl> Operand ( prop - > key ( ) - > AsLiteral ( ) - > value ( ) ) ) ; <nl> __ pop ( StoreDescriptor : : ReceiverRegister ( ) ) ; <nl> - CallStoreIC ( expr - > AssignmentFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > AssignmentSlot ( ) ) ; <nl> + CallStoreIC ( ) ; <nl> + } else { <nl> + CallStoreIC ( expr - > AssignmentFeedbackId ( ) ) ; <nl> + } <nl> <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( v0 ) ; <nl> void FullCodeGenerator : : EmitKeyedPropertyAssignment ( Assignment * expr ) { <nl> <nl> Handle < Code > ic = <nl> CodeFactory : : KeyedStoreIC ( isolate ( ) , language_mode ( ) ) . code ( ) ; <nl> - CallIC ( ic , expr - > AssignmentFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > AssignmentSlot ( ) ) ; <nl> + CallIC ( ic ) ; <nl> + } else { <nl> + CallIC ( ic , expr - > AssignmentFeedbackId ( ) ) ; <nl> + } <nl> <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( v0 ) ; <nl> void FullCodeGenerator : : EmitLoadSuperConstructor ( ) { <nl> <nl> <nl> void FullCodeGenerator : : EmitInitializeThisAfterSuper ( <nl> - SuperReference * super_ref ) { <nl> + SuperReference * super_ref , FeedbackVectorICSlot slot ) { <nl> Variable * this_var = super_ref - > this_var ( ) - > var ( ) ; <nl> GetVar ( a1 , this_var ) ; <nl> __ LoadRoot ( at , Heap : : kTheHoleValueRootIndex ) ; <nl> void FullCodeGenerator : : EmitInitializeThisAfterSuper ( <nl> __ CallRuntime ( Runtime : : kThrowReferenceError , 1 ) ; <nl> __ bind ( & uninitialized_this ) ; <nl> <nl> - EmitVariableAssignment ( this_var , Token : : INIT_CONST ) ; <nl> + EmitVariableAssignment ( this_var , Token : : INIT_CONST , slot ) ; <nl> } <nl> <nl> <nl> void FullCodeGenerator : : EmitSuperConstructorCall ( Call * expr ) { <nl> <nl> RecordJSReturnSite ( expr ) ; <nl> <nl> - EmitInitializeThisAfterSuper ( expr - > expression ( ) - > AsSuperReference ( ) ) ; <nl> + EmitInitializeThisAfterSuper ( expr - > expression ( ) - > AsSuperReference ( ) , <nl> + expr - > CallFeedbackICSlot ( ) ) ; <nl> context ( ) - > Plug ( v0 ) ; <nl> } <nl> <nl> void FullCodeGenerator : : EmitCallSuperWithSpread ( CallRuntime * expr ) { <nl> __ lw ( cp , MemOperand ( fp , StandardFrameConstants : : kContextOffset ) ) ; <nl> context ( ) - > DropAndPlug ( 1 , v0 ) ; <nl> <nl> + / / TODO ( mvstanton ) : with FLAG_vector_stores this needs a slot id . <nl> EmitInitializeThisAfterSuper ( super_reference ) ; <nl> } <nl> <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> SetSourcePosition ( expr - > position ( ) ) ; <nl> <nl> Property * prop = expr - > expression ( ) - > AsProperty ( ) ; <nl> - LhsKind assign_type = GetAssignType ( prop ) ; <nl> + LhsKind assign_type = Property : : GetAssignType ( prop ) ; <nl> <nl> / / Evaluate expression and get value . <nl> if ( assign_type = = VARIABLE ) { <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> if ( expr - > is_postfix ( ) ) { <nl> { EffectContext context ( this ) ; <nl> EmitVariableAssignment ( expr - > expression ( ) - > AsVariableProxy ( ) - > var ( ) , <nl> - Token : : ASSIGN ) ; <nl> + Token : : ASSIGN , expr - > CountSlot ( ) ) ; <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context . 
Plug ( v0 ) ; <nl> } <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> } <nl> } else { <nl> EmitVariableAssignment ( expr - > expression ( ) - > AsVariableProxy ( ) - > var ( ) , <nl> - Token : : ASSIGN ) ; <nl> + Token : : ASSIGN , expr - > CountSlot ( ) ) ; <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( v0 ) ; <nl> } <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> __ li ( StoreDescriptor : : NameRegister ( ) , <nl> Operand ( prop - > key ( ) - > AsLiteral ( ) - > value ( ) ) ) ; <nl> __ pop ( StoreDescriptor : : ReceiverRegister ( ) ) ; <nl> - CallStoreIC ( expr - > CountStoreFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > CountSlot ( ) ) ; <nl> + CallStoreIC ( ) ; <nl> + } else { <nl> + CallStoreIC ( expr - > CountStoreFeedbackId ( ) ) ; <nl> + } <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> if ( expr - > is_postfix ( ) ) { <nl> if ( ! context ( ) - > IsEffect ( ) ) { <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> StoreDescriptor : : NameRegister ( ) ) ; <nl> Handle < Code > ic = <nl> CodeFactory : : KeyedStoreIC ( isolate ( ) , language_mode ( ) ) . code ( ) ; <nl> - CallIC ( ic , expr - > CountStoreFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > CountSlot ( ) ) ; <nl> + CallIC ( ic ) ; <nl> + } else { <nl> + CallIC ( ic , expr - > CountStoreFeedbackId ( ) ) ; <nl> + } <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> if ( expr - > is_postfix ( ) ) { <nl> if ( ! context ( ) - > IsEffect ( ) ) { <nl> void FullCodeGenerator : : ClearPendingMessage ( ) { <nl> } <nl> <nl> <nl> + void FullCodeGenerator : : EmitLoadStoreICSlot ( FeedbackVectorICSlot slot ) { <nl> + DCHECK ( FLAG_vector_stores & & ! slot . IsInvalid ( ) ) ; <nl> + __ li ( VectorStoreICTrampolineDescriptor : : SlotRegister ( ) , <nl> + Operand ( SmiFromSlot ( slot ) ) ) ; <nl> + } <nl> + <nl> + <nl> # undef __ <nl> <nl> <nl> mmm a / src / mips64 / full - codegen - mips64 . cc <nl> ppp b / src / mips64 / full - codegen - mips64 . cc <nl> void FullCodeGenerator : : VisitForInStatement ( ForInStatement * stmt ) { <nl> __ mov ( result_register ( ) , a3 ) ; <nl> / / Perform the assignment as if via ' = ' . 
<nl> { EffectContext context ( this ) ; <nl> - EmitAssignment ( stmt - > each ( ) ) ; <nl> + EmitAssignment ( stmt - > each ( ) , stmt - > EachFeedbackSlot ( ) ) ; <nl> PrepareForBailoutForId ( stmt - > AssignmentId ( ) , NO_REGISTERS ) ; <nl> } <nl> <nl> void FullCodeGenerator : : VisitVariableProxy ( VariableProxy * expr ) { <nl> <nl> <nl> void FullCodeGenerator : : EmitSetHomeObjectIfNeeded ( Expression * initializer , <nl> - int offset ) { <nl> + int offset , <nl> + FeedbackVectorICSlot slot ) { <nl> if ( NeedsHomeObject ( initializer ) ) { <nl> __ ld ( StoreDescriptor : : ReceiverRegister ( ) , MemOperand ( sp ) ) ; <nl> __ li ( StoreDescriptor : : NameRegister ( ) , <nl> Operand ( isolate ( ) - > factory ( ) - > home_object_symbol ( ) ) ) ; <nl> __ ld ( StoreDescriptor : : ValueRegister ( ) , <nl> MemOperand ( sp , offset * kPointerSize ) ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> CallStoreIC ( ) ; <nl> } <nl> } <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> <nl> AccessorTable accessor_table ( zone ( ) ) ; <nl> int property_index = 0 ; <nl> + / / store_slot_index points to the vector ic slot for the next store ic used . <nl> + / / ObjectLiteral : : ComputeFeedbackRequirements controls the allocation of slots <nl> + / / and must be updated if the number of store ics emitted here changes . <nl> + int store_slot_index = 0 ; <nl> for ( ; property_index < expr - > properties ( ) - > length ( ) ; property_index + + ) { <nl> ObjectLiteral : : Property * property = expr - > properties ( ) - > at ( property_index ) ; <nl> if ( property - > is_computed_name ( ) ) break ; <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> DCHECK ( StoreDescriptor : : ValueRegister ( ) . is ( a0 ) ) ; <nl> __ li ( StoreDescriptor : : NameRegister ( ) , Operand ( key - > value ( ) ) ) ; <nl> __ ld ( StoreDescriptor : : ReceiverRegister ( ) , MemOperand ( sp ) ) ; <nl> - CallStoreIC ( key - > LiteralFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > GetNthSlot ( store_slot_index + + ) ) ; <nl> + CallStoreIC ( ) ; <nl> + } else { <nl> + CallStoreIC ( key - > LiteralFeedbackId ( ) ) ; <nl> + } <nl> PrepareForBailoutForId ( key - > id ( ) , NO_REGISTERS ) ; <nl> <nl> if ( NeedsHomeObject ( value ) ) { <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> __ li ( StoreDescriptor : : NameRegister ( ) , <nl> Operand ( isolate ( ) - > factory ( ) - > home_object_symbol ( ) ) ) ; <nl> __ ld ( StoreDescriptor : : ValueRegister ( ) , MemOperand ( sp ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > GetNthSlot ( store_slot_index + + ) ) ; <nl> + } <nl> CallStoreIC ( ) ; <nl> } <nl> } else { <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> VisitForStackValue ( key ) ; <nl> VisitForStackValue ( value ) ; <nl> if ( property - > emit_store ( ) ) { <nl> - EmitSetHomeObjectIfNeeded ( value , 2 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + value , 2 , expr - > SlotForHomeObject ( value , & store_slot_index ) ) ; <nl> __ li ( a0 , Operand ( Smi : : FromInt ( SLOPPY ) ) ) ; / / PropertyAttributes . 
<nl> __ push ( a0 ) ; <nl> __ CallRuntime ( Runtime : : kSetProperty , 4 ) ; <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> __ push ( a0 ) ; <nl> VisitForStackValue ( it - > first ) ; <nl> EmitAccessor ( it - > second - > getter ) ; <nl> - EmitSetHomeObjectIfNeeded ( it - > second - > getter , 2 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + it - > second - > getter , 2 , <nl> + expr - > SlotForHomeObject ( it - > second - > getter , & store_slot_index ) ) ; <nl> EmitAccessor ( it - > second - > setter ) ; <nl> - EmitSetHomeObjectIfNeeded ( it - > second - > setter , 3 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + it - > second - > setter , 3 , <nl> + expr - > SlotForHomeObject ( it - > second - > setter , & store_slot_index ) ) ; <nl> __ li ( a0 , Operand ( Smi : : FromInt ( NONE ) ) ) ; <nl> __ push ( a0 ) ; <nl> __ CallRuntime ( Runtime : : kDefineAccessorPropertyUnchecked , 5 ) ; <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> } else { <nl> EmitPropertyKey ( property , expr - > GetIdForProperty ( property_index ) ) ; <nl> VisitForStackValue ( value ) ; <nl> - EmitSetHomeObjectIfNeeded ( value , 2 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + value , 2 , expr - > SlotForHomeObject ( value , & store_slot_index ) ) ; <nl> <nl> switch ( property - > kind ( ) ) { <nl> case ObjectLiteral : : Property : : CONSTANT : <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> } else { <nl> context ( ) - > Plug ( v0 ) ; <nl> } <nl> + <nl> + / / Verify that compilation exactly consumed the number of store ic slots that <nl> + / / the ObjectLiteral node had to offer . <nl> + DCHECK ( ! FLAG_vector_stores | | store_slot_index = = expr - > slot_count ( ) ) ; <nl> } <nl> <nl> <nl> void FullCodeGenerator : : VisitAssignment ( Assignment * expr ) { <nl> Comment cmnt ( masm_ , " [ Assignment " ) ; <nl> <nl> Property * property = expr - > target ( ) - > AsProperty ( ) ; <nl> - LhsKind assign_type = GetAssignType ( property ) ; <nl> + LhsKind assign_type = Property : : GetAssignType ( property ) ; <nl> <nl> / / Evaluate LHS expression . 
<nl> switch ( assign_type ) { <nl> void FullCodeGenerator : : VisitAssignment ( Assignment * expr ) { <nl> switch ( assign_type ) { <nl> case VARIABLE : <nl> EmitVariableAssignment ( expr - > target ( ) - > AsVariableProxy ( ) - > var ( ) , <nl> - expr - > op ( ) ) ; <nl> + expr - > op ( ) , expr - > AssignmentSlot ( ) ) ; <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( v0 ) ; <nl> break ; <nl> void FullCodeGenerator : : EmitBinaryOp ( BinaryOperation * expr , Token : : Value op ) { <nl> } <nl> <nl> <nl> - void FullCodeGenerator : : EmitAssignment ( Expression * expr ) { <nl> + void FullCodeGenerator : : EmitAssignment ( Expression * expr , <nl> + FeedbackVectorICSlot slot ) { <nl> DCHECK ( expr - > IsValidReferenceExpression ( ) ) ; <nl> <nl> Property * prop = expr - > AsProperty ( ) ; <nl> - LhsKind assign_type = GetAssignType ( prop ) ; <nl> + LhsKind assign_type = Property : : GetAssignType ( prop ) ; <nl> <nl> switch ( assign_type ) { <nl> case VARIABLE : { <nl> Variable * var = expr - > AsVariableProxy ( ) - > var ( ) ; <nl> EffectContext context ( this ) ; <nl> - EmitVariableAssignment ( var , Token : : ASSIGN ) ; <nl> + EmitVariableAssignment ( var , Token : : ASSIGN , slot ) ; <nl> break ; <nl> } <nl> case NAMED_PROPERTY : { <nl> void FullCodeGenerator : : EmitAssignment ( Expression * expr ) { <nl> __ pop ( StoreDescriptor : : ValueRegister ( ) ) ; / / Restore value . <nl> __ li ( StoreDescriptor : : NameRegister ( ) , <nl> Operand ( prop - > key ( ) - > AsLiteral ( ) - > value ( ) ) ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> CallStoreIC ( ) ; <nl> break ; <nl> } <nl> void FullCodeGenerator : : EmitAssignment ( Expression * expr ) { <nl> __ Move ( StoreDescriptor : : NameRegister ( ) , result_register ( ) ) ; <nl> __ Pop ( StoreDescriptor : : ValueRegister ( ) , <nl> StoreDescriptor : : ReceiverRegister ( ) ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> Handle < Code > ic = <nl> CodeFactory : : KeyedStoreIC ( isolate ( ) , language_mode ( ) ) . code ( ) ; <nl> CallIC ( ic ) ; <nl> void FullCodeGenerator : : EmitStoreToStackLocalOrContextSlot ( <nl> } <nl> <nl> <nl> - void FullCodeGenerator : : EmitVariableAssignment ( Variable * var , Token : : Value op ) { <nl> + void FullCodeGenerator : : EmitVariableAssignment ( Variable * var , Token : : Value op , <nl> + FeedbackVectorICSlot slot ) { <nl> if ( var - > IsUnallocated ( ) ) { <nl> / / Global var , const , or let . <nl> __ mov ( StoreDescriptor : : ValueRegister ( ) , result_register ( ) ) ; <nl> __ li ( StoreDescriptor : : NameRegister ( ) , Operand ( var - > name ( ) ) ) ; <nl> __ ld ( StoreDescriptor : : ReceiverRegister ( ) , GlobalObjectOperand ( ) ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> CallStoreIC ( ) ; <nl> <nl> } else if ( var - > mode ( ) = = LET & & op ! 
= Token : : INIT_LET ) { <nl> void FullCodeGenerator : : EmitNamedPropertyAssignment ( Assignment * expr ) { <nl> __ li ( StoreDescriptor : : NameRegister ( ) , <nl> Operand ( prop - > key ( ) - > AsLiteral ( ) - > value ( ) ) ) ; <nl> __ pop ( StoreDescriptor : : ReceiverRegister ( ) ) ; <nl> - CallStoreIC ( expr - > AssignmentFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > AssignmentSlot ( ) ) ; <nl> + CallStoreIC ( ) ; <nl> + } else { <nl> + CallStoreIC ( expr - > AssignmentFeedbackId ( ) ) ; <nl> + } <nl> <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( v0 ) ; <nl> void FullCodeGenerator : : EmitKeyedPropertyAssignment ( Assignment * expr ) { <nl> <nl> Handle < Code > ic = <nl> CodeFactory : : KeyedStoreIC ( isolate ( ) , language_mode ( ) ) . code ( ) ; <nl> - CallIC ( ic , expr - > AssignmentFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > AssignmentSlot ( ) ) ; <nl> + CallIC ( ic ) ; <nl> + } else { <nl> + CallIC ( ic , expr - > AssignmentFeedbackId ( ) ) ; <nl> + } <nl> <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( v0 ) ; <nl> void FullCodeGenerator : : EmitLoadSuperConstructor ( ) { <nl> <nl> <nl> void FullCodeGenerator : : EmitInitializeThisAfterSuper ( <nl> - SuperReference * super_ref ) { <nl> + SuperReference * super_ref , FeedbackVectorICSlot slot ) { <nl> Variable * this_var = super_ref - > this_var ( ) - > var ( ) ; <nl> GetVar ( a1 , this_var ) ; <nl> __ LoadRoot ( at , Heap : : kTheHoleValueRootIndex ) ; <nl> void FullCodeGenerator : : EmitInitializeThisAfterSuper ( <nl> __ CallRuntime ( Runtime : : kThrowReferenceError , 1 ) ; <nl> __ bind ( & uninitialized_this ) ; <nl> <nl> - EmitVariableAssignment ( this_var , Token : : INIT_CONST ) ; <nl> + EmitVariableAssignment ( this_var , Token : : INIT_CONST , slot ) ; <nl> } <nl> <nl> <nl> void FullCodeGenerator : : EmitSuperConstructorCall ( Call * expr ) { <nl> <nl> RecordJSReturnSite ( expr ) ; <nl> <nl> - EmitInitializeThisAfterSuper ( expr - > expression ( ) - > AsSuperReference ( ) ) ; <nl> + EmitInitializeThisAfterSuper ( expr - > expression ( ) - > AsSuperReference ( ) , <nl> + expr - > CallFeedbackICSlot ( ) ) ; <nl> context ( ) - > Plug ( v0 ) ; <nl> } <nl> <nl> void FullCodeGenerator : : EmitCallSuperWithSpread ( CallRuntime * expr ) { <nl> __ ld ( cp , MemOperand ( fp , StandardFrameConstants : : kContextOffset ) ) ; <nl> context ( ) - > DropAndPlug ( 1 , v0 ) ; <nl> <nl> + / / TODO ( mvstanton ) : with FLAG_vector_stores this needs a slot id . <nl> EmitInitializeThisAfterSuper ( super_reference ) ; <nl> } <nl> <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> SetSourcePosition ( expr - > position ( ) ) ; <nl> <nl> Property * prop = expr - > expression ( ) - > AsProperty ( ) ; <nl> - LhsKind assign_type = GetAssignType ( prop ) ; <nl> + LhsKind assign_type = Property : : GetAssignType ( prop ) ; <nl> <nl> / / Evaluate expression and get value . <nl> if ( assign_type = = VARIABLE ) { <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> if ( expr - > is_postfix ( ) ) { <nl> { EffectContext context ( this ) ; <nl> EmitVariableAssignment ( expr - > expression ( ) - > AsVariableProxy ( ) - > var ( ) , <nl> - Token : : ASSIGN ) ; <nl> + Token : : ASSIGN , expr - > CountSlot ( ) ) ; <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context . 
Plug ( v0 ) ; <nl> } <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> __ li ( StoreDescriptor : : NameRegister ( ) , <nl> Operand ( prop - > key ( ) - > AsLiteral ( ) - > value ( ) ) ) ; <nl> __ pop ( StoreDescriptor : : ReceiverRegister ( ) ) ; <nl> - CallStoreIC ( expr - > CountStoreFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > CountSlot ( ) ) ; <nl> + CallStoreIC ( ) ; <nl> + } else { <nl> + CallStoreIC ( expr - > CountStoreFeedbackId ( ) ) ; <nl> + } <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> if ( expr - > is_postfix ( ) ) { <nl> if ( ! context ( ) - > IsEffect ( ) ) { <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> StoreDescriptor : : NameRegister ( ) ) ; <nl> Handle < Code > ic = <nl> CodeFactory : : KeyedStoreIC ( isolate ( ) , language_mode ( ) ) . code ( ) ; <nl> - CallIC ( ic , expr - > CountStoreFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > CountSlot ( ) ) ; <nl> + CallIC ( ic ) ; <nl> + } else { <nl> + CallIC ( ic , expr - > CountStoreFeedbackId ( ) ) ; <nl> + } <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> if ( expr - > is_postfix ( ) ) { <nl> if ( ! context ( ) - > IsEffect ( ) ) { <nl> void FullCodeGenerator : : ClearPendingMessage ( ) { <nl> } <nl> <nl> <nl> + void FullCodeGenerator : : EmitLoadStoreICSlot ( FeedbackVectorICSlot slot ) { <nl> + DCHECK ( FLAG_vector_stores & & ! slot . IsInvalid ( ) ) ; <nl> + __ li ( VectorStoreICTrampolineDescriptor : : SlotRegister ( ) , <nl> + Operand ( SmiFromSlot ( slot ) ) ) ; <nl> + } <nl> + <nl> + <nl> # undef __ <nl> <nl> <nl> mmm a / src / type - feedback - vector . cc <nl> ppp b / src / type - feedback - vector . cc <nl> TypeFeedbackVector : : VectorICKind TypeFeedbackVector : : FromCodeKind ( <nl> return KindLoadIC ; <nl> case Code : : KEYED_LOAD_IC : <nl> return KindKeyedLoadIC ; <nl> + case Code : : STORE_IC : <nl> + DCHECK ( FLAG_vector_stores ) ; <nl> + return KindStoreIC ; <nl> + case Code : : KEYED_STORE_IC : <nl> + DCHECK ( FLAG_vector_stores ) ; <nl> + return KindKeyedStoreIC ; <nl> default : <nl> / / Shouldn ' t get here . <nl> UNREACHABLE ( ) ; <nl> Code : : Kind TypeFeedbackVector : : FromVectorICKind ( VectorICKind kind ) { <nl> return Code : : LOAD_IC ; <nl> case KindKeyedLoadIC : <nl> return Code : : KEYED_LOAD_IC ; <nl> + case KindStoreIC : <nl> + DCHECK ( FLAG_vector_stores ) ; <nl> + return Code : : STORE_IC ; <nl> + case KindKeyedStoreIC : <nl> + DCHECK ( FLAG_vector_stores ) ; <nl> + return Code : : KEYED_STORE_IC ; <nl> case KindUnused : <nl> break ; <nl> } <nl> void TypeFeedbackVector : : ClearICSlotsImpl ( SharedFunctionInfo * shared , <nl> KeyedLoadICNexus nexus ( this , slot ) ; <nl> nexus . Clear ( host ) ; <nl> } <nl> + / / TODO ( mvstanton ) : Handle clearing of store ics when FLAG_vector_stores <nl> + / / is true . <nl> } <nl> } <nl> } <nl> mmm a / src / type - feedback - vector . h <nl> ppp b / src / type - feedback - vector . 
h <nl> class TypeFeedbackVector : public FixedArray { <nl> KindUnused = 0x0 , <nl> KindCallIC = 0x1 , <nl> KindLoadIC = 0x2 , <nl> - KindKeyedLoadIC = 0x3 <nl> + KindKeyedLoadIC = 0x3 , <nl> + KindStoreIC = 0x4 , <nl> + KindKeyedStoreIC = 0x5 , <nl> } ; <nl> <nl> - static const int kVectorICKindBits = 2 ; <nl> + static const int kVectorICKindBits = 3 ; <nl> static VectorICKind FromCodeKind ( Code : : Kind kind ) ; <nl> static Code : : Kind FromVectorICKind ( VectorICKind kind ) ; <nl> void SetKind ( FeedbackVectorICSlot slot , Code : : Kind kind ) ; <nl> mmm a / src / x64 / full - codegen - x64 . cc <nl> ppp b / src / x64 / full - codegen - x64 . cc <nl> void FullCodeGenerator : : VisitForInStatement ( ForInStatement * stmt ) { <nl> __ movp ( result_register ( ) , rbx ) ; <nl> / / Perform the assignment as if via ' = ' . <nl> { EffectContext context ( this ) ; <nl> - EmitAssignment ( stmt - > each ( ) ) ; <nl> + EmitAssignment ( stmt - > each ( ) , stmt - > EachFeedbackSlot ( ) ) ; <nl> PrepareForBailoutForId ( stmt - > AssignmentId ( ) , NO_REGISTERS ) ; <nl> } <nl> <nl> void FullCodeGenerator : : VisitVariableProxy ( VariableProxy * expr ) { <nl> <nl> <nl> void FullCodeGenerator : : EmitSetHomeObjectIfNeeded ( Expression * initializer , <nl> - int offset ) { <nl> + int offset , <nl> + FeedbackVectorICSlot slot ) { <nl> if ( NeedsHomeObject ( initializer ) ) { <nl> __ movp ( StoreDescriptor : : ReceiverRegister ( ) , Operand ( rsp , 0 ) ) ; <nl> __ Move ( StoreDescriptor : : NameRegister ( ) , <nl> isolate ( ) - > factory ( ) - > home_object_symbol ( ) ) ; <nl> __ movp ( StoreDescriptor : : ValueRegister ( ) , <nl> Operand ( rsp , offset * kPointerSize ) ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> CallStoreIC ( ) ; <nl> } <nl> } <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> <nl> AccessorTable accessor_table ( zone ( ) ) ; <nl> int property_index = 0 ; <nl> + / / store_slot_index points to the vector ic slot for the next store ic used . <nl> + / / ObjectLiteral : : ComputeFeedbackRequirements controls the allocation of slots <nl> + / / and must be updated if the number of store ics emitted here changes . <nl> + int store_slot_index = 0 ; <nl> for ( ; property_index < expr - > properties ( ) - > length ( ) ; property_index + + ) { <nl> ObjectLiteral : : Property * property = expr - > properties ( ) - > at ( property_index ) ; <nl> if ( property - > is_computed_name ( ) ) break ; <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> DCHECK ( StoreDescriptor : : ValueRegister ( ) . 
is ( rax ) ) ; <nl> __ Move ( StoreDescriptor : : NameRegister ( ) , key - > value ( ) ) ; <nl> __ movp ( StoreDescriptor : : ReceiverRegister ( ) , Operand ( rsp , 0 ) ) ; <nl> - CallStoreIC ( key - > LiteralFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > GetNthSlot ( store_slot_index + + ) ) ; <nl> + CallStoreIC ( ) ; <nl> + } else { <nl> + CallStoreIC ( key - > LiteralFeedbackId ( ) ) ; <nl> + } <nl> PrepareForBailoutForId ( key - > id ( ) , NO_REGISTERS ) ; <nl> <nl> if ( NeedsHomeObject ( value ) ) { <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> __ Move ( StoreDescriptor : : NameRegister ( ) , <nl> isolate ( ) - > factory ( ) - > home_object_symbol ( ) ) ; <nl> __ movp ( StoreDescriptor : : ValueRegister ( ) , Operand ( rsp , 0 ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > GetNthSlot ( store_slot_index + + ) ) ; <nl> + } <nl> CallStoreIC ( ) ; <nl> } <nl> } else { <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> VisitForStackValue ( key ) ; <nl> VisitForStackValue ( value ) ; <nl> if ( property - > emit_store ( ) ) { <nl> - EmitSetHomeObjectIfNeeded ( value , 2 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + value , 2 , expr - > SlotForHomeObject ( value , & store_slot_index ) ) ; <nl> __ Push ( Smi : : FromInt ( SLOPPY ) ) ; / / Language mode <nl> __ CallRuntime ( Runtime : : kSetProperty , 4 ) ; <nl> } else { <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> __ Push ( Operand ( rsp , 0 ) ) ; / / Duplicate receiver . <nl> VisitForStackValue ( it - > first ) ; <nl> EmitAccessor ( it - > second - > getter ) ; <nl> - EmitSetHomeObjectIfNeeded ( it - > second - > getter , 2 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + it - > second - > getter , 2 , <nl> + expr - > SlotForHomeObject ( it - > second - > getter , & store_slot_index ) ) ; <nl> EmitAccessor ( it - > second - > setter ) ; <nl> - EmitSetHomeObjectIfNeeded ( it - > second - > setter , 3 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + it - > second - > setter , 3 , <nl> + expr - > SlotForHomeObject ( it - > second - > setter , & store_slot_index ) ) ; <nl> __ Push ( Smi : : FromInt ( NONE ) ) ; <nl> __ CallRuntime ( Runtime : : kDefineAccessorPropertyUnchecked , 5 ) ; <nl> } <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> } else { <nl> EmitPropertyKey ( property , expr - > GetIdForProperty ( property_index ) ) ; <nl> VisitForStackValue ( value ) ; <nl> - EmitSetHomeObjectIfNeeded ( value , 2 ) ; <nl> + EmitSetHomeObjectIfNeeded ( <nl> + value , 2 , expr - > SlotForHomeObject ( value , & store_slot_index ) ) ; <nl> <nl> switch ( property - > kind ( ) ) { <nl> case ObjectLiteral : : Property : : CONSTANT : <nl> void FullCodeGenerator : : VisitObjectLiteral ( ObjectLiteral * expr ) { <nl> } else { <nl> context ( ) - > Plug ( rax ) ; <nl> } <nl> + <nl> + / / Verify that compilation exactly consumed the number of store ic slots that <nl> + / / the ObjectLiteral node had to offer . <nl> + DCHECK ( ! 
FLAG_vector_stores | | store_slot_index = = expr - > slot_count ( ) ) ; <nl> } <nl> <nl> <nl> void FullCodeGenerator : : VisitAssignment ( Assignment * expr ) { <nl> Comment cmnt ( masm_ , " [ Assignment " ) ; <nl> <nl> Property * property = expr - > target ( ) - > AsProperty ( ) ; <nl> - LhsKind assign_type = GetAssignType ( property ) ; <nl> + LhsKind assign_type = Property : : GetAssignType ( property ) ; <nl> <nl> / / Evaluate LHS expression . <nl> switch ( assign_type ) { <nl> void FullCodeGenerator : : VisitAssignment ( Assignment * expr ) { <nl> switch ( assign_type ) { <nl> case VARIABLE : <nl> EmitVariableAssignment ( expr - > target ( ) - > AsVariableProxy ( ) - > var ( ) , <nl> - expr - > op ( ) ) ; <nl> + expr - > op ( ) , expr - > AssignmentSlot ( ) ) ; <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( rax ) ; <nl> break ; <nl> void FullCodeGenerator : : EmitBinaryOp ( BinaryOperation * expr , Token : : Value op ) { <nl> } <nl> <nl> <nl> - void FullCodeGenerator : : EmitAssignment ( Expression * expr ) { <nl> + void FullCodeGenerator : : EmitAssignment ( Expression * expr , <nl> + FeedbackVectorICSlot slot ) { <nl> DCHECK ( expr - > IsValidReferenceExpression ( ) ) ; <nl> <nl> Property * prop = expr - > AsProperty ( ) ; <nl> - LhsKind assign_type = GetAssignType ( prop ) ; <nl> + LhsKind assign_type = Property : : GetAssignType ( prop ) ; <nl> <nl> switch ( assign_type ) { <nl> case VARIABLE : { <nl> Variable * var = expr - > AsVariableProxy ( ) - > var ( ) ; <nl> EffectContext context ( this ) ; <nl> - EmitVariableAssignment ( var , Token : : ASSIGN ) ; <nl> + EmitVariableAssignment ( var , Token : : ASSIGN , slot ) ; <nl> break ; <nl> } <nl> case NAMED_PROPERTY : { <nl> void FullCodeGenerator : : EmitAssignment ( Expression * expr ) { <nl> __ Pop ( StoreDescriptor : : ValueRegister ( ) ) ; / / Restore value . <nl> __ Move ( StoreDescriptor : : NameRegister ( ) , <nl> prop - > key ( ) - > AsLiteral ( ) - > value ( ) ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> CallStoreIC ( ) ; <nl> break ; <nl> } <nl> void FullCodeGenerator : : EmitAssignment ( Expression * expr ) { <nl> __ Move ( StoreDescriptor : : NameRegister ( ) , rax ) ; <nl> __ Pop ( StoreDescriptor : : ReceiverRegister ( ) ) ; <nl> __ Pop ( StoreDescriptor : : ValueRegister ( ) ) ; / / Restore value . <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> Handle < Code > ic = <nl> CodeFactory : : KeyedStoreIC ( isolate ( ) , language_mode ( ) ) . code ( ) ; <nl> CallIC ( ic ) ; <nl> void FullCodeGenerator : : EmitStoreToStackLocalOrContextSlot ( <nl> } <nl> <nl> <nl> - void FullCodeGenerator : : EmitVariableAssignment ( Variable * var , <nl> - Token : : Value op ) { <nl> + void FullCodeGenerator : : EmitVariableAssignment ( Variable * var , Token : : Value op , <nl> + FeedbackVectorICSlot slot ) { <nl> if ( var - > IsUnallocated ( ) ) { <nl> / / Global var , const , or let . <nl> __ Move ( StoreDescriptor : : NameRegister ( ) , var - > name ( ) ) ; <nl> __ movp ( StoreDescriptor : : ReceiverRegister ( ) , GlobalObjectOperand ( ) ) ; <nl> + if ( FLAG_vector_stores ) EmitLoadStoreICSlot ( slot ) ; <nl> CallStoreIC ( ) ; <nl> <nl> } else if ( var - > mode ( ) = = LET & & op ! 
= Token : : INIT_LET ) { <nl> void FullCodeGenerator : : EmitNamedPropertyAssignment ( Assignment * expr ) { <nl> SetSourcePosition ( expr - > position ( ) ) ; <nl> __ Move ( StoreDescriptor : : NameRegister ( ) , prop - > key ( ) - > AsLiteral ( ) - > value ( ) ) ; <nl> __ Pop ( StoreDescriptor : : ReceiverRegister ( ) ) ; <nl> - CallStoreIC ( expr - > AssignmentFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > AssignmentSlot ( ) ) ; <nl> + CallStoreIC ( ) ; <nl> + } else { <nl> + CallStoreIC ( expr - > AssignmentFeedbackId ( ) ) ; <nl> + } <nl> <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( rax ) ; <nl> void FullCodeGenerator : : EmitKeyedPropertyAssignment ( Assignment * expr ) { <nl> SetSourcePosition ( expr - > position ( ) ) ; <nl> Handle < Code > ic = <nl> CodeFactory : : KeyedStoreIC ( isolate ( ) , language_mode ( ) ) . code ( ) ; <nl> - CallIC ( ic , expr - > AssignmentFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > AssignmentSlot ( ) ) ; <nl> + CallIC ( ic ) ; <nl> + } else { <nl> + CallIC ( ic , expr - > AssignmentFeedbackId ( ) ) ; <nl> + } <nl> <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( rax ) ; <nl> void FullCodeGenerator : : EmitLoadSuperConstructor ( ) { <nl> <nl> <nl> void FullCodeGenerator : : EmitInitializeThisAfterSuper ( <nl> - SuperReference * super_ref ) { <nl> + SuperReference * super_ref , FeedbackVectorICSlot slot ) { <nl> Variable * this_var = super_ref - > this_var ( ) - > var ( ) ; <nl> GetVar ( rcx , this_var ) ; <nl> __ CompareRoot ( rcx , Heap : : kTheHoleValueRootIndex ) ; <nl> void FullCodeGenerator : : EmitInitializeThisAfterSuper ( <nl> __ CallRuntime ( Runtime : : kThrowReferenceError , 1 ) ; <nl> __ bind ( & uninitialized_this ) ; <nl> <nl> - EmitVariableAssignment ( this_var , Token : : INIT_CONST ) ; <nl> + EmitVariableAssignment ( this_var , Token : : INIT_CONST , slot ) ; <nl> } <nl> <nl> <nl> void FullCodeGenerator : : EmitSuperConstructorCall ( Call * expr ) { <nl> <nl> RecordJSReturnSite ( expr ) ; <nl> <nl> - EmitInitializeThisAfterSuper ( expr - > expression ( ) - > AsSuperReference ( ) ) ; <nl> + EmitInitializeThisAfterSuper ( expr - > expression ( ) - > AsSuperReference ( ) , <nl> + expr - > CallFeedbackICSlot ( ) ) ; <nl> context ( ) - > Plug ( rax ) ; <nl> } <nl> <nl> void FullCodeGenerator : : EmitCallSuperWithSpread ( CallRuntime * expr ) { <nl> __ movp ( rsi , Operand ( rbp , StandardFrameConstants : : kContextOffset ) ) ; <nl> context ( ) - > DropAndPlug ( 1 , rax ) ; <nl> <nl> + / / TODO ( mvstanton ) : with FLAG_vector_stores this needs a slot id . <nl> EmitInitializeThisAfterSuper ( super_reference ) ; <nl> } <nl> <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> SetSourcePosition ( expr - > position ( ) ) ; <nl> <nl> Property * prop = expr - > expression ( ) - > AsProperty ( ) ; <nl> - LhsKind assign_type = GetAssignType ( prop ) ; <nl> + LhsKind assign_type = Property : : GetAssignType ( prop ) ; <nl> <nl> / / Evaluate expression and get value . <nl> if ( assign_type = = VARIABLE ) { <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> / / Perform the assignment as if via ' = ' . 
<nl> { EffectContext context ( this ) ; <nl> EmitVariableAssignment ( expr - > expression ( ) - > AsVariableProxy ( ) - > var ( ) , <nl> - Token : : ASSIGN ) ; <nl> + Token : : ASSIGN , expr - > CountSlot ( ) ) ; <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context . Plug ( rax ) ; <nl> } <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> } else { <nl> / / Perform the assignment as if via ' = ' . <nl> EmitVariableAssignment ( expr - > expression ( ) - > AsVariableProxy ( ) - > var ( ) , <nl> - Token : : ASSIGN ) ; <nl> + Token : : ASSIGN , expr - > CountSlot ( ) ) ; <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> context ( ) - > Plug ( rax ) ; <nl> } <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> __ Move ( StoreDescriptor : : NameRegister ( ) , <nl> prop - > key ( ) - > AsLiteral ( ) - > value ( ) ) ; <nl> __ Pop ( StoreDescriptor : : ReceiverRegister ( ) ) ; <nl> - CallStoreIC ( expr - > CountStoreFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > CountSlot ( ) ) ; <nl> + CallStoreIC ( ) ; <nl> + } else { <nl> + CallStoreIC ( expr - > CountStoreFeedbackId ( ) ) ; <nl> + } <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> if ( expr - > is_postfix ( ) ) { <nl> if ( ! context ( ) - > IsEffect ( ) ) { <nl> void FullCodeGenerator : : VisitCountOperation ( CountOperation * expr ) { <nl> __ Pop ( StoreDescriptor : : ReceiverRegister ( ) ) ; <nl> Handle < Code > ic = <nl> CodeFactory : : KeyedStoreIC ( isolate ( ) , language_mode ( ) ) . code ( ) ; <nl> - CallIC ( ic , expr - > CountStoreFeedbackId ( ) ) ; <nl> + if ( FLAG_vector_stores ) { <nl> + EmitLoadStoreICSlot ( expr - > CountSlot ( ) ) ; <nl> + CallIC ( ic ) ; <nl> + } else { <nl> + CallIC ( ic , expr - > CountStoreFeedbackId ( ) ) ; <nl> + } <nl> PrepareForBailoutForId ( expr - > AssignmentId ( ) , TOS_REG ) ; <nl> if ( expr - > is_postfix ( ) ) { <nl> if ( ! context ( ) - > IsEffect ( ) ) { <nl> void FullCodeGenerator : : ClearPendingMessage ( ) { <nl> } <nl> <nl> <nl> + void FullCodeGenerator : : EmitLoadStoreICSlot ( FeedbackVectorICSlot slot ) { <nl> + DCHECK ( FLAG_vector_stores & & ! slot . IsInvalid ( ) ) ; <nl> + __ Move ( VectorStoreICTrampolineDescriptor : : SlotRegister ( ) , SmiFromSlot ( slot ) ) ; <nl> + } <nl> + <nl> + <nl> # undef __ <nl> <nl> <nl>
VectorICs: allocating slots for store ICs in AST nodes.
v8/v8
5450fc07ba07615a70f5ed8379dc23c3275d6fe3
2015-05-27T14:26:25Z
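The same pattern repeats in every backend touched by this commit: when FLAG_vector_stores is on, each store IC site first loads its pre-allocated feedback-vector slot (as a Smi) into the store IC trampoline's slot register via EmitLoadStoreICSlot, and object-literal codegen counts the slots it hands out so the trailing DCHECK can verify that compilation consumed exactly the slots the AST node had to offer. Below is a minimal, self-contained sketch of that bookkeeping; ObjectLiteralNode and CodeGen are hypothetical stand-ins for illustration only, not the real V8 classes.

```cpp
#include <cassert>
#include <cstdio>

// Hypothetical stand-in for the AST node: it pre-allocates a fixed number
// of feedback-vector IC slots when feedback requirements are computed.
struct ObjectLiteralNode {
  int slot_count;
};

// Hypothetical stand-in for the full code generator.
class CodeGen {
 public:
  void VisitObjectLiteral(const ObjectLiteralNode& expr) {
    int store_slot_index = 0;  // next store IC slot to hand out
    for (int i = 0; i < expr.slot_count; ++i) {
      EmitLoadStoreICSlot(store_slot_index++);
    }
    // Mirrors the diff's DCHECK: codegen must consume exactly the number
    // of store IC slots the node allocated -- no leaks, no overruns.
    assert(store_slot_index == expr.slot_count);
  }

 private:
  void EmitLoadStoreICSlot(int slot) {
    // In the real backends this moves Smi(slot) into the store IC
    // trampoline's slot register right before CallStoreIC/CallIC.
    std::printf("store IC reads feedback slot %d\n", slot);
  }
};

int main() {
  CodeGen cg;
  cg.VisitObjectLiteral(ObjectLiteralNode{3});
  return 0;
}
```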
mmm a / html5 / render / vue / components / scrollable / shared . js <nl> ppp b / html5 / render / vue / components / scrollable / shared . js <nl> import loading from ' . / loading ' <nl> <nl> export function createLoading ( context , createElement , vnode ) { <nl> const options = vnode . componentOptions <nl> - return createElement ( loading , extend ( { <nl> + return createElement ( loading , extend ( vnode . data , { <nl> on : options . listeners <nl> - } , vnode . data ) , options . children ) <nl> + } ) , options . children ) <nl> } <nl> <nl> export function createRefresh ( context , createElement , vnode ) { <nl> const options = vnode . componentOptions <nl> - return createElement ( refresh , extend ( { <nl> + return createElement ( refresh , extend ( vnode . data , { <nl> on : options . listeners <nl> - } , vnode . data ) , options . children ) <nl> + } ) , options . children ) <nl> } <nl>
* [html5] fix loading & refresh event.
apache/incubator-weex
fc4172718f7f2e2c964a029b75986f5b187f8f94
2017-02-23T07:58:56Z
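The one-line fix above is purely about argument order: assuming weex's extend helper copies each later argument's keys over the earlier ones (like Object.assign), passing vnode.data last let any pre-existing `on` field clobber the freshly bound component listeners, so loading and refresh events never fired; swapping the arguments makes the listeners win. A sketch of that precedence rule follows, written in C++ for consistency with the surrounding records (the original is JavaScript), with a hypothetical `extend` standing in for the real helper.

```cpp
#include <iostream>
#include <map>
#include <string>

using Props = std::map<std::string, std::string>;

// Hypothetical stand-in for the JS `extend` helper assumed by the fix:
// copy every key of `from` into `to`, overwriting duplicates, and return
// `to` -- the same precedence rule as Object.assign(to, from).
Props& extend(Props& to, const Props& from) {
  for (const auto& kv : from) to[kv.first] = kv.second;
  return to;
}

int main() {
  Props vnode_data = {{"on", "stale vnode handlers"}};
  Props listeners  = {{"on", "component listeners"}};

  // Before the fix: extend({ on: listeners }, vnode.data).
  // vnode.data is copied last, so its `on` clobbers the listeners.
  Props broken = listeners;
  extend(broken, vnode_data);
  std::cout << "before fix: " << broken["on"] << "\n";  // stale vnode handlers

  // After the fix: extend(vnode.data, { on: listeners }).
  // The listeners are copied last and win.
  Props fixed = vnode_data;
  extend(fixed, listeners);
  std::cout << "after fix:  " << fixed["on"] << "\n";   // component listeners
  return 0;
}
```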
deleted file mode 100644 <nl> index c70ea45de53 . . 00000000000 <nl> mmm a / . github / labeler . keywords . yml <nl> ppp / dev / null <nl> @ @ - 1 + 0 , 0 @ @ <nl> - pr - feature : " New Feature " <nl> deleted file mode 100644 <nl> index 72cf714f039 . . 00000000000 <nl> mmm a / . github / labeler . yml <nl> ppp / dev / null <nl> <nl> - # Documentation PRs <nl> - documentation : <nl> - - " * * / * . md " <nl> - - " docs / * * / * " <nl> - pr - documentation : <nl> - - " * * / * . md " <nl> - - " docs / * * / * " <nl> - <nl> - # Component labels <nl> - comp - mutations : <nl> - - " * * / * Mutation * " <nl> - comp - matview : <nl> - - " * * / * MaterializedView * " <nl> - comp - skipidx : <nl> - - " * * / * Indices * " <nl> - comp - kafka : <nl> - - " dbms / src / Storages / Kafka / * * / * " <nl> - - " dbms / tests / integration / test_storage_kafka / * * / * " <nl> - - " utils / kafka / * * / * " <nl> deleted file mode 100644 <nl> index 0110ef7b516 . . 00000000000 <nl> mmm a / . github / workflows / labeler . yml <nl> ppp / dev / null <nl> <nl> - name : " Pull Request Labeler " <nl> - on : <nl> - pull_request <nl> - <nl> - jobs : <nl> - by - filename : <nl> - runs - on : ubuntu - latest <nl> - steps : <nl> - - uses : " actions / labeler @ v2 " <nl> - with : <nl> - repo - token : " $ { { secrets . GITHUB_TOKEN } } " <nl>
Labeler seems to require additional permissions from PR authors
ClickHouse/ClickHouse
f8a401bbf72e487d1a2953f8dfdf4b180acf54eb
2019-10-24T00:56:53Z
mmm a / include / swift / AST / DiagnosticsSema . def <nl> ppp b / include / swift / AST / DiagnosticsSema . def <nl> ERROR ( tuple_conversion_not_expressible , sema_tcc , none , <nl> " cannot express tuple conversion % 0 to % 1 " , ( Type , Type ) ) <nl> ERROR ( load_of_explicit_lvalue , sema_tcc , none , <nl> " % 0 variable is not being passed by reference " , ( Type ) ) <nl> - ERROR ( could_not_find_user_conversion , sema_tcc , none , <nl> - " could not find a user - defined conversion from type % 0 to type % 1 " , <nl> - ( Type , Type ) ) <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> / / Expression Type Checking Errors <nl> ERROR ( functions_mutating_and_not , sema_tcd , none , <nl> ERROR ( static_functions_not_mutating , sema_tcd , none , <nl> " static functions may not be declared mutating " , ( ) ) <nl> <nl> - ERROR ( conversion_not_function , sema_tcd , none , <nl> - " conversion should be applied to an instance method " , ( ) ) <nl> - ERROR ( conversion_not_instance_method , sema_tcd , none , <nl> - " conversion function % 0 is not an instance method " , <nl> - ( Identifier ) ) <nl> ERROR ( transparent_stored_property , sema_tcd , none , <nl> " ' transparent ' attribute cannot be applied to stored properties " , ( ) ) <nl> ERROR ( transparent_on_invalid_extension , sema_tcd , none , <nl> ERROR ( transparent_in_classes_not_supported , sema_tcd , none , <nl> " ' transparent ' attribute is not supported on declarations within classes " , <nl> ( ) ) <nl> <nl> - ERROR ( conversion_params , sema_tcd , none , <nl> - " conversion function % 0 has non - defaulted parameters " , ( Identifier ) ) <nl> ERROR ( invalid_iboutlet , sema_tcd , none , <nl> " only instance properties can be declared ' IBOutlet ' " , ( ) ) <nl> ERROR ( iboutlet_nonobjc_class , sema_tcd , none , <nl> mmm a / include / swift / AST / KnownIdentifiers . def <nl> ppp b / include / swift / AST / KnownIdentifiers . def <nl> IDENTIFIER_WITH_NAME ( ConvertFromDictionaryLiteral , <nl> IDENTIFIER_WITH_NAME ( GetBuiltinLogicValue , " _getBuiltinLogicValue " ) <nl> IDENTIFIER_WITH_NAME ( ArrayBoundValue , " arrayBoundValue " ) <nl> IDENTIFIER_WITH_NAME ( GetBuiltinArrayBoundValue , " _getBuiltinArrayBoundValue " ) <nl> - IDENTIFIER_WITH_NAME ( Conversion , " __conversion " ) <nl> <nl> IDENTIFIER_WITH_NAME ( CVarArgType , " CVarArgType " ) <nl> IDENTIFIER_WITH_NAME ( OptionalNilComparisonType , " _OptionalNilComparisonType " ) <nl> mmm a / lib / Parse / ParseDecl . cpp <nl> ppp b / lib / Parse / ParseDecl . cpp <nl> Parser : : parseDeclFunc ( SourceLoc StaticLoc , StaticSpellingKind StaticSpelling , <nl> return nullptr ; <nl> } <nl> <nl> - / / Ban __conversion functions . <nl> - if ( SimpleName = = Context . Id_Conversion ) { <nl> - diagnose ( NameLoc , diag : : func_conversion ) ; <nl> - } <nl> - <nl> DebuggerContextChange DCC ( * this , SimpleName , DeclKind : : Func ) ; <nl> <nl> if ( NonglobalError & & ! DCC . movedToTopLevel ( ) ) { <nl> mmm a / lib / Sema / CSApply . cpp <nl> ppp b / lib / Sema / CSApply . cpp <nl> namespace { <nl> Expr * coerceExistentialMetatype ( Expr * expr , Type toType , <nl> ConstraintLocatorBuilder locator ) ; <nl> <nl> - / / / \ brief Coerce the expression to another type via a user - defined <nl> - / / / conversion . <nl> - / / / <nl> - / / / \ param expr The expression to be coerced . <nl> - / / / \ param toType The tupe to which the expression will be coerced . <nl> - / / / \ param locator Locator describing where this conversion occurs . 
<nl> - / / / <nl> - / / / \ return The coerced expression , whose type will be equivalent to <nl> - / / / \ c toType . <nl> - Expr * coerceViaUserConversion ( Expr * expr , Type toType , <nl> - ConstraintLocatorBuilder locator ) ; <nl> - <nl> / / / \ brief Coerce an expression of ( possibly unchecked ) optional <nl> / / / type to have a different ( possibly unchecked ) optional type . <nl> Expr * coerceOptionalToOptional ( Expr * expr , Type toType , <nl> namespace { <nl> cs . DC - > getInnermostMethodContext ( ) ) ) { <nl> tc . diagnose ( expr - > getDotLoc ( ) , <nl> diag : : init_delegation_outside_initializer ) ; <nl> + return nullptr ; <nl> } <nl> } <nl> } <nl> Expr * ExprRewriter : : coerceExistential ( Expr * expr , Type toType , <nl> auto & tc = solution . getConstraintSystem ( ) . getTypeChecker ( ) ; <nl> Type fromType = expr - > getType ( ) ; <nl> <nl> - if ( auto bridgedType = tc . getDynamicBridgedThroughObjCClass ( cs . DC , toType , <nl> - fromType ) ) { <nl> - / / Protect against " no - op " conversions . If the bridged type points back <nl> - / / to itself , the constraint solver won ' t have a conversion handy to <nl> - / / coerce to a user conversion , so we should avoid creating a new <nl> - / / expression node . <nl> - if ( ! bridgedType - > isEqual ( fromType ) & & ! bridgedType - > isEqual ( toType ) ) { <nl> - expr = coerceViaUserConversion ( expr , bridgedType , locator ) ; <nl> - fromType = bridgedType ; <nl> - } <nl> - } <nl> - <nl> / / Handle existential coercions that implicitly look through ImplicitlyUnwrappedOptional < T > . <nl> if ( auto ty = cs . lookThroughImplicitlyUnwrappedOptionalType ( fromType ) ) { <nl> expr = coerceImplicitlyUnwrappedOptionalToValue ( expr , ty , locator ) ; <nl> Expr * ExprRewriter : : coerceExistentialMetatype ( Expr * expr , Type toType , <nl> return new ( tc . Context ) MetatypeErasureExpr ( expr , toType , conformances ) ; <nl> } <nl> <nl> - Expr * ExprRewriter : : coerceViaUserConversion ( Expr * expr , Type toType , <nl> - ConstraintLocatorBuilder locator ) { <nl> - auto & tc = solution . getConstraintSystem ( ) . getTypeChecker ( ) ; <nl> - <nl> - / / Determine the locator that corresponds to the conversion member . <nl> - auto storedLocator <nl> - = cs . getConstraintLocator ( <nl> - locator . withPathElement ( ConstraintLocator : : ConversionMember ) ) ; <nl> - auto knownOverload = solution . overloadChoices . find ( storedLocator ) ; <nl> - if ( knownOverload ! = solution . overloadChoices . end ( ) ) { <nl> - auto selected = knownOverload - > second ; <nl> - <nl> - / / FIXME : Location information is suspect throughout . <nl> - / / Form a reference to the conversion member . <nl> - auto memberRef = buildMemberRef ( expr , <nl> - selected . openedFullType , <nl> - expr - > getStartLoc ( ) , <nl> - selected . choice . getDecl ( ) , <nl> - expr - > getEndLoc ( ) , <nl> - selected . openedType , <nl> - locator , <nl> - / * Implicit = * / true , / * direct ivar * / false ) ; <nl> - <nl> - / / Form an empty tuple . <nl> - Expr * args = TupleExpr : : createEmpty ( tc . Context , <nl> - expr - > getStartLoc ( ) , <nl> - expr - > getEndLoc ( ) , <nl> - / * Implicit = * / true ) ; <nl> - <nl> - / / Call the conversion function with an empty tuple . <nl> - ApplyExpr * apply = new ( tc . Context ) CallExpr ( memberRef , args , <nl> - / * Implicit = * / true ) ; <nl> - auto openedType = selected . 
openedType - > castTo < FunctionType > ( ) - > getResult ( ) ; <nl> - expr = finishApply ( apply , openedType , <nl> - ConstraintLocatorBuilder ( <nl> - cs . getConstraintLocator ( apply ) ) ) ; <nl> - <nl> - if ( ! expr ) <nl> - return nullptr ; <nl> - <nl> - return coerceToType ( expr , toType , locator ) ; <nl> - } <nl> - <nl> - / / If there was no conversion member , look for a constructor member . <nl> - / / This is only used for handling interpolated string literals , where <nl> - / / we allow construction or conversion . <nl> - storedLocator <nl> - = cs . getConstraintLocator ( <nl> - locator . withPathElement ( ConstraintLocator : : ConstructorMember ) ) ; <nl> - knownOverload = solution . overloadChoices . find ( storedLocator ) ; <nl> - <nl> - / / Could not find a user conversion . <nl> - if ( knownOverload = = solution . overloadChoices . end ( ) ) { <nl> - tc . diagnose ( expr - > getLoc ( ) , diag : : could_not_find_user_conversion , <nl> - expr - > getType ( ) , toType ) ; <nl> - return nullptr ; <nl> - } <nl> - <nl> - auto selected = knownOverload - > second ; <nl> - <nl> - / / FIXME : Location information is suspect throughout . <nl> - / / Form a reference to the constructor . <nl> - <nl> - / / Form a reference to the constructor or enum declaration . <nl> - / / FIXME : Bogus location info . <nl> - Expr * typeBase = TypeExpr : : createImplicitHack ( expr - > getStartLoc ( ) , toType , <nl> - tc . Context ) ; <nl> - Expr * declRef = buildMemberRef ( typeBase , <nl> - selected . openedFullType , <nl> - expr - > getStartLoc ( ) , <nl> - selected . choice . getDecl ( ) , <nl> - expr - > getStartLoc ( ) , <nl> - selected . openedType , <nl> - storedLocator , <nl> - / * Implicit = * / true , / * direct ivar * / false ) ; <nl> - <nl> - / / FIXME : Lack of openedType here is an issue . <nl> - ApplyExpr * apply = new ( tc . Context ) CallExpr ( declRef , expr , <nl> - / * Implicit = * / true ) ; <nl> - expr = finishApply ( apply , toType , locator ) ; <nl> - if ( ! expr ) <nl> - return nullptr ; <nl> - <nl> - return coerceToType ( expr , toType , locator ) ; <nl> - } <nl> - <nl> static uint getOptionalBindDepth ( const BoundGenericType * bgt ) { <nl> <nl> if ( bgt - > getDecl ( ) - > classifyAsOptionalType ( ) ) { <nl> Expr * ExprRewriter : : coerceToType ( Expr * expr , Type toType , <nl> isBridged ) ; <nl> } <nl> <nl> - case ConversionRestrictionKind : : User : { <nl> - tc . requirePointerArgumentIntrinsics ( expr - > getLoc ( ) ) ; <nl> - return coerceViaUserConversion ( expr , toType , locator ) ; <nl> - } <nl> - <nl> case ConversionRestrictionKind : : InoutToPointer : { <nl> tc . requirePointerArgumentIntrinsics ( expr - > getLoc ( ) ) ; <nl> return new ( tc . Context ) InOutToPointerExpr ( expr , toType ) ; <nl> Expr * ExprRewriter : : coerceToType ( Expr * expr , Type toType , <nl> } <nl> } <nl> <nl> - / / Coerce via conversion function or constructor . <nl> - if ( fromType - > getNominalOrBoundGenericNominal ( ) | | <nl> - fromType - > is < ArchetypeType > ( ) | | <nl> - toType - > getNominalOrBoundGenericNominal ( ) | | <nl> - toType - > is < ArchetypeType > ( ) ) { <nl> - return coerceViaUserConversion ( expr , toType , locator ) ; <nl> - } <nl> - <nl> / / Coercion from one metatype to another . <nl> if ( fromType - > is < MetatypeType > ( ) ) { <nl> auto toMeta = toType - > castTo < MetatypeType > ( ) ; <nl> mmm a / lib / Sema / CSRanking . cpp <nl> ppp b / lib / Sema / CSRanking . 
cpp <nl> static bool isDeclAsSpecializedAs ( TypeChecker & tc , DeclContext * dc , <nl> enum { <nl> CheckAll , <nl> CheckInput , <nl> - CheckResult <nl> } checkKind ; <nl> if ( isa < AbstractFunctionDecl > ( decl1 ) | | isa < EnumElementDecl > ( decl1 ) ) { <nl> / / Nothing to do : these have the curried ' self ' already . <nl> static bool isDeclAsSpecializedAs ( TypeChecker & tc , DeclContext * dc , <nl> } else { <nl> checkKind = CheckInput ; <nl> } <nl> - <nl> - / / Only check the result type for conversion functions . <nl> - if ( auto func = dyn_cast < FuncDecl > ( decl1 ) ) { <nl> - if ( func - > getName ( ) = = tc . Context . Id_Conversion ) <nl> - checkKind = CheckResult ; <nl> - } <nl> } else { <nl> / / Add a curried ' self ' type . <nl> assert ( ! type1 - > is < GenericFunctionType > ( ) & & " Odd generic function type ? " ) ; <nl> static bool isDeclAsSpecializedAs ( TypeChecker & tc , DeclContext * dc , <nl> locator ) ; <nl> break ; <nl> } <nl> - <nl> - case CheckResult : { <nl> - / / Check whether the first function type ' s input is a subtype of the second . <nl> - auto funcTy1 = openedType1 - > castTo < FunctionType > ( ) ; <nl> - auto funcTy2 = openedType2 - > castTo < FunctionType > ( ) ; <nl> - cs . addConstraint ( ConstraintKind : : Subtype , <nl> - funcTy1 - > getResult ( ) , <nl> - funcTy2 - > getResult ( ) , <nl> - locator ) ; <nl> - break ; <nl> - } <nl> } <nl> <nl> / / Solve the system . <nl> mmm a / lib / Sema / CSSimplify . cpp <nl> ppp b / lib / Sema / CSSimplify . cpp <nl> bool constraints : : matchCallArguments ( <nl> return listener . relabelArguments ( actualArgNames ) ; <nl> } <nl> <nl> - / / / Determine whether we should attempt a user - defined conversion . <nl> - static bool shouldTryUserConversion ( ConstraintSystem & cs , Type type ) { <nl> - / / Strip the l - value qualifier if present . <nl> - type = type - > getRValueType ( ) ; <nl> - <nl> - / / If this isn ' t a type that can have user - defined conversions , there ' s <nl> - / / nothing to do . <nl> - if ( ! type - > getNominalOrBoundGenericNominal ( ) & & ! type - > is < ArchetypeType > ( ) ) <nl> - return false ; <nl> - <nl> - / / If there are no user - defined conversions , there ' s nothing to do . <nl> - / / FIXME : lame name ! <nl> - auto & ctx = cs . getASTContext ( ) ; <nl> - auto name = ctx . Id_Conversion ; <nl> - return static_cast < bool > ( cs . lookupMember ( type , name ) ) ; <nl> - } <nl> - <nl> / / Match the argument of a call to the parameter . <nl> static ConstraintSystem : : SolutionKind <nl> matchCallArguments ( ConstraintSystem & cs , TypeMatchKind kind , <nl> static ConstraintKind getConstraintKind ( TypeMatchKind kind ) { <nl> llvm_unreachable ( " unhandled type matching kind " ) ; <nl> } <nl> <nl> - / / / If the given type has user - defined conversions , introduce new <nl> - / / / relational constraint between the result of performing the user - defined <nl> - / / / conversion and an arbitrary other type . <nl> - static ConstraintSystem : : SolutionKind <nl> - tryUserConversion ( ConstraintSystem & cs , Type type , ConstraintKind kind , <nl> - Type otherType , ConstraintLocatorBuilder locator ) { <nl> - assert ( kind ! = ConstraintKind : : Construction & & <nl> - kind ! = ConstraintKind : : Conversion & & <nl> - kind ! = ConstraintKind : : ArgumentTupleConversion & & <nl> - kind ! = ConstraintKind : : OperatorArgumentTupleConversion & & <nl> - kind ! 
= ConstraintKind : : OperatorArgumentConversion & & <nl> - " Construction / conversion constraints create potential cycles " ) ; <nl> - <nl> - / / If this isn ' t a type that can have user - defined conversions , there ' s <nl> - / / nothing to do . <nl> - if ( ! shouldTryUserConversion ( cs , type ) ) <nl> - return ConstraintSystem : : SolutionKind : : Unsolved ; <nl> - <nl> - auto memberLocator = cs . getConstraintLocator ( <nl> - locator . withPathElement ( <nl> - ConstraintLocator : : ConversionMember ) ) ; <nl> - auto inputTV = cs . createTypeVariable ( <nl> - cs . getConstraintLocator ( memberLocator , <nl> - ConstraintLocator : : ApplyArgument ) , <nl> - / * options = * / 0 ) ; <nl> - auto outputTV = cs . createTypeVariable ( <nl> - cs . getConstraintLocator ( memberLocator , <nl> - ConstraintLocator : : ApplyFunction ) , <nl> - / * options = * / 0 ) ; <nl> - <nl> - auto & ctx = cs . getASTContext ( ) ; <nl> - auto name = ctx . Id_Conversion ; <nl> - <nl> - / / The conversion function will have function type TI - > TO , for fresh <nl> - / / type variables TI and TO . <nl> - cs . addValueMemberConstraint ( type , name , <nl> - FunctionType : : get ( inputTV , outputTV ) , <nl> - memberLocator ) ; <nl> - <nl> - / / A conversion function must accept an empty parameter list ( ) . <nl> - / / Note : This should never fail , because the declaration checker <nl> - / / should ensure that conversions have no non - defaulted parameters . <nl> - cs . addConstraint ( ConstraintKind : : ArgumentTupleConversion , <nl> - TupleType : : getEmpty ( ctx ) , <nl> - inputTV , cs . getConstraintLocator ( locator ) ) ; <nl> - <nl> - / / Relate the output of the conversion function to the other type , using <nl> - / / the provided constraint kind . <nl> - / / If the type we ' re converting to is existential , we can also have an <nl> - / / existential conversion here , so introduce a disjunction . <nl> - auto resultLocator = cs . getConstraintLocator ( <nl> - locator . withPathElement ( <nl> - ConstraintLocator : : ConversionResult ) ) ; <nl> - if ( otherType - > isExistentialType ( ) ) { <nl> - Constraint * constraints [ 2 ] = { <nl> - Constraint : : create ( cs , kind , outputTV , otherType , Identifier ( ) , <nl> - resultLocator ) , <nl> - Constraint : : createRestricted ( cs , ConstraintKind : : Conversion , <nl> - ConversionRestrictionKind : : Existential , <nl> - outputTV , otherType , resultLocator ) <nl> - } ; <nl> - cs . addConstraint ( Constraint : : createDisjunction ( cs , constraints , <nl> - resultLocator ) ) ; <nl> - } else { <nl> - cs . addConstraint ( kind , outputTV , otherType , resultLocator ) ; <nl> - } <nl> - <nl> - / / We ' re adding a user - defined conversion . <nl> - cs . increaseScore ( SK_UserConversion ) ; <nl> - <nl> - return ConstraintSystem : : SolutionKind : : Solved ; <nl> - } <nl> - <nl> static bool isStringCompatiblePointerBaseType ( TypeChecker & TC , <nl> DeclContext * DC , <nl> Type baseType ) { <nl> ConstraintSystem : : matchTypes ( Type type1 , Type type2 , TypeMatchKind kind , <nl> } <nl> } <nl> <nl> - / / A nominal type can be converted to another type via a user - defined <nl> - / / conversion function . <nl> - if ( concrete & & kind > = TypeMatchKind : : Conversion & & <nl> - ! ( flags & TMF_ApplyingOperatorParameter ) & & <nl> - shouldTryUserConversion ( * this , type1 ) ) { <nl> - conversionsOrFixes . push_back ( ConversionRestrictionKind : : User ) ; <nl> - <nl> - / / Favor array conversions to non - array types ( such as NSArray ) . 
<nl> - if ( this - > isArrayType ( desugar1 ) & & ! this - > isArrayType ( desugar2 ) ) { <nl> - this - > increaseScore ( SK_UserConversion ) ; <nl> - } <nl> - } <nl> - <nl> commit_to_conversions : <nl> / / When we hit this point , we ' re committed to the set of potential <nl> / / conversions recorded thus far . <nl> ConstraintSystem : : simplifyRestrictedConstraint ( ConversionRestrictionKind restric <nl> bridgedObjCClass - > getDeclaredInterfaceType ( ) , <nl> TypeMatchKind : : Subtype , subFlags , locator ) ; <nl> } <nl> - <nl> - / / T ' < U , hasMember ( T , conversion , T - > T ' ) = = = > T < c U <nl> - case ConversionRestrictionKind : : User : <nl> - addContextualScore ( ) ; <nl> - assert ( matchKind > = TypeMatchKind : : Conversion ) ; <nl> - return tryUserConversion ( * this , type1 , <nl> - ConstraintKind : : Subtype , <nl> - type2 , <nl> - locator ) ; <nl> - <nl> } <nl> <nl> llvm_unreachable ( " bad conversion restriction " ) ; <nl> mmm a / lib / Sema / Constraint . cpp <nl> ppp b / lib / Sema / Constraint . cpp <nl> StringRef swift : : constraints : : getName ( ConversionRestrictionKind kind ) { <nl> return " [ cf - toll - free - bridge - to - objc ] " ; <nl> case ConversionRestrictionKind : : ObjCTollFreeBridgeToCF : <nl> return " [ objc - toll - free - bridge - to - cf ] " ; <nl> - case ConversionRestrictionKind : : User : <nl> - return " [ user ] " ; <nl> } <nl> llvm_unreachable ( " bad conversion restriction kind " ) ; <nl> } <nl> mmm a / lib / Sema / Constraint . h <nl> ppp b / lib / Sema / Constraint . h <nl> enum class ConversionRestrictionKind { <nl> CFTollFreeBridgeToObjC , <nl> / / / Implicit conversion from an Objective - C class type to its <nl> / / / toll - free - bridged CF type . <nl> - ObjCTollFreeBridgeToCF , <nl> - / / / User - defined conversions . <nl> - User <nl> + ObjCTollFreeBridgeToCF <nl> } ; <nl> <nl> / / / Return a string representation of a conversion restriction . <nl> mmm a / lib / Sema / ConstraintLocator . cpp <nl> ppp b / lib / Sema / ConstraintLocator . cpp <nl> void ConstraintLocator : : dump ( SourceManager * sm , raw_ostream & out ) { <nl> out < < " closure result " ; <nl> break ; <nl> <nl> - case ConversionMember : <nl> - out < < " conversion member " ; <nl> - break ; <nl> - <nl> - case ConversionResult : <nl> - out < < " conversion result " ; <nl> - break ; <nl> - <nl> case ConstructorMember : <nl> out < < " constructor member " ; <nl> break ; <nl> mmm a / lib / Sema / ConstraintLocator . h <nl> ppp b / lib / Sema / ConstraintLocator . h <nl> class ConstraintLocator : public llvm : : FoldingSetNode { <nl> ScalarToTuple , <nl> / / / \ brief The load of an lvalue . <nl> Load , <nl> - / / / \ brief The lookup for a conversion member . <nl> - ConversionMember , <nl> - / / / \ brief The conversion result . <nl> - ConversionResult , <nl> / / / \ brief The ' then ' branch of a ternary expression . <nl> IfThen , <nl> / / / \ brief The ' else ' branch of a ternary expression . 
<nl> class ConstraintLocator : public llvm : : FoldingSetNode { <nl> case LvalueObjectType : <nl> case ScalarToTuple : <nl> case Load : <nl> - case ConversionMember : <nl> - case ConversionResult : <nl> case IfThen : <nl> case IfElse : <nl> case AssignSource : <nl> class ConstraintLocator : public llvm : : FoldingSetNode { <nl> case CheckedCastOperand : <nl> case ClosureResult : <nl> case ConstructorMember : <nl> - case ConversionMember : <nl> - case ConversionResult : <nl> case InstanceType : <nl> case Load : <nl> case LvalueObjectType : <nl> mmm a / lib / Sema / TypeCheckNameLookup . cpp <nl> ppp b / lib / Sema / TypeCheckNameLookup . cpp <nl> LookupResult TypeChecker : : lookupMember ( Type type , DeclName name , <nl> / / We can ' t have tuple types here ; they need to be handled elsewhere . <nl> assert ( ! type - > is < TupleType > ( ) ) ; <nl> <nl> - / / Conversion lookups never permit dynamic lookup . <nl> - if ( name . isSimpleName ( Context . Id_Conversion ) ) { <nl> - options = options & ~ NL_DynamicLookup ; <nl> - } <nl> - <nl> / / Look for the member . <nl> if ( ! dc - > lookupQualified ( type , name , options , this , result . Results ) ) { <nl> / / If we didn ' t find anything , / and / this is a nominal type , check to see <nl> mmm a / lib / Sema / TypeCheckType . cpp <nl> ppp b / lib / Sema / TypeCheckType . cpp <nl> bool TypeChecker : : isRepresentableInObjC ( const AbstractFunctionDecl * AFD , <nl> } <nl> return false ; <nl> } <nl> - <nl> - / / FIXME : Egregious hack to avoid our conversion operations becoming <nl> - / / @ objc for bridged classes , which causes extraneous thunks . <nl> - if ( FD - > getName ( ) = = Context . Id_Conversion ) <nl> - return false ; <nl> } <nl> <nl> / / willSet / didSet implementations are never exposed to objc , they are always <nl> mmm a / test / decl / func / functions . swift <nl> ppp b / test / decl / func / functions . swift <nl> func ! ! ! < T > ( lhs : UnsafePointer < T > , rhs : UnsafePointer < T > ) - > Bool { return false <nl> / / < rdar : / / problem / 16786168 > Functions currently permit ' var inout ' parameters <nl> func var_inout_error ( inout var x : Int ) { } / / expected - error { { parameter may not have multiple ' inout ' , ' var ' , or ' let ' specifiers } } <nl> func var_inout_error ( var inout x : Int ) { } / / expected - error { { parameter may not have multiple ' inout ' , ' var ' , or ' let ' specifiers } } <nl> - <nl> - <nl> - / / Ban __conversion <nl> - struct Conversion { <nl> - func __conversion ( ) - > Int { return 0 } / / expected - error { { ' __conversion ' functions are no longer allowed } } <nl> - } <nl> mmm a / test / expr / postfix / dot / init_ref_delegation . swift <nl> ppp b / test / expr / postfix / dot / init_ref_delegation . swift <nl> struct RDar16603812 { <nl> var i = 42 <nl> init ( ) { } <nl> func foo ( ) { <nl> - self . init ( ) / / expected - error { { could not find a user - defined conversion from type ' RDar16603812 ' to type ' inout RDar16603812 ' } } expected - error { { initializer delegation can only occur within an initializer } } <nl> + self . init ( ) / / expected - error { { initializer delegation can only occur within an initializer } } <nl> } <nl> } <nl> <nl>
Remove user-defined conversions from the type checker.
apple/swift
397f4a98880f82e439b1c4164885abb21cd56d09
2014-08-21T21:59:49Z
mmm a / dbms / tests / queries / 0_stateless / 00362_great_circle_distance . sql <nl> ppp b / dbms / tests / queries / 0_stateless / 00362_great_circle_distance . sql <nl> SELECT floor ( greatCircleDistance ( 33 . 3 , 55 . 3 , 33 . 3 , 55 . 3 ) ) AS distance ; <nl> - - y = ' 37 . 588144 , 55 . 733842 ' <nl> - - m = ' 37 . 617780 , 55 . 755830 ' <nl> - - n = ' 83 . 089598 , 54 . 842461 ' <nl> - select abs ( greatCircleDistance ( 37 . 531014 , 55 . 703050 , 37 . 588144 , 55 . 733842 ) - 4964 . 25740448 ) / 4964 . 25740448 < 0 . 004 <nl> - select abs ( greatCircleDistance ( 37 . 531014 , 55 . 703050 , 37 . 617780 , 55 . 755830 ) - 8015 . 52288508 ) / 8015 . 52288508 < 0 . 004 <nl> - select abs ( greatCircleDistance ( 37 . 588144 , 55 . 733842 , 37 . 617780 , 55 . 755830 ) - 3075 . 27332275 ) / 3075 . 27332275 < 0 . 004 <nl> - select abs ( greatCircleDistance ( 83 . 089598 , 54 . 842461 , 37 . 617780 , 55 . 755830 ) - 2837839 . 72863 ) / 2837839 . 72863 < 0 . 004 <nl> - select abs ( greatCircleDistance ( 37 . 617780 , 55 . 755830 , 158 . 756175 , 53 . 006373 ) - 6802821 . 68814 ) / 6802821 . 68814 < 0 . 004 <nl> - select abs ( greatCircleDistance ( 83 . 089598 , 54 . 842461 , 158 . 756175 , 53 . 006373 ) - 4727216 . 39539 ) / 4727216 . 39539 < 0 . 004 <nl> + select abs ( greatCircleDistance ( 37 . 531014 , 55 . 703050 , 37 . 588144 , 55 . 733842 ) - 4964 . 25740448 ) / 4964 . 25740448 < 0 . 004 ; <nl> + select abs ( greatCircleDistance ( 37 . 531014 , 55 . 703050 , 37 . 617780 , 55 . 755830 ) - 8015 . 52288508 ) / 8015 . 52288508 < 0 . 004 ; <nl> + select abs ( greatCircleDistance ( 37 . 588144 , 55 . 733842 , 37 . 617780 , 55 . 755830 ) - 3075 . 27332275 ) / 3075 . 27332275 < 0 . 004 ; <nl> + select abs ( greatCircleDistance ( 83 . 089598 , 54 . 842461 , 37 . 617780 , 55 . 755830 ) - 2837839 . 72863 ) / 2837839 . 72863 < 0 . 004 ; <nl> + select abs ( greatCircleDistance ( 37 . 617780 , 55 . 755830 , 158 . 756175 , 53 . 006373 ) - 6802821 . 68814 ) / 6802821 . 68814 < 0 . 004 ; <nl> + select abs ( greatCircleDistance ( 83 . 089598 , 54 . 842461 , 158 . 756175 , 53 . 006373 ) - 4727216 . 39539 ) / 4727216 . 39539 < 0 . 004 ; <nl>
Fix test
ClickHouse/ClickHouse
fdaacb56455e9ff0c880f9dc7d667bc07f1d6aef
2019-10-21T09:37:50Z
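The fix above is purely syntactic: without terminating semicolons, consecutive SELECTs in a multi-statement .sql test parse as one malformed statement. The assertions themselves check a relative-error bound against a precomputed reference distance, and, for context only (the diff does not show ClickHouse's implementation), the classical haversine formula is one standard way such a distance is computed:

```latex
\frac{\lvert d - d_{\mathrm{ref}} \rvert}{d_{\mathrm{ref}}} < 0.004,
\qquad
d = 2R \arcsin \sqrt{\sin^{2}\frac{\varphi_2 - \varphi_1}{2}
    + \cos\varphi_1 \cos\varphi_2 \, \sin^{2}\frac{\lambda_2 - \lambda_1}{2}}
```

Here R is the Earth's radius and the (lambda, phi) pairs are longitudes and latitudes; a 0.4% tolerance absorbs floating-point noise as well as the spherical-Earth approximation.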
mmm a / tensorflow / python / distribute / cross_device_ops . py <nl> ppp b / tensorflow / python / distribute / cross_device_ops . py <nl> def broadcast_implementation ( self , tensor , destinations ) : <nl> class ReductionToOneDevice ( CrossDeviceOps ) : <nl> " " " Always do reduction to one device first and then do broadcasting . <nl> <nl> - Batch reduction is done by reduction on each element one by one . <nl> + Batch reduction is done by reduction on each element one by one . <nl> + <nl> + ` ` ` <nl> + mirrored_strategy = tf . distribute . MirroredStrategy ( <nl> + cross_device_ops = tf . distribute . ReductionToOneDevice ( ) ) <nl> + ` ` ` <nl> " " " <nl> <nl> def __init__ ( self , reduce_to_device = None , accumulation_fn = None ) : <nl> - " " " Initializes the instance of ReductionToOneDevice . <nl> + " " " Initializes with a device to reduce to and a way to accumulate . <nl> <nl> Args : <nl> reduce_to_device : the intermediate device to reduce to . If None , reduce <nl> - to the first device in ` destinations ` of the reduce ( ) method . <nl> + to the first device in ` destinations ` of the ` reduce ( ) ` method . <nl> accumulation_fn : a function that does accumulation . If None , then <nl> ` tf . math . add_n ` is used . <nl> " " " <nl>
Doc improvements to ReductionToOneDevice.
tensorflow/tensorflow
6f737e0dd60fc02138c6bf0dc34c6a7e64297c73
2019-08-22T21:16:27Z
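A minimal usage sketch for the two constructor arguments documented above. The `cross_device_ops=` pattern is taken from the new docstring; the `/cpu:0` device string and the explicit `tf.math.add_n` are illustrative choices, not part of the diff:

```python
import tensorflow as tf

# Defaults: reduce to the first device in `destinations` and accumulate
# with tf.math.add_n (what accumulation_fn=None falls back to).
ops = tf.distribute.ReductionToOneDevice()

# Or pin the intermediate reduction to a chosen device.
cpu_ops = tf.distribute.ReductionToOneDevice(reduce_to_device="/cpu:0",
                                             accumulation_fn=tf.math.add_n)

mirrored_strategy = tf.distribute.MirroredStrategy(cross_device_ops=cpu_ops)
```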
mmm a / lib / SILOptimizer / FunctionSignatureTransforms / ExistentialSpecializer . cpp <nl> ppp b / lib / SILOptimizer / FunctionSignatureTransforms / ExistentialSpecializer . cpp <nl> static bool findConcreteType ( ApplySite AI , int ArgIdx , CanType & ConcreteType ) { <nl> SILValue InitExistential = <nl> findInitExistentialFromGlobalAddrAndApply ( GAI , AI , ArgIdx ) ; <nl> / / / If the Arg is already init_existential , return the concrete type . <nl> - if ( findConcreteTypeFromInitExistential ( InitExistential , ConcreteType ) ) { <nl> + if ( InitExistential & & <nl> + findConcreteTypeFromInitExistential ( InitExistential , ConcreteType ) ) { <nl> return true ; <nl> } <nl> } <nl> bool ExistentialSpecializer : : canSpecializeCalleeFunction ( ApplySite & Apply ) { <nl> if ( Callee - > getInlineStrategy ( ) = = Inline_t : : AlwaysInline ) <nl> return false ; <nl> <nl> + / / / Ignore externally linked functions with public_external or higher <nl> + / / / linkage . <nl> + if ( isAvailableExternally ( Callee - > getLinkage ( ) ) ) { <nl> + return false ; <nl> + } <nl> + <nl> / / / Only choose a select few function representations for specialization . <nl> switch ( Callee - > getRepresentation ( ) ) { <nl> case SILFunctionTypeRepresentation : : ObjCMethod : <nl> mmm a / lib / SILOptimizer / FunctionSignatureTransforms / ExistentialTransform . cpp <nl> ppp b / lib / SILOptimizer / FunctionSignatureTransforms / ExistentialTransform . cpp <nl> void ExistentialSpecializerCloner : : cloneAndPopulateFunction ( ) { <nl> SILModule & M = OrigF - > getModule ( ) ; <nl> auto & Ctx = M . getASTContext ( ) ; <nl> llvm : : SmallDenseMap < int , AllocStackInst * > ArgToAllocStackMap ; <nl> - bool MissingDestroyUse = false ; <nl> <nl> NewFBuilder . setInsertionPoint ( ClonedEntryBB ) ; <nl> <nl> void ExistentialSpecializerCloner : : cloneAndPopulateFunction ( ) { <nl> IsInitialization_t : : IsInitialization ) ; <nl> if ( ExistentialArgDescriptor [ ArgDesc . Index ] . DestroyAddrUse ) { <nl> NewFBuilder . createDestroyAddr ( InsertLoc , NewArg ) ; <nl> - } else { <nl> - MissingDestroyUse = true ; <nl> } <nl> entryArgs . push_back ( ASI ) ; <nl> break ; <nl> void ExistentialSpecializerCloner : : cloneAndPopulateFunction ( ) { <nl> / / / If there is an argument with no DestroyUse , insert DeallocStack <nl> / / / before return Instruction . <nl> llvm : : SmallPtrSet < ReturnInst * , 4 > ReturnInsts ; <nl> - if ( MissingDestroyUse ) { <nl> - / / / Find the set of return instructions in a function . <nl> - for ( auto & BB : NewF ) { <nl> - TermInst * TI = BB . getTerminator ( ) ; <nl> - if ( auto * RI = dyn_cast < ReturnInst > ( TI ) ) { <nl> - ReturnInsts . insert ( RI ) ; <nl> - } <nl> + / / / Find the set of return instructions in a function . <nl> + for ( auto & BB : NewF ) { <nl> + TermInst * TI = BB . getTerminator ( ) ; <nl> + if ( auto * RI = dyn_cast < ReturnInst > ( TI ) ) { <nl> + ReturnInsts . insert ( RI ) ; <nl> } <nl> } <nl> <nl> void ExistentialSpecializerCloner : : cloneAndPopulateFunction ( ) { <nl> int ArgIndex = ArgDesc . Index ; <nl> auto iter = ArgToAllocStackMap . find ( ArgIndex ) ; <nl> if ( iter ! = ArgToAllocStackMap . end ( ) ) { <nl> - auto it = ExistentialArgDescriptor . find ( ArgIndex ) ; <nl> - if ( it ! = ExistentialArgDescriptor . end ( ) & & it - > second . 
DestroyAddrUse ) { <nl> - for ( Operand * ASIUse : iter - > second - > getUses ( ) ) { <nl> - auto * ASIUser = ASIUse - > getUser ( ) ; <nl> - if ( auto * DAI = dyn_cast < DestroyAddrInst > ( ASIUser ) ) { <nl> - SILBuilder Builder ( ASIUser ) ; <nl> - Builder . setInsertionPoint ( & * std : : next ( ASIUser - > getIterator ( ) ) ) ; <nl> - Builder . createDeallocStack ( DAI - > getLoc ( ) , iter - > second ) ; <nl> - } <nl> - } <nl> - } else { / / Need to insert DeallocStack before return . <nl> - for ( auto * I : ReturnInsts ) { <nl> - SILBuilder Builder ( I - > getParent ( ) ) ; <nl> - Builder . setInsertionPoint ( I ) ; <nl> - Builder . createDeallocStack ( iter - > second - > getLoc ( ) , iter - > second ) ; <nl> - } <nl> + / / Need to insert DeallocStack before return . <nl> + for ( auto * I : ReturnInsts ) { <nl> + SILBuilder Builder ( I - > getParent ( ) ) ; <nl> + Builder . setInsertionPoint ( I ) ; <nl> + Builder . createDeallocStack ( iter - > second - > getLoc ( ) , iter - > second ) ; <nl> } <nl> } <nl> } <nl> new file mode 100644 <nl> index 000000000000 . . a4517e563feb <nl> mmm / dev / null <nl> ppp b / test / SILOptimizer / existential_transform_extras . sil <nl> <nl> + / / RUN : % target - sil - opt - assume - parsing - unqualified - ownership - sil - enable - sil - verify - all % s - enable - sil - existential - specializer - existential - specializer | % FileCheck % s <nl> + <nl> + / / Additional tests for existential_specializer <nl> + <nl> + import Builtin <nl> + import Swift <nl> + import SwiftShims <nl> + <nl> + internal protocol P { <nl> + func foo ( ) - > Int32 <nl> + } <nl> + <nl> + internal class Klass1 : P { <nl> + @ inline ( never ) func foo ( ) - > Int32 <nl> + init ( ) <nl> + } <nl> + <nl> + internal class Klass2 : P { <nl> + @ inline ( never ) func foo ( ) - > Int32 <nl> + init ( ) <nl> + } <nl> + <nl> + @ inline ( never ) internal func wrap_foo_ncp ( a : inout P , b : inout P ) - > Int32 <nl> + <nl> + @ inline ( never ) func ncp ( ) <nl> + <nl> + sil hidden [ noinline ] @ $ s7dealloc3ncpyyF : $ @ convention ( thin ) ( ) - > Int32 { <nl> + bb0 : <nl> + % 0 = alloc_stack $ P , var , name " magic2 " <nl> + % 1 = alloc_ref $ Klass1 <nl> + % 4 = init_existential_addr % 0 : $ * P , $ Klass1 <nl> + store % 1 to % 4 : $ * Klass1 <nl> + % 6 = alloc_stack $ P , var , name " magic3 " <nl> + % 7 = alloc_ref $ Klass1 <nl> + % 10 = init_existential_addr % 6 : $ * P , $ Klass1 <nl> + store % 7 to % 10 : $ * Klass1 <nl> + % 12 = function_ref @ $ s7dealloc12wrap_foo_ncp1a1bSiAA1P_pz_AaE_pztF : $ @ convention ( thin ) ( @ in P , @ in P ) - > Int32 <nl> + % 13 = apply % 12 ( % 0 , % 6 ) : $ @ convention ( thin ) ( @ in P , @ in P ) - > Int32 <nl> + debug_value % 13 : $ Int32 , let , name " x " <nl> + % 14 = alloc_stack $ P , var , name " magic4 " <nl> + % 15 = alloc_ref $ Klass1 <nl> + % 16 = init_existential_addr % 14 : $ * P , $ Klass1 <nl> + store % 15 to % 16 : $ * Klass1 <nl> + % 17 = function_ref @ $ s7dealloc20wrap_foo_ncp_another1aSiAA1P_pz_tF : $ @ convention ( thin ) ( @ inout P ) - > Int32 <nl> + % 18 = apply % 17 ( % 14 ) : $ @ convention ( thin ) ( @ inout P ) - > Int32 <nl> + % 24 = struct_extract % 13 : $ Int32 , # Int32 . _value <nl> + % 25 = struct_extract % 18 : $ Int32 , # Int32 . _value <nl> + % 26 = integer_literal $ Builtin . Int1 , - 1 <nl> + % 27 = builtin " sadd_with_overflow_Int32 " ( % 24 : $ Builtin . Int32 , % 25 : $ Builtin . Int32 , % 26 : $ Builtin . Int1 ) : $ ( Builtin . Int32 , Builtin . 
Int1 ) <nl> + % 28 = tuple_extract % 27 : $ ( Builtin . Int32 , Builtin . Int1 ) , 0 <nl> + % 29 = tuple_extract % 27 : $ ( Builtin . Int32 , Builtin . Int1 ) , 1 <nl> + cond_fail % 29 : $ Builtin . Int1 <nl> + % 31 = struct $ Int32 ( % 28 : $ Builtin . Int32 ) <nl> + destroy_addr % 14 : $ * P <nl> + dealloc_stack % 14 : $ * P <nl> + dealloc_stack % 6 : $ * P <nl> + dealloc_stack % 0 : $ * P <nl> + return % 31 : $ Int32 <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil public_external [ serialized ] @ $ s7dealloc20wrap_foo_ncp_another1aSiAA1P_pz_tF : $ @ convention ( thin ) ( @ inout P ) - > Int32 { <nl> + / / CHECK : bb0 ( % 0 : $ * P ) : <nl> + / / CHECK : debug_value_addr <nl> + / / CHECK : alloc_stack <nl> + / / CHECK : copy_addr <nl> + / / CHECK : open_existential_addr <nl> + / / CHECK : witness_method <nl> + / / CHECK : apply <nl> + / / CHECK : destroy_addr <nl> + / / CHECK : dealloc_stack <nl> + / / CHECK : return <nl> + / / CHECK - LABEL : } / / end sil function ' $ s7dealloc20wrap_foo_ncp_another1aSiAA1P_pz_tF ' <nl> + sil public_external [ serialized ] @ $ s7dealloc20wrap_foo_ncp_another1aSiAA1P_pz_tF : $ @ convention ( thin ) ( @ inout P ) - > Int32 { <nl> + bb0 ( % 0 : $ * P ) : <nl> + debug_value_addr % 0 : $ * P , var , name " a " , argno 1 <nl> + % 2 = alloc_stack $ P <nl> + copy_addr % 0 to [ initialization ] % 2 : $ * P <nl> + % 4 = open_existential_addr immutable_access % 2 : $ * P to $ * @ opened ( " EE9F89E4 - ECF4 - 11E8 - 8DDF - D0817AD4059B " ) P <nl> + % 5 = witness_method $ @ opened ( " EE9F89E4 - ECF4 - 11E8 - 8DDF - D0817AD4059B " ) P , # P . foo ! 1 : < Self where Self : P > ( Self ) - > ( ) - > Int32 , % 4 : $ * @ opened ( " EE9F89E4 - ECF4 - 11E8 - 8DDF - D0817AD4059B " ) P : $ @ convention ( witness_method : P ) < τ_0_0 where τ_0_0 : P > ( @ in_guaranteed τ_0_0 ) - > Int32 <nl> + % 6 = apply % 5 < @ opened ( " EE9F89E4 - ECF4 - 11E8 - 8DDF - D0817AD4059B " ) P > ( % 4 ) : $ @ convention ( witness_method : P ) < τ_0_0 where τ_0_0 : P > ( @ in_guaranteed τ_0_0 ) - > Int32 <nl> + destroy_addr % 2 : $ * P <nl> + dealloc_stack % 2 : $ * P <nl> + return % 6 : $ Int32 <nl> + } / / end sil function ' $ s7dealloc20wrap_foo_ncp_another1aSiAA1P_pz_tF ' <nl> + <nl> + sil shared [ noinline ] @ $ s7dealloc6Klass1C3fooSiyFTf4d_n : $ @ convention ( thin ) ( ) - > Int32 { <nl> + bb0 : <nl> + % 0 = integer_literal $ Builtin . Int32 , 10 <nl> + % 1 = struct $ Int32 ( % 0 : $ Builtin . 
Int32 ) <nl> + return % 1 : $ Int32 <nl> + } <nl> + <nl> + sil_global hidden [ let ] @ $ global_var : $ P <nl> + <nl> + / / CHECK - LABEL : sil hidden [ noinline ] @ $ helper : $ @ convention ( thin ) ( @ in P ) - > Int32 { <nl> + / / CHECK : bb0 ( % 0 : $ * P ) : <nl> + / / CHECK : debug_value_addr <nl> + / / CHECK : alloc_stack <nl> + / / CHECK : copy_addr <nl> + / / CHECK : destroy_addr <nl> + / / CHECK : open_existential_addr <nl> + / / CHECK : witness_method <nl> + / / CHECK : apply <nl> + / / CHECK : dealloc_stack <nl> + / / CHECK : return <nl> + / / CHECK - LABEL : } / / end sil function ' $ helper ' <nl> + sil hidden [ noinline ] @ $ helper : $ @ convention ( thin ) ( @ in P ) - > Int32 { <nl> + bb0 ( % 0 : $ * P ) : <nl> + debug_value_addr % 0 : $ * P , var , name " a " , argno 1 <nl> + % 4 = alloc_stack $ P <nl> + copy_addr % 0 to [ initialization ] % 4 : $ * P <nl> + destroy_addr % 0 : $ * P <nl> + % 6 = open_existential_addr immutable_access % 4 : $ * P to $ * @ opened ( " 3CB58EC4 - ECED - 11E8 - 9798 - D0817AD4059B " ) P <nl> + % 7 = witness_method $ @ opened ( " 3CB58EC4 - ECED - 11E8 - 9798 - D0817AD4059B " ) P , # P . foo ! 1 : < Self where Self : P > ( Self ) - > ( ) - > Int32 , % 6 : $ * @ opened ( " 3CB58EC4 - ECED - 11E8 - 9798 - D0817AD4059B " ) P : $ @ convention ( witness_method : P ) < τ_0_0 where τ_0_0 : P > ( @ in_guaranteed τ_0_0 ) - > Int32 <nl> + % 8 = apply % 7 < @ opened ( " 3CB58EC4 - ECED - 11E8 - 9798 - D0817AD4059B " ) P > ( % 6 ) : $ @ convention ( witness_method : P ) < τ_0_0 where τ_0_0 : P > ( @ in_guaranteed τ_0_0 ) - > Int32 <nl> + dealloc_stack % 4 : $ * P <nl> + return % 8 : $ Int32 <nl> + } <nl> + <nl> + sil @ global_addr_init : $ @ convention ( thin ) ( Builtin . Int1 ) - > Int32 { <nl> + bb0 ( % 0 : $ Builtin . 
Int1 ) : <nl> + alloc_global @ $ global_var <nl> + % 1 = global_addr @ $ global_var : $ * P <nl> + cond_br % 0 , bb1 , bb2 <nl> + <nl> + bb1 : <nl> + % 2 = init_existential_addr % 1 : $ * P , $ Klass1 <nl> + % 3 = alloc_ref $ Klass1 <nl> + store % 3 to % 2 : $ * Klass1 <nl> + br bb3 <nl> + <nl> + bb2 : <nl> + % 5 = init_existential_addr % 1 : $ * P , $ Klass2 <nl> + % 6 = alloc_ref $ Klass2 <nl> + store % 6 to % 5 : $ * Klass2 <nl> + br bb3 <nl> + <nl> + bb3 : <nl> + % 12 = function_ref @ $ helper : $ @ convention ( thin ) ( @ in P ) - > Int32 <nl> + % 13 = apply % 12 ( % 1 ) : $ @ convention ( thin ) ( @ in P ) - > Int32 <nl> + return % 13 : $ Int32 <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil shared [ noinline ] @ $ s7dealloc12wrap_foo_ncp1a1bSiAA1P_pz_AaE_pztFTf4ee_n : $ @ convention ( thin ) < τ_0_0 , τ_0_1 where τ_0_0 : P , τ_0_1 : P > ( @ in τ_0_0 , @ in τ_0_1 ) - > Int32 { <nl> + / / CHECK : bb0 ( % 0 : $ * τ_0_0 , % 1 : $ * τ_0_1 ) : <nl> + / / CHECK : alloc_stack <nl> + / / CHECK : init_existential_addr <nl> + / / CHECK : copy_addr <nl> + / / CHECK : destroy_addr <nl> + / / CHECK : alloc_stack <nl> + / / CHECK : init_existential_addr <nl> + / / CHECK : copy_addr <nl> + / / CHECK : destroy_addr <nl> + / / CHECK : debug_value_addr <nl> + / / CHECK : debug_value_addr <nl> + / / CHECK : alloc_stack <nl> + / / CHECK : copy_addr <nl> + / / CHECK : destroy_addr <nl> + / / CHECK : open_existential_addr <nl> + / / CHECK : witness_method <nl> + / / CHECK : apply <nl> + / / CHECK : alloc_stack <nl> + / / CHECK : copy_addr <nl> + / / CHECK : destroy_addr <nl> + / / CHECK : open_existential_addr <nl> + / / CHECK : witness_method <nl> + / / CHECK : apply <nl> + / / CHECK : struct_extract <nl> + / / CHECK : struct_extract <nl> + / / CHECK : integer_literal <nl> + / / CHECK : builtin <nl> + / / CHECK : tuple_extract <nl> + / / CHECK : tuple_extract <nl> + / / CHECK : cond_fail <nl> + / / CHECK : struct <nl> + / / CHECK : dealloc_stack <nl> + / / CHECK : dealloc_stack <nl> + / / CHECK : dealloc_stack <nl> + / / CHECK : dealloc_stack <nl> + / / CHECK : return <nl> + / / CHECK - LABEL : } / / end sil function ' $ s7dealloc12wrap_foo_ncp1a1bSiAA1P_pz_AaE_pztFTf4ee_n ' <nl> + sil hidden [ noinline ] @ $ s7dealloc12wrap_foo_ncp1a1bSiAA1P_pz_AaE_pztF : $ @ convention ( thin ) ( @ in P , @ in P ) - > Int32 { <nl> + bb0 ( % 0 : $ * P , % 1 : $ * P ) : <nl> + debug_value_addr % 0 : $ * P , var , name " a " , argno 1 <nl> + debug_value_addr % 1 : $ * P , var , name " b " , argno 2 <nl> + % 4 = alloc_stack $ P <nl> + copy_addr % 0 to [ initialization ] % 4 : $ * P <nl> + destroy_addr % 0 : $ * P <nl> + % 6 = open_existential_addr immutable_access % 4 : $ * P to $ * @ opened ( " 3CB58EC4 - ECED - 11E8 - 9798 - D0817AD4059B " ) P <nl> + % 7 = witness_method $ @ opened ( " 3CB58EC4 - ECED - 11E8 - 9798 - D0817AD4059B " ) P , # P . foo ! 
1 : < Self where Self : P > ( Self ) - > ( ) - > Int32 , % 6 : $ * @ opened ( " 3CB58EC4 - ECED - 11E8 - 9798 - D0817AD4059B " ) P : $ @ convention ( witness_method : P ) < τ_0_0 where τ_0_0 : P > ( @ in_guaranteed τ_0_0 ) - > Int32 <nl> + % 8 = apply % 7 < @ opened ( " 3CB58EC4 - ECED - 11E8 - 9798 - D0817AD4059B " ) P > ( % 6 ) : $ @ convention ( witness_method : P ) < τ_0_0 where τ_0_0 : P > ( @ in_guaranteed τ_0_0 ) - > Int32 <nl> + % 9 = alloc_stack $ P <nl> + copy_addr % 1 to [ initialization ] % 9 : $ * P <nl> + destroy_addr % 1 : $ * P <nl> + % 11 = open_existential_addr immutable_access % 9 : $ * P to $ * @ opened ( " 3CB58FAA - ECED - 11E8 - 9798 - D0817AD4059B " ) P <nl> + % 12 = witness_method $ @ opened ( " 3CB58FAA - ECED - 11E8 - 9798 - D0817AD4059B " ) P , # P . foo ! 1 : < Self where Self : P > ( Self ) - > ( ) - > Int32 , % 11 : $ * @ opened ( " 3CB58FAA - ECED - 11E8 - 9798 - D0817AD4059B " ) P : $ @ convention ( witness_method : P ) < τ_0_0 where τ_0_0 : P > ( @ in_guaranteed τ_0_0 ) - > Int32 <nl> + % 13 = apply % 12 < @ opened ( " 3CB58FAA - ECED - 11E8 - 9798 - D0817AD4059B " ) P > ( % 11 ) : $ @ convention ( witness_method : P ) < τ_0_0 where τ_0_0 : P > ( @ in_guaranteed τ_0_0 ) - > Int32 <nl> + % 14 = struct_extract % 8 : $ Int32 , # Int32 . _value <nl> + % 15 = struct_extract % 13 : $ Int32 , # Int32 . _value <nl> + % 16 = integer_literal $ Builtin . Int1 , - 1 <nl> + % 17 = builtin " sadd_with_overflow_Int32 " ( % 14 : $ Builtin . Int32 , % 15 : $ Builtin . Int32 , % 16 : $ Builtin . Int1 ) : $ ( Builtin . Int32 , Builtin . Int1 ) <nl> + % 18 = tuple_extract % 17 : $ ( Builtin . Int32 , Builtin . Int1 ) , 0 <nl> + % 19 = tuple_extract % 17 : $ ( Builtin . Int32 , Builtin . Int1 ) , 1 <nl> + cond_fail % 19 : $ Builtin . Int1 <nl> + % 21 = struct $ Int32 ( % 18 : $ Builtin . Int32 ) <nl> + dealloc_stack % 9 : $ * P <nl> + dealloc_stack % 4 : $ * P <nl> + return % 21 : $ Int32 <nl> + } <nl> + <nl> + sil_witness_table hidden Klass1 : P module dealloc { <nl> + method # P . foo ! 1 : < Self where Self : P > ( Self ) - > ( ) - > Int32 : nil <nl> + } <nl> + <nl> + sil_witness_table hidden Klass2 : P module dealloc { <nl> + method # P . foo ! 1 : < Self where Self : P > ( Self ) - > ( ) - > Int32 : nil <nl> + } <nl> mmm a / tools / sil - opt / SILOpt . cpp <nl> ppp b / tools / sil - opt / SILOpt . cpp <nl> static llvm : : cl : : opt < int > <nl> SILInlineThreshold ( " sil - inline - threshold " , llvm : : cl : : Hidden , <nl> llvm : : cl : : init ( - 1 ) ) ; <nl> <nl> + static llvm : : cl : : opt < bool > <nl> + SILExistentialSpecializer ( " enable - sil - existential - specializer " , <nl> + llvm : : cl : : Hidden , <nl> + llvm : : cl : : init ( false ) ) ; <nl> + <nl> static llvm : : cl : : opt < bool > <nl> EnableSILVerifyAll ( " enable - sil - verify - all " , <nl> llvm : : cl : : Hidden , <nl> int main ( int argc , char * * argv ) { <nl> / / Setup the SIL Options . <nl> SILOptions & SILOpts = Invocation . getSILOptions ( ) ; <nl> SILOpts . InlineThreshold = SILInlineThreshold ; <nl> + SILOpts . ExistentialSpecializer = SILExistentialSpecializer ; <nl> SILOpts . VerifyAll = EnableSILVerifyAll ; <nl> SILOpts . RemoveRuntimeAsserts = RemoveRuntimeAsserts ; <nl> SILOpts . AssertConfig = AssertConfId ; <nl>
Merge remote-tracking branch 'origin/master' into master-next
apple/swift
f710f6776c12aa6bed9e36b4b4325eec1357f925
2018-12-03T19:30:00Z
new file mode 100644 <nl> index 000000000000 . . 70154d698af5 <nl> mmm / dev / null <nl> ppp b / jstests / replsets / oplog_slow_sampling_logging . js <nl> <nl> + / * * <nl> + * Ensure serverStatus reports the total time spent sampling the oplog for all storage engines that <nl> + * support OplogStones . <nl> + * @ tags : [ requires_wiredtiger , requires_persistence , requires_fcv_44 ] <nl> + * / <nl> + ( function ( ) { <nl> + " use strict " ; <nl> + <nl> + const kOplogDocs = 45000 ; <nl> + / / kNumOplogSamples is derived from the number of oplog entries above . <nl> + const kNumOplogSamples = 15 ; <nl> + const kOplogSampleReadDelay = 1 ; <nl> + const kLoggingIntervalSeconds = 3 ; <nl> + <nl> + const testDB = " test " ; <nl> + <nl> + / / Force oplog sampling to occur on start up for small numbers of oplog inserts . <nl> + const replSet = new ReplSetTest ( { <nl> + nodes : 1 , <nl> + nodeOptions : { <nl> + setParameter : { <nl> + " maxOplogTruncationPointsDuringStartup " : 10 , <nl> + " oplogSamplingLogIntervalSeconds " : kLoggingIntervalSeconds , <nl> + " failpoint . slowOplogSamplingReads " : <nl> + tojson ( { mode : " alwaysOn " , data : { " delay " : kOplogSampleReadDelay } } ) <nl> + } <nl> + } <nl> + } ) ; <nl> + replSet . startSet ( ) ; <nl> + replSet . initiate ( ) ; <nl> + <nl> + let coll = replSet . getPrimary ( ) . getDB ( testDB ) . getCollection ( " testcoll " ) ; <nl> + <nl> + / / Insert enough documents to force kNumOplogSamples to be taken on the following start up . <nl> + for ( let i = 0 ; i < kOplogDocs ; i + + ) { <nl> + assert . commandWorked ( coll . insert ( { m : 1 + i } ) ) ; <nl> + } <nl> + <nl> + / / Restart replica set to load entries from the oplog for sampling . <nl> + replSet . stopSet ( null / * signal * / , true / * forRestart * / ) ; <nl> + replSet . startSet ( { restart : true } ) ; <nl> + <nl> + assert . commandWorked ( replSet . getPrimary ( ) . getDB ( testDB ) . serverStatus ( ) ) ; <nl> + <nl> + / / Err on the side of a smaller minExpectedLogs where fractional parts are concerned because <nl> + / / kLoggingIntervalSeconds is not an exact interval . Rather , once interval seconds have elapsed <nl> + / / since the last log message , a progress message will be logged after the current sample is <nl> + / / completed . <nl> + const maxSamplesPerLog = Math . ceil ( kLoggingIntervalSeconds / kOplogSampleReadDelay ) ; <nl> + const minExpectedLogs = Math . floor ( kNumOplogSamples / maxSamplesPerLog ) ; <nl> + <nl> + checkLog . containsWithAtLeastCount ( <nl> + replSet . getPrimary ( ) , " Oplog sampling progress : " , minExpectedLogs ) ; <nl> + assert ( checkLog . checkContainsOnce ( replSet . getPrimary ( ) , " Oplog sampling complete " ) ) ; <nl> + <nl> + replSet . stopSet ( ) ; <nl> + } ) ( ) ; <nl> mmm a / src / mongo / db / storage / wiredtiger / oplog_stone_parameters . idl <nl> ppp b / src / mongo / db / storage / wiredtiger / oplog_stone_parameters . idl <nl> server_parameters : <nl> cpp_varname : gOplogStoneSizeMB <nl> default : 0 <nl> validator : { gte : 0 } <nl> + oplogSamplingLogIntervalSeconds : <nl> + description : ' The approximate interval between log messages indicating oplog sampling progress during start up . Once interval seconds have elapsed since the last log message , a progress message will be logged after the current sample is completed . A value of zero will disable this logging . 
' <nl> + set_at : [ startup , runtime ] <nl> + cpp_vartype : ' AtomicWord < int > ' <nl> + cpp_varname : gOplogSamplingLogIntervalSeconds <nl> + default : 10 <nl> + validator : { gte : 0 } <nl> mmm a / src / mongo / db / storage / wiredtiger / wiredtiger_record_store . cpp <nl> ppp b / src / mongo / db / storage / wiredtiger / wiredtiger_record_store . cpp <nl> void checkOplogFormatVersion ( OperationContext * opCtx , const std : : string & uri ) { <nl> <nl> MONGO_FAIL_POINT_DEFINE ( WTWriteConflictException ) ; <nl> MONGO_FAIL_POINT_DEFINE ( WTWriteConflictExceptionForReads ) ; <nl> + MONGO_FAIL_POINT_DEFINE ( slowOplogSamplingReads ) ; <nl> <nl> const std : : string kWiredTigerEngineName = " wiredTiger " ; <nl> <nl> void WiredTigerRecordStore : : OplogStones : : _calculateStonesBySampling ( OperationCon <nl> / / each logical section . <nl> auto cursor = _rs - > getRandomCursorWithOptions ( opCtx , extraConfig ) ; <nl> std : : vector < RecordId > oplogEstimates ; <nl> + auto lastProgressLog = Date_t : : now ( ) ; <nl> for ( int i = 0 ; i < numSamples ; + + i ) { <nl> + auto samplingLogIntervalSeconds = gOplogSamplingLogIntervalSeconds . load ( ) ; <nl> + slowOplogSamplingReads . execute ( <nl> + [ & ] ( const BSONObj & dataObj ) { sleepsecs ( dataObj [ " delay " ] . numberInt ( ) ) ; } ) ; <nl> auto record = cursor - > next ( ) ; <nl> if ( ! record ) { <nl> / / This shouldn ' t really happen unless the size storer values are far off from reality . <nl> void WiredTigerRecordStore : : OplogStones : : _calculateStonesBySampling ( OperationCon <nl> return ; <nl> } <nl> oplogEstimates . push_back ( record - > id ) ; <nl> + <nl> + const auto now = Date_t : : now ( ) ; <nl> + if ( samplingLogIntervalSeconds > 0 & & <nl> + now - lastProgressLog > = Seconds ( samplingLogIntervalSeconds ) ) { <nl> + log ( ) < < " Oplog sampling progress : " < < ( i + 1 ) < < " of " < < numSamples <nl> + < < " samples taken " ; <nl> + lastProgressLog = now ; <nl> + } <nl> } <nl> std : : sort ( oplogEstimates . begin ( ) , oplogEstimates . end ( ) ) ; <nl> + log ( ) < < " Oplog sampling complete " ; <nl> <nl> for ( int i = 1 ; i < = wholeStones ; + + i ) { <nl> / / Use every ( kRandomSamplesPerStone ) th sample , starting with the <nl>
SERVER-41790 Log progress during oplog sampling at startup
mongodb/mongo
0e8ae6324e515b947b59d23988f2e7238a2f30c6
2019-12-20T21:45:35Z
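Stripped of WiredTiger specifics, the logging added to `_calculateStonesBySampling` is a generic throttled-progress pattern: emit at most one message per interval, and only after the current sample completes, which is why the parameter description calls the interval approximate. A sketch in Python with illustrative names:

```python
import time

def sample_with_progress(num_samples, take_one_sample, interval_s=10):
    """Log progress at most once per interval_s seconds; 0 disables it."""
    last_log = time.monotonic()
    for i in range(num_samples):
        take_one_sample()            # may be slow (cf. slowOplogSamplingReads)
        now = time.monotonic()
        if interval_s > 0 and now - last_log >= interval_s:
            print(f"Oplog sampling progress: {i + 1} of {num_samples} samples taken")
            last_log = now
    print("Oplog sampling complete")
```

The jstest's expected counts follow directly: with a per-sample delay of d seconds, at most ceil(interval_s / d) samples fit between messages, hence at least floor(num_samples / ceil(interval_s / d)) progress lines must appear.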
mmm a / examples / mass_spring . py <nl> ppp b / examples / mass_spring . py <nl> <nl> bias2 = scalar ( ) <nl> hidden = scalar ( ) <nl> <nl> + center = vec ( ) <nl> + <nl> act = scalar ( ) <nl> <nl> + def n_input_states ( ) : <nl> + return n_sin_waves + 4 * n_objects + 2 <nl> + <nl> @ ti . layout <nl> def place ( ) : <nl> ti . root . dense ( ti . l , max_steps ) . dense ( ti . i , n_objects ) . place ( x , v , v_inc ) <nl> ti . root . dense ( ti . i , n_springs ) . place ( spring_anchor_a , spring_anchor_b , <nl> spring_length , spring_stiffness , <nl> spring_actuation ) <nl> - ti . root . dense ( ti . ij , ( n_springs , n_sin_waves ) ) . place ( weights1 ) <nl> - ti . root . dense ( ti . i , n_springs ) . place ( bias1 ) <nl> + ti . root . dense ( ti . ij , ( n_hidden , n_input_states ( ) ) ) . place ( weights1 ) <nl> + ti . root . dense ( ti . i , n_hidden ) . place ( bias1 ) <nl> ti . root . dense ( ti . ij , ( n_springs , n_hidden ) ) . place ( weights2 ) <nl> ti . root . dense ( ti . i , n_springs ) . place ( bias2 ) <nl> ti . root . dense ( ti . ij , ( max_steps , n_hidden ) ) . place ( hidden ) <nl> ti . root . dense ( ti . ij , ( max_steps , n_springs ) ) . place ( act ) <nl> + ti . root . dense ( ti . i , max_steps ) . place ( center ) <nl> ti . root . place ( loss ) <nl> ti . root . lazy_grad ( ) <nl> <nl> def place ( ) : <nl> dt = 0 . 004 <nl> learning_rate = 25 <nl> <nl> + @ ti . kernel <nl> + def compute_center ( t : ti . i32 ) : <nl> + for _ in range ( 1 ) : <nl> + c = ti . Vector ( [ 0 . 0 , 0 . 0 ] ) <nl> + for i in ti . static ( range ( n_objects ) ) : <nl> + c + = x [ t , i ] <nl> + center [ t ] = ( 1 . 0 / n_objects ) * c <nl> + <nl> @ ti . kernel <nl> def nn1 ( t : ti . i32 ) : <nl> for i in range ( n_hidden ) : <nl> def nn1 ( t : ti . i32 ) : <nl> for j in ti . static ( range ( n_sin_waves ) ) : <nl> actuation + = weights1 [ i , j ] * ti . sin ( <nl> spring_omega * t * dt + 2 * math . pi / n_sin_waves * j ) <nl> + for j in ti . static ( range ( n_objects ) ) : <nl> + offset = x [ t , j ] - center [ t ] <nl> + # actuation + = weights1 [ i , j * 4 + n_sin_waves ] * offset [ 0 ] <nl> + # actuation + = weights1 [ i , j * 4 + n_sin_waves + 1 ] * offset [ 1 ] <nl> + actuation + = weights1 [ i , j * 4 + n_sin_waves + 2 ] * v [ t , i ] [ 0 ] * 0 . 1 <nl> + actuation + = weights1 [ i , j * 4 + n_sin_waves + 3 ] * v [ t , i ] [ 1 ] * 0 . 1 <nl> + actuation + = weights1 [ i , n_objects * 4 + n_sin_waves ] * ( goal [ 0 ] - center [ t ] [ 0 ] ) <nl> + actuation + = weights1 [ i , n_objects * 4 + n_sin_waves + 1 ] * ( goal [ 1 ] - center [ t ] [ 1 ] ) <nl> actuation + = bias1 [ i ] <nl> actuation = ti . tanh ( actuation ) <nl> hidden [ t , i ] = actuation <nl> def forward ( output = None , visualize = True ) : <nl> total_steps = steps if not output else steps * 2 <nl> <nl> for t in range ( 1 , total_steps ) : <nl> + compute_center ( t - 1 ) <nl> nn1 ( t - 1 ) <nl> nn2 ( t - 1 ) <nl> apply_spring_force ( t - 1 ) <nl> def optimize ( toi , visualize ) : <nl> global use_toi <nl> use_toi = toi <nl> for i in range ( n_hidden ) : <nl> - for j in range ( n_sin_waves ) : <nl> + for j in range ( n_input_states ( ) ) : <nl> weights1 [ i , j ] = np . random . randn ( ) * 0 . 1 <nl> <nl> for i in range ( n_springs ) : <nl> for j in range ( n_hidden ) : <nl> - weights2 [ i , j ] = np . random . randn ( ) * 0 . 1 <nl> + weights2 [ i , j ] = np . random . randn ( ) * 0 . 
3 <nl> <nl> losses = [ ] <nl> forward ( ' initial ' , visualize = visualize ) <nl> def optimize ( toi , visualize ) : <nl> <nl> total_norm_sqr = 0 <nl> for i in range ( n_springs ) : <nl> - for j in range ( n_sin_waves ) : <nl> + for j in range ( n_input_states ( ) ) : <nl> total_norm_sqr + = weights1 . grad [ i , j ] * * 2 <nl> total_norm_sqr + = bias1 . grad [ i ] * * 2 <nl> <nl> def optimize ( toi , visualize ) : <nl> gradient_clip = 0 . 1 <nl> scale = gradient_clip / ( total_norm_sqr * * 0 . 5 + 1e - 6 ) <nl> for i in range ( n_hidden ) : <nl> - for j in range ( n_sin_waves ) : <nl> + for j in range ( n_input_states ( ) ) : <nl> weights1 [ i , j ] - = scale * weights1 . grad [ i , j ] <nl> bias1 [ i ] - = scale * bias1 . grad [ i ] <nl> <nl>
object states as input
taichi-dev/taichi
f1d8e920d489b1ed90c2a14eb1cc92a744610032
2019-09-22T01:50:32Z
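The input layout implied by `n_input_states()` is: `n_sin_waves` time features, four slots per object (offset x/y, left commented out in the patch, plus damped velocity x/y), then two goal-minus-center features. A NumPy sketch of the same vector, filling all four per-object slots for clarity (the helper name is hypothetical):

```python
import math
import numpy as np

def controller_inputs(t, dt, omega, n_sin_waves, x, v, goal):
    """Build the n_sin_waves + 4 * n_objects + 2 feature vector."""
    center = x.mean(axis=0)                      # cf. compute_center()
    feats = [math.sin(omega * t * dt + 2 * math.pi / n_sin_waves * j)
             for j in range(n_sin_waves)]
    for pos, vel in zip(x, v):                   # four slots per object
        offset = pos - center
        feats += [offset[0], offset[1], 0.1 * vel[0], 0.1 * vel[1]]
    feats += [goal[0] - center[0], goal[1] - center[1]]
    return np.asarray(feats, dtype=np.float32)
```

Note that inside the per-object loop the patch reads `v[t, i]`, indexed by the hidden-unit counter, where the sketch uses the per-object velocity; the latter is presumably the intent.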
mmm a / tensorflow / python / keras / backend . py <nl> ppp b / tensorflow / python / keras / backend . py <nl> def random_uniform_variable ( shape , low , high , dtype = None , name = None , seed = None ) : <nl> Example : <nl> <nl> > > > kvar = tf . keras . backend . random_uniform_variable ( shape = ( 2 , 3 ) , <nl> - low = 0 . 0 , high = 1 . 0 ) <nl> + . . . low = 0 . 0 , high = 1 . 0 ) <nl> > > > kvar <nl> < tf . Variable ' Variable : 0 ' shape = ( 2 , 3 ) dtype = float32 , numpy = . . . , <nl> dtype = float32 ) > <nl> def random_normal_variable ( shape , mean , scale , dtype = None , name = None , <nl> Example : <nl> <nl> > > > kvar = tf . keras . backend . random_normal_variable ( shape = ( 2 , 3 ) , <nl> - mean = 0 . 0 , scale = 1 . 0 ) <nl> + . . . mean = 0 . 0 , scale = 1 . 0 ) <nl> > > > kvar <nl> < tf . Variable ' Variable : 0 ' shape = ( 2 , 3 ) dtype = float32 , numpy = . . . , <nl> dtype = float32 ) > <nl> def random_uniform ( shape , minval = 0 . 0 , maxval = 1 . 0 , dtype = None , seed = None ) : <nl> Example : <nl> <nl> > > > random_uniform_tensor = tf . keras . backend . random_uniform ( shape = ( 2 , 3 ) , <nl> - minval = 0 . 0 , maxval = 1 . 0 ) <nl> + . . . minval = 0 . 0 , maxval = 1 . 0 ) <nl> > > > random_uniform_tensor <nl> < tf . Tensor : shape = ( 2 , 3 ) , dtype = float32 , numpy = . . . , <nl> dtype = float32 ) > <nl> def random_binomial ( shape , p = 0 . 0 , dtype = None , seed = None ) : <nl> Example : <nl> <nl> > > > random_binomial_tensor = tf . keras . backend . random_binomial ( shape = ( 2 , 3 ) , <nl> - p = 0 . 5 ) <nl> + . . . p = 0 . 5 ) <nl> > > > random_binomial_tensor <nl> < tf . Tensor : shape = ( 2 , 3 ) , dtype = float32 , numpy = . . . , <nl> dtype = float32 ) > <nl>
use correct indent for multiline doctest
tensorflow/tensorflow
c826dad7f49869eef62777c4ca386ee3f988fe70
2020-03-20T01:26:19Z
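The change is mechanical but easy to get wrong: doctest treats any line in an example that does not begin with `>>> ` or `... ` as expected output, so a call continued across lines must carry the `...` prompt. A self-contained illustration:

```python
def spread(n, low, high):
    """Return n evenly spaced values from low to high.

    >>> spread(3,
    ...        low=0.0, high=1.0)
    [0.0, 0.5, 1.0]
    """
    step = (high - low) / (n - 1)
    return [low + i * step for i in range(n)]

if __name__ == "__main__":
    import doctest
    doctest.testmod()   # passes; drop the '...' prompt and it fails
```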
mmm a / atom / renderer / atom_renderer_client . cc <nl> ppp b / atom / renderer / atom_renderer_client . cc <nl> class AtomRenderFrameObserver : public content : : RenderFrameObserver { <nl> AtomRendererClient : : AtomRendererClient ( ) <nl> : node_bindings_ ( NodeBindings : : Create ( false ) ) , <nl> atom_bindings_ ( new AtomRendererBindings ) , <nl> - main_frame_ ( nullptr ) { <nl> + main_frame_ ( nullptr ) , <nl> + is_initialized_ ( false ) { <nl> } <nl> <nl> AtomRendererClient : : ~ AtomRendererClient ( ) { <nl> void AtomRendererClient : : DidCreateScriptContext ( blink : : WebFrame * frame , <nl> / / The first web frame is the main frame . <nl> main_frame_ = frame ; <nl> <nl> - v8 : : Context : : Scope scope ( context ) ; <nl> - <nl> - / / Check the existance of process object to prevent duplicate initialization . <nl> - if ( context - > Global ( ) - > Has ( <nl> - mate : : StringToV8 ( context - > GetIsolate ( ) , " process " ) ) ) <nl> - return ; <nl> - <nl> / / Give the node loop a run to make sure everything is ready . <nl> node_bindings_ - > RunMessageLoop ( ) ; <nl> <nl> void AtomRendererClient : : DidCreateScriptContext ( blink : : WebFrame * frame , <nl> / / Make uv loop being wrapped by window context . <nl> if ( node_bindings_ - > uv_env ( ) = = nullptr ) <nl> node_bindings_ - > set_uv_env ( env ) ; <nl> - <nl> - / / Load everything . <nl> - node_bindings_ - > LoadEnvironment ( env ) ; <nl> } <nl> <nl> void AtomRendererClient : : DidClearWindowObject ( ) { <nl> + if ( ! main_frame_ | | is_initialized_ ) <nl> + return ; <nl> + <nl> + is_initialized_ = true ; <nl> + <nl> + v8 : : Local < v8 : : Context > context = main_frame_ - > mainWorldScriptContext ( ) ; <nl> + v8 : : Context : : Scope scope ( context ) ; <nl> + <nl> + node : : Environment * env = node : : Environment : : GetCurrent ( context ) ; <nl> + DCHECK ( env ) ; <nl> + <nl> + / / Load everything . <nl> + node_bindings_ - > LoadEnvironment ( env ) ; <nl> } <nl> <nl> bool AtomRendererClient : : ShouldFork ( blink : : WebFrame * frame , <nl> mmm a / atom / renderer / atom_renderer_client . h <nl> ppp b / atom / renderer / atom_renderer_client . h <nl> class AtomRendererClient : public content : : ContentRendererClient , <nl> / / The main frame . <nl> blink : : WebFrame * main_frame_ ; <nl> <nl> + / / Whether we have already initialized . <nl> + bool is_initialized_ ; <nl> + <nl> DISALLOW_COPY_AND_ASSIGN ( AtomRendererClient ) ; <nl> } ; <nl> <nl> mmm a / atom / renderer / lib / init . coffee <nl> ppp b / atom / renderer / lib / init . coffee <nl> if nodeIntegration in [ ' true ' , ' all ' , ' except - iframe ' , ' manual - enable - iframe ' ] <nl> window . addEventListener ' unload ' , - > <nl> process . emit ' exit ' <nl> else <nl> - delete global . process <nl> - delete global . setImmediate <nl> - delete global . clearImmediate <nl> + # The Module . runMain will run process . _tickCallck ( ) immediately , so we are <nl> + # able to delete the symbols in this tick even though we used process . nextTick <nl> + # to schedule it . <nl> + # It is important that we put this in process . nextTick , if we delete them now <nl> + # some code in node . js will complain about " process not defined " . <nl> + process . nextTick - > <nl> + delete global . process <nl> + delete global . setImmediate <nl> + delete global . clearImmediate <nl> <nl> # Load the script specfied by the " preload " attribute . <nl> if preloadScript <nl>
Initialize node integration after window object is cleared
electron/electron
ef15b670a93008187cec96bd3226378377c35225
2015-01-22T00:40:19Z
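Stripped of the Blink and Node specifics, the patch converts eager set-up into run-once deferred initialization: the early callback only records state, and the heavy work happens in a later callback guarded by a flag. A schematic of the pattern, with all names illustrative:

```python
class RendererClient:
    def __init__(self):
        self.main_frame = None
        self.is_initialized = False        # cf. is_initialized_

    def did_create_script_context(self, frame):
        # Early hook: remember the main frame, defer the loading.
        if self.main_frame is None:
            self.main_frame = frame

    def did_clear_window_object(self):
        # Late hook: the window object is ready now; run exactly once.
        if self.main_frame is None or self.is_initialized:
            return
        self.is_initialized = True
        self.load_environment(self.main_frame)

    def load_environment(self, frame):
        print("loading environment into", frame)
```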
mmm a / hphp / runtime / ext / ext_spl . cpp <nl> ppp b / hphp / runtime / ext / ext_spl . cpp <nl> Variant f_class_uses ( const Variant & obj , bool autoload / * = true * / ) { <nl> return ret ; <nl> } <nl> <nl> - Object get_traversable_object_iterator ( const Variant & obj ) { <nl> - if ( ! obj . isObject ( ) | | <nl> - ! obj . getObjectData ( ) - > instanceof ( SystemLib : : s_TraversableClass ) ) { <nl> - raise_error ( " Argument must implement interface Traversable " ) ; <nl> + # define CHECK_TRAVERSABLE_IMPL ( obj , ret ) \ <nl> + if ( ! obj . isObject ( ) | | \ <nl> + ! obj . getObjectData ( ) - > instanceof ( SystemLib : : s_TraversableClass ) ) { \ <nl> + raise_recoverable_error ( " Argument must implement interface Traversable " ) ; \ <nl> + return ret ; \ <nl> } <nl> <nl> + Object get_traversable_object_iterator ( const Variant & obj ) { <nl> bool isIteratorAggregate ; <nl> Object itObj = obj . getObjectData ( ) <nl> - > iterableObject ( isIteratorAggregate , true ) ; <nl> Object get_traversable_object_iterator ( const Variant & obj ) { <nl> <nl> Variant f_iterator_apply ( const Variant & obj , const Variant & func , <nl> const Array & params / * = null_array * / ) { <nl> + CHECK_TRAVERSABLE_IMPL ( obj , 0 ) ; <nl> Object pobj = get_traversable_object_iterator ( obj ) ; <nl> pobj - > o_invoke_few_args ( s_rewind , 0 ) ; <nl> int64_t count = 0 ; <nl> Variant f_iterator_apply ( const Variant & obj , const Variant & func , <nl> } <nl> <nl> Variant f_iterator_count ( const Variant & obj ) { <nl> + CHECK_TRAVERSABLE_IMPL ( obj , 0 ) ; <nl> Object pobj = get_traversable_object_iterator ( obj ) ; <nl> pobj - > o_invoke_few_args ( s_rewind , 0 ) ; <nl> int64_t count = 0 ; <nl> Variant f_iterator_count ( const Variant & obj ) { <nl> } <nl> <nl> Variant f_iterator_to_array ( const Variant & obj , bool use_keys / * = true * / ) { <nl> - Object pobj = get_traversable_object_iterator ( obj ) ; <nl> Array ret ( Array : : Create ( ) ) ; <nl> + CHECK_TRAVERSABLE_IMPL ( obj , ret ) ; <nl> + Object pobj = get_traversable_object_iterator ( obj ) ; <nl> <nl> pobj - > o_invoke_few_args ( s_rewind , 0 ) ; <nl> while ( same ( pobj - > o_invoke_few_args ( s_valid , 0 ) , true ) ) { <nl> mmm a / hphp / test / slow / iterator / iterator_to_array . php . expectf <nl> ppp b / hphp / test / slow / iterator / iterator_to_array . php . expectf <nl> array ( 3 ) { <nl> int ( 3 ) <nl> } <nl> <nl> - Fatal error : Argument must implement interface Traversable in % s / test / slow / iterator / iterator_to_array . php on line 11 <nl> + Catchable fatal error : Argument must implement interface Traversable in % s / test / slow / iterator / iterator_to_array . php on line 11 <nl> similarity index 100 % <nl> rename from hphp / test / zend / bad / ext / spl / tests / iterator_count . php <nl> rename to hphp / test / zend / good / ext / spl / tests / iterator_count . php <nl> similarity index 100 % <nl> rename from hphp / test / zend / bad / ext / spl / tests / iterator_count . php . expectf <nl> rename to hphp / test / zend / good / ext / spl / tests / iterator_count . php . expectf <nl> similarity index 100 % <nl> rename from hphp / test / zend / bad / ext / spl / tests / iterator_to_array . php <nl> rename to hphp / test / zend / good / ext / spl / tests / iterator_to_array . php <nl> similarity index 100 % <nl> rename from hphp / test / zend / bad / ext / spl / tests / iterator_to_array . php . expectf <nl> rename to hphp / test / zend / good / ext / spl / tests / iterator_to_array . php . expectf <nl>
SPL object not implementing Traversable is now a recoverable error
facebook/hhvm
a664efeebc9468dfa0e818ae4265adc374d38ef6
2014-10-20T18:00:27Z
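`CHECK_TRAVERSABLE_IMPL` bundles two behaviours: signal an error the runtime can recover from, and hand back a call-site default (0 for the counting functions, an empty array for `iterator_to_array`) so execution can continue. A rough Python analogue of the same shape, with a warning standing in for the recoverable error:

```python
import warnings

def _check_traversable(obj, default):
    """Complain catchably and supply a per-call-site default."""
    if not hasattr(obj, "__iter__"):
        warnings.warn("Argument must implement interface Traversable",
                      RuntimeWarning)
        return False, default
    return True, None

def iterator_count(obj):
    ok, default = _check_traversable(obj, 0)
    return sum(1 for _ in obj) if ok else default

def iterator_to_array(obj):
    ok, default = _check_traversable(obj, [])
    return list(obj) if ok else default
```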
mmm a / ports / xlnt / CONTROL <nl> ppp b / ports / xlnt / CONTROL <nl> <nl> Source : xlnt <nl> - Version : 0 . 9 . 1 <nl> + Version : 0 . 9 . 4 <nl> Description : Cross - platform user - friendly xlsx library for C + + 14 <nl> + Depends : zlib cryptopp expat <nl> mmm a / ports / xlnt / portfile . cmake <nl> ppp b / ports / xlnt / portfile . cmake <nl> include ( vcpkg_common_functions ) <nl> find_program ( GIT git ) <nl> <nl> set ( GIT_URL " https : / / github . com / tfussell / xlnt . git " ) <nl> - set ( GIT_REV " c43561b4bdfb66caa78e2acfb7d513c0810cd4b0 " ) <nl> + set ( GIT_REV " f908dca6fe342f44b814073947aaccc69df7c9b6 " ) <nl> <nl> if ( NOT EXISTS " $ { DOWNLOADS } / xlnt . git " ) <nl> message ( STATUS " Cloning " ) <nl>
bump xlnt version, add dependencies to control file, point at latest commit
microsoft/vcpkg
23ab2a0f9769a2744b4bd125a756a617c8865d37
2017-03-12T22:38:52Z
mmm a / swoole_client_coro . c <nl> ppp b / swoole_client_coro . c <nl> static void client_onClose ( swClient * cli ) <nl> php_swoole_client_coro_free ( zobject , cli TSRMLS_CC ) ; <nl> } <nl> client_execute_callback ( zobject , SW_CLIENT_CB_onClose ) ; <nl> + # if PHP_MAJOR_VERSION < 7 <nl> + sw_zval_ptr_dtor ( & zobject ) ; <nl> + # endif <nl> } <nl> <nl> static void client_onError ( swClient * cli ) <nl> static PHP_METHOD ( swoole_client_coro , connect ) <nl> sw_copy_to_stack ( cli - > object , ccp - > _object ) ; <nl> # endif <nl> <nl> + # if PHP_MAJOR_VERSION < 7 <nl> + sw_zval_add_ref ( & zobject ) ; <nl> + # endif <nl> + <nl> cli - > timeout = timeout ; <nl> / / nonblock async <nl> if ( cli - > connect ( cli , host , port , timeout , sock_flag ) < 0 ) <nl> mmm a / swoole_http_client_coro . c <nl> ppp b / swoole_http_client_coro . c <nl> static int http_client_coro_execute ( zval * zobject , char * uri , zend_size_t uri_le <nl> return SW_ERR ; <nl> } <nl> <nl> + # if PHP_MAJOR_VERSION < 7 <nl> + sw_zval_add_ref ( & zobject ) ; <nl> + # endif <nl> + <nl> cli - > object = zobject ; <nl> / / sw_copy_to_stack ( cli - > object , hcc - > _object ) ; <nl> cli - > open_eof_check = 0 ; <nl> void swoole_http_client_coro_init ( int module_number TSRMLS_DC ) <nl> * / <nl> static void http_client_coro_onClose ( swClient * cli ) <nl> { <nl> + zval * zobject = cli - > object ; <nl> + # if PHP_MAJOR_VERSION < 7 <nl> + sw_zval_ptr_dtor ( & zobject ) ; <nl> + # endif <nl> return ; <nl> } <nl> <nl> mmm a / swoole_mysql_coro . c <nl> ppp b / swoole_mysql_coro . c <nl> static zend_bool swoole_mysql_coro_close ( zval * this ) <nl> client - > state = SW_MYSQL_STATE_CLOSED ; <nl> client - > iowait = SW_MYSQL_CORO_STATUS_CLOSED ; <nl> <nl> + # if PHP_MAJOR_VERSION < 7 <nl> + sw_zval_ptr_dtor ( & object ) ; <nl> + # endif <nl> return SUCCESS ; <nl> } <nl> <nl> static PHP_METHOD ( swoole_mysql_coro , connect ) <nl> client - > cli = cli ; <nl> sw_copy_to_stack ( client - > object , client - > _object ) ; <nl> <nl> - swConnection * _socket = swReactor_get ( SwooleG . main_reactor , cli - > socket - > fd ) ; <nl> + # if PHP_MAJOR_VERSION < 7 <nl> + sw_zval_add_ref ( & client - > object ) ; <nl> + # endif <nl> + <nl> + swConnection * _socket = swReactor_get ( SwooleG . main_reactor , cli - > socket - > fd ) ; <nl> _socket - > object = client ; <nl> - _socket - > active = 0 ; <nl> + _socket - > active = 0 ; <nl> <nl> php_context * context = swoole_get_property ( getThis ( ) , 0 ) ; <nl> if ( ! context ) <nl> mmm a / swoole_redis_coro . c <nl> ppp b / swoole_redis_coro . c <nl> static PHP_METHOD ( swoole_redis_coro , connect ) <nl> RETURN_FALSE ; <nl> } <nl> <nl> + # if PHP_MAJOR_VERSION < 7 <nl> + sw_zval_add_ref ( & getThis ( ) ) ; <nl> + # endif <nl> swConnection * conn = swReactor_get ( SwooleG . main_reactor , redis - > context - > c . fd ) ; <nl> conn - > object = redis ; <nl> php_context * sw_current_context = swoole_get_property ( getThis ( ) , 0 ) ; <nl> static PHP_METHOD ( swoole_redis_coro , close ) <nl> } <nl> redis - > context - > replies . head = NULL ; <nl> redisAsyncDisconnect ( redis - > context ) ; <nl> - <nl> - RETURN_TRUE ; <nl> + RETURN_TRUE ; <nl> } <nl> <nl> static PHP_METHOD ( swoole_redis_coro , __destruct ) <nl> static void swoole_redis_coro_onClose ( const redisAsyncContext * c , int status ) <nl> swRedisClient * redis = c - > ev . 
data ; <nl> redis - > state = SWOOLE_REDIS_CORO_STATE_CLOSED ; <nl> redis - > context = NULL ; <nl> + <nl> + # if PHP_MAJOR_VERSION < 7 <nl> + sw_zval_ptr_dtor ( & redis - > object ) ; <nl> + # endif <nl> } <nl> <nl> static void swoole_redis_coro_event_AddRead ( void * privdata ) <nl>
add ref in php5 when calling client connect
swoole/swoole-src
0d31cdd485e74e9d66b8198ff901bf540c87d0a7
2017-01-21T09:51:09Z
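The pattern behind this patch: take an extra reference on the client object when the async connect starts, so it outlives the calling scope, and release that reference in the close/error callback. A minimal C++ sketch of the same lifetime idea, with shared_ptr standing in for PHP5's manual zval refcounting (the real code uses sw_zval_add_ref / sw_zval_ptr_dtor); the Client type here is hypothetical:

```cpp
#include <memory>

// Hypothetical async client, used only to illustrate the lifetime pattern;
// it stands in for the PHP5 zval whose refcount the real patch manipulates.
struct Client : std::enable_shared_from_this<Client> {
    std::shared_ptr<Client> self_;  // the extra reference held while open

    void connect() {
        // Take an extra reference for the duration of the connection,
        // mirroring sw_zval_add_ref() at connect time.
        self_ = shared_from_this();
    }

    void on_close() {
        // Release the extra reference -- the sw_zval_ptr_dtor() call that
        // the patch adds to the onClose/onError handlers.
        self_.reset();
    }
};

int main() {
    auto client = std::make_shared<Client>();
    client->connect();   // refcount 2: caller + the open connection
    client->on_close();  // back to 1; object dies when 'client' goes away
}
```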
mmm a / data / gui . xml <nl> ppp b / data / gui . xml <nl> <nl> < key command = " CopyMerged " shortcut = " Ctrl + Shift + C " mac = " Cmd + Shift + C " / > <nl> < key command = " Paste " shortcut = " Ctrl + V " mac = " Cmd + V " / > <nl> < key command = " Paste " shortcut = " Shift + Ins " / > <nl> - < key command = " NewLayer " shortcut = " Ctrl + Shift + V " mac = " Cmd + Shift + V " > <nl> - < param name = " fromClipboard " value = " true " / > <nl> - < / key > <nl> < key command = " Clear " shortcut = " Del " / > <nl> < key command = " Clear " shortcut = " Backspace " / > <nl> < key command = " Fill " shortcut = " F " / > <nl> <nl> < key command = " NewLayer " shortcut = " Alt + Shift + N " > <nl> < param name = " group " value = " true " / > <nl> < / key > <nl> + < key command = " NewLayer " shortcut = " Ctrl + Shift + V " mac = " Cmd + Shift + V " > <nl> + < param name = " fromClipboard " value = " true " / > <nl> + < / key > <nl> + < key command = " NewLayer " shortcut = " Ctrl + J " mac = " Cmd + J " > <nl> + < param name = " viaCopy " value = " true " / > <nl> + < / key > <nl> + < key command = " NewLayer " shortcut = " Ctrl + Shift + J " mac = " Cmd + Shift + J " > <nl> + < param name = " viaCut " value = " true " / > <nl> + < / key > <nl> < key command = " GotoPreviousLayer " shortcut = " Down " context = " Normal " / > <nl> < key command = " GotoNextLayer " shortcut = " Up " context = " Normal " / > <nl> < ! - - Frame - - > <nl> <nl> < item command = " LayerLock " text = " @ . layer_lock_layers " / > <nl> < item command = " OpenGroup " text = " @ . layer_open_group " / > <nl> < separator / > <nl> - < item command = " NewLayer " text = " @ . layer_new_layer " / > <nl> - < item command = " NewLayer " text = " @ . layer_new_group " > <nl> - < param name = " group " value = " true " / > <nl> - < / item > <nl> + < menu text = " @ . layer_new " > <nl> + < item command = " NewLayer " text = " @ . layer_new_layer " / > <nl> + < item command = " NewLayer " text = " @ . layer_new_group " > <nl> + < param name = " group " value = " true " / > <nl> + < / item > <nl> + < separator / > <nl> + < item command = " NewLayer " text = " @ . layer_new_layer_from_clipboard " > <nl> + < param name = " fromClipboard " value = " true " / > <nl> + < / item > <nl> + < item command = " NewLayer " text = " @ . layer_new_layer_via_copy " > <nl> + < param name = " viaCopy " value = " true " / > <nl> + < / item > <nl> + < item command = " NewLayer " text = " @ . layer_new_layer_via_cut " > <nl> + < param name = " viaCut " value = " true " / > <nl> + < / item > <nl> + < separator / > <nl> + < item command = " NewLayer " text = " @ . layer_new_reference_layer_from_file " > <nl> + < param name = " reference " value = " true " / > <nl> + < param name = " fromFile " value = " true " / > <nl> + < / item > <nl> + < item command = " NewLayer " text = " @ . layer_new_reference_layer_from_clipboard " > <nl> + < param name = " reference " value = " true " / > <nl> + < param name = " fromClipboard " value = " true " / > <nl> + < / item > <nl> + < / menu > <nl> < item command = " RemoveLayer " text = " @ . layer_delete_layer " / > <nl> < item command = " BackgroundFromLayer " text = " @ . layer_background_from_layer " / > <nl> < item command = " LayerFromBackground " text = " @ . layer_layer_from_background " / > <nl> <nl> < item command = " FlattenLayers " text = " @ . 
layer_flatten_visible " > <nl> < param name = " visibleOnly " value = " true " / > <nl> < / item > <nl> - < separator / > <nl> - < item command = " NewLayer " text = " @ . layer_add_reference_layer " > <nl> - < param name = " reference " value = " true " / > <nl> - < param name = " fromFile " value = " true " / > <nl> - < / item > <nl> < / menu > <nl> < menu text = " @ . frame " > <nl> < item command = " FrameProperties " text = " @ . frame_properties " > <nl> mmm a / data / strings / en . ini <nl> ppp b / data / strings / en . ini <nl> AddColor_Specific = Specific <nl> AdvancedMode = Advanced Mode <nl> AutocropSprite = Trim Sprite <nl> AutocropSprite_ByGrid = Trim Sprite by Grid <nl> - BackgroundFromLayer = Background From Layer <nl> + BackgroundFromLayer = Background from Layer <nl> BrightnessContrast = Adjust Brightness / Contrast <nl> Cancel = Cancel Current Operation <nl> CanvasSize = Canvas Size <nl> InvertColor = Invert Color <nl> InvertMask = Invert Selection <nl> KeyboardShortcuts = Keyboard Shortcuts <nl> Launch = Launch <nl> - LayerFromBackground = Layer From Background <nl> + LayerFromBackground = Layer from Background <nl> LayerLock = Lock Layers <nl> LayerOpacity = Set Layer Opacity to { 0 } ( { 1 } % ) <nl> LayerProperties = Layer Properties <nl> MoveMask_Boundaries = Selection Boundaries <nl> MoveMask_Content = Selection Content <nl> NewBrush = New Brush <nl> NewFile = New File <nl> - NewFile_FromClipboard = New File ( From Clipboard ) <nl> + NewFile_FromClipboard = New File from Clipboard <nl> NewFrame = New Frame <nl> NewFrame_NewEmptyFrame = New Empty Frame <nl> NewFrame_DuplicateCels = Duplicate Linked Cels <nl> NewLayer_BeforeActiveLayer = New { } Below <nl> NewLayer_Layer = Layer <nl> NewLayer_Group = Group <nl> NewLayer_ReferenceLayer = Reference Layer <nl> - NewLayer_FromClipboard = { } ( From Clipboard ) <nl> - NewSpriteFromSelection = New Sprite From Selection <nl> + NewLayer_FromClipboard = { } from Clipboard <nl> + NewLayer_ViaCopy = { } via Copy <nl> + NewLayer_ViaCut = { } via Cut <nl> + NewSpriteFromSelection = New Sprite from Selection <nl> OpenBrowser = Open Browser <nl> OpenFile = Open Sprite <nl> OpenGroup = Open / Close Group <nl> edit_shift_right = & Right <nl> edit_shift_up = & Up <nl> edit_shift_down = & Down <nl> edit_new_brush = New & Brush <nl> - edit_new_sprite_from_selection = & New Sprite from Selection <nl> + edit_new_sprite_from_selection = & New Sprite From Selection <nl> edit_replace_color = R & eplace Color . . . <nl> edit_invert_color = & Invert . . . <nl> edit_adjustments = Ad & justments <nl> layer_properties = & Properties . . . <nl> layer_visible = & Visible <nl> layer_lock_layers = Loc & k Layers <nl> layer_open_group = & Open Group <nl> + layer_new = & New . . . 
<nl> layer_new_layer = & New Layer <nl> layer_new_group = New & Group <nl> + layer_new_layer_from_clipboard = New Layer from Clip & board <nl> + layer_new_layer_via_copy = New Layer via & Copy <nl> + layer_new_layer_via_cut = New Layer via Cu & t <nl> + layer_new_reference_layer_from_file = New & Reference Layer from File <nl> + layer_new_reference_layer_from_clipboard = New R & eference Layer from Clipboard <nl> layer_delete_layer = Delete Laye & r <nl> layer_background_from_layer = & Background from Layer <nl> layer_layer_from_background = & Layer from Background <nl> layer_duplicate = & Duplicate <nl> layer_merge_down = & Merge Down <nl> layer_flatten = & Flatten <nl> layer_flatten_visible = Flatten Vi & sible <nl> - layer_add_reference_layer = Add R & eference Layer <nl> frame = F & rame <nl> frame_properties = Frame & Properties . . . <nl> frame_cel_properties = & Cel Properties . . . <nl> mmm a / src / app / commands / cmd_new_layer . cpp <nl> ppp b / src / app / commands / cmd_new_layer . cpp <nl> <nl> # endif <nl> <nl> # include " app / app . h " <nl> + # include " app / cmd / clear_mask . h " <nl> # include " app / cmd / move_layer . h " <nl> + # include " app / cmd / trim_cel . h " <nl> # include " app / commands / command . h " <nl> # include " app / commands / commands . h " <nl> # include " app / commands / new_params . h " <nl> <nl> # include " app / ui / status_bar . h " <nl> # include " app / ui_context . h " <nl> # include " app / util / clipboard . h " <nl> + # include " app / util / new_image_from_mask . h " <nl> # include " doc / layer . h " <nl> # include " doc / primitives . h " <nl> # include " doc / sprite . h " <nl> struct NewLayerParams : public NewParams { <nl> Param < bool > ask { this , false , " ask " } ; <nl> Param < bool > fromFile { this , false , { " fromFile " , " from - file " } } ; <nl> Param < bool > fromClipboard { this , false , " fromClipboard " } ; <nl> + Param < bool > viaCut { this , false , " viaCut " } ; <nl> + Param < bool > viaCopy { this , false , " viaCopy " } ; <nl> Param < bool > top { this , false , " top " } ; <nl> Param < bool > before { this , false , " before " } ; <nl> } ; <nl> class NewLayerCommand : public CommandWithNewParams < NewLayerParams > { <nl> std : : string onGetFriendlyName ( ) const override ; <nl> <nl> private : <nl> + void adjustRefCelBounds ( Cel * cel , gfx : : RectF bounds ) ; <nl> std : : string getUniqueLayerName ( const Sprite * sprite ) const ; <nl> int getMaxLayerNum ( const Layer * layer ) const ; <nl> std : : string layerPrefix ( ) const ; <nl> void NewLayerCommand : : onLoadParams ( const Params & commandParams ) <nl> <nl> bool NewLayerCommand : : onEnabled ( Context * context ) <nl> { <nl> - return context - > checkFlags ( ContextFlags : : ActiveDocumentIsWritable | <nl> - ContextFlags : : HasActiveSprite ) <nl> - & & ( ! params ( ) . fromClipboard ( ) <nl> + if ( ! context - > checkFlags ( ContextFlags : : ActiveDocumentIsWritable | <nl> + ContextFlags : : HasActiveSprite ) ) <nl> + return false ; <nl> + <nl> # ifdef ENABLE_UI <nl> - | | ( clipboard : : get_current_format ( ) = = clipboard : : ClipboardImage ) <nl> + if ( params ( ) . fromClipboard ( ) & & <nl> + clipboard : : get_current_format ( ) ! = clipboard : : ClipboardImage ) <nl> + return false ; <nl> # endif <nl> - ) ; <nl> + <nl> + if ( ( params ( ) . viaCut ( ) | | <nl> + params ( ) . viaCopy ( ) ) & & <nl> + ! 
context - > checkFlags ( ContextFlags : : HasVisibleMask ) ) <nl> + return false ; <nl> + <nl> + return true ; <nl> } <nl> <nl> namespace { <nl> class Scoped { / / TODO move this to base library <nl> void NewLayerCommand : : onExecute ( Context * context ) <nl> { <nl> ContextWriter writer ( context ) ; <nl> + Site site = context - > activeSite ( ) ; <nl> Doc * document ( writer . document ( ) ) ; <nl> Sprite * sprite ( writer . sprite ( ) ) ; <nl> std : : string name ; <nl> void NewLayerCommand : : onExecute ( Context * context ) <nl> <nl> LayerGroup * parent = sprite - > root ( ) ; <nl> Layer * activeLayer = writer . layer ( ) ; <nl> - SelectedLayers selLayers = writer . site ( ) - > selectedLayers ( ) ; <nl> + SelectedLayers selLayers = site . selectedLayers ( ) ; <nl> if ( activeLayer ) { <nl> if ( activeLayer - > isGroup ( ) & & <nl> activeLayer - > isExpanded ( ) & & <nl> void NewLayerCommand : : onExecute ( Context * context ) <nl> } <nl> <nl> / / Put all selected layers inside the group <nl> - if ( m_type = = Type : : Group & & writer . site ( ) - > inTimeline ( ) ) { <nl> + if ( m_type = = Type : : Group & & site . inTimeline ( ) ) { <nl> LayerGroup * commonParent = nullptr ; <nl> layer_t sameParents = 0 ; <nl> for ( Layer * l : selLayers ) { <nl> void NewLayerCommand : : onExecute ( Context * context ) <nl> <nl> if ( cel ) { <nl> if ( layer - > isReference ( ) ) { <nl> - gfx : : RectF bounds ( 0 , 0 , pasteSpr - > width ( ) , pasteSpr - > height ( ) ) ; <nl> - double scale = MIN ( double ( sprite - > width ( ) ) / bounds . w , <nl> - double ( sprite - > height ( ) ) / bounds . h ) ; <nl> - bounds . w * = scale ; <nl> - bounds . h * = scale ; <nl> - bounds . x = sprite - > width ( ) / 2 - bounds . w / 2 ; <nl> - bounds . y = sprite - > height ( ) / 2 - bounds . h / 2 ; <nl> - cel - > setBoundsF ( bounds ) ; <nl> + adjustRefCelBounds ( <nl> + cel , gfx : : RectF ( 0 , 0 , pasteSpr - > width ( ) , pasteSpr - > height ( ) ) ) ; <nl> } <nl> else { <nl> cel - > setPosition ( sprite - > width ( ) / 2 - pasteSpr - > width ( ) / 2 , <nl> void NewLayerCommand : : onExecute ( Context * context ) <nl> / / Paste new layer from clipboard <nl> else if ( params ( ) . fromClipboard ( ) & & layer - > isImage ( ) ) { <nl> clipboard : : paste ( context , false ) ; <nl> + <nl> + if ( layer - > isReference ( ) ) { <nl> + if ( Cel * cel = layer - > cel ( site . frame ( ) ) ) { <nl> + adjustRefCelBounds ( <nl> + cel , cel - > boundsF ( ) ) ; <nl> + } <nl> + } <nl> } <nl> # endif / / ENABLE_UI <nl> + / / Paste new layer from selection <nl> + else if ( ( params ( ) . viaCut ( ) | | params ( ) . viaCopy ( ) ) <nl> + & & layer - > isImage ( ) <nl> + & & document - > isMaskVisible ( ) ) { <nl> + const doc : : Mask * mask = document - > mask ( ) ; <nl> + ASSERT ( mask ) ; <nl> + ImageRef image ( new_image_from_mask ( site , mask , true ) ) ; <nl> + if ( image ) { <nl> + Cel * cel = api . addCel ( static_cast < LayerImage * > ( layer ) , <nl> + site . frame ( ) , image ) ; <nl> + if ( cel ) { <nl> + gfx : : Point pos = mask - > bounds ( ) . origin ( ) ; <nl> + cel - > setPosition ( pos . x , pos . y ) ; <nl> + } <nl> + <nl> + if ( params ( ) . viaCut ( ) & & <nl> + site . cel ( ) & & site . layer ( ) ) { <nl> + tx ( new cmd : : ClearMask ( site . cel ( ) ) ) ; <nl> + <nl> + if ( site . layer ( ) - > isTransparent ( ) ) { <nl> + / / If the cel wasn ' t deleted by cmd : : ClearMask , we trim it . <nl> + cel = site . layer ( ) - > cel ( site . 
frame ( ) ) ; <nl> + if ( cel ) <nl> + tx ( new cmd : : TrimCel ( cel ) ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> <nl> tx . commit ( ) ; <nl> } <nl> std : : string NewLayerCommand : : onGetFriendlyName ( ) const <nl> text = fmt : : format ( Strings : : commands_NewLayer ( ) , layerPrefix ( ) ) ; <nl> if ( params ( ) . fromClipboard ( ) ) <nl> text = fmt : : format ( Strings : : commands_NewLayer_FromClipboard ( ) , text ) ; <nl> + if ( params ( ) . viaCopy ( ) ) <nl> + text = fmt : : format ( Strings : : commands_NewLayer_ViaCopy ( ) , text ) ; <nl> + if ( params ( ) . viaCut ( ) ) <nl> + text = fmt : : format ( Strings : : commands_NewLayer_ViaCut ( ) , text ) ; <nl> return text ; <nl> } <nl> <nl> + void NewLayerCommand : : adjustRefCelBounds ( Cel * cel , gfx : : RectF bounds ) <nl> + { <nl> + Sprite * sprite = cel - > sprite ( ) ; <nl> + double scale = MIN ( double ( sprite - > width ( ) ) / bounds . w , <nl> + double ( sprite - > height ( ) ) / bounds . h ) ; <nl> + bounds . w * = scale ; <nl> + bounds . h * = scale ; <nl> + bounds . x = sprite - > width ( ) / 2 - bounds . w / 2 ; <nl> + bounds . y = sprite - > height ( ) / 2 - bounds . h / 2 ; <nl> + cel - > setBoundsF ( bounds ) ; <nl> + } <nl> + <nl> std : : string NewLayerCommand : : getUniqueLayerName ( const Sprite * sprite ) const <nl> { <nl> return fmt : : format ( " { } { } " , <nl>
Add New Layer via Cut/Copy commands (fix)
aseprite/aseprite
757fadeaf0617d4a52c421f81b43b487565d2a8b
2019-07-02T18:28:05Z
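In outline, the new viaCopy/viaCut variants lift the current selection into a cel on the freshly created layer; the cut variant additionally clears the mask from the source cel (cmd::ClearMask) and, on transparent layers, trims what remains (cmd::TrimCel). A toy sketch of that flow follows; all types and helpers here are stand-ins, not Aseprite's real API:

```cpp
#include <memory>
#include <utility>
#include <vector>

// Stand-in document model, only detailed enough to show the flow.
struct Image {};
struct Cel {
    std::shared_ptr<Image> image;
    int x = 0, y = 0;
};
struct Layer {
    std::vector<std::unique_ptr<Cel>> cels;
};

// New-layer-via-copy / via-cut, per the command above (illustrative).
void newLayerFromSelection(Layer& newLayer, Cel& source,
                           std::shared_ptr<Image> maskedPixels,
                           int maskX, int maskY, bool viaCut)
{
    // Place a copy of the selected pixels on the new layer, keeping the
    // selection's on-canvas position (cel->setPosition in the real code).
    auto cel = std::make_unique<Cel>();
    cel->image = std::move(maskedPixels);
    cel->x = maskX;
    cel->y = maskY;
    newLayer.cels.push_back(std::move(cel));

    if (viaCut) {
        // Cut: erase the selection from the source cel (cmd::ClearMask);
        // the real code then trims the leftover cel (cmd::TrimCel).
        source.image = std::make_shared<Image>();
    }
}
```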
mmm a / torch / csrc / jit / ir . cpp <nl> ppp b / torch / csrc / jit / ir . cpp <nl> bool Node : : matches ( const char * signature_literal , at : : ArrayRef < Symbol > const_inp <nl> return true ; <nl> } <nl> <nl> + void Node : : dump ( ) const { <nl> + std : : cout < < * this < < " \ n " ; <nl> + } <nl> + <nl> void Node : : findSchema ( ) const { <nl> schema_ = & getOperatorFor ( this ) . schema ( ) ; <nl> } <nl> mmm a / torch / csrc / jit / ir . h <nl> ppp b / torch / csrc / jit / ir . h <nl> struct Node : public Attributes < Node > { <nl> return * schema_ ; <nl> } <nl> <nl> + void dump ( ) const ; <nl> + <nl> virtual ~ Node ( ) = default ; <nl> private : <nl> std : : pair < Value * , const Argument & > findInput ( Symbol name ) ; <nl>
Add a dump() method to IR Node's.
pytorch/pytorch
f126687fbca9414696c930a67f7cf1bded29804c
2018-08-01T18:09:53Z
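The point of this one-liner is debugger ergonomics: a nullary dump() can be invoked as `call node->dump()` from gdb/lldb, where composing `std::cout << *node` by hand is awkward. A stand-alone analogue of the pattern (Node here is a stub, not the JIT's):

```cpp
#include <iostream>

// Give graph nodes a no-argument dump() so they can be printed from a
// debugger without hand-building an ostream expression.
struct Node {
    int id = 0;
    friend std::ostream& operator<<(std::ostream& os, const Node& n) {
        return os << "%" << n.id << " = node";
    }
    void dump() const { std::cout << *this << "\n"; }
};

int main() {
    Node n{3};
    n.dump();  // prints: %3 = node
}
```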
mmm a / xbmc / addons / binary / interfaces / api1 / PVR / AddonCallbacksPVR . cpp <nl> ppp b / xbmc / addons / binary / interfaces / api1 / PVR / AddonCallbacksPVR . cpp <nl> void CAddonCallbacksPVR : : PVREpgEventStateChange ( void * addonData , EPG_TAG * tag , u <nl> return ; <nl> } <nl> <nl> - CLog : : Log ( LOGDEBUG , " PVR - % s - state for epg event ' % d ' on channel ' % d ' on client ' % s ' changed to ' % d ' . " , <nl> - __FUNCTION__ , tag - > iUniqueBroadcastId , iUniqueChannelId , client - > Name ( ) . c_str ( ) , newState ) ; <nl> - <nl> static CCriticalSection queueMutex ; <nl> static std : : vector < EpgEventStateChange > queuedChanges ; <nl> <nl> mmm a / xbmc / epg / Epg . cpp <nl> ppp b / xbmc / epg / Epg . cpp <nl> bool CEpg : : UpdateEntry ( const EPG_TAG * data , bool bUpdateDatabase / * = false * / ) <nl> return UpdateEntry ( tag , false , bUpdateDatabase ) ; <nl> } <nl> <nl> - bool CEpg : : UpdateEntry ( const CEpgInfoTagPtr & tag , bool bNotifyObeservers , bool bUpdateDatabase / * = false * / ) <nl> + bool CEpg : : UpdateEntry ( const CEpgInfoTagPtr & tag , bool bNotifyObservers , bool bUpdateDatabase / * = false * / ) <nl> { <nl> CSingleLock lock ( m_critSection ) ; <nl> auto it = m_tags . find ( tag - > StartAsUTC ( ) ) ; <nl> EPG_EVENT_STATE state = ( it = = m_tags . end ( ) ) ? EPG_EVENT_CREATED : EPG_EVENT_UPDATED ; <nl> <nl> - if ( UpdateEntry ( tag , state , it , bUpdateDatabase ) & & bNotifyObeservers ) <nl> + if ( UpdateEntry ( tag , state , it , bUpdateDatabase ) ) <nl> { <nl> - SetChanged ( ) ; <nl> - lock . Leave ( ) ; <nl> - NotifyObservers ( ObservableMessageEpg ) ; <nl> + if ( bNotifyObservers ) <nl> + { <nl> + SetChanged ( ) ; <nl> + lock . Leave ( ) ; <nl> + NotifyObservers ( ObservableMessageEpg ) ; <nl> + } <nl> return true ; <nl> } <nl> return false ; <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRBase . cpp <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRBase . cpp <nl> std : : string CGUIWindowPVRBase : : GetSelectedItemPath ( bool bRadio ) <nl> return m_selectedItemPaths [ bRadio ] ; <nl> } <nl> <nl> + void CGUIWindowPVRBase : : ResetObservers ( void ) <nl> + { <nl> + UnregisterObservers ( ) ; <nl> + if ( IsActive ( ) ) <nl> + RegisterObservers ( ) ; <nl> + } <nl> + <nl> void CGUIWindowPVRBase : : Notify ( const Observable & obs , const ObservableMessage msg ) <nl> { <nl> if ( IsActive ( ) ) <nl> void CGUIWindowPVRBase : : OnInitWindow ( void ) <nl> <nl> / / mark item as selected by channel path <nl> m_viewControl . SetSelectedItem ( GetSelectedItemPath ( m_bRadio ) ) ; <nl> + <nl> + RegisterObservers ( ) ; <nl> } <nl> <nl> void CGUIWindowPVRBase : : OnDeinitWindow ( int nextWindowID ) <nl> { <nl> + UnregisterObservers ( ) ; <nl> UpdateSelectedItemPath ( ) ; <nl> + CGUIMediaWindow : : OnDeinitWindow ( nextWindowID ) ; <nl> } <nl> <nl> bool CGUIWindowPVRBase : : OnMessage ( CGUIMessage & message ) <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRBase . h <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRBase . 
h <nl> namespace PVR <nl> virtual bool OnAction ( const CAction & action ) override ; <nl> virtual bool OnBack ( int actionID ) override ; <nl> virtual bool OpenGroupSelectionDialog ( void ) ; <nl> - virtual void ResetObservers ( void ) { } ; <nl> virtual void Notify ( const Observable & obs , const ObservableMessage msg ) override ; <nl> virtual void SetInvalid ( ) override ; <nl> virtual bool CanBeActivated ( ) const override ; <nl> <nl> + void ResetObservers ( void ) ; <nl> + <nl> static std : : string GetSelectedItemPath ( bool bRadio ) ; <nl> static void SetSelectedItemPath ( bool bRadio , const std : : string & path ) ; <nl> <nl> namespace PVR <nl> bool OnContextButtonEditTimer ( CFileItem * item , CONTEXT_BUTTON button ) ; <nl> bool OnContextButtonEditTimerRule ( CFileItem * item , CONTEXT_BUTTON button ) ; <nl> <nl> + virtual void RegisterObservers ( void ) { } ; <nl> + virtual void UnregisterObservers ( void ) { } ; <nl> + <nl> static CCriticalSection m_selectedItemPathsLock ; <nl> static std : : string m_selectedItemPaths [ 2 ] ; <nl> <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRChannels . cpp <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRChannels . cpp <nl> CGUIWindowPVRChannels : : CGUIWindowPVRChannels ( bool bRadio ) : <nl> { <nl> } <nl> <nl> - void CGUIWindowPVRChannels : : ResetObservers ( void ) <nl> + void CGUIWindowPVRChannels : : RegisterObservers ( void ) <nl> { <nl> CSingleLock lock ( m_critSection ) ; <nl> - UnregisterObservers ( ) ; <nl> g_EpgContainer . RegisterObserver ( this ) ; <nl> g_PVRTimers - > RegisterObserver ( this ) ; <nl> g_infoManager . RegisterObserver ( this ) ; <nl> bool CGUIWindowPVRChannels : : OnMessage ( CGUIMessage & message ) <nl> case ObservableMessageEpgActiveItem : <nl> case ObservableMessageCurrentItem : <nl> { <nl> - if ( IsActive ( ) ) <nl> - SetInvalid ( ) ; <nl> + SetInvalid ( ) ; <nl> bReturn = true ; <nl> break ; <nl> } <nl> case ObservableMessageChannelGroupReset : <nl> { <nl> - if ( IsActive ( ) ) <nl> - Refresh ( true ) ; <nl> + Refresh ( true ) ; <nl> bReturn = true ; <nl> break ; <nl> } <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRChannels . h <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRChannels . h <nl> namespace PVR <nl> virtual bool OnContextButton ( int itemNumber , CONTEXT_BUTTON button ) override ; <nl> virtual bool Update ( const std : : string & strDirectory , bool updateFilterPath = true ) override ; <nl> virtual void UpdateButtons ( void ) override ; <nl> - virtual void ResetObservers ( void ) override ; <nl> - void UnregisterObservers ( void ) ; <nl> virtual bool OnAction ( const CAction & action ) override ; <nl> <nl> protected : <nl> virtual std : : string GetDirectoryPath ( void ) override ; <nl> + virtual void RegisterObservers ( void ) override ; <nl> + virtual void UnregisterObservers ( void ) override ; <nl> <nl> private : <nl> bool OnContextButtonAdd ( CFileItem * item , CONTEXT_BUTTON button ) ; <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRGuide . cpp <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRGuide . cpp <nl> void CGUIWindowPVRGuide : : OnInitWindow ( ) <nl> CGUIWindowPVRBase : : OnInitWindow ( ) ; <nl> } <nl> <nl> - void CGUIWindowPVRGuide : : ResetObservers ( void ) <nl> + void CGUIWindowPVRGuide : : RegisterObservers ( void ) <nl> { <nl> - UnregisterObservers ( ) ; <nl> g_EpgContainer . 
RegisterObserver ( this ) ; <nl> } <nl> <nl> bool CGUIWindowPVRGuide : : OnMessage ( CGUIMessage & message ) <nl> { <nl> m_bUpdateRequired = true ; <nl> / / do not allow more than MAX_UPDATE_FREQUENCY updates <nl> - if ( IsActive ( ) & & m_nextUpdateTimeout . IsTimePast ( ) ) <nl> + if ( m_nextUpdateTimeout . IsTimePast ( ) ) <nl> { <nl> Refresh ( true ) ; <nl> m_nextUpdateTimeout . Set ( MAX_UPDATE_FREQUENCY ) ; <nl> bool CGUIWindowPVRGuide : : OnMessage ( CGUIMessage & message ) <nl> } <nl> case ObservableMessageEpgActiveItem : <nl> { <nl> - if ( IsActive ( ) & & m_viewControl . GetCurrentControl ( ) ! = GUIDE_VIEW_TIMELINE ) <nl> + if ( m_viewControl . GetCurrentControl ( ) ! = GUIDE_VIEW_TIMELINE ) <nl> SetInvalid ( ) ; <nl> else <nl> m_bUpdateRequired = true ; <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRGuide . h <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRGuide . h <nl> namespace PVR <nl> virtual bool OnAction ( const CAction & action ) override ; <nl> virtual void GetContextButtons ( int itemNumber , CContextButtons & buttons ) override ; <nl> virtual bool OnContextButton ( int itemNumber , CONTEXT_BUTTON button ) override ; <nl> - virtual void ResetObservers ( void ) override ; <nl> - void UnregisterObservers ( void ) ; <nl> virtual bool Update ( const std : : string & strDirectory , bool updateFilterPath = true ) override ; <nl> virtual void UpdateButtons ( void ) override ; <nl> <nl> namespace PVR <nl> virtual void UpdateSelectedItemPath ( ) override ; <nl> virtual std : : string GetDirectoryPath ( void ) override { return " " ; } <nl> virtual bool GetDirectory ( const std : : string & strDirectory , CFileItemList & items ) override ; <nl> + virtual void RegisterObservers ( void ) override ; <nl> + virtual void UnregisterObservers ( void ) override ; <nl> <nl> private : <nl> bool SelectPlayingFile ( void ) ; <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRRecordings . cpp <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRRecordings . cpp <nl> CGUIWindowPVRRecordings : : CGUIWindowPVRRecordings ( bool bRadio ) : <nl> { <nl> } <nl> <nl> + void CGUIWindowPVRRecordings : : RegisterObservers ( void ) <nl> + { <nl> + CSingleLock lock ( m_critSection ) ; <nl> + g_PVRRecordings - > RegisterObserver ( this ) ; <nl> + g_PVRTimers - > RegisterObserver ( this ) ; <nl> + g_infoManager . RegisterObserver ( this ) ; <nl> + } <nl> + <nl> void CGUIWindowPVRRecordings : : UnregisterObservers ( void ) <nl> { <nl> CSingleLock lock ( m_critSection ) ; <nl> void CGUIWindowPVRRecordings : : UnregisterObservers ( void ) <nl> g_infoManager . UnregisterObserver ( this ) ; <nl> } <nl> <nl> - void CGUIWindowPVRRecordings : : ResetObservers ( void ) <nl> - { <nl> - CSingleLock lock ( m_critSection ) ; <nl> - UnregisterObservers ( ) ; <nl> - g_PVRRecordings - > RegisterObserver ( this ) ; <nl> - g_PVRTimers - > RegisterObserver ( this ) ; <nl> - g_infoManager . 
RegisterObserver ( this ) ; <nl> - } <nl> - <nl> void CGUIWindowPVRRecordings : : OnWindowLoaded ( ) <nl> { <nl> CONTROL_SELECT ( CONTROL_BTNGROUPITEMS ) ; <nl> bool CGUIWindowPVRRecordings : : OnMessage ( CGUIMessage & message ) <nl> case ObservableMessageEpgActiveItem : <nl> case ObservableMessageCurrentItem : <nl> { <nl> - if ( IsActive ( ) ) <nl> - SetInvalid ( ) ; <nl> + SetInvalid ( ) ; <nl> bReturn = true ; <nl> break ; <nl> } <nl> case ObservableMessageRecordings : <nl> case ObservableMessageTimersReset : <nl> { <nl> - if ( IsActive ( ) ) <nl> - Refresh ( true ) ; <nl> + Refresh ( true ) ; <nl> bReturn = true ; <nl> break ; <nl> } <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRRecordings . h <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRRecordings . h <nl> namespace PVR <nl> virtual bool OnContextButton ( int itemNumber , CONTEXT_BUTTON button ) override ; <nl> virtual bool Update ( const std : : string & strDirectory , bool updateFilterPath = true ) override ; <nl> virtual void UpdateButtons ( void ) override ; <nl> - void UnregisterObservers ( void ) ; <nl> - virtual void ResetObservers ( void ) override ; <nl> <nl> protected : <nl> virtual std : : string GetDirectoryPath ( void ) override ; <nl> virtual void OnPrepareFileItems ( CFileItemList & items ) override ; <nl> + virtual void RegisterObservers ( void ) override ; <nl> + virtual void UnregisterObservers ( void ) override ; <nl> <nl> private : <nl> bool ActionDeleteRecording ( CFileItem * item ) ; <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRTimersBase . cpp <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRTimersBase . cpp <nl> CGUIWindowPVRTimersBase : : CGUIWindowPVRTimersBase ( bool bRadio , int id , const std : <nl> { <nl> } <nl> <nl> - void CGUIWindowPVRTimersBase : : UnregisterObservers ( void ) <nl> + void CGUIWindowPVRTimersBase : : RegisterObservers ( void ) <nl> { <nl> CSingleLock lock ( m_critSection ) ; <nl> - if ( g_PVRTimers ) <nl> - g_PVRTimers - > UnregisterObserver ( this ) ; <nl> - g_infoManager . UnregisterObserver ( this ) ; <nl> + g_PVRTimers - > RegisterObserver ( this ) ; <nl> + g_infoManager . RegisterObserver ( this ) ; <nl> } <nl> <nl> - void CGUIWindowPVRTimersBase : : ResetObservers ( void ) <nl> + void CGUIWindowPVRTimersBase : : UnregisterObservers ( void ) <nl> { <nl> CSingleLock lock ( m_critSection ) ; <nl> - UnregisterObservers ( ) ; <nl> - g_PVRTimers - > RegisterObserver ( this ) ; <nl> - g_infoManager . RegisterObserver ( this ) ; <nl> + if ( g_PVRTimers ) <nl> + g_PVRTimers - > UnregisterObserver ( this ) ; <nl> + g_infoManager . UnregisterObserver ( this ) ; <nl> } <nl> <nl> void CGUIWindowPVRTimersBase : : GetContextButtons ( int itemNumber , CContextButtons & buttons ) <nl> bool CGUIWindowPVRTimersBase : : OnMessage ( CGUIMessage & message ) <nl> case ObservableMessageEpgActiveItem : <nl> case ObservableMessageCurrentItem : <nl> { <nl> - if ( IsActive ( ) ) <nl> - SetInvalid ( ) ; <nl> + SetInvalid ( ) ; <nl> bReturn = true ; <nl> break ; <nl> } <nl> case ObservableMessageTimersReset : <nl> { <nl> - if ( IsActive ( ) ) <nl> - Refresh ( true ) ; <nl> + Refresh ( true ) ; <nl> bReturn = true ; <nl> break ; <nl> } <nl> mmm a / xbmc / pvr / windows / GUIWindowPVRTimersBase . h <nl> ppp b / xbmc / pvr / windows / GUIWindowPVRTimersBase . 
h <nl> namespace PVR <nl> bool OnContextButton ( int itemNumber , CONTEXT_BUTTON button ) ; <nl> bool Update ( const std : : string & strDirectory , bool updateFilterPath = true ) ; <nl> void UpdateButtons ( void ) ; <nl> - void UnregisterObservers ( void ) ; <nl> - void ResetObservers ( void ) ; <nl> + <nl> + protected : <nl> + virtual void RegisterObservers ( void ) ; <nl> + virtual void UnregisterObservers ( void ) ; <nl> <nl> private : <nl> bool ActionDeleteTimer ( CFileItem * item ) ; <nl>
Merge pull request from ksooo/async-epg-update-performance
xbmc/xbmc
9154f4ed6cf567af002a028a9a279b2bd88b1958
2016-03-15T10:53:13Z
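The refactor above replaces per-window ResetObservers overrides with two protected hooks driven by the base window's lifetime, so windows stop receiving (and repainting on) notifications while hidden. A minimal sketch of the resulting shape, with illustrative names (the real base also checks IsActive() before re-registering):

```cpp
// Base window owns the observer lifetime; derived windows only pick
// which observables to subscribe to.
class WindowBase {
public:
    virtual ~WindowBase() = default;

    void OnInitWindow()   { RegisterObservers(); }    // window shown
    void OnDeinitWindow() { UnregisterObservers(); }  // window hidden

    // Re-subscribe, e.g. after the backing service restarts.
    void ResetObservers() { UnregisterObservers(); RegisterObservers(); }

protected:
    virtual void RegisterObservers() {}
    virtual void UnregisterObservers() {}
};

class TimersWindow : public WindowBase {
protected:
    void RegisterObservers() override   { /* g_PVRTimers->RegisterObserver(this); */ }
    void UnregisterObservers() override { /* g_PVRTimers->UnregisterObserver(this); */ }
};
```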
mmm a / src / CMakeLists . txt <nl> ppp b / src / CMakeLists . txt <nl> endif ( ) <nl> <nl> if ( ENABLE_UPDATER ) <nl> if ( USE_SHARED_CURL ) <nl> - find_library ( LIBCURL_LIBRARY curl ) <nl> - find_path ( LIBCURL_INCLUDE_DIR curl / curl . h ) <nl> + find_library ( LIBCURL_LIBRARY NAMES curl ) <nl> + find_path ( LIBCURL_INCLUDE_DIR NAMES curl / curl . h ) <nl> <nl> set ( libs3rdparty $ { libs3rdparty } $ { LIBCURL_LIBRARY } ) <nl> include_directories ( $ { LIBCURL_INCLUDE_DIR } ) <nl>
Fix find_library/path to find shared libcurl (thanks to Tobias Hansen)
aseprite/aseprite
c6ad4a94c1be6277d91b187796d33af3b3ab33ad
2012-03-19T17:29:54Z
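For reference, both spellings are valid CMake: the short positional form `find_library(LIBCURL_LIBRARY curl)` and the keyword form `find_library(LIBCURL_LIBRARY NAMES curl)`. The keyword form is the usual choice because it extends cleanly to multiple candidate names (e.g. `NAMES curl libcurl`) and to the other keywords such as `PATHS`; that the keyword form alone fixed the shared-libcurl lookup here is taken on the reporter's word.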
mmm a / Source / Math / CPUSparseMatrix . cpp <nl> ppp b / Source / Math / CPUSparseMatrix . cpp <nl> void CPUSparseMatrix < ElemType > : : MaskColumnsValue ( const CPUMatrix < char > & columnsM <nl> RuntimeError ( " Matrix and column mask must have equal number of columns . " ) ; <nl> <nl> if ( val ! = 0 ) <nl> - NOT_IMPLEMENTED ; <nl> + LogicError ( " MaskColumnsValue is not implmented for a non - zero mask for sparse matrices . " ) ; <nl> <nl> # ifdef _DEBUG <nl> if ( GetFormat ( ) = = MatrixFormat : : matrixFormatSparseCSC ) <nl> void CPUSparseMatrix < ElemType > : : MaskColumnsValue ( const CPUMatrix < char > & columnsM <nl> # pragma omp parallel for <nl> for ( long j = 0 ; j < n ; j + + ) <nl> if ( maskedCols [ j ] = = 0 & & colVector [ j + 1 ] ! = colVector [ j ] ) <nl> - NOT_IMPLEMENTED ; <nl> + LogicError ( " CPUSparseMatrix attempted to mask column % d , but it has % d elements in it . " , j , ( colVector [ j + 1 ] - colVector [ j ] ) ) ; <nl> } <nl> else <nl> NOT_IMPLEMENTED ; <nl> mmm a / Source / Math / GPUSparseMatrix . cu <nl> ppp b / Source / Math / GPUSparseMatrix . cu <nl> void GPUSparseMatrix < ElemType > : : MaskColumnsValue ( const GPUMatrix < char > & columnsM <nl> RuntimeError ( " Matrix and column mask must have equal number of columns " ) ; <nl> <nl> if ( val ! = 0 ) <nl> - NOT_IMPLEMENTED ; <nl> + LogicError ( " MaskColumnsValue is not implmented for a non - zero mask for sparse matrices . " ) ; <nl> <nl> # ifdef _DEBUG <nl> if ( GetFormat ( ) = = MatrixFormat : : matrixFormatSparseCSC ) <nl> void GPUSparseMatrix < ElemType > : : MaskColumnsValue ( const GPUMatrix < char > & columnsM <nl> # pragma omp parallel for <nl> for ( long j = 0 ; j < n ; j + + ) <nl> if ( maskedCols [ j ] = = 0 & & colVector [ j + 1 ] ! = colVector [ j ] ) <nl> - NOT_IMPLEMENTED ; <nl> - <nl> + RuntimeError ( " GPUSparseMatrix attempted to mask column % d , but it has % d elements in it . " , j , ( colVector [ j + 1 ] - colVector [ j ] ) ) ; <nl> } <nl> else <nl> NOT_IMPLEMENTED ; <nl>
Updated NOT_IMPLEMENTED to LogicError
microsoft/CNTK
512f325f9199d5d84acedb9f052a26b556206f7f
2016-04-21T02:12:50Z
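The swap above replaces the opaque NOT_IMPLEMENTED macro with printf-style diagnostics that name the offending column and element count. A minimal sketch of a LogicError-like helper, assuming CNTK-style semantics (format the message, then throw); the real helper lives elsewhere in the codebase:

```cpp
#include <cstdarg>
#include <cstdio>
#include <stdexcept>

// Format a printf-style message and throw; mirrors the assumed contract
// of the LogicError/RuntimeError helpers used in the patch above.
[[noreturn]] void LogicError(const char* fmt, ...)
{
    char buffer[1024];
    va_list args;
    va_start(args, fmt);
    std::vsnprintf(buffer, sizeof(buffer), fmt, args);
    va_end(args);
    throw std::logic_error(buffer);
}

int main()
{
    try {
        LogicError("attempted to mask column %d, but it has %d elements.", 3, 7);
    } catch (const std::logic_error& e) {
        std::printf("%s\n", e.what());  // descriptive, unlike NOT_IMPLEMENTED
    }
}
```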
mmm a / xbmc / pvr / channels / PVRChannelGroup . cpp <nl> ppp b / xbmc / pvr / channels / PVRChannelGroup . cpp <nl> CPVRChannelGroup : : CPVRChannelGroup ( const PVR_CHANNEL_GROUP & group ) : <nl> OnInit ( ) ; <nl> } <nl> <nl> + CPVRChannelGroup : : CPVRChannelGroup ( const CPVRChannelGroup & group ) : <nl> + m_strGroupName ( group . m_strGroupName ) <nl> + { <nl> + m_bRadio = group . m_bRadio ; <nl> + m_iGroupType = group . m_iGroupType ; <nl> + m_iGroupId = group . m_iGroupId ; <nl> + m_bLoaded = group . m_bLoaded ; <nl> + m_bChanged = group . m_bChanged ; <nl> + m_bUsingBackendChannelOrder = group . m_bUsingBackendChannelOrder ; <nl> + m_bUsingBackendChannelNumbers = group . m_bUsingBackendChannelNumbers ; <nl> + m_iLastWatched = group . m_iLastWatched ; <nl> + m_bHidden = group . m_bHidden ; <nl> + m_bSelectedGroup = group . m_bSelectedGroup ; <nl> + m_bPreventSortAndRenumber = group . m_bPreventSortAndRenumber ; <nl> + m_members = group . m_members ; <nl> + m_sortedMembers = group . m_sortedMembers ; <nl> + m_iPosition = group . m_iPosition ; <nl> + OnInit ( ) ; <nl> + } <nl> + <nl> CPVRChannelGroup : : ~ CPVRChannelGroup ( void ) <nl> { <nl> CSettings : : Get ( ) . UnregisterCallback ( this ) ; <nl> std : : pair < int , int > CPVRChannelGroup : : PathIdToStorageId ( uint64_t storageId ) <nl> return std : : make_pair ( storageId > > 32 , storageId & 0xFFFFFFFF ) ; <nl> } <nl> <nl> - CPVRChannelGroup : : CPVRChannelGroup ( const CPVRChannelGroup & group ) : <nl> - m_strGroupName ( group . m_strGroupName ) <nl> - { <nl> - m_bRadio = group . m_bRadio ; <nl> - m_iGroupType = group . m_iGroupType ; <nl> - m_iGroupId = group . m_iGroupId ; <nl> - m_bLoaded = group . m_bLoaded ; <nl> - m_bChanged = group . m_bChanged ; <nl> - m_bUsingBackendChannelOrder = group . m_bUsingBackendChannelOrder ; <nl> - m_bUsingBackendChannelNumbers = group . m_bUsingBackendChannelNumbers ; <nl> - m_iLastWatched = group . m_iLastWatched ; <nl> - m_bHidden = group . m_bHidden ; <nl> - m_bSelectedGroup = group . m_bSelectedGroup ; <nl> - m_bPreventSortAndRenumber = group . m_bPreventSortAndRenumber ; <nl> - m_members = group . m_members ; <nl> - m_sortedMembers = group . m_sortedMembers ; <nl> - m_iPosition = group . m_iPosition ; <nl> - OnInit ( ) ; <nl> - } <nl> - <nl> void CPVRChannelGroup : : OnInit ( void ) <nl> { <nl> CSettings : : Get ( ) . RegisterCallback ( this , { <nl>
[cosmetics] sort ctor in CPVRChannelGroup
xbmc/xbmc
b3db4e710a019f268cd97b6afcb742e8e00d91de
2015-06-30T09:41:05Z
mmm a / test / test_jit . py <nl> ppp b / test / test_jit . py <nl> <nl> from torch . quantization import quantize <nl> from common_quantization import SingleLayerLinearModel , AnnotatedSingleLayerLinearModel <nl> from common_quantization import ConvModel , AnnotatedConvModel <nl> - from common_quantization import test_only_eval_fn <nl> + from common_quantization import test_only_eval_fn as _test_only_eval_fn <nl> <nl> <nl> # Testing utils <nl> def copy_weights ( name , m , ref_m ) : <nl> m = M ( ) <nl> copy_weights ( name , m , ref_m ) <nl> ref_m . qconfig = qconfig <nl> - ref_m = quantize ( ref_m , test_only_eval_fn , [ ( data , torch . randint ( 0 , 1 , ( 5 , ) , dtype = torch . long ) ) ] ) <nl> + ref_m = quantize ( ref_m , _test_only_eval_fn , [ ( data , torch . randint ( 0 , 1 , ( 5 , ) , dtype = torch . long ) ) ] ) <nl> ref_res = ref_m ( data ) <nl> # script mode <nl> m = torch . jit . script ( m ) <nl>
Fix test_jit under pytest
pytorch/pytorch
1eb9f49cc6dae89a80e62fa81f907da28371a4fc
2019-11-21T04:44:28Z
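The rename works because pytest collects top-level callables whose names match `test_*` inside test modules; importing `test_only_eval_fn` into test_jit.py therefore made pytest pick the helper up as if it were a test. Aliasing it on import (`... import test_only_eval_fn as _test_only_eval_fn`) hides it from collection without changing the helper itself.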
mmm a / include / external / tbc_text_format . h <nl> ppp b / include / external / tbc_text_format . h <nl> namespace Tbc { <nl> public : <nl> Text ( std : : string const & _str , TextAttributes const & _attr = TextAttributes ( ) ) <nl> : attr ( _attr ) <nl> + { <nl> + init ( _str ) ; <nl> + } <nl> + <nl> + template < typename T > <nl> + Text ( T const & _val , TextAttributes const & _attr = TextAttributes ( ) ) <nl> + : attr ( _attr ) <nl> + { <nl> + std : : ostringstream oss ; <nl> + oss < < _val ; <nl> + init ( oss . str ( ) ) ; <nl> + } <nl> + <nl> + typedef std : : vector < std : : string > : : const_iterator const_iterator ; <nl> + <nl> + const_iterator begin ( ) const { return lines . begin ( ) ; } <nl> + const_iterator end ( ) const { return lines . end ( ) ; } <nl> + std : : string const & last ( ) const { return lines . back ( ) ; } <nl> + std : : size_t size ( ) const { return lines . size ( ) ; } <nl> + std : : string const & operator [ ] ( std : : size_t _index ) const { return lines [ _index ] ; } <nl> + std : : string toString ( ) const { <nl> + std : : ostringstream oss ; <nl> + oss < < * this ; <nl> + return oss . str ( ) ; <nl> + } <nl> + <nl> + inline friend std : : ostream & operator < < ( std : : ostream & _stream , Text const & _text ) { <nl> + for ( Text : : const_iterator it = _text . begin ( ) , itEnd = _text . end ( ) ; <nl> + it ! = itEnd ; + + it ) { <nl> + if ( it ! = _text . begin ( ) ) <nl> + _stream < < " \ n " ; <nl> + _stream < < * it ; <nl> + } <nl> + return _stream ; <nl> + } <nl> + <nl> + <nl> + private : <nl> + TextAttributes attr ; <nl> + std : : vector < std : : string > lines ; <nl> + <nl> + void init ( std : : string const & _str ) <nl> { <nl> const std : : string wrappableBeforeChars = " [ ( { < \ t " ; <nl> const std : : string wrappableAfterChars = " ] ) } > - , . / | \ \ " ; <nl> const std : : string wrappableInsteadOfChars = " \ n \ r " ; <nl> - std : : string indent = _attr . initialIndent ! = std : : string : : npos <nl> - ? std : : string ( _attr . initialIndent , ' ' ) <nl> - : std : : string ( _attr . indent , ' ' ) ; <nl> + std : : string indent = attr . initialIndent ! = std : : string : : npos <nl> + ? std : : string ( attr . initialIndent , ' ' ) <nl> + : std : : string ( attr . indent , ' ' ) ; <nl> <nl> typedef std : : string : : const_iterator iterator ; <nl> iterator it = _str . begin ( ) ; <nl> namespace Tbc { <nl> <nl> <nl> std : : string suffix ; <nl> - std : : size_t width = ( std : : min ) ( static_cast < size_t > ( strEnd - it ) , _attr . width - static_cast < size_t > ( indent . size ( ) ) ) ; <nl> + std : : size_t width = ( std : : min ) ( static_cast < size_t > ( strEnd - it ) , attr . width - static_cast < size_t > ( indent . size ( ) ) ) ; <nl> iterator itEnd = it + width ; <nl> iterator itNext = _str . end ( ) ; <nl> <nl> namespace Tbc { <nl> } <nl> lines . push_back ( indent + std : : string ( it , itEnd ) + suffix ) ; <nl> <nl> - if ( indent . size ( ) ! = _attr . indent ) <nl> - indent = std : : string ( _attr . indent , ' ' ) ; <nl> + if ( indent . size ( ) ! = attr . indent ) <nl> + indent = std : : string ( attr . indent , ' ' ) ; <nl> it = itNext ; <nl> } <nl> } <nl> - <nl> - <nl> - <nl> - typedef std : : vector < std : : string > : : const_iterator const_iterator ; <nl> - <nl> - const_iterator begin ( ) const { return lines . begin ( ) ; } <nl> - const_iterator end ( ) const { return lines . end ( ) ; } <nl> - std : : string const & last ( ) const { return lines . 
back ( ) ; } <nl> - std : : size_t size ( ) const { return lines . size ( ) ; } <nl> - std : : string const & operator [ ] ( std : : size_t _index ) const { return lines [ _index ] ; } <nl> - std : : string toString ( ) const { <nl> - std : : ostringstream oss ; <nl> - oss < < * this ; <nl> - return oss . str ( ) ; <nl> - } <nl> - <nl> - inline friend std : : ostream & operator < < ( std : : ostream & _stream , Text const & _text ) { <nl> - for ( Text : : const_iterator it = _text . begin ( ) , itEnd = _text . end ( ) ; <nl> - it ! = itEnd ; + + it ) { <nl> - if ( it ! = _text . begin ( ) ) <nl> - _stream < < " \ n " ; <nl> - _stream < < * it ; <nl> - } <nl> - return _stream ; <nl> - } <nl> - <nl> - <nl> - private : <nl> - std : : string str ; <nl> - TextAttributes attr ; <nl> - std : : vector < std : : string > lines ; <nl> } ; <nl> <nl> } / / end namespace Tbc <nl>
Add ability to format any streamable class
catchorg/Catch2
694fe61ae3f3d36b9fc11300eb133b467e7de8a5
2017-07-02T08:52:29Z
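With the new template constructor, anything streamable can be wrapped and word-wrapped directly: the value is stringified through an ostringstream and handed to the same init() path as plain strings. A usage sketch, assuming the patched tbc_text_format.h is on the include path (the Version type is made up for the example):

```cpp
#include <iostream>
#include "tbc_text_format.h"  // the header patched above

// Any type with an operator<< now works as a Text source.
struct Version {
    int major_, minor_;
};
std::ostream& operator<<(std::ostream& os, Version const& v) {
    return os << v.major_ << "." << v.minor_;
}

int main() {
    Tbc::Text fromString("a long line of text that will be word-wrapped");
    Tbc::Text fromValue(Version{1, 9});  // goes through the new init(oss.str())
    std::cout << fromString << "\n" << fromValue << "\n";
}
```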
mmm a / dbms / src / Storages / MergeTree / MergeTreeData . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeData . cpp <nl> void MergeTreeData : : checkAlter ( const AlterCommands & params ) <nl> if ( primary_expr ) <nl> keys = primary_expr - > getRequiredColumns ( ) ; <nl> <nl> + keys . push_back ( date_column_name ) ; <nl> + <nl> if ( ! merging_params . sign_column . empty ( ) ) <nl> keys . push_back ( merging_params . sign_column ) ; <nl> <nl>
forbid altering of date column in MergeTree engines
ClickHouse/ClickHouse
8d001db5d58991f7885324ab8a476856a72ce375
2017-02-10T05:05:33Z
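The one-line fix adds the partitioning date column to the set of protected key columns, alongside the primary-key columns and the sign column of Collapsing variants. The guard's shape, sketched with illustrative names (the real check sits in MergeTreeData::checkAlter and works on AlterCommands):

```cpp
#include <algorithm>
#include <stdexcept>
#include <string>
#include <vector>

// Illustrative sketch, not ClickHouse's real signature: collect every
// column the engine depends on, then reject ALTERs touching any of them.
void checkAlter(const std::vector<std::string>& altered_columns,
                std::vector<std::string> keys,          // primary key columns
                const std::string& date_column_name,
                const std::string& sign_column)
{
    keys.push_back(date_column_name);                   // the fix above
    if (!sign_column.empty())
        keys.push_back(sign_column);

    for (const auto& column : altered_columns)
        if (std::find(keys.begin(), keys.end(), column) != keys.end())
            throw std::runtime_error("cannot ALTER key column " + column);
}
```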
mmm a / modules / dnn / test / test_torch_importer . cpp <nl> ppp b / modules / dnn / test / test_torch_importer . cpp <nl> typedef testing : : TestWithParam < DNNTarget > Test_Torch_layers ; <nl> <nl> TEST_P ( Test_Torch_layers , run_convolution ) <nl> { <nl> - runTorchNet ( " net_conv " , GetParam ( ) ) ; <nl> + runTorchNet ( " net_conv " , GetParam ( ) , " " , false , true ) ; <nl> } <nl> <nl> TEST_P ( Test_Torch_layers , run_pool_max ) <nl>
Merge pull request from dkurt: update_torch_testdata
opencv/opencv
f95e91e2bc73d4bc05dd0d4fce7831f4524444de
2018-04-03T18:02:58Z
mmm a / Telegram / SourceFiles / boxes / connection_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / connection_box . cpp <nl> void ProxyRow : : updateFields ( View & & view ) { <nl> } <nl> <nl> void ProxyRow : : step_radial ( TimeMs ms , bool timer ) { <nl> - if ( timer ) { <nl> + if ( timer & & ! anim : : Disabled ( ) ) { <nl> update ( ) ; <nl> } <nl> } <nl> void ProxyRow : : paintCheck ( Painter & p , TimeMs ms ) { <nl> p . setPen ( pen ) ; <nl> p . setBrush ( _st - > bg ) ; <nl> const auto rect = rtlrect ( QRectF ( left , top , _st - > diameter , _st - > diameter ) . marginsRemoved ( QMarginsF ( _st - > thickness / 2 . , _st - > thickness / 2 . , _st - > thickness / 2 . , _st - > thickness / 2 . ) ) , outerWidth ) ; <nl> - if ( loading . arcLength < FullArcLength ) { <nl> + if ( _progress & & loading . shown > 0 & & anim : : Disabled ( ) ) { <nl> + anim : : DrawStaticLoading ( <nl> + p , <nl> + rect , <nl> + _st - > thickness , <nl> + pen . color ( ) , <nl> + _st - > bg ) ; <nl> + } else if ( loading . arcLength < FullArcLength ) { <nl> p . drawArc ( rect , loading . arcFrom , loading . arcLength ) ; <nl> } else { <nl> p . drawEllipse ( rect ) ; <nl> } <nl> <nl> - if ( toggled > 0 ) { <nl> + if ( toggled > 0 & & ( ! _progress | | ! anim : : Disabled ( ) ) ) { <nl> p . setPen ( Qt : : NoPen ) ; <nl> p . setBrush ( anim : : brush ( _st - > untoggledFg , _st - > toggledFg , toggled * set ) ) ; <nl> <nl> mmm a / Telegram / SourceFiles / boxes / local_storage_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / local_storage_box . cpp <nl> void LocalStorageBox : : Row : : toggleProgress ( bool shown ) { <nl> } <nl> <nl> void LocalStorageBox : : Row : : step_radial ( TimeMs ms , bool timer ) { <nl> - if ( timer ) { <nl> + if ( timer & & ! anim : : Disabled ( ) ) { <nl> RpWidget : : update ( ) ; <nl> } <nl> } <nl> mmm a / Telegram / SourceFiles / boxes / stickers_box . cpp <nl> ppp b / Telegram / SourceFiles / boxes / stickers_box . cpp <nl> void StickersBox : : Inner : : leaveToChildEvent ( QEvent * e , QWidget * child ) { <nl> } <nl> <nl> void StickersBox : : Inner : : step_shifting ( TimeMs ms , bool timer ) { <nl> + if ( anim : : Disabled ( ) ) { <nl> + ms + = st : : stickersRowDuration ; <nl> + } <nl> auto animating = false ; <nl> auto updateMin = - 1 ; <nl> auto updateMax = 0 ; <nl> mmm a / Telegram / SourceFiles / chat_helpers / stickers_list_widget . cpp <nl> ppp b / Telegram / SourceFiles / chat_helpers / stickers_list_widget . cpp <nl> void StickersListWidget : : Footer : : paintSetIcon ( <nl> } <nl> <nl> void StickersListWidget : : Footer : : step_icons ( TimeMs ms , bool timer ) { <nl> + if ( anim : : Disabled ( ) ) { <nl> + ms + = st : : stickerIconMove ; <nl> + } <nl> if ( _iconsStartAnim ) { <nl> auto dt = ( ms - _iconsStartAnim ) / float64 ( st : : stickerIconMove ) ; <nl> if ( dt > = 1 ) { <nl> mmm a / Telegram / SourceFiles / dialogs / dialogs_inner_widget . cpp <nl> ppp b / Telegram / SourceFiles / dialogs / dialogs_inner_widget . cpp <nl> bool DialogsInner : : updateReorderPinned ( QPoint localPosition ) { <nl> } <nl> <nl> void DialogsInner : : step_pinnedShifting ( TimeMs ms , bool timer ) { <nl> + if ( anim : : Disabled ( ) ) { <nl> + ms + = st : : stickersRowDuration ; <nl> + } <nl> + <nl> auto animating = false ; <nl> auto updateMin = - 1 ; <nl> auto updateMax = 0 ; <nl> mmm a / Telegram / SourceFiles / history / history . cpp <nl> ppp b / Telegram / SourceFiles / history / history . 
cpp <nl> bool History : : updateSendActionNeedsAnimating ( TimeMs ms , bool force ) { <nl> } <nl> } <nl> auto result = ( ! _typing . isEmpty ( ) | | ! _sendActions . isEmpty ( ) ) ; <nl> - if ( changed | | result ) { <nl> + if ( changed | | ( result & & ! anim : : Disabled ( ) ) ) { <nl> App : : histories ( ) . sendActionAnimationUpdated ( ) . notify ( { <nl> this , <nl> _sendActionAnimation . width ( ) , <nl> mmm a / Telegram / SourceFiles / history / history_item_components . cpp <nl> ppp b / Telegram / SourceFiles / history / history_item_components . cpp <nl> void ReplyKeyboard : : startAnimation ( int i , int j , int direction ) { <nl> } <nl> <nl> void ReplyKeyboard : : step_selected ( TimeMs ms , bool timer ) { <nl> + if ( anim : : Disabled ( ) ) { <nl> + ms + = st : : botKbDuration ; <nl> + } <nl> for ( auto i = _animations . begin ( ) ; i ! = _animations . end ( ) ; ) { <nl> const auto index = std : : abs ( i - > first ) - 1 ; <nl> const auto row = ( index / MatrixRowShift ) ; <nl> mmm a / Telegram / SourceFiles / history / history_media_types . cpp <nl> ppp b / Telegram / SourceFiles / history / history_media_types . cpp <nl> void HistoryFileMedia : : setStatusSize ( int newSize , int fullSize , int duration , qi <nl> } <nl> <nl> void HistoryFileMedia : : step_radial ( TimeMs ms , bool timer ) { <nl> + const auto updateRadial = [ & ] { <nl> + return _animation - > radial . update ( <nl> + dataProgress ( ) , <nl> + dataFinished ( ) , <nl> + ms ) ; <nl> + } ; <nl> if ( timer ) { <nl> - Auth ( ) . data ( ) . requestViewRepaint ( _parent ) ; <nl> + if ( ! anim : : Disabled ( ) | | updateRadial ( ) ) { <nl> + Auth ( ) . data ( ) . requestViewRepaint ( _parent ) ; <nl> + } <nl> } else { <nl> - _animation - > radial . update ( dataProgress ( ) , dataFinished ( ) , ms ) ; <nl> + updateRadial ( ) ; <nl> if ( ! _animation - > radial . animating ( ) ) { <nl> checkAnimationFinished ( ) ; <nl> } <nl> QMargins HistoryDocument : : bubbleMargins ( ) const { <nl> } <nl> <nl> void HistoryDocument : : step_voiceProgress ( float64 ms , bool timer ) { <nl> + if ( anim : : Disabled ( ) ) { <nl> + ms + = ( 2 * AudioVoiceMsgUpdateView ) ; <nl> + } <nl> if ( auto voice = Get < HistoryDocumentVoice > ( ) ) { <nl> if ( voice - > _playback ) { <nl> float64 dt = ms / ( 2 * AudioVoiceMsgUpdateView ) ; <nl> mmm a / Telegram / SourceFiles / history / history_widget . cpp <nl> ppp b / Telegram / SourceFiles / history / history_widget . cpp <nl> void HistoryWidget : : saveEditMsg ( ) { <nl> <nl> const auto textWithTags = _field - > getTextWithAppliedMarkdown ( ) ; <nl> const auto prepareFlags = Ui : : ItemTextOptions ( <nl> - _history , <nl> + _history , <nl> Auth ( ) . user ( ) ) . flags ; <nl> auto sending = TextWithEntities ( ) ; <nl> auto left = TextWithEntities { textWithTags . text , ConvertTextTagsToEntities ( textWithTags . tags ) } ; <nl> void HistoryWidget : : unreadMentionsAnimationFinish ( ) { <nl> } <nl> <nl> void HistoryWidget : : step_recording ( float64 ms , bool timer ) { <nl> - float64 dt = ms / AudioVoiceMsgUpdateView ; <nl> + const auto dt = anim : : Disabled ( ) ? 1 . : ( ms / AudioVoiceMsgUpdateView ) ; <nl> if ( dt > = 1 ) { <nl> _a_recording . stop ( ) ; <nl> a_recordingLevel . finish ( ) ; <nl> } else { <nl> a_recordingLevel . update ( dt , anim : : linear ) ; <nl> } <nl> - if ( timer ) update ( _attachToggle - > geometry ( ) ) ; <nl> + if ( timer & & ! 
anim : : Disabled ( ) ) { <nl> + update ( _attachToggle - > geometry ( ) ) ; <nl> + } <nl> } <nl> <nl> void HistoryWidget : : chooseAttach ( ) { <nl> mmm a / Telegram / SourceFiles / history / view / history_view_top_bar_widget . cpp <nl> ppp b / Telegram / SourceFiles / history / view / history_view_top_bar_widget . cpp <nl> void TopBarWidget : : updateConnectingState ( ) { <nl> } <nl> <nl> void TopBarWidget : : step_connecting ( TimeMs ms , bool timer ) { <nl> - if ( timer ) { <nl> + if ( timer & & ! anim : : Disabled ( ) ) { <nl> update ( ) ; <nl> } <nl> } <nl> mmm a / Telegram / SourceFiles / inline_bots / inline_bot_layout_internal . cpp <nl> ppp b / Telegram / SourceFiles / inline_bots / inline_bot_layout_internal . cpp <nl> bool Gif : : isRadialAnimation ( TimeMs ms ) const { <nl> } <nl> <nl> void Gif : : step_radial ( TimeMs ms , bool timer ) { <nl> + const auto document = getShownDocument ( ) ; <nl> + const auto updateRadial = [ & ] { <nl> + return _animation - > radial . update ( <nl> + document - > progress ( ) , <nl> + ! document - > loading ( ) | | document - > loaded ( ) , <nl> + ms ) ; <nl> + } ; <nl> if ( timer ) { <nl> - update ( ) ; <nl> + if ( ! anim : : Disabled ( ) | | updateRadial ( ) ) { <nl> + update ( ) ; <nl> + } <nl> } else { <nl> - DocumentData * document = getShownDocument ( ) ; <nl> - _animation - > radial . update ( document - > progress ( ) , ! document - > loading ( ) | | document - > loaded ( ) , ms ) ; <nl> + updateRadial ( ) ; <nl> if ( ! _animation - > radial . animating ( ) & & document - > loaded ( ) ) { <nl> _animation . reset ( ) ; <nl> } <nl> void File : : thumbAnimationCallback ( ) { <nl> } <nl> <nl> void File : : step_radial ( TimeMs ms , bool timer ) { <nl> - if ( timer ) { <nl> - update ( ) ; <nl> - } else { <nl> - _animation - > radial . update ( <nl> + const auto updateRadial = [ & ] { <nl> + return _animation - > radial . update ( <nl> _document - > progress ( ) , <nl> ! _document - > loading ( ) | | _document - > loaded ( ) , <nl> ms ) ; <nl> + } ; <nl> + if ( timer ) { <nl> + if ( ! anim : : Disabled ( ) | | updateRadial ( ) ) { <nl> + update ( ) ; <nl> + } <nl> + } else { <nl> + updateRadial ( ) ; <nl> if ( ! _animation - > radial . animating ( ) ) { <nl> checkAnimationFinished ( ) ; <nl> } <nl> bool Game : : isRadialAnimation ( TimeMs ms ) const { <nl> } <nl> <nl> void Game : : step_radial ( TimeMs ms , bool timer ) { <nl> + const auto document = getResultDocument ( ) ; <nl> + const auto updateRadial = [ & ] { <nl> + return _radial - > update ( <nl> + document - > progress ( ) , <nl> + ! document - > loading ( ) | | document - > loaded ( ) , <nl> + ms ) ; <nl> + } ; <nl> if ( timer ) { <nl> - update ( ) ; <nl> + if ( ! anim : : Disabled ( ) | | updateRadial ( ) ) { <nl> + update ( ) ; <nl> + } <nl> } else { <nl> - auto document = getResultDocument ( ) ; <nl> - _radial - > update ( document - > progress ( ) , ! document - > loading ( ) | | document - > loaded ( ) , ms ) ; <nl> + updateRadial ( ) ; <nl> if ( ! _radial - > animating ( ) & & document - > loaded ( ) ) { <nl> _radial . reset ( ) ; <nl> } <nl> mmm a / Telegram / SourceFiles / media / view / media_clip_playback . cpp <nl> ppp b / Telegram / SourceFiles / media / view / media_clip_playback . cpp <nl> void Playback : : setValue ( float64 value , bool animated ) { <nl> } <nl> <nl> void Playback : : step_value ( float64 ms , bool timer ) { <nl> - auto dt = ms / kPlaybackAnimationDurationMs ; <nl> + auto dt = anim : : Disabled ( ) ? 1 . 
: ( ms / kPlaybackAnimationDurationMs ) ; <nl> if ( dt > = 1 . ) { <nl> _a_value . stop ( ) ; <nl> a_value . finish ( ) ; <nl> mmm a / Telegram / SourceFiles / mediaview . cpp <nl> ppp b / Telegram / SourceFiles / mediaview . cpp <nl> auto MediaView : : computeOverviewType ( ) const <nl> } <nl> <nl> void MediaView : : step_state ( TimeMs ms , bool timer ) { <nl> + if ( anim : : Disabled ( ) ) { <nl> + ms + = st : : mediaviewShowDuration + st : : mediaviewHideDuration ; <nl> + } <nl> bool result = false ; <nl> - for ( Showing : : iterator i = _animations . begin ( ) ; i ! = _animations . end ( ) ; ) { <nl> + for ( auto i = _animations . begin ( ) ; i ! = _animations . end ( ) ; ) { <nl> TimeMs start = i . value ( ) ; <nl> switch ( i . key ( ) ) { <nl> case OverLeftNav : update ( _leftNav ) ; break ; <nl> void MediaView : : step_radial ( TimeMs ms , bool timer ) { <nl> return ; <nl> } <nl> const auto wasAnimating = _radial . animating ( ) ; <nl> - _radial . update ( radialProgress ( ) , ! radialLoading ( ) , ms + radialTimeShift ( ) ) ; <nl> - if ( timer & & ( wasAnimating | | _radial . animating ( ) ) ) { <nl> + const auto updated = _radial . update ( <nl> + radialProgress ( ) , <nl> + ! radialLoading ( ) , <nl> + ms + radialTimeShift ( ) ) ; <nl> + if ( timer & & ( wasAnimating | | _radial . animating ( ) ) & & ( ! anim : : Disabled ( ) | | updated ) ) { <nl> update ( radialRect ( ) ) ; <nl> } <nl> const auto ready = _doc & & _doc - > loaded ( ) ; <nl> mmm a / Telegram / SourceFiles / overview / overview_layout . cpp <nl> ppp b / Telegram / SourceFiles / overview / overview_layout . cpp <nl> void RadialProgressItem : : setLinks ( ClickHandlerPtr & & openl , ClickHandlerPtr & & sav <nl> } <nl> <nl> void RadialProgressItem : : step_radial ( TimeMs ms , bool timer ) { <nl> + const auto updateRadial = [ & ] { <nl> + return _radial - > update ( dataProgress ( ) , dataFinished ( ) , ms ) ; <nl> + } ; <nl> if ( timer ) { <nl> - Auth ( ) . data ( ) . requestItemRepaint ( parent ( ) ) ; <nl> + if ( ! anim : : Disabled ( ) | | updateRadial ( ) ) { <nl> + Auth ( ) . data ( ) . requestItemRepaint ( parent ( ) ) ; <nl> + } <nl> } else { <nl> - _radial - > update ( dataProgress ( ) , dataFinished ( ) , ms ) ; <nl> + updateRadial ( ) ; <nl> if ( ! _radial - > animating ( ) ) { <nl> checkRadialFinished ( ) ; <nl> } <nl> mmm a / Telegram / SourceFiles / settings / settings_chat . cpp <nl> ppp b / Telegram / SourceFiles / settings / settings_chat . cpp <nl> TimeMs BackgroundRow : : radialTimeShift ( ) const { <nl> } <nl> <nl> void BackgroundRow : : step_radial ( TimeMs ms , bool timer ) { <nl> - _radial . update ( <nl> + const auto updated = _radial . update ( <nl> radialProgress ( ) , <nl> ! radialLoading ( ) , <nl> ms + radialTimeShift ( ) ) ; <nl> - if ( timer & & _radial . animating ( ) ) { <nl> + if ( timer & & _radial . animating ( ) & & ( ! anim : : Disabled ( ) | | updated ) ) { <nl> rtlupdate ( radialRect ( ) ) ; <nl> } <nl> } <nl> mmm a / Telegram / SourceFiles / ui / animation . cpp <nl> ppp b / Telegram / SourceFiles / ui / animation . 
cpp <nl> void stopManager ( ) { <nl> Media : : Clip : : Finish ( ) ; <nl> } <nl> <nl> - void registerClipManager ( Media : : Clip : : Manager * manager ) { <nl> - manager - > connect ( manager , SIGNAL ( callback ( Media : : Clip : : Reader * , qint32 , qint32 ) ) , _manager , SLOT ( clipCallback ( Media : : Clip : : Reader * , qint32 , qint32 ) ) ) ; <nl> + void registerClipManager ( not_null < Media : : Clip : : Manager * > manager ) { <nl> + Expects ( _manager ! = nullptr ) ; <nl> + <nl> + _manager - > registerClip ( manager ) ; <nl> } <nl> <nl> bool Disabled ( ) { <nl> bool Disabled ( ) { <nl> void SetDisabled ( bool disabled ) { <nl> AnimationsDisabled = disabled ; <nl> if ( disabled & & _manager ) { <nl> - _manager - > timeout ( ) ; <nl> + _manager - > step ( ) ; <nl> + } <nl> + } <nl> + <nl> + void DrawStaticLoading ( <nl> + QPainter & p , <nl> + QRectF rect , <nl> + int stroke , <nl> + QPen pen , <nl> + QBrush brush ) { <nl> + PainterHighQualityEnabler hq ( p ) ; <nl> + <nl> + p . setBrush ( brush ) ; <nl> + pen . setWidthF ( stroke ) ; <nl> + pen . setCapStyle ( Qt : : RoundCap ) ; <nl> + pen . setJoinStyle ( Qt : : RoundJoin ) ; <nl> + p . setPen ( pen ) ; <nl> + p . drawEllipse ( rect ) ; <nl> + <nl> + const auto center = rect . center ( ) ; <nl> + const auto first = QPointF ( center . x ( ) , rect . y ( ) + 1 . 5 * stroke ) ; <nl> + const auto delta = center . y ( ) - first . y ( ) ; <nl> + const auto second = QPointF ( center . x ( ) + delta * 2 / 3 . , center . y ( ) ) ; <nl> + if ( delta > 0 ) { <nl> + QPainterPath path ; <nl> + path . moveTo ( first ) ; <nl> + path . lineTo ( center ) ; <nl> + path . lineTo ( second ) ; <nl> + p . drawPath ( path ) ; <nl> } <nl> } <nl> <nl> void BasicAnimation : : stop ( ) { <nl> _manager - > stop ( this ) ; <nl> } <nl> <nl> - AnimationManager : : AnimationManager ( ) : _timer ( this ) , _iterating ( false ) { <nl> + AnimationManager : : AnimationManager ( ) : _timer ( this ) { <nl> _timer . setSingleShot ( false ) ; <nl> - connect ( & _timer , SIGNAL ( timeout ( ) ) , this , SLOT ( timeout ( ) ) ) ; <nl> + connect ( & _timer , & QTimer : : timeout , this , & AnimationManager : : step ) ; <nl> } <nl> <nl> void AnimationManager : : start ( BasicAnimation * obj ) { <nl> if ( _iterating ) { <nl> _starting . insert ( obj ) ; <nl> - if ( ! _stopping . isEmpty ( ) ) { <nl> - _stopping . remove ( obj ) ; <nl> + if ( ! _stopping . empty ( ) ) { <nl> + _stopping . erase ( obj ) ; <nl> } <nl> } else { <nl> - if ( _objects . isEmpty ( ) ) { <nl> + if ( _objects . empty ( ) ) { <nl> _timer . start ( AnimationTimerDelta ) ; <nl> } <nl> _objects . insert ( obj ) ; <nl> void AnimationManager : : start ( BasicAnimation * obj ) { <nl> void AnimationManager : : stop ( BasicAnimation * obj ) { <nl> if ( _iterating ) { <nl> _stopping . insert ( obj ) ; <nl> - if ( ! _starting . isEmpty ( ) ) { <nl> - _starting . remove ( obj ) ; <nl> + if ( ! _starting . empty ( ) ) { <nl> + _starting . erase ( obj ) ; <nl> } <nl> } else { <nl> auto i = _objects . 
find ( obj ) ; <nl> void AnimationManager : : stop ( BasicAnimation * obj ) { <nl> } <nl> } <nl> <nl> - void AnimationManager : : timeout ( ) { <nl> + void AnimationManager : : registerClip ( not_null < Media : : Clip : : Manager * > clip ) { <nl> + connect ( <nl> + clip , <nl> + & Media : : Clip : : Manager : : callback , <nl> + this , <nl> + & AnimationManager : : clipCallback ) ; <nl> + } <nl> + <nl> + void AnimationManager : : step ( ) { <nl> _iterating = true ; <nl> - auto ms = getms ( ) ; <nl> - for_const ( auto object , _objects ) { <nl> + const auto ms = getms ( ) ; <nl> + for ( const auto object : _objects ) { <nl> if ( ! _stopping . contains ( object ) ) { <nl> object - > step ( ms , true ) ; <nl> } <nl> } <nl> _iterating = false ; <nl> <nl> - if ( ! _starting . isEmpty ( ) ) { <nl> - for_const ( auto object , _starting ) { <nl> - _objects . insert ( object ) ; <nl> + if ( ! _starting . empty ( ) ) { <nl> + for ( const auto object : _starting ) { <nl> + _objects . emplace ( object ) ; <nl> } <nl> _starting . clear ( ) ; <nl> } <nl> - if ( ! _stopping . isEmpty ( ) ) { <nl> - for_const ( auto object , _stopping ) { <nl> - _objects . remove ( object ) ; <nl> + if ( ! _stopping . empty ( ) ) { <nl> + for ( const auto object : _stopping ) { <nl> + _objects . erase ( object ) ; <nl> } <nl> _stopping . clear ( ) ; <nl> } <nl> void AnimationManager : : timeout ( ) { <nl> } <nl> } <nl> <nl> - void AnimationManager : : clipCallback ( Media : : Clip : : Reader * reader , qint32 threadIndex , qint32 notification ) { <nl> - Media : : Clip : : Reader : : callback ( reader , threadIndex , Media : : Clip : : Notification ( notification ) ) ; <nl> + void AnimationManager : : clipCallback ( <nl> + Media : : Clip : : Reader * reader , <nl> + qint32 threadIndex , <nl> + qint32 notification ) { <nl> + Media : : Clip : : Reader : : callback ( <nl> + reader , <nl> + threadIndex , <nl> + Media : : Clip : : Notification ( notification ) ) ; <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / ui / animation . h <nl> ppp b / Telegram / SourceFiles / ui / animation . h <nl> For license and copyright information please follow this link : <nl> <nl> # include < QtCore / QTimer > <nl> # include < QtGui / QColor > <nl> + # include " base / binary_guard . h " <nl> + # include " base / flat_set . 
h " <nl> <nl> namespace Media { <nl> namespace Clip { <nl> class value { <nl> <nl> void startManager ( ) ; <nl> void stopManager ( ) ; <nl> - void registerClipManager ( Media : : Clip : : Manager * manager ) ; <nl> + void registerClipManager ( not_null < Media : : Clip : : Manager * > manager ) ; <nl> <nl> TG_FORCE_INLINE int interpolate ( int a , int b , float64 b_ratio ) { <nl> return qRound ( a + float64 ( b - a ) * b_ratio ) ; <nl> QPainterPath path ( QPointF ( & from ) [ N ] ) { <nl> bool Disabled ( ) ; <nl> void SetDisabled ( bool disabled ) ; <nl> <nl> + void DrawStaticLoading ( <nl> + QPainter & p , <nl> + QRectF rect , <nl> + int stroke , <nl> + QPen pen , <nl> + QBrush brush = Qt : : NoBrush ) ; <nl> + <nl> } ; <nl> <nl> class BasicAnimation ; <nl> class Animation { <nl> struct Data { <nl> template < typename Lambda > <nl> Data ( float64 from , Lambda updateCallback ) <nl> - : value ( from , from ) <nl> - , a_animation ( animation ( this , & Data : : step ) ) <nl> - , updateCallback ( std : : move ( updateCallback ) ) { <nl> + : value ( from , from ) <nl> + , a_animation ( animation ( this , & Data : : step ) ) <nl> + , updateCallback ( std : : move ( updateCallback ) ) { <nl> } <nl> void step ( float64 ms , bool timer ) { <nl> - auto dt = ( ms > = duration | | anim : : Disabled ( ) ) ? 1 . : ( ms / duration ) ; <nl> + const auto dt = ( ms > = duration | | anim : : Disabled ( ) ) <nl> + ? 1 . <nl> + : ( ms / duration ) ; <nl> if ( dt > = 1 ) { <nl> value . finish ( ) ; <nl> a_animation . stop ( ) ; <nl> class Animation { <nl> } ; <nl> <nl> class AnimationManager : public QObject { <nl> - Q_OBJECT <nl> - <nl> public : <nl> AnimationManager ( ) ; <nl> <nl> void start ( BasicAnimation * obj ) ; <nl> void stop ( BasicAnimation * obj ) ; <nl> <nl> - public slots : <nl> - void timeout ( ) ; <nl> - <nl> - void clipCallback ( Media : : Clip : : Reader * reader , qint32 threadIndex , qint32 notification ) ; <nl> + void registerClip ( not_null < Media : : Clip : : Manager * > clip ) ; <nl> + void step ( ) ; <nl> <nl> private : <nl> - using AnimatingObjects = OrderedSet < BasicAnimation * > ; <nl> - AnimatingObjects _objects , _starting , _stopping ; <nl> + void clipCallback ( <nl> + Media : : Clip : : Reader * reader , <nl> + qint32 threadIndex , <nl> + qint32 notification ) ; <nl> + <nl> + base : : flat_set < BasicAnimation * > _objects , _starting , _stopping ; <nl> QTimer _timer ; <nl> - bool _iterating ; <nl> + bool _iterating = false ; <nl> <nl> } ; <nl> mmm a / Telegram / SourceFiles / ui / effects / cross_animation . cpp <nl> ppp b / Telegram / SourceFiles / ui / effects / cross_animation . 
cpp <nl> namespace Ui { <nl> namespace { <nl> <nl> constexpr auto kPointCount = 12 ; <nl> + constexpr auto kStaticLoadingValue = float64 ( - 666 ) ; <nl> <nl> / / <nl> / / 1 3 <nl> void transformLoadingCross ( float64 loading , std : : array < QPointF , kPointCount > & po <nl> <nl> } / / namespace <nl> <nl> - void CrossAnimation : : paint ( Painter & p , const style : : CrossAnimation & st , style : : color color , int x , int y , int outerWidth , float64 shown , float64 loading ) { <nl> + void CrossAnimation : : paintStaticLoading ( <nl> + Painter & p , <nl> + const style : : CrossAnimation & st , <nl> + style : : color color , <nl> + int x , <nl> + int y , <nl> + int outerWidth , <nl> + float64 shown ) { <nl> + paint ( p , st , color , x , y , outerWidth , shown , kStaticLoadingValue ) ; <nl> + } <nl> + <nl> + void CrossAnimation : : paint ( <nl> + Painter & p , <nl> + const style : : CrossAnimation & st , <nl> + style : : color color , <nl> + int x , <nl> + int y , <nl> + int outerWidth , <nl> + float64 shown , <nl> + float64 loading ) { <nl> PainterHighQualityEnabler hq ( p ) ; <nl> <nl> auto sqrt2 = sqrt ( 2 . ) ; <nl> void CrossAnimation : : paint ( Painter & p , const style : : CrossAnimation & st , style : : c <nl> } } ; <nl> auto pathDeleteSize = kPointCount ; <nl> <nl> - auto loadingArcLength = 0 ; <nl> + const auto staticLoading = ( loading = = kStaticLoadingValue ) ; <nl> + auto loadingArcLength = staticLoading ? FullArcLength : 0 ; <nl> if ( loading > 0 . ) { <nl> transformLoadingCross ( loading , pathDelete , pathDeleteSize ) ; <nl> <nl> void CrossAnimation : : paint ( Painter & p , const style : : CrossAnimation & st , style : : c <nl> loadingArcLength = qRound ( - loadingArc * 2 * FullArcLength ) ; <nl> } <nl> <nl> - if ( shown < 1 . ) { <nl> - auto alpha = - ( shown - 1 . ) * M_PI_2 ; <nl> - auto cosalpha = cos ( alpha ) ; <nl> - auto sinalpha = sin ( alpha ) ; <nl> - auto shiftx = deleteLeft + ( deleteWidth / 2 . ) ; <nl> - auto shifty = deleteTop + ( deleteHeight / 2 . ) ; <nl> - for ( auto & point : pathDelete ) { <nl> - auto x = point . x ( ) - shiftx ; <nl> - auto y = point . y ( ) - shifty ; <nl> - point . setX ( shiftx + x * cosalpha - y * sinalpha ) ; <nl> - point . setY ( shifty + y * cosalpha + x * sinalpha ) ; <nl> + if ( ! staticLoading ) { <nl> + if ( shown < 1 . ) { <nl> + auto alpha = - ( shown - 1 . ) * M_PI_2 ; <nl> + auto cosalpha = cos ( alpha ) ; <nl> + auto sinalpha = sin ( alpha ) ; <nl> + auto shiftx = deleteLeft + ( deleteWidth / 2 . ) ; <nl> + auto shifty = deleteTop + ( deleteHeight / 2 . ) ; <nl> + for ( auto & point : pathDelete ) { <nl> + auto x = point . x ( ) - shiftx ; <nl> + auto y = point . y ( ) - shifty ; <nl> + point . setX ( shiftx + x * cosalpha - y * sinalpha ) ; <nl> + point . setY ( shifty + y * cosalpha + x * sinalpha ) ; <nl> + } <nl> } <nl> + QPainterPath path ; <nl> + path . moveTo ( pathDelete [ 0 ] ) ; <nl> + for ( int i = 1 ; i ! = pathDeleteSize ; + + i ) { <nl> + path . lineTo ( pathDelete [ i ] ) ; <nl> + } <nl> + path . lineTo ( pathDelete [ 0 ] ) ; <nl> + p . fillPath ( path , color ) ; <nl> } <nl> - QPainterPath path ; <nl> - path . moveTo ( pathDelete [ 0 ] ) ; <nl> - for ( int i = 1 ; i ! = pathDeleteSize ; + + i ) { <nl> - path . lineTo ( pathDelete [ i ] ) ; <nl> - } <nl> - path . lineTo ( pathDelete [ 0 ] ) ; <nl> - p . fillPath ( path , color ) ; <nl> - <nl> if ( loadingArcLength ! = 0 ) { <nl> - auto loadingArcStart = FullArcLength / 8 ; <nl> auto roundSkip = ( st . 
size * ( 1 - sqrt2 ) + 2 * sqrt2 * deleteSkip + st . stroke ) / 2 ; <nl> auto roundPart = QRectF ( x + roundSkip , y + roundSkip , st . size - 2 * roundSkip , st . size - 2 * roundSkip ) ; <nl> - if ( shown < 1 . ) { <nl> - loadingArcStart - = qRound ( - ( shown - 1 . ) * FullArcLength / 4 . ) ; <nl> - } <nl> - p . setBrush ( Qt : : NoBrush ) ; <nl> - auto pen = color - > p ; <nl> - pen . setWidthF ( st . stroke ) ; <nl> - pen . setCapStyle ( Qt : : RoundCap ) ; <nl> - p . setPen ( pen ) ; <nl> - if ( loadingArcLength < 0 ) { <nl> - loadingArcStart + = loadingArcLength ; <nl> - loadingArcLength = - loadingArcLength ; <nl> + if ( staticLoading ) { <nl> + anim : : DrawStaticLoading ( p , roundPart , st . stroke , color ) ; <nl> + } else { <nl> + auto loadingArcStart = FullArcLength / 8 ; <nl> + if ( shown < 1 . ) { <nl> + loadingArcStart - = qRound ( - ( shown - 1 . ) * FullArcLength / 4 . ) ; <nl> + } <nl> + if ( loadingArcLength < 0 ) { <nl> + loadingArcStart + = loadingArcLength ; <nl> + loadingArcLength = - loadingArcLength ; <nl> + } <nl> + <nl> + p . setBrush ( Qt : : NoBrush ) ; <nl> + auto pen = color - > p ; <nl> + pen . setWidthF ( st . stroke ) ; <nl> + pen . setCapStyle ( Qt : : RoundCap ) ; <nl> + p . setPen ( pen ) ; <nl> + p . drawArc ( roundPart , loadingArcStart , loadingArcLength ) ; <nl> } <nl> - p . drawArc ( roundPart , loadingArcStart , loadingArcLength ) ; <nl> } <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / ui / effects / cross_animation . h <nl> ppp b / Telegram / SourceFiles / ui / effects / cross_animation . h <nl> namespace Ui { <nl> <nl> class CrossAnimation { <nl> public : <nl> - static void paint ( Painter & p , const style : : CrossAnimation & st , style : : color color , int x , int y , int outerWidth , float64 shown , float64 loading = 0 . ) ; <nl> + static void paint ( <nl> + Painter & p , <nl> + const style : : CrossAnimation & st , <nl> + style : : color color , <nl> + int x , <nl> + int y , <nl> + int outerWidth , <nl> + float64 shown , <nl> + float64 loading = 0 . ) ; <nl> + static void paintStaticLoading ( <nl> + Painter & p , <nl> + const style : : CrossAnimation & st , <nl> + style : : color color , <nl> + int x , <nl> + int y , <nl> + int outerWidth , <nl> + float64 shown ) ; <nl> <nl> } ; <nl> <nl> mmm a / Telegram / SourceFiles / ui / effects / radial_animation . cpp <nl> ppp b / Telegram / SourceFiles / ui / effects / radial_animation . cpp <nl> void RadialAnimation : : start ( float64 prg ) { <nl> _animation . start ( ) ; <nl> } <nl> <nl> - void RadialAnimation : : update ( float64 prg , bool finished , TimeMs ms ) { <nl> - auto iprg = qRound ( qMax ( prg , 0 . 0001 ) * AlmostFullArcLength ) ; <nl> - if ( iprg ! = qRound ( a_arcEnd . to ( ) ) ) { <nl> + bool RadialAnimation : : update ( float64 prg , bool finished , TimeMs ms ) { <nl> + const auto iprg = qRound ( qMax ( prg , 0 . 0001 ) * AlmostFullArcLength ) ; <nl> + const auto result = ( iprg ! = qRound ( a_arcEnd . to ( ) ) ) ; <nl> + if ( result ) { <nl> a_arcEnd . start ( iprg ) ; <nl> _lastStart = _lastTime ; <nl> } <nl> void RadialAnimation : : update ( float64 prg , bool finished , TimeMs ms ) { <nl> auto dt = float64 ( ms - _lastStart ) ; <nl> auto fulldt = float64 ( ms - _firstStart ) ; <nl> _opacity = qMin ( fulldt / st : : radialDuration , 1 . ) ; <nl> - if ( ! finished ) { <nl> + if ( anim : : Disabled ( ) ) { <nl> + a_arcEnd . update ( 1 . , anim : : linear ) ; <nl> + if ( finished ) { <nl> + stop ( ) ; <nl> + } <nl> + } else if ( ! finished ) { <nl> a_arcEnd . update ( 1 . 
- ( st : : radialDuration / ( st : : radialDuration + dt ) ) , anim : : linear ) ; <nl> } else if ( dt > = st : : radialDuration ) { <nl> a_arcEnd . update ( 1 . , anim : : linear ) ; <nl> void RadialAnimation : : update ( float64 prg , bool finished , TimeMs ms ) { <nl> } <nl> auto fromstart = fulldt / st : : radialPeriod ; <nl> a_arcStart . update ( fromstart - std : : floor ( fromstart ) , anim : : linear ) ; <nl> + return result ; <nl> } <nl> <nl> void RadialAnimation : : stop ( ) { <nl> void RadialAnimation : : draw ( Painter & p , const QRect & inner , int32 thickness , styl <nl> p . setPen ( pen ) ; <nl> <nl> auto len = MinArcLength + qRound ( a_arcEnd . current ( ) ) ; <nl> - auto from = QuarterArcLength - qRound ( a_arcStart . current ( ) ) - len ; <nl> + auto from = QuarterArcLength <nl> + - len <nl> + - ( anim : : Disabled ( ) ? 0 : qRound ( a_arcStart . current ( ) ) ) ; <nl> if ( rtl ( ) ) { <nl> from = QuarterArcLength - ( from - QuarterArcLength ) - len ; <nl> if ( from < 0 ) from + = FullArcLength ; <nl> void InfiniteRadialAnimation : : start ( ) { <nl> <nl> void InfiniteRadialAnimation : : stop ( ) { <nl> const auto now = getms ( ) ; <nl> + if ( anim : : Disabled ( ) ) { <nl> + _workFinished = now ; <nl> + } <nl> if ( ! _workFinished ) { <nl> const auto zero = _workStarted - _st . sineDuration ; <nl> const auto index = ( now - zero + _st . sinePeriod - _st . sineShift ) <nl> void InfiniteRadialAnimation : : draw ( <nl> auto o = p . opacity ( ) ; <nl> p . setOpacity ( o * state . shown ) ; <nl> <nl> - auto pen = _st . color - > p ; <nl> - auto was = p . pen ( ) ; <nl> - pen . setWidth ( _st . thickness ) ; <nl> - pen . setCapStyle ( Qt : : RoundCap ) ; <nl> - p . setPen ( pen ) ; <nl> + const auto rect = rtlrect ( <nl> + position . x ( ) , <nl> + position . y ( ) , <nl> + size . width ( ) , <nl> + size . height ( ) , <nl> + outerWidth ) ; <nl> + const auto was = p . pen ( ) ; <nl> + const auto brush = p . brush ( ) ; <nl> + if ( anim : : Disabled ( ) ) { <nl> + anim : : DrawStaticLoading ( p , rect , _st . thickness , _st . color ) ; <nl> + } else { <nl> + auto pen = _st . color - > p ; <nl> + pen . setWidth ( _st . thickness ) ; <nl> + pen . setCapStyle ( Qt : : RoundCap ) ; <nl> + p . setPen ( pen ) ; <nl> <nl> - { <nl> - PainterHighQualityEnabler hq ( p ) ; <nl> - p . drawArc ( <nl> - rtlrect ( <nl> - position . x ( ) , <nl> - position . y ( ) , <nl> - size . width ( ) , <nl> - size . height ( ) , <nl> - outerWidth ) , <nl> - state . arcFrom , <nl> - state . arcLength ) ; <nl> + { <nl> + PainterHighQualityEnabler hq ( p ) ; <nl> + p . drawArc ( <nl> + rect , <nl> + state . arcFrom , <nl> + state . arcLength ) ; <nl> + } <nl> } <nl> - <nl> p . setPen ( was ) ; <nl> + p . setBrush ( brush ) ; <nl> p . setOpacity ( o ) ; <nl> } <nl> <nl> auto InfiniteRadialAnimation : : computeState ( ) - > State { <nl> linear , <nl> FullArcLength } ; <nl> } <nl> + if ( anim : : Disabled ( ) ) { <nl> + const auto shown = 1 . ; <nl> + return { 1 . , 0 , FullArcLength } ; <nl> + } <nl> const auto min = int ( std : : round ( FullArcLength * _st . arcMin ) ) ; <nl> const auto max = int ( std : : round ( FullArcLength * _st . arcMax ) ) ; <nl> if ( now < = _workStarted ) { <nl> mmm a / Telegram / SourceFiles / ui / effects / radial_animation . h <nl> ppp b / Telegram / SourceFiles / ui / effects / radial_animation . 
h <nl> class RadialAnimation { <nl> } <nl> <nl> void start ( float64 prg ) ; <nl> - void update ( float64 prg , bool finished , TimeMs ms ) ; <nl> + bool update ( float64 prg , bool finished , TimeMs ms ) ; <nl> void stop ( ) ; <nl> <nl> void step ( TimeMs ms ) ; <nl> mmm a / Telegram / SourceFiles / ui / effects / send_action_animations . cpp <nl> ppp b / Telegram / SourceFiles / ui / effects / send_action_animations . cpp <nl> bool SendActionAnimation : : Impl : : supports ( Type type ) const { <nl> return Implementations - > value ( type , & TypingAnimation : : kMeta ) = = metaData ( ) ; <nl> } <nl> <nl> + void SendActionAnimation : : Impl : : paint ( <nl> + Painter & p , <nl> + style : : color color , <nl> + int x , <nl> + int y , <nl> + int outerWidth , <nl> + TimeMs ms ) { <nl> + paintFrame ( <nl> + p , <nl> + color , <nl> + x , <nl> + y , <nl> + outerWidth , <nl> + anim : : Disabled ( ) ? 0 : ( qMax ( ms - _started , 0LL ) % _period ) ) ; <nl> + } <nl> + <nl> + <nl> void SendActionAnimation : : start ( Type type ) { <nl> if ( ! _impl | | ! _impl - > supports ( type ) ) { <nl> _impl = createByType ( type ) ; <nl> mmm a / Telegram / SourceFiles / ui / effects / send_action_animations . h <nl> ppp b / Telegram / SourceFiles / ui / effects / send_action_animations . h <nl> class SendActionAnimation { <nl> bool supports ( Type type ) const ; <nl> <nl> virtual int width ( ) const = 0 ; <nl> - void paint ( Painter & p , style : : color color , int x , int y , int outerWidth , TimeMs ms ) { <nl> - paintFrame ( p , color , x , y , outerWidth , qMax ( ms - _started , 0LL ) % _period ) ; <nl> - } <nl> + void paint ( <nl> + Painter & p , <nl> + style : : color color , <nl> + int x , <nl> + int y , <nl> + int outerWidth , <nl> + TimeMs ms ) ; <nl> <nl> virtual ~ Impl ( ) = default ; <nl> <nl> mmm a / Telegram / SourceFiles / ui / special_buttons . cpp <nl> ppp b / Telegram / SourceFiles / ui / special_buttons . cpp <nl> void EmojiButton : : paintEvent ( QPaintEvent * e ) { <nl> p . fillRect ( e - > rect ( ) , st : : historyComposeAreaBg ) ; <nl> paintRipple ( p , _st . rippleAreaPosition . x ( ) , _st . rippleAreaPosition . y ( ) , ms , _rippleOverride ? & ( * _rippleOverride ) - > c : nullptr ) ; <nl> <nl> + const auto over = isOver ( ) ; <nl> const auto loadingState = _loading <nl> ? _loading - > computeState ( ) <nl> : Ui : : InfiniteRadialAnimation : : State { 0 . , 0 , FullArcLength } ; <nl> - p . setOpacity ( 1 . - loadingState . shown ) ; <nl> + if ( loadingState . shown < 1 . ) { <nl> + p . setOpacity ( 1 . - loadingState . shown ) ; <nl> <nl> - auto over = isOver ( ) ; <nl> - auto icon = _iconOverride ? _iconOverride : & ( over ? _st . iconOver : _st . icon ) ; <nl> - icon - > paint ( p , _st . iconPosition , width ( ) ) ; <nl> + auto icon = _iconOverride ? _iconOverride : & ( over ? _st . iconOver : _st . icon ) ; <nl> + icon - > paint ( p , _st . iconPosition , width ( ) ) ; <nl> <nl> - p . setOpacity ( 1 . ) ; <nl> - auto pen = _colorOverride ? ( * _colorOverride ) - > p : ( over ? st : : historyEmojiCircleFgOver : st : : historyEmojiCircleFg ) - > p ; <nl> - pen . setWidth ( st : : historyEmojiCircleLine ) ; <nl> - pen . setCapStyle ( Qt : : RoundCap ) ; <nl> - p . setPen ( pen ) ; <nl> - p . setBrush ( Qt : : NoBrush ) ; <nl> + p . setOpacity ( 1 . ) ; <nl> + } <nl> <nl> - PainterHighQualityEnabler hq ( p ) ; <nl> QRect inner ( QPoint ( ( width ( ) - st : : historyEmojiCircle . 
width ( ) ) / 2 , st : : historyEmojiCircleTop ) , st : : historyEmojiCircle ) ; <nl> - if ( loadingState . arcLength < FullArcLength ) { <nl> - p . drawArc ( inner , loadingState . arcFrom , loadingState . arcLength ) ; <nl> + const auto color = ( _colorOverride <nl> + ? * _colorOverride <nl> + : ( over <nl> + ? st : : historyEmojiCircleFgOver <nl> + : st : : historyEmojiCircleFg ) ) ; <nl> + if ( _loading & & anim : : Disabled ( ) ) { <nl> + anim : : DrawStaticLoading ( <nl> + p , <nl> + inner , <nl> + st : : historyEmojiCircleLine , <nl> + color ) ; <nl> } else { <nl> - p . drawEllipse ( inner ) ; <nl> + auto pen = color - > p ; <nl> + pen . setWidth ( st : : historyEmojiCircleLine ) ; <nl> + pen . setCapStyle ( Qt : : RoundCap ) ; <nl> + p . setPen ( pen ) ; <nl> + p . setBrush ( Qt : : NoBrush ) ; <nl> + <nl> + PainterHighQualityEnabler hq ( p ) ; <nl> + if ( loadingState . arcLength < FullArcLength ) { <nl> + p . drawArc ( inner , loadingState . arcFrom , loadingState . arcLength ) ; <nl> + } else { <nl> + p . drawEllipse ( inner ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void EmojiButton : : step_loading ( TimeMs ms , bool timer ) { <nl> + if ( timer & & ! anim : : Disabled ( ) ) { <nl> + update ( ) ; <nl> } <nl> } <nl> <nl> void EmojiButton : : setLoading ( bool loading ) { <nl> } <nl> if ( loading ) { <nl> _loading - > start ( ) ; <nl> + update ( ) ; <nl> } else if ( _loading ) { <nl> _loading - > stop ( ) ; <nl> + update ( ) ; <nl> } <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / ui / special_buttons . h <nl> ppp b / Telegram / SourceFiles / ui / special_buttons . h <nl> class EmojiButton : public RippleButton { <nl> QPoint prepareRippleStartPosition ( ) const override ; <nl> <nl> private : <nl> - void step_loading ( TimeMs ms , bool timer ) { <nl> - if ( timer ) { <nl> - update ( ) ; <nl> - } <nl> - } <nl> + void step_loading ( TimeMs ms , bool timer ) ; <nl> <nl> const style : : IconButton & _st ; <nl> <nl> mmm a / Telegram / SourceFiles / ui / twidget . h <nl> ppp b / Telegram / SourceFiles / ui / twidget . h <nl> class Painter : public QPainter { <nl> <nl> class PainterHighQualityEnabler { <nl> public : <nl> - PainterHighQualityEnabler ( Painter & p ) : _painter ( p ) { <nl> + PainterHighQualityEnabler ( QPainter & p ) : _painter ( p ) { <nl> static constexpr QPainter : : RenderHint Hints [ ] = { <nl> QPainter : : Antialiasing , <nl> QPainter : : SmoothPixmapTransform , <nl> class PainterHighQualityEnabler { <nl> QPainter : : HighQualityAntialiasing <nl> } ; <nl> <nl> - auto hints = _painter . renderHints ( ) ; <nl> - for_const ( auto hint , Hints ) { <nl> + const auto hints = _painter . renderHints ( ) ; <nl> + for ( const auto hint : Hints ) { <nl> if ( ! ( hints & hint ) ) { <nl> _hints | = hint ; <nl> } <nl> class PainterHighQualityEnabler { <nl> _painter . setRenderHints ( _hints ) ; <nl> } <nl> } <nl> - PainterHighQualityEnabler ( const PainterHighQualityEnabler & other ) = delete ; <nl> - PainterHighQualityEnabler & operator = ( const PainterHighQualityEnabler & other ) = delete ; <nl> + <nl> + PainterHighQualityEnabler ( <nl> + const PainterHighQualityEnabler & other ) = delete ; <nl> + PainterHighQualityEnabler & operator = ( <nl> + const PainterHighQualityEnabler & other ) = delete ; <nl> + <nl> ~ PainterHighQualityEnabler ( ) { <nl> if ( _hints ) { <nl> _painter . 
setRenderHints ( _hints , false ) ; <nl> class PainterHighQualityEnabler { <nl> } <nl> <nl> private : <nl> - Painter & _painter ; <nl> + QPainter & _painter ; <nl> QPainter : : RenderHints _hints = 0 ; <nl> <nl> } ; <nl> mmm a / Telegram / SourceFiles / ui / widgets / buttons . cpp <nl> ppp b / Telegram / SourceFiles / ui / widgets / buttons . cpp <nl> CrossButton : : CrossButton ( QWidget * parent , const style : : CrossButton & st ) : Ripple <nl> void CrossButton : : step_loading ( TimeMs ms , bool timer ) { <nl> if ( stopLoadingAnimation ( ms ) ) { <nl> _a_loading . stop ( ) ; <nl> - } <nl> - if ( timer ) { <nl> + update ( ) ; <nl> + } else if ( timer & & ! anim : : Disabled ( ) ) { <nl> update ( ) ; <nl> } <nl> } <nl> void CrossButton : : paintEvent ( QPaintEvent * e ) { <nl> if ( _a_loading . animating ( ) ) { <nl> if ( stopLoadingAnimation ( ms ) ) { <nl> _a_loading . stop ( ) ; <nl> + } else if ( anim : : Disabled ( ) ) { <nl> + CrossAnimation : : paintStaticLoading ( <nl> + p , <nl> + _st . cross , <nl> + over ? _st . crossFgOver : _st . crossFg , <nl> + _st . crossPosition . x ( ) , <nl> + _st . crossPosition . y ( ) , <nl> + width ( ) , <nl> + shown ) ; <nl> + return ; <nl> } else { <nl> - loading = ( ( ms - _loadingStartMs ) % _st . loadingPeriod ) / float64 ( _st . loadingPeriod ) ; <nl> + loading = ( ( ms - _loadingStartMs ) % _st . loadingPeriod ) <nl> + / float64 ( _st . loadingPeriod ) ; <nl> } <nl> } <nl> - CrossAnimation : : paint ( p , _st . cross , over ? _st . crossFgOver : _st . crossFg , _st . crossPosition . x ( ) , _st . crossPosition . y ( ) , width ( ) , shown , loading ) ; <nl> + CrossAnimation : : paint ( <nl> + p , <nl> + _st . cross , <nl> + over ? _st . crossFgOver : _st . crossFg , <nl> + _st . crossPosition . x ( ) , <nl> + _st . crossPosition . y ( ) , <nl> + width ( ) , <nl> + shown , <nl> + loading ) ; <nl> } <nl> <nl> bool CrossButton : : stopLoadingAnimation ( TimeMs ms ) { <nl> void CrossButton : : setLoadingAnimation ( bool enabled ) { <nl> _a_loading . stop ( ) ; <nl> } <nl> } <nl> + if ( anim : : Disabled ( ) ) { <nl> + update ( ) ; <nl> + } <nl> } <nl> <nl> void CrossButton : : onStateChanged ( State was , StateChangeSource source ) { <nl> mmm a / Telegram / SourceFiles / window / notifications_manager_default . cpp <nl> ppp b / Telegram / SourceFiles / window / notifications_manager_default . cpp <nl> void Widget : : opacityAnimationCallback ( ) { <nl> } <nl> <nl> void Widget : : step_shift ( float64 ms , bool timer ) { <nl> + if ( anim : : Disabled ( ) ) { <nl> + ms + = st : : notifyFastAnim ; <nl> + } <nl> float64 dt = ms / float64 ( st : : notifyFastAnim ) ; <nl> if ( dt > = 1 ) { <nl> a_shift . finish ( ) ; <nl> mmm a / Telegram / SourceFiles / window / window_connecting_widget . cpp <nl> ppp b / Telegram / SourceFiles / window / window_connecting_widget . cpp <nl> void Progress : : paintEvent ( QPaintEvent * e ) { <nl> } <nl> <nl> void Progress : : step ( TimeMs ms , bool timer ) { <nl> - if ( timer ) { <nl> + if ( timer & & ! anim : : Disabled ( ) ) { <nl> update ( ) ; <nl> } <nl> } <nl>
Finish disabling animations .
telegramdesktop/tdesktop
1ffbec0215f70f3a0a04b1a9c31513817f6c3e3b
2018-09-20T18:26:10Z
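The tdesktop patch above threads an `anim : : Disabled ( )` check through each animation step so a disabled animation snaps to its final value on the first tick and stops scheduling repaints. Below is a minimal standalone sketch of that pattern; `AnimationsDisabled`, `Value` and `step` are illustrative names, not the real tdesktop API.

```cpp
#include <iostream>

// Sketch of the pattern in the diff: when animations are globally disabled,
// a step jumps straight to the final value instead of interpolating.
static bool AnimationsDisabled = false;

struct Value {
    double from = 0.;
    double to = 1.;
    double current = 0.;
    bool finished = false;

    // ms: elapsed time; duration: total animation time.
    void step(double ms, double duration) {
        const double dt = (ms >= duration || AnimationsDisabled)
            ? 1.
            : (ms / duration);
        if (dt >= 1.) {
            current = to;     // snap to the end state
            finished = true;  // callers gating repaints on this stop updating
        } else {
            current = from + (to - from) * dt;
        }
    }
};

int main() {
    Value v;
    AnimationsDisabled = true;
    v.step(16., 200.);  // the first tick already completes the animation
    std::cout << v.current << ' ' << v.finished << '\n';  // prints "1 1"
}
```

The design point mirrored from the diff is that `step` clamps `dt` to 1 when animations are disabled, so timer-driven callers naturally stop repainting once `finished` is set.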
mmm a / modules / imgproc / src / imgwarp . cpp <nl> ppp b / modules / imgproc / src / imgwarp . cpp <nl> class ResizeAreaFastVec_SIMD_8u <nl> if ( cn = = 1 ) <nl> { <nl> __m128i masklow = _mm_set1_epi16 ( 0x00ff ) ; <nl> - for ( ; dx < w - 8 ; dx + = 8 , S0 + = 16 , S1 + = 16 , D + = 8 ) <nl> + for ( ; dx < = w - 8 ; dx + = 8 , S0 + = 16 , S1 + = 16 , D + = 8 ) <nl> { <nl> __m128i r0 = _mm_loadu_si128 ( ( const __m128i * ) S0 ) ; <nl> __m128i r1 = _mm_loadu_si128 ( ( const __m128i * ) S1 ) ; <nl> class ResizeAreaFastVec_SIMD_8u <nl> } <nl> } <nl> else if ( cn = = 3 ) <nl> - for ( ; dx < w - 6 ; dx + = 6 , S0 + = 12 , S1 + = 12 , D + = 6 ) <nl> + for ( ; dx < = w - 6 ; dx + = 6 , S0 + = 12 , S1 + = 12 , D + = 6 ) <nl> { <nl> __m128i r0 = _mm_loadu_si128 ( ( const __m128i * ) S0 ) ; <nl> __m128i r1 = _mm_loadu_si128 ( ( const __m128i * ) S1 ) ; <nl> class ResizeAreaFastVec_SIMD_8u <nl> else <nl> { <nl> CV_Assert ( cn = = 4 ) ; <nl> - for ( ; dx < w - 8 ; dx + = 8 , S0 + = 16 , S1 + = 16 , D + = 8 ) <nl> + for ( ; dx < = w - 8 ; dx + = 8 , S0 + = 16 , S1 + = 16 , D + = 8 ) <nl> { <nl> __m128i r0 = _mm_loadu_si128 ( ( const __m128i * ) S0 ) ; <nl> __m128i r1 = _mm_loadu_si128 ( ( const __m128i * ) S1 ) ; <nl> class ResizeAreaFastVec_SIMD_16u <nl> <nl> if ( cn = = 1 ) <nl> { <nl> - for ( ; dx < w - 4 ; dx + = 4 , S0 + = 8 , S1 + = 8 , D + = 4 ) <nl> + for ( ; dx < = w - 4 ; dx + = 4 , S0 + = 8 , S1 + = 8 , D + = 4 ) <nl> { <nl> __m128i r0 = _mm_loadu_si128 ( ( const __m128i * ) S0 ) ; <nl> __m128i r1 = _mm_loadu_si128 ( ( const __m128i * ) S1 ) ; <nl> class ResizeAreaFastVec_SIMD_16u <nl> } <nl> } <nl> else if ( cn = = 3 ) <nl> - for ( ; dx < w - 3 ; dx + = 3 , S0 + = 6 , S1 + = 6 , D + = 3 ) <nl> + for ( ; dx < = w - 3 ; dx + = 3 , S0 + = 6 , S1 + = 6 , D + = 3 ) <nl> { <nl> __m128i r0 = _mm_loadu_si128 ( ( const __m128i * ) S0 ) ; <nl> __m128i r1 = _mm_loadu_si128 ( ( const __m128i * ) S1 ) ; <nl> class ResizeAreaFastVec_SIMD_16u <nl> __m128i r1_16l = _mm_unpacklo_epi16 ( r1 , zero ) ; <nl> __m128i r1_16h = _mm_unpacklo_epi16 ( _mm_srli_si128 ( r1 , 6 ) , zero ) ; <nl> <nl> - __m128i s0 = _mm_add_epi16 ( r0_16l , r0_16h ) ; <nl> - __m128i s1 = _mm_add_epi16 ( r1_16l , r1_16h ) ; <nl> - s0 = _mm_add_epi32 ( s1 , _mm_add_epi32 ( s0 , delta2 ) ) ; <nl> + __m128i s0 = _mm_add_epi32 ( r0_16l , r0_16h ) ; <nl> + __m128i s1 = _mm_add_epi32 ( r1_16l , r1_16h ) ; <nl> + s0 = _mm_add_epi32 ( delta2 , _mm_add_epi32 ( s0 , s1 ) ) ; <nl> s0 = _mm_packus_epi32 ( _mm_srli_epi32 ( s0 , 2 ) , zero ) ; <nl> _mm_storel_epi64 ( ( __m128i * ) D , s0 ) ; <nl> } <nl> else <nl> { <nl> CV_Assert ( cn = = 4 ) ; <nl> - for ( ; dx < w - 4 ; dx + = 4 , S0 + = 8 , S1 + = 8 , D + = 4 ) <nl> + for ( ; dx < = w - 4 ; dx + = 4 , S0 + = 8 , S1 + = 8 , D + = 4 ) <nl> { <nl> __m128i r0 = _mm_loadu_si128 ( ( const __m128i * ) S0 ) ; <nl> __m128i r1 = _mm_loadu_si128 ( ( const __m128i * ) S1 ) ; <nl>
fixed a bug in the SSE2 version of resize with AREA interpolation
opencv/opencv
1bc76813f336b8536a2d1be82a810fb8b9164359
2012-12-31T11:35:40Z
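The fix above tightens the SSE2 loop bounds in `resize` from `dx < w - N` to `dx <= w - N` (and widens a 16-bit add to 32-bit in the 3-channel 16u path to avoid overflow). The boundary bug is easiest to see with plain counters standing in for the intrinsics; a hedged sketch:

```cpp
#include <cstdio>

// With `dx < w - 8`, a row whose width is an exact multiple of 8 leaves its
// last 8 pixels to the slower scalar tail; `dx <= w - 8` lets the vector
// loop take them. Plain counters stand in for the SSE2 intrinsics here.
int vectorized_count(int w, bool fixed) {
    int processed = 0;
    for (int dx = 0; fixed ? (dx <= w - 8) : (dx < w - 8); dx += 8)
        processed += 8;  // each "vector" iteration consumes 8 elements
    return processed;
}

int main() {
    // w = 16: the buggy bound vectorizes only 8 elements, the fix all 16.
    std::printf("buggy: %d, fixed: %d\n",
                vectorized_count(16, false),
                vectorized_count(16, true));
}
```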
mmm a / AUTHORS <nl> ppp b / AUTHORS <nl> a license to everyone to use it as detailed in LICENSE . ) <nl> * Holland Schutte < hgschutte1 @ gmail . com > <nl> * Kerby Geffrard < kerby . geffrard @ gmail . com > <nl> * cynecx < me @ cynecx . net > <nl> - <nl> + * Chris Gibson < cgibson @ mrvoxel . com > <nl> mmm a / src / library_glfw . js <nl> ppp b / src / library_glfw . js <nl> var LibraryGLFW = { <nl> this . id = id ; <nl> this . x = 0 ; <nl> this . y = 0 ; <nl> + this . fullscreen = false ; / / Used to determine if app in fullscreen mode <nl> this . storedX = 0 ; / / Used to store X before fullscreen <nl> this . storedY = 0 ; / / Used to store Y before fullscreen <nl> this . width = width ; <nl> var LibraryGLFW = { <nl> event . preventDefault ( ) ; <nl> } , <nl> <nl> - onFullScreenEventChange : function ( ) { <nl> + onCanvasResize : function ( width , height ) { <nl> if ( ! GLFW . active ) return ; <nl> <nl> + var resizeNeeded = true ; <nl> + <nl> + / / If the client is requestiong fullscreen mode <nl> if ( document [ " fullScreen " ] | | document [ " mozFullScreen " ] | | document [ " webkitIsFullScreen " ] ) { <nl> GLFW . active . storedX = GLFW . active . x ; <nl> GLFW . active . storedY = GLFW . active . y ; <nl> var LibraryGLFW = { <nl> GLFW . active . x = GLFW . active . y = 0 ; <nl> GLFW . active . width = screen . width ; <nl> GLFW . active . height = screen . height ; <nl> - } else { <nl> + GLFW . active . fullscreen = true ; <nl> + <nl> + / / If the client is reverting from fullscreen mode <nl> + } else if ( GLFW . active . fullscreen = = true ) { <nl> GLFW . active . x = GLFW . active . storedX ; <nl> GLFW . active . y = GLFW . active . storedY ; <nl> GLFW . active . width = GLFW . active . storedWidth ; <nl> GLFW . active . height = GLFW . active . storedHeight ; <nl> + GLFW . active . fullscreen = false ; <nl> + <nl> + / / If the width / height values do not match current active window sizes <nl> + } else if ( GLFW . active . width ! = width | | GLFW . active . height ! = height ) { <nl> + GLFW . active . width = width ; <nl> + GLFW . active . height = height ; <nl> + } else { <nl> + resizeNeeded = false ; <nl> + } <nl> + <nl> + / / If any of the above conditions were true , we need to resize the canvas <nl> + if ( resizeNeeded ) { <nl> + / / resets the canvas size to counter the aspect preservation of Browser . updateCanvasDimensions <nl> + Browser . setCanvasSize ( GLFW . active . width , GLFW . active . height ) ; <nl> + / / TODO : Client dimensions ( clientWidth / clientHeight ) vs pixel dimensions ( width / height ) of <nl> + / / the canvas should drive window and framebuffer size respectfully . <nl> + GLFW . onWindowSizeChanged ( ) ; <nl> + GLFW . onFramebufferSizeChanged ( ) ; <nl> } <nl> + } , <nl> <nl> - Browser . setCanvasSize ( GLFW . active . width , GLFW . active . height , true ) ; / / resets the canvas size to counter the aspect preservation of Browser . updateCanvasDimensions <nl> + onWindowSizeChanged : function ( ) { <nl> + if ( ! GLFW . active ) return ; <nl> <nl> if ( ! GLFW . active . windowSizeFunc ) return ; <nl> <nl> var LibraryGLFW = { <nl> # endif <nl> } , <nl> <nl> + onFramebufferSizeChanged : function ( ) { <nl> + if ( ! GLFW . active ) return ; <nl> + <nl> + if ( ! GLFW . active . framebufferSizeFunc ) return ; <nl> + <nl> + # if USE_GLFW = = 3 <nl> + Runtime . dynCall ( ' viii ' , GLFW . active . framebufferSizeFunc , [ GLFW . active . id , GLFW . active . width , GLFW . active . 
height ] ) ; <nl> + # endif <nl> + } , <nl> + <nl> requestFullScreen : function ( ) { <nl> var RFS = Module [ " canvas " ] [ ' requestFullscreen ' ] | | <nl> Module [ " canvas " ] [ ' requestFullScreen ' ] | | <nl> var LibraryGLFW = { <nl> Module [ " canvas " ] . addEventListener ( ' mousewheel ' , GLFW . onMouseWheel , true ) ; <nl> <nl> Browser . resizeListeners . push ( function ( width , height ) { <nl> - GLFW . onFullScreenEventChange ( ) ; <nl> + GLFW . onCanvasResize ( width , height ) ; <nl> } ) ; <nl> return 1 ; / / GL_TRUE <nl> } , <nl> var LibraryGLFW = { <nl> glfwSetFramebufferSizeCallback : function ( winid , cbfun ) { <nl> var win = GLFW . WindowFromId ( winid ) ; <nl> if ( ! win ) return ; <nl> - win . windowFramebufferSizeFunc = cbfun ; <nl> + win . framebufferSizeFunc = cbfun ; <nl> } , <nl> <nl> glfwGetInputMode : function ( winid , mode ) { <nl>
Merge remote - tracking branch ' remotes / cgibson / glfw - window - callback - fix ' into incoming
emscripten-core/emscripten
ee5c1f9b1a4a4e68231088a03b205ce73afa91b8
2016-05-07T09:01:06Z
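On the client side of this emscripten change, a GLFW 3 program can now rely on the window-size and framebuffer-size callbacks firing separately when the canvas resizes (the patch also fixes the field the framebuffer callback was stored in, `framebufferSizeFunc`). A minimal client registering both, using the standard GLFW 3 API:

```cpp
#include <GLFW/glfw3.h>
#include <cstdio>

// Standard GLFW 3 API (built with -s USE_GLFW=3 under emscripten). Window
// size and framebuffer size are distinct: on high-DPI displays the
// framebuffer is larger, and glViewport should follow the framebuffer.
static void onWindowSize(GLFWwindow*, int w, int h) {
    std::printf("window size: %dx%d\n", w, h);
}

static void onFramebufferSize(GLFWwindow*, int w, int h) {
    std::printf("framebuffer size: %dx%d\n", w, h);
}

int main() {
    if (!glfwInit())
        return 1;
    GLFWwindow* win = glfwCreateWindow(640, 480, "demo", nullptr, nullptr);
    if (!win)
        return 1;
    glfwSetWindowSizeCallback(win, onWindowSize);
    glfwSetFramebufferSizeCallback(win, onFramebufferSize);
    // Native event loop; under emscripten this would be handed to
    // emscripten_set_main_loop instead.
    while (!glfwWindowShouldClose(win))
        glfwPollEvents();
    glfwTerminate();
}
```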
mmm a / src / arch / runtime / coroutines . cc <nl> ppp b / src / arch / runtime / coroutines . cc <nl> void coro_t : : parse_coroutine_type ( const char * coroutine_function ) <nl> # endif <nl> <nl> coro_t * coro_t : : self ( ) { / * class method * / <nl> - return cglobals - > current_coro ; <nl> + return cglobals = = NULL ? NULL : cglobals - > current_coro ; <nl> } <nl> <nl> void coro_t : : wait ( ) { / * class method * / <nl> mmm a / src / backtrace . cc <nl> ppp b / src / backtrace . cc <nl> <nl> # include " backtrace . hpp " <nl> <nl> # include < cxxabi . h > <nl> + # include < execinfo . h > <nl> # include < stdio . h > <nl> # include < string . h > <nl> # include < unistd . h > <nl> <nl> # include < string > <nl> <nl> # include " errors . hpp " <nl> - # include < boost / tokenizer . hpp > <nl> # include < boost / ptr_container / ptr_map . hpp > <nl> <nl> # include " containers / scoped . hpp " <nl> # include " logger . hpp " <nl> + # include " thread_stack_pcs . hpp " <nl> # include " utils . hpp " <nl> <nl> static bool parse_backtrace_line ( char * line , char * * filename , char * * function , char * * offset , char * * address ) { <nl> std : : string print_frames ( void * * stack_frames , int size , bool use_addr2line ) { <nl> } <nl> <nl> lazy_backtrace_t : : lazy_backtrace_t ( ) : timestamp ( time ( 0 ) ) , timestr ( time2str ( timestamp ) ) { <nl> - size = backtrace ( stack_frames , max_frames ) ; <nl> + size = rethinkdb_backtrace ( stack_frames , max_frames ) ; <nl> } <nl> <nl> std : : string lazy_backtrace_t : : addrs ( ) { <nl> mmm a / src / backtrace . hpp <nl> ppp b / src / backtrace . hpp <nl> <nl> # ifndef BACKTRACE_HPP_ <nl> # define BACKTRACE_HPP_ <nl> <nl> - # include < execinfo . h > <nl> # include < stdio . h > <nl> # include < time . h > <nl> <nl> new file mode 100644 <nl> index 00000000000 . . 6a9d13e1fb9 <nl> mmm / dev / null <nl> ppp b / src / thread_stack_pcs . cc <nl> <nl> + / * <nl> + * Copyright ( c ) 1999 , 2007 Apple Inc . All rights reserved . <nl> + * Some parts Copyright ( c ) 2013 RethinkDB . <nl> + * <nl> + * @ APPLE_LICENSE_HEADER_START @ <nl> + * <nl> + * This file contains Original Code and / or Modifications of Original Code <nl> + * as defined in and that are subject to the Apple Public Source License <nl> + * Version 2 . 0 ( the ' License ' ) . You may not use this file except in <nl> + * compliance with the License . Please obtain a copy of the License at <nl> + * http : / / www . opensource . apple . com / apsl / and read it before using this <nl> + * file . <nl> + * <nl> + * The Original Code and all software distributed under the License are <nl> + * distributed on an ' AS IS ' basis , WITHOUT WARRANTY OF ANY KIND , EITHER <nl> + * EXPRESS OR IMPLIED , AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES , <nl> + * INCLUDING WITHOUT LIMITATION , ANY WARRANTIES OF MERCHANTABILITY , <nl> + * FITNESS FOR A PARTICULAR PURPOSE , QUIET ENJOYMENT OR NON - INFRINGEMENT . <nl> + * Please see the License for the specific language governing rights and <nl> + * limitations under the License . <nl> + * <nl> + * @ APPLE_LICENSE_HEADER_END @ <nl> + * <nl> + * Modified by RethinkDB to handle RethinkDB coroutines . Also moved <nl> + * rethinkdb_backtrace here ( renamed from backtrace , which was in OS X Libc ' s <nl> + * gen / backtrace . cc ) . <nl> + * / <nl> + <nl> + / * Bertrand from vmutils - > CF - > System * / <nl> + <nl> + # ifdef __MACH__ <nl> + <nl> + # include < pthread . h > <nl> + # include < mach / mach . h > <nl> + # include < mach / vm_statistics . 
h > <nl> + # include < stdlib . h > <nl> + <nl> + # include " arch / runtime / coroutines . hpp " <nl> + # include " arch / runtime / context_switching . hpp " <nl> + <nl> + int rethinkdb_backtrace ( void * * buffer , int size ) { <nl> + extern void _rethinkdb_thread_stack_pcs ( vm_address_t * buffer , unsigned max , unsigned * nb , unsigned skip ) ; <nl> + unsigned int num_frames ; <nl> + _rethinkdb_thread_stack_pcs ( ( vm_address_t * ) buffer , size , & num_frames , 1 ) ; <nl> + while ( num_frames > = 1 & & buffer [ num_frames - 1 ] = = NULL ) num_frames - = 1 ; <nl> + return num_frames ; <nl> + } <nl> + <nl> + <nl> + # if defined ( __i386__ ) | | defined ( __x86_64__ ) | | defined ( __arm__ ) <nl> + # define FP_LINK_OFFSET 1 <nl> + # elif defined ( __ppc__ ) | | defined ( __ppc64__ ) <nl> + # define FP_LINK_OFFSET 2 <nl> + # else <nl> + # error * * * * * * * * * * Unimplemented architecture <nl> + # endif <nl> + <nl> + # define INSTACK ( a ) ( ( a ) > = stackbot & & ( a ) < = stacktop ) <nl> + # if defined ( __ppc__ ) | | defined ( __ppc64__ ) | | defined ( __x86_64__ ) <nl> + # define ISALIGNED ( a ) ( ( ( ( uintptr_t ) ( a ) ) & 0xf ) = = 0 ) <nl> + # elif defined ( __arm__ ) <nl> + # define ISALIGNED ( a ) ( ( ( ( uintptr_t ) ( a ) ) & 0x1 ) = = 0 ) <nl> + # elif defined ( __i386__ ) <nl> + # define ISALIGNED ( a ) ( ( ( ( uintptr_t ) ( a ) ) & 0xf ) = = 8 ) <nl> + # endif <nl> + <nl> + __private_extern__ __attribute__ ( ( noinline ) ) <nl> + void <nl> + _rethinkdb_thread_stack_pcs ( vm_address_t * buffer , unsigned max , <nl> + unsigned * nb , unsigned skip ) <nl> + { <nl> + void * frame , * next ; <nl> + void * stacktop ; <nl> + void * stackbot ; <nl> + <nl> + { <nl> + coro_t * coro = coro_t : : self ( ) ; <nl> + if ( coro ! = NULL ) { <nl> + artificial_stack_t * stack = coro - > get_stack ( ) ; <nl> + stacktop = stack - > get_stack_base ( ) ; <nl> + stackbot = stack - > get_stack_bound ( ) ; <nl> + } else { <nl> + pthread_t self = pthread_self ( ) ; <nl> + stacktop = pthread_get_stackaddr_np ( self ) ; <nl> + stackbot = static_cast < char * > ( stacktop ) - pthread_get_stacksize_np ( self ) ; <nl> + } <nl> + } <nl> + <nl> + * nb = 0 ; <nl> + <nl> + / * make sure return address is never out of bounds * / <nl> + stacktop = static_cast < char * > ( stacktop ) - ( FP_LINK_OFFSET + 1 ) * sizeof ( void * ) ; <nl> + <nl> + / * <nl> + * The original implementation called the first_frame_address ( ) function , <nl> + * which returned the stack frame pointer . The problem was that in ppc , <nl> + * it was a leaf function , so no new stack frame was set up with <nl> + * optimization turned on ( while a new stack frame was set up without <nl> + * optimization ) . We now inline the code to get the stack frame pointer , <nl> + * so we are consistent about the stack frame . <nl> + * / <nl> + # if defined ( __i386__ ) | | defined ( __x86_64__ ) | | defined ( __arm__ ) <nl> + frame = __builtin_frame_address ( 0 ) ; <nl> + # elif defined ( __ppc__ ) | | defined ( __ppc64__ ) <nl> + / * __builtin_frame_address IS BROKEN IN BEAKER : RADAR # 2340421 * / <nl> + __asm__ volatile ( " mr % 0 , r1 " : " = r " ( frame ) ) ; <nl> + # endif <nl> + if ( ! INSTACK ( frame ) | | ! ISALIGNED ( frame ) ) <nl> + return ; <nl> + # if defined ( __ppc__ ) | | defined ( __ppc64__ ) <nl> + / * back up the stack pointer up over the current stack frame * / <nl> + next = * ( void * * ) frame ; <nl> + if ( ! INSTACK ( next ) | | ! 
ISALIGNED ( next ) | | next < = frame ) <nl> + return ; <nl> + frame = next ; <nl> + # endif <nl> + while ( skip - - ) { <nl> + next = * ( void * * ) frame ; <nl> + if ( ! INSTACK ( next ) | | ! ISALIGNED ( next ) | | next < = frame ) <nl> + return ; <nl> + frame = next ; <nl> + } <nl> + while ( max - - ) { <nl> + buffer [ * nb ] = * ( vm_address_t * ) ( ( ( void * * ) frame ) + FP_LINK_OFFSET ) ; <nl> + ( * nb ) + + ; <nl> + next = * ( void * * ) frame ; <nl> + if ( ! INSTACK ( next ) | | ! ISALIGNED ( next ) | | next < = frame ) <nl> + return ; <nl> + frame = next ; <nl> + } <nl> + } <nl> + <nl> + void <nl> + rethinkdb_thread_stack_pcs ( vm_address_t * buffer , unsigned max , unsigned * nb ) <nl> + { <nl> + _rethinkdb_thread_stack_pcs ( buffer , max , nb , 0 ) ; <nl> + <nl> + / / The following prevents thread_stack_pcs ( ) from getting tail - call - optimized into _thread_stack_pcs ( ) on 64 - bit environments , <nl> + / / thus making the " number of hot frames to skip " be more predictable , giving more consistent backtraces . <nl> + / / See < rdar : / / problem / 5364825 > " stack logging : frames keep getting truncated " for why this is necessary . <nl> + __asm__ volatile ( " " ) ; <nl> + } <nl> + <nl> + # else <nl> + int rethinkdb_backtrace ( void * * buffer , int size ) { <nl> + return backtrace ( buffer , size ) ; <nl> + } <nl> + # endif / / __MACH__ <nl> new file mode 100644 <nl> index 00000000000 . . 9a10f4df8f3 <nl> mmm / dev / null <nl> ppp b / src / thread_stack_pcs . hpp <nl> <nl> + # ifndef THREAD_STACK_PCS_HPP_ <nl> + # define THREAD_STACK_PCS_HPP_ <nl> + <nl> + # ifdef __MACH__ <nl> + int rethinkdb_backtrace ( void * * buffer , int size ) ; <nl> + # endif <nl> + <nl> + # endif / / THREAD_STACK_PCS_HPP_ <nl>
Added backtrace - in - coroutine support for OS X .
rethinkdb/rethinkdb
94e9966908d2dc365f318e288a3bd6e978425fa2
2013-05-08T22:51:58Z
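The OS X code above walks frame pointers manually so the walk can be bounded by the current coroutine's stack rather than the pthread stack; on other platforms `rethinkdb_backtrace` simply forwards to glibc's `backtrace`. A sketch of that fallback path using the real `<execinfo.h>` API:

```cpp
#include <execinfo.h>
#include <cstdio>
#include <cstdlib>

// The plain glibc path: capture up to 64 return addresses and symbolize them.
void print_backtrace() {
    void* frames[64];
    const int n = backtrace(frames, 64);
    char** symbols = backtrace_symbols(frames, n);
    if (symbols == nullptr)
        return;
    for (int i = 0; i < n; ++i)
        std::printf("#%d %s\n", i, symbols[i]);
    std::free(symbols);
}

int main() {
    print_backtrace();
}
```

glibc's `backtrace` itself unwinds only the OS thread's stack, which is exactly why the Mach path had to be rewritten for coroutines running on their own artificial stacks.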
mmm a / jstests / multiVersion / index_bigkeys_secondary_downgrade_during_index_build_background . js <nl> ppp b / jstests / multiVersion / index_bigkeys_secondary_downgrade_during_index_build_background . js <nl> <nl> primaryDB . runCommand ( { insert : collName , documents : documents , writeConcern : { w : 2 } } ) ) ; <nl> <nl> assert . commandWorked ( secondaryDB . adminCommand ( <nl> - { configureFailPoint : " hangAfterStartingIndexBuild " , mode : " alwaysOn " } ) ) ; <nl> + { configureFailPoint : " slowBackgroundIndexBuild " , mode : " alwaysOn " } ) ) ; <nl> <nl> / / Start the index build on the primary . <nl> assert . commandWorked ( primaryDB . runCommand ( <nl> <nl> <nl> / / Continue index build on the secondary . There should be no KeyTooLong error . <nl> assert . commandWorked ( <nl> - secondaryDB . adminCommand ( { configureFailPoint : " hangAfterStartingIndexBuild " , mode : " off " } ) ) ; <nl> + secondaryDB . adminCommand ( { configureFailPoint : " slowBackgroundIndexBuild " , mode : " off " } ) ) ; <nl> <nl> / / Make sure the index is successfully created . <nl> assert . soon ( ( ) = > { <nl> mmm a / jstests / noPassthrough / characterize_index_builds_on_restart . js <nl> ppp b / jstests / noPassthrough / characterize_index_builds_on_restart . js <nl> <nl> <nl> if ( isReplicaNode ) { <nl> assert . commandWorked ( hangDB . adminCommand ( <nl> - { configureFailPoint : " hangAfterStartingIndexBuild " , mode : " alwaysOn " } ) ) ; <nl> + { configureFailPoint : " slowBackgroundIndexBuild " , mode : " alwaysOn " } ) ) ; <nl> <nl> db . runCommand ( { <nl> createIndexes : collName , <nl> mmm a / jstests / noPassthrough / indexbg_drop . js <nl> ppp b / jstests / noPassthrough / indexbg_drop . js <nl> <nl> } <nl> assert . writeOK ( bulk . execute ( { w : 2 , wtimeout : replTest . kDefaultTimeoutMS } ) ) ; <nl> <nl> - assert . commandWorked ( secondDB . adminCommand ( <nl> - { configureFailPoint : " hangAfterStartingIndexBuild " , mode : " alwaysOn " } ) ) ; <nl> + assert . commandWorked ( <nl> + secondDB . adminCommand ( { configureFailPoint : " slowBackgroundIndexBuild " , mode : " alwaysOn " } ) ) ; <nl> <nl> jsTest . log ( " Starting background indexing for test of : " + tojson ( dc ) ) ; <nl> <nl> <nl> <nl> jsTest . log ( " Waiting on replication " ) ; <nl> assert . commandWorked ( <nl> - secondDB . adminCommand ( { configureFailPoint : " hangAfterStartingIndexBuild " , mode : " off " } ) ) ; <nl> + secondDB . adminCommand ( { configureFailPoint : " slowBackgroundIndexBuild " , mode : " off " } ) ) ; <nl> replTest . awaitReplication ( ) ; <nl> <nl> print ( " Index list on master : " ) ; <nl> mmm a / src / mongo / db / catalog / multi_index_block . cpp <nl> ppp b / src / mongo / db / catalog / multi_index_block . cpp <nl> const StringData kCommitReadyMembersFieldName = " commitReadyMembers " _sd ; <nl> MONGO_FAIL_POINT_DEFINE ( crashAfterStartingIndexBuild ) ; <nl> MONGO_FAIL_POINT_DEFINE ( hangAfterStartingIndexBuild ) ; <nl> MONGO_FAIL_POINT_DEFINE ( hangAfterStartingIndexBuildUnlocked ) ; <nl> + MONGO_FAIL_POINT_DEFINE ( slowBackgroundIndexBuild ) ; <nl> MONGO_FAIL_POINT_DEFINE ( hangBeforeIndexBuildOf ) ; <nl> MONGO_FAIL_POINT_DEFINE ( hangAfterIndexBuildOf ) ; <nl> <nl> Status MultiIndexBlock : : insertAllDocumentsInCollection ( ) { <nl> if ( _allowInterruption & & ! _opCtx - > checkForInterruptNoAssert ( ) . isOK ( ) ) <nl> return _opCtx - > checkForInterruptNoAssert ( ) ; <nl> <nl> - if ( ! retries & & PlanExecutor : : ADVANCED ! = state ) { <nl> + if ( ! 
( retries | | PlanExecutor : : ADVANCED = = state ) | | <nl> + MONGO_FAIL_POINT ( slowBackgroundIndexBuild ) ) { <nl> + log ( ) < < " Hanging index build due to failpoint " ; <nl> + invariant ( _allowInterruption ) ; <nl> + sleepmillis ( 1000 ) ; <nl> continue ; <nl> } <nl> <nl> / / Make sure we are working with the latest version of the document . <nl> if ( objToIndex . snapshotId ( ) ! = _opCtx - > recoveryUnit ( ) - > getSnapshotId ( ) & & <nl> ! _collection - > findDoc ( _opCtx , loc , & objToIndex ) ) { <nl> - / / Document was deleted so don ' t index it . <nl> + / / doc was deleted so don ' t index it . <nl> retries = 0 ; <nl> continue ; <nl> } <nl>
Revert " SERVER - 37498 remove slowBackgroundIndexBuild failpoint "
mongodb/mongo
110d33066a44bd77536c5cc34a44ecf46dd3d31e
2018-12-31T18:53:38Z
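The revert restores the `slowBackgroundIndexBuild` fail point: a named, runtime-togglable switch that tests flip (via `configureFailPoint` in the jstests above) to slow or pause a code path such as a background index build. A deliberately simplified sketch of the idea; MongoDB's real `FailPoint` class is richer (modes, counters, scoped blocks), and the names here are illustrative:

```cpp
#include <atomic>
#include <chrono>
#include <map>
#include <string>
#include <thread>

// Illustrative fail-point registry: a named switch a test can flip at runtime.
static std::map<std::string, std::atomic<bool>> g_failPoints;

bool failPointEnabled(const std::string& name) {
    const auto it = g_failPoints.find(name);
    return it != g_failPoints.end() && it->second.load();
}

void setFailPoint(const std::string& name, bool on) {
    g_failPoints[name] = on;  // operator[] default-creates the switch as off
}

void indexOneBatch() {
    while (failPointEnabled("slowBackgroundIndexBuild")) {
        // mirror the diff: hang here, polling once a second, until a test
        // turns the fail point off again
        std::this_thread::sleep_for(std::chrono::seconds(1));
    }
    // ... index the next batch of documents ...
}

int main() {
    setFailPoint("slowBackgroundIndexBuild", false);
    indexOneBatch();  // runs straight through while the point is off
}
```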
mmm a / tensorflow / compiler / xla / client / xla_builder . cc <nl> ppp b / tensorflow / compiler / xla / client / xla_builder . cc <nl> XlaOp XlaBuilder : : GetTupleElement ( const XlaOp & tuple_data , int64 index ) { <nl> " Operand to GetTupleElement ( ) is not a tuple ; got % s " , <nl> ShapeUtil : : HumanString ( tuple_shape ) ) ; <nl> } <nl> + if ( index < 0 | | index > = ShapeUtil : : TupleElementCount ( tuple_shape ) ) { <nl> + return InvalidArgument ( <nl> + " GetTupleElement ( ) index ( % d ) out of range for tuple shape % s " , index , <nl> + ShapeUtil : : HumanString ( tuple_shape ) ) ; <nl> + } <nl> * instr . mutable_shape ( ) = <nl> ShapeUtil : : GetTupleElementShape ( tuple_shape , index ) . ToProto ( ) ; <nl> <nl>
[ XLA ] Fail gracefully if an out - of - range tuple index is passed to GetTupleElement ( ) .
tensorflow/tensorflow
e7db8af8ba1c26f3ba45a0d63f3701f9a4b1be1f
2019-07-30T19:38:14Z
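The XLA change validates the tuple index before use and returns `InvalidArgument` instead of building a malformed instruction. The same guard in generic form, with exceptions standing in for XLA's `Status` plumbing:

```cpp
#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

// Reject `index` before touching the container, with a descriptive error.
int getTupleElement(const std::vector<int>& tuple, long index) {
    if (index < 0 || index >= static_cast<long>(tuple.size())) {
        throw std::invalid_argument(
            "GetTupleElement() index (" + std::to_string(index) +
            ") out of range for tuple of size " + std::to_string(tuple.size()));
    }
    return tuple[static_cast<std::size_t>(index)];
}

int main() {
    const std::vector<int> t{10, 20, 30};
    std::cout << getTupleElement(t, 1) << '\n';  // 20
    try {
        getTupleElement(t, 5);                   // rejected, not undefined behavior
    } catch (const std::exception& e) {
        std::cout << e.what() << '\n';
    }
}
```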
mmm a / docs / api / python / ndarray / ndarray . md <nl> ppp b / docs / api / python / ndarray / ndarray . md <nl> The ` ndarray ` package provides several classes : <nl> . . autosummary : : <nl> : nosignatures : <nl> <nl> - sample_uniform <nl> - sample_normal <nl> - sample_gamma <nl> - sample_exponential <nl> - sample_poisson <nl> - sample_negative_binomial <nl> - sample_generalized_negative_binomial <nl> + mxnet . nd . random . uniform <nl> + mxnet . nd . random . normal <nl> + mxnet . nd . random . gamma <nl> + mxnet . nd . random . exponential <nl> + mxnet . nd . random . poisson <nl> + mxnet . nd . random . negative_binomial <nl> + mxnet . nd . random . generalized_negative_binomial <nl> mxnet . random . seed <nl> ` ` ` <nl> <nl> The ` ndarray ` package provides several classes : <nl> argsort <nl> argmax <nl> argmin <nl> - argmax_channel <nl> ` ` ` <nl> <nl> # # # Sequence operation <nl> mmm a / docs / api / python / symbol / symbol . md <nl> ppp b / docs / api / python / symbol / symbol . md <nl> Composite multiple symbols into a new one by an operator . <nl> . . autosummary : : <nl> : nosignatures : <nl> <nl> - sample_uniform <nl> - sample_normal <nl> - sample_gamma <nl> - sample_exponential <nl> - sample_poisson <nl> - sample_negative_binomial <nl> - sample_generalized_negative_binomial <nl> + mxnet . sym . random . uniform <nl> + mxnet . sym . random . normal <nl> + mxnet . sym . random . gamma <nl> + mxnet . sym . random . exponential <nl> + mxnet . sym . random . poisson <nl> + mxnet . sym . random . negative_binomial <nl> + mxnet . sym . random . generalized_negative_binomial <nl> mxnet . random . seed <nl> ` ` ` <nl> <nl> Composite multiple symbols into a new one by an operator . <nl> argsort <nl> argmax <nl> argmin <nl> - argmax_channel <nl> ` ` ` <nl> <nl> # # # Sequence operation <nl> mmm a / python / mxnet / gluon / rnn / rnn_layer . py <nl> ppp b / python / mxnet / gluon / rnn / rnn_layer . py <nl> class RNN ( _RNNLayer ) : <nl> mmmmmm - - <nl> > > > layer = mx . gluon . rnn . RNN ( 100 , 3 ) <nl> > > > layer . initialize ( ) <nl> - > > > input = mx . nd . random_uniform ( shape = ( 5 , 3 , 10 ) ) <nl> + > > > input = mx . nd . random . uniform ( shape = ( 5 , 3 , 10 ) ) <nl> > > > # by default zeros are used as begin state <nl> > > > output = layer ( input ) <nl> > > > # manually specify begin state . <nl> - > > > h0 = mx . nd . random_uniform ( shape = ( 3 , 3 , 100 ) ) <nl> + > > > h0 = mx . nd . random . uniform ( shape = ( 3 , 3 , 100 ) ) <nl> > > > output , hn = layer ( input , h0 ) <nl> " " " <nl> def __init__ ( self , hidden_size , num_layers = 1 , activation = ' relu ' , <nl> class LSTM ( _RNNLayer ) : <nl> mmmmmm - - <nl> > > > layer = mx . gluon . rnn . LSTM ( 100 , 3 ) <nl> > > > layer . initialize ( ) <nl> - > > > input = mx . nd . random_uniform ( shape = ( 5 , 3 , 10 ) ) <nl> + > > > input = mx . nd . random . uniform ( shape = ( 5 , 3 , 10 ) ) <nl> > > > # by default zeros are used as begin state <nl> > > > output = layer ( input ) <nl> > > > # manually specify begin state . <nl> - > > > h0 = mx . nd . random_uniform ( shape = ( 3 , 3 , 100 ) ) <nl> - > > > c0 = mx . nd . random_uniform ( shape = ( 3 , 3 , 100 ) ) <nl> + > > > h0 = mx . nd . random . uniform ( shape = ( 3 , 3 , 100 ) ) <nl> + > > > c0 = mx . nd . random . 
uniform ( shape = ( 3 , 3 , 100 ) ) <nl> > > > output , hn = layer ( input , [ h0 , c0 ] ) <nl> " " " <nl> def __init__ ( self , hidden_size , num_layers = 1 , layout = ' TNC ' , <nl> class GRU ( _RNNLayer ) : <nl> mmmmmm - - <nl> > > > layer = mx . gluon . rnn . GRU ( 100 , 3 ) <nl> > > > layer . initialize ( ) <nl> - > > > input = mx . nd . random_uniform ( shape = ( 5 , 3 , 10 ) ) <nl> + > > > input = mx . nd . random . uniform ( shape = ( 5 , 3 , 10 ) ) <nl> > > > # by default zeros are used as begin state <nl> > > > output = layer ( input ) <nl> > > > # manually specify begin state . <nl> - > > > h0 = mx . nd . random_uniform ( shape = ( 3 , 3 , 100 ) ) <nl> + > > > h0 = mx . nd . random . uniform ( shape = ( 3 , 3 , 100 ) ) <nl> > > > output , hn = layer ( input , h0 ) <nl> " " " <nl> def __init__ ( self , hidden_size , num_layers = 1 , layout = ' TNC ' , <nl>
fix random docs ( )
apache/incubator-mxnet
a8f79bc6aca2ac709ad2cdb3cfaa1fd2b7fdd650
2017-11-19T05:15:59Z
mmm a / xbmc / music / MusicDatabase . cpp <nl> ppp b / xbmc / music / MusicDatabase . cpp <nl> <nl> # include " settings / Settings . h " <nl> # include " utils / StringUtils . h " <nl> # include " guilib / LocalizeStrings . h " <nl> + # include " utils / LegacyPathTranslation . h " <nl> # include " utils / log . h " <nl> # include " utils / TimeUtils . h " <nl> # include " TextureCache . h " <nl> bool CMusicDatabase : : UpdateOldVersion ( int version ) <nl> CSettings : : Get ( ) . Save ( ) ; <nl> } <nl> <nl> + if ( version < 36 ) <nl> + { <nl> + / / translate legacy musicdb : / / paths <nl> + if ( m_pDS - > query ( " SELECT strPath FROM content " ) ) <nl> + { <nl> + vector < string > contentPaths ; <nl> + while ( ! m_pDS - > eof ( ) ) <nl> + { <nl> + contentPaths . push_back ( m_pDS - > fv ( 0 ) . get_asString ( ) ) ; <nl> + m_pDS - > next ( ) ; <nl> + } <nl> + m_pDS - > close ( ) ; <nl> + <nl> + for ( vector < string > : : const_iterator it = contentPaths . begin ( ) ; it ! = contentPaths . end ( ) ; it + + ) <nl> + { <nl> + std : : string originalPath = * it ; <nl> + std : : string path = CLegacyPathTranslation : : TranslateMusicDbPath ( originalPath ) ; <nl> + m_pDS - > exec ( PrepareSQL ( " UPDATE content SET strPath = ' % s ' WHERE strPath = ' % s ' " , path . c_str ( ) , originalPath . c_str ( ) ) . c_str ( ) ) ; <nl> + } <nl> + } <nl> + } <nl> / / always recreate the views after any table change <nl> CreateViews ( ) ; <nl> <nl> bool CMusicDatabase : : UpdateOldVersion ( int version ) <nl> <nl> int CMusicDatabase : : GetMinVersion ( ) const <nl> { <nl> - return 35 ; <nl> + return 36 ; <nl> } <nl> <nl> unsigned int CMusicDatabase : : GetSongIDs ( const Filter & filter , vector < pair < int , int > > & songIDs ) <nl>
musicdb : update musicdb : / / paths in " content " table
xbmc/xbmc
41eea3255a429c87859c719b307a4821fc5ded0a
2013-05-13T19:29:12Z
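The schema upgrade above follows a common migration shape: read all affected rows first, close the result set, then issue one UPDATE per legacy path (and bump `GetMinVersion` so the step runs exactly once). A sketch of that shape; the numeric-node rule in `translateMusicDbPath` is hypothetical, standing in for `CLegacyPathTranslation : : TranslateMusicDbPath`:

```cpp
#include <iostream>
#include <string>
#include <vector>

// Hypothetical legacy-path rule for illustration only.
std::string translateMusicDbPath(const std::string& path) {
    if (path.rfind("musicdb://3/", 0) == 0)  // starts_with, pre-C++20
        return "musicdb://artists/" + path.substr(12);
    return path;
}

int main() {
    // Stands in for `SELECT strPath FROM content`: collect the rows first,
    // then update, instead of rewriting while the cursor is still open.
    const std::vector<std::string> contentPaths{
        "musicdb://3/42/", "musicdb://albums/7/"};

    for (const auto& original : contentPaths) {
        const std::string updated = translateMusicDbPath(original);
        if (updated != original)
            std::cout << "UPDATE content SET strPath='" << updated
                      << "' WHERE strPath='" << original << "'\n";
    }
}
```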
mmm a / swoole_http . c <nl> ppp b / swoole_http . c <nl> PHP_METHOD ( swoole_http_response , end ) <nl> char * key_connection = " Connection " ; <nl> char * key_content_length = " Content - Length " ; <nl> char * key_date = " Date " ; <nl> + <nl> HashTable * ht = Z_ARRVAL_P ( header ) ; <nl> for ( zend_hash_internal_pointer_reset ( ht ) ; zend_hash_has_more_elements ( ht ) = = 0 ; zend_hash_move_forward ( ht ) ) <nl> { <nl> PHP_METHOD ( swoole_http_response , end ) <nl> { <nl> continue ; <nl> } <nl> - if ( strcmp ( key , key_server ) = = 0 ) <nl> + if ( strcmp ( key , key_server ) = = 0 ) <nl> { <nl> flag | = 0x1 ; <nl> } <nl> - else if ( strcmp ( key , key_connection ) = = 0 ) <nl> + else if ( strcmp ( key , key_connection ) = = 0 ) <nl> { <nl> flag | = 0x2 ; <nl> } <nl> - else if ( strcmp ( key , key_content_length ) = = 0 ) <nl> + else if ( strcmp ( key , key_content_length ) = = 0 ) <nl> { <nl> flag | = 0x4 ; <nl> } <nl> - else if ( strcmp ( key , key_date ) = = 0 ) <nl> + else if ( strcmp ( key , key_date ) = = 0 ) <nl> { <nl> flag | = 0x8 ; <nl> } <nl> PHP_METHOD ( swoole_http_response , end ) <nl> } <nl> } <nl> <nl> - if ( client - > request . method = = PHP_HTTP_OPTIONS ) <nl> + if ( client - > request . method = = PHP_HTTP_OPTIONS ) <nl> { <nl> swString_append_ptr ( response , ZEND_STRL ( " Allow : GET , POST , PUT , PATCH , DELETE , HEAD , OPTIONS \ r \ nContent - Length : 0 \ r \ n " ) ) ; <nl> } <nl> PHP_METHOD ( swoole_http_response , end ) <nl> } <nl> } <nl> <nl> - <nl> if ( ! ( flag & 0x8 ) ) <nl> { <nl> date_str = php_format_date ( ZEND_STRL ( " D , d - M - Y H : i : s T " ) , SwooleGS - > now , 0 TSRMLS_CC ) ; <nl> PHP_METHOD ( swoole_http_response , end ) <nl> swString_append_ptr ( response , ZEND_STRL ( " Connection : close \ r \ n " ) ) ; <nl> } <nl> <nl> - date_str = php_format_date ( ZEND_STRL ( " D , d - M - Y H : i : s T " ) , 1 , SwooleGS - > now TSRMLS_CC ) ; <nl> + date_str = php_format_date ( ZEND_STRL ( " D , d - M - Y H : i : s T " ) , SwooleGS - > now , 0 TSRMLS_CC ) ; <nl> n = snprintf ( buf , 128 , " Date : % s \ r \ n " , date_str ) ; <nl> efree ( date_str ) ; <nl> swString_append_ptr ( response , buf , n ) ; <nl> - if ( client - > request . method = = PHP_HTTP_OPTIONS ) { <nl> + <nl> + if ( client - > request . method = = PHP_HTTP_OPTIONS ) <nl> + { <nl> n = snprintf ( buf , 128 , " Allow : GET , POST , PUT , DELETE , HEAD , OPTIONS \ r \ nContent - Length : % d \ r \ n " , 0 ) ; <nl> - } else { <nl> + } <nl> + else <nl> + { <nl> n = snprintf ( buf , 128 , " Content - Length : % d \ r \ n " , body . length ) ; <nl> } <nl> swString_append_ptr ( response , buf , n ) ; <nl>
fixed HTTP response Date header error .
swoole/swoole-src
b42208e0371adcf7a1c8f9a8c4e54577df21c7a8
2015-01-09T05:59:00Z
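The one-line fix above swaps two arguments to `php_format_date ( )`: the timestamp and the localtime flag were reversed, so the `Date` header was rendered from the constant `1` rather than the current time. For reference, a standalone way to build such a header with the C library, always in GMT as HTTP requires:

```cpp
#include <cstdio>
#include <ctime>

int main() {
    std::time_t now = std::time(nullptr);
    std::tm gmt{};
    gmtime_r(&now, &gmt);  // POSIX; use gmtime_s on Windows

    char buf[64];
    // RFC 7231 date format; the swoole code uses PHP's cookie-style
    // "D, d-M-Y H:i:s T" pattern instead, but the GMT requirement is the same.
    std::strftime(buf, sizeof(buf),
                  "Date: %a, %d %b %Y %H:%M:%S GMT\r\n", &gmt);
    std::fputs(buf, stdout);
}
```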
mmm a / hphp / hack / src / hh_bulk_check . ml <nl> ppp b / hphp / hack / src / hh_bulk_check . ml <nl> <nl> * <nl> * ) <nl> <nl> + open ServerEnv <nl> + <nl> + type schedule_args = { <nl> + bin_root : Path . t ; <nl> + root : Path . t ; <nl> + naming_table : string ; <nl> + input_file : string ; <nl> + ( * Number of remote workers * ) <nl> + num_remote_workers : int ; <nl> + num_local_workers : int ; <nl> + batch_size : int option ; <nl> + timeout : int ; <nl> + } <nl> + <nl> type command = <nl> - | CSchedule of { <nl> - bin_root : Path . t ; <nl> - root : Path . t ; <nl> - timeout : int ; <nl> - } <nl> + | CSchedule of schedule_args <nl> | CWork of unit option RemoteWorker . work_env <nl> <nl> type command_keyword = <nl> let parse_root ( args : string list ) : Path . t = <nl> Printf . fprintf stderr " Error : please provide at most one root directory \ n % ! " ; <nl> exit 1 <nl> <nl> + let validate_required_arg arg_ref arg_name = <nl> + match ! arg_ref with <nl> + | None - > failwith ( Printf . sprintf " % s is required . " arg_name ) <nl> + | Some arg - > arg <nl> + <nl> let parse_schedule_args ( ) : command = <nl> let timeout = ref 9999 in <nl> + let naming_table = ref None in <nl> + let num_remote_workers = ref 1 in <nl> + let num_local_workers = ref Sys_utils . nbr_procs in <nl> + let batch_size = ref None in <nl> + let input_file = ref None in <nl> + let set_option_arg name reference value = <nl> + match ! reference with <nl> + | None - > reference : = Some value <nl> + | Some _ - > failwith ( Printf . sprintf " Attempted to set % s twice " name ) <nl> + in <nl> let options = <nl> - [ ( " - - timeout " , Arg . Int ( fun x - > timeout : = x ) , " The timeout " ) ] <nl> + [ <nl> + ( " - - timeout " , Arg . Int ( fun x - > timeout : = x ) , " The timeout " ) ; <nl> + ( " - - naming - table " , <nl> + Arg . String ( set_option_arg " naming table file " naming_table ) , <nl> + " input naming table SQLlite file path ( required ) . " ) ; <nl> + ( " - - num - remote - workers " , <nl> + Arg . Int ( fun x - > num_remote_workers : = x ) , <nl> + " The number of remote workers ( default is 1 ) . " ) ; <nl> + ( " - - num - local - workers " , <nl> + Arg . Int ( fun x - > num_local_workers : = x ) , <nl> + " The number of local workers ( default is the number of processors ) . " ) ; <nl> + ( " - - batch - size " , <nl> + Arg . Int ( set_option_arg " batch size " batch_size ) , <nl> + " Remote worker batch size . " ) ; <nl> + ( " - - input - file " , <nl> + Arg . String ( set_option_arg " input file " input_file ) , <nl> + " Input file path that contains the list of files to type check . " ) ; <nl> + ] <nl> in <nl> let usage = " Usage : " ^ Sys . executable_name ^ " schedule < repo_root > " in <nl> let args = parse_without_command options usage ~ keyword : CKSchedule in <nl> let ( root : Path . t ) = parse_root args in <nl> let bin_root = Path . make ( Filename . dirname Sys . argv . ( 0 ) ) in <nl> - CSchedule { bin_root ; root ; timeout = ! timeout } <nl> + <nl> + CSchedule <nl> + { <nl> + bin_root ; <nl> + root ; <nl> + naming_table = validate_required_arg naming_table " - - naming - table " ; <nl> + input_file = validate_required_arg input_file " - - input - file " ; <nl> + num_remote_workers = ! num_remote_workers ; <nl> + num_local_workers = ! num_local_workers ; <nl> + batch_size = ! batch_size ; <nl> + timeout = ! timeout ; <nl> + } <nl> <nl> let make_remote_server_api ( ) : <nl> ( module RemoteWorker . 
RemoteServerApi with type naming_table = unit option ) = <nl> let parse_args ( ) = <nl> parse_schedule_args ( ) <nl> | CKWork - > parse_work_args ( ) <nl> <nl> + ( * <nl> + Initialize env / genv from naming_table and create local workers <nl> + based on num_local_workers . <nl> + * ) <nl> + let init_env_and_create_local_workers root naming_table num_local_workers = <nl> + Tempfile . with_real_tempdir @ @ fun tmp - > <nl> + let t = Unix . gettimeofday ( ) in <nl> + Relative_path . set_path_prefix Relative_path . Root root ; <nl> + Relative_path . set_path_prefix Relative_path . Tmp tmp ; <nl> + let hhi_root = Hhi . get_hhi_root ( ) in <nl> + Hh_logger . log " Extracted hhi files to directory % s " ( Path . to_string hhi_root ) ; <nl> + Relative_path . set_path_prefix Relative_path . Hhi hhi_root ; <nl> + <nl> + let init_id = Random_id . short_string ( ) in <nl> + HackEventLogger . init_batch_tool ~ init_id ~ root ~ time : t ; <nl> + let server_args = ServerArgs . default_options ~ root : ( Path . to_string root ) in <nl> + let ( server_config , server_local_config ) = <nl> + ServerConfig . load ServerConfig . filename server_args <nl> + in <nl> + let hhconfig_version = <nl> + server_config | > ServerConfig . version | > Config_file . version_to_string_opt <nl> + in <nl> + let sharedmem_config = ServerConfig . sharedmem_config server_config in <nl> + let handle = SharedMem . init sharedmem_config ~ num_workers : num_local_workers in <nl> + let server_env = <nl> + { <nl> + ( ServerEnvBuild . make_env server_config ) with <nl> + ServerEnv . naming_table = Naming_table . load_from_sqlite naming_table ; <nl> + } <nl> + in <nl> + let t = Unix . gettimeofday ( ) in <nl> + let gc_control = ServerConfig . gc_control server_config in <nl> + let workers = <nl> + ServerWorker . make <nl> + ~ nbr_procs : num_local_workers <nl> + gc_control <nl> + handle <nl> + ~ logging_init : ( fun ( ) - > <nl> + HackEventLogger . init_worker <nl> + ~ root <nl> + ~ hhconfig_version <nl> + ~ init_id : ( init_id ^ " . " ^ Random_id . short_string ( ) ) <nl> + ~ time : t <nl> + ~ profile_type_check_duration_threshold : 0 . <nl> + ~ profile_owner : ( Sys_utils . logname ( ) ) <nl> + ~ profile_desc : " hh_bulk_check " <nl> + ~ max_times_to_defer : None ) <nl> + in <nl> + let genv = <nl> + ServerEnvBuild . make_genv <nl> + server_args <nl> + server_config <nl> + server_local_config <nl> + workers <nl> + in <nl> + ( server_env , genv , workers ) <nl> + <nl> + let get_batch_size genv ( batch_size : int option ) = <nl> + match batch_size with <nl> + | Some size - > ( size , size ) <nl> + | None - > <nl> + ( ServerLocalConfig . ( genv . local_config . remote_type_check . max_batch_size ) , <nl> + ServerLocalConfig . ( genv . local_config . remote_type_check . min_batch_size ) ) <nl> + <nl> + ( * <nl> + Start remote checking service with number of remote workers specified by num_remote_workers . <nl> + * ) <nl> + let start_remote_checking_service genv env num_remote_workers batch_size = <nl> + let version_specifier = <nl> + ServerLocalConfig . ( genv . local_config . remote_version_specifier ) <nl> + in <nl> + <nl> + let ( max_batch_size , min_batch_size ) = get_batch_size genv batch_size in <nl> + let worker_min_log_level = <nl> + ServerLocalConfig . ( genv . local_config . remote_type_check . worker_min_log_level ) <nl> + in <nl> + let root = Relative_path . path_of_prefix Relative_path . Root in <nl> + let delegate_state = <nl> + Typing_service_delegate . start <nl> + Typing_service_types . <nl> + { <nl> + init_id = env . 
init_env . init_id ; <nl> + mergebase = env . init_env . mergebase ; <nl> + num_workers = num_remote_workers ; <nl> + root ; <nl> + server = <nl> + ServerApi . make_local_server_api <nl> + env . naming_table <nl> + ~ root <nl> + ~ ignore_hh_version : ( ServerArgs . ignore_hh_version genv . options ) ; <nl> + version_specifier ; <nl> + worker_min_log_level ; <nl> + } <nl> + ( Typing_service_delegate . create ~ max_batch_size ~ min_batch_size ( ) ) <nl> + ~ recheck_id : env . init_env . recheck_id <nl> + in <nl> + delegate_state <nl> + <nl> + ( * <nl> + Initialize envs , create local workers and create remote checking service delegate . <nl> + * ) <nl> + let create_service_delegate ( schedule_env : schedule_args ) = <nl> + let ( env , genv , _ ) = <nl> + init_env_and_create_local_workers <nl> + schedule_env . root <nl> + schedule_env . naming_table <nl> + schedule_env . num_local_workers <nl> + in <nl> + let delegate_state = <nl> + start_remote_checking_service <nl> + genv <nl> + env <nl> + schedule_env . num_remote_workers <nl> + schedule_env . batch_size <nl> + in <nl> + ( env , genv , delegate_state ) <nl> + <nl> + ( * Parse input_file which should contain a list of php files relative to root * ) <nl> + let read_input_file input_file = <nl> + let file_content = Disk . cat input_file in <nl> + let file_lines = String . split_on_char ' \ n ' ( String . trim file_content ) in <nl> + List . map ( fun x - > Relative_path . from_root x ) file_lines <nl> + <nl> + let print_errors errors = <nl> + let print_error l = <nl> + Hh_logger . log " % s " ( Errors . to_string ( Errors . to_absolute_for_test l ) ) <nl> + in <nl> + List . iter print_error errors ; <nl> + ( ) <nl> + <nl> + ( * <nl> + Schedule type checking for input file . <nl> + The type checking mode can be controlled by - - num - remote - workers / - - num - remote - workers options : <nl> + If - - num - remote - workers ! = 0 , remote type checking will be enabled . <nl> + Otherwise , pure - local type checking is used . <nl> + If - - num - remote - workers ! = 0 and - - num - locak - workers = = 0 , pure - remote <nl> + remote type checking will be used . <nl> + * ) <nl> + let schedule_type_checking schedule_env = <nl> + let ( env , genv , delegate_state ) = create_service_delegate schedule_env in <nl> + let telemetry = Telemetry . create ( ) in <nl> + let memory_cap = <nl> + genv . ServerEnv . local_config <nl> + . ServerLocalConfig . max_typechecker_worker_memory_mb <nl> + in <nl> + let files_to_check = read_input_file schedule_env . input_file in <nl> + Parser_options_provider . set env . ServerEnv . popt ; <nl> + let check_info = <nl> + Typing_check_service . <nl> + { <nl> + ( ServerCheckUtils . get_check_info genv env ) with <nl> + profile_log = true ; <nl> + profile_type_check_duration_threshold = 0 . 0 ; <nl> + } <nl> + in <nl> + let ctx = Provider_utils . ctx_from_server_env env in <nl> + ( * Typing checking entry point * ) <nl> + let ( res , _delegate_state , _telemetry ) = <nl> + Typing_check_service . go <nl> + ctx <nl> + genv . workers <nl> + delegate_state <nl> + telemetry <nl> + Relative_path . Set . empty <nl> + files_to_check <nl> + ~ memory_cap <nl> + ~ check_info <nl> + in <nl> + let errs = Errors . get_error_list res in <nl> + match List . length errs with <nl> + | 0 - > Hh_logger . log " Type check finished with zero error . " <nl> + | _ - > print_errors errs <nl> + <nl> let ( ) = <nl> let ( ) = Daemon . 
check_entry_point ( ) in <nl> let command = parse_args ( ) in <nl> let _errors = <nl> match command with <nl> - | CSchedule _schedule_env - > <nl> - ( * TODO : use schedule_env * ) <nl> - ( ) <nl> + | CSchedule schedule_env - > schedule_type_checking schedule_env <nl> | CWork work_env - > <nl> ( * TODO : RemoteWorker . go work_env * ) <nl> ignore work_env <nl>
Introduce hh_bulk_check tool to easily control local and remote type checking
facebook/hhvm
8d658100d7d2d8db1894161880045591d6b040ce
2020-03-04T07:03:48Z
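The scheduling path in this commit reads the input file list and hands batches to remote workers, with the batch size either pinned by `--batch-size` or bounded by the min/max sizes from server config (see `get_batch_size`). A C++ sketch of that partitioning shape — names and bounds handling are illustrative, not the tool's actual API:

```cpp
#include <algorithm>
#include <string>
#include <vector>

// Partition a file list into batches for remote workers. Mirrors the
// shape of get_batch_size: an explicit --batch-size pins the size,
// otherwise it is derived from worker count and clamped to the
// configured [min_batch, max_batch] range (assumed min_batch <= max_batch).
std::vector<std::vector<std::string>> make_batches(
    const std::vector<std::string>& files, int num_workers,
    int explicit_batch_size,  // <= 0 means "not set on the command line"
    int min_batch, int max_batch) {
  int size = explicit_batch_size > 0
                 ? explicit_batch_size
                 : std::clamp(static_cast<int>(files.size()) /
                                  std::max(num_workers, 1),
                              min_batch, max_batch);
  size = std::max(size, 1);  // guard against a zero/negative stride
  std::vector<std::vector<std::string>> batches;
  for (std::size_t i = 0; i < files.size(); i += size)
    batches.emplace_back(files.begin() + i,
                         files.begin() + std::min(files.size(), i + size));
  return batches;
}
```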
mmm a / stdlib / core / BridgeStorage . swift <nl> ppp b / stdlib / core / BridgeStorage . swift <nl> struct _BridgeStorage < <nl> } <nl> } <nl> <nl> + @ inline ( __always ) <nl> + public / / @ testable <nl> + mutating func isUniquelyReferenced_native_noSpareBits ( ) - > Bool { <nl> + _sanityCheck ( isNative ) <nl> + _sanityCheck ( _nonPointerBits ( rawValue ) = = 0 ) <nl> + let p : UnsafePointer < HeapObject > = Builtin . reinterpretCast ( rawValue ) <nl> + return _swift_isUniquelyReferenced_nonNull_native ( p ) ! = 0 <nl> + } <nl> + <nl> public / / @ testable <nl> var objCInstance : ObjC { <nl> @ inline ( __always ) get { <nl> mmm a / test / 1_stdlib / BridgeStorage . swift . gyb <nl> ppp b / test / 1_stdlib / BridgeStorage . swift . gyb <nl> protocol BridgeStorage { <nl> init ( objC : ObjC ) <nl> <nl> mutating func isUniquelyReferencedNative ( ) - > Bool <nl> + mutating func isUniquelyReferenced_native_noSpareBits ( ) - > Bool <nl> var isNative : Bool { get } <nl> var isObjC : Bool { get } <nl> var nativeInstance : Native { get } <nl> struct BridgeObject < NativeType : AnyObject , ObjCType : AnyObject > <nl> return Builtin . bridgeFromRawPointer ( rawObject ) <nl> } <nl> <nl> + mutating func isUniquelyReferenced_native_noSpareBits ( ) - > Bool { <nl> + precondition ( isNative ) <nl> + precondition ( spareBits = = 0 ) <nl> + return _swift_isUniquelyReferenced_nonNull_native ( <nl> + UnsafePointer ( rawObject ) ) ! = 0 <nl> + } <nl> + <nl> var objCInstance : ObjC { <nl> precondition ( isObjC ) <nl> return Builtin . bridgeFromRawPointer ( rawObject ) <nl> allTests . test ( " $ { Self } " ) { <nl> expectFalse ( b . isObjC ) <nl> expectTrue ( b . isNative ) <nl> expectTrue ( b . isUniquelyReferencedNative ( ) ) <nl> + if i = = 0 { <nl> + expectTrue ( b . isUniquelyReferenced_native_noSpareBits ( ) ) <nl> + } <nl> expectEqual ( i , b . spareBits ) <nl> } <nl> <nl> allTests . test ( " $ { Self } " ) { <nl> expectTrue ( b . nativeInstance = = = c ) <nl> if i = = 0 { <nl> expectTrue ( b . nativeInstance_noSpareBits = = = c ) <nl> + expectFalse ( b . isUniquelyReferenced_native_noSpareBits ( ) ) <nl> } <nl> } <nl> <nl>
[ stdlib ] BridgeStorage : uniqueness check fast path
apple/swift
15e195943e1b13f158110620e91bd7d1ec030037
2014-11-21T01:39:55Z
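isUniquelyReferenced_native_noSpareBits gives copy-on-write code a fast path: mutate in place when the buffer has exactly one reference, clone otherwise. The same pattern in C++, using shared_ptr::use_count() as the uniqueness test — an illustration only, not Swift's actual mechanism, which reads the HeapObject reference count directly:

```cpp
#include <cstddef>
#include <memory>
#include <vector>

// Copy-on-write buffer: writes check uniqueness first and only clone
// when the storage is shared, so unshared mutation stays cheap.
class CowBuffer {
  std::shared_ptr<std::vector<int>> storage_ =
      std::make_shared<std::vector<int>>();

 public:
  void set(std::size_t i, int v) {
    // use_count() == 1 is a reasonable uniqueness test in
    // single-threaded code; concurrent readers would need more care.
    if (storage_.use_count() != 1)
      storage_ = std::make_shared<std::vector<int>>(*storage_);  // clone
    if (i >= storage_->size()) storage_->resize(i + 1);
    (*storage_)[i] = v;
  }
  int get(std::size_t i) const { return storage_->at(i); }
};
```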
mmm a / dbms / cmake / version . cmake <nl> ppp b / dbms / cmake / version . cmake <nl> <nl> set ( VERSION_REVISION 54407 CACHE STRING " " ) <nl> set ( VERSION_MAJOR 18 CACHE STRING " " ) <nl> set ( VERSION_MINOR 12 CACHE STRING " " ) <nl> - set ( VERSION_PATCH 16 CACHE STRING " " ) <nl> - set ( VERSION_GITHASH 38534bba80bb19aa4280af199071cc56b79da47a CACHE STRING " " ) <nl> - set ( VERSION_DESCRIBE v18 . 12 . 16 - testing CACHE STRING " " ) <nl> - set ( VERSION_STRING 18 . 12 . 16 CACHE STRING " " ) <nl> + set ( VERSION_PATCH 14 CACHE STRING " " ) <nl> + set ( VERSION_GITHASH 84cde3429bec15028992497b3ab7e7a0fb53a253 CACHE STRING " " ) <nl> + set ( VERSION_DESCRIBE v18 . 12 . 14 - testing CACHE STRING " " ) <nl> + set ( VERSION_STRING 18 . 12 . 14 CACHE STRING " " ) <nl> # end of autochange <nl> <nl> set ( VERSION_EXTRA " " CACHE STRING " " ) <nl> mmm a / debian / changelog <nl> ppp b / debian / changelog <nl> <nl> - clickhouse ( 18 . 12 . 16 ) unstable ; urgency = low <nl> + clickhouse ( 18 . 12 . 14 ) unstable ; urgency = low <nl> <nl> * Modified source code <nl> <nl> - - - < root @ yandex - team . ru > Fri , 14 Sep 2018 06 : 23 : 40 + 0300 <nl> + - - < root @ yandex - team . ru > Thu , 13 Sep 2018 13 : 19 : 54 + 0300 <nl> mmm a / docker / client / Dockerfile <nl> ppp b / docker / client / Dockerfile <nl> <nl> FROM ubuntu : 18 . 04 <nl> <nl> ARG repository = " deb http : / / repo . yandex . ru / clickhouse / deb / stable / main / " <nl> - ARG version = 18 . 12 . 16 <nl> + ARG version = 18 . 12 . 14 <nl> <nl> RUN apt - get update & & \ <nl> apt - get install - y apt - transport - https dirmngr & & \ <nl> mmm a / docker / server / Dockerfile <nl> ppp b / docker / server / Dockerfile <nl> <nl> FROM ubuntu : 18 . 04 <nl> <nl> ARG repository = " deb http : / / repo . yandex . ru / clickhouse / deb / stable / main / " <nl> - ARG version = 18 . 12 . 16 <nl> + ARG version = 18 . 12 . 14 <nl> <nl> RUN apt - get update & & \ <nl> apt - get install - y apt - transport - https dirmngr & & \ <nl> mmm a / docker / test / Dockerfile <nl> ppp b / docker / test / Dockerfile <nl> <nl> FROM ubuntu : 18 . 04 <nl> <nl> ARG repository = " deb http : / / repo . yandex . ru / clickhouse / deb / stable / main / " <nl> - ARG version = 18 . 12 . 16 <nl> + ARG version = 18 . 12 . 14 <nl> <nl> RUN apt - get update & & \ <nl> apt - get install - y apt - transport - https dirmngr & & \ <nl>
Revert " Auto version update to [ 18 . 12 . 16 ] [ 54407 ] "
ClickHouse/ClickHouse
767db1bc30ad39e3379d4accac99325c1b3b7ead
2018-09-16T02:23:48Z
mmm a / templates / cpp - template - default / proj . win8 . 1 - universal / App . Shared / OpenGLESPage . xaml . cpp <nl> ppp b / templates / cpp - template - default / proj . win8 . 1 - universal / App . Shared / OpenGLESPage . xaml . cpp <nl> OpenGLESPage : : OpenGLESPage ( OpenGLES * openGLES ) : <nl> mRenderSurface ( EGL_NO_SURFACE ) , <nl> mCustomRenderSurfaceSize ( 0 , 0 ) , <nl> mUseCustomRenderSurfaceSize ( false ) , <nl> - m_coreInput ( nullptr ) , <nl> - m_dpi ( 0 . 0f ) , <nl> - m_deviceLost ( false ) , <nl> - m_orientation ( DisplayOrientations : : Landscape ) <nl> + mCoreInput ( nullptr ) , <nl> + mDpi ( 0 . 0f ) , <nl> + mDeviceLost ( false ) , <nl> + mVisible ( false ) , <nl> + mOrientation ( DisplayOrientations : : Landscape ) <nl> { <nl> InitializeComponent ( ) ; <nl> <nl> OpenGLESPage : : OpenGLESPage ( OpenGLES * openGLES ) : <nl> currentDisplayInformation - > OrientationChanged + = <nl> ref new TypedEventHandler < DisplayInformation ^ , Object ^ > ( this , & OpenGLESPage : : OnOrientationChanged ) ; <nl> <nl> - m_orientation = currentDisplayInformation - > CurrentOrientation ; <nl> + mOrientation = currentDisplayInformation - > CurrentOrientation ; <nl> <nl> this - > Loaded + = <nl> ref new Windows : : UI : : Xaml : : RoutedEventHandler ( this , & OpenGLESPage : : OnPageLoaded ) ; <nl> OpenGLESPage : : OpenGLESPage ( OpenGLES * openGLES ) : <nl> auto workItemHandler = ref new WorkItemHandler ( [ this ] ( IAsyncAction ^ ) <nl> { <nl> / / The CoreIndependentInputSource will raise pointer events for the specified device types on whichever thread it ' s created on . <nl> - m_coreInput = swapChainPanel - > CreateCoreIndependentInputSource ( <nl> + mCoreInput = swapChainPanel - > CreateCoreIndependentInputSource ( <nl> Windows : : UI : : Core : : CoreInputDeviceTypes : : Mouse | <nl> Windows : : UI : : Core : : CoreInputDeviceTypes : : Touch | <nl> Windows : : UI : : Core : : CoreInputDeviceTypes : : Pen <nl> ) ; <nl> <nl> / / Register for pointer events , which will be raised on the background thread . <nl> - m_coreInput - > PointerPressed + = ref new TypedEventHandler < Object ^ , PointerEventArgs ^ > ( this , & OpenGLESPage : : OnPointerPressed ) ; <nl> - m_coreInput - > PointerMoved + = ref new TypedEventHandler < Object ^ , PointerEventArgs ^ > ( this , & OpenGLESPage : : OnPointerMoved ) ; <nl> - m_coreInput - > PointerReleased + = ref new TypedEventHandler < Object ^ , PointerEventArgs ^ > ( this , & OpenGLESPage : : OnPointerReleased ) ; <nl> + mCoreInput - > PointerPressed + = ref new TypedEventHandler < Object ^ , PointerEventArgs ^ > ( this , & OpenGLESPage : : OnPointerPressed ) ; <nl> + mCoreInput - > PointerMoved + = ref new TypedEventHandler < Object ^ , PointerEventArgs ^ > ( this , & OpenGLESPage : : OnPointerMoved ) ; <nl> + mCoreInput - > PointerReleased + = ref new TypedEventHandler < Object ^ , PointerEventArgs ^ > ( this , & OpenGLESPage : : OnPointerReleased ) ; <nl> <nl> / / Begin processing input messages as they ' re delivered . <nl> - m_coreInput - > Dispatcher - > ProcessEvents ( CoreProcessEventsOption : : ProcessUntilQuit ) ; <nl> + mCoreInput - > Dispatcher - > ProcessEvents ( CoreProcessEventsOption : : ProcessUntilQuit ) ; <nl> } ) ; <nl> <nl> / / Run task on a dedicated high priority background thread . 
<nl> - m_inputLoopWorker = ThreadPool : : RunAsync ( workItemHandler , WorkItemPriority : : High , WorkItemOptions : : TimeSliced ) ; <nl> + mInputLoopWorker = ThreadPool : : RunAsync ( workItemHandler , WorkItemPriority : : High , WorkItemOptions : : TimeSliced ) ; <nl> } <nl> <nl> OpenGLESPage : : ~ OpenGLESPage ( ) <nl> void OpenGLESPage : : OnPageLoaded ( Platform : : Object ^ sender , Windows : : UI : : Xaml : : Rou <nl> / / The SwapChainPanel has been created and arranged in the page layout , so EGL can be initialized . <nl> CreateRenderSurface ( ) ; <nl> StartRenderLoop ( ) ; <nl> + mVisible = true ; <nl> } <nl> <nl> void OpenGLESPage : : OnPointerPressed ( Object ^ sender , PointerEventArgs ^ e ) <nl> { <nl> - if ( m_renderer ) <nl> + if ( mRenderer ) <nl> { <nl> - m_renderer - > QueuePointerEvent ( PointerEventType : : PointerPressed , e ) ; <nl> + mRenderer - > QueuePointerEvent ( PointerEventType : : PointerPressed , e ) ; <nl> } <nl> } <nl> <nl> void OpenGLESPage : : OnPointerMoved ( Object ^ sender , PointerEventArgs ^ e ) <nl> { <nl> - if ( m_renderer ) <nl> + if ( mRenderer ) <nl> { <nl> - m_renderer - > QueuePointerEvent ( PointerEventType : : PointerMoved , e ) ; <nl> + mRenderer - > QueuePointerEvent ( PointerEventType : : PointerMoved , e ) ; <nl> } <nl> } <nl> <nl> void OpenGLESPage : : OnPointerReleased ( Object ^ sender , PointerEventArgs ^ e ) <nl> { <nl> - if ( m_renderer ) <nl> + if ( mRenderer ) <nl> { <nl> - m_renderer - > QueuePointerEvent ( PointerEventType : : PointerReleased , e ) ; <nl> + mRenderer - > QueuePointerEvent ( PointerEventType : : PointerReleased , e ) ; <nl> } <nl> } <nl> <nl> void OpenGLESPage : : OnKeyPressed ( CoreWindow ^ sender , KeyEventArgs ^ e ) <nl> if ( ! e - > KeyStatus . WasKeyDown ) <nl> { <nl> / / log ( " OpenGLESPage : : OnKeyPressed % d " , e - > VirtualKey ) ; <nl> - if ( m_renderer ) <nl> + if ( mRenderer ) <nl> { <nl> - m_renderer - > QueueKeyboardEvent ( WinRTKeyboardEventType : : KeyPressed , e ) ; <nl> + mRenderer - > QueueKeyboardEvent ( WinRTKeyboardEventType : : KeyPressed , e ) ; <nl> } <nl> } <nl> } <nl> void OpenGLESPage : : OnCharacterReceived ( CoreWindow ^ sender , CharacterReceivedEven <nl> void OpenGLESPage : : OnKeyReleased ( CoreWindow ^ sender , KeyEventArgs ^ e ) <nl> { <nl> / / log ( " OpenGLESPage : : OnKeyReleased % d " , e - > VirtualKey ) ; <nl> - if ( m_renderer ) <nl> + if ( mRenderer ) <nl> { <nl> - m_renderer - > QueueKeyboardEvent ( WinRTKeyboardEventType : : KeyReleased , e ) ; <nl> + mRenderer - > QueueKeyboardEvent ( WinRTKeyboardEventType : : KeyReleased , e ) ; <nl> } <nl> } <nl> <nl> void OpenGLESPage : : OnKeyReleased ( CoreWindow ^ sender , KeyEventArgs ^ e ) <nl> void OpenGLESPage : : OnOrientationChanged ( DisplayInformation ^ sender , Object ^ args ) <nl> { <nl> critical_section : : scoped_lock lock ( mSwapChainPanelSizeCriticalSection ) ; <nl> - m_orientation = sender - > CurrentOrientation ; <nl> + mOrientation = sender - > CurrentOrientation ; <nl> } <nl> <nl> void OpenGLESPage : : OnVisibilityChanged ( Windows : : UI : : Core : : CoreWindow ^ sender , Windows : : UI : : Core : : VisibilityChangedEventArgs ^ args ) <nl> { <nl> if ( args - > Visible & & mRenderSurface ! = EGL_NO_SURFACE ) <nl> { <nl> - StartRenderLoop ( ) ; <nl> + std : : unique_lock < std : : mutex > locker ( mSleepMutex ) ; <nl> + mVisible = true ; <nl> + mSleepCondition . 
notify_one ( ) ; <nl> } <nl> else <nl> { <nl> - StopRenderLoop ( ) ; <nl> + mVisible = false ; <nl> } <nl> } <nl> <nl> void OpenGLESPage : : OnVisibilityChanged ( Windows : : UI : : Core : : CoreWindow ^ sender , Wi <nl> * / <nl> void OpenGLESPage : : OnBackButtonPressed ( Object ^ sender , BackPressedEventArgs ^ args ) <nl> { <nl> - if ( m_renderer ) <nl> + if ( mRenderer ) <nl> { <nl> - m_renderer - > QueueBackButtonEvent ( ) ; <nl> + mRenderer - > QueueBackButtonEvent ( ) ; <nl> args - > Handled = true ; <nl> } <nl> } <nl> void OpenGLESPage : : DestroyRenderSurface ( ) <nl> <nl> void OpenGLESPage : : RecoverFromLostDevice ( ) <nl> { <nl> - / / Stop the render loop , reset OpenGLES , recreate the render surface <nl> - / / and start the render loop again to recover from a lost device . <nl> - <nl> - StopRenderLoop ( ) ; <nl> - <nl> - { <nl> - critical_section : : scoped_lock lock ( mRenderSurfaceCriticalSection ) ; <nl> - DestroyRenderSurface ( ) ; <nl> - mOpenGLES - > Reset ( ) ; <nl> - CreateRenderSurface ( ) ; <nl> - } <nl> - <nl> - StartRenderLoop ( ) ; <nl> + critical_section : : scoped_lock lock ( mRenderSurfaceCriticalSection ) ; <nl> + DestroyRenderSurface ( ) ; <nl> + mOpenGLES - > Reset ( ) ; <nl> + CreateRenderSurface ( ) ; <nl> + std : : unique_lock < std : : mutex > locker ( mSleepMutex ) ; <nl> + mDeviceLost = false ; <nl> + mSleepCondition . notify_one ( ) ; <nl> } <nl> <nl> void OpenGLESPage : : TerminateApp ( ) <nl> void OpenGLESPage : : StartRenderLoop ( ) <nl> } <nl> <nl> DisplayInformation ^ currentDisplayInformation = DisplayInformation : : GetForCurrentView ( ) ; <nl> - m_dpi = currentDisplayInformation - > LogicalDpi ; <nl> + mDpi = currentDisplayInformation - > LogicalDpi ; <nl> <nl> auto dispatcher = Windows : : UI : : Xaml : : Window : : Current - > CoreWindow - > Dispatcher ; <nl> <nl> / / Create a task for rendering that will be run on a background thread . <nl> auto workItemHandler = ref new Windows : : System : : Threading : : WorkItemHandler ( [ this , dispatcher ] ( Windows : : Foundation : : IAsyncAction ^ action ) <nl> { <nl> - critical_section : : scoped_lock lock ( mRenderSurfaceCriticalSection ) ; <nl> - <nl> mOpenGLES - > MakeCurrent ( mRenderSurface ) ; <nl> <nl> GLsizei panelWidth = 0 ; <nl> GLsizei panelHeight = 0 ; <nl> GetSwapChainPanelSize ( & panelWidth , & panelHeight ) ; <nl> <nl> - if ( m_renderer . get ( ) = = nullptr ) <nl> + if ( mRenderer . get ( ) = = nullptr ) <nl> { <nl> - m_renderer = std : : make_shared < Cocos2dRenderer > ( panelWidth , panelHeight , m_dpi , m_orientation , dispatcher , swapChainPanel ) ; <nl> + mRenderer = std : : make_shared < Cocos2dRenderer > ( panelWidth , panelHeight , mDpi , mOrientation , dispatcher , swapChainPanel ) ; <nl> } <nl> <nl> - if ( m_deviceLost ) <nl> - { <nl> - m_deviceLost = false ; <nl> - m_renderer - > DeviceLost ( ) ; <nl> - } <nl> - else <nl> + mRenderer - > Resume ( ) ; <nl> + <nl> + while ( action - > Status = = Windows : : Foundation : : AsyncStatus : : Started ) <nl> { <nl> - m_renderer - > Resume ( ) ; <nl> - } <nl> + if ( ! mVisible ) <nl> + { <nl> + mRenderer - > Pause ( ) ; <nl> + } <nl> <nl> + while ( ! mVisible ) <nl> + { <nl> + std : : unique_lock < std : : mutex > lock ( mSleepMutex ) ; <nl> + mSleepCondition . wait ( lock ) ; <nl> + <nl> + if ( action - > Status ! = Windows : : Foundation : : AsyncStatus : : Started ) <nl> + { <nl> + return ; / / thread was cancelled . 
Exit thread <nl> + } <nl> + <nl> + if ( mVisible ) <nl> + { <nl> + mRenderer - > Resume ( ) ; <nl> + } <nl> + else / / spurious wake up <nl> + { <nl> + continue ; <nl> + } <nl> + } <nl> <nl> - while ( action - > Status = = Windows : : Foundation : : AsyncStatus : : Started & & ! m_deviceLost ) <nl> - { <nl> GetSwapChainPanelSize ( & panelWidth , & panelHeight ) ; <nl> - m_renderer . get ( ) - > Draw ( panelWidth , panelHeight , m_dpi , m_orientation ) ; <nl> + mRenderer . get ( ) - > Draw ( panelWidth , panelHeight , mDpi , mOrientation ) ; <nl> <nl> - / / run on main UI thread <nl> - if ( m_renderer - > AppShouldExit ( ) ) <nl> + if ( mRenderer - > AppShouldExit ( ) ) <nl> { <nl> + / / run on main UI thread <nl> swapChainPanel - > Dispatcher - > RunAsync ( Windows : : UI : : Core : : CoreDispatcherPriority : : Normal , ref new DispatchedHandler ( [ this ] ( ) <nl> { <nl> TerminateApp ( ) ; <nl> void OpenGLESPage : : StartRenderLoop ( ) <nl> <nl> return ; <nl> } <nl> - else if ( mOpenGLES - > SwapBuffers ( mRenderSurface ) ! = GL_TRUE ) <nl> - { <nl> - / / The call to eglSwapBuffers might not be successful ( i . e . due to Device Lost ) <nl> - / / If the call fails , then we must reinitialize EGL and the GL resources . <nl> <nl> - m_deviceLost = true ; <nl> + EGLBoolean result = GL_FALSE ; <nl> + { <nl> + critical_section : : scoped_lock lock ( mRenderSurfaceCriticalSection ) ; <nl> + result = mOpenGLES - > SwapBuffers ( mRenderSurface ) ; <nl> + } <nl> <nl> - if ( m_renderer ) <nl> - { <nl> - m_renderer - > Pause ( ) ; <nl> - } <nl> + if ( result ! = GL_TRUE ) <nl> + { <nl> + / / The call to eglSwapBuffers was not be successful ( i . e . due to Device Lost ) <nl> + / / If the call fails , then we must reinitialize EGL and the GL resources . <nl> + mRenderer - > Pause ( ) ; <nl> + mDeviceLost = true ; <nl> <nl> / / XAML objects like the SwapChainPanel must only be manipulated on the UI thread . <nl> swapChainPanel - > Dispatcher - > RunAsync ( Windows : : UI : : Core : : CoreDispatcherPriority : : High , ref new Windows : : UI : : Core : : DispatchedHandler ( [ = ] ( ) <nl> { <nl> RecoverFromLostDevice ( ) ; <nl> + <nl> } , CallbackContext : : Any ) ) ; <nl> <nl> - return ; <nl> + while ( mDeviceLost ) <nl> + { <nl> + std : : unique_lock < std : : mutex > lock ( mSleepMutex ) ; <nl> + mSleepCondition . wait ( lock ) ; <nl> + <nl> + if ( action - > Status ! = Windows : : Foundation : : AsyncStatus : : Started ) <nl> + { <nl> + return ; / / thread was cancelled . Exit thread <nl> + } <nl> + <nl> + if ( ! mDeviceLost ) <nl> + { <nl> + / / restart cocos2d - x <nl> + mRenderer - > DeviceLost ( ) ; <nl> + } <nl> + else / / spurious wake up <nl> + { <nl> + continue ; <nl> + } <nl> + } <nl> } <nl> } <nl> - <nl> - if ( m_renderer ) <nl> - { <nl> - m_renderer - > Pause ( ) ; <nl> - } <nl> } ) ; <nl> <nl> / / Run task on a dedicated high priority background thread . <nl> void OpenGLESPage : : StopRenderLoop ( ) <nl> if ( mRenderLoopWorker ) <nl> { <nl> mRenderLoopWorker - > Cancel ( ) ; <nl> + std : : unique_lock < std : : mutex > locker ( mSleepMutex ) ; <nl> + mSleepCondition . notify_one ( ) ; <nl> mRenderLoopWorker = nullptr ; <nl> } <nl> } <nl> \ No newline at end of file <nl> mmm a / templates / cpp - template - default / proj . win8 . 1 - universal / App . Shared / OpenGLESPage . xaml . h <nl> ppp b / templates / cpp - template - default / proj . win8 . 1 - universal / App . Shared / OpenGLESPage . xaml . h <nl> <nl> # include " OpenGLES . h " <nl> # include " OpenGLESPage . g . 
h " <nl> # include < memory > <nl> + # include < condition_variable > <nl> + # include < mutex > <nl> <nl> # include " Cocos2dRenderer . h " <nl> <nl> namespace cocos2d <nl> void StopRenderLoop ( ) ; <nl> <nl> OpenGLES * mOpenGLES ; <nl> - std : : shared_ptr < cocos2d : : Cocos2dRenderer > m_renderer ; <nl> + std : : shared_ptr < cocos2d : : Cocos2dRenderer > mRenderer ; <nl> <nl> Windows : : Foundation : : Size mSwapChainPanelSize ; <nl> Concurrency : : critical_section mSwapChainPanelSizeCriticalSection ; <nl> namespace cocos2d <nl> Windows : : Foundation : : IAsyncAction ^ mRenderLoopWorker ; <nl> <nl> / / Track user input on a background worker thread . <nl> - Windows : : Foundation : : IAsyncAction ^ m_inputLoopWorker ; <nl> - Windows : : UI : : Core : : CoreIndependentInputSource ^ m_coreInput ; <nl> + Windows : : Foundation : : IAsyncAction ^ mInputLoopWorker ; <nl> + Windows : : UI : : Core : : CoreIndependentInputSource ^ mCoreInput ; <nl> <nl> / / Independent input handling functions . <nl> void OnPointerPressed ( Platform : : Object ^ sender , Windows : : UI : : Core : : PointerEventArgs ^ e ) ; <nl> namespace cocos2d <nl> <nl> void OnOrientationChanged ( Windows : : Graphics : : Display : : DisplayInformation ^ sender , Platform : : Object ^ args ) ; <nl> <nl> - float m_dpi ; <nl> - bool m_deviceLost ; <nl> - Windows : : Graphics : : Display : : DisplayOrientations m_orientation ; <nl> + float mDpi ; <nl> + bool mDeviceLost ; <nl> + bool mVisible ; <nl> + Windows : : Graphics : : Display : : DisplayOrientations mOrientation ; <nl> <nl> + std : : mutex mSleepMutex ; <nl> + std : : condition_variable mSleepCondition ; <nl> } ; <nl> } <nl> mmm a / templates / cpp - template - default / proj . win8 . 1 - universal / App . Windows / HelloCpp . Windows . vcxproj <nl> ppp b / templates / cpp - template - default / proj . win8 . 1 - universal / App . Windows / HelloCpp . Windows . 
vcxproj <nl> <nl> < SDLCheck > false < / SDLCheck > <nl> < PreprocessorDefinitions > CC_ENABLE_BULLET_INTEGRATION = 1 ; CC_ENABLE_CHIPMUNK_INTEGRATION = 1 ; COCOS2D_DEBUG = 1 ; % ( PreprocessorDefinitions ) < / PreprocessorDefinitions > <nl> < / ClCompile > <nl> + < Link > <nl> + < IgnoreSpecificDefaultLibraries > MSVCRT ; % ( IgnoreSpecificDefaultLibraries ) < / IgnoreSpecificDefaultLibraries > <nl> + < / Link > <nl> < / ItemDefinitionGroup > <nl> < ItemDefinitionGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | ARM ' " > <nl> < ClCompile > <nl> <nl> < SDLCheck > false < / SDLCheck > <nl> < PreprocessorDefinitions > CC_ENABLE_BULLET_INTEGRATION = 1 ; CC_ENABLE_CHIPMUNK_INTEGRATION = 1 ; COCOS2D_DEBUG = 1 ; % ( PreprocessorDefinitions ) < / PreprocessorDefinitions > <nl> < / ClCompile > <nl> + < Link > <nl> + < IgnoreSpecificDefaultLibraries > MSVCRT ; % ( IgnoreSpecificDefaultLibraries ) < / IgnoreSpecificDefaultLibraries > <nl> + < / Link > <nl> < / ItemDefinitionGroup > <nl> < ItemDefinitionGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | Win32 ' " > <nl> < ClCompile > <nl> <nl> < SDLCheck > false < / SDLCheck > <nl> < PreprocessorDefinitions > CC_ENABLE_BULLET_INTEGRATION = 1 ; CC_ENABLE_CHIPMUNK_INTEGRATION = 1 ; COCOS2D_DEBUG = 1 ; % ( PreprocessorDefinitions ) < / PreprocessorDefinitions > <nl> < / ClCompile > <nl> + < Link > <nl> + < IgnoreSpecificDefaultLibraries > MSVCRT ; % ( IgnoreSpecificDefaultLibraries ) < / IgnoreSpecificDefaultLibraries > <nl> + < / Link > <nl> < / ItemDefinitionGroup > <nl> < ItemDefinitionGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | x64 ' " > <nl> < ClCompile > <nl> mmm a / templates / cpp - template - default / proj . win8 . 1 - universal / App . WindowsPhone / HelloCpp . WindowsPhone . vcxproj <nl> ppp b / templates / cpp - template - default / proj . win8 . 1 - universal / App . WindowsPhone / HelloCpp . WindowsPhone . vcxproj <nl> <nl> < SDLCheck > false < / SDLCheck > <nl> < PreprocessorDefinitions > CC_WINDOWS_PHONE_8_1 ; CC_ENABLE_BULLET_INTEGRATION = 1 ; CC_ENABLE_CHIPMUNK_INTEGRATION = 1 ; CC_ENABLE_CHIPMUNK_INTEGRATION = 1 ; COCOS2D_DEBUG = 1 ; % ( PreprocessorDefinitions ) < / PreprocessorDefinitions > <nl> < / ClCompile > <nl> + < Link > <nl> + < IgnoreSpecificDefaultLibraries > MSVCRT ; % ( IgnoreSpecificDefaultLibraries ) < / IgnoreSpecificDefaultLibraries > <nl> + < / Link > <nl> < / ItemDefinitionGroup > <nl> < ItemDefinitionGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | ARM ' " > <nl> < ClCompile > <nl> <nl> < SDLCheck > false < / SDLCheck > <nl> < PreprocessorDefinitions > CC_WINDOWS_PHONE_8_1 ; CC_ENABLE_BULLET_INTEGRATION = 1 ; CC_ENABLE_CHIPMUNK_INTEGRATION = 1 ; CC_ENABLE_CHIPMUNK_INTEGRATION = 1 ; COCOS2D_DEBUG = 1 ; % ( PreprocessorDefinitions ) < / PreprocessorDefinitions > <nl> < / ClCompile > <nl> + < Link > <nl> + < IgnoreSpecificDefaultLibraries > MSVCRT ; % ( IgnoreSpecificDefaultLibraries ) < / IgnoreSpecificDefaultLibraries > <nl> + < / Link > <nl> < / ItemDefinitionGroup > <nl> < ItemDefinitionGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Release | Win32 ' " > <nl> < ClCompile > <nl>
updated cpp template files for Windows 8 . 1 Universal App
cocos2d/cocos2d-x
6f2c7574bf81a60a25348326f7de8b0c6742a51d
2015-06-03T16:25:50Z
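The reworked render loop in this commit replaces stop/restart with sleep/wake: the render thread blocks on a condition variable while the page is hidden or the device is lost, and every wait is re-checked in a loop so spurious wakeups and thread cancellation are handled, as the while (!mVisible) loop does. A self-contained C++ sketch of that pattern, with hypothetical names:

```cpp
#include <atomic>
#include <condition_variable>
#include <mutex>

// Pause/resume render loop: the worker sleeps on a condition variable
// while hidden; setVisible/cancel flip state under the mutex and wake it.
class RenderLoop {
  std::mutex sleepMutex_;
  std::condition_variable sleepCondition_;
  std::atomic<bool> visible_{true};
  std::atomic<bool> cancelled_{false};

 public:
  void setVisible(bool v) {
    { std::lock_guard<std::mutex> lock(sleepMutex_); visible_ = v; }
    sleepCondition_.notify_one();
  }
  void cancel() {
    { std::lock_guard<std::mutex> lock(sleepMutex_); cancelled_ = true; }
    sleepCondition_.notify_one();
  }
  void run() {
    while (!cancelled_) {
      {
        std::unique_lock<std::mutex> lock(sleepMutex_);
        // The predicate form of wait() absorbs spurious wakeups: the
        // thread only proceeds once visible or cancelled is really set.
        sleepCondition_.wait(lock, [&] { return visible_ || cancelled_; });
        if (cancelled_) return;  // thread was cancelled; exit cleanly
      }
      drawFrame();  // one frame per iteration while visible
    }
  }

 private:
  void drawFrame() { /* render one frame */ }
};
```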
mmm a / atom . gyp <nl> ppp b / atom . gyp <nl> <nl> ' < @ ( libchromiumcontent_shared_v8_libraries ) ' , <nl> ] , <nl> } , { <nl> - ' copied_libraries ' : [ <nl> - ' < ( libchromiumcontent_dir ) / boringssl . dll ' , <nl> - ] , <nl> + ' copied_libraries ' : [ ] , <nl> } ] , <nl> ] , <nl> } , <nl> <nl> } , { <nl> ' copied_libraries ' : [ <nl> ' < ( PRODUCT_DIR ) / lib / libnode . so ' , <nl> - ' < ( libchromiumcontent_dir ) / libboringssl . so ' , <nl> ] , <nl> } ] , <nl> ] , <nl> <nl> } , { <nl> ' copied_libraries ' : [ <nl> ' < ( PRODUCT_DIR ) / libnode . dylib ' , <nl> - ' < ( libchromiumcontent_dir ) / libboringssl . dylib ' <nl> ] , <nl> } ] , <nl> ] , <nl> <nl> } , <nl> ] , <nl> ' postbuilds ' : [ <nl> - { <nl> - ' postbuild_name ' : ' Fix path of libboringssl ' , <nl> - ' action ' : [ <nl> - ' install_name_tool ' , <nl> - ' - change ' , <nl> - ' / usr / local / lib / libboringssl . dylib ' , <nl> - ' @ rpath / libboringssl . dylib ' , <nl> - ' $ { BUILT_PRODUCTS_DIR } / < ( product_name ) Framework . framework / Versions / A / < ( product_name ) Framework ' , <nl> - ] , <nl> - } , <nl> { <nl> ' postbuild_name ' : ' Fix path of libnode ' , <nl> ' action ' : [ <nl> mmm a / script / create - dist . py <nl> ppp b / script / create - dist . py <nl> <nl> ] , <nl> ' win32 ' : [ <nl> ' atom . exe ' , <nl> - ' boringssl . dll ' , <nl> ' content_shell . pak ' , <nl> ' d3dcompiler_47 . dll ' , <nl> ' ffmpegsumo . dll ' , <nl> <nl> ' atom ' , <nl> ' content_shell . pak ' , <nl> ' icudtl . dat ' , <nl> - ' libboringssl . so ' , <nl> ' libffmpegsumo . so ' , <nl> ' libnode . so ' , <nl> ' natives_blob . bin ' , <nl> def copy_chromedriver ( ) : <nl> shutil . copyfile ( src , dest ) <nl> os . chmod ( dest , os . stat ( dest ) . st_mode | stat . S_IEXEC ) <nl> <nl> - # Fix the linking with boringssl . <nl> - if TARGET_PLATFORM = = ' linux ' : <nl> - execute ( [ ' chrpath ' , ' - r ' , ' $ ORIGIN ' , dest ] ) <nl> - elif TARGET_PLATFORM = = ' darwin ' : <nl> - shutil . copy2 ( os . path . join ( CHROMIUM_DIR , ' libboringssl . dylib ' ) , DIST_DIR ) <nl> - execute ( [ ' install_name_tool ' , ' - change ' , <nl> - ' / usr / local / lib / libboringssl . dylib ' , <nl> - ' @ loader_path / libboringssl . dylib ' , <nl> - dest ] ) <nl> - <nl> <nl> def copy_license ( ) : <nl> shutil . copy2 ( os . path . join ( SOURCE_ROOT , ' LICENSE ' ) , DIST_DIR ) <nl> def create_chromedriver_zip ( ) : <nl> with scoped_cwd ( DIST_DIR ) : <nl> files = [ ' LICENSE ' ] <nl> if TARGET_PLATFORM = = ' win32 ' : <nl> - files + = [ ' chromedriver . exe ' , ' boringssl . dll ' ] <nl> - elif TARGET_PLATFORM = = ' darwin ' : <nl> - files + = [ ' chromedriver ' , ' libboringssl . dylib ' ] <nl> - elif TARGET_PLATFORM = = ' linux ' : <nl> - files + = [ ' chromedriver ' , ' libboringssl . so ' ] <nl> + files + = [ ' chromedriver . exe ' ] <nl> + else : <nl> + files + = [ ' chromedriver ' ] <nl> make_zip ( zip_file , files , [ ] ) <nl> <nl> <nl> mmm a / script / lib / config . py <nl> ppp b / script / lib / config . py <nl> <nl> import sys <nl> <nl> BASE_URL = ' http : / / gh - contractor - zcbenz . s3 . amazonaws . 
com / libchromiumcontent ' <nl> - LIBCHROMIUMCONTENT_COMMIT = ' 0718fa8b44e004a39ee2511858abbef1dae89cef ' <nl> + LIBCHROMIUMCONTENT_COMMIT = ' f1ad1412461ba3345a27cfe935ffc872dba0ac5b ' <nl> <nl> ARCH = { <nl> ' cygwin ' : ' 32bit ' , <nl> mmm a / vendor / brightray <nl> ppp b / vendor / brightray <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit 0f37e5fa4d5273f80c91d485e0ce6edef683a26e <nl> + Subproject commit 14b4dc7151ded676d7133f0da17033149a4954df <nl>
Don ' t link with shared_library of boringssl in Release mode
electron/electron
dfa6fedaedfa12dd3c510f149af95c5a7e55f9f0
2015-04-10T11:00:06Z
mmm a / tools / dockerfile / grpc_go / Dockerfile <nl> ppp b / tools / dockerfile / grpc_go / Dockerfile <nl> FROM golang : 1 . 4 <nl> <nl> # Get the source from GitHub <nl> RUN go get google . golang . org / grpc <nl> + RUN go get golang . org / x / oauth2 <nl> + RUN go get google . golang . org / cloud <nl> <nl> # Add a service_account directory containing the auth creds file <nl> ADD service_account service_account <nl> new file mode 100755 <nl> index 00000000000 . . a3887c731b2 <nl> mmm / dev / null <nl> ppp b / tools / dockerfile / grpc_go / build . sh <nl> <nl> + # ! / bin / bash <nl> + # Copyright 2015 , Google Inc . <nl> + # All rights reserved . <nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions are <nl> + # met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . <nl> + # * Redistributions in binary form must reproduce the above <nl> + # copyright notice , this list of conditions and the following disclaimer <nl> + # in the documentation and / or other materials provided with the <nl> + # distribution . <nl> + # * Neither the name of Google Inc . nor the names of its <nl> + # contributors may be used to endorse or promote products derived from <nl> + # this software without specific prior written permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + cp - R / var / local / git - clone / grpc - go / . / go / <nl> + go get golang . org / x / oauth2 <nl> + go get google . golang . org / cloud <nl> + cd src / google . golang . org / grpc / interop / client & & go install <nl> + cd src / google . golang . org / grpc / interop / server & & go install <nl>
Merge pull request from donnadionne / interop_nrp
grpc/grpc
d3b078782b0feac2bef947feac0219a6eb285c93
2015-08-12T20:17:25Z
mmm a / dbms / src / Server / Server . cpp <nl> ppp b / dbms / src / Server / Server . cpp <nl> int Server : : main ( const std : : vector < std : : string > & args ) <nl> if ( config ( ) . has ( " replica_name " ) ) <nl> global_context - > setDefaultReplicaName ( config ( ) . getString ( " replica_name " ) ) ; <nl> <nl> - std : : string users_config_path = config ( ) . getString ( " users_config " , config ( ) . getString ( " config - file " , " config ( ) . xml " ) ) ; <nl> + std : : string users_config_path = config ( ) . getString ( " users_config " , config ( ) . getString ( " config - file " , " config . xml " ) ) ; <nl> users_config_reloader = new UsersConfigReloader ( users_config_path , global_context ) ; <nl> <nl> / / / Maximum number of simultaneously running queries . <nl>
dbms : fixed default users config name . [ # METR - 2807 ]
ClickHouse/ClickHouse
19a037c8a4946e5eed5747ada59e15b9eca2ecaf
2014-05-14T14:19:58Z
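The one-character bug here is in the fallback literal: with neither `users_config` nor `config-file` set, the server looked for a file literally named `config().xml`. A sketch of the same two-level fallback, with a plain map standing in for the layered configuration object (assumed shape, not the real API):

```cpp
#include <map>
#include <string>

// Layered lookup mirroring
//   config().getString("users_config", config().getString("config-file", "config.xml")):
// users_config wins, then config-file, then the hard-coded default.
std::string get_users_config_path(
    const std::map<std::string, std::string>& cfg) {
    auto users = cfg.find("users_config");
    if (users != cfg.end()) return users->second;
    auto file = cfg.find("config-file");
    if (file != cfg.end()) return file->second;
    return "config.xml";  // the default the commit repairs
}
```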
mmm a / db / module . cpp <nl> ppp b / db / module . cpp <nl> <nl> <nl> namespace mongo { <nl> <nl> - std : : list < Module * > Module : : _all ; <nl> + std : : list < Module * > * Module : : _all ; <nl> <nl> Module : : Module ( const string & name ) <nl> : _name ( name ) , _options ( ( string ) " Module " + name + " options " ) { <nl> - / / if ( ! allModules ) <nl> - / / allModules = new list < Module * > ( ) ; <nl> - _all . push_back ( this ) ; <nl> + if ( ! _all ) <nl> + _all = new list < Module * > ( ) ; <nl> + _all - > push_back ( this ) ; <nl> } <nl> <nl> Module : : ~ Module ( ) { } <nl> <nl> void Module : : addOptions ( program_options : : options_description & options ) { <nl> - for ( list < Module * > : : iterator i = _all . begin ( ) ; i ! = _all . end ( ) ; i + + ) { <nl> + for ( list < Module * > : : iterator i = _all - > begin ( ) ; i ! = _all - > end ( ) ; i + + ) { <nl> Module * m = * i ; <nl> options . add ( m - > _options ) ; <nl> } <nl> } <nl> <nl> void Module : : configAll ( program_options : : variables_map & params ) { <nl> - for ( list < Module * > : : iterator i = _all . begin ( ) ; i ! = _all . end ( ) ; i + + ) { <nl> + for ( list < Module * > : : iterator i = _all - > begin ( ) ; i ! = _all - > end ( ) ; i + + ) { <nl> Module * m = * i ; <nl> m - > config ( params ) ; <nl> } <nl> namespace mongo { <nl> <nl> <nl> void Module : : initAll ( ) { <nl> - for ( list < Module * > : : iterator i = _all . begin ( ) ; i ! = _all . end ( ) ; i + + ) { <nl> + for ( list < Module * > : : iterator i = _all - > begin ( ) ; i ! = _all - > end ( ) ; i + + ) { <nl> Module * m = * i ; <nl> m - > init ( ) ; <nl> } <nl> mmm a / db / module . h <nl> ppp b / db / module . h <nl> namespace mongo { <nl> static void initAll ( ) ; <nl> <nl> private : <nl> - static std : : list < Module * > _all ; <nl> + static std : : list < Module * > * _all ; <nl> string _name ; <nl> program_options : : options_description _options ; <nl> } ; <nl>
fix linux static init . will have to look at windows more
mongodb/mongo
b6e7aafcd4d855de40c5c8d76342417b1d544018
2009-11-18T22:08:58Z
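The crash class being fixed is the static initialization order fiasco: Module instances defined as globals in other translation units may be constructed before the static _all list itself, so the fix registers into a list allocated on first use. The classic construct-on-first-use idiom, sketched with hypothetical names:

```cpp
#include <list>
#include <string>

class Module {
public:
    explicit Module(const std::string& name) : _name(name) {
        all().push_back(this);  // safe even during static initialization
    }

    // Construct-on-first-use: the function-local static is initialized
    // the first time control passes through, so registration from other
    // translation units' static constructors can never observe an
    // uninitialized registry. The list is deliberately never deleted,
    // which also sidesteps destruction-order problems at shutdown.
    static std::list<Module*>& all() {
        static std::list<Module*>* registry = new std::list<Module*>();
        return *registry;
    }

private:
    std::string _name;
};

// e.g. a module registered at static-initialization time in another file:
static Module authModule("auth");
```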